code | repo_name | path | language | license | size
---|---|---|---|---|---
package edu.iastate.cs.pal
/**
* Danilo Dominguez Perez
*
* Iowa State University
* Department of Computer Science
* Program Analysis Laboratory
*
*/
object AST {
sealed abstract class Expr
// Numbers are int or float
sealed abstract class Number extends Expr
case class IntNumber(number: Int) extends Number {
override def toString() = number.toString
}
case class FloatNumber(number: Float) extends Number {
override def toString() = number.toString
}
case class Var(name: String) extends Expr {
override def toString() = name
}
case class Quote(expr: Expr) extends Expr {
override def toString() = "(quote " + expr.toString + ")"
}
case class IfTest(test: Expr, conseq: Expr, alt: Expr) extends Expr {
override def toString() = "(if " + test.toString + " " + conseq.toString + " " + alt.toString + ")"
}
case class Define(varName: Var, expr: Expr) extends Expr {
override def toString() = "(define " + varName.toString() + " " + expr.toString + ")"
}
case class Lambda(args: List[Var], body: Expr) extends Expr {
override def toString() = "(lambda " + args.toString + " " + body.toString + ")"
}
case class Proc(app: Expr, args: List[Expr]) extends Expr {
override def toString() = "(" + app.toString() + " " + args.toString + ")"
}
}
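/**
 * Hypothetical usage sketch (not part of the original file): builds a small
 * definition node and prints its s-expression form via the toString overrides above.
 */
object ASTExample {
  def main(args: Array[String]): Unit = {
    val definition = AST.Define(AST.Var("x"), AST.IntNumber(42))
    println(definition) // prints: (define x 42)
  }
}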
| danilo04/scheme-compiler | src/main/scala/edu/iastate/cs/pal/AST.scala | Scala | agpl-3.0 | 1,318 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.ws2.fsm
import io.gatling.core.action.Action
import io.gatling.core.akka.BaseActor
import io.gatling.core.session.Session
import io.gatling.http.action.ws2.{ WsCheck, WsCheckSequence }
import akka.actor.FSM
import org.asynchttpclient.ws.WebSocket
sealed trait WsActorState
case object Init extends WsActorState
case object Connecting extends WsActorState
case object PerformingCheck extends WsActorState
case object Idle extends WsActorState
case object Closing extends WsActorState
case object Crashed extends WsActorState
sealed trait WsActorData
case object InitData extends WsActorData
case class ConnectingData(session: Session, next: Either[Action, SendTextMessage], timestamp: Long, remainingTries: Int) extends WsActorData
case class PerformingCheckData(
webSocket: WebSocket,
currentCheck: WsCheck,
remainingChecks: List[WsCheck],
checkSequenceStart: Long,
checkSequenceTimeoutId: Long,
remainingCheckSequences: List[WsCheckSequence],
session: Session,
next: Either[Action, SendTextMessage]
) extends WsActorData
case class IdleData(session: Session, webSocket: WebSocket) extends WsActorData
case class ClosingData(actionName: String, session: Session, next: Action, timestamp: Long) extends WsActorData
case class CrashedData(errorMessage: Option[String]) extends WsActorData
class WsActorFSM extends BaseActor with FSM[WsActorState, WsActorData]
| wiacekm/gatling | gatling-http/src/main/scala/io/gatling/http/action/ws2/fsm/WsActorFSM.scala | Scala | apache-2.0 | 2,118 |
package testfeature
import feature._
import org.scalatest.FunSuite
/**
* Created by prussell on 9/30/16.
*/
class RegionEqualitySuite extends FunSuite {
equalityBlock()
equalityBlockSet()
equalityEmpty()
def equalityEmpty(): Unit = {
test("Empty equality") {
assert(Empty === Empty, "Empty should be equal to Empty")
}
}
def equalityBlock(): Unit = {
test("Block equality") {
assert(chr1_1000_2000_plus_1 === chr1_1000_2000_plus_3, "Blocks with same span and same name should be equal")
assert(!(Block("1", 1000, 2000, Plus) === Block("2", 1000, 2000, Plus)), "Blocks with different chromosomes should not be equal")
assert(!(Block("1", 1000, 2000, Plus) === Block("1", 1000, 2000, Unstranded)), "Blocks with different strand should not be equal")
}
}
def equalityBlockSet(): Unit = {
test("BlockSet equality") {
assert(BlockSet(List(
Block("1", 100, 200, Plus),
Block("1", 300, 400, Plus)
)) === BlockSet(List(
Block("1", 100, 200, Plus),
Block("1", 300, 400, Plus)
)), "Block set equality")
assert(BlockSet(List(
Block("1", 100, 200, Plus),
Block("1", 300, 400, Plus)
)) != BlockSet(List(
Block("2", 100, 200, Plus),
Block("2", 300, 400, Plus)
)), "Block sets with different chromosomes should not be equal")
assert(BlockSet(List(
Block("1", 100, 200, Plus),
Block("1", 300, 400, Plus)
)) != BlockSet(List(
Block("1", 100, 200, Unstranded),
Block("1", 300, 400, Unstranded)
)), "Block sets with different strands should not be equal")
assert(BlockSet(List(
Block("1", 100, 200, Plus),
Block("1", 300, 400, Plus)
)) != BlockSet(List(
Block("1", 100, 200, Plus),
Block("1", 300, 401, Plus)
)), "Block sets with different blocks should not be equal")
assert(BlockSet(List(
Block("1", 100, 200, Plus),
Block("1", 300, 400, Plus)
)) != BlockSet(List(
Block("1", 100, 200, Plus),
Block("1", 300, 400, Plus),
Block("1", 500, 600, Plus)
)), "Block sets with different blocks should not be equal")
}
}
}
| pamelarussell/sgxlib | src/test/scala/testfeature/RegionEqualitySuite.scala | Scala | mit | 2,234 |
package org.atnos.site
object Installation extends UserGuidePage { def is = "Installation".title ^ s2"""
You add `eff` as an sbt dependency:
```scala
libraryDependencies += "org.atnos" %% "eff" % "$version"
// to write types like Reader[String, *]
addCompilerPlugin("org.typelevel" %% "kind-projector" % "0.13.2" cross CrossVersion.full)
```
To get types like `Reader[String, *]` (with more than one type parameter) correctly inferred, you'll have to use the following compiler option
```scala
scalacOptions += "-Ypartial-unification"
```
##### Additional dependencies
This table lists the other available eff modules:
Name | Functionality
----------------- | ----------------------------------------------------
`eff-scalaz` | if you want to use [Scalaz](https://github.com/scalaz/scalaz) as a library for functional programming. This gives you a Scalaz `Monad` instance for `Eff` and a Scalaz `\\/` effect
`eff-monix` | to use Monix's `Task` effect
`eff-cats-effect` | to use cats-effect's `IO` effect
`eff-twitter` | to use Twitter's `Future` effect
`eff-doobie` | to use Doobie's `ConnectionIO` effect
<p/>
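For example, to use the Monix integration you would add the corresponding module alongside the core dependency (the artifact name here is an assumption based on the module list above, so double-check it against the published artifacts):
```scala
libraryDependencies += "org.atnos" %% "eff-monix" % "$version"
```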
### Imports
#### Main types
The main `eff` types: `Eff`, `Member`, `Fx` are accessible in the `org.atnos.eff` package:
```scala
import org.atnos.eff._
```
Many other effects are also available $OutOfTheBox.
#### Creating effects
The functions used to create effects are grouped under different objects named after the effect type. For example if you want to create the `Eval` effect you need to
import:
```scala
import org.atnos.eff.eval._
```
You can also import most of the effects at once with:
```scala
import org.atnos.eff.all._
```
The only effects not included in the previous import are:
- the `Error` effect. This effect requires a type parameter representing the "failure" type and must be provided by the user of the library
- the `Future` effect. This effect shares some operations like `runAsync` with other "async" effects like monix's `TaskEffect`
and the import could clash with `import org.atnos.eff.addon.monix.task._`
#### Interpreting effects
Interpreting effects usually requires some syntax to "run" a given effect. For example to "run" the `Option` effect you will import:
```scala
// to create the effect
import org.atnos.eff.option._
// to access the runOption method
import org.atnos.eff.syntax.option._
fromOption(Option(1)).runOption
```
You can also access all the syntax imports at once with:
```scala
import org.atnos.eff.syntax.all._
```
#### IntelliJ support
IntelliJ error highlighting doesn't support implicit-directed type inference yet; check https://youtrack.jetbrains.com/issue/SCL-11140 or https://youtrack.jetbrains.com/issue/SCL-10753 for progress.
#### With Scalaz
If you use Scalaz as your functional programming library you might need additional imports in order to use some creation
methods specific to Scalaz. For example:
```scala
import org.atnos.eff.addon.scalaz.either._
fromDisjunction(\\/-(1))
```
There is also an `all` object importing all those methods at once:
```scala
import org.atnos.eff.addon.scalaz.all._
fromDisjunction(\\/-(1))
```
And as you can already guess, there are syntax imports following the same pattern:
```scala
import org.atnos.eff.addon.scalaz.either._
import org.atnos.eff.addon.scalaz.syntax.either._
fromDisjunction(\\/-(1)).runDisjunction
```
"""
}
| atnos-org/eff | src/test/scala/org/atnos/site/Installation.scala | Scala | mit | 3,420 |
import scalaz._, Scalaz._
package object asmfreef {
object algebra {
sealed trait AsmF[F[_]] extends Applicative[F] {
def push(v: Int): F[Unit]
def pop: F[Int]
def sum: F[Int]
def mul: F[Int]
}
}
object dsl {
import algebra._
sealed trait Dsl[A] {
def apply[F[_]: AsmF]: F[A]
}
def push(v: Int): Dsl[Unit] = new Dsl[Unit] { def apply[F[_]: AsmF] = implicitly[AsmF[F]].push(v) }
def pop: Dsl[Int] = new Dsl[Int] { def apply[F[_]: AsmF] = implicitly[AsmF[F]].pop }
def sum: Dsl[Int] = new Dsl[Int] { def apply[F[_]: AsmF] = implicitly[AsmF[F]].sum }
def mul: Dsl[Int] = new Dsl[Int] { def apply[F[_]: AsmF] = implicitly[AsmF[F]].mul }
implicit val ApplicativeDsl: Applicative[Dsl] = new Applicative[Dsl] {
def point[A](a: => A): Dsl[A] = new Dsl[A] { def apply[F[_]: AsmF] = a.point[F] }
def ap[A, B](fa: => Dsl[A])(f: => Dsl[A => B]): Dsl[B] = new Dsl[B] {
def apply[F[_]: AsmF] = fa.apply[F] <*> f.apply[F]
}
}
}
object printer {
import algebra._
import dsl._
def print[A](p: Dsl[A]): String = {
type Printer[A] = String
p.apply(new AsmF[Printer] {
def point[A](a: => A): Printer[A] = ""
def ap[A, B](fa: => Printer[A])(f: => Printer[A => B]): Printer[B] = f + fa
def push(v: Int) = "push " + v + "\\n"
def pop = "pop\\n"
def sum = "sum\\n"
def mul = "mul\\n"
})
}
}
object evaluator {
import algebra._
import dsl._
def evaluate[A](p: Dsl[A]): A = {
type EvaluatorState = List[Int]
type Evaluator[A] = State[List[Int], A]
p.apply(new AsmF[Evaluator] {
def point[A](a: => A): Evaluator[A] = a.point[Evaluator]
def ap[A, B](fa: => Evaluator[A])(f: => Evaluator[A => B]): Evaluator[B] = fa <*> f
def push(v: Int) = State.modify[EvaluatorState](v :: _)
def pop = State.get[EvaluatorState].map(_.head)
def sum = for {
l <- State.get[EvaluatorState]
(x1 :: x2 :: Nil, ys) = l.splitAt(2)
v = x1 + x2
_ <- State.put[EvaluatorState](v :: ys)
} yield v
def mul = for {
l <- State.get[EvaluatorState]
(x1 :: x2 :: Nil, ys) = l.splitAt(2)
v = x1 * x2
_ <- State.put[EvaluatorState](v :: ys)
} yield v
}).eval(Nil)
}
}
object example {
import algebra._
import dsl._
def program =
push(1) *>
push(2) *>
sum *>
push(9) *>
mul
lazy val printed = printer.print(program)
lazy val evaled = evaluator.evaluate(program)
}
} | jdegoes/scalaworld-2015 | src/main/scala/asmfreef.scala | Scala | apache-2.0 | 2,671 |
package intellijhocon.highlight
import com.intellij.openapi.editor.colors.TextAttributesKey
import com.intellij.openapi.editor.{DefaultLanguageHighlighterColors => DLHC, HighlighterColors}
import com.intellij.codeInsight.daemon.impl.HighlightInfoType
object HoconHighlighterColors {
final val BadCharacter = key("HOCON_BAD_CHARACTER", HighlighterColors.BAD_CHARACTER)
final val HashComment = key("HOCON_HASH_COMMENT", DLHC.LINE_COMMENT)
final val DoubleSlashComment = key("HOCON_DOUBLE_SLASH_COMMENT", DLHC.LINE_COMMENT)
final val Null = key("HOCON_NULL", DLHC.KEYWORD)
final val Boolean = key("HOCON_BOOLEAN", DLHC.KEYWORD)
final val Number = key("HOCON_NUMBER", DLHC.NUMBER)
final val QuotedString = key("HOCON_QUOTED_STRING", DLHC.STRING)
final val MultilineString = key("HOCON_MULTILINE_STRING", DLHC.STRING)
final val ValidStringEscape = key("HOCON_VALID_STRING_ESCAPE", DLHC.VALID_STRING_ESCAPE)
final val InvalidStringEscape = key("HOCON_INVALID_STRING_ESCAPE", DLHC.INVALID_STRING_ESCAPE)
final val Brackets = key("HOCON_BRACKETS", DLHC.BRACKETS)
final val Braces = key("HOCON_OBJECT_BRACES", DLHC.BRACES)
final val IncludeModifierParens = key("HOCON_INCLUDE_MODIFIER_PARENS", DLHC.PARENTHESES)
final val SubBraces = key("HOCON_SUBSTITUTION_BRACES", DLHC.BRACES)
final val PathValueSeparator = key("HOCON_PATH_VALUE_SEPARATOR", DLHC.OPERATION_SIGN)
final val Comma = key("HOCON_COMMA", DLHC.COMMA)
final val Include = key("HOCON_INCLUDE", DLHC.KEYWORD)
final val IncludeModifier = key("HOCON_INCLUDE_MODIFIER", HighlightInfoType.STATIC_METHOD.getAttributesKey)
final val SubstitutionSign = key("HOCON_SUBSTITUTION_SIGN", DLHC.OPERATION_SIGN)
final val OptionalSubstitutionSign = key("HOCON_OPTIONAL_SUBSTITUTION_SIGN", DLHC.OPERATION_SIGN)
final val UnquotedString = key("HOCON_UNQUOTED_STRING", DLHC.IDENTIFIER)
final val PathSeparator = key("PATH_SEPARATOR", DLHC.DOT)
final val FieldKey = key("FIELD_KEY", DLHC.INSTANCE_METHOD)
final val SubstitutionKey = key("SUBSTITUTION_KEY", DLHC.INSTANCE_FIELD)
private def key(name: String, prototype: TextAttributesKey) =
TextAttributesKey.createTextAttributesKey(name, prototype)
}
| consulo/consulo-scala | intellij-hocon/src/main/scala/intellijhocon/highlight/HoconHighlighterColors.scala | Scala | apache-2.0 | 2,193 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
/**
* A client that communicates with the cluster manager to request or kill executors.
* This is currently supported only in YARN mode.
*/
private[spark] trait ExecutorAllocationClient {
/** Get the list of currently active executors */
private[spark] def getExecutorIds(): Seq[String]
/**
* Update the cluster manager on our scheduling needs. Three bits of information are included
* to help it make decisions.
* @param numExecutors The total number of executors we'd like to have. The cluster manager
* shouldn't kill any running executor to reach this number, but,
* if all existing executors were to die, this is the number of executors
* we'd want to be allocated.
* @param localityAwareTasks The number of tasks in all active stages that have a locality
* preferences. This includes running, pending, and completed tasks.
* @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
* that would like to run on that host.
* This includes running, pending, and completed tasks.
* @return whether the request is acknowledged by the cluster manager.
*/
private[spark] def requestTotalExecutors(
numExecutors: Int,
localityAwareTasks: Int,
hostToLocalTaskCount: Map[String, Int]): Boolean
/**
* Request an additional number of executors from the cluster manager.
* @return whether the request is acknowledged by the cluster manager.
*/
def requestExecutors(numAdditionalExecutors: Int): Boolean
/**
* Request that the cluster manager kill the specified executors.
*
* When asking the executor to be replaced, the executor loss is considered a failure, and
* killed tasks that are running on the executor will count towards the failure limits. If no
* replacement is being requested, then the tasks will not count towards the limit.
*
* @param executorIds identifiers of executors to kill
* @param replace whether to replace the killed executors with new ones, default false
* @param force whether to force kill busy executors, default false
* @return the ids of the executors acknowledged by the cluster manager to be removed.
*/
def killExecutors(
executorIds: Seq[String],
replace: Boolean = false,
force: Boolean = false): Seq[String]
/**
* Request that the cluster manager kill every executor on the specified host.
* Results in a call to killExecutors for each executor on the host, with the replace
* and force arguments set to true.
* @return whether the request is acknowledged by the cluster manager.
*/
def killExecutorsOnHost(host: String): Boolean
/**
* Request that the cluster manager kill the specified executor.
* @return whether the request is acknowledged by the cluster manager.
*/
def killExecutor(executorId: String): Boolean = {
val killedExecutors = killExecutors(Seq(executorId))
killedExecutors.nonEmpty && killedExecutors(0).equals(executorId)
}
}
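/**
 * Hypothetical usage sketch (not part of the original Spark source): illustrates how a
 * component holding an [[ExecutorAllocationClient]] might combine the calls declared above.
 * The host names and executor id are made up for illustration.
 */
private[spark] object ExecutorAllocationClientExample {
  def rescale(client: ExecutorAllocationClient): Unit = {
    // Ask the cluster manager for 10 executors in total, hinting that 4 locality-aware
    // tasks would prefer to run on these two hosts.
    client.requestTotalExecutors(
      numExecutors = 10,
      localityAwareTasks = 4,
      hostToLocalTaskCount = Map("host1" -> 3, "host2" -> 1))
    // Kill one executor without requesting a replacement and without forcing it.
    client.killExecutors(Seq("exec-7"))
  }
}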
| jianran/spark | core/src/main/scala/org/apache/spark/ExecutorAllocationClient.scala | Scala | apache-2.0 | 3,966 |
package com.twitter.zipkin.storage.redis
import com.twitter.finagle.redis.Client
import com.twitter.util.{Duration, Future}
import org.jboss.netty.buffer.ChannelBuffer
trait ExpirationSupport {
val client: Client
/** Default duration after which keys expire, if defined. */
val defaultTtl: Option[Duration]
def expireOnTtl(redisKey: ChannelBuffer, ttl: Option[Duration] = defaultTtl): Future[Unit] = {
if (ttl.isDefined) client.expire(redisKey, ttl.get.inLongSeconds).unit else Future.Unit
}
}
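/**
 * Hypothetical mixin sketch (not part of the original file): a storage component supplies
 * the Redis client and an optional default TTL, then refreshes expiry after each write.
 */
class ExampleExpiringIndex(val client: Client, val defaultTtl: Option[Duration]) extends ExpirationSupport {
  def refresh(key: ChannelBuffer): Future[Unit] = expireOnTtl(key)
}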
| c3p0hz/zipkin | zipkin-redis/src/main/scala/com/twitter/zipkin/storage/redis/ExpirationSupport.scala | Scala | apache-2.0 | 501 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.tree.impl
import scala.collection.mutable
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.classification.DecisionTreeClassificationModel
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.tree._
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.tree.{DecisionTreeSuite => OldDTSuite, EnsembleTestHelper}
import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo, QuantileStrategy, Strategy => OldStrategy}
import org.apache.spark.mllib.tree.impurity.{Entropy, Gini, GiniCalculator, Variance}
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.util.collection.OpenHashMap
/**
* Test suite for [[RandomForest]].
*/
class RandomForestSuite extends SparkFunSuite with MLlibTestSparkContext {
import RandomForestSuite.mapToVec
/////////////////////////////////////////////////////////////////////////////
// Tests for split calculation
/////////////////////////////////////////////////////////////////////////////
test("Binary classification with continuous features: split calculation") {
val arr = OldDTSuite.generateOrderedLabeledPointsWithLabel1().map(_.asML)
assert(arr.length === 1000)
val rdd = sc.parallelize(arr)
val strategy = new OldStrategy(OldAlgo.Classification, Gini, 3, 2, 100)
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(!metadata.isUnordered(featureIndex = 0))
val splits = RandomForest.findSplits(rdd, metadata, seed = 42)
assert(splits.length === 2)
assert(splits(0).length === 99)
}
test("Binary classification with binary (ordered) categorical features: split calculation") {
val arr = OldDTSuite.generateCategoricalDataPoints().map(_.asML)
assert(arr.length === 1000)
val rdd = sc.parallelize(arr)
val strategy = new OldStrategy(OldAlgo.Classification, Gini, maxDepth = 2, numClasses = 2,
maxBins = 100, categoricalFeaturesInfo = Map(0 -> 2, 1 -> 2))
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
val splits = RandomForest.findSplits(rdd, metadata, seed = 42)
assert(!metadata.isUnordered(featureIndex = 0))
assert(!metadata.isUnordered(featureIndex = 1))
assert(splits.length === 2)
// no splits pre-computed for ordered categorical features
assert(splits(0).length === 0)
}
test("Binary classification with 3-ary (ordered) categorical features," +
" with no samples for one category: split calculation") {
val arr = OldDTSuite.generateCategoricalDataPoints().map(_.asML)
assert(arr.length === 1000)
val rdd = sc.parallelize(arr)
val strategy = new OldStrategy(OldAlgo.Classification, Gini, maxDepth = 2, numClasses = 2,
maxBins = 100, categoricalFeaturesInfo = Map(0 -> 3, 1 -> 3))
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(!metadata.isUnordered(featureIndex = 0))
assert(!metadata.isUnordered(featureIndex = 1))
val splits = RandomForest.findSplits(rdd, metadata, seed = 42)
assert(splits.length === 2)
// no splits pre-computed for ordered categorical features
assert(splits(0).length === 0)
}
test("find splits for a continuous feature") {
// find splits for normal case
{
val fakeMetadata = new DecisionTreeMetadata(1, 0, 0, 0,
Map(), Set(),
Array(6), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0, 0
)
val featureSamples = Array.fill(200000)(math.random)
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
assert(splits.length === 5)
assert(fakeMetadata.numSplits(0) === 5)
assert(fakeMetadata.numBins(0) === 6)
// check returned splits are distinct
assert(splits.distinct.length === splits.length)
}
// SPARK-16957: Use midpoints for split values.
{
val fakeMetadata = new DecisionTreeMetadata(1, 0, 0, 0,
Map(), Set(),
Array(3), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0, 0
)
// possibleSplits <= numSplits
{
val featureSamples = Array(0, 1, 0, 0, 1, 0, 1, 1).map(_.toDouble)
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
val expectedSplits = Array((0.0 + 1.0) / 2)
assert(splits === expectedSplits)
}
// possibleSplits > numSplits
{
val featureSamples = Array(0, 0, 1, 1, 2, 2, 3, 3).map(_.toDouble)
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
val expectedSplits = Array((0.0 + 1.0) / 2, (2.0 + 3.0) / 2)
assert(splits === expectedSplits)
}
}
// find splits should not return identical splits
// when there are not enough split candidates, reduce the number of splits in metadata
{
val fakeMetadata = new DecisionTreeMetadata(1, 0, 0, 0,
Map(), Set(),
Array(5), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0, 0
)
val featureSamples = Array(1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3).map(_.toDouble)
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
val expectedSplits = Array((1.0 + 2.0) / 2, (2.0 + 3.0) / 2)
assert(splits === expectedSplits)
// check returned splits are distinct
assert(splits.distinct.length === splits.length)
}
// find splits when most samples close to the minimum
{
val fakeMetadata = new DecisionTreeMetadata(1, 0, 0, 0,
Map(), Set(),
Array(3), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0, 0
)
val featureSamples = Array(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5)
.map(_.toDouble)
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
val expectedSplits = Array((2.0 + 3.0) / 2, (3.0 + 4.0) / 2)
assert(splits === expectedSplits)
}
// find splits when most samples close to the maximum
{
val fakeMetadata = new DecisionTreeMetadata(1, 0, 0, 0,
Map(), Set(),
Array(2), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0, 0
)
val featureSamples = Array(0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2).map(_.toDouble)
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
val expectedSplits = Array((1.0 + 2.0) / 2)
assert(splits === expectedSplits)
}
// find splits for constant feature
{
val fakeMetadata = new DecisionTreeMetadata(1, 0, 0, 0,
Map(), Set(),
Array(3), Gini, QuantileStrategy.Sort,
0, 0, 0.0, 0, 0
)
val featureSamples = Array(0, 0, 0).map(_.toDouble)
val featureSamplesEmpty = Array.empty[Double]
val splits = RandomForest.findSplitsForContinuousFeature(featureSamples, fakeMetadata, 0)
assert(splits === Array.empty[Double])
val splitsEmpty =
RandomForest.findSplitsForContinuousFeature(featureSamplesEmpty, fakeMetadata, 0)
assert(splitsEmpty === Array.empty[Double])
}
}
test("train with empty arrays") {
val lp = LabeledPoint(1.0, Vectors.dense(Array.empty[Double]))
val data = Array.fill(5)(lp)
val rdd = sc.parallelize(data)
val strategy = new OldStrategy(OldAlgo.Regression, Gini, maxDepth = 2,
maxBins = 5)
withClue("DecisionTree requires number of features > 0," +
" but was given an empty features vector") {
intercept[IllegalArgumentException] {
RandomForest.run(rdd, strategy, 1, "all", 42L, instr = None)
}
}
}
test("train with constant features") {
val lp = LabeledPoint(1.0, Vectors.dense(0.0, 0.0, 0.0))
val data = Array.fill(5)(lp)
val rdd = sc.parallelize(data)
val strategy = new OldStrategy(
OldAlgo.Classification,
Gini,
maxDepth = 2,
numClasses = 2,
maxBins = 5,
categoricalFeaturesInfo = Map(0 -> 1, 1 -> 5))
val Array(tree) = RandomForest.run(rdd, strategy, 1, "all", 42L, instr = None)
assert(tree.rootNode.impurity === -1.0)
assert(tree.depth === 0)
assert(tree.rootNode.prediction === lp.label)
// Test with no categorical features
val strategy2 = new OldStrategy(
OldAlgo.Regression,
Variance,
maxDepth = 2,
maxBins = 5)
val Array(tree2) = RandomForest.run(rdd, strategy2, 1, "all", 42L, instr = None)
assert(tree2.rootNode.impurity === -1.0)
assert(tree2.depth === 0)
assert(tree2.rootNode.prediction === lp.label)
}
test("Multiclass classification with unordered categorical features: split calculations") {
val arr = OldDTSuite.generateCategoricalDataPoints().map(_.asML)
assert(arr.length === 1000)
val rdd = sc.parallelize(arr)
val strategy = new OldStrategy(
OldAlgo.Classification,
Gini,
maxDepth = 2,
numClasses = 100,
maxBins = 100,
categoricalFeaturesInfo = Map(0 -> 3, 1 -> 3))
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(metadata.isUnordered(featureIndex = 0))
assert(metadata.isUnordered(featureIndex = 1))
val splits = RandomForest.findSplits(rdd, metadata, seed = 42)
assert(splits.length === 2)
assert(splits(0).length === 3)
assert(metadata.numSplits(0) === 3)
assert(metadata.numBins(0) === 3)
assert(metadata.numSplits(1) === 3)
assert(metadata.numBins(1) === 3)
// Expecting 2^2 - 1 = 3 splits per feature
def checkCategoricalSplit(s: Split, featureIndex: Int, leftCategories: Array[Double]): Unit = {
assert(s.featureIndex === featureIndex)
assert(s.isInstanceOf[CategoricalSplit])
val s0 = s.asInstanceOf[CategoricalSplit]
assert(s0.leftCategories === leftCategories)
assert(s0.numCategories === 3) // for this unit test
}
// Feature 0
checkCategoricalSplit(splits(0)(0), 0, Array(0.0))
checkCategoricalSplit(splits(0)(1), 0, Array(1.0))
checkCategoricalSplit(splits(0)(2), 0, Array(0.0, 1.0))
// Feature 1
checkCategoricalSplit(splits(1)(0), 1, Array(0.0))
checkCategoricalSplit(splits(1)(1), 1, Array(1.0))
checkCategoricalSplit(splits(1)(2), 1, Array(0.0, 1.0))
}
test("Multiclass classification with ordered categorical features: split calculations") {
val arr = OldDTSuite.generateCategoricalDataPointsForMulticlassForOrderedFeatures().map(_.asML)
assert(arr.length === 3000)
val rdd = sc.parallelize(arr)
val strategy = new OldStrategy(OldAlgo.Classification, Gini, maxDepth = 2, numClasses = 100,
maxBins = 100, categoricalFeaturesInfo = Map(0 -> 10, 1 -> 10))
// 2^(10-1) - 1 > 100, so categorical features will be ordered
val metadata = DecisionTreeMetadata.buildMetadata(rdd, strategy)
assert(!metadata.isUnordered(featureIndex = 0))
assert(!metadata.isUnordered(featureIndex = 1))
val splits = RandomForest.findSplits(rdd, metadata, seed = 42)
assert(splits.length === 2)
// no splits pre-computed for ordered categorical features
assert(splits(0).length === 0)
}
/////////////////////////////////////////////////////////////////////////////
// Tests of other algorithm internals
/////////////////////////////////////////////////////////////////////////////
test("extract categories from a number for multiclass classification") {
val l = RandomForest.extractMultiClassCategories(13, 10)
assert(l.length === 3)
assert(Seq(3.0, 2.0, 0.0) === l)
}
test("Avoid aggregation on the last level") {
val arr = Array(
LabeledPoint(0.0, Vectors.dense(1.0, 0.0, 0.0)),
LabeledPoint(1.0, Vectors.dense(0.0, 1.0, 1.0)),
LabeledPoint(0.0, Vectors.dense(2.0, 0.0, 0.0)),
LabeledPoint(1.0, Vectors.dense(0.0, 2.0, 1.0)))
val input = sc.parallelize(arr)
val strategy = new OldStrategy(algo = OldAlgo.Classification, impurity = Gini, maxDepth = 1,
numClasses = 2, categoricalFeaturesInfo = Map(0 -> 3))
val metadata = DecisionTreeMetadata.buildMetadata(input, strategy)
val splits = RandomForest.findSplits(input, metadata, seed = 42)
val treeInput = TreePoint.convertToTreeRDD(input, splits, metadata)
val baggedInput = BaggedPoint.convertToBaggedRDD(treeInput, 1.0, 1, withReplacement = false)
val topNode = LearningNode.emptyNode(nodeIndex = 1)
assert(topNode.isLeaf === false)
assert(topNode.stats === null)
val nodesForGroup = Map((0, Array(topNode)))
val treeToNodeToIndexInfo = Map((0, Map(
(topNode.id, new RandomForest.NodeIndexInfo(0, None))
)))
val nodeStack = new mutable.Stack[(Int, LearningNode)]
RandomForest.findBestSplits(baggedInput, metadata, Map(0 -> topNode),
nodesForGroup, treeToNodeToIndexInfo, splits, nodeStack)
// don't enqueue leaf nodes into node queue
assert(nodeStack.isEmpty)
// set impurity and predict for topNode
assert(topNode.stats !== null)
assert(topNode.stats.impurity > 0.0)
// set impurity and predict for child nodes
assert(topNode.leftChild.get.toNode.prediction === 0.0)
assert(topNode.rightChild.get.toNode.prediction === 1.0)
assert(topNode.leftChild.get.stats.impurity === 0.0)
assert(topNode.rightChild.get.stats.impurity === 0.0)
}
test("Avoid aggregation if impurity is 0.0") {
val arr = Array(
LabeledPoint(0.0, Vectors.dense(1.0, 0.0, 0.0)),
LabeledPoint(1.0, Vectors.dense(0.0, 1.0, 1.0)),
LabeledPoint(0.0, Vectors.dense(2.0, 0.0, 0.0)),
LabeledPoint(1.0, Vectors.dense(0.0, 2.0, 1.0)))
val input = sc.parallelize(arr)
val strategy = new OldStrategy(algo = OldAlgo.Classification, impurity = Gini, maxDepth = 5,
numClasses = 2, categoricalFeaturesInfo = Map(0 -> 3))
val metadata = DecisionTreeMetadata.buildMetadata(input, strategy)
val splits = RandomForest.findSplits(input, metadata, seed = 42)
val treeInput = TreePoint.convertToTreeRDD(input, splits, metadata)
val baggedInput = BaggedPoint.convertToBaggedRDD(treeInput, 1.0, 1, withReplacement = false)
val topNode = LearningNode.emptyNode(nodeIndex = 1)
assert(topNode.isLeaf === false)
assert(topNode.stats === null)
val nodesForGroup = Map((0, Array(topNode)))
val treeToNodeToIndexInfo = Map((0, Map(
(topNode.id, new RandomForest.NodeIndexInfo(0, None))
)))
val nodeStack = new mutable.Stack[(Int, LearningNode)]
RandomForest.findBestSplits(baggedInput, metadata, Map(0 -> topNode),
nodesForGroup, treeToNodeToIndexInfo, splits, nodeStack)
// don't enqueue a node into node queue if its impurity is 0.0
assert(nodeStack.isEmpty)
// set impurity and predict for topNode
assert(topNode.stats !== null)
assert(topNode.stats.impurity > 0.0)
// set impurity and predict for child nodes
assert(topNode.leftChild.get.toNode.prediction === 0.0)
assert(topNode.rightChild.get.toNode.prediction === 1.0)
assert(topNode.leftChild.get.stats.impurity === 0.0)
assert(topNode.rightChild.get.stats.impurity === 0.0)
}
test("Use soft prediction for binary classification with ordered categorical features") {
// The following dataset is set up such that the best split is {1} vs. {0, 2}.
// If the hard prediction is used to order the categories, then {0} vs. {1, 2} is chosen.
val arr = Array(
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(1.0, Vectors.dense(0.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(2.0)),
LabeledPoint(0.0, Vectors.dense(2.0)),
LabeledPoint(0.0, Vectors.dense(2.0)),
LabeledPoint(1.0, Vectors.dense(2.0)))
val input = sc.parallelize(arr)
// Must set maxBins s.t. the feature will be treated as an ordered categorical feature.
val strategy = new OldStrategy(algo = OldAlgo.Classification, impurity = Gini, maxDepth = 1,
numClasses = 2, categoricalFeaturesInfo = Map(0 -> 3), maxBins = 3)
val model = RandomForest.run(input, strategy, numTrees = 1, featureSubsetStrategy = "all",
seed = 42, instr = None).head
model.rootNode match {
case n: InternalNode => n.split match {
case s: CategoricalSplit =>
assert(s.leftCategories === Array(1.0))
case _ => throw new AssertionError("model.rootNode.split was not a CategoricalSplit")
}
case _ => throw new AssertionError("model.rootNode was not an InternalNode")
}
}
test("Second level node building with vs. without groups") {
val arr = OldDTSuite.generateOrderedLabeledPoints().map(_.asML)
assert(arr.length === 1000)
val rdd = sc.parallelize(arr)
// For tree with 1 group
val strategy1 =
new OldStrategy(OldAlgo.Classification, Entropy, 3, 2, 100, maxMemoryInMB = 1000)
// For tree with multiple groups
val strategy2 =
new OldStrategy(OldAlgo.Classification, Entropy, 3, 2, 100, maxMemoryInMB = 0)
val tree1 = RandomForest.run(rdd, strategy1, numTrees = 1, featureSubsetStrategy = "all",
seed = 42, instr = None).head
val tree2 = RandomForest.run(rdd, strategy2, numTrees = 1, featureSubsetStrategy = "all",
seed = 42, instr = None).head
def getChildren(rootNode: Node): Array[InternalNode] = rootNode match {
case n: InternalNode =>
assert(n.leftChild.isInstanceOf[InternalNode])
assert(n.rightChild.isInstanceOf[InternalNode])
Array(n.leftChild.asInstanceOf[InternalNode], n.rightChild.asInstanceOf[InternalNode])
case _ => throw new AssertionError("rootNode was not an InternalNode")
}
// Single group second level tree construction.
val children1 = getChildren(tree1.rootNode)
val children2 = getChildren(tree2.rootNode)
// Verify whether the splits obtained using single group and multiple group level
// construction strategies are the same.
for (i <- 0 until 2) {
assert(children1(i).gain > 0)
assert(children2(i).gain > 0)
assert(children1(i).split === children2(i).split)
assert(children1(i).impurity === children2(i).impurity)
assert(children1(i).impurityStats.stats === children2(i).impurityStats.stats)
assert(children1(i).leftChild.impurity === children2(i).leftChild.impurity)
assert(children1(i).rightChild.impurity === children2(i).rightChild.impurity)
assert(children1(i).prediction === children2(i).prediction)
}
}
def binaryClassificationTestWithContinuousFeaturesAndSubsampledFeatures(strategy: OldStrategy) {
val numFeatures = 50
val arr = EnsembleTestHelper.generateOrderedLabeledPoints(numFeatures, 1000)
val rdd = sc.parallelize(arr).map(_.asML)
// Select feature subset for top nodes. Return true if OK.
def checkFeatureSubsetStrategy(
numTrees: Int,
featureSubsetStrategy: String,
numFeaturesPerNode: Int): Unit = {
val seeds = Array(123, 5354, 230, 349867, 23987)
val maxMemoryUsage: Long = 128 * 1024L * 1024L
val metadata =
DecisionTreeMetadata.buildMetadata(rdd, strategy, numTrees, featureSubsetStrategy)
seeds.foreach { seed =>
val failString = s"Failed on test with:" +
s"numTrees=$numTrees, featureSubsetStrategy=$featureSubsetStrategy," +
s" numFeaturesPerNode=$numFeaturesPerNode, seed=$seed"
val nodeStack = new mutable.Stack[(Int, LearningNode)]
val topNodes: Array[LearningNode] = new Array[LearningNode](numTrees)
Range(0, numTrees).foreach { treeIndex =>
topNodes(treeIndex) = LearningNode.emptyNode(nodeIndex = 1)
nodeStack.push((treeIndex, topNodes(treeIndex)))
}
val rng = new scala.util.Random(seed = seed)
val (nodesForGroup: Map[Int, Array[LearningNode]],
treeToNodeToIndexInfo: Map[Int, Map[Int, RandomForest.NodeIndexInfo]]) =
RandomForest.selectNodesToSplit(nodeStack, maxMemoryUsage, metadata, rng)
assert(nodesForGroup.size === numTrees, failString)
assert(nodesForGroup.values.forall(_.length == 1), failString) // 1 node per tree
if (numFeaturesPerNode == numFeatures) {
// featureSubset values should all be None
assert(treeToNodeToIndexInfo.values.forall(_.values.forall(_.featureSubset.isEmpty)),
failString)
} else {
// Check number of features.
assert(treeToNodeToIndexInfo.values.forall(_.values.forall(
_.featureSubset.get.length === numFeaturesPerNode)), failString)
}
}
}
checkFeatureSubsetStrategy(numTrees = 1, "auto", numFeatures)
checkFeatureSubsetStrategy(numTrees = 1, "all", numFeatures)
checkFeatureSubsetStrategy(numTrees = 1, "sqrt", math.sqrt(numFeatures).ceil.toInt)
checkFeatureSubsetStrategy(numTrees = 1, "log2",
(math.log(numFeatures) / math.log(2)).ceil.toInt)
checkFeatureSubsetStrategy(numTrees = 1, "onethird", (numFeatures / 3.0).ceil.toInt)
val realStrategies = Array(".1", ".10", "0.10", "0.1", "0.9", "1.0")
for (strategy <- realStrategies) {
val expected = (strategy.toDouble * numFeatures).ceil.toInt
checkFeatureSubsetStrategy(numTrees = 1, strategy, expected)
}
val integerStrategies = Array("1", "10", "100", "1000", "10000")
for (strategy <- integerStrategies) {
val expected = if (strategy.toInt < numFeatures) strategy.toInt else numFeatures
checkFeatureSubsetStrategy(numTrees = 1, strategy, expected)
}
val invalidStrategies = Array("-.1", "-.10", "-0.10", ".0", "0.0", "1.1", "0")
for (invalidStrategy <- invalidStrategies) {
intercept[IllegalArgumentException]{
val metadata =
DecisionTreeMetadata.buildMetadata(rdd, strategy, numTrees = 1, invalidStrategy)
}
}
checkFeatureSubsetStrategy(numTrees = 2, "all", numFeatures)
checkFeatureSubsetStrategy(numTrees = 2, "auto", math.sqrt(numFeatures).ceil.toInt)
checkFeatureSubsetStrategy(numTrees = 2, "sqrt", math.sqrt(numFeatures).ceil.toInt)
checkFeatureSubsetStrategy(numTrees = 2, "log2",
(math.log(numFeatures) / math.log(2)).ceil.toInt)
checkFeatureSubsetStrategy(numTrees = 2, "onethird", (numFeatures / 3.0).ceil.toInt)
for (strategy <- realStrategies) {
val expected = (strategy.toDouble * numFeatures).ceil.toInt
checkFeatureSubsetStrategy(numTrees = 2, strategy, expected)
}
for (strategy <- integerStrategies) {
val expected = if (strategy.toInt < numFeatures) strategy.toInt else numFeatures
checkFeatureSubsetStrategy(numTrees = 2, strategy, expected)
}
for (invalidStrategy <- invalidStrategies) {
intercept[IllegalArgumentException]{
val metadata =
DecisionTreeMetadata.buildMetadata(rdd, strategy, numTrees = 2, invalidStrategy)
}
}
}
test("Binary classification with continuous features: subsampling features") {
val categoricalFeaturesInfo = Map.empty[Int, Int]
val strategy = new OldStrategy(algo = OldAlgo.Classification, impurity = Gini, maxDepth = 2,
numClasses = 2, categoricalFeaturesInfo = categoricalFeaturesInfo)
binaryClassificationTestWithContinuousFeaturesAndSubsampledFeatures(strategy)
}
test("Binary classification with continuous features and node Id cache: subsampling features") {
val categoricalFeaturesInfo = Map.empty[Int, Int]
val strategy = new OldStrategy(algo = OldAlgo.Classification, impurity = Gini, maxDepth = 2,
numClasses = 2, categoricalFeaturesInfo = categoricalFeaturesInfo,
useNodeIdCache = true)
binaryClassificationTestWithContinuousFeaturesAndSubsampledFeatures(strategy)
}
test("computeFeatureImportance, featureImportances") {
/* Build tree for testing, with this structure:
grandParent
left2 parent
left right
*/
val leftImp = new GiniCalculator(Array(3.0, 2.0, 1.0))
val left = new LeafNode(0.0, leftImp.calculate(), leftImp)
val rightImp = new GiniCalculator(Array(1.0, 2.0, 5.0))
val right = new LeafNode(2.0, rightImp.calculate(), rightImp)
val parent = TreeTests.buildParentNode(left, right, new ContinuousSplit(0, 0.5))
val parentImp = parent.impurityStats
val left2Imp = new GiniCalculator(Array(1.0, 6.0, 1.0))
val left2 = new LeafNode(0.0, left2Imp.calculate(), left2Imp)
val grandParent = TreeTests.buildParentNode(left2, parent, new ContinuousSplit(1, 1.0))
val grandImp = grandParent.impurityStats
// Test feature importance computed at different subtrees.
def testNode(node: Node, expected: Map[Int, Double]): Unit = {
val map = new OpenHashMap[Int, Double]()
TreeEnsembleModel.computeFeatureImportance(node, map)
assert(mapToVec(map.toMap) ~== mapToVec(expected) relTol 0.01)
}
// Leaf node
testNode(left, Map.empty[Int, Double])
// Internal node with 2 leaf children
val feature0importance = parentImp.calculate() * parentImp.count -
(leftImp.calculate() * leftImp.count + rightImp.calculate() * rightImp.count)
testNode(parent, Map(0 -> feature0importance))
// Full tree
val feature1importance = grandImp.calculate() * grandImp.count -
(left2Imp.calculate() * left2Imp.count + parentImp.calculate() * parentImp.count)
testNode(grandParent, Map(0 -> feature0importance, 1 -> feature1importance))
// Forest consisting of (full tree) + (internal node with 2 leafs)
val trees = Array(parent, grandParent).map { root =>
new DecisionTreeClassificationModel(root, numFeatures = 2, numClasses = 3)
.asInstanceOf[DecisionTreeModel]
}
val importances: Vector = TreeEnsembleModel.featureImportances(trees, 2)
val tree2norm = feature0importance + feature1importance
val expected = Vectors.dense((1.0 + feature0importance / tree2norm) / 2.0,
(feature1importance / tree2norm) / 2.0)
assert(importances ~== expected relTol 0.01)
}
test("normalizeMapValues") {
val map = new OpenHashMap[Int, Double]()
map(0) = 1.0
map(2) = 2.0
TreeEnsembleModel.normalizeMapValues(map)
val expected = Map(0 -> 1.0 / 3.0, 2 -> 2.0 / 3.0)
assert(mapToVec(map.toMap) ~== mapToVec(expected) relTol 0.01)
}
}
private object RandomForestSuite {
def mapToVec(map: Map[Int, Double]): Vector = {
val size = (map.keys.toSeq :+ 0).max + 1
val (indices, values) = map.toSeq.sortBy(_._1).unzip
Vectors.sparse(size, indices.toArray, values.toArray)
}
}
| aokolnychyi/spark | mllib/src/test/scala/org/apache/spark/ml/tree/impl/RandomForestSuite.scala | Scala | apache-2.0 | 27,614 |
package com.github.sekruse.wordcount.scala
import org.qcri.rheem.api._
import org.qcri.rheem.core.api.{Configuration, RheemContext}
import org.qcri.rheem.core.optimizer.ProbabilisticDoubleInterval
import org.qcri.rheem.core.plugin.Plugin
import org.qcri.rheem.java.Java
import org.qcri.rheem.spark.Spark
/**
* This app counts words in a file using Rheem via its Scala API.
*/
class WordCount(configuration: Configuration, plugin: Plugin*) {
/**
* Run the word count over a given file.
*
* @param inputUrl URL to the file
* @param wordsPerLine optional estimate of how many words there are in each line
* @return the counted words
*/
def apply(inputUrl: String,
wordsPerLine: ProbabilisticDoubleInterval = new ProbabilisticDoubleInterval(100, 10000, .8d)) = {
val rheemCtx = new RheemContext(configuration)
plugin.foreach(rheemCtx.register)
val planBuilder = new PlanBuilder(rheemCtx)
planBuilder
// Do some set up.
.withJobName(s"WordCount ($inputUrl)")
.withUdfJarsOf(this.getClass)
// Read the text file.
.readTextFile(inputUrl).withName("Load file")
// Split each line by non-word characters.
.flatMap(_.split("\\\\W+"), selectivity = wordsPerLine).withName("Split words")
// Filter empty tokens.
.filter(_.nonEmpty, selectivity = 0.99).withName("Filter empty words")
// Attach counter to each word.
.map(word => (word.toLowerCase, 1)).withName("To lower case, add counter")
// Sum up counters for every word.
.reduceByKey(_._1, (c1, c2) => (c1._1, c1._2 + c2._2)).withName("Add counters")
.withCardinalityEstimator((in: Long) => math.round(in * 0.01))
// Execute the plan and collect the results.
.collect()
}
}
/**
* Companion object for [[WordCount]].
*/
object WordCount {
def main(args: Array[String]) {
// Parse args.
if (args.isEmpty) {
println("Usage: <main class> <plugin(,plugin)*> <input file> [<words per line a..b>]")
sys.exit(1)
}
val plugins = parsePlugins(args(0))
val inputFile = args(1)
val wordsPerLine = if (args.length >= 3) parseWordsPerLine(args(2)) else null
// Set up our wordcount app.
val configuration = new Configuration
val wordCount = new WordCount(configuration, plugins: _*)
// Run the wordcount.
val words = wordCount(inputFile, wordsPerLine)
// Print results.
println(s"Found ${words.size} words:")
words.take(10).foreach(wc => println(s"${wc._2}x ${wc._1}"))
if (words.size > 10) println(s"${words.size - 10} more...")
}
/**
* Parse a comma-separated list of plugins.
*
* @param arg the list
* @return the [[Plugin]]s
*/
def parsePlugins(arg: String) = arg.split(",").map {
case "java" => Java.basicPlugin
case "spark" => Spark.basicPlugin
case other: String => sys.error(s"Unknown plugin: $other")
}
/**
* Parse an interval string shaped as `<lower>..<upper>`.
*
* @param arg the string
* @return the interval
*/
def parseWordsPerLine(arg: String): ProbabilisticDoubleInterval = {
val Array(low, high) = arg.split("""\\.\\.""").map(_.toDouble)
new ProbabilisticDoubleInterval(low, high, 0.8)
}
}
| sekruse/rheem-examples | wordcount/src/main/scala/com/github/sekruse/wordcount/scala/WordCount.scala | Scala | apache-2.0 | 3,269 |
package org.vaadin.addons.rinne
import com.vaadin.ui.{Component, CssLayout}
import org.vaadin.addons.rinne.mixins.{LayoutClickNotifierMixin, AbstractLayoutMixin}
class VCssLayout extends CssLayout with AbstractLayoutMixin with LayoutClickNotifierMixin {
private var _cssMap = Map[Component, String]()
def add[C <: Component](component: C, css: => String = null): C = {
add(component)
if (css != null) {
_cssMap += component -> css
}
component
}
def getCssForComponent(component: Component): Option[String] = _cssMap.get(component)
override def removeComponent(component: Component) = {
super.removeComponent(component)
_cssMap -= component
}
}
| LukaszByczynski/rinne | src/main/scala/org/vaadin/addons/rinne/VCssLayout.scala | Scala | apache-2.0 | 692 |
package com.geishatokyo.diffsql.parser
import scala.util.parsing.combinator.RegexParsers
/**
* Created by takezoux2 on 15/07/02.
*/
trait SkippingParsers { self: RegexParsers =>
/**
* Adds the ability to skip leading whitespace before the parser is applied.
*/
trait SkipHeadWhiteSpace extends Parser[String] {
abstract override def apply(in: Input) = {
val s = handleWhiteSpace(in.source,in.offset)
super.apply(in.drop(s - in.offset))
}
}
/**
* Parses everything up to the matching closing literal.
* Supports nested literals, escape characters, and additional literal blocks inside.
* @param openLiteral the literal that opens the block
* @param closeLiteral the literal that closes the block
* @param escapeChar escape marker; the marker and the character following it are copied verbatim (empty string disables escaping)
* @param escapeBlocks inner literal parsers (e.g. quoted strings) whose contents are copied through verbatim and ignored when matching open/close literals
*/
case class PairLiteralSkipParser(
openLiteral : String,
closeLiteral : String,
escapeChar: String = "",
escapeBlocks: List[PairLiteralSkipParser] = Nil) extends Parser[String] {
assert(openLiteral != null)
assert(openLiteral.length > 0)
assert(closeLiteral != null)
assert(closeLiteral.length > 0)
assert(escapeChar != null)
override def apply(in: Input): ParseResult[String] = {
val startStr = in.source.subSequence(in.offset,in.offset + openLiteral.length)
if(startStr != openLiteral){
return Failure(s"Not start with open literal:${openLiteral}",in)
}
val s = in.source.toString
var openLiteralCount = 1
var i = in.offset + openLiteral.length
val builder = new StringBuilder
while(i < s.length && openLiteralCount > 0) {
lazy val escape_? = s.substring(i,i + escapeChar.length)
if(escapeChar.length > 0 && escape_? == escapeChar){
builder.append(s.substring(i,i + escapeChar.length + 1))
i += escapeChar.length + 1
}else{
var processed = false
val close_? = s.substring(i,i + closeLiteral.length)
if(close_? == closeLiteral) {
processed = true
openLiteralCount -= 1
}else {
val open_? = s.substring(i, i + openLiteral.length)
if (open_? == openLiteral) {
processed = true
openLiteralCount += 1
}
}
if(!processed && escapeBlocks.size > 0){
val in2 = in.drop(i - in.offset)
escapeBlocks.view.map(p => (p,p.apply(in2))).find(_._2.successful) match{
case Some((p,Success(s,lest))) => {
builder.append(p.openLiteral + s + p.closeLiteral)
i = lest.offset
}
case _ => {
builder.append(s.charAt(i))
i += 1
}
}
}else{
if (openLiteralCount > 0) {
builder.append(s.charAt(i))
}
i += 1
}
}
}
if(openLiteralCount == 0){
Success(builder.toString(),in.drop(i - in.offset))
}else{
Failure(s"Close literal not found:${closeLiteral}",in)
}
}
}
}
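/**
 * Hypothetical usage sketch (not part of the original file): a concrete RegexParsers
 * instance mixing in SkippingParsers, using PairLiteralSkipParser to consume a
 * parenthesised block while keeping nested parentheses intact.
 */
object SkippingParsersExample extends RegexParsers with SkippingParsers {
  private val parens = PairLiteralSkipParser(openLiteral = "(", closeLiteral = ")")

  def main(args: Array[String]): Unit = {
    // Succeeds with the body "a (b) c"; the trailing " rest" is left unconsumed.
    println(parse(parens, "(a (b) c) rest"))
  }
}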
| geishatokyo/diff-sql-table | parser/src/main/scala/com/geishatokyo/diffsql/parser/SkippingParsers.scala | Scala | mit | 3,024 |
/**
* Copyright (C) 2014 Kaj Magnus Lindberg (born 1979)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package debiki
import com.debiki.core._
import com.debiki.core.Prelude._
/** Provides default values for special content pages, e.g. a default website
* Content License, or a default Jurisdiction section, for the Terms of Use page.
*
* These special content pages have special ids so they can be looked up
* easily. The ids starts with "_" to indicate that they're special page ids.
* E.g. "_tou_content_license".
*/
object SpecialContentPages {
case class Content(text: String)
/** A magic string that means the default contents is to be used. */
val UseDefaultContentMark = "__use_default__"
def lookup(pageId: PageId): Option[Content] = Some(pageId match {
case StylesheetId => Content("")
case TermsOfUseContentLicenseId => TermsOfUseContentLicense
case TermsOfUseJurisdictionId => TermsOfUseJurisdiction
case x => return None
})
val StylesheetId = "_stylesheet"
val TermsOfUseContentLicenseId = "_tou_content_license"
val TermsOfUseContentLicense = Content(
text = o"""
User contributions are licensed under a
<a href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons
Attribution-NonCommercial-ShareAlike 4.0 International License</a>.""")
val TermsOfUseJurisdictionId = "_tou_jurisdiction"
/** I don't know if this makes sense or is legally enforceable, but,
* since forum owners might live just anywhere, this feels better
* than using WordPress'default license text that stipulates that the site
* be governed by the laws of California and that disputes be resolved in
* San Francisco.
*/
val TermsOfUseJurisdiction = Content(
text = o"""
Except to the extent applicable law, if any, provides otherwise,
this Agreement, any access to or use of the Website will be governed by
the laws of the country and state where the owners of %{company_short_name} live,
and the proper venue for any disputes arising out of or relating to any of the same
will be the federal courts (or courts, if there are no federal courts) located
in the city where they live, or closest to where they live if there are no courts
in that city.
The language used shall be the English language or the native language
of the owners of %{company_short_name}.""")
}
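// Hypothetical lookup sketch (not part of the original file): special page ids resolve to
// their default content, anything else yields None.
//
//   SpecialContentPages.lookup("_tou_jurisdiction")   // Some(TermsOfUseJurisdiction)
//   SpecialContentPages.lookup("some-ordinary-page")  // None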
| debiki/debiki-server-old | app/debiki/SpecialContentPages.scala | Scala | agpl-3.0 | 3,062 |
package moulder
import moulder.values._
import org.specs2.mutable.Specification
import org.jsoup.Jsoup
import java.lang.String
class MoulderUseCases extends Specification {
import Values._
import Moulds._
"A complex use case" in {
val document = Jsoup.parse("<html><body><h1>[...]</h1></body></html>")
val s = MoulderShop()
val items = "Spring" :: "Summer" :: "Autumn" :: "Winter" :: Nil
s.register("h1",
repeat(items)
:: attr("class", SeqValue("even" :: "odd" :: Nil).cycle)
:: text(SeqValue(items))
:: append(html(transform(SeqValue(items), (c: String) => "<p>" + c + "</p>")))
:: Nil)
s.process(document)
println(document)
success
}
"Usage for the blog" in {
object TaskType extends Enumeration {
type TaskType = Value
val BUG, ENHANCEMENT, NEW_FEATURE = Value
}
object TaskStatus extends Enumeration {
type TaskStatus = Value
val OPEN, CLOSED = Value
}
import TaskType._
import TaskStatus._
case class Task(val title: String, val description: String, val typ: TaskType, val status: TaskStatus, val urgent: Boolean)
val tasks = Task("Fix the bug", "bug", BUG, OPEN, true) ::
Task("Enhance the thing", "enhance", ENHANCEMENT, CLOSED, false) ::
Task("Add dat", "add", NEW_FEATURE, OPEN, false) ::
Nil
val m = MoulderShop()
m.register("#tasks li", repeat(tasks),
attr("class", transform(SeqValue(tasks), (t: Task) => if (t.status == CLOSED) "closed" else "")),
sub().register("span", remove(transform(SeqValue(tasks), (t: Task) => !t.urgent))),
sub().register("img", attr("src", transform(SeqValue(tasks), (t: Task) => "/images/" + (t.typ match {
case BUG => "circle_red.png"
case ENHANCEMENT => "circle_green.png"
case _ => "circle_blue.png"
})))),
sub().register("h2", text(transform(SeqValue(tasks), (t: Task) => t.title))),
sub().register("p", text(transform(SeqValue(tasks), (t: Task) => t.description))))
val doc = m.process(classOf[MoulderUseCases].getResourceAsStream("tasks.html"));
println(doc)
success
}
}
| jawher/moulder-s | src/test/scala/moulder/UseCases.scala | Scala | mit | 2,156 |
/*
Copyright (c) 2016, Rice University
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Rice University
nor the names of its contributors may be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.apache.spark.rdd.cl.tests
import java.util.LinkedList
import com.amd.aparapi.internal.writer.ScalaArrayParameter
import com.amd.aparapi.internal.model.Tuple2ClassModel
import org.apache.spark.rdd.cl.SyncCodeGenTest
import org.apache.spark.rdd.cl.CodeGenTest
import org.apache.spark.rdd.cl.CodeGenTests
import org.apache.spark.rdd.cl.CodeGenUtil
import com.amd.aparapi.internal.model.ClassModel
import com.amd.aparapi.internal.model.HardCodedClassModels
object Tuple2ObjectInputDirectTest extends SyncCodeGenTest[(Int, Point), Float] {
def getExpectedException() : String = { return null }
def getExpectedKernel() : String = { getExpectedKernelHelper(getClass) }
def getExpectedNumInputs() : Int = {
1
}
def init() : HardCodedClassModels = {
val inputClassType1Name = CodeGenUtil.cleanClassName("I")
val inputClassType2Name = CodeGenUtil.cleanClassName("org.apache.spark.rdd.cl.tests.Point")
val tuple2ClassModel : Tuple2ClassModel = Tuple2ClassModel.create(
inputClassType1Name, inputClassType2Name, false)
val models = new HardCodedClassModels()
models.addClassModelFor(classOf[Tuple2[_, _]], tuple2ClassModel)
models
}
def complete(params : LinkedList[ScalaArrayParameter]) {
params.get(0).addTypeParameter("I", false)
params.get(0).addTypeParameter("Lorg.apache.spark.rdd.cl.tests.Point;", true)
}
def getFunction() : Function1[(Int, Point), Float] = {
new Function[(Int, Point), Float] {
override def apply(in : (Int, Point)) : Float = {
in._2.x + in._2.y + in._2.z
}
}
}
}
| agrippa/spark-swat | swat/src/test/scala/org/apache/spark/rdd/cl/tests/Tuple2ObjectInputDirectTest.scala | Scala | bsd-3-clause | 3,092 |
package mesosphere.marathon
package integration.raml18
import mesosphere.marathon.raml._
// format: OFF
/**
* ================================= NOTE =================================
 * This is a copy of the [[mesosphere.marathon.raml.PodInstanceStatus]] class that doesn't include the `role` field.
* This is ONLY used in UpgradeIntegrationTest where we query old Marathon instances.
* ========================================================================
*
* @param id Unique ID of this pod instance in the cluster.
* TODO(jdef) Probably represents the Mesos executor ID.
* @param statusSince Time at which the status code was last modified.
* @param message Human-friendly explanation for reason of the current status.
* @param conditions Set of status conditions that apply to this pod instance.
* @param agentHostname Hostname that this instance was launched on.
* May be an IP address if the agent was configured to advertise its hostname that way.
* @param agentId The Mesos-generated ID of the agent upon which the instance was launched.
* @param agentRegion The @region property of the agent.
* @param agentZone The @zone property of the agent.
* @param resources Sum of all resources allocated for this pod instance.
* May include additional, system-allocated resources for the default executor.
* @param networks Status of the networks to which this instance is attached.
* @param containers status for each running container of this instance.
* @param specReference Location of the version of the pod specification this instance was created from.
* maxLength: 1024
* minLength: 1
* @param lastUpdated Time that this status was last checked and updated (even if nothing changed)
* @param lastChanged Time that this status was last modified (some aspect of status did change)
*/
case class PodInstanceStatus18(id: String, status: PodInstanceState, statusSince: java.time.OffsetDateTime, message: Option[String] = None, conditions: scala.collection.immutable.Seq[StatusCondition] = Nil, agentHostname: Option[String] = None, agentId: Option[String] = None, agentRegion: Option[String] = None, agentZone: Option[String] = None, resources: Option[Resources] = None, networks: scala.collection.immutable.Seq[NetworkStatus] = Nil, containers: scala.collection.immutable.Seq[ContainerStatus] = Nil, specReference: Option[String] = None, localVolumes: scala.collection.immutable.Seq[LocalVolumeId] = Nil, lastUpdated: java.time.OffsetDateTime, lastChanged: java.time.OffsetDateTime) extends RamlGenerated
object PodInstanceStatus18 {
import play.api.libs.json.Reads._
import play.api.libs.functional.syntax._
implicit object playJsonFormat extends play.api.libs.json.Format[PodInstanceStatus18] {
def reads(json: play.api.libs.json.JsValue): play.api.libs.json.JsResult[PodInstanceStatus18] = {
val id = json.\\("id").validate[String](play.api.libs.json.JsPath.read[String])
val status = json.\\("status").validate[PodInstanceState](play.api.libs.json.JsPath.read[PodInstanceState])
val statusSince = json.\\("statusSince").validate[java.time.OffsetDateTime](play.api.libs.json.JsPath.read[java.time.OffsetDateTime])
val message = json.\\("message").validateOpt[String](play.api.libs.json.JsPath.read[String])
val conditions = json.\\("conditions").validateOpt[scala.collection.immutable.Seq[StatusCondition]](play.api.libs.json.JsPath.read[scala.collection.immutable.Seq[StatusCondition]]).map(_.getOrElse(scala.collection.immutable.Seq[StatusCondition]()))
val agentHostname = json.\\("agentHostname").validateOpt[String](play.api.libs.json.JsPath.read[String])
val agentId = json.\\("agentId").validateOpt[String](play.api.libs.json.JsPath.read[String])
val agentRegion = json.\\("agentRegion").validateOpt[String](play.api.libs.json.JsPath.read[String])
val agentZone = json.\\("agentZone").validateOpt[String](play.api.libs.json.JsPath.read[String])
val resources = json.\\("resources").validateOpt[Resources](play.api.libs.json.JsPath.read[Resources])
val networks = json.\\("networks").validateOpt[scala.collection.immutable.Seq[NetworkStatus]](play.api.libs.json.JsPath.read[scala.collection.immutable.Seq[NetworkStatus]]).map(_.getOrElse(scala.collection.immutable.Seq[NetworkStatus]()))
val containers = json.\\("containers").validateOpt[scala.collection.immutable.Seq[ContainerStatus]](play.api.libs.json.JsPath.read[scala.collection.immutable.Seq[ContainerStatus]]).map(_.getOrElse(scala.collection.immutable.Seq[ContainerStatus]()))
val specReference = json.\\("specReference").validateOpt[String](play.api.libs.json.JsPath.read[String](maxLength[String](1024) keepAnd minLength[String](1)))
val localVolumes = json.\\("localVolumes").validateOpt[scala.collection.immutable.Seq[LocalVolumeId]](play.api.libs.json.JsPath.read[scala.collection.immutable.Seq[LocalVolumeId]]).map(_.getOrElse(scala.collection.immutable.Seq[LocalVolumeId]()))
val lastUpdated = json.\\("lastUpdated").validate[java.time.OffsetDateTime](play.api.libs.json.JsPath.read[java.time.OffsetDateTime])
val lastChanged = json.\\("lastChanged").validate[java.time.OffsetDateTime](play.api.libs.json.JsPath.read[java.time.OffsetDateTime])
val _errors = Seq(("id", id), ("status", status), ("statusSince", statusSince), ("message", message), ("conditions", conditions), ("agentHostname", agentHostname), ("agentId", agentId), ("agentRegion", agentRegion), ("agentZone", agentZone), ("resources", resources), ("networks", networks), ("containers", containers), ("specReference", specReference), ("localVolumes", localVolumes), ("lastUpdated", lastUpdated), ("lastChanged", lastChanged)).collect({
case (field, e:play.api.libs.json.JsError) => e.repath(play.api.libs.json.JsPath.\\(field)).asInstanceOf[play.api.libs.json.JsError]
})
if (_errors.nonEmpty) _errors.reduceOption[play.api.libs.json.JsError](_.++(_)).getOrElse(_errors.head)
else play.api.libs.json.JsSuccess(PodInstanceStatus18(id = id.get, status = status.get, statusSince = statusSince.get, message = message.get, conditions = conditions.get, agentHostname = agentHostname.get, agentId = agentId.get, agentRegion = agentRegion.get, agentZone = agentZone.get, resources = resources.get, networks = networks.get, containers = containers.get, specReference = specReference.get, localVolumes = localVolumes.get, lastUpdated = lastUpdated.get, lastChanged = lastChanged.get))
}
def writes(o: PodInstanceStatus18): play.api.libs.json.JsValue = {
val id = play.api.libs.json.Json.toJson(o.id)
val status = play.api.libs.json.Json.toJson(o.status)
val statusSince = play.api.libs.json.Json.toJson(o.statusSince)
val message = play.api.libs.json.Json.toJson(o.message)
val conditions = play.api.libs.json.Json.toJson(o.conditions)
val agentHostname = play.api.libs.json.Json.toJson(o.agentHostname)
val agentId = play.api.libs.json.Json.toJson(o.agentId)
val agentRegion = play.api.libs.json.Json.toJson(o.agentRegion)
val agentZone = play.api.libs.json.Json.toJson(o.agentZone)
val resources = play.api.libs.json.Json.toJson(o.resources)
val networks = play.api.libs.json.Json.toJson(o.networks)
val containers = play.api.libs.json.Json.toJson(o.containers)
val specReference = play.api.libs.json.Json.toJson(o.specReference)
val localVolumes = play.api.libs.json.Json.toJson(o.localVolumes)
val lastUpdated = play.api.libs.json.Json.toJson(o.lastUpdated)
val lastChanged = play.api.libs.json.Json.toJson(o.lastChanged)
play.api.libs.json.JsObject(Seq(("id", id), ("status", status), ("statusSince", statusSince), ("message", message), ("conditions", conditions), ("agentHostname", agentHostname), ("agentId", agentId), ("agentRegion", agentRegion), ("agentZone", agentZone), ("resources", resources), ("networks", networks), ("containers", containers), ("specReference", specReference), ("localVolumes", localVolumes), ("lastUpdated", lastUpdated), ("lastChanged", lastChanged)).filter(_._2 != play.api.libs.json.JsNull).++(Seq.empty))
}
}
val DefaultMessage: Option[String] = None
val DefaultConditions: scala.collection.immutable.Seq[StatusCondition] = Nil
val DefaultAgentHostname: Option[String] = None
val DefaultAgentId: Option[String] = None
val DefaultAgentRegion: Option[String] = None
val DefaultAgentZone: Option[String] = None
val DefaultResources: Option[Resources] = None
val DefaultNetworks: scala.collection.immutable.Seq[NetworkStatus] = Nil
val DefaultContainers: scala.collection.immutable.Seq[ContainerStatus] = Nil
val DefaultSpecReference: Option[String] = None
val DefaultLocalVolumes: scala.collection.immutable.Seq[LocalVolumeId] = Nil
val ConstraintSpecreferenceMaxlength = 1024
val ConstraintSpecreferenceMinlength = 1
}
| mesosphere/marathon | tests/integration/src/test/scala/mesosphere/marathon/integration/raml18/PodInstanceStatus18.scala | Scala | apache-2.0 | 8,903 |
class W[T <: AnyRef](val t: T) {
val v: T {} = t
}
object W {
def apply[T <: AnyRef](t: T) = new W[t.type](t)
}
object RightAssoc {
def ra_:[T](t: T): Unit = ()
}
object Boom {
W("fooo").v ra_: RightAssoc
}
| loskutov/intellij-scala | testdata/scalacTests/pos/shapeless-regression.scala | Scala | apache-2.0 | 219 |
package me.dribba
import akka.actor.{Actor, ActorLogging, Props}
import akka.pattern.after
import scala.concurrent.Future
import scala.concurrent.duration._
import com.pi4j.io.gpio.GpioController
import com.pi4j.io.gpio.GpioFactory
import com.pi4j.io.gpio.GpioPinDigitalOutput
import com.pi4j.io.gpio.PinState
import com.pi4j.io.gpio.RaspiPin
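/**
 * Controls a relay wired to GPIO pin 0: an `On` message toggles the pin and schedules `Off`
 * one second later, while any other `RelayState` toggles the pin again and schedules its
 * opposite after five seconds.
 */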
class Relay1Actor extends Actor with ActorLogging {
import Relay1Actor._
  import context.dispatcher
val gpio = GpioFactory.getInstance()
val pin = gpio.provisionDigitalOutputPin(RaspiPin.GPIO_00, "relay", PinState.LOW)
def receive = {
case On =>
pin.toggle()
      after(1.second, context.system.scheduler)(Future.successful(self ! Off))
case m: RelayState =>
pin.toggle()
      after(5.seconds, context.system.scheduler)(Future.successful(self ! m.toggle))
}
}
object Relay1Actor {
val props = Props[Relay1Actor]
trait RelayState {
def toggle: RelayState
}
case object On extends RelayState {
def toggle = Off
}
case object Off extends RelayState {
def toggle = On
}
} | dribba/akkaponics | src/main/scala/me/dribba/Relay1Actor.scala | Scala | mit | 1,091 |
/*
* Copyright 2016 Guy Van den Broeck and Wannes Meert (UCLA and KU Leuven)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.ucla.cs.starai.forclift.examples.models
import org.junit.runner.RunWith
import org.scalatest.Finders
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TestFriendsSmokerDrinker1 extends ModelBehaviours {
describe("FriendsSmokerDrinkerModel of size 2") {
val correctLogWMC = 6.919575403176769 +- 0.00001
val model = new FriendsSmokerDrinkerModel(2)
it should behave like verySmallModel(model, correctLogWMC)
}
describe("FriendsSmokerDrinkerModel of size 8") {
val correctLogWMC = 76.44237474352845 +- 0.00001
val model = new FriendsSmokerDrinkerModel(8)
it should behave like smallModel(model, correctLogWMC)
}
describe("FriendsSmokerDrinkerModel of size 200") {
val correctLogWMC = 42312.99807235724 +- 0.00001
val model = new FriendsSmokerDrinkerModel(200)
it should behave like slowBigModel(model, correctLogWMC)
}
}
@RunWith(classOf[JUnitRunner])
class TestFriendsSmokerDrinkerWithEvidence extends ModelBehaviours {
describe("FriendsSmokerDrinkerModel of size 2 with evidence") {
val correctLogWMC = 4.881748742732568 +- 0.00001
val model = new FriendsSmokerDrinkerModel(2, List("guy", "luc"), List("drinks(guy)", "drinks(luc)", "smokes(luc)"))
it should behave like verySmallModel(model, correctLogWMC)
}
describe("FriendsSmokerDrinkerModel of size 8 with evidence") {
val correctLogWMC = 74.4227005248472 +- 0.00001
val model = new FriendsSmokerDrinkerModel(8, List("guy", "luc"), List("drinks(guy)", "drinks(luc)", "smokes(luc)"))
it should behave like smallModel(model, correctLogWMC)
}
describe("FriendsSmokerDrinkerModel of size 100 with evidence") {
val correctLogWMC = 10577.938944961066 +- 0.00001
val model = new FriendsSmokerDrinkerModel(100, List("guy", "luc"), List("drinks(guy)", "drinks(luc)", "smokes(luc)"))
it should behave like bigModel(model, correctLogWMC)
}
}
| UCLA-StarAI/Forclift | src/test/scala/edu/ucla/cs/starai/forclift/models/TestFriendsSmokerDrinker.scala | Scala | apache-2.0 | 2,569 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.csv
import java.math.BigDecimal
import scala.util.control.Exception.allCatch
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.analysis.TypeCoercion
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._
object CSVInferSchema {
/**
* Similar to the JSON schema inference
* 1. Infer type of each row
* 2. Merge row types to find common type
* 3. Replace any null types with string type
*/
def infer(
tokenRDD: RDD[Array[String]],
header: Array[String],
options: CSVOptions): StructType = {
val fields = if (options.inferSchemaFlag) {
val startType: Array[DataType] = Array.fill[DataType](header.length)(NullType)
val rootTypes: Array[DataType] =
tokenRDD.aggregate(startType)(inferRowType(options), mergeRowTypes)
toStructFields(rootTypes, header, options)
} else {
// By default fields are assumed to be StringType
header.map(fieldName => StructField(fieldName, StringType, nullable = true))
}
StructType(fields)
}
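  // Illustrative sketch, not part of upstream Spark: the header names and sample rows below are
  // invented for the example. It walks the three steps documented above using only the methods
  // defined in this object.
  private def inferExampleSchema(options: CSVOptions): StructType = {
    val header = Array("id", "price")
    val rows = Seq(Array("1", "2.5"), Array("9999999999", "3"))
    val start: Array[DataType] = Array.fill[DataType](header.length)(NullType)
    // Step 1 and 2: infer a type per field for each row, then merge the row types pairwise.
    val merged = rows.map(row => inferRowType(options)(start.clone(), row)).reduce(mergeRowTypes)
    // Step 3: toStructFields replaces any remaining NullType with StringType.
    StructType(toStructFields(merged, header, options)) // id: LongType, price: DoubleType
  }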
def toStructFields(
fieldTypes: Array[DataType],
header: Array[String],
options: CSVOptions): Array[StructField] = {
header.zip(fieldTypes).map { case (thisHeader, rootType) =>
val dType = rootType match {
case _: NullType => StringType
case other => other
}
StructField(thisHeader, dType, nullable = true)
}
}
def inferRowType(options: CSVOptions)
(rowSoFar: Array[DataType], next: Array[String]): Array[DataType] = {
var i = 0
    while (i < math.min(rowSoFar.length, next.length)) { // some rows may be missing trailing columns
      rowSoFar(i) = inferField(rowSoFar(i), next(i), options)
      i += 1
}
rowSoFar
}
def mergeRowTypes(first: Array[DataType], second: Array[DataType]): Array[DataType] = {
first.zipAll(second, NullType, NullType).map { case (a, b) =>
compatibleType(a, b).getOrElse(NullType)
}
}
/**
* Infer type of string field. Given known type Double, and a string "1", there is no
* point checking if it is an Int, as the final type must be Double or higher.
*/
def inferField(typeSoFar: DataType, field: String, options: CSVOptions): DataType = {
if (field == null || field.isEmpty || field == options.nullValue) {
typeSoFar
} else {
typeSoFar match {
case NullType => tryParseInteger(field, options)
case IntegerType => tryParseInteger(field, options)
case LongType => tryParseLong(field, options)
case _: DecimalType =>
// DecimalTypes have different precisions and scales, so we try to find the common type.
compatibleType(typeSoFar, tryParseDecimal(field, options)).getOrElse(StringType)
case DoubleType => tryParseDouble(field, options)
case TimestampType => tryParseTimestamp(field, options)
case BooleanType => tryParseBoolean(field, options)
case StringType => StringType
case other: DataType =>
throw new UnsupportedOperationException(s"Unexpected data type $other")
}
}
}
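  // Hedged illustration of the widening described above (not upstream code, values invented):
  // once a column has reached DoubleType, an integer-looking token no longer narrows it.
  private def inferFieldExamples(options: CSVOptions): Seq[DataType] = Seq(
    inferField(NullType, "1", options),      // IntegerType
    inferField(IntegerType, "2.5", options), // widens to DoubleType
    inferField(DoubleType, "1", options),    // stays DoubleType, Int is never re-checked
    inferField(DoubleType, "oops", options)  // falls through to StringType
  )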
private def isInfOrNan(field: String, options: CSVOptions): Boolean = {
field == options.nanValue || field == options.negativeInf || field == options.positiveInf
}
private def tryParseInteger(field: String, options: CSVOptions): DataType = {
if ((allCatch opt field.toInt).isDefined) {
IntegerType
} else {
tryParseLong(field, options)
}
}
private def tryParseLong(field: String, options: CSVOptions): DataType = {
if ((allCatch opt field.toLong).isDefined) {
LongType
} else {
tryParseDecimal(field, options)
}
}
private def tryParseDecimal(field: String, options: CSVOptions): DataType = {
val decimalTry = allCatch opt {
// `BigDecimal` conversion can fail when the `field` is not a form of number.
val bigDecimal = new BigDecimal(field)
// Because many other formats do not support decimal, it reduces the cases for
// decimals by disallowing values having scale (eg. `1.1`).
if (bigDecimal.scale <= 0) {
// `DecimalType` conversion can fail when
// 1. The precision is bigger than 38.
// 2. scale is bigger than precision.
DecimalType(bigDecimal.precision, bigDecimal.scale)
} else {
tryParseDouble(field, options)
}
}
decimalTry.getOrElse(tryParseDouble(field, options))
}
private def tryParseDouble(field: String, options: CSVOptions): DataType = {
if ((allCatch opt field.toDouble).isDefined || isInfOrNan(field, options)) {
DoubleType
} else {
tryParseTimestamp(field, options)
}
}
private def tryParseTimestamp(field: String, options: CSVOptions): DataType = {
    // This case handles a custom `timestampFormat`, if one is set in the options.
if ((allCatch opt options.timestampFormat.parse(field)).isDefined) {
TimestampType
} else if ((allCatch opt DateTimeUtils.stringToTime(field)).isDefined) {
// We keep this for backwards compatibility.
TimestampType
} else {
tryParseBoolean(field, options)
}
}
private def tryParseBoolean(field: String, options: CSVOptions): DataType = {
if ((allCatch opt field.toBoolean).isDefined) {
BooleanType
} else {
stringType()
}
}
// Defining a function to return the StringType constant is necessary in order to work around
// a Scala compiler issue which leads to runtime incompatibilities with certain Spark versions;
// see issue #128 for more details.
private def stringType(): DataType = {
StringType
}
/**
* Returns the common data type given two input data types so that the return type
* is compatible with both input data types.
*/
private def compatibleType(t1: DataType, t2: DataType): Option[DataType] = {
TypeCoercion.findTightestCommonType(t1, t2).orElse(findCompatibleTypeForCSV(t1, t2))
}
/**
* The following pattern matching represents additional type promotion rules that
* are CSV specific.
*/
private val findCompatibleTypeForCSV: (DataType, DataType) => Option[DataType] = {
case (StringType, t2) => Some(StringType)
case (t1, StringType) => Some(StringType)
// These two cases below deal with when `IntegralType` is larger than `DecimalType`.
case (t1: IntegralType, t2: DecimalType) =>
compatibleType(DecimalType.forType(t1), t2)
case (t1: DecimalType, t2: IntegralType) =>
compatibleType(t1, DecimalType.forType(t2))
// Double support larger range than fixed decimal, DecimalType.Maximum should be enough
// in most case, also have better precision.
case (DoubleType, _: DecimalType) | (_: DecimalType, DoubleType) =>
Some(DoubleType)
case (t1: DecimalType, t2: DecimalType) =>
val scale = math.max(t1.scale, t2.scale)
val range = math.max(t1.precision - t1.scale, t2.precision - t2.scale)
if (range + scale > 38) {
// DecimalType can't support precision > 38
Some(DoubleType)
} else {
Some(DecimalType(range + scale, scale))
}
case _ => None
}
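  // Sketch of how the rules above combine with findTightestCommonType (illustrative only):
  // strings absorb everything, integrals widen into sufficiently wide decimals, and decimal
  // pairs that would need more than 38 digits of precision degrade to DoubleType.
  private def compatibleTypeExamples: Seq[Option[DataType]] = Seq(
    compatibleType(StringType, DoubleType),                 // Some(StringType)
    compatibleType(LongType, DecimalType(20, 0)),           // Some(DecimalType(20, 0))
    compatibleType(DecimalType(38, 0), DecimalType(10, 5))  // Some(DoubleType), since 38 + 5 > 38
  )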
}
| ahnqirage/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/csv/CSVInferSchema.scala | Scala | apache-2.0 | 8,004 |
package im.actor.server.session
import akka.testkit.TestProbe
import com.google.protobuf.ByteString
import im.actor.api.rpc.auth._
import im.actor.api.rpc.codecs._
import im.actor.api.rpc.collections.ApiStringValue
import im.actor.api.rpc.contacts.{ RequestGetContacts, UpdateContactRegistered }
import im.actor.api.rpc.messaging.RequestLoadDialogs
import im.actor.api.rpc.misc.ResponseVoid
import im.actor.api.rpc.peers.ApiUserOutPeer
import im.actor.api.rpc.sequence.{ RequestGetDifference, RequestGetState, RequestSubscribeToOnline, UpdateRawUpdate }
import im.actor.api.rpc.misc.ResponseSeq
import im.actor.api.rpc.raw.RequestRawRequest
import im.actor.api.rpc.weak.UpdateUserOffline
import im.actor.api.rpc.{ AuthorizedClientData, Request, RpcOk }
import im.actor.server.api.rpc.RawApiExtension
import im.actor.server.api.rpc.service.auth.AuthErrors
import im.actor.server.api.rpc.service.raw.EchoService
import im.actor.server.mtproto.protocol._
import im.actor.server.mtproto.transport._
import im.actor.server.persist.AuthSessionRepo
import im.actor.server.sequence.{ SeqUpdatesExtension, UserSequence, WeakUpdatesExtension }
import im.actor.server.user.UserExtension
import org.scalatest.BeforeAndAfterEach
import scodec.bits._
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Random
final class SessionSpec extends BaseSessionSpec with BeforeAndAfterEach {
behavior of "Session actor"
it should "send Drop on message on wrong message box" in sessions().wrongMessageBox
it should "send NewSession on first HandleMessageBox" in sessions().newSession
it should "reply to RpcRequestBox" in sessions().rpc
it should "handle user authorization" in sessions().auth
it should "subscribe to sequence updates" in sessions().seq
it should "subscribe to weak updates" in sessions().weak
it should "subscribe to presences" in sessions().pres
it should "receive fat updates" in sessions().fatSeq
it should "react to SessionHello" in sessions().hello
it should "send SeqUpdateTooLong" in sessions().seqUpdateTooLong
it should "cache small results" in sessions().cacheSmallResults
it should "not cache big results" in sessions().notCacheBigResults
@volatile var count = 0
override def beforeEach = {
RawApiExtension(system).register("echo", new EchoService(system) { override def onEcho() = count += 1 })
}
case class sessions() {
implicit val probe = TestProbe()
val weakUpdatesExt = WeakUpdatesExtension(system)
val seqUpdExt = SeqUpdatesExtension(system)
def wrongMessageBox() = {
val authId = createAuthId()
val sessionId = Random.nextLong()
val session = system.actorOf(Session.props(sessionConfig), s"${authId}_$sessionId")
probe.send(session, HandleMessageBox(ByteString.copyFrom(BitVector.empty.toByteBuffer)))
probe watch session
probe.expectMsg(Drop(0, 0, "Failed to parse MessageBox"))
probe.expectTerminated(session)
}
def newSession() = {
val authId = createAuthId()
val sessionId = Random.nextLong()
val messageId = Random.nextLong()
val encodedRequest = RequestCodec.encode(Request(RequestStartPhoneAuth(
phoneNumber = 75553333333L,
appId = 1,
apiKey = "apiKey",
deviceHash = Random.nextLong.toBinaryString.getBytes,
deviceTitle = "Specs Has You",
timeZone = None,
preferredLanguages = Vector.empty
))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, ProtoRpcRequest(encodedRequest))
expectNewSession(authId, sessionId, messageId)
probe.receiveOne(1.second)
probe.receiveOne(1.second)
probe.expectNoMsg()
}
def rpc() = {
val authId = createAuthId()
val sessionId = Random.nextLong()
val messageId = Random.nextLong()
val encodedStartRequest = RequestCodec.encode(Request(RequestStartPhoneAuth(
phoneNumber = 75553333334L,
appId = 1,
apiKey = "apiKey",
deviceHash = Random.nextLong.toBinaryString.getBytes,
deviceTitle = "Specs Has You",
timeZone = None,
preferredLanguages = Vector.empty
))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, ProtoRpcRequest(encodedStartRequest))
expectNewSession(authId, sessionId, messageId)
expectRpcResult(authId, sessionId) should matchPattern {
case RpcOk(ResponseStartPhoneAuth(_, false, _)) ⇒
}
probe.expectNoMsg(20.seconds)
}
def auth() = {
val authId = createAuthId()
val sessionId = Random.nextLong()
val phoneNumber = 75550000000L
val code = phoneNumber.toString.charAt(4).toString * 4
val firstMessageId = Random.nextLong()
val encodedCodeRequest = RequestCodec.encode(Request(RequestStartPhoneAuth(
phoneNumber = phoneNumber,
appId = 1,
apiKey = "apiKey",
deviceHash = Random.nextLong.toBinaryString.getBytes,
deviceTitle = "Specs Has You",
timeZone = None,
preferredLanguages = Vector.empty
))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, firstMessageId, ProtoRpcRequest(encodedCodeRequest))
expectNewSession(authId, sessionId, firstMessageId)
val txHash = expectRpcResult(authId, sessionId).asInstanceOf[RpcOk].response.asInstanceOf[ResponseStartPhoneAuth].transactionHash
val secondMessageId = Random.nextLong()
val encodedValidateRequest = RequestCodec.encode(Request(RequestValidateCode(txHash, code))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, secondMessageId, ProtoRpcRequest(encodedValidateRequest))
expectRpcResult(authId, sessionId) should matchPattern {
case AuthErrors.PhoneNumberUnoccupied ⇒
}
val thirdMessageId = Random.nextLong()
val encodedSignUpRequest = RequestCodec.encode(Request(RequestSignUp(txHash, "Name", None, None))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, thirdMessageId, ProtoRpcRequest(encodedSignUpRequest))
expectRpcResult(authId, sessionId) should matchPattern {
case RpcOk(ResponseAuth(_, _)) ⇒
}
{
val encodedLoadDialogs = RequestCodec.encode(Request(RequestLoadDialogs(0L, 100, Vector.empty))).require
val encodedGetDifference = RequestCodec.encode(Request(RequestGetDifference(0, Array(), Vector.empty))).require
val encodedGetContacts = RequestCodec.encode(Request(RequestGetContacts("", Vector.empty))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, Random.nextLong, ProtoRpcRequest(encodedLoadDialogs))
sendMessageBox(authId, sessionId, sessionRegion.ref, Random.nextLong, ProtoRpcRequest(encodedGetDifference))
sendMessageBox(authId, sessionId, sessionRegion.ref, Random.nextLong, ProtoRpcRequest(encodedGetContacts))
expectRpcResult(authId, sessionId)
expectRpcResult(authId, sessionId)
expectRpcResult(authId, sessionId)
}
val encodedSignOutRequest = RequestCodec.encode(Request(RequestSignOut)).require
val forthMessageId = Random.nextLong()
sendMessageBox(authId, sessionId, sessionRegion.ref, forthMessageId, ProtoRpcRequest(encodedSignOutRequest))
expectRpcResult(authId, sessionId) should matchPattern {
case RpcOk(ResponseVoid) ⇒
}
}
def seqUpdateTooLong() = {
val (user, authId, _, _) = createUser()
val sessionId = Random.nextLong()
sendMessageBox(authId, sessionId, sessionRegion.ref, Random.nextLong(), SessionHello)
ignoreNewSession()
sendRequest(authId, sessionId, sessionRegion.ref, RequestGetState(Vector.empty))
expectMessageAck()
expectRpcResult(authId, sessionId)
val updatesCount = 32
// each update is 1024 bytes
val payload = Array(List.range(0, 1005).map(_.toByte): _*)
val update = UpdateRawUpdate(None, payload)
for (_ ← 1 to updatesCount) {
whenReady(seqUpdExt.deliverUserUpdate(user.id, update))(identity)
}
// expect 30Kb of updates to be pushed, then SeqUpdateTooLong (no ack)
for (_ ← 1 until updatesCount) {
expectSeqUpdate(authId, sessionId, None)
}
expectSeqUpdateTooLong(authId, sessionId)
expectSeqUpdate(authId, sessionId)
probe.expectNoMsg(5.seconds)
}
def seq() = {
val phoneNumber = 75550000000L + Random.nextInt(100000)
val user = createUser(phoneNumber)._1
val authId = createAuthId()
val sessionId = Random.nextLong()
val code = phoneNumber.toString.charAt(4).toString * 4
val firstMessageId = Random.nextLong()
val encodedCodeRequest = RequestCodec.encode(Request(RequestStartPhoneAuth(
phoneNumber = phoneNumber,
appId = 1,
apiKey = "apiKey",
deviceHash = Random.nextLong.toBinaryString.getBytes,
deviceTitle = "Specs Has You",
timeZone = None,
preferredLanguages = Vector.empty
))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, firstMessageId, ProtoRpcRequest(encodedCodeRequest))
expectNewSession(authId, sessionId, firstMessageId)
val txHash = expectRpcResult(authId, sessionId).asInstanceOf[RpcOk].response.asInstanceOf[ResponseStartPhoneAuth].transactionHash
val secondMessageId = Random.nextLong()
val encodedValidateRequest = RequestCodec.encode(Request(RequestValidateCode(txHash, code))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, secondMessageId, ProtoRpcRequest(encodedValidateRequest))
val authResult = expectRpcResult(authId, sessionId)
authResult should matchPattern {
case RpcOk(ResponseAuth(_, _)) ⇒
}
val authSession = Await.result(db.run(AuthSessionRepo.findByAuthId(authId)), 5.seconds).get
implicit val clientData = AuthorizedClientData(authId, sessionId, authResult.asInstanceOf[RpcOk].response.asInstanceOf[ResponseAuth].user.id, authSession.id, 42)
val encodedGetSeqRequest = RequestCodec.encode(Request(RequestGetState(Vector.empty))).require
val thirdMessageId = Random.nextLong()
sendMessageBox(authId, sessionId, sessionRegion.ref, thirdMessageId, ProtoRpcRequest(encodedGetSeqRequest))
expectRpcResult(authId, sessionId) should matchPattern {
case RpcOk(ResponseSeq(_, _)) ⇒
}
val update = UpdateContactRegistered(1, true, 1L, 2L)
seqUpdExt.deliverUserUpdate(user.id, update)
expectSeqUpdate(authId, sessionId).update should ===(update.toByteArray)
}
def fatSeq() = {
val (user, authId, authSid, _) = createUser()
val sessionId = Random.nextLong
implicit val clientData = AuthorizedClientData(authId, sessionId, user.id, authSid, 42)
val encodedGetSeqRequest = RequestCodec.encode(Request(RequestGetState(Vector.empty))).require
val thirdMessageId = Random.nextLong()
sendMessageBox(authId, sessionId, sessionRegion.ref, thirdMessageId, ProtoRpcRequest(encodedGetSeqRequest))
ignoreNewSession()
expectRpcResult(authId, sessionId) should matchPattern {
case RpcOk(ResponseSeq(_, _)) ⇒
}
val update = UpdateContactRegistered(user.id, true, 1L, 2L)
whenReady(seqUpdExt.deliverUserUpdate(
user.id,
update,
pushRules = seqUpdExt.pushRules(isFat = true, Some("text"))
))(identity)
val fat = expectFatSeqUpdate(authId, sessionId)
fat.users.head.id should ===(user.id)
fat.update should ===(update.toByteArray)
}
def weak() = {
val authId = createAuthId()
val sessionId = Random.nextLong()
val phoneNumber = 75550000000L
val code = phoneNumber.toString.charAt(4).toString * 4
val firstMessageId = Random.nextLong()
val encodedCodeRequest = RequestCodec.encode(Request(RequestStartPhoneAuth(
phoneNumber = phoneNumber,
appId = 1,
apiKey = "apiKey",
deviceHash = Random.nextLong.toBinaryString.getBytes,
deviceTitle = "Specs Has You",
timeZone = None,
preferredLanguages = Vector.empty
))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, firstMessageId, ProtoRpcRequest(encodedCodeRequest))
expectNewSession(authId, sessionId, firstMessageId)
val txHash = expectRpcResult(authId, sessionId).asInstanceOf[RpcOk].response.asInstanceOf[ResponseStartPhoneAuth].transactionHash
val secondMessageId = Random.nextLong()
val encodedValidateRequest = RequestCodec.encode(Request(RequestValidateCode(txHash, code))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, secondMessageId, ProtoRpcRequest(encodedValidateRequest))
val authResult = expectRpcResult(authId, sessionId)
authResult should matchPattern {
case RpcOk(ResponseAuth(_, _)) ⇒
}
implicit val clientData = AuthorizedClientData(authId, sessionId, authResult.asInstanceOf[RpcOk].response.asInstanceOf[ResponseAuth].user.id, Await.result(db.run(AuthSessionRepo.findByAuthId(authId)), 5.seconds).get.id, 42)
val update = UpdateContactRegistered(1, isSilent = true, 1L, 5L)
Await.result(weakUpdatesExt.broadcastUserWeakUpdate(clientData.userId, update, reduceKey = None), 1.second)
expectWeakUpdate(authId, sessionId).update should ===(update.toByteArray)
}
def pres() = {
val authId = createAuthId()
val sessionId = Random.nextLong()
val phoneNumber = 75550000000L
val code = phoneNumber.toString.charAt(4).toString * 4
val firstMessageId = Random.nextLong()
val encodedCodeRequest = RequestCodec.encode(Request(RequestStartPhoneAuth(
phoneNumber = phoneNumber,
appId = 1,
apiKey = "apiKey",
deviceHash = Random.nextLong.toBinaryString.getBytes,
deviceTitle = "Specs Has You",
timeZone = None,
preferredLanguages = Vector.empty
))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, firstMessageId, ProtoRpcRequest(encodedCodeRequest))
expectNewSession(authId, sessionId, firstMessageId)
val txHash = expectRpcResult(authId, sessionId).asInstanceOf[RpcOk].response.asInstanceOf[ResponseStartPhoneAuth].transactionHash
val secondMessageId = Random.nextLong()
val encodedValidateRequest = RequestCodec.encode(Request(RequestValidateCode(txHash, code))).require
sendMessageBox(authId, sessionId, sessionRegion.ref, secondMessageId, ProtoRpcRequest(encodedValidateRequest))
val authResult = expectRpcResult(authId, sessionId)
authResult should matchPattern {
case RpcOk(ResponseAuth(_, _)) ⇒
}
{
val userForSubscribe = 2
// FIXME: real user and real accessHash
val encodedSubscribeRequest = RequestCodec.encode(Request(RequestSubscribeToOnline(Vector(ApiUserOutPeer(userForSubscribe, 0L))))).require
val messageId = Random.nextLong()
sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, ProtoRpcRequest(encodedSubscribeRequest))
val subscribeResult = expectRpcResult(authId, sessionId)
subscribeResult should matchPattern {
case RpcOk(ResponseVoid) ⇒
}
}
val ub = expectWeakUpdate(authId, sessionId)
ub.updateHeader should ===(UpdateUserOffline.header)
}
def hello() = {
val (_, authId, _, _) = createUser()
val sessionId = Random.nextLong()
val messageId = Random.nextLong()
sendMessageBox(authId, sessionId, sessionRegion.ref, messageId, SessionHello)
expectNewSession(authId, sessionId, messageId)
expectMessageAck(messageId)
probe.expectNoMsg()
}
def cacheSmallResults(): Unit = {
implicit val probe = TestProbe()
val authId = createAuthId()
val sessionId = Random.nextLong()
val helloMessageId = Random.nextLong()
sendMessageBox(authId, sessionId, sessionRegion.ref, helloMessageId, SessionHello)
expectNewSession(authId, sessionId, helloMessageId)
expectMessageAck()
val messageId = Random.nextLong()
count = 0
for (_ ← 1 to 3) {
sendRequest(
authId,
sessionId,
sessionRegion.ref,
messageId,
RequestRawRequest("echo", "makeEcho", Some(ApiStringValue("...")))
)
expectRpcResult(authId, sessionId, ignoreAcks = true)
}
count shouldBe 1
}
def notCacheBigResults(): Unit = {
implicit val probe = TestProbe()
val authId = createAuthId()
val sessionId = Random.nextLong()
val helloMessageId = Random.nextLong()
sendMessageBox(authId, sessionId, sessionRegion.ref, helloMessageId, SessionHello)
expectNewSession(authId, sessionId, helloMessageId)
expectMessageAck()
val longString = List.range(1, 40000).map(_ ⇒ ".").mkString
val messageId = Random.nextLong()
count = 0
for (_ ← 1 to 3) {
sendRequest(
authId,
sessionId,
sessionRegion.ref,
messageId,
RequestRawRequest("echo", "makeEcho", Some(ApiStringValue(longString)))
)
expectRpcResult(authId, sessionId, ignoreAcks = true)
}
count shouldBe 3
}
}
}
| EaglesoftZJ/actor-platform | actor-server/actor-tests/src/test/scala/im/actor/server/session/SessionSpec.scala | Scala | agpl-3.0 | 17,377 |
package priv.sp
import collection._
import java.io._
import priv.sp.update._
object CardSpec {
val runFlag = 1
val stunFlag = 2
val invincibleFlag = 4
val blockedFlag = 8
val pausedFlag = 16
val cursedFlag = 32
val onHold = stunFlag + blockedFlag + pausedFlag
type Phase = Int
val Direct = 0
val OnTurn = 1
val OnEndTurn = 2
val OnStart = 3
val phases = Array(Direct, OnTurn, OnEndTurn, OnStart)
type Effect = GameCardEffect.Env ⇒ Unit
type PhaseEffect = (CardSpec.Phase, CardSpec.Effect)
type Description = (GameState, PlayerId) => String
implicit def toDescription(s : String) : Description = (_, _) => s
def effects(effects: PhaseEffect*) = toEffectMap(effects)
def toEffectMap(effects: Traversable[PhaseEffect]) = {
def effectAt(phase: Phase): Option[Effect] = {
val filtereds = effects collect { case (ph, f) if ph == phase ⇒ f }
if (filtereds.isEmpty) None
else if (filtereds.size == 1) Some(filtereds.head)
else Some(new ComposedEffect(filtereds))
}
phases map (effectAt _)
}
val noEffects = phases map (_ ⇒ Option.empty[Effect])
class ComposedEffect(effects: Traversable[Effect]) extends Function[GameCardEffect.Env, Unit] {
def apply(env: GameCardEffect.Env) = {
effects foreach (_(env))
}
}
val defaultReaction = () ⇒ new Reaction
}
import CardSpec._
object Card {
val currentId = new java.util.concurrent.atomic.AtomicInteger
}
sealed abstract class Card {
def name: String
def label : String
def image: String
def inputSpec: Option[CardInputSpec]
def effects: Array[Option[Effect]]
def description: Description
var cost = 0
var id = Card.currentId.incrementAndGet
var houseId = 0
var houseIndex = 0
var cardIndex = 0
def cardIndex1 = cardIndex + 1
final val isSpell = isInstanceOf[Spell]
def isSpecial = houseIndex == 4
def asCreature = {
this match {
case creature: Creature ⇒ creature
case _ ⇒ sys.error(this + " is not a creature")
}
}
override def toString() = s"Card($name)"
override def hashCode(): Int = id
override def equals(o: Any) = {
o match {
case c: Card ⇒ c.hashCode() == hashCode()
case _ ⇒ false
}
}
}
class Creature(
val key: String,
val attack: AttackSources,
val life: Int,
val description: Description = "",
val inputSpec: Option[CardInputSpec] = Some(SelectOwnerSlot),
var effects: Array[Option[CardSpec.Effect]] = CardSpec.noEffects,
val mod: Option[Mod] = None,
reaction: ⇒ Reaction = new Reaction,
val data: AnyRef = null, // initialize slot custom data
val runAttack: RunAttack = SingleTargetAttack,
val isAltar: Boolean = false,
val status: Int = 0) extends Card {
val name = I18n.default(key)
val label = I18n(key)
def this() = this(null, AttackSources(), 0)
def newReaction = reaction
final def inflict(damage: Damage, life: Int) = life - damage.amount
def image = name + ".jpg"
}
case class Spell(
key: String,
description: Description = "",
inputSpec: Option[CardInputSpec] = None,
effects: Array[Option[CardSpec.Effect]] = CardSpec.noEffects) extends Card {
val name = I18n.default(key)
val label = I18n(key)
def this() = this(null)
def image = name + ".tga"
}
trait CommandFlag
case class Command(player: PlayerId, card: Card, input: Option[SlotInput], cost: Int, flag: Option[CommandFlag] = None) {
final override def toString() = player + "> " + card.name + input.map(i ⇒ " at " + i.num).getOrElse("")
}
object Context {
val noSelection = -1
def apply(playerId: PlayerId, card: Option[Card] = None, selected: Int = Context.noSelection) = new ContextImpl(playerId, card, selected)
}
trait Context {
def playerId: PlayerId
def card: Option[Card]
def selected: Int
def selectedOption = if (selected == Context.noSelection) None else Some(selected)
}
// context is mostly for interception, so it's not that important if it's not supplied for self damage for example
class ContextImpl(val playerId: PlayerId, val card: Option[Card], val selected: Int) extends Context
case class Damage(amount: Int, context: Context, isAbility: Boolean = false, isSpell: Boolean = false) {
def isEffect = isAbility || isSpell
}
sealed trait CardInputSpec
trait OwnerInputSlot
case class SelectOwner(f: (PlayerId, GameState) ⇒ Seq[Int]) extends CardInputSpec with OwnerInputSlot
case object SelectOwnerSlot extends CardInputSpec with OwnerInputSlot
case object SelectOwnerCreature extends CardInputSpec with OwnerInputSlot
case class SelectTarget(f: (PlayerId, GameState) ⇒ Seq[Int]) extends CardInputSpec
case object SelectTargetSlot extends CardInputSpec
case object SelectTargetCreature extends CardInputSpec
class SlotInput(val num: Int) extends AnyVal with Serializable
trait Mod
case class SpellMod(modify: Int ⇒ Int) extends Mod
sealed trait BoardEvent
trait PlayerEvent extends BoardEvent {
def player: PlayerUpdate
def otherPlayer = player.otherPlayer
}
case class Dead(num: Int, slot: SlotState, player: PlayerUpdate, damage: Option[Damage]) extends PlayerEvent {
def isEffect = damage.isEmpty || damage.get.isEffect
def isSpell = damage.isEmpty || damage.get.isSpell
def isDestroy = damage.isEmpty
def card = slot.card
}
// need source if no target
case class DamageEvent(damage: Damage, target: Option[Int], player: PlayerUpdate) extends PlayerEvent
case class SummonEvent(num: Int, card: Creature, player: PlayerUpdate) extends PlayerEvent
trait SlotMod {
def apply(slotState: SlotState): SlotState
}
// some crap
trait Actions {
protected var selected: SlotUpdate = null
def use(s: SlotUpdate) { selected = s }
def heal(amount: Int) { selected write Some(SlotState.addLife(selected.get, amount)) }
def inflict(damage: Damage) { selected damageSlot damage }
def destroy() { selected.privDestroy() }
def stun() { selected toggle CardSpec.stunFlag }
}
object Reaction {
val falseNone = (false, None)
}
class Reaction extends Actions {
// when a creature is added on same side
def onAdd(slot: SlotUpdate) {}
def onRemove(slot: SlotUpdate) {}
  // used by stone golem and archphoenix where overriding inflict doesn't suffice, because it needs to know the context => TODO remove overridden inflict
  def selfProtect(d: Damage) = d
  // used by black monk to heal by the amount even when dying, and by errant to wake up
def onMyDamage(damage : Damage) {}
// /!\ the slot is not yet empty but is about to (used for f5, f7, schizo, crossbow)
def onMyRemove(dead: Option[Dead]) {}
def onMyDeath(dead: Dead) {}
// TODO call this from house listener?
def onSummon(summoned: SummonEvent) {}
def onSpawnOver: Option[SlotMod] = { selected.destroy(); None }
def onOverwrite(c: Creature) {}
def cleanUp() {} // bs for warp
/**
   * Events that need to be broadcast manually in a house listener
*/
// broadcast is already done for player target (for ice guard)
def onProtect(d: DamageEvent) = d.damage
def onDeath(dead: Dead) {}
}
trait RunAttack {
var isMultiTarget = false
def apply(target: List[Int], d: Damage, player: PlayerUpdate)
}
object SingleTargetAttack extends RunAttack {
def apply(target: List[Int], d: Damage, player: PlayerUpdate) {
val otherPlayer = player.otherPlayer
if (target.isEmpty) {
otherPlayer inflict d
} else {
val hits = target.count { num ⇒
val slot = otherPlayer.slots(num)
val test = slot.value.isDefined
if (test) {
slot inflict d
}
test
}
if (hits < target.size) otherPlayer inflict d
}
}
// BS todo refactor
// return true if killed a creature
def attack(target: List[Int], d: Damage, player: PlayerUpdate) = {
val otherSlots = player.otherPlayer.slots
val targetExists = target filter { num ⇒ otherSlots(num).value.isDefined }
apply(target, d, player)
targetExists.nonEmpty && targetExists.exists(num ⇒ otherSlots(num).value.isEmpty)
}
}
object MultiTargetAttack extends RunAttack {
isMultiTarget = true
def apply(target: List[Int], d: Damage, player: PlayerUpdate) {
val otherPlayer = player.otherPlayer
otherPlayer inflict d
otherPlayer.slots inflictCreatures d
}
}
object Attack {
def apply(base: Int): AttackSources = AttackSources(Some(base))
}
case class AttackSources(base: Option[Int] = None, sources: Vector[AttackSource] = Vector.empty) {
def add(source: AttackSource) = copy(sources = sources :+ source)
def removeFirst(source: AttackSource) = {
val idx = sources.indexOf(source)
if (idx != -1) {
copy(sources = sources.patch(idx, Vector.empty, 1))
} else this
}
def removeFirstEq(source: AttackSource) = {
val idx = sources.indexWhere(_ eq source)
if (idx != -1) {
copy(sources = sources.patch(idx, Vector.empty, 1))
} else this
}
def removeAny(source: AttackSource) = copy(sources = sources.filterNot(_ == source))
}
trait AttackSource
trait AttackFunc extends AttackSource {
def apply(attack: Int): Int
}
trait AttackStateFunc extends AttackSource {
def apply(attack: Int, player: PlayerUpdate): Int
}
trait AttackSlotStateFunc extends AttackSource {
def apply(attack: Int, slot: SlotUpdate): Int
}
case class ManaAttack(houseIndex: Int) extends AttackStateFunc {
def apply(attack: Int, player: PlayerUpdate): Int = attack + player.getHouses(houseIndex).mana
}
trait DescMod {
def apply(house: House, cards: Vector[CardDesc]): Vector[CardDesc]
}
| illim/freespectrogdx | core/src/main/scala/priv/sp/CardSpec.scala | Scala | gpl-3.0 | 9,553 |
package org.apache.spark.sql
import org.apache.spark.SparkContext
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.execution.{QueryExecution, SQLExecution}
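/**
 * Thin forwarders placed in the org.apache.spark.sql package so that external code can reach
 * functionality (execution-id scoping, the DataFrame's bound row encoder, Column/Expression
 * conversions) that is package-private or otherwise awkward to access from outside Spark.
 */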
object KarpsStubs {
def withExecutionId[T](sc: SparkContext, executionId: String)(body: => T): T= {
SQLExecution.withExecutionId(sc, executionId)(body)
}
def withNewExecutionId[T](
sparkSession: SparkSession,
queryExecution: QueryExecution)(body: => T): T = {
SQLExecution.withNewExecutionId(sparkSession, queryExecution)(body)
}
def getBoundEncoder(df: DataFrame): ExpressionEncoder[Row] = {
df.exprEnc.resolveAndBind(df.logicalPlan.output,
df.sparkSession.sessionState.analyzer)
}
def getExpression(c: Column): Expression = c.expr
def makeColumn(exp: Expression): Column = Column.apply(exp)
}
| krapsh/kraps-server | src/main/scala/org/apache/spark/sql/KarpsStubs.scala | Scala | apache-2.0 | 906 |
package de.tu_berlin.formic.datastructure.tree.client
import akka.actor.ActorRef
import de.tu_berlin.formic.common.datastructure.DataStructureName
import de.tu_berlin.formic.common.{ClientId, DataStructureInstanceId}
/**
* @author Ronny Bräunlich
*/
class FormicStringTreeFactory extends FormicTreeDataStructureFactory[String] {
override def createWrapper(dataStructureInstanceId: DataStructureInstanceId, dataStructure: ActorRef, localClientId: ClientId): FormicTree[String] = {
new FormicStringTree((ClientDataStructureEvent) => {}, RemoteDataStructureInitiator, dataStructureInstanceId, dataStructure, localClientId)
}
override val name: DataStructureName = FormicStringTreeFactory.name
}
object FormicStringTreeFactory {
val name = DataStructureName("StringTree")
}
| rbraeunlich/formic | tree/shared/src/main/scala/de/tu_berlin/formic/datastructure/tree/client/FormicStringTreeFactory.scala | Scala | apache-2.0 | 795 |
package dresden.sim.crdt
import java.util.UUID
import com.typesafe.scalalogging.StrictLogging
import dresden.crdt.Ports._
import dresden.crdt.set.TwoPSet
import dresden.crdt.set.TwoPSetManager.{AddOperation, RemoveOperation}
import dresden.sim.SimUtil.DresdenTimeout
import dresden.sim.{SimUtil, SimulationResultSingleton}
import se.sics.kompics.Start
import se.sics.kompics.sl._
import se.sics.kompics.timer.{SchedulePeriodicTimeout, Timer}
import se.sics.ktoolbox.util.network.KAddress
object TwoPSetSimApp {
case class Init(selfAdr: KAddress) extends se.sics.kompics.Init[TwoPSetSimApp]
}
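/**
 * Simulation component that owns a replicated TwoPSet: on a periodic timer it either adds a
 * fresh element (up to `maxSends` operations) or removes a random existing one, and records
 * every received CRDT update in the SimulationResultSingleton for later verification.
 */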
class TwoPSetSimApp(val init: TwoPSetSimApp.Init) extends ComponentDefinition with StrictLogging {
val mngr = requires[TwoPSetManagement]
val timer = requires[Timer]
var twopset: Option[TwoPSet[String]] = None
val self = init match {
case TwoPSetSimApp.Init(self) => self
}
private val period: Long = 1000 // TODO
private var timerIds: Set[UUID] = Set.empty[UUID]
private var numSends: Int = 0
private var maxSends: Int = 20
ctrl uponEvent {
case _: Start => handle {
logger.info(s"$self starting...")
val spt = new SchedulePeriodicTimeout(0, period)
val timeout = DresdenTimeout(spt)
spt.setTimeoutEvent(timeout)
trigger(spt -> timer)
timerIds += timeout.getTimeoutId
// Fetch our set
trigger(Get(SimUtil.CRDT_SET_KEY) -> mngr)
}
}
timer uponEvent {
case DresdenTimeout(_) => handle {
// Either send another 'add' or remove a random
      if (math.random < 0.3 && twopset.exists(_.entries.nonEmpty)) { // guard against the set not being fetched yet
removeRandom()
} else {
sendAdd()
}
}
}
mngr uponEvent {
case Response(id, crdt: TwoPSet[String]) => handle {
twopset = Some(crdt)
logger.info(s"$self Received $crdt")
sendAdd()
}
case Update(id, crdt: TwoPSet[String]) => handle {
logger.info(s"Received CRDT update for $id")
twopset = Some(crdt)
import scala.collection.JavaConverters._
SimulationResultSingleton.getInstance().put(self.getId + SimUtil.TWOPSET_STR, crdt.entries.asJava)
}
}
private def sendAdd(): Unit = {
if (numSends < maxSends) {
logger.debug(s"$self Triggering send")
trigger(Op(SimUtil.CRDT_SET_KEY, AddOperation(self.toString + SimUtil.DELIM_STR + numSends)) -> mngr)
numSends += 1
}
}
private def removeRandom(): Unit = {
if (numSends < maxSends) {
logger.debug(s"$self Triggering remove")
val it = random[String](twopset.get.entries)
trigger(Op(SimUtil.CRDT_SET_KEY, RemoveOperation(it)) -> mngr)
numSends += 1
}
}
def random[T](s: Set[T]): T = {
val n = util.Random.nextInt(s.size)
s.iterator.drop(n).next
}
}
| jarlopez/dresden | src/test/scala-2.11/dresden/sim/crdt/TwoPSetSimApp.scala | Scala | mit | 3,043 |
/*
* Copyright 2015 Matt Massie
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// GENERATED SOURCE: DO NOT EDIT.
package com.github.massie.avrotuples
import java.io._
import java.util
import com.esotericsoftware.kryo.{Kryo, KryoSerializable}
import com.esotericsoftware.kryo.io.{Input, Output}
import org.apache.avro.Schema
import org.apache.avro.generic.GenericData
import org.apache.avro.io.{DecoderFactory, EncoderFactory}
import org.apache.avro.specific.{SpecificDatumReader, SpecificDatumWriter, SpecificRecord}
import org.apache.avro.util.Utf8
object AvroTuple12 {
val SCHEMA$ = AvroTupleSchemas.recursiveSchemas(11)
val reader = new SpecificDatumReader[AvroTuple12[_, _, _, _, _, _, _, _, _, _, _, _]](SCHEMA$)
val writer = new SpecificDatumWriter[AvroTuple12[_, _, _, _, _, _, _, _, _, _, _, _]](SCHEMA$)
def readFromInputStream(tuple: AvroTuple12[_, _, _, _, _, _, _, _, _, _, _, _], in: InputStream) = {
AvroTuple12.reader.read(tuple, DecoderFactory.get.directBinaryDecoder(in, null))
}
def writeToOutputStream(tuple: AvroTuple12[_, _, _, _, _, _, _, _, _, _, _, _], out: OutputStream) = {
AvroTuple12.writer.write(tuple, EncoderFactory.get.directBinaryEncoder(out, null))
}
def fromInputStream(in: InputStream) : AvroTuple12[_, _, _, _, _, _, _, _, _, _, _, _] = {
readFromInputStream(null.asInstanceOf[AvroTuple12[_, _, _, _, _, _, _, _, _, _, _, _]], in)
}
def fromBytes(bytes: Array[Byte]): AvroTuple12[_, _, _, _, _, _, _, _, _, _, _, _] = {
val in = new ByteArrayInputStream(bytes)
val tuple = fromInputStream(in)
in.close()
tuple
}
}
final case class AvroTuple12[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12](
@transient var _1: T1,
@transient var _2: T2,
@transient var _3: T3,
@transient var _4: T4,
@transient var _5: T5,
@transient var _6: T6,
@transient var _7: T7,
@transient var _8: T8,
@transient var _9: T9,
@transient var _10: T10,
@transient var _11: T11,
@transient var _12: T12)
extends Product12[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12] with SpecificRecord with KryoSerializable with Externalizable {
def this() = this(null.asInstanceOf[T1],
null.asInstanceOf[T2],
null.asInstanceOf[T3],
null.asInstanceOf[T4],
null.asInstanceOf[T5],
null.asInstanceOf[T6],
null.asInstanceOf[T7],
null.asInstanceOf[T8],
null.asInstanceOf[T9],
null.asInstanceOf[T10],
null.asInstanceOf[T11],
null.asInstanceOf[T12])
def update(n1: T1, n2: T2, n3: T3, n4: T4, n5: T5, n6: T6, n7: T7, n8: T8, n9: T9, n10: T10, n11: T11, n12: T12): AvroTuple12[T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12] = {
_1 = n1
_2 = n2
_3 = n3
_4 = n4
_5 = n5
_6 = n6
_7 = n7
_8 = n8
_9 = n9
_10 = n10
_11 = n11
_12 = n12
this
}
@throws(classOf[IndexOutOfBoundsException])
override def get(i: Int): AnyRef = i match {
case 0 => val values = new util.ArrayList[AnyRef](productArity)
values.add(0, _1.asInstanceOf[AnyRef])
values.add(1, _2.asInstanceOf[AnyRef])
values.add(2, _3.asInstanceOf[AnyRef])
values.add(3, _4.asInstanceOf[AnyRef])
values.add(4, _5.asInstanceOf[AnyRef])
values.add(5, _6.asInstanceOf[AnyRef])
values.add(6, _7.asInstanceOf[AnyRef])
values.add(7, _8.asInstanceOf[AnyRef])
values.add(8, _9.asInstanceOf[AnyRef])
values.add(9, _10.asInstanceOf[AnyRef])
values.add(10, _11.asInstanceOf[AnyRef])
values.add(11, _12.asInstanceOf[AnyRef])
values.asInstanceOf[AnyRef]
case _ => throw new IndexOutOfBoundsException(i.toString)
}
private def utf8string(obj: Any) = obj match {
case u: Utf8 => u.toString
case _ => obj
}
@throws(classOf[IndexOutOfBoundsException])
override def put(i: Int, v: scala.Any): Unit = i match {
case 0 =>
val array = v match {
case avroArray: GenericData.Array[_]=> avroArray
case javaArray: util.ArrayList[_]=> javaArray
}
assert(array.size == productArity,
s"Tried to put ${array.size} values into AvroTuple with productArity of $productArity")
_1 = utf8string(array.get(0)).asInstanceOf[T1]
_2 = utf8string(array.get(1)).asInstanceOf[T2]
_3 = utf8string(array.get(2)).asInstanceOf[T3]
_4 = utf8string(array.get(3)).asInstanceOf[T4]
_5 = utf8string(array.get(4)).asInstanceOf[T5]
_6 = utf8string(array.get(5)).asInstanceOf[T6]
_7 = utf8string(array.get(6)).asInstanceOf[T7]
_8 = utf8string(array.get(7)).asInstanceOf[T8]
_9 = utf8string(array.get(8)).asInstanceOf[T9]
_10 = utf8string(array.get(9)).asInstanceOf[T10]
_11 = utf8string(array.get(10)).asInstanceOf[T11]
_12 = utf8string(array.get(11)).asInstanceOf[T12]
case _ => throw new IndexOutOfBoundsException(i.toString)
}
override def getSchema: Schema = AvroTuple12.SCHEMA$
override def toString: String = "(" + _1 + "," + _2 + "," + _3 + "," + _4 + "," + _5 + "," + _6 + "," + _7 + "," + _8 + "," + _9 + "," + _10 + "," + _11 + "," + _12 + ")"
def toBytes: Array[Byte] = {
val byteStream = new ByteArrayOutputStream()
AvroTuple12.writeToOutputStream(this, byteStream)
byteStream.flush()
val bytes = byteStream.toByteArray
byteStream.close()
bytes
}
override def readExternal(in: ObjectInput): Unit = {
AvroTuple12.readFromInputStream(this, ExternalizableInput(in))
}
override def writeExternal(out: ObjectOutput): Unit = {
AvroTuple12.writeToOutputStream(this, ExternalizableOutput(out))
}
override def write(kryo: Kryo, output: Output): Unit = {
AvroTuple12.writeToOutputStream(this, output.getOutputStream)
}
override def read(kryo: Kryo, input: Input): Unit = {
AvroTuple12.readFromInputStream(this, input.getInputStream)
}
} | massie/avrotuples | src/main/scala/com/github/massie/avrotuples/AvroTuple12.scala | Scala | apache-2.0 | 6,566 |
package controllers
import jp.t2v.lab.play2.auth.{AuthenticityToken, AsyncIdContainer, AuthConfig}
import jp.t2v.lab.play2.auth.sample.{Role, Account}
import jp.t2v.lab.play2.auth.sample.Role._
import play.api.mvc.RequestHeader
import play.api.mvc.Results._
import scala.concurrent.{Future, ExecutionContext}
import scala.reflect._
import play.Logger
import scala.collection.concurrent.TrieMap
import scala.util.Random
import java.security.SecureRandom
import scala.annotation.tailrec
import play.api.cache.Cache
trait BaseAuthConfig extends AuthConfig {
type Id = Int
type User = Account
type Authority = Role
val idTag: ClassTag[Id] = classTag[Id]
val sessionTimeoutInSeconds = 3600
def resolveUser(id: Id)(implicit ctx: ExecutionContext) = Future.successful(Account.findById(id))
def authorizationFailed(request: RequestHeader)(implicit ctx: ExecutionContext) = throw new AssertionError("don't use")
override def authorizationFailed(request: RequestHeader, user: User, authority: Option[Authority])(implicit ctx: ExecutionContext) = {
Logger.info(s"authorizationFailed. userId: ${user.id}, userName: ${user.name}, authority: $authority")
Future.successful(Forbidden("no permission"))
}
def authorize(user: User, authority: Authority)(implicit ctx: ExecutionContext) = Future.successful((user.role, authority) match {
case (Administrator, _) => true
case (NormalUser, NormalUser) => true
case _ => false
})
override lazy val idContainer: AsyncIdContainer[Id] = new AsyncIdContainer[Id] {
private val tokenSuffix = ":token"
private val userIdSuffix = ":userId"
private val random = new Random(new SecureRandom())
override def startNewSession(userId: Id, timeoutInSeconds: Int)(implicit request: RequestHeader, context: ExecutionContext): Future[AuthenticityToken] = {
removeByUserId(userId)
val token = generate()
store(token, userId, timeoutInSeconds)
Future.successful(token)
}
@tailrec
private final def generate(): AuthenticityToken = {
val table = "abcdefghijklmnopqrstuvwxyz1234567890_.~*'()"
val token = Iterator.continually(random.nextInt(table.size)).map(table).take(64).mkString
if (syncGet(token).isDefined) generate() else token
}
private def removeByUserId(userId: Id) {
GlobalMap.container.get(userId.toString + userIdSuffix).map(_.asInstanceOf[String]) foreach unsetToken
unsetUserId(userId)
}
override def remove(token: AuthenticityToken)(implicit context: ExecutionContext): Future[Unit] = {
get(token).map(_ foreach unsetUserId)
Future.successful(unsetToken(token))
}
private def unsetToken(token: AuthenticityToken) {
GlobalMap.container.remove(token + tokenSuffix)
}
private def unsetUserId(userId: Id) {
GlobalMap.container.remove(userId.toString + userIdSuffix)
}
override def get(token: AuthenticityToken)(implicit context: ExecutionContext): Future[Option[Id]] = {
Future.successful(syncGet(token))
}
private def syncGet(token: AuthenticityToken): Option[Id] = {
GlobalMap.container.get(token + tokenSuffix).map(_.asInstanceOf[Id])
}
private def store(token: AuthenticityToken, userId: Id, timeoutInSeconds: Int) {
GlobalMap.container.put(token + tokenSuffix, userId.asInstanceOf[AnyRef]/*, timeoutInSeconds*/) // TODO:
GlobalMap.container.put(userId.toString + userIdSuffix, token.asInstanceOf[AnyRef]/*, timeoutInSeconds*/) // TODO:
}
override def prolongTimeout(token: AuthenticityToken, timeoutInSeconds: Int)(implicit request: RequestHeader, context: ExecutionContext): Future[Unit] = {
Future.successful(syncGet(token).foreach(store(token, _, timeoutInSeconds)))
}
}
}
object GlobalMap {
private[controllers] val container: TrieMap[String, AnyRef] = new TrieMap[String, AnyRef]()
} | kuenzaa/play2-auth | sample/app/controllers/BaseAuthConfig.scala | Scala | apache-2.0 | 3,886 |
package fpinscala
package parsing
import ReferenceTypes._
import scala.util.matching.Regex
object ReferenceTypes {
/** A parser is a kind of state action that can fail. */
type Parser[+A] = ParseState => Result[A]
/** `ParseState` wraps a `Location` and provides some extra
* convenience functions. The sliceable parsers defined
* in `Sliceable.scala` add an `isSliced` `Boolean` flag
* to `ParseState`.
*/
case class ParseState(loc: Location) {
def advanceBy(numChars: Int): ParseState =
copy(loc = loc.copy(offset = loc.offset + numChars))
def input: String = loc.input.substring(loc.offset)
def slice(n: Int) = loc.input.substring(loc.offset, loc.offset + n)
}
/* Likewise, we define a few helper functions on `Result`. */
sealed trait Result[+A] {
def extract: Either[ParseError,A] = this match {
case Failure(e,_) => Left(e)
case Success(a,_) => Right(a)
}
/* Used by `attempt`. */
def uncommit: Result[A] = this match {
case Failure(e,true) => Failure(e,false)
case _ => this
}
/* Used by `flatMap` */
def addCommit(isCommitted: Boolean): Result[A] = this match {
case Failure(e,c) => Failure(e, c || isCommitted)
case _ => this
}
/* Used by `scope`, `label`. */
def mapError(f: ParseError => ParseError): Result[A] = this match {
case Failure(e,c) => Failure(f(e),c)
case _ => this
}
def advanceSuccess(n: Int): Result[A] = this match {
case Success(a,m) => Success(a,n+m)
case _ => this
}
}
case class Success[+A](get: A, length: Int) extends Result[A]
case class Failure(get: ParseError, isCommitted: Boolean) extends Result[Nothing]
  /** Returns -1 if s1, starting at the given offset, starts with s2; otherwise
    * returns the first index at which the two strings differ. If s2 is longer
    * than the remainder of s1, returns s1.length - offset. */
def firstNonmatchingIndex(s1: String, s2: String, offset: Int): Int = {
var i = 0
while (i + offset < s1.length && i < s2.length) {
if (s1.charAt(i+offset) != s2.charAt(i)) return i
i += 1
}
if (s1.length-offset >= s2.length) -1
else s1.length-offset
}
}
object Reference extends Parsers[Parser] {
def run[A](p: Parser[A])(s: String): Either[ParseError,A] = {
val s0 = ParseState(Location(s))
p(s0).extract
}
// consume no characters and succeed with the given value
def succeed[A](a: A): Parser[A] = s => Success(a, 0)
def or[A](p: Parser[A], p2: => Parser[A]): Parser[A] =
s => p(s) match {
case Failure(e,false) => p2(s)
case r => r // committed failure or success skips running `p2`
}
def flatMap[A,B](f: Parser[A])(g: A => Parser[B]): Parser[B] =
s => f(s) match {
case Success(a,n) => g(a)(s.advanceBy(n))
.addCommit(n != 0)
.advanceSuccess(n)
case f@Failure(_,_) => f
}
def string(w: String): Parser[String] = {
val msg = "'" + w + "'"
s => {
val i = firstNonmatchingIndex(s.loc.input, w, s.loc.offset)
if (i == -1) // they matched
Success(w, w.length)
else
Failure(s.loc.advanceBy(i).toError(msg), i != 0)
}
}
/* note, regex matching is 'all-or-nothing':
* failures are uncommitted */
def regex(r: Regex): Parser[String] = {
val msg = "regex " + r
s => r.findPrefixOf(s.input) match {
case None => Failure(s.loc.toError(msg), false)
case Some(m) => Success(m,m.length)
}
}
def scope[A](msg: String)(p: Parser[A]): Parser[A] =
s => p(s).mapError(_.push(s.loc,msg))
def label[A](msg: String)(p: Parser[A]): Parser[A] =
s => p(s).mapError(_.label(msg))
def fail[A](msg: String): Parser[A] =
s => Failure(s.loc.toError(msg), true)
def attempt[A](p: Parser[A]): Parser[A] =
s => p(s).uncommit
def slice[A](p: Parser[A]): Parser[String] =
s => p(s) match {
case Success(_,n) => Success(s.slice(n),n)
case f@Failure(_,_) => f
}
/* We provide an overridden version of `many` that accumulates
* the list of results using a monolithic loop. This avoids
* stack overflow errors for most grammars.
*/
override def many[A](p: Parser[A]): Parser[List[A]] =
s => {
var nConsumed: Int = 0
val buf = new collection.mutable.ListBuffer[A]
def go(p: Parser[A], offset: Int): Result[List[A]] = {
p(s.advanceBy(offset)) match {
case Success(a,n) => buf += a; go(p, offset+n)
case f@Failure(e,true) => f
case Failure(e,_) => Success(buf.toList,offset)
}
}
go(p, 0)
}
}
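
/* Illustrative usage sketch (not part of the original exercise file): running the
 * reference implementation on a couple of made-up inputs. Only `string` and `run`
 * from the object above are used; the sample inputs are assumptions for demonstration.
 */
object ReferenceUsageSketch {
  import Reference._

  def demo(): Unit = {
    val p = string("abra")
    println(run(p)("abracadabra")) // expected: Right("abra")
    println(run(p)("abba"))        // expected: Left(...) pointing at the first mismatch
  }
}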
| jonas/fpinscala | exercises/src/test/scala/fpinscala/parsing/Reference.scala | Scala | mit | 4,623 |
/**
* The MIT License (MIT) Copyright (c) 2014 University of Applied Sciences, Berlin, Germany
* For more detailed information, please read the licence.txt in the root directory.
**/
package org.onepercent.utils
import org.onepercent.JobResult
import sys.process._
/**
* Represents a message which is based on the initiated method.
* @param output Message based on the initiated method.
*/
case class ApacheSparkResult(output: String) extends JobResult
/**
* Class to control the Apache Spark service.
* @author Patrick Mariot
*
* http://alvinalexander.com/scala/scala-execute-exec-external-system-commands-in-scala
**/
class ApacheSparkController() {
val apacheSparkHome: String = "/home/05/40031/spark/"
val apacheSparkInitSkript: String = "spark-agent.sh"
val apacheSparkLogFile: String = "logs/org.onepercent.App.log"
/**
* Decides which method to run and calls the assigned method.
*
* @param method String that contains the desired method
* @return Output of the function
*/
def execute(method: String): JobResult = {
method match{
case "restart" => restart()
case "status" => status()
case "log" => log()
case "debug" => debug()
case "update" => update()
case _ => ErrorMessage("No Method " + method + " available!", 100)
}
}
/**
* Runs the start Method from the apacheSparkInitSkript, to start the Apache Spark Service.
* @return Output of the start Method
*/
private def start(): ApacheSparkResult ={
val output = Process(apacheSparkHome + apacheSparkInitSkript + " start").lines_!
ApacheSparkResult(output.mkString)
}
/**
* Runs the stop Method from the apacheSparkInitSkript, to stop the Apache Spark Service.
* @return Output of the stop Method
*/
private def stop(): ApacheSparkResult ={
val output = Process(apacheSparkHome + apacheSparkInitSkript + " stop").lines_!
ApacheSparkResult(output.mkString)
}
/**
* Runs the restart Method from the apacheSparkInitSkript, to restart the Apache Spark Service.
   * May return nothing, because the service gets restarted.
* @return Output of the restart Method
*/
private def restart(): ApacheSparkResult ={
val output = Process(apacheSparkHome + apacheSparkInitSkript + " restart").lines_!
ApacheSparkResult(output.mkString)
}
/**
* Runs the status Method from the apacheSparkInitSkript, to list in which state the Apache Spark Service is.
* @return Output of the status Method
*/
private def status(): ApacheSparkResult ={
val output = Process(apacheSparkHome + apacheSparkInitSkript + " status").lines_!
ApacheSparkResult(output.mkString)
}
/**
* Shows the Log of the Apache Spark Service.
   * @return The last 20 lines of the log file
*/
private def log(): ApacheSparkResult ={
val output = Process("tail -n 20 " + apacheSparkHome + apacheSparkLogFile).lines_!
ApacheSparkResult(output.mkString)
}
/**
* Runs the debug Method from the apacheSparkInitSkript, to show the debug messages of the Apache Spark Service.
* @return Outputs the last 20 Lines that start with '### DEBUG ###'
*/
private def debug(): ApacheSparkResult ={
val output = Process(apacheSparkHome + apacheSparkInitSkript + " debug").lines_!
ApacheSparkResult(output.mkString)
}
/**
* Runs the update Method from the apacheSparkInitSkript, to update the Apache Spark Service.
   * May return nothing, because the service gets restarted.
* @return Output of the update Method
*/
private def update(): ApacheSparkResult ={
val output = Process(apacheSparkHome + apacheSparkInitSkript + " update").lines_!
ApacheSparkResult(output.mkString)
}
}
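
/* Illustrative usage sketch (not part of the original file): dispatching one of the
 * supported method names and inspecting the result. ErrorMessage is assumed to be
 * the error JobResult used elsewhere in org.onepercent.utils, as referenced above.
 */
object ApacheSparkControllerSketch {
  def main(args: Array[String]): Unit = {
    val controller = new ApacheSparkController()
    controller.execute("status") match {
      case ApacheSparkResult(output) => println(s"spark-agent reports: $output")
      case other                     => println(s"unexpected result: $other")
    }
  }
}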
| isn0gud/onepercent | src/main/scala/org/onepercent/utils/ApacheSparkController.scala | Scala | mit | 3,740 |
package ru.maizy.cheesecake.server.tests
import akka.testkit.TestKit
import org.scalatest.{ Suite, BeforeAndAfterAll }
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2016
* See LICENSE.txt for details.
*/
trait KillActorSystemAfterAllTests extends BeforeAndAfterAll {
this: TestKit with Suite =>
override protected def afterAll(): Unit = {
TestKit.shutdownActorSystem(system)
}
}
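
/* Illustrative usage sketch (not part of the original file): mixing the trait into a
 * TestKit-based ScalaTest suite so the actor system is shut down once after all tests
 * have run. The suite name, spec style and assertion below are assumptions for
 * demonstration only.
 */
import akka.actor.ActorSystem
import org.scalatest.WordSpecLike

class KillActorSystemUsageSketch extends TestKit(ActorSystem("usage-sketch"))
  with WordSpecLike with KillActorSystemAfterAllTests {

  "the test actor system" should {
    "be available inside each test" in {
      assert(system.name == "usage-sketch")
    }
  }
}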
| maizy/cheesecake | server/src/test/scala/ru/maizy/cheesecake/server/tests/KillActorSystemAfterAllTests.scala | Scala | apache-2.0 | 397 |
package collins.graphs
import java.net.URLEncoder
import scala.util.control.Exception
import play.twirl.api.Content
import com.codahale.jerkson.Json.generate
import collins.models.asset.AssetView
sealed trait GraphView {
def get(asset: AssetView): Option[Content]
def isGraphable(asset: AssetView): Boolean = true
}
sealed class FibrGraphs extends GraphView {
override def get(asset: AssetView): Option[Content] = {
if (isGraphable(asset)){
Some(getIframe(asset.getHostnameMetaValue.get))
} else {
None
}
}
override def isGraphable(asset: AssetView): Boolean = {
asset.isServerNode && asset.getHostnameMetaValue.isDefined
}
protected def getIframe(hostname: String) = {
collins.graphs.templates.html.fibr(hostname)
}
}
sealed class GangliaGraphs extends GraphView {
override def get(asset: AssetView): Option[Content] = {
if (isGraphable(asset)){
Some(getIframe(generate_dynamic_view_json(asset.getHostnameMetaValue.get)))
} else {
None
}
}
protected def getIframe(view_json: String) = {
collins.graphs.templates.html.ganglia(URLEncoder.encode(view_json, "UTF-8"))
}
override def isGraphable(asset: AssetView): Boolean = {
asset.isServerNode && asset.getHostnameMetaValue.isDefined
}
def hostKey(hostname: String): String = {
hostname + GangliaGraphConfig.hostSuffix
}
def generate_dynamic_view_json(hostname: String): String = {
generate(
Map(
"view_name" -> "ad-hoc",
"view_type" -> "standard",
"items" -> (GangliaGraphConfig.defaultGraphs.map(g =>
Map(
"hostname" -> hostKey(hostname),
"graph" -> g
)) ++ GangliaGraphConfig.defaultMetrics.map(m =>
Map(
"hostname" -> hostKey(hostname),
"metric" -> m
)
))
)
)
}
}
object GraphView extends GraphView {
protected val underlying: Option[GraphView] = getGraphInstance()
override def isGraphable(asset: AssetView): Boolean = {
return underlying.map(_.isGraphable(asset)).getOrElse(false)
}
override def get(asset: AssetView): Option[Content] = {
underlying.flatMap(_.get(asset))
}
private def getGraphInstance(): Option[GraphView] = {
if (GraphConfig.enabled) {
GraphConfig.className match {
case "collins.graphs.FibrGraphs" => Some(new FibrGraphs())
case "collins.graphs.GangliaGraphs" => Some(new GangliaGraphs())
}
} else {
None
}
}
} | funzoneq/collins | app/collins/graphs/GraphView.scala | Scala | apache-2.0 | 2,526 |
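/* Illustrative usage sketch for the Collins GraphView object above (not part of the
 * original file): callers typically check isGraphable before asking for the rendered
 * iframe. The asset parameter is assumed to come from the Collins asset model.
 */
object GraphViewUsageSketch {
  import collins.graphs.GraphView
  import collins.models.asset.AssetView
  import play.twirl.api.Content

  def graphFor(asset: AssetView): Option[Content] =
    if (GraphView.isGraphable(asset)) GraphView.get(asset) else None
}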
package com.twitter.finagle.serverset2.client
import com.twitter.io.Buf
import com.twitter.util.{Closable, Duration, Future}
private[serverset2] trait ZooKeeperClient extends Closable {
/**
* The session id for this ZooKeeper client instance. The value returned is
* not valid until the client connects to a server and may change after a
* re-connect.
*
* @return current session id
*/
def sessionId: Long
/**
* The session password for this ZooKeeper client instance. The value
* returned is not valid until the client connects to a server and may
* change after a re-connect.
*
* @return current session password
*/
def sessionPasswd: Buf
/**
* The negotiated session timeout for this ZooKeeper client instance. The
* value returned is not valid until the client connects to a server and
* may change after a re-connect.
*
* @return current session timeout
*/
def sessionTimeout: Duration
/**
* Add the specified scheme: auth information to this connection.
*
* @param scheme the authentication scheme to use.
* @param auth the authentication credentials.
* @return a Future[Unit]
*/
def addAuthInfo(scheme: String, auth: Buf): Future[Unit]
/**
* Get the existing ephemeral nodes created with the current session ID.
*
* NOTE: This method is not universally implemented. The Future will fail
* with KeeperException.Unimplemented if this is the case.
*
* @return a Future[Seq[String]] of ephemeral node paths.
*/
def getEphemerals(): Future[Seq[String]]
/**
* String representation of this ZooKeeper client. Suitable for things
* like logging.
*
* Do NOT count on the format of this string, it may change without
* warning.
*
* @return string representation of the current client
*/
def toString: String
}
private[serverset2] trait ZooKeeperReader extends ZooKeeperClient {
/**
* Check if a node exists.
*
* @param path the path of the node to check.
* @return a Future[Option[Data.Stat] containing Some[Stat] if the node exists,
* or None if the node does not exist.
*/
def exists(path: String): Future[Option[Data.Stat]]
/**
* A version of exists that sets a watch and returns a Future[Watched[Option[Data.Stat]]]
*/
def existsWatch(path: String): Future[Watched[Option[Data.Stat]]]
/**
* Return the data of the node of the given path.
*
* @param path the path of the node to read.
* @return a Future[Node.Data]
*/
def getData(path: String): Future[Node.Data]
/**
* A version of getData that sets a watch and returns a Future[Watched[Node.Data]]
*/
def getDataWatch(path: String): Future[Watched[Node.Data]]
/**
* Get the ACL of the node of the given path.
*
* @param path the path of the node to read.
* @return a Future[Node.ACL]
*/
def getACL(path: String): Future[Node.ACL]
/**
* For a node at a given path return its stat and a list of children.
*
* @param path the path of the node to read.
* @return a Future[Node.Children]
*/
def getChildren(path: String): Future[Node.Children]
/**
* A version of getChildren that sets and returns a Future[Watched[Node.Children]]
*/
def getChildrenWatch(path: String): Future[Watched[Node.Children]]
/**
* Sync. Flushes channel between process and leader.
*
* @param path the path of the node to sync.
* @return a Future[Unit]
*/
def sync(path: String): Future[Unit]
}
object ZooKeeperReader {
def patToPathAndPrefix(pat: String): (String, String) = {
if (pat.isEmpty || pat(0) != '/')
throw new IllegalArgumentException("Invalid glob pattern")
val slash = pat.lastIndexOf('/')
if (slash < 0)
throw new IllegalArgumentException("Invalid prefix")
val path = if (slash == 0) "/" else pat.substring(0, slash)
val prefix = pat.substring(slash + 1, pat.length)
(path, prefix)
}
/** An implementation helper for ZooKeeperReader.glob */
def processGlob(path: String, prefix: String, children: java.util.List[String]): Seq[String] = {
val seq = Seq.newBuilder[String]
val iter = children.iterator()
while (iter.hasNext()) {
val el = iter.next()
if (el startsWith prefix)
seq += path + "/" + el
}
seq.result
}
}
private[serverset2] trait ZooKeeperWriter extends ZooKeeperClient {
/**
* Create a node of a given type with the given path. The node data will be the
* given data, and node acl will be the given acl.
*
* @param path the path for the node.
* @param data the initial data for the node.
* @param acl a sequence of ACLs for the node.
* @param createMode specifies what type of node to create.
* @return a Future[String] containing the actual path of the created node.
*/
def create(
path: String,
data: Option[Buf],
acl: Seq[Data.ACL],
createMode: CreateMode
): Future[String]
/**
* Delete the node with the given path. The call will succeed if such a node
* exists, and the given version matches the node's version (if the given
* version is None, it matches any node's versions).
*
* This operation, if successful, will trigger all the watches on the node
* of the given path left by existsWatch API calls, and the watches on the parent
* node left by getChildrenWatch API calls.
*
* @param path the path of the node to be deleted.
* @param version the expected node version.
* @return a Future[Unit]
*/
def delete(path: String, version: Option[Int]): Future[Unit]
/**
* Set the data for the node of the given path if such a node exists and the
* given version matches the version of the node (if the given version is None,
* it matches any node's versions).
*
* This operation, if successful, will trigger all the watches on the node
* of the given path left by getDataWatch calls.
*
* @param path the path of the node to write.
* @param data the data to set.
* @param version the expected matching version.
* @return a Future[Data.Stat]
*/
def setData(path: String, data: Option[Buf], version: Option[Int]): Future[Data.Stat]
/**
* Set the ACL for the node of the given path if such a node exists and the
* given version matches the version of the node (if the given version is None,
* it matches any node's versions)
*
* @param path the path of the node to write.
* @param acl a list of Data.ACL to apply to the node.
* @param version the expected matching version.
* @return a Future[Data.Stat]
*/
def setACL(path: String, acl: Seq[Data.ACL], version: Option[Int]): Future[Data.Stat]
}
private[serverset2] trait ZooKeeperMulti extends ZooKeeperClient {
/**
* Transactional operation. Execute all operations or none of them.
*
* @param ops a list of operations to apply.
* @return a Future[Seq[OpResult]]
*/
def multi(ops: Seq[Op]): Future[Seq[OpResult]]
}
private[serverset2] trait ZooKeeperRW extends ZooKeeperReader with ZooKeeperWriter
private[serverset2] trait ZooKeeperRWMulti extends ZooKeeperRW with ZooKeeperMulti
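
/* Illustrative sketch (not part of the original file): how the glob helpers in
 * ZooKeeperReader decompose a pattern and filter a node's children. The sample
 * pattern and child names below are made up for demonstration.
 */
private[serverset2] object GlobHelpersSketch {
  def demo(): Seq[String] = {
    val (path, prefix) = ZooKeeperReader.patToPathAndPrefix("/twitter/service/member_")
    val children = java.util.Arrays.asList("member_0000000001", "member_0000000002", "leader")
    // Keeps only the children starting with "member_" and prepends the parent path.
    ZooKeeperReader.processGlob(path, prefix, children)
  }
}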
| luciferous/finagle | finagle-serversets/src/main/scala/com/twitter/finagle/serverset2/client/ZooKeeperClient.scala | Scala | apache-2.0 | 7,147 |
package net.sansa_stack.rdf.flink.utils
import java.lang.Iterable
import scala.reflect.ClassTag
import org.apache.flink.api.common.functions.CoGroupFunction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.scala.DataSet
import org.apache.flink.table.runtime.IntersectCoGroupFunction
import org.apache.flink.util.Collector
/**
* @author Lorenz Buehmann
*/
object DataSetUtils {
implicit class DataSetOps[T: ClassTag: TypeInformation](dataset: DataSet[T]) {
/**
     * Splits a DataSet into two parts based on the given filter function. Note that the filter
     * is applied twice to the same data, so caching beforehand is recommended!
     *
     * @param f the boolean filter function
     * @return two DataSets: the elements that pass the filter and those that fail it
*/
def partitionBy(f: T => Boolean): (DataSet[T], DataSet[T]) = {
val passes = dataset.filter(f)
val fails = dataset.filter(e => !f(e)) // Flink doesn't have filterNot
(passes, fails)
}
def subtract(other: DataSet[T]): DataSet[T] = {
dataset.coGroup(other).where("*").equalTo("*")(new MinusCoGroupFunction[T](true)).name("subtract")
}
def intersect(other: DataSet[T]): DataSet[T] = {
dataset.coGroup(other).where("*").equalTo("*")(new IntersectCoGroupFunction[T](true)).name("intersect")
}
}
}
class MinusCoGroupFunction[T: ClassTag: TypeInformation](all: Boolean) extends CoGroupFunction[T, T, T] {
override def coGroup(first: Iterable[T], second: Iterable[T], out: Collector[T]): Unit = {
if (first == null || second == null) return
val leftIter = first.iterator
val rightIter = second.iterator
if (all) {
while (rightIter.hasNext && leftIter.hasNext) {
leftIter.next()
rightIter.next()
}
while (leftIter.hasNext) {
out.collect(leftIter.next())
}
} else {
if (!rightIter.hasNext && leftIter.hasNext) {
out.collect(leftIter.next())
}
}
}
}
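
/* Illustrative usage sketch (not part of the original file): the implicit DataSetOps
 * wrapper adds partitionBy/subtract/intersect to any Flink DataSet. The local
 * ExecutionEnvironment and sample numbers below are assumptions for demonstration.
 */
object DataSetUtilsUsageSketch {
  import org.apache.flink.api.scala._
  import DataSetUtils.DataSetOps

  def demo(): Unit = {
    val env = ExecutionEnvironment.getExecutionEnvironment
    val numbers = env.fromCollection(Seq(1, 2, 3, 4, 5))
    // Split into the elements that satisfy the predicate and those that do not.
    val (even, odd) = numbers.partitionBy(_ % 2 == 0)
    even.print()
    odd.print()
  }
}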
| SANSA-Stack/Spark-RDF | sansa-rdf-flink/src/main/scala/net/sansa_stack/rdf/flink/utils/DataSetUtils.scala | Scala | gpl-3.0 | 1,971 |
package tethys
package object derivation {
case class SimpleType(i: Int, s: String, d: Double)
case class SimpleTypeWithAny(i: Int, s: String, d: Double, any: Any)
case class JsonTreeTestData(a: Int, b: Boolean, c: C)
case class C(d: D)
case class D(a: Int)
case class RecursiveType(a: Int, children: Seq[RecursiveType] = Seq.empty)
case class ComplexRecursionA(a: Int, b: Option[ComplexRecursionB])
case class ComplexRecursionB(b: Int, a: ComplexRecursionA)
trait JsonComplexTestData
case class JsonComplexTestDataImpl1(a: Int) extends JsonComplexTestData
case class JsonComplexTestDataImpl2(b: String) extends JsonComplexTestData
case class SeqMaster1(a: Seq[SeqMaster2])
case class SeqMaster2(a: Seq[SeqMaster3])
case class SeqMaster3(a: Seq[SeqMaster4])
case class SeqMaster4(a: Seq[Int])
case class CamelCaseNames(someParam: Int, IDParam: Int, simple: Int)
}
| tethys-json/tethys | modules/macro-derivation/src/test/scala/tethys/derivation/package.scala | Scala | apache-2.0 | 904 |
/**
* Copyright: Copyright (C) 2016, ATS Advanced Telematic Systems GmbH
* License: MPL-2.0
*/
package org.genivi.sota.device_registry
import java.time.OffsetDateTime
import akka.http.scaladsl.model.Uri.{Path, Query}
import akka.http.scaladsl.model.{HttpRequest, StatusCodes, Uri}
import akka.http.scaladsl.server.Route
import cats.syntax.show._
import io.circe.Json
import io.circe.generic.auto._
import org.genivi.sota.data._
import org.genivi.sota.marshalling.CirceMarshallingSupport._
import scala.concurrent.ExecutionContext
/**
* Generic test resource object
* Used in property-based testing
*/
object Resource {
def uri(pathSuffixes: String*): Uri = {
val BasePath = Path("/api") / "v1"
Uri.Empty.withPath(pathSuffixes.foldLeft(BasePath)(_/_))
}
}
/**
* Testing Trait for building Device requests
*/
trait DeviceRequests { self: ResourceSpec =>
import Device._
import StatusCodes._
val api = "devices"
def fetchDevice(uuid: Uuid): HttpRequest =
Get(Resource.uri(api, uuid.show))
def listDevices(): HttpRequest =
Get(Resource.uri(api))
def searchDevice(namespace: Namespace, regex: String, offset: Long = 0, limit: Long = 50): HttpRequest =
Get(Resource.uri(api).withQuery(Query("regex" -> regex, "offset" -> offset.toString, "limit" -> limit.toString)))
def fetchByDeviceId(namespace: Namespace, deviceId: Device.DeviceId): HttpRequest =
Get(Resource.uri(api).withQuery(Query("namespace" -> namespace.get, "deviceId" -> deviceId.show)))
def updateDevice(uuid: Uuid, device: DeviceT)
(implicit ec: ExecutionContext): HttpRequest =
Put(Resource.uri(api, uuid.show), device)
def createDevice(device: DeviceT)
(implicit ec: ExecutionContext): HttpRequest =
Post(Resource.uri(api), device)
def createDeviceOk(device: DeviceT)
(implicit ec: ExecutionContext): Uuid =
createDevice(device) ~> route ~> check {
status shouldBe Created
responseAs[Uuid]
}
def deleteDevice(uuid: Uuid): HttpRequest =
Delete(Resource.uri(api, uuid.show))
def deleteDeviceOk(uuid: Uuid)
(implicit ec: ExecutionContext): Unit =
deleteDevice(uuid) ~> route ~> check {
status shouldBe OK
}
def fetchSystemInfo(uuid: Uuid): HttpRequest =
Get(Resource.uri(api, uuid.show, "system_info"))
def createSystemInfo(uuid: Uuid, json: Json)
(implicit ec: ExecutionContext): HttpRequest =
Post(Resource.uri(api, uuid.show,"system_info"), json)
def updateSystemInfo(uuid: Uuid, json: Json)
(implicit ec: ExecutionContext): HttpRequest =
Put(Resource.uri(api, uuid.show,"system_info"), json)
def listGroupsForDevice(device: Uuid)
(implicit ec: ExecutionContext): HttpRequest =
Get(Resource.uri(api, device.show, "groups"))
def installSoftware(device: Uuid, packages: Set[PackageId]): HttpRequest =
Put(Resource.uri("mydevice", device.show, "packages"), packages)
def installSoftwareOk(device: Uuid, packages: Set[PackageId])
(implicit route: Route): Unit =
installSoftware(device, packages) ~> route ~> check {
status shouldBe StatusCodes.NoContent
}
def listPackages(device: Uuid, regex: Option[String] = None)(implicit ec: ExecutionContext): HttpRequest =
regex match {
case Some(r) => Get(Resource.uri("devices", device.show, "packages").withQuery(Query("regex" -> r)))
case None => Get(Resource.uri("devices", device.show, "packages"))
}
def getStatsForPackage(pkg: PackageId)(implicit ec: ExecutionContext): HttpRequest =
Get(Resource.uri("device_count", pkg.name.get, pkg.version.get))
def getActiveDeviceCount(start: OffsetDateTime, end: OffsetDateTime): HttpRequest =
Get(Resource.uri("active_device_count").withQuery(Query("start" -> start.show, "end" -> end.show)))
def getInstalledForAllDevices(offset: Long = 0, limit: Long = 50): HttpRequest =
Get(Resource.uri("device_packages").withQuery(Query("offset" -> offset.toString, "limit" -> limit.toString)))
def getAffected(pkgs: Set[PackageId]): HttpRequest =
Post(Resource.uri("packages", "affected"), pkgs)
def getPackageStats(name: PackageId.Name): HttpRequest =
Get(Resource.uri("package_stats", name.get))
}
| PDXostc/rvi_sota_server | device-registry/src/test/scala/org/genivi/sota/device_registry/DeviceRequests.scala | Scala | mpl-2.0 | 4,336 |
package microtools.ws
import microtools.models.{AuthRequestContext, ExtraHeaders, RequestContext, ServiceName}
import play.api.http.HeaderNames
import play.api.libs.ws.{WSClient, WSRequest}
class WSClientWithFlow(val underlying: WSClient) {
private[this] object ForwardProto extends Enumeration {
type Type = Value
val HTTP = Value("http")
val HTTPS = Value("https")
}
def url(rawUrl: String)(implicit ctx: RequestContext): WSRequest = {
val (url, forwardProto): (String, ForwardProto.Type) =
if (rawUrl.startsWith("https:"))
s"http:${rawUrl.drop(6)}" -> ForwardProto.HTTPS
else rawUrl -> ForwardProto.HTTP
underlying
.url(url)
.addHttpHeaders(
ExtraHeaders.FLOW_ID_HEADER -> ctx.flowId,
HeaderNames.X_FORWARDED_PROTO -> forwardProto.toString
)
}
def urlWithAuthFromContext(rawUrl: String)(implicit ctx: AuthRequestContext): WSRequest = {
url(rawUrl)
.addHttpHeaders(HeaderNames.AUTHORIZATION -> s"Bearer ${ctx.token}")
}
def urlWithServiceAuth(
rawUrl: String
)(implicit serviceName: ServiceName, ctx: RequestContext): WSRequest = {
url(rawUrl)
.addHttpHeaders(
ExtraHeaders.AUTH_SUBJECT_HEADER -> s"service/$serviceName"
)
}
}
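
/* Illustrative usage sketch (not part of the original file): issuing a GET with the
 * flow id and service-authentication headers attached. The URL is an assumption; the
 * WSClient, ServiceName and RequestContext are expected to be provided by the
 * surrounding Play application.
 */
object WSClientWithFlowUsageSketch {
  import play.api.libs.ws.WSResponse
  import scala.concurrent.Future

  def fetchStatus(ws: WSClient)(implicit serviceName: ServiceName, ctx: RequestContext): Future[WSResponse] =
    new WSClientWithFlow(ws).urlWithServiceAuth("https://internal-service/status").get()
}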
| 21re/play-error-handling | src/main/scala/microtools/ws/WSClientWithFlow.scala | Scala | mit | 1,287 |
/* This is the application shown in the online tutorial.
*
* It has two small issues:
* - Without special configuration, MTurk requires that tasks
* have at least 10 HITs. For this question, the minimum
* number of HITs is 3, so this version is a little
* inefficient.
* - If AutoMan is unable to deliver a high-confidence answer,
* it will not be clear to the user, since the `println`
* method calls the `toString` method on the return value
* of `which_one()`.
*
* A better, but slightly more complicated version of this same
* program can be found in the apps/simple/SimpleRadioButton
* folder. It solves the above problems by
* - providing special configuration, and
* - by pattern matching on the result.
*
* Caveats aside, this application does work and is relatively
* easy to understand.
*/
import org.automanlang.adapters.mturk.DSL._
object SuperDuperSimplest extends App {
implicit val a = mturk (
access_key_id = args(0),
secret_access_key = args(1),
sandbox_mode = true
)
def which_one() = radio (
budget = 5.00,
text = "Which one of these does not belong?",
options = (
choice('oscar, "Oscar the Grouch", "https://tinyurl.com/y2nf2h76"),
choice('kermit, "Kermit the Frog", "https://tinyurl.com/yxh2emmr"),
choice('spongebob, "Spongebob Squarepants", "https://tinyurl.com/y3uv2oew"),
choice('cookiemonster, "Cookie Monster", "https://tinyurl.com/y68x9zvx"),
choice('thecount, "The Count", "https://tinyurl.com/y6na5a8a")
)
)
automan(a) {
println("Answer is: " + which_one())
}
} | dbarowy/AutoMan | apps/simple/SuperDuperSimplest/src/main/scala/SuperDuperSimplest.scala | Scala | gpl-2.0 | 1,614 |
package org.lolhens.minechanics.common.block
import net.minecraft.block.Block
//import net.minecraft.client.renderer.texture.IIconRegister
//import net.minecraft.util.IIcon
class BlockFoliage extends BlockBase("foliage") {
setStepSound(Block.soundTypeGrass)
//private var foliage_top, foliage_bottom: IIcon = null
/*@SideOnly(Side.CLIENT)
override def getIcon(side: Int, meta: Int) = side match {
case 0 => foliage_bottom
case 1 => foliage_top
case _ => blockIcon
}
override def registerBlockIcons(iconRegister: IIconRegister) = {
super.registerBlockIcons(iconRegister)
foliage_top = iconRegister.registerIcon("minechanics:foliage_top")
foliage_bottom = iconRegister.registerIcon("minecraft:dirt")
}*/
  override def onTextureRegistered: Unit = ??? // texture registration not implemented yet; calling this throws scala.NotImplementedError
} | LolHens/Minechanics | src/main/scala/org/lolhens/minechanics/common/block/BlockFoliage.scala | Scala | gpl-2.0 | 794 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.holdenkarau.spark.testing
import org.scalatest.FunSuite
/**
* Illustrate using per-test sample test. This is the one to use
* when your tests may be destructive to the Spark context
* (e.g. stopping it)
*/
class PerTestSampleTest extends FunSuite with PerTestSparkContext {
test("sample test stops a context") {
sc.stop()
}
test("can still parallelize") {
val input = List(1,2,3)
assert(input === sc.parallelize(input).collect())
}
}
| snithish/spark-testing-base | src/test/1.3/scala/com/holdenkarau/spark/testing/PerTestSampleTest.scala | Scala | apache-2.0 | 1,276 |
// Copyright (c) 2013, Johns Hopkins University. All rights reserved.
// This software is released under the 2-clause BSD license.
// See /LICENSE.txt
// Travis Wolfe, [email protected], 30 July 2013
package edu.jhu.hlt.parma.inference.topics
import scala.collection.immutable
import scala.collection.mutable
import java.io._
class Document(doc: Map[Int,Int]) {
val words = Array.ofDim[Int](doc.size)
val counts = Array.ofDim[Int](doc.size)
val length = doc.size
val total = doc.values.sum
var n = 0
for(key <- doc.keys.toList.sortWith((x,y) => x < y)) {
words(n) = key
counts(n) = doc(key)
n += 1
}
override def toString = {
val buf = new StringBuilder
n = 0
    while(n < length) {
      buf ++= (Document.getWord(words(n)) + ":" + counts(n) + " ")
      n += 1
    }
buf.toString
}
}
object Document {
var wordMap : immutable.Map[String,Int] = Map.empty
var keyMap : immutable.Map[Int,String] = Map.empty
def hasIndex(w: String) = {
wordMap contains w
}
def getIndex(w: String) : Int = {
if(wordMap contains w) {
wordMap(w)
} else {
val key = wordMap.keys.size
wordMap += (w -> key)
keyMap += (key -> w)
key
}
}
def getWord(i: Int) : String = {
keyMap(i)
}
def writeWordMap(filename: String) {
val fw = new FileWriter(filename)
for((key,value) <- wordMap) {
fw.write(key + "\t" + value + "\n")
}
fw.close
}
def readWordMap(filename: String) {
for( ln <- io.Source.fromFile(filename).getLines.map(_.trim) ) {
val tokens = ln.split("\t")
if(tokens.size != 2) {
println("malformed line: " + ln)
}
wordMap += tokens(0) -> tokens(1).toInt
keyMap += tokens(1).toInt -> tokens(0)
}
}
def getVocabSize() : Int = {
wordMap.size
}
def splitLine(line: String) : Array[String] = {
line.split("\\s+")
}
def normalizeAndFilter(words: Array[String]) = {
var list = mutable.ListBuffer[String]()
for(w <- words) {
val norm_w = w.toLowerCase.replaceAll("\\W", "")
if(norm_w.length > 0) {
list += norm_w
}
}
list
}
def fromRawString(s: String) = {
val lines = s.split("\n")
val words = normalizeAndFilter(lines.flatMap(splitLine(_)))
var accum = mutable.Map[Int, Int]().withDefault(x=>0)
for(w <- words) {
accum(Document.getIndex(w)) += 1
}
var counts = immutable.Map[Int, Int]()
for((key,value) <- accum) {
counts += key -> value
}
new Document(counts)
}
def fromNormalizedString(s: String) = {
val words = s.split(" ")
var accum = mutable.Map[Int, Int]().withDefault(x=>0)
for(w <- words) {
accum(Document.getIndex(w)) += 1
}
var counts = immutable.Map[Int, Int]()
for((key,value) <- accum) {
counts += key -> value
}
new Document(counts)
}
// token sequence
def fromRawFile(f: File) = {
val source = f.getAbsolutePath
fromRawString(scala.io.Source.fromFile(source).mkString)
}
// length type:count type:count ...
def fromPreprocessedFile(f: File, vocabFile: File) = {
// Load the dictionary file
for(line <- scala.io.Source.fromFile(vocabFile.getAbsolutePath).getLines) {
val trimmed = line.trim
Document.getIndex(trimmed)
}
// Load the document type counts
val docs = scala.collection.mutable.ArrayBuffer.empty[Document]
val source = f.getAbsolutePath
val lines = scala.io.Source.fromFile(source).getLines
for(line <- lines) {
val tokens = line.split("\\s+")
val len = tokens.head
val cs = tokens.drop(1) // list of type:count pairs
var counts = immutable.Map[Int,Int]()
for(c <- cs) {
val wc = c.split(":")
counts += wc(0).toInt -> wc(1).toInt
}
docs += new Document(counts)
}
docs
}
}
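
/* Illustrative usage sketch (not part of the original file): building a bag-of-words
 * Document from a raw string and inspecting the shared vocabulary maintained by the
 * companion object. The sample sentence is an assumption for demonstration.
 */
object DocumentUsageSketch {
  def demo(): Unit = {
    val doc = Document.fromRawString("The cat sat on the mat. The cat purred.")
    println("unique terms: " + doc.length + ", total tokens: " + doc.total)
    println("vocabulary size so far: " + Document.getVocabSize())
  }
}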
| hltcoe/parma | src/main/scala/edu/jhu/hlt/parma/inference/topics/Document.scala | Scala | bsd-2-clause | 3,868 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.metadata
import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.plan.metadata.FlinkMetadata.ColumnInterval
import org.apache.flink.table.planner.plan.nodes.calcite.{Expand, Rank, TableAggregate, WindowAggregate}
import org.apache.flink.table.planner.plan.nodes.physical.batch._
import org.apache.flink.table.planner.plan.nodes.physical.stream._
import org.apache.flink.table.planner.plan.schema.FlinkPreparingTableBase
import org.apache.flink.table.planner.plan.stats._
import org.apache.flink.table.planner.plan.utils.{AggregateUtil, ColumnIntervalUtil, FlinkRelOptUtil, RankUtil}
import org.apache.flink.table.runtime.operators.rank.{ConstantRankRange, VariableRankRange}
import org.apache.flink.util.Preconditions
import org.apache.calcite.plan.volcano.RelSubset
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.metadata._
import org.apache.calcite.rel.{RelNode, SingleRel}
import org.apache.calcite.rex._
import org.apache.calcite.sql.SqlKind._
import org.apache.calcite.sql.`type`.SqlTypeName
import org.apache.calcite.sql.{SqlBinaryOperator, SqlKind}
import org.apache.calcite.util.Util
import java.math.{BigDecimal => JBigDecimal}
import scala.collection.JavaConversions._
/**
* FlinkRelMdColumnInterval supplies a default implementation of
* [[FlinkRelMetadataQuery.getColumnInterval]] for the standard logical algebra.
*/
class FlinkRelMdColumnInterval private extends MetadataHandler[ColumnInterval] {
override def getDef: MetadataDef[ColumnInterval] = FlinkMetadata.ColumnInterval.DEF
/**
* Gets interval of the given column on TableScan.
*
* @param ts TableScan RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on TableScan
*/
def getColumnInterval(ts: TableScan, mq: RelMetadataQuery, index: Int): ValueInterval = {
val relOptTable = ts.getTable.asInstanceOf[FlinkPreparingTableBase]
val fieldNames = relOptTable.getRowType.getFieldNames
Preconditions.checkArgument(index >= 0 && index < fieldNames.size())
val fieldName = fieldNames.get(index)
val statistic = relOptTable.getStatistic
val colStats = statistic.getColumnStats(fieldName)
if (colStats != null) {
val minValue = colStats.getMinValue
val maxValue = colStats.getMaxValue
val min = colStats.getMin
val max = colStats.getMax
Preconditions.checkArgument(
(minValue == null && maxValue == null) || (max == null && min == null))
if (minValue != null || maxValue != null) {
ValueInterval(convertNumberToBigDecimal(minValue), convertNumberToBigDecimal(maxValue))
} else if (max != null || min != null) {
ValueInterval(convertNumberToBigDecimal(min), convertNumberToBigDecimal(max))
} else {
null
}
} else {
null
}
}
private def convertNumberToBigDecimal(number: Number): Number = {
if (number != null) {
new JBigDecimal(number.toString)
} else {
number
}
}
private def convertNumberToBigDecimal(comparable: Comparable[_]): Comparable[_] = {
if (comparable != null && comparable.isInstanceOf[Number]) {
new JBigDecimal(comparable.toString)
} else {
comparable
}
}
/**
* Gets interval of the given column on Values.
*
* @param values Values RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on Values
*/
def getColumnInterval(values: Values, mq: RelMetadataQuery, index: Int): ValueInterval = {
val tuples = values.tuples
if (tuples.isEmpty) {
EmptyValueInterval
} else {
val values = tuples.map {
t => FlinkRelOptUtil.getLiteralValueByBroadType(t.get(index))
}.filter(_ != null)
if (values.isEmpty) {
EmptyValueInterval
} else {
values.map(v => ValueInterval(v, v)).reduceLeft(ValueInterval.union)
}
}
}
/**
* Gets interval of the given column on Snapshot.
*
* @param snapshot Snapshot RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on Snapshot.
*/
def getColumnInterval(snapshot: Snapshot, mq: RelMetadataQuery, index: Int): ValueInterval = null
/**
* Gets interval of the given column on Project.
*
   * Note: Only simple RexNodes are supported, e.g. RexInputRef.
*
* @param project Project RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on Project
*/
def getColumnInterval(project: Project, mq: RelMetadataQuery, index: Int): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
val projects = project.getProjects
Preconditions.checkArgument(index >= 0 && index < projects.size())
projects.get(index) match {
case inputRef: RexInputRef => fmq.getColumnInterval(project.getInput, inputRef.getIndex)
case literal: RexLiteral =>
val literalValue = FlinkRelOptUtil.getLiteralValueByBroadType(literal)
if (literalValue == null) {
ValueInterval.empty
} else {
ValueInterval(literalValue, literalValue)
}
case rexCall: RexCall =>
getRexNodeInterval(rexCall, project, mq)
case _ => null
}
}
/**
* Gets interval of the given column on Filter.
*
* @param filter Filter RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on Filter
*/
def getColumnInterval(filter: Filter, mq: RelMetadataQuery, index: Int): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
val inputValueInterval = fmq.getColumnInterval(filter.getInput, index)
ColumnIntervalUtil.getColumnIntervalWithFilter(
Option(inputValueInterval),
RexUtil.expandSearch(
filter.getCluster.getRexBuilder,
null,
filter.getCondition),
index,
filter.getCluster.getRexBuilder)
}
/**
* Gets interval of the given column on Calc.
*
* @param calc Filter RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on Calc
*/
def getColumnInterval(calc: Calc, mq: RelMetadataQuery, index: Int): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
val rexProgram = calc.getProgram
val project = rexProgram.split().left.get(index)
getColumnIntervalOfCalc(calc, fmq, project)
}
/**
* Calculate interval of column which results from the given rex node in calc.
* Note that this function is called by function above, and is reclusive in case
* of "AS" rex call, and is private, too.
*/
private def getColumnIntervalOfCalc(
calc: Calc,
mq: RelMetadataQuery,
project: RexNode): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
project match {
case call: RexCall if call.getKind == SqlKind.AS =>
getColumnIntervalOfCalc(calc, fmq, call.getOperands.head)
case inputRef: RexInputRef =>
val program = calc.getProgram
val sourceFieldIndex = inputRef.getIndex
val inputValueInterval = fmq.getColumnInterval(calc.getInput, sourceFieldIndex)
val condition = program.getCondition
if (condition != null) {
val predicate = program.expandLocalRef(program.getCondition)
ColumnIntervalUtil.getColumnIntervalWithFilter(
Option(inputValueInterval),
predicate,
sourceFieldIndex,
calc.getCluster.getRexBuilder)
} else {
inputValueInterval
}
case literal: RexLiteral =>
val literalValue = FlinkRelOptUtil.getLiteralValueByBroadType(literal)
if (literalValue == null) {
ValueInterval.empty
} else {
ValueInterval(literalValue, literalValue)
}
case rexCall: RexCall =>
getRexNodeInterval(rexCall, calc, mq)
case _ => null
}
}
private def getRexNodeInterval(
rexNode: RexNode,
baseNode: SingleRel,
mq: RelMetadataQuery): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
rexNode match {
case inputRef: RexInputRef =>
fmq.getColumnInterval(baseNode.getInput, inputRef.getIndex)
case literal: RexLiteral =>
val literalValue = FlinkRelOptUtil.getLiteralValueByBroadType(literal)
if (literalValue == null) {
ValueInterval.empty
} else {
ValueInterval(literalValue, literalValue)
}
case caseCall: RexCall if caseCall.getKind == SqlKind.CASE =>
// compute all the possible result values of this case when clause,
// the result values is the value interval
val operands = caseCall.getOperands
val operandCount = operands.size()
val possibleValueIntervals = operands.indices
          // keep only the THEN/ELSE result expressions, skipping the WHEN conditions
.filter(i => i % 2 != 0 || i == operandCount - 1)
.map(operands(_))
.map(getRexNodeInterval(_, baseNode, mq))
possibleValueIntervals.reduceLeft(ValueInterval.union)
case searchCall: RexCall if searchCall.getKind == SqlKind.SEARCH =>
val expanded = RexUtil.expandSearch(
baseNode.getCluster.getRexBuilder, null, searchCall)
getRexNodeInterval(expanded, baseNode, mq)
// TODO supports ScalarSqlFunctions.IF
// TODO supports CAST
case rexCall: RexCall if rexCall.op.isInstanceOf[SqlBinaryOperator] =>
val leftValueInterval = getRexNodeInterval(rexCall.operands.get(0), baseNode, mq)
val rightValueInterval = getRexNodeInterval(rexCall.operands.get(1), baseNode, mq)
ColumnIntervalUtil.getValueIntervalOfRexCall(rexCall, leftValueInterval, rightValueInterval)
case _ => null
}
}
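
  /* Illustrative sketch (not part of the original file): the CASE handling above unions
   * the intervals of every possible result branch. For CASE WHEN c THEN 1 WHEN d THEN 5
   * ELSE 10 END the branch intervals are the single points [1,1], [5,5] and [10,10],
   * whose union is [1,10]. The literal bounds below are made-up examples.
   */
  private def caseBranchIntervalSketch: ValueInterval = Seq(
    ValueInterval(JBigDecimal.valueOf(1), JBigDecimal.valueOf(1)),
    ValueInterval(JBigDecimal.valueOf(5), JBigDecimal.valueOf(5)),
    ValueInterval(JBigDecimal.valueOf(10), JBigDecimal.valueOf(10))
  ).reduceLeft(ValueInterval.union)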
/**
* Gets interval of the given column on Exchange.
*
* @param exchange Exchange RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on Exchange
*/
def getColumnInterval(exchange: Exchange, mq: RelMetadataQuery, index: Int): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
fmq.getColumnInterval(exchange.getInput, index)
}
/**
* Gets interval of the given column on Sort.
*
* @param sort Sort RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on Sort
*/
def getColumnInterval(sort: Sort, mq: RelMetadataQuery, index: Int): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
fmq.getColumnInterval(sort.getInput, index)
}
/**
   * Gets interval of the given column on Expand.
   *
   * @param expand expand RelNode
   * @param mq RelMetadataQuery instance
   * @param index the index of the given column
   * @return interval of the given column on Expand
*/
def getColumnInterval(
expand: Expand,
mq: RelMetadataQuery,
index: Int): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
val intervals = expand.projects.flatMap { project =>
project(index) match {
case inputRef: RexInputRef =>
Some(fmq.getColumnInterval(expand.getInput, inputRef.getIndex))
case l: RexLiteral if l.getTypeName eq SqlTypeName.DECIMAL =>
val v = l.getValueAs(classOf[JBigDecimal])
Some(ValueInterval(v, v))
case l: RexLiteral if l.getValue == null =>
None
case p@_ =>
throw new TableException(s"Column interval can't handle $p type in expand.")
}
}
if (intervals.contains(null)) {
// null union any value interval is null
null
} else {
intervals.reduce((a, b) => ValueInterval.union(a, b))
}
}
/**
* Gets interval of the given column on Rank.
*
* @param rank [[Rank]] instance to analyze
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on Rank
*/
def getColumnInterval(
rank: Rank,
mq: RelMetadataQuery,
index: Int): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
val rankFunColumnIndex = RankUtil.getRankNumberColumnIndex(rank).getOrElse(-1)
if (index == rankFunColumnIndex) {
rank.rankRange match {
case r: ConstantRankRange =>
ValueInterval(JBigDecimal.valueOf(r.getRankStart), JBigDecimal.valueOf(r.getRankEnd))
case v: VariableRankRange =>
val interval = fmq.getColumnInterval(rank.getInput, v.getRankEndIndex)
interval match {
case hasUpper: WithUpper =>
val lower = JBigDecimal.valueOf(1)
ValueInterval(lower, hasUpper.upper, includeUpper = hasUpper.includeUpper)
case _ => null
}
}
} else {
fmq.getColumnInterval(rank.getInput, index)
}
}
/**
* Gets interval of the given column on Aggregates.
*
* @param aggregate Aggregate RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on Aggregate
*/
def getColumnInterval(aggregate: Aggregate, mq: RelMetadataQuery, index: Int): ValueInterval =
estimateColumnIntervalOfAggregate(aggregate, mq, index)
/**
* Gets interval of the given column on TableAggregates.
*
* @param aggregate TableAggregate RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on TableAggregate
*/
def getColumnInterval(
aggregate: TableAggregate,
mq: RelMetadataQuery, index: Int): ValueInterval =
estimateColumnIntervalOfAggregate(aggregate, mq, index)
/**
* Gets interval of the given column on batch group aggregate.
*
* @param aggregate batch group aggregate RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on batch group aggregate
*/
def getColumnInterval(
aggregate: BatchPhysicalGroupAggregateBase,
mq: RelMetadataQuery,
index: Int): ValueInterval = estimateColumnIntervalOfAggregate(aggregate, mq, index)
/**
* Gets interval of the given column on stream group aggregate.
*
* @param aggregate stream group aggregate RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on stream group Aggregate
*/
def getColumnInterval(
aggregate: StreamPhysicalGroupAggregate,
mq: RelMetadataQuery,
index: Int): ValueInterval = estimateColumnIntervalOfAggregate(aggregate, mq, index)
/**
* Gets interval of the given column on stream group table aggregate.
*
* @param aggregate stream group table aggregate RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on stream group TableAggregate
*/
def getColumnInterval(
aggregate: StreamPhysicalGroupTableAggregate,
mq: RelMetadataQuery,
index: Int): ValueInterval = estimateColumnIntervalOfAggregate(aggregate, mq, index)
/**
* Gets interval of the given column on stream local group aggregate.
*
* @param aggregate stream local group aggregate RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on stream local group Aggregate
*/
def getColumnInterval(
aggregate: StreamPhysicalLocalGroupAggregate,
mq: RelMetadataQuery,
index: Int): ValueInterval = estimateColumnIntervalOfAggregate(aggregate, mq, index)
/**
* Gets interval of the given column on stream global group aggregate.
*
* @param aggregate stream global group aggregate RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on stream global group Aggregate
*/
def getColumnInterval(
aggregate: StreamPhysicalGlobalGroupAggregate,
mq: RelMetadataQuery,
index: Int): ValueInterval = estimateColumnIntervalOfAggregate(aggregate, mq, index)
/**
* Gets interval of the given column on window aggregate.
*
* @param agg window aggregate RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on window Aggregate
*/
def getColumnInterval(
agg: WindowAggregate,
mq: RelMetadataQuery,
index: Int): ValueInterval = estimateColumnIntervalOfAggregate(agg, mq, index)
/**
* Gets interval of the given column on batch window aggregate.
*
* @param agg batch window aggregate RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on batch window Aggregate
*/
def getColumnInterval(
agg: BatchPhysicalWindowAggregateBase,
mq: RelMetadataQuery,
index: Int): ValueInterval = estimateColumnIntervalOfAggregate(agg, mq, index)
/**
* Gets interval of the given column on stream window aggregate.
*
* @param agg stream window aggregate RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on stream window Aggregate
*/
def getColumnInterval(
agg: StreamPhysicalGroupWindowAggregate,
mq: RelMetadataQuery,
index: Int): ValueInterval = estimateColumnIntervalOfAggregate(agg, mq, index)
/**
* Gets interval of the given column on stream window table aggregate.
*
* @param agg stream window table aggregate RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on stream window Aggregate
*/
def getColumnInterval(
agg: StreamPhysicalGroupWindowTableAggregate,
mq: RelMetadataQuery,
index: Int): ValueInterval = estimateColumnIntervalOfAggregate(agg, mq, index)
private def estimateColumnIntervalOfAggregate(
aggregate: SingleRel,
mq: RelMetadataQuery,
index: Int): ValueInterval = {
val input = aggregate.getInput
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
val groupSet = aggregate match {
case agg: StreamPhysicalGroupAggregate => agg.grouping
case agg: StreamPhysicalLocalGroupAggregate => agg.grouping
case agg: StreamPhysicalGlobalGroupAggregate => agg.grouping
case agg: StreamPhysicalIncrementalGroupAggregate => agg.partialAggGrouping
case agg: StreamPhysicalGroupWindowAggregate => agg.grouping
case agg: BatchPhysicalGroupAggregateBase => agg.grouping ++ agg.auxGrouping
case agg: Aggregate => AggregateUtil.checkAndGetFullGroupSet(agg)
case agg: BatchPhysicalLocalSortWindowAggregate =>
// grouping + assignTs + auxGrouping
agg.grouping ++ Array(agg.inputTimeFieldIndex) ++ agg.auxGrouping
case agg: BatchPhysicalLocalHashWindowAggregate =>
// grouping + assignTs + auxGrouping
agg.grouping ++ Array(agg.inputTimeFieldIndex) ++ agg.auxGrouping
case agg: BatchPhysicalWindowAggregateBase => agg.grouping ++ agg.auxGrouping
case agg: TableAggregate => agg.getGroupSet.toArray
case agg: StreamPhysicalGroupTableAggregate => agg.grouping
case agg: StreamPhysicalGroupWindowTableAggregate => agg.grouping
}
if (index < groupSet.length) {
// estimates group keys according to the input relNodes.
val sourceFieldIndex = groupSet(index)
fmq.getColumnInterval(input, sourceFieldIndex)
} else {
def getAggCallFromLocalAgg(
index: Int,
aggCalls: Seq[AggregateCall],
inputType: RelDataType): AggregateCall = {
val outputIndexToAggCallIndexMap = AggregateUtil.getOutputIndexToAggCallIndexMap(
aggCalls, inputType)
if (outputIndexToAggCallIndexMap.containsKey(index)) {
val realIndex = outputIndexToAggCallIndexMap.get(index)
aggCalls(realIndex)
} else {
null
}
}
def getAggCallIndexInLocalAgg(
index: Int,
globalAggCalls: Seq[AggregateCall],
inputRowType: RelDataType): Integer = {
val outputIndexToAggCallIndexMap = AggregateUtil.getOutputIndexToAggCallIndexMap(
globalAggCalls, inputRowType)
outputIndexToAggCallIndexMap.foreach {
case (k, v) => if (v == index) {
return k
}
}
null.asInstanceOf[Integer]
}
if (index < groupSet.length) {
// estimates group keys according to the input relNodes.
val sourceFieldIndex = groupSet(index)
fmq.getColumnInterval(aggregate.getInput, sourceFieldIndex)
} else {
val aggCallIndex = index - groupSet.length
val aggCall = aggregate match {
case agg: StreamPhysicalGroupAggregate if agg.aggCalls.length > aggCallIndex =>
agg.aggCalls(aggCallIndex)
case agg: StreamPhysicalGlobalGroupAggregate
if agg.aggCalls.length > aggCallIndex =>
val aggCallIndexInLocalAgg = getAggCallIndexInLocalAgg(
aggCallIndex, agg.aggCalls, agg.localAggInputRowType)
if (aggCallIndexInLocalAgg != null) {
return fmq.getColumnInterval(agg.getInput, groupSet.length + aggCallIndexInLocalAgg)
} else {
null
}
case agg: StreamPhysicalLocalGroupAggregate =>
getAggCallFromLocalAgg(aggCallIndex, agg.aggCalls, agg.getInput.getRowType)
case agg: StreamPhysicalIncrementalGroupAggregate
if agg.partialAggCalls.length > aggCallIndex =>
agg.partialAggCalls(aggCallIndex)
case agg: StreamPhysicalGroupWindowAggregate if agg.aggCalls.length > aggCallIndex =>
agg.aggCalls(aggCallIndex)
case agg: BatchPhysicalLocalHashAggregate =>
getAggCallFromLocalAgg(aggCallIndex, agg.getAggCallList, agg.getInput.getRowType)
case agg: BatchPhysicalHashAggregate if agg.isMerge =>
val aggCallIndexInLocalAgg = getAggCallIndexInLocalAgg(
aggCallIndex, agg.getAggCallList, agg.aggInputRowType)
if (aggCallIndexInLocalAgg != null) {
return fmq.getColumnInterval(agg.getInput, groupSet.length + aggCallIndexInLocalAgg)
} else {
null
}
case agg: BatchPhysicalLocalSortAggregate =>
getAggCallFromLocalAgg(aggCallIndex, agg.getAggCallList, agg.getInput.getRowType)
case agg: BatchPhysicalSortAggregate if agg.isMerge =>
val aggCallIndexInLocalAgg = getAggCallIndexInLocalAgg(
aggCallIndex, agg.getAggCallList, agg.aggInputRowType)
if (aggCallIndexInLocalAgg != null) {
return fmq.getColumnInterval(agg.getInput, groupSet.length + aggCallIndexInLocalAgg)
} else {
null
}
case agg: BatchPhysicalGroupAggregateBase if agg.getAggCallList.length > aggCallIndex =>
agg.getAggCallList(aggCallIndex)
case agg: Aggregate =>
val (_, aggCalls) = AggregateUtil.checkAndSplitAggCalls(agg)
if (aggCalls.length > aggCallIndex) {
aggCalls(aggCallIndex)
} else {
null
}
case agg: BatchPhysicalWindowAggregateBase if agg.getAggCallList.length > aggCallIndex =>
agg.getAggCallList(aggCallIndex)
case _ => null
}
if (aggCall != null) {
aggCall.getAggregation.getKind match {
case SUM | SUM0 =>
val inputInterval = fmq.getColumnInterval(input, aggCall.getArgList.get(0))
if (inputInterval != null) {
inputInterval match {
case withLower: WithLower if withLower.lower.isInstanceOf[Number] =>
if (withLower.lower.asInstanceOf[Number].doubleValue() >= 0.0) {
RightSemiInfiniteValueInterval(withLower.lower, withLower.includeLower)
} else {
null.asInstanceOf[ValueInterval]
}
case withUpper: WithUpper if withUpper.upper.isInstanceOf[Number] =>
if (withUpper.upper.asInstanceOf[Number].doubleValue() <= 0.0) {
LeftSemiInfiniteValueInterval(withUpper.upper, withUpper.includeUpper)
} else {
null
}
case _ => null
}
} else {
null
}
case COUNT =>
RightSemiInfiniteValueInterval(JBigDecimal.valueOf(0), includeLower = true)
// TODO add more built-in agg functions
case _ => null
}
} else {
null
}
}
}
}
/**
* Gets interval of the given column on calcite window.
*
* @param window Window RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on window
*/
def getColumnInterval(
window: Window,
mq: RelMetadataQuery,
index: Int): ValueInterval = {
getColumnIntervalOfOverAgg(window, mq, index)
}
/**
* Gets interval of the given column on batch over aggregate.
*
* @param agg batch over aggregate RelNode
* @param mq RelMetadataQuery instance
   * @param index the index of the given column
* @return interval of the given column on batch over aggregate.
*/
def getColumnInterval(
agg: BatchExecOverAggregate,
mq: RelMetadataQuery,
index: Int): ValueInterval = getColumnIntervalOfOverAgg(agg, mq, index)
/**
* Gets interval of the given column on stream over aggregate.
*
* @param agg stream over aggregate RelNode
* @param mq RelMetadataQuery instance
   * @param index the index of the given column
* @return interval of the given column on stream over aggregate.
*/
def getColumnInterval(
agg: StreamExecOverAggregate,
mq: RelMetadataQuery,
index: Int): ValueInterval = getColumnIntervalOfOverAgg(agg, mq, index)
private def getColumnIntervalOfOverAgg(
overAgg: SingleRel,
mq: RelMetadataQuery,
index: Int): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
val input = overAgg.getInput
val fieldsCountOfInput = input.getRowType.getFieldCount
if (index < fieldsCountOfInput) {
fmq.getColumnInterval(input, index)
} else {
      // cannot estimate the column interval of aggregate function calls.
null
}
}
/**
* Gets interval of the given column on Join.
*
* @param join Join RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on Join
*/
def getColumnInterval(join: Join, mq: RelMetadataQuery, index: Int): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
val joinCondition = join.getCondition
val nLeftColumns = join.getLeft.getRowType.getFieldCount
val inputValueInterval = if (index < nLeftColumns) {
fmq.getColumnInterval(join.getLeft, index)
} else {
fmq.getColumnInterval(join.getRight, index - nLeftColumns)
}
    // TODO if the column at the index position is an equi-join key in an inner join, its
    // interval is the original interval intersected with the interval of the paired join key.
    // For example, if the join is an inner join with condition l.A = r.A,
    // the valueInterval of l.A is the intersection of l.A's interval and r.A's interval.
if (joinCondition == null || joinCondition.isAlwaysTrue) {
inputValueInterval
} else {
ColumnIntervalUtil.getColumnIntervalWithFilter(
Option(inputValueInterval),
joinCondition,
index,
join.getCluster.getRexBuilder)
}
}
/**
* Gets interval of the given column on Union.
*
* @param union Union RelNode
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return interval of the given column on Union
*/
def getColumnInterval(union: Union, mq: RelMetadataQuery, index: Int): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
val subIntervals = union
.getInputs
.map(fmq.getColumnInterval(_, index))
subIntervals.reduceLeft(ValueInterval.union)
}
/**
* Gets interval of the given column on RelSubset.
*
* @param subset RelSubset to analyze
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return If exist best relNode, then transmit to it, else transmit to the original relNode
*/
def getColumnInterval(subset: RelSubset, mq: RelMetadataQuery, index: Int): ValueInterval = {
val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq)
val rel = Util.first(subset.getBest, subset.getOriginal)
fmq.getColumnInterval(rel, index)
}
/**
   * Catch-all rule when none of the others apply.
*
* @param rel RelNode to analyze
* @param mq RelMetadataQuery instance
* @param index the index of the given column
* @return Always returns null
*/
def getColumnInterval(rel: RelNode, mq: RelMetadataQuery, index: Int): ValueInterval = null
}
object FlinkRelMdColumnInterval {
private val INSTANCE = new FlinkRelMdColumnInterval
val SOURCE: RelMetadataProvider = ReflectiveRelMetadataProvider.reflectiveSource(
FlinkMetadata.ColumnInterval.METHOD, INSTANCE)
}
| aljoscha/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdColumnInterval.scala | Scala | apache-2.0 | 31,168 |
package scala.scalanative
package native
import scalanative.runtime.{divULong, remULong}
import java.lang.{Long => JLong}
/** `ULong`, a 64-bit unsigned integer. */
final class ULong private[scala] (private val underlying: Long)
extends AnyVal
with java.io.Serializable
with Comparable[ULong] {
@inline final def toByte: Byte = underlying.toByte
@inline final def toShort: Short = underlying.toShort
@inline final def toChar: Char = underlying.toChar
@inline final def toInt: Int = underlying.toInt
@inline final def toLong: Long = underlying
@inline final def toFloat: Float = toDouble.toFloat
@inline final def toDouble: Double =
if (underlying >= 0) underlying.toDouble
    else 18446744073709551616.0 + underlying.toDouble // TODO Verify precision
@inline final def toUByte: UByte = new UByte(toByte)
@inline final def toUShort: UShort = new UShort(toShort)
@inline final def toUInt: UInt = new UInt(toInt)
@inline final def toULong: ULong = this
/**
* Returns the bitwise negation of this value.
*/
@inline final def unary_~ : ULong = new ULong(~underlying)
/**
* Returns this value bit-shifted left by the specified number of bits,
* filling in the new right bits with zeroes.
* @example {{{ 6 << 3 == 48 // in binary: 0110 << 3 == 0110000 }}}
*/
@inline final def <<(x: Int): ULong = new ULong(underlying << x)
/**
* Returns this value bit-shifted left by the specified number of bits,
* filling in the new right bits with zeroes.
* @example {{{ 6 << 3 == 48 // in binary: 0110 << 3 == 0110000 }}}
*/
@inline final def <<(x: Long): ULong = new ULong(underlying << x)
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling the new left bits with zeroes.
* @example {{{ 21 >>> 3 == 2 // in binary: 010101 >>> 3 == 010 }}}
* @example {{{
* 4294967275 >>> 3 == 536870909
* // in binary: 11111111 11111111 11111111 11101011 >>> 3 ==
* // 00011111 11111111 11111111 11111101
* }}}
*/
@inline final def >>>(x: Int): ULong = new ULong(underlying >>> x)
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling the new left bits with zeroes.
* @example {{{ 21 >>> 3 == 2 // in binary: 010101 >>> 3 == 010 }}}
* @example {{{
* 4294967275 >>> 3 == 536870909
* // in binary: 11111111 11111111 11111111 11101011 >>> 3 ==
* // 00011111 11111111 11111111 11111101
* }}}
*/
@inline final def >>>(x: Long): ULong = new ULong(underlying >>> x)
  /**
   * Returns this value bit-shifted right by the specified number of bits,
   * filling in the left bits with the same value as the left-most bit of this.
   * @example {{{
   * 18446744073709551595 >> 3 == 18446744073709551613
   * // in binary: 11111111 ... 11101011 >> 3 ==
   * //            11111111 ... 11111101
   * }}}
   */
  @inline final def >>(x: Int): ULong = new ULong(underlying >> x)
  /**
   * Returns this value bit-shifted right by the specified number of bits,
   * filling in the left bits with the same value as the left-most bit of this.
   * @example {{{
   * 18446744073709551595 >> 3 == 18446744073709551613
   * // in binary: 11111111 ... 11101011 >> 3 ==
   * //            11111111 ... 11111101
   * }}}
   */
@inline final def >>(x: Long): ULong = new ULong(underlying >> x)
@inline final override def compareTo(x: ULong): Int =
JLong.compareUnsigned(underlying, x.underlying)
/** Returns `true` if this value is equal to x, `false` otherwise. */
@inline final def ==(x: UByte): Boolean = this == x.toULong
/** Returns `true` if this value is equal to x, `false` otherwise. */
@inline final def ==(x: UShort): Boolean = this == x.toULong
/** Returns `true` if this value is equal to x, `false` otherwise. */
@inline final def ==(x: UInt): Boolean = this == x.toULong
/** Returns `true` if this value is equal to x, `false` otherwise. */
@inline final def ==(x: ULong): Boolean = underlying == x.underlying
/** Returns `true` if this value is not equal to x, `false` otherwise. */
@inline final def !=(x: UByte): Boolean = this != x.toULong
/** Returns `true` if this value is not equal to x, `false` otherwise. */
@inline final def !=(x: UShort): Boolean = this != x.toULong
/** Returns `true` if this value is not equal to x, `false` otherwise. */
@inline final def !=(x: UInt): Boolean = this != x.toULong
/** Returns `true` if this value is not equal to x, `false` otherwise. */
@inline final def !=(x: ULong): Boolean = underlying != x.underlying
/** Returns `true` if this value is less than x, `false` otherwise. */
@inline final def <(x: UByte): Boolean = this < x.toULong
/** Returns `true` if this value is less than x, `false` otherwise. */
@inline final def <(x: UShort): Boolean = this < x.toULong
/** Returns `true` if this value is less than x, `false` otherwise. */
@inline final def <(x: UInt): Boolean = this < x.toULong
/** Returns `true` if this value is less than x, `false` otherwise. */
@inline final def <(x: ULong): Boolean = compareTo(x) < 0
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
@inline final def <=(x: UByte): Boolean = this <= x.toULong
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
@inline final def <=(x: UShort): Boolean = this <= x.toULong
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
@inline final def <=(x: UInt): Boolean = this <= x.toULong
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
@inline final def <=(x: ULong): Boolean = compareTo(x) <= 0
/** Returns `true` if this value is greater than x, `false` otherwise. */
@inline final def >(x: UByte): Boolean = this > x.toULong
/** Returns `true` if this value is greater than x, `false` otherwise. */
@inline final def >(x: UShort): Boolean = this > x.toULong
/** Returns `true` if this value is greater than x, `false` otherwise. */
@inline final def >(x: UInt): Boolean = this > x.toULong
/** Returns `true` if this value is greater than x, `false` otherwise. */
@inline final def >(x: ULong): Boolean = compareTo(x) > 0
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
@inline final def >=(x: UByte): Boolean = this >= x.toULong
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
@inline final def >=(x: UShort): Boolean = this >= x.toULong
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
@inline final def >=(x: UInt): Boolean = this >= x.toULong
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
@inline final def >=(x: ULong): Boolean = compareTo(x) >= 0
  /** Returns the bitwise OR of this value and `x`. */
  @inline final def |(x: UByte): ULong = this | x.toULong
  /** Returns the bitwise OR of this value and `x`. */
  @inline final def |(x: UShort): ULong = this | x.toULong
  /** Returns the bitwise OR of this value and `x`. */
  @inline final def |(x: UInt): ULong = this | x.toULong
  /** Returns the bitwise OR of this value and `x`. */
  @inline final def |(x: ULong): ULong = new ULong(underlying | x.underlying)
  /** Returns the bitwise AND of this value and `x`. */
  @inline final def &(x: UByte): ULong = this & x.toULong
  /** Returns the bitwise AND of this value and `x`. */
  @inline final def &(x: UShort): ULong = this & x.toULong
  /** Returns the bitwise AND of this value and `x`. */
  @inline final def &(x: UInt): ULong = this & x.toULong
  /** Returns the bitwise AND of this value and `x`. */
  @inline final def &(x: ULong): ULong = new ULong(underlying & x.underlying)
  /** Returns the bitwise XOR of this value and `x`. */
  @inline final def ^(x: UByte): ULong = this ^ x.toULong
  /** Returns the bitwise XOR of this value and `x`. */
  @inline final def ^(x: UShort): ULong = this ^ x.toULong
  /** Returns the bitwise XOR of this value and `x`. */
  @inline final def ^(x: UInt): ULong = this ^ x.toULong
  /** Returns the bitwise XOR of this value and `x`. */
  @inline final def ^(x: ULong): ULong = new ULong(underlying ^ x.underlying)
/** Returns the sum of this value and `x`. */
@inline final def +(x: UByte): ULong = this + x.toULong
/** Returns the sum of this value and `x`. */
@inline final def +(x: UShort): ULong = this + x.toULong
/** Returns the sum of this value and `x`. */
@inline final def +(x: UInt): ULong = this + x.toULong
/** Returns the sum of this value and `x`. */
@inline final def +(x: ULong): ULong = new ULong(underlying + x.underlying)
/** Returns the difference of this value and `x`. */
@inline final def -(x: UByte): ULong = this - x.toULong
/** Returns the difference of this value and `x`. */
@inline final def -(x: UShort): ULong = this - x.toULong
/** Returns the difference of this value and `x`. */
@inline final def -(x: UInt): ULong = this - x.toULong
/** Returns the difference of this value and `x`. */
@inline final def -(x: ULong): ULong = new ULong(underlying - x.underlying)
/** Returns the product of this value and `x`. */
@inline final def *(x: UByte): ULong = this * x.toULong
/** Returns the product of this value and `x`. */
@inline final def *(x: UShort): ULong = this * x.toULong
/** Returns the product of this value and `x`. */
@inline final def *(x: UInt): ULong = this * x.toULong
/** Returns the product of this value and `x`. */
@inline final def *(x: ULong): ULong = new ULong(underlying * x.underlying)
/** Returns the quotient of this value and `x`. */
@inline final def /(x: UByte): ULong = this / x.toULong
/** Returns the quotient of this value and `x`. */
@inline final def /(x: UShort): ULong = this / x.toULong
/** Returns the quotient of this value and `x`. */
@inline final def /(x: UInt): ULong = this / x.toULong
/** Returns the quotient of this value and `x`. */
@inline final def /(x: ULong): ULong =
new ULong(divULong(underlying, x.underlying))
/** Returns the remainder of the division of this value by `x`. */
@inline final def %(x: UByte): ULong = this % x.toULong
/** Returns the remainder of the division of this value by `x`. */
@inline final def %(x: UShort): ULong = this % x.toULong
/** Returns the remainder of the division of this value by `x`. */
@inline final def %(x: UInt): ULong = this % x.toULong
/** Returns the remainder of the division of this value by `x`. */
@inline final def %(x: ULong): ULong =
new ULong(remULong(underlying, x.underlying))
@inline final override def toString(): String =
JLong.toUnsignedString(underlying)
// "Rich" API
@inline final def max(that: ULong): ULong = if (this >= that) this else that
@inline final def min(that: ULong): ULong = if (this <= that) this else that
@inline final def toBinaryString: String = toLong.toBinaryString
@inline final def toHexString: String = toLong.toHexString
@inline final def toOctalString: String = toLong.toOctalString
}
object ULong {
/** The smallest value representable as a ULong. */
final val MinValue = new ULong(0L)
/** The largest value representable as a ULong. */
final val MaxValue = new ULong(-1L)
/** The String representation of the scala.ULong companion object. */
override def toString(): String = "object scala.ULong"
}
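// Minimal usage sketch: `ULongDemo` is a hypothetical helper (not part of the public API)
// showing how the unsigned operations above differ from the raw signed Long representation.
object ULongDemo {
  def demo(): Unit = {
    val max = ULong.MaxValue          // stored as -1L, interpreted as 2^64 - 1
    println(max.toString)             // "18446744073709551615" (unsigned rendering)
    println(max.toLong)               // -1, the underlying two's-complement bits
    println(max > ULong.MinValue)     // true: comparison is unsigned, not signed
    println((max / max).toString)     // "1": division goes through divULong
  }
}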
| cedricviaccoz/scala-native | nativelib/src/main/scala/scala/scalanative/native/ULong.scala | Scala | bsd-3-clause | 11,638 |
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: [email protected]
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package org.openapitools.client.model
case class BranchImplpermissions (
create: Option[Boolean] = None,
read: Option[Boolean] = None,
start: Option[Boolean] = None,
stop: Option[Boolean] = None,
`class`: Option[String] = None
)
| cliffano/swaggy-jenkins | clients/scala-httpclient-deprecated/generated/src/main/scala/org/openapitools/client/model/BranchImplpermissions.scala | Scala | mit | 590 |
package model
import org.bson.types.ObjectId
import org.joda.time.DateTime
import dao.CategoryDao
/**
* The Class Blog.
*
* @author Nguyen Duc Dung
* @since 1/7/14 12:24 PM
*
*/
case class Blog(
_id: ObjectId = new ObjectId(),
name: String,
url: String,
iconId: Option[ObjectId] = None,
uniqueName: String,
rssUrl: String,
status: String = BlogStatus.UPDATED,
description: Option[String] = None,
isEnable: Boolean = false,
homePage: Boolean = false,
categoryId: ObjectId,
read: Int = 0,
lastUpdated: DateTime = DateTime.now
) extends BaseModel(_id) {
def category = CategoryDao.findOneById(categoryId)
}
object BlogStatus {
lazy val UPDATED = "updated"
lazy val UPDATING = "updating"
lazy val ERROR = "error"
lazy val ACCEPTABLE_ERROR_COUNT = 10
def asSelectValue = Seq(
UPDATED -> "UPDATED",
UPDATING -> "UPDATING",
ERROR -> "ERROR"
)
} | SunriseSoftVN/hayhayblog | core/app/model/Blog.scala | Scala | gpl-2.0 | 1,111 |
/*
* Copyright (c) 2015-2017 Toby Weston
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s4j.scala.chapter17
import java.util.Date
object TimerExample {
def exampleTimer() = {
val timer = new NaiveTimer()
timer.start()
someLongRunningTask()
val time = timer.stop()
println("process took " + time + "ms")
}
def anotherExampleTimer() = {
val timer = Timer()
timer.time {
someLongRunningTask()
null
}
println("process took " + timer + "ms")
}
def yetAnotherExampleTimer() = {
val timer = Timer()
timer.time(() => {
someLongRunningTask()
})
println("process took " + timer + "ms")
}
def someLongRunningTask() = Thread.sleep(1000)
}
class NaiveTimer {
private var startDate: Date = null
def start() = startDate = new Date
def stop(): Long = new Date().getTime - startDate.getTime
}
object Timer {
def apply() = new Timer()
}
class Timer {
private val start = new Date
def time(function: () => Unit) {
try {
function.apply()
} finally {
new Date().getTime - start.getTime
}
}
}
| tobyweston/learn-scala-java-devs | src/main/scala/s4j/scala/chapter17/Timer.scala | Scala | apache-2.0 | 1,635 |
package edu.umd.mith.util
import argonaut._, Argonaut._
import java.net.URL
import org.apache.commons.lang.StringEscapeUtils.unescapeJavaScript
import scala.concurrent.duration.Duration
import scalaz.\\/
import scalaz.concurrent.Task
object ArgonautUtils {
case class ArgonautError(msg: String) extends Exception(msg)
}
trait ArgonautUtils {
def disjunctionToResult[A](h: CursorHistory)(t: Throwable \\/ A): DecodeResult[A] =
t.fold(
e => DecodeResult.fail(e.getMessage, h),
DecodeResult.ok
)
def tryResult[A](h: CursorHistory)(a: => A): DecodeResult[A] =
disjunctionToResult(h)(\\/.fromTryCatch(a))
def taskResult[A](h: CursorHistory)(t: Task[A]): DecodeResult[A] =
disjunctionToResult(h)(t.get.run)
implicit val URLCodecJson: CodecJson[URL] = CodecJson(
(a: URL) => Json.jString(a.toString),
(c: HCursor) => c.as[String].map(unescapeJavaScript).flatMap(s =>
tryResult(c.history)(new URL(s))
)
)
}
}
| umd-mith/hathi | util/src/main/scala/util/argonaut.scala | Scala | apache-2.0 | 962 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.sql.agg
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.scala._
import org.apache.flink.table.api.config.ExecutionConfigOptions
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{TableException, Types, ValidationException}
import org.apache.flink.table.planner.utils.{StreamTableTestUtil, TableTestBase}
import org.apache.flink.table.runtime.typeutils.DecimalTypeInfo
import org.junit.Test
class AggregateTest extends TableTestBase {
private val util: StreamTableTestUtil = streamTestUtil()
util.addTableSource[(Int, String, Long)](
"MyTable", 'a, 'b, 'c, 'proctime.proctime, 'rowtime.rowtime)
util.addTableSource[(Int, Long, String, Boolean)]("T", 'a, 'b, 'c, 'd)
util.addTableSource[(Long, Int, String)]("T1", 'a, 'b, 'c)
util.addTableSource[(Long, Int, String)]("T2", 'a, 'b, 'c)
util.addTableSource("MyTable1",
Array[TypeInformation[_]](
Types.BYTE, Types.SHORT, Types.INT, Types.LONG, Types.FLOAT, Types.DOUBLE, Types.BOOLEAN,
Types.STRING, Types.LOCAL_DATE, Types.LOCAL_TIME, Types.LOCAL_DATE_TIME,
DecimalTypeInfo.of(30, 20), DecimalTypeInfo.of(10, 5)),
Array("byte", "short", "int", "long", "float", "double", "boolean",
"string", "date", "time", "timestamp", "decimal3020", "decimal105"))
@Test(expected = classOf[ValidationException])
def testGroupingOnNonExistentField(): Unit = {
util.verifyPlan("SELECT COUNT(*) FROM MyTable GROUP BY foo")
}
@Test(expected = classOf[ValidationException])
def testGroupingInvalidSelection(): Unit = {
util.verifyPlan("SELECT b FROM MyTable GROUP BY a")
}
@Test
def testCannotCountOnMultiFields(): Unit = {
thrown.expect(classOf[TableException])
thrown.expectMessage("We now only support the count of one field")
util.verifyPlan("SELECT b, COUNT(a, c) FROM MyTable GROUP BY b")
}
@Test
def testAggWithMiniBatch(): Unit = {
util.tableEnv.getConfig.getConfiguration.setBoolean(
ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ENABLED, true)
util.tableEnv.getConfig.getConfiguration.setString(
ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ALLOW_LATENCY, "1 s")
util.verifyPlan("SELECT b, COUNT(DISTINCT a), MAX(b), SUM(c) FROM MyTable GROUP BY b")
}
@Test
def testAggAfterUnionWithMiniBatch(): Unit = {
util.tableEnv.getConfig.getConfiguration.setBoolean(
ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ENABLED, true)
util.tableEnv.getConfig.getConfiguration.setString(
ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ALLOW_LATENCY, "1 s")
val query =
"""
|SELECT a, sum(b), count(distinct c)
|FROM (
| SELECT * FROM T1
| UNION ALL
| SELECT * FROM T2
|) GROUP BY a
""".stripMargin
util.verifyPlan(query)
}
@Test
def testGroupByWithoutWindow(): Unit = {
util.verifyPlan("SELECT COUNT(a) FROM MyTable GROUP BY b")
}
@Test
def testLocalGlobalAggAfterUnion(): Unit = {
    // enable local-global aggregate optimization
util.tableEnv.getConfig.getConfiguration.setBoolean(
ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ENABLED, true)
util.tableEnv.getConfig.getConfiguration.setString(
ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ALLOW_LATENCY, "1 s")
val sql =
"""
|SELECT a, SUM(b), COUNT(DISTINCT c)
|FROM (
| SELECT * FROM T1
| UNION ALL
| SELECT * FROM T2
|) GROUP BY a
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testAggWithFilterClause(): Unit = {
val sql =
"""
|SELECT
| a,
| SUM(b) FILTER (WHERE c = 'A'),
| COUNT(DISTINCT c) FILTER (WHERE d is true),
| MAX(b)
|FROM T GROUP BY a
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testAggWithFilterClauseWithLocalGlobal(): Unit = {
util.tableEnv.getConfig.getConfiguration.setBoolean(
ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ENABLED, true)
util.tableEnv.getConfig.getConfiguration.setString(
ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ALLOW_LATENCY, "1 s")
val sql =
"""
|SELECT
| a,
| SUM(b) FILTER (WHERE c = 'A'),
| COUNT(DISTINCT c) FILTER (WHERE d is true),
| COUNT(DISTINCT c) FILTER (WHERE b = 1),
| MAX(b)
|FROM T GROUP BY a
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testAggOnDifferentTypes(): Unit = {
// FlinkRelMdModifiedMonotonicity will analyse sum argument's column interval
// this test covers all column interval types
val sql =
"""
|SELECT
| a,
| SUM(CAST(1 as INT)),
| SUM(CAST(2 as BIGINT)),
| SUM(CAST(3 as TINYINT)),
| SUM(CAST(4 as SMALLINT)),
| SUM(CAST(5 as FLOAT)),
| SUM(CAST(6 as DECIMAL)),
| SUM(CAST(7 as DOUBLE))
|FROM T GROUP BY a
""".stripMargin
util.verifyPlanWithType(sql)
}
@Test
def testAvgOnDifferentTypes(): Unit = {
util.verifyPlanWithType(
"""
|SELECT AVG(`byte`),
| AVG(`short`),
| AVG(`int`),
| AVG(`long`),
| AVG(`float`),
| AVG(`double`),
| AVG(`decimal3020`),
| AVG(`decimal105`)
|FROM MyTable1
""".stripMargin)
}
@Test
def testAvgWithRetract(): Unit = {
util.verifyPlanWithTrait("SELECT AVG(a) FROM (SELECT AVG(a) AS a FROM T GROUP BY b)")
}
@Test
def testSum(): Unit = {
util.verifyPlanWithType(
"""
|SELECT SUM(`byte`),
| SUM(`short`),
| SUM(`int`),
| SUM(`long`),
| SUM(`float`),
| SUM(`double`),
| SUM(`decimal3020`),
| SUM(`decimal105`)
|FROM MyTable1
""".stripMargin)
}
@Test
def testSumWithRetract(): Unit = {
util.verifyPlanWithTrait("SELECT SUM(a) FROM (SELECT SUM(a) AS a FROM T GROUP BY b)")
}
@Test
def testMinOnDifferentTypes(): Unit = {
util.verifyPlanWithType(
"""
|SELECT MIN(`byte`),
| MIN(`short`),
| MIN(`int`),
| MIN(`long`),
| MIN(`float`),
| MIN(`double`),
| MIN(`decimal3020`),
| MIN(`decimal105`),
| MIN(`boolean`),
| MIN(`date`),
| MIN(`time`),
| MIN(`timestamp`),
| MIN(`string`)
|FROM MyTable1
""".stripMargin)
}
@Test
def testMinWithRetract(): Unit = {
util.verifyPlanWithTrait("SELECT MIN(a) FROM (SELECT MIN(a) AS a FROM T GROUP BY b)")
}
@Test
def testMaxOnDifferentTypes(): Unit = {
util.verifyPlanWithType(
"""
|SELECT MAX(`byte`),
| MAX(`short`),
| MAX(`int`),
| MAX(`long`),
| MAX(`float`),
| MAX(`double`),
| MAX(`decimal3020`),
| MAX(`decimal105`),
| MAX(`boolean`),
| MAX(`date`),
| MAX(`time`),
| MAX(`timestamp`),
| MAX(`string`)
|FROM MyTable1
""".stripMargin)
}
@Test
def testMaxWithRetract(): Unit = {
util.verifyPlanWithTrait("SELECT MAX(a) FROM (SELECT MAX(a) AS a FROM T GROUP BY b)")
}
@Test
def testGroupByWithConstantKey(): Unit = {
val sql =
"""
|SELECT a, MAX(b), c FROM (SELECT a, 'test' AS c, b FROM T) t GROUP BY a, c
""".stripMargin
util.verifyPlan(sql)
}
}
| bowenli86/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/sql/agg/AggregateTest.scala | Scala | apache-2.0 | 8,453 |
package org.eigengo.scalalp.collections
import scala.io.Source
object Offline extends App {
lazy val products: List[Product] = {
val is = Source.fromInputStream(Offline.getClass.getResourceAsStream("/booze.txt"))
is.getLines().flatMap { line ⇒
line.split('|') match {
case Array(name, price, shelfCategoryName) ⇒ Some(Product(name, BigDecimal(price), shelfCategoryName))
case _ ⇒ None
}
}.toList
}
def search(query: String): List[Product] =
products.filter(p ⇒ p.name.contains(query) || p.shelfCategoryName.contains(query))
def mostExpensive(query: String): Product =
search(query).maxBy(_.price)
def cheapest(query: String): Product =
search(query).minBy(_.price)
println(mostExpensive("Cider"))
}
| eigengo/scala-launchpad | src/main/scala/org/eigengo/scalalp/collections/Offline.scala | Scala | apache-2.0 | 780 |
package edu.rice.habanero.benchmarks.banking
import edu.rice.habanero.actors.{ScalazActor, ScalazActorState, ScalazPool}
import edu.rice.habanero.benchmarks.banking.BankingConfig._
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
import scala.concurrent.Promise
import scala.util.Random
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> ([email protected])
*/
object BankingScalazActorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new BankingScalazActorBenchmark)
}
private final class BankingScalazActorBenchmark extends Benchmark {
def initialize(args: Array[String]) {
BankingConfig.parseArgs(args)
}
def printArgInfo() {
BankingConfig.printArgs()
}
def runIteration() {
val master = new Teller(BankingConfig.A, BankingConfig.N)
master.start()
master.send(StartMessage.ONLY)
ScalazActorState.awaitTermination()
}
def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
if (lastIteration) {
ScalazPool.shutdown()
}
}
}
protected class Teller(numAccounts: Int, numBankings: Int) extends ScalazActor[AnyRef] {
private val self = this
private val accounts = Array.tabulate[Account](numAccounts)((i) => {
new Account(i, BankingConfig.INITIAL_BALANCE)
})
private var numCompletedBankings = 0
private val randomGen = new Random(123456)
protected override def onPostStart() {
accounts.foreach(loopAccount => loopAccount.start())
}
override def process(theMsg: AnyRef) {
theMsg match {
case sm: BankingConfig.StartMessage =>
var m = 0
while (m < numBankings) {
generateWork()
m += 1
}
case sm: BankingConfig.ReplyMessage =>
numCompletedBankings += 1
if (numCompletedBankings == numBankings) {
accounts.foreach(loopAccount => loopAccount.send(StopMessage.ONLY))
exit()
}
case message =>
val ex = new IllegalArgumentException("Unsupported message: " + message)
ex.printStackTrace(System.err)
}
}
def generateWork(): Unit = {
// src is lower than dest id to ensure there is never a deadlock
val srcAccountId = randomGen.nextInt((accounts.length / 10) * 8)
var loopId = randomGen.nextInt(accounts.length - srcAccountId)
if (loopId == 0) {
loopId += 1
}
val destAccountId = srcAccountId + loopId
val srcAccount = accounts(srcAccountId)
val destAccount = accounts(destAccountId)
val amount = Math.abs(randomGen.nextDouble()) * 1000
val sender = self
val cm = new CreditMessage(sender, amount, destAccount)
srcAccount.send(cm)
}
}
protected class Account(id: Int, var balance: Double) extends ScalazActor[AnyRef] {
override def process(theMsg: AnyRef) {
theMsg match {
case dm: DebitMessage =>
balance += dm.amount
val creditor = dm.sender.asInstanceOf[Promise[ReplyMessage]]
creditor.success(ReplyMessage.ONLY)
case cm: CreditMessage =>
balance -= cm.amount
val teller = cm.sender.asInstanceOf[ScalazActor[AnyRef]]
val sender = Promise[ReplyMessage]()
val destAccount = cm.recipient.asInstanceOf[Account]
destAccount.send(new DebitMessage(sender, cm.amount))
ScalazPool.await[ReplyMessage](sender)
teller.send(ReplyMessage.ONLY)
case _: StopMessage =>
exit()
case message =>
val ex = new IllegalArgumentException("Unsupported message: " + message)
ex.printStackTrace(System.err)
}
}
}
}
| smarr/savina | src/main/scala/edu/rice/habanero/benchmarks/banking/BankingScalazActorBenchmark.scala | Scala | gpl-2.0 | 3,783 |
package org.jetbrains.plugins.scala.caches.stats
import java.util.concurrent.atomic.AtomicReference
private class MyConcurrentMap[K, V >: Null] {
private def emptyMap = java.util.Collections.emptyMap[K, V]()
private val ref: AtomicReference[java.util.Map[K, V]] = new AtomicReference(emptyMap)
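  // Returns the value already cached for `k`, or computes it with `v` and installs it using a
  // copy-on-write map plus compareAndSet, retrying (and possibly recomputing) if another
  // thread updated the map concurrently.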
def computeIfAbsent(k: K, v: K => V): V = {
do {
val prev = ref.get()
prev.get(k) match {
case null =>
val newValue = v(k)
val newMap = add(prev, k, newValue)
if (ref.compareAndSet(prev, newMap))
return newValue
case v =>
return v
}
} while (true)
    // never reached; the loop above only exits via return
null
}
def clear(): Unit = ref.set(emptyMap)
def values: java.util.Collection[V] = ref.get.values()
def mapValues[U](f: V => U): java.util.Map[K, U] = {
val map = ref.get()
val result = new java.util.HashMap[K, U]
map.forEach((k, v) => result.put(k, f(v)))
result
}
def map[T](f: (K, V) => T): java.util.List[T] = {
val map = ref.get()
val result = new java.util.ArrayList[T]
map.forEach((k, v) => result.add(f(k, v)))
result
}
private def add(oldMap: java.util.Map[K, V], key: K, value: V): java.util.Map[K, V] = {
val newMap = new java.util.HashMap[K, V](oldMap)
newMap.put(key, value)
java.util.Collections.unmodifiableMap(newMap)
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/caches/stats/MyConcurrentMap.scala | Scala | apache-2.0 | 1,383 |
package scutil.lang
import minitest._
import scutil.core.implicits._
object OrderingTest extends SimpleTestSuite {
test("seqOrdering should order by base order (same)") {
assertEquals(
Seq(Seq(1), Seq(2), Seq(1), Seq(2)) sorted Ordering.sequence[Int](true),
Seq(Seq(1), Seq(1), Seq(2), Seq(2))
)
}
test("seqOrdering should order missing first") {
assertEquals(
Seq(Seq(1), Seq(1,2), Seq(1), Seq(1,2)) sorted Ordering.sequence[Int](true),
Seq(Seq(1), Seq(1), Seq(1,2), Seq(1,2))
)
}
test("seqOrdering should order missing last") {
assertEquals(
Seq(Seq(1), Seq(1,2), Seq(1), Seq(1,2)) sorted Ordering.sequence[Int](false),
Seq(Seq(1,2), Seq(1,2), Seq(1), Seq(1))
)
}
test("seqOrdering should order left to right") {
assertEquals(
Seq(Seq(1,1), Seq(1,2), Seq(2,1), Seq(2,2), Seq(1,1), Seq(1,2), Seq(2,1), Seq(2,2)) sorted Ordering.sequence[Int](false),
Seq(Seq(1,1), Seq(1,1), Seq(1,2), Seq(1,2), Seq(2,1), Seq(2,1), Seq(2,2), Seq(2,2))
)
}
}
| ritschwumm/scutil | modules/core/src/test/scala/scutil/lang/OrderingTest.scala | Scala | bsd-2-clause | 995 |
package org.broadinstitute.clio.server.webservice
import io.circe.Json
import io.circe.syntax._
import org.broadinstitute.clio.util.model.Location
import org.broadinstitute.clio.server.service.UbamService
import org.broadinstitute.clio.transfer.model.UbamIndex
import org.broadinstitute.clio.transfer.model.ubam.UbamKey
class UbamWebServiceSpec extends IndexWebServiceSpec[UbamIndex.type] {
def webServiceName = "UbamWebService"
val mockService: UbamService = mock[UbamService]
val webService = new UbamWebService(mockService)
val onPremKey = UbamKey(
Location.OnPrem,
"barcode",
1,
"library"
)
val cloudKey: UbamKey = onPremKey
.copy(
location = Location.GCP
)
val badMetadataMap = Map(
"gvcf_size" -> "not longable"
)
val badQueryInputMap = Map(
"version" -> "not intable"
)
val emptyOutput: Json = {
UbamKey(
Location.GCP,
"barcode",
1,
"library"
).asJson(UbamIndex.keyEncoder)
}
}
| broadinstitute/clio | clio-server/src/test/scala/org/broadinstitute/clio/server/webservice/UbamWebServiceSpec.scala | Scala | bsd-3-clause | 988 |
package xyz.hyperreal.sprolog
import org.scalatest._
import prop.PropertyChecks
import Prolog.{program, query, queryOnce, db}
class Lists extends FreeSpec with PropertyChecks with Matchers
{
"member" in
{
query( db, "member( d, [a, b, c] )." ) shouldBe "no"
query( db, "member( b, [a, b, c] )." ) shouldBe "yes"
query( db, "member( E, [a, b, c] )." ) shouldBe
""" |E = a
|E = b
|E = c
""".stripMargin.trim
query( db, "L1 = [a, b, c, e], L2 = [a, c, d, e], member( M, L1 ), member( M, L2 )." ) shouldBe
""" |L1 = [a, b, c, e], L2 = [a, c, d, e], M = a
|L1 = [a, b, c, e], L2 = [a, c, d, e], M = c
|L1 = [a, b, c, e], L2 = [a, c, d, e], M = e
""".stripMargin.trim
}
"subset" in
{
query( db, "subset( [b, c], [d, a, c, b] )." ) shouldBe "yes"
query( db, "subset( [e, b, c], [d, a, c, b] )." ) shouldBe "no"
}
"sum_list" in
{
query( db, "sum_list( [4, 5, 6], S )." ) shouldBe "S = 15"
query( db, "sum_list( [], S )." ) shouldBe "S = 0"
}
"is_list" in
{
query( db, "is_list( [4, 5, 6] )." ) shouldBe "yes"
query( db, "is_list( [] )." ) shouldBe "yes"
query( db, "is_list( 4 )." ) shouldBe "no"
}
"length" in
{
query( db, "length( [4, 5, 6], L )." ) shouldBe "L = 3"
query( db, "length( [], L )." ) shouldBe "L = 0"
}
"intersection" in
{
queryOnce( db, "intersection( [a, b, c, d], [b, c, d, e], L )." ) shouldBe "L = [b, c, d]"
}
"powerset" in
{
queryOnce( db, "powerset([1,2,3], P)." ) shouldBe "P = [[], [3], [2], [3, 2], [1], [3, 1], [2, 1], [3, 2, 1]]"
}
"permutation" in
{
query( db, "permutation( [a, b, c], [b, c, a] )." ) shouldBe "yes"
query( db, "permutation( [a, b, c], [c, a] )." ) shouldBe "no"
query( db, "permutation( [a, b, c], P )." ) shouldBe
"""
|P = [a, b, c]
|P = [b, a, c]
|P = [b, c, a]
|P = [a, c, b]
|P = [c, a, b]
|P = [c, b, a]
""".stripMargin.trim
}
"sublist" in
{
query( db, "sublist( [b, c], [a, b, c, d] )." ) shouldBe "yes"
query( db, "sublist( [c, b], [a, b, c, d] )." ) shouldBe "no"
query( db, "sublist( [b, A], [a, b, c, b, d] )." ) shouldBe
""" |A = c
|A = d
""".stripMargin.trim
}
"reverse" in
{
query( db, "reverse( [a, b, c], L )." ) shouldBe "L = [c, b, a]"
}
"append" in
{
query( db, "append( [a, b, c], [d, e, f], [a, b, c, d, e, f] )." ) shouldBe "yes"
query( db, "append( [], [d, e, f], L )." ) shouldBe "L = [d, e, f]"
query( db, "append( [a, b, c], [], L )." ) shouldBe "L = [a, b, c]"
query( db, "append( [a, b], [d, e, f], L )." ) shouldBe "L = [a, b, d, e, f]"
query( db, "append( [a, b, c], [d, e], L )." ) shouldBe "L = [a, b, c, d, e]"
query( db, "append( [a, b, c], L, [a, b, c, d, e, f] )." ) shouldBe "L = [d, e, f]"
}
}
| edadma/sprolog | src/test/scala/Lists.scala | Scala | mit | 2,760 |
/*
* Copyright (c) 2015.
* Created by MrTJP.
* All rights reserved.
*/
package mrtjp.core.fx
import mrtjp.core.fx.particles.CoreParticle
import scala.collection.mutable.{Seq => MSeq}
class SequenceAction extends ParticleAction
{
var actions = MSeq[ParticleAction]()
override def tickLife()
{
super.tickLife()
actions.find(!_.isFinished) match
{
case Some(action) => action.tickLife()
case None =>
}
}
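  // Runs the first child action that has not yet finished for this frame; the sequence itself
  // finishes once every child action has finished.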
override def runOn(p:CoreParticle, frame:Float)
{
super.runOn(p, frame)
actions.find(!_.isFinished) match
{
case Some(action) => action.runOn(p, frame)
case None =>
isFinished = true
return
}
if (actions.forall(_.isFinished))
isFinished = true
}
override def operate(p:CoreParticle, time:Double){}
override def compile(p:CoreParticle)
{
super.compile(p)
actions.foreach(_.compile(p))
}
override def reset()
{
super.reset()
actions.foreach(_.reset())
}
override def copy = ParticleAction.sequence(actions.map(_.copy).toList:_*)
}
| MrTJP/MrTJPCore | src/main/scala/mrtjp/core/fx/SequenceAction.scala | Scala | lgpl-3.0 | 1,201 |
package org.daydev.scala.bb.parse
import org.daydev.scala.bb.parse.lexer.GrammarCombinatorsBbLexer
class TokenStackBbParserSpec
extends BbParserSpec
with TokenStackBbParser
  with GrammarCombinatorsBbLexer
| daydev/bbcode-scala | src/test/scala/org/daydev/scala/bb/parse/TokenStackBbParserSpec.scala | Scala | mit | 211 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import java.time.{Instant, LocalDate}
import org.apache.orc.storage.common.`type`.HiveDecimal
import org.apache.orc.storage.ql.io.sarg.{PredicateLeaf, SearchArgument}
import org.apache.orc.storage.ql.io.sarg.SearchArgument.Builder
import org.apache.orc.storage.ql.io.sarg.SearchArgumentFactory.newBuilder
import org.apache.orc.storage.serde2.io.HiveDecimalWritable
import org.apache.spark.SparkException
import org.apache.spark.sql.catalyst.util.DateTimeUtils.{instantToMicros, localDateToDays, toJavaDate, toJavaTimestamp}
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.quoteIfNeeded
import org.apache.spark.sql.sources.Filter
import org.apache.spark.sql.types._
/**
* Helper object for building ORC `SearchArgument`s, which are used for ORC predicate push-down.
*
* Due to limitation of ORC `SearchArgument` builder, we had to implement separate checking and
* conversion passes through the Filter to make sure we only convert predicates that are known
* to be convertible.
*
* An ORC `SearchArgument` must be built in one pass using a single builder. For example, you can't
* build `a = 1` and `b = 2` first, and then combine them into `a = 1 AND b = 2`. This is quite
* different from the cases in Spark SQL or Parquet, where complex filters can be easily built using
* existing simpler ones.
*
* The annoying part is that, `SearchArgument` builder methods like `startAnd()`, `startOr()`, and
* `startNot()` mutate internal state of the builder instance. This forces us to translate all
* convertible filters with a single builder instance. However, if we try to translate a filter
* before checking whether it can be converted or not, we may end up with a builder whose internal
* state is inconsistent in the case of an inconvertible filter.
*
* For example, to convert an `And` filter with builder `b`, we call `b.startAnd()` first, and then
* try to convert its children. Say we convert `left` child successfully, but find that `right`
* child is inconvertible. Alas, `b.startAnd()` call can't be rolled back, and `b` is inconsistent
* now.
*
* The workaround employed here is to trim the Spark filters before trying to convert them. This
* way, we can only do the actual conversion on the part of the Filter that is known to be
* convertible.
*
* P.S.: Hive seems to use `SearchArgument` together with `ExprNodeGenericFuncDesc` only. Usage of
* builder methods mentioned above can only be found in test code, where all tested filters are
* known to be convertible.
*/
private[sql] object OrcFilters extends OrcFiltersBase {
/**
* Create ORC filter as a SearchArgument instance.
*/
def createFilter(schema: StructType, filters: Seq[Filter]): Option[SearchArgument] = {
val dataTypeMap = schema.map(f => quoteIfNeeded(f.name) -> f.dataType).toMap
// Combines all convertible filters using `And` to produce a single conjunction
// TODO (SPARK-25557): ORC doesn't support nested predicate pushdown, so they are removed.
val newFilters = filters.filter(!_.containsNestedColumn)
val conjunctionOptional = buildTree(convertibleFilters(schema, dataTypeMap, newFilters))
conjunctionOptional.map { conjunction =>
// Then tries to build a single ORC `SearchArgument` for the conjunction predicate.
// The input predicate is fully convertible. There should not be any empty result in the
// following recursive method call `buildSearchArgument`.
buildSearchArgument(dataTypeMap, conjunction, newBuilder).build()
}
}
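  // Illustrative sketch (assumed schema and filters, not from the original sources): for a
  // schema (a INT, b STRING) and filters Seq(GreaterThan("a", 1), EqualTo("b", "x")), the two
  // convertible predicates are first combined into a single conjunction and then translated
  // in one pass, roughly:
  //   newBuilder().startAnd()
  //     .startNot().lessThanEquals("a", PredicateLeaf.Type.LONG, 1L).end() // a > 1
  //     .startAnd().equals("b", PredicateLeaf.Type.STRING, "x").end()      // b = 'x'
  //   .end().build()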
def convertibleFilters(
schema: StructType,
dataTypeMap: Map[String, DataType],
filters: Seq[Filter]): Seq[Filter] = {
import org.apache.spark.sql.sources._
def convertibleFiltersHelper(
filter: Filter,
canPartialPushDown: Boolean): Option[Filter] = filter match {
// At here, it is not safe to just convert one side and remove the other side
// if we do not understand what the parent filters are.
//
// Here is an example used to explain the reason.
// Let's say we have NOT(a = 2 AND b in ('1')) and we do not understand how to
// convert b in ('1'). If we only convert a = 2, we will end up with a filter
// NOT(a = 2), which will generate wrong results.
//
// Pushing one side of AND down is only safe to do at the top level or in the child
// AND before hitting NOT or OR conditions, and in this case, the unsupported predicate
// can be safely removed.
case And(left, right) =>
val leftResultOptional = convertibleFiltersHelper(left, canPartialPushDown)
val rightResultOptional = convertibleFiltersHelper(right, canPartialPushDown)
(leftResultOptional, rightResultOptional) match {
case (Some(leftResult), Some(rightResult)) => Some(And(leftResult, rightResult))
case (Some(leftResult), None) if canPartialPushDown => Some(leftResult)
case (None, Some(rightResult)) if canPartialPushDown => Some(rightResult)
case _ => None
}
// The Or predicate is convertible when both of its children can be pushed down.
// That is to say, if one/both of the children can be partially pushed down, the Or
// predicate can be partially pushed down as well.
//
// Here is an example used to explain the reason.
// Let's say we have
// (a1 AND a2) OR (b1 AND b2),
// a1 and b1 is convertible, while a2 and b2 is not.
// The predicate can be converted as
// (a1 OR b1) AND (a1 OR b2) AND (a2 OR b1) AND (a2 OR b2)
// As per the logical in And predicate, we can push down (a1 OR b1).
case Or(left, right) =>
for {
lhs <- convertibleFiltersHelper(left, canPartialPushDown)
rhs <- convertibleFiltersHelper(right, canPartialPushDown)
} yield Or(lhs, rhs)
case Not(pred) =>
val childResultOptional = convertibleFiltersHelper(pred, canPartialPushDown = false)
childResultOptional.map(Not)
case other =>
for (_ <- buildLeafSearchArgument(dataTypeMap, other, newBuilder())) yield other
}
filters.flatMap { filter =>
convertibleFiltersHelper(filter, true)
}
}
/**
* Get PredicateLeafType which is corresponding to the given DataType.
*/
private def getPredicateLeafType(dataType: DataType) = dataType match {
case BooleanType => PredicateLeaf.Type.BOOLEAN
case ByteType | ShortType | IntegerType | LongType => PredicateLeaf.Type.LONG
case FloatType | DoubleType => PredicateLeaf.Type.FLOAT
case StringType => PredicateLeaf.Type.STRING
case DateType => PredicateLeaf.Type.DATE
case TimestampType => PredicateLeaf.Type.TIMESTAMP
case _: DecimalType => PredicateLeaf.Type.DECIMAL
case _ => throw new UnsupportedOperationException(s"DataType: ${dataType.catalogString}")
}
/**
* Cast literal values for filters.
*
* We need to cast to long because ORC raises exceptions
* at 'checkLiteralType' of SearchArgumentImpl.java.
*/
private def castLiteralValue(value: Any, dataType: DataType): Any = dataType match {
case ByteType | ShortType | IntegerType | LongType =>
value.asInstanceOf[Number].longValue
case FloatType | DoubleType =>
value.asInstanceOf[Number].doubleValue()
case _: DecimalType =>
new HiveDecimalWritable(HiveDecimal.create(value.asInstanceOf[java.math.BigDecimal]))
case _: DateType if value.isInstanceOf[LocalDate] =>
toJavaDate(localDateToDays(value.asInstanceOf[LocalDate]))
case _: TimestampType if value.isInstanceOf[Instant] =>
toJavaTimestamp(instantToMicros(value.asInstanceOf[Instant]))
case _ => value
}
/**
* Build a SearchArgument and return the builder so far.
*
* @param dataTypeMap a map from the attribute name to its data type.
* @param expression the input predicates, which should be fully convertible to SearchArgument.
* @param builder the input SearchArgument.Builder.
* @return the builder so far.
*/
private def buildSearchArgument(
dataTypeMap: Map[String, DataType],
expression: Filter,
builder: Builder): Builder = {
import org.apache.spark.sql.sources._
expression match {
case And(left, right) =>
val lhs = buildSearchArgument(dataTypeMap, left, builder.startAnd())
val rhs = buildSearchArgument(dataTypeMap, right, lhs)
rhs.end()
case Or(left, right) =>
val lhs = buildSearchArgument(dataTypeMap, left, builder.startOr())
val rhs = buildSearchArgument(dataTypeMap, right, lhs)
rhs.end()
case Not(child) =>
buildSearchArgument(dataTypeMap, child, builder.startNot()).end()
case other =>
buildLeafSearchArgument(dataTypeMap, other, builder).getOrElse {
throw new SparkException(
"The input filter of OrcFilters.buildSearchArgument should be fully convertible.")
}
}
}
/**
* Build a SearchArgument for a leaf predicate and return the builder so far.
*
* @param dataTypeMap a map from the attribute name to its data type.
* @param expression the input filter predicates.
* @param builder the input SearchArgument.Builder.
* @return the builder so far.
*/
private def buildLeafSearchArgument(
dataTypeMap: Map[String, DataType],
expression: Filter,
builder: Builder): Option[Builder] = {
def getType(attribute: String): PredicateLeaf.Type =
getPredicateLeafType(dataTypeMap(attribute))
import org.apache.spark.sql.sources._
// NOTE: For all case branches dealing with leaf predicates below, the additional `startAnd()`
// call is mandatory. ORC `SearchArgument` builder requires that all leaf predicates must be
// wrapped by a "parent" predicate (`And`, `Or`, or `Not`).
// Since ORC 1.5.0 (ORC-323), we need to quote for column names with `.` characters
// in order to distinguish predicate pushdown for nested columns.
expression match {
case EqualTo(name, value) if isSearchableType(dataTypeMap(name)) =>
val castedValue = castLiteralValue(value, dataTypeMap(name))
Some(builder.startAnd().equals(name, getType(name), castedValue).end())
case EqualNullSafe(name, value) if isSearchableType(dataTypeMap(name)) =>
val castedValue = castLiteralValue(value, dataTypeMap(name))
Some(builder.startAnd().nullSafeEquals(name, getType(name), castedValue).end())
case LessThan(name, value) if isSearchableType(dataTypeMap(name)) =>
val castedValue = castLiteralValue(value, dataTypeMap(name))
Some(builder.startAnd().lessThan(name, getType(name), castedValue).end())
case LessThanOrEqual(name, value) if isSearchableType(dataTypeMap(name)) =>
val castedValue = castLiteralValue(value, dataTypeMap(name))
Some(builder.startAnd().lessThanEquals(name, getType(name), castedValue).end())
case GreaterThan(name, value) if isSearchableType(dataTypeMap(name)) =>
val castedValue = castLiteralValue(value, dataTypeMap(name))
Some(builder.startNot().lessThanEquals(name, getType(name), castedValue).end())
case GreaterThanOrEqual(name, value) if isSearchableType(dataTypeMap(name)) =>
val castedValue = castLiteralValue(value, dataTypeMap(name))
Some(builder.startNot().lessThan(name, getType(name), castedValue).end())
case IsNull(name) if isSearchableType(dataTypeMap(name)) =>
Some(builder.startAnd().isNull(name, getType(name)).end())
case IsNotNull(name) if isSearchableType(dataTypeMap(name)) =>
Some(builder.startNot().isNull(name, getType(name)).end())
case In(name, values) if isSearchableType(dataTypeMap(name)) =>
val castedValues = values.map(v => castLiteralValue(v, dataTypeMap(name)))
Some(builder.startAnd().in(name, getType(name),
castedValues.map(_.asInstanceOf[AnyRef]): _*).end())
case _ => None
}
}
}
| ConeyLiu/spark | sql/core/v1.2/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilters.scala | Scala | apache-2.0 | 12,871 |
package com.github.mdr.ascii.layout.layering
import org.scalacheck.Prop.forAll
import org.scalacheck.Properties
import com.github.mdr.ascii.graph.Graph
import com.github.mdr.ascii.graph.GraphGenerators._
object LongestDistanceToSinkSpecification extends Properties("LongestDistanceToSink") {
property("longest distance to sink") = forAll(dags) { g: Graph[String] ⇒
val distances = LongestDistancesToSinkCalculator.longestDistancesToSink(g)
g.vertices.forall { v ⇒
distances(v) == paths(g, v).map(_.length).max
}
}
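  // Enumerates every path from `v` to a sink, each path represented by the vertices visited
  // after `v`; a sink therefore yields the single empty path.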
def paths[V](graph: Graph[V], v: V): List[List[V]] = {
graph.outVertices(v) match {
case Nil ⇒ List(List[V]())
case vs ⇒ vs.flatMap(v2 ⇒ paths(graph, v2).map(v2 :: _))
}
}
}
| mdr/ascii-graphs | src/test/scala/com/github/mdr/ascii/layout/layering/LongestDistanceToSinkSpecification.scala | Scala | mit | 750 |
package wrangler.data
/** Maven style artifact with well understood versioning.*/
case class Artifact(group: String, name: String, version: Version) {
/** Produces sbt style formatting of the artifact.*/
def pretty = s"$group % $name % ${version.pretty}"
/** Convert to a generic artifact by changing the version to well formatted string.*/
def toGeneric: GenericArtifact = GenericArtifact(group, name, version.pretty)
}
/** Artifact companion object.*/
object Artifact {
/** Implicit conversion to a generic artifact by changing the version to well formatted string.*/
implicit def ArtifactToGenericArtifact(a: Artifact): GenericArtifact =
a.toGeneric
}
/** Maven style artifact where the version is just a string.*/
case class GenericArtifact(group: String, name: String, version: String) {
def pretty = s"$group % $name % $version"
}
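// Example (illustrative values): GenericArtifact("com.example", "lib", "1.2.3").pretty
// yields "com.example % lib % 1.2.3".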
| CommBank/wrangler | src/main/scala/wrangler/data/Artifact.scala | Scala | apache-2.0 | 859 |
package debop4s.data.orm.jpa
import javax.persistence._
import debop4s.core.ToStringHelper
import debop4s.core.utils.Hashs
import debop4s.data.orm.model.LongEntity
import org.hibernate.{annotations => hba}
import _root_.scala.beans.BeanProperty
/**
* JpaScalaEntity
* Created by debop on 2014. 1. 29.
*/
@Entity
@hba.Cache(region = "scala.jpa", usage = hba.CacheConcurrencyStrategy.READ_WRITE)
@Access(AccessType.FIELD)
class ScalaJpaEntity extends LongEntity {
def this(name: String) {
this()
this.name = name
}
@Column(name = "entityName", nullable = false, length = 32)
@BeanProperty
var name: String = _
override def hashCode(): Int = Hashs.compute(name)
override
def buildStringHelper(): ToStringHelper =
super.buildStringHelper()
.add("name", name)
}
| debop/debop4s | debop4s-data-orm/src/test/scala/debop4s/data/orm/jpa/ScalaJpaEntity.scala | Scala | apache-2.0 | 801 |
package utils
import org.apache.log4j.Logger
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import utils.Launcher.Technique
import techniques._
/**
* Created by fabien and mathieu on 4/28/15.
*/
object TestCases {
val logger = Logger.getLogger(TestCases.getClass.getName)
// TODO: use the files as input test cases and parameters bounds
// contains test cases of the form: wordToTest similar1,similar2,... nonSimilar1,nonSimilar2,...
val inputCases = "hdfs:///projects/temporal-profiles/Tests/testCases"
  // contains techniques and parameter bounds: name lowerBound1,upperBound1,nbSteps1 lowerBound2,upperBound2,nbSteps2 for each parameter
val inputParams = "hdfs:///projects/temporal-profiles/Tests/params"
/**
   * This function was used for debugging because we didn't manage to log from inside the `map` of an RDD.
   * @param log The list containing previous log entries
   * @param msg The message to add to the log
   * @return The updated log
*/
def printLog(log: List[String], msg: String): List[String] = msg::log
/**
* Read and parse the test cases from an input file
* @param spark The spark context
* @return An array containing the test cases
*/
def parseTestCases(spark: SparkContext): Array[(String, List[String], List[String])] = {
val testCases = spark.textFile(inputCases)
testCases.map(line => {
      val tmp = line.split("\\s")
(tmp(0), tmp(1).split(",").toList, tmp(2).split(",").toList)
}).collect()
}
/**
* Read and parse the technique that we will use for the tuning
* @param spark The spark context
* @return An array with the techniques and their parameters
*/
def parseTechniques(spark: SparkContext): Array[(Technique, String, List[(Double, Double, Double)])] = {
val params = spark.textFile(inputParams)
params.map(line => {
      val lineSplit = line.split("\\s")
(lineSplit(0), lineSplit.drop(1).map(s => {
val tuple = s.split(",")
(tuple(0), tuple(1), tuple(2))
}).toList)
}).map(x => (getTechnique(x._1), x._1, x._2.map(y => (y._1.toDouble, y._2.toDouble, y._3.toDouble)))).collect()
}
/**
* Counts how many words from the given test case appear in the result of a technique
* @param result The result of a technique
* @param wordList A list of word from the test cases
* @return How many words are contained in both lists
*/
def count(result: RDD[(String)], wordList: List[String]): Int = {
result.mapPartitions(it => {
var value=0
while(it.hasNext) {
if(wordList.contains(it.next())) {
value += 1
}
}
(value::Nil).iterator
},true).collect().sum
}
/**
* runs a technique with the given parameters
* @param data The data set containing all the words
* @param testedWord The word to test
* @param similarWords The list of similar words
* @param differentWords The list of different words
* @param parameters The parameters for the technique
* @param similarityTechnique The technique we want to use
   * @return A double between 0 and 1 that tells how well the technique matches the test cases
*/
def test(data: RDD[(String, Array[Double])], testedWord: (String, Array[Double]), similarWords: List[String],
differentWords: List[String], parameters: List[Double], similarityTechnique: Technique): (Double, List[String]) = {
//var log = List[String]()
//log = printLog(log,"Trying with parameters "+parameters+".")
val result: RDD[(String)] = similarityTechnique(data, testedWord, parameters)
//log = printLog(log,result.count() + " Matching words:")
//result.collect().foreach(x => log=printLog(log,"word: "+x))
//log = printLog(log, similarWords.size + " similar words: ")
//similarWords.foreach(x => log=printLog(log,"word: "+x))
val simWords = count(result, similarWords)
val diffWords = count(result, differentWords)
//log = printLog(log, simWords + " match(es)")
val simRatio = simWords.toDouble / similarWords.size.toDouble
val diffRatio = diffWords.toDouble / differentWords.size.toDouble
(((simRatio + (1 - diffRatio)) / 2),Nil)
//(simRatio,Nil)
}
/**
   * Recursive function to iterate over the parameters of a metric. This function shouldn't be called directly;
   * use testParameters instead.
* @param data The data set containing all the words
* @param testedWord The word to test
* @param similarWords The list of similar words
* @param differentWords The list of different words
* @param params The list of fixed parameters (that were chosen by the previous iteration of the function)
* @param bounds The bounds for the remaining parameters
* @param similarityTechnique The technique we want to use
   * @return The parameters that yield the best value
*/
def getBestParams(data: RDD[(String, Array[Double])], testedWord: (String, Array[Double]), similarWords: List[String],
differentWords: List[String], params: List[Double], bounds: List[(Double, Double, Double)],
similarityTechnique: Technique): (Double, List[Double], List[String]) = {
bounds match {
case x :: xs => {
val step = x._3
//if step == 0 we do no optimization for this parameter
if (step == 0.0) {
getBestParams(data, testedWord, similarWords, differentWords, params ++ (x._1 :: Nil), xs,
similarityTechnique)
}
else {
var best = (0.0, List[Double](),List[String]())
for (y <- Range.Double.inclusive(x._1, x._2, step)) {
//for (y <- x._1 to(x._2, (x._2 - x._1) / step)) {
val res = getBestParams(data, testedWord, similarWords, differentWords, params ++ (y :: Nil), xs,
similarityTechnique)
if (res._1 > best._1) {
best = res
}
}
best
}
}
case Nil => {
val testProut = test(data, testedWord, similarWords, differentWords, params, similarityTechnique)
(testProut._1, params, testProut._2)
}
}
}
/**
   * Iterates the technique over all the possible parameters and returns the parameters that yield the best value.
* This is a wrapper for the getBestParams function.
* @param data The data set containing all the words
* @param testedWord The word to test
* @param similarWords The list of similar words
* @param differentWords The list of different words
* @param bounds The bounds for the parameters
* @param similarityTechnique The technique we want to use
   * @return The parameters that yield the best value
*/
def testParameters(data: RDD[(String, Array[Double])], testedWord: (String, Array[Double]),
similarWords: List[String], differentWords: List[String], bounds: List[(Double, Double, Double)],
similarityTechnique: Technique): (Double, List[Double], List[String]) = {
getBestParams(data, testedWord, similarWords, differentWords, Nil, bounds, similarityTechnique)
}
/**
* Maps a technique name with the actual technique
* @param name Name of the technique
* @return The technique corresponding to this name
*/
def getTechnique(name: String): Technique = {
name.toLowerCase match {
      // Add your technique methods here. All lowercase for the name, please.
case "naivedifference" => NaiveComparisons.naiveDifferenceTopKScalingAverage
case "naivedivision" => NaiveComparisons.naiveDivisionTopKScalingAverage
case "inverse" => NaiveComparisons.naiveInverseDifference
case "shift" => NaiveComparisons.naiveDifferenceScalingAverageWithShifting
case "divergence" => Divergence.naiveDifferenceDivergence
case "smarterdivergence" => SubTechniques.smarterDivergence
case "peaks" => PeakComparison.peakComparisonWithMeanDerivative
case "dtw" => DynamicTimeWrapping.dtwComparison
case "dtwtopk" => DynamicTimeWrapping.dtwSimpleTopK
case "dtwscaleavgtopk" => DynamicTimeWrapping.dtwComparisonScaleAvgTopK
case "dtwscalemaxtopk" => DynamicTimeWrapping.dtwComparisonScaleMaxTopK
case "peakstopk" => PeakComparison.peaksTopK
case _ => NaiveComparisons.naiveDifferenceTopKScalingAverage
}
}
/**
* Read test cases and technique parameters from hdfs:///projects/temporal-profiles/Tests/testCases
* and hdfs:///projects/temporal-profiles/Tests/params respectively, and tries to find optimal
* parameters for each technique and each test case.
* @param spark SparkContext used to read the config files
   * @param data collection of (word, frequency) tuples to look into
* @return optimal parameters for each technique and each test case
*/
def runTestsAll(spark: SparkContext,
data: RDD[(String, Array[Double])]): Array[Array[(String, Double, List[Double], List[String])]] = {
val techniques = parseTechniques(spark)
val testCases = parseTestCases(spark)
techniques.map(x => runTests(spark, data, x._1, x._2, x._3, testCases))
}
/**
* Same as runTestsAll, but for a single technique.
* @param spark SparkContext used to read the config files
   * @param data collection of (word, frequency) tuples to look into
* @param technique the actual technique
* @param techniqueName the name of the technique (used in the result file)
* @param techniqueParams the bounds of the parameters
   * @param testCases the test cases (each one: a word to test, a list of similar words and a list of different words)
* @return optimal parameters for each test case
*/
def runTests(spark: SparkContext, data: RDD[(String, Array[Double])], technique: Technique,
techniqueName: String, techniqueParams: (List[(Double, Double, Double)]),
testCases: Array[(String, List[String], List[String])] = null): Array[(String, Double, List[Double], List[String])] = {
val test = testCases match {
case null => parseTestCases(spark)
case _ => testCases
}
test.map(t => {
val testName = techniqueName+"_"+t._1
logger.debug("Starting optimization for \\""+techniqueName+"\\".")
val result = testParameters(data, data.filter(x => x._1.equals(t._1)).first(), t._2, t._3, techniqueParams,
technique)
(testName, result._1, result._2, result._3)
})
}
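  // Rough usage sketch (not part of the original file): assuming a SparkContext `sc` and an
  // RDD[(String, Array[Double])] `data` of temporal word profiles, the tuning could be driven
  // with something like:
  //   val tuned = TestCases.runTestsAll(sc, data)
  //   tuned.flatten.foreach { case (name, score, params, _) =>
  //     logger.info(s"$name -> score=$score, params=${params.mkString(",")}")
  //   }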
}
| SidneyBovet/smargn | SparkCommander/src/main/scala/utils/TestCases.scala | Scala | gpl-2.0 | 10,488 |
package controller
/**
 * Holder class for a JSON response value.
 * @param msgs messages (an empty Seq when there are none)
 * @param errorMsg error messages (an empty Map when there are none)
 * @param result the result payload
*/
case class JsonResult(
msgs: Seq[String],
errorMsg: Map[String, Seq[String]],
result: AnyRef
) | nemuzuka/vss-kanban | src/main/scala/controller/JsonResult.scala | Scala | mit | 320 |
package com.emstlk.nacl4s.crypto.sign
import scala.util.control.Breaks._
object GroupElement {
@inline def equal(b: Byte, c: Byte): Byte =
(((b ^ c) - 1) >>> 31).toByte
@inline def negative(b: Byte): Byte =
(b.toLong >>> 63).toByte
@inline def cmov(t: Precomp, u: Precomp, b: Int) = {
FieldElement.cmov(t.yplusx, u.yplusx, b)
FieldElement.cmov(t.yminusx, u.yminusx, b)
FieldElement.cmov(t.xy2d, u.xy2d, b)
}
/** r = 2 * p */
def p2Dbl(r: P1p1, p: P2) = {
val t0 = new Array[Int](10)
FieldElement.sq(r.x, p.x, false)
FieldElement.sq(r.z, p.y, false)
FieldElement.sq(r.t, p.z, true)
FieldElement.add(r.y, p.x, p.y)
FieldElement.sq(t0, r.y, false)
FieldElement.add(r.y, r.z, r.x)
FieldElement.sub(r.z, r.z, r.x)
FieldElement.sub(r.x, t0, r.y)
FieldElement.sub(r.t, r.t, r.z)
}
/** r = 2 * p */
def p3Dbl(r: P1p1, p: P3) = {
val q = new P2
Array.copy(p.x, 0, q.x, 0, 10)
Array.copy(p.y, 0, q.y, 0, 10)
Array.copy(p.z, 0, q.z, 0, 10)
p2Dbl(r, q)
}
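  /** Constant-time lookup of the |b|-th multiple from row `pos` of the precomputed base-point
    * table; the result is negated when b is negative and is the neutral element when b is 0. */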
def select(t: Precomp, pos: Int, b: Byte) = {
val bNegative = negative(b)
val babs = (b - (((-bNegative) & b) << 1)).toByte
t.yplusx(0) = 1
t.yminusx(0) = 1
t.xy2d(0) = 0
var i = 1
while (i <= 9) {
t.yplusx(i) = 0
t.yminusx(i) = 0
t.xy2d(i) = 0
i += 1
}
val base = Const.base
cmov(t, base(pos)(0), equal(babs, 1).toInt)
cmov(t, base(pos)(1), equal(babs, 2).toInt)
cmov(t, base(pos)(2), equal(babs, 3).toInt)
cmov(t, base(pos)(3), equal(babs, 4).toInt)
cmov(t, base(pos)(4), equal(babs, 5).toInt)
cmov(t, base(pos)(5), equal(babs, 6).toInt)
cmov(t, base(pos)(6), equal(babs, 7).toInt)
cmov(t, base(pos)(7), equal(babs, 8).toInt)
val minust = new Precomp
Array.copy(t.yminusx, 0, minust.yplusx, 0, 10)
Array.copy(t.yplusx, 0, minust.yminusx, 0, 10)
i = 0
while (i <= 9) {
minust.xy2d(i) = -t.xy2d(i)
i += 1
}
cmov(t, minust, bNegative.toInt)
}
/** r = p + q */
def add(r: P1p1, p: P3, q: Cached) = {
val t0 = new Array[Int](10)
FieldElement.add(r.x, p.y, p.x)
FieldElement.sub(r.y, p.y, p.x)
FieldElement.mul(r.z, r.x, q.yplusx)
FieldElement.mul(r.y, r.y, q.yminusx)
FieldElement.mul(r.t, q.t2d, p.t)
FieldElement.mul(r.x, p.z, q.z)
FieldElement.add(t0, r.x, r.x)
FieldElement.sub(r.x, r.z, r.y)
FieldElement.add(r.y, r.z, r.y)
FieldElement.add(r.z, t0, r.t)
FieldElement.sub(r.t, t0, r.t)
}
/** r = p - q */
def sub(r: P1p1, p: P3, q: Cached) = {
val t0 = new Array[Int](10)
FieldElement.add(r.x, p.y, p.x)
FieldElement.sub(r.y, p.y, p.x)
FieldElement.mul(r.z, r.x, q.yminusx)
FieldElement.mul(r.y, r.y, q.yplusx)
FieldElement.mul(r.t, q.t2d, p.t)
FieldElement.mul(r.x, p.z, q.z)
FieldElement.add(t0, r.x, r.x)
FieldElement.sub(r.x, r.z, r.y)
FieldElement.add(r.y, r.z, r.y)
FieldElement.sub(r.z, t0, r.t)
FieldElement.add(r.t, t0, r.t)
}
/** r = p + q */
def madd(r: P1p1, p: P3, q: Precomp) = {
val t0 = new Array[Int](10)
FieldElement.add(r.x, p.y, p.x)
FieldElement.sub(r.y, p.y, p.x)
FieldElement.mul(r.z, r.x, q.yplusx)
FieldElement.mul(r.y, r.y, q.yminusx)
FieldElement.mul(r.t, q.xy2d, p.t)
FieldElement.add(t0, p.z, p.z)
FieldElement.sub(r.x, r.z, r.y)
FieldElement.add(r.y, r.z, r.y)
FieldElement.add(r.z, t0, r.t)
FieldElement.sub(r.t, t0, r.t)
}
/** r = p - q */
def msub(r: P1p1, p: P3, q: Precomp) = {
val t0 = new Array[Int](10)
FieldElement.add(r.x, p.y, p.x)
FieldElement.sub(r.y, p.y, p.x)
FieldElement.mul(r.z, r.x, q.yminusx)
FieldElement.mul(r.y, r.y, q.yplusx)
FieldElement.mul(r.t, q.xy2d, p.t)
FieldElement.add(t0, p.z, p.z)
FieldElement.sub(r.x, r.z, r.y)
FieldElement.add(r.y, r.z, r.y)
FieldElement.sub(r.z, t0, r.t)
FieldElement.add(r.t, t0, r.t)
}
/** r = p */
def p1p1ToP2(r: P2, p: P1p1) = {
FieldElement.mul(r.x, p.x, p.t)
FieldElement.mul(r.y, p.y, p.z)
FieldElement.mul(r.z, p.z, p.t)
}
/** r = p */
def p1p1ToP3(r: P3, p: P1p1) = {
FieldElement.mul(r.x, p.x, p.t)
FieldElement.mul(r.y, p.y, p.z)
FieldElement.mul(r.z, p.z, p.t)
FieldElement.mul(r.t, p.x, p.y)
}
def p3ToCached(r: Cached, p: P3) = {
FieldElement.add(r.yplusx, p.y, p.x)
FieldElement.sub(r.yminusx, p.y, p.x)
Array.copy(p.z, 0, r.z, 0, 10)
FieldElement.mul(r.t2d, p.t, Const.d2)
}
def p3ToBytes(s: Array[Byte], h: P3) = {
val recip = new Array[Int](10)
val x = new Array[Int](10)
val y = new Array[Int](10)
FieldElement.invert(recip, h.z)
FieldElement.mul(x, h.x, recip)
FieldElement.mul(y, h.y, recip)
FieldElement.toBytes(s, y)
s(31) = (s(31) ^ (FieldElement.isNegative(x) << 7)).toByte
}
//TODO the same as p3ToBytes
def toBytes(s: Array[Byte], h: P2) = {
val recip = new Array[Int](10)
val x = new Array[Int](10)
val y = new Array[Int](10)
FieldElement.invert(recip, h.z)
FieldElement.mul(x, h.x, recip)
FieldElement.mul(y, h.y, recip)
FieldElement.toBytes(s, y)
s(31) = (s(31) ^ (FieldElement.isNegative(x) << 7)).toByte
}
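  /** Fixed-base scalar multiplication in the style of the ref10 ed25519 implementation:
    * h = a * B, where B is the Ed25519 base point and a is a 32-byte little-endian scalar. */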
def scalarmultBase(h: P3, a: Array[Byte]) = {
val e = new Array[Byte](64)
var i = 0
while (i < 32) {
e(2 * i) = (a(i) & 15).toByte
e(2 * i + 1) = ((a(i) >>> 4) & 15).toByte
i += 1
}
var carry: Byte = 0
i = 0
while (i < 63) {
e(i) = (e(i) + carry).toByte
carry = ((e(i) + 8) >> 4).toByte
e(i) = (e(i) - (carry << 4)).toByte
i += 1
}
e(63) = (e(63) + carry).toByte
h.y(0) = 1
h.z(0) = 1
val t = new Precomp
val r = new P1p1
i = 1
while (i < 64) {
select(t, i / 2, e(i))
madd(r, h, t)
p1p1ToP3(h, r)
i += 2
}
val s = new P2
p3Dbl(r, h)
p1p1ToP2(s, r)
p2Dbl(r, s)
p1p1ToP2(s, r)
p2Dbl(r, s)
p1p1ToP2(s, r)
p2Dbl(r, s)
p1p1ToP3(h, r)
i = 0
while (i < 64) {
select(t, i / 2, e(i))
madd(r, h, t)
p1p1ToP3(h, r)
i += 2
}
}
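  /** Decodes a compressed point from s and stores its negation in h (ref10-style
    * ge_frombytes_negate_vartime); requires s to encode a valid curve point. */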
def fromBytesNegateVartime(h: P3, s: Array[Byte]) = {
FieldElement.fromBytes(h.y, s)
h.z(0) = 1
val u = new Array[Int](10)
FieldElement.sq(u, h.y, false)
val v = new Array[Int](10)
FieldElement.mul(v, u, Const.d)
FieldElement.sub(u, u, h.z)
FieldElement.add(v, v, h.z)
val v3 = new Array[Int](10)
FieldElement.sq(v3, v, false)
FieldElement.mul(v3, v3, v)
FieldElement.sq(h.x, v3, false)
FieldElement.mul(h.x, h.x, v)
FieldElement.mul(h.x, h.x, u)
FieldElement.pow22523(h.x, h.x)
FieldElement.mul(h.x, h.x, v3)
FieldElement.mul(h.x, h.x, u)
val vxx = new Array[Int](10)
FieldElement.sq(vxx, h.x, false)
FieldElement.mul(vxx, vxx, v)
val check = new Array[Int](10)
FieldElement.sub(check, vxx, u)
if (FieldElement.isNonZero(check)) {
FieldElement.add(check, vxx, u)
require(!FieldElement.isNonZero(check))
FieldElement.mul(h.x, h.x, Const.sqrtm1)
}
if (FieldElement.isNegative(h.x) == ((s(31) & 0xff) >>> 7))
FieldElement.neg(h.x, h.x)
FieldElement.mul(h.t, h.x, h.y)
}
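  /** Rewrites the 256-bit scalar a (starting at aOffset) into a signed sliding-window
    * representation with digits in [-15, 15], used by doubleScalarmultVartime below. */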
def slide(r: Array[Byte], a: Array[Byte], aOffset: Int) = {
var i = 0
while (i < 256) {
r(i) = (1 & (a(aOffset + (i >> 3)) >>> (i & 7))).toByte
i += 1
}
i = 0
while (i < 256) {
if (r(i) != 0) {
breakable {
var b = 1
while (b <= 6 && i + b < 256) {
if (r(i + b) != 0) {
if (r(i) + (r(i + b) << b) <= 15) {
r(i) = (r(i) + (r(i + b) << b)).toByte
r(i + b) = 0
} else if (r(i) - (r(i + b) << b) >= -15) {
r(i) = (r(i) - (r(i + b) << b)).toByte
breakable {
var k = i + b
while (k < 256) {
if (r(k) == 0) {
r(k) = 1
break()
}
r(k) = 0
k += 1
}
}
} else break()
}
b += 1
}
}
}
i += 1
}
}
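  /** Variable-time computation of r = a * p + b * B, where B is the Ed25519 base point and
    * a, b are 32-byte little-endian scalars (b is read starting at bOffset). */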
def doubleScalarmultVartime(r: P2, a: Array[Byte], p: P3, b: Array[Byte], bOffset: Int) = {
val aSlide = new Array[Byte](256)
val bSlide = new Array[Byte](256)
slide(aSlide, a, 0)
slide(bSlide, b, bOffset)
val ai = Array(
new Cached, new Cached, new Cached, new Cached,
new Cached, new Cached, new Cached, new Cached
)
p3ToCached(ai(0), p)
val t = new P1p1
p3Dbl(t, p)
val a2 = new P3
p1p1ToP3(a2, t)
val u = new P3
add(t, a2, ai(0))
p1p1ToP3(u, t)
p3ToCached(ai(1), u)
add(t, a2, ai(1))
p1p1ToP3(u, t)
p3ToCached(ai(2), u)
add(t, a2, ai(2))
p1p1ToP3(u, t)
p3ToCached(ai(3), u)
add(t, a2, ai(3))
p1p1ToP3(u, t)
p3ToCached(ai(4), u)
add(t, a2, ai(4))
p1p1ToP3(u, t)
p3ToCached(ai(5), u)
add(t, a2, ai(5))
p1p1ToP3(u, t)
p3ToCached(ai(6), u)
add(t, a2, ai(6))
p1p1ToP3(u, t)
p3ToCached(ai(7), u)
r.y(0) = 1
r.z(0) = 1
var i = 255
breakable {
while (i >= 0) {
if (aSlide(i) != 0 || bSlide(i) != 0) break()
i -= 1
}
}
while (i >= 0) {
p2Dbl(t, r)
if (aSlide(i) > 0) {
p1p1ToP3(u, t)
add(t, u, ai(aSlide(i) / 2))
} else if (aSlide(i) < 0) {
p1p1ToP3(u, t)
sub(t, u, ai(-aSlide(i) / 2))
}
if (bSlide(i) > 0) {
p1p1ToP3(u, t)
madd(t, u, Const.bi(bSlide(i) / 2))
} else if (bSlide(i) < 0) {
p1p1ToP3(u, t)
msub(t, u, Const.bi(-bSlide(i) / 2))
}
p1p1ToP2(r, t)
i -= 1
}
}
}
class Precomp(val yplusx: Array[Int] = new Array[Int](10),
val yminusx: Array[Int] = new Array[Int](10),
val xy2d: Array[Int] = new Array[Int](10))
class Cached(val yplusx: Array[Int] = new Array[Int](10),
val yminusx: Array[Int] = new Array[Int](10),
val z: Array[Int] = new Array[Int](10),
val t2d: Array[Int] = new Array[Int](10))
class P2(val x: Array[Int] = new Array[Int](10),
val y: Array[Int] = new Array[Int](10),
val z: Array[Int] = new Array[Int](10))
class P3(val x: Array[Int] = new Array[Int](10),
val y: Array[Int] = new Array[Int](10),
val z: Array[Int] = new Array[Int](10),
val t: Array[Int] = new Array[Int](10))
class P1p1(val x: Array[Int] = new Array[Int](10),
val y: Array[Int] = new Array[Int](10),
val z: Array[Int] = new Array[Int](10),
val t: Array[Int] = new Array[Int](10))
| emstlk/nacl4s | src/main/scala/com/emstlk/nacl4s/crypto/sign/GroupElement.scala | Scala | mit | 10,882 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.scheduler.queue.test
import akka.stream.ActorMaterializer
import com.sksamuel.elastic4s.http.ElasticDsl._
import com.sksamuel.elastic4s.http.{ElasticClient, ElasticProperties, NoOpRequestConfigCallback}
import common._
import common.rest.WskRestOperations
import org.apache.http.auth.{AuthScope, UsernamePasswordCredentials}
import org.apache.http.impl.client.BasicCredentialsProvider
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.database.elasticsearch.ElasticSearchActivationStoreConfig
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.entity.size._
import org.apache.openwhisk.core.entity.test.ExecHelpers
import org.apache.openwhisk.core.scheduler.queue.{DurationCheckResult, ElasticSearchDurationChecker}
import org.elasticsearch.client.RestClientBuilder.HttpClientConfigCallback
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FlatSpec, Matchers}
import org.scalatestplus.junit.JUnitRunner
import pureconfig.generic.auto._
import pureconfig.loadConfigOrThrow
import java.time.Instant
import java.time.temporal.ChronoUnit
import scala.language.postfixOps
import scala.collection.mutable
import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration._
/**
 * This test tries to fetch the average duration from activation documents. This class guarantees a minimum level of compatibility:
 * if the activation document format changes, it will catch the difference between the expected and the actual values.
*/
@RunWith(classOf[JUnitRunner])
class ElasticSearchDurationCheckerTests
extends FlatSpec
with Matchers
with ScalaFutures
with WskTestHelpers
with StreamLogging
with ExecHelpers
with BeforeAndAfterAll
with BeforeAndAfter {
private val namespace = "durationCheckNamespace"
val wskadmin: RunCliCmd = new RunCliCmd {
override def baseCommand: mutable.Buffer[String] = WskAdmin.baseCommand
}
implicit val mt: ActorMaterializer = ActorMaterializer()
implicit val ec: ExecutionContextExecutor = actorSystem.dispatcher
implicit val timeoutConfig: PatienceConfig = PatienceConfig(5 seconds, 15 milliseconds)
private val auth = BasicAuthenticationAuthKey()
implicit val wskprops: WskProps = WskProps(authKey = auth.compact, namespace = namespace)
implicit val transid: TransactionId = TransactionId.testing
val wsk = new WskRestOperations
val elasticSearchConfig: ElasticSearchActivationStoreConfig =
loadConfigOrThrow[ElasticSearchActivationStoreConfig](ConfigKeys.elasticSearchActivationStore)
val testIndex: String = generateIndex(namespace)
val concurrency = 1
val actionMem: ByteSize = 256.MB
val defaultDurationCheckWindow = 5.seconds
private val httpClientCallback = new HttpClientConfigCallback {
override def customizeHttpClient(httpClientBuilder: HttpAsyncClientBuilder): HttpAsyncClientBuilder = {
val provider = new BasicCredentialsProvider
provider.setCredentials(
AuthScope.ANY,
new UsernamePasswordCredentials(elasticSearchConfig.username, elasticSearchConfig.password))
httpClientBuilder.setDefaultCredentialsProvider(provider)
}
}
private val client =
ElasticClient(
ElasticProperties(s"${elasticSearchConfig.protocol}://${elasticSearchConfig.hosts}"),
NoOpRequestConfigCallback,
httpClientCallback)
private val elasticSearchDurationChecker = new ElasticSearchDurationChecker(client, defaultDurationCheckWindow)
override def beforeAll(): Unit = {
val res = wskadmin.cli(Seq("user", "create", namespace, "-u", auth.compact))
res.exitCode shouldBe 0
println(s"namespace: $namespace, auth: ${auth.compact}")
super.beforeAll()
}
override def afterAll(): Unit = {
client.execute {
deleteIndex(testIndex)
}
wskadmin.cli(Seq("user", "delete", namespace))
logLines.foreach(println)
super.afterAll()
}
behavior of "ElasticSearchDurationChecker"
it should "fetch the proper duration from ES" in withAssetCleaner(wskprops) { (_, assetHelper) =>
val actionName = "avgDuration"
val dummyActionName = "dummyAction"
var totalDuration = 0L
val count = 3
assetHelper.withCleaner(wsk.action, actionName) { (action, _) =>
action.create(actionName, Some(TestUtils.getTestActionFilename("hello.js")))
}
assetHelper.withCleaner(wsk.action, dummyActionName) { (action, _) =>
action.create(dummyActionName, Some(TestUtils.getTestActionFilename("hello.js")))
}
val actionMetaData =
WhiskActionMetaData(
EntityPath(namespace),
EntityName(actionName),
js10MetaData(Some("jsMain"), binary = false),
limits = actionLimits(actionMem, concurrency))
val run1 = wsk.action.invoke(actionName, Map())
withActivation(wsk.activation, run1) { activation =>
activation.response.status shouldBe "success"
}
// wait for 1s
Thread.sleep(1000)
val start = Instant.now()
val run2 = wsk.action.invoke(dummyActionName, Map())
withActivation(wsk.activation, run2) { activation =>
activation.response.status shouldBe "success"
}
1 to count foreach { _ =>
val run = wsk.action.invoke(actionName, Map())
withActivation(wsk.activation, run) { activation =>
activation.response.status shouldBe "success"
totalDuration += activation.duration
}
}
val end = Instant.now()
val timeWindow = math.ceil(ChronoUnit.MILLIS.between(start, end) / 1000.0).seconds
val durationChecker = new ElasticSearchDurationChecker(client, timeWindow)
// it should aggregate the recent activations in 5 seconds
val durationCheckResult: DurationCheckResult =
durationChecker.checkAverageDuration(namespace, actionMetaData)(res => res).futureValue
/**
* Expected sample data
{
"_shards": {
"failed": 0,
"skipped": 0,
"successful": 5,
"total": 5
},
"aggregations": {
"filterAggregation": {
"averageAggregation": {
"value": 14
},
"doc_count": 3
}
},
"hits": {
"hits": [],
"max_score": 0,
"total": 3
},
"timed_out": false,
"took": 2
}
*/
truncateDouble(durationCheckResult.averageDuration.getOrElse(0.0)) shouldBe truncateDouble(
totalDuration.toDouble / count.toDouble)
durationCheckResult.hitCount shouldBe count
}
it should "fetch proper average duration for a package action" in withAssetCleaner(wskprops) { (_, assetHelper) =>
val packageName = "samplePackage"
val actionName = "packageAction"
val fqn = s"$namespace/$packageName/$actionName"
val actionMetaData =
WhiskActionMetaData(
EntityPath(s"$namespace/$packageName"),
EntityName(actionName),
js10MetaData(Some("jsMain"), binary = false),
limits = actionLimits(actionMem, concurrency))
var totalDuration = 0L
val count = 3
assetHelper.withCleaner(wsk.pkg, packageName) { (pkg, _) =>
pkg.create(packageName)
}
assetHelper.withCleaner(wsk.action, fqn) { (action, _) =>
action.create(fqn, Some(TestUtils.getTestActionFilename("hello.js")))
}
1 to count foreach { _ =>
val run = wsk.action.invoke(fqn, Map())
withActivation(wsk.activation, run) { activation =>
activation.response.status shouldBe "success"
}
}
// wait for 1s
Thread.sleep(1000)
val start = Instant.now()
1 to count foreach { _ =>
val run = wsk.action.invoke(fqn, Map())
withActivation(wsk.activation, run) { activation =>
activation.response.status shouldBe "success"
totalDuration += activation.duration
}
}
val end = Instant.now()
val timeWindow = math.ceil(ChronoUnit.MILLIS.between(start, end) / 1000.0).seconds
val durationChecker = new ElasticSearchDurationChecker(client, timeWindow)
val durationCheckResult: DurationCheckResult =
durationChecker.checkAverageDuration(namespace, actionMetaData)(res => res).futureValue
/**
* Expected sample data
{
"_shards": {
"failed": 0,
"skipped": 0,
"successful": 5,
"total": 5
},
"aggregations": {
"filterAggregation": {
"averageAggregation": {
"value": 13
},
"doc_count": 3
}
},
"hits": {
"hits": [],
"max_score": 0,
"total": 6
},
"timed_out": false,
"took": 0
}
*/
truncateDouble(durationCheckResult.averageDuration.getOrElse(0.0)) shouldBe truncateDouble(
totalDuration.toDouble / count.toDouble)
durationCheckResult.hitCount shouldBe count
}
it should "fetch the duration for binding action" in withAssetCleaner(wskprops) { (_, assetHelper) =>
val packageName = "testPackage"
val actionName = "testAction"
val originalFQN = s"$namespace/$packageName/$actionName"
val boundPackageName = "boundPackage"
val actionMetaData =
WhiskActionMetaData(
EntityPath(s"$namespace/$boundPackageName"),
EntityName(actionName),
js10MetaData(Some("jsMain"), binary = false),
limits = actionLimits(actionMem, concurrency),
binding = Some(EntityPath(s"$namespace/$packageName")))
var totalDuration = 0L
val count = 3
assetHelper.withCleaner(wsk.pkg, packageName) { (pkg, _) =>
pkg.create(packageName, shared = Some(true))
}
assetHelper.withCleaner(wsk.action, originalFQN) { (action, _) =>
action.create(originalFQN, Some(TestUtils.getTestActionFilename("hello.js")))
}
assetHelper.withCleaner(wsk.pkg, boundPackageName) { (pkg, _) =>
pkg.bind(packageName, boundPackageName)
}
1 to count foreach { _ =>
val run = wsk.action.invoke(s"$boundPackageName/$actionName", Map())
withActivation(wsk.activation, run) { activation =>
activation.response.status shouldBe "success"
}
}
// wait for 1s
Thread.sleep(1000)
val start = Instant.now()
1 to count foreach { _ =>
val run = wsk.action.invoke(s"$boundPackageName/$actionName", Map())
withActivation(wsk.activation, run) { activation =>
activation.response.status shouldBe "success"
totalDuration += activation.duration
}
}
val end = Instant.now()
val timeWindow = math.ceil(ChronoUnit.MILLIS.between(start, end) / 1000.0).seconds
val durationChecker = new ElasticSearchDurationChecker(client, timeWindow)
val durationCheckResult: DurationCheckResult =
durationChecker.checkAverageDuration(namespace, actionMetaData)(res => res).futureValue
/**
* Expected sample data
{
"_shards": {
"failed": 0,
"skipped": 0,
"successful": 5,
"total": 5
},
"aggregations": {
"averageAggregation": {
"value": 14
}
},
"hits": {
"hits": [],
"max_score": 0,
"total": 3
},
"timed_out": false,
"took": 0
}
*/
truncateDouble(durationCheckResult.averageDuration.getOrElse(0.0)) shouldBe truncateDouble(
totalDuration.toDouble / count.toDouble)
durationCheckResult.hitCount shouldBe count
}
it should "return nothing properly if there is no activation yet" in withAssetCleaner(wskprops) { (_, _) =>
val actionName = "noneAction"
val actionMetaData =
WhiskActionMetaData(
EntityPath(s"$namespace"),
EntityName(actionName),
js10MetaData(Some("jsMain"), binary = false),
limits = actionLimits(actionMem, concurrency))
val durationCheckResult: DurationCheckResult =
elasticSearchDurationChecker.checkAverageDuration(namespace, actionMetaData)(res => res).futureValue
durationCheckResult.averageDuration shouldBe None
durationCheckResult.hitCount shouldBe 0
}
it should "return nothing properly if there is no activation for binding action yet" in withAssetCleaner(wskprops) {
(_, _) =>
val packageName = "testPackage2"
val actionName = "noneAction"
val boundPackageName = "boundPackage2"
val actionMetaData =
WhiskActionMetaData(
EntityPath(s"$namespace/$boundPackageName"),
EntityName(actionName),
js10MetaData(Some("jsMain"), false),
limits = actionLimits(actionMem, concurrency),
binding = Some(EntityPath(s"${namespace}/${packageName}")))
val durationCheckResult: DurationCheckResult =
elasticSearchDurationChecker.checkAverageDuration(namespace, actionMetaData)(res => res).futureValue
durationCheckResult.averageDuration shouldBe None
durationCheckResult.hitCount shouldBe 0
}
private def truncateDouble(number: Double, scale: Int = 2) = {
BigDecimal(number).setScale(scale, BigDecimal.RoundingMode.HALF_UP).toDouble
}
private def generateIndex(namespace: String): String = {
elasticSearchConfig.indexPattern.dropWhile(_ == '/') format namespace.toLowerCase
}
}
| akrabat/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/scheduler/queue/test/ElasticSearchDurationCheckerTests.scala | Scala | apache-2.0 | 14,482 |
package org.jetbrains.sbt.shell.sbt13_7
import org.jetbrains.sbt.shell.UseSbtTestRunTest
import org.junit.Ignore
/**
* Created by Roman.Shein on 13.04.2017.
*/
@Ignore
class UseSbtTestRunTest_13_7 extends UseSbtTestRunTest {
override def getPath: String = "sbt/shell/sbtTestRunTest_07"
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/sbt/shell/sbt13_7/UseSbtTestRunTest_13_7.scala | Scala | apache-2.0 | 296 |
/**
* This file is part of agora-mixnet.
* Copyright (C) 2015-2016 Agora Voting SL <[email protected]>
* agora-mixnet is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License.
* agora-mixnet is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License
* along with agora-mixnet. If not, see <http://www.gnu.org/licenses/>.
**/
package accumulator
import shapeless._
import ops.nat._
import app._
import models._
import play.api.libs.json._
import java.sql.Timestamp
import election.Election
import election.Combined
object ElectionDTOData {
val REGISTERED = "registered"
val CREATED = "created"
val CREATE_ERROR = "create_error"
val STARTED = "started"
val STOPPED = "stopped"
val TALLY_OK = "tally_ok"
val TALLY_ERROR = "tally_error"
val RESULTS_OK = "results_ok"
val DOING_TALLY = "doing_tally"
val RESULTS_PUB = "results_pub"
}
class ElectionDTOData(val id: Long, val numAuth: Int) {
private var state = initDTO()
def apply() = state
private def genAuthArray(): Array[String] = {
var authArray : Array[String] = Array()
if(numAuth > 1) {
for(index <- 2 to numAuth) {
authArray = authArray :+ ("auth" + index)
}
}
authArray
}
private def initDTO(): ElectionDTO = {
val startDate = new Timestamp(2015, 1, 27, 16, 0, 0, 1)
ElectionDTO(
id,
ElectionConfig(
id,
"simple",
"auth1",
genAuthArray(),
"Election title",
"Election description",
Seq(
Question(
"Question 0",
"accordion",
1,
1,
1,
"Question title",
true,
"plurality-at-large",
"over-total-valid-votes",
Seq(
Answer(
0,
"",
"",
0,
Seq(),
"voting option A"
),
Answer(
1,
"",
"",
1,
Seq(),
"voting option B"
)
)
)
),
startDate,
startDate,
ElectionPresentation(
"",
"default",
Seq(),
"",
None
),
false,
None
),
ElectionDTOData.REGISTERED,
startDate,
startDate,
None,
None,
None,
false
)
}
def setDTO(dto: ElectionDTO) = {
state = dto
}
def setState(newState: String) {
state =
ElectionDTO(
state.id,
state.configuration,
newState,
state.startDate,
state.endDate,
state.pks,
state.results,
state.resultsUpdated,
state.real
)
}
def setResults(results: String) {
state =
ElectionDTO(
state.id,
state.configuration,
state.state,
state.startDate,
state.endDate,
state.pks,
Some(results),
state.resultsUpdated,
state.real
)
}
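  // Builds a JSON public-key array of the form
  // [{"q": <group order>, "p": <modulus>, "y": <public key>, "g": <generator>}]
  // from the combined election state and stores it in the DTO's pks field.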
def setPublicKeys[W <: Nat : ToInt](combined: Election[W, Combined]) {
val jsPk : JsValue =
Json.arr(Json.obj(
"q" -> combined.state.cSettings.group.getOrder().toString(),
"p" -> combined.state.cSettings.group.getModulus().toString(),
"y" -> combined.state.publicKey,
"g" -> combined.state.cSettings.generator.getValue().toString()
))
state =
ElectionDTO(
state.id,
state.configuration,
state.state,
state.startDate,
state.endDate,
Some(jsPk.toString()),
state.results,
state.resultsUpdated,
state.real
)
}
} | agoravoting/agora-mixnet | src/main/scala/accumulator/ElectionDTOData.scala | Scala | agpl-3.0 | 4,585 |
package io.skysail.api.osgi.events.impl
import io.skysail.api.osgi.events.EventsService
import org.osgi.service.event.{Event, EventAdmin, EventHandler}
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
class DefaultEventsService(eventAdmin: EventAdmin) extends EventsService with EventHandler {
private val log = LoggerFactory.getLogger(this.getClass)
  override def send(msg: String) = {
    val properties = scala.collection.mutable.Map[String, Any](
      "message" -> msg,
      "time" -> System.currentTimeMillis())
    val reportGeneratedEvent = new org.osgi.service.event.Event(
      "io/skysail/server/demo/services/GENERATED", properties.asJava)
    eventAdmin.sendEvent(reportGeneratedEvent)
  }
override def handleEvent(event: Event): Unit = {
log info s"$event"
}
}
| evandor/skysail-server | skysail.api/src/io/skysail/api/osgi/events/impl/DefaultEventsService.scala | Scala | apache-2.0 | 1,002 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.io.File
import java.util.{Collections, List => JList}
import java.util.concurrent.locks.ReentrantLock
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.concurrent.Future
import org.apache.mesos.Protos.{TaskInfo => MesosTaskInfo, _}
import org.apache.spark.{SecurityManager, SparkContext, SparkException, TaskState}
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.network.shuffle.mesos.MesosExternalShuffleClient
import org.apache.spark.rpc.RpcEndpointAddress
import org.apache.spark.scheduler.{SlaveLost, TaskSchedulerImpl}
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
import org.apache.spark.util.Utils
/**
* A SchedulerBackend that runs tasks on Mesos, but uses "coarse-grained" tasks, where it holds
* onto each Mesos node for the duration of the Spark job instead of relinquishing cores whenever
* a task is done. It launches Spark tasks within the coarse-grained Mesos tasks using the
* CoarseGrainedSchedulerBackend mechanism. This class is useful for lower and more predictable
* latency.
*
* Unfortunately this has a bit of duplication from [[MesosFineGrainedSchedulerBackend]],
* but it seems hard to remove this.
*/
private[spark] class MesosCoarseGrainedSchedulerBackend(
scheduler: TaskSchedulerImpl,
sc: SparkContext,
master: String,
securityManager: SecurityManager)
extends CoarseGrainedSchedulerBackend(scheduler, sc.env.rpcEnv)
with org.apache.mesos.Scheduler
with MesosSchedulerUtils {
val MAX_SLAVE_FAILURES = 2 // Blacklist a slave after this many failures
// Maximum number of cores to acquire (TODO: we'll need more flexible controls here)
val maxCores = conf.get("spark.cores.max", Int.MaxValue.toString).toInt
val maxGpus = conf.getInt("spark.mesos.gpus.max", 0)
private[this] val shutdownTimeoutMS =
conf.getTimeAsMs("spark.mesos.coarse.shutdownTimeout", "10s")
.ensuring(_ >= 0, "spark.mesos.coarse.shutdownTimeout must be >= 0")
// Synchronization protected by stateLock
private[this] var stopCalled: Boolean = false
// If shuffle service is enabled, the Spark driver will register with the shuffle service.
// This is for cleaning up shuffle files reliably.
private val shuffleServiceEnabled = conf.getBoolean("spark.shuffle.service.enabled", false)
// Cores we have acquired with each Mesos task ID
val coresByTaskId = new mutable.HashMap[String, Int]
val gpusByTaskId = new mutable.HashMap[String, Int]
var totalCoresAcquired = 0
var totalGpusAcquired = 0
// SlaveID -> Slave
// This map accumulates entries for the duration of the job. Slaves are never deleted, because
// we need to maintain e.g. failure state and connection state.
private val slaves = new mutable.HashMap[String, Slave]
/**
* The total number of executors we aim to have. Undefined when not using dynamic allocation.
* Initially set to 0 when using dynamic allocation, the executor allocation manager will send
* the real initial limit later.
*/
private var executorLimitOption: Option[Int] = {
if (Utils.isDynamicAllocationEnabled(conf)) {
Some(0)
} else {
None
}
}
/**
* Return the current executor limit, which may be [[Int.MaxValue]]
* before properly initialized.
*/
private[mesos] def executorLimit: Int = executorLimitOption.getOrElse(Int.MaxValue)
// private lock object protecting mutable state above. Using the intrinsic lock
// may lead to deadlocks since the superclass might also try to lock
private val stateLock = new ReentrantLock
val extraCoresPerExecutor = conf.getInt("spark.mesos.extra.cores", 0)
// Offer constraints
private val slaveOfferConstraints =
parseConstraintString(sc.conf.get("spark.mesos.constraints", ""))
// Reject offers with mismatched constraints in seconds
private val rejectOfferDurationForUnmetConstraints =
getRejectOfferDurationForUnmetConstraints(sc)
// Reject offers when we reached the maximum number of cores for this framework
private val rejectOfferDurationForReachedMaxCores =
getRejectOfferDurationForReachedMaxCores(sc)
// A client for talking to the external shuffle service
private val mesosExternalShuffleClient: Option[MesosExternalShuffleClient] = {
if (shuffleServiceEnabled) {
Some(getShuffleClient())
} else {
None
}
}
// This method is factored out for testability
protected def getShuffleClient(): MesosExternalShuffleClient = {
new MesosExternalShuffleClient(
SparkTransportConf.fromSparkConf(conf, "shuffle"),
securityManager,
securityManager.isAuthenticationEnabled(),
securityManager.isSaslEncryptionEnabled())
}
var nextMesosTaskId = 0
@volatile var appId: String = _
def newMesosTaskId(): String = {
val id = nextMesosTaskId
nextMesosTaskId += 1
id.toString
}
override def start() {
super.start()
val driver = createSchedulerDriver(
master,
MesosCoarseGrainedSchedulerBackend.this,
sc.sparkUser,
sc.appName,
sc.conf,
sc.conf.getOption("spark.mesos.driver.webui.url").orElse(sc.ui.map(_.appUIAddress)),
None,
None,
sc.conf.getOption("spark.mesos.driver.frameworkId")
)
unsetFrameworkID(sc)
startScheduler(driver)
}
def createCommand(offer: Offer, numCores: Int, taskId: String): CommandInfo = {
val environment = Environment.newBuilder()
val extraClassPath = conf.getOption("spark.executor.extraClassPath")
extraClassPath.foreach { cp =>
environment.addVariables(
Environment.Variable.newBuilder().setName("SPARK_CLASSPATH").setValue(cp).build())
}
val extraJavaOpts = conf.get("spark.executor.extraJavaOptions", "")
// Set the environment variable through a command prefix
// to append to the existing value of the variable
val prefixEnv = conf.getOption("spark.executor.extraLibraryPath").map { p =>
Utils.libraryPathEnvPrefix(Seq(p))
}.getOrElse("")
environment.addVariables(
Environment.Variable.newBuilder()
.setName("SPARK_EXECUTOR_OPTS")
.setValue(extraJavaOpts)
.build())
sc.executorEnvs.foreach { case (key, value) =>
environment.addVariables(Environment.Variable.newBuilder()
.setName(key)
.setValue(value)
.build())
}
val command = CommandInfo.newBuilder()
.setEnvironment(environment)
val uri = conf.getOption("spark.executor.uri")
.orElse(Option(System.getenv("SPARK_EXECUTOR_URI")))
if (uri.isEmpty) {
val executorSparkHome = conf.getOption("spark.mesos.executor.home")
.orElse(sc.getSparkHome())
.getOrElse {
throw new SparkException("Executor Spark home `spark.mesos.executor.home` is not set!")
}
val runScript = new File(executorSparkHome, "./bin/spark-class").getPath
command.setValue(
"%s \\"%s\\" org.apache.spark.executor.CoarseGrainedExecutorBackend"
.format(prefixEnv, runScript) +
s" --driver-url $driverURL" +
s" --executor-id $taskId" +
s" --hostname ${offer.getHostname}" +
s" --cores $numCores" +
s" --app-id $appId")
} else {
// Grab everything to the first '.'. We'll use that and '*' to
// glob the directory "correctly".
val basename = uri.get.split('/').last.split('.').head
command.setValue(
s"cd $basename*; $prefixEnv " +
"./bin/spark-class org.apache.spark.executor.CoarseGrainedExecutorBackend" +
s" --driver-url $driverURL" +
s" --executor-id $taskId" +
s" --hostname ${offer.getHostname}" +
s" --cores $numCores" +
s" --app-id $appId")
command.addUris(CommandInfo.URI.newBuilder().setValue(uri.get))
}
conf.getOption("spark.mesos.uris").foreach(setupUris(_, command))
command.build()
}
protected def driverURL: String = {
if (conf.contains("spark.testing")) {
"driverURL"
} else {
RpcEndpointAddress(
conf.get("spark.driver.host"),
conf.get("spark.driver.port").toInt,
CoarseGrainedSchedulerBackend.ENDPOINT_NAME).toString
}
}
override def offerRescinded(d: org.apache.mesos.SchedulerDriver, o: OfferID) {}
override def registered(
d: org.apache.mesos.SchedulerDriver, frameworkId: FrameworkID, masterInfo: MasterInfo) {
appId = frameworkId.getValue
mesosExternalShuffleClient.foreach(_.init(appId))
markRegistered()
}
override def sufficientResourcesRegistered(): Boolean = {
totalCoresAcquired >= maxCores * minRegisteredRatio
}
override def disconnected(d: org.apache.mesos.SchedulerDriver) {}
override def reregistered(d: org.apache.mesos.SchedulerDriver, masterInfo: MasterInfo) {}
/**
* Method called by Mesos to offer resources on slaves. We respond by launching an executor,
* unless we've already launched more than we wanted to.
*/
override def resourceOffers(d: org.apache.mesos.SchedulerDriver, offers: JList[Offer]) {
stateLock.synchronized {
if (stopCalled) {
logDebug("Ignoring offers during shutdown")
// Driver should simply return a stopped status on race
// condition between this.stop() and completing here
offers.asScala.map(_.getId).foreach(d.declineOffer)
return
}
logDebug(s"Received ${offers.size} resource offers.")
val (matchedOffers, unmatchedOffers) = offers.asScala.partition { offer =>
val offerAttributes = toAttributeMap(offer.getAttributesList)
matchesAttributeRequirements(slaveOfferConstraints, offerAttributes)
}
declineUnmatchedOffers(d, unmatchedOffers)
handleMatchedOffers(d, matchedOffers)
}
}
private def declineUnmatchedOffers(
d: org.apache.mesos.SchedulerDriver, offers: mutable.Buffer[Offer]): Unit = {
offers.foreach { offer =>
declineOffer(d, offer, Some("unmet constraints"),
Some(rejectOfferDurationForUnmetConstraints))
}
}
private def declineOffer(
d: org.apache.mesos.SchedulerDriver,
offer: Offer,
reason: Option[String] = None,
refuseSeconds: Option[Long] = None): Unit = {
val id = offer.getId.getValue
val offerAttributes = toAttributeMap(offer.getAttributesList)
val mem = getResource(offer.getResourcesList, "mem")
val cpus = getResource(offer.getResourcesList, "cpus")
val ports = getRangeResource(offer.getResourcesList, "ports")
logDebug(s"Declining offer: $id with attributes: $offerAttributes mem: $mem" +
s" cpu: $cpus port: $ports for $refuseSeconds seconds" +
reason.map(r => s" (reason: $r)").getOrElse(""))
refuseSeconds match {
case Some(seconds) =>
val filters = Filters.newBuilder().setRefuseSeconds(seconds).build()
d.declineOffer(offer.getId, filters)
case _ => d.declineOffer(offer.getId)
}
}
/**
* Launches executors on accepted offers, and declines unused offers. Executors are launched
* round-robin on offers.
*
* @param d SchedulerDriver
* @param offers Mesos offers that match attribute constraints
*/
private def handleMatchedOffers(
d: org.apache.mesos.SchedulerDriver, offers: mutable.Buffer[Offer]): Unit = {
val tasks = buildMesosTasks(offers)
for (offer <- offers) {
val offerAttributes = toAttributeMap(offer.getAttributesList)
val offerMem = getResource(offer.getResourcesList, "mem")
val offerCpus = getResource(offer.getResourcesList, "cpus")
val offerPorts = getRangeResource(offer.getResourcesList, "ports")
val id = offer.getId.getValue
if (tasks.contains(offer.getId)) { // accept
val offerTasks = tasks(offer.getId)
logDebug(s"Accepting offer: $id with attributes: $offerAttributes " +
s"mem: $offerMem cpu: $offerCpus ports: $offerPorts." +
s" Launching ${offerTasks.size} Mesos tasks.")
for (task <- offerTasks) {
val taskId = task.getTaskId
val mem = getResource(task.getResourcesList, "mem")
val cpus = getResource(task.getResourcesList, "cpus")
val ports = getRangeResource(task.getResourcesList, "ports").mkString(",")
logDebug(s"Launching Mesos task: ${taskId.getValue} with mem: $mem cpu: $cpus" +
s" ports: $ports")
}
d.launchTasks(
Collections.singleton(offer.getId),
offerTasks.asJava)
} else if (totalCoresAcquired >= maxCores) {
// Reject an offer for a configurable amount of time to avoid starving other frameworks
declineOffer(d, offer, Some("reached spark.cores.max"),
Some(rejectOfferDurationForReachedMaxCores))
} else {
declineOffer(d, offer)
}
}
}
/**
* Returns a map from OfferIDs to the tasks to launch on those offers. In order to maximize
* per-task memory and IO, tasks are round-robin assigned to offers.
*
* @param offers Mesos offers that match attribute constraints
* @return A map from OfferID to a list of Mesos tasks to launch on that offer
*/
private def buildMesosTasks(offers: mutable.Buffer[Offer]): Map[OfferID, List[MesosTaskInfo]] = {
// offerID -> tasks
val tasks = new mutable.HashMap[OfferID, List[MesosTaskInfo]].withDefaultValue(Nil)
// offerID -> resources
val remainingResources = mutable.Map(offers.map(offer =>
(offer.getId.getValue, offer.getResourcesList)): _*)
var launchTasks = true
// TODO(mgummelt): combine offers for a single slave
//
// round-robin create executors on the available offers
while (launchTasks) {
launchTasks = false
for (offer <- offers) {
val slaveId = offer.getSlaveId.getValue
val offerId = offer.getId.getValue
val resources = remainingResources(offerId)
if (canLaunchTask(slaveId, resources)) {
// Create a task
launchTasks = true
val taskId = newMesosTaskId()
val offerCPUs = getResource(resources, "cpus").toInt
val taskGPUs = Math.min(
Math.max(0, maxGpus - totalGpusAcquired), getResource(resources, "gpus").toInt)
val taskCPUs = executorCores(offerCPUs)
val taskMemory = executorMemory(sc)
slaves.getOrElseUpdate(slaveId, new Slave(offer.getHostname)).taskIDs.add(taskId)
val (resourcesLeft, resourcesToUse) =
partitionTaskResources(resources, taskCPUs, taskMemory, taskGPUs)
val taskBuilder = MesosTaskInfo.newBuilder()
.setTaskId(TaskID.newBuilder().setValue(taskId.toString).build())
.setSlaveId(offer.getSlaveId)
.setCommand(createCommand(offer, taskCPUs + extraCoresPerExecutor, taskId))
.setName("Task " + taskId)
taskBuilder.addAllResources(resourcesToUse.asJava)
sc.conf.getOption("spark.mesos.executor.docker.image").foreach { image =>
MesosSchedulerBackendUtil.setupContainerBuilderDockerInfo(
image,
sc.conf,
taskBuilder.getContainerBuilder
)
}
tasks(offer.getId) ::= taskBuilder.build()
remainingResources(offerId) = resourcesLeft.asJava
totalCoresAcquired += taskCPUs
coresByTaskId(taskId) = taskCPUs
if (taskGPUs > 0) {
totalGpusAcquired += taskGPUs
gpusByTaskId(taskId) = taskGPUs
}
}
}
}
tasks.toMap
}
/** Extracts task needed resources from a list of available resources. */
private def partitionTaskResources(
resources: JList[Resource],
taskCPUs: Int,
taskMemory: Int,
taskGPUs: Int)
: (List[Resource], List[Resource]) = {
// partition cpus & mem
val (afterCPUResources, cpuResourcesToUse) = partitionResources(resources, "cpus", taskCPUs)
val (afterMemResources, memResourcesToUse) =
partitionResources(afterCPUResources.asJava, "mem", taskMemory)
val (afterGPUResources, gpuResourcesToUse) =
partitionResources(afterMemResources.asJava, "gpus", taskGPUs)
// If user specifies port numbers in SparkConfig then consecutive tasks will not be launched
// on the same host. This essentially means one executor per host.
// TODO: handle network isolator case
val (nonPortResources, portResourcesToUse) =
partitionPortResources(nonZeroPortValuesFromConfig(sc.conf), afterGPUResources)
(nonPortResources,
cpuResourcesToUse ++ memResourcesToUse ++ portResourcesToUse ++ gpuResourcesToUse)
}
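  /**
   * Returns true when a new executor can be launched on the given offer: enough cpus, memory
   * and ports, the core/executor limits are not yet reached, and the slave is not blacklisted.
   */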
private def canLaunchTask(slaveId: String, resources: JList[Resource]): Boolean = {
val offerMem = getResource(resources, "mem")
val offerCPUs = getResource(resources, "cpus").toInt
val cpus = executorCores(offerCPUs)
val mem = executorMemory(sc)
val ports = getRangeResource(resources, "ports")
val meetsPortRequirements = checkPorts(sc.conf, ports)
cpus > 0 &&
cpus <= offerCPUs &&
cpus + totalCoresAcquired <= maxCores &&
mem <= offerMem &&
numExecutors() < executorLimit &&
slaves.get(slaveId).map(_.taskFailures).getOrElse(0) < MAX_SLAVE_FAILURES &&
meetsPortRequirements
}
private def executorCores(offerCPUs: Int): Int = {
sc.conf.getInt("spark.executor.cores",
math.min(offerCPUs, maxCores - totalCoresAcquired))
}
override def statusUpdate(d: org.apache.mesos.SchedulerDriver, status: TaskStatus) {
val taskId = status.getTaskId.getValue
val slaveId = status.getSlaveId.getValue
val state = mesosToTaskState(status.getState)
logInfo(s"Mesos task $taskId is now ${status.getState}")
stateLock.synchronized {
val slave = slaves(slaveId)
// If the shuffle service is enabled, have the driver register with each one of the
// shuffle services. This allows the shuffle services to clean up state associated with
// this application when the driver exits. There is currently not a great way to detect
// this through Mesos, since the shuffle services are set up independently.
if (state.equals(TaskState.RUNNING) &&
shuffleServiceEnabled &&
!slave.shuffleRegistered) {
assume(mesosExternalShuffleClient.isDefined,
"External shuffle client was not instantiated even though shuffle service is enabled.")
// TODO: Remove this and allow the MesosExternalShuffleService to detect
// framework termination when new Mesos Framework HTTP API is available.
val externalShufflePort = conf.getInt("spark.shuffle.service.port", 7337)
logDebug(s"Connecting to shuffle service on slave $slaveId, " +
s"host ${slave.hostname}, port $externalShufflePort for app ${conf.getAppId}")
mesosExternalShuffleClient.get
.registerDriverWithShuffleService(
slave.hostname,
externalShufflePort,
sc.conf.getTimeAsMs("spark.storage.blockManagerSlaveTimeoutMs",
s"${sc.conf.getTimeAsMs("spark.network.timeout", "120s")}ms"),
sc.conf.getTimeAsMs("spark.executor.heartbeatInterval", "10s"))
slave.shuffleRegistered = true
}
if (TaskState.isFinished(state)) {
// Remove the cores we have remembered for this task, if it's in the hashmap
for (cores <- coresByTaskId.get(taskId)) {
totalCoresAcquired -= cores
coresByTaskId -= taskId
}
// Also remove the gpus we have remembered for this task, if it's in the hashmap
for (gpus <- gpusByTaskId.get(taskId)) {
totalGpusAcquired -= gpus
gpusByTaskId -= taskId
}
// If it was a failure, mark the slave as failed for blacklisting purposes
if (TaskState.isFailed(state)) {
slave.taskFailures += 1
if (slave.taskFailures >= MAX_SLAVE_FAILURES) {
logInfo(s"Blacklisting Mesos slave $slaveId due to too many failures; " +
"is Spark installed on it?")
}
}
executorTerminated(d, slaveId, taskId, s"Executor finished with state $state")
// In case we'd rejected everything before but have now lost a node
d.reviveOffers()
}
}
}
override def error(d: org.apache.mesos.SchedulerDriver, message: String) {
logError(s"Mesos error: $message")
scheduler.error(message)
}
override def stop() {
// Make sure we're not launching tasks during shutdown
stateLock.synchronized {
if (stopCalled) {
logWarning("Stop called multiple times, ignoring")
return
}
stopCalled = true
super.stop()
}
// Wait for executors to report done, or else mesosDriver.stop() will forcefully kill them.
// See SPARK-12330
val startTime = System.nanoTime()
// slaveIdsWithExecutors has no memory barrier, so this is eventually consistent
while (numExecutors() > 0 &&
System.nanoTime() - startTime < shutdownTimeoutMS * 1000L * 1000L) {
Thread.sleep(100)
}
if (numExecutors() > 0) {
logWarning(s"Timed out waiting for ${numExecutors()} remaining executors "
+ s"to terminate within $shutdownTimeoutMS ms. This may leave temporary files "
+ "on the mesos nodes.")
}
// Close the mesos external shuffle client if used
mesosExternalShuffleClient.foreach(_.close())
if (mesosDriver != null) {
mesosDriver.stop()
}
}
override def frameworkMessage(
d: org.apache.mesos.SchedulerDriver, e: ExecutorID, s: SlaveID, b: Array[Byte]) {}
/**
* Called when a slave is lost or a Mesos task finished. Updates local view on
* what tasks are running. It also notifies the driver that an executor was removed.
*/
private def executorTerminated(
d: org.apache.mesos.SchedulerDriver,
slaveId: String,
taskId: String,
reason: String): Unit = {
stateLock.synchronized {
// Do not call removeExecutor() after this scheduler backend was stopped because
// removeExecutor() internally will send a message to the driver endpoint but
// the driver endpoint is not available now, otherwise an exception will be thrown.
if (!stopCalled) {
removeExecutor(taskId, SlaveLost(reason))
}
slaves(slaveId).taskIDs.remove(taskId)
}
}
override def slaveLost(d: org.apache.mesos.SchedulerDriver, slaveId: SlaveID): Unit = {
logInfo(s"Mesos slave lost: ${slaveId.getValue}")
}
override def executorLost(
d: org.apache.mesos.SchedulerDriver, e: ExecutorID, s: SlaveID, status: Int): Unit = {
logInfo("Mesos executor lost: %s".format(e.getValue))
}
override def applicationId(): String =
Option(appId).getOrElse {
logWarning("Application ID is not initialized yet.")
super.applicationId
}
override def doRequestTotalExecutors(requestedTotal: Int): Future[Boolean] = Future.successful {
// We don't truly know if we can fulfill the full amount of executors
// since at coarse grain it depends on the amount of slaves available.
logInfo("Capping the total amount of executors to " + requestedTotal)
executorLimitOption = Some(requestedTotal)
true
}
override def doKillExecutors(executorIds: Seq[String]): Future[Boolean] = Future.successful {
if (mesosDriver == null) {
logWarning("Asked to kill executors before the Mesos driver was started.")
false
} else {
for (executorId <- executorIds) {
val taskId = TaskID.newBuilder().setValue(executorId).build()
mesosDriver.killTask(taskId)
}
// no need to adjust `executorLimitOption` since the AllocationManager already communicated
// the desired limit through a call to `doRequestTotalExecutors`.
// See [[o.a.s.scheduler.cluster.CoarseGrainedSchedulerBackend.killExecutors]]
true
}
}
private def numExecutors(): Int = {
slaves.values.map(_.taskIDs.size).sum
}
}
private class Slave(val hostname: String) {
val taskIDs = new mutable.HashSet[String]()
var taskFailures = 0
var shuffleRegistered = false
}
| likithkailas/StreamingSystems | mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala | Scala | apache-2.0 | 25,069 |
package com.example
//#user-registry-actor
import akka.actor.{ Actor, ActorLogging, Props }
//#user-case-classes
final case class User(name: String, age: Int, countryOfResidence: String)
final case class Users(users: Seq[User])
//#user-case-classes
object UserRegistryActor {
final case class ActionPerformed(description: String)
final case object GetUsers
final case class CreateUser(user: User)
final case class GetUser(name: String)
final case class DeleteUser(name: String)
def props: Props = Props[UserRegistryActor]
}
class UserRegistryActor extends Actor with ActorLogging {
import UserRegistryActor._
var users = Set.empty[User]
def receive: Receive = {
case GetUsers =>
sender() ! Users(users.toSeq)
case CreateUser(user) =>
users += user
sender() ! ActionPerformed(s"User ${user.name} created.")
case GetUser(name) =>
sender() ! users.find(_.name == name)
case DeleteUser(name) =>
users.find(_.name == name) foreach { user => users -= user }
sender() ! ActionPerformed(s"User ${name} deleted.")
}
}
//#user-registry-actor | ddki/my_study_project | language/scala/akkahttp/test-akka/src/main/scala/com/example/UserRegistryActor.scala | Scala | mit | 1,110 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import org.scalatest.{Matchers, WordSpec}
import org.scalatestplus.mockito.MockitoSugar
import uk.gov.hmrc.ct.BoxValidationFixture
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
class CP33Spec extends WordSpec with MockitoSugar with Matchers with BoxValidationFixture[ComputationsBoxRetriever] {
val boxRetriever = mock[ComputationsBoxRetriever]
noFailureIntegerBox("CP33", CP33.apply)
}
| hmrc/ct-calculations | src/test/scala/uk/gov/hmrc/ct/computations/CP33Spec.scala | Scala | apache-2.0 | 1,061 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.testng
import org.scalatest._
import org.scalatest.Suite
import org.scalatest.TestRerunner
import org.scalatest.events._
import Suite.getIndentedText
import Suite.formatterForSuiteAborted
import Suite.formatterForSuiteStarting
import Suite.formatterForSuiteCompleted
import events.MotionToSuppress
import org.testng.TestNG
import org.testng.TestListenerAdapter
/**
* A suite of tests that can be run with either TestNG or ScalaTest. This trait allows you to mark any
* method as a test using TestNG's <code>@Test</code> annotation, and supports all other TestNG annotations.
* Here's an example:
*
* <p><b>BECAUSE OF A SCALADOC BUG IN SCALA 2.8, I HAD TO PUT A SPACE AFTER THE AT SIGN IN ANNOTATION EXAMPLES. IF YOU
* WANT TO COPY AND PASTE FROM THESE EXAMPLES, YOU'LL NEED TO REMOVE THE SPACE BY HAND, OR COPY FROM
* THE <a href="http://www.scalatest.org/scaladoc/doc-1.1/org/scalatest/testng/TestNGSuite.html">TESTNGSUITE SCALADOC FOR VERSION 1.1</a> INSTEAD, WHICH IS ALSO VALID FOR 1.3. - Bill Venners</b></p>
*
* <pre class="stHighlight">
* import org.scalatest.testng.TestNGSuite
* import org.testng.annotations.Test
* import org.testng.annotations.Configuration
* import scala.collection.mutable.ListBuffer
*
* class MySuite extends TestNGSuite {
*
* var sb: StringBuilder = _
* var lb: ListBuffer[String] = _
*
* @ Configuration(beforeTestMethod = true)
* def setUpFixture() {
* sb = new StringBuilder("ScalaTest is ")
* lb = new ListBuffer[String]
* }
*
* @ Test(invocationCount = 3)
* def easyTest() {
* sb.append("easy!")
* assert(sb.toString === "ScalaTest is easy!")
* assert(lb.isEmpty)
* lb += "sweet"
* }
*
* @ Test(groups = Array("com.mycompany.groups.SlowTest"))
* def funTest() {
* sb.append("fun!")
* assert(sb.toString === "ScalaTest is fun!")
* assert(lb.isEmpty)
* }
* }
* </pre>
*
* <p>
* To execute <code>TestNGSuite</code>s with ScalaTest's <code>Runner</code>, you must include TestNG's jar file on the class path or runpath.
* This version of <code>TestNGSuite</code> was tested with TestNG version 5.7.
* </p>
*
* <p>
* See also: <a href="http://www.scalatest.org/getting_started_with_testng" target="_blank">Getting started with TestNG and ScalaTest.</a>
* </p>
*
* @author Josh Cough
* @author Bill Venners
*/
trait TestNGSuite extends Suite { thisSuite =>
/**
* Execute this <code>TestNGSuite</code>.
*
* @param testName an optional name of one test to execute. If <code>None</code>, this class will execute all relevant tests.
* I.e., <code>None</code> acts like a wildcard that means execute all relevant tests in this <code>TestNGSuite</code>.
* @param reporter The reporter to be notified of test events (success, failure, etc).
   * @param filter a <code>Filter</code> with which to filter tests based on their tags. Only tests in included
   * groups will be executed, and tests in excluded groups will not be executed.
*
* @param stopper the <code>Stopper</code> may be used to request an early termination of a suite of tests. However, because TestNG does
* not support the notion of aborting a run early, this class ignores this parameter.
* @param properties a <code>Map</code> of properties that can be used by the executing <code>Suite</code> of tests. This class
* does not use this parameter.
* @param distributor an optional <code>Distributor</code>, into which nested <code>Suite</code>s could be put to be executed
* by another entity, such as concurrently by a pool of threads. If <code>None</code>, nested <code>Suite</code>s will be executed sequentially.
* Because TestNG handles its own concurrency, this class ignores this parameter.
   * @param tracker a <code>Tracker</code> tracking <code>Ordinal</code>s being fired by the current thread.
*/
override def run(testName: Option[String], reporter: Reporter, stopper: Stopper,
filter: Filter, properties: Map[String, Any], distributor: Option[Distributor], tracker: Tracker) {
runTestNG(testName, reporter, filter, tracker)
}
/**
* Runs TestNG with no test name, no groups. All tests in the class will be executed.
* @param reporter the reporter to be notified of test events (success, failure, etc)
*/
private[testng] def runTestNG(reporter: Reporter, tracker: Tracker) {
runTestNG(None, reporter, Filter(), tracker)
}
/**
* Runs TestNG, running only the test method with the given name.
* @param testName the name of the method to run
* @param reporter the reporter to be notified of test events (success, failure, etc)
*/
private[testng] def runTestNG(testName: String, reporter: Reporter, tracker: Tracker) {
runTestNG(Some(testName), reporter, Filter(), tracker)
}
/**
* Runs TestNG. The meat and potatoes.
*
* @param testName if present (Some), then only the method with the supplied name is executed and groups will be ignored
* @param reporter the reporter to be notified of test events (success, failure, etc)
* @param groupsToInclude contains the names of groups to run. only tests in these groups will be executed
* @param groupsToExclude tests in groups in this Set will not be executed
*/
private[testng] def runTestNG(testName: Option[String], reporter: Reporter,
filter: Filter, tracker: Tracker) {
val tagsToInclude =
filter.tagsToInclude match {
case None => Set[String]()
case Some(tti) => tti
}
val tagsToExclude = filter.tagsToExclude
val testng = new TestNG()
// only run the test methods in this class
testng.setTestClasses(Array(this.getClass))
// if testName is supplied, ignore groups.
testName match {
case Some(tn) => setupTestNGToRunSingleMethod(tn, testng)
case None => handleGroups(tagsToInclude, tagsToExclude, testng)
}
this.run(testng, reporter, tracker)
}
/**
* Runs the TestNG object which calls back to the given Reporter.
*/
private[testng] def run(testng: TestNG, reporter: Reporter, tracker: Tracker) {
// setup the callback mechanism
val tla = new MyTestListenerAdapter(reporter, tracker)
testng.addListener(tla)
// finally, run TestNG
testng.run()
}
/**
* Tells TestNG which groups to include and exclude, which is directly a one-to-one mapping.
*/
private[testng] def handleGroups(groupsToInclude: Set[String], groupsToExclude: Set[String], testng: TestNG) {
testng.setGroups(groupsToInclude.mkString(","))
testng.setExcludedGroups(groupsToExclude.mkString(","))
}
/**
   * This method ensures that TestNG will only run the test method whose name matches testName.
*
* The approach is a bit odd however because TestNG doesn't have an easy API for
* running a single method. To get around that we chose to use an AnnotationTransformer
* to add a secret group to the test method's annotation. We then set up TestNG to run only that group.
*
   * NOTE: There was another option - we could have used TestNG's XmlSuites to specify which method to run.
   * That approach was about as much work, offered no clear benefits, and introduced no additional problems either.
*
* @param testName the name of the test method to be executed
*/
private def setupTestNGToRunSingleMethod(testName: String, testng: TestNG) = {
import org.testng.internal.annotations.IAnnotationTransformer
import org.testng.internal.annotations.ITest
import java.lang.reflect.Method
import java.lang.reflect.Constructor
class MyTransformer extends IAnnotationTransformer {
override def transform( annotation: ITest, testClass: java.lang.Class[_], testConstructor: Constructor[_], testMethod: Method){
if (testName.equals(testMethod.getName)) {
annotation.setGroups(Array("org.scalatest.testng.singlemethodrun.methodname"))
}
}
}
testng.setGroups("org.scalatest.testng.singlemethodrun.methodname")
testng.setAnnotationTransformer(new MyTransformer())
}
/**
* This class hooks TestNG's callback mechanism (TestListenerAdapter) to ScalaTest's
* reporting mechanism. TestNG has many different callback points which are a near one-to-one
* mapping with ScalaTest. At each callback point, this class simply creates ScalaTest
* reports and calls the appropriate method on the Reporter.
*
* TODO:
* (12:02:27 AM) bvenners: onTestFailedButWithinSuccessPercentage(ITestResult tr)
* (12:02:34 AM) bvenners: maybe a TestSucceeded with some extra info in the report
*/
private[testng] class MyTestListenerAdapter(reporter: Reporter, tracker: Tracker) extends TestListenerAdapter {
// TODO: Put the tracker in an atomic, because TestNG can go multithreaded?
val report = reporter
import org.testng.ITestContext
import org.testng.ITestResult
private val className = TestNGSuite.this.getClass.getName
/**
* This method is called when TestNG starts, and maps to ScalaTest's suiteStarting.
* @TODO TestNG doesn't seem to know how many tests are going to be executed.
* We are currently telling ScalaTest that 0 tests are about to be run. Investigate
* and/or chat with Cedric to determine if its possible to get this number from TestNG.
*/
override def onStart(itc: ITestContext) = {
val formatter = formatterForSuiteStarting(thisSuite)
report(SuiteStarting(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), formatter))
}
/**
* TestNG's onFinish maps cleanly to suiteCompleted.
* TODO: TestNG does have some extra info here. One thing we could do is map the info
* in the ITestContext object into ScalaTest Reports and fire InfoProvided
*/
override def onFinish(itc: ITestContext) = {
val formatter = formatterForSuiteCompleted(thisSuite)
report(SuiteCompleted(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), None, formatter))
}
/**
* TestNG's onTestStart maps cleanly to TestStarting. Simply build a report
* and pass it to the Reporter.
*/
override def onTestStart(result: ITestResult) = {
report(TestStarting(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), result.getName + params(result),
Some(MotionToSuppress), Some(new TestRerunner(className, result.getName))))
}
/**
* TestNG's onTestSuccess maps cleanly to TestSucceeded. Again, simply build
* a report and pass it to the Reporter.
*/
override def onTestSuccess(result: ITestResult) = {
val testName = result.getName + params(result)
val formatter = getIndentedText(testName, 1, true)
report(TestSucceeded(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), testName,
None, Some(formatter), Some(new TestRerunner(className, result.getName)))) // Can I add a duration?
}
/**
* TestNG's onTestSkipped maps cleanly to TestIgnored. Again, simply build
* a report and pass it to the Reporter.
*/
override def onTestSkipped(result: ITestResult) = {
val testName = result.getName + params(result)
val formatter = getIndentedText(testName, 1, true)
report(TestIgnored(tracker.nextOrdinal(), thisSuite.suiteName, Some(thisSuite.getClass.getName), testName, Some(formatter)))
}
/**
* TestNG's onTestFailure maps cleanly to TestFailed.
*/
override def onTestFailure(result: ITestResult) = {
val throwableOrNull = result.getThrowable
val throwable = if (throwableOrNull != null) Some(throwableOrNull) else None
val message = if (throwableOrNull != null && throwableOrNull.getMessage != null) throwableOrNull.getMessage else Resources("testNGConfigFailed")
val testName = result.getName + params(result)
val formatter = getIndentedText(testName, 1, true)
report(TestFailed(tracker.nextOrdinal(), message, thisSuite.suiteName, Some(thisSuite.getClass.getName), testName, throwable, None, Some(formatter), Some(new TestRerunner(className, result.getName)))) // Can I add a duration?
}
/**
* A TestNG setup method resulted in an exception, and a test method will later fail to run.
* This TestNG callback method has the exception that caused the problem, as well
* as the name of the method that failed. Create a Report with the method name and the
* exception and call reporter(SuiteAborted).
*/
override def onConfigurationFailure(result: ITestResult) = {
val throwableOrNull = result.getThrowable
val throwable = if (throwableOrNull != null) Some(throwableOrNull) else None
val message = if (throwableOrNull != null && throwableOrNull.getMessage != null) throwableOrNull.getMessage else Resources("testNGConfigFailed")
val formatter = formatterForSuiteAborted(thisSuite, message)
report(SuiteAborted(tracker.nextOrdinal(), message, thisSuite.suiteName, Some(thisSuite.getClass.getName), throwable, None, formatter))
}
/**
* TestNG's onConfigurationSuccess doesn't have a clean mapping in ScalaTest.
* Simply create a Report and fire InfoProvided. This works well
* because there may be a large number of setup methods and InfoProvided doesn't
* show up in your face on the UI, and so doesn't clutter the UI.
*/
override def onConfigurationSuccess(result: ITestResult) = { // TODO: Work on this report
// For now don't print anything. Succeed with silence. Is adding clutter.
// report(InfoProvided(tracker.nextOrdinal(), result.getName, Some(NameInfo(thisSuite.suiteName, Some(thisSuite.getClass.getName), None))))
}
private def params(itr: ITestResult): String = {
itr.getParameters match {
case Array() => ""
case _ => "(" + itr.getParameters.mkString(",") + ")"
}
}
}
/**
TODO
(12:02:27 AM) bvenners: onTestFailedButWithinSuccessPercentage(ITestResult tr)
(12:02:34 AM) bvenners: maybe a TestSucceeded with some extra info in the report
**/
/**
* Throws <code>UnsupportedOperationException</code>, because this method is unused by this
   * class, given this class's <code>run</code> method delegates to TestNG to run
* its tests.
*
* <p>
   * The main purpose of this method implementation is to render a compiler error for an attempt
* to mix in a trait that overrides <code>withFixture</code>. Because this
* trait does not actually use <code>withFixture</code>, the attempt to mix
* in behavior would very likely not work.
* </p>
*
*
* @param test the no-arg test function to run with a fixture
*/
override final protected def withFixture(test: NoArgTest) {
throw new UnsupportedOperationException
}
/**
* Throws <code>UnsupportedOperationException</code>, because this method is unused by this
* trait, given this trait's <code>run</code> method delegates to TestNG to run
* its tests.
*
* <p>
   * The main purpose of this method implementation is to render a compiler error for an attempt
* to mix in a trait that overrides <code>runNestedSuites</code>. Because this
* trait does not actually use <code>runNestedSuites</code>, the attempt to mix
* in behavior would very likely not work.
* </p>
*
* @param reporter the <code>Reporter</code> to which results will be reported
* @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early.
* @param filter a <code>Filter</code> with which to filter tests based on their tags
* @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests.
* @param distributor an optional <code>Distributor</code>, into which to put nested <code>Suite</code>s to be run
* by another entity, such as concurrently by a pool of threads. If <code>None</code>, nested <code>Suite</code>s will be run sequentially.
* @param tracker a <code>Tracker</code> tracking <code>Ordinal</code>s being fired by the current thread.
*
* @throws UnsupportedOperationException always.
*/
override final protected def runNestedSuites(reporter: Reporter, stopper: Stopper, filter: Filter,
configMap: Map[String, Any], distributor: Option[Distributor], tracker: Tracker) {
throw new UnsupportedOperationException
}
/**
* Throws <code>UnsupportedOperationException</code>, because this method is unused by this
* trait, given this trait's <code>run</code> method delegates to TestNG to run
* its tests.
*
* <p>
   * The main purpose of this method implementation is to render a compiler error for an attempt
* to mix in a trait that overrides <code>runTests</code>. Because this
* trait does not actually use <code>runTests</code>, the attempt to mix
* in behavior would very likely not work.
* </p>
*
* @param testName an optional name of one test to run. If <code>None</code>, all relevant tests should be run.
* I.e., <code>None</code> acts like a wildcard that means run all relevant tests in this <code>Suite</code>.
* @param reporter the <code>Reporter</code> to which results will be reported
* @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early.
* @param filter a <code>Filter</code> with which to filter tests based on their tags
* @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests.
* @param distributor an optional <code>Distributor</code>, into which to put nested <code>Suite</code>s to be run
* by another entity, such as concurrently by a pool of threads. If <code>None</code>, nested <code>Suite</code>s will be run sequentially.
* @param tracker a <code>Tracker</code> tracking <code>Ordinal</code>s being fired by the current thread.
* @throws UnsupportedOperationException always.
*/
override protected final def runTests(testName: Option[String], reporter: Reporter, stopper: Stopper, filter: Filter,
configMap: Map[String, Any], distributor: Option[Distributor], tracker: Tracker) {
throw new UnsupportedOperationException
}
/**
* Throws <code>UnsupportedOperationException</code>, because this method is unused by this
* trait, given this trait's <code>run</code> method delegates to TestNG to run
* its tests.
*
* <p>
   * The main purpose of this method implementation is to render a compiler error for an attempt
* to mix in a trait that overrides <code>runTest</code>. Because this
* trait does not actually use <code>runTest</code>, the attempt to mix
* in behavior would very likely not work.
* </p>
*
* @param testName the name of one test to run.
* @param reporter the <code>Reporter</code> to which results will be reported
* @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early.
* @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests.
* @param tracker a <code>Tracker</code> tracking <code>Ordinal</code>s being fired by the current thread.
* @throws UnsupportedOperationException always.
*/
override protected final def runTest(testName: String, reporter: Reporter, stopper: Stopper, configMap: Map[String, Any], tracker: Tracker) {
throw new UnsupportedOperationException
}
}
| JimCallahan/Graphics | external/scalatest/src/main/scala/org/scalatest/testng/TestNGSuite.scala | Scala | apache-2.0 | 20,312 |
package com.shorrockin.narrator
import org.specs._
import java.util.concurrent.TimeUnit
class ActionSpec extends Specification with IntervalCreator {
"an action" can {
val story = new Story(1, Map[String, String]())
import story._
"be constructed from a string and function" in {
val action = "action description" as { println("action") }
action.description must beEqual("action description")
action.worker must beSome
}
"use fixed interval timing" in {
val action = "fixed interval" every (5 minutes) as { println("hello") }
action.description must beEqual("fixed interval")
action.interval must beSome[Interval].which(_.start.equals(5))
action.interval must beSome[Interval].which(_.end.equals(None))
action.interval must beSome[Interval].which(_.unit.equals(TimeUnit.MINUTES))
}
"use ranged interval timing" in {
val action = "ranged interval" every (2 to 7 msecs) as { println("hello") }
action.description must beEqual("ranged interval")
action.interval must beSome[Interval].which(_.start.equals(2))
action.interval must beSome[Interval].which(_.end.equals(Some(7)))
action.interval must beSome[Interval].which(_.unit.equals(TimeUnit.MILLISECONDS))
}
"utilize a starting range with 'in' method" in {
// explicitly specify story.in, as the specification class also contains
// an implicit 'in' statement.
val action = story.in (0 to 3 minutes) execute "time started" every (2 to 8 seconds) as { println("hello")}
action.description must beEqual("time started")
action.start must beSome[Interval].which(_.start.equals(0))
action.start must beSome[Interval].which(_.end.equals(Some(3)))
action.start must beSome[Interval].which(_.unit.equals(TimeUnit.MINUTES))
}
"be defined as a followup action" in {
val action = "following" after "initial" as { println("hello") }
action.description must beEqual("following")
action.follows must beSome[String].which(_.equals("initial"))
}
}
}
| shorrockin/narrator | src/test/scala/ActionSpec.scala | Scala | apache-2.0 | 2,080
package com.ataraxer.apps.chess.scala.pieces
import com.ataraxer.apps.chess.scala.Color._
import com.ataraxer.apps.chess.scala.{Board, Cell, Coord, Shift}
/*
 * Piece is a trait that represents a single
 * chess piece which can be located somewhere on the board
*
*/
object Piece {
class ImpossibleMoveException extends Exception
}
trait Piece {
val color: Color
def hasMoved: Boolean
def shortName: String = getClass.getName.split('.').last.substring(0, 2)
protected val directionShifts: List[(Int, Int)]
protected def addMove(position: Coord, board: Board, shift: Shift): List[Coord]
def moveIsValid(board: Board, to: Coord) =
board(to).color != Some(color)
def possibleMoves(position: Coord, board: Board): List[Coord] =
for (shift <- directionShifts;
move <- addMove(position, board, shift))
yield move
def movePossible(board: Board, from: Coord, to: Coord): Boolean =
possibleMoves(from, board) contains to
def copy(hasMoved: Boolean = false): Piece = this match {
case Pawn(_, _) => Pawn(color, hasMoved)
case Rook(_, _) => Rook(color, hasMoved)
case Knight(_, _) => Knight(color, hasMoved)
case Bishop(_, _) => Bishop(color, hasMoved)
case King(_, _) => King(color, hasMoved)
case Queen(_, _) => Queen(color, hasMoved)
}
def move(board: Board, from: Coord, to: Coord): Board = {
val moveIsPossible = possibleMoves(from, board) contains to
if (moveIsPossible)
board.update(
Cell(to, Some(this.copy(hasMoved=true))),
Cell(from, None)
)
else
throw new Piece.ImpossibleMoveException
}
override def toString = color.shortName + shortName
}
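// A hypothetical usage sketch (not part of the original file). Given an existing `board: Board`,
// a `piece: Piece` and two coordinates `from` and `to` (all assumed here), an illegal move is
// signalled with Piece.ImpossibleMoveException, so a caller can first check the move and fall
// back to the unchanged board:
//
//   val nextBoard =
//     if (piece.movePossible(board, from, to)) piece.move(board, from, to)
//     else board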
| ataraxer/chess-game-scala | src/main/scala/pieces/Piece.scala | Scala | mit | 1,687 |
package extruder.laws
import cats.data._
import cats.{Eq, Monad, Monoid, Order}
import extruder.core._
import org.scalacheck.Prop.forAll
import org.scalacheck.ScalacheckShapeless._
import org.scalacheck.{Arbitrary, Prop}
import org.typelevel.discipline.Laws
trait EncoderDecoderGenericTests[F[_], S <: Settings, E, D, O] extends EncoderDecoderMapTests[F, S, E, D, O] {
def genericEncodeDecode[A: Arbitrary: Parser: Show: Order, B: Arbitrary: Parser: Show, C: Arbitrary](
implicit eqFA: Eq[F[A]],
eqFEitherAC: Eq[F[Either[C, A]]],
eqFListA: Eq[F[List[A]]],
encoder: Encoder[F, S, A, E],
decoder: Decoder[F, S, A, O],
cEncoder: Encoder[F, S, C, E],
cDecoder: Decoder[F, S, C, O],
eqFOptA: Eq[F[Option[A]]],
optEncoder: Encoder[F, S, Option[A], E],
optDecoder: Decoder[F, S, Option[A], O],
eqFChainA: Eq[F[Chain[A]]],
eqFNelA: Eq[F[NonEmptyList[A]]],
eqFNevA: Eq[F[NonEmptyVector[A]]],
eqFNecA: Eq[F[NonEmptyChain[A]]],
eqFNeSetA: Eq[F[NonEmptySet[A]]],
chainEncoder: Encoder[F, S, Chain[A], E],
chainDecoder: Decoder[F, S, Chain[A], O],
nelEncoder: Encoder[F, S, NonEmptyList[A], E],
nelDecoder: Decoder[F, S, NonEmptyList[A], O],
nevEncoder: Encoder[F, S, NonEmptyVector[A], E],
nevDecoder: Decoder[F, S, NonEmptyVector[A], O],
necEncoder: Encoder[F, S, NonEmptyChain[A], E],
necDecoder: Decoder[F, S, NonEmptyChain[A], O],
neSetEncoder: Encoder[F, S, NonEmptySet[A], E],
neSetDecoder: Decoder[F, S, NonEmptySet[A], O],
listDecoder: Decoder[F, S, List[A], O],
optStringDecoder: Decoder[F, S, Option[String], O],
stringEncoder: Encoder[F, S, String, E],
eqFMultiClass: Eq[F[MultiClass[A, B]]],
optEqFMultiClass: Eq[F[Option[MultiClass[A, B]]]],
mapEncoder: Encoder[F, S, Map[B, A], E],
mapDecoder: Decoder[F, S, Map[B, A], O],
eqFMapBA: Eq[F[Map[B, A]]],
caseClassEncoder: Encoder[F, S, CaseClass, E],
caseClassDecoder: Decoder[F, S, CaseClass, O],
eqFCaseClass: Eq[F[CaseClass]]
): RuleSet = new RuleSet {
    override def name: String = "genericEncodeDecode"
override def bases: Seq[(String, Laws#RuleSet)] = Nil
override def parents: Seq[RuleSet] = Seq(mapEncodeDecode[A, B, C])
override def props: Seq[(String, Prop)] =
Seq(
"generic encodeFinalizePrepareDecode" -> forAll(laws.encodeFinalizePrepareDecode[CaseClass] _),
"generic encodeDecodeWithPartiallyApplied" -> forAll(laws.encodeDecodeWithPartiallyApplied[CaseClass] _)
)
}
}
object EncoderDecoderGenericTests {
def apply[F[_]: Monad: ExtruderErrors, S <: Settings, E: Monoid, D, O: Monoid](settings: S)(
implicit fin: Transform[F, S, E, D],
prep: Transform[F, S, D, O],
hv: HasValue[F, S, O]
): EncoderDecoderGenericTests[F, S, E, D, O] =
new EncoderDecoderGenericTests[F, S, E, D, O] {
override def F: Monad[F] = Monad[F]
override def monoid: Monoid[E] = Monoid[E]
override def errors: ExtruderErrors[F] = ExtruderErrors[F]
override def laws: EncoderDecoderDerivedLaws[F, S, E, D, O] = EncoderDecoderDerivedLaws[F, S, E, D, O](settings)
}
}
| janstenpickle/extruder | laws/src/main/scala/extruder/laws/EncoderDecoderGenericTests.scala | Scala | mit | 3,138 |
package rip.hansolo.http4s.service
import org.http4s.HttpService
import org.http4s.MediaType._
import org.http4s.dsl._
import org.http4s.headers.`Content-Type`
/**
* Created by Giymo11 on 08.02.2016.
*/
object TilService {
def apply() = service
val service = HttpService.lift( req =>
Ok(<html>
<body style="margin: 0">
<iframe style="width: 100%; height: 100%; border: 0; margin: 0"
src="https://docs.google.com/document/d/1EKExnRpNokVGlOvYpKQA2COOrITBlywvgANAN9qNnEE/pub?embedded=true"></iframe>
</body>
</html>.toString()
).withContentType(Some(`Content-Type`(`text/html`)))
)
}
}
| Giymo11/http4s-hansolo.rip | hansolo/jvm/src/main/scala/rip/hansolo/http4s/service/TilService.scala | Scala | mit | 643
package sbt
import java.io.File
import org.specs._
import IO.{createDirectory, delete, touch, withTemporaryDirectory}
import org.apache.ivy.util.ChecksumHelper
import IfMissing.Fail
object ComponentManagerTest extends Specification
{
val TestID = "manager-test"
"Component manager" should {
"throw an exception if 'file' is called for a non-existing component" in {
withManager { _.file(TestID)(Fail) must throwA[InvalidComponent] }
}
"throw an exception if 'file' is called for an empty component" in {
withManager { manager =>
manager.define(TestID, Nil)
( manager.file(TestID)(Fail) ) must throwA[InvalidComponent]
}
}
"return the file for a single-file component" in {
withManager { manager =>
val hash = defineFile(manager, TestID, "a")
checksum(manager.file(TestID)(Fail)) must beEqualTo(hash)
}
}
"throw an exception if 'file' is called for multi-file component" in {
withManager { manager =>
defineFiles(manager, TestID, "a", "b")
( manager.file(TestID)(Fail) ) must throwA[InvalidComponent]
}
}
"return the files for a multi-file component" in {
withManager { manager =>
val hashes = defineFiles(manager, TestID, "a", "b")
checksum(manager.files(TestID)(Fail)) must haveTheSameElementsAs(hashes)
}
}
"return the files for a single-file component" in {
withManager { manager =>
val hashes = defineFiles(manager, TestID, "a")
checksum(manager.files(TestID)(Fail)) must haveTheSameElementsAs(hashes)
}
}
"throw an exception if 'files' is called for a non-existing component" in {
withManager { _.files(TestID)(Fail) must throwA[InvalidComponent] }
}
"properly cache a file and then retrieve it to an unresolved component" in {
withManager { definingManager =>
val hash = defineFile(definingManager, TestID, "a")
try
{
definingManager.cache(TestID)
withManager { usingManager =>
checksum(usingManager.file(TestID)(Fail)) must beEqualTo(hash)
}
}
finally { definingManager.clearCache(TestID) }
}
}
}
private def checksum(files: Iterable[File]): Seq[String] = files.map(checksum).toSeq
private def checksum(file: File): String = if(file.exists) ChecksumHelper.computeAsString(file, "sha1") else ""
private def defineFile(manager: ComponentManager, id: String, name: String): String = createFile(manager, id, name)(checksum)
private def defineFiles(manager: ComponentManager, id: String, names: String*): Seq[String] = createFiles(manager, id, names : _*)(checksum)
private def createFile[T](manager: ComponentManager, id: String, name: String)(f: File => T): T = createFiles(manager, id, name)(files => f(files.toList.head))
private def createFiles[T](manager: ComponentManager, id: String, names: String*)(f: Seq[File] => T): T =
withTemporaryDirectory { dir =>
val files = names.map(name => new File(dir, name) )
files.foreach(writeRandomContent)
manager.define(id, files)
f(files)
}
private def writeRandomContent(file: File) = IO.write(file, randomString)
private def randomString = "asdf"
private def withManager[T](f: ComponentManager => T): T =
TestLogger( logger => withTemporaryDirectory { temp => f(new ComponentManager(xsbt.boot.Locks, new xsbt.boot.ComponentProvider(temp, true), logger)) } )
}
| ornicar/xsbt | ivy/src/test/scala/ComponentManagerTest.scala | Scala | bsd-3-clause | 3,301
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.keras.nn
import com.intel.analytics.bigdl.keras.KerasBaseSpec
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.nn.keras.{GRU, Sequential => KSequential}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import scala.util.Random
class GRUSpec extends KerasBaseSpec {
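  // Rearranges the Keras GRU parameters into the layout BigDL expects: the three input-to-hidden
  // matrices are transposed and stacked into a single tensor, the three bias vectors are
  // concatenated, two of the recurrent matrices are transposed and stacked, and the remaining
  // recurrent matrix is passed separately (transposed).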
def weightConverter(in: Array[Tensor[Float]]): Array[Tensor[Float]] = {
val w1 = Tensor[Float](in(0).size(2)*3, in(0).size(1))
val w2 = Tensor[Float](in(2).size(1)*3)
val w3 = Tensor[Float](in(1).size(2)*2, in(1).size(1))
w1.narrow(1, 1, in(0).size(2)).copy(in(3).t())
w1.narrow(1, 1 + in(0).size(2), in(0).size(2)).copy(in(0).t())
w1.narrow(1, 1 + 2*in(0).size(2), in(0).size(2)).copy(in(6).t())
w2.narrow(1, 1, in(2).size(1)).copy(in(5))
w2.narrow(1, 1 + in(2).size(1), in(2).size(1)).copy(in(2))
w2.narrow(1, 1 + 2*in(2).size(1), in(2).size(1)).copy(in(8))
w3.narrow(1, 1, in(1).size(2)).copy(in(4).t())
w3.narrow(1, 1 + in(1).size(2), in(1).size(2)).copy(in(1).t())
Array(w1, w2, w3, in(7).t())
}
"GRU not return sequences" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[28, 28])
|input = np.random.random([2, 28, 28])
|output_tensor = GRU(128)(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = KSequential[Float]()
val layer = GRU[Float](128, inputShape = Shape(28, 28))
seq.add(layer)
seq.getOutputShape().toSingle().toArray should be (Array(-1, 128))
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode, weightConverter)
}
"GRU return sequences" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[32, 32])
|input = np.random.random([2, 32, 32])
|output_tensor = GRU(36, return_sequences=True, activation="relu")(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = KSequential[Float]()
val layer = GRU[Float](36, returnSequences = true,
activation = "relu", inputShape = Shape(32, 32))
seq.add(layer)
seq.getOutputShape().toSingle().toArray should be (Array(-1, 32, 36))
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode, weightConverter)
}
"GRU go backwards and return sequences" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[28, 32])
|input = np.random.random([1, 28, 32])
|output_tensor = GRU(16, return_sequences=True, go_backwards=True)(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = KSequential[Float]()
val layer = GRU[Float](16, returnSequences = true,
goBackwards = true, inputShape = Shape(28, 32))
seq.add(layer)
seq.getOutputShape().toSingle().toArray should be (Array(-1, 28, 16))
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode, weightConverter)
}
}
class GRUSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val layer = GRU[Float](16, returnSequences = true,
goBackwards = true, inputShape = Shape(28, 32))
layer.build(Shape(2, 28, 32))
val input = Tensor[Float](2, 28, 32).apply1(_ => Random.nextFloat())
runSerializationTest(layer, input)
}
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/keras/nn/GRUSpec.scala | Scala | apache-2.0 | 4,289 |
package com.sopranoworks.bolt.values
import com.sopranoworks.bolt._
import com.google.cloud.spanner.{ResultSet, ResultSets, Struct, Type, Value=>SValue, Database => SDatabase}
import org.specs2.mutable.Specification
import scala.collection.JavaConversions._
class IdentifierWithFieldValueTest extends Specification {
class DummyDatabase extends Database {
var tables = Map.empty[String,Table]
override def table(name: String): Option[Table] = tables.get(name)
}
class DummyNut extends Bolt.Nut(null) {
private val _database = new DummyDatabase
override def database: Database = _database
private var _queryCount = 0
def queryCount = _queryCount
override def executeNativeQuery(sql: String): ResultSet = {
val sb = Struct.newBuilder()
sb.set("ONE").to(SValue.int64(1))
sb.set("TWO").to(SValue.int64(2))
sb.set("THREE").to(SValue.int64(2))
_queryCount += 1
ResultSets.forRows(Type.struct(List(Type.StructField.of("ONE",Type.int64()),Type.StructField.of("TWO",Type.int64()),Type.StructField.of("THREE",Type.int64()) )),List(sb.build()))
}
}
"resolveReference" should {
"column name" in {
val tbl = Table(null,"TEST_TABLE",
List(Column("x",0,"INT64",false),Column("y",1,"INT64",false),Column("y",2,"INT64",false)),
Index("PRIMARY_KEY",List(IndexColumn("x",0,"INT64",false,"ASC"))),
Map.empty[String,Index])
val nat = new DummyNut
nat.database.asInstanceOf[DummyDatabase].tables += ("TEST_TABLE"->tbl)
val qc = QueryContext(nat,null)
val v = IdentifierWithFieldValue("TEST_TABLE",List("x"),qc)
v.resolveReference()
v.eval.apply().isInstanceOf[TableColumnValue] must_== true
v().asInstanceOf[TableColumnValue].text must_== "TEST_TABLE.x"
}
"subquery field name" in {
val nat = new DummyNut
val qc = QueryContext(nat,null)
qc.setSubquery(SubqueryValue(nat,"SELECT *",qc))
qc.addAlias(ExpressionAlias("x",qc.subquery.get))
val v = IdentifierWithFieldValue("x",List("ONE"),qc)
v.resolveReference()
v.eval.asValue.isInstanceOf[IntValue] must_== true
v.asValue.asInstanceOf[IntValue].value must_== 1
}
"struct field name" in {
val st = StructValue()
st.addValue(IntValue("ONE",1,true))
st.addValue(IntValue("TWO",2,true))
st.addValue(IntValue("THREE",3,true))
st.addFieldName("ONE",0)
st.addFieldName("TWO",1)
st.addFieldName("THREE",2)
val nat = new DummyNut
val qc = QueryContext(nat,null)
qc.addAlias(ExpressionAlias("x",st))
val v = IdentifierWithFieldValue("x",List("ONE"),qc)
v.resolveReference()
v.eval.asValue.isInstanceOf[IntValue] must_== true
v.asValue.asInstanceOf[IntValue].value must_== 1
}
"alias table column name" in {
val tbl = Table(null,"TEST_TABLE",
List(Column("x",0,"INT64",false),Column("y",1,"INT64",false),Column("y",2,"INT64",false)),
Index("PRIMARY_KEY",List(IndexColumn("x",0,"INT64",false,"ASC"))),
Map.empty[String,Index])
val nat = new DummyNut
nat.database.asInstanceOf[DummyDatabase].tables += ("TEST_TABLE"->tbl)
val qc = QueryContext(nat,null)
qc.addAlias(new TableAlias("T","TEST_TABLE"))
val v = IdentifierWithFieldValue("T",List("x"),qc)
v.resolveReference()
v.eval.apply().isInstanceOf[TableColumnValue] must_== true
v().asInstanceOf[TableColumnValue].text must_== "TEST_TABLE.x"
}
}
}
| OsamuTakahashi/bolt | src/test/scala/com/sopranoworks/bolt/values/IdentifierWithFieldValueTest.scala | Scala | mit | 3,524 |
package vanadis.modules.examples.scalacalc.calculator
import vanadis.ext.{Inject, Module, Expose}
import vanadis.modules.examples.scalacalc.calcservices._
@Module { val moduleType = "scalacalc-calculator" }
class CalculatorModule {
@Inject var subtractor:Subtractor = null
@Inject var adder:Adder = null
@Inject var divisor:Divisor = null
@Inject var multiplier:Multiplier = null
@Expose
def getCalculator() : PocketCalculator = new MyPocketCalculator(this)
}
| kjetilv/vanadis | modules/examples/scalacalc/calculator/src/main/scala/vanadis/modules/examples/scalacalc/calculator/CalculatorModule.scala | Scala | apache-2.0 | 479 |
// The MIT License (MIT)
//
// Copyright (c) 2015 David Heidrich, BowlingX <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.bowlingx.commentp
import javax.inject.Inject
import _root_.akka.actor.{ActorSystem, ActorRef}
import _root_.akka.pattern.ask
import _root_.akka.util.Timeout
import com.bowlingx.commentp.akka.{AkkaBroadcaster, AkkaCluster, DidBroadcast}
import org.atmosphere.cpr.{AtmosphereResource, BroadcasterFactory}
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration
import com.sksamuel.elastic4s.ElasticClient
/**
 * Reflects the environment this application runs in and
 * shares access to common services.
*/
trait Environment {
val actorSystem:ActorSystem
val actionActor: ActorRef
val broadcasterFactory: BroadcasterFactory
val elasticClient:ElasticClient
def getBroadcaster: AkkaBroadcaster = broadcasterFactory.get().asInstanceOf[AkkaBroadcaster]
/**
   * Runs a given channel with its protocol and executes the corresponding action.
* @param c Channel
* @param timeout a timeout until the future should be completed
* @return
*/
def run(c: Channel)(implicit timeout: Timeout): Future[Any] = {
actionActor ? c
}
/**
   * Broadcasts a message once and destroys the broadcaster afterwards.
* @param id the broadcast id
* @param msg the message
* @param resources resources to bind to this broadcaster
* @param duration timeout for receiving a message
* @return
*/
def broadcastOnce(id: String, msg: Any, resources:List[AtmosphereResource] = List.empty[AtmosphereResource])
(implicit duration: FiniteDuration, executionContext:ExecutionContext): Future[DidBroadcast] = {
val broadcaster = broadcasterFactory.lookup(id, true).asInstanceOf[AkkaBroadcaster]
resources foreach(resource => broadcaster.addAtmosphereResource(resource))
val future = broadcaster.future(msg)
broadcaster.broadcast(msg)
future foreach { didBroadcast =>
broadcaster.destroy()
}
future
}
}
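// A minimal usage sketch (not part of the original file): broadcasting a one-off message to a
// single resource and getting back the delivery confirmation. The channel id and message below
// are assumptions for illustration only.
//
//   import scala.concurrent.duration._
//
//   def notifyOnce(env: Environment, resource: AtmosphereResource)
//                 (implicit ec: ExecutionContext): Future[DidBroadcast] = {
//     implicit val timeout: FiniteDuration = 5.seconds
//     env.broadcastOnce("/comments/42", "new-comment", List(resource))
//   }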
class ServletEnvironment @Inject()(val actorSystem: ActorSystem,
val broadcasterFactory: BroadcasterFactory,
val actionActor: ActorRef,
val elasticClient:ElasticClient) extends Environment {
}
| BowlingX/commentp | src/main/scala/com/bowlingx/commentp/Environment.scala | Scala | mit | 3,379 |
/*
* Copyright (C) 2014-2017 Juergen Pfundt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org
import org.parboiled2.CharPredicate
import org.parboiled2.CharPredicate.AlphaNum
package object arktos {
val arktos_version = "Arktos version 0.9"
  /* Surrogate treatment borrowed from the discussion at
https://gitter.im/sirthias/parboiled2?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge */
val isHighSurrogate = CharPredicate.from(Character.isHighSurrogate)
val isLowSurrogate = CharPredicate.from(Character.isLowSurrogate)
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
val unreserved = AlphaNum ++ '-' ++ '.' ++ '_' ++ '~'
// gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
val gen_delims = CharPredicate(':', '/', '?', '#', '[', ']', '@')
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")" / "*" / "+" / "," / ";" / "="
val sub_delims = CharPredicate('!', '$', '&', '\\'', '(', ')', '*', '+', ',', ';', '=')
// query_delims = CharPredicate('!', '$', ''', '(', ')', '*', '+', ',', ';')
val query_delims = CharPredicate('!', '$', '\\'', '(', ')', '*', '+', ',', ';')
// reserved = gen-delims / sub-delims
val reserved = gen_delims ++ sub_delims
val notEncoded = unreserved ++ reserved ++ query_delims
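  // For illustration (not part of the original file): these predicates behave like Char => Boolean
  // functions, e.g. unreserved('~') and sub_delims('!') hold while gen_delims('~') does not.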
}
| JuPfu/arktos | shared/src/main/scala/org/arktos/package.scala | Scala | apache-2.0 | 1,785 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import scala.reflect.ClassTag
import scala.util.control.ControlThrowable
import org.apache.spark.sql._
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.sources.StreamSourceProvider
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}
import org.apache.spark.util.ManualClock
class StreamSuite extends StreamTest {
import testImplicits._
test("map with recovery") {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map(_ + 1)
testStream(mapped)(
AddData(inputData, 1, 2, 3),
StartStream(),
CheckAnswer(2, 3, 4),
StopStream,
AddData(inputData, 4, 5, 6),
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7))
}
test("join") {
// Make a table and ensure it will be broadcast.
val smallTable = Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word")
// Join the input stream with a table.
val inputData = MemoryStream[Int]
val joined = inputData.toDS().toDF().join(smallTable, $"value" === $"number")
testStream(joined)(
AddData(inputData, 1, 2, 3),
CheckAnswer(Row(1, 1, "one"), Row(2, 2, "two")),
AddData(inputData, 4),
CheckAnswer(Row(1, 1, "one"), Row(2, 2, "two"), Row(4, 4, "four")))
}
test("union two streams") {
val inputData1 = MemoryStream[Int]
val inputData2 = MemoryStream[Int]
val unioned = inputData1.toDS().union(inputData2.toDS())
testStream(unioned)(
AddData(inputData1, 1, 3, 5),
CheckAnswer(1, 3, 5),
AddData(inputData2, 2, 4, 6),
CheckAnswer(1, 2, 3, 4, 5, 6),
StopStream,
AddData(inputData1, 7),
StartStream(),
AddData(inputData2, 8),
CheckAnswer(1, 2, 3, 4, 5, 6, 7, 8))
}
test("sql queries") {
val inputData = MemoryStream[Int]
inputData.toDF().createOrReplaceTempView("stream")
val evens = sql("SELECT * FROM stream WHERE value % 2 = 0")
testStream(evens)(
AddData(inputData, 1, 2, 3, 4),
CheckAnswer(2, 4))
}
test("DataFrame reuse") {
def assertDF(df: DataFrame) {
withTempDir { outputDir =>
withTempDir { checkpointDir =>
val query = df.writeStream.format("parquet")
.option("checkpointLocation", checkpointDir.getAbsolutePath)
.start(outputDir.getAbsolutePath)
try {
query.processAllAvailable()
val outputDf = spark.read.parquet(outputDir.getAbsolutePath).as[Long]
checkDataset[Long](outputDf, (0L to 10L).toArray: _*)
} finally {
query.stop()
}
}
}
}
val df = spark.readStream.format(classOf[FakeDefaultSource].getName).load()
assertDF(df)
assertDF(df)
}
test("unsupported queries") {
val streamInput = MemoryStream[Int]
val batchInput = Seq(1, 2, 3).toDS()
def assertError(expectedMsgs: Seq[String])(body: => Unit): Unit = {
val e = intercept[AnalysisException] {
body
}
expectedMsgs.foreach { s => assert(e.getMessage.contains(s)) }
}
// Running streaming plan as a batch query
assertError("start" :: Nil) {
streamInput.toDS.map { i => i }.count()
}
// Running non-streaming plan with as a streaming query
assertError("without streaming sources" :: "start" :: Nil) {
val ds = batchInput.map { i => i }
testStream(ds)()
}
// Running streaming plan that cannot be incrementalized
assertError("not supported" :: "streaming" :: Nil) {
val ds = streamInput.toDS.map { i => i }.sort()
testStream(ds)()
}
}
test("minimize delay between batch construction and execution") {
// For each batch, we would retrieve new data's offsets and log them before we run the execution
// This checks whether the key of the offset log is the expected batch id
def CheckOffsetLogLatestBatchId(expectedId: Int): AssertOnQuery =
AssertOnQuery(_.offsetLog.getLatest().get._1 == expectedId,
s"offsetLog's latest should be $expectedId")
// For each batch, we would log the state change during the execution
// This checks whether the key of the state change log is the expected batch id
def CheckIncrementalExecutionCurrentBatchId(expectedId: Int): AssertOnQuery =
AssertOnQuery(_.lastExecution.asInstanceOf[IncrementalExecution].currentBatchId == expectedId,
s"lastExecution's currentBatchId should be $expectedId")
// For each batch, we would log the sink change after the execution
// This checks whether the key of the sink change log is the expected batch id
def CheckSinkLatestBatchId(expectedId: Int): AssertOnQuery =
AssertOnQuery(_.sink.asInstanceOf[MemorySink].latestBatchId.get == expectedId,
s"sink's lastBatchId should be $expectedId")
val inputData = MemoryStream[Int]
testStream(inputData.toDS())(
StartStream(ProcessingTime("10 seconds"), new StreamManualClock),
/* -- batch 0 ----------------------- */
// Add some data in batch 0
AddData(inputData, 1, 2, 3),
AdvanceManualClock(10 * 1000), // 10 seconds
/* -- batch 1 ----------------------- */
// Check the results of batch 0
CheckAnswer(1, 2, 3),
CheckIncrementalExecutionCurrentBatchId(0),
CheckOffsetLogLatestBatchId(0),
CheckSinkLatestBatchId(0),
// Add some data in batch 1
AddData(inputData, 4, 5, 6),
AdvanceManualClock(10 * 1000),
/* -- batch _ ----------------------- */
// Check the results of batch 1
CheckAnswer(1, 2, 3, 4, 5, 6),
CheckIncrementalExecutionCurrentBatchId(1),
CheckOffsetLogLatestBatchId(1),
CheckSinkLatestBatchId(1),
AdvanceManualClock(10 * 1000),
AdvanceManualClock(10 * 1000),
AdvanceManualClock(10 * 1000),
/* -- batch __ ---------------------- */
// Check the results of batch 1 again; this is to make sure that, when there's no new data,
// the currentId does not get logged (e.g. as 2) even if the clock has advanced many times
CheckAnswer(1, 2, 3, 4, 5, 6),
CheckIncrementalExecutionCurrentBatchId(1),
CheckOffsetLogLatestBatchId(1),
CheckSinkLatestBatchId(1),
/* Stop then restart the Stream */
StopStream,
StartStream(ProcessingTime("10 seconds"), new StreamManualClock(60 * 1000)),
/* -- batch 1 rerun ----------------- */
// this batch 1 would re-run because the latest batch id logged in offset log is 1
AdvanceManualClock(10 * 1000),
/* -- batch 2 ----------------------- */
// Check the results of batch 1
CheckAnswer(1, 2, 3, 4, 5, 6),
CheckIncrementalExecutionCurrentBatchId(1),
CheckOffsetLogLatestBatchId(1),
CheckSinkLatestBatchId(1),
// Add some data in batch 2
AddData(inputData, 7, 8, 9),
AdvanceManualClock(10 * 1000),
/* -- batch 3 ----------------------- */
// Check the results of batch 2
CheckAnswer(1, 2, 3, 4, 5, 6, 7, 8, 9),
CheckIncrementalExecutionCurrentBatchId(2),
CheckOffsetLogLatestBatchId(2),
CheckSinkLatestBatchId(2))
}
test("insert an extraStrategy") {
try {
spark.experimental.extraStrategies = TestStrategy :: Nil
val inputData = MemoryStream[(String, Int)]
val df = inputData.toDS().map(_._1).toDF("a")
testStream(df)(
AddData(inputData, ("so slow", 1)),
CheckAnswer("so fast"))
} finally {
spark.experimental.extraStrategies = Nil
}
}
testQuietly("fatal errors from a source should be sent to the user") {
for (e <- Seq(
new VirtualMachineError {},
new ThreadDeath,
new LinkageError,
new ControlThrowable {}
)) {
val source = new Source {
override def getOffset: Option[Offset] = {
throw e
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
throw e
}
override def schema: StructType = StructType(Array(StructField("value", IntegerType)))
override def stop(): Unit = {}
}
val df = Dataset[Int](sqlContext.sparkSession, StreamingExecutionRelation(source))
testStream(df)(
ExpectFailure()(ClassTag(e.getClass))
)
}
}
test("output mode API in Scala") {
val o1 = OutputMode.Append
assert(o1 === InternalOutputModes.Append)
val o2 = OutputMode.Complete
assert(o2 === InternalOutputModes.Complete)
}
test("explain") {
val inputData = MemoryStream[String]
val df = inputData.toDS().map(_ + "foo")
// Test `explain` not throwing errors
df.explain()
val q = df.writeStream.queryName("memory_explain").format("memory").start()
.asInstanceOf[StreamExecution]
try {
assert("No physical plan. Waiting for data." === q.explainInternal(false))
assert("No physical plan. Waiting for data." === q.explainInternal(true))
inputData.addData("abc")
q.processAllAvailable()
val explainWithoutExtended = q.explainInternal(false)
// `extended = false` only displays the physical plan.
assert("LocalRelation".r.findAllMatchIn(explainWithoutExtended).size === 0)
assert("LocalTableScan".r.findAllMatchIn(explainWithoutExtended).size === 1)
val explainWithExtended = q.explainInternal(true)
// `extended = true` displays 3 logical plans (Parsed/Optimized/Optimized) and 1 physical
// plan.
assert("LocalRelation".r.findAllMatchIn(explainWithExtended).size === 3)
assert("LocalTableScan".r.findAllMatchIn(explainWithExtended).size === 1)
} finally {
q.stop()
}
}
}
/**
* A fake StreamSourceProvider thats creates a fake Source that cannot be reused.
*/
class FakeDefaultSource extends StreamSourceProvider {
private val fakeSchema = StructType(StructField("a", IntegerType) :: Nil)
override def sourceSchema(
spark: SQLContext,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): (String, StructType) = ("fakeSource", fakeSchema)
override def createSource(
spark: SQLContext,
metadataPath: String,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): Source = {
// Create a fake Source that emits 0 to 10.
new Source {
private var offset = -1L
override def schema: StructType = StructType(StructField("a", IntegerType) :: Nil)
override def getOffset: Option[Offset] = {
if (offset >= 10) {
None
} else {
offset += 1
Some(LongOffset(offset))
}
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
val startOffset = start.map(_.asInstanceOf[LongOffset].offset).getOrElse(-1L) + 1
spark.range(startOffset, end.asInstanceOf[LongOffset].offset + 1).toDF("a")
}
override def stop() {}
}
}
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala | Scala | apache-2.0 | 11,797 |
// Copyright 2018 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scalarules.test
import org.scalatest._
object TestUtil {
def foo: String = "bar"
}
class ScalaSuite extends FlatSpec {
"HelloLib" should "call scala" in {
assert(HelloLib.getText("hello").equals("hello world!"))
}
}
| bazelbuild/bazel-federation | rules_scala/HelloLibTest.scala | Scala | apache-2.0 | 853 |
/**
* Copyright 2015 ICT.
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.ac.ict.acs.netflow.loader
class Loader {
}
| DataSysLab/netflow | core/src/main/scala/cn/ac/ict/acs/netflow/loader/Loader.scala | Scala | apache-2.0 | 884 |
package spa.client.modules
import diode.Circuit
import diode.react._
import diode.react.ReactPot._
import diode.data.{Empty, Pot}
import japgolly.scalajs.react.vdom.prefix_<^._
import japgolly.scalajs.react._
import japgolly.scalajs.react.extra.router.RouterCtl
import spa.client.SPAMain._
import spa.client.components.Bootstrap.{Button, Panel}
import spa.client.components.GlobalStyles
import spa.client.logger._
import spa.client.services.{FeedbackHandler, FeedbackResponse, SendFeedback}
import spa.shared.{EmailFormData, EmailValidation}
/**
* Created by skypage on 6/14/16.
*/
object Feedback {
case class Props(router: RouterCtl[Loc])
case class FeedbackResponseModel(sent: Pot[FeedbackResponse])
object FeedbackCircuit extends Circuit[FeedbackResponseModel] with ReactConnector[FeedbackResponseModel] {
// initial application model
override protected def initialModel = FeedbackResponseModel(Empty)
// combine all handlers into one
override protected val actionHandler = composeHandlers(
new FeedbackHandler(zoomRW(_.sent)((m, v) => m.copy(sent = v)))
)
}
val feedbackWrapper = FeedbackCircuit.connect(_.sent)
private val component = ReactComponentB[Props]("SearchableComponent")
.render_P { p =>
feedbackWrapper(FeedbackForm(_))
}.build
/** Returns a function compatible with router location system while using our own props */
def apply(router: RouterCtl[Loc]) = component(Props(router))
}
object FeedbackForm {
@inline private def bss = GlobalStyles.bootstrapStyles
case class Props(proxy: ModelProxy[Pot[FeedbackResponse]])
case class State(name: String = "",
message: String = "",
subject: String = "",
email: String = "",
validInput: Boolean = true,
inputMessage: String = "",
submitDisabled: Boolean = false,
submitClicked: Boolean = false
)
class Backend($: BackendScope[Props, State]) {
def onSubmit (proxy: ModelProxy[Pot[FeedbackResponse]]) = {
var validInput: Boolean = true
$.modState( s => {
if (s.subject.trim.isEmpty) {
validInput = false
s.copy(validInput = false, inputMessage = "You're missing a subject", submitClicked = true)
}
else if (s.message.trim.isEmpty) {
validInput = false
s.copy(validInput = false, inputMessage = "You're missing a message", submitClicked = true)
}
else if (s.email.trim.nonEmpty) {
if (EmailValidation.isValid(s.email)) {
s.copy(validInput = true, inputMessage = "", submitDisabled = true, submitClicked = true)
}
else {
validInput = false
s.copy(validInput = false, inputMessage = "Your email is invalid", submitClicked = true)
}
}
else {
s.copy(validInput = true, inputMessage = "", submitDisabled = true, submitClicked = true)
}
}) >> $.state >>= {s =>
//This chains the state modification Callback to the Callback that communicates with the server
if (validInput) {
val submitData = EmailFormData(s.name, s.email, s.subject, s.message)
proxy.dispatch(SendFeedback(submitData))
}
else Callback.log("Input isn't valid yet")
}
}
def onSubjectChange(e: ReactEventI) = {
val text = e.target.value.toString
$.modState(s => {
s.copy(subject = text)
})
}
def onMessageChange(e: ReactEventI) = {
val text = e.target.value.toString
$.modState(s => {
s.copy(message = text)
})
}
def onNameChange(e: ReactEventI) = {
val text = e.target.value.toString
$.modState(s => {
s.copy(name = text)
})
}
def onEmailChange(e: ReactEventI) = {
val text = e.target.value.toString
$.modState(s => {
s.copy(email = text)
})
}
def render(p: Props, s: State) = {
<.div(^.className:="col-md-8 col-md-offset-2")(
Panel(Panel.Props("Feedback"),
<.form()(
<.div(^.className:="form-group")(
<.label(^.`for`:="subject")("Subject:"), <.br,
<.input.text(^.id:="subject", ^.className:="form-control", ^.onChange ==> onSubjectChange), <.br,
<.label(^.`for`:="message")("Message:"), <.br,
<.textarea(^.id:="message", ^.className:="form-control", ^.rows:="5", ^.onChange ==> onMessageChange), <.br, <.br,
<.b("Optional information:"),
<.p()(
"""You may leave everything below blank to send a completely anonymous note,
or you may provide what information you like. I can't reply to your message
without an email address, so keep that in mind. Your email address will only
be used as a possible way for me to reply to you."""),
<.label(^.`for`:="name")("Your name:"), <.br,
<.input.text(^.id:="name", ^.className:="form-control", ^.onChange ==> onNameChange), <.br,
<.label(^.`for`:="email")("Your email:"), <.br,
<.input.email(^.id:="email", ^.className:="form-control", ^.onChange ==> onEmailChange), <.br,
Button(Button.Props(onSubmit(p.proxy), addStyles = Seq(bss.pullRight, bss.button), disabled = s.submitDisabled), "Submit"),
<.div(
if (s.submitClicked) {
if (s.validInput) {
<.span(
p.proxy().renderFailed(ex => <.p("Load failed")), p.proxy().renderPending(pend => <.p("Loading...")),
p.proxy().render(result =>
if (result.sent) {
<.p("Success! Thanks for your input!")
}
else <.p("Submitting your message...")
)
)
}
else {
<.span(
<.p(^.color := "red")("Looks like you made a mistake:"),
<.p(^.color := "red")(s.inputMessage)
)
}
} else <.span
)
)
)
)
)
}
}
val component = ReactComponentB[Props]("Feedback")
.initialState({
State()
})
.renderBackend[Backend]
.build
def apply(sent: ModelProxy[Pot[FeedbackResponse]]) = component(Props(sent))
}
| IceGiant/internet-abridged | client/src/main/scala/spa/client/modules/Feedback.scala | Scala | apache-2.0 | 6,578 |
package pl.touk.nussknacker.engine.compiledgraph
import cats.data.NonEmptyList
import pl.touk.nussknacker.engine.api.MetaData
case class CompiledProcessParts(metaData: MetaData, sources: NonEmptyList[part.PotentiallyStartPart]) | TouK/nussknacker | interpreter/src/main/scala/pl/touk/nussknacker/engine/compiledgraph/CompiledProcessParts.scala | Scala | apache-2.0 | 229 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.osgi
import java.io.{InputStream, IOException, File}
import scala.reflect.io.AbstractFile
import java.net.URL
import java.lang.String
import org.osgi.framework.{ServiceReference, Bundle}
import collection.mutable.{ListBuffer,LinkedHashSet}
import org.osgi.service.packageadmin.PackageAdmin
import org.fusesource.scalate.util.{Log, Strings}
/**
* Helper methods to transform OSGi bundles into {@link AbstractFile} implementations
* suitable for use with the Scala compiler
*/
object BundleClassPathBuilder {
val log = Log(getClass); import log._
/**
   * Create a list of AbstractFile instances, representing the bundle and its wired dependencies
*/
def fromBundle(bundle: Bundle) : List[AbstractFile] = {
require(bundle != null, "Bundle should not be null")
// add the bundle itself
val files = ListBuffer(create(bundle))
// also add all bundles that have exports wired to imports from this bundle
files.appendAll(fromWires(bundle))
files.toList
}
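  // Editorial usage sketch: the resulting AbstractFiles are meant to be appended to an
  // embedded Scala compiler's classpath. Only `fromBundle` comes from this object; how the
  // files are handed to the compiler is assumed, not shown in this file.
  //
  //   val bundleFiles: List[AbstractFile] = BundleClassPathBuilder.fromBundle(bundle)
  //   // bundleFiles can then be wrapped into classpath entries for scala.tools.nsc.Global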
/**
   * Find bundles that have exports wired to the given bundle
*/
def fromWires(bundle: Bundle) : List[AbstractFile] = {
debug("Checking OSGi bundle wiring for %s", bundle)
val context = bundle.getBundleContext
var ref: ServiceReference = context.getServiceReference(classOf[PackageAdmin].getName)
if (ref == null) {
warn("PackageAdmin service is unavailable - unable to check bundle wiring information")
return List()
}
try {
var admin: PackageAdmin = context.getService(ref).asInstanceOf[PackageAdmin]
if (admin == null) {
warn("PackageAdmin service is unavailable - unable to check bundle wiring information")
return List()
}
return fromWires(admin, bundle)
} finally {
context.ungetService(ref)
}
}
def fromWires(admin: PackageAdmin, bundle: Bundle) : List[AbstractFile] = {
val exported = admin.getExportedPackages(null : Bundle)
val set = new LinkedHashSet[Bundle]
for (pkg <- exported; if pkg.getExportingBundle.getBundleId != 0) {
val bundles = pkg.getImportingBundles();
if (bundles != null) {
for (b <- bundles; if b.getBundleId == bundle.getBundleId) {
debug("Bundle imports %s from %s",pkg,pkg.getExportingBundle)
if (b.getBundleId == 0) {
debug("Ignoring system bundle")
} else {
set += pkg.getExportingBundle
}
}
}
}
set.map(create(_)).toList
}
/**
   * Create a new {@link AbstractFile} instance representing an
   * {@link org.osgi.framework.Bundle}
*
* @param bundle the bundle
*/
def create(bundle: Bundle): AbstractFile = {
require(bundle != null, "Bundle should not be null")
abstract class BundleEntry(url: URL, parent: DirEntry) extends AbstractFile {
require(url != null, "url must not be null")
lazy val (path: String, name: String) = getPathAndName(url)
lazy val fullName: String = (path::name::Nil).filter(n => !Strings.isEmpty(n)).mkString("/")
/**
* @return null
*/
def file: File = null
/**
* @return last modification time or 0 if not known
*/
def lastModified: Long =
try { url.openConnection.getLastModified }
catch { case _: Exception => 0 }
@throws(classOf[IOException])
def container: AbstractFile =
valueOrElse(parent) {
throw new IOException("No container")
}
@throws(classOf[IOException])
def input: InputStream = url.openStream
/**
* Not supported. Always throws an IOException.
* @throws IOException
*/
@throws(classOf[IOException])
def output = throw new IOException("not supported: output")
private def getPathAndName(url: URL): (String, String) = {
val u = url.getPath
var k = u.length
while( (k > 0) && (u(k - 1) == '/') )
k = k - 1
var j = k
while( (j > 0) && (u(j - 1) != '/') )
j = j - 1
(u.substring(if (j > 0) 1 else 0, if (j > 1) j - 1 else j), u.substring(j, k))
}
override def toString = fullName
}
class DirEntry(url: URL, parent: DirEntry) extends BundleEntry(url, parent) {
/**
* @return true
*/
def isDirectory: Boolean = true
override def iterator: Iterator[AbstractFile] = {
new Iterator[AbstractFile]() {
val dirs = bundle.getEntryPaths(fullName)
var nextEntry = prefetch()
def hasNext() = {
if (nextEntry == null)
nextEntry = prefetch()
nextEntry != null
}
def next() = {
if (hasNext()) {
val entry = nextEntry
nextEntry = null
entry
}
else {
throw new NoSuchElementException()
}
}
private def prefetch() = {
if (dirs.hasMoreElements) {
val entry = dirs.nextElement.asInstanceOf[String]
var entryUrl = bundle.getResource("/" + entry)
// Bundle.getResource seems to be inconsistent with respect to requiring
// a trailing slash
if (entryUrl == null)
                entryUrl = bundle.getResource("/" + removeTrailingSlash(entry))
              // If still null, OSGi won't let us load that resource for some reason
if (entryUrl == null) {
null
}
else {
if (entry.endsWith(".class"))
new FileEntry(entryUrl, DirEntry.this)
else
new DirEntry(entryUrl, DirEntry.this)
}
}
else
null
}
          private def removeTrailingSlash(s: String): String =
            if (s == null || s.length == 0)
              s
            else if (s.last == '/')
              removeTrailingSlash(s.substring(0, s.length - 1))
else
s
}
}
def lookupName(name: String, directory: Boolean): AbstractFile = {
val entry = bundle.getEntry(fullName + "/" + name)
nullOrElse(entry) { entry =>
if (directory)
new DirEntry(entry, DirEntry.this)
else
new FileEntry(entry, DirEntry.this)
}
}
override def lookupPathUnchecked(path: String, directory: Boolean) = lookupPath(path, directory)
def lookupNameUnchecked(name: String, directory: Boolean) = lookupName(path, directory)
def absolute = unsupported("absolute() is unsupported")
def create = unsupported("create() is unsupported")
def delete = unsupported("create() is unsupported")
}
class FileEntry(url: URL, parent: DirEntry) extends BundleEntry(url, parent) {
/**
* @return false
*/
def isDirectory: Boolean = false
override def sizeOption: Option[Int] = Some(bundle.getEntry(fullName).openConnection().getContentLength())
def lookupName(name: String, directory: Boolean): AbstractFile = null
override def lookupPathUnchecked(path: String, directory: Boolean) = lookupPath(path, directory)
def lookupNameUnchecked(name: String, directory: Boolean) = lookupName(path, directory)
def iterator = Iterator.empty
def absolute = unsupported("absolute() is unsupported")
def create = unsupported("create() is unsupported")
def delete = unsupported("create() is unsupported")
}
new DirEntry(bundle.getResource("/"), null) {
override def toString = "AbstractFile[" + bundle + "]"
}
}
/**
* Evaluate <code>f</code> on <code>s</code> if <code>s</code> is not null.
* @param s
* @param f
* @return <code>f(s)</code> if s is not <code>null</code>, <code>null</code> otherwise.
*/
def nullOrElse[S, T](s: S)(f: S => T): T =
if (s == null) null.asInstanceOf[T]
else f(s)
/**
* @param t
* @param default
* @return <code>t</code> or <code>default</code> if <code>null</code>.
*/
def valueOrElse[T](t: T)(default: => T) =
if (t == null) default
else t
}
| janurag/scalate | scalate-core/src/main/scala/org/fusesource/scalate/osgi/BundleClassPathBuilder.scala | Scala | apache-2.0 | 9,175 |
/**
* A simple text based RPG
*
* @package simplerpg
* @copyright 2015
*/
package simplerpg.action
import simplerpg.Player
import simplerpg.World
final class InvalidAction extends Action {
def run(currentPlayer: Player, world: World): Option[Action] = {
printAction("Invalid action given")
}
}
| mcross1882/SimpleRPG | src/main/scala/simplerpg/action/InvalidAction.scala | Scala | mit | 320 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.eventhubs.checkpoint
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkContext
import org.apache.spark.internal.Logging
/**
* A DFS based OffsetStore implementation
*/
@SerialVersionUID(1L)
class DfsBasedOffsetStore(
directory: String,
namespace: String,
name: String,
partition: String) extends OffsetStore with Logging {
if (!SparkContext.getOrCreate().isLocal) {
require(directory.startsWith("hdfs://") || directory.startsWith("adl://"),
"we only support to store offset in HDFS/ADLS when running Spark in non-local mode ")
}
var path: Path = _
var backupPath: Path = _
var checkpointFile: FileSystem = _
var backupCheckpointFile: FileSystem = _
/**
* Open two files, the actual checkpoint file and the backup checkpoint file
*/
override def open(): Unit = {
if (checkpointFile == null) {
path = new Path(directory + "/" + namespace + "/" + name + "/" + partition)
checkpointFile = path.getFileSystem(new Configuration())
}
if (backupCheckpointFile == null) {
backupPath = new Path(directory + "/" + namespace + "/" + name + "/" + partition + ".bk")
backupCheckpointFile = backupPath.getFileSystem(new Configuration())
}
}
/**
   * @param offset the offset value to persist for this partition
* Write happens in three steps - one read and two writes. The first is a read attempt made on
* the actual checkpoint file. This is to ensure that the checkpoint file contains valid offset.
* If successful it means the actual checkpoint file can be updated only if the backup checkpoint
* update is successful. The second is a write attempt on the backup checkpoint file. Once that
* write is successful or the read of the actual checkpoint file was unsuccessful the third is a
* write attempt on the actual checkpoint file. In case of any failure at the time of write at
* least one file will contain a valid offset value which in the worst case will be a previous
* offset value (if one or more of them had valid offset values to begin with). The at least once
* guarantee still holds.
*/
override def write(offset: String): Unit = {
var readSuccessful: Boolean = false
var writeSuccessful: Boolean = false
if (checkpointFile.exists(path)) {
val stream = checkpointFile.open(path)
try {
stream.readUTF()
readSuccessful = true
} catch {
case e: Exception =>
logTrace(s"Failed to read offset from checkpoint file $path before write.", e)
} finally {
stream.close()
}
}
if (readSuccessful) {
val backupStream = backupCheckpointFile.create(backupPath, true)
try {
backupStream.writeUTF(offset)
writeSuccessful = true
} catch {
case e: Exception =>
logError(s"Failed to write offset to backup checkpoint file $backupPath", e)
} finally {
backupStream.close()
}
}
if (writeSuccessful || !readSuccessful) {
val stream = checkpointFile.create(path, true)
try {
stream.writeUTF(offset)
writeSuccessful = true
} catch {
case e: Exception => logError(s"Failed to write offset to checkpoint file $path.", e)
} finally {
stream.close()
}
}
if (!writeSuccessful) {
throw new Exception(s"Failed to write offset information for partition $partition.")
}
}
/**
* Read happens in two steps. The first read attempt happens on the actual checkpoint file.
* There are three possible situations:
* 1.1) The actual checkpoint directory does not exist.
* 1.2) The actual checkpoint directory exists but empty.
* 1.3) The actual checkpoint directory exists and contains offset information.
* For case 1.3) offset is read and the job continues. For cases 1.1) and 1.2) the second read
* attempt happens on the backup checkpoint file. There are again three possible situations:
* 2.1) The backup checkpoint directory does not exist.
* 2.2) The backup checkpoint directory exists but empty.
* 2.3) The backup checkpoint directory exists and contains offset information.
* The possible actions for the combination of events 1.1, 1.2, 2.1, 2.2, 2.3 are listed below:
* 1.1 + 2.1: Start from starting offset (-1).
* 1.1 + 2.2: Cannot happen.
* 1.1 + 2.3: Start from the offset in the backup checkpoint file.
* 1.2 + 2.1: Cannot happen.
* 1.2 + 2.2: Cannot happen.
* 1.2 + 2.3: Start from the offset in the backup checkpoint file.
*/
override def read(): String = {
var fileExists: Boolean = false
var readSuccessful: Boolean = false
var offset: String = "-1"
if (checkpointFile.exists(path)) {
fileExists = true
val stream = checkpointFile.open(path)
try {
offset = stream.readUTF()
readSuccessful = true
} catch {
case e: Exception => logError(s"Failed to read offset from checkpoint file $path.", e)
} finally {
stream.close()
}
}
if (!readSuccessful) {
if (backupCheckpointFile.exists(backupPath)) {
fileExists = true
val backupStream = backupCheckpointFile.open(backupPath)
try {
offset = backupStream.readUTF()
readSuccessful = true
} catch {
case e: Exception =>
logError(s"Failed to read offset from backup checkpoint file $backupPath.")
} finally {
backupStream.close()
}
}
}
if (fileExists && !readSuccessful) {
throw new Exception(s"Failed to read offset information for partition $partition.")
}
offset
}
override def close(): Unit = {
// pass
}
}
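// Editorial usage sketch: one store per partition, opened once and then written/read as the
// stream progresses. The directory, namespace, name and partition values are illustrative only.
//
//   val store = new DfsBasedOffsetStore("hdfs://namenode:8020/checkpoints", "ns1", "hub1", "0")
//   store.open()
//   store.write("12345")           // persist the latest offset
//   val lastOffset = store.read()  // returns "-1" when no checkpoint exists yet
//   store.close()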
| CodingCat/spark-eventhubs | core/src/main/scala/org/apache/spark/streaming/eventhubs/checkpoint/DfsBasedOffsetStore.scala | Scala | apache-2.0 | 6,581 |
/**
* Created by Romain Reuillon on 02/11/16.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package fr.iscpif.doors.server
import java.io.StringWriter
import javax.script.{ScriptEngineManager, SimpleScriptContext}
import better.files._
import fr.iscpif.doors.ext.Data
import com.roundeights.hasher.Hasher
import scala.tools.nsc.interpreter.IMain
object Settings {
def defaultDir = {
val dir = System.getProperty("user.home") / ".doors"
dir.toJava.mkdirs
dir
}
def compile(content: String): Settings = {
def imports =
"""
|import fr.iscpif.doors.server._
|import fr.iscpif.doors.server.db._
|import slick.driver.H2Driver.api._
|
""".stripMargin
val e = new ScriptEngineManager().getEngineByName("scala")
e.asInstanceOf[IMain].settings.embeddedDefaults[Settings]
//e.asInstanceOf[IMain].settings.usejavacp.value = true
val settings = e.eval(imports ++ content).asInstanceOf[Settings]
settings
}
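  // Editorial sketch of the kind of source string `compile` expects: the script must evaluate
  // to a Settings instance, with the imports above prepended automatically. Field values below
  // are placeholders and the SMTPSettings arguments are assumed, not taken from this file.
  //
  //   Settings.compile(
  //     """Settings(
  //       |  port = 8080,
  //       |  publicURL = "https://doors.example.org",
  //       |  salt = "change-me",
  //       |  smtp = SMTPSettings(/* host, port, user, password ... */)
  //       |)""".stripMargin)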
}
case class Settings(
port: Int,
publicURL: String,
salt: String,
smtp: SMTPSettings,
emailValidation: String => lock.EmailValidation = url => lock.EmailValidation()(url),
dbLocation: File = Settings.defaultDir / "h2",
hashingAlgorithm: HashingAlgorithm = HashingAlgorithm.default
) {
def emailValidationInstance = emailValidation(publicURL)
def resetPassword = lock.ResetPassword()(publicURL)
//def lock[T](id: LockID): Some[T]
}
| ISCPIF/doors | server/src/main/scala/fr/iscpif/doors/server/Settings.scala | Scala | agpl-3.0 | 2,079 |
package scala.collection.immutable
import org.junit.Assert.{assertEquals, assertFalse, assertSame, assertTrue}
import org.junit.{Assert, Test}
object ChampMapSmokeTest {
private def emptyMap[K, V]: HashMap[K, V] =
HashMap.empty[K, V]
private def mapOf[K, V](keyValuePairs: (K, V)*): HashMap[K, V] = {
val builder = HashMap.newBuilder[K, V]
keyValuePairs.foreach(builder.addOne)
builder.result()
}
def mkTuple[KV](keyValue: KV): (KV, KV) = keyValue -> keyValue
def mkTuple[K, V](key: K, value: V): (K, V) = key -> value
def mkValue(value: Int) = new CustomHashInt(value, value)
def mkValue(value: Int, hash: Int) = new CustomHashInt(value, hash)
}
class ChampMapSmokeTest {
import ChampMapSmokeTest._
val v11h1 = mkValue(11, 1)
val v12h1 = mkValue(12, 1)
val v1h1 = mkValue(1, 1)
val v5h5 = mkValue(5, 5)
val v32769 = mkValue(32769, 32769)
val v32769a = mkValue(32769*10+1, 32769)
val v32769b = mkValue(32769*10+2, 32769)
@Test def testCheckPrefixConstruction(): Unit = {
val map: HashMap[Int, Int] = emptyMap
val res1 = map + mkTuple(63) + mkTuple(64) + mkTuple(32768) + mkTuple(2147483647) + mkTuple(65536)
assert(res1.contains(63))
assert(res1.contains(64))
assert(res1.contains(32768))
assert(res1.contains(65536))
assert(res1.contains(2147483647))
val res2 = map + mkTuple(2147483647) + mkTuple(32768) + mkTuple(63) + mkTuple(64) + mkTuple(65536)
assert(res2.contains(63))
assert(res2.contains(64))
assert(res2.contains(32768))
assert(res2.contains(65536))
assert(res2.contains(2147483647))
assert(res1 == res2)
}
@Test def testCheckCompactionFromBeginUponDelete(): Unit = {
val map: HashMap[Int, Int] = emptyMap
val res1 = map + mkTuple(1) + mkTuple(2)
@annotation.unused val res2 = res1 + mkTuple(32769) - 2
/* should trigger assertion in data structure if not compacting */
}
@Test def testCheckCompactionFromMiddleUponDelete(): Unit = {
val map: HashMap[Int, Int] = emptyMap
val res1 = map + mkTuple(1) + mkTuple(2) + mkTuple(65) + mkTuple(66)
val res2 = res1 + mkTuple(32769) - 66
assert(!(res1 == res2))
}
@Test def testCheckCompactionFromBeginUponDelete_HashCollisionNode1(): Unit = {
val map: HashMap[CustomHashInt, CustomHashInt] = emptyMap
val res1 = map + mkTuple(v11h1) + mkTuple(v12h1)
assertTrue(res1.contains(v11h1))
assertTrue(res1.contains(v12h1))
val res2 = res1 - v12h1
assertTrue(res2.contains(v11h1))
assertEquals(mapOf() + mkTuple(v11h1), res2)
val res3 = res1 - v11h1
assertTrue(res3.contains(v12h1))
assertEquals(mapOf() + mkTuple(v12h1, v12h1), res3)
val resX = res1 + mkTuple(v32769) - v12h1
assertTrue(resX.contains(v11h1))
assertTrue(resX.contains(v32769))
assert(!(res1 == resX))
}
@Test def testCheckCompactionFromBeginUponDelete_HashCollisionNode2(): Unit = {
val map: HashMap[CustomHashInt, CustomHashInt] = emptyMap
val res1 = map + mkTuple(v32769a) + mkTuple(v32769b)
assertEquals(2, res1.size)
assertTrue(res1.contains(v32769a))
assertTrue(res1.contains(v32769b))
val res2 = res1 + mkTuple(v1h1)
assertEquals(3, res2.size)
assertTrue(res2.contains(v1h1))
assertTrue(res2.contains(v32769a))
assertTrue(res2.contains(v32769b))
val res3 = res2 - v32769b
assertEquals(2, res3.size)
assertTrue(res3.contains(v1h1))
assertTrue(res3.contains(v32769a))
println(scala.runtime.ScalaRunTime.getClass.getProtectionDomain.getCodeSource)
val expected = mapOf(mkTuple(v1h1), mkTuple(v32769a))
assertEquals(expected, res3)
}
@Test def testCheckCompactionFromBeginUponDelete_HashCollisionNode3(): Unit = {
val map: HashMap[CustomHashInt, CustomHashInt] = emptyMap
val res1 = map + mkTuple(v32769a) + mkTuple(v32769b)
assertEquals(2, res1.size)
assertTrue(res1.contains(v32769a))
assertTrue(res1.contains(v32769b))
val res2 = res1 + mkTuple(v1h1)
assertEquals(3, res2.size)
assertTrue(res2.contains(v1h1))
assertTrue(res2.contains(v32769a))
assertTrue(res2.contains(v32769b))
val res3 = res2 - v1h1
assertEquals(2, res3.size)
assertTrue(res3.contains(v32769a))
assertTrue(res3.contains(v32769b))
assertEquals(res1, res3)
}
@Test def testCheckCompactionFromBeginUponDelete_HashCollisionNode4(): Unit = {
val map: HashMap[CustomHashInt, CustomHashInt] = emptyMap
val res1 = map + mkTuple(v32769a) + mkTuple(v32769b)
assertEquals(2, res1.size)
assertTrue(res1.contains(v32769a))
assertTrue(res1.contains(v32769b))
val res2 = res1 + mkTuple(v5h5)
assertEquals(3, res2.size)
assertTrue(res2.contains(v5h5))
assertTrue(res2.contains(v32769a))
assertTrue(res2.contains(v32769b))
val res3 = res2 - v5h5
assertEquals(2, res3.size)
assertTrue(res3.contains(v32769a))
assertTrue(res3.contains(v32769b))
assertEquals(res1, res3)
}
@Test def testCreateSingletonWithFactoryMethod(): Unit = {
val map: HashMap[Int, Int] = emptyMap + mkTuple(63, 65)
assertTrue(map.contains(63))
assertEquals(65, map.get(63).get)
}
@Test def testRemoveFromSingleton(): Unit = {
val map: HashMap[Int, Int] = emptyMap + mkTuple(63, 65)
val res = map - 63
assertTrue(res.isEmpty)
assertFalse(res.contains(63))
assertEquals(emptyMap, res)
}
object O1 { override def hashCode = 1 ; override def toString = "O1"}
class C(val i: Int) { override def hashCode = i % 4 ; override def toString = s"C($i)" }
val cs = Array.tabulate(4096)(new C(_))
private def assertSameEqHash(expected: HashMap[Any, Any], actual: HashMap[Any, Any]) = {
assertEquals(List.from(actual).size, actual.size)
assertEquals(expected.size, actual.size)
assertEquals(expected.rootNode.cachedJavaKeySetHashCode, actual.rootNode.cachedJavaKeySetHashCode)
assertEquals(expected.hashCode(), actual.hashCode())
}
private def value(i: Int) = new String("" + i)
@Test def testCachedSizeAndHashCode(): Unit = {
var map: HashMap[Any, Any] = emptyMap + mkTuple(O1, "O1_V1")
assertEquals(1, map.size)
map = map + mkTuple(O1, "O1_V2")
val expected: HashMap[Any, Any] = emptyMap + mkTuple(O1, "O1_V2")
assertSameEqHash(expected, map)
}
@Test def testCachedSizeAndHashCodeCollision(): Unit = {
var map: HashMap[Any, Any] = emptyMap
for (c <- cs)
map = map.updated(c, value(c.i))
var map1 = map
for (c <- cs) {
map1 = map1.updated(c, value(c.i))
assertEquals(map.rootNode.cachedJavaKeySetHashCode, map1.rootNode.cachedJavaKeySetHashCode)
if (c.i % 41 == 0)
assertEquals(map, map1)
}
assertEquals(map, map1)
assertSameEqHash(map1, map)
var map2 = map + mkTuple(O1, "O1_V2")
map2 = map2 + mkTuple(O1, "O1_V2")
assertSameEqHash(map1 + mkTuple(O1, "O1_V2"), map2)
}
@Test def replacedValue(): Unit = {
val s1, s2 = new String("s") // equals, but distinct references,
val key = "k"
var map = emptyMap[Any, Any].updated(key, s1).updated(key, s2)
Assert.assertSame(s2, map.apply(key))
class collision() { override def hashCode = key.hashCode}
for (i <- (0 to 1024)) map = map.updated(new collision(), "")
Assert.assertSame(s1, map.updated(key, s1).apply(key))
}
@Test def replacedValueIdentical(): Unit = {
case class A(a: Int)
val map = emptyMap[Any, Any]
val a = A(1)
val map1 = map.updated(1, a)
val map2 = map1.updated(1, a)
assertSame(map1, map2)
}
@Test def replacedValueIdenticalCollision(): Unit = {
val k0 = new C(0)
val k1 = new C(4)
assertEquals(k0.hashCode, k1.hashCode)
val map = emptyMap[Any, Any].updated(k0, 0)
val v1 = "v1"
val map1 = map.updated(k1, v1)
val map2 = map1.updated(k1, v1)
assertSame(map1, map2)
}
@Test def nullValue(): Unit = {
val map = emptyMap[Any, Any]
assertEquals(Some(null), map.updated("", null).get(""))
}
@Test def nullValueCollision(): Unit = {
val k0 = new C(0)
val k1 = new C(4)
assertEquals(k0.hashCode, k1.hashCode)
val map = emptyMap[Any, Any].updated(k0, 0).updated(k1, null)
assertEquals(Some(null), map.get(k1))
}
@Test def hashCodeCheck(): Unit = {
assertEquals(2098967416, collection.immutable.HashMap(1 -> 2).hashCode())
}
}
| scala/scala | test/junit/scala/collection/immutable/ChampMapSmokeTest.scala | Scala | apache-2.0 | 8,372 |
import com.thesamet.proto.e2e.`enum`._
import com.thesamet.proto.e2e.enum3._
import scalapb.GeneratedEnumCompanion
import org.scalatest._
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.must.Matchers
class EnumSpec extends AnyFlatSpec with Matchers with OptionValues {
val red = EnumTest(color = Some(Color.RED))
val green = EnumTest(color = Some(Color.GREEN))
val blue = EnumTest(color = Some(Color.BLUE))
val unrecognized = EnumTest(color = Some(Color.Unrecognized(37)))
val noColor = EnumTest()
val innerEnum = EnumTest(innerEnum = Some(EnumTest.InnerEnum.INNER_SUCCESS))
val otherCase = EnumTest(innerEnum = Some(EnumTest.InnerEnum.OtherCase))
"colors" should "serialize and parse" in {
EnumTest.parseFrom(red.toByteArray) must be(red)
EnumTest.parseFrom(green.toByteArray) must be(green)
EnumTest.parseFrom(blue.toByteArray) must be(blue)
EnumTest.parseFrom(noColor.toByteArray) must be(noColor)
EnumTest.parseFrom(innerEnum.toByteArray) must be(innerEnum)
EnumTest.parseFrom(otherCase.toByteArray) must be(otherCase)
EnumTest.parseFrom(unrecognized.toByteArray) must be(unrecognized)
}
"isEnumValue" should "return correct values" in {
red.color.get.isRed must be(true)
red.color.get.isGreen must be(false)
red.color.get.isBlue must be(false)
red.color.get.isUnrecognized must be(false)
green.color.get.isRed must be(false)
green.color.get.isGreen must be(true)
green.color.get.isBlue must be(false)
green.color.get.isUnrecognized must be(false)
blue.color.get.isRed must be(false)
blue.color.get.isGreen must be(false)
blue.color.get.isBlue must be(true)
blue.color.get.isUnrecognized must be(false)
unrecognized.color.get.isRed must be(false)
unrecognized.color.get.isGreen must be(false)
unrecognized.color.get.isBlue must be(false)
unrecognized.color.get.isUnrecognized must be(true)
innerEnum.getInnerEnum.isInnerSuccess must be(true)
innerEnum.getInnerEnum.isOtherCase must be(false)
otherCase.getInnerEnum.isInnerSuccess must be(false)
otherCase.getInnerEnum.isOtherCase must be(true)
}
"pattern matching" should "work for enums" in {
def colorWord(color: Option[Color]) = color match {
case Some(Color.BLUE) => "blue"
case Some(Color.GREEN) => "green"
case Some(Color.RED) => "red"
case Some(Color.Unrecognized(x)) => s"unrecognized:$x"
case None => "none"
}
colorWord(blue.color) must be("blue")
colorWord(red.color) must be("red")
colorWord(green.color) must be("green")
colorWord(unrecognized.color) must be("unrecognized:37")
colorWord(noColor.color) must be("none")
}
"getColor" should "return first value" in {
noColor.getColor must be(Color.RED)
}
"getOtherColor" should "return default value" in {
noColor.getOtherColor must be(Color.BLUE)
red.getOtherColor must be(Color.BLUE)
green.getOtherColor must be(Color.BLUE)
blue.getOtherColor must be(Color.BLUE)
unrecognized.getOtherColor must be(Color.BLUE)
blue.getOtherColor.isBlue must be(true)
}
"update" should "work correctly" in {
red.update(_.color := Color.BLUE) must be(blue)
noColor.update(_.color := Color.RED) must be(red)
}
"concatenated serialized" should "result in merged object" in {
val bytes = (red.toByteArray ++ green.toByteArray ++ otherCase.toByteArray)
val obj = EnumTest.parseFrom(bytes)
obj must be(EnumTest(color = Some(Color.GREEN), innerEnum = Some(EnumTest.InnerEnum.OtherCase)))
}
"missing enum values in proto3" should "be preserved in parsing" in {
val like = EnumTestLike(color = 18) // same field number as `color` in EnumTest3.
val e3 = EnumTest3.parseFrom(like.toByteArray)
e3.color must be(Color3.Unrecognized(18))
e3.color must not be (Color3.Unrecognized(19))
e3.toByteArray must be(like.toByteArray)
}
"missing enum values in proto3 seq" should "be preserved in parsing" in {
val e3 = EnumTest3(colorVector = Seq(Color3.C3_RED, Color3.Unrecognized(15), Color3.C3_BLUE))
EnumTest3.parseFrom(e3.toByteArray) must be(e3)
}
"missing enum values in proto2" should "be preserved in parsing" in {
val like = EnumTestLike(color = 18) // same field number as `color` in EnumTest3.
val e3 = EnumTest.parseFrom(like.toByteArray)
e3.getColor must be(Color.Unrecognized(18))
e3.getColor must not be (Color.Unrecognized(19))
e3.toByteArray must be(like.toByteArray)
}
"color companion" should "be available implicitly" in {
implicitly[GeneratedEnumCompanion[Color]] must be(Color)
}
"fromName" should "resolve values" in {
Color.fromName("RED").value must be(Color.RED)
Color.fromName("GREEN").value must be(Color.GREEN)
Color.fromName("BLUE").value must be(Color.BLUE)
Color.fromName("FUCHSIA") must be(None)
}
"toByteString" should "give the same byte array as toByteArray" in {
val e3 = EnumTest3(colorVector = Seq(Color3.C3_RED, Color3.Unrecognized(15), Color3.C3_BLUE))
e3.toByteString.toByteArray() must be(e3.toByteArray)
}
"Unrecognized" should "be printable" in {
// See https://github.com/scalapb/ScalaPB/issues/225
unrecognized.toProtoString must be("color: 37\n")
}
"Unrecognized" should "be fine" in {
var x =
Color
.Unrecognized(117)
.scalaValueDescriptor // Do not use 117 elsewhere we need to have it gc'ed.
var y = Color.Unrecognized(117).scalaValueDescriptor
x must be theSameInstanceAs y
x = null
y = null
System.gc()
x = Color.Unrecognized(117).scalaValueDescriptor
}
"asRecognized" should "return Some(_) if defined" in {
red.color.get.asRecognized must be(Some(red.color.get))
}
"asRecognized" should "return None if Unrecognized" in {
unrecognized.color.get.asRecognized must be(None)
}
}
| scalapb/ScalaPB | e2e/src/test/scala/EnumSpec.scala | Scala | apache-2.0 | 5,990 |
package modules
import actors.{ConsumerActor, ProcessorActor}
import com.google.inject.AbstractModule
import play.api.libs.concurrent.AkkaGuiceSupport
import schedulers.MainScheduler
/**
 * Configure all akka actors related to the scheduler
*/
class JobModule extends AbstractModule with AkkaGuiceSupport {
def configure() = {
bindActor[ConsumerActor]("consumer-actor")
bindActorFactory[ProcessorActor, ProcessorActor.Factory]
bind(classOf[MainScheduler]).asEagerSingleton()
}
}
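// Editorial note: Guice modules like this one are typically enabled in conf/application.conf,
// e.g. `play.modules.enabled += "modules.JobModule"`, after which MainScheduler is instantiated
// eagerly at application start and the bound actors become injectable.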
| JohnnyCosta/dataconsumer | app/modules/JobModule.scala | Scala | gpl-3.0 | 507 |
package scalarules.test.scalafmt
object Format {
def main(args: Array[String]) {
val warnings: String = "Be careful with this test. The column number is limited to 40, so it should be in new line."
}
}
| smparkes/rules_scala | test/scalafmt/unformatted/unformatted-custom-conf.scala | Scala | apache-2.0 | 210 |
package com.github.sstone.amqp
import collection.JavaConversions._
import com.rabbitmq.client.AMQP.BasicProperties
import com.rabbitmq.client.{AMQP, ShutdownSignalException, Channel, Envelope}
import akka.actor.{Actor, Props, ActorRef, ActorRefFactory}
import akka.actor.FSM.{SubscribeTransitionCallBack, CurrentState, Transition}
import java.util.concurrent.CountDownLatch
object Amqp {
/**
* queue parameters
   * @param name queue name. If empty, the broker will generate a random name, see Queue.DeclareOk
   * @param passive if true, just check that the queue exists
* @param durable if true, the queue will be durable i.e. will survive a broker restart
* @param exclusive if true, the queue can be used by one connection only
* @param autodelete if true, the queue will be destroyed when it is no longer used
* @param args additional parameters, such as TTL, ...
*/
case class QueueParameters(name: String, passive: Boolean, durable: Boolean = false, exclusive: Boolean = false, autodelete: Boolean = true, args: Map[String, AnyRef] = Map.empty)
/**
* declare a queue
* @param channel valid AMQP channel
* @param q queue parameters
* @return a com.rabbitmq.client.AMQP.Queue.DeclareOk object
*/
def declareQueue(channel: Channel, q: QueueParameters) = {
if (q.passive)
channel.queueDeclarePassive(q.name)
else
channel.queueDeclare(q.name, q.durable, q.exclusive, q.autodelete, q.args)
}
/**
* exchange parameters
* @param name exchange name
* @param passive if true, just check that the exchange exists
* @param exchangeType exchange type: "direct", "fanout", "topic", "headers"
* @param durable if true, the exchange will be durable i.e. will survive a broker restart
* @param autodelete if true, the exchange will be destroyed when it is no longer used
* @param args additional arguments
*/
case class ExchangeParameters(name: String, passive: Boolean, exchangeType: String, durable: Boolean = false, autodelete: Boolean = false, args: Map[String, AnyRef] = Map.empty)
/**
* declare an exchange
* @param channel valid AMQP channel
* @param e exchange parameters
* @return a com.rabbitmq.client.AMQP.Exchange.DeclareOk object
*/
def declareExchange(channel: Channel, e: ExchangeParameters) = {
if (e.passive)
channel.exchangeDeclarePassive(e.name)
else
channel.exchangeDeclare(e.name, e.exchangeType, e.durable, e.autodelete, e.args)
}
object StandardExchanges {
val amqDirect = ExchangeParameters("amq.direct", passive = true, exchangeType = "direct")
val amqFanout = ExchangeParameters("amq.fanout", passive = true, exchangeType = "fanout")
val amqTopic = ExchangeParameters("amq.topic", passive = true, exchangeType = "topic")
val amqHeaders = ExchangeParameters("amq.headers", passive = true, exchangeType = "headers")
val amqMatch = ExchangeParameters("amq.match", passive = true, exchangeType = "headers")
}
/**
* Channel parameters
* @param qos "quality of service", or prefetch count. The number of non-acknowledged messages a channel can receive. If set
* to one then the consumer using this channel will not receive another message until it has acknowledged or rejected
* its current message. This feature is commonly used as a load-balancing strategy using multiple consumers and
* a shared queue.
*/
case class ChannelParameters(qos: Int)
case class Binding(exchange: ExchangeParameters, queue: QueueParameters, routingKey: String)
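  // Editorial example: typical values for the parameter classes above, used with the request
  // messages below. Queue/exchange names and the routing key are illustrative only.
  //
  //   val queue    = QueueParameters("my.queue", passive = false, durable = true, exclusive = false, autodelete = false)
  //   val exchange = StandardExchanges.amqDirect
  //   val binding  = Binding(exchange, queue, routingKey = "my.key")
  //   val publish  = Publish(exchange.name, "my.key", "hello".getBytes("UTF-8"))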
/**
* requests that can be sent to a ChannelOwner actor
*/
sealed trait Request
case class Abort(code: Int = AMQP.REPLY_SUCCESS, message: String = "OK") extends Request
case class AddStatusListener(listener: ActorRef) extends Request
case class AddReturnListener(listener: ActorRef) extends Request
case class AddShutdownListener(listener: ActorRef) extends Request
case class AddFlowListener(listener: ActorRef) extends Request
case class Close(code: Int = AMQP.REPLY_SUCCESS, message: String = "OK", timeout: Int = -1) extends Request
case class DeclareQueue(queue: QueueParameters) extends Request
case class DeleteQueue(name: String, ifUnused: Boolean = false, ifEmpty: Boolean = false) extends Request
case class PurgeQueue(name: String) extends Request
case class DeclareExchange(exchange: ExchangeParameters) extends Request
case class DeleteExchange(name: String, ifUnused: Boolean = false) extends Request
case class QueueBind(queue: String, exchange: String, routing_key: String, args: Map[String, AnyRef] = Map.empty) extends Request
case class QueueUnbind(queue: String, exchange: String, routing_key: String, args: Map[String, AnyRef] = Map.empty) extends Request
case class Publish(exchange: String, key: String, body: Array[Byte], properties: Option[BasicProperties] = None, mandatory: Boolean = true, immediate: Boolean = false) extends Request
case class Ack(deliveryTag: Long) extends Request
case class Reject(deliveryTag: Long, requeue: Boolean = true) extends Request
case class Transaction(publish: List[Publish]) extends Request
case class CreateConsumer(listener: ActorRef) extends Request
case class AddQueue(queue: QueueParameters) extends Request
case class AddBinding(binding: Binding) extends Request
case class CancelConsumer(consumerTag: String) extends Request
case class Record(request: Request) extends Request
case class Get(queue: String, autoAck: Boolean) extends Request
case object ConfirmSelect extends Request
case class WaitForConfirms(timeout: Option[Long]) extends Request
case class WaitForConfirmsOrDie(timeout: Option[Long]) extends Request
case class AddConfirmListener(listener: ActorRef) extends Request
case class HandleAck(deliveryTag: Long, multiple: Boolean)
case class HandleNack(deliveryTag: Long, multiple: Boolean)
/**
* sent back by a publisher when the request was processed successfully
* @param request original request
* @param result optional result. Each request maps directly to a RabbitMQ Channel method: DeclareQueue maps to
* Channel.queueDeclare(), Publish maps to Channel.basicPublish() ...
   * When the Channel method returns something, result wraps that something, otherwise it is empty.
   * For example, the Ok sent back for a DeclareQueue request wraps the Queue.DeclareOk returned by
   * Channel.queueDeclare().
*/
case class Ok(request: Request, result: Option[Any] = None)
/**
* sent back by a publisher when the request was not processed successfully
* @param request original request
* @param reason whatever error that was thrown when the request was processed
*/
case class Error(request: Request, reason: Throwable)
/**
* AMQP delivery, which is sent to the actor that you register with a Consumer
* @param consumerTag AMQP consumer tag
* @param envelope AMQP envelope
* @param properties AMQP properties
* @param body message body
* @see [[com.github.sstone.amqp.Consumer]]
*/
case class Delivery(consumerTag: String, envelope: Envelope, properties: BasicProperties, body: Array[Byte])
/**
   * wrapper for returned, or undelivered, messages i.e. messages published with the immediate flag and an
* (exchange, key) pair for which the broker could not find any destination
*/
case class ReturnedMessage(replyCode: Int, replyText: String, exchange: String, routingKey: String, properties: BasicProperties, body: Array[Byte])
/**
* shutdown message sent to listeners set with AddShutdownListener
* @param cause shutdown exception
*/
case class Shutdown(cause: ShutdownSignalException)
/**
* flow-control message, sent to listeners set with AddFlowListener
   * @param active true when the broker resumes message flow on the channel, false when it asks the client to pause publishing
*/
case class HandleFlow(active: Boolean)
/**
* sent when a consumer is cancelled
* @param consumerTag consumer tag
*/
case class ConsumerCancelled(consumerTag: String)
  /** executes a callback when a connection or channel actor is "connected", i.e. usable
* <ul>
* <li>for a connection actor, connected means that it is connected to the AMQP broker</li>
* <li>for a channel actor, connected means that it is has a valid channel (sent by its connection parent)</li>
* </ul>
* this is a simple wrapper around the FSM state monitoring tools provided by Akka, since ConnectionOwner and ChannelOwner
* are state machines with 2 states (Disconnected and Connected)
* @param actorRefFactory actor capable of creating child actors (will be used to create a temporary watcher)
* @param channelOrConnectionActor reference to a ConnectionOwner or ChannelOwner actor
* @param onConnected connection callback
*/
def onConnection(actorRefFactory: ActorRefFactory, channelOrConnectionActor: ActorRef, onConnected: () => Unit) = {
val m = actorRefFactory.actorOf(Props(new Actor {
def receive = {
case ChannelOwner.Connected | ConnectionOwner.Connected =>
onConnected()
context.stop(self)
}
}))
channelOrConnectionActor ! AddStatusListener(m)
}
/**
* wait until a number of connection or channel actors are connected
* @param actorRefFactory an actor capable of creating child actors (will be used to create temporary watchers)
   * @param actors set of references to ConnectionOwner or ChannelOwner actors
* @return a CountDownLatch object you can wait on; its count will reach 0 when all actors are connected
*/
def waitForConnection(actorRefFactory: ActorRefFactory, actors: ActorRef*): CountDownLatch = {
val latch = new CountDownLatch(actors.size)
actors.foreach(onConnection(actorRefFactory, _, () => latch.countDown()))
latch
}
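  // Editorial usage sketch: block until a ConnectionOwner and a ChannelOwner child are usable
  // before publishing. The way `conn` and `producer` are created is assumed here; see
  // ConnectionOwner/ChannelOwner in this project for the actual construction helpers.
  //
  //   val conn: ActorRef = ???       // ConnectionOwner actor
  //   val producer: ActorRef = ???   // ChannelOwner actor created from `conn`
  //   Amqp.waitForConnection(system, conn, producer).await()
  //   producer ! Publish("amq.direct", "my.key", "hello".getBytes("UTF-8"))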
}
| SayreBladesWW/amqp-client | src/main/scala/com/github/sstone/amqp/Amqp.scala | Scala | mit | 9,966 |
/*
* Code Pulse: A real-time code coverage testing tool. For more information
* see http://code-pulse.com
*
* Copyright (C) 2014 Applied Visions - http://securedecisions.avi.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.secdec.codepulse.data.model.slick
import scala.slick.driver.JdbcProfile
import scala.slick.model.ForeignKeyAction
import scala.util.Try
import com.secdec.codepulse.data.bytecode.CodeTreeNodeKind
import com.secdec.codepulse.data.model.{ TreeNodeData => TreeNode, _ }
import scala.slick.jdbc.{ GetResult, StaticQuery => Q }
import com.secdec.codepulse.data.bytecode.CodeTreeNodeKind.{ Cls, Grp, Mth, Pkg }
/** The Slick DAO for tree node data.
*
* @author robertf
*/
private[slick] class TreeNodeDataDao(val driver: JdbcProfile, val sourceDataDao: SourceDataDao) extends SlickHelpers {
import driver.simple._
class TreeNodeData(tag: Tag) extends Table[TreeNode](tag, "tree_node_data") {
private val CodeTreeNodeKindMapping = Map[CodeTreeNodeKind, Char](
CodeTreeNodeKind.Grp -> 'g',
CodeTreeNodeKind.Pkg -> 'p',
CodeTreeNodeKind.Cls -> 'c',
CodeTreeNodeKind.Mth -> 'm')
private val CodeTreeNodeKindUnmapping = CodeTreeNodeKindMapping.map(_.swap)
private implicit val CodeTreeNodeKindMapper = MappedColumnType.base[CodeTreeNodeKind, Char](
CodeTreeNodeKindMapping.apply,
CodeTreeNodeKindUnmapping.apply)
def id = column[Int]("id", O.PrimaryKey, O.NotNull)
def parentId = column[Option[Int]]("parent_id")
def label = column[String]("label", O.NotNull)
def kind = column[CodeTreeNodeKind]("kind", O.NotNull)
def size = column[Option[Int]]("size", O.Nullable)
def sourceFileId = column[Option[Int]]("source_file_id", O.Nullable)
def sourceLocationCount = column[Option[Int]]("source_location_count", O.Nullable)
def methodStartLine = column[Option[Int]]("method_start_line", O.Nullable)
def methodEndLine = column[Option[Int]]("method_end_line", O.Nullable)
def isSurfaceMethod = column[Option[Boolean]]("is_surface_method", O.Nullable)
def * = (id, parentId, label, kind, size, sourceFileId, sourceLocationCount, methodStartLine, methodEndLine, isSurfaceMethod) <> (TreeNode.tupled, TreeNode.unapply)
def labelIndex = index("tnd_label_index", label)
def sourceFile = foreignKey("tree_node_data_to_source_file", sourceFileId, sourceDataDao.sourceFilesQuery)(_.id, onDelete = ForeignKeyAction.Cascade)
}
val treeNodeData = TableQuery[TreeNodeData]
class TracedNodes(tag: Tag) extends Table[(Int, Boolean)](tag, "traced_nodes") {
def nodeId = column[Int]("node_id", O.PrimaryKey, O.NotNull)
def traced = column[Boolean]("traced", O.NotNull)
def * = (nodeId, traced)
def node = foreignKey("tn_node", nodeId, treeNodeData)(_.id, onDelete = ForeignKeyAction.Cascade)
}
val tracedNodes = TableQuery[TracedNodes]
class TreeNodeFlags(tag: Tag) extends Table[(Int, TreeNodeFlag)](tag, "tree_node_flags") {
private val TreeNodeFlagMapping = Map[TreeNodeFlag, String](
TreeNodeFlag.HasVulnerability -> "has_vuln")
private val TreeNodeFlagUnmapping = TreeNodeFlagMapping.map(_.swap)
private implicit val TreeNodeFlagMapper = MappedColumnType.base[TreeNodeFlag, String](
TreeNodeFlagMapping.apply,
TreeNodeFlagUnmapping.apply)
def nodeId = column[Int]("node_id", O.PrimaryKey, O.NotNull)
def flag = column[TreeNodeFlag]("flag", O.NotNull)
def * = (nodeId, flag)
}
val treeNodeFlags = TableQuery[TreeNodeFlags]
class MethodSignatureNodeMap(tag: Tag) extends Table[MethodSignatureNode](tag, "method_signature_node_map") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def signature = column[String]("sig", O.NotNull)
def nodeId = column[Int]("node_id", O.NotNull)
def * = (id, signature, nodeId) <> (MethodSignatureNode.tupled, MethodSignatureNode.unapply)
def node = foreignKey("msnm_node", nodeId, treeNodeData)(_.id, onDelete = ForeignKeyAction.Cascade)
def signatureIndex = index("sig_idx", (signature), unique = false)
}
val methodSignatureNodeMap = TableQuery[MethodSignatureNodeMap]
class JspNodeMap(tag: Tag) extends Table[(String, Int)](tag, "jsp_node_map") {
def jspClass = column[String]("jsp", O.PrimaryKey, O.NotNull)
def nodeId = column[Int]("node_id", O.NotNull)
def * = jspClass -> nodeId
def node = foreignKey("jspnm_node", nodeId, treeNodeData)(_.id, onDelete = ForeignKeyAction.Cascade)
}
val jspNodeMap = TableQuery[JspNodeMap]
def create(implicit session: Session) = (treeNodeData.ddl ++ tracedNodes.ddl ++ treeNodeFlags.ddl ++ methodSignatureNodeMap.ddl ++ jspNodeMap.ddl).create
def get(id: Int)(implicit session: Session): Option[TreeNode] = {
(for (n <- treeNodeData if n.id === id) yield n).firstOption
}
def get(label: String)(implicit session: Session): Option[TreeNode] = {
(for (n <- treeNodeData if n.label === label) yield n).firstOption
}
def getForSignature(signature: String)(implicit session: Session): List[TreeNode] = getIdsForSignature(signature).flatMap(get(_))
def getForJsp(jspClass: String)(implicit session: Session): Option[TreeNode] = getIdForJsp(jspClass).flatMap(get(_))
def getIdsForSignature(signature: String)(implicit session: Session): List[Int] = {
(for (n <- methodSignatureNodeMap if n.signature === signature) yield n.nodeId).list
}
def getIdForJsp(jspClass: String)(implicit session: Session): Option[Int] = {
(for (n <- jspNodeMap if n.jspClass === jspClass) yield n.nodeId).firstOption
}
def iterateWith[T](f: Iterator[TreeNode] => T)(implicit session: Session): T = {
val it = treeNodeData.iterator
try {
f(it)
} finally it.close
}
def iterateMethodMappingsWith[T](f: Iterator[MethodSignatureNode] => T)(implicit session: Session): T = {
val it = methodSignatureNodeMap.iterator
try {
f(it)
} finally it.close
}
def iterateJspMappingsWith[T](f: Iterator[(String, Int)] => T)(implicit session: Session): T = {
val it = jspNodeMap.iterator
try {
f(it)
} finally it.close
}
def storeMethodSignature(methodSignatureNode: MethodSignatureNode)(implicit session: Session) {
methodSignatureNodeMap += methodSignatureNode
}
def storeMethodSignatures(signatures: Iterable[MethodSignatureNode])(implicit session: Session) {
fastImport { methodSignatureNodeMap ++= signatures }
}
def storeJsp(jspPath: String, nodeId: Int)(implicit session: Session) {
jspNodeMap += jspPath -> nodeId
}
def storeJsps(jsps: Iterable[(String, Int)])(implicit session: Session) {
fastImport { jspNodeMap ++= jsps }
}
def storeNode(node: TreeNode)(implicit session: Session) {
treeNodeData += node
}
def storeNodes(nodes: Iterable[TreeNode])(implicit session: Session) {
fastImport { treeNodeData ++= nodes }
}
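	// Editorial usage sketch: every DAO call expects an implicit Slick session. The `db` and
	// `dao` values below are placeholders; the TreeNode and MethodSignatureNode shapes follow
	// the projections defined above.
	//
	//   db.withSession { implicit session =>
	//     dao.storeNode(TreeNode(1, None, "com.example", CodeTreeNodeKind.Pkg, None, None, None, None, None, None))
	//     dao.storeMethodSignature(MethodSignatureNode(0, "com/example/Foo.bar:()V", 1))
	//   }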
def getTraced()(implicit session: Session) = tracedNodes.list
def storeTracedValues(values: Iterable[(Int, Option[Boolean])])(implicit session: Session) {
fastImport {
tracedNodes ++= values flatMap {
case (id, Some(traced)) => Some(id -> traced)
case _ => None
}
}
}
def updateTraced(id: Int, traced: Option[Boolean])(implicit session: Session) {
traced match {
case Some(traced) =>
val q = for (row <- tracedNodes if row.nodeId === id) yield row.traced
q.update(traced) match {
case 0 => tracedNodes += id -> traced
case _ =>
}
case None =>
val q = for (row <- tracedNodes if row.nodeId === id) yield row
q.delete
}
}
def updateSourceLocationCount(id: Int, sourceLocationCount: Int) (implicit session: Session): Unit = {
val q = for (row <- treeNodeData if row.id === id) yield row.sourceLocationCount
q.update(Some(sourceLocationCount))
}
def getFlags(id: Int)(implicit session: Session): List[TreeNodeFlag] = {
// wrapped in a try, since older versions may not have the concept of flags
Try {
(for (flag <- treeNodeFlags if flag.nodeId === id) yield flag.flag).list
} getOrElse Nil
}
def setFlag(id: Int, flag: TreeNodeFlag)(implicit session: Session) {
treeNodeFlags += id -> flag
}
def clearFlag(id: Int, flag: TreeNodeFlag)(implicit session: Session) {
(for (flag <- treeNodeFlags if flag.nodeId === id) yield flag).delete
}
def findMethods(sourceFilePath: String)(implicit session: Session): List[Int] = {
(for {
treeNodeDataItem <- treeNodeData
sourceFile <- treeNodeDataItem.sourceFile
if sourceFile.path === sourceFilePath
} yield (treeNodeDataItem.id)).list
}
def findMethods(sourceFilePath: String, startingLineNumber: Int, endingLineNumber: Int)(implicit session: Session): List[Int] = {
(for {
treeNodeDataItem <- treeNodeData.sortBy(x => x.methodStartLine)
sourceFile <- treeNodeDataItem.sourceFile
if sourceFile.path === sourceFilePath &&
(treeNodeDataItem.methodEndLine >= startingLineNumber && treeNodeDataItem.methodStartLine <= endingLineNumber)
} yield (treeNodeDataItem.id)).list
}
def markSurfaceMethod(id: Int, isSurfaceMethod: Option[Boolean])(implicit session: Session): Unit = {
val q = for (row <- treeNodeData if row.id === id) yield row.isSurfaceMethod
q.update(isSurfaceMethod)
}
def getSurfaceMethodAncestorPackages(implicit session: Session): List[Int] = {
val query = Q.queryNA[Int]("""
WITH ANCESTORS(id, parent_id, kind) AS (
SELECT t."id", t."parent_id", t."kind" FROM PUBLIC."tree_node_data" t WHERE t."is_surface_method" = true
UNION ALL
SELECT tr."id", tr."parent_id", tr."kind" FROM ANCESTORS INNER JOIN PUBLIC."tree_node_data" tr ON ANCESTORS.parent_id = tr."id"
AND (ANCESTORS.kind = 'm' OR ANCESTORS.kind = 'c' OR (ANCESTORS.kind='p' AND tr."kind"='c') OR (ANCESTORS.kind='g' AND tr."kind"='m'))
)
SELECT DISTINCT id
FROM ANCESTORS
WHERE kind='p' OR kind='g'
ORDER BY id""")
query.list()
}
def getSurfaceMethodCount(implicit session: Session): Int = {
treeNodeData.filter(x => x.isSurfaceMethod).length.run
}
} | secdec/codepulse | codepulse/src/main/scala/com/secdec/codepulse/data/model/slick/TreeNodeDataDao.scala | Scala | apache-2.0 | 10,372 |
// Project: scalanative-cocoa
// Module: Foundation
// Description: Contains all functions defined in the Foundation global scope.
package cocoa.foundation
import scala.scalanative.native._
import objc.runtime._
import cocoa.foundation._
@extern
object Foundation {
// type objc_class = Ptr[Byte]
// type objc_object = Ptr[Byte]
// type objc_selector = Ptr[Byte]
// type _NSZone = Ptr[Byte]
// def sel_getName_(sel: SEL): Ptr[CSignedChar] = extern
// def sel_registerName_(str: Ptr[CSignedChar]): SEL = extern
// def object_getClassName_(obj: id): Ptr[CSignedChar] = extern
// def object_getIndexedIvars_(obj: id): Ptr[Byte] = extern
// def sel_isMapped_(sel: SEL): BOOL = extern
// def sel_getUid_(str: Ptr[CSignedChar]): SEL = extern
def NSAllocateObject_extraBytes_zone(aClass: id, extraBytes: NSUInteger, zone: NSZone): id = extern
def NSDeallocateObject_(`object`: id): Unit = extern
def NSCopyObject_extraBytes_zone(`object`: id, extraBytes: NSUInteger, zone: NSZone): id = extern
def NSShouldRetainWithZone_requestedZone(anObject: id, requestedZone: NSZone): BOOL = extern
def NSIncrementExtraRefCount(`object`: id): Unit = extern
def NSDecrementExtraRefCountWasZero(`object`: id): BOOL = extern
def NSExtraRefCount(`object`: id): NSUInteger = extern
// def CFBridgingRetain_(X: id): CFTypeRef = extern
// def CFBridgingRelease_(X: CFTypeRef): id = extern
def NSLog(format: NSString, args: CVararg*): Unit = extern
def NXReadNSObjectFromCoder(decoder: NSCoder): NSObject = extern
}
| jokade/scalanative-cocoa | foundation/src/main/scala/cocoa/foundation/Foundation.scala | Scala | mit | 1,531 |
package techniques
import org.apache.spark.rdd.RDD
import utils.Scaling._
import utils.TopK._
import utils.SubTechniques._
import utils.ComputationUtilities._
/**
 * Created by Joanna on 4/7/15.
 * Naive similarity functions that compute the list of words similar to a given word, based on comparison techniques
 * @author Joanna Salathé
*/
object NaiveComparisons {
val order = (x: (String, Double), y: (String, Double)) => if (x._2 != y._2) {
x._2 < y._2
} else x._1 < y._1
/**
   * Compare a (word, frequency) tuple with a collection of (word, frequency) tuples to find similar words, using its dedicated metric naiveDifferenceMetricTopK
* @param data data collection of words with their frequency
* @param testedWord word that we want to find its similar words
* @param parameters L(0) contains the number of similar words we want to find, L(1) contains the accepted difference to consider a word's year similar
* @return words that are similar
*/
def naiveDifferenceTopKScalingAverage(data: RDD[(String, Array[Double])], testedWord: (String, Array[Double]), parameters: List[Double]): RDD[(String)] = {
val k = parameters.head
val acceptedDifference = parameters(1)
val retrievedWords = retrieveTopK(k.toInt, naiveDifferenceMetricTopK, data, testedWord, order, List(acceptedDifference))
data.sparkContext.parallelize(retrievedWords)
}
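  // Editorial usage sketch: `data` holds (word, yearly frequency) pairs and `frequencies` is
  // the tested word's own frequency array; both values below are illustrative only.
  //
  //   val similar: RDD[String] =
  //     NaiveComparisons.naiveDifferenceTopKScalingAverage(
  //       data, ("science", frequencies), parameters = List(10.0, 0.2))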
/**
   * Compare a (word, frequency) tuple with a collection of (word, frequency) tuples to find similar words, using its dedicated metric naiveDifferenceSquaredMetricTopK
* @param data data collection of words with their frequency
* @param testedWord word that we want to find its similar words
* @param parameters L(0) contains the number of similar words we want to find
* @return words that are similar
*/
def naiveDifferenceSquaredTopKScalingAverage(data: RDD[(String, Array[Double])], testedWord: (String, Array[Double]), parameters: List[Double]): RDD[(String)] = {
val k = parameters.head
val retrievedWords = retrieveTopK(k.toInt, naiveDifferenceSquaredMetricTopK, data, testedWord, order)
data.sparkContext.parallelize(retrievedWords)
}
/**
   * Compare a (word, frequency) tuple with a collection of (word, frequency) tuples to find similar words, using its dedicated metric naiveDivisionMetricTopK
* @param data data collection of words with their frequency
* @param testedWord word that we want to find its similar words
* @param parameters L(0) contains the number of similar words we want to find
* @return words that are similar
*/
def naiveDivisionTopKScalingAverage(data: RDD[(String, Array[Double])], testedWord: (String, Array[Double]), parameters: List[Double]): RDD[(String)] = {
val k = parameters.head
val retrievedWords = retrieveTopK(k.toInt, naiveDivisionMetricTopK, data, testedWord, order)
data.sparkContext.parallelize(retrievedWords)
}
/**
   * Compares a (word, frequency) tuple with a collection of (word, frequency) tuples to find similar words, using its dedicated metric naiveDivisionVarianceMetricTopK
* @param data data collection of words with their frequency
* @param testedWord word that we want to find its similar words
* @param parameters L(0) contains the number of similar words we want to find
* @return words that are similar
*/
def naiveDivisionVarianceTopKScalingAverage(data: RDD[(String, Array[Double])], testedWord: (String, Array[Double]), parameters: List[Double]): RDD[(String)] = {
val k = parameters.head
val retrievedWords = retrieveTopK(k.toInt, naiveDivisionVarianceMetricTopK, data, testedWord, order)
data.sparkContext.parallelize(retrievedWords)
}
/**
   * Applies the average scaling function before calling the NaiveDifference similarity function,
   * then shifts it
* @param data data collection of words with their frequency
* @param testedWord word that we want to find its similar words
* @param parameters L(0) contains the number of similar words we want to find, L(1) contains the accepted difference to consider a word's year similar
* @return words that are similar
*/
def naiveDifferenceScalingAverageWithShifting(data: RDD[(String, Array[Double])], testedWord: (String, Array[Double]), parameters: List[Double]): RDD[(String)] = {
    shift(data, testedWord, parameters, NaiveComparisons.naiveDifferenceTopKScalingAverage, parameters(1).toInt, parameters(2).toInt)
}
/**
   * Applies the max scaling function before calling the NaiveDivision similarity function
   * on the ratio line, then shifts it
* @param data data collection of words with their frequency
* @param testedWord word that we want to find its similar words
* @param parameters L(0) contains the number of similar words we want to find, L(1) contains the accepted difference to consider a word's year similar
* @return words that are similar
*/
def naiveDivisionScalingMaxWithShifting(data: RDD[(String, Array[Double])], testedWord: (String, Array[Double]), parameters: List[Double]): RDD[(String)] = {
shift(data, testedWord, parameters, NaiveComparisons.naiveDivisionTopKScalingAverage, parameters(1).toInt, parameters(2).toInt)
}
/**
   * Given a word, finds words whose curve is the inverse of its own, using the NaiveInverse technique
* @param data data collection of words with their frequency
* @param testedWord word that we want to find its similar words
* @param parameters L(0) contains the number of similar words we want to find
* @return words that are similar
*/
def naiveInverseDifference(data: RDD[(String, Array[Double])], testedWord: (String, Array[Double]),
parameters: List[Double]): RDD[(String)] = {
NaiveComparisons.naiveDifferenceTopKScalingAverage(data, (testedWord._1, testedWord._2.reverse), parameters)
}
/** *******************************************************************************************************
* Metrics
* ******************************************************************************************************* */
/**
* Metric that takes into account the difference between the two arrays of words
* @param word1Freq first temporal profile
* @param word2Freq second temporal profile
   * @param parameters L(0) contains the accepted difference under which two array values are considered similar
   * @return sum of the element-wise differences divided by the number of elements whose difference is within the accepted threshold (or the plain sum if no element qualifies)
*/
def naiveDifferenceMetricTopK(word1Freq: Array[Double], word2Freq: Array[Double], parameters: List[Double] = List(15)): Double = {
val acceptedDifference = parameters.head
val zipped = proportionalScalarAverageSubstraction(word1Freq).zip(proportionalScalarAverageSubstraction(word2Freq))
val zippedDif = zipped.map(x => math.abs(x._1 - x._2))
val trueDif = zippedDif.map(_ <= acceptedDifference).filter(_ == true)
if (trueDif.length > 0) {
zippedDif.sum / trueDif.length
} else {
zippedDif.sum
}
}
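  /* Illustrative worked example (added note, not part of the original code): treating the two
   * profiles as if proportionalScalarAverageSubstraction left them unchanged, word1Freq =
   * (0.0, 1.0, -1.0) and word2Freq = (0.5, 0.5, -1.0) give element-wise differences
   * (0.5, 0.5, 0.0); all three fall within the default accepted difference of 15, so the metric
   * evaluates to (0.5 + 0.5 + 0.0) / 3 ≈ 0.33.
   */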
/**
   * Metric that takes into account the difference between the two arrays of words in a stricter manner
* @param word1Freq first temporal profile
* @param word2Freq second temporal profile
   * @param parameters unused by this metric (kept for signature compatibility)
   * @return sum of the squared element-wise differences
*/
def naiveDifferenceSquaredMetricTopK(word1Freq: Array[Double], word2Freq: Array[Double], parameters: List[Double] = List(15)): Double = {
val zipped = proportionalScalarAverageSubstraction(word1Freq).zip(proportionalScalarAverageSubstraction(word2Freq))
val zippedDif = zipped.map(x => math.abs(x._1 - x._2))
val zippedDifSquared = zippedDif.map(x => x * x)
zippedDifSquared.sum
}
/**
   * Metric that takes into account the straightness of the ratio line (i.e. elements of the first list divided element-wise by those of the second) between the two arrays of words
* @param word1Freq first temporal profile
* @param word2Freq second temporal profile
   * @return difference between the max and min elements of the "ratio line" (i.e. element-wise w1/w2)
*/
def naiveDivisionMetricTopK(word1Freq: (Array[Double]), word2Freq: (Array[Double]), parameters: List[Double] = List()): Double = {
val zipped = proportionalScalarAverageSubstraction(word1Freq).zip(proportionalScalarAverageSubstraction(word2Freq))
val min = findMinAndMax(Array(findMinAndMax(zipped.map(_._1))._1, findMinAndMax(zipped.map(_._2))._1))._1
    val zippedWithoutZero = zipped.map(x => (x._1 - min + 1, x._2 - min + 1)) // shift both series above zero so the division below cannot divide by zero
val divided = zippedWithoutZero.map(x => math.abs(x._1 / x._2))
val minMax = findMinAndMax(divided)
minMax._2 - minMax._1
}
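  /* Intuition (added note): after the shift above makes every value strictly positive, two
   * profiles that stay close to each other yield a ratio line close to 1, so the max - min
   * spread is near 0 (most similar); profiles that diverge yield a large spread (dissimilar).
   */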
/**
   * Metric that takes into account the stability of the ratio line (i.e. elements of the first list divided element-wise by those of the second) between the two arrays of words
* @param word1Freq first temporal profile
* @param word2Freq second temporal profile
   * @return variance of the "ratio line" (i.e. element-wise w1/w2)
*/
def naiveDivisionVarianceMetricTopK(word1Freq: (Array[Double]), word2Freq: (Array[Double]), parameters: List[Double] = List()): Double = {
val zipped = proportionalScalarAverageSubstraction(word1Freq).zip(proportionalScalarAverageSubstraction(word2Freq))
val min = findMinAndMax(Array(findMinAndMax(zipped.map(_._1))._1, findMinAndMax(zipped.map(_._2))._1))._1
    val zippedWithoutZero = zipped.map(x => (x._1 - min + 1, x._2 - min + 1)) // shift both series above zero so the division below cannot divide by zero
val divided = zippedWithoutZero.map(x => math.abs(x._1 / x._2))
variance(divided)
}
}
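// ------------------------------------------------------------------------------------------
// Illustrative usage sketch (added for documentation; not part of the original file).
// The local SparkContext configuration and the toy temporal profiles are invented for this
// example only; in the real project the (word, frequency-array) RDD is loaded from the data
// pipeline rather than hard-coded.
// ------------------------------------------------------------------------------------------
import org.apache.spark.{SparkConf, SparkContext}

object NaiveComparisonsExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[*]").setAppName("naive-comparisons-example"))
    // Toy temporal profiles: one frequency value per year, all arrays of the same length.
    val data = sc.parallelize(Seq(
      "war" -> Array(1.0, 2.0, 8.0, 3.0),
      "peace" -> Array(1.2, 2.1, 7.5, 2.9),
      "cheese" -> Array(5.0, 5.1, 4.9, 5.0)))
    val tested = ("conflict", Array(0.9, 2.2, 8.1, 3.2))
    // Ask for the 2 most similar words, accepting a per-year difference of up to 15.
    val similar = NaiveComparisons.naiveDifferenceTopKScalingAverage(data, tested, List(2.0, 15.0))
    similar.collect().foreach(println)
    sc.stop()
  }
}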
| SidneyBovet/smargn | SparkCommander/src/main/scala/techniques/NaiveComparisons.scala | Scala | gpl-2.0 | 9,758 |
object App {
class Foo { type A = Boo#B } // error: illegal cyclic reference: alias App.Boo#B of type A refers back to the type itself
class Boo { type B = Foo#A }
}
| som-snytt/dotty | tests/neg/i4371a.scala | Scala | apache-2.0 | 170 |
package com.sksamuel.elastic4s.search.queries
import com.sksamuel.elastic4s.testkit.SharedElasticSugar
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy
import org.scalatest.{Matchers, WordSpec}
class NestedQueryTest extends WordSpec with SharedElasticSugar with Matchers {
client.execute {
createIndex("nested").mappings(
mapping("places").fields(
keywordField("name"),
nestedField("states")
)
)
  }.await
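  // Note (added): the nestedField("states") mapping above is what the nested query in the test
  // below relies on. With a plain object field the state sub-documents would be flattened, and
  // a bool query for name = "Montana" AND entry = 1889 could match values coming from two
  // different states (as in the "fictional usa" document indexed below).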
client.execute(
bulk(
indexInto("nested" / "places") fields(
"name" -> "usa",
"states" -> Seq(
Map(
"name" -> "Montana",
"capital" -> "Helena",
"entry" -> 1889
), Map(
"name" -> "South Dakota",
"capital" -> "Pierre",
"entry" -> 1889
)
)
),
indexInto("nested" / "places") fields(
"name" -> "fictional usa",
"states" -> Seq(
Map(
"name" -> "Old Jersey",
"capital" -> "Trenton",
"entry" -> 1889
), Map(
"name" -> "Montana",
"capital" -> "Helena",
"entry" -> 1567
)
)
)
).refresh(RefreshPolicy.IMMEDIATE)
).await
"nested query" should {
"match against nested objects" in {
client.execute {
search("nested" / "places") query {
nestedQuery("states").query {
boolQuery.must(
matchQuery("states.name", "Montana"),
matchQuery("states.entry", 1889)
)
}
}
}.await.totalHits shouldBe 1
}
}
}
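// For reference, the nested query exercised above corresponds roughly to the following
// Elasticsearch request body (sketch only; the exact JSON is generated by elastic4s):
//
// {
//   "nested": {
//     "path": "states",
//     "query": {
//       "bool": {
//         "must": [
//           { "match": { "states.name": "Montana" } },
//           { "match": { "states.entry": 1889 } }
//         ]
//       }
//     }
//   }
// }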
| aroundus-inc/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/search/queries/NestedQueryTest.scala | Scala | apache-2.0 | 1,641 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.detailquery
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
class ColumnPropertyValidationTestCase extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("""drop table if exists employee""")
}
test("Validate ColumnProperties_ valid key") {
try {
sql("create table employee(empname String,empid String,city String,country String,gender String,salary Double) stored by 'org.apache.carbondata.format' tblproperties('columnproperties.gender.key'='value')")
assert(true)
sql("drop table employee")
} catch {
      case e: Throwable => assert(false)
}
}
test("Validate Dictionary include _ invalid key") {
try {
sql("create table employee(empname String,empid String,city String,country String,gender String,salary Double) stored by 'org.apache.carbondata.format' tblproperties('columnproperties.invalid.key'='value')")
assert(false)
sql("drop table employee")
} catch {
      case e: Throwable => assert(true)
}
}
override def afterAll() {
sql("drop table if exists employee")
}
}
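// Note (added): the negative test above could also be written with ScalaTest's intercept, which
// fails automatically when no exception is thrown. Sketch only; the concrete exception type that
// CarbonData throws for an invalid column-property key is assumed to be a subtype of Exception.
//
//   test("Validate ColumnProperties_ invalid key") {
//     intercept[Exception] {
//       sql("create table employee(empname String) stored by 'org.apache.carbondata.format' " +
//         "tblproperties('columnproperties.invalid.key'='value')")
//     }
//   }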
| HuaweiBigData/carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/ColumnPropertyValidationTestCase.scala | Scala | apache-2.0 | 1,982 |