code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M) |
---|---|---|---|---|---|
package pl.project13.scala.akka.raft.config
import com.typesafe.config.Config
import java.util.concurrent.TimeUnit
import concurrent.duration._
import akka.actor.Extension
class RaftConfig (config: Config) extends Extension {
val raftConfig = config.getConfig("akka.raft")
val defaultAppendEntriesBatchSize = raftConfig.getInt("default-append-entries-batch-size")
val publishTestingEvents = raftConfig.getBoolean("publish-testing-events")
val electionTimeoutMin = raftConfig.getDuration("election-timeout.min", TimeUnit.MILLISECONDS).millis
val electionTimeoutMax = raftConfig.getDuration("election-timeout.max", TimeUnit.MILLISECONDS).millis
val heartbeatInterval = raftConfig.getDuration("heartbeat-interval", TimeUnit.MILLISECONDS).millis
val clusterAutoDiscoveryIdentifyTimeout = raftConfig.getDuration("cluster.auto-discovery.identify-timeout", TimeUnit.MILLISECONDS).millis
val clusterAutoDiscoveryRetryCount = raftConfig.getInt("cluster.auto-discovery.retry-count")
}
| ktoso/akka-raft | src/main/scala/pl/project13/scala/akka/raft/config/RaftConfig.scala | Scala | apache-2.0 | 999 |
package controllers
import javax.inject.Inject
import com.mohiva.play.silhouette.api.{ Environment, LogoutEvent, Silhouette }
import com.mohiva.play.silhouette.impl.authenticators.CookieAuthenticator
import com.mohiva.play.silhouette.impl.providers.SocialProviderRegistry
import forms._
import models.User
import models.daos.UserDAOImpl
import play.api.i18n.MessagesApi
import scala.concurrent.Future
import play.api.mvc._
import play.api.libs.json._
import scala.concurrent.ExecutionContext.Implicits.global
/**
* The basic application controller.
*
* @param messagesApi The Play messages API.
* @param env The Silhouette environment.
* @param socialProviderRegistry The social provider registry.
*/
class ApplicationController @Inject() (
val messagesApi: MessagesApi,
val env: Environment[User, CookieAuthenticator],
socialProviderRegistry: SocialProviderRegistry)
extends Silhouette[User, CookieAuthenticator] {
/**
* Handles the index action.
*
* @return The result to display.
*/
def index = SecuredAction.async { implicit request =>
Future.successful(Ok(views.html.home(request.identity)))
}
/**
* Handles the Sign In action.
*
* @return The result to display.
*/
def signIn = UserAwareAction.async { implicit request =>
request.identity match {
case Some(user) => Future.successful(Redirect(routes.ApplicationController.index()))
case None => Future.successful(Ok(views.html.signIn(SignInForm.form, socialProviderRegistry)))
}
}
/**
* Handles the Sign Up action.
*
* @return The result to display.
*/
def signUp = UserAwareAction.async { implicit request =>
request.identity match {
case Some(user) => Future.successful(Redirect(routes.ApplicationController.index()))
case None => Future.successful(Ok(views.html.signUp(SignUpForm.form)))
}
}
/**
* Handles the Sign Out action.
*
* @return The result to display.
*/
def signOut = SecuredAction.async { implicit request =>
val result = Redirect(routes.ApplicationController.index())
env.eventBus.publish(LogoutEvent(request.identity, request, request2Messages))
env.authenticatorService.discard(request.authenticator, result)
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// FORGOT PASSWORD
/**
* Starts the reset password mechanism if the user has forgotten his password. It shows a form to enter his email address.
*/
/*
def forgotPassword = UserAwareAction.async { implicit request =>
Future.successful(request.identity match {
case Some(_) =>
Redirect(routes.ApplicationController.index)
case None =>
Ok(views.html.forgotPassword(ForgotPasswordForm.emailForm))
})
}
*/
/**
* Sends an email to the user with a link to reset the password
*/
/*
def handleForgotPassword = Action.async { implicit request =>
ForgotPasswordForm.emailForm.bindFromRequest.fold(
formWithErrors =>
Future.successful(BadRequest(views.html.forgotPassword(formWithErrors))),
email =>
env.identityService.retrieve(email).flatMap {
case Some(_) => {
val token = MailTokenUser(email, isSignUp = false)
env.tokenService.create(token).map { _ =>
Mailer.forgotPassword(email, link = routes.Auth.resetPassword(token.id).absoluteURL())
Ok(views.html.forgotPasswordSent(email))
}
}
case None =>
Future.successful(BadRequest(viewsAuth.forgotPassword(ForgotPasswordForm.emailForm.withError("email", Messages("auth.user.notexists")))))
}
)
}
*/
}
| hectorgool/fara | app/controllers/ApplicationController.scala | Scala | apache-2.0 | 3,730 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.k8s
import io.fabric8.kubernetes.client.KubernetesClient
import org.jmock.lib.concurrent.DeterministicScheduler
import org.mockito.{ArgumentCaptor, Mock, MockitoAnnotations}
import org.mockito.Matchers.{eq => mockitoEq}
import org.mockito.Mockito.{never, verify, when}
import org.scalatest.BeforeAndAfter
import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.Fabric8Aliases._
import org.apache.spark.rpc.{RpcEndpoint, RpcEndpointRef, RpcEnv}
import org.apache.spark.scheduler.{ExecutorKilled, TaskSchedulerImpl}
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.RemoveExecutor
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
import org.apache.spark.scheduler.cluster.k8s.ExecutorLifecycleTestUtils.TEST_SPARK_APP_ID
class KubernetesClusterSchedulerBackendSuite extends SparkFunSuite with BeforeAndAfter {
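// jMock's DeterministicScheduler runs submitted tasks only when the test pumps it explicitly
// (see runNextPendingCommand() in the tests below), keeping executor-request handling deterministic.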
private val requestExecutorsService = new DeterministicScheduler()
private val sparkConf = new SparkConf(false)
.set("spark.executor.instances", "3")
@Mock
private var sc: SparkContext = _
@Mock
private var rpcEnv: RpcEnv = _
@Mock
private var driverEndpointRef: RpcEndpointRef = _
@Mock
private var kubernetesClient: KubernetesClient = _
@Mock
private var podOperations: PODS = _
@Mock
private var labeledPods: LABELED_PODS = _
@Mock
private var taskScheduler: TaskSchedulerImpl = _
@Mock
private var eventQueue: ExecutorPodsSnapshotsStore = _
@Mock
private var podAllocator: ExecutorPodsAllocator = _
@Mock
private var lifecycleEventHandler: ExecutorPodsLifecycleManager = _
@Mock
private var watchEvents: ExecutorPodsWatchSnapshotSource = _
@Mock
private var pollEvents: ExecutorPodsPollingSnapshotSource = _
private var driverEndpoint: ArgumentCaptor[RpcEndpoint] = _
private var schedulerBackendUnderTest: KubernetesClusterSchedulerBackend = _
before {
MockitoAnnotations.initMocks(this)
when(taskScheduler.sc).thenReturn(sc)
when(sc.conf).thenReturn(sparkConf)
driverEndpoint = ArgumentCaptor.forClass(classOf[RpcEndpoint])
when(rpcEnv.setupEndpoint(
mockitoEq(CoarseGrainedSchedulerBackend.ENDPOINT_NAME), driverEndpoint.capture()))
.thenReturn(driverEndpointRef)
when(kubernetesClient.pods()).thenReturn(podOperations)
schedulerBackendUnderTest = new KubernetesClusterSchedulerBackend(
taskScheduler,
rpcEnv,
kubernetesClient,
requestExecutorsService,
eventQueue,
podAllocator,
lifecycleEventHandler,
watchEvents,
pollEvents) {
override def applicationId(): String = TEST_SPARK_APP_ID
}
}
test("Start all components") {
schedulerBackendUnderTest.start()
verify(podAllocator).setTotalExpectedExecutors(3)
verify(podAllocator).start(TEST_SPARK_APP_ID)
verify(lifecycleEventHandler).start(schedulerBackendUnderTest)
verify(watchEvents).start(TEST_SPARK_APP_ID)
verify(pollEvents).start(TEST_SPARK_APP_ID)
}
test("Stop all components") {
when(podOperations.withLabel(SPARK_APP_ID_LABEL, TEST_SPARK_APP_ID)).thenReturn(labeledPods)
when(labeledPods.withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)).thenReturn(labeledPods)
schedulerBackendUnderTest.stop()
verify(eventQueue).stop()
verify(watchEvents).stop()
verify(pollEvents).stop()
verify(labeledPods).delete()
verify(kubernetesClient).close()
}
test("Remove executor") {
schedulerBackendUnderTest.start()
schedulerBackendUnderTest.doRemoveExecutor(
"1", ExecutorKilled)
verify(driverEndpointRef).send(RemoveExecutor("1", ExecutorKilled))
}
test("Kill executors") {
schedulerBackendUnderTest.start()
when(podOperations.withLabel(SPARK_APP_ID_LABEL, TEST_SPARK_APP_ID)).thenReturn(labeledPods)
when(labeledPods.withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)).thenReturn(labeledPods)
when(labeledPods.withLabelIn(SPARK_EXECUTOR_ID_LABEL, "1", "2")).thenReturn(labeledPods)
schedulerBackendUnderTest.doKillExecutors(Seq("1", "2"))
verify(labeledPods, never()).delete()
requestExecutorsService.runNextPendingCommand()
verify(labeledPods).delete()
}
test("Request total executors") {
schedulerBackendUnderTest.start()
schedulerBackendUnderTest.doRequestTotalExecutors(5)
verify(podAllocator).setTotalExpectedExecutors(3)
verify(podAllocator, never()).setTotalExpectedExecutors(5)
requestExecutorsService.runNextPendingCommand()
verify(podAllocator).setTotalExpectedExecutors(5)
}
}
| zhouyejoe/spark | resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackendSuite.scala | Scala | apache-2.0 | 5,472 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.datacompaction
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
class MajorCompactionWithMeasureSortColumns extends QueryTest with BeforeAndAfterAll {
val csvFilePath = s"$resourcesPath/compaction/nodictionary_compaction.csv"
val backupDateFormat = CarbonProperties.getInstance()
.getProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
override def beforeAll: Unit = {
sql("drop table if exists store")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
}
override def afterAll {
sql("drop table if exists store")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, backupDateFormat)
}
test("test major compaction with measure sort columns") {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_MAJOR_COMPACTION_SIZE, "1024")
val createStoreTableSql =
s"""
| CREATE TABLE IF NOT EXISTS store(
| code1 STRING,
| code2 STRING,
| country_code STRING,
| category_id INTEGER,
| product_id LONG,
| date DATE,
| count1 LONG,
| count2 LONG,
| count3 LONG
| )
| STORED AS carbondata
| TBLPROPERTIES(
| 'SORT_COLUMNS'='code1, code2, country_code, date, category_id, product_id',
| 'SORT_SCOPE'='LOCAL_SORT',
| 'CACHE_LEVEL'='BLOCKLET'
| )
""".stripMargin
sql(createStoreTableSql)
sql(
s"""
| LOAD DATA LOCAL INPATH '$csvFilePath'
| INTO TABLE store
| OPTIONS('HEADER'='true', 'COMPLEX_DELIMITER_LEVEL_1'='#')
""".stripMargin).show(false)
sql(
s"""
| LOAD DATA LOCAL INPATH '$csvFilePath'
| INTO TABLE store
| OPTIONS('HEADER'='true', 'COMPLEX_DELIMITER_LEVEL_1'='#')
""".stripMargin).show(false)
val csvRows = sqlContext.sparkSession.read.option("header", "true")
.csv(csvFilePath).orderBy("code1")
sql("ALTER TABLE store COMPACT 'MAJOR'")
val answer = sql("select * from store ").orderBy("code1")
assert(answer.except(csvRows).count() == 0)
sql("drop table store")
}
}
| jackylk/incubator-carbondata | integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionWithMeasureSortColumns.scala | Scala | apache-2.0 | 3,333 |
/*
* Copyright 2010 Michael Fortin <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package org.brzy.webapp.controller
/**
* Service that gets the authenticated entity from the database.
*
* @author Michael Fortin
*/
trait Authenticator[T<:Authenticated] {
def login(user:String, pass:String):Option[T]
} | m410/brzy | src/main/scala/org/brzy/webapp/controller/Authenticator.scala | Scala | apache-2.0 | 835 |
/*
* Scala implementation of Cuckoo hashing.
*
* Copyright (C) 2010, Alexander Iliev <[email protected]>
*
* All rights reserved.
*
* This code is released under a BSD license.
* Please see LICENSE.txt for the full license and disclaimers.
*
*/
package cuckoo
import org.scalacheck.Commands
import org.scalacheck.Prop
import org.scalacheck.Gen
import org.scalacheck.Gen._
import org.scalacheck.Arbitrary._
import scala.collection.immutable.HashMap
import scala.collection.immutable.Map
import scala.collection.mutable.{Map => MutableMap}
import java.lang.Long
/** ScalaCheck specification for a mutable Scala map. */
class MapSpecification (htable:MutableMap[Long,Int])
extends Commands with util.Slf4JLogger
{
// This is our state type that encodes the abstract state. The abstract state
// should model all the features we need from the real state, the system
// under test. We should leave out all details that aren't needed for
// specifying our pre- and postconditions. The state type must be called
// State and be immutable.
case class State(mappings : Map[Long,Int])
// initialState should reset the system under test to a well defined
// initial state, and return the abstract version of that state.
def initialState() = {
htable.clear
State(new HashMap[Long,Int])
}
// We define our commands as subtypes of the traits Command or SetCommand.
// Each command must have a run method and a method that returns the new
// abstract state, as it should look after the command has been run.
// A command can also define a precondition that states how the current
// abstract state must look if the command should be allowed to run.
// Finally, we can also define a postcondition which verifies that the
// system under test is in a correct state after the command execution.
case class Update(key:Long, value:Int) extends Command {
debug(this.toString)
def run(s: State) = {
info("Doing " + this)
htable.update(key, value)
htable
}
def nextState(s: State) = State(s.mappings + ((key, value)))
postConditions += {
case (s0, s1, table:MutableMap[Long,Int]) =>
Prop.propBoolean(table.size == s1.mappings.size) :|
"Table size: expected %d actual %d".format(s1.mappings.size, table.size)
case _ => false
}
}
/** Insert a new entry. */
case class Insert(override val key:Long, override val value:Int)
extends Update(key,value)
{
preConditions += {
case (State(mappings)) => ! mappings.contains(key)
}
}
case class Get(key:Long) extends Command {
debug(this.toString)
def run(s: State) = { info("Doing " + this); htable.get(key) }
def nextState(s: State) = s
postConditions += {
case (s0, s1, r:Option[Int]) => r == s1.mappings.get(key)
case _ => false
}
}
/** Get on an absent key. */
case class GetAbsent(override val key:Long) extends Get(key) {
preConditions += {
case (State(mappings)) => ! mappings.contains(key)
}
}
case class Remove(key:Long) extends Command {
def run(s: State) = { info("Doing {}", this); htable -= key; htable }
def nextState(s: State) = State(s.mappings - key)
postConditions += {
case (s0, s1, table:MutableMap[Long,Int]) => ! table.contains(key)
case _ => false
}
postConditions += {
case (s0, s1, table:MutableMap[Long,Int]) => table.size == s1.mappings.size
case _ => false
}
}
// This is our command generator. Given an abstract state, the generator
// should return a command that is allowed to run in that state. Note that
// it is still necessary to define preconditions on the commands if there
// are any. The generator is just giving a hint of which commands that are
// suitable for a given state, the preconditions will still be checked before
// a command runs. Sometimes you may want to adjust the distribution of
// your command generator according to the state, or do other calculations
// based on the state.
def genCommand(s: State): Gen[Command] =
Gen.frequency( (10, genCommand_get(s.mappings)),
(10, genCommand_put(s.mappings)),
(1, genCommand_remove(s.mappings)) )
def genCommand_get (mappings:Map[Long,Int]) : Gen[Get] =
frequency ( (19, genExistingKey(mappings).map(Get)),
(1, genKey.map(GetAbsent)) )
def genCommand_put (mappings:Map[Long,Int]) : Gen[Update] =
frequency ( (9, randInsert),
(1, randUpdate(mappings)) )
def genCommand_remove (mappings:Map[Long,Int]) : Gen[Remove] =
frequency ( (19, genExistingKey(mappings).map(Remove)),
(1, genKey.map(Remove)) )
def randInsert = for {
key <- genKey
value <- genValue
} yield (Insert(key,value))
// scalacheck doesn't want to work with the whole number range, so trimming it some.
def genKey = choose(Math.MIN_LONG/4, Math.MAX_LONG/2).map(Long.valueOf)
def genValue = choose(Math.MIN_INT/4, Math.MAX_INT/2)
/** An update of a random existing key */
def randUpdate (mappings:Map[Long,Int]) = for {
key <- genExistingKey(mappings)
value <- genValue
} yield (Update(key,value))
/** A random key from the given map */
def genExistingKey[T:ClassManifest] (mappings:Map[T,_]) : Gen[T] = {
val arr = mappings.keySet.toArray
oneOf(arr)
}
// a variation of Gen.oneOf
def oneOf[T](gs: Array[T]) : Gen[T] = if(gs.isEmpty) fail else for {
i <- choose(0,gs.length-1)
x <- gs(i)
} yield x
}
| ailiev/cuckooScala | src/test/scala/cuckoo/MapSpecification.scala | Scala | bsd-2-clause | 5,641 |
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js Test Suite        **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013, LAMP/EPFL        **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */
package org.scalajs.testsuite.compiler
import scala.annotation.tailrec
import scala.scalajs.js
import org.junit.Test
import org.junit.Assert._
import org.scalajs.testsuite.utils.AssertThrows._
class RegressionTest {
import RegressionTest._
@Test def `Wrong_division_conversion_(7_/_2.0)_issue_18`(): Unit = {
val div = 7 / 2.0
assertEquals(3.5, div)
assertEquals("double", div.getClass.getName)
val mod = 7 % 2.0
assertEquals(1.0, mod)
assertEquals("double", mod.getClass.getName)
}
@Test def Abort_with_some_pattern_match_guards_issue_22(): Unit = {
object PatternMatchGuards {
def go(f: Int => Int): Int = f(1)
def main(): Unit = {
go {
case x if false => x
}
}
}
// Nothing to check
}
@Test def Bad_encoding_for_characters_spanning_2_UTF_16_chars_issue_23(): Unit = {
val str = "A∀\\uD835\\uDCAB"
var s: String = ""
for (c <- str) {
val code: Int = c
s = s + code + " "
}
assertEquals("65 8704 55349 56491 ", s)
}
@Test def String_concatenation_with_null_issue_26(): Unit = {
val x: Object = null
assertEquals("nullcheck", x + "check")
}
@Test def should_emit_static_calls_when_forwarding_to_another_constructor_issue_66(): Unit = {
new Bug66B("", "")
}
@Test def should_not_swallow_Unit_expressions_when_converting_to_js_Any_issue_83(): Unit = {
var effectHappened = false
def doEffect(): Unit = effectHappened = true
def f(): js.Any = doEffect()
f()
assertTrue(effectHappened)
}
@Test def should_correctly_call_subSequence_on_non_string_CharSequences_issue_55(): Unit = {
val arr: CharSequence = Array('a','b','c','d')
val ss = arr.subSequence(2,3)
assertEquals(1, ss.length())
assertEquals('c', ss.charAt(0))
}
@Test def should_correctly_concat_primitive_values_to_strings_issue_113(): Unit = {
assertEquals("4foo", 4 + "foo")
assertEquals("afoo", 'a' + "foo")
}
@Test def should_resolve_overloads_on_scala_Function_apply_when_converting_to_js_Function_issue_125(): Unit = {
class Fct extends Function1[Int, Any] {
def apply(n: Int): Int = n
}
val scalaFunction = new Fct
val jsFunction: js.Any = scalaFunction
val thisFunction: js.ThisFunction = scalaFunction
}
@Test def should_correctly_dispatch_calls_on_private_functions_issue_165(): Unit = {
class A {
private def x: Int = 1
def value: Int = x
}
class B extends A {
private def x: Int = 2
}
assertEquals(1, new B().value)
}
@Test def should_correctly_mangle_JavaScript_reserved_identifiers_issue_153(): Unit = {
// scalastyle:off class.name
// Class name
class break {
// class variable
var continue: Int = 1
// method name
def switch: Int = {
// local name
val default = 2
default
}
}
trait Foo {
// static member (through mixin)
def function: Int = 3
}
val x = new break with Foo
assertEquals(1, x.continue)
assertEquals(2, x.switch)
assertEquals(3, x.function)
// scalastyle:on class.name
}
@Test def should_correctly_mangle_identifiers_starting_with_a_digit_issue_153(): Unit = {
// scalastyle:off class.name
// Class name
class `0` {
// class variable
var `1`: Int = 1
// method name
def `2`: Int = {
// local name
val `22` = 2
`22`
}
}
trait Foo {
// static member (through mixin)
def `3`: Int = 3
}
val x = new `0` with Foo
assertEquals(1, x.`1`)
assertEquals(2, x.`2`)
assertEquals(3, x.`3`)
// scalastyle:on class.name
}
@Test def should_reserve_eval_and_arguments_issue_743(): Unit = {
val eval = 5
assertEquals(5, eval)
val arguments = "hello"
assertEquals("hello", arguments)
}
@Test def should_support_class_literals_for_existential_value_types_issue_218(): Unit = {
assertEquals("org.scalajs.testsuite.compiler.RegressionTest$Bug218Foo",
scala.reflect.classTag[Bug218Foo[_]].toString)
}
@Test def should_support_Buffer_issue_268(): Unit = {
val a = scala.collection.mutable.Buffer.empty[Int]
a.insert(0, 0)
a.remove(0)
for (i <- 0 to 10) {
a.insert(a.length / 2, i)
}
assertEquals("1, 3, 5, 7, 9, 10, 8, 6, 4, 2, 0", a.mkString(", "))
}
@Test def should_not_call_equals_when_comparing_with_a_literal_null_issue_362(): Unit = {
// scalastyle:off equals.hash.code
class A {
override def equals(x: Any): Boolean = !(this == null)
}
// scalastyle:on equals.hash.code
val x = new A
val y = new A
// If the null comparisons actually call equals, the following two will
// cause infinite recursion
assertEquals(y, x)
assertEquals(x, y)
}
@Test def should_unbox_null_to_the_zero_of_types_issue_674(): Unit = {
class Box[A] {
var value: A = _
}
def zero[A]: A = new Box[A].value
/* Note: the same shape of test for Unit does not work, but it seems to
* be a problem in scalac because it does not work on the JVM either.
*/
val bool = zero[Boolean]
assertTrue((bool: Any).isInstanceOf[Boolean])
assertEquals(false, bool) // scalastyle:ignore
val char = zero[Char]
assertTrue((char: Any).isInstanceOf[Char])
assertEquals('\u0000', char)
val byte = zero[Byte]
assertTrue((byte: Any).isInstanceOf[Byte])
assertEquals(0.toByte, byte)
val short = zero[Short]
assertTrue((short: Any).isInstanceOf[Short])
assertEquals(0.toShort, short)
val int = zero[Int]
assertTrue((int: Any).isInstanceOf[Int])
assertEquals(0, int)
val long = zero[Long]
assertTrue((long: Any).isInstanceOf[Long])
assertEquals(0L, long)
val float = zero[Float]
assertTrue((float: Any).isInstanceOf[Float])
assertEquals(0.0f, float)
val double = zero[Double]
assertTrue((double: Any).isInstanceOf[Double])
assertEquals(0.0, double)
val ref = zero[AnyRef]
assertEquals(null, ref)
}
@Test def Param_defs_in_tailrec_methods_should_be_considered_mutable_issue_825(): Unit = {
@tailrec
def foo(x: Int, y: Int): Unit = {
if (x < y) foo(y, x)
else {
assertEquals(4, x)
assertEquals(2, y)
}
}
foo(2, 4)
}
@Test def null_synchronized_should_throw_issue_874(): Unit = {
assertThrows(classOf[NullPointerException], null.synchronized(5))
}
@Test def x_synchronized_should_preserve_side_effects_of_x(): Unit = {
var c = 0
def x: RegressionTest.this.type = { c += 1; this }
assertEquals(5, x.synchronized(5))
assertEquals(1, c)
}
@Test def IR_checker_should_allow_Apply_Select_on_NullType_and_NothingType_issue_1123(): Unit = {
def giveMeANull(): Null = null
assertThrows(classOf[Exception], (giveMeANull(): StringBuilder).append(5))
assertThrows(classOf[Exception], (giveMeANull(): scala.runtime.IntRef).elem)
def giveMeANothing(): Nothing = sys.error("boom")
assertThrows(classOf[Exception], (giveMeANothing(): StringBuilder).append(5))
assertThrows(classOf[Exception], (giveMeANothing(): scala.runtime.IntRef).elem)
}
@Test def should_not_put_bad_flags_on_caseaccessor_export_forwarders_issue_1191(): Unit = {
// This test used to choke patmat
@scala.scalajs.js.annotation.JSExportAll
case class T(one: Int, two: Int)
val T(a, b) = T(1, 2)
assertEquals(1, a)
assertEquals(2, b)
}
@Test def should_properly_order_ctor_statements_when_inlining_issue_1369(): Unit = {
trait Bar {
def x: Int
var y = x + 1
}
@inline
class A(var x: Int) extends Bar
val obj = new A(1)
assertEquals(1, obj.x)
assertEquals(2, obj.y)
}
@Test def should_not_restrict_mutability_of_fields_issue_1021(): Unit = {
class A {
/* This var is referred to in the lambda passed to `foreach`. Therefore
* it is altered in another compilation unit (even though it is
* private[this]).
* This test makes sure the compiler doesn't wrongly mark it as
* immutable because it is not changed in its compilation unit itself.
*/
private[this] var x: Int = 1
def get: Int = x
def foo(): Unit =
Seq(2).foreach(x = _)
}
val a = new A()
assertEquals(1, a.get)
a.foo()
assertEquals(2, a.get)
}
@Test def should_populate_desugar_environments_with_Closure_params_issue_1399(): Unit = {
/* To query whether a field is mutable, the JSDesugar needs to first
* unnest a statement block from an argument list, and then unnest the
* parameter under test.
* It will then test, if it is immutable, which will trigger an
* environment lookup.
*/
// We need a true class for @noinline to work
class Test {
@noinline
def concat(x: Any, y: Any): String = x.toString + y.toString
@noinline
def fct: Function1[Any, String] = { (v: Any) => // parameter under test
/* Pass `v` as a first parameter, a true block as a second parameter.
* Note that this only works after optimizations, because `v` is first
* asInstanceOf'd to Object, and hence it is not the original `v` that is used
* in the call itself.
* The optimizer eliminates the useless asInstanceOf.
*/
concat(v, {
// This must be a true block
var x = 1
while (x < 5) x += 1
x
})
}
}
assertEquals("15", new Test().fct(1))
}
@Test def should_support_debugger_statements_through_the_whole_pipeline_issue_1402(): Unit = {
// A function that hopefully persuades the optimizer not to optimize:
// we need a debugger statement that is unreachable, but not eliminated
@noinline
class A(var z: Int = 4) {
var x: Int = _
var y: Int = _
@noinline
def plus(x0: Int, y0: Int): Int = {
x = x0
y = y0
var res = 0
while (x > 0 || y > 0 || z > 0) {
if (x > 0) x -= 1
else if (y > 0) y -= 1
else z -= 1
res += 1
}
res
}
}
if (new A().plus(5, 10) < 3)
js.debugger()
}
@Test def should_not_cause_Closure_to_crash_with_Unexpected_variable_NaN_issue_1469(): Unit = {
/* Basically we want to make sure that a specialized bridge of Function1
* taking and returning Double is emitted (and not dce'ed) for this
* class F, which actually returns Unit.
* This, after optimizations, causes something like
* +(apply__V(x), (void 0))
* to be emitted (inlining the bridge returning Any into the bridge
* returning Double).
* This in turn causes Closure to constant fold +(void 0) into NaN,
* which used to trigger the
* Internal Compiler Error: Unexpected variable NaN
* Note that we *cannot* actually call that bridge on F, because we would
* run into undefined behavior! So we have another function that actually
* returns a Double, and we use it to make sure that
* Function1.apply(Double)Double is reachable, which will make it
* reachable also for F.
*/
class F extends Function1[Any, Unit] {
def apply(x: Any): Unit =
assertEquals(5, x.asInstanceOf[js.Any])
}
// Make sure the specialized Function1.apply(Double)Double is reachable.
@noinline def makeFun(y: Double): Double => Double = {
val z = y + 1.5
((x: Double) => x * z): (Double => Double)
}
val someDoubleFun = makeFun(2.0)
assertEquals(147.0, someDoubleFun(42.0))
// Make sure F itself is reachable and not completely inlineable
@noinline def makeF: Any => Any = (() => new F)()
val f = makeF
f(5)
}
@Test def switch_match_with_2_guards_for_the_same_value_issue_1589(): Unit = {
@noinline def genB(): Int = 0xE1
val b = genB()
val x = b >> 4 match {
case 0xE if b == 0xE0 =>
4
case 0xE if b == 0xE1 =>
5
}
assertEquals(5, x)
}
@Test def switch_match_with_a_guard_and_a_result_type_of_BoxedUnit_issue_1955(): Unit = {
val bug = new Bug1955
bug.bug(2, true)
assertEquals(0, bug.result)
bug.bug(1, true)
assertEquals(579, bug.result)
assertThrows(classOf[MatchError], bug.bug(2, false))
}
@Test def null_asInstanceOf_Unit_should_succeed_issue_1691(): Unit = {
def getNull(): Any = null
val x = getNull().asInstanceOf[Unit]: Any
assertNull(x.asInstanceOf[js.Any])
}
@Test def lambda_parameter_with_a_dash_issue_1790(): Unit = {
val f = (`a-b`: Int) => `a-b` + 1
assertEquals(6, f(5))
}
@Test def nested_labeled_block_sort_circuit_returns_issue_2307(): Unit = {
class UnsafeCrud(i: Int) {
def unsafeUpdate(l: List[Any], i: Int, f: Any => Any): (List[Any], Any) = {
def loop(l: List[Any], i: Int, prefix: List[Any]): (List[Any], List[Any], Any) = {
l match {
case hd :: (tl: List[Any]) =>
if (i == 0) (prefix, f(hd) :: tl, hd)
else loop(tl, i - 1, hd :: prefix)
case _ =>
throw new Exception("...")
}
}
val loopR = loop(l, i, Nil)
val prefix = loopR._1
val v = loopR._3
(prefix, v)
}
def apply(l: List[Any], f: Any => Any): (List[Any], Any) =
unsafeUpdate(l, i, f)
}
val r = 10 :: "foo" :: 'x' :: 42 :: Nil
val result = new UnsafeCrud(0).apply(r, _ => "newStr")
assertEquals((Nil, 10), result)
}
}
object RegressionTest {
class Bug218Foo[T](val x: T) extends AnyVal
class Bug66A(s: String, e: Object) {
def this(e: Object) = this("", e)
def this(s: String) = this(s, "")
}
class Bug66B(s: String, e: Object) extends Bug66A(s)
class Bug1955 {
var result: Int = 0
def doSomething[A](a: Int, b: Int, r: A): A = {
result = a + b
r
}
def bug(x: Int, e: Boolean): Unit = {
x match {
case 1 => doSomething(123, 456, ())
case 2 if e =>
}
if (false) ()
}
}
}
| japgolly/scala-js | test-suite/js/src/test/scala/org/scalajs/testsuite/compiler/RegressionTest.scala | Scala | bsd-3-clause | 14,688 |
package slamdata.engine
import org.threeten.bp.{Instant, Duration}
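/** A runtime data value; every variant reports the engine Type that describes it. */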
sealed trait Data {
def dataType: Type
}
object Data {
case object Null extends Data {
def dataType = Type.Null
}
case class Str(value: String) extends Data {
def dataType = Type.Str
}
sealed trait Bool extends Data {
def dataType = Type.Bool
}
object Bool extends (Boolean => Bool) {
def apply(value: Boolean): Bool = if (value) True else False
def unapply(value: Bool): Option[Boolean] = value match {
case True => Some(true)
case False => Some(false)
}
}
case object True extends Bool
case object False extends Bool
sealed trait Number extends Data
object Number {
def unapply(value: Data): Option[BigDecimal] = value match {
case Int(value) => Some(BigDecimal(value))
case Dec(value) => Some(value)
case _ => None
}
}
case class Dec(value: BigDecimal) extends Number {
def dataType = Type.Dec
}
case class Int(value: BigInt) extends Number {
def dataType = Type.Int
}
case class Obj(value: Map[String, Data]) extends Data {
def dataType = (value.map {
case (name, data) => Type.NamedField(name, data.dataType)
}).foldLeft[Type](Type.Top)(_ & _)
}
case class Arr(value: List[Data]) extends Data {
def dataType = (value.zipWithIndex.map {
case (data, index) => Type.IndexedElem(index, data.dataType)
}).foldLeft[Type](Type.Top)(_ & _)
}
case class Set(value: List[Data]) extends Data {
def dataType = (value.headOption.map { head =>
value.tail.map(_.dataType).foldLeft(head.dataType)(Type.lub _)
}).getOrElse(Type.Bottom) // TODO: ???
}
case class DateTime(value: Instant) extends Data {
def dataType = Type.DateTime
}
case class Interval(value: Duration) extends Data {
def dataType = Type.Interval
}
case class Binary(value: Array[Byte]) extends Data {
def dataType = Type.Binary
}
}
| sellout/slamengine-old | src/main/scala/slamdata/engine/data.scala | Scala | agpl-3.0 | 1,954 |
package fr.atelechev.chess.fen2cb.style
import org.scalatest.FlatSpec
import org.scalatest.Matchers.be
import org.scalatest.Matchers.convertToAnyShouldWrapper
import java.nio.file.Paths
import fr.atelechev.util.ExpectedExceptionCatcher
class DiagramPropertiesTestSuite extends FlatSpec
with ExpectedExceptionCatcher
with StyleFolderAccessor {
"constructor()" should "fill the properties with default values" in {
val props = new DiagramProperties
props.get(DiagramProperty.RENDER_TYPE) should be ("raster");
props.get(DiagramProperty.OVERLAY_X) should be ("");
props.get(DiagramProperty.OVERLAY_Y) should be ("");
props.get(DiagramProperty.PADDING_TOP) should be ("0");
props.get(DiagramProperty.PADDING_LEFT) should be ("0");
}
"constructor(Path)" should "read the properties from the specified file" in {
val file = getFolderForStyle("custom_properties").resolve("diagram.properties")
val props = new DiagramProperties(file)
props.get(DiagramProperty.RENDER_TYPE) should be ("custom");
props.get(DiagramProperty.OVERLAY_X) should be ("300");
props.get(DiagramProperty.OVERLAY_Y) should be ("200");
props.get(DiagramProperty.PADDING_TOP) should be ("10");
props.get(DiagramProperty.PADDING_LEFT) should be ("100");
}
it should "throw IllegalArgumentException if the arg is null" in {
catchExpected(classOf[IllegalArgumentException], () => {
new DiagramProperties(null)
})
}
it should "throw IllegalStateException if the fiel of the does not exist" in {
catchExpected(classOf[IllegalStateException], () => {
new DiagramProperties(getFolderForStyle("custom_properties").resolve("inexistent"))
})
}
} | atelechev/fen2chessboard | components/core/src/test/scala/fr/atelechev/chess/fen2cb/style/DiagramPropertiesTestSuite.scala | Scala | mit | 1,727 |
/*
* Copyright 2016 Nikolay Smelik
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalabot.common.message.incoming
import scalabot.common.chat.Chat
/**
* Created by Nikolay.Smelik on 7/13/2016.
*/
trait IncomingMessage {
def sender: Chat
}
trait SourceMessage
| kerzok/ScalaBot | BotApi/src/main/scala/scalabot/common/message/incoming/IncomingMessage.scala | Scala | apache-2.0 | 795 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.ml
// $example on$
import org.apache.spark.ml.feature.{HashingTF, IDF, Tokenizer}
// $example off$
import org.apache.spark.sql.SparkSession
object TfIdfExample {
def main(args: Array[String]) {
val spark = SparkSession
.builder
.appName("TfIdfExample")
.getOrCreate()
// $example on$
val sentenceData = spark.createDataFrame(Seq(
(0, "Hi I heard about Spark"),
(0, "I wish Java could use case classes"),
(1, "Logistic regression models are neat")
)).toDF("label", "sentence")
val tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
val wordsData = tokenizer.transform(sentenceData)
val hashingTF = new HashingTF()
.setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(20)
val featurizedData = hashingTF.transform(wordsData)
// alternatively, CountVectorizer can also be used to get term frequency vectors
val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
val idfModel = idf.fit(featurizedData)
val rescaledData = idfModel.transform(featurizedData)
rescaledData.select("features", "label").take(3).foreach(println)
// $example off$
spark.stop()
}
}
// scalastyle:on println
| mrchristine/spark-examples-dbc | src/main/scala/org/apache/spark/examples/ml/TfIdfExample.scala | Scala | apache-2.0 | 2,103 |
package sml.instructions
import sml.Machine
/**
* Multiply the values of 2 registers and store
* result in specified register
*/
case class MulInstruction(label: String, opcode: String, result: Int, op1: Int, op2: Int) extends MathInstruction {
/**
* @see Instruction#execute(m: Machine)
*/
override def execute(m: Machine): Unit =
m.regs(result) = m.regs(op1) * m.regs(op2)
/**
* @see Instruction#toString()
*/
override def toString: String =
super.toString + s" $op1 * $op2 to $result \\n"
}
object MulInstruction {
def apply(label: String, result: Int, op1: Int, op2: Int): MathInstruction =
new MulInstruction(label, "mul", result, op1, op2)
}
| BBK-PiJ-2015-67/sdp-portfolio | coursework/cw-one/src/main/scala/sml/instructions/MulInstruction.scala | Scala | unlicense | 696 |
/**
* Copyright 2011-2012 eBusiness Information, Groupe Excilys (www.excilys.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.excilys.ebi.gatling.recorder.ui.component
import java.awt.event.{ ActionListener, ActionEvent }
import java.nio.charset.Charset
import com.excilys.ebi.gatling.core.util.StringHelper.trimToOption
import com.excilys.ebi.gatling.recorder.config.Configuration
import com.excilys.ebi.gatling.recorder.config.Configuration.configuration
import com.excilys.ebi.gatling.recorder.controller.RecorderController
import com.excilys.ebi.gatling.recorder.ui.enumeration.FilterStrategy.FilterStrategy
import com.excilys.ebi.gatling.recorder.ui.frame.ConfigurationFrame
import grizzled.slf4j.Logging
import javax.swing.JTextField
class SaveConfigurationListener(controller: RecorderController, configurationFrame: ConfigurationFrame) extends ActionListener with Logging {
def actionPerformed(e: ActionEvent) {
// validate filters
configurationFrame.tblFilters.validateCells
// Parse local proxy port
configuration.port = configurationFrame.txtPort.getText.toInt
// Parse local ssl proxy port
configuration.sslPort = configurationFrame.txtSslPort.getText.toInt
configuration.proxy.host = trimToOption(configurationFrame.txtProxyHost.getText)
if (!configuration.proxy.host.isEmpty) {
// Parse outgoing proxy port
configuration.proxy.port = Some(configurationFrame.txtProxyPort.getText.toInt)
// Parse outgoing ssl proxy port
configuration.proxy.sslPort = Some(configurationFrame.txtProxySslPort.getText.toInt)
configuration.proxy.username = trimToOption(configurationFrame.txtProxyUsername.getText)
configuration.proxy.password = trimToOption(configurationFrame.txtProxyPassword.getText)
}
configuration.filterStrategy = configurationFrame.cbFilterStrategies.getSelectedItem.asInstanceOf[FilterStrategy]
// Set urls filters
configuration.patterns = (for (i <- 0 until configurationFrame.tblFilters.getRowCount) yield configurationFrame.tblFilters.getPattern(i)).toList
// Check if a directory was entered
configuration.outputFolder = configurationFrame.txtOutputFolder.getText.trim
configuration.saveConfiguration = configurationFrame.chkSavePref.isSelected
configuration.followRedirect = configurationFrame.chkFollowRedirect.isSelected
configuration.automaticReferer = configurationFrame.chkAutomaticReferer.isSelected
// set selected encoding
configuration.encoding = classOf[Charset].cast(configurationFrame.cbOutputEncoding.getSelectedItem).name
configuration.simulationPackage = trimToOption(configurationFrame.txtSimulationPackage.getText)
configuration.simulationClassName = configurationFrame.txtSimulationClassName.getText.trim
if (configuration.saveConfiguration)
Configuration.saveToDisk
debug(configuration)
controller.startRecording
}
}
| Tjoene/thesis | Case_Programs/gatling-1.4.0/gatling-recorder/src/main/scala/com/excilys/ebi/gatling/recorder/ui/component/SaveConfigurationListener.scala | Scala | gpl-2.0 | 3,382 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.la
import cc.factorie.util.DoubleSeq
//trait SingletonTensor extends SparseTensor with SparseDoubleSeq with ReadOnlyTensor {
//}
trait SingletonTensor extends SparseTensor with ReadOnlyTensor {
def singleIndex: Int
def singleValue: Double
val activeDomainSize = 1
def sizeHint(size: Int): Unit = { }
def _makeReadable(): Unit = { }
def _unsafeActiveDomainSize: Int = 1
def _indices: Array[Int] = Array(singleIndex)
}
trait SingletonIndexedTensor extends SparseIndexedTensor with SingletonTensor {
def _values: Array[Double] = Array(singleValue)
def copyInto(t: SparseIndexedTensor): Unit = t(singleIndex) = singleValue
//def activeDomain: IntSeq = new SingletonIntSeq(singleIndex) // Can't be here and in Tensor1
override def apply(i:Int) = if (i == singleIndex) singleValue else 0.0
override def foreachActiveElement(f:(Int,Double)=>Unit): Unit = f(singleIndex, singleValue)
override def activeElements: Iterator[(Int,Double)] = Iterator.single((singleIndex, singleValue))
override def forallActiveElements(f:(Int,Double)=>Boolean): Boolean = f(singleIndex, singleValue)
override def =+(a:Array[Double], offset:Int, f:Double): Unit = a(offset+singleIndex) += f * singleValue
override def sum: Double = singleValue
override def max: Double = if (singleValue > 0.0) singleValue else 0.0
override def min: Double = if (singleValue < 0.0) singleValue else 0.0
override def maxIndex: Int = if (singleValue >= 0.0) singleIndex else if (singleIndex != 0) 0 else 1
override def containsNaN: Boolean = false
//override def dot(v:DoubleSeq): Double = v(singleIndex) * singleValue
//override def copy: SingletonTensor = this // immutable, but careful in the future we might make a mutable version
override def dot(t:DoubleSeq): Double = t match {
case t:SingletonBinaryTensor => if (singleIndex == t.singleIndex) singleValue else 0.0
case t:SingletonTensor => if (singleIndex == t.singleIndex) singleValue * t.singleValue else 0.0
case t:DoubleSeq => t(singleIndex) * singleValue
}
}
| patverga/factorie | src/main/scala/cc/factorie/la/SingletonTensor.scala | Scala | apache-2.0 | 2,811 |
/*
* Copyright 2015 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.sbtheader
import sbt.{ AutoPlugin, Compile, Configuration, Def, Plugins, Setting, Test, inConfig }
import sbt.Keys.compile
/**
* Enable this plugin to automate header creation/update on compile. By default the `Compile` and
* `Test` configurations are considered; use
* [[AutomateHeaderPlugin.autoImport.automateHeaderSettings]] to add further ones.
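* For example (illustrative sbt usage): `project.enablePlugins(AutomateHeaderPlugin).settings(automateHeaderSettings(IntegrationTest))`.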
*/
object AutomateHeaderPlugin extends AutoPlugin {
final object autoImport {
def automateHeaderSettings(configurations: Configuration*): Seq[Setting[_]] =
configurations.foldLeft(List.empty[Setting[_]]) {
_ ++ inConfig(_)(compile := compile.dependsOn(HeaderPlugin.autoImport.headerCreate).value)
}
}
override def requires: Plugins =
HeaderPlugin
override def projectSettings: Seq[Def.Setting[_]] =
autoImport.automateHeaderSettings(Compile, Test)
}
| sbt/sbt-header | src/main/scala/de/heikoseeberger/sbtheader/AutomateHeaderPlugin.scala | Scala | apache-2.0 | 1,480 |
import scala.reflect.runtime.universe._
object Test {
def foo[T](x: T)(implicit m: TypeTag[T]) {
foo(List(x))
}
foo(1)
foo("abc")
foo(List(1, 2, 3))
val x: List[Int] with Ordered[List[Int]] = null
foo(x)
foo[x.type](x)
abstract class C { type T = String; val x: T }
val c = new C { val x = "abc" }
foo(c.x)
abstract class D { type T; implicit val m: TypeTag[T]; val x: T }
val stringm = implicitly[TypeTag[String]]
val d: D = new D { type T = String; val m = stringm; val x = "x" }
import d.m
foo(d.x)
} | loskutov/intellij-scala | testdata/scalacTests/pos/manifest1-new.scala | Scala | apache-2.0 | 540 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.it.http
import com.typesafe.config.ConfigFactory
import play.api.http.{ FlashConfiguration, SecretConfiguration }
import play.api.libs.crypto.CookieSignerProvider
import play.api.{ BuiltInComponentsFromContext, Configuration, NoHttpFiltersComponents }
import play.api.test._
import play.api.mvc._
import play.api.mvc.Results._
import play.api.libs.ws.{ DefaultWSCookie, WSClient, WSCookie, WSResponse }
import play.api.routing.Router
import play.core.server.Server
import play.it._
class NettyFlashCookieSpec extends FlashCookieSpec with NettyIntegrationSpecification
class AkkaHttpFlashCookieSpec extends FlashCookieSpec with AkkaHttpIntegrationSpecification
trait FlashCookieSpec extends PlaySpecification with ServerIntegrationSpecification with WsTestClient {
sequential
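// Boots an embedded test server with the routes defined below (plus any extra configuration)
// and passes a WSClient bound to that server into the test block.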
def withClientAndServer[T](additionalConfiguration: Map[String, String] = Map.empty)(block: WSClient => T) = {
Server.withApplicationFromContext() { context =>
new BuiltInComponentsFromContext(context) with NoHttpFiltersComponents {
import play.api.routing.sird.{ GET => SirdGet, _ }
import scala.collection.JavaConverters._
override def configuration: Configuration = super.configuration ++ new Configuration(ConfigFactory.parseMap(additionalConfiguration.asJava))
override def router: Router = Router.from {
case SirdGet(p"/flash") => defaultActionBuilder {
Redirect("/landing").flashing(
"success" -> "found"
)
}
case SirdGet(p"/set-cookie") => defaultActionBuilder {
Ok.withCookies(Cookie("some-cookie", "some-value"))
}
case SirdGet(p"/landing") => defaultActionBuilder {
Ok("ok")
}
}
}.application
} { implicit port =>
withClient(block)
}
}
lazy val flashCookieBaker: FlashCookieBaker = new DefaultFlashCookieBaker()
def readFlashCookie(response: WSResponse): Option[WSCookie] =
response.cookie(flashCookieBaker.COOKIE_NAME)
"the flash cookie" should {
"can be set for one request" in withClientAndServer() { ws =>
val response = await(ws.url("/flash").withFollowRedirects(follow = false).get())
response.status must equalTo(SEE_OTHER)
val flashCookie = readFlashCookie(response)
flashCookie must beSome.like {
case cookie =>
cookie.maxAge must beNone
}
}
"be removed after a redirect" in withClientAndServer() { ws =>
val response = await(ws.url("/flash").get())
response.status must equalTo(OK)
val flashCookie = readFlashCookie(response)
flashCookie must beSome.like {
case cookie =>
cookie.value must ===("")
cookie.maxAge must beSome(0L)
}
}
"allow the setting of additional cookies when cleaned up" in withClientAndServer() { ws =>
val response = await(ws.url("/flash").withFollowRedirects(false).get())
val Some(flashCookie) = readFlashCookie(response)
val response2 = await(ws.url("/set-cookie")
.addCookies(DefaultWSCookie(flashCookie.name, flashCookie.value))
.get())
readFlashCookie(response2) must beSome.like {
case cookie => cookie.value must ===("")
}
response2.cookie("some-cookie") must beSome.like {
case cookie =>
cookie.value must ===("some-value")
}
}
"honor the configuration for play.http.flash.sameSite" in {
"configured to null" in withClientAndServer(Map("play.http.flash.sameSite" -> null)) { ws =>
val response = await(ws.url("/flash").withFollowRedirects(follow = false).get())
response.status must equalTo(SEE_OTHER)
response.header(SET_COOKIE) must beSome.which(!_.contains("SameSite"))
}
"configured to lax" in withClientAndServer(Map("play.http.flash.sameSite" -> "lax")) { ws =>
val response = await(ws.url("/flash").withFollowRedirects(follow = false).get())
response.status must equalTo(SEE_OTHER)
response.header(SET_COOKIE) must beSome.which(_.contains("SameSite=Lax"))
}
"configured to strict" in withClientAndServer(Map("play.http.flash.sameSite" -> "strict")) { ws =>
val response = await(ws.url("/flash").withFollowRedirects(follow = false).get())
response.status must equalTo(SEE_OTHER)
response.header(SET_COOKIE) must beSome.which(_.contains("SameSite=Strict"))
}
}
"honor configuration for flash.secure" in {
"configured to true" in Helpers.running(_.configure("play.http.flash.secure" -> true)) { _ =>
val secretConfig = SecretConfiguration()
val fcb: FlashCookieBaker = new DefaultFlashCookieBaker(
FlashConfiguration(secure = true),
secretConfig,
new CookieSignerProvider(secretConfig).get
)
fcb.encodeAsCookie(Flash()).secure must beTrue
}
"configured to false" in Helpers.running(_.configure("play.http.flash.secure" -> false)) { _ =>
val secretConfig = SecretConfiguration()
val fcb: FlashCookieBaker = new DefaultFlashCookieBaker(
FlashConfiguration(secure = false),
secretConfig,
new CookieSignerProvider(secretConfig).get
)
fcb.encodeAsCookie(Flash()).secure must beFalse
}
}
}
}
| Shruti9520/playframework | framework/src/play-integration-test/src/test/scala/play/it/http/FlashCookieSpec.scala | Scala | apache-2.0 | 5,424 |
/**
* (C) Copyright IBM Corp. 2015 - 2017
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.ibm.sparktc.sparkbench.workload.ml
import com.ibm.sparktc.sparkbench.testfixtures.SparkSessionProvider
import com.ibm.sparktc.sparkbench.workload.ConfigCreator
import org.scalatest.{FlatSpec, Matchers}
class LogisticRegressionWorkloadTest extends FlatSpec with Matchers {
private implicit val spark = SparkSessionProvider.spark
private val cfg = Map(
"name" -> "lr-bml",
"input" -> "cli/src/test/resources/lr-bml/lr-train.csv",
"testfile" -> "cli/src/test/resources/lr-bml/lr-test.csv"
)
private var lr: LogisticRegressionWorkload = _
private val input = s"${cfg("input")}"
private val testFile = s"${cfg("testfile")}"
"ConfigCreator" should "create lr-bml" in {
val workload = ConfigCreator.mapToConf(cfg)
workload shouldBe a [LogisticRegressionWorkload]
lr = workload.asInstanceOf[LogisticRegressionWorkload]
lr.input shouldBe cfg.get("input")
lr.testFile shouldBe cfg("testfile")
lr.cacheEnabled shouldBe true
lr.numPartitions shouldBe 32
}
"LogisticRegressionWorkload" should "load training file" in {
val dtrain = lr.load(input)
dtrain.count shouldBe 10
}
it should "load the test file" in {
val dtest = lr.load(testFile)
dtest.count shouldBe 100
}
"the ld method" should "split into 32 partitions by default" in {
val (_, ds) = lr.ld(testFile)
ds.rdd.getNumPartitions shouldBe 32
}
it should "partition accordingly" in {
val ncfg = cfg ++ Map("numpartitions" -> 48)
val workload = ConfigCreator.mapToConf(ncfg).asInstanceOf[LogisticRegressionWorkload]
val (_, ds) = workload.ld(testFile)
ds.rdd.getNumPartitions shouldBe 48
}
it should "cache by default" in {
val (_, ds) = lr.ld(input)
ds.storageLevel.useMemory shouldBe true
}
it should "disable caching" in {
val ncfg = cfg ++ Map("cacheenabled" -> false)
val workload = ConfigCreator.mapToConf(ncfg).asInstanceOf[LogisticRegressionWorkload]
val (_, ds) = workload.ld(input)
ds.storageLevel.useMemory shouldBe false
}
it should "enable caching" in {
val ncfg = cfg ++ Map("cacheenabled" -> true)
val workload = ConfigCreator.mapToConf(ncfg).asInstanceOf[LogisticRegressionWorkload]
val (_, ds) = workload.ld(input)
ds.storageLevel.useMemory shouldBe true
}
"doWorkload" should "do just that" in {
val (_, ds) = lr.ld(input)
val odf = lr.doWorkload(Some(ds), spark)
odf.count shouldBe 1
val r = odf.head
r.getAs[String]("name") shouldBe "lr-bml"
r.getAs[String]("input") shouldBe input
r.getAs[String]("test_file") shouldBe testFile
r.getAs[Long]("train_count") shouldBe 10L
r.getAs[Long]("test_count") shouldBe 100L
r.getAs[Double]("area_under_roc") shouldBe 0.615 +- 0.01
}
}
| SparkTC/spark-bench | cli/src/test/scala/com/ibm/sparktc/sparkbench/workload/ml/LogisticRegressionWorkloadTest.scala | Scala | apache-2.0 | 3,393 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.util
case class Table(name: String, header: Seq[String], rows: Seq[String], values: Seq[Seq[Any]]) {
def transpose = Table(name, rows, header, values.transpose)
/**
* Formats this table as CSV.
*/
def toCsv = {
val csv = new StringBuilder()
csv.append(name + "," + header.mkString(",") + "\n")
for((label, row) <- rows zip values)
csv.append(label + "," + row.mkString(",") + "\n")
csv.toString
}
/**
* Formats this table as textile.
*/
def toTextile = {
val sb = new StringBuilder()
sb.append("|_. Function and parameters |_. Name |_. Description |\\n")
for((label, row) <- rows zip values) {
sb.append("| " + label + " | " + row.mkString(" | ") + " |\\n")
}
sb.toString
}
/**
* Formats this table as latex.
*/
def toLatex = {
val sb = new StringBuilder()
    sb.append("\\begin{table}\n")
    sb.append("\\begin{tabular}{|l|" + header.map(_ => "c").mkString("|") + "|}\n")
    sb.append("\\hline\n")
    sb.append(" & " + header.mkString(" & ") + "\\\\\n")
    sb.append("\\hline\n")
    for((label, row) <- rows zip values)
      sb.append(label + " & " + row.mkString(" & ") + "\\\\\n")
    sb.append("\\hline\n")
    sb.append("\\end{tabular}\n")
    sb.append("\\caption{" + name + "}\n")
    sb.append("%\\label{}\n")
    sb.append("\\end{table}\n")
sb.toString
}
} | fusepoolP3/p3-silk | silk-core/src/main/scala/de/fuberlin/wiwiss/silk/util/Table.scala | Scala | apache-2.0 | 1,972 |
package lectures
package reductions
import org.scalameter._
import common._
object ArrayNorm {
@volatile var dummy: Int = 0
@volatile var dummy2: Int = 0
val logE = math.log(math.E)
def power(x: Int, p: Double): Int = {
math.exp(p * math.log(x) / logE).toInt // TODO <-- make everything doubles
}
def sumSegment(xs: Array[Int], p: Double, from: Int, until: Int): Int = {
var i = from
var s = 0
while (i < until) {
s += power(xs(i), p)
i += 1
}
s
}
def normSum(xs: Array[Int], p: Double): Int =
power(sumSegment(xs, p, 0, xs.size), 1.0 / p)
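  // Parallel variant: ranges smaller than `threshold` fall back to the sequential sumSegment;
  // larger ranges are split in half, the right half is evaluated in a fork/join task while the
  // current thread recurses on the left half.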
def fjSumSegment(xs: Array[Int], p: Double, from: Int, until: Int, threshold: Int): Int = {
if (until - from < threshold) {
sumSegment(xs, p, from, until)
} else {
val mid = (from + until) / 2
val right = task {
fjSumSegment(xs, p, mid, until, threshold)
}
val leftSum = fjSumSegment(xs, p, from, mid, threshold)
val rightSum = right.join()
leftSum + rightSum
}
}
def fjNormSum(xs: Array[Int], p: Double, threshold: Int): Int =
power(fjSumSegment(xs, p, 0, xs.length, threshold), 1.0 / p)
val standardConfig = config(
Key.exec.minWarmupRuns -> 10,
Key.exec.maxWarmupRuns -> 10,
Key.exec.benchRuns -> 10,
Key.verbose -> true
) withWarmer(new Warmer.Default)
def main(args: Array[String]) {
val p = 1.5
val xs = (0 until 2000000).map(_ % 100).toArray
val seqtime = standardConfig measure {
dummy = normSum(xs, p)
}
println(s"sequential sum time: $seqtime ms")
val threshold = 10000
val fjtime = standardConfig measure {
dummy2 = fjNormSum(xs, p, threshold)
}
println(s"values computed are $dummy vs $dummy2")
println(s"fork/join time: $fjtime ms")
println(s"speedup: ${seqtime/fjtime}")
}
}
| twistedgut/scala_coursera | parprog-snippets/src/main/scala/lectures/reductions/ArrayNorm.scala | Scala | gpl-3.0 | 1,850 |
package pme.connect4.gui.d2
import pme.connect4.gui.ConnectFourConfig
object ConnectFourConfig2D {
protected[d2] val paneSize = (800, 880)
  protected[d2] val gameSize = (784.0, 842.0)
protected[d2] val chipRadius = 40
protected[d2] val fieldWidth = ConnectFourConfig2D().fieldWidth
def apply() = new ConnectFourConfig2D
}
class ConnectFourConfig2D extends ConnectFourConfig {
protected def chipRadius = ConnectFourConfig2D.chipRadius
}
| pme123/scala-connect4 | src/main/scala/pme/connect4/gui/d2/ConnectFourConfig2D.scala | Scala | mit | 452 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable.{HashMap, HashSet}
import scala.concurrent.Future
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.{ExecutorAllocationClient, SparkEnv, SparkException, TaskState}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.executor.ExecutorLogUrlHandler
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Network._
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.rpc._
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.ENDPOINT_NAME
import org.apache.spark.util.{RpcUtils, SerializableBuffer, ThreadUtils, Utils}
/**
* A scheduler backend that waits for coarse-grained executors to connect.
* This backend holds onto each executor for the duration of the Spark job rather than relinquishing
* executors whenever a task is done and asking the scheduler to launch a new executor for
* each new task. Executors may be launched in a variety of ways, such as Mesos tasks for the
* coarse-grained Mesos mode or standalone processes for Spark's standalone deploy mode
* (spark.deploy.*).
*/
private[spark]
class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: RpcEnv)
extends ExecutorAllocationClient with SchedulerBackend with Logging {
// Use an atomic variable to track total number of cores in the cluster for simplicity and speed
protected val totalCoreCount = new AtomicInteger(0)
// Total number of executors that are currently registered
protected val totalRegisteredExecutors = new AtomicInteger(0)
protected val conf = scheduler.sc.conf
private val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf)
private val defaultAskTimeout = RpcUtils.askRpcTimeout(conf)
// Submit tasks only after (registered resources / total expected resources)
  // is equal to at least this value, which is a double between 0 and 1.
private val _minRegisteredRatio =
math.min(1, conf.get(SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO).getOrElse(0.0))
// Submit tasks after maxRegisteredWaitingTime milliseconds
// if minRegisteredRatio has not yet been reached
private val maxRegisteredWaitingTimeNs = TimeUnit.MILLISECONDS.toNanos(
conf.get(SCHEDULER_MAX_REGISTERED_RESOURCE_WAITING_TIME))
private val createTimeNs = System.nanoTime()
// Accessing `executorDataMap` in the inherited methods from ThreadSafeRpcEndpoint doesn't need
// any protection. But accessing `executorDataMap` out of the inherited methods must be
// protected by `CoarseGrainedSchedulerBackend.this`. Besides, `executorDataMap` should only
// be modified in the inherited methods from ThreadSafeRpcEndpoint with protection by
// `CoarseGrainedSchedulerBackend.this`.
private val executorDataMap = new HashMap[String, ExecutorData]
// Number of executors for each ResourceProfile requested by the cluster
// manager, [[ExecutorAllocationManager]]
@GuardedBy("CoarseGrainedSchedulerBackend.this")
private val requestedTotalExecutorsPerResourceProfile = new HashMap[ResourceProfile, Int]
private val listenerBus = scheduler.sc.listenerBus
// Executors we have requested the cluster manager to kill that have not died yet; maps
// the executor ID to whether it was explicitly killed by the driver (and thus shouldn't
// be considered an app-related failure). Visible for testing only.
@GuardedBy("CoarseGrainedSchedulerBackend.this")
private[scheduler] val executorsPendingToRemove = new HashMap[String, Boolean]
// Executors that have been lost, but for which we don't yet know the real exit reason.
private val executorsPendingLossReason = new HashSet[String]
// Executors which are being decommissioned. Maps from executorId to workerHost.
protected val executorsPendingDecommission = new HashMap[String, Option[String]]
// A map of ResourceProfile id to map of hostname with its possible task number running on it
@GuardedBy("CoarseGrainedSchedulerBackend.this")
protected var rpHostToLocalTaskCount: Map[Int, Map[String, Int]] = Map.empty
// The number of pending tasks per ResourceProfile id which is locality required
@GuardedBy("CoarseGrainedSchedulerBackend.this")
protected var numLocalityAwareTasksPerResourceProfileId = Map.empty[Int, Int]
// The num of current max ExecutorId used to re-register appMaster
@volatile protected var currentExecutorIdCounter = 0
// Current set of delegation tokens to send to executors.
private val delegationTokens = new AtomicReference[Array[Byte]]()
// The token manager used to create security tokens.
private var delegationTokenManager: Option[HadoopDelegationTokenManager] = None
private val reviveThread =
ThreadUtils.newDaemonSingleThreadScheduledExecutor("driver-revive-thread")
class DriverEndpoint extends IsolatedRpcEndpoint with Logging {
override val rpcEnv: RpcEnv = CoarseGrainedSchedulerBackend.this.rpcEnv
protected val addressToExecutorId = new HashMap[RpcAddress, String]
// Spark configuration sent to executors. This is a lazy val so that subclasses of the
// scheduler can modify the SparkConf object before this view is created.
private lazy val sparkProperties = scheduler.sc.conf.getAll
.filter { case (k, _) => k.startsWith("spark.") }
.toSeq
private val logUrlHandler: ExecutorLogUrlHandler = new ExecutorLogUrlHandler(
conf.get(UI.CUSTOM_EXECUTOR_LOG_URL))
override def onStart(): Unit = {
// Periodically revive offers to allow delay scheduling to work
val reviveIntervalMs = conf.get(SCHEDULER_REVIVE_INTERVAL).getOrElse(1000L)
reviveThread.scheduleAtFixedRate(() => Utils.tryLogNonFatalError {
Option(self).foreach(_.send(ReviveOffers))
}, 0, reviveIntervalMs, TimeUnit.MILLISECONDS)
}
override def receive: PartialFunction[Any, Unit] = {
case StatusUpdate(executorId, taskId, state, data, resources) =>
scheduler.statusUpdate(taskId, state, data.value)
if (TaskState.isFinished(state)) {
executorDataMap.get(executorId) match {
case Some(executorInfo) =>
val rpId = executorInfo.resourceProfileId
val prof = scheduler.sc.resourceProfileManager.resourceProfileFromId(rpId)
val taskCpus = ResourceProfile.getTaskCpusOrDefaultForProfile(prof, conf)
executorInfo.freeCores += taskCpus
resources.foreach { case (k, v) =>
executorInfo.resourcesInfo.get(k).foreach { r =>
r.release(v.addresses)
}
}
makeOffers(executorId)
case None =>
// Ignoring the update since we don't know about the executor.
logWarning(s"Ignored task status update ($taskId state $state) " +
s"from unknown executor with ID $executorId")
}
}
case ReviveOffers =>
makeOffers()
case KillTask(taskId, executorId, interruptThread, reason) =>
executorDataMap.get(executorId) match {
case Some(executorInfo) =>
executorInfo.executorEndpoint.send(
KillTask(taskId, executorId, interruptThread, reason))
case None =>
// Ignoring the task kill since the executor is not registered.
logWarning(s"Attempted to kill task $taskId for unknown executor $executorId.")
}
case KillExecutorsOnHost(host) =>
scheduler.getExecutorsAliveOnHost(host).foreach { exec =>
killExecutors(exec.toSeq, adjustTargetNumExecutors = false, countFailures = false,
force = true)
}
case UpdateDelegationTokens(newDelegationTokens) =>
updateDelegationTokens(newDelegationTokens)
case RemoveExecutor(executorId, reason) =>
// We will remove the executor's state and cannot restore it. However, the connection
        // between the driver and the executor may still be alive so that the executor won't exit
// automatically, so try to tell the executor to stop itself. See SPARK-13519.
executorDataMap.get(executorId).foreach(_.executorEndpoint.send(StopExecutor))
removeExecutor(executorId, reason)
case RemoveWorker(workerId, host, message) =>
removeWorker(workerId, host, message)
case LaunchedExecutor(executorId) =>
executorDataMap.get(executorId).foreach { data =>
data.freeCores = data.totalCores
}
makeOffers(executorId)
case e =>
logError(s"Received unexpected message. ${e}")
}
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
case RegisterExecutor(executorId, executorRef, hostname, cores, logUrls,
attributes, resources, resourceProfileId) =>
if (executorDataMap.contains(executorId)) {
context.sendFailure(new IllegalStateException(s"Duplicate executor ID: $executorId"))
} else if (scheduler.excludedNodes.contains(hostname) ||
isExecutorExcluded(executorId, hostname)) {
// If the cluster manager gives us an executor on an excluded node (because it
// already started allocating those resources before we informed it of our exclusion,
// or if it ignored our exclusion), then we reject that executor immediately.
logInfo(s"Rejecting $executorId as it has been excluded.")
context.sendFailure(
new IllegalStateException(s"Executor is excluded due to failures: $executorId"))
} else {
// If the executor's rpc env is not listening for incoming connections, `hostPort`
// will be null, and the client connection should be used to contact the executor.
val executorAddress = if (executorRef.address != null) {
executorRef.address
} else {
context.senderAddress
}
logInfo(s"Registered executor $executorRef ($executorAddress) with ID $executorId, " +
s" ResourceProfileId $resourceProfileId")
addressToExecutorId(executorAddress) = executorId
totalCoreCount.addAndGet(cores)
totalRegisteredExecutors.addAndGet(1)
val resourcesInfo = resources.map { case (rName, info) =>
// tell the executor it can schedule resources up to numSlotsPerAddress times,
// as configured by the user, or set to 1 as that is the default (1 task/resource)
val numParts = scheduler.sc.resourceProfileManager
.resourceProfileFromId(resourceProfileId).getNumSlotsPerAddress(rName, conf)
(info.name, new ExecutorResourceInfo(info.name, info.addresses, numParts))
}
val data = new ExecutorData(executorRef, executorAddress, hostname,
0, cores, logUrlHandler.applyPattern(logUrls, attributes), attributes,
resourcesInfo, resourceProfileId, registrationTs = System.currentTimeMillis())
// This must be synchronized because variables mutated
// in this block are read when requesting executors
CoarseGrainedSchedulerBackend.this.synchronized {
executorDataMap.put(executorId, data)
if (currentExecutorIdCounter < executorId.toInt) {
currentExecutorIdCounter = executorId.toInt
}
}
listenerBus.post(
SparkListenerExecutorAdded(System.currentTimeMillis(), executorId, data))
// Note: some tests expect the reply to come after we put the executor in the map
context.reply(true)
}
case StopDriver =>
context.reply(true)
stop()
case StopExecutors =>
logInfo("Asking each executor to shut down")
for ((_, executorData) <- executorDataMap) {
executorData.executorEndpoint.send(StopExecutor)
}
context.reply(true)
case RemoveWorker(workerId, host, message) =>
removeWorker(workerId, host, message)
context.reply(true)
// Do not change this code without running the K8s integration suites
case ExecutorDecommissioning(executorId) =>
logWarning(s"Received executor $executorId decommissioned message")
context.reply(
decommissionExecutor(
executorId,
ExecutorDecommissionInfo(s"Executor $executorId is decommissioned."),
adjustTargetNumExecutors = false,
triggeredByExecutor = true))
case RetrieveSparkAppConfig(resourceProfileId) =>
val rp = scheduler.sc.resourceProfileManager.resourceProfileFromId(resourceProfileId)
val reply = SparkAppConfig(
sparkProperties,
SparkEnv.get.securityManager.getIOEncryptionKey(),
Option(delegationTokens.get()),
rp)
context.reply(reply)
case IsExecutorAlive(executorId) => context.reply(isExecutorActive(executorId))
case e =>
logError(s"Received unexpected ask ${e}")
}
// Make fake resource offers on all executors
private def makeOffers(): Unit = {
// Make sure no executor is killed while some task is launching on it
val taskDescs = withLock {
// Filter out executors under killing
val activeExecutors = executorDataMap.filterKeys(isExecutorActive)
val workOffers = activeExecutors.map {
case (id, executorData) =>
new WorkerOffer(id, executorData.executorHost, executorData.freeCores,
Some(executorData.executorAddress.hostPort),
executorData.resourcesInfo.map { case (rName, rInfo) =>
(rName, rInfo.availableAddrs.toBuffer)
}, executorData.resourceProfileId)
}.toIndexedSeq
scheduler.resourceOffers(workOffers, true)
}
if (taskDescs.nonEmpty) {
launchTasks(taskDescs)
}
}
override def onDisconnected(remoteAddress: RpcAddress): Unit = {
addressToExecutorId
.get(remoteAddress)
.foreach(removeExecutor(_,
ExecutorProcessLost("Remote RPC client disassociated. Likely due to " +
"containers exceeding thresholds, or network issues. Check driver logs for WARN " +
"messages.")))
}
// Make fake resource offers on just one executor
private def makeOffers(executorId: String): Unit = {
// Make sure no executor is killed while some task is launching on it
val taskDescs = withLock {
// Filter out executors under killing
if (isExecutorActive(executorId)) {
val executorData = executorDataMap(executorId)
val workOffers = IndexedSeq(
new WorkerOffer(executorId, executorData.executorHost, executorData.freeCores,
Some(executorData.executorAddress.hostPort),
executorData.resourcesInfo.map { case (rName, rInfo) =>
(rName, rInfo.availableAddrs.toBuffer)
}, executorData.resourceProfileId))
scheduler.resourceOffers(workOffers, false)
} else {
Seq.empty
}
}
if (taskDescs.nonEmpty) {
launchTasks(taskDescs)
}
}
// Launch tasks returned by a set of resource offers
private def launchTasks(tasks: Seq[Seq[TaskDescription]]): Unit = {
for (task <- tasks.flatten) {
val serializedTask = TaskDescription.encode(task)
if (serializedTask.limit() >= maxRpcMessageSize) {
Option(scheduler.taskIdToTaskSetManager.get(task.taskId)).foreach { taskSetMgr =>
try {
var msg = "Serialized task %s:%d was %d bytes, which exceeds max allowed: " +
s"${RPC_MESSAGE_MAX_SIZE.key} (%d bytes). Consider increasing " +
s"${RPC_MESSAGE_MAX_SIZE.key} or using broadcast variables for large values."
msg = msg.format(task.taskId, task.index, serializedTask.limit(), maxRpcMessageSize)
taskSetMgr.abort(msg)
} catch {
case e: Exception => logError("Exception in error callback", e)
}
}
}
else {
val executorData = executorDataMap(task.executorId)
// Do resources allocation here. The allocated resources will get released after the task
// finishes.
val rpId = executorData.resourceProfileId
val prof = scheduler.sc.resourceProfileManager.resourceProfileFromId(rpId)
val taskCpus = ResourceProfile.getTaskCpusOrDefaultForProfile(prof, conf)
executorData.freeCores -= taskCpus
task.resources.foreach { case (rName, rInfo) =>
assert(executorData.resourcesInfo.contains(rName))
executorData.resourcesInfo(rName).acquire(rInfo.addresses)
}
logDebug(s"Launching task ${task.taskId} on executor id: ${task.executorId} hostname: " +
s"${executorData.executorHost}.")
executorData.executorEndpoint.send(LaunchTask(new SerializableBuffer(serializedTask)))
}
}
}
// Remove a disconnected executor from the cluster
private def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
logDebug(s"Asked to remove executor $executorId with reason $reason")
executorDataMap.get(executorId) match {
case Some(executorInfo) =>
// This must be synchronized because variables mutated
// in this block are read when requesting executors
val lossReason = CoarseGrainedSchedulerBackend.this.synchronized {
addressToExecutorId -= executorInfo.executorAddress
executorDataMap -= executorId
executorsPendingLossReason -= executorId
val killedByDriver = executorsPendingToRemove.remove(executorId).getOrElse(false)
val workerHostOpt = executorsPendingDecommission.remove(executorId)
if (killedByDriver) {
ExecutorKilled
} else if (workerHostOpt.isDefined) {
ExecutorDecommission(workerHostOpt.get)
} else {
reason
}
}
totalCoreCount.addAndGet(-executorInfo.totalCores)
totalRegisteredExecutors.addAndGet(-1)
scheduler.executorLost(executorId, lossReason)
listenerBus.post(
SparkListenerExecutorRemoved(System.currentTimeMillis(), executorId, reason.toString))
case None =>
// SPARK-15262: If an executor is still alive even after the scheduler has removed
// its metadata, we may receive a heartbeat from that executor and tell its block
// manager to reregister itself. If that happens, the block manager master will know
// about the executor, but the scheduler will not. Therefore, we should remove the
// executor from the block manager when we hit this case.
scheduler.sc.env.blockManager.master.removeExecutorAsync(executorId)
logInfo(s"Asked to remove non-existent executor $executorId")
}
}
// Remove a lost worker from the cluster
private def removeWorker(workerId: String, host: String, message: String): Unit = {
logDebug(s"Asked to remove worker $workerId with reason $message")
scheduler.workerRemoved(workerId, host, message)
}
/**
* Stop making resource offers for the given executor. The executor is marked as lost with
* the loss reason still pending.
*
* @return Whether executor should be disabled
*/
protected def disableExecutor(executorId: String): Boolean = {
val shouldDisable = CoarseGrainedSchedulerBackend.this.synchronized {
if (isExecutorActive(executorId)) {
executorsPendingLossReason += executorId
true
} else {
          // Returns true for explicitly killed executors, since we still need to get their
          // pending loss reasons; for all others, returns false.
executorsPendingToRemove.contains(executorId)
}
}
if (shouldDisable) {
logInfo(s"Disabling executor $executorId.")
scheduler.executorLost(executorId, LossReasonPending)
}
shouldDisable
}
}
val driverEndpoint = rpcEnv.setupEndpoint(ENDPOINT_NAME, createDriverEndpoint())
protected def minRegisteredRatio: Double = _minRegisteredRatio
/**
* Request that the cluster manager decommission the specified executors.
*
* @param executorsAndDecomInfo Identifiers of executors & decommission info.
* @param adjustTargetNumExecutors whether the target number of executors will be adjusted down
* after these executors have been decommissioned.
* @param triggeredByExecutor whether the decommission is triggered at executor.
* @return the ids of the executors acknowledged by the cluster manager to be removed.
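   *
   * A minimal sketch (hypothetical executor id, assuming `backend` is an instance of this class):
   * {{{
   * backend.decommissionExecutors(
   *   Array(("7", ExecutorDecommissionInfo("node draining for maintenance"))),
   *   adjustTargetNumExecutors = true,
   *   triggeredByExecutor = false)
   * }}}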
*/
override def decommissionExecutors(
executorsAndDecomInfo: Array[(String, ExecutorDecommissionInfo)],
adjustTargetNumExecutors: Boolean,
triggeredByExecutor: Boolean): Seq[String] = withLock {
// Do not change this code without running the K8s integration suites
val executorsToDecommission = executorsAndDecomInfo.flatMap { case (executorId, decomInfo) =>
// Only bother decommissioning executors which are alive.
if (isExecutorActive(executorId)) {
scheduler.executorDecommission(executorId, decomInfo)
executorsPendingDecommission(executorId) = decomInfo.workerHost
Some(executorId)
} else {
None
}
}
logInfo(s"Decommission executors: ${executorsToDecommission.mkString(", ")}")
// If we don't want to replace the executors we are decommissioning
if (adjustTargetNumExecutors) {
adjustExecutors(executorsToDecommission)
}
    // Mark the corresponding BlockManagers as decommissioned first, before sending the
    // decommission notification to executors. That way it's less likely to hit the race
    // condition where a `getPeer` request from a decommissioned executor arrives before
    // the BlockManagers are marked as decommissioned.
    // Note that marking a BlockManager as decommissioned doesn't need to depend on
    // `spark.storage.decommission.enabled`, because it's meaningless to save more blocks
    // for the BlockManager since the executor will be shut down soon.
scheduler.sc.env.blockManager.master.decommissionBlockManagers(executorsToDecommission)
if (!triggeredByExecutor) {
executorsToDecommission.foreach { executorId =>
logInfo(s"Notify executor $executorId to decommissioning.")
executorDataMap(executorId).executorEndpoint.send(DecommissionExecutor)
}
}
executorsToDecommission
}
override def start(): Unit = {
if (UserGroupInformation.isSecurityEnabled()) {
delegationTokenManager = createTokenManager()
delegationTokenManager.foreach { dtm =>
val ugi = UserGroupInformation.getCurrentUser()
val tokens = if (dtm.renewalEnabled) {
dtm.start()
} else {
val creds = ugi.getCredentials()
dtm.obtainDelegationTokens(creds)
if (creds.numberOfTokens() > 0 || creds.numberOfSecretKeys() > 0) {
SparkHadoopUtil.get.serialize(creds)
} else {
null
}
}
if (tokens != null) {
updateDelegationTokens(tokens)
}
}
}
}
protected def createDriverEndpoint(): DriverEndpoint = new DriverEndpoint()
def stopExecutors(): Unit = {
try {
if (driverEndpoint != null) {
logInfo("Shutting down all executors")
driverEndpoint.askSync[Boolean](StopExecutors)
}
} catch {
case e: Exception =>
throw new SparkException("Error asking standalone scheduler to shut down executors", e)
}
}
override def stop(): Unit = {
reviveThread.shutdownNow()
stopExecutors()
delegationTokenManager.foreach(_.stop())
try {
if (driverEndpoint != null) {
driverEndpoint.askSync[Boolean](StopDriver)
}
} catch {
case e: Exception =>
throw new SparkException("Error stopping standalone scheduler's driver endpoint", e)
}
}
/**
* Reset the state of CoarseGrainedSchedulerBackend to the initial state. Currently it will only
* be called in the yarn-client mode when AM re-registers after a failure.
* Visible for testing only.
   */
protected[scheduler] def reset(): Unit = {
val executors: Set[String] = synchronized {
requestedTotalExecutorsPerResourceProfile.clear()
executorDataMap.keys.toSet
}
    // Remove all the lingering executors that should have been removed but have not been yet.
    // The reason might be that (1) the disconnected event has not yet been received, or
    // (2) the executors died silently.
executors.foreach { eid =>
removeExecutor(eid,
ExecutorProcessLost("Stale executor after cluster manager re-registered."))
}
}
override def reviveOffers(): Unit = Utils.tryLogNonFatalError {
driverEndpoint.send(ReviveOffers)
}
override def killTask(
taskId: Long, executorId: String, interruptThread: Boolean, reason: String): Unit = {
driverEndpoint.send(KillTask(taskId, executorId, interruptThread, reason))
}
override def defaultParallelism(): Int = {
conf.getInt("spark.default.parallelism", math.max(totalCoreCount.get(), 2))
}
/**
* Called by subclasses when notified of a lost worker. It just fires the message and returns
* at once.
*/
protected def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
driverEndpoint.send(RemoveExecutor(executorId, reason))
}
protected def removeWorker(workerId: String, host: String, message: String): Unit = {
driverEndpoint.send(RemoveWorker(workerId, host, message))
}
def sufficientResourcesRegistered(): Boolean = true
override def isReady(): Boolean = {
if (sufficientResourcesRegistered) {
logInfo("SchedulerBackend is ready for scheduling beginning after " +
s"reached minRegisteredResourcesRatio: $minRegisteredRatio")
return true
}
if ((System.nanoTime() - createTimeNs) >= maxRegisteredWaitingTimeNs) {
logInfo("SchedulerBackend is ready for scheduling beginning after waiting " +
s"maxRegisteredResourcesWaitingTime: $maxRegisteredWaitingTimeNs(ns)")
return true
}
false
}
/**
* Return the number of executors currently registered with this backend.
*/
private def numExistingExecutors: Int = synchronized { executorDataMap.size }
override def getExecutorIds(): Seq[String] = synchronized {
executorDataMap.keySet.toSeq
}
def getExecutorsWithRegistrationTs(): Map[String, Long] = synchronized {
executorDataMap.mapValues(v => v.registrationTs).toMap
}
override def isExecutorActive(id: String): Boolean = synchronized {
executorDataMap.contains(id) &&
!executorsPendingToRemove.contains(id) &&
!executorsPendingLossReason.contains(id) &&
!executorsPendingDecommission.contains(id)
}
/**
   * Get the max number of tasks that can be launched concurrently based on the given
   * ResourceProfile, even if some of the slots are being used at the moment.
   * Note: please don't cache the value returned by this method, because the number can change
   * due to adding/removing executors.
*
* @param rp ResourceProfile which to use to calculate max concurrent tasks.
   * @return The max number of tasks that can currently be launched concurrently.
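   *
   * Rough illustration (hypothetical numbers): with 4 active executors of 8 cores each and
   * tasks needing 2 CPUs under the given profile, this returns about 4 * (8 / 2) = 16 slots,
   * ignoring any further limits imposed by custom resources.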
*/
override def maxNumConcurrentTasks(rp: ResourceProfile): Int = synchronized {
val (rpIds, cpus, resources) = {
executorDataMap
.filter { case (id, _) => isExecutorActive(id) }
.values.toArray.map { executor =>
(
executor.resourceProfileId,
executor.totalCores,
executor.resourcesInfo.map { case (name, rInfo) => (name, rInfo.totalAddressAmount) }
)
}.unzip3
}
TaskSchedulerImpl.calculateAvailableSlots(scheduler, conf, rp.id, rpIds, cpus, resources)
}
// this function is for testing only
def getExecutorAvailableResources(
executorId: String): Map[String, ExecutorResourceInfo] = synchronized {
executorDataMap.get(executorId).map(_.resourcesInfo).getOrElse(Map.empty)
}
// this function is for testing only
def getExecutorResourceProfileId(executorId: String): Int = synchronized {
val execDataOption = executorDataMap.get(executorId)
execDataOption.map(_.resourceProfileId).getOrElse(ResourceProfile.UNKNOWN_RESOURCE_PROFILE_ID)
}
/**
   * Request an additional number of executors from the cluster manager. This requests against
   * the default ResourceProfile; an API change will be needed to allow requesting against
   * other profiles.
* @return whether the request is acknowledged.
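   *
   * A minimal sketch (assuming `backend` is an instance of this class):
   * {{{
   * // ask the cluster manager for two more executors under the default profile
   * backend.requestExecutors(2)
   * }}}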
*/
final override def requestExecutors(numAdditionalExecutors: Int): Boolean = {
if (numAdditionalExecutors < 0) {
throw new IllegalArgumentException(
"Attempted to request a negative number of additional executor(s) " +
s"$numAdditionalExecutors from the cluster manager. Please specify a positive number!")
}
logInfo(s"Requesting $numAdditionalExecutors additional executor(s) from the cluster manager")
val response = synchronized {
val defaultProf = scheduler.sc.resourceProfileManager.defaultResourceProfile
val numExisting = requestedTotalExecutorsPerResourceProfile.getOrElse(defaultProf, 0)
requestedTotalExecutorsPerResourceProfile(defaultProf) = numExisting + numAdditionalExecutors
// Account for executors pending to be added or removed
doRequestTotalExecutors(requestedTotalExecutorsPerResourceProfile.toMap)
}
defaultAskTimeout.awaitResult(response)
}
/**
* Update the cluster manager on our scheduling needs. Three bits of information are included
* to help it make decisions.
* @param resourceProfileIdToNumExecutors The total number of executors we'd like to have per
* ResourceProfile. The cluster manager shouldn't kill any
* running executor to reach this number, but, if all
* existing executors were to die, this is the number
* of executors we'd want to be allocated.
* @param numLocalityAwareTasksPerResourceProfileId The number of tasks in all active stages that
   *                                                   have locality preferences per
* ResourceProfile. This includes running,
* pending, and completed tasks.
* @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
   *                             that would like to run on that host.
* This includes running, pending, and completed tasks.
* @return whether the request is acknowledged by the cluster manager.
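   *
   * A minimal sketch (hypothetical values, using the default profile id 0):
   * {{{
   * backend.requestTotalExecutors(
   *   resourceProfileIdToNumExecutors = Map(0 -> 10),
   *   numLocalityAwareTasksPerResourceProfileId = Map(0 -> 4),
   *   hostToLocalTaskCount = Map(0 -> Map("host1" -> 2, "host2" -> 2)))
   * }}}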
*/
final override def requestTotalExecutors(
resourceProfileIdToNumExecutors: Map[Int, Int],
numLocalityAwareTasksPerResourceProfileId: Map[Int, Int],
hostToLocalTaskCount: Map[Int, Map[String, Int]]
): Boolean = {
val totalExecs = resourceProfileIdToNumExecutors.values.sum
if (totalExecs < 0) {
throw new IllegalArgumentException(
"Attempted to request a negative number of executor(s) " +
s"$totalExecs from the cluster manager. Please specify a positive number!")
}
val resourceProfileToNumExecutors = resourceProfileIdToNumExecutors.map { case (rpid, num) =>
(scheduler.sc.resourceProfileManager.resourceProfileFromId(rpid), num)
}
val response = synchronized {
this.requestedTotalExecutorsPerResourceProfile.clear()
this.requestedTotalExecutorsPerResourceProfile ++= resourceProfileToNumExecutors
this.numLocalityAwareTasksPerResourceProfileId = numLocalityAwareTasksPerResourceProfileId
this.rpHostToLocalTaskCount = hostToLocalTaskCount
doRequestTotalExecutors(requestedTotalExecutorsPerResourceProfile.toMap)
}
defaultAskTimeout.awaitResult(response)
}
/**
* Request executors from the cluster manager by specifying the total number desired,
* including existing pending and running executors.
*
* The semantics here guarantee that we do not over-allocate executors for this application,
* since a later request overrides the value of any prior request. The alternative interface
* of requesting a delta of executors risks double counting new executors when there are
* insufficient resources to satisfy the first request. We make the assumption here that the
* cluster manager will eventually fulfill all requests when resources free up.
*
* @return a future whose evaluation indicates whether the request is acknowledged.
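   *
   * For example (hypothetical numbers): two successive requests for totals of 10 and then 8
   * executors leave the application asking for 8 in total, whereas a delta-based interface
   * could double count and end up asking for 18.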
*/
protected def doRequestTotalExecutors(
resourceProfileToTotalExecs: Map[ResourceProfile, Int]): Future[Boolean] =
Future.successful(false)
/**
* Adjust the number of executors being requested to no longer include the provided executors.
*/
private def adjustExecutors(executorIds: Seq[String]) = {
if (executorIds.nonEmpty) {
executorIds.foreach { exec =>
withLock {
val rpId = executorDataMap(exec).resourceProfileId
val rp = scheduler.sc.resourceProfileManager.resourceProfileFromId(rpId)
if (requestedTotalExecutorsPerResourceProfile.isEmpty) {
// Assume that we are killing an executor that was started by default and
// not through the request api
requestedTotalExecutorsPerResourceProfile(rp) = 0
} else {
val requestedTotalForRp = requestedTotalExecutorsPerResourceProfile(rp)
requestedTotalExecutorsPerResourceProfile(rp) = math.max(requestedTotalForRp - 1, 0)
}
}
}
doRequestTotalExecutors(requestedTotalExecutorsPerResourceProfile.toMap)
} else {
Future.successful(true)
}
}
/**
* Request that the cluster manager kill the specified executors.
*
* @param executorIds identifiers of executors to kill
   * @param adjustTargetNumExecutors whether the target number of executors should be adjusted down
* after these executors have been killed
   * @param countFailures if there are tasks running on the executors when they are killed, whether
   *                      those failures should be counted toward task failure limits
* @param force whether to force kill busy executors, default false
* @return the ids of the executors acknowledged by the cluster manager to be removed.
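   *
   * A minimal sketch (hypothetical executor ids, assuming `backend` is an instance of this class):
   * {{{
   * // kill two idle executors and shrink the requested total accordingly
   * backend.killExecutors(Seq("1", "2"), adjustTargetNumExecutors = true,
   *   countFailures = false, force = false)
   * }}}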
*/
final override def killExecutors(
executorIds: Seq[String],
adjustTargetNumExecutors: Boolean,
countFailures: Boolean,
force: Boolean): Seq[String] = {
logInfo(s"Requesting to kill executor(s) ${executorIds.mkString(", ")}")
val response = withLock {
val (knownExecutors, unknownExecutors) = executorIds.partition(executorDataMap.contains)
unknownExecutors.foreach { id =>
logWarning(s"Executor to kill $id does not exist!")
}
// If an executor is already pending to be removed, do not kill it again (SPARK-9795)
// If this executor is busy, do not kill it unless we are told to force kill it (SPARK-9552)
val executorsToKill = knownExecutors
.filter { id => !executorsPendingToRemove.contains(id) }
.filter { id => force || !scheduler.isExecutorBusy(id) }
executorsToKill.foreach { id => executorsPendingToRemove(id) = !countFailures }
logInfo(s"Actual list of executor(s) to be killed is ${executorsToKill.mkString(", ")}")
// If we do not wish to replace the executors we kill, sync the target number of executors
// with the cluster manager to avoid allocating new ones. When computing the new target,
// take into account executors that are pending to be added or removed.
val adjustTotalExecutors =
if (adjustTargetNumExecutors) {
adjustExecutors(executorsToKill)
} else {
Future.successful(true)
}
val killExecutors: Boolean => Future[Boolean] =
if (executorsToKill.nonEmpty) {
_ => doKillExecutors(executorsToKill)
} else {
_ => Future.successful(false)
}
val killResponse = adjustTotalExecutors.flatMap(killExecutors)(ThreadUtils.sameThread)
killResponse.flatMap(killSuccessful =>
Future.successful (if (killSuccessful) executorsToKill else Seq.empty[String])
)(ThreadUtils.sameThread)
}
defaultAskTimeout.awaitResult(response)
}
/**
* Kill the given list of executors through the cluster manager.
* @return whether the kill request is acknowledged.
*/
protected def doKillExecutors(executorIds: Seq[String]): Future[Boolean] =
Future.successful(false)
/**
* Request that the cluster manager kill all executors on a given host.
* @return whether the kill request is acknowledged.
*/
final override def killExecutorsOnHost(host: String): Boolean = {
logInfo(s"Requesting to kill any and all executors on host ${host}")
// A potential race exists if a new executor attempts to register on a host
    // that is on the exclude list and is no longer valid. To avoid this race,
// all executor registration and killing happens in the event loop. This way, either
// an executor will fail to register, or will be killed when all executors on a host
// are killed.
// Kill all the executors on this host in an event loop to ensure serialization.
driverEndpoint.send(KillExecutorsOnHost(host))
true
}
/**
* Create the delegation token manager to be used for the application. This method is called
* once during the start of the scheduler backend (so after the object has already been
* fully constructed), only if security is enabled in the Hadoop configuration.
*/
protected def createTokenManager(): Option[HadoopDelegationTokenManager] = None
/**
* Called when a new set of delegation tokens is sent to the driver. Child classes can override
* this method but should always call this implementation, which handles token distribution to
* executors.
*/
protected def updateDelegationTokens(tokens: Array[Byte]): Unit = {
SparkHadoopUtil.get.addDelegationTokens(tokens, conf)
delegationTokens.set(tokens)
executorDataMap.values.foreach { ed =>
ed.executorEndpoint.send(UpdateDelegationTokens(tokens))
}
}
protected def currentDelegationTokens: Array[Byte] = delegationTokens.get()
/**
* Checks whether the executor is excluded due to failure(s). This is called when the executor
* tries to register with the scheduler, and will deny registration if this method returns true.
*
* This is in addition to the exclude list kept by the task scheduler, so custom implementations
* don't need to check there.
*/
protected def isExecutorExcluded(executorId: String, hostname: String): Boolean = false
// SPARK-27112: We need to ensure that there is ordering of lock acquisition
// between TaskSchedulerImpl and CoarseGrainedSchedulerBackend objects in order to fix
// the deadlock issue exposed in SPARK-27112
private def withLock[T](fn: => T): T = scheduler.synchronized {
CoarseGrainedSchedulerBackend.this.synchronized { fn }
}
}
private[spark] object CoarseGrainedSchedulerBackend {
val ENDPOINT_NAME = "CoarseGrainedScheduler"
}
| witgo/spark | core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala | Scala | apache-2.0 | 40,929 |
package cc.ferreira.gcal2slack.cli
import java.time.LocalDate
import cc.ferreira.gcal2slack.BaseSpec
import cc.ferreira.gcal2slack.buildinfo.BuildInfo
class ActionsSpec extends BaseSpec {
"show help" in {
val result = ShowHelp.value
result should include("gcal-slack-update [options] filename")
result should include("--help")
result should include("--version")
}
"show version" in {
val testInfo: BuildInfo = new BuildInfo {
override val name: String = "test-name"
override val version: String = "1.2.3"
override val sbtVersion: String = "1.2.3"
override val scalaVersion: String = "1.2.3"
override val buildDate: LocalDate = LocalDate.of(2017, 5, 4)
}
val result = ShowVersion(testInfo).value
result should include regex "test-name 1.2.3"
result should include regex "scala 1.2.3"
result should include regex "sbt 1.2.3"
result should include regex "2017-05-04"
}
}
| hugocf/gcal-slack-update | src/test/scala/cc/ferreira/gcal2slack/cli/ActionsSpec.scala | Scala | mit | 959 |
// Project: angulate2 (https://github.com/jokade/angulate2)
// Description:
// Copyright (c) 2016 Johannes.Kastner <[email protected]>
// Distributed under the MIT License (see included LICENSE file)
package angulate2.http
import org.scalajs.dom.Blob
import scala.scalajs.js
import scala.scalajs.js.typedarray.ArrayBuffer
@js.native
trait Body extends js.Object {
def json(): js.Dynamic = js.native
def text(): String = js.native
def arrayBuffer(): ArrayBuffer = js.native
def blob(): Blob = js.native
}
@js.native
trait Response extends Body {
def ok: Boolean = js.native
def url: String = js.native
def status: Int = js.native
def statusText: String = js.native
def bytesLoaded: Int = js.native
def totalBytes: Int = js.native
}
object Response {
implicit final class RichResponse(val r: Response) extends AnyVal {
def jsonData[T<:js.Any]: T = r.json().data.asInstanceOf[T]
}
}
| jokade/angulate2 | bindings/src/main/scala/angulate2/http/Response.scala | Scala | mit | 935 |
import scala.quoted.*
def coroutineImpl(using Quotes): Expr[Any] =
'{
new {
def state: Int = 0
${identity('state)}
}
}
| dotty-staging/dotty | tests/pos-macros/i8651a.scala | Scala | apache-2.0 | 143 |
/**
* Copyright (C) 2015 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.stage.lib
object Constants {
val DATASET_SDO = "DATASET_SDO~~RESERVED"
val ADDITIONAL_LICENSE = "ADDITIONAL_LICENSE"
}
| DANS-KNAW/easy-stage-dataset | lib/src/main/scala/nl.knaw.dans.easy.stage/lib/Constants.scala | Scala | apache-2.0 | 799 |
package com.pwootage.metroidprime.dump
import java.io.DataInputStream
import java.nio.file.{Files, Path, Paths}
import com.pwootage.metroidprime.formats.common.{Face, PrimeFileType}
import com.pwootage.metroidprime.formats.io.PrimeDataFile
import com.pwootage.metroidprime.formats.mlvl.{Area, MLVL}
import com.pwootage.metroidprime.formats.mrea.MREA
import com.pwootage.metroidprime.formats.mrea.collision.Collision
import com.pwootage.metroidprime.utils.{DataTypeConversion, FileLocator, Logger, PrimeJacksonMapper}
class AreaDumper() {
def dump(srcPath: String, destPath: String): Unit = {
val dest = Paths.get(destPath)
Files.createDirectories(dest)
val mlvls = FileLocator.findFilesInBasePathWithExtension(srcPath, "MLVL")
for (mlvl <- mlvls) {
dumpMlvl(mlvl, dest)
}
}
def dumpMlvl(mlvlPath: Path, destPath: Path): Unit = {
val mlvl = new MLVL
val fin = Files.newInputStream(mlvlPath)
val din = new DataInputStream(fin)
mlvl.read(new PrimeDataFile(Some(din), None))
fin.close()
Logger.info(s"Loading world ${DataTypeConversion.intToPaddedHexString(mlvl.header.worldNameSTRG)}")
Logger.info(s"World contains ${mlvl.areas.length} areas")
Files.write(destPath.resolve(mlvlPath.getFileName.toString + ".json"),
PrimeJacksonMapper.pretty.writeValueAsBytes(mlvl.areas))
}
}
| Pwootage/prime-patcher | src/main/scala/com/pwootage/metroidprime/dump/AreaDumper.scala | Scala | gpl-3.0 | 1,352 |
package com.khivi.merge
object MergeSorted {
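  // A minimal usage sketch (assuming the input streams are individually sorted):
  //   MergeSorted(IndexedSeq(Stream(1, 3, 5), Stream(2, 4, 6))).toList == List(1, 2, 3, 4, 5, 6)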
def apply[T <% Ordered[T]](streams: IndexedSeq[Stream[T]]): Stream[T] = new MergeSorted(streams).toStream
}
class MergeSorted[T <% Ordered[T]](streams: IndexedSeq[Stream[T]]) {
private[this] type SI = (Stream[T], Int)
private[this] def getStream(streams: IndexedSeq[SI]): Stream[T] = {
if (streams.isEmpty)
return Stream.empty
val (stream, idx) = streams.minBy(_._1.head)
val newStreams = stream.tail.isEmpty match {
case false => streams.updated(idx, (stream.tail, idx))
case true => streams.filter(_._2 != idx).map(_._1).zipWithIndex
}
stream.head #:: getStream(newStreams)
}
private def toStream: Stream[T] = getStream(streams.filter(!_.isEmpty).zipWithIndex)
}
| khivi/programming-languages | scala/src/main/scala/merge.scala | Scala | mit | 818 |
package eventstreams.support
import akka.actor.{ActorRef, Props}
import akka.stream.FlowMaterializer
import akka.stream.actor.ActorPublisherMessage.Request
import akka.stream.actor._
import akka.stream.scaladsl.{PublisherSource, SubscriberSink}
import akka.testkit.{TestKit, TestProbe}
import core.sysevents.SyseventOps.symbolToSyseventOps
import core.sysevents.WithSyseventPublisher
import core.sysevents.ref.ComponentWithBaseSysevents
import eventstreams.core.actors._
import eventstreams.{BecomeActive, BecomePassive, EventFrame, Stop}
import scala.util.Try
trait FlowComponentTestContext {
self: TestKit =>
type TestFlowFunc = (TestFlowCtx) => Unit
case class TestFlowCtx(pub: ActorRef, comp: ActorRef, sink: ActorRef)
def withCustomFlow(pub: Props, component: Props, sink: Props)(f: TestFlowFunc) = {
implicit val mat = FlowMaterializer()
implicit val dispatcher = system.dispatcher
val tapActorProbe = TestProbe()
val sinkActorProbe = TestProbe()
val componentActorProbe = TestProbe()
val tapActor = system.actorOf(pub)
val sinkActor = system.actorOf(sink)
val componentActor = system.actorOf(component)
val pubSrc = PublisherSource[EventFrame](ActorPublisher[EventFrame](tapActor))
val subSink = SubscriberSink(ActorSubscriber[EventFrame](sinkActor))
val componentAsSink = SubscriberSink(ActorSubscriber[EventFrame](componentActor))
val componentAsPub = PublisherSource[EventFrame](ActorPublisher[EventFrame](componentActor))
componentAsPub.to(subSink).run()
pubSrc.to(componentAsSink).run()
val ctx = TestFlowCtx(tapActor, componentActor, sinkActor)
tapActorProbe watch tapActor
sinkActorProbe watch sinkActor
componentActorProbe watch componentActor
try {
f(ctx)
} finally {
tapActor ! BecomePassive
componentActor ! BecomePassive
sinkActor ! BecomePassive
tapActor ! Stop(None)
system.stop(tapActor)
system.stop(componentActor)
system.stop(sinkActor)
Try {
tapActorProbe expectTerminated tapActor
sinkActorProbe expectTerminated sinkActor
componentActorProbe expectTerminated componentActor
}
}
}
def withFlow(component: Props)(f: TestFlowFunc) = withCustomFlow(JsonFramePublisherStubActor.props, component, SinkStubActor.props())(f)
def activateComponent()(implicit ctx: TestFlowCtx) = ctx.comp ! BecomeActive()
def activateSink()(implicit ctx: TestFlowCtx) = ctx.sink ! BecomeActive()
def activateFlow()(implicit ctx: TestFlowCtx): Unit = {
activateComponent()
activateSink()
}
def deactivateComponent()(implicit ctx: TestFlowCtx) = ctx.comp ! BecomePassive()
def deactivateSink()(implicit ctx: TestFlowCtx) = ctx.sink ! BecomePassive()
def deactivateFlow()(implicit ctx: TestFlowCtx): Unit = {
deactivateComponent()
deactivateSink()
}
def publishMsg(j: EventFrame)(implicit ctx: TestFlowCtx): Unit = ctx.pub ! j
}
trait JsonFramePublisherStubActorSysevents extends ComponentWithBaseSysevents with StateChangeSysevents with BaseActorSysevents {
val PublishingMessage = 'PublishingMessage.trace
val NoDemandAtPublisher = 'NoDemandAtPublisher.trace
val NewDemandAtPublisher = 'NewDemandAtPublisher.trace
override def componentId: String = "Test.JsonFramePublisherStubActor"
}
object JsonFramePublisherStubActor extends JsonFramePublisherStubActorSysevents {
def props = Props(new JsonFramePublisherStubActor())
}
class JsonFramePublisherStubActor
extends ActorWithComposableBehavior
with StoppablePublisherActor[EventFrame]
with ActorWithActivePassiveBehaviors
with JsonFramePublisherStubActorSysevents
with WithSyseventPublisher {
override def commonBehavior: Receive = handler orElse super.commonBehavior
def process(m: EventFrame) =
if (totalDemand > 0) {
PublishingMessage >> ('EventId -> m.eventIdOrNA)
onNext(m)
} else {
NoDemandAtPublisher >>()
}
def handler: Receive = {
case m: EventFrame => process(m)
// case m: JsValue => process(EventFrame(m, Map()))
case Request(n) => NewDemandAtPublisher >> ('Requested -> n)
}
}
| intelix/eventstreams | es-core/es-api/src/test/scala/eventstreams/support/FlowComponentTestContext.scala | Scala | apache-2.0 | 4,164 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.graph.internal.ops
import org.trustedanalytics.sparktk.frame.Frame
import org.trustedanalytics.sparktk.graph.internal.{ BaseGraph, GraphSummarization, GraphState }
import org.graphframes.lib.org.trustedanalytics.{ sparktk => graphframeslib }
trait ClosenessCentralitySummarization extends BaseGraph {
/**
* Compute closeness centrality for nodes.
*
* Closeness centrality of a node is the reciprocal of the sum of the shortest path distances from this node to all
* other nodes in the graph. Since the sum of distances depends on the number of nodes in the
* graph, closeness is normalized by the sum of minimum possible distances.
*
* In the case of a disconnected graph, the algorithm computes the closeness centrality for each connected part.
*
* In the case of a weighted graph, the algorithm handles only positive edge weights and uses Dijkstra's algorithm for
* the shortest-path calculations
*
* Reference: Linton C. Freeman: Centrality in networks: I.Conceptual clarification. Social Networks 1:215-239, 1979.
* http://leonidzhukov.ru/hse/2013/socialnetworks/papers/freeman79-centrality.pdf
*
* @param edgeWeight the name of the column containing the edge weights. If none, every edge is assigned a weight of 1
* @param normalize if true, normalizes the closeness centrality value by the number of nodes in the connected
* part of the graph.
* @return frame with an additional column for the closeness centrality values.
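   *
   * For an unweighted graph the normalized value is commonly written as
   * C(v) = (n - 1) / sum of d(v, u) over all nodes u reachable from v.
   *
   * A minimal usage sketch (assuming `graph` is a sparktk Graph with a "weight" edge column):
   * {{{
   * val centralityFrame = graph.closenessCentrality(edgeWeight = Some("weight"), normalize = true)
   * }}}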
*/
def closenessCentrality(edgeWeight: Option[String] = None,
normalize: Boolean = true): Frame = {
execute[Frame](ClosenessCentrality(edgeWeight, normalize))
}
}
case class ClosenessCentrality(edgeWeight: Option[String] = None,
normalize: Boolean = true) extends GraphSummarization[Frame] {
override def work(state: GraphState): Frame = {
new Frame(graphframeslib.ClosenessCentrality.run(state.graphFrame, edgeWeight, normalize).vertices)
}
}
| trustedanalytics/spark-tk | sparktk-core/src/main/scala/org/trustedanalytics/sparktk/graph/internal/ops/ClosenessCentrality.scala | Scala | apache-2.0 | 2,760 |
package views.html.forgotpassword
import play.templates._
import play.templates.TemplateMagic._
import play.api.templates._
import play.api.templates.PlayMagic._
import models._
import controllers._
import java.lang._
import java.util._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.i18n._
import play.core.j.PlayMagicForJava._
import play.mvc._
import play.data._
import play.api.data.Field
import play.mvc.Http.Context.Implicit._
import views.html._
/**/
object displaypassword extends BaseScalaTemplate[play.api.templates.Html,Format[play.api.templates.Html]](play.api.templates.HtmlFormat) with play.api.templates.Template1[models.user.User,play.api.templates.Html] {
/**/
def apply/*1.2*/(display:models.user.User):play.api.templates.Html = {
_display_ {import helper._
Seq[Any](format.raw/*1.28*/("""
"""),format.raw/*3.1*/("""
"""),_display_(Seq[Any](/*4.2*/main("Welcome to Timesheet")/*4.30*/ {_display_(Seq[Any](format.raw/*4.32*/("""
<div class="container">
<div class="heading">
"""),_display_(Seq[Any](/*8.4*/helper/*8.10*/.form(routes.Application.login)/*8.41*/{_display_(Seq[Any](format.raw/*8.42*/("""
<br>
<br>
<br>
<fieldset id="login_field" class="right-align" style="width:35%;margin-top:70px;">
<div class="well" style="width:100%;">
<h3>Please Check your Inbox for your User Name and Password</h3>
<p>
<button type="submit" class="btn btn-success" style="margin-left:185px;">Go To Login</button>
</p>
</div>
</fieldset>
""")))})),format.raw/*24.5*/("""
</div>
</div>
""")))})),format.raw/*27.2*/(""" """))}
}
def render(display:models.user.User): play.api.templates.Html = apply(display)
def f:((models.user.User) => play.api.templates.Html) = (display) => apply(display)
def ref: this.type = this
}
/*
-- GENERATED --
DATE: Mon Apr 21 14:16:21 EDT 2014
SOURCE: /Users/Secret/Technology/play-2.1.0/Time_Trotter/app/views/forgotpassword/displaypassword.scala.html
HASH: 6c6b85cbf090b4b1e89c266d1d3aba315f0cd0c3
MATRIX: 758->1|878->27|906->47|943->50|979->78|1018->80|1112->140|1126->146|1165->177|1203->178|1719->663|1773->686
LINES: 26->1|30->1|31->3|32->4|32->4|32->4|36->8|36->8|36->8|36->8|52->24|55->27
-- GENERATED --
*/
| paperlotus/Time-Trotter | target/scala-2.10/src_managed/main/views/html/forgotpassword/displaypassword.template.scala | Scala | apache-2.0 | 2,621 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s
import java.io.{File, IOException}
import java.net.URI
import java.security.SecureRandom
import java.util.UUID
import scala.collection.JavaConverters._
import io.fabric8.kubernetes.api.model.{Container, ContainerBuilder, ContainerStateRunning, ContainerStateTerminated, ContainerStateWaiting, ContainerStatus, Pod, PodBuilder}
import io.fabric8.kubernetes.client.KubernetesClient
import org.apache.commons.codec.binary.Hex
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.k8s.Config.KUBERNETES_FILE_UPLOAD_PATH
import org.apache.spark.internal.Logging
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.util.{Clock, SystemClock, Utils}
import org.apache.spark.util.Utils.getHadoopFileSystem
private[spark] object KubernetesUtils extends Logging {
private val systemClock = new SystemClock()
private lazy val RNG = new SecureRandom()
/**
* Extract and parse Spark configuration properties with a given name prefix and
* return the result as a Map. Keys must not have more than one value.
*
* @param sparkConf Spark configuration
* @param prefix the given property name prefix
* @return a Map storing the configuration property keys and values
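   *
   * A minimal sketch (hypothetical key):
   * {{{
   * // with spark.kubernetes.driver.label.team=infra set in the conf, this yields
   * // Map("team" -> "infra")
   * parsePrefixedKeyValuePairs(sparkConf, "spark.kubernetes.driver.label.")
   * }}}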
*/
def parsePrefixedKeyValuePairs(
sparkConf: SparkConf,
prefix: String): Map[String, String] = {
sparkConf.getAllWithPrefix(prefix).toMap
}
def requireBothOrNeitherDefined(
opt1: Option[_],
opt2: Option[_],
errMessageWhenFirstIsMissing: String,
errMessageWhenSecondIsMissing: String): Unit = {
requireSecondIfFirstIsDefined(opt1, opt2, errMessageWhenSecondIsMissing)
requireSecondIfFirstIsDefined(opt2, opt1, errMessageWhenFirstIsMissing)
}
def requireSecondIfFirstIsDefined(
opt1: Option[_],
opt2: Option[_],
errMessageWhenSecondIsMissing: String): Unit = {
opt1.foreach { _ =>
require(opt2.isDefined, errMessageWhenSecondIsMissing)
}
}
def requireNandDefined(opt1: Option[_], opt2: Option[_], errMessage: String): Unit = {
opt1.foreach { _ => require(opt2.isEmpty, errMessage) }
opt2.foreach { _ => require(opt1.isEmpty, errMessage) }
}
def loadPodFromTemplate(
kubernetesClient: KubernetesClient,
templateFile: File,
containerName: Option[String]): SparkPod = {
try {
val pod = kubernetesClient.pods().load(templateFile).get()
selectSparkContainer(pod, containerName)
} catch {
case e: Exception =>
logError(
s"Encountered exception while attempting to load initial pod spec from file", e)
throw new SparkException("Could not load pod from template file.", e)
}
}
def selectSparkContainer(pod: Pod, containerName: Option[String]): SparkPod = {
def selectNamedContainer(
containers: List[Container], name: String): Option[(Container, List[Container])] =
containers.partition(_.getName == name) match {
case (sparkContainer :: Nil, rest) => Some((sparkContainer, rest))
case _ =>
logWarning(
s"specified container ${name} not found on pod template, " +
s"falling back to taking the first container")
Option.empty
}
val containers = pod.getSpec.getContainers.asScala.toList
containerName
.flatMap(selectNamedContainer(containers, _))
.orElse(containers.headOption.map((_, containers.tail)))
.map {
case (sparkContainer: Container, rest: List[Container]) => SparkPod(
new PodBuilder(pod)
.editSpec()
.withContainers(rest.asJava)
.endSpec()
.build(),
sparkContainer)
}.getOrElse(SparkPod(pod, new ContainerBuilder().build()))
}
def parseMasterUrl(url: String): String = url.substring("k8s://".length)
def formatPairsBundle(pairs: Seq[(String, String)], indent: Int = 1) : String = {
// Use more loggable format if value is null or empty
val indentStr = "\t" * indent
pairs.map {
case (k, v) => s"\n$indentStr $k: ${Option(v).filter(_.nonEmpty).getOrElse("N/A")}"
}.mkString("")
}
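  // Illustration added for this edit: with the default indent,
  // formatPairsBundle(Seq("phase" -> "Running", "reason" -> "")) renders each pair on its own
  // tab-indented line, with null/empty values shown as "N/A",
  // i.e. "\n\t phase: Running\n\t reason: N/A".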
/**
* Given a pod, output a human readable representation of its state
*
* @param pod Pod
* @return Human readable pod state
*/
def formatPodState(pod: Pod): String = {
val details = Seq[(String, String)](
// pod metadata
("pod name", pod.getMetadata.getName),
("namespace", pod.getMetadata.getNamespace),
("labels", pod.getMetadata.getLabels.asScala.mkString(", ")),
("pod uid", pod.getMetadata.getUid),
("creation time", formatTime(pod.getMetadata.getCreationTimestamp)),
// spec details
("service account name", pod.getSpec.getServiceAccountName),
("volumes", pod.getSpec.getVolumes.asScala.map(_.getName).mkString(", ")),
("node name", pod.getSpec.getNodeName),
// status
("start time", formatTime(pod.getStatus.getStartTime)),
("phase", pod.getStatus.getPhase),
("container status", containersDescription(pod, 2))
)
formatPairsBundle(details)
}
def containersDescription(p: Pod, indent: Int = 1): String = {
p.getStatus.getContainerStatuses.asScala.map { status =>
Seq(
("container name", status.getName),
("container image", status.getImage)) ++
containerStatusDescription(status)
}.map(p => formatPairsBundle(p, indent)).mkString("\n\n")
}
def containerStatusDescription(containerStatus: ContainerStatus)
: Seq[(String, String)] = {
val state = containerStatus.getState
Option(state.getRunning)
.orElse(Option(state.getTerminated))
.orElse(Option(state.getWaiting))
.map {
case running: ContainerStateRunning =>
Seq(
("container state", "running"),
("container started at", formatTime(running.getStartedAt)))
case waiting: ContainerStateWaiting =>
Seq(
("container state", "waiting"),
("pending reason", waiting.getReason))
case terminated: ContainerStateTerminated =>
Seq(
("container state", "terminated"),
("container started at", formatTime(terminated.getStartedAt)),
("container finished at", formatTime(terminated.getFinishedAt)),
("exit code", terminated.getExitCode.toString),
("termination reason", terminated.getReason))
case unknown =>
throw new SparkException(s"Unexpected container status type ${unknown.getClass}.")
}.getOrElse(Seq(("container state", "N/A")))
}
def formatTime(time: String): String = {
if (time != null) time else "N/A"
}
/**
* Generates a unique ID to be used as part of identifiers. The returned ID is a hex string
* of a 64-bit value containing the 40 LSBs from the current time + 24 random bits from a
* cryptographically strong RNG. (40 bits gives about 30 years worth of "unique" timestamps.)
*
* This avoids using a UUID for uniqueness (too long), and relying solely on the current time
* (not unique enough).
*/
def uniqueID(clock: Clock = systemClock): String = {
val random = new Array[Byte](3)
synchronized {
RNG.nextBytes(random)
}
val time = java.lang.Long.toHexString(clock.getTimeMillis() & 0xFFFFFFFFFFL)
Hex.encodeHexString(random) + time
}
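  // Worked example added for this edit (made-up values): for a clock reading of
  // 0x1234567890AB milliseconds, masking with 0xFFFFFFFFFFL keeps the 40 LSBs (0x34567890AB);
  // prepending three random bytes such as 0xA1B2C3 gives an ID like "a1b2c334567890ab" --
  // 6 hex characters of randomness followed by up to 10 hex characters of timestamp.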
  /**
   * Upload the given files and return their URIs rewritten to point at the upload location.
   */
def uploadAndTransformFileUris(fileUris: Iterable[String], conf: Option[SparkConf] = None)
: Iterable[String] = {
fileUris.map { uri =>
uploadFileUri(uri, conf)
}
}
private def isLocalDependency(uri: URI): Boolean = {
uri.getScheme match {
case null | "file" => true
case _ => false
}
}
def isLocalAndResolvable(resource: String): Boolean = {
resource != SparkLauncher.NO_RESOURCE &&
isLocalDependency(Utils.resolveURI(resource))
}
def renameMainAppResource(resource: String, conf: SparkConf): String = {
if (isLocalAndResolvable(resource)) {
SparkLauncher.NO_RESOURCE
} else {
resource
}
}
def uploadFileUri(uri: String, conf: Option[SparkConf] = None): String = {
conf match {
case Some(sConf) =>
if (sConf.get(KUBERNETES_FILE_UPLOAD_PATH).isDefined) {
val fileUri = Utils.resolveURI(uri)
try {
val hadoopConf = SparkHadoopUtil.get.newConfiguration(sConf)
val uploadPath = sConf.get(KUBERNETES_FILE_UPLOAD_PATH).get
val fs = getHadoopFileSystem(Utils.resolveURI(uploadPath), hadoopConf)
val randomDirName = s"spark-upload-${UUID.randomUUID()}"
fs.mkdirs(new Path(s"${uploadPath}/${randomDirName}"))
val targetUri = s"${uploadPath}/${randomDirName}/${fileUri.getPath.split("/").last}"
log.info(s"Uploading file: ${fileUri.getPath} to dest: $targetUri...")
uploadFileToHadoopCompatibleFS(new Path(fileUri.getPath), new Path(targetUri), fs)
targetUri
} catch {
case e: Exception =>
throw new SparkException(s"Uploading file ${fileUri.getPath} failed...", e)
}
} else {
throw new SparkException("Please specify " +
"spark.kubernetes.file.upload.path property.")
}
case _ => throw new SparkException("Spark configuration is missing...")
}
}
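  // Illustration added for this edit (hypothetical paths): with
  // spark.kubernetes.file.upload.path = "s3a://bucket/upload" and a local file "/tmp/app.jar",
  // the returned target URI has the shape "s3a://bucket/upload/spark-upload-<UUID>/app.jar".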
/**
* Upload a file to a Hadoop-compatible filesystem.
*/
private def uploadFileToHadoopCompatibleFS(
src: Path,
dest: Path,
fs: FileSystem,
delSrc : Boolean = false,
overwrite: Boolean = true): Unit = {
try {
fs.copyFromLocalFile(false, true, src, dest)
} catch {
case e: IOException =>
throw new SparkException(s"Error uploading file ${src.getName}", e)
}
}
}
| highfei2011/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesUtils.scala | Scala | apache-2.0 | 10,762 |
package sandbox
import org.parboiled2._
case class tbl(name: String, columns: Seq[String])
/**
 * Shows how easy it is to express a grammar in pseudo-BNF using Parboiled2.
 * @param input the DDL text to parse
 * @param columnStart the marker (for example an indentation string) that precedes each column definition
 */
case class DdlParser(input: ParserInput, columnStart: String) extends Parser {
import DdlParser._
def DDL = rule { Statements.* }
def Statements = rule { Ignore ~ Table }
def Table = rule { TableFlag ~ TableName ~ Ignore ~ Arguments ~> tbl }
def TableName = rule { capture(!EndName ~ ANY).+ ~> (_.mkString("")) ~ EndName}
def Arguments = rule { Arg.*.separatedBy(Ignore) }
def Arg = rule { columnStart ~ capture(!Space ~ ANY).+ ~>(_.mkString("")) ~ Space}
def TableFlag = rule { CreateTable ~ Space }
def EndName = rule { Space | "(" }
def Ignore = rule { (! (CreateTable | Space ~ Space) ~ ANY).+ }
}
object DdlParser {
val NewLine = "\r"
val Comma = ","
val Space = " "
val CreateTable = "table"
private val sample =
"""
|--comment comment
|
|create table tables (
| id int identity not null,
| label varchar(15) not null,
| location int not null
|)
|
|create table locations(
| id int identity not null,
| name varchar(15) not null,
| owner varchar(50) not null
|)
|
|-- more comments
""".stripMargin
def test = DdlParser(sample, " ").DDL.run()
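  // Rough sketch of the intent (hedged, not verified against the parser): `test` runs the DDL
  // rule over `sample` with a two-space column marker, aiming to recover something like
  //   Seq(tbl("tables", Seq("id", "label", "location")),
  //       tbl("locations", Seq("id", "name", "owner")))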
}
| ChrisCoffey/sandbox | src/Parsers.scala | Scala | unlicense | 1,467 |
/*
* Copyright 2015 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.csv.libra
object arbitrary
extends kantan.csv.laws.discipline.ArbitraryInstances with kantan.codecs.libra.laws.discipline.ArbitraryInstances
| nrinaudo/tabulate | libra/src/test/scala/kantan/csv/libra/arbitrary.scala | Scala | mit | 761 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.raft
import kafka.common.{InterBrokerSendThread, RequestAndCompletionHandler}
import kafka.utils.Logging
import org.apache.kafka.clients.{ClientResponse, KafkaClient}
import org.apache.kafka.common.Node
import org.apache.kafka.common.message._
import org.apache.kafka.common.protocol.{ApiKeys, ApiMessage, Errors}
import org.apache.kafka.common.requests._
import org.apache.kafka.common.utils.Time
import org.apache.kafka.raft.RaftConfig.InetAddressSpec
import org.apache.kafka.raft.{NetworkChannel, RaftRequest, RaftResponse, RaftUtil}
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable
object KafkaNetworkChannel {
private[raft] def buildRequest(requestData: ApiMessage): AbstractRequest.Builder[_ <: AbstractRequest] = {
requestData match {
case voteRequest: VoteRequestData =>
new VoteRequest.Builder(voteRequest)
case beginEpochRequest: BeginQuorumEpochRequestData =>
new BeginQuorumEpochRequest.Builder(beginEpochRequest)
case endEpochRequest: EndQuorumEpochRequestData =>
new EndQuorumEpochRequest.Builder(endEpochRequest)
case fetchRequest: FetchRequestData =>
// Since we already have the request, we go through a simplified builder
new AbstractRequest.Builder[FetchRequest](ApiKeys.FETCH) {
override def build(version: Short): FetchRequest = new FetchRequest(fetchRequest, version)
override def toString(): String = fetchRequest.toString
}
case fetchSnapshotRequest: FetchSnapshotRequestData =>
new FetchSnapshotRequest.Builder(fetchSnapshotRequest)
case _ =>
throw new IllegalArgumentException(s"Unexpected type for requestData: $requestData")
}
}
}
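/**
 * Background send thread for outbound Raft requests: `sendRequest` enqueues a request and
 * wakes the thread, and `generateRequests` drains the queue so the inter-broker send thread
 * can hand the requests to the underlying network client.
 */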
private[raft] class RaftSendThread(
name: String,
networkClient: KafkaClient,
requestTimeoutMs: Int,
time: Time,
isInterruptible: Boolean = true
) extends InterBrokerSendThread(
name,
networkClient,
requestTimeoutMs,
time,
isInterruptible
) {
private val queue = new ConcurrentLinkedQueue[RequestAndCompletionHandler]()
def generateRequests(): Iterable[RequestAndCompletionHandler] = {
val buffer = mutable.Buffer[RequestAndCompletionHandler]()
while (true) {
val request = queue.poll()
if (request == null) {
return buffer
} else {
buffer += request
}
}
buffer
}
def sendRequest(request: RequestAndCompletionHandler): Unit = {
queue.add(request)
wakeup()
}
}
class KafkaNetworkChannel(
time: Time,
client: KafkaClient,
requestTimeoutMs: Int,
threadNamePrefix: String
) extends NetworkChannel with Logging {
import KafkaNetworkChannel._
type ResponseHandler = AbstractResponse => Unit
private val correlationIdCounter = new AtomicInteger(0)
private val endpoints = mutable.HashMap.empty[Int, Node]
private val requestThread = new RaftSendThread(
name = threadNamePrefix + "-outbound-request-thread",
networkClient = client,
requestTimeoutMs = requestTimeoutMs,
time = time,
isInterruptible = false
)
override def send(request: RaftRequest.Outbound): Unit = {
def completeFuture(message: ApiMessage): Unit = {
val response = new RaftResponse.Inbound(
request.correlationId,
message,
request.destinationId
)
request.completion.complete(response)
}
def onComplete(clientResponse: ClientResponse): Unit = {
val response = if (clientResponse.versionMismatch != null) {
error(s"Request $request failed due to unsupported version error",
clientResponse.versionMismatch)
errorResponse(request.data, Errors.UNSUPPORTED_VERSION)
} else if (clientResponse.authenticationException != null) {
// For now we treat authentication errors as retriable. We use the
// `NETWORK_EXCEPTION` error code for lack of a good alternative.
// Note that `BrokerToControllerChannelManager` will still log the
// authentication errors so that users have a chance to fix the problem.
error(s"Request $request failed due to authentication error",
clientResponse.authenticationException)
errorResponse(request.data, Errors.NETWORK_EXCEPTION)
} else if (clientResponse.wasDisconnected()) {
errorResponse(request.data, Errors.BROKER_NOT_AVAILABLE)
} else {
clientResponse.responseBody.data
}
completeFuture(response)
}
endpoints.get(request.destinationId) match {
case Some(node) =>
requestThread.sendRequest(RequestAndCompletionHandler(
request.createdTimeMs,
destination = node,
request = buildRequest(request.data),
handler = onComplete
))
case None =>
completeFuture(errorResponse(request.data, Errors.BROKER_NOT_AVAILABLE))
}
}
// Visible for testing
private[raft] def pollOnce(): Unit = {
requestThread.doWork()
}
override def newCorrelationId(): Int = {
correlationIdCounter.getAndIncrement()
}
private def errorResponse(
request: ApiMessage,
error: Errors
): ApiMessage = {
val apiKey = ApiKeys.forId(request.apiKey)
RaftUtil.errorResponse(apiKey, error)
}
override def updateEndpoint(id: Int, spec: InetAddressSpec): Unit = {
val node = new Node(id, spec.address.getHostString, spec.address.getPort)
endpoints.put(id, node)
}
def start(): Unit = {
requestThread.start()
}
def initiateShutdown(): Unit = {
requestThread.initiateShutdown()
}
override def close(): Unit = {
requestThread.shutdown()
}
}
| TiVo/kafka | core/src/main/scala/kafka/raft/KafkaNetworkChannel.scala | Scala | apache-2.0 | 6,478 |
/*
* Copyright 2015-2020 Noel Welsh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package doodle
package examples
object Ripples {
import cats.effect.IO
import cats.instances.all._
import cats.syntax.all._
import doodle.core._
import doodle.syntax._
import doodle.svg._
import doodle.svg.effect._
import monix.reactive.Observable
import monix.catnap.{ConcurrentQueue,SchedulerEffect}
import scala.concurrent.duration.{FiniteDuration, MILLISECONDS}
final case class Ripple(age: Int, x: Double, y: Double) {
val maxAge = 200
def alive: Boolean = age <= maxAge
def older: Ripple =
this.copy(age = age + 1)
def picture: Picture[Unit] =
circle[Algebra,Drawing](age.toDouble)
.strokeColor(Color.hotpink.alpha(((maxAge - age) / (maxAge.toDouble)).normalized))
.at(x, y)
}
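  /**
   * Builds the ripple animation stream: canvas redraw ticks are enqueued as `None` (age and
   * prune the live ripples) and throttled mouse moves as `Some(Ripple)` (spawn a new ripple);
   * the queue is then folded into the current list of ripples and rendered as a picture.
   */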
def ripples(canvas: Canvas): IO[Observable[Picture[Unit]]] = {
implicit val cs = SchedulerEffect.contextShift[IO](svgScheduler)
ConcurrentQueue[IO]
.bounded[Option[Ripple]](5)
.map{ queue =>
canvas
.redraw
.map(_ => none[Ripple])
.mapEvalF(r => queue.offer(r))
.subscribe()
canvas
.mouseMove
.throttleFirst(FiniteDuration(100, MILLISECONDS)) // Stop spamming with too many mouse events
.map(pt => Ripple(0, pt.x, pt.y).some)
.mapEvalF(r => queue.offer(r))
.subscribe()
Observable
.repeatEvalF(queue.poll)
.scan(List.empty[Ripple]){(ripples, ripple) =>
ripple match {
case Some(r) => r :: ripples
case None => ripples.filter(_.alive).map(_.older)
}
}
.map(ripples => ripples.map(_.picture).allOn)
}
}
}
| underscoreio/doodle | examples/js/src/main/scala/doodle/examples/Ripples.scala | Scala | apache-2.0 | 2,297 |
package models.billing
import scalaz._
import Scalaz._
import scalaz.effect.IO
import scalaz.Validation
import scalaz.Validation.FlatMap._
import scalaz.NonEmptyList._
import cache._
import db._
import models.Constants._
import models.base._
import models.tosca.{ KeyValueField, KeyValueList}
import io.megam.auth.stack.AccountResult
import io.megam.auth.stack.{ Name, Phone, Password, States, Approval, Dates, Suspend }
import io.megam.auth.funnel.FunnelErrors._
import com.datastax.driver.core.{ ResultSet, Row }
import com.websudos.phantom.dsl._
import scala.concurrent.{ Future => ScalaFuture }
import com.websudos.phantom.connectors.{ ContactPoint, KeySpaceDef }
import scala.concurrent.Await
import scala.concurrent.duration._
import utils.DateHelper
import io.megam.util.Time
import org.joda.time.{DateTime, DateTimeZone}
import org.joda.time.format.{DateTimeFormat,ISODateTimeFormat}
import io.megam.common.uid.UID
import net.liftweb.json._
import net.liftweb.json.scalaz.JsonScalaz._
import java.nio.charset.Charset
import controllers.stack.ImplicitJsonFormats
/**
* @author rajesh
*
*/
case class BillingtransactionsInput(gateway: String,
amountin: String,
amountout: String,
fees: String,
tranid: String,
trandate: String,
currency_type: String,
inputs: models.tosca.KeyValueList
)
case class BillingtransactionsResult(id: String,
account_id: String,
gateway: String,
amountin: String,
amountout: String,
fees: String,
tranid: String,
trandate: String,
currency_type: String,
inputs: models.tosca.KeyValueList,
json_claz: String,
created_at: DateTime)
sealed class BillingtransactionsSacks extends CassandraTable[BillingtransactionsSacks, BillingtransactionsResult] with ImplicitJsonFormats {
object id extends StringColumn(this)
object account_id extends StringColumn(this) with PartitionKey[String]
object gateway extends StringColumn(this)
object amountin extends StringColumn(this)
object amountout extends StringColumn(this)
object fees extends StringColumn(this)
object tranid extends StringColumn(this)
object trandate extends StringColumn(this) with PrimaryKey[String]
object currency_type extends StringColumn(this)
object inputs extends JsonListColumn[BillingtransactionsSacks, BillingtransactionsResult, KeyValueField](this) {
override def fromJson(obj: String): KeyValueField = {
JsonParser.parse(obj).extract[KeyValueField]
}
override def toJson(obj: KeyValueField): String = {
compactRender(Extraction.decompose(obj))
}
}
object json_claz extends StringColumn(this)
object created_at extends DateTimeColumn(this) with PrimaryKey[DateTime]
def fromRow(row: Row): BillingtransactionsResult = {
BillingtransactionsResult(
id(row),
account_id(row),
gateway(row),
amountin(row),
amountout(row),
fees(row),
tranid(row),
trandate(row),
currency_type(row),
inputs(row),
json_claz(row),
created_at(row))
}
}
abstract class ConcreteBillingtransactions extends BillingtransactionsSacks with RootConnector {
override lazy val tableName = "billingtransactions"
override implicit def space: KeySpace = scyllaConnection.space
override implicit def session: Session = scyllaConnection.session
def insertNewRecord(ams: BillingtransactionsResult): ValidationNel[Throwable, ResultSet] = {
val res = insert.value(_.id, ams.id)
.value(_.account_id, ams.account_id)
.value(_.gateway, ams.gateway)
.value(_.amountin, ams.amountin)
.value(_.amountout, ams.amountout)
.value(_.fees, ams.fees)
.value(_.tranid, ams.tranid)
.value(_.trandate, ams.trandate)
.value(_.currency_type, ams.currency_type)
.value(_.inputs, ams.inputs)
.value(_.json_claz, ams.json_claz)
.value(_.created_at, ams.created_at)
.future()
Await.result(res, 5.seconds).successNel
}
def listRecords(id: String): ValidationNel[Throwable, Seq[BillingtransactionsResult]] = {
val res = select.allowFiltering().where(_.account_id eqs id).limit(20).fetch()
Await.result(res, 5.seconds).successNel
}
def deleteRecords(email: String): ValidationNel[Throwable, ResultSet] = {
val res = delete.where(_.account_id eqs email).future()
Await.result(res, 5.seconds).successNel
}
}
object Billingtransactions extends ConcreteBillingtransactions {
private def mkBillingtransactionsSack(email: String, input: String): ValidationNel[Throwable, BillingtransactionsResult] = {
val billInput: ValidationNel[Throwable, BillingtransactionsInput] = (Validation.fromTryCatchThrowable[BillingtransactionsInput, Throwable] {
parse(input).extract[BillingtransactionsInput]
} leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel //capture failure
for {
bill <- billInput
set <- (atBalUpdate(email, bill.amountin, bill.inputs) leftMap { s: NonEmptyList[Throwable] => s })
uir <- (UID("bhs").get leftMap { ut: NonEmptyList[Throwable] => ut })
} yield {
val bvalue = Set(email)
val json = new BillingtransactionsResult(uir.get._1 + uir.get._2, email, bill.gateway, bill.amountin, bill.amountout, bill.fees, bill.tranid, bill.trandate, bill.currency_type, bill.inputs, "Megam::BillingTransactions",DateHelper.now())
json
}
}
  /*
   * Create a new billing transaction recording a payment made by the user.
   */
def create(email: String, input: String): ValidationNel[Throwable, Option[BillingtransactionsResult]] = {
for {
wa <- (mkBillingtransactionsSack(email, input) leftMap { err: NonEmptyList[Throwable] => err })
set <- (insertNewRecord(wa) leftMap { t: NonEmptyList[Throwable] => t })
acc <- (atAccUpdate(email) leftMap { s: NonEmptyList[Throwable] => s })
} yield {
play.api.Logger.warn(("%s%s%-20s%s").format(Console.GREEN, Console.BOLD, "Billingtransactions.created success", Console.RESET))
wa.some
}
}
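  /**
   * Debits the account balance for non quota-based transactions; when the `quota_based`
   * input flag is true the balance is left untouched and a placeholder result is returned.
   */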
def atBalUpdate(email: String, amount: String, inputs: models.tosca.KeyValueList): ValidationNel[Throwable, BalancesResults] = {
val quota = inputs.find(_.key.equalsIgnoreCase("quota_based")).getOrElse(models.tosca.KeyValueField.empty).value.toBoolean
val bal = BalancesUpdateInput("", amount, DateHelper.now().toString(), DateHelper.now().toString())
if (!quota) {
models.billing.Balances.update(email, compactRender(Extraction.decompose(bal)))
} else {
val dummy = BalancesResult("", email, amount, "", DateHelper.now(), DateHelper.now())
List(dummy.some).successNel
}
}
  /*
   * A finder keyed by email: lists the transactions stored under the account index
   * (account_id) in the `billingtransactions` table.
   * Takes an email and returns a ValidationNel[Throwable, Seq[BillingtransactionsResult]],
   * failing with ResourceItemNotFound when nothing is found for that account.
   */
def findByEmail(email: String): ValidationNel[Throwable, Seq[BillingtransactionsResult]] = {
(listRecords(email) leftMap { t: NonEmptyList[Throwable] =>
new ResourceItemNotFound(email, "Billingtransactions = nothing found.")
}).toValidationNel.flatMap { nm: Seq[BillingtransactionsResult] =>
if (!nm.isEmpty)
Validation.success[Throwable, Seq[BillingtransactionsResult]](nm).toValidationNel
else
Validation.failure[Throwable, Seq[BillingtransactionsResult]](new ResourceItemNotFound(email, "Billingtransactions = nothing found.")).toValidationNel
}
}
def delete(email: String): ValidationNel[Throwable, Option[BillingtransactionsResult]] = {
deleteRecords(email) match {
case Success(value) => Validation.success[Throwable, Option[BillingtransactionsResult]](none).toValidationNel
case Failure(err) => Validation.success[Throwable, Option[BillingtransactionsResult]](none).toValidationNel
}
}
}
| indykish/vertice_gateway | app/models/billing/Billingtransactions.scala | Scala | mit | 8,743 |
/* Copyright 2009-2021 EPFL, Lausanne */
import stainless.lang._
object Monads2 {
sealed abstract class Option[T]
case class Some[T](t: T) extends Option[T]
case class None[T]() extends Option[T]
def flatMap[T,U](opt: Option[T], f: T => Option[U]): Option[U] = opt match {
case Some(x) => f(x)
case None() => None()
}
def add[T](o1: Option[T], o2: Option[T]): Option[T] = o1 match {
case Some(x) => o1
case None() => o2
}
def associative_law[T,U,V](opt: Option[T], f: T => Option[U], g: U => Option[V]): Boolean = {
flatMap(flatMap(opt, f), g) == flatMap(opt, (x: T) => flatMap(f(x), g))
}.holds
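  // Concrete instance of the law above, added for illustration:
  //   flatMap(flatMap(Some(2), (x: Int) => Some(x + 1)), (y: Int) => Some(y * 2)) == Some(6)
  // and grouping the calls the other way around gives the same result.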
def left_unit_law[T,U](x: T, f: T => Option[U]): Boolean = {
flatMap(Some(x), f) == f(x)
}.holds
def right_unit_law[T,U](opt: Option[T]): Boolean = {
flatMap(opt, (x: T) => Some[T](x)) == opt
}.holds
def flatMap_zero_law[T,U](none: None[T], f: T => Option[U]): Boolean = {
flatMap(none, f) == None[U]()
}.holds
def flatMap_to_zero_law[T,U](opt: Option[T]): Boolean = {
flatMap(opt, (x: T) => None[U]()) == None[U]()
}.holds
def add_zero_law[T](opt: Option[T]): Boolean = {
add(opt, None[T]()) == opt
}.holds
def zero_add_law[T](opt: Option[T]): Boolean = {
add(None[T](), opt) == opt
}.holds
}
// vim: set ts=4 sw=4 et:
| epfl-lara/stainless | frontends/benchmarks/verification/valid/Monads2.scala | Scala | apache-2.0 | 1,314 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{Externalizable, ObjectInput, ObjectOutput}
import java.sql.{Date, Timestamp}
import org.apache.spark.SparkException
import org.apache.spark.sql.catalyst.encoders.{OuterScopes, RowEncoder}
import org.apache.spark.sql.catalyst.plans.{LeftAnti, LeftSemi}
import org.apache.spark.sql.catalyst.util.sideBySide
import org.apache.spark.sql.execution.{LogicalRDD, RDDScanExec}
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ShuffleExchangeExec}
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
case class TestDataPoint(x: Int, y: Double, s: String, t: TestDataPoint2)
case class TestDataPoint2(x: Int, s: String)
object TestForTypeAlias {
type TwoInt = (Int, Int)
type ThreeInt = (TwoInt, Int)
type SeqOfTwoInt = Seq[TwoInt]
def tupleTypeAlias: TwoInt = (1, 1)
def nestedTupleTypeAlias: ThreeInt = ((1, 1), 2)
def seqOfTupleTypeAlias: SeqOfTwoInt = Seq((1, 1), (2, 2))
}
class DatasetSuite extends QueryTest with SharedSQLContext {
import testImplicits._
private implicit val ordering = Ordering.by((c: ClassData) => c.a -> c.b)
test("checkAnswer should compare map correctly") {
val data = Seq((1, "2", Map(1 -> 2, 2 -> 1)))
checkAnswer(
data.toDF(),
Seq(Row(1, "2", Map(2 -> 1, 1 -> 2))))
}
test("toDS") {
val data = Seq(("a", 1), ("b", 2), ("c", 3))
checkDataset(
data.toDS(),
data: _*)
}
test("toDS with RDD") {
val ds = sparkContext.makeRDD(Seq("a", "b", "c"), 3).toDS()
checkDataset(
ds.mapPartitions(_ => Iterator(1)),
1, 1, 1)
}
test("emptyDataset") {
val ds = spark.emptyDataset[Int]
assert(ds.count() == 0L)
assert(ds.collect() sameElements Array.empty[Int])
}
test("range") {
assert(spark.range(10).map(_ + 1).reduce(_ + _) == 55)
assert(spark.range(10).map{ case i: java.lang.Long => i + 1 }.reduce(_ + _) == 55)
assert(spark.range(0, 10).map(_ + 1).reduce(_ + _) == 55)
assert(spark.range(0, 10).map{ case i: java.lang.Long => i + 1 }.reduce(_ + _) == 55)
assert(spark.range(0, 10, 1, 2).map(_ + 1).reduce(_ + _) == 55)
assert(spark.range(0, 10, 1, 2).map{ case i: java.lang.Long => i + 1 }.reduce(_ + _) == 55)
}
test("SPARK-12404: Datatype Helper Serializability") {
val ds = sparkContext.parallelize((
new Timestamp(0),
new Date(0),
java.math.BigDecimal.valueOf(1),
scala.math.BigDecimal(1)) :: Nil).toDS()
ds.collect()
}
test("collect, first, and take should use encoders for serialization") {
val item = NonSerializableCaseClass("abcd")
val ds = Seq(item).toDS()
assert(ds.collect().head == item)
assert(ds.collectAsList().get(0) == item)
assert(ds.first() == item)
assert(ds.take(1).head == item)
assert(ds.takeAsList(1).get(0) == item)
assert(ds.toLocalIterator().next() === item)
}
test("coalesce, repartition") {
val data = (1 to 100).map(i => ClassData(i.toString, i))
val ds = data.toDS()
intercept[IllegalArgumentException] {
ds.coalesce(0)
}
intercept[IllegalArgumentException] {
ds.repartition(0)
}
assert(ds.repartition(10).rdd.partitions.length == 10)
checkDatasetUnorderly(
ds.repartition(10),
data: _*)
assert(ds.coalesce(1).rdd.partitions.length == 1)
checkDatasetUnorderly(
ds.coalesce(1),
data: _*)
}
test("as tuple") {
val data = Seq(("a", 1), ("b", 2)).toDF("a", "b")
checkDataset(
data.as[(String, Int)],
("a", 1), ("b", 2))
}
test("as case class / collect") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDF("a", "b").as[ClassData]
checkDataset(
ds,
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
assert(ds.collect().head == ClassData("a", 1))
}
test("as case class - reordered fields by name") {
val ds = Seq((1, "a"), (2, "b"), (3, "c")).toDF("b", "a").as[ClassData]
assert(ds.collect() === Array(ClassData("a", 1), ClassData("b", 2), ClassData("c", 3)))
}
test("as case class - take") {
val ds = Seq((1, "a"), (2, "b"), (3, "c")).toDF("b", "a").as[ClassData]
assert(ds.take(2) === Array(ClassData("a", 1), ClassData("b", 2)))
}
test("as seq of case class - reorder fields by name") {
val df = spark.range(3).select(array(struct($"id".cast("int").as("b"), lit("a").as("a"))))
val ds = df.as[Seq[ClassData]]
assert(ds.collect() === Array(
Seq(ClassData("a", 0)),
Seq(ClassData("a", 1)),
Seq(ClassData("a", 2))))
}
test("map") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.map(v => (v._1, v._2 + 1)),
("a", 2), ("b", 3), ("c", 4))
}
test("map with type change with the exact matched number of attributes") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.map(identity[(String, Int)])
.as[OtherTuple]
.map(identity[OtherTuple]),
OtherTuple("a", 1), OtherTuple("b", 2), OtherTuple("c", 3))
}
test("map with type change with less attributes") {
val ds = Seq(("a", 1, 3), ("b", 2, 4), ("c", 3, 5)).toDS()
checkDataset(
ds.as[OtherTuple]
.map(identity[OtherTuple]),
OtherTuple("a", 1), OtherTuple("b", 2), OtherTuple("c", 3))
}
test("map and group by with class data") {
// We inject a group by here to make sure this test case is future proof
// when we implement better pipelining and local execution mode.
val ds: Dataset[(ClassData, Long)] = Seq(ClassData("one", 1), ClassData("two", 2)).toDS()
.map(c => ClassData(c.a, c.b + 1))
.groupByKey(p => p).count()
checkDatasetUnorderly(
ds,
(ClassData("one", 2), 1L), (ClassData("two", 3), 1L))
}
test("select") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(expr("_2 + 1").as[Int]),
2, 3, 4)
}
test("SPARK-16853: select, case class and tuple") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(expr("struct(_2, _2)").as[(Int, Int)]): Dataset[(Int, Int)],
(1, 1), (2, 2), (3, 3))
checkDataset(
ds.select(expr("named_struct('a', _1, 'b', _2)").as[ClassData]): Dataset[ClassData],
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
}
test("select 2") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(
expr("_1").as[String],
expr("_2").as[Int]) : Dataset[(String, Int)],
("a", 1), ("b", 2), ("c", 3))
}
test("select 2, primitive and tuple") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(
expr("_1").as[String],
expr("struct(_2, _2)").as[(Int, Int)]),
("a", (1, 1)), ("b", (2, 2)), ("c", (3, 3)))
}
test("select 2, primitive and class") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(
expr("_1").as[String],
expr("named_struct('a', _1, 'b', _2)").as[ClassData]),
("a", ClassData("a", 1)), ("b", ClassData("b", 2)), ("c", ClassData("c", 3)))
}
test("select 2, primitive and class, fields reordered") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(
expr("_1").as[String],
expr("named_struct('b', _2, 'a', _1)").as[ClassData]),
("a", ClassData("a", 1)), ("b", ClassData("b", 2)), ("c", ClassData("c", 3)))
}
test("REGEX column specification") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
var e = intercept[AnalysisException] {
ds.select(expr("`(_1)?+.+`").as[Int])
}.getMessage
assert(e.contains("cannot resolve '`(_1)?+.+`'"))
e = intercept[AnalysisException] {
ds.select(expr("`(_1|_2)`").as[Int])
}.getMessage
assert(e.contains("cannot resolve '`(_1|_2)`'"))
e = intercept[AnalysisException] {
ds.select(ds("`(_1)?+.+`"))
}.getMessage
assert(e.contains("Cannot resolve column name \\"`(_1)?+.+`\\""))
e = intercept[AnalysisException] {
ds.select(ds("`(_1|_2)`"))
}.getMessage
assert(e.contains("Cannot resolve column name \\"`(_1|_2)`\\""))
}
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "true") {
checkDataset(
ds.select(ds.col("_2")).as[Int],
1, 2, 3)
checkDataset(
ds.select(ds.colRegex("`(_1)?+.+`")).as[Int],
1, 2, 3)
checkDataset(
ds.select(ds("`(_1|_2)`"))
.select(expr("named_struct('a', _1, 'b', _2)").as[ClassData]),
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
checkDataset(
ds.alias("g")
.select(ds("g.`(_1|_2)`"))
.select(expr("named_struct('a', _1, 'b', _2)").as[ClassData]),
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
checkDataset(
ds.select(ds("`(_1)?+.+`"))
.select(expr("_2").as[Int]),
1, 2, 3)
checkDataset(
ds.alias("g")
.select(ds("g.`(_1)?+.+`"))
.select(expr("_2").as[Int]),
1, 2, 3)
checkDataset(
ds.select(expr("`(_1)?+.+`").as[Int]),
1, 2, 3)
val m = ds.select(expr("`(_1|_2)`"))
checkDataset(
ds.select(expr("`(_1|_2)`"))
.select(expr("named_struct('a', _1, 'b', _2)").as[ClassData]),
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
checkDataset(
ds.alias("g")
.select(expr("g.`(_1)?+.+`").as[Int]),
1, 2, 3)
checkDataset(
ds.alias("g")
.select(expr("g.`(_1|_2)`"))
.select(expr("named_struct('a', _1, 'b', _2)").as[ClassData]),
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
}
}
test("filter") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.filter(_._1 == "b"),
("b", 2))
}
test("filter and then select") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.filter(_._1 == "b").select(expr("_1").as[String]),
"b")
}
test("SPARK-15632: typed filter should preserve the underlying logical schema") {
val ds = spark.range(10)
val ds2 = ds.filter(_ > 3)
assert(ds.schema.equals(ds2.schema))
}
test("foreach") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.longAccumulator
ds.foreach(v => acc.add(v._2))
assert(acc.value == 6)
}
test("foreachPartition") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.longAccumulator
ds.foreachPartition((it: Iterator[(String, Int)]) => it.foreach(v => acc.add(v._2)))
assert(acc.value == 6)
}
test("reduce") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
assert(ds.reduce((a, b) => ("sum", a._2 + b._2)) == (("sum", 6)))
}
test("joinWith, flat schema") {
val ds1 = Seq(1, 2, 3).toDS().as("a")
val ds2 = Seq(1, 2).toDS().as("b")
checkDataset(
ds1.joinWith(ds2, $"a.value" === $"b.value", "inner"),
(1, 1), (2, 2))
}
test("joinWith tuple with primitive, expression") {
val ds1 = Seq(1, 1, 2).toDS()
val ds2 = Seq(("a", 1), ("b", 2)).toDS()
checkDataset(
ds1.joinWith(ds2, $"value" === $"_2"),
(1, ("a", 1)), (1, ("a", 1)), (2, ("b", 2)))
}
test("joinWith class with primitive, toDF") {
val ds1 = Seq(1, 1, 2).toDS()
val ds2 = Seq(ClassData("a", 1), ClassData("b", 2)).toDS()
checkAnswer(
ds1.joinWith(ds2, $"value" === $"b").toDF().select($"_1", $"_2.a", $"_2.b"),
Row(1, "a", 1) :: Row(1, "a", 1) :: Row(2, "b", 2) :: Nil)
}
test("multi-level joinWith") {
val ds1 = Seq(("a", 1), ("b", 2)).toDS().as("a")
val ds2 = Seq(("a", 1), ("b", 2)).toDS().as("b")
val ds3 = Seq(("a", 1), ("b", 2)).toDS().as("c")
checkDataset(
ds1.joinWith(ds2, $"a._2" === $"b._2").as("ab").joinWith(ds3, $"ab._1._2" === $"c._2"),
((("a", 1), ("a", 1)), ("a", 1)),
((("b", 2), ("b", 2)), ("b", 2)))
}
test("joinWith join types") {
val ds1 = Seq(1, 2, 3).toDS().as("a")
val ds2 = Seq(1, 2).toDS().as("b")
val e1 = intercept[AnalysisException] {
ds1.joinWith(ds2, $"a.value" === $"b.value", "left_semi")
}.getMessage
assert(e1.contains("Invalid join type in joinWith: " + LeftSemi.sql))
val e2 = intercept[AnalysisException] {
ds1.joinWith(ds2, $"a.value" === $"b.value", "left_anti")
}.getMessage
assert(e2.contains("Invalid join type in joinWith: " + LeftAnti.sql))
}
test("groupBy function, keys") {
val ds = Seq(("a", 1), ("b", 1)).toDS()
val grouped = ds.groupByKey(v => (1, v._2))
checkDatasetUnorderly(
grouped.keys,
(1, 1))
}
test("groupBy function, map") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val grouped = ds.groupByKey(v => (v._1, "word"))
val agged = grouped.mapGroups { case (g, iter) => (g._1, iter.map(_._2).sum) }
checkDatasetUnorderly(
agged,
("a", 30), ("b", 3), ("c", 1))
}
test("groupBy function, flatMap") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val grouped = ds.groupByKey(v => (v._1, "word"))
val agged = grouped.flatMapGroups { case (g, iter) =>
Iterator(g._1, iter.map(_._2).sum.toString)
}
checkDatasetUnorderly(
agged,
"a", "30", "b", "3", "c", "1")
}
test("groupBy function, mapValues, flatMap") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val keyValue = ds.groupByKey(_._1).mapValues(_._2)
val agged = keyValue.mapGroups { case (g, iter) => (g, iter.sum) }
checkDataset(agged, ("a", 30), ("b", 3), ("c", 1))
val keyValue1 = ds.groupByKey(t => (t._1, "key")).mapValues(t => (t._2, "value"))
val agged1 = keyValue1.mapGroups { case (g, iter) => (g._1, iter.map(_._1).sum) }
checkDataset(agged, ("a", 30), ("b", 3), ("c", 1))
}
test("groupBy function, reduce") {
val ds = Seq("abc", "xyz", "hello").toDS()
val agged = ds.groupByKey(_.length).reduceGroups(_ + _)
checkDatasetUnorderly(
agged,
3 -> "abcxyz", 5 -> "hello")
}
test("groupBy single field class, count") {
val ds = Seq("abc", "xyz", "hello").toDS()
val count = ds.groupByKey(s => Tuple1(s.length)).count()
checkDataset(
count,
(Tuple1(3), 2L), (Tuple1(5), 1L)
)
}
test("typed aggregation: expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkDatasetUnorderly(
ds.groupByKey(_._1).agg(sum("_2").as[Long]),
("a", 30L), ("b", 3L), ("c", 1L))
}
test("typed aggregation: expr, expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkDatasetUnorderly(
ds.groupByKey(_._1).agg(sum("_2").as[Long], sum($"_2" + 1).as[Long]),
("a", 30L, 32L), ("b", 3L, 5L), ("c", 1L, 2L))
}
test("typed aggregation: expr, expr, expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkDatasetUnorderly(
ds.groupByKey(_._1).agg(sum("_2").as[Long], sum($"_2" + 1).as[Long], count("*")),
("a", 30L, 32L, 2L), ("b", 3L, 5L, 2L), ("c", 1L, 2L, 1L))
}
test("typed aggregation: expr, expr, expr, expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkDatasetUnorderly(
ds.groupByKey(_._1).agg(
sum("_2").as[Long],
sum($"_2" + 1).as[Long],
count("*").as[Long],
avg("_2").as[Double]),
("a", 30L, 32L, 2L, 15.0), ("b", 3L, 5L, 2L, 1.5), ("c", 1L, 2L, 1L, 1.0))
}
test("cogroup") {
val ds1 = Seq(1 -> "a", 3 -> "abc", 5 -> "hello", 3 -> "foo").toDS()
val ds2 = Seq(2 -> "q", 3 -> "w", 5 -> "e", 5 -> "r").toDS()
val cogrouped = ds1.groupByKey(_._1).cogroup(ds2.groupByKey(_._1)) { case (key, data1, data2) =>
Iterator(key -> (data1.map(_._2).mkString + "#" + data2.map(_._2).mkString))
}
checkDatasetUnorderly(
cogrouped,
1 -> "a#", 2 -> "#q", 3 -> "abcfoo#w", 5 -> "hello#er")
}
test("cogroup with complex data") {
val ds1 = Seq(1 -> ClassData("a", 1), 2 -> ClassData("b", 2)).toDS()
val ds2 = Seq(2 -> ClassData("c", 3), 3 -> ClassData("d", 4)).toDS()
val cogrouped = ds1.groupByKey(_._1).cogroup(ds2.groupByKey(_._1)) { case (key, data1, data2) =>
Iterator(key -> (data1.map(_._2.a).mkString + data2.map(_._2.a).mkString))
}
checkDatasetUnorderly(
cogrouped,
1 -> "a", 2 -> "bc", 3 -> "d")
}
test("sample with replacement") {
val n = 100
val data = sparkContext.parallelize(1 to n, 2).toDS()
checkDataset(
data.sample(withReplacement = true, 0.05, seed = 13),
5, 10, 52, 73)
}
test("sample without replacement") {
val n = 100
val data = sparkContext.parallelize(1 to n, 2).toDS()
checkDataset(
data.sample(withReplacement = false, 0.05, seed = 13),
3, 17, 27, 58, 62)
}
test("sample fraction should not be negative with replacement") {
val data = sparkContext.parallelize(1 to 2, 1).toDS()
val errMsg = intercept[IllegalArgumentException] {
data.sample(withReplacement = true, -0.1, 0)
}.getMessage
assert(errMsg.contains("Sampling fraction (-0.1) must be nonnegative with replacement"))
// Sampling fraction can be greater than 1 with replacement.
checkDataset(
data.sample(withReplacement = true, 1.05, seed = 13),
1, 2)
}
test("sample fraction should be on interval [0, 1] without replacement") {
val data = sparkContext.parallelize(1 to 2, 1).toDS()
val errMsg1 = intercept[IllegalArgumentException] {
data.sample(withReplacement = false, -0.1, 0)
}.getMessage()
assert(errMsg1.contains(
"Sampling fraction (-0.1) must be on interval [0, 1] without replacement"))
val errMsg2 = intercept[IllegalArgumentException] {
data.sample(withReplacement = false, 1.1, 0)
}.getMessage()
assert(errMsg2.contains(
"Sampling fraction (1.1) must be on interval [0, 1] without replacement"))
}
test("SPARK-16686: Dataset.sample with seed results shouldn't depend on downstream usage") {
val simpleUdf = udf((n: Int) => {
require(n != 1, "simpleUdf shouldn't see id=1!")
1
})
val df = Seq(
(0, "string0"),
(1, "string1"),
(2, "string2"),
(3, "string3"),
(4, "string4"),
(5, "string5"),
(6, "string6"),
(7, "string7"),
(8, "string8"),
(9, "string9")
).toDF("id", "stringData")
val sampleDF = df.sample(false, 0.7, 50)
// After sampling, sampleDF doesn't contain id=1.
assert(!sampleDF.select("id").collect.contains(1))
// simpleUdf should not encounter id=1.
checkAnswer(sampleDF.select(simpleUdf($"id")), List.fill(sampleDF.count.toInt)(Row(1)))
}
test("SPARK-11436: we should rebind right encoder when join 2 datasets") {
val ds1 = Seq("1", "2").toDS().as("a")
val ds2 = Seq(2, 3).toDS().as("b")
val joined = ds1.joinWith(ds2, $"a.value" === $"b.value")
checkDataset(joined, ("2", 2))
}
test("self join") {
val ds = Seq("1", "2").toDS().as("a")
val joined = ds.joinWith(ds, lit(true), "cross")
checkDataset(joined, ("1", "1"), ("1", "2"), ("2", "1"), ("2", "2"))
}
test("toString") {
val ds = Seq((1, 2)).toDS()
assert(ds.toString == "[_1: int, _2: int]")
}
test("Kryo encoder") {
implicit val kryoEncoder = Encoders.kryo[KryoData]
val ds = Seq(KryoData(1), KryoData(2)).toDS()
assert(ds.groupByKey(p => p).count().collect().toSet ==
Set((KryoData(1), 1L), (KryoData(2), 1L)))
}
test("Kryo encoder self join") {
implicit val kryoEncoder = Encoders.kryo[KryoData]
val ds = Seq(KryoData(1), KryoData(2)).toDS()
assert(ds.joinWith(ds, lit(true), "cross").collect().toSet ==
Set(
(KryoData(1), KryoData(1)),
(KryoData(1), KryoData(2)),
(KryoData(2), KryoData(1)),
(KryoData(2), KryoData(2))))
}
test("Kryo encoder: check the schema mismatch when converting DataFrame to Dataset") {
implicit val kryoEncoder = Encoders.kryo[KryoData]
val df = Seq((1)).toDF("a")
val e = intercept[AnalysisException] {
df.as[KryoData]
}.message
assert(e.contains("cannot cast int to binary"))
}
test("Java encoder") {
implicit val kryoEncoder = Encoders.javaSerialization[JavaData]
val ds = Seq(JavaData(1), JavaData(2)).toDS()
assert(ds.groupByKey(p => p).count().collect().toSet ==
Set((JavaData(1), 1L), (JavaData(2), 1L)))
}
test("Java encoder self join") {
implicit val kryoEncoder = Encoders.javaSerialization[JavaData]
val ds = Seq(JavaData(1), JavaData(2)).toDS()
assert(ds.joinWith(ds, lit(true), "cross").collect().toSet ==
Set(
(JavaData(1), JavaData(1)),
(JavaData(1), JavaData(2)),
(JavaData(2), JavaData(1)),
(JavaData(2), JavaData(2))))
}
test("SPARK-14696: implicit encoders for boxed types") {
assert(spark.range(1).map { i => i : java.lang.Long }.head == 0L)
}
test("SPARK-11894: Incorrect results are returned when using null") {
val nullInt = null.asInstanceOf[java.lang.Integer]
val ds1 = Seq((nullInt, "1"), (new java.lang.Integer(22), "2")).toDS()
val ds2 = Seq((nullInt, "1"), (new java.lang.Integer(22), "2")).toDS()
checkDataset(
ds1.joinWith(ds2, lit(true), "cross"),
((nullInt, "1"), (nullInt, "1")),
((nullInt, "1"), (new java.lang.Integer(22), "2")),
((new java.lang.Integer(22), "2"), (nullInt, "1")),
((new java.lang.Integer(22), "2"), (new java.lang.Integer(22), "2")))
}
test("change encoder with compatible schema") {
val ds = Seq(2 -> 2.toByte, 3 -> 3.toByte).toDF("a", "b").as[ClassData]
assert(ds.collect().toSeq == Seq(ClassData("2", 2), ClassData("3", 3)))
}
test("verify mismatching field names fail with a good error") {
val ds = Seq(ClassData("a", 1)).toDS()
val e = intercept[AnalysisException] {
ds.as[ClassData2]
}
assert(e.getMessage.contains("cannot resolve '`c`' given input columns: [a, b]"), e.getMessage)
}
test("runtime nullability check") {
val schema = StructType(Seq(
StructField("f", StructType(Seq(
StructField("a", StringType, nullable = true),
StructField("b", IntegerType, nullable = true)
)), nullable = true)
))
def buildDataset(rows: Row*): Dataset[NestedStruct] = {
val rowRDD = spark.sparkContext.parallelize(rows)
spark.createDataFrame(rowRDD, schema).as[NestedStruct]
}
checkDataset(
buildDataset(Row(Row("hello", 1))),
NestedStruct(ClassData("hello", 1))
)
// Shouldn't throw runtime exception when parent object (`ClassData`) is null
assert(buildDataset(Row(null)).collect() === Array(NestedStruct(null)))
val message = intercept[RuntimeException] {
buildDataset(Row(Row("hello", null))).collect()
}.getMessage
assert(message.contains("Null value appeared in non-nullable field"))
}
test("SPARK-12478: top level null field") {
val ds0 = Seq(NestedStruct(null)).toDS()
checkDataset(ds0, NestedStruct(null))
checkAnswer(ds0.toDF(), Row(null))
val ds1 = Seq(DeepNestedStruct(NestedStruct(null))).toDS()
checkDataset(ds1, DeepNestedStruct(NestedStruct(null)))
checkAnswer(ds1.toDF(), Row(Row(null)))
}
test("support inner class in Dataset") {
val outer = new OuterClass
OuterScopes.addOuterScope(outer)
val ds = Seq(outer.InnerClass("1"), outer.InnerClass("2")).toDS()
checkDataset(ds.map(_.a), "1", "2")
}
test("grouping key and grouped value has field with same name") {
val ds = Seq(ClassData("a", 1), ClassData("a", 2)).toDS()
val agged = ds.groupByKey(d => ClassNullableData(d.a, null)).mapGroups {
case (key, values) => key.a + values.map(_.b).sum
}
checkDataset(agged, "a3")
}
test("cogroup's left and right side has field with same name") {
val left = Seq(ClassData("a", 1), ClassData("b", 2)).toDS()
val right = Seq(ClassNullableData("a", 3), ClassNullableData("b", 4)).toDS()
val cogrouped = left.groupByKey(_.a).cogroup(right.groupByKey(_.a)) {
case (key, lData, rData) => Iterator(key + lData.map(_.b).sum + rData.map(_.b.toInt).sum)
}
checkDataset(cogrouped, "a13", "b24")
}
test("give nice error message when the real number of fields doesn't match encoder schema") {
val ds = Seq(ClassData("a", 1), ClassData("b", 2)).toDS()
val message = intercept[AnalysisException] {
ds.as[(String, Int, Long)]
}.message
assert(message ==
"Try to map struct<a:string,b:int> to Tuple3, " +
"but failed as the number of fields does not line up.")
val message2 = intercept[AnalysisException] {
ds.as[Tuple1[String]]
}.message
assert(message2 ==
"Try to map struct<a:string,b:int> to Tuple1, " +
"but failed as the number of fields does not line up.")
}
test("SPARK-13440: Resolving option fields") {
val df = Seq(1, 2, 3).toDS()
val ds = df.as[Option[Int]]
checkDataset(
ds.filter(_ => true),
Some(1), Some(2), Some(3))
}
test("SPARK-13540 Dataset of nested class defined in Scala object") {
checkDataset(
Seq(OuterObject.InnerClass("foo")).toDS(),
OuterObject.InnerClass("foo"))
}
test("SPARK-14000: case class with tuple type field") {
checkDataset(
Seq(TupleClass((1, "a"))).toDS(),
TupleClass((1, "a"))
)
}
test("isStreaming returns false for static Dataset") {
val data = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
assert(!data.isStreaming, "static Dataset returned true for 'isStreaming'.")
}
test("isStreaming returns true for streaming Dataset") {
val data = MemoryStream[Int].toDS()
assert(data.isStreaming, "streaming Dataset returned false for 'isStreaming'.")
}
test("isStreaming returns true after static and streaming Dataset join") {
val static = Seq(("a", 1), ("b", 2), ("c", 3)).toDF("a", "b")
val streaming = MemoryStream[Int].toDS().toDF("b")
val df = streaming.join(static, Seq("b"))
assert(df.isStreaming, "streaming Dataset returned false for 'isStreaming'.")
}
test("SPARK-14554: Dataset.map may generate wrong java code for wide table") {
val wideDF = spark.range(10).select(Seq.tabulate(1000) {i => ('id + i).as(s"c$i")} : _*)
// Make sure the generated code for this plan can compile and execute.
checkDataset(wideDF.map(_.getLong(0)), 0L until 10 : _*)
}
test("SPARK-14838: estimating sizeInBytes in operators with ObjectProducer shouldn't fail") {
val dataset = Seq(
(0, 3, 54f),
(0, 4, 44f),
(0, 5, 42f),
(1, 3, 39f),
(1, 5, 33f),
(1, 4, 26f),
(2, 3, 51f),
(2, 5, 45f),
(2, 4, 30f)
).toDF("user", "item", "rating")
val actual = dataset
.select("user", "item")
.as[(Int, Int)]
.groupByKey(_._1)
.mapGroups { case (src, ids) => (src, ids.map(_._2).toArray) }
.toDF("id", "actual")
dataset.join(actual, dataset("user") === actual("id")).collect()
}
test("SPARK-15097: implicits on dataset's spark can be imported") {
val dataset = Seq(1, 2, 3).toDS()
checkDataset(DatasetTransform.addOne(dataset), 2, 3, 4)
}
test("dataset.rdd with generic case class") {
val ds = Seq(Generic(1, 1.0), Generic(2, 2.0)).toDS()
val ds2 = ds.map(g => Generic(g.id, g.value))
assert(ds.rdd.map(r => r.id).count === 2)
assert(ds2.rdd.map(r => r.id).count === 2)
val ds3 = ds.map(g => new java.lang.Long(g.id))
assert(ds3.rdd.map(r => r).count === 2)
}
test("runtime null check for RowEncoder") {
val schema = new StructType().add("i", IntegerType, nullable = false)
val df = spark.range(10).map(l => {
if (l % 5 == 0) {
Row(null)
} else {
Row(l)
}
})(RowEncoder(schema))
val message = intercept[Exception] {
df.collect()
}.getMessage
assert(message.contains("The 0th field 'i' of input row cannot be null"))
}
test("row nullability mismatch") {
val schema = new StructType().add("a", StringType, true).add("b", StringType, false)
val rdd = spark.sparkContext.parallelize(Row(null, "123") :: Row("234", null) :: Nil)
val message = intercept[Exception] {
spark.createDataFrame(rdd, schema).collect()
}.getMessage
assert(message.contains("The 1th field 'b' of input row cannot be null"))
}
test("createTempView") {
val dataset = Seq(1, 2, 3).toDS()
dataset.createOrReplaceTempView("tempView")
// Overrides the existing temporary view with same name
// No exception should be thrown here.
dataset.createOrReplaceTempView("tempView")
// Throws AnalysisException if temp view with same name already exists
val e = intercept[AnalysisException](
dataset.createTempView("tempView"))
intercept[AnalysisException](dataset.createTempView("tempView"))
assert(e.message.contains("already exists"))
dataset.sparkSession.catalog.dropTempView("tempView")
}
test("SPARK-15381: physical object operator should define `reference` correctly") {
val df = Seq(1 -> 2).toDF("a", "b")
checkAnswer(df.map(row => row)(RowEncoder(df.schema)).select("b", "a"), Row(2, 1))
}
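  // Compares Dataset.showString output against an expected rendering; the number of rows to
  // show is derived from the expected string (its line count minus the 4 header/border lines).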
private def checkShowString[T](ds: Dataset[T], expected: String): Unit = {
val numRows = expected.split("\\n").length - 4
val actual = ds.showString(numRows, truncate = 20)
if (expected != actual) {
fail(
"Dataset.showString() gives wrong result:\\n\\n" + sideBySide(
"== Expected ==\\n" + expected,
"== Actual ==\\n" + actual
).mkString("\\n")
)
}
}
test("SPARK-15550 Dataset.show() should show contents of the underlying logical plan") {
val df = Seq((1, "foo", "extra"), (2, "bar", "extra")).toDF("b", "a", "c")
val ds = df.as[ClassData]
val expected =
"""+---+---+-----+
|| b| a| c|
|+---+---+-----+
|| 1|foo|extra|
|| 2|bar|extra|
|+---+---+-----+
|""".stripMargin
checkShowString(ds, expected)
}
test("SPARK-15550 Dataset.show() should show inner nested products as rows") {
val ds = Seq(
NestedStruct(ClassData("foo", 1)),
NestedStruct(ClassData("bar", 2))
).toDS()
val expected =
"""+--------+
|| f|
|+--------+
||[foo, 1]|
||[bar, 2]|
|+--------+
|""".stripMargin
checkShowString(ds, expected)
}
test(
"SPARK-15112: EmbedDeserializerInFilter should not optimize plan fragment that changes schema"
) {
val ds = Seq(1 -> "foo", 2 -> "bar").toDF("b", "a").as[ClassData]
assertResult(Seq(ClassData("foo", 1), ClassData("bar", 2))) {
ds.collect().toSeq
}
assertResult(Seq(ClassData("bar", 2))) {
ds.filter(_.b > 1).collect().toSeq
}
}
test("mapped dataset should resolve duplicated attributes for self join") {
val ds = Seq(1, 2, 3).toDS().map(_ + 1)
val ds1 = ds.as("d1")
val ds2 = ds.as("d2")
checkDatasetUnorderly(ds1.joinWith(ds2, $"d1.value" === $"d2.value"), (2, 2), (3, 3), (4, 4))
checkDatasetUnorderly(ds1.intersect(ds2), 2, 3, 4)
checkDatasetUnorderly(ds1.except(ds1))
}
test("SPARK-15441: Dataset outer join") {
val left = Seq(ClassData("a", 1), ClassData("b", 2)).toDS().as("left")
val right = Seq(ClassData("x", 2), ClassData("y", 3)).toDS().as("right")
val joined = left.joinWith(right, $"left.b" === $"right.b", "left")
val result = joined.collect().toSet
assert(result == Set(ClassData("a", 1) -> null, ClassData("b", 2) -> ClassData("x", 2)))
}
test("better error message when use java reserved keyword as field name") {
val e = intercept[UnsupportedOperationException] {
Seq(InvalidInJava(1)).toDS()
}
assert(e.getMessage.contains(
"`abstract` is a reserved keyword and cannot be used as field name"))
}
test("Dataset should support flat input object to be null") {
checkDataset(Seq("a", null).toDS(), "a", null)
}
test("Dataset should throw RuntimeException if top-level product input object is null") {
val e = intercept[RuntimeException](Seq(ClassData("a", 1), null).toDS())
assert(e.getMessage.contains("Null value appeared in non-nullable field"))
assert(e.getMessage.contains("top level Product input object"))
}
test("dropDuplicates") {
val ds = Seq(("a", 1), ("a", 2), ("b", 1), ("a", 1)).toDS()
checkDataset(
ds.dropDuplicates("_1"),
("a", 1), ("b", 1))
checkDataset(
ds.dropDuplicates("_2"),
("a", 1), ("a", 2))
checkDataset(
ds.dropDuplicates("_1", "_2"),
("a", 1), ("a", 2), ("b", 1))
}
test("dropDuplicates: columns with same column name") {
val ds1 = Seq(("a", 1), ("a", 2), ("b", 1), ("a", 1)).toDS()
val ds2 = Seq(("a", 1), ("a", 2), ("b", 1), ("a", 1)).toDS()
// The dataset joined has two columns of the same name "_2".
val joined = ds1.join(ds2, "_1").select(ds1("_2").as[Int], ds2("_2").as[Int])
checkDataset(
joined.dropDuplicates(),
(1, 2), (1, 1), (2, 1), (2, 2))
}
test("SPARK-16097: Encoders.tuple should handle null object correctly") {
val enc = Encoders.tuple(Encoders.tuple(Encoders.STRING, Encoders.STRING), Encoders.STRING)
val data = Seq((("a", "b"), "c"), (null, "d"))
val ds = spark.createDataset(data)(enc)
checkDataset(ds, (("a", "b"), "c"), (null, "d"))
}
test("SPARK-16995: flat mapping on Dataset containing a column created with lit/expr") {
val df = Seq("1").toDF("a")
import df.sparkSession.implicits._
checkDataset(
df.withColumn("b", lit(0)).as[ClassData]
.groupByKey(_.a).flatMapGroups { case (x, iter) => List[Int]() })
checkDataset(
df.withColumn("b", expr("0")).as[ClassData]
.groupByKey(_.a).flatMapGroups { case (x, iter) => List[Int]() })
}
test("SPARK-18125: Spark generated code causes CompileException") {
val data = Array(
Route("a", "b", 1),
Route("a", "b", 2),
Route("a", "c", 2),
Route("a", "d", 10),
Route("b", "a", 1),
Route("b", "a", 5),
Route("b", "c", 6))
val ds = sparkContext.parallelize(data).toDF.as[Route]
val grped = ds.map(r => GroupedRoutes(r.src, r.dest, Seq(r)))
.groupByKey(r => (r.src, r.dest))
.reduceGroups { (g1: GroupedRoutes, g2: GroupedRoutes) =>
GroupedRoutes(g1.src, g1.dest, g1.routes ++ g2.routes)
}.map(_._2)
val expected = Seq(
GroupedRoutes("a", "d", Seq(Route("a", "d", 10))),
GroupedRoutes("b", "c", Seq(Route("b", "c", 6))),
GroupedRoutes("a", "b", Seq(Route("a", "b", 1), Route("a", "b", 2))),
GroupedRoutes("b", "a", Seq(Route("b", "a", 1), Route("b", "a", 5))),
GroupedRoutes("a", "c", Seq(Route("a", "c", 2)))
)
implicit def ordering[GroupedRoutes]: Ordering[GroupedRoutes] = new Ordering[GroupedRoutes] {
override def compare(x: GroupedRoutes, y: GroupedRoutes): Int = {
x.toString.compareTo(y.toString)
}
}
checkDatasetUnorderly(grped, expected: _*)
}
test("SPARK-18189: Fix serialization issue in KeyValueGroupedDataset") {
val resultValue = 12345
val keyValueGrouped = Seq((1, 2), (3, 4)).toDS().groupByKey(_._1)
val mapGroups = keyValueGrouped.mapGroups((k, v) => (k, 1))
val broadcasted = spark.sparkContext.broadcast(resultValue)
// Using broadcast triggers serialization issue in KeyValueGroupedDataset
val dataset = mapGroups.map(_ => broadcasted.value)
assert(dataset.collect() sameElements Array(resultValue, resultValue))
}
test("SPARK-18284: Serializer should have correct nullable value") {
val df1 = Seq(1, 2, 3, 4).toDF
assert(df1.schema(0).nullable == false)
val df2 = Seq(Integer.valueOf(1), Integer.valueOf(2)).toDF
assert(df2.schema(0).nullable == true)
val df3 = Seq(Seq(1, 2), Seq(3, 4)).toDF
assert(df3.schema(0).nullable == true)
assert(df3.schema(0).dataType.asInstanceOf[ArrayType].containsNull == false)
val df4 = Seq(Seq("a", "b"), Seq("c", "d")).toDF
assert(df4.schema(0).nullable == true)
assert(df4.schema(0).dataType.asInstanceOf[ArrayType].containsNull == true)
val df5 = Seq((0, 1.0), (2, 2.0)).toDF("id", "v")
assert(df5.schema(0).nullable == false)
assert(df5.schema(1).nullable == false)
val df6 = Seq((0, 1.0, "a"), (2, 2.0, "b")).toDF("id", "v1", "v2")
assert(df6.schema(0).nullable == false)
assert(df6.schema(1).nullable == false)
assert(df6.schema(2).nullable == true)
val df7 = (Tuple1(Array(1, 2, 3)) :: Nil).toDF("a")
assert(df7.schema(0).nullable == true)
assert(df7.schema(0).dataType.asInstanceOf[ArrayType].containsNull == false)
val df8 = (Tuple1(Array((null: Integer), (null: Integer))) :: Nil).toDF("a")
assert(df8.schema(0).nullable == true)
assert(df8.schema(0).dataType.asInstanceOf[ArrayType].containsNull == true)
val df9 = (Tuple1(Map(2 -> 3)) :: Nil).toDF("m")
assert(df9.schema(0).nullable == true)
assert(df9.schema(0).dataType.asInstanceOf[MapType].valueContainsNull == false)
val df10 = (Tuple1(Map(1 -> (null: Integer))) :: Nil).toDF("m")
assert(df10.schema(0).nullable == true)
assert(df10.schema(0).dataType.asInstanceOf[MapType].valueContainsNull == true)
val df11 = Seq(TestDataPoint(1, 2.2, "a", null),
TestDataPoint(3, 4.4, "null", (TestDataPoint2(33, "b")))).toDF
assert(df11.schema(0).nullable == false)
assert(df11.schema(1).nullable == false)
assert(df11.schema(2).nullable == true)
assert(df11.schema(3).nullable == true)
assert(df11.schema(3).dataType.asInstanceOf[StructType].fields(0).nullable == false)
assert(df11.schema(3).dataType.asInstanceOf[StructType].fields(1).nullable == true)
}
Seq(true, false).foreach { eager =>
Seq(true, false).foreach { reliable =>
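      // Exercise all four combinations of eager vs. lazy and reliable (checkpoint-directory-backed)
      // vs. local checkpointing.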
def testCheckpointing(testName: String)(f: => Unit): Unit = {
test(s"Dataset.checkpoint() - $testName (eager = $eager, reliable = $reliable)") {
if (reliable) {
withTempDir { dir =>
val originalCheckpointDir = spark.sparkContext.checkpointDir
try {
spark.sparkContext.setCheckpointDir(dir.getCanonicalPath)
f
} finally {
// Since the original checkpointDir can be None, we need
// to set the variable directly.
spark.sparkContext.checkpointDir = originalCheckpointDir
}
}
} else {
          // Local checkpoints don't require a checkpoint directory.
f
}
}
}
testCheckpointing("basic") {
val ds = spark.range(10).repartition('id % 2).filter('id > 5).orderBy('id.desc)
val cp = if (reliable) ds.checkpoint(eager) else ds.localCheckpoint(eager)
val logicalRDD = cp.logicalPlan match {
case plan: LogicalRDD => plan
case _ =>
val treeString = cp.logicalPlan.treeString(verbose = true)
fail(s"Expecting a LogicalRDD, but got\\n$treeString")
}
val dsPhysicalPlan = ds.queryExecution.executedPlan
val cpPhysicalPlan = cp.queryExecution.executedPlan
assertResult(dsPhysicalPlan.outputPartitioning) {
logicalRDD.outputPartitioning
}
assertResult(dsPhysicalPlan.outputOrdering) {
logicalRDD.outputOrdering
}
assertResult(dsPhysicalPlan.outputPartitioning) {
cpPhysicalPlan.outputPartitioning
}
assertResult(dsPhysicalPlan.outputOrdering) {
cpPhysicalPlan.outputOrdering
}
// For a lazy checkpoint() call, the first check also materializes the checkpoint.
checkDataset(cp, (9L to 6L by -1L).map(java.lang.Long.valueOf): _*)
        // Read back from the checkpointed data and check again.
checkDataset(cp, (9L to 6L by -1L).map(java.lang.Long.valueOf): _*)
}
testCheckpointing("should preserve partitioning information") {
val ds = spark.range(10).repartition('id % 2)
val cp = if (reliable) ds.checkpoint(eager) else ds.localCheckpoint(eager)
val agg = cp.groupBy('id % 2).agg(count('id))
agg.queryExecution.executedPlan.collectFirst {
case ShuffleExchangeExec(_, _: RDDScanExec, _) =>
case BroadcastExchangeExec(_, _: RDDScanExec) =>
}.foreach { _ =>
fail(
"No Exchange should be inserted above RDDScanExec since the checkpointed Dataset " +
"preserves partitioning information:\\n\\n" + agg.queryExecution
)
}
checkAnswer(agg, ds.groupBy('id % 2).agg(count('id)))
}
}
}
test("identity map for primitive arrays") {
val arrayByte = Array(1.toByte, 2.toByte, 3.toByte)
val arrayInt = Array(1, 2, 3)
val arrayLong = Array(1.toLong, 2.toLong, 3.toLong)
val arrayDouble = Array(1.1, 2.2, 3.3)
val arrayString = Array("a", "b", "c")
val dsByte = sparkContext.parallelize(Seq(arrayByte), 1).toDS.map(e => e)
val dsInt = sparkContext.parallelize(Seq(arrayInt), 1).toDS.map(e => e)
val dsLong = sparkContext.parallelize(Seq(arrayLong), 1).toDS.map(e => e)
val dsDouble = sparkContext.parallelize(Seq(arrayDouble), 1).toDS.map(e => e)
val dsString = sparkContext.parallelize(Seq(arrayString), 1).toDS.map(e => e)
checkDataset(dsByte, arrayByte)
checkDataset(dsInt, arrayInt)
checkDataset(dsLong, arrayLong)
checkDataset(dsDouble, arrayDouble)
checkDataset(dsString, arrayString)
}
test("SPARK-18251: the type of Dataset can't be Option of Product type") {
checkDataset(Seq(Some(1), None).toDS(), Some(1), None)
val e = intercept[UnsupportedOperationException] {
Seq(Some(1 -> "a"), None).toDS()
}
assert(e.getMessage.contains("Cannot create encoder for Option of Product type"))
}
test ("SPARK-17460: the sizeInBytes in Statistics shouldn't overflow to a negative number") {
// Since the sizeInBytes in Statistics could exceed the limit of an Int, we should use BigInt
// instead of Int for avoiding possible overflow.
val ds = (0 to 10000).map( i =>
(i, Seq((i, Seq((i, "This is really not that long of a string")))))).toDS()
val sizeInBytes = ds.logicalPlan.stats.sizeInBytes
// sizeInBytes is 2404280404, before the fix, it overflows to a negative number
assert(sizeInBytes > 0)
}
test("SPARK-18717: code generation works for both scala.collection.Map" +
" and scala.collection.imutable.Map") {
val ds = Seq(WithImmutableMap("hi", Map(42L -> "foo"))).toDS
checkDataset(ds.map(t => t), WithImmutableMap("hi", Map(42L -> "foo")))
val ds2 = Seq(WithMap("hi", Map(42L -> "foo"))).toDS
checkDataset(ds2.map(t => t), WithMap("hi", Map(42L -> "foo")))
}
test("SPARK-18746: add implicit encoder for BigDecimal, date, timestamp") {
// For this implicit encoder, 18 is the default scale
assert(spark.range(1).map { x => new java.math.BigDecimal(1) }.head ==
new java.math.BigDecimal(1).setScale(18))
assert(spark.range(1).map { x => scala.math.BigDecimal(1, 18) }.head ==
scala.math.BigDecimal(1, 18))
assert(spark.range(1).map { x => java.sql.Date.valueOf("2016-12-12") }.head ==
java.sql.Date.valueOf("2016-12-12"))
assert(spark.range(1).map { x => new java.sql.Timestamp(100000) }.head ==
new java.sql.Timestamp(100000))
}
test("SPARK-19896: cannot have circular references in in case class") {
val errMsg1 = intercept[UnsupportedOperationException] {
Seq(CircularReferenceClassA(null)).toDS
}
assert(errMsg1.getMessage.startsWith("cannot have circular references in class, but got the " +
"circular reference of class"))
val errMsg2 = intercept[UnsupportedOperationException] {
Seq(CircularReferenceClassC(null)).toDS
}
assert(errMsg2.getMessage.startsWith("cannot have circular references in class, but got the " +
"circular reference of class"))
val errMsg3 = intercept[UnsupportedOperationException] {
Seq(CircularReferenceClassD(null)).toDS
}
assert(errMsg3.getMessage.startsWith("cannot have circular references in class, but got the " +
"circular reference of class"))
}
test("SPARK-20125: option of map") {
val ds = Seq(WithMapInOption(Some(Map(1 -> 1)))).toDS()
checkDataset(ds, WithMapInOption(Some(Map(1 -> 1))))
}
test("SPARK-20399: do not unescaped regex pattern when ESCAPED_STRING_LITERALS is enabled") {
withSQLConf(SQLConf.ESCAPED_STRING_LITERALS.key -> "true") {
val data = Seq("\\u0020\\u0021\\u0023", "abc")
val df = data.toDF()
val rlike1 = df.filter("value rlike '^\\\\x20[\\\\x20-\\\\x23]+$'")
val rlike2 = df.filter($"value".rlike("^\\\\x20[\\\\x20-\\\\x23]+$"))
val rlike3 = df.filter("value rlike '^\\\\\\\\x20[\\\\\\\\x20-\\\\\\\\x23]+$'")
checkAnswer(rlike1, rlike2)
assert(rlike3.count() == 0)
}
}
test("SPARK-21538: Attribute resolution inconsistency in Dataset API") {
val df = spark.range(3).withColumnRenamed("id", "x")
val expected = Row(0) :: Row(1) :: Row (2) :: Nil
checkAnswer(df.sort("id"), expected)
checkAnswer(df.sort(col("id")), expected)
checkAnswer(df.sort($"id"), expected)
checkAnswer(df.sort('id), expected)
checkAnswer(df.orderBy("id"), expected)
checkAnswer(df.orderBy(col("id")), expected)
checkAnswer(df.orderBy($"id"), expected)
checkAnswer(df.orderBy('id), expected)
}
test("SPARK-21567: Dataset should work with type alias") {
checkDataset(
Seq(1).toDS().map(_ => ("", TestForTypeAlias.tupleTypeAlias)),
("", (1, 1)))
checkDataset(
Seq(1).toDS().map(_ => ("", TestForTypeAlias.nestedTupleTypeAlias)),
("", ((1, 1), 2)))
checkDataset(
Seq(1).toDS().map(_ => ("", TestForTypeAlias.seqOfTupleTypeAlias)),
("", Seq((1, 1), (2, 2))))
}
test("Check RelationalGroupedDataset toString: Single data") {
val kvDataset = (1 to 3).toDF("id").groupBy("id")
val expected = "RelationalGroupedDataset: [" +
"grouping expressions: [id: int], value: [id: int], type: GroupBy]"
val actual = kvDataset.toString
assert(expected === actual)
}
test("Check RelationalGroupedDataset toString: over length schema ") {
val kvDataset = (1 to 3).map( x => (x, x.toString, x.toLong))
.toDF("id", "val1", "val2").groupBy("id")
val expected = "RelationalGroupedDataset:" +
" [grouping expressions: [id: int]," +
" value: [id: int, val1: string ... 1 more field]," +
" type: GroupBy]"
val actual = kvDataset.toString
assert(expected === actual)
}
test("Check KeyValueGroupedDataset toString: Single data") {
val kvDataset = (1 to 3).toDF("id").as[SingleData].groupByKey(identity)
val expected = "KeyValueGroupedDataset: [key: [id: int], value: [id: int]]"
val actual = kvDataset.toString
assert(expected === actual)
}
test("Check KeyValueGroupedDataset toString: Unnamed KV-pair") {
val kvDataset = (1 to 3).map(x => (x, x.toString))
.toDF("id", "val1").as[DoubleData].groupByKey(x => (x.id, x.val1))
val expected = "KeyValueGroupedDataset:" +
" [key: [_1: int, _2: string]," +
" value: [id: int, val1: string]]"
val actual = kvDataset.toString
assert(expected === actual)
}
test("Check KeyValueGroupedDataset toString: Named KV-pair") {
val kvDataset = (1 to 3).map( x => (x, x.toString))
.toDF("id", "val1").as[DoubleData].groupByKey(x => DoubleData(x.id, x.val1))
val expected = "KeyValueGroupedDataset:" +
" [key: [id: int, val1: string]," +
" value: [id: int, val1: string]]"
val actual = kvDataset.toString
assert(expected === actual)
}
test("Check KeyValueGroupedDataset toString: over length schema ") {
val kvDataset = (1 to 3).map( x => (x, x.toString, x.toLong))
.toDF("id", "val1", "val2").as[TripleData].groupByKey(identity)
val expected = "KeyValueGroupedDataset:" +
" [key: [id: int, val1: string ... 1 more field(s)]," +
" value: [id: int, val1: string ... 1 more field(s)]]"
val actual = kvDataset.toString
assert(expected === actual)
}
test("SPARK-22442: Generate correct field names for special characters") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val data = """{"field.1": 1, "field 2": 2}"""
Seq(data).toDF().repartition(1).write.text(path)
val ds = spark.read.json(path).as[SpecialCharClass]
checkDataset(ds, SpecialCharClass("1", "2"))
}
}
test("SPARK-23627: provide isEmpty in DataSet") {
val ds1 = spark.emptyDataset[Int]
val ds2 = Seq(1, 2, 3).toDS()
assert(ds1.isEmpty == true)
assert(ds2.isEmpty == false)
}
test("SPARK-22472: add null check for top-level primitive values") {
// If the primitive values are from Option, we need to do runtime null check.
val ds = Seq(Some(1), None).toDS().as[Int]
intercept[NullPointerException](ds.collect())
val e = intercept[SparkException](ds.map(_ * 2).collect())
assert(e.getCause.isInstanceOf[NullPointerException])
withTempPath { path =>
Seq(new Integer(1), null).toDF("i").write.parquet(path.getCanonicalPath)
// If the primitive values are from files, we need to do runtime null check.
val ds = spark.read.parquet(path.getCanonicalPath).as[Int]
intercept[NullPointerException](ds.collect())
val e = intercept[SparkException](ds.map(_ * 2).collect())
assert(e.getCause.isInstanceOf[NullPointerException])
}
}
test("SPARK-23025: Add support for null type in scala reflection") {
val data = Seq(("a", null))
checkDataset(data.toDS(), data: _*)
}
test("SPARK-23614: Union produces incorrect results when caching is used") {
val cached = spark.createDataset(Seq(TestDataUnion(1, 2, 3), TestDataUnion(4, 5, 6))).cache()
val group1 = cached.groupBy("x").agg(min(col("y")) as "value")
val group2 = cached.groupBy("x").agg(min(col("z")) as "value")
checkAnswer(group1.union(group2), Row(4, 5) :: Row(1, 2) :: Row(4, 6) :: Row(1, 3) :: Nil)
}
test("SPARK-23835: null primitive data type should throw NullPointerException") {
val ds = Seq[(Option[Int], Option[Int])]((Some(1), None)).toDS()
intercept[NullPointerException](ds.as[(Int, Int)].collect())
}
}
case class TestDataUnion(x: Int, y: Int, z: Int)
case class SingleData(id: Int)
case class DoubleData(id: Int, val1: String)
case class TripleData(id: Int, val1: String, val2: Long)
case class WithImmutableMap(id: String, map_test: scala.collection.immutable.Map[Long, String])
case class WithMap(id: String, map_test: scala.collection.Map[Long, String])
case class WithMapInOption(m: Option[scala.collection.Map[Int, Int]])
case class Generic[T](id: T, value: Double)
case class OtherTuple(_1: String, _2: Int)
case class TupleClass(data: (Int, String))
class OuterClass extends Serializable {
case class InnerClass(a: String)
}
object OuterObject {
case class InnerClass(a: String)
}
case class ClassData(a: String, b: Int)
case class ClassData2(c: String, d: Int)
case class ClassNullableData(a: String, b: Integer)
case class NestedStruct(f: ClassData)
case class DeepNestedStruct(f: NestedStruct)
case class InvalidInJava(`abstract`: Int)
/**
* A class used to test serialization using encoders. This class throws exceptions when using
* Java serialization -- so the only way it can be "serialized" is through our encoders.
*/
case class NonSerializableCaseClass(value: String) extends Externalizable {
override def readExternal(in: ObjectInput): Unit = {
throw new UnsupportedOperationException
}
override def writeExternal(out: ObjectOutput): Unit = {
throw new UnsupportedOperationException
}
}
/** Used to test Kryo encoder. */
class KryoData(val a: Int) {
override def equals(other: Any): Boolean = {
a == other.asInstanceOf[KryoData].a
}
override def hashCode: Int = a
override def toString: String = s"KryoData($a)"
}
object KryoData {
def apply(a: Int): KryoData = new KryoData(a)
}
/** Used to test Java encoder. */
class JavaData(val a: Int) extends Serializable {
override def equals(other: Any): Boolean = {
a == other.asInstanceOf[JavaData].a
}
override def hashCode: Int = a
override def toString: String = s"JavaData($a)"
}
object JavaData {
def apply(a: Int): JavaData = new JavaData(a)
}
/** Used to test importing dataset.spark.implicits._ */
object DatasetTransform {
def addOne(ds: Dataset[Int]): Dataset[Int] = {
import ds.sparkSession.implicits._
ds.map(_ + 1)
}
}
case class Route(src: String, dest: String, cost: Int)
case class GroupedRoutes(src: String, dest: String, routes: Seq[Route])
case class CircularReferenceClassA(cls: CircularReferenceClassB)
case class CircularReferenceClassB(cls: CircularReferenceClassA)
case class CircularReferenceClassC(ar: Array[CircularReferenceClassC])
case class CircularReferenceClassD(map: Map[String, CircularReferenceClassE])
case class CircularReferenceClassE(id: String, list: List[CircularReferenceClassD])
case class SpecialCharClass(`field.1`: String, `field 2`: String)
| lxsmnv/spark | sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala | Scala | apache-2.0 | 54,621 |
package lensimpl
import org.scalatest.{FunSuite, Matchers}
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
class LensSpec extends FunSuite with Matchers with ScalaCheckDrivenPropertyChecks {
import Person._
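  // Each lens implementation below is checked against the two round-trip lens laws:
  // set-then-get returns the value that was set, and get-then-set leaves the object unchanged.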
test("set - get (ADT)") {
forAll { (a: Int, p: Person) => ageCC.get(ageCC.set(a, p)) == a }
}
test("get - set (ADT)") {
forAll { (p: Person) => ageCC.set(ageCC.get(p), p) == p }
}
test("set - get (Van Laarhoven)") {
forAll { (a: Int, p: Person) => ageVL.get(ageVL.set(a, p)) == a }
}
test("get - set (Van Laarhoven)") {
    forAll { (p: Person) => ageVL.set(ageVL.get(p), p) == p }
  }
test("set - get (Profunctor)") {
forAll { (a: Int, p: Person) => agePF.get(agePF.set(a, p)) == a }
}
test("get - set (Profunctor)") {
forAll { (p: Person) => agePF.set(agePF.get(p), p) == p }
}
test("set - get (Monocle1)") {
forAll { (a: Int, p: Person) => ageMO1.get(ageMO1.set(a, p)) == a }
}
test("get - set (Monocle1)") {
forAll { (p: Person) => ageMO1.set(ageMO1.get(p), p) == p }
}
test("set - get (Subtyping)") {
forAll { (a: Int, p: Person) => ageSubtyping.get(ageSubtyping.set(a)(p)) == a }
}
test("get - set (Subtyping)") {
forAll { (p: Person) => ageSubtyping.set(ageSubtyping.get(p))(p) == p }
}
}
| julien-truffaut/LensImpl | core/src/test/scala/lensimpl/LensSpec.scala | Scala | mit | 1,315 |
package fi.proweb.train.model.app
import fi.proweb.train.model.AppData
import scala.collection.mutable.Map
class TrainList extends AppData[TrainList] {
var trains = Map[String, Train]()
def makeCopy: TrainList = {
val trainlist = new TrainList
trainlist.trains = Map[String, Train]()
trains.foreach { t =>
trainlist.trains += ((t._1, t._2.makeCopy))
}
trainlist
}
} | roikonen/MissaJuna | app/fi/proweb/train/model/app/TrainList.scala | Scala | apache-2.0 | 417 |
/***
* Copyright 2014 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker.handler
import javax.servlet.FilterChain
import com.rackspace.com.papi.components.checker.Validator
import com.rackspace.com.papi.components.checker.servlet._
import com.rackspace.com.papi.components.checker.step.base.{Step, StepContext}
import com.rackspace.com.papi.components.checker.step.results.Result
import org.w3c.dom.Document
abstract class ResultHandler {
def init(validator : Validator, checker : Option[Document]) : Unit
def handle (req : CheckerServletRequest, resp : CheckerServletResponse, chain : FilterChain, result : Result) : Unit
def inStep (currentStep: Step, req: CheckerServletRequest, resp : CheckerServletResponse, context: StepContext) : StepContext = { context }
def destroy : Unit = {}
}
| wdschei/api-checker | core/src/main/scala/com/rackspace/com/papi/components/checker/handler/ResultHandler.scala | Scala | apache-2.0 | 1,401 |
package se.uu.farmbio.parsers
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce.InputSplit
import org.apache.hadoop.mapreduce.RecordReader
import org.apache.hadoop.mapreduce.TaskAttemptContext
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.hadoop.mapreduce.lib.input.FileSplit
class RXNInputFormat extends FileInputFormat[LongWritable, Text] {
  def createRecordReader(split: InputSplit, context: TaskAttemptContext): RecordReader[LongWritable, Text] = {
    new RXNRecordReader(split.asInstanceOf[FileSplit], context)
}
} | mcapuccini/spark-cheminformatics | parsers/src/main/scala/se/uu/farmbio/parsers/RXNInputFormat.scala | Scala | apache-2.0 | 658 |
package wow.realm.protocol.payloads
import wow.common.codecs._
import wow.realm.protocol._
import scodec.Codec
import scodec.codecs._
/**
* Server authentication challenge
*/
case class ServerAuthChallenge(authSeed: Long, firstSeed: BigInt, secondSeed: BigInt) extends Payload with ServerSide
object ServerAuthChallenge {
val SeedSize = 16
implicit val opCodeProvider: OpCodeProvider[ServerAuthChallenge] = OpCodes.SAuthChallenge
implicit val codec: Codec[ServerAuthChallenge] = {
constantE(1L)(uint32L) ::
("authSeed" | uint32L) ::
("firstSeed" | fixedUBigIntL(SeedSize)) ::
("secondSeed" | fixedUBigIntL(SeedSize))
}.as[ServerAuthChallenge]
}
| SKNZ/SpinaciCore | wow/core/src/main/scala/wow/realm/protocol/payloads/ServerAuthChallenge.scala | Scala | mit | 685 |
package devnull.storage
import java.util.UUID
import doobie.postgres.pgtypes._
import doobie.hi
import doobie.imports._
class FeedbackRepository {
val uuidType = UuidType
object Queries {
def insert(fb: Feedback): Update0 = {
sql"""
INSERT INTO feedback (
created,
client_info,
voter_id,
ip_address,
session_id,
rating_overall,
rating_relevance,
rating_content,
rating_quality,
comments
) VALUES (
current_timestamp,
${fb.voterInfo.clientInfo},
${fb.voterInfo.voterId},
${fb.voterInfo.ipAddress},
${fb.sessionId},
${fb.rating.overall},
${fb.rating.relevance},
${fb.rating.content},
${fb.rating.quality},
${fb.rating.comments}
)""".update
}
def selectAllFeedbacks: Query0[Feedback] = {
sql"""
SELECT
id,
created,
voter_id,
ip_address,
client_info,
session_id,
rating_overall,
rating_relevance,
rating_content,
rating_quality,
comments
FROM feedback""".query[(Feedback)]
}
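    // Average the four ratings for a single session. Only each voter's most recent feedback row
    // is counted: row_number() partitions by (voter_id, session_id) ordered by created DESC,
    // and rk = 1 keeps the latest vote.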
def selectAvgForSession(sessionId: UUID): Query0[FeedbackResult] = {
sql"""
SELECT
avg(fb.over) :: FLOAT AS overall,
avg(fb.rele) :: FLOAT AS relevance,
avg(fb.cont) :: FLOAT AS content,
avg(fb.qual) :: FLOAT AS quality,
count(*) :: FLOAT AS counts
FROM (
WITH unique_feedbacks AS (
SELECT
f.id,
f.voter_id,
substring(f.client_info FROM 0 FOR 30),
f.session_id AS session_id,
f.rating_overall AS over,
f.rating_relevance AS rele,
f.rating_content AS cont,
f.rating_quality AS qual,
row_number()
OVER(
PARTITION BY f.voter_id, f.session_id
ORDER BY f.created DESC
) AS rk
FROM feedback f
)
SELECT uf.*
FROM unique_feedbacks uf
          WHERE uf.rk = 1
ORDER BY uf.session_id
) fb
WHERE session_id = $sessionId
GROUP BY fb.session_id
ORDER BY counts DESC
""".query[FeedbackResult]
}
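    // Same de-duplication as selectAvgForSession, but aggregated over every session that belongs
    // to the given event (sessions are resolved through the paper_feedback table).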
def selectAvgForEvent(eventId: UUID): Query0[FeedbackResult] = {
sql"""
SELECT
avg(fb.over) :: FLOAT AS overall,
avg(fb.rele) :: FLOAT AS relevance,
avg(fb.cont) :: FLOAT AS content,
avg(fb.qual) :: FLOAT AS quality,
count(*) :: FLOAT AS counts
FROM (
WITH unique_feedbacks AS (
SELECT
f.id,
f.voter_id,
substring(f.client_info FROM 0 FOR 30),
f.session_id AS session_id,
f.rating_overall AS over,
f.rating_relevance AS rele,
f.rating_content AS cont,
f.rating_quality AS qual,
row_number()
OVER(
PARTITION BY f.voter_id, f.session_id
ORDER BY f.created DESC
) AS rk
FROM feedback f
JOIN paper_feedback pf USING (session_id)
WHERE event_id = $eventId
)
SELECT uf.*
FROM unique_feedbacks uf
WHERE uf.rk = 1
ORDER BY uf.session_id
) fb
""".query[FeedbackResult]
}
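    // Returns the non-null comment from each voter's most recent feedback for the session.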
def selectComments(sessionId: UUID): Query0[String] = {
sql"""
SELECT
fb.comments
FROM (
WITH unique_feedbacks AS (
SELECT
f.id,
f.voter_id,
f.session_id AS session_id,
f.comments AS COMMENTS,
row_number()
OVER(
PARTITION BY f.voter_id, f.session_id
ORDER BY f.created DESC
) AS rk
FROM feedback f
)
SELECT uf.*
FROM unique_feedbacks uf
WHERE uf.rk = 1
ORDER BY uf.session_id
) fb
WHERE fb.session_id = $sessionId
AND fb.comments IS NOT NULL
""".query[(String)]
}
}
def insertFeedback(fb: Feedback): hi.ConnectionIO[FeedbackId] = {
Queries.insert(fb).withUniqueGeneratedKeys[FeedbackId]("id")
}
def selectFeedbacks(): hi.ConnectionIO[List[Feedback]] = {
Queries.selectAllFeedbacks.list
}
def selectFeedbackForSession(
sessionId: UUID
): hi.ConnectionIO[Option[FeedbackResult]] = {
Queries.selectAvgForSession(sessionId).option
}
def selectFeedbackForEvent(eventId: UUID): hi.ConnectionIO[Option[FeedbackResult]] = {
Queries.selectAvgForEvent(eventId).option
}
def selectComments(sessionId: UUID): hi.ConnectionIO[List[String]] = {
Queries.selectComments(sessionId).list
}
}
| javaBin/devnull | src/main/scala/devnull/storage/FeedbackRepository.scala | Scala | apache-2.0 | 5,056 |
package com.houseofmoran.selfies.faces
import java.awt.image.BufferedImage
import java.io.IOException
import java.net.URL
import javax.imageio.ImageIO
import org.openimaj.image.ImageUtilities
import org.openimaj.image.processing.face.detection.HaarCascadeDetector
import scala.collection.JavaConversions._
object Faces {
def detectIn(bufferedImg: BufferedImage) : Seq[DetectedFaceInContext] = {
val img = ImageUtilities.createFImage(bufferedImg)
val detector = new HaarCascadeDetector()
detector.detectFaces(img).map(face => {
DetectedFaceInContext(face, bufferedImg)
})
}
  def detectIn(urls: Seq[URL]): Map[URL, Seq[DetectedFaceInContext]] = {
    // Note: using `return` inside the fold function would be a non-local return and exit this
    // method after the first URL, so the accumulated map is yielded as the lambda's result instead.
    urls.foldLeft(Map[URL, Seq[DetectedFaceInContext]]())((map, url) => {
      try {
        val bufferedImg = ImageIO.read(url)
        map.updated(url, detectIn(bufferedImg))
      }
      catch {
        case e: IOException =>
          // Skip unreadable URLs and keep the results accumulated so far.
          map
      }
    })
  }
}
| mikemoraned/selfies | src/main/scala/com/houseofmoran/selfies/faces/Faces.scala | Scala | mit | 970 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate
import java.io.File
import collection.immutable.Map
import util.Log
import xml.NodeSeq
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import org.slf4j.LoggerFactory
/**
* @version $Revision : 1.1 $
*/
@RunWith(classOf[JUnitRunner])
abstract class FunSuiteSupport extends FunSuite with Log with BeforeAndAfterAll {
protected var _basedir = "."
/**
* Returns the base directory of the current project
*/
def baseDir = new File(_basedir)
override protected def beforeAll(map: Map[String, Any]): Unit = {
_basedir = map.get("basedir") match {
case Some(basedir) => basedir.toString
case _ => System.getProperty("basedir", ".")
}
debug("using basedir: %s", _basedir)
}
def assertType(anyRef: AnyRef, expectedClass: Class[_]): Unit = {
assert(anyRef != null, "expected instance of " + expectedClass.getName)
expect(expectedClass) {anyRef.getClass}
}
} | dnatic09/scalate | scalate-util/src/test/scala/org/fusesource/scalate/util/FunSuiteSupport.scala | Scala | apache-2.0 | 1,751 |
package bozzy
import java.io.FileNotFoundException
import java.util.PropertyResourceBundle
/**
* Created by ted on 2016-04-06.
*/
object I18n {
  // getResource returns null when the bundle is missing, so check before opening the stream;
  // the PropertyResourceBundle constructor itself never returns null.
  private val resourceUrl = getClass.getResource("/bundle/Resources.properties")
  if (resourceUrl == null) {
    throw new FileNotFoundException("Cannot load resource: /bundle/Resources.properties")
  }
  val i18n = new PropertyResourceBundle(resourceUrl.openStream)
}
| Germanika/Bozzy | src/main/scala/bozzy/I18n.scala | Scala | gpl-3.0 | 373 |
/*
* Copyright 2020 Frugal Mechanic (http://frugalmechanic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fm.common.rich
import java.util.Optional
class RichOptional[T](val self: Optional[T]) extends AnyVal {
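  // Example (hypothetical usage): new RichOptional(Optional.of("x")).asScala == Some("x")
  // and new RichOptional(Optional.empty[String]()).asScala == None.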
/** Implements asScala method similar to collection.JavaConverters._ for java Optional class */
def asScala: Option[T] = if (self.isPresent) Some(self.get) else None
} | frugalmechanic/fm-common | shared/src/main/scala/fm/common/rich/RichOptional.scala | Scala | apache-2.0 | 911 |
package nodes.nlp
import scala.reflect.ClassTag
trait NGramIndexer[WordType, NGramType] extends Serializable {
val minNgramOrder: Int
val maxNgramOrder: Int
/**
* Packs a sequence of words of type WordType into a single NGramType. The
* current word is `ngram.last`, and the words before are the context.
*/
def pack(ngram: Seq[WordType]): NGramType
}
/**
* A family of NGramIndexer that can unpack or strip off specific words, query
* the order of an packed ngram, etc.
*
* Such indexers are useful for LMs that require backoff contexts (e.g. Stupid Backoff, KN).
*/
trait BackoffIndexer[WordType, NGramType] extends NGramIndexer[WordType, NGramType] {
/**
* Unpacks the `pos` word out of the packed ngram of type NGramType. Position 0
* indicates the farthest context (if unigram, the current word), and position
* MAX_ORDER-1 represents the current word.
*
* Useful for getting words at special positions (e.g. first two in context).
*/
def unpack(ngram: NGramType, pos: Int): WordType
def removeFarthestWord(ngram: NGramType): NGramType
def removeCurrentWord(ngram: NGramType): NGramType
/** Returns an order in [minNgramOrder, maxNgramOrder] if valid; otherwise errors out. */
def ngramOrder(ngram: NGramType): Int
}
/**
* Packs up to 3 words (trigrams) into a single Long by bit packing.
*
* Assumptions:
* (1) |Vocab| <= one million (20 bits per word).
* (2) Words get mapped into [0, |Vocab|). In particular, each word ID < 2**20.
*/
object NaiveBitPackIndexer extends BackoffIndexer[Int, Long] {
final val minNgramOrder = 1
final val maxNgramOrder = 3
/**
* The packed layout (most significant to least):
* [4 control bits] [farthest word] ... [curr word].
* If can't fill all bits, we prefer to left-align.
*/
def pack(ngram: Seq[Int]): Long = {
ngram.foreach { word => require(word < math.pow(2, 20)) }
// Four most significant bits are control bits:
// 0000: unigram; 0001: bigram; 0010: trigram
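    // Worked example (hypothetical word IDs): pack(Seq(3, 7)) packs a bigram as
    //   (1L << 60) | (3L << 40) | (7L << 20)
    // i.e. control = 0001, farthest word 3 in bits 59-40, current word 7 in bits 39-20.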
ngram.length match {
case 1 => ngram(0).toLong << 40
case 2 => (ngram(1).toLong << 20) | (ngram(0).toLong << 40) | (1L << 60)
case 3 => ngram(2).toLong | (ngram(1).toLong << 20) | (ngram(0).toLong << 40) | (1L << 61)
case _ => sys.error("ngram order need to be in { 1, 2, 3 } for now")
}
}
def unpack(ngram: Long, pos: Int): Int = pos match {
case 0 => ((ngram >>> 40) & ((1 << 20) - 1)).asInstanceOf[Int]
case 1 => ((ngram >>> 20) & ((1 << 20) - 1)).asInstanceOf[Int]
case 2 => (ngram & ((1 << 20) - 1)).asInstanceOf[Int]
case _ => sys.error("ngram order need to be in { 1, 2, 3 } for now")
}
def removeFarthestWord(ngram: Long): Long = {
val order = ngramOrder(ngram)
    // The control-bit mask must be a Long: `0xF << 60L` shifts an Int (count taken modulo 32)
    // and would corrupt the result.
    val ngramCleared = ngram & (0xFL << 60)
    val stripped = ngram & ((1L << 40) - 1)
    val shifted = ((stripped << 20L) | ngramCleared) & ~(0xFL << 60)
// Now set the control bits accordingly
order match {
case 2 => shifted
case 3 => shifted | (1L << 60)
case _ => sys.error(s"ngram order is either invalid or not supported: $order")
}
}
def removeCurrentWord(ngram: Long): Long = {
val order = ngramOrder(ngram)
order match {
case 2 =>
val stripped = ngram & ~((1L << 40) - 1)
        stripped & ~(0xFL << 60)
      case 3 =>
        val stripped = ngram & ~((1L << 20) - 1)
        (stripped & ~(0xFL << 60)) | (1L << 60)
case _ => sys.error(s"ngram order is either invalid or not supported: $order")
}
}
def ngramOrder(ngram: Long): Int = {
    val order = ((ngram & (0xFL << 60)) >>> 60).asInstanceOf[Int]
if (order + 1 >= minNgramOrder && order + 1 <= maxNgramOrder) {
order + 1
} else {
sys.error(s"raw control bits $order are invalid")
}
}
}
class NGramIndexerImpl[@specialized(Int) T: ClassTag]
extends BackoffIndexer[T, NGram[T]] {
final val minNgramOrder = 1
final val maxNgramOrder = 5 // TODO: makes sense to set it to infty?
// TODO: Call .toArray() to do a copy?
def pack(ngram: Seq[T]): NGram[T] = new NGram(ngram)
def unpack(ngram: NGram[T], pos: Int): T = ngram.words(pos)
// TODO: does the interface allow modifying same NGram object?
def removeFarthestWord(ngram: NGram[T]): NGram[T] =
new NGram(ngram.words.drop(1))
def removeCurrentWord(ngram: NGram[T]): NGram[T] =
new NGram(ngram.words.dropRight(1))
def ngramOrder(ngram: NGram[T]): Int = ngram.words.length
}
| dongjoon-hyun/keystone | src/main/scala/nodes/nlp/indexers.scala | Scala | apache-2.0 | 4,472 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
// DO NOT EDIT, CHANGES WILL BE LOST
// This auto-generated code can be modified in "project/GenerateAnyVals.scala".
// Afterwards, running "sbt generateSources" regenerates this source file.
package scala
/** `Int`, a 32-bit signed integer (equivalent to Java's `int` primitive type) is a
* subtype of [[scala.AnyVal]]. Instances of `Int` are not
* represented by an object in the underlying runtime system.
*
* There is an implicit conversion from [[scala.Int]] => [[scala.runtime.RichInt]]
* which provides useful non-primitive operations.
*/
final abstract class Int private extends AnyVal {
def toByte: Byte
def toShort: Short
def toChar: Char
def toInt: Int
def toLong: Long
def toFloat: Float
def toDouble: Double
/**
* Returns the bitwise negation of this value.
* @example {{{
* ~5 == -6
* // in binary: ~00000101 ==
* // 11111010
* }}}
*/
def unary_~ : Int
/** Returns this value, unmodified. */
def unary_+ : Int
/** Returns the negation of this value. */
def unary_- : Int
@deprecated("Adding a number and a String is deprecated. Use the string interpolation `s\\"$num$str\\"`", "2.13.0")
def +(x: String): String
/**
* Returns this value bit-shifted left by the specified number of bits,
* filling in the new right bits with zeroes.
* @example {{{ 6 << 3 == 48 // in binary: 0110 << 3 == 0110000 }}}
*/
def <<(x: Int): Int
/**
* Returns this value bit-shifted left by the specified number of bits,
* filling in the new right bits with zeroes.
* @example {{{ 6 << 3 == 48 // in binary: 0110 << 3 == 0110000 }}}
*/
@deprecated("shifting a value by a `Long` argument is deprecated (except when the value is a `Long`).\\nCall `toInt` on the argument to maintain the current behavior and avoid the deprecation warning.", "2.12.7")
def <<(x: Long): Int
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling the new left bits with zeroes.
* @example {{{ 21 >>> 3 == 2 // in binary: 010101 >>> 3 == 010 }}}
* @example {{{
* -21 >>> 3 == 536870909
* // in binary: 11111111 11111111 11111111 11101011 >>> 3 ==
* // 00011111 11111111 11111111 11111101
* }}}
*/
def >>>(x: Int): Int
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling the new left bits with zeroes.
* @example {{{ 21 >>> 3 == 2 // in binary: 010101 >>> 3 == 010 }}}
* @example {{{
* -21 >>> 3 == 536870909
* // in binary: 11111111 11111111 11111111 11101011 >>> 3 ==
* // 00011111 11111111 11111111 11111101
* }}}
*/
@deprecated("shifting a value by a `Long` argument is deprecated (except when the value is a `Long`).\\nCall `toInt` on the argument to maintain the current behavior and avoid the deprecation warning.", "2.12.7")
def >>>(x: Long): Int
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling in the left bits with the same value as the left-most bit of this.
* The effect of this is to retain the sign of the value.
* @example {{{
* -21 >> 3 == -3
* // in binary: 11111111 11111111 11111111 11101011 >> 3 ==
* // 11111111 11111111 11111111 11111101
* }}}
*/
def >>(x: Int): Int
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling in the left bits with the same value as the left-most bit of this.
* The effect of this is to retain the sign of the value.
* @example {{{
* -21 >> 3 == -3
* // in binary: 11111111 11111111 11111111 11101011 >> 3 ==
* // 11111111 11111111 11111111 11111101
* }}}
*/
@deprecated("shifting a value by a `Long` argument is deprecated (except when the value is a `Long`).\\nCall `toInt` on the argument to maintain the current behavior and avoid the deprecation warning.", "2.12.7")
def >>(x: Long): Int
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Byte): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Short): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Char): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Int): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Long): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Float): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Double): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Byte): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Short): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Char): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Int): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Long): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Float): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Double): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Byte): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Short): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Char): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Int): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Long): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Float): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Double): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Byte): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Short): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Char): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Int): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Long): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Float): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Double): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Byte): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Short): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Char): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Int): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Long): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Float): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Double): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Byte): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Short): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Char): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Int): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Long): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Float): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Double): Boolean
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Byte): Int
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Short): Int
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Char): Int
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Int): Int
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Long): Long
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Byte): Int
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Short): Int
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Char): Int
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Int): Int
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Long): Long
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Byte): Int
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Short): Int
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Char): Int
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Int): Int
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Long): Long
/** Returns the sum of this value and `x`. */
def +(x: Byte): Int
/** Returns the sum of this value and `x`. */
def +(x: Short): Int
/** Returns the sum of this value and `x`. */
def +(x: Char): Int
/** Returns the sum of this value and `x`. */
def +(x: Int): Int
/** Returns the sum of this value and `x`. */
def +(x: Long): Long
/** Returns the sum of this value and `x`. */
def +(x: Float): Float
/** Returns the sum of this value and `x`. */
def +(x: Double): Double
/** Returns the difference of this value and `x`. */
def -(x: Byte): Int
/** Returns the difference of this value and `x`. */
def -(x: Short): Int
/** Returns the difference of this value and `x`. */
def -(x: Char): Int
/** Returns the difference of this value and `x`. */
def -(x: Int): Int
/** Returns the difference of this value and `x`. */
def -(x: Long): Long
/** Returns the difference of this value and `x`. */
def -(x: Float): Float
/** Returns the difference of this value and `x`. */
def -(x: Double): Double
/** Returns the product of this value and `x`. */
def *(x: Byte): Int
/** Returns the product of this value and `x`. */
def *(x: Short): Int
/** Returns the product of this value and `x`. */
def *(x: Char): Int
/** Returns the product of this value and `x`. */
def *(x: Int): Int
/** Returns the product of this value and `x`. */
def *(x: Long): Long
/** Returns the product of this value and `x`. */
def *(x: Float): Float
/** Returns the product of this value and `x`. */
def *(x: Double): Double
/** Returns the quotient of this value and `x`. */
def /(x: Byte): Int
/** Returns the quotient of this value and `x`. */
def /(x: Short): Int
/** Returns the quotient of this value and `x`. */
def /(x: Char): Int
/** Returns the quotient of this value and `x`. */
def /(x: Int): Int
/** Returns the quotient of this value and `x`. */
def /(x: Long): Long
/** Returns the quotient of this value and `x`. */
def /(x: Float): Float
/** Returns the quotient of this value and `x`. */
def /(x: Double): Double
/** Returns the remainder of the division of this value by `x`. */
def %(x: Byte): Int
/** Returns the remainder of the division of this value by `x`. */
def %(x: Short): Int
/** Returns the remainder of the division of this value by `x`. */
def %(x: Char): Int
/** Returns the remainder of the division of this value by `x`. */
def %(x: Int): Int
/** Returns the remainder of the division of this value by `x`. */
def %(x: Long): Long
/** Returns the remainder of the division of this value by `x`. */
def %(x: Float): Float
/** Returns the remainder of the division of this value by `x`. */
def %(x: Double): Double
// Provide a more specific return type for Scaladoc
override def getClass(): Class[Int] = ???
}
object Int extends AnyValCompanion {
/** The smallest value representable as an Int. */
final val MinValue = java.lang.Integer.MIN_VALUE
/** The largest value representable as an Int. */
final val MaxValue = java.lang.Integer.MAX_VALUE
/** Transform a value type into a boxed reference type.
*
* Runtime implementation determined by `scala.runtime.BoxesRunTime.boxToInteger`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
*
* @param x the Int to be boxed
* @return a java.lang.Integer offering `x` as its underlying value.
*/
def box(x: Int): java.lang.Integer = ???
/** Transform a boxed type into a value type. Note that this
* method is not typesafe: it accepts any Object, but will throw
* an exception if the argument is not a java.lang.Integer.
*
* Runtime implementation determined by `scala.runtime.BoxesRunTime.unboxToInt`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
*
* @param x the java.lang.Integer to be unboxed.
* @throws ClassCastException if the argument is not a java.lang.Integer
* @return the Int resulting from calling intValue() on `x`
*/
def unbox(x: java.lang.Object): Int = ???
/** The String representation of the scala.Int companion object. */
override def toString = "object scala.Int"
/** Language mandated coercions from Int to "wider" types. */
import scala.language.implicitConversions
@deprecated("Implicit conversion from Int to Float is dangerous because it loses precision. Write `.toFloat` instead.", "2.13.1")
implicit def int2float(x: Int): Float = x.toFloat
implicit def int2long(x: Int): Long = x.toLong
implicit def int2double(x: Int): Double = x.toDouble
}
| scala/scala | src/library/scala/Int.scala | Scala | apache-2.0 | 17,069 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.cli
import java.util.logging.Level
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.projections.{ AlignmentRecordField, Projection }
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.util.ParquetLogger
import org.bdgenomics.formats.avro.AlignmentRecord
import org.bdgenomics.utils.cli._
import org.bdgenomics.utils.misc.Logging
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
object CountReadKmers extends BDGCommandCompanion {
val commandName = "count_kmers"
val commandDescription = "Counts the k-mers/q-mers from a read dataset."
def apply(cmdLine: Array[String]) = {
new CountReadKmers(Args4j[CountReadKmersArgs](cmdLine))
}
}
class CountReadKmersArgs extends Args4jBase with ParquetArgs {
@Argument(required = true, metaVar = "INPUT", usage = "The ADAM, BAM or SAM file to count kmers from", index = 0)
var inputPath: String = null
@Argument(required = true, metaVar = "OUTPUT", usage = "Location for storing k-mer counts", index = 1)
var outputPath: String = null
@Argument(required = true, metaVar = "KMER_LENGTH", usage = "Length of k-mers", index = 2)
var kmerLength: Int = 0
@Args4jOption(required = false, name = "-print_histogram", usage = "Prints a histogram of counts.")
var printHistogram: Boolean = false
@Args4jOption(required = false, name = "-repartition", usage = "Set the number of partitions to map data to")
var repartition: Int = -1
}
class CountReadKmers(protected val args: CountReadKmersArgs) extends BDGSparkCommand[CountReadKmersArgs] with Logging {
val companion = CountReadKmers
def run(sc: SparkContext) {
// Quiet Parquet...
ParquetLogger.hadoopLoggerLevel(Level.SEVERE)
// read from disk
var adamRecords = sc.loadAlignments(
args.inputPath,
projection = Some(Projection(AlignmentRecordField.sequence))
)
if (args.repartition != -1) {
log.info("Repartitioning reads to '%d' partitions".format(args.repartition))
adamRecords = adamRecords.transform(_.repartition(args.repartition))
}
// count kmers
val countedKmers = adamRecords.countKmers(args.kmerLength)
// cache counted kmers
countedKmers.cache()
// print histogram, if requested
if (args.printHistogram) {
countedKmers.map(kv => kv._2.toLong)
.countByValue()
.toSeq
.sortBy(kv => kv._1)
.foreach(println)
}
// save as text file
countedKmers.saveAsTextFile(args.outputPath)
}
}
| tdanford/adam | adam-cli/src/main/scala/org/bdgenomics/adam/cli/CountReadKmers.scala | Scala | apache-2.0 | 3,388 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.parquet
import java.nio.file.Files
import java.time.temporal.ChronoUnit
import com.vividsolutions.jts.geom.{Coordinate, Point}
import org.apache.commons.io.FileUtils
import org.geotools.data.Query
import org.geotools.factory.CommonFactoryFinder
import org.geotools.filter.text.ecql.ECQL
import org.geotools.geometry.jts.JTSFactoryFinder
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.fs.storage.common.{CompositeScheme, DateTimeScheme, PartitionScheme, Z2Scheme}
import org.locationtech.geomesa.index.planning.QueryPlanner
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.io.WithClose
import org.opengis.feature.simple.SimpleFeature
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import org.specs2.specification.AllExpectations
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class ParquetFSTest extends Specification with AllExpectations {
sequential
val gf = JTSFactoryFinder.getGeometryFactory
val sft = SimpleFeatureTypes.createType("test", "name:String,age:Int,dtg:Date,*geom:Point:srid=4326")
val ff = CommonFactoryFinder.getFilterFactory2
val tempDir = Files.createTempDirectory("geomesa")
val fsStorage = new ParquetFileSystemStorageFactory().build(Map(
"fs.path" -> tempDir.toFile.getPath,
"parquet.compression" -> "gzip"
))
val sf1 = new ScalaSimpleFeature(sft, "1", Array("first", Integer.valueOf(100), new java.util.Date, gf.createPoint(new Coordinate(25.236263, 27.436734))))
val sf2 = new ScalaSimpleFeature(sft, "2", Array(null, Integer.valueOf(200), new java.util.Date, gf.createPoint(new Coordinate(67.2363, 55.236))))
val sf3 = new ScalaSimpleFeature(sft, "3", Array("third", Integer.valueOf(300), new java.util.Date, gf.createPoint(new Coordinate(73.0, 73.0))))
"ParquetFileSystemStorage" should {
"create an fs" >> {
val scheme = new CompositeScheme(Seq(
new DateTimeScheme("yyy/DDD/HH", ChronoUnit.HOURS, 1, "dtg", false),
new Z2Scheme(10, "geom", false)
))
PartitionScheme.addToSft(sft, scheme)
fsStorage.createNewFeatureType(sft, scheme)
fsStorage.listFeatureTypes().size mustEqual 1
fsStorage.listFeatureTypes().head.getTypeName mustEqual "test"
}
"write and read features" >> {
val partitionSchema = fsStorage.getPartitionScheme(sft.getTypeName)
val partitions = List(sf1, sf2, sf3).map(partitionSchema.getPartitionName)
List[SimpleFeature](sf1, sf2, sf3)
.zip(partitions)
.groupBy(_._2)
.foreach { case (partition, features) =>
val writer = fsStorage.getWriter(sft.getTypeName, partition)
features.map(_._1).foreach(writer.write)
writer.close()
}
WithClose(fsStorage.getPartitionReader(sft, new Query("test", ECQL.toFilter("name = 'first'")), partitions(0))) { reader =>
val features = reader.toList
features must haveSize(1)
features.head.getAttribute("name") mustEqual "first"
features.head.getAttribute("dtg") must not(beNull)
features.head.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 25.236263
features.head.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 27.436734
}
WithClose(fsStorage.getPartitionReader(sft, new Query("test", ECQL.toFilter("name = 'third'")), partitions(2))) { reader =>
val features = reader.toList
features must haveSize(1)
features.head.getAttribute("name") mustEqual "third"
features.head.getAttribute("dtg") must not(beNull)
features.head.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 73.0
features.head.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 73.0
}
val transform = new Query("test", ECQL.toFilter("name = 'third'"), Array("dtg", "geom"))
QueryPlanner.setQueryTransforms(transform, sft)
WithClose(fsStorage.getPartitionReader(sft, transform, partitions(2))) { reader =>
val features = reader.toList
features must haveSize(1)
features.head.getFeatureType.getAttributeDescriptors.map(_.getLocalName) mustEqual Seq("dtg", "geom")
features.head.getAttribute("name") must beNull
features.head.getAttribute("dtg") must not(beNull)
features.head.getDefaultGeometry.asInstanceOf[Point].getX mustEqual 73.0
features.head.getDefaultGeometry.asInstanceOf[Point].getY mustEqual 73.0
}
}
}
step {
FileUtils.deleteDirectory(tempDir.toFile)
}
}
| ronq/geomesa | geomesa-fs/geomesa-fs-storage-parquet/src/test/scala/org/locationtech/geomesa/parquet/ParquetFSTest.scala | Scala | apache-2.0 | 5,116 |
package com.programmaticallyspeaking.ncd.nashorn
import com.programmaticallyspeaking.ncd.host.{Script, ScriptIdentity, ScriptVersion}
import com.programmaticallyspeaking.ncd.infra.ScriptURL
import com.programmaticallyspeaking.ncd.testing.IsolatedUnitTest
class ScriptsTest extends IsolatedUnitTest {
"The Scripts repository" - {
val scripts = new Scripts
"when a script is suggested" - {
val script = aScript("script.js", "return 42;", "a", 1)
scripts.suggest(script)
"accepts it" in {
scripts.scripts.map(_.id) should be (Seq("a"))
}
"allows lookup by ID" in {
scripts.byId(ScriptIdentity.fromId("a")).map(_.id) should be (Some("a"))
}
"allows lookup by URL" in {
scripts.byId(ScriptIdentity.fromURL("script.js")).map(_.id) should be (Some("a"))
}
}
"when an anonymous script is suggested" - {
val script = aScript("", "return 42;", "a", 1)
scripts.suggest(script)
"accepts it" in {
scripts.scripts.map(_.id) should be (Seq("a"))
}
"allows lookup by ID" in {
scripts.byId(ScriptIdentity.fromId("a")).map(_.id) should be (Some("a"))
}
"doesn't allow lookup by URL" in {
scripts.byId(ScriptIdentity.fromURL("")) should be (None)
}
}
"when a script with the same contents as an existing one is added" - {
val script1 = aScript("script.js", "return 42;", "a", 1)
val script2 = aScript("scriptRecompilation.js", "return 42;", "b", 2)
scripts.suggest(script1)
val actual = scripts.suggest(script2)
"returns the original one" in {
actual.get.script shouldBe theSameInstanceAs(script1)
}
"allows lookup with the new URL (and returns the original script)" in {
scripts.byId(ScriptIdentity.fromURL("scriptRecompilation.js")).map(_.id) should be (Some("a"))
}
"only returns the target script once in an enumeration" in {
val ids = scripts.scripts.map(_.id)
ids should be (Seq("a"))
}
}
"when a newer script with different contents but same URL is added" - {
val script1 = aScript("script.js", "return 42;", "a", 1)
val script2 = aScript("script.js", "return 52;", "b", 2)
scripts.suggest(script1)
val actual = scripts.suggest(script2)
"returns the newer script" in {
actual.map(_.script.id) should be (Some("b"))
}
"returns the replacement script" in {
actual.flatMap(_.replaced.map(_.id)) should be (Some("a"))
}
"preserves the script URL" in {
actual.map(_.script.url.toString) should be (Some("script.js"))
}
"returns the new script for lookup with the URL" in {
scripts.byId(ScriptIdentity.fromURL("script.js")).map(_.contents) should be (Some("return 52;"))
}
"doesn't have a script with the old ID" in {
scripts.byId(ScriptIdentity.fromId("a")) should be (None)
}
"has a script with the new ID" in {
scripts.byId(ScriptIdentity.fromId("b")) should be ('defined)
}
"doesn't enumerate the old script" in {
val ids = scripts.scripts.map(_.id)
ids should not contain ("a")
}
}
"when an older script with different contents but same URL is added" - {
val script1 = aScript("script.js", "return 42;", "a", 2)
val script2 = aScript("script.js", "return 52;", "b", 1)
scripts.suggest(script1)
val actual = scripts.suggest(script2)
"ignores the script" in {
actual should be (None)
}
}
"when a recompilation of an already replaced script is added" - {
val script1 = aScript("script.js", "return 42;", "a", ScriptVersion(1, true))
val script2 = aScript("script.js", "return 52;", "b", ScriptVersion(2, true))
val script3 = aScript("script.js", "return 42;", "c", ScriptVersion(3, false))
scripts.suggest(script1)
scripts.suggest(script2)
val actual = scripts.suggest(script3)
"ignores the script" in {
actual should be (None)
}
}
"rejects suggestion of the same script twice" in {
val script = aScript("script.js", "return 42;", "a", 1)
scripts.suggest(script)
assertThrows[IllegalArgumentException](scripts.suggest(script))
}
}
def aScript(url: String, source: String, id: String, version: Int): Script =
aScript(url, source, id, ScriptVersion(version, true))
def aScript(url: String, source: String, id: String, version: ScriptVersion): Script =
ScriptImpl.fromSource(ScriptURL.create(url), source, id, version)
}
| provegard/ncdbg | src/test/scala/com/programmaticallyspeaking/ncd/nashorn/ScriptsTest.scala | Scala | bsd-3-clause | 4,634 |
/*
* Copyright [2014] [Jason Nerothin]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gigaspaces
import org.scalatest._
import org.openspaces.core.{GigaSpaceConfigurer, GigaSpace}
import org.openspaces.core.cluster.ClusterInfo
import org.openspaces.pu.container.{ProcessingUnitContainerProvider, ProcessingUnitContainer}
import org.openspaces.core.space.UrlSpaceConfigurer
import org.openspaces.core.space.cache.{LocalViewSpaceConfigurer, LocalCacheSpaceConfigurer}
import com.j_spaces.core.client.SQLQuery
import org.openspaces.pu.container.integrated.IntegratedProcessingUnitContainerProvider
/** Created by IntelliJ IDEA.
* User: jason
* Date: 2/27/14
* Time: 3:25 PM
*
* An abstract test suite that can be used to instrument scala tests that start up a
 * new container in standalone mode and then create a [[GigaSpace]] reference into it.
*/
abstract class GsI10nSuite extends FunSuite with BeforeAndAfterAllConfigMap with BeforeAndAfterEach {
val schemaProperty = "schema"
val spaceUrlProperty = "spaceUrl"
val numInstancesProperty = "numInstances"
val numBackupsProperty = "numBackups"
val instanceIdProperty = "instanceId"
val spaceModeProperty = "spaceMode"
val configLocationProperty = "configLocation"
val localViewQueryListProperty = "localViewQueryList"
protected var defaults = Map[String, Any]()
/**
* Test instances. The purpose of this class is to initialize these members
*/
protected var containerProvider: ProcessingUnitContainerProvider = null
protected var container: ProcessingUnitContainer = null
protected var gigaSpace: GigaSpace = null
object SpaceMode extends Enumeration {
type SpaceMode = Value
val Embedded, Remote, LocalCache, LocalView = Value
}
import SpaceMode._
/* convenience methods */
protected def spaceContents(): Int = {
assume(gigaSpace != null)
gigaSpace.count(new Object())
}
/* Default setup/tear-down behaviors */
override def beforeAll(configMap: ConfigMap = new ConfigMap(Map[String, Any]())): Unit = {
setupWith(configMap)
}
protected def setupWith(configMap: ConfigMap): Unit = {
containerProvider = createProvider(configMap)
container = createContainer(configMap)
gigaSpace = createGigaSpace(configMap)
}
override def afterAll(configMap: ConfigMap = new ConfigMap(Map[String, Any]())): Unit = {
container.close()
}
  private def getProperty(propertyName: String, configMap: ConfigMap = new ConfigMap(Map[String, Any]())): Any = {
    configMap.get(propertyName) match {
      case Some(p) => p
      case _ =>
        defaults.getOrElse(propertyName,
          throw new UnsupportedOperationException(String.format("No value exists for property name: [%s].", propertyName)))
    }
  }
/* i10n infrastructure setup methods */
private def createClusterInfo(configMap: ConfigMap = new ConfigMap(Map[String, Any]())): ClusterInfo = {
val schema = getProperty(schemaProperty, configMap)
val numInstances = getProperty(numInstancesProperty, configMap)
val numBackups = getProperty(numBackupsProperty, configMap)
val instanceId = getProperty(instanceIdProperty, configMap)
// not type-safe, but don't care
val clusterInfo = new ClusterInfo
clusterInfo.setSchema(schema.asInstanceOf[String])
clusterInfo.setNumberOfInstances(numInstances.asInstanceOf[Integer])
clusterInfo.setNumberOfBackups(numBackups.asInstanceOf[Integer])
clusterInfo.setInstanceId(instanceId.asInstanceOf[Integer])
clusterInfo
}
private def createGigaSpace(configMap: ConfigMap = new ConfigMap(Map[String, Any]())): GigaSpace = {
def makeGs(configurer: UrlSpaceConfigurer): GigaSpace = {
new GigaSpaceConfigurer(configurer).gigaSpace()
}
val spaceUrl = getProperty(spaceUrlProperty, configMap).asInstanceOf[String]
val configurer = new UrlSpaceConfigurer(spaceUrl)
getProperty(spaceModeProperty, configMap) match {
case Embedded =>
makeGs(configurer)
case Remote =>
makeGs(configurer)
case LocalCache =>
new GigaSpaceConfigurer(new LocalCacheSpaceConfigurer(configurer)).gigaSpace()
case LocalView =>
val queries = getProperty(localViewQueryListProperty, configMap).asInstanceOf[List[SQLQuery[_]]]
val viewConfigurer = new LocalViewSpaceConfigurer(configurer)
queries.foreach(qry => {
viewConfigurer.addViewQuery(qry)
})
new GigaSpaceConfigurer(viewConfigurer).gigaSpace()
}
}
private def createProvider(configMap: ConfigMap): ProcessingUnitContainerProvider = {
val containerProvider = new IntegratedProcessingUnitContainerProvider
containerProvider.setClusterInfo(createClusterInfo(configMap))
containerProvider.addConfigLocation(getProperty(configLocationProperty, configMap).asInstanceOf[String])
containerProvider
}
private def createContainer(configMap: ConfigMap): ProcessingUnitContainer = {
containerProvider.createContainer()
}
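  // Illustrative sketch (not part of the original suite): a concrete subclass would typically
  // seed `defaults` with the property keys defined above; the schema, URL and config location
  // below are assumptions for illustration only.
  //
  //   class MySpaceSuite extends GsI10nSuite {
  //     defaults = Map(
  //       schemaProperty -> "partitioned",
  //       spaceUrlProperty -> "/./mySpace",
  //       numInstancesProperty -> Integer.valueOf(1),
  //       numBackupsProperty -> Integer.valueOf(0),
  //       instanceIdProperty -> Integer.valueOf(1),
  //       spaceModeProperty -> SpaceMode.Embedded,
  //       configLocationProperty -> "classpath:/my-pu.xml")
  //
  //     test("space starts empty") { assert(spaceContents() == 0) }
  //   }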
} | Gigaspaces/gs-executor-remoting | src/test/scala/com/gigaspaces/GsI10nSuite.scala | Scala | apache-2.0 | 5,656 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.linalg
import java.util.{Arrays, Random}
import scala.collection.mutable.{ArrayBuilder => MArrayBuilder, HashSet => MHashSet, ArrayBuffer}
import breeze.linalg.{CSCMatrix => BSM, DenseMatrix => BDM, Matrix => BM}
/**
* Trait for a local matrix.
*/
sealed trait Matrix extends Serializable {
/** Number of rows. */
def numRows: Int
/** Number of columns. */
def numCols: Int
/** Flag that keeps track whether the matrix is transposed or not. False by default. */
val isTransposed: Boolean = false
/** Converts to a dense array in column major. */
def toArray: Array[Double] = {
val newArray = new Array[Double](numRows * numCols)
foreachActive { (i, j, v) =>
newArray(j * numRows + i) = v
}
newArray
}
/** Converts to a breeze matrix. */
private[mllib] def toBreeze: BM[Double]
/** Gets the (i, j)-th element. */
def apply(i: Int, j: Int): Double
/** Return the index for the (i, j)-th element in the backing array. */
private[mllib] def index(i: Int, j: Int): Int
/** Update element at (i, j) */
private[mllib] def update(i: Int, j: Int, v: Double): Unit
/** Get a deep copy of the matrix. */
def copy: Matrix
/** Transpose the Matrix. Returns a new `Matrix` instance sharing the same underlying data. */
def transpose: Matrix
/** Convenience method for `Matrix`-`DenseMatrix` multiplication. */
def multiply(y: DenseMatrix): DenseMatrix = {
val C: DenseMatrix = DenseMatrix.zeros(numRows, y.numCols)
BLAS.gemm(1.0, this, y, 0.0, C)
C
}
/** Convenience method for `Matrix`-`DenseVector` multiplication. */
def multiply(y: DenseVector): DenseVector = {
val output = new DenseVector(new Array[Double](numRows))
BLAS.gemv(1.0, this, y, 0.0, output)
output
}
/** A human readable representation of the matrix */
override def toString: String = toBreeze.toString()
/** Map the values of this matrix using a function. Generates a new matrix. Performs the
* function on only the backing array. For example, an operation such as addition or
* subtraction will only be performed on the non-zero values in a `SparseMatrix`. */
private[mllib] def map(f: Double => Double): Matrix
/** Update all the values of this matrix using the function f. Performed in-place on the
* backing array. For example, an operation such as addition or subtraction will only be
* performed on the non-zero values in a `SparseMatrix`. */
private[mllib] def update(f: Double => Double): Matrix
/**
* Applies a function `f` to all the active elements of dense and sparse matrix. The ordering
* of the elements are not defined.
*
* @param f the function takes three parameters where the first two parameters are the row
* and column indices respectively with the type `Int`, and the final parameter is the
* corresponding value in the matrix with type `Double`.
*/
private[spark] def foreachActive(f: (Int, Int, Double) => Unit)
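  // Illustrative sketch (not part of this trait): within the spark package, foreachActive can
  // drive simple aggregations over only the stored entries, e.g. per-column sums:
  //
  //   def colSums(m: Matrix): Array[Double] = {
  //     val sums = new Array[Double](m.numCols)
  //     m.foreachActive { (i, j, v) => sums(j) += v }
  //     sums
  //   }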
}
/**
* Column-major dense matrix.
* The entry values are stored in a single array of doubles with columns listed in sequence.
* For example, the following matrix
* {{{
* 1.0 2.0
* 3.0 4.0
* 5.0 6.0
* }}}
* is stored as `[1.0, 3.0, 5.0, 2.0, 4.0, 6.0]`.
*
* @param numRows number of rows
* @param numCols number of columns
* @param values matrix entries in column major if not transposed or in row major otherwise
* @param isTransposed whether the matrix is transposed. If true, `values` stores the matrix in
* row major.
*/
class DenseMatrix(
val numRows: Int,
val numCols: Int,
val values: Array[Double],
override val isTransposed: Boolean) extends Matrix {
require(values.length == numRows * numCols, "The number of values supplied doesn't match the " +
s"size of the matrix! values.length: ${values.length}, numRows * numCols: ${numRows * numCols}")
/**
* Column-major dense matrix.
* The entry values are stored in a single array of doubles with columns listed in sequence.
* For example, the following matrix
* {{{
* 1.0 2.0
* 3.0 4.0
* 5.0 6.0
* }}}
* is stored as `[1.0, 3.0, 5.0, 2.0, 4.0, 6.0]`.
*
* @param numRows number of rows
* @param numCols number of columns
* @param values matrix entries in column major
*/
def this(numRows: Int, numCols: Int, values: Array[Double]) =
this(numRows, numCols, values, false)
override def equals(o: Any) = o match {
case m: DenseMatrix =>
m.numRows == numRows && m.numCols == numCols && Arrays.equals(toArray, m.toArray)
case _ => false
}
private[mllib] def toBreeze: BM[Double] = {
if (!isTransposed) {
new BDM[Double](numRows, numCols, values)
} else {
val breezeMatrix = new BDM[Double](numCols, numRows, values)
breezeMatrix.t
}
}
private[mllib] def apply(i: Int): Double = values(i)
override def apply(i: Int, j: Int): Double = values(index(i, j))
private[mllib] def index(i: Int, j: Int): Int = {
if (!isTransposed) i + numRows * j else j + numCols * i
}
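  // Worked example of the column-major indexing above: for the 3 x 2 matrix in the class docs,
  // stored as [1.0, 3.0, 5.0, 2.0, 4.0, 6.0], index(2, 1) = 2 + 3 * 1 = 5, so apply(2, 1) == 6.0.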
private[mllib] def update(i: Int, j: Int, v: Double): Unit = {
values(index(i, j)) = v
}
override def copy = new DenseMatrix(numRows, numCols, values.clone())
private[mllib] def map(f: Double => Double) = new DenseMatrix(numRows, numCols, values.map(f))
private[mllib] def update(f: Double => Double): DenseMatrix = {
val len = values.length
var i = 0
while (i < len) {
values(i) = f(values(i))
i += 1
}
this
}
override def transpose: DenseMatrix = new DenseMatrix(numCols, numRows, values, !isTransposed)
private[spark] override def foreachActive(f: (Int, Int, Double) => Unit): Unit = {
if (!isTransposed) {
// outer loop over columns
var j = 0
while (j < numCols) {
var i = 0
val indStart = j * numRows
while (i < numRows) {
f(i, j, values(indStart + i))
i += 1
}
j += 1
}
} else {
// outer loop over rows
var i = 0
while (i < numRows) {
var j = 0
val indStart = i * numCols
while (j < numCols) {
f(i, j, values(indStart + j))
j += 1
}
i += 1
}
}
}
/**
* Generate a `SparseMatrix` from the given `DenseMatrix`. The new matrix will have isTransposed
* set to false.
*/
def toSparse: SparseMatrix = {
val spVals: MArrayBuilder[Double] = new MArrayBuilder.ofDouble
val colPtrs: Array[Int] = new Array[Int](numCols + 1)
val rowIndices: MArrayBuilder[Int] = new MArrayBuilder.ofInt
var nnz = 0
var j = 0
while (j < numCols) {
var i = 0
while (i < numRows) {
val v = values(index(i, j))
if (v != 0.0) {
rowIndices += i
spVals += v
nnz += 1
}
i += 1
}
j += 1
colPtrs(j) = nnz
}
new SparseMatrix(numRows, numCols, colPtrs, rowIndices.result(), spVals.result())
}
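  // Illustrative sketch: toSparse keeps only the non-zero entries. For example, the dense matrix
  //   new DenseMatrix(2, 2, Array(1.0, 0.0, 0.0, 2.0))
  // becomes a SparseMatrix with values = [1.0, 2.0], rowIndices = [0, 1], colPtrs = [0, 1, 2].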
}
/**
* Factory methods for [[org.apache.spark.mllib.linalg.DenseMatrix]].
*/
object DenseMatrix {
/**
* Generate a `DenseMatrix` consisting of zeros.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @return `DenseMatrix` with size `numRows` x `numCols` and values of zeros
*/
def zeros(numRows: Int, numCols: Int): DenseMatrix = {
require(numRows.toLong * numCols <= Int.MaxValue,
s"$numRows x $numCols dense matrix is too large to allocate")
new DenseMatrix(numRows, numCols, new Array[Double](numRows * numCols))
}
/**
* Generate a `DenseMatrix` consisting of ones.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @return `DenseMatrix` with size `numRows` x `numCols` and values of ones
*/
def ones(numRows: Int, numCols: Int): DenseMatrix = {
require(numRows.toLong * numCols <= Int.MaxValue,
s"$numRows x $numCols dense matrix is too large to allocate")
new DenseMatrix(numRows, numCols, Array.fill(numRows * numCols)(1.0))
}
/**
* Generate an Identity Matrix in `DenseMatrix` format.
* @param n number of rows and columns of the matrix
* @return `DenseMatrix` with size `n` x `n` and values of ones on the diagonal
*/
def eye(n: Int): DenseMatrix = {
val identity = DenseMatrix.zeros(n, n)
var i = 0
while (i < n) {
identity.update(i, i, 1.0)
i += 1
}
identity
}
/**
* Generate a `DenseMatrix` consisting of `i.i.d.` uniform random numbers.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @param rng a random number generator
* @return `DenseMatrix` with size `numRows` x `numCols` and values in U(0, 1)
*/
def rand(numRows: Int, numCols: Int, rng: Random): DenseMatrix = {
require(numRows.toLong * numCols <= Int.MaxValue,
s"$numRows x $numCols dense matrix is too large to allocate")
new DenseMatrix(numRows, numCols, Array.fill(numRows * numCols)(rng.nextDouble()))
}
/**
* Generate a `DenseMatrix` consisting of `i.i.d.` gaussian random numbers.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @param rng a random number generator
* @return `DenseMatrix` with size `numRows` x `numCols` and values in N(0, 1)
*/
def randn(numRows: Int, numCols: Int, rng: Random): DenseMatrix = {
require(numRows.toLong * numCols <= Int.MaxValue,
s"$numRows x $numCols dense matrix is too large to allocate")
new DenseMatrix(numRows, numCols, Array.fill(numRows * numCols)(rng.nextGaussian()))
}
/**
* Generate a diagonal matrix in `DenseMatrix` format from the supplied values.
* @param vector a `Vector` that will form the values on the diagonal of the matrix
* @return Square `DenseMatrix` with size `values.length` x `values.length` and `values`
* on the diagonal
*/
def diag(vector: Vector): DenseMatrix = {
val n = vector.size
val matrix = DenseMatrix.zeros(n, n)
val values = vector.toArray
var i = 0
while (i < n) {
matrix.update(i, i, values(i))
i += 1
}
matrix
}
}
/**
* Column-major sparse matrix.
* The entry values are stored in Compressed Sparse Column (CSC) format.
* For example, the following matrix
* {{{
* 1.0 0.0 4.0
* 0.0 3.0 5.0
* 2.0 0.0 6.0
* }}}
* is stored as `values: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]`,
* `rowIndices=[0, 2, 1, 0, 1, 2]`, `colPointers=[0, 2, 3, 6]`.
*
* @param numRows number of rows
* @param numCols number of columns
* @param colPtrs the index corresponding to the start of a new column (if not transposed)
* @param rowIndices the row index of the entry (if not transposed). They must be in strictly
* increasing order for each column
* @param values nonzero matrix entries in column major (if not transposed)
* @param isTransposed whether the matrix is transposed. If true, the matrix can be considered
* Compressed Sparse Row (CSR) format, where `colPtrs` behaves as rowPtrs,
* and `rowIndices` behave as colIndices, and `values` are stored in row major.
*/
class SparseMatrix(
val numRows: Int,
val numCols: Int,
val colPtrs: Array[Int],
val rowIndices: Array[Int],
val values: Array[Double],
override val isTransposed: Boolean) extends Matrix {
require(values.length == rowIndices.length, "The number of row indices and values don't match! " +
s"values.length: ${values.length}, rowIndices.length: ${rowIndices.length}")
// The Or statement is for the case when the matrix is transposed
  require(colPtrs.length == numCols + 1 || colPtrs.length == numRows + 1, "The length of the " +
    "column pointers should be the number of columns + 1. Currently, colPtrs.length: " +
    s"${colPtrs.length}, numCols: $numCols")
require(values.length == colPtrs.last, "The last value of colPtrs must equal the number of " +
s"elements. values.length: ${values.length}, colPtrs.last: ${colPtrs.last}")
/**
* Column-major sparse matrix.
* The entry values are stored in Compressed Sparse Column (CSC) format.
* For example, the following matrix
* {{{
* 1.0 0.0 4.0
* 0.0 3.0 5.0
* 2.0 0.0 6.0
* }}}
* is stored as `values: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]`,
* `rowIndices=[0, 2, 1, 0, 1, 2]`, `colPointers=[0, 2, 3, 6]`.
*
* @param numRows number of rows
* @param numCols number of columns
* @param colPtrs the index corresponding to the start of a new column
* @param rowIndices the row index of the entry. They must be in strictly increasing
* order for each column
* @param values non-zero matrix entries in column major
*/
def this(
numRows: Int,
numCols: Int,
colPtrs: Array[Int],
rowIndices: Array[Int],
values: Array[Double]) = this(numRows, numCols, colPtrs, rowIndices, values, false)
private[mllib] def toBreeze: BM[Double] = {
if (!isTransposed) {
new BSM[Double](values, numRows, numCols, colPtrs, rowIndices)
} else {
val breezeMatrix = new BSM[Double](values, numCols, numRows, colPtrs, rowIndices)
breezeMatrix.t
}
}
override def apply(i: Int, j: Int): Double = {
val ind = index(i, j)
if (ind < 0) 0.0 else values(ind)
}
private[mllib] def index(i: Int, j: Int): Int = {
if (!isTransposed) {
Arrays.binarySearch(rowIndices, colPtrs(j), colPtrs(j + 1), i)
} else {
Arrays.binarySearch(rowIndices, colPtrs(i), colPtrs(i + 1), j)
}
}
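  // Worked example for the CSC layout in the class docs (colPtrs = [0, 2, 3, 6],
  // rowIndices = [0, 2, 1, 0, 1, 2], values = [1.0, ..., 6.0]): apply(1, 1) searches
  // rowIndices(colPtrs(1) until colPtrs(2)) = [1], finds row 1 at position 2 and returns
  // values(2) == 3.0, while apply(1, 0) finds no stored entry and returns 0.0.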
private[mllib] def update(i: Int, j: Int, v: Double): Unit = {
val ind = index(i, j)
if (ind == -1) {
throw new NoSuchElementException("The given row and column indices correspond to a zero " +
"value. Only non-zero elements in Sparse Matrices can be updated.")
} else {
values(ind) = v
}
}
override def copy = new SparseMatrix(numRows, numCols, colPtrs, rowIndices, values.clone())
private[mllib] def map(f: Double => Double) =
new SparseMatrix(numRows, numCols, colPtrs, rowIndices, values.map(f))
private[mllib] def update(f: Double => Double): SparseMatrix = {
val len = values.length
var i = 0
while (i < len) {
values(i) = f(values(i))
i += 1
}
this
}
override def transpose: SparseMatrix =
new SparseMatrix(numCols, numRows, colPtrs, rowIndices, values, !isTransposed)
private[spark] override def foreachActive(f: (Int, Int, Double) => Unit): Unit = {
if (!isTransposed) {
var j = 0
while (j < numCols) {
var idx = colPtrs(j)
val idxEnd = colPtrs(j + 1)
while (idx < idxEnd) {
f(rowIndices(idx), j, values(idx))
idx += 1
}
j += 1
}
} else {
var i = 0
while (i < numRows) {
var idx = colPtrs(i)
val idxEnd = colPtrs(i + 1)
while (idx < idxEnd) {
val j = rowIndices(idx)
f(i, j, values(idx))
idx += 1
}
i += 1
}
}
}
/**
* Generate a `DenseMatrix` from the given `SparseMatrix`. The new matrix will have isTransposed
* set to false.
*/
def toDense: DenseMatrix = {
new DenseMatrix(numRows, numCols, toArray)
}
}
/**
* Factory methods for [[org.apache.spark.mllib.linalg.SparseMatrix]].
*/
object SparseMatrix {
/**
* Generate a `SparseMatrix` from Coordinate List (COO) format. Input must be an array of
* (i, j, value) tuples. Entries that have duplicate values of i and j are
* added together. Tuples where value is equal to zero will be omitted.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @param entries Array of (i, j, value) tuples
* @return The corresponding `SparseMatrix`
*/
def fromCOO(numRows: Int, numCols: Int, entries: Iterable[(Int, Int, Double)]): SparseMatrix = {
val sortedEntries = entries.toSeq.sortBy(v => (v._2, v._1))
val numEntries = sortedEntries.size
if (sortedEntries.nonEmpty) {
// Since the entries are sorted by column index, we only need to check the first and the last.
for (col <- Seq(sortedEntries.head._2, sortedEntries.last._2)) {
require(col >= 0 && col < numCols, s"Column index out of range [0, $numCols): $col.")
}
}
val colPtrs = new Array[Int](numCols + 1)
val rowIndices = MArrayBuilder.make[Int]
rowIndices.sizeHint(numEntries)
val values = MArrayBuilder.make[Double]
values.sizeHint(numEntries)
var nnz = 0
var prevCol = 0
var prevRow = -1
var prevVal = 0.0
// Append a dummy entry to include the last one at the end of the loop.
(sortedEntries.view :+ (numRows, numCols, 1.0)).foreach { case (i, j, v) =>
if (v != 0) {
if (i == prevRow && j == prevCol) {
prevVal += v
} else {
if (prevVal != 0) {
require(prevRow >= 0 && prevRow < numRows,
s"Row index out of range [0, $numRows): $prevRow.")
nnz += 1
rowIndices += prevRow
values += prevVal
}
prevRow = i
prevVal = v
while (prevCol < j) {
colPtrs(prevCol + 1) = nnz
prevCol += 1
}
}
}
}
new SparseMatrix(numRows, numCols, colPtrs, rowIndices.result(), values.result())
}
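  // Illustrative sketch: rebuilding the matrix from the class docs out of COO triplets.
  // Duplicate (i, j) entries are summed and explicit zeros are dropped.
  //
  //   val m = SparseMatrix.fromCOO(3, 3,
  //     Seq((0, 0, 1.0), (2, 0, 2.0), (1, 1, 3.0), (0, 2, 4.0), (1, 2, 5.0), (2, 2, 6.0)))
  //   // m.colPtrs == [0, 2, 3, 6], m.rowIndices == [0, 2, 1, 0, 1, 2]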
/**
* Generate an Identity Matrix in `SparseMatrix` format.
* @param n number of rows and columns of the matrix
* @return `SparseMatrix` with size `n` x `n` and values of ones on the diagonal
*/
def speye(n: Int): SparseMatrix = {
new SparseMatrix(n, n, (0 to n).toArray, (0 until n).toArray, Array.fill(n)(1.0))
}
/**
* Generates the skeleton of a random `SparseMatrix` with a given random number generator.
* The values of the matrix returned are undefined.
*/
private def genRandMatrix(
numRows: Int,
numCols: Int,
density: Double,
rng: Random): SparseMatrix = {
require(numRows > 0, s"numRows must be greater than 0 but got $numRows")
require(numCols > 0, s"numCols must be greater than 0 but got $numCols")
require(density >= 0.0 && density <= 1.0,
s"density must be a double in the range 0.0 <= d <= 1.0. Currently, density: $density")
val size = numRows.toLong * numCols
val expected = size * density
assert(expected < Int.MaxValue,
"The expected number of nonzeros cannot be greater than Int.MaxValue.")
val nnz = math.ceil(expected).toInt
if (density == 0.0) {
new SparseMatrix(numRows, numCols, new Array[Int](numCols + 1), Array[Int](), Array[Double]())
} else if (density == 1.0) {
val colPtrs = Array.tabulate(numCols + 1)(j => j * numRows)
val rowIndices = Array.tabulate(size.toInt)(idx => idx % numRows)
new SparseMatrix(numRows, numCols, colPtrs, rowIndices, new Array[Double](numRows * numCols))
} else if (density < 0.34) {
// draw-by-draw, expected number of iterations is less than 1.5 * nnz
val entries = MHashSet[(Int, Int)]()
while (entries.size < nnz) {
entries += ((rng.nextInt(numRows), rng.nextInt(numCols)))
}
SparseMatrix.fromCOO(numRows, numCols, entries.map(v => (v._1, v._2, 1.0)))
} else {
// selection-rejection method
var idx = 0L
var numSelected = 0
var j = 0
val colPtrs = new Array[Int](numCols + 1)
val rowIndices = new Array[Int](nnz)
while (j < numCols && numSelected < nnz) {
var i = 0
while (i < numRows && numSelected < nnz) {
if (rng.nextDouble() < 1.0 * (nnz - numSelected) / (size - idx)) {
rowIndices(numSelected) = i
numSelected += 1
}
i += 1
idx += 1
}
colPtrs(j + 1) = numSelected
j += 1
}
new SparseMatrix(numRows, numCols, colPtrs, rowIndices, new Array[Double](nnz))
}
}
/**
* Generate a `SparseMatrix` consisting of `i.i.d`. uniform random numbers. The number of non-zero
   * elements equals the ceiling of `numRows` x `numCols` x `density`
*
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @param density the desired density for the matrix
* @param rng a random number generator
* @return `SparseMatrix` with size `numRows` x `numCols` and values in U(0, 1)
*/
def sprand(numRows: Int, numCols: Int, density: Double, rng: Random): SparseMatrix = {
val mat = genRandMatrix(numRows, numCols, density, rng)
mat.update(i => rng.nextDouble())
}
/**
* Generate a `SparseMatrix` consisting of `i.i.d`. gaussian random numbers.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @param density the desired density for the matrix
* @param rng a random number generator
* @return `SparseMatrix` with size `numRows` x `numCols` and values in N(0, 1)
*/
def sprandn(numRows: Int, numCols: Int, density: Double, rng: Random): SparseMatrix = {
val mat = genRandMatrix(numRows, numCols, density, rng)
mat.update(i => rng.nextGaussian())
}
/**
* Generate a diagonal matrix in `SparseMatrix` format from the supplied values.
* @param vector a `Vector` that will form the values on the diagonal of the matrix
* @return Square `SparseMatrix` with size `values.length` x `values.length` and non-zero
* `values` on the diagonal
*/
def spdiag(vector: Vector): SparseMatrix = {
val n = vector.size
vector match {
case sVec: SparseVector =>
SparseMatrix.fromCOO(n, n, sVec.indices.zip(sVec.values).map(v => (v._1, v._1, v._2)))
case dVec: DenseVector =>
val entries = dVec.values.zipWithIndex
val nnzVals = entries.filter(v => v._1 != 0.0)
SparseMatrix.fromCOO(n, n, nnzVals.map(v => (v._2, v._2, v._1)))
}
}
}
/**
* Factory methods for [[org.apache.spark.mllib.linalg.Matrix]].
*/
object Matrices {
/**
* Creates a column-major dense matrix.
*
* @param numRows number of rows
* @param numCols number of columns
* @param values matrix entries in column major
*/
def dense(numRows: Int, numCols: Int, values: Array[Double]): Matrix = {
new DenseMatrix(numRows, numCols, values)
}
/**
* Creates a column-major sparse matrix in Compressed Sparse Column (CSC) format.
*
* @param numRows number of rows
* @param numCols number of columns
* @param colPtrs the index corresponding to the start of a new column
* @param rowIndices the row index of the entry
* @param values non-zero matrix entries in column major
*/
def sparse(
numRows: Int,
numCols: Int,
colPtrs: Array[Int],
rowIndices: Array[Int],
values: Array[Double]): Matrix = {
new SparseMatrix(numRows, numCols, colPtrs, rowIndices, values)
}
/**
* Creates a Matrix instance from a breeze matrix.
* @param breeze a breeze matrix
* @return a Matrix instance
*/
private[mllib] def fromBreeze(breeze: BM[Double]): Matrix = {
breeze match {
case dm: BDM[Double] =>
new DenseMatrix(dm.rows, dm.cols, dm.data, dm.isTranspose)
case sm: BSM[Double] =>
// There is no isTranspose flag for sparse matrices in Breeze
new SparseMatrix(sm.rows, sm.cols, sm.colPtrs, sm.rowIndices, sm.data)
case _ =>
throw new UnsupportedOperationException(
s"Do not support conversion from type ${breeze.getClass.getName}.")
}
}
/**
* Generate a `Matrix` consisting of zeros.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @return `Matrix` with size `numRows` x `numCols` and values of zeros
*/
def zeros(numRows: Int, numCols: Int): Matrix = DenseMatrix.zeros(numRows, numCols)
/**
* Generate a `DenseMatrix` consisting of ones.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @return `Matrix` with size `numRows` x `numCols` and values of ones
*/
def ones(numRows: Int, numCols: Int): Matrix = DenseMatrix.ones(numRows, numCols)
/**
* Generate a dense Identity Matrix in `Matrix` format.
* @param n number of rows and columns of the matrix
* @return `Matrix` with size `n` x `n` and values of ones on the diagonal
*/
def eye(n: Int): Matrix = DenseMatrix.eye(n)
/**
* Generate a sparse Identity Matrix in `Matrix` format.
* @param n number of rows and columns of the matrix
* @return `Matrix` with size `n` x `n` and values of ones on the diagonal
*/
def speye(n: Int): Matrix = SparseMatrix.speye(n)
/**
* Generate a `DenseMatrix` consisting of `i.i.d.` uniform random numbers.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @param rng a random number generator
* @return `Matrix` with size `numRows` x `numCols` and values in U(0, 1)
*/
def rand(numRows: Int, numCols: Int, rng: Random): Matrix =
DenseMatrix.rand(numRows, numCols, rng)
/**
* Generate a `SparseMatrix` consisting of `i.i.d.` gaussian random numbers.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @param density the desired density for the matrix
* @param rng a random number generator
* @return `Matrix` with size `numRows` x `numCols` and values in U(0, 1)
*/
def sprand(numRows: Int, numCols: Int, density: Double, rng: Random): Matrix =
SparseMatrix.sprand(numRows, numCols, density, rng)
/**
* Generate a `DenseMatrix` consisting of `i.i.d.` gaussian random numbers.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @param rng a random number generator
* @return `Matrix` with size `numRows` x `numCols` and values in N(0, 1)
*/
def randn(numRows: Int, numCols: Int, rng: Random): Matrix =
DenseMatrix.randn(numRows, numCols, rng)
/**
* Generate a `SparseMatrix` consisting of `i.i.d.` gaussian random numbers.
* @param numRows number of rows of the matrix
* @param numCols number of columns of the matrix
* @param density the desired density for the matrix
* @param rng a random number generator
* @return `Matrix` with size `numRows` x `numCols` and values in N(0, 1)
*/
def sprandn(numRows: Int, numCols: Int, density: Double, rng: Random): Matrix =
SparseMatrix.sprandn(numRows, numCols, density, rng)
/**
* Generate a diagonal matrix in `Matrix` format from the supplied values.
* @param vector a `Vector` that will form the values on the diagonal of the matrix
* @return Square `Matrix` with size `values.length` x `values.length` and `values`
* on the diagonal
*/
def diag(vector: Vector): Matrix = DenseMatrix.diag(vector)
/**
* Horizontally concatenate a sequence of matrices. The returned matrix will be in the format
* the matrices are supplied in. Supplying a mix of dense and sparse matrices will result in
* a sparse matrix. If the Array is empty, an empty `DenseMatrix` will be returned.
* @param matrices array of matrices
* @return a single `Matrix` composed of the matrices that were horizontally concatenated
*/
def horzcat(matrices: Array[Matrix]): Matrix = {
if (matrices.isEmpty) {
return new DenseMatrix(0, 0, Array[Double]())
} else if (matrices.size == 1) {
return matrices(0)
}
val numRows = matrices(0).numRows
var hasSparse = false
var numCols = 0
matrices.foreach { mat =>
      require(numRows == mat.numRows, "The numbers of rows of the matrices in this sequence " +
        "don't match!")
mat match {
case sparse: SparseMatrix => hasSparse = true
case dense: DenseMatrix => // empty on purpose
case _ => throw new IllegalArgumentException("Unsupported matrix format. Expected " +
s"SparseMatrix or DenseMatrix. Instead got: ${mat.getClass}")
}
numCols += mat.numCols
}
if (!hasSparse) {
new DenseMatrix(numRows, numCols, matrices.flatMap(_.toArray))
} else {
var startCol = 0
val entries: Array[(Int, Int, Double)] = matrices.flatMap { mat =>
val nCols = mat.numCols
mat match {
case spMat: SparseMatrix =>
val data = new Array[(Int, Int, Double)](spMat.values.length)
var cnt = 0
spMat.foreachActive { (i, j, v) =>
data(cnt) = (i, j + startCol, v)
cnt += 1
}
startCol += nCols
data
case dnMat: DenseMatrix =>
val data = new ArrayBuffer[(Int, Int, Double)]()
dnMat.foreachActive { (i, j, v) =>
if (v != 0.0) {
data.append((i, j + startCol, v))
}
}
startCol += nCols
data
}
}
SparseMatrix.fromCOO(numRows, numCols, entries)
}
}
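  // Illustrative sketch: concatenating two dense 2 x 1 matrices yields a dense 2 x 2 matrix;
  // including a SparseMatrix in the array would instead produce a SparseMatrix via fromCOO.
  //
  //   val a = Matrices.dense(2, 1, Array(1.0, 2.0))
  //   val b = Matrices.dense(2, 1, Array(3.0, 4.0))
  //   Matrices.horzcat(Array(a, b)) // dense 2 x 2 backed by [1.0, 2.0, 3.0, 4.0]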
/**
* Vertically concatenate a sequence of matrices. The returned matrix will be in the format
* the matrices are supplied in. Supplying a mix of dense and sparse matrices will result in
* a sparse matrix. If the Array is empty, an empty `DenseMatrix` will be returned.
* @param matrices array of matrices
* @return a single `Matrix` composed of the matrices that were vertically concatenated
*/
def vertcat(matrices: Array[Matrix]): Matrix = {
if (matrices.isEmpty) {
return new DenseMatrix(0, 0, Array[Double]())
} else if (matrices.size == 1) {
return matrices(0)
}
val numCols = matrices(0).numCols
var hasSparse = false
var numRows = 0
matrices.foreach { mat =>
      require(numCols == mat.numCols, "The numbers of columns of the matrices in this sequence " +
        "don't match!")
mat match {
case sparse: SparseMatrix => hasSparse = true
case dense: DenseMatrix => // empty on purpose
case _ => throw new IllegalArgumentException("Unsupported matrix format. Expected " +
s"SparseMatrix or DenseMatrix. Instead got: ${mat.getClass}")
}
numRows += mat.numRows
}
if (!hasSparse) {
val allValues = new Array[Double](numRows * numCols)
var startRow = 0
matrices.foreach { mat =>
val nRows = mat.numRows
mat.foreachActive { (i, j, v) =>
val indStart = j * numRows + startRow
allValues(indStart + i) = v
}
startRow += nRows
}
new DenseMatrix(numRows, numCols, allValues)
} else {
var startRow = 0
val entries: Array[(Int, Int, Double)] = matrices.flatMap { mat =>
val nRows = mat.numRows
mat match {
case spMat: SparseMatrix =>
val data = new Array[(Int, Int, Double)](spMat.values.length)
var cnt = 0
spMat.foreachActive { (i, j, v) =>
data(cnt) = (i + startRow, j, v)
cnt += 1
}
startRow += nRows
data
case dnMat: DenseMatrix =>
val data = new ArrayBuffer[(Int, Int, Double)]()
dnMat.foreachActive { (i, j, v) =>
if (v != 0.0) {
data.append((i + startRow, j, v))
}
}
startRow += nRows
data
}
}
SparseMatrix.fromCOO(numRows, numCols, entries)
}
}
}
| trueyao/spark-lever | mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala | Scala | apache-2.0 | 32,489 |
package cinema.example
import cinema.crew._
import cinema.graph.Graph
import cinema.graph.immutable.UndirectedGraph
object LossyHittingTimeApp {
def lossyHittingTime(myGraph: Graph, u: Int, v: Int): Double = {
var result = 0
var i = 0
while (i != 2000) {
var walker = u
while (walker != v) {
walker = myGraph.randomNeighbor(walker)
result += 1
}
i += 1
}
    result.toDouble / 2000
}
def main(args: Array[String]) {
if (args.length != 4) {
println("Usage: scala HittingTimeApp [edgelist] [subset cardinality] [output filename] [# of servers]")
return
}
val G = new UndirectedGraph(args(0), parallel = true)
PairwiseApp.calculate(G, args(1).toInt, lossyHittingTime, args(2), args(3).toInt)
}
} | adelbertc/cinema | src/main/scala/cinema/example/LossyHittingTimeApp.scala | Scala | mit | 778 |
package math
import renderer.Hit
import play.api.libs.json.{Format, JsValue, Json}
final case class Triangle(a: Vector3,
b: Vector3,
c: Vector3,
material: String = "DEFAULT_MATERIAL",
normals: Option[Seq[Vector3]] = None)
extends Shape {
import Math._
lazy val edge1: Vector3 = b - a
lazy val edge2: Vector3 = c - a
lazy val normal: Vector3 = -(edge1 cross edge2) normalized
override def intersect(r: Ray): Option[Hit] = {
//Begin calculating determinant - also used to calculate u parameter
val p: Vector3 = r.direction cross edge2
//if determinant is near zero, ray lies in plane of triangle or ray is parallel to plane of triangle
val det: Double = edge1 * p
//TODO: Backface culling???
if (det > -EPS && det < EPS)
None
else {
val inv_det = 1f / det
//calculate distance from V1 to ray origin
      val t_vec = r.origin - a // vector from vertex a to the ray origin
//Calculate u parameter and test bound
val u = (t_vec * p) * inv_det
//The intersection lies outside of the triangle
if (u < 0f || u > 1f)
None
else {
//Prepare to test v parameter
val q: Vector3 = t_vec cross edge1
//Calculate V parameter and test bound
val v = (r.direction * q) * inv_det
//The intersection lies outside of the triangle
if (v < 0f || u + v > 1f)
None
else {
val t = (edge2 * q) * inv_det
if (t > EPS) {
//ray intersection
val pos = r.march(t)
normals match {
case Some(Seq(a, b, c)) =>
val interpolatedNormal = a * (1 - u - v) + b * u + c * v
Some(Hit(t, r.march(t), interpolatedNormal, Shape.getMaterial(material, pos)))
case _ =>
Some(Hit(t, r.march(t), normal, Shape.getMaterial(material, pos)))
}
} else {
None
}
}
}
}
}
override def intersect(r: Ray, maxDist: Double): Boolean = {
val p: Vector3 = r.direction cross edge2
//if determinant is near zero, ray lies in plane of triangle or ray is parallel to plane of triangle
val det: Double = edge1 * p
//TODO: Backface culling???
if (det > -EPS && det < EPS)
false
else {
val inv_det = 1f / det
//calculate distance from V1 to ray origin
val t_vec = (r.origin - a)
//Calculate u parameter and test bound
val u = (t_vec * p) * inv_det
//The intersection lies outside of the triangle
if (u < 0f || u > 1f)
false
else {
//Prepare to test v parameter
val q: Vector3 = t_vec cross edge1
//Calculate V parameter and test bound
val v = (r.direction * q) * inv_det
//The intersection lies outside of the triangle
if (v < 0f || u + v > 1f)
false
else {
val t = (edge2 * q) * inv_det
EPS < t && t < maxDist - EPS
}
}
}
}
override lazy val boundingBox: AABB = {
val points: Seq[Vector3] = Seq(a, b, c)
AABB(points.map(_.x).min,
points.map(_.x).max,
points.map(_.y).min,
points.map(_.y).max,
points.map(_.z).min,
points.map(_.z).max)
}
override lazy val midpoint: Vector3 = (a + b + c) / 3
override lazy val minX: Double = a.x min b.x min c.x
override lazy val minY: Double = a.y min b.y min c.y
override lazy val minZ: Double = a.z min b.z min c.z
override lazy val maxX: Double = a.x max b.x max c.x
override lazy val maxY: Double = a.y max b.y max c.y
override lazy val maxZ: Double = a.z max b.z max c.z
}
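// Illustrative sketch of the Möller–Trumbore test above. The Vector3 and Ray constructors used
// here are assumptions about this project's API (a point plus a direction), shown only to make
// the barycentric check concrete.
//
//   val tri = Triangle(Vector3(0, 0, 0), Vector3(1, 0, 0), Vector3(0, 1, 0))
//   val ray = Ray(Vector3(0.25, 0.25, -1), Vector3(0, 0, 1))
//   tri.intersect(ray) // Some(Hit(...)) at t = 1 with u = v = 0.25, so u + v <= 1 (inside)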
| wookenny/scalarty | src/main/scala/math/Triangle.scala | Scala | mit | 3,768 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.common.engine.core.entities
/**
* @author Pavel Tomskikh
*/
case class KafkaEnvelopes[T <: AnyRef](envelopes: Seq[KafkaEnvelope[T]]) extends EnvelopeInterface {
override val weight: Int = envelopes.size
}
| bwsw/sj-platform | core/sj-common/src/main/scala/com/bwsw/sj/common/engine/core/entities/KafkaEnvelopes.scala | Scala | apache-2.0 | 1,040 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import EqualityPolicy._
/**
* Provides an implicit method that loosens the equality constraint defined by <code>TypeCheckedTripleEquals</code> or <code>ConversionCheckedTripleEquals</code>
* for Scala <code>Seq</code>s to one that more closely matches Scala's approach to <code>Seq</code> equality.
*
* <p>
* Scala's approach to <code>Seq</code> equality is that if both objects being compared are <code>Seq</code>s, the elements are compared to determine equality.
* This means you could compare an immutable <code>Vector</code> and a mutable <code>ListBuffer</code> for equality, for instance, and get true so long as the
* two <code>Seq</code>s contained the same elements in the same order. Here's an example:
* </p>
*
* <pre class="stREPL">
* scala> import scala.collection.mutable.ListBuffer
* import scala.collection.mutable.ListBuffer
*
* scala> Vector(1, 2) == ListBuffer(1, 2)
* res0: Boolean = true
* </pre>
*
* <p>
* Such a comparison would not, however, compile if you used <code>===</code> under either <code>TypeCheckedTripleEquals</code> or <code>ConversionCheckedTripleEquals</code>,
* because <code>Vector</code> and <code>ListBuffer</code> are not in a subtype/supertype relationship, nor does an implicit conversion by default exist between them:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalactic._
* import org.scalactic._
*
* scala> import TypeCheckedTripleEquals._
* import TypeCheckedTripleEquals._
*
* scala> Vector(1, 2) === ListBuffer(1, 2)
* <console>:16: error: types scala.collection.immutable.Vector[Int] and
* scala.collection.mutable.ListBuffer[Int] do not adhere to the equality constraint selected for
* the === and !== operators; the missing implicit parameter is of type
* org.scalactic.EqualityConstraint[scala.collection.immutable.Vector[Int],
* scala.collection.mutable.ListBuffer[Int]]
* Vector(1, 2) === ListBuffer(1, 2)
* ^
* </pre>
*
* <p>
* If you mix or import the implicit conversion provided by <code>SeqEqualityConstraint</code>, however, the comparison will be allowed:
* </p>
*
* <pre class="stREPL">
* scala> import SeqEqualityConstraints._
* import SeqEqualityConstraints._
*
* scala> Vector(1, 2) === ListBuffer(1, 2)
* res2: Boolean = true
* </pre>
*
* <p>
* The equality constraint provided by this trait requires that both left and right sides are subclasses of <code>scala.collection.GenSeq</code> and that
* an <code>EqualityConstraint</code> can be found for the element types. In the example above, both the <code>Vector</code> and
* <code>ListBuffer</code> are subclasses of <code>scala.collection.GenSeq</code>, and the regular <code>TypeCheckedTripleEquals</code> provides equality
* constraints for the element types, both of which are <code>Int</code>. By contrast, this
* trait would not allow a <code>Vector[Int]</code> to be compared against a <code>ListBuffer[java.util.Date]</code>, because no equality constraint
* will exist between the element types <code>Int</code> and <code>Date</code>:
* </p>
*
* <pre class="stREPL">
* scala> import java.util.Date
* import java.util.Date
*
* scala> Vector(1, 2) === ListBuffer(new Date, new Date)
* <console>:20: error: types scala.collection.immutable.Vector[Int] and
* scala.collection.mutable.ListBuffer[java.util.Date] do not adhere to the equality constraint selected for
* the === and !== operators; the missing implicit parameter is of type
* org.scalactic.EqualityConstraint[scala.collection.immutable.Vector[Int],
* scala.collection.mutable.ListBuffer[java.util.Date]]
* Vector(1, 2) === ListBuffer(new Date, new Date)
* ^
* </pre>
*
* @author Bill Venners
*/
trait SeqEqualityConstraints {
import scala.language.higherKinds
/**
* Provides an equality constraint that allows two subtypes of <code>scala.collection.GenSeq</code>s to be compared for equality with <code>===</code> so long
* as an <code>EqualityConstraint</code> is available for the element types.
*/
implicit def seqEqualityConstraint[EA, CA[ea] <: collection.GenSeq[ea], EB, CB[eb] <: collection.GenSeq[eb]](implicit equalityOfA: Equality[CA[EA]], ev: Constraint[EA, EB]): Constraint[CA[EA], CB[EB]] = new BasicConstraint[CA[EA], CB[EB]](equalityOfA)
}
/**
* Companion object that facilitates the importing of <code>SeqEqualityConstraints</code> members as
* an alternative to mixing it in. One use case is to import <code>SeqEqualityConstraints</code> members so you can use
* them in the Scala interpreter.
*/
object SeqEqualityConstraints extends SeqEqualityConstraints
| travisbrown/scalatest | src/main/scala/org/scalactic/SeqEqualityConstraints.scala | Scala | apache-2.0 | 5,337 |
package feh.tec.visual.swing
import scala.swing.Component
import scala.swing.Swing._
import javax.swing.{JPanel, UIManager, JComponent}
import javax.swing.plaf.ComponentUI
import java.awt.Graphics
class Canvas extends Component{
override lazy val peer: JCanvas = new JCanvas with SuperMixin
def canvas = peer.canvas
}
class JCanvas extends JPanel{
lazy val canvas = new java.awt.Canvas
override def paintComponent(g: Graphics){
canvas.paint(g)
super.paintComponent(g)
}
} | fehu/agent-tareas | swing/src/main/scala/feh/tec/visual/swing/Canvas.scala | Scala | mit | 493 |
package dispatch
import io.netty.util.{TimerTask, Timeout, Timer}
import scala.concurrent.duration.Duration
object SleepFuture {
def apply[T](d: Duration)(todo: => T)
(implicit timer: Timer) = {
val promise = scala.concurrent.Promise[T]()
timer.newTimeout(new TimerTask {
def run(timeout: Timeout) = {
promise.complete(util.Try(todo))
()
}
}, d.length, d.unit)
promise.future
}
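  // Illustrative usage sketch: any io.netty.util.Timer works; HashedWheelTimer is the stock
  // implementation. The future completes with `todo`'s result once the timeout fires.
  //
  //   import scala.concurrent.duration._
  //   implicit val timer: Timer = new io.netty.util.HashedWheelTimer()
  //   val greeting = SleepFuture(2.seconds) { "done" } // completes with "done" after ~2 seconds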
}
| dispatch/reboot | core/src/main/scala/sleep.scala | Scala | lgpl-3.0 | 446 |
package coursier.cli.install
import caseapp.{ExtraName => Short, HelpMessage => Help, ValueDescription => Value, _}
import coursier.cli.options.OptionGroup
// format: off
final case class SharedInstallOptions(
@Group(OptionGroup.install)
@Hidden
graalvmHome: Option[String] = None,
@Group(OptionGroup.install)
@Hidden
graalvmOption: List[String] = Nil,
@Group(OptionGroup.install)
@Hidden
graalvmDefaultVersion: Option[String] = SharedInstallOptions.defaultGraalvmVersion,
@Group(OptionGroup.install)
@Short("dir")
installDir: Option[String] = None,
@Group(OptionGroup.install)
@Hidden
@Help("Platform for prebuilt binaries (e.g. \\"x86_64-pc-linux\\", \\"x86_64-apple-darwin\\", \\"x86_64-pc-win32\\")")
installPlatform: Option[String] = None,
@Group(OptionGroup.install)
@Hidden
installPreferPrebuilt: Boolean = true,
@Group(OptionGroup.install)
@Hidden
@Help("Require prebuilt artifacts for native applications, don't try to build native executable ourselves")
onlyPrebuilt: Boolean = false,
@Group(OptionGroup.install)
@Hidden
proguarded: Option[Boolean] = None
)
// format: on
object SharedInstallOptions {
def defaultGraalvmVersion: Option[String] =
Some("19.3")
}
| coursier/coursier | modules/cli/src/main/scala/coursier/cli/install/SharedInstallOptions.scala | Scala | apache-2.0 | 1,251 |
package one.lockstep.monolock
import one.lockstep.monolock.LockState.SealIdentifiers
import one.lockstep.monolock.protocol.UnlockResponse._
import one.lockstep.monolock.protocol._
import one.lockstep.util._
import one.lockstep.util.crypto._
import one.lockstep.util.codec._
import one.lockstep.util.protocol._
import scodec.codecs._
class LockGuard(ticketGuard: TicketGuard = TicketGuard(acceptUnsigned = true)) extends Logging {
def enroll(stateOpt: Option[LockState], request: EnrollmentRequest)
(implicit ciphersuite: Ciphersuite): (Option[LockState], EnrollmentResponse) = {
if (!ticketGuard.validateTicket(request.ticket.decoded, None)) {
(None, EnrollmentResponse.Failure(MonolockResponse.Rejected))
}
else stateOpt match {
case None =>
val newState = freshState(request)
(Some(newState), EnrollmentResponse.Success(newState.nextSealKeyPair.publicKey))
case Some(state) if validateRecurringEnrollmentRequest(request, state) =>
val newState = resetState(state, request)
(Some(newState), EnrollmentResponse.Success(newState.nextSealKeyPair.publicKey))
case _ => // invalid enrollment request, reject
(None, EnrollmentResponse.Failure(MonolockResponse.Rejected))
}
}
def unlock(stateOpt: Option[LockState], request: UnlockRequest)
(implicit ciphersuite: Ciphersuite): (Option[LockState], UnlockResponse) = {
stateOpt match {
case None => (None, UnlockResponse.Failure(MonolockResponse.Rejected))
case Some(state) =>
val incomingSeal = decryptSeal(request.lockSeal, state)
val isSealFresh = isFreshSeal(incomingSeal, state)
val isSealValid = validateSeal(incomingSeal, state)
val isRequestInSequence = request.timestamp > state.currentTimestamp
if (!isSealValid || !isRequestInSequence) {
logger.warn(s"rejecting unlock request, lockId=${request.lockId}")
(None, UnlockResponse.Failure(MonolockResponse.Rejected))
} else {
if (!isSealFresh && state.badUnlockAttempts > incomingSeal.header.lockoutThreshold) {
logger.warn(s"rejecting unlock request as the lock is already locked-out, lockId=${request.lockId}")
(None, UnlockResponse.Failure(MonolockResponse.LockedOut))
} else {
val sharedSecret = deriveSharedSecret(incomingSeal, request.timestamp, request.kexClientPubKey)
val passcodeMatch = validateSharedSecret(sharedSecret, request.kexConfirmation)
//we do NOT reset the bad-attempt-count right after match,
//because that could give advantage to attackers - see requirement #2:
val newBadAttempts = (if (isSealFresh) 0 else state.badUnlockAttempts) + (if (passcodeMatch) 0 else 1)
//there is no need to rotate the seal-key on every fresh seal, however frequent rotation is good for security
val (newSealKeyPair, newNextSealKeyPair) = if (isSealFresh) {
(state.nextSealKeyPair, ciphersuite.kem.keygen()) //rotate
} else (state.sealKeyPair, state.nextSealKeyPair) //keep
val newState = state.copy(
currentTimestamp = request.timestamp,
lastUnlockRequestAt = request.timestamp,
badUnlockAttempts = newBadAttempts,
sealCreatedAt = incomingSeal.header.createdAt,
sealIdsOpt = Some(sealIdentifiers(incomingSeal)),
sealKeyPair = newSealKeyPair,
nextSealKeyPair = newNextSealKeyPair)
val unlockResponse = if (newBadAttempts > incomingSeal.header.lockoutThreshold)
UnlockResponse.Failure(MonolockResponse.LockedOut)
else if (!passcodeMatch)
UnlockResponse.Failure(MonolockResponse.IncorrectPasscode)
else {
val keyMaterial = new KeyMaterial(incomingSeal.confidential.masterSecretShare, newNextSealKeyPair.publicKey)
val clientVersion = Preface.decode(request.lockSeal.header).version
UnlockResponse.Success(EncryptedKeyMaterial.encrypt(keyMaterial, sharedSecret, clientVersion))
}
(Some(newState), unlockResponse)
}
}
}
}
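  // Illustrative call pattern (a sketch, not part of this class): the caller is expected to
  // persist the returned state alongside delivering the response. `loadState`, `saveState` and
  // `reply` below are assumed caller-side helpers.
  //
  //   val (newStateOpt, response) = lockGuard.unlock(loadState(request.lockId), request)
  //   newStateOpt.foreach(saveState(request.lockId, _))
  //   reply(response)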
private def decryptSeal(lockSeal: EncryptedLockSeal, state: LockState)
(implicit ciphersuite: Ciphersuite): LockSeal = {
val decryptionKey = sealDecryptionKey(lockSeal, state)
LockSeal.decrypt(lockSeal, decryptionKey)
}
private def sealDecryptionKey(lockSeal: EncryptedLockSeal, state: LockState)
(implicit ciphersuite: Ciphersuite): PrivateKey = {
val keyFingerprint = Protocol.decode[LockSeal.Header](lockSeal.header).sealKeyFingerprint
if (keyFingerprint == ciphersuite.hash(state.sealKeyPair.publicKey))
state.sealKeyPair.privateKey
else if (keyFingerprint == ciphersuite.hash(state.nextSealKeyPair.publicKey))
state.nextSealKeyPair.privateKey
else throw new IllegalArgumentException(s"unknown seal key fingerprint $keyFingerprint")
}
private def deriveSharedSecret(seal: LockSeal, timestamp: Long, kexClientPubKey: Speke.Member)
(implicit ciphersuite: Ciphersuite): Bytes = {
val sessionId = encode(timestamp)(int64) ++ seal.header.lockId.asBytes //note: client does the same calculation
val keyPair = Speke.KeyPair(seal.confidential.kexServerPrivKey, seal.header.kexServerPubKey)
require(ciphersuite.speke.validatePublicKey(kexClientPubKey), "invalid SPEKE public key")
ciphersuite.speke.deriveSharedSecret(sessionId, keyPair, kexClientPubKey)
}
private def validateSharedSecret(sharedSecret: Bytes, confirmation: Bytes)
(implicit ciphersuite: Ciphersuite): Boolean = {
ciphersuite.speke.validateConfirmation(sharedSecret, iterations = 2, confirmation)
}
private def isFreshSeal(incomingSeal: LockSeal, state: LockState)
(implicit ciphersuite: Ciphersuite): Boolean = {
val incomingSealIdDigest = ciphersuite.hash(prependLength(incomingSeal.confidential.id))
!state.sealIdsOpt.exists(_.sealIdDigest == incomingSealIdDigest)
}
private def validateSeal(incomingSeal: LockSeal, state: LockState)
(implicit ciphersuite: Ciphersuite): Boolean = {
!isFreshSeal(incomingSeal, state) || { // validate a fresh seal:
validateSealSeriesId(incomingSeal, state) && // requirement #8
incomingSeal.header.createdAt > state.sealCreatedAt && // requirement #1
incomingSeal.header.createdAt >= state.lastUnlockRequestAt // requirement #3
}
}
private def validateSealSeriesId(incomingSeal: LockSeal, state: LockState)
(implicit ciphersuite: Ciphersuite): Boolean = {
val incomingSeriesIdDigest = ciphersuite.hash(prependLength(incomingSeal.confidential.seriesId))
state.sealIdsOpt.exists(_.sealSeriesIdDigest == incomingSeriesIdDigest) || {
// this is a new series-id - check that this seal was created during last enrollment:
incomingSeal.header.createdAt == state.lastEnrollmentRequestAt &&
state.enrolledSealSeriesIdDigest == incomingSeriesIdDigest
}
}
private def validateRecurringEnrollmentRequest(request: EnrollmentRequest, state: LockState)
(implicit ciphersuite: Ciphersuite): Boolean = {
request.timestamp > state.currentTimestamp && {
// check that the incoming seal matches the current state:
val incomingSealOpt = request.latestSeal.map(decryptSeal(_, state))
state.sealIdsOpt.forall { _ => incomingSealOpt.isDefined } &&
incomingSealOpt.forall { seal => validateSeal(seal, state) }
}
}
private def freshState(request: EnrollmentRequest)
(implicit ciphersuite: Ciphersuite): LockState = {
val freshKeyPair = ciphersuite.kem.keygen()
LockState(currentTimestamp = request.timestamp,
enrolledSealSeriesIdDigest = request.newSeriesIdDigest,
lastEnrollmentRequestAt = request.timestamp,
sealKeyPair = freshKeyPair, nextSealKeyPair = freshKeyPair)
}
private def resetState(state: LockState, validatedRequest: EnrollmentRequest)
(implicit ciphersuite: Ciphersuite): LockState = {
val incomingSealOpt = validatedRequest.latestSeal.map(decryptSeal(_, state))
incomingSealOpt.map(seal => updateSeal(state, seal)).getOrElse(state).
copy(currentTimestamp = validatedRequest.timestamp,
lastEnrollmentRequestAt = validatedRequest.timestamp,
enrolledSealSeriesIdDigest = validatedRequest.newSeriesIdDigest)
}
private def updateSeal(state: LockState, incomingSeal: LockSeal)
(implicit ciphersuite: Ciphersuite): LockState = {
if (!isFreshSeal(incomingSeal, state)) state
else {
state.copy(badUnlockAttempts = 0,
sealCreatedAt = incomingSeal.header.createdAt,
sealIdsOpt = Some(sealIdentifiers(incomingSeal)))
}
}
private def sealIdentifiers(seal: LockSeal)(implicit ciphersuite: Ciphersuite) = {
SealIdentifiers(
ciphersuite.hash(prependLength(seal.confidential.id)),
ciphersuite.hash(prependLength(seal.confidential.seriesId)))
}
}
| lockstep-one/vault | monolock-server/src/main/scala/one/lockstep/monolock/LockGuard.scala | Scala | agpl-3.0 | 9,304 |
package org.bitcoins.core.util
import scala.math.BigInt
/**
* Created by chris on 2/8/16.
*/
trait NumberUtil extends BitcoinSLogger {
private def parseLong(hex : String) : Long = java.lang.Long.parseLong(hex,16)
private def parseLong(bytes : List[Byte]) : Long = parseLong(BitcoinSUtil.encodeHex(bytes))
private def parseLong(byte : Byte) : Long = parseLong(List(byte))
private def parseLong(bytes : Seq[Byte]) : Long = parseLong(bytes.toList)
  /** Computes 2^exponent as a BigInt. */
def pow2(exponent : Int) : BigInt = {
require(exponent < 64, "We cannot have anything larger than 2^64 - 1 in a long, you tried to do 2^" + exponent)
BigInt(1) << exponent
}
/**
* Calculates the unsigned number for a byte
* @param byteIndex this is used to tell what position this byte is out of a 4 byte integer
* For instance, if byte was equal to 0x0001 and we were trying to calculate the unsigned int for
* the following byte value Seq(0xf000, 0x0f00, 0x0001, 0x0000) we would have byteIndex 1
* @param byte the byte which we need to calculate the unsigned integer for
* @return the unsigned integer corresponding to the given byteIndex and byte
*/
def calculateUnsignedNumberFromByte(byteIndex : Int, byte : Byte): BigInt = {
val setBits : Seq[BigInt] = for {
i <- 0 until 8
bitIndex = i + (byteIndex * 8)
} yield {
//check if index i is set in the byte, if so we need to calculate 2 ^ bitIndex
if ((pow2(i) & byte) != 0) pow2(bitIndex)
else BigInt(0)
}
setBits.foldLeft(BigInt(0)){_ + _}
}
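  //worked example: calculateUnsignedNumberFromByte(0, 0x01) == 1, whereas the same byte at
  //byteIndex 1 contributes pow2(8) == 256, since each set bit i is weighted by 2^(i + byteIndex * 8)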
/** Takes a hex string and parses it to a [[BigInt]]. */
def toBigInt(hex : String) : BigInt = toBigInt(BitcoinSUtil.decodeHex(hex))
/** Converts a sequence of bytes to twos complement signed number. */
def toBigInt(bytes : Seq[Byte]) : BigInt = {
//BigInt interprets the number as an unsigned number then applies the given
//sign in front of that number, therefore if we have a negative number we need to invert it
//since twos complement is an inverted number representation for negative numbers
//see [[https://en.wikipedia.org/wiki/Two%27s_complement]]
if (bytes.isEmpty) BigInt(0)
//check if sign bit is set
else if ((0x80.toByte & bytes.head) !=0) {
val invertedBytes = bytes.tail.map(b => (b ^ 0xff.toByte).toByte)
val firstByteInverted = (bytes.head ^ 0xff.toByte).toByte
val num = firstByteInverted +: invertedBytes
BigInt(-1,num.toArray) - 1
} else {
val firstBitOff = (0x7f & bytes.head).toByte
val num = firstBitOff +: bytes.tail
BigInt(num.toArray)
}
}
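  //worked examples: toBigInt(Seq(0xff.toByte)) == BigInt(-1) and toBigInt(Seq(0x7f.toByte)) == BigInt(127)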
  /** Converts a sequence of [[Byte]] to an [[Int]]. */
def toInt(bytes : Seq[Byte]) : Int = toBigInt(bytes).toInt
  /** Converts a hex string to an [[Int]]. */
def toInt(hex : String) : Int = toInt(BitcoinSUtil.decodeHex(hex))
/** Converts a sequence of [[Byte]] to a [[Long]]. */
def toLong(bytes : Seq[Byte]) : Long = toBigInt(bytes).toLong
/** Converts a hex string to a [[Long]]. */
def toLong(hex : String): Long = toLong(BitcoinSUtil.decodeHex(hex))
}
object NumberUtil extends NumberUtil
| SuredBits/bitcoin-s-sidechains | src/main/scala/org/bitcoins/core/util/NumberUtil.scala | Scala | mit | 3,183 |
package acceptance
import cucumber.api.junit.Cucumber
import cucumber.api.junit.Cucumber.Options
import org.junit.runner.RunWith
@RunWith(classOf[Cucumber])
@Options(
features = Array("features"),
glue = Array("acceptance"),
format = Array("pretty", "html:target/cucumber-report"),
tags = Array()
)
class FeaturesRunner {
} | tvlive/tv-details | test/acceptance/FeaturesRunner.scala | Scala | apache-2.0 | 333 |
package info.mukel.telegrambot4s.methods
import info.mukel.telegrambot4s.models.Message
/** Use this method to forward messages of any kind. On success, the sent Message is returned.
*
* @param chatId Integer or String Unique identifier for the target chat or username of the target channel (in the format @channelusername)
* @param fromChatId Integer or String Unique identifier for the chat where the original message was sent (or channel username in the format @channelusername)
* @param disableNotification Boolean Optional Sends the message silently. iOS users will not receive a notification, Android users will receive a notification with no sound.
* @param messageId Integer Unique message identifier
*/
case class ForwardMessage(
                             chatId              : Either[Long, String],
                             fromChatId          : Either[Long, String],
disableNotification : Option[Boolean] = None,
messageId : Long
) extends ApiRequestJson[Message]
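// Illustrative construction (field names are from this file; the values are made up):
//   ForwardMessage(chatId = Left(123456789L), fromChatId = Right("@channelname"), messageId = 42L)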
| hugemane/telegrambot4s | src/main/scala/info/mukel/telegrambot4s/methods/ForwardMessage.scala | Scala | apache-2.0 | 1,069 |
package gitbucket.core.controller
import gitbucket.core.model.{CommitComment, CommitComments, IssueComment, WebHook}
import gitbucket.core.plugin.PluginRegistry
import gitbucket.core.pulls.html
import gitbucket.core.service.CommitStatusService
import gitbucket.core.service.MergeService
import gitbucket.core.service.IssuesService._
import gitbucket.core.service.PullRequestService._
import gitbucket.core.service.RepositoryService.RepositoryInfo
import gitbucket.core.service._
import gitbucket.core.util.SyntaxSugars._
import gitbucket.core.util.Directory._
import gitbucket.core.util.Implicits._
import gitbucket.core.util._
import org.scalatra.forms._
import org.eclipse.jgit.api.Git
import org.eclipse.jgit.lib.{ObjectId, PersonIdent}
import org.eclipse.jgit.revwalk.RevWalk
import org.scalatra.BadRequest
import scala.collection.JavaConverters._
class PullRequestsController
extends PullRequestsControllerBase
with RepositoryService
with AccountService
with IssuesService
with PullRequestService
with MilestonesService
with LabelsService
with CommitsService
with ActivityService
with WebHookPullRequestService
with WebHookPullRequestReviewCommentService
with ReadableUsersAuthenticator
with ReferrerAuthenticator
with WritableUsersAuthenticator
with CommitStatusService
with MergeService
with ProtectedBranchService
with PrioritiesService
trait PullRequestsControllerBase extends ControllerBase {
self: RepositoryService
with AccountService
with IssuesService
with MilestonesService
with LabelsService
with CommitsService
with ActivityService
with PullRequestService
with WebHookPullRequestService
with ReadableUsersAuthenticator
with ReferrerAuthenticator
with WritableUsersAuthenticator
with CommitStatusService
with MergeService
with ProtectedBranchService
with PrioritiesService =>
val pullRequestForm = mapping(
"title" -> trim(label("Title", text(required, maxlength(100)))),
"content" -> trim(label("Content", optional(text()))),
"targetUserName" -> trim(text(required, maxlength(100))),
"targetBranch" -> trim(text(required, maxlength(100))),
"requestUserName" -> trim(text(required, maxlength(100))),
"requestRepositoryName" -> trim(text(required, maxlength(100))),
"requestBranch" -> trim(text(required, maxlength(100))),
"commitIdFrom" -> trim(text(required, maxlength(40))),
"commitIdTo" -> trim(text(required, maxlength(40))),
"assignedUserName" -> trim(optional(text())),
"milestoneId" -> trim(optional(number())),
"priorityId" -> trim(optional(number())),
"labelNames" -> trim(optional(text()))
)(PullRequestForm.apply)
val mergeForm = mapping(
"message" -> trim(label("Message", text(required))),
"strategy" -> trim(label("Strategy", text(required)))
)(MergeForm.apply)
case class PullRequestForm(
title: String,
content: Option[String],
targetUserName: String,
targetBranch: String,
requestUserName: String,
requestRepositoryName: String,
requestBranch: String,
commitIdFrom: String,
commitIdTo: String,
assignedUserName: Option[String],
milestoneId: Option[Int],
priorityId: Option[Int],
labelNames: Option[String]
)
case class MergeForm(message: String, strategy: String)
get("/:owner/:repository/pulls")(referrersOnly { repository =>
val q = request.getParameter("q")
if (Option(q).exists(_.contains("is:issue"))) {
redirect(s"/${repository.owner}/${repository.name}/issues?q=" + StringUtil.urlEncode(q))
} else {
searchPullRequests(None, repository)
}
})
get("/:owner/:repository/pull/:id")(referrersOnly { repository =>
params("id").toIntOpt.flatMap {
issueId =>
val owner = repository.owner
val name = repository.name
getPullRequest(owner, name, issueId) map {
case (issue, pullreq) =>
val (commits, diffs) =
getRequestCompareInfo(owner, name, pullreq.commitIdFrom, owner, name, pullreq.commitIdTo)
html.conversation(
issue,
pullreq,
commits.flatten,
getPullRequestComments(owner, name, issue.issueId, commits.flatten),
diffs.size,
getIssueLabels(owner, name, issueId),
getAssignableUserNames(owner, name),
getMilestonesWithIssueCount(owner, name),
getPriorities(owner, name),
getLabels(owner, name),
isEditable(repository),
isManageable(repository),
hasDeveloperRole(pullreq.requestUserName, pullreq.requestRepositoryName, context.loginAccount),
repository,
getRepository(pullreq.requestUserName, pullreq.requestRepositoryName),
flash.toMap.map(f => f._1 -> f._2.toString)
)
// html.pullreq(
// issue,
// pullreq,
// comments,
// getIssueLabels(owner, name, issueId),
// getAssignableUserNames(owner, name),
// getMilestonesWithIssueCount(owner, name),
// getPriorities(owner, name),
// getLabels(owner, name),
// commits,
// diffs,
// isEditable(repository),
// isManageable(repository),
// hasDeveloperRole(pullreq.requestUserName, pullreq.requestRepositoryName, context.loginAccount),
// repository,
// getRepository(pullreq.requestUserName, pullreq.requestRepositoryName),
// flash.toMap.map(f => f._1 -> f._2.toString)
// )
}
} getOrElse NotFound()
})
get("/:owner/:repository/pull/:id/commits")(referrersOnly { repository =>
params("id").toIntOpt.flatMap {
issueId =>
val owner = repository.owner
val name = repository.name
getPullRequest(owner, name, issueId) map {
case (issue, pullreq) =>
val (commits, diffs) =
getRequestCompareInfo(owner, name, pullreq.commitIdFrom, owner, name, pullreq.commitIdTo)
html.commits(
issue,
pullreq,
commits,
getPullRequestComments(owner, name, issue.issueId, commits.flatten),
diffs.size,
isManageable(repository),
repository
)
}
} getOrElse NotFound()
})
get("/:owner/:repository/pull/:id/files")(referrersOnly { repository =>
params("id").toIntOpt.flatMap {
issueId =>
val owner = repository.owner
val name = repository.name
getPullRequest(owner, name, issueId) map {
case (issue, pullreq) =>
val (commits, diffs) =
getRequestCompareInfo(owner, name, pullreq.commitIdFrom, owner, name, pullreq.commitIdTo)
html.files(
issue,
pullreq,
diffs,
commits.flatten,
getPullRequestComments(owner, name, issue.issueId, commits.flatten),
isManageable(repository),
repository
)
}
} getOrElse NotFound()
})
ajaxGet("/:owner/:repository/pull/:id/mergeguide")(referrersOnly { repository =>
params("id").toIntOpt.flatMap {
issueId =>
val owner = repository.owner
val name = repository.name
getPullRequest(owner, name, issueId) map {
case (issue, pullreq) =>
val conflictMessage = LockUtil.lock(s"${owner}/${name}") {
checkConflict(owner, name, pullreq.branch, issueId)
}
val hasMergePermission = hasDeveloperRole(owner, name, context.loginAccount)
val branchProtection = getProtectedBranchInfo(owner, name, pullreq.branch)
val mergeStatus = PullRequestService.MergeStatus(
conflictMessage = conflictMessage,
commitStatues = getCommitStatues(owner, name, pullreq.commitIdTo),
branchProtection = branchProtection,
branchIsOutOfDate = JGitUtil.getShaByRef(owner, name, pullreq.branch) != Some(pullreq.commitIdFrom),
needStatusCheck = context.loginAccount
.map { u =>
branchProtection.needStatusCheck(u.userName)
}
.getOrElse(true),
hasUpdatePermission = hasDeveloperRole(
pullreq.requestUserName,
pullreq.requestRepositoryName,
context.loginAccount
) &&
context.loginAccount
.map { u =>
!getProtectedBranchInfo(
pullreq.requestUserName,
pullreq.requestRepositoryName,
pullreq.requestBranch
).needStatusCheck(u.userName)
}
.getOrElse(false),
hasMergePermission = hasMergePermission,
commitIdTo = pullreq.commitIdTo
)
html.mergeguide(
mergeStatus,
issue,
pullreq,
repository,
getRepository(pullreq.requestUserName, pullreq.requestRepositoryName).get
)
}
} getOrElse NotFound()
})
get("/:owner/:repository/pull/:id/delete_branch")(readableUsersOnly { baseRepository =>
(for {
issueId <- params("id").toIntOpt
loginAccount <- context.loginAccount
(issue, pullreq) <- getPullRequest(baseRepository.owner, baseRepository.name, issueId)
owner = pullreq.requestUserName
name = pullreq.requestRepositoryName
if hasDeveloperRole(owner, name, context.loginAccount)
} yield {
val repository = getRepository(owner, name).get
val branchProtection = getProtectedBranchInfo(owner, name, pullreq.requestBranch)
if (branchProtection.enabled) {
flash += "error" -> s"branch ${pullreq.requestBranch} is protected."
} else {
if (repository.repository.defaultBranch != pullreq.requestBranch) {
val userName = context.loginAccount.get.userName
using(Git.open(getRepositoryDir(repository.owner, repository.name))) { git =>
git.branchDelete().setForce(true).setBranchNames(pullreq.requestBranch).call()
recordDeleteBranchActivity(repository.owner, repository.name, userName, pullreq.requestBranch)
}
createComment(
baseRepository.owner,
baseRepository.name,
userName,
issueId,
pullreq.requestBranch,
"delete_branch"
)
} else {
flash += "error" -> s"""Can't delete the default branch "${pullreq.requestBranch}"."""
}
}
redirect(s"/${baseRepository.owner}/${baseRepository.name}/pull/${issueId}")
}) getOrElse NotFound()
})
post("/:owner/:repository/pull/:id/update_branch")(readableUsersOnly { baseRepository =>
(for {
issueId <- params("id").toIntOpt
loginAccount <- context.loginAccount
(issue, pullreq) <- getPullRequest(baseRepository.owner, baseRepository.name, issueId)
repository <- getRepository(pullreq.requestUserName, pullreq.requestRepositoryName)
remoteRepository <- getRepository(pullreq.userName, pullreq.repositoryName)
owner = pullreq.requestUserName
name = pullreq.requestRepositoryName
if hasDeveloperRole(owner, name, context.loginAccount)
} yield {
val branchProtection = getProtectedBranchInfo(owner, name, pullreq.requestBranch)
if (branchProtection.needStatusCheck(loginAccount.userName)) {
flash += "error" -> s"branch ${pullreq.requestBranch} is protected need status check."
} else {
LockUtil.lock(s"${owner}/${name}") {
val alias =
if (pullreq.repositoryName == pullreq.requestRepositoryName && pullreq.userName == pullreq.requestUserName) {
pullreq.branch
} else {
s"${pullreq.userName}:${pullreq.branch}"
}
val existIds = using(Git.open(Directory.getRepositoryDir(owner, name))) { git =>
JGitUtil.getAllCommitIds(git)
}.toSet
pullRemote(
repository,
pullreq.requestBranch,
remoteRepository,
pullreq.branch,
loginAccount,
s"Merge branch '${alias}' into ${pullreq.requestBranch}",
Some(pullreq)
) match {
case None => // conflict
flash += "error" -> s"Can't automatic merging branch '${alias}' into ${pullreq.requestBranch}."
case Some(oldId) =>
// update pull request
updatePullRequests(owner, name, pullreq.requestBranch, loginAccount, "synchronize")
flash += "info" -> s"Merge branch '${alias}' into ${pullreq.requestBranch}"
}
}
}
redirect(s"/${baseRepository.owner}/${baseRepository.name}/pull/${issueId}")
}) getOrElse NotFound()
})
post("/:owner/:repository/pull/:id/merge", mergeForm)(writableUsersOnly { (form, repository) =>
params("id").toIntOpt.flatMap { issueId =>
val owner = repository.owner
val name = repository.name
mergePullRequest(repository, issueId, context.loginAccount.get, form.message, form.strategy) match {
case Right(objectId) => redirect(s"/${owner}/${name}/pull/${issueId}")
case Left(message) => Some(BadRequest())
}
} getOrElse NotFound()
})
get("/:owner/:repository/compare")(referrersOnly { forkedRepository =>
val headBranch: Option[String] = params.get("head")
(forkedRepository.repository.originUserName, forkedRepository.repository.originRepositoryName) match {
case (Some(originUserName), Some(originRepositoryName)) => {
getRepository(originUserName, originRepositoryName).map {
originRepository =>
using(
Git.open(getRepositoryDir(originUserName, originRepositoryName)),
Git.open(getRepositoryDir(forkedRepository.owner, forkedRepository.name))
) { (oldGit, newGit) =>
val newBranch = headBranch.getOrElse(JGitUtil.getDefaultBranch(newGit, forkedRepository).get._2)
val oldBranch = originRepository.branchList
.find(_ == newBranch)
.getOrElse(JGitUtil.getDefaultBranch(oldGit, originRepository).get._2)
redirect(
s"/${forkedRepository.owner}/${forkedRepository.name}/compare/${originUserName}:${oldBranch}...${newBranch}"
)
}
} getOrElse NotFound()
}
case _ => {
using(Git.open(getRepositoryDir(forkedRepository.owner, forkedRepository.name))) { git =>
JGitUtil.getDefaultBranch(git, forkedRepository).map {
case (_, defaultBranch) =>
redirect(
s"/${forkedRepository.owner}/${forkedRepository.name}/compare/${defaultBranch}...${headBranch.getOrElse(defaultBranch)}"
)
} getOrElse {
redirect(s"/${forkedRepository.owner}/${forkedRepository.name}")
}
}
}
}
})
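  // The two splat segments below have the form "[owner:]ref...[owner:]ref"; when the owner prefix
  // is omitted, the forked repository's owner is used as the default (see parseCompareIdentifier).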
get("/:owner/:repository/compare/*...*")(referrersOnly { forkedRepository =>
val Seq(origin, forked) = multiParams("splat")
val (originOwner, originId) = parseCompareIdentifier(origin, forkedRepository.owner)
val (forkedOwner, forkedId) = parseCompareIdentifier(forked, forkedRepository.owner)
(for (originRepositoryName <- if (originOwner == forkedOwner) {
// Self repository
Some(forkedRepository.name)
} else if (forkedRepository.repository.originUserName.isEmpty) {
// when ForkedRepository is the original repository
getForkedRepositories(forkedRepository.owner, forkedRepository.name)
.find(_.userName == originOwner)
.map(_.repositoryName)
} else if (Some(originOwner) == forkedRepository.repository.originUserName) {
// Original repository
forkedRepository.repository.originRepositoryName
} else {
// Sibling repository
getUserRepositories(originOwner)
.find { x =>
x.repository.originUserName == forkedRepository.repository.originUserName &&
x.repository.originRepositoryName == forkedRepository.repository.originRepositoryName
}
.map(_.repository.repositoryName)
};
originRepository <- getRepository(originOwner, originRepositoryName)) yield {
val (oldId, newId) =
getPullRequestCommitFromTo(originRepository, forkedRepository, originId, forkedId)
(oldId, newId) match {
case (Some(oldId), Some(newId)) => {
val (commits, diffs) = getRequestCompareInfo(
originRepository.owner,
originRepository.name,
oldId.getName,
forkedRepository.owner,
forkedRepository.name,
newId.getName
)
val title = if (commits.flatten.length == 1) {
commits.flatten.head.shortMessage
} else {
val text = forkedId.replaceAll("[\\\\-_]", " ")
text.substring(0, 1).toUpperCase + text.substring(1)
}
html.compare(
title,
commits,
diffs,
((forkedRepository.repository.originUserName, forkedRepository.repository.originRepositoryName) match {
case (Some(userName), Some(repositoryName)) =>
getRepository(userName, repositoryName) match {
case Some(x) => x.repository :: getForkedRepositories(userName, repositoryName)
case None => getForkedRepositories(userName, repositoryName)
}
case _ =>
forkedRepository.repository :: getForkedRepositories(forkedRepository.owner, forkedRepository.name)
}).map { repository =>
(repository.userName, repository.repositoryName, repository.defaultBranch)
},
commits.flatten
.map(commit => getCommitComments(forkedRepository.owner, forkedRepository.name, commit.id, false))
.flatten
.toList,
originId,
forkedId,
oldId.getName,
newId.getName,
getContentTemplate(originRepository, "PULL_REQUEST_TEMPLATE"),
forkedRepository,
originRepository,
forkedRepository,
hasDeveloperRole(originRepository.owner, originRepository.name, context.loginAccount),
getAssignableUserNames(originRepository.owner, originRepository.name),
getMilestones(originRepository.owner, originRepository.name),
getPriorities(originRepository.owner, originRepository.name),
getDefaultPriority(originRepository.owner, originRepository.name),
getLabels(originRepository.owner, originRepository.name)
)
}
case (oldId, newId) =>
redirect(
s"/${forkedRepository.owner}/${forkedRepository.name}/compare/" +
s"${originOwner}:${oldId.map(_ => originId).getOrElse(originRepository.repository.defaultBranch)}..." +
s"${forkedOwner}:${newId.map(_ => forkedId).getOrElse(forkedRepository.repository.defaultBranch)}"
)
}
}) getOrElse NotFound()
})
ajaxGet("/:owner/:repository/compare/*...*/mergecheck")(readableUsersOnly { forkedRepository =>
val Seq(origin, forked) = multiParams("splat")
val (originOwner, tmpOriginBranch) = parseCompareIdentifier(origin, forkedRepository.owner)
val (forkedOwner, tmpForkedBranch) = parseCompareIdentifier(forked, forkedRepository.owner)
(for (originRepositoryName <- if (originOwner == forkedOwner) {
Some(forkedRepository.name)
} else {
forkedRepository.repository.originRepositoryName.orElse {
getForkedRepositories(forkedRepository.owner, forkedRepository.name)
.find(_.userName == originOwner)
.map(_.repositoryName)
}
};
originRepository <- getRepository(originOwner, originRepositoryName)) yield {
using(
Git.open(getRepositoryDir(originRepository.owner, originRepository.name)),
Git.open(getRepositoryDir(forkedRepository.owner, forkedRepository.name))
) {
case (oldGit, newGit) =>
val originBranch = JGitUtil.getDefaultBranch(oldGit, originRepository, tmpOriginBranch).get._2
val forkedBranch = JGitUtil.getDefaultBranch(newGit, forkedRepository, tmpForkedBranch).get._2
val conflict = LockUtil.lock(s"${originRepository.owner}/${originRepository.name}") {
checkConflict(
originRepository.owner,
originRepository.name,
originBranch,
forkedRepository.owner,
forkedRepository.name,
forkedBranch
)
}
html.mergecheck(conflict.isDefined)
}
}) getOrElse NotFound()
})
post("/:owner/:repository/pulls/new", pullRequestForm)(readableUsersOnly { (form, repository) =>
defining(repository.owner, repository.name) {
case (owner, name) =>
val manageable = isManageable(repository)
val loginUserName = context.loginAccount.get.userName
val issueId = insertIssue(
owner = repository.owner,
repository = repository.name,
loginUser = loginUserName,
title = form.title,
content = form.content,
assignedUserName = if (manageable) form.assignedUserName else None,
milestoneId = if (manageable) form.milestoneId else None,
priorityId = if (manageable) form.priorityId else None,
isPullRequest = true
)
createPullRequest(
originRepository = repository,
issueId = issueId,
originBranch = form.targetBranch,
requestUserName = form.requestUserName,
requestRepositoryName = form.requestRepositoryName,
requestBranch = form.requestBranch,
commitIdFrom = form.commitIdFrom,
commitIdTo = form.commitIdTo,
loginAccount = context.loginAccount.get
)
// insert labels
if (manageable) {
form.labelNames.foreach { value =>
val labels = getLabels(owner, name)
value.split(",").foreach { labelName =>
labels.find(_.labelName == labelName).map { label =>
registerIssueLabel(repository.owner, repository.name, issueId, label.labelId)
}
}
}
}
redirect(s"/${owner}/${name}/pull/${issueId}")
}
})
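  // Suggests branches for a new pull request: branches that are ahead of (and not behind) their
  // base, were committed within the last hour by one of the logged-in user's mail addresses,
  // and from which no pull request has been opened yet.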
ajaxGet("/:owner/:repository/pulls/proposals")(readableUsersOnly { repository =>
val thresholdTime = System.currentTimeMillis() - (1000 * 60 * 60)
val mailAddresses =
context.loginAccount.map(x => Seq(x.mailAddress) ++ getAccountExtraMailAddresses(x.userName)).getOrElse(Nil)
val branches =
using(Git.open(getRepositoryDir(repository.owner, repository.name))) {
git =>
JGitUtil
.getBranches(
git = git,
defaultBranch = repository.repository.defaultBranch,
origin = repository.repository.originUserName.isEmpty
)
.filter { x =>
x.mergeInfo.map(_.ahead).getOrElse(0) > 0 && x.mergeInfo.map(_.behind).getOrElse(0) == 0 &&
x.commitTime.getTime > thresholdTime &&
mailAddresses.contains(x.committerEmailAddress)
}
.sortBy { br =>
(br.mergeInfo.isEmpty, br.commitTime)
}
.map(_.name)
.reverse
}
val targetRepository = (for {
parentUserName <- repository.repository.parentUserName
parentRepoName <- repository.repository.parentRepositoryName
parentRepository <- getRepository(parentUserName, parentRepoName)
} yield {
parentRepository
}).getOrElse {
repository
}
val proposedBranches = branches.filter { branch =>
getPullRequestsByRequest(repository.owner, repository.name, branch, None).isEmpty
}
html.proposals(proposedBranches, targetRepository, repository)
})
private def searchPullRequests(userName: Option[String], repository: RepositoryService.RepositoryInfo) =
defining(repository.owner, repository.name) {
case (owner, repoName) =>
val page = IssueSearchCondition.page(request)
// retrieve search condition
val condition = IssueSearchCondition(request)
gitbucket.core.issues.html.list(
"pulls",
searchIssue(condition, true, (page - 1) * PullRequestLimit, PullRequestLimit, owner -> repoName),
page,
getAssignableUserNames(owner, repoName),
getMilestones(owner, repoName),
getPriorities(owner, repoName),
getLabels(owner, repoName),
countIssue(condition.copy(state = "open"), true, owner -> repoName),
countIssue(condition.copy(state = "closed"), true, owner -> repoName),
condition,
repository,
isEditable(repository),
isManageable(repository)
)
}
/**
   * Tests whether a logged-in user can manage pull requests.
*/
private def isManageable(repository: RepositoryInfo)(implicit context: Context): Boolean = {
hasDeveloperRole(repository.owner, repository.name, context.loginAccount)
}
/**
   * Tests whether a logged-in user can post pull requests.
*/
private def isEditable(repository: RepositoryInfo)(implicit context: Context): Boolean = {
repository.repository.options.issuesOption match {
case "ALL" => !repository.repository.isPrivate && context.loginAccount.isDefined
case "PUBLIC" => hasGuestRole(repository.owner, repository.name, context.loginAccount)
case "PRIVATE" => hasDeveloperRole(repository.owner, repository.name, context.loginAccount)
case "DISABLE" => false
}
}
}
| kounoike/gitbucket | src/main/scala/gitbucket/core/controller/PullRequestsController.scala | Scala | apache-2.0 | 26,409 |
class Bar {
def bar(b: Int = 2): Unit = {}; def baz[X](b: Int = 2): Unit = {}
}
class Foo {
def foo(): Unit = {
new Bar/*#*/().bar/*#*/()
new Bar/*#*/().baz/*#*/[Any]()
new Bar/*#*/().baz/*#*/()
}
}
| scala/scala | test/files/presentation/t7915/src/Foo.scala | Scala | apache-2.0 | 218 |
package com.mtraina.bookservice
import akka.http.scaladsl.testkit.ScalatestRouteTest
import org.scalatest.{WordSpec, Matchers}
class RouteSpec extends WordSpec with Matchers with ScalatestRouteTest with Service {
"The book service" should {
"return an hello message when call the root path" in {
Get() ~> route ~> check {
responseAs[String] shouldEqual "<h1>hello book service</h1>"
}
}
"return id and isbn passed as query parameters" in {
Get("/query_params?id=1&isbn=2") ~> route ~> check {
responseAs[String] shouldEqual "id: 1 and isbn: 2"
}
}
"return the id passed as path parameter" in {
Get("/path_params/1") ~> route ~> check {
responseAs[String] shouldEqual "id: 1"
}
}
}
}
| mtraina/book-service-akka-http | src/test/scala/com/mtraina/bookservice/RouteSpec.scala | Scala | mit | 812 |
package models.domain
import app.{PaymentTypes, StatutoryPaymentFrequency}
import controllers.mappings.Mappings
import models.DayMonthYear
import play.api.data.validation.{ValidationError, Invalid, Valid, Constraint}
import gov.dwp.carers.xml.validation.CommonValidation
import utils.helpers.TextLengthHelper
object YourIncome extends Identifier(id = "s16") {
val ssp = "sickpay"
val spmp = "patmatadoppay"
val fa = "fostering"
val dp = "directpay"
val rental = "rental"
val ao = "anyother"
val n = "none"
}
case class YourIncomes(beenEmployedSince6MonthsBeforeClaim: String = "",
beenSelfEmployedSince1WeekBeforeClaim: String = "",
yourIncome_sickpay: Option[String] = None,
yourIncome_patmatadoppay: Option[String] = None,
yourIncome_fostering: Option[String] = None,
yourIncome_directpay: Option[String] = None,
yourIncome_rentalincome: Option[String] = None,
yourIncome_anyother: Option[String] = None,
yourIncome_none: Option[String] = None
) extends QuestionGroup(YourIncomes)
object YourIncomes extends QGIdentifier(id = s"${YourIncome.id}.g0") {
def receivesStatutorySickPay(claim: Claim) = {
claim.questionGroup[YourIncomes].getOrElse(YourIncomes()).yourIncome_sickpay.getOrElse("false") == "true"
}
def receivesStatutoryPay(claim: Claim) = {
claim.questionGroup[YourIncomes].getOrElse(YourIncomes()).yourIncome_patmatadoppay.getOrElse("false") == "true"
}
}
object YourIncomeStatutorySickPay extends Identifier(id = "s17")
object StatutorySickPay extends QGIdentifier(id = s"${YourIncomeStatutorySickPay.id}.g1") with OtherIncomes {
def whoPaidYouMaxLength = TextLengthHelper.textMaxLength("DWPCAClaim//Incomes//SickPay//WhoPaidYouThisPay//Answer")
def amountPaidMaxLength = CommonValidation.CURRENCY_REGEX_MAX_LENGTH
def howOftenOtherMaxLength = TextLengthHelper.textMaxLength("DWPCAClaim//Incomes//SickPay//HowOftenPaidThisPayOther//Answer")
}
case class StatutorySickPay(
override val stillBeingPaidThisPay: String = "",
override val whenDidYouLastGetPaid: Option[DayMonthYear] = None,
override val whoPaidYouThisPay: String = "",
override val amountOfThisPay: String = "",
override val howOftenPaidThisPay: String = "",
override val howOftenPaidThisPayOther: Option[String] = None
) extends QuestionGroup(StatutorySickPay) with OtherIncomes
object YourIncomeStatutoryMaternityPaternityAdoptionPay extends Identifier(id = "s18") {
def whoPaidYouMaxLength = TextLengthHelper.textMaxLength("DWPCAClaim//Incomes//StatutoryMaternityPaternityAdopt//WhoPaidYouThisPay//Answer")
def amountPaidMaxLength = CommonValidation.CURRENCY_REGEX_MAX_LENGTH
def howOftenOtherMaxLength = TextLengthHelper.textMaxLength("DWPCAClaim//Incomes//StatutoryMaternityPaternityAdopt//HowOftenPaidThisPayOther//Answer")
}
object StatutoryMaternityPaternityAdoptionPay extends QGIdentifier(id = s"${YourIncomeStatutoryMaternityPaternityAdoptionPay.id}.g1") with OtherIncomes
case class StatutoryMaternityPaternityAdoptionPay(
override val paymentTypesForThisPay: String = "",
override val stillBeingPaidThisPay: String = "",
override val whenDidYouLastGetPaid: Option[DayMonthYear] = None,
override val whoPaidYouThisPay: String = "",
override val amountOfThisPay: String = "",
override val howOftenPaidThisPay: String = "",
override val howOftenPaidThisPayOther: Option[String] = None
) extends QuestionGroup(StatutoryMaternityPaternityAdoptionPay) with OtherIncomes
object YourIncomeFosteringAllowance extends Identifier(id = "s19") {
def whoPaidYouOtherMaxLength = TextLengthHelper.textMaxLength("DWPCAClaim//Incomes//FosteringAllowance//PaymentTypesForThisPayOther//Answer")
def whoPaidYouMaxLength = TextLengthHelper.textMaxLength("DWPCAClaim//Incomes//FosteringAllowance//WhoPaidYouThisPay//Answer")
def amountPaidMaxLength = CommonValidation.CURRENCY_REGEX_MAX_LENGTH
def howOftenOtherMaxLength = TextLengthHelper.textMaxLength("DWPCAClaim//Incomes//FosteringAllowance//HowOftenPaidThisPayOther//Answer")
}
object FosteringAllowance extends QGIdentifier( id = s"${YourIncomeFosteringAllowance.id}.g1") with OtherIncomes
case class FosteringAllowance(
override val paymentTypesForThisPay: String = "",
override val paymentTypesForThisPayOther: Option[String] = None,
override val stillBeingPaidThisPay: String = "",
override val whenDidYouLastGetPaid: Option[DayMonthYear] = None,
override val whoPaidYouThisPay: String = "",
override val amountOfThisPay: String = "",
override val howOftenPaidThisPay: String = "",
override val howOftenPaidThisPayOther: Option[String] = None
) extends QuestionGroup(FosteringAllowance) with OtherIncomes
object YourIncomeDirectPayment extends Identifier(id = "s20") {
def whoPaidYouMaxLength = TextLengthHelper.textMaxLength("DWPCAClaim//Incomes//DirectPay//WhoPaidYouThisPay//Answer")
def amountPaidMaxLength = CommonValidation.CURRENCY_REGEX_MAX_LENGTH
def howOftenOtherMaxLength = TextLengthHelper.textMaxLength("DWPCAClaim//Incomes//DirectPay//HowOftenPaidThisPayOther//Answer")
}
object DirectPayment extends QGIdentifier(id = s"${YourIncomeDirectPayment.id}.g1") with OtherIncomes
case class DirectPayment(
override val stillBeingPaidThisPay: String = "",
override val whenDidYouLastGetPaid: Option[DayMonthYear] = None,
override val whoPaidYouThisPay: String = "",
override val amountOfThisPay: String = "",
override val howOftenPaidThisPay: String = "",
override val howOftenPaidThisPayOther: Option[String] = None
) extends QuestionGroup(DirectPayment) with OtherIncomes
object YourIncomeRentalIncome extends Identifier(id = "s24") {
def rentalIncomeMaxLength = TextLengthHelper.textMaxLength("DWPCAClaim//Incomes//RentalIncomeInfo//Answer")
}
object RentalIncome extends QGIdentifier(id = s"${YourIncomeRentalIncome.id}.g1")
case class RentalIncome(
rentalIncomeInfo: String = ""
) extends QuestionGroup(RentalIncome)
object YourIncomeOtherPayments extends Identifier(id = "s21") {
def otherPaymentsMaxLength = TextLengthHelper.textMaxLength("DWPCAClaim//Incomes//OtherPaymentsInfo//Answer")
}
object OtherPayments extends QGIdentifier(id = s"${YourIncomeOtherPayments.id}.g1")
case class OtherPayments(
otherPaymentsInfo: String = ""
) extends QuestionGroup(OtherPayments)
trait OtherIncomes {
val paymentTypesForThisPay: String = ""
val paymentTypesForThisPayOther: Option[String] = None
val stillBeingPaidThisPay: String = ""
val whenDidYouLastGetPaid: Option[DayMonthYear] = None
val whoPaidYouThisPay: String = ""
val amountOfThisPay: String = ""
val howOftenPaidThisPay: String = ""
val howOftenPaidThisPayOther: Option[String] = None
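  // Each constraint below makes a dependent answer mandatory only when the controlling answer
  // requires it (e.g. an "It varies" payment frequency requires the free-text frequency field).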
def howOftenPaidThisPayItVariesRequired: Constraint[OtherIncomes] = Constraint[OtherIncomes]("constraint.howOftenPaidThisPay") {
income =>
if (income.howOftenPaidThisPay == StatutoryPaymentFrequency.ItVaries) {
income.howOftenPaidThisPayOther match {
case Some(howOften) => Valid
case _ => Invalid(ValidationError("howOftenPaidThisPay.required"))
}
}
else Valid
}
def whenDidYouLastGetPaidRequired: Constraint[OtherIncomes] = Constraint[OtherIncomes]("constraint.whenDidYouLastGetPaid") {
income =>
if (income.stillBeingPaidThisPay == Mappings.no) {
income.whenDidYouLastGetPaid match {
case Some(whenLastPaid) => Valid
case _ => Invalid(ValidationError("whenDidYouLastGetPaid.required"))
}
}
else Valid
}
def paymentTypesForThisPayOtherRequired: Constraint[OtherIncomes] = Constraint[OtherIncomes]("constraint.paymentTypesForThisPay") {
income =>
if (income.paymentTypesForThisPay == PaymentTypes.Other) {
income.paymentTypesForThisPayOther match {
case Some(howOften) => Valid
case _ => Invalid(ValidationError("paymentTypesForThisPayOther.required"))
}
}
else Valid
}
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/app/models/domain/YourIncome.scala | Scala | mit | 9,229 |
package suiryc.scala.sys.linux
import grizzled.slf4j.Logging
import java.io.File
import java.nio.file.{Files, Path, Paths}
import suiryc.scala.io.SourceEx
import suiryc.scala.misc
import suiryc.scala.sys.{Command, CommandResult}
class DevicePartition(val device: Device, val partNumber: Int)
extends Logging
{
val block = device.block.resolve(Paths.get(s"${device.block.getFileName}${device.partitionInfix}$partNumber"))
val dev = device.dev.getParent.resolve(block.getFileName)
val size = Device.size(block)
protected def blkid(tag: String): Either[Throwable, String] =
try {
/* Try a direct approach using 'blkid' (requires privileges) */
val CommandResult(result, stdout, stderr) = Command.execute(Seq("blkid", "-o", "value", "-s", tag.toUpperCase, dev.toString))
if (result == 0) {
Right(stdout.trim)
}
else {
/* Fallback to indirect approach through '/dev/disk/by-tag' */
val byTAG = Paths.get("/", "dev", "disk", s"by-${tag.toLowerCase}")
val files =
if (!Files.isDirectory(byTAG)) Nil
else misc.Util.wrapNull(byTAG.toFile.listFiles()).toList
files find { file =>
file.getCanonicalPath == dev.toString
} match {
case Some(file) =>
Right(file.toString)
case None =>
val msg =
if (stderr != "") s"Cannot get partition[$dev] ${tag.toLowerCase}: $stderr"
else s"Cannot get partition[$dev] ${tag.toLowerCase}"
error(msg)
Left(new Exception(msg))
}
}
}
catch {
case e: Throwable =>
Left(e)
}
/* Note: UUID may be set or changed upon formatting partition */
def uuid: Either[Throwable, String] =
blkid("UUID").right.map(uuid => if (uuid == "") "<unknown-uuid>" else uuid)
def label: Either[Throwable, String] =
blkid("LABEL")
def fsType: Either[Throwable, String] =
blkid("TYPE")
def mounted: Boolean = {
val partitionUUID = uuid.fold(_ => "<unknown-uuid>", uuid => uuid)
SourceEx.autoCloseFile(Paths.get("/", "proc", "mounts").toFile) { source =>
source.getLines() map { line =>
line.trim().split("""\\s""").head
} exists { line =>
(line == dev.toString) || (line == s"/dev/disk/by-uuid/$partitionUUID")
}
}
}
def umount = Command.execute(Seq("umount", dev.toString))
override def toString =
s"Partition(device=$device, partNumber=$partNumber, uuid=$uuid, size=$size)"
}
object DevicePartition {
def apply(device: Device, partNumber: Int): DevicePartition =
new DevicePartition(device, partNumber)
def option(path: Path): Option[DevicePartition] =
Device.fromPartition(path) flatMap {device =>
device.partitions find { partition =>
partition.block.getFileName.toString == path.getFileName.toString
}
}
def option(path: File): Option[DevicePartition] =
option(path.toPath)
}
| swhgoon/suiryc-scala | core/src/main/scala/suiryc/scala/sys/linux/DevicePartition.scala | Scala | gpl-3.0 | 2,971 |
/*
* Skylark
* http://skylark.io
*
* Copyright 2012-2017 Quantarray, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.quantarray.skylark.measure
/**
* Same measure converter.
*
* @author Araik Grigoryan
*/
trait SameMeasureConverter[T <: Measure[T]] extends SameTypeConverter[T]
{
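  // Conversion between two measures of the same type succeeds when both resolve to the same
  // ultimate base within the same system; the factor is then the ratio of their base multiples.
  // For example (representation assumed from the pattern match below), a measure whose ultimate
  // base is (gram, 1000.0) converts to one whose base is (gram, 1.0) with factor 1000.0.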
protected override def convert(from: T, to: T): Option[Double] =
{
(from.ultimateBase, to.ultimateBase) match
{
case (Some(f), Some(t)) if from.system == to.system && f._1 == t._1 => Some(f._2 / t._2)
case _ => super.convert(from, to)
}
}
}
| quantarray/skylark | skylark-measure/src/main/scala/com/quantarray/skylark/measure/SameMeasureConverter.scala | Scala | apache-2.0 | 1,095 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.words
import org.scalatest.matchers._
import org.scalatest.enablers._
import org.scalactic._
import org.scalatest.FailureMessages
import org.scalatest.UnquotedString
import org.scalatest.Resources
import scala.collection.GenTraversable
import scala.collection.GenSeq
import org.scalatest.MatchersHelper.accessProperty
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="../Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class HaveWord {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* book should have length (9)
* ^
* </pre>
*/
def length(expectedLength: Long): MatcherFactory1[Any, Length] =
new MatcherFactory1[Any, Length] {
def matcher[T <: Any : Length]: Matcher[T] = {
val length = implicitly[Length[T]]
new Matcher[T] {
def apply(left: T): MatchResult = {
val lengthOfLeft = length.lengthOf(left)
MatchResult(
lengthOfLeft == expectedLength,
Resources("hadLengthInsteadOfExpectedLength"),
Resources("hadLength"),
Vector(left, lengthOfLeft, expectedLength),
Vector(left, expectedLength)
)
}
override def toString: String = "have length " + expectedLength
}
}
override def toString: String = "have length " + expectedLength
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* book should have size (9)
* ^
* </pre>
*
* <p>
   * This method produces a <code>MatcherFactory1[Any, Size]</code>, so the expression compiles only
   * when an implicit <code>Size[T]</code> instance is available for the type of the value on the
   * left-hand side; the presence of an appropriate size is therefore checked statically at compile
   * time rather than resulting in a <code>TestFailedException</code> at runtime.
* </p>
*/
def size(expectedSize: Long): MatcherFactory1[Any, Size] =
new MatcherFactory1[Any, Size] {
def matcher[T <: Any : Size]: Matcher[T] = {
val size = implicitly[Size[T]]
new Matcher[T] {
def apply(left: T): MatchResult = {
val sizeOfLeft = size.sizeOf(left)
MatchResult(
sizeOfLeft == expectedSize,
FailureMessages("hadSizeInsteadOfExpectedSize"),
FailureMessages("hadSize"),
Vector(left, sizeOfLeft, expectedSize),
Vector(left, expectedSize)
)
}
override def toString: String = "have size " + expectedSize
}
}
override def toString: String = "have size " + expectedSize
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* result should have message ("A message from Mars!")
* ^
* </pre>
*/
def message(expectedMessage: String): MatcherFactory1[Any, Messaging] =
new MatcherFactory1[Any, Messaging] {
def matcher[T <: Any : Messaging]: Matcher[T] = {
val messaging = implicitly[Messaging[T]]
new Matcher[T] {
def apply(left: T): MatchResult = {
val messageOfLeft = messaging.messageOf(left)
MatchResult(
messageOfLeft == expectedMessage,
FailureMessages("hadMessageInsteadOfExpectedMessage"),
FailureMessages("hadExpectedMessage"),
Vector(left, messageOfLeft, expectedMessage),
Vector(left, expectedMessage)
)
}
override def toString: String = "have message " + Prettifier.default(expectedMessage)
}
}
override def toString: String = "have message " + Prettifier.default(expectedMessage)
}
/**
* Enables parentheses to be placed around <code>length (N)</code> in expressions of the form: <code>should have (length (N))</code>.
*/
def apply[T](resultOfLengthWordApplication: ResultOfLengthWordApplication): MatcherFactory1[Any, Length] = length(resultOfLengthWordApplication.expectedLength)
/**
* Enables parentheses to be placed around <code>size (N)</code> in expressions of the form: <code>should have (size (N))</code>.
*/
def apply[T](resultOfSizeWordApplication: ResultOfSizeWordApplication): MatcherFactory1[Any, Size] = size(resultOfSizeWordApplication.expectedSize)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* book should have (title ("A Tale of Two Cities"))
* ^
* </pre>
*/
def apply[T](firstPropertyMatcher: HavePropertyMatcher[T, _], propertyMatchers: HavePropertyMatcher[T, _]*): Matcher[T] =
new Matcher[T] {
def apply(left: T): MatchResult = {
val results =
for (propertyVerifier <- firstPropertyMatcher :: propertyMatchers.toList) yield
propertyVerifier(left)
val firstFailureOption = results.find(pv => !pv.matches)
val justOneProperty = propertyMatchers.length == 0
firstFailureOption match {
case Some(firstFailure) =>
val failedVerification = firstFailure
val (rawFailureMessage, failureMessageArgs) =
(
Resources("propertyDidNotHaveExpectedValue"),
Vector(
UnquotedString(failedVerification.propertyName),
failedVerification.expectedValue,
failedVerification.actualValue,
left
)
)
val (rawMidSentenceFailureMessage, midSentenceFailureMessageArgs) =
(
Resources("midSentencePropertyDidNotHaveExpectedValue"),
Vector(
UnquotedString(failedVerification.propertyName),
failedVerification.expectedValue,
failedVerification.actualValue,
left
)
)
MatchResult(false, rawFailureMessage, rawFailureMessage, rawMidSentenceFailureMessage, rawMidSentenceFailureMessage, failureMessageArgs, midSentenceFailureMessageArgs)
case None =>
val (rawFailureMessage, failureMessageArgs) =
if (justOneProperty) {
val firstPropertyResult = results.head // know this will succeed, because firstPropertyMatcher was required
(
Resources("propertyHadExpectedValue"),
Vector(
UnquotedString(firstPropertyResult.propertyName),
firstPropertyResult.expectedValue,
left
)
)
}
else (Resources("allPropertiesHadExpectedValues"), Vector(left))
val (rawMidSentenceFailureMessage, rawMidSentenceFailureMessageArgs) =
if (justOneProperty) {
val firstPropertyResult = results.head // know this will succeed, because firstPropertyMatcher was required
(
Resources("midSentencePropertyHadExpectedValue"),
Vector(
UnquotedString(firstPropertyResult.propertyName),
firstPropertyResult.expectedValue,
left
)
)
}
else (Resources("midSentenceAllPropertiesHadExpectedValues"), Vector(left))
MatchResult(true, rawFailureMessage, rawFailureMessage, rawMidSentenceFailureMessage, rawMidSentenceFailureMessage, failureMessageArgs, rawMidSentenceFailureMessageArgs)
}
}
override def toString: String = "have (" + Prettifier.default(firstPropertyMatcher) + ")"
}
/**
   * Overrides toString to return "have"
*/
override def toString: String = "have"
}
| travisbrown/scalatest | src/main/scala/org/scalatest/words/HaveWord.scala | Scala | apache-2.0 | 8,685 |
package is.hail.expr.ir
import is.hail.backend.spark.SparkBackend
import is.hail.utils._
import is.hail.types.virtual.{TBoolean, TInt32, TInt64, TString, TStruct, Type}
import is.hail.io.compress.BGzipInputStream
import is.hail.io.fs.{FS, FileStatus, Positioned, PositionedInputStream, BGZipCompressionCodec}
import org.apache.commons.io.input.{CountingInputStream, ProxyInputStream}
import org.apache.hadoop.io.compress.SplittableCompressionCodec
import org.apache.spark.{Partition, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import scala.annotation.meta.param
trait CloseableIterator[T] extends Iterator[T] with AutoCloseable
object GenericLines {
def read(fs: FS, contexts: IndexedSeq[Any], gzAsBGZ: Boolean): GenericLines = {
val body: (FS, Any) => CloseableIterator[GenericLine] = { (fs: FS, context: Any) =>
val contextRow = context.asInstanceOf[Row]
val index = contextRow.getAs[Int](0)
val file = contextRow.getAs[String](1)
val start = contextRow.getAs[Long](2)
val end = contextRow.getAs[Long](3)
val split = contextRow.getAs[Boolean](4)
new CloseableIterator[GenericLine] {
private var splitCompressed = false
private val is: PositionedInputStream = {
val rawIS = fs.openNoCompression(file)
val codec = fs.getCodecFromPath(file, gzAsBGZ)
if (codec == null) {
assert(split)
rawIS.seek(start)
rawIS
} else if (codec == BGZipCompressionCodec) {
assert(split)
splitCompressed = true
val bgzIS = new BGzipInputStream(rawIS, start, end, SplittableCompressionCodec.READ_MODE.BYBLOCK)
new ProxyInputStream(bgzIS) with Positioned {
def getPosition: Long = bgzIS.getVirtualOffset
}
} else {
assert(!split)
new CountingInputStream(codec.makeInputStream(rawIS)) with Positioned {
def getPosition: Long = getByteCount
}
}
}
private var eof = false
private var closed = false
private var buf = new Array[Byte](64 * 1024)
private var bufOffset = 0L
private var bufMark = 0
private var bufPos = 0
private var realEnd =
if (splitCompressed)
-1L // end really means first block >= end
else
end
private def loadBuffer(): Unit = {
// compressed blocks can be empty
while (bufPos == bufMark && !eof) {
// load new block
bufOffset = is.getPosition
val nRead = is.read(buf)
if (nRead == -1) {
eof = true
assert(!closed)
close()
} else {
bufPos = 0
bufMark = nRead
assert(!splitCompressed || virtualOffsetBlockOffset(bufOffset) == 0)
if (realEnd == -1 && bufOffset >= end)
realEnd = bufOffset
}
}
}
loadBuffer()
private var lineData = new Array[Byte](1024)
private var line = new GenericLine(file)
line.data = lineData
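        // Reads the next line into lineData (growing it as needed), handling LF, CR and CRLF
        // endings across buffer reloads; sets `line` to null once this partition is exhausted.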
private def readLine(): Unit = {
assert(line != null)
if (eof) {
line = null
return
}
assert(bufPos < bufMark)
val offset = bufOffset + bufPos
if (split && realEnd != -1L && offset > realEnd) {
line = null
return
}
var sawcr = false
var linePos = 0
while (true) {
if (eof) {
assert(linePos > 0)
line.setLine(offset, linePos)
return
}
assert(bufPos < bufMark)
val begin = bufPos
var eol = false
if (sawcr) {
val c = buf(bufPos)
if (c == '\\n')
bufPos += 1
eol = true
} else {
// look for end of line in buf
while (bufPos < bufMark && {
val c = buf(bufPos)
c != '\\n' && c != '\\r'
})
bufPos += 1
if (bufPos < bufMark) {
val c = buf(bufPos)
if (c == '\\n') {
bufPos += 1
eol = true
} else {
assert(c == '\\r')
bufPos += 1
if (bufPos < bufMark) {
val c2 = buf(bufPos)
if (c2 == '\\n')
bufPos += 1
eol = true
} else
sawcr = true
}
}
}
// move scanned input from buf to lineData
val n = bufPos - begin
if (linePos + n > lineData.length) {
val copySize = linePos.toLong + n
// Maximum array size compatible with common JDK implementations
// https://github.com/openjdk/jdk14u/blob/84917a040a81af2863fddc6eace3dda3e31bf4b5/src/java.base/share/classes/jdk/internal/util/ArraysSupport.java#L577
val maxArraySize = Int.MaxValue - 8
if (copySize > maxArraySize)
fatal(s"GenericLines: line size reached: cannot read a line with more than 2^31-1 bytes")
val newSize = Math.min(copySize * 2, maxArraySize).toInt
if (newSize > (1 << 20)) {
log.info(s"GenericLines: growing line buffer to $newSize")
}
val newLineData = new Array[Byte](newSize)
System.arraycopy(lineData, 0, newLineData, 0, linePos)
lineData = newLineData
line.data = newLineData
}
System.arraycopy(buf, begin, lineData, linePos, n)
linePos += n
if (bufPos == bufMark)
loadBuffer()
if (eol) {
assert(linePos > 0)
line.setLine(offset, linePos)
return
}
}
}
readLine()
// the first line begins at most at start
// belongs to previous partition
if (index > 0 && line != null)
readLine()
private var consumed = false
def hasNext: Boolean = {
if (consumed) {
readLine()
consumed = false
}
line != null
}
def next(): GenericLine = {
if (consumed)
readLine()
assert(line != null)
assert(line.lineLength > 0)
consumed = true
line
}
def close(): Unit = {
if (!closed) {
is.close()
closed = true
}
}
}
}
val contextType = TStruct(
"index" -> TInt32,
"file" -> TString,
"start" -> TInt64,
"end" -> TInt64,
"split" -> TBoolean)
new GenericLines(
contextType,
contexts,
body)
}
def read(
fs: FS,
fileStatuses0: IndexedSeq[FileStatus],
nPartitions: Option[Int],
blockSizeInMB: Option[Int],
minPartitions: Option[Int],
gzAsBGZ: Boolean,
allowSerialRead: Boolean
): GenericLines = {
val fileStatuses = fileStatuses0.filter(_.getLen > 0)
val totalSize = fileStatuses.map(_.getLen).sum
var totalPartitions = nPartitions match {
case Some(nPartitions) => nPartitions
case None =>
val blockSizeInB = blockSizeInMB.getOrElse(128) * 1024 * 1024
(totalSize.toDouble / blockSizeInB + 0.5).toInt
}
minPartitions match {
case Some(minPartitions) =>
if (totalPartitions < minPartitions)
totalPartitions = minPartitions
case None =>
}
val contexts = fileStatuses.flatMap { status =>
val size = status.getLen
val codec = fs.getCodecFromPath(status.getPath, gzAsBGZ)
val splittable = codec == null || codec == BGZipCompressionCodec
if (splittable) {
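        // Give this file a number of partitions proportional to its share of the total input
        // size (rounded to nearest, with at least one partition per non-empty file).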
var fileNParts = ((totalPartitions.toDouble * size) / totalSize + 0.5).toInt
if (fileNParts == 0)
fileNParts = 1
val parts = partition(size, fileNParts)
val partScan = parts.scanLeft(0L)(_ + _)
Iterator.range(0, fileNParts)
.map { i =>
val start = partScan(i)
var end = partScan(i + 1)
if (codec != null)
end = makeVirtualOffset(end, 0)
Row(i, status.getPath, start, end, true)
}
} else {
if (!allowSerialRead)
fatal(s"Cowardly refusing to read file serially: ${ status.getPath }.")
Iterator.single {
Row(0, status.getPath, 0L, size, false)
}
}
}
GenericLines.read(fs, contexts, gzAsBGZ)
}
def collect(fs: FS, lines: GenericLines): IndexedSeq[String] = {
lines.contexts.flatMap { context =>
using(lines.body(fs, context)) { it =>
it.map(_.toString).toArray
}
}
}
}
class GenericLine(
val file: String,
// possibly virtual
private var _offset: Long,
var data: Array[Byte],
private var _lineLength: Int) {
def this(file: String) = this(file, 0, null, 0)
private var _str: String = null
def setLine(newOffset: Long, newLength: Int): Unit = {
_offset = newOffset
_lineLength = newLength
_str = null
}
def offset: Long = _offset
def lineLength: Int = _lineLength
override def toString: String = {
if (_str == null) {
var n = lineLength
assert(n > 0)
// strip line delimiter to match behavior of Spark textFile
if (data(n - 1) == '\\n') {
n -= 1
if (n > 0 && data(n - 1) == '\\r')
n -= 1
} else if (data(n - 1) == '\\r')
n -= 1
_str = new String(data, 0, n)
}
_str
}
}
class GenericLinesRDDPartition(val index: Int, val context: Any) extends Partition
class GenericLinesRDD(
@(transient@param) contexts: IndexedSeq[Any],
body: (Any) => CloseableIterator[GenericLine]
) extends RDD[GenericLine](SparkBackend.sparkContext("GenericLinesRDD"), Seq()) {
protected def getPartitions: Array[Partition] =
contexts.iterator.zipWithIndex.map { case (c, i) =>
new GenericLinesRDDPartition(i, c)
}.toArray
def compute(split: Partition, context: TaskContext): Iterator[GenericLine] = {
val it = body(split.asInstanceOf[GenericLinesRDDPartition].context)
TaskContext.get.addTaskCompletionListener[Unit] { _ =>
it.close()
}
it
}
}
class GenericLines(
val contextType: Type,
val contexts: IndexedSeq[Any],
val body: (FS, Any) => CloseableIterator[GenericLine]) {
def nPartitions: Int = contexts.length
def toRDD(fs: FS): RDD[GenericLine] = {
val localBody = body
new GenericLinesRDD(contexts, localBody(fs, _))
}
}
| hail-is/hail | hail/src/main/scala/is/hail/expr/ir/GenericLines.scala | Scala | mit | 10,920 |
package fpinscala.gettingstarted
// A comment!
/* Another comment */
/** A documentation comment */
object MyModule {
def abs(n: Int): Int =
if (n < 0) -n
else n
private def formatAbs(x: Int) = {
val msg = "The absolute value of %d is %d"
msg.format(x, abs(x))
}
def main(args: Array[String]): Unit =
println(formatAbs(-42))
// A definition of factorial, using a local, tail recursive function
def factorial(n: Int): Int = {
@annotation.tailrec
def go(n: Int, acc: Int): Int =
if (n <= 0) acc
else go(n-1, n*acc)
go(n, 1)
}
// Another implementation of `factorial`, this time with a `while` loop
def factorial2(n: Int): Int = {
var acc = 1
var i = n
while (i > 0) { acc *= i; i -= 1 }
acc
}
// Exercise 1: Write a function to compute the nth fibonacci number
def fib(n: Int): Int = if (n == 0) 0 else if (n == 1) 1 else fib(n - 1) + fib(n - 2)
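
  // For illustration only (not part of the original exercise answer above): the same result can
  // be computed in constant stack space with a local tail-recursive helper, mirroring `factorial`.
  def fibIterative(n: Int): Int = {
    @annotation.tailrec
    def go(n: Int, prev: Int, cur: Int): Int =
      if (n <= 0) prev
      else go(n - 1, cur, prev + cur)
    go(n, 0, 1)
  }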
// This definition and `formatAbs` are very similar..
private def formatFactorial(n: Int) = {
val msg = "The factorial of %d is %d."
msg.format(n, factorial(n))
}
// We can generalize `formatAbs` and `formatFactorial` to
// accept a _function_ as a parameter
def formatResult(name: String, n: Int, f: Int => Int) = {
val msg = "The %s of %d is %d."
msg.format(name, n, f(n))
}
}
object FormatAbsAndFactorial {
import MyModule._
// Now we can use our general `formatResult` function
// with both `abs` and `factorial`
def main(args: Array[String]): Unit = {
println(formatResult("absolute value", -42, abs))
println(formatResult("factorial", 7, factorial))
}
}
object TestFib {
import MyModule._
// test implementation of `fib`
def main(args: Array[String]): Unit = {
println("Expected: 0, 1, 1, 2, 3, 5, 8")
println("Actual: %d, %d, %d, %d, %d, %d, %d".format(fib(0), fib(1), fib(2), fib(3), fib(4), fib(5), fib(6)))
}
}
// Functions get passed around so often in FP that it's
// convenient to have syntax for constructing a function
// *without* having to give it a name
object AnonymousFunctions {
import MyModule._
// Some examples of anonymous functions:
def main(args: Array[String]): Unit = {
println(formatResult("absolute value", -42, abs))
println(formatResult("factorial", 7, factorial))
println(formatResult("increment", 7, (x: Int) => x + 1))
println(formatResult("increment2", 7, (x) => x + 1))
println(formatResult("increment3", 7, x => x + 1))
println(formatResult("increment4", 7, _ + 1))
println(formatResult("increment5", 7, x => { val r = x + 1; r }))
}
}
object MonomorphicBinarySearch {
// First, a binary search implementation, specialized to `Double`,
// another primitive type in Scala, representing 64-bit floating
// point numbers
// Ideally, we could generalize this to work for any `Array` type,
// so long as we have some way of comparing elements of the `Array`
def binarySearch(ds: Array[Double], key: Double): Int = {
@annotation.tailrec
def go(low: Int, mid: Int, high: Int): Int = {
if (low > high) -mid - 1
else {
val mid2 = (low + high) / 2
val d = ds(mid2) // We index into an array using the same
// syntax as function application
if (d == key) mid2
else if (d > key) go(low, mid2, mid2-1)
else go(mid2 + 1, mid2, high)
}
}
go(0, 0, ds.length - 1)
}
}
object PolymorphicFunctions {
// Here's a polymorphic version of `binarySearch`, parameterized on
// a function for testing whether an `A` is greater than another `A`.
def binarySearch[A](as: Array[A], key: A, gt: (A,A) => Boolean): Int = {
@annotation.tailrec
def go(low: Int, mid: Int, high: Int): Int = {
if (low > high) -mid - 1
else {
val mid2 = (low + high) / 2
val a = as(mid2)
val greater = gt(a, key)
if (!greater && !gt(key,a)) mid2
else if (greater) go(low, mid2, mid2-1)
else go(mid2 + 1, mid2, high)
}
}
go(0, 0, as.length - 1)
}
// Exercise 2: Implement a polymorphic function to check whether
// an `Array[A]` is sorted
def isSorted[A](as: Array[A], gt: (A,A) => Boolean): Boolean = as match {
case Array() | Array(_) => true
case arr @ Array(_*) => arr.sliding(2).forall(a => !gt(a(0), a(1)))
}
// Polymorphic functions are often so constrained by their type
// that they only have one implementation! Here's an example:
def partial1[A,B,C](a: A, f: (A,B) => C): B => C =
(b: B) => f(a, b)
// Exercise 3: Implement `curry`.
// Note that `=>` associates to the right, so we could
// write the return type as `A => B => C`
def curry[A,B,C](f: (A, B) => C): A => (B => C) =
a => b => f(a,b)
// NB: The `Function2` trait has a `curried` method already
// Exercise 4: Implement `uncurry`
def uncurry[A,B,C](f: A => B => C): (A, B) => C =
(a, b) => f(a)(b)
/*
NB: There is a method on the `Function` object in the standard library,
`Function.uncurried` that you can use for uncurrying.
Note that we can go back and forth between the two forms. We can curry
and uncurry and the two forms are in some sense "the same". In FP jargon,
we say that they are _isomorphic_ ("iso" = same; "morphe" = shape, form),
a term we inherit from category theory.
*/
// Exercise 5: Implement `compose`
def compose[A,B,C](f: B => C, g: A => B): A => C =
a => f(g(a))
}
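
// Illustrative check of the exercise answers above, in the same style as the demo objects
// earlier in this file. Not part of the original exercises; the expected values in the
// comments are worked out by hand.
object TestCurryUncurryCompose {
  import PolymorphicFunctions._

  def main(args: Array[String]): Unit = {
    val add: (Int, Int) => Int = _ + _
    val curried = curry(add)          // Int => Int => Int
    val uncurried = uncurry(curried)  // (Int, Int) => Int, back to the original shape
    println(curried(1)(2))            // 3
    println(uncurried(1, 2))          // 3, same result as add(1, 2)

    val incThenDouble = compose[Int, Int, Int](_ * 2, _ + 1)
    println(incThenDouble(3))         // (3 + 1) * 2 = 8

    println(isSorted[Int](Array(1, 2, 3), _ > _)) // true
    println(isSorted[Int](Array(3, 1, 2), _ > _)) // false
  }
}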
| mkunikow/fpinscala | exercises/src/main/scala/fpinscala/gettingstarted/GettingStarted.scala | Scala | mit | 5,489 |
/**
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.ingestion.provider
import Provider._
object Main {
def main(args: Array[String]): Unit =
args match {
case Array("download", workflowId) =>
downloadFiles(workflowId)
case Array("download", workflowId) =>
downloadFiles(workflowId)
case Array("upload", workflowId) =>
uploadFiles(workflowId)
case Array("upload", workflowId) =>
uploadFiles(workflowId)
case _ =>
println("usage: download|upload [workflowId]")
}
}
| Stratio/Ingestion | tools/src/main/scala/com/stratio/ingestion/provider/Main.scala | Scala | apache-2.0 | 1,140 |
object Sample extends App {
case class Wrapper[T](value: T)
implicit final class TestExtensions[C[X] <: Seq[X]](val v: Wrapper[C[String]]) extends AnyVal {
def test = v.value.map(_ + "!")
}
Wrapper(Seq("a", "b", "c")).<ref>test
} | ilinum/intellij-scala | testdata/resolve/implicitConversion/SCL10549.scala | Scala | apache-2.0 | 243 |
package org.jetbrains.plugins.scala.editor.importOptimizer
import com.intellij.psi._
import org.jetbrains.plugins.scala.editor.importOptimizer.ScalaImportOptimizer._root_prefix
import org.jetbrains.plugins.scala.extensions.{PsiClassExt, PsiMemberExt, PsiModifierListOwnerExt, PsiNamedElementExt}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAlias
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportExpr
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.usages.{ImportExprUsed, ImportSelectorUsed, ImportUsed, ImportWildcardSelectorUsed}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.packaging.ScPackaging
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScMember, ScObject}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.impl.base.ScStableCodeReferenceElementImpl
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.lang.resolve.processor.CompletionProcessor
import org.jetbrains.plugins.scala.lang.resolve.{ScalaResolveResult, StdKinds}
import scala.annotation.tailrec
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
* @author Nikolay.Tropin
*/
case class ImportInfo(prefixQualifier: String,
relative: Option[String],
allNames: Set[String],
singleNames: Set[String],
renames: Map[String, String],
hiddenNames: Set[String],
hasWildcard: Boolean,
rootUsed: Boolean,
isStableImport: Boolean = true,
allNamesForWildcard: Set[String] = Set.empty,
wildcardHasUnusedImplicit: Boolean = false) {
def withoutRelative: ImportInfo =
if (relative.isDefined || rootUsed) copy(relative = None) else this
def split: Seq[ImportInfo] = {
val result = new ArrayBuffer[ImportInfo]()
result ++= singleNames.toSeq.sorted.map { name =>
template.copy(singleNames = Set(name))
}
result ++= renames.map { rename =>
template.copy(renames = Map(rename))
}
result ++= hiddenNames.map { hidden =>
this.toHiddenNameInfo(hidden)
}
if (hasWildcard) {
result += this.toWildcardInfo
}
result
}
def merge(second: ImportInfo): ImportInfo = {
val relative = this.relative.orElse(second.relative)
val rootUsed = relative.isEmpty && (this.rootUsed || second.rootUsed)
new ImportInfo(this.prefixQualifier, relative,
this.allNames ++ second.allNames, this.singleNames ++ second.singleNames,
this.renames ++ second.renames, this.hiddenNames ++ second.hiddenNames,
this.hasWildcard || second.hasWildcard, rootUsed, this.isStableImport && second.isStableImport,
this.allNamesForWildcard)
}
def isSimpleWildcard = hasWildcard && singleNames.isEmpty && renames.isEmpty && hiddenNames.isEmpty
def namesFromWildcard: Set[String] = {
if (hasWildcard) allNames -- singleNames -- renames.keySet
else Set.empty[String]
}
private def template: ImportInfo =
copy(singleNames = Set.empty, renames = Map.empty, hiddenNames = Set.empty, allNames = allNamesForWildcard, hasWildcard = false)
def toWildcardInfo: ImportInfo = template.copy(hasWildcard = true)
def toHiddenNameInfo(name: String): ImportInfo = template.copy(hiddenNames = Set(name))
def withRootPrefix: ImportInfo = copy(rootUsed = true)
}
object ImportInfo {
def apply(imp: ScImportExpr, isImportUsed: ImportUsed => Boolean): Option[ImportInfo] = {
def name(s: String) = ScalaNamesUtil.changeKeyword(s)
val qualifier = imp.qualifier
if (qualifier == null) return None //ignore invalid imports
val importsUsed = ArrayBuffer[ImportUsed]()
val allNames = mutable.HashSet[String]()
val singleNames = mutable.HashSet[String]()
val renames = mutable.HashMap[String, String]()
val hiddenNames = mutable.HashSet[String]()
var hasWildcard = false
val namesForWildcard = mutable.HashSet[String]()
val implicitNames = mutable.HashSet[String]()
var hasNonUsedImplicits = false
def shouldAddName(resolveResult: ResolveResult): Boolean = {
resolveResult match {
case ScalaResolveResult(p: PsiPackage, _) => true
case ScalaResolveResult(m: PsiMethod, _) => m.containingClass != null
case ScalaResolveResult(td: ScTypedDefinition, _) if td.isStable => true
case ScalaResolveResult(_: ScTypeAlias, _) => true
case ScalaResolveResult(_: PsiClass, _) => true
case ScalaResolveResult(f: PsiField, _) => f.hasFinalModifier
case _ => false
}
}
def addAllNames(ref: ScStableCodeReferenceElement, nameToAdd: String): Unit = {
if (ref.multiResolve(false).exists(shouldAddName)) allNames += nameToAdd
}
def collectAllNamesForWildcard(): Unit = {
val refText = imp.qualifier.getText + ".someIdentifier"
val reference = ScalaPsiElementFactory.createReferenceFromText(refText, imp.qualifier.getContext, imp.qualifier)
.asInstanceOf[ScStableCodeReferenceElementImpl]
val processor = new CompletionProcessor(StdKinds.stableImportSelector, reference, collectImplicits = true, includePrefixImports = false)
reference.doResolve(reference, processor).foreach {
case rr: ScalaResolveResult if shouldAddName(rr) =>
val element = rr.element
val nameToAdd = name(element.name)
namesForWildcard += nameToAdd
if (ScalaPsiUtil.isImplicit(element))
implicitNames += nameToAdd
case _ =>
}
}
collectAllNamesForWildcard()
if (!imp.singleWildcard && imp.selectorSet.isEmpty) {
val importUsed: ImportExprUsed = ImportExprUsed(imp)
if (isImportUsed(importUsed)) {
importsUsed += importUsed
imp.reference match {
case Some(ref) =>
singleNames += ref.refName
addAllNames(ref, ref.refName)
case None => //something is not valid
}
}
} else if (imp.singleWildcard) {
val importUsed =
if (imp.selectorSet.isEmpty) ImportExprUsed(imp)
else ImportWildcardSelectorUsed(imp)
if (isImportUsed(importUsed)) {
importsUsed += importUsed
hasWildcard = true
allNames ++= namesForWildcard
}
}
for (selector <- imp.selectors) {
val importUsed: ImportSelectorUsed = ImportSelectorUsed(selector)
if (isImportUsed(importUsed)) {
importsUsed += importUsed
val refName: String = selector.reference.refName
if (selector.isAliasedImport) {
val importedName: String = selector.importedName
if (importedName == "_") {
hiddenNames += refName
} else if (importedName == refName) {
singleNames += refName
addAllNames(selector.reference, refName)
} else {
renames += ((refName, importedName))
addAllNames(selector.reference, importedName)
}
} else {
singleNames += refName
addAllNames(selector.reference, refName)
}
}
}
if (importsUsed.isEmpty) return None //all imports are empty
allNames --= hiddenNames
hasNonUsedImplicits = (implicitNames -- singleNames).nonEmpty
@tailrec
def deepestQualifier(ref: ScStableCodeReferenceElement): ScStableCodeReferenceElement = {
ref.qualifier match {
case Some(q) => deepestQualifier(q)
case None => ref
}
}
def packageFqn(p: PsiPackage): String = {
p.getParentPackage match {
case null => name(p.getName)
case parent if parent.getName == null => name(p.getName)
case parent => packageFqn(parent) + "." + name(p.getName)
}
}
@tailrec
def explicitQualifierString(ref: ScStableCodeReferenceElement, withDeepest: Boolean, res: String = ""): String = {
ref.qualifier match {
case Some(q) => explicitQualifierString(q, withDeepest, ref.refName + withDot(res))
case None if withDeepest && ref.refName != _root_prefix => ref.refName + withDot(res)
case None => res
}
}
def withDot(s: String): String = {
if (s.isEmpty) "" else "." + s
}
@tailrec
def isRelativeObject(o: ScObject, res: Boolean = false): Boolean = {
o.getContext match {
case _: ScTemplateBody =>
o.containingClass match {
case containingObject: ScObject => isRelativeObject(containingObject, res = true)
case _ => false //inner of some class/trait
}
case _: ScPackaging | _: ScalaFile => true
case _ => res //something in default package or in local object
}
}
def qualifiedRef(ref: ScStableCodeReferenceElement): String = {
if (ref.getText == _root_prefix) return _root_prefix
val refName = ref.refName
ref.bind() match {
case Some(ScalaResolveResult(p: PsiPackage, _)) =>
if (p.getParentPackage != null && p.getParentPackage.getName != null) packageFqn(p)
else refName
case Some(ScalaResolveResult(o: ScObject, _)) =>
if (isRelativeObject(o)) o.qualifiedName
else refName
case Some(ScalaResolveResult(c: PsiClass, _)) =>
val parts = c.qualifiedName.split('.')
if (parts.length > 1) parts.map(name).mkString(".") else refName
case Some(ScalaResolveResult(td: ScTypedDefinition, _)) =>
ScalaPsiUtil.nameContext(td) match {
case m: ScMember =>
m.containingClass match {
case o: ScObject if isRelativeObject(o, res = true) =>
o.qualifiedName + withDot(refName)
case _ => refName
}
case _ => refName
}
case Some(ScalaResolveResult(f: PsiField, _)) =>
val clazzFqn = f.containingClass match {
            case null => throw new IllegalStateException() // something is wrong
case clazz => clazz.qualifiedName.split('.').map(name).mkString(".")
}
clazzFqn + withDot(refName)
case _ => throw new IllegalStateException() //do not process invalid import
}
}
val deepRef = deepestQualifier(qualifier)
val rootUsed = deepRef.getText == _root_prefix
val (prefixQualifier, isRelative) =
if (rootUsed) (explicitQualifierString(qualifier, withDeepest = false), false)
else {
val qualifiedDeepRef =
try qualifiedRef(deepRef)
catch {
case _: IllegalStateException => return None
}
val prefixQual = qualifiedDeepRef + withDot(explicitQualifierString(qualifier, withDeepest = false))
val relative = qualifiedDeepRef != deepRef.getText
(prefixQual, relative)
}
val relativeQualifier =
if (isRelative) Some(explicitQualifierString(qualifier, withDeepest = true))
else None
val isStableImport = {
deepRef.resolve() match {
case named: PsiNamedElement => ScalaPsiUtil.hasStablePath(named)
case _ => false
}
}
Some(new ImportInfo(prefixQualifier, relativeQualifier, allNames.toSet,
singleNames.toSet, renames.toMap, hiddenNames.toSet, hasWildcard, rootUsed,
isStableImport, namesForWildcard.toSet, hasNonUsedImplicits))
}
def merge(infos: Seq[ImportInfo]): Option[ImportInfo] = infos.reduceOption(_ merge _)
} | igrocki/intellij-scala | src/org/jetbrains/plugins/scala/editor/importOptimizer/ImportInfo.scala | Scala | apache-2.0 | 11,943 |
package com.phaller.rasync.lattice
package lattices
class PowerSetLattice[T] extends Lattice[Set[T]] {
def join(left: Set[T], right: Set[T]): Set[T] =
left ++ right
val bottom: Set[T] =
Set[T]()
}
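
// Small illustrative usage, not part of the original file; it relies only on the `join` and
// `bottom` members defined above: `join` is set union and `bottom` is the empty set.
object PowerSetLatticeExample {
  def main(args: Array[String]): Unit = {
    val lattice = new PowerSetLattice[Int]
    println(lattice.join(Set(1, 2), Set(2, 3))) // Set(1, 2, 3)
    println(lattice.bottom)                     // Set()
  }
}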
| phaller/reactive-async | core/src/main/scala/com/phaller/rasync/lattice/lattices/PowerSetLattice.scala | Scala | bsd-2-clause | 213 |
/*
* Twitter Korean Text - Scala library to process Korean text
*
* Copyright 2014 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.penguin.korean.v1.util
import com.twitter.penguin.korean.v1.tokenizer.KoreanTokenizer.KoreanToken
import com.twitter.penguin.korean.v1.util.Hangul._
import com.twitter.penguin.korean.v1.util.KoreanDictionaryProvider._
import com.twitter.penguin.korean.v1.util.KoreanPos._
/**
* Helper methods for Korean nouns and josas.
*/
object KoreanSubstantive {
val JOSA_HEAD_FOR_CODA: Set[Char] = Set('은', '이', '을', '과', '아')
val JOSA_HEAD_FOR_NO_CODA: Set[Char] = Set('는', '가', '를', '와', '야', '여')
protected[korean] def isJosaAttachable(prevChar: Char, headChar: Char): Boolean = {
(hasCoda(prevChar) && !JOSA_HEAD_FOR_NO_CODA.contains(headChar)) ||
(!hasCoda(prevChar) && !JOSA_HEAD_FOR_CODA.contains(headChar))
}
protected[korean] def isName(chunk: CharSequence): Boolean = {
if (chunk.length() != 3) return false
nameDictionay('family_name).contains(chunk.charAt(0).toString) &&
nameDictionay('given_name).contains(chunk.subSequence(1, 3).toString)
}
val NUMBER_CHARS = "일이삼사오육칠팔구천백십해경조억만".map(_.toInt).toSet
val NUMBER_LAST_CHARS = "일이삼사오육칠팔구천백십해경조억만원배분초".map(_.toInt).toSet
protected[korean] def isKoreanNumber(chunk: CharSequence): Boolean =
(0 to chunk.length() - 1).foldLeft(true) {
case (output, i) if i < chunk.length() - 1 => output && NUMBER_CHARS.contains(chunk.charAt(i).toInt)
case (output, i) => output && NUMBER_LAST_CHARS.contains(chunk.charAt(i).toInt)
}
/**
* Collapse all the one-char nouns into one unknown noun
*
* @param posNodes sequence of KoreanTokens
* @return sequence of collapsed KoreanTokens
*/
protected[korean] def collapseNouns(posNodes: Seq[KoreanToken]): Seq[KoreanToken] = {
val (nodes, collapsing) = posNodes.foldLeft((List[KoreanToken](), false)) {
case ((pl: List[KoreanToken], collapsing: Boolean), p: KoreanToken)
if p.pos == Noun && p.text.length == 1 && collapsing =>
(KoreanToken(pl.head.text + p.text, Noun, unknown = true) :: pl.tail, true)
case ((pl: List[KoreanToken], collapsing: Boolean), p: KoreanToken)
if p.pos == Noun && p.text.length == 1 && !collapsing =>
(p :: pl, true)
case ((pl: List[KoreanToken], collapsing: Boolean), p: KoreanToken) =>
(p :: pl, false)
}
nodes.reverse.toSeq
}
}
| NamHosung/SE | src/main/scala/com/twitter/penguin/korean/v1/util/KoreanSubstantive.scala | Scala | apache-2.0 | 3,085 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package kafka.api
import java.io.File
import java.{lang, util}
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
import java.util.Properties
import kafka.api.GroupedUserPrincipalBuilder._
import kafka.api.GroupedUserQuotaCallback._
import kafka.server._
import kafka.utils.JaasTestUtils.ScramLoginModule
import kafka.utils.{JaasTestUtils, Logging, TestUtils}
import kafka.zk.ConfigEntityChangeNotificationZNode
import org.apache.kafka.clients.admin.{Admin, AdminClientConfig}
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.{Cluster, Reconfigurable}
import org.apache.kafka.common.config.SaslConfigs
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.security.auth._
import org.apache.kafka.server.quota._
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
import scala.collection.mutable.ArrayBuffer
import scala.jdk.CollectionConverters._
class CustomQuotaCallbackTest extends IntegrationTestHarness with SaslSetup {
override protected def securityProtocol = SecurityProtocol.SASL_SSL
override protected def listenerName = new ListenerName("CLIENT")
override protected def interBrokerListenerName: ListenerName = new ListenerName("BROKER")
override protected lazy val trustStoreFile = Some(File.createTempFile("truststore", ".jks"))
override val brokerCount: Int = 2
private val kafkaServerSaslMechanisms = Seq("SCRAM-SHA-256")
private val kafkaClientSaslMechanism = "SCRAM-SHA-256"
override protected val serverSaslProperties = Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism))
override protected val clientSaslProperties = Some(kafkaClientSaslProperties(kafkaClientSaslMechanism))
private val adminClients = new ArrayBuffer[Admin]()
private var producerWithoutQuota: KafkaProducer[Array[Byte], Array[Byte]] = _
val defaultRequestQuota = 1000
val defaultProduceQuota = 2000 * 1000 * 1000
val defaultConsumeQuota = 1000 * 1000 * 1000
@BeforeEach
override def setUp(): Unit = {
startSasl(jaasSections(kafkaServerSaslMechanisms, Some("SCRAM-SHA-256"), KafkaSasl, JaasTestUtils.KafkaServerContextName))
this.serverConfig.setProperty(KafkaConfig.ProducerQuotaBytesPerSecondDefaultProp, Long.MaxValue.toString)
this.serverConfig.setProperty(KafkaConfig.ConsumerQuotaBytesPerSecondDefaultProp, Long.MaxValue.toString)
this.serverConfig.setProperty(KafkaConfig.ClientQuotaCallbackClassProp, classOf[GroupedUserQuotaCallback].getName)
this.serverConfig.setProperty(s"${listenerName.configPrefix}${KafkaConfig.PrincipalBuilderClassProp}",
classOf[GroupedUserPrincipalBuilder].getName)
this.serverConfig.setProperty(KafkaConfig.DeleteTopicEnableProp, "true")
super.setUp()
brokerList = TestUtils.bootstrapServers(servers, listenerName)
producerConfig.put(SaslConfigs.SASL_JAAS_CONFIG,
ScramLoginModule(JaasTestUtils.KafkaScramAdmin, JaasTestUtils.KafkaScramAdminPassword).toString)
producerWithoutQuota = createProducer()
}
@AfterEach
override def tearDown(): Unit = {
adminClients.foreach(_.close())
GroupedUserQuotaCallback.tearDown()
super.tearDown()
}
override def configureSecurityBeforeServersStart(): Unit = {
super.configureSecurityBeforeServersStart()
zkClient.makeSurePersistentPathExists(ConfigEntityChangeNotificationZNode.path)
createScramCredentials(zkConnect, JaasTestUtils.KafkaScramAdmin, JaasTestUtils.KafkaScramAdminPassword)
}
@Test
def testCustomQuotaCallback(): Unit = {
// Large quota override, should not throttle
var brokerId = 0
var user = createGroupWithOneUser("group0_user1", brokerId)
user.configureAndWaitForQuota(1000000, 2000000)
quotaLimitCalls.values.foreach(_.set(0))
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// ClientQuotaCallback#quotaLimit is invoked by each quota manager once per throttled produce request for each client
assertEquals(1, quotaLimitCalls(ClientQuotaType.PRODUCE).get)
    // ClientQuotaCallback#quotaLimit is invoked once for each unthrottled request and twice for each throttled request.
    // Since we don't know the total number of requests, we verify it was called at least twice (i.e. at least one request was throttled).
assertTrue(quotaLimitCalls(ClientQuotaType.FETCH).get > 2, "quotaLimit must be called at least twice")
assertTrue(quotaLimitCalls(ClientQuotaType.REQUEST).get <= 10, s"Too many quotaLimit calls $quotaLimitCalls") // sanity check
// Large quota updated to small quota, should throttle
user.configureAndWaitForQuota(9000, 3000)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Quota override deletion - verify default quota applied (large quota, no throttling)
user = addUser("group0_user2", brokerId)
user.removeQuotaOverrides()
user.waitForQuotaUpdate(defaultProduceQuota, defaultConsumeQuota, defaultRequestQuota)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Make default quota smaller, should throttle
user.configureAndWaitForQuota(8000, 2500, divisor = 1, group = None)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Configure large quota override, should not throttle
user = addUser("group0_user3", brokerId)
user.configureAndWaitForQuota(2000000, 2000000)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Quota large enough for one partition, should not throttle
brokerId = 1
user = createGroupWithOneUser("group1_user1", brokerId)
user.configureAndWaitForQuota(8000 * 100, 2500 * 100)
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Create large number of partitions on another broker, should result in throttling on first partition
val largeTopic = "group1_largeTopic"
createTopic(largeTopic, numPartitions = 99, leader = 0)
user.waitForQuotaUpdate(8000, 2500, defaultRequestQuota)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Remove quota override and test default quota applied with scaling based on partitions
user = addUser("group1_user2", brokerId)
user.waitForQuotaUpdate(defaultProduceQuota / 100, defaultConsumeQuota / 100, defaultRequestQuota)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
user.configureAndWaitForQuota(8000 * 100, 2500 * 100, divisor=100, group = None)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Remove the second topic with large number of partitions, verify no longer throttled
adminZkClient.deleteTopic(largeTopic)
user = addUser("group1_user3", brokerId)
user.waitForQuotaUpdate(8000 * 100, 2500 * 100, defaultRequestQuota)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Alter configs of custom callback dynamically
val adminClient = createAdminClient()
val newProps = new Properties
newProps.put(GroupedUserQuotaCallback.DefaultProduceQuotaProp, "8000")
newProps.put(GroupedUserQuotaCallback.DefaultFetchQuotaProp, "2500")
TestUtils.incrementalAlterConfigs(servers, adminClient, newProps, perBrokerConfig = false)
user.waitForQuotaUpdate(8000, 2500, defaultRequestQuota)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
assertEquals(brokerCount, callbackInstances.get)
}
/**
* Creates a group with one user and one topic with one partition.
* @param firstUser First user to create in the group
* @param brokerId The broker id to use as leader of the partition
*/
private def createGroupWithOneUser(firstUser: String, brokerId: Int): GroupedUser = {
val user = addUser(firstUser, brokerId)
createTopic(user.topic, numPartitions = 1, brokerId)
user.configureAndWaitForQuota(defaultProduceQuota, defaultConsumeQuota, divisor = 1, group = None)
user
}
private def createTopic(topic: String, numPartitions: Int, leader: Int): Unit = {
val assignment = (0 until numPartitions).map { i => i -> Seq(leader) }.toMap
TestUtils.createTopic(zkClient, topic, assignment, servers)
}
private def createAdminClient(): Admin = {
val config = new util.HashMap[String, Object]
config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
TestUtils.bootstrapServers(servers, new ListenerName("BROKER")))
clientSecurityProps("admin-client").asInstanceOf[util.Map[Object, Object]].forEach { (key, value) =>
config.put(key.toString, value)
}
config.put(SaslConfigs.SASL_JAAS_CONFIG,
ScramLoginModule(JaasTestUtils.KafkaScramAdmin, JaasTestUtils.KafkaScramAdminPassword).toString)
val adminClient = Admin.create(config)
adminClients += adminClient
adminClient
}
private def produceWithoutThrottle(topic: String, numRecords: Int): Unit = {
(0 until numRecords).foreach { i =>
val payload = i.toString.getBytes
producerWithoutQuota.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, null, null, payload))
}
}
private def passwordForUser(user: String) = {
s"$user:secret"
}
private def addUser(user: String, leader: Int): GroupedUser = {
val adminClient = createAdminClient()
createScramCredentials(adminClient, user, passwordForUser(user))
waitForUserScramCredentialToAppearOnAllBrokers(user, kafkaClientSaslMechanism)
groupedUser(adminClient, user, leader)
}
private def groupedUser(adminClient: Admin, user: String, leader: Int): GroupedUser = {
val password = passwordForUser(user)
val userGroup = group(user)
val topic = s"${userGroup}_topic"
val producerClientId = s"$user:producer-client-id"
val consumerClientId = s"$user:consumer-client-id"
producerConfig.put(ProducerConfig.CLIENT_ID_CONFIG, producerClientId)
producerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, ScramLoginModule(user, password).toString)
consumerConfig.put(ConsumerConfig.CLIENT_ID_CONFIG, consumerClientId)
consumerConfig.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 4096.toString)
consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, s"$user-group")
consumerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, ScramLoginModule(user, password).toString)
GroupedUser(user, userGroup, topic, servers(leader), producerClientId, consumerClientId,
createProducer(), createConsumer(), adminClient)
}
case class GroupedUser(user: String, userGroup: String, topic: String, leaderNode: KafkaServer,
producerClientId: String, consumerClientId: String,
override val producer: KafkaProducer[Array[Byte], Array[Byte]],
override val consumer: KafkaConsumer[Array[Byte], Array[Byte]],
override val adminClient: Admin) extends
QuotaTestClients(topic, leaderNode, producerClientId, consumerClientId, producer, consumer, adminClient) {
override def userPrincipal: KafkaPrincipal = GroupedUserPrincipal(user, userGroup)
override def quotaMetricTags(clientId: String): Map[String, String] = {
Map(GroupedUserQuotaCallback.QuotaGroupTag -> userGroup)
}
override def overrideQuotas(producerQuota: Long, consumerQuota: Long, requestQuota: Double): Unit = {
configureQuota(userGroup, producerQuota, consumerQuota, requestQuota)
}
override def removeQuotaOverrides(): Unit = {
alterClientQuotas(
clientQuotaAlteration(
clientQuotaEntity(Some(quotaEntityName(userGroup)), None),
None, None, None
)
)
}
def configureQuota(userGroup: String, producerQuota: Long, consumerQuota: Long, requestQuota: Double): Unit = {
alterClientQuotas(
clientQuotaAlteration(
clientQuotaEntity(Some(quotaEntityName(userGroup)), None),
Some(producerQuota), Some(consumerQuota), Some(requestQuota)
)
)
}
def configureAndWaitForQuota(produceQuota: Long, fetchQuota: Long, divisor: Int = 1,
group: Option[String] = Some(userGroup)): Unit = {
configureQuota(group.getOrElse(""), produceQuota, fetchQuota, defaultRequestQuota)
waitForQuotaUpdate(produceQuota / divisor, fetchQuota / divisor, defaultRequestQuota)
}
def produceConsume(expectProduceThrottle: Boolean, expectConsumeThrottle: Boolean): Unit = {
val numRecords = 1000
val produced = produceUntilThrottled(numRecords, waitForRequestCompletion = false)
// don't verify request channel metrics as it's difficult to write non flaky assertions
// given the specifics of this test (throttle metric removal followed by produce/consume
// until throttled)
verifyProduceThrottle(expectProduceThrottle, verifyClientMetric = false,
verifyRequestChannelMetric = false)
// make sure there are enough records on the topic to test consumer throttling
produceWithoutThrottle(topic, numRecords - produced)
consumeUntilThrottled(numRecords, waitForRequestCompletion = false)
verifyConsumeThrottle(expectConsumeThrottle, verifyClientMetric = false,
verifyRequestChannelMetric = false)
}
def removeThrottleMetrics(): Unit = {
def removeSensors(quotaType: QuotaType, clientId: String): Unit = {
val sensorSuffix = quotaMetricTags(clientId).values.mkString(":")
leaderNode.metrics.removeSensor(s"${quotaType}ThrottleTime-$sensorSuffix")
leaderNode.metrics.removeSensor(s"$quotaType-$sensorSuffix")
}
removeSensors(QuotaType.Produce, producerClientId)
removeSensors(QuotaType.Fetch, consumerClientId)
removeSensors(QuotaType.Request, producerClientId)
removeSensors(QuotaType.Request, consumerClientId)
}
private def quotaEntityName(userGroup: String): String = s"${userGroup}_"
}
}
object GroupedUserPrincipalBuilder {
def group(str: String): String = {
if (str.indexOf("_") <= 0)
""
else
str.substring(0, str.indexOf("_"))
}
}
class GroupedUserPrincipalBuilder extends KafkaPrincipalBuilder {
override def build(context: AuthenticationContext): KafkaPrincipal = {
val securityProtocol = context.securityProtocol
if (securityProtocol == SecurityProtocol.SASL_PLAINTEXT || securityProtocol == SecurityProtocol.SASL_SSL) {
val user = context.asInstanceOf[SaslAuthenticationContext].server().getAuthorizationID
val userGroup = group(user)
if (userGroup.isEmpty)
new KafkaPrincipal(KafkaPrincipal.USER_TYPE, user)
else
GroupedUserPrincipal(user, userGroup)
} else
throw new IllegalStateException(s"Unexpected security protocol $securityProtocol")
}
}
case class GroupedUserPrincipal(user: String, userGroup: String) extends KafkaPrincipal(KafkaPrincipal.USER_TYPE, user)
object GroupedUserQuotaCallback {
val QuotaGroupTag = "group"
val DefaultProduceQuotaProp = "default.produce.quota"
val DefaultFetchQuotaProp = "default.fetch.quota"
val UnlimitedQuotaMetricTags = new util.HashMap[String, String]
val quotaLimitCalls = Map(
ClientQuotaType.PRODUCE -> new AtomicInteger,
ClientQuotaType.FETCH -> new AtomicInteger,
ClientQuotaType.REQUEST -> new AtomicInteger
)
val callbackInstances = new AtomicInteger
def tearDown(): Unit = {
callbackInstances.set(0)
quotaLimitCalls.values.foreach(_.set(0))
UnlimitedQuotaMetricTags.clear()
}
}
/**
* Quota callback for a grouped user. Both user principals and topics of each group
* are prefixed with the group name followed by '_'. This callback defines quotas of different
* types at the group level. Group quotas are configured in ZooKeeper as user quotas with
* the entity name "${group}_". Default group quotas are configured in ZooKeeper as user quotas
* with the entity name "_".
*
* Default group quotas may also be configured using the configuration options
* "default.produce.quota" and "default.fetch.quota" which can be reconfigured dynamically
* without restarting the broker. This tests custom reconfigurable options for quota callbacks,
*/
class GroupedUserQuotaCallback extends ClientQuotaCallback with Reconfigurable with Logging {
var brokerId: Int = -1
val customQuotasUpdated = ClientQuotaType.values.map(quotaType => quotaType -> new AtomicBoolean).toMap
val quotas = ClientQuotaType.values.map(quotaType => quotaType -> new ConcurrentHashMap[String, Double]).toMap
val partitionRatio = new ConcurrentHashMap[String, Double]()
override def configure(configs: util.Map[String, _]): Unit = {
brokerId = configs.get(KafkaConfig.BrokerIdProp).toString.toInt
callbackInstances.incrementAndGet
}
override def reconfigurableConfigs: util.Set[String] = {
Set(DefaultProduceQuotaProp, DefaultFetchQuotaProp).asJava
}
override def validateReconfiguration(configs: util.Map[String, _]): Unit = {
reconfigurableConfigs.forEach(configValue(configs, _))
}
override def reconfigure(configs: util.Map[String, _]): Unit = {
configValue(configs, DefaultProduceQuotaProp).foreach(value => quotas(ClientQuotaType.PRODUCE).put("", value.toDouble))
configValue(configs, DefaultFetchQuotaProp).foreach(value => quotas(ClientQuotaType.FETCH).put("", value.toDouble))
customQuotasUpdated.values.foreach(_.set(true))
}
private def configValue(configs: util.Map[String, _], key: String): Option[Long] = {
val value = configs.get(key)
if (value != null) Some(value.toString.toLong) else None
}
override def quotaMetricTags(quotaType: ClientQuotaType, principal: KafkaPrincipal, clientId: String): util.Map[String, String] = {
principal match {
case groupPrincipal: GroupedUserPrincipal =>
val userGroup = groupPrincipal.userGroup
val quotaLimit = quotaOrDefault(userGroup, quotaType)
if (quotaLimit != null)
Map(QuotaGroupTag -> userGroup).asJava
else
UnlimitedQuotaMetricTags
case _ =>
UnlimitedQuotaMetricTags
}
}
override def quotaLimit(quotaType: ClientQuotaType, metricTags: util.Map[String, String]): lang.Double = {
quotaLimitCalls(quotaType).incrementAndGet
val group = metricTags.get(QuotaGroupTag)
if (group != null) quotaOrDefault(group, quotaType) else null
}
override def updateClusterMetadata(cluster: Cluster): Boolean = {
val topicsByGroup = cluster.topics.asScala.groupBy(group)
topicsByGroup.map { case (group, groupTopics) =>
val groupPartitions = groupTopics.flatMap(topic => cluster.partitionsForTopic(topic).asScala)
val totalPartitions = groupPartitions.size
val partitionsOnThisBroker = groupPartitions.count { p => p.leader != null && p.leader.id == brokerId }
val multiplier = if (totalPartitions == 0)
1
else if (partitionsOnThisBroker == 0)
1.0 / totalPartitions
else
partitionsOnThisBroker.toDouble / totalPartitions
partitionRatio.put(group, multiplier) != multiplier
}.exists(identity)
}
override def updateQuota(quotaType: ClientQuotaType, quotaEntity: ClientQuotaEntity, newValue: Double): Unit = {
quotas(quotaType).put(userGroup(quotaEntity), newValue)
}
override def removeQuota(quotaType: ClientQuotaType, quotaEntity: ClientQuotaEntity): Unit = {
quotas(quotaType).remove(userGroup(quotaEntity))
}
override def quotaResetRequired(quotaType: ClientQuotaType): Boolean = customQuotasUpdated(quotaType).getAndSet(false)
def close(): Unit = {}
private def userGroup(quotaEntity: ClientQuotaEntity): String = {
val configEntity = quotaEntity.configEntities.get(0)
if (configEntity.entityType == ClientQuotaEntity.ConfigEntityType.USER)
group(configEntity.name)
else
throw new IllegalArgumentException(s"Config entity type ${configEntity.entityType} is not supported")
}
private def quotaOrDefault(group: String, quotaType: ClientQuotaType): lang.Double = {
val quotaMap = quotas(quotaType)
var quotaLimit: Any = quotaMap.get(group)
if (quotaLimit == null)
quotaLimit = quotaMap.get("")
if (quotaLimit != null) scaledQuota(quotaType, group, quotaLimit.asInstanceOf[Double]) else null
}
private def scaledQuota(quotaType: ClientQuotaType, group: String, configuredQuota: Double): Double = {
if (quotaType == ClientQuotaType.REQUEST)
configuredQuota
else {
val multiplier = partitionRatio.get(group)
if (multiplier <= 0.0) configuredQuota else configuredQuota * multiplier
}
}
}
| Chasego/kafka | core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala | Scala | apache-2.0 | 21,711 |
/*
This file is part of Intake24.
Copyright 2015, 2016 Newcastle University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package uk.ac.ncl.openlab.intake24.services.foodindex.english
import org.scalatest.FunSuite
import uk.ac.ncl.openlab.intake24.api.shared.admin.SplitList
class EnglishSplitterTest extends FunSuite {
val splitWords = Seq("and", "with")
val ignorePairs = Map[String, Set[String]](
"sweet" -> Set("sour"),
"qweqwe" -> Set("*")
)
val splitList = SplitList(splitWords, ignorePairs)
val splitter = EnglishSplitter(splitList)
test ("Empty input") {
assert(splitter.split("") === Seq(""))
}
test ("Single split word") {
assert (splitter.split("fish and chips") === Seq("fish", "chips"))
}
test ("Repeated split word") {
assert (splitter.split("fish and and with chips and and dog") === Seq("fish", "chips", "dog"))
}
test ("Keep pair") {
assert (splitter.split("sweet and sour chicken") === Seq("sweet and sour chicken"))
}
test ("Keep pair wildcard") {
assert (splitter.split("qweqwe with 123") === Seq("qweqwe with 123"))
}
test ("Commas and ampersands") {
assert (splitter.split("x,y & z") === Seq("x", "y", "z"))
}
test ("Multiple commas in a row") {
assert(splitter.split("x,,y,,,z") === Seq("x", "y", "z"))
}
test("Mixed split words and characters") {
assert(splitter.split("x, sweet and sour chicken with y &&& z") === Seq("x", "sweet and sour chicken", "y", "z"))
}
test("Ignore leading split words") {
assert(splitter.split(",, and chicken with potatoes") === Seq("chicken", "potatoes"))
}
test("Ignore trailing split words") {
assert(splitter.split("chicken with potatoes and ,,,, , , , with ") === Seq("chicken", "potatoes"))
}
test("Ignore leading and trailing split words") {
assert(splitter.split(" , , with chicken and potatoes with &, ") === Seq("chicken", "potatoes"))
}
} | digitalinteraction/intake24 | FoodDataServices/src/test/scala/uk/ac/ncl/openlab/intake24/services/foodindex/english/EnglishSplitterTest.scala | Scala | apache-2.0 | 2,474 |
package com.sksamuel.scapegoat.inspections.controlflow
import com.sksamuel.scapegoat.{Inspection, InspectionContext, Inspector, Levels}
class RepeatedIfElseBody
extends Inspection(
text = "Repeated body of if main and else branch",
defaultLevel = Levels.Warning,
description = "Checks for the main branch and the else branch of an if being the same.",
explanation =
"The if statement could be refactored if both branches are the same or start with the same."
) {
def inspector(context: InspectionContext): Inspector =
new Inspector(context) {
override def postTyperTraverser =
new context.Traverser {
import context.global._
private def isRepeated(mainBranch: Tree, elseBranch: Tree): Boolean =
mainBranch.toString() == elseBranch.toString()
private def twoBlocksStartWithTheSame(oneBlock: Block, another: Block): Boolean = {
(oneBlock.children.headOption, another.children.headOption) match {
case (Some(statement1), Some(statement2)) if statement1.toString == statement2.toString => true
case _ => false
}
}
override def inspect(tree: Tree): Unit = {
tree match {
case If(_, mainBranch, elseBranch) if isRepeated(mainBranch, elseBranch) =>
context
.warn(tree.pos, self, tree.toString.take(500), "Main and else branches of if are repeated.")
case If(_, mainBranch @ Block(_, _), elseBranch @ Block(_, _))
if twoBlocksStartWithTheSame(mainBranch, elseBranch) =>
context.warn(
tree.pos,
self,
tree.toString.take(500),
"Main and else branches start with the same command."
)
case _ => continue(tree)
}
}
}
}
}
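
// Illustrative only, not taken from the original sources: a minimal example of the code shape
// this inspection is meant to report, where the main and else branches are textually identical.
private object RepeatedIfElseBodyExample {
  def classify(n: Int): String =
    if (n > 0) "value" else "value" // both branches repeated, so the conditional is redundant
}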
| sksamuel/scapegoat | src/main/scala/com/sksamuel/scapegoat/inspections/controlflow/RepeatedIfElseBody.scala | Scala | apache-2.0 | 2,014 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger, CtTypeConverters}
import uk.gov.hmrc.ct.ct600.v3.retriever.CT600BoxRetriever
case class B295(value: Int) extends CtBoxIdentifier(name = "Total of deductions and reliefs") with CtInteger
object B295 extends Calculated[B295, CT600BoxRetriever] with CtTypeConverters {
override def calculate(boxRetriever: CT600BoxRetriever): B295 = {
B295(boxRetriever.b275() + boxRetriever.b285())
}
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v3/B295.scala | Scala | apache-2.0 | 1,099 |
/* Copyright 2009-2021 EPFL, Lausanne */
package stainless
package ast
class Symbol private[stainless](val path: Seq[String], private[stainless] val id: Int) {
def this(name: String, id: Int) = this(name.split("\\\\.").toSeq, id)
val name: String = path.mkString(".")
override def equals(that: Any): Boolean = that match {
case s: Symbol => id == s.id
case _ => false
}
override def hashCode: Int = id
override def toString: String = s"$name@$id"
}
object Symbol {
private val counter = new inox.utils.UniqueCounter[Unit]
def apply(name: String) = new Symbol(name, counter.nextGlobal)
}
class SymbolIdentifier private[stainless](id: Identifier, val symbol: Symbol)
extends Identifier(id.name, id.globalId, id.id, alwaysShowUniqueID = false) {
override def freshen: SymbolIdentifier = new SymbolIdentifier(id.freshen, symbol)
}
object SymbolIdentifier {
def apply(name: String): SymbolIdentifier = {
new SymbolIdentifier(FreshIdentifier(name.split("\\\\.").last), Symbol(name))
}
def apply(sym: Symbol): SymbolIdentifier = {
new SymbolIdentifier(FreshIdentifier(sym.path.last), sym)
}
def unapply(id: SymbolIdentifier): Option[String] = Some(id.symbol.name)
extension (id: Identifier) {
def unsafeToSymbolIdentifier: SymbolIdentifier = id.asInstanceOf[SymbolIdentifier]
}
}
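
// Illustrative sketch, not from the original file: identifiers built from the same Symbol share
// that symbol, and `freshen` renews the identifier while preserving the symbol. The dotted path
// used here is just an example value.
private object SymbolIdentifierExample {
  def demo(): Unit = {
    val sym = Symbol("stainless.collection.List.map")
    val id1 = SymbolIdentifier(sym)
    val id2 = id1.freshen
    assert(id1.symbol == id2.symbol) // same underlying Symbol, compared by its private id
    assert(SymbolIdentifier.unapply(id1).contains("stainless.collection.List.map"))
  }
}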
| epfl-lara/stainless | core/src/main/scala/stainless/ast/SymbolIdentifier.scala | Scala | apache-2.0 | 1,336 |
package almhirt.i18n
import almhirt.common._
import scalaz.syntax.validation._
import com.ibm.icu.text.MeasureFormat.FormatWidth
sealed trait MeasureRenderWidth
object MeasureRenderWidth {
/**
* Spell out everything.
*/
case object Wide extends MeasureRenderWidth
/**
* Abbreviate when possible.
*/
case object Short extends MeasureRenderWidth
/**
* Brief. Use only a symbol for the unit when possible.
*/
case object Narrow extends MeasureRenderWidth
/**
* Identical to NARROW except when formatMeasures is called with
* an hour and minute; minute and second; or hour, minute, and second Measures.
* In these cases formatMeasures formats as 5:37:23 instead of 5h, 37m, 23s.
*/
case object Numeric extends MeasureRenderWidth
implicit class MeasureRenderWidthOps(val self: MeasureRenderWidth) {
def icuFormatWidth: FormatWidth =
self match {
case Wide ⇒ FormatWidth.WIDE
case Short ⇒ FormatWidth.SHORT
case Narrow ⇒ FormatWidth.NARROW
case Numeric ⇒ FormatWidth.NUMERIC
}
def parsableString: String =
self match {
case Wide ⇒ "wide"
case Short ⇒ "short"
case Narrow ⇒ "narrow"
case Numeric ⇒ "numeric"
}
}
def parseString(toParse: String): AlmValidation[MeasureRenderWidth] =
toParse match {
case "wide" => Wide.success
case "short" => Short.success
case "narrow" => Narrow.success
case "numeric" => Numeric.success
case x => ParsingProblem(s""""$x" is not a MeasureRenderWidth.""").failure
}
} | chridou/almhirt | almhirt-i18n/src/main/scala/almhirt/i18n/MeasureRenderWidth.scala | Scala | apache-2.0 | 1,607 |
package com.electronwill.collections
import scala.collection.mutable
import scala.reflect.ClassTag
/**
* @author TheElectronWill
*/
final class ArrayMap[@specialized(Int) A: ClassTag](initialCapacity: Int,
private[this] val nullValue: A)
extends mutable.Map[Int, A] {
/** Contains the elements of the ArrayMap. */
  private[this] var elements = Array.fill[A](initialCapacity)(nullValue)
/** The number of (non-null) elements. */
private[this] var elementCount = 0
override def size: Int = elementCount
override def default(key: Int): A = nullValue
override def +=(kv: (Int, A)): this.type = {
update(kv._1, kv._2)
this
}
override def put(key: Int, value: A): Option[A] = {
val previousValue = apply(key)
update(key, value)
if (nullValue == previousValue) None else Some(previousValue)
}
  override def update(key: Int, value: A): Unit = {
    // A slot is "new" when it currently holds the nullValue sentinel (or lies beyond the array).
    if (nullValue == apply(key)) {
      elementCount += 1
    }
    if (elements.length <= key) {
      elements = growAmortize(elements, key + 1)
    }
    elements(key) = value
  }
override def -=(key: Int): this.type = {
if (key < elements.length) {
doRemove(key)
}
this
}
override def remove(key: Int): Option[A] = {
val previousValue = apply(key)
if (previousValue == nullValue) {
None
} else {
doRemove(key)
Some(previousValue)
}
}
def -=(key: Int, expectedValue: Int): this.type = {
remove(key, expectedValue)
this
}
def remove(key: Int, expectedValue: Int): Boolean = {
val previousValue = apply(key)
if (previousValue == expectedValue) {
doRemove(key)
true
} else {
false
}
}
private def doRemove(key: Int): Unit = {
elements(key) = nullValue
elementCount -= 1
}
override def get(key: Int): Option[A] = {
if (key >= elements.length) {
None
} else {
val v = elements(key)
if (nullValue == v) None else Some(v)
}
}
override def apply(key: Int): A = {
if (key >= elements.length) nullValue else elements(key)
}
override def iterator: Iterator[(Int, A)] = new Iterator[(Int, A)] {
// Iterates over (key,elem)
private[this] var id = 0
private[this] var nextElement: A = nullValue
private def findNext(): Unit = {
// Finds the next non-null element
while (id < elements.length && (nullValue == nextElement)) {
val v = elements(id)
if (nullValue != v) {
nextElement = v.asInstanceOf[A]
}
        id += 1
}
}
override def hasNext: Boolean = {
if (nullValue == nextElement) {
findNext()
}
      nullValue != nextElement
}
override def next(): (Int, A) = {
if (nullValue == nextElement) {
findNext()
}
val e = nextElement
nextElement = nullValue
(id - 1, e)
}
override def size: Int = elementCount
}
}
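
// Illustrative usage, not part of the original file. `nullValue` is the sentinel returned for
// absent keys, so it must be a value that is never stored as a real element (-1 here is just an
// example choice for an Int map).
object ArrayMapExample {
  def main(args: Array[String]): Unit = {
    val map = new ArrayMap[Int](initialCapacity = 8, nullValue = -1)
    map(3) = 42               // same as map.update(3, 42)
    println(map.get(3))       // Some(42)
    println(map(100))         // -1: a key beyond the backing array falls back to nullValue
    map -= 3                  // resets the slot to nullValue
    println(map.get(3))       // None
  }
}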
| mcphoton/Photon-Server | core/src/main/scala/com/electronwill/collections/ArrayMap.scala | Scala | lgpl-3.0 | 2,916 |
package net.hearthstats.util
case class Coordinate(x: Int, y: Int)
object Coordinate {
def apply(x: Float, y: Float): Coordinate =
Coordinate(x.toInt, y.toInt)
} | HearthStats/HearthStats.net-Uploader | companion/src/main/scala/net/hearthstats/util/Coordinate.scala | Scala | bsd-3-clause | 169 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.stream.sql
import java.sql.Timestamp
import org.apache.flink.api.scala._
import org.apache.flink.table.api.TableException
import org.apache.flink.table.api.scala._
import org.apache.flink.table.plan.logical.rel.LogicalTemporalTableJoin.TEMPORAL_JOIN_CONDITION
import org.apache.flink.table.utils.TableTestUtil.{binaryNode, streamTableNode, term, unaryNode}
import org.apache.flink.table.utils._
import org.hamcrest.Matchers.startsWith
import org.junit.Test
class TemporalTableJoinTest extends TableTestBase {
val util: TableTestUtil = streamTestUtil()
val orders = util.addTable[(Long, String, Timestamp)](
"Orders", 'o_amount, 'o_currency, 'o_rowtime.rowtime)
val ratesHistory = util.addTable[(String, Int, Timestamp)](
"RatesHistory", 'currency, 'rate, 'rowtime.rowtime)
val rates = util.addFunction(
"Rates",
ratesHistory.createTemporalTableFunction('rowtime, 'currency))
val proctimeOrders = util.addTable[(Long, String)](
"ProctimeOrders", 'o_amount, 'o_currency, 'o_proctime.proctime)
val proctimeRatesHistory = util.addTable[(String, Int)](
"ProctimeRatesHistory", 'currency, 'rate, 'proctime.proctime)
val proctimeRates = util.addFunction(
"ProctimeRates",
proctimeRatesHistory.createTemporalTableFunction('proctime, 'currency))
@Test
def testSimpleJoin(): Unit = {
val sqlQuery = "SELECT " +
"o_amount * rate as rate " +
"FROM Orders AS o, " +
"LATERAL TABLE (Rates(o.o_rowtime)) AS r " +
"WHERE currency = o_currency"
util.verifySql(sqlQuery, getExpectedSimpleJoinPlan())
}
@Test
def testSimpleProctimeJoin(): Unit = {
val sqlQuery = "SELECT " +
"o_amount * rate as rate " +
"FROM ProctimeOrders AS o, " +
"LATERAL TABLE (ProctimeRates(o.o_proctime)) AS r " +
"WHERE currency = o_currency"
util.verifySql(sqlQuery, getExpectedSimpleProctimeJoinPlan())
}
/**
* Test versioned joins with more complicated query.
* Important thing here is that we have complex OR join condition
* and there are some columns that are not being used (are being pruned).
*/
@Test
def testComplexJoin(): Unit = {
val util = streamTestUtil()
val thirdTable = util.addTable[(String, Int)]("Table3", 't3_comment, 't3_secondary_key)
val orders = util.addTable[(Timestamp, String, Long, String, Int)](
"Orders", 'o_rowtime.rowtime, 'o_comment, 'o_amount, 'o_currency, 'o_secondary_key)
val ratesHistory = util.addTable[(Timestamp, String, String, Int, Int)](
"RatesHistory", 'rowtime.rowtime, 'comment, 'currency, 'rate, 'secondary_key)
val rates = ratesHistory
.filter('rate > 110L)
.createTemporalTableFunction('rowtime, 'currency)
util.addFunction("Rates", rates)
val sqlQuery =
"SELECT * FROM " +
"(SELECT " +
"o_amount * rate as rate, " +
"secondary_key as secondary_key " +
"FROM Orders AS o, " +
"LATERAL TABLE (Rates(o_rowtime)) AS r " +
"WHERE currency = o_currency OR secondary_key = o_secondary_key), " +
"Table3 " +
"WHERE t3_secondary_key = secondary_key"
util.verifySql(sqlQuery, binaryNode(
"DataStreamJoin",
unaryNode(
"DataStreamCalc",
binaryNode(
"DataStreamTemporalTableJoin",
unaryNode(
"DataStreamCalc",
streamTableNode(orders),
term("select", "o_rowtime, o_amount, o_currency, o_secondary_key")
),
unaryNode(
"DataStreamCalc",
streamTableNode(ratesHistory),
term("select", "rowtime, currency, rate, secondary_key"),
term("where", ">(rate, 110:BIGINT)")
),
term(
"where",
"AND(" +
s"${TEMPORAL_JOIN_CONDITION.getName}(o_rowtime, rowtime, currency), " +
"OR(=(currency, o_currency), =(secondary_key, o_secondary_key)))"),
term(
"join",
"o_rowtime",
"o_amount",
"o_currency",
"o_secondary_key",
"rowtime",
"currency",
"rate",
"secondary_key"),
term("joinType", "InnerJoin")
),
term("select", "*(o_amount, rate) AS rate", "secondary_key")
),
streamTableNode(thirdTable),
term("where", "=(t3_secondary_key, secondary_key)"),
term("join", "rate, secondary_key, t3_comment, t3_secondary_key"),
term("joinType", "InnerJoin")
))
}
@Test
def testUncorrelatedJoin(): Unit = {
expectedException.expect(classOf[TableException])
expectedException.expectMessage(startsWith("Cannot generate a valid execution plan"))
val sqlQuery = "SELECT " +
"o_amount * rate as rate " +
"FROM Orders AS o, " +
"LATERAL TABLE (Rates(TIMESTAMP '2016-06-27 10:10:42.123')) AS r " +
"WHERE currency = o_currency"
util.printSql(sqlQuery)
}
@Test
def testTemporalTableFunctionScan(): Unit = {
expectedException.expect(classOf[TableException])
expectedException.expectMessage(startsWith("Cannot generate a valid execution plan"))
val sqlQuery = "SELECT * FROM LATERAL TABLE (Rates(TIMESTAMP '2016-06-27 10:10:42.123'))"
util.printSql(sqlQuery)
}
def getExpectedSimpleJoinPlan(): String = {
unaryNode(
"DataStreamCalc",
binaryNode(
"DataStreamTemporalTableJoin",
streamTableNode(orders),
streamTableNode(ratesHistory),
term("where",
"AND(" +
s"${TEMPORAL_JOIN_CONDITION.getName}(o_rowtime, rowtime, currency), " +
"=(currency, o_currency))"),
term("join", "o_amount", "o_currency", "o_rowtime", "currency", "rate", "rowtime"),
term("joinType", "InnerJoin")
),
term("select", "*(o_amount, rate) AS rate")
)
}
def getExpectedSimpleProctimeJoinPlan(): String = {
unaryNode(
"DataStreamCalc",
binaryNode(
"DataStreamTemporalTableJoin",
streamTableNode(proctimeOrders),
unaryNode(
"DataStreamCalc",
streamTableNode(proctimeRatesHistory),
term("select", "currency, rate")),
term("where",
"AND(" +
s"${TEMPORAL_JOIN_CONDITION.getName}(o_proctime, currency), " +
"=(currency, o_currency))"),
term("join", "o_amount", "o_currency", "o_proctime", "currency", "rate"),
term("joinType", "InnerJoin")
),
term("select", "*(o_amount, rate) AS rate")
)
}
def getExpectedTemporalTableFunctionOnTopOfQueryPlan(): String = {
unaryNode(
"DataStreamCalc",
binaryNode(
"DataStreamTemporalTableJoin",
streamTableNode(orders),
unaryNode(
"DataStreamCalc",
streamTableNode(ratesHistory),
term("select", "currency", "*(rate, 2) AS rate", "rowtime"),
term("where", ">(rate, 100)")),
term("where",
"AND(" +
s"${TEMPORAL_JOIN_CONDITION.getName}(o_rowtime, rowtime, currency), " +
"=(currency, o_currency))"),
term("join", "o_amount", "o_currency", "o_rowtime", "currency", "rate", "rowtime"),
term("joinType", "InnerJoin")
),
term("select", "*(o_amount, rate) AS rate")
)
}
}
| fhueske/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/sql/TemporalTableJoinTest.scala | Scala | apache-2.0 | 8,144 |
import com.hypertino.binders.core.BindOptions
import com.hypertino.binders.json.{DefaultJsonBindersFactory, JsonBinders}
import com.hypertino.inflector.naming.PlainConverter
import org.scalatest.{FlatSpec, Matchers}
case class TestInt(intVal: Int)
case class TestIntN(intValN1: Option[Int], intValN2: Option[Int])
case class TestIntArray(intArray: Seq[Int])
case class TestIntArrayN(intArrayN: Seq[Option[Int]])
class TestIntJsonSerializer extends FlatSpec with Matchers {
import JsonBinders._
"Json " should " serialize class with Int" in {
val t = TestInt(1234)
val str = t.toJson
assert (str === """{"intVal":1234}""")
}
"Json " should " deserialize class with Int" in {
val o = """{"intVal":1234}""".parseJson[TestInt]
val t = TestInt(1234)
assert (o === t)
}
"Json " should " serialize class with array of Int" in {
val t = TestIntArray(List(1,2,3))
val str = t.toJson
assert (str === """{"intArray":[1,2,3]}""")
}
"Json " should " deserialize class with array of Int" in {
val o = """{"intArray":[1,2,3]}""".parseJson[TestIntArray]
val t = TestIntArray(List(1,2,3))
assert (o === t)
}
"Json " should " serialize class with array of Option[Int]" in {
val t = TestIntArrayN(List(Some(1),None,Some(3)))
val str = t.toJson
assert (str === """{"intArrayN":[1,null,3]}""")
}
"Json " should " deserialize class with array of Option[Int]" in {
val o = """{"intArrayN":[1,null,3]}""".parseJson[TestIntArrayN]
val t = TestIntArrayN(List(Some(1),None,Some(3)))
assert (o === t)
}
"Json " should " serialize class with Nullable Int" in {
val t = TestIntN(Some(1234), Some(456))
val str = t.toJson
assert(str === """{"intValN1":1234,"intValN2":456}""")
val t2 = TestIntN(Some(1234), None)
val str2 = t2.toJson
assert(str2 === """{"intValN1":1234,"intValN2":null}""")
}
"Json " should " skip field when BindOptions(skipOptionalFields=true)" in {
implicit val op3: BindOptions = new BindOptions(true)
val t3 = TestIntN(Some(1234), None)
val str3 = t3.toJson
assert(str3 === """{"intValN1":1234}""")
}
ignore should " pretty print" in {
implicit val defaultJsonBindersFactory = new DefaultJsonBindersFactory[PlainConverter.type](true)
val t3 = TestIntN(Some(1234), None)
val str3 = t3.toJson
assert(str3 === """{
| "intValN1" : 1234,
| "intValN2" : null
|}""".stripMargin('|'))
}
}
| hypertino/json-binders | jsonBinders/shared/src/test/scala/TestIntJsonSerializer.scala | Scala | bsd-3-clause | 2,500 |
package com.twitter.finagle.tracing
import com.twitter.app.GlobalFlag
import com.twitter.finagle.Init
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.util.ByteArrays
import com.twitter.io.Buf
import com.twitter.util.{Duration, Future, Return, Stopwatch, Throw, Time, Try}
import java.net.InetSocketAddress
import scala.util.Random
object debugTrace extends GlobalFlag(false, "Print all traces to the console.")
/**
* This is a tracing system similar to Dapper:
*
* “Dapper, a Large-Scale Distributed Systems Tracing Infrastructure”,
* Benjamin H. Sigelman, Luiz André Barroso, Mike Burrows, Pat
* Stephenson, Manoj Plakal, Donald Beaver, Saul Jaspan, Chandan
* Shanbhag, 2010.
*
* It is meant to be independent of whatever underlying RPC mechanism
* is being used, and it is up to the underlying codec to implement
* the transport.
*
 * `Trace` maintains the state of the tracing stack.
* The current `TraceId` has a terminal flag, indicating whether it
* can be overridden with a different `TraceId`. Setting the current
* `TraceId` as terminal forces all future annotations to share that
* `TraceId`.
* When reporting, we report to all tracers in the list of `Tracer`s.
*/
object Trace {
private case class TraceCtx(terminal: Boolean, tracers: List[Tracer]) {
def withTracer(tracer: Tracer) = copy(tracers=tracer :: this.tracers)
def withTerminal(terminal: Boolean) =
if (terminal == this.terminal) this
else copy(terminal=terminal)
}
private object TraceCtx {
val empty = TraceCtx(false, Nil)
}
private[this] val traceCtx = new Contexts.local.Key[TraceCtx]
private[this] val someTrue = Some(true)
private[this] val someFalse = Some(false)
private[finagle] val idCtx = new Contexts.broadcast.Key[TraceId](
"com.twitter.finagle.tracing.TraceContext"
) {
private val local = new ThreadLocal[Array[Byte]] {
override def initialValue(): Array[Byte] = new Array[Byte](32)
}
def marshal(id: TraceId): Buf =
Buf.ByteArray.Owned(TraceId.serialize(id))
/**
* The wire format is (big-endian):
* ''spanId:8 parentId:8 traceId:8 flags:8''
*/
def tryUnmarshal(body: Buf): Try[TraceId] = {
if (body.length != 32)
return Throw(new IllegalArgumentException("Expected 32 bytes"))
val bytes = local.get()
body.write(bytes, 0)
val span64 = ByteArrays.get64be(bytes, 0)
val parent64 = ByteArrays.get64be(bytes, 8)
val trace64 = ByteArrays.get64be(bytes, 16)
val flags64 = ByteArrays.get64be(bytes, 24)
val flags = Flags(flags64)
val sampled = if (flags.isFlagSet(Flags.SamplingKnown)) {
if (flags.isFlagSet(Flags.Sampled)) someTrue else someFalse
} else None
val traceId = TraceId(
if (trace64 == parent64) None else Some(SpanId(trace64)),
if (parent64 == span64) None else Some(SpanId(parent64)),
SpanId(span64),
sampled,
flags)
Return(traceId)
}
}
private[this] val rng = new Random
private[this] val defaultId = TraceId(None, None, SpanId(rng.nextLong()), None, Flags())
@volatile private[this] var tracingEnabled = true
private[this] val EmptyTraceCtxFn = () => TraceCtx.empty
private def ctx: TraceCtx =
Contexts.local.getOrElse(traceCtx, EmptyTraceCtxFn)
/**
* True if there is an identifier for the current trace.
*/
def hasId: Boolean = Contexts.broadcast.contains(idCtx)
private[this] val defaultIdFn: () => TraceId = () => defaultId
/**
* Get the current trace identifier. If no identifiers have been
* pushed, a default one is provided.
*/
def id: TraceId =
Contexts.broadcast.getOrElse(idCtx, defaultIdFn)
/**
* Get the current identifier, if it exists.
*/
def idOption: Option[TraceId] =
Contexts.broadcast.get(idCtx)
/**
* @return true if the current trace id is terminal
*/
def isTerminal: Boolean = ctx.terminal
/**
* @return the current list of tracers
*/
def tracers: List[Tracer] = ctx.tracers
/**
* Turn trace recording on.
*/
def enable(): Unit = tracingEnabled = true
/**
* Turn trace recording off.
*/
def disable(): Unit = tracingEnabled = false
/**
* Create a derived id from the current TraceId.
*/
def nextId: TraceId = {
val spanId = SpanId(rng.nextLong())
idOption match {
case Some(id) =>
TraceId(Some(id.traceId), Some(id.spanId), spanId, id.sampled, id.flags)
case None =>
TraceId(None, None, spanId, None, Flags())
}
}
/**
* Run computation `f` with the given traceId.
*
* @param traceId the TraceId to set as the current trace id
* @param terminal true if traceId is a terminal id. Future calls to set() after a terminal
* id is set will not set the traceId
*/
def letId[R](traceId: TraceId, terminal: Boolean = false)(f: => R): R = {
if (isTerminal) f
else if (terminal) {
Contexts.local.let(traceCtx, ctx.withTerminal(terminal)) {
Contexts.broadcast.let(idCtx, traceId)(f)
}
} else Contexts.broadcast.let(idCtx, traceId)(f)
}
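  // Usage sketch (illustrative, with a hypothetical `doWork()`): run a computation
  // under a freshly derived trace id.
  //
  //   Trace.letId(Trace.nextId) {
  //     Trace.record("starting work")
  //     doWork()
  //   }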
/**
   * A version of [[com.twitter.finagle.tracing.Trace.letId]] providing an
* optional ID. If the argument is None, the computation `f` is run without
* altering the trace environment.
*/
def letIdOption[R](traceIdOpt: Option[TraceId])(f: => R): R =
traceIdOpt match {
case Some(traceId) => letId(traceId)(f)
case None => f
}
/**
* Run computation `f` with `tracer` added onto the tracer stack.
*/
def letTracer[R](tracer: Tracer)(f: => R): R =
Contexts.local.let(traceCtx, ctx.withTracer(tracer))(f)
/**
* Run computation `f` with the given tracer, and a derivative TraceId.
* The implementation of this function is more efficient than calling
* letTracer, nextId and letId sequentially as it minimizes the number
* of request context changes.
*
* @param tracer the tracer to be pushed
* @param terminal true if the next traceId is a terminal id. Future
* attempts to set nextId will be ignored.
*/
def letTracerAndNextId[R](tracer: Tracer, terminal: Boolean = false)(f: => R): R =
letTracerAndId(tracer, nextId, terminal)(f)
/**
* Run computation `f` with the given tracer and trace id.
*
* @param terminal true if the next traceId is a terminal id. Future
* attempts to set nextId will be ignored.
*/
def letTracerAndId[R](tracer: Tracer, id: TraceId, terminal: Boolean = false)(f: => R): R = {
if (ctx.terminal) {
letTracer(tracer)(f)
} else {
val newCtx = ctx.withTracer(tracer).withTerminal(terminal)
val newId = id.sampled match {
case None => id.copy(_sampled = tracer.sampleTrace(id))
case Some(_) => id
}
Contexts.local.let(traceCtx, newCtx) {
Contexts.broadcast.let(idCtx, newId)(f)
}
}
}
/**
* Run computation `f` with all tracing state (tracers, trace id)
* cleared.
*/
def letClear[R](f: => R): R =
Contexts.local.letClear(traceCtx) {
Contexts.broadcast.letClear(idCtx) {
f
}
}
/**
* Convenience method for event loops in services. Put your
* service handling code inside this to get proper tracing with all
* the correct fields filled in.
*/
def traceService[T](service: String, rpc: String, hostOpt: Option[InetSocketAddress]=None)(f: => T): T = {
Trace.letId(Trace.nextId) {
Trace.recordBinary("finagle.version", Init.finagleVersion)
Trace.recordServiceName(service)
Trace.recordRpc(rpc)
hostOpt.map { Trace.recordServerAddr(_) }
Trace.record(Annotation.ServerRecv())
try f finally {
Trace.record(Annotation.ServerSend())
}
}
}
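  // Usage sketch (illustrative, with a hypothetical `handleRequest()`): wrap server-side
  // handling so the service name, rpc name and ServerRecv/ServerSend annotations are
  // recorded around it.
  //
  //   Trace.traceService("user-service", "getUser") {
  //     handleRequest()
  //   }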
/**
* Returns true if tracing is enabled with a good tracer pushed and at least one tracer
* decides to actively trace the current [[id]]
*/
def isActivelyTracing: Boolean = {
if (!tracingEnabled)
return false
// store `tracers` and `id` in local vars to avoid repeated `Context` lookups
val ts = tracers
if (ts.isEmpty)
return false
val tid = id
ts.exists(_.isActivelyTracing(tid))
}
/**
* Record a raw record without checking if it's sampled/enabled/etc.
*/
private[this] def uncheckedRecord(rec: Record): Unit = {
tracers.distinct.foreach { t: Tracer => t.record(rec) }
}
/**
* Record a raw ''Record''. This will record to a _unique_ set of
* tracers in the stack.
*/
def record(rec: => Record): Unit = {
if (debugTrace())
System.err.println(rec)
if (isActivelyTracing)
uncheckedRecord(rec)
}
/**
* Time an operation and add an annotation with that duration on it
* @param message The message describing the operation
* @param f operation to perform
* @tparam T return type
* @return return value of the operation
*/
def time[T](message: String)(f: => T): T = {
val elapsed = Stopwatch.start()
val rv = f
record(message, elapsed())
rv
}
/**
* Runs the function f and logs that duration until the future is satisfied with the given name.
*/
def timeFuture[T](message: String)(f: Future[T]): Future[T] = {
val start = Time.now
f.ensure {
record(message, start.untilNow)
}
f
}
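  // Usage sketch (illustrative; `loadFromCache()` is a hypothetical call and
  // `fetchRemote()` a hypothetical call returning a Future): annotate the duration of
  // synchronous and asynchronous work.
  //
  //   val cached = Trace.time("load-from-cache")(loadFromCache())
  //   val remote = Trace.timeFuture("fetch-remote")(fetchRemote())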
/*
* Convenience methods that construct records of different kinds.
*/
def record(ann: Annotation): Unit = {
if (debugTrace())
System.err.println(Record(id, Time.now, ann, None))
if (isActivelyTracing)
uncheckedRecord(Record(id, Time.now, ann, None))
}
def record(ann: Annotation, duration: Duration): Unit = {
if (debugTrace())
System.err.println(Record(id, Time.now, ann, Some(duration)))
if (isActivelyTracing)
uncheckedRecord(Record(id, Time.now, ann, Some(duration)))
}
def record(message: String): Unit = {
record(Annotation.Message(message))
}
def record(message: String, duration: Duration): Unit = {
record(Annotation.Message(message), duration)
}
@deprecated("Use recordRpc and recordServiceName", "6.13.x")
def recordRpcname(service: String, rpc: String): Unit = {
record(Annotation.Rpcname(service, rpc))
}
def recordServiceName(serviceName: String): Unit = {
record(Annotation.ServiceName(serviceName))
}
def recordRpc(name: String): Unit = {
record(Annotation.Rpc(name))
}
def recordClientAddr(ia: InetSocketAddress): Unit = {
record(Annotation.ClientAddr(ia))
}
def recordServerAddr(ia: InetSocketAddress): Unit = {
record(Annotation.ServerAddr(ia))
}
def recordLocalAddr(ia: InetSocketAddress): Unit = {
record(Annotation.LocalAddr(ia))
}
def recordBinary(key: String, value: Any): Unit = {
record(Annotation.BinaryAnnotation(key, value))
}
def recordBinaries(annotations: Map[String, Any]): Unit = {
if (isActivelyTracing) {
for ((key, value) <- annotations) {
recordBinary(key, value)
}
}
}
}
| spockz/finagle | finagle-core/src/main/scala/com/twitter/finagle/tracing/Trace.scala | Scala | apache-2.0 | 11,104 |
/*
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.flinkspector.dataset
import java.util.{ArrayList => JArrayList, List => JList}
import io.flinkspector.core.input.InputBuilder
import io.flinkspector.core.runtime.SimpleOutputVerifier
import io.flinkspector.core.trigger.VerifyFinishedTrigger
import org.scalatest.exceptions.TestFailedException
import scala.collection.JavaConversions._
class DataSetEnvironmentSpec extends CoreSpec {
class CountTrigger(n: Int) extends VerifyFinishedTrigger[Integer] {
override def onRecord(record: Integer): Boolean = false
override def onRecordCount(count: Long): Boolean = count >= n
}
class Verifier[T](list: List[T]) extends SimpleOutputVerifier[T] {
override def verify(output: JList[T]): Unit =
output should contain theSameElementsAs list
}
class CountVerifier[T](cnt: Int) extends SimpleOutputVerifier[T] {
override def verify(output: JList[T]): Unit =
output should have length (cnt)
}
"The batch environment" should "initialize" in {
DataSetTestEnvironment.createTestEnvironment(1)
}
it should "provide a OutputFormat" in {
val env = DataSetTestEnvironment.createTestEnvironment(1)
val dataSet = env.fromElements(1, 2, 3, 4, 5)
val outputFormat = env.createTestOutputFormat(new Verifier(List(1, 2, 3, 4, 5)))
dataSet.output(outputFormat)
env.executeTest()
}
it should "provide a Outputformat and throw an exception" in {
val env = DataSetTestEnvironment.createTestEnvironment(1)
val dataSet = env.fromElements(1, 2, 3, 4, 5)
val outputFormat = env.createTestOutputFormat(new Verifier(List(1, 3, 4, 5)))
dataSet.output(outputFormat)
an[TestFailedException] shouldBe thrownBy(env.executeTest())
}
it should "stop with trigger and signal a success" in {
val env = DataSetTestEnvironment.createTestEnvironment(1)
val dataSet = env.fromElements(1, 2, 3, 4, 5)
val outputFormat = env.createTestOutputFormat(new CountVerifier[Int](2), new CountTrigger(2))
dataSet.output(outputFormat)
env.executeTest()
}
it should "stop with trigger and signal a failure" in {
val env = DataSetTestEnvironment.createTestEnvironment(1)
val dataSet = env.fromElements(1, 2, 3, 4, 5)
val outputFormat = env.createTestOutputFormat(new Verifier(List(1, 2, 3)), new CountTrigger(2))
dataSet.output(outputFormat)
an[TestFailedException] shouldBe thrownBy(env.executeTest())
}
it should "provide a OutputFormat from [[Input]]" in {
val env = DataSetTestEnvironment.createTestEnvironment(1)
val input = new InputBuilder[Int]().emitAll(List(1, 2, 3, 4, 5))
val dataSet = env.createTestSet(input)
val outputFormat = env.createTestOutputFormat(new Verifier(List(1, 2, 3, 4, 5)))
dataSet.output(outputFormat)
env.executeTest()
}
it should "handle more than one outputFormat" in {
val env = DataSetTestEnvironment.createTestEnvironment(1)
val evenlist = List[Integer](2, 4, 6, 8)
val oddlist = List[Integer](1, 3, 5, 7)
val evenDataSet = env.fromElements(evenlist: _*)
val oddDataSet = env.fromElements(oddlist: _*)
val evenOutputFormat = env.createTestOutputFormat(new Verifier[Integer](List(2, 4, 6, 8)))
val oddOutputFormat = env.createTestOutputFormat(new Verifier[Integer](List(1, 3, 5, 7)))
evenDataSet.output(evenOutputFormat)
oddDataSet.output(oddOutputFormat)
env.executeTest()
}
it should "throw an exception if a verifier failed" in {
val env = DataSetTestEnvironment.createTestEnvironment(1)
val dataSet = env.fromElements(1, 2, 3, 4, 5)
val outputFormat = env.createTestOutputFormat(new Verifier(List(1, 2, 3, 4)))
dataSet.output(outputFormat)
an[TestFailedException] shouldBe thrownBy(env.executeTest())
}
it should "handle one failure with multiple outputFormats" in {
val env = DataSetTestEnvironment.createTestEnvironment(1)
val evenlist = List[Integer](2, 4, 6, 8)
val oddlist = List[Integer](1, 3, 5, 7)
val evenDataSet = env.fromElements(evenlist: _*)
val oddDataSet = env.fromElements(oddlist: _*)
val evenOutputFormat = env.createTestOutputFormat(new Verifier[Integer](List(2, 4, 6, 8)))
val oddOutputFormat = env.createTestOutputFormat(new Verifier[Integer](List(2, 3, 5, 7)))
evenDataSet.output(evenOutputFormat)
oddDataSet.output(oddOutputFormat)
an[TestFailedException] shouldBe thrownBy(env.executeTest())
}
it should "handle more than one failures with multiple outputFormats" in {
val env = DataSetTestEnvironment.createTestEnvironment(1)
val evenlist = List[Integer](2, 4, 6, 8)
val oddlist = List[Integer](1, 3, 5, 7)
val evenDataSet = env.fromElements(evenlist: _*)
val oddDataSet = env.fromElements(oddlist: _*)
val evenOutputFormat = env.createTestOutputFormat(new Verifier[Integer](List(1, 4, 6, 8)))
val oddOutputFormat = env.createTestOutputFormat(new Verifier[Integer](List(2, 3, 5, 7)))
evenDataSet.output(evenOutputFormat)
oddDataSet.output(oddOutputFormat)
//TODO shutdown at failure
an[TestFailedException] shouldBe thrownBy(env.executeTest())
}
it should "not stop if only one trigger fires with multiple outputFormats" in {
val env = DataSetTestEnvironment.createTestEnvironment(1)
env.setTimeoutInterval(10000)
val evenlist = List[Integer](2, 4, 6, 8)
val oddlist = List[Integer](1, 3, 5, 7)
val evenDataSet = env.fromElements(evenlist: _*)
val oddDataSet = env.fromElements(oddlist: _*)
val evenOutputFormat = env.createTestOutputFormat(new CountVerifier[Integer](2), new CountTrigger(2))
val oddOutputFormat = env.createTestOutputFormat(new Verifier[Integer](List(1, 3, 5, 7)))
evenDataSet.output(evenOutputFormat)
oddDataSet.output(oddOutputFormat)
env.executeTest()
}
it should "stop if all triggers fire" in {
val env = DataSetTestEnvironment.createTestEnvironment(1)
val evenlist = List[Integer](2, 4, 6, 8)
val oddlist = List[Integer](1, 3, 5, 7)
val evenDataSet = env.fromElements(evenlist: _*)
val oddDataSet = env.fromElements(oddlist: _*)
val evenOutputFormat = env.createTestOutputFormat(new CountVerifier[Integer](2), new CountTrigger(2))
val oddOutputFormat = env.createTestOutputFormat(new CountVerifier[Integer](2), new CountTrigger(2))
evenDataSet.output(evenOutputFormat)
oddDataSet.output(oddOutputFormat)
env.executeTest()
//check for flag
}
}
| ottogroup/flink-spector | flinkspector-dataset/src/test/scala/io/flinkspector/dataset/DataSetEnvironmentSpec.scala | Scala | apache-2.0 | 7,041 |
package com.lookout.borderpatrol.test
import java.net.URL
import com.lookout.borderpatrol.{LoginManager, Manager, ServiceIdentifier, ServiceMatcher}
import com.twitter.finagle.httpx.{RequestBuilder, Request}
import com.twitter.finagle.httpx.path.Path
class ServiceMatcherSpec extends BorderPatrolSuite {
val urls = Set(new URL("http://localhost:8081"))
val keymasterIdManager = Manager("keymaster", Path("/identityProvider"), urls)
val keymasterAccessManager = Manager("keymaster", Path("/accessIssuer"), urls)
val checkpointLoginManager = LoginManager("checkpoint", Path("/check"), urls, Path("/loginConfirm"),
keymasterIdManager, keymasterAccessManager)
val basicIdManager = Manager("basic", Path("/signin"), urls)
val basicAccessManager = Manager("basic", Path("/accessin"), urls)
val umbrellaLoginManager = LoginManager("umbrella", Path("/umb"), urls, Path("/loginIt"),
keymasterIdManager, keymasterAccessManager)
val one = ServiceIdentifier("one", urls, Path("/ent"), "enterprise", checkpointLoginManager)
val two = ServiceIdentifier("two", urls, Path("/api"), "api", umbrellaLoginManager)
val three = ServiceIdentifier("three", urls, Path("/apis"), "api.subdomain", checkpointLoginManager)
val four = ServiceIdentifier("four", urls, Path("/apis/test"), "api.testdomain", umbrellaLoginManager)
val sids = Set(one, two, three, four)
val serviceMatcher = ServiceMatcher(sids)
def req(subdomain: String = "nothing", path: String = "/"): Request =
RequestBuilder().url(s"http://${subdomain + "."}example.com${path.toString}").buildGet()
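  // Note: the lookup below keys off `winner` only; `loser` is accepted so the
  // permutation test can pair every identifier against every other one.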
def getWinner(winner: ServiceIdentifier, loser: ServiceIdentifier): ServiceIdentifier =
serviceMatcher.get(req(winner.subdomain, winner.path.toString)).value
behavior of "ServiceMatchers"
it should "match the longest subdomain" in {
serviceMatcher.subdomain("www.example.com") should be(None)
serviceMatcher.subdomain("enterprise.api.example.com").value should be(one)
serviceMatcher.subdomain("enterprise.example.com").value should be(one)
serviceMatcher.subdomain("api.example.com").value should be(two)
serviceMatcher.subdomain("api.subdomains.example.com").value should be(two)
serviceMatcher.subdomain("api.subdomain.example.com").value should be(three)
}
it should "match the longest get" in {
serviceMatcher.get(req("enterprise", "/")) should be(None)
serviceMatcher.get(req("enterprise", "/ent")).value should be(one)
serviceMatcher.get(req("enterprise", "/check")).value should be(one)
serviceMatcher.get(req("enterprise", "/loginConfirm")).value should be(one)
serviceMatcher.get(req("api", "/check")) should be(None)
serviceMatcher.get(req("api", "/loginConfirm")) should be(None)
serviceMatcher.get(req("api.testdomain", "/apis/test")).value should be(four)
serviceMatcher.get(req("api.testdomain", "/umb")).value should be(four)
serviceMatcher.get(req("api.testdomain", "/loginIt")).value should be(four)
serviceMatcher.get(req("api.testdomain", "/login")) should be(None)
}
it should "match the given ServiceIdentifier with itself" in {
val permutations = for {
winner <- List(one, two, three, four)
loser <- List(one, two, three, four)
if winner != loser
} yield getWinner(winner, loser) == winner
permutations.foreach(p => p should be(true))
}
it should "return None when neither matching" in {
serviceMatcher.get(req("www", "/applesauce")) should be(None)
}
it should "return None when subdomain matches, but path does not" in {
serviceMatcher.get(req("enterprise", "/apis")) should be(None)
}
}
| jamescway/borderpatrol | core/src/test/scala/com/lookout/borderpatrol/test/ServiceMatcherSpec.scala | Scala | mit | 3,634 |
package com.github.opengrabeso
package formulafx
import core._
import org.scalajs.dom._
import scala.collection.mutable
import scala.scalajs.js
import scala.scalajs.js.JSApp
import scala.scalajs.js.annotation.JSExport
import com.github.opengrabeso.js.JsUtils._
import org.scalajs.dom
@JSExport(name="FormulaJS")
object FormulaJS extends JSApp {
private val resPrefix = " "
private val prefs = dom.window.localStorage
import com.github.opengrabeso.js.Prefs._
def main(): Unit = {
println("Formula JS")
}
val tableData = mutable.ArrayBuffer[String]()
def addTableRow(str: String, c: String): Unit = {
tableData += str
val tableNode = document.getElementById("history")
val tr = document.createElement("tr")
val tdr = document.createElement("td")
tr.appendChild(tdr)
tdr.setAttribute("class", c)
val td = document.createElement("td")
td.innerHTML = str
tdr.appendChild(td)
/*
val tdPin = document.createElement("td")
tdPin.setAttribute("class","pin")
tdPin.innerHTML = ">"
tdr.appendChild(tdPin)
*/
tableNode.appendChild(tr)
}
def setInput(str: String): Unit = {
val document = js.Dynamic.global.document // evalNode.value not working without Dynamic
val evalNode = document.getElementById("eval") //.asInstanceOf[html.Paragraph]
if (str.startsWith(resPrefix)) {
evalNode.value = str.drop(resPrefix.length)
} else {
evalNode.value = str
}
}
def setResult(res: String): Unit = {
val resultNode = document.getElementById("result")
resultNode.innerHTML = res
}
private def rowId(i: Int) = s"row$i"
def saveSession(): Unit = {
val oldSize = prefs.getInt("rows", 0)
prefs.put("version", "0")
prefs.putInt("rows", tableData.size)
tableData.zipWithIndex.foreach {
case (row, i) =>
prefs.put(rowId(i), row)
}
for (i <- tableData.size until oldSize) {
prefs.remove(rowId(i))
}
}
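  // Storage sketch (illustrative values): after evaluating "1+2" with result "3",
  // the saved session would roughly hold
  //   version -> "0", rows -> 2, row0 -> "1+2", row1 -> resPrefix + "3"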
@JSExport
def loadSession(): Unit = {
clearTable()
val version = prefs.get("version", "")
if (version.nonEmpty) {
val rows = prefs.getInt("rows", 0)
for (i <- 0 until rows) {
val row = prefs.get(rowId(i), "")
// we need to execute even lines so that variables are initialized
if ((i % 2) == 0) {
Evaluate.compute(row, false)
addTableRow(row, "expr")
} else {
addTableRow(row, "result")
}
}
}
}
@JSExport
def reset(): Unit = {
Evaluate.clear()
clearTable()
setInput("")
setResult("")
}
def clearTable(): Unit = {
tableData.clear()
// remove all rows except the first (headers)
val tableNode = document.getElementById("history")
val chs = tableNode.childNodes.copySeq // copy needed to avoid mutation while iterating
for (n <- chs) {
val ch = n.firstChild
// delete all but headers
if (ch != null && ch.nodeName == "TD") {
tableNode.removeChild(n)
}
}
}
@JSExport
def tableClicked(element: Element): Unit = {
if (element.nodeName == "TD") {
def leafCell(e: Element): Element = {
val ch = e.firstElementChild
if (ch != null) leafCell(ch)
else e
}
val cell = leafCell(element)
val cellText = cell.innerHTML
val resultNode = document.getElementById("result")
setInput(cellText)
eval(cellText, true)
}
}
@JSExport
def eval(str: String, preview: Boolean): Unit = {
val resText = Evaluate.compute(str, preview)
resText.map { res =>
setResult(res)
if (!preview) {
addTableRow(str, "expr")
addTableRow(resPrefix + res, "result")
setInput("")
saveSession()
}
}
}
}
| OndrejSpanel/FormulaFX | js/src/main/scala/com/github/opengrabeso/formulafx/FormulaJS.scala | Scala | gpl-2.0 | 3,788 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.tools
import java.io.File
import com.beust.jcommander.Parameter
import org.apache.kudu.client.KuduClient
import org.locationtech.geomesa.kudu.data.{KuduDataStore, KuduDataStoreFactory}
import org.locationtech.geomesa.kudu.tools.KuduDataStoreCommand.KuduParams
import org.locationtech.geomesa.tools.{CatalogParam, DataStoreCommand, DistributedCommand, PasswordParams}
import org.locationtech.geomesa.utils.classpath.ClassPathUtils
/**
* Abstract class for Kudu commands
*/
trait KuduDataStoreCommand extends DataStoreCommand[KuduDataStore] {
override def params: KuduParams
override def connection: Map[String, String] = {
Map(
KuduDataStoreFactory.Params.CatalogParam.getName -> params.catalog,
KuduDataStoreFactory.Params.KuduMasterParam.getName -> params.master,
KuduDataStoreFactory.Params.CredentialsParam.getName -> params.password,
KuduDataStoreFactory.Params.BossThreadsParam.getName -> Option(params.bosses).map(_.toString).orNull,
KuduDataStoreFactory.Params.WorkerThreadsParam.getName -> Option(params.workers).map(_.toString).orNull,
KuduDataStoreFactory.Params.StatisticsParam.getName -> Option(params.statistics).map(_.toString).orNull
).filter(_._2 != null)
}
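  // Sketch (illustrative values): with a catalog of "my_catalog", a master of
  // "kudu-master:7051" and nothing else set, `connection` reduces to roughly
  //
  //   Map(
  //     KuduDataStoreFactory.Params.CatalogParam.getName    -> "my_catalog",
  //     KuduDataStoreFactory.Params.KuduMasterParam.getName -> "kudu-master:7051"
  //   )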
}
object KuduDataStoreCommand {
trait KuduDistributedCommand extends KuduDataStoreCommand with DistributedCommand {
abstract override def libjarsFiles: Seq[String] =
Seq("org/locationtech/geomesa/kudu/tools/kudu-libjars.list") ++ super.libjarsFiles
abstract override def libjarsPaths: Iterator[() => Seq[File]] = Iterator(
() => ClassPathUtils.getJarsFromEnvironment("GEOMESA_KUDU_HOME", "lib"),
() => ClassPathUtils.getJarsFromClasspath(classOf[KuduDataStore]),
() => ClassPathUtils.getJarsFromClasspath(classOf[KuduClient])
) ++ super.libjarsPaths
}
trait KuduParams extends CatalogParam with PasswordParams {
@Parameter(names = Array("-M", "--master"), description = "Kudu master server", required = true)
var master: String = _
@Parameter(names = Array("--boss-threads"), description = "Kudu client boss threads")
var bosses: Integer = _
@Parameter(names = Array("--worker-threads"), description = "Kudu client worker threads")
var workers: Integer = _
@Parameter(names = Array("--disable-statistics"), description = "Disable Kudu client statistics", arity = 0)
var statistics: java.lang.Boolean = _
}
}
| locationtech/geomesa | geomesa-kudu/geomesa-kudu-tools/src/main/scala/org/locationtech/geomesa/kudu/tools/KuduDataStoreCommand.scala | Scala | apache-2.0 | 2,942 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.unicomplex.dummyfailedflowsvc1
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model._
import akka.stream.scaladsl.Flow
import org.squbs.unicomplex.{FlowDefinition, WebContext}
/**
 * A FlowDefinition must have a no-arg constructor; this class deliberately takes a constructor argument to test that failure path.
* @param name Some bogus name.
*/
class DummyFailedFlowSvc1(name: String) extends FlowDefinition with WebContext {
val pingPath = s"/$webContext/ping"
def flow = Flow[HttpRequest].map {
case HttpRequest(_, Uri(_, _, Path(`pingPath`), _, _), _, _, _) =>
HttpResponse(StatusCodes.OK, entity = "pong")
case _ => HttpResponse(StatusCodes.NotFound, entity = "Path not found!")
}
}
| akara/squbs | squbs-unicomplex/src/test/scala/org/squbs/unicomplex/dummyfailedflowsvc1/DummyFailedFlowSvc1.scala | Scala | apache-2.0 | 1,301 |
/*
* Copyright 2014 okumin.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package akka.persistence.journal.sqlasync
import akka.actor.Actor
import akka.persistence.JournalProtocol.{
WriteMessageRejected,
WriteMessages,
WriteMessagesSuccessful
}
import akka.persistence.helper.MySQLInitializer
import akka.persistence.journal.JournalSpec
import akka.persistence.{AtomicWrite, CapabilityFlag, PersistentImpl, PersistentRepr}
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import java.io.NotSerializableException
import scala.concurrent.duration._
class MySQLAsyncJournalSpec
extends JournalSpec(ConfigFactory.load("mysql-application.conf"))
with MySQLInitializer {
override protected def supportsRejectingNonSerializableObjects: CapabilityFlag = true
"ScalikeJDBCWriteJournal" must {
"not execute SQL when all the events is not serializable" in {
val probe = TestProbe()
val notSerializableEvent = new Object { override def toString = "not serializable" }
val messages = (6 to 8).map { i =>
AtomicWrite(
PersistentRepr(
payload = notSerializableEvent,
sequenceNr = i,
persistenceId = pid,
sender = Actor.noSender,
writerUuid = writerUuid
))
}
journal ! WriteMessages(messages, probe.ref, actorInstanceId)
probe.expectMsg(WriteMessagesSuccessful)
val Pid = pid
val WriterUuid = writerUuid
probe.expectMsgPF() {
case WriteMessageRejected(
PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid),
cause,
_) =>
payload should be(notSerializableEvent)
cause.isInstanceOf[NotSerializableException] should be(true)
}
probe.expectMsgPF() {
case WriteMessageRejected(
PersistentImpl(payload, 7L, Pid, _, _, Actor.noSender, WriterUuid),
cause,
_) =>
payload should be(notSerializableEvent)
cause.isInstanceOf[NotSerializableException] should be(true)
}
probe.expectMsgPF() {
case WriteMessageRejected(
PersistentImpl(payload, 8L, Pid, _, _, Actor.noSender, WriterUuid),
cause,
_) =>
payload should be(notSerializableEvent)
cause.isInstanceOf[NotSerializableException] should be(true)
}
probe.expectNoMessage(1.second)
}
}
}
| okumin/akka-persistence-sql-async | core/src/test/scala/akka/persistence/journal/sqlasync/MySQLAsyncJournalSpec.scala | Scala | apache-2.0 | 2,972 |
/*
* Copyright (c) 2012-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common.enrichments.web
// Java
import java.net.URI
// Specs2 & Scalaz-Specs2
import org.specs2.Specification
import org.specs2.matcher.DataTables
import org.specs2.scalaz.ValidationMatchers
// Scalaz
import scalaz._
import Scalaz._
class ParseCrossDomainSpec extends Specification with DataTables with ValidationMatchers { def is = s2"""
This is a specification to test the parseCrossDomain function
parseCrossDomain should return None when the querystring contains no _sp parameter $e1
parseCrossDomain should return a failure when the _sp timestamp is unparseable $e2
parseCrossDomain should successfully extract the domain user ID when available $e3
parseCrossDomain should successfully extract the domain user ID and timestamp when available $e4
parseCrossDomain should extract neither field from an empty _sp parameter $e5
"""
def e1 =
PageEnrichments.parseCrossDomain(Map()) must beSuccessful((None, None))
def e2 = {
val expected = "Field [sp_dtm]: [not-a-timestamp] is not in the expected format (ms since epoch)"
PageEnrichments.parseCrossDomain(Map("_sp" -> "abc.not-a-timestamp")) must beFailing(expected)
}
def e3 =
PageEnrichments.parseCrossDomain(Map("_sp" -> "abc")) must beSuccessful(("abc".some, None))
def e4 =
PageEnrichments.parseCrossDomain(Map("_sp" -> "abc.1426245561368")) must beSuccessful(("abc".some, "2015-03-13 11:19:21.368".some))
def e5 =
PageEnrichments.parseCrossDomain(Map("_sp" -> "")) must beSuccessful(None -> None)
}
| acgray/snowplow | 3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/web/ParseCrossDomainSpec.scala | Scala | apache-2.0 | 2,321 |
package scalapb
object Scalapb {
def getDescriptor(): com.google.protobuf.Descriptors.FileDescriptor = {
throw new NotImplementedError("Descriptors are not implemented yet for Scala.js.")
}
}
| scalapb/ScalaPB | scalapb-runtime/src/main/scalajs/scalapb/Scalapb.scala | Scala | apache-2.0 | 201 |