code | repo_name | path | language | license | size
stringlengths 5–1M | stringlengths 5–109 | stringlengths 6–208 | stringclasses 1 value | stringclasses 15 values | int64 5–1M
---|---|---|---|---|---|
// goseumdochi: experiments with incarnation
// Copyright 2016 John V. Sichi
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.goseumdochi.control
import org.goseumdochi.common._
trait Actuator
{
def setMotionTimeout(duration : TimeSpan)
def getPowerState : String = "{power state unknown}"
def actuateMotion(impulse : PolarImpulse)
def actuateLight(color : LightColor)
def actuateTwirl(theta : Double, duration : TimeSpan, newHeading : Boolean)
}
object NullActuator extends Actuator
{
override def setMotionTimeout(duration : TimeSpan) {}
override def actuateMotion(impulse : PolarImpulse) {}
override def actuateLight(color : LightColor) {}
override def actuateTwirl(
theta : Double, duration : TimeSpan, newHeading : Boolean) {}
}
| lingeringsocket/goseumdochi | base/src/main/scala/org/goseumdochi/control/Actuator.scala | Scala | apache-2.0 | 1,289 |
package notebook
import akka.actor.{ActorSystem, ActorRef, ActorRefFactory}
import play.api.mvc._
import play.api.libs.json._
import play.api.Play.current
import notebook.util._
object ObservableController extends Controller {
} | vitan/spark-notebook | modules/observable/src/main/scala/notebook/ObservableController.scala | Scala | apache-2.0 | 235 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
import sbt._
import sbt.Keys._
import sbt.complete.Parsers
object Generators {
// Generates a scala file that contains the play version for use at runtime.
def PlayVersion(version: String, scalaVersion: String, sbtVersion: String, dir: File): Seq[File] = {
val file = dir / "PlayVersion.scala"
val scalaSource =
"""|package play.core
|
|object PlayVersion {
| val current = "%s"
| val scalaVersion = "%s"
| val sbtVersion = "%s"
|}
""".stripMargin.format(version, scalaVersion, sbtVersion)
if (!file.exists() || IO.read(file) != scalaSource) {
IO.write(file, scalaSource)
}
Seq(file)
}
}
object Commands {
val quickPublish = Command("quickPublish", Help.more("quickPublish", "Toggles quick publish mode, disabling/enabling build of documentation/source jars"))(_ => Parsers.EOF) { (state, _) =>
val x = Project.extract(state)
import x._
val quickPublishToggle = AttributeKey[Boolean]("quickPublishToggle")
val toggle = !state.get(quickPublishToggle).getOrElse(true)
val filtered = session.mergeSettings.filter { setting =>
setting.key match {
case Def.ScopedKey(Scope(_, Global, Global, Global), key)
if key == publishArtifact.key => false
case other => true
}
}
if (toggle) {
state.log.info("Turning off quick publish")
} else {
state.log.info("Turning on quick publish")
}
val newStructure = Load.reapply(filtered ++ Seq(
publishArtifact in GlobalScope in packageDoc := toggle,
publishArtifact in GlobalScope in packageSrc := toggle,
publishArtifact in GlobalScope := true
), structure)
Project.setProject(session, newStructure, state.put(quickPublishToggle, toggle))
}
}
| aradchykov/playframework | framework/project/Tasks.scala | Scala | apache-2.0 | 1,915 |
object HOInvocations {
def switch (x: Int, f: (Int) => Int, g: (Int) => Int) = if (x > 0) f else g
def failling_1 (f: (Int) => Int) = {
switch(-10, (x: Int) => x + 1, f)(2)
} ensuring { res => res > 0}
def failling_2 (x: Int, f: (Int) => Int, g: (Int) => Int) = {
require(x > 0)
switch(1, switch(x, f, g), g)(1)
} ensuring { res => res != f(1)}
} | ericpony/scala-examples | testcases/verification/higher-order/invalid/HOInvocations.scala | Scala | mit | 370 |
import sbt._
object Dependency {
object V {
val Scala = "2.11.7"
}
val json4s = "org.json4s" %% "json4s-native" % "3.2.11" % "provided"
val typesafeConfig = "com.typesafe" % "config" % "1.3.0" % "provided"
val scalatest = "org.scalatest" %% "scalatest" % "2.2.5" % "test"
}
| oxy-development/util | project/Dependency.scala | Scala | lgpl-3.0 | 291 |
package org.littlewings.javaee7.service
import javax.enterprise.context.RequestScoped
@RequestScoped
class CalcService {
def add(a: Int, b: Int): Int = a + b
def multiply(a: Int, b: Int): Int = a * b
}
| kazuhira-r/javaee7-scala-examples | arquillian-wildfly-container-embedded/src/main/scala/org/littlewings/javaee7/service/CalcService.scala | Scala | mit | 209 |
package recfun
import common._
object Main
{
def main(args: Array[String])
{
println("Pascal's Triangle")
for (row <- 0 to 10) {
for (col <- 0 to row)
print(pascal(col, row) + " ")
println()
}
}
/**
* Exercise 1:
* Here, I simply determine if the number is on an edge of the triangle...
* If not I sum the two numbers above it.
*/
def pascal(c: Int, r: Int): Int =
{
if( r == 0 ) 1
else
if( c == 0 || c == r ) 1
else pascal( c-1, r-1 ) + pascal( c, r-1 )
}
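// For example: pascal(1, 2) == 2, the middle value of the third row (1 2 1),
// computed as pascal(0, 1) + pascal(1, 1) = 1 + 1.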
/**
* Exercise 2:
* Here I assign a +1 count to '(', -1 to ')' and 0 otherwise.
* If I encounter a negative count at any time, then the String is unbalanced.
* When the recursion finishes, balanceCount must be zero!
*/
def balance(chars: List[Char]): Boolean =
{
def charCount( char:Char ): Int =
if( char == '(' ) 1
else if( char == ')' ) -1
else 0
def balanceCount(subChars: List[Char], acc:Int): Boolean =
if( subChars.isEmpty ) acc == 0
else if( acc < 0 ) false
else balanceCount( subChars.tail, acc + charCount(subChars.head) )
balanceCount( chars, 0 )
}
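// For example: balance("(()())".toList) == true, while balance(")(".toList) == false
// because the running count goes negative on the very first character.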
/**
* Exercise 3:
* This problem is equivalent to:
* Given N and a set of integers S = { S1, S2, ..., Sm }, how many ways can we express
* N as a linear combination of S with non-negative coefficients.
*
* Look for the explanation of the problem thru dynamic programming in:
* www.algorithmist.com/index.php/Coin_Change
*/
def countChange(money: Int, coins: List[Int]): Int =
{
if( money == 0 ) 1
else if( money < 0 ) 0
else if( coins.size <= 0 && money >= 1 ) 0
else countChange( money, coins.tail ) + countChange(money-coins.head, coins )
}
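// For example: countChange(4, List(1, 2)) == 3, since 4 can be formed as
// 1+1+1+1, 1+1+2 or 2+2.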
}
| andrucuna/scala | progfun-coursera/progfun-assignment1/src/main/scala/recfun/Main.scala | Scala | gpl-2.0 | 1,768 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.crossdata.test
import com.stratio.crossdata.test.BaseXDTest
import org.apache.spark.sql.Row
import org.apache.spark.sql.crossdata.{XDDataFrame, ExecutionType}
import org.apache.spark.sql.crossdata.test.SharedXDContextWithDataTest.SparkTable
import org.apache.spark.sql.types.{StructType, ArrayType, StructField}
/* Mix this trait in a type test class to get most of the type test done.
* It is based on SharedXDContextWithDataTest, thus filling most of that template's slots and generating new entry points
* focused on type testing.
*/
trait SharedXDContextTypesTest extends SharedXDContextWithDataTest {
this: BaseXDTest =>
import SharedXDContextTypesTest._
//Template steps: Override them
val emptyTypesSetError: String /* Error message to be shown when the types test data have not
* been properly inserted in the data source */
def saveTypesData: Int // Entry point for saving types examples into the data source
def sparkAdditionalKeyColumns: Seq[SparkSQLColDef] = Seq() /* There are data sources which require their tables to have a
* primary key. This entry point allows specifying primary keys
* columns.
* NOTE that these `SparkSQLColDef`s shouldn't have a type checker
* since the column type does not form part of the test.
* e.g.:
* override def sparkAdditionalKeyColumns = Seq(
* SparkSQLColDef("k", "INT PRIMARY KEY")
* )
*/
def dataTypesSparkOptions: Map[String, String] /* Special SparkSQL options for type tables; equivalent to
* `defaultOptions` but will only apply in the registration of
* the types test table.
*/
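/* A rough sketch of how a concrete data-source test might fill these slots
* (class and helper names below are illustrative only, assuming MyDatasourceBaseTest
* ultimately extends BaseXDTest; other SharedXDContextWithDataTest slots are omitted):
*
* class MyDatasourceTypesIT extends MyDatasourceBaseTest with SharedXDContextTypesTest {
* override val emptyTypesSetError = "Type test data could not be inserted"
* override def saveTypesData: Int = insertTypeRows() // number of rows written
* override def dataTypesSparkOptions = Map("table" -> dataTypesTableName)
* doTypesTest("MyDatasource")
* }
*/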
//Template: This is the template implementation and shouldn't be modified in any specific test
def doTypesTest(datasourceName: String): Unit = {
for(executionType <- ExecutionType.Spark::ExecutionType.Native::Nil)
datasourceName should s"provide the right types for $executionType execution" in {
assumeEnvironmentIsUpAndRunning
val dframe = sql("SELECT " + typesSet.map(_.colname).mkString(", ") + s" FROM $dataTypesTableName")
for(
(tpe, i) <- typesSet zipWithIndex;
typeCheck <- tpe.typeCheck
) typeCheck(dframe.collect(executionType).head(i))
}
//Multi-level column flat test
it should "provide flattened column names through the `annotatedCollect` method" in {
val dataFrame = sql("SELECT structofstruct.struct1.structField1 FROM typesCheckTable")
val rows = dataFrame.flattenedCollect()
rows.head.schema.head.name shouldBe "structofstruct.struct1.structField1"
}
it should "be able to flatten results for LIMIT queries" in {
val dataFrame = sql("SELECT structofstruct FROM typesCheckTable LIMIT 1")
val rows = dataFrame.flattenedCollect()
rows.head.schema.head.name shouldBe "structofstruct.field1"
rows.length shouldBe 1
}
it should "be able to vertically flatten results for array columns" in {
val dataFrame = sql(s"SELECT arraystructarraystruct FROM typesCheckTable")
val res = dataFrame.flattenedCollect()
// No array columns should be found in the result schema
res.head.schema filter {
case StructField(_, _: ArrayType, _, _) => true
case _ => false
} shouldBe empty
// No struct columns should be found in the result schema
res.head.schema filter {
case StructField(_, _: StructType, _, _) => true
case _ => false
} shouldBe empty
}
it should "correctly apply user limits to a vertically flattened array column" in {
val dataFrame = sql(s"SELECT arraystructarraystruct FROM typesCheckTable LIMIT 1")
val res = dataFrame.flattenedCollect()
res.length shouldBe 1
}
it should "correctly apply user limits to project-less queries where arrays are getting flattened" in {
val dataFrame = sql(s"SELECT * FROM typesCheckTable LIMIT 1")
val res = dataFrame.flattenedCollect()
res.length shouldBe 1
}
}
abstract override def saveTestData: Unit = {
super.saveTestData
require(saveTypesData > 0, emptyTypesSetError)
}
protected def typesSet: Seq[SparkSQLColDef] = Seq(
SparkSQLColDef("int", "INT", _ shouldBe a[java.lang.Integer]),
SparkSQLColDef("bigint", "BIGINT", _ shouldBe a[java.lang.Long]),
SparkSQLColDef("long", "LONG", _ shouldBe a[java.lang.Long]),
SparkSQLColDef("string", "STRING", _ shouldBe a[java.lang.String]),
SparkSQLColDef("boolean", "BOOLEAN", _ shouldBe a[java.lang.Boolean]),
SparkSQLColDef("double", "DOUBLE", _ shouldBe a[java.lang.Double]),
SparkSQLColDef("float", "FLOAT", _ shouldBe a[java.lang.Float]),
SparkSQLColDef("decimalint", "DECIMAL", _ shouldBe a[java.math.BigDecimal]),
SparkSQLColDef("decimallong", "DECIMAL", _ shouldBe a[java.math.BigDecimal]),
SparkSQLColDef("decimaldouble", "DECIMAL", _ shouldBe a[java.math.BigDecimal]),
SparkSQLColDef("decimalfloat", "DECIMAL", _ shouldBe a[java.math.BigDecimal]),
SparkSQLColDef("date", "DATE", _ shouldBe a[java.sql.Date]),
SparkSQLColDef("timestamp", "TIMESTAMP", _ shouldBe a[java.sql.Timestamp]),
SparkSQLColDef("tinyint", "TINYINT", _ shouldBe a[java.lang.Byte]),
SparkSQLColDef("smallint", "SMALLINT", _ shouldBe a[java.lang.Short]),
SparkSQLColDef("binary", "BINARY", _.asInstanceOf[Array[Byte]]),
SparkSQLColDef("arrayint", "ARRAY<INT>", _ shouldBe a[Seq[_]]),
SparkSQLColDef("arraystring", "ARRAY<STRING>", _ shouldBe a[Seq[_]]),
SparkSQLColDef("mapintint", "MAP<INT, INT>", _ shouldBe a[Map[_, _]]),
SparkSQLColDef("mapstringint", "MAP<STRING, INT>", _ shouldBe a[Map[_, _]]),
SparkSQLColDef("mapstringstring", "MAP<STRING, STRING>", _ shouldBe a[Map[_, _]]),
SparkSQLColDef("struct", "STRUCT<field1: INT, field2: INT>", _ shouldBe a[Row]),
SparkSQLColDef("arraystruct", "ARRAY<STRUCT<field1: INT, field2: INT>>", _ shouldBe a[Seq[_]]),
SparkSQLColDef("arraystructwithdate", "ARRAY<STRUCT<field1: DATE, field2: INT>>", _ shouldBe a[Seq[_]]),
SparkSQLColDef("structofstruct", "STRUCT<field1: DATE, field2: INT, struct1: STRUCT<structField1: STRING, structField2: INT>>", _ shouldBe a[Row]),
SparkSQLColDef("mapstruct", "MAP<STRING, STRUCT<structField1: DATE, structField2: INT>>", _ shouldBe a[Map[_,_]]),
SparkSQLColDef(
"arraystructarraystruct",
"ARRAY<STRUCT<stringfield: STRING, arrayfield: ARRAY<STRUCT<field1: INT, field2: INT>>>>",
{ res =>
res shouldBe a[Seq[_]]
res.asInstanceOf[Seq[_]].head shouldBe a[Row]
res.asInstanceOf[Seq[_]].head.asInstanceOf[Row].get(1) shouldBe a[Seq[_]]
res.asInstanceOf[Seq[_]].head.asInstanceOf[Row].get(1).asInstanceOf[Seq[_]].head shouldBe a[Row]
}
)
)
override def sparkRegisterTableSQL: Seq[SparkTable] = super.sparkRegisterTableSQL :+ {
val fields = (sparkAdditionalKeyColumns ++ typesSet) map {
case SparkSQLColDef(name, tpe, _) => s"$name $tpe"
} mkString ", "
SparkTable(s"CREATE TEMPORARY TABLE $dataTypesTableName ( $fields )", dataTypesSparkOptions)
}
}
object SharedXDContextTypesTest {
val dataTypesTableName = "typesCheckTable"
case class SparkSQLColDef(colname: String, sqlType: String, typeCheck: Option[Any => Unit] = None)
object SparkSQLColDef {
def apply(colname: String, sqlType: String, typeCheck: Any => Unit): SparkSQLColDef =
SparkSQLColDef(colname, sqlType, Some(typeCheck))
}
}
| darroyocazorla/crossdata | core/src/test/scala/org/apache/spark/sql/crossdata/test/SharedXDContextTypesTest.scala | Scala | apache-2.0 | 9,164 |
package com.twitter.finagle.mysql.transport
import com.twitter.finagle.client.Transporter
import com.twitter.finagle.framer.LengthFieldFramer
import com.twitter.finagle.netty3.Netty3Transporter
import com.twitter.finagle.netty4.Netty4Transporter
import com.twitter.finagle.Stack
import com.twitter.finagle.transport.Transport
import com.twitter.util.Future
import java.net.SocketAddress
/**
* Responsible for the transport layer plumbing required to produce
* a Transporter[Packet, Packet]. The current default is Netty3.
* TODO(jparker): Convert this to Transporter[Buf, Buf] and adjust accordingly.
*/
object TransportImpl {
val Netty3: TransportImpl = TransportImpl(params => Netty3Transporter(MysqlClientPipelineFactory, params))
val Netty4: TransportImpl = TransportImpl { params =>
new Transporter[Packet, Packet] {
private[this] val bufTransporter = Netty4Transporter(Some(framerFactory), params)
def apply(addr: SocketAddress): Future[Transport[Packet, Packet]] = {
bufTransporter(addr).map { bufTransport =>
bufTransport.map(_.toBuf, Packet.fromBuf)
}
}
}
}
implicit val param: Stack.Param[TransportImpl] = Stack.Param(Netty3)
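// The framer below relies on the MySQL wire format: each packet starts with a 3-byte
// little-endian payload length followed by a 1-byte sequence id, hence lengthFieldLength = 3,
// bigEndian = false and the adjustment by Packet.HeaderSize.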
private val framerFactory = () => {
new LengthFieldFramer(
lengthFieldBegin = 0,
lengthFieldLength = 3,
lengthAdjust = Packet.HeaderSize, // Packet size field doesn't include the header size.
maxFrameLength = Packet.HeaderSize + Packet.MaxBodySize,
bigEndian = false
)
}
}
case class TransportImpl(transporter: Stack.Params => Transporter[Packet, Packet]) {
def mk(): (TransportImpl, Stack.Param[TransportImpl]) = {
(this, TransportImpl.param)
}
}
| adriancole/finagle | finagle-mysql/src/main/scala/com/twitter/finagle/mysql/transport/TransportImpl.scala | Scala | apache-2.0 | 1,703 |
package freecli
package option
package dsl
import cats.free.FreeApplicative
import shapeless._
import shapeless.ops.hlist.Prepend
import core.api.{CanProduce, Description}
import option.api._
case class HelpDslBuilder[H <: HList](list: H) {
def --(
name: String)
(implicit ev: Prepend[H, OptionFieldName :: HNil],
ev2: NotContainsConstraint[H, OptionFieldName]) =
new HelpDslBuilder(list :+ OptionFieldName(name))
def -(
abbr: Char)
(implicit ev: Prepend[H, OptionFieldAbbreviation :: HNil],
ev2: NotContainsConstraint[H, OptionFieldAbbreviation]) =
new HelpDslBuilder(list :+ OptionFieldAbbreviation(abbr))
def -~(
description: Description)
(implicit ev: Prepend[H, Description :: HNil],
ev2: NotContainsConstraint[H, Description]) =
new HelpDslBuilder(list :+ description)
}
object HelpDslBuilder {
def help: HelpDslBuilder[HNil] =
new HelpDslBuilder[HNil](HNil)
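// A minimal usage sketch (the option name, abbreviation and description below are made up,
// and it is assumed that Description simply wraps a plain string):
//
// help -- "version" - 'v' -~ Description("Prints version information")
//
// builds up the HList carrying the option's name, abbreviation and description.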
implicit def canProduceOptionDsl[H <: HList](
implicit canProduceField: CanProduce.Aux[H, (OptionField, HNil)]):
CanProduce.Aux[HelpDslBuilder[H], OptionDsl[HNil]] = {
new CanProduce[HelpDslBuilder[H]] {
type Out = OptionDsl[HNil]
def apply(t: HelpDslBuilder[H]): Out = {
val (field, _) = canProduceField.apply(t.list)
FreeApplicative.lift(Help[HNil](field, identity))
}
}
}
} | pavlosgi/freecli | core/src/main/scala/freecli/option/dsl/HelpDslBuilder.scala | Scala | apache-2.0 | 1,364 |
package ee.cone.c4actor
import ee.cone.c4actor.Types.{ClName, SrcId}
import ee.cone.c4assemble.{AssembledKey, IndexUtil}
trait DefaultKeyFactoryApp {
def indexUtil: IndexUtil
def byPKKeyFactory: KeyFactory = DefaultKeyFactory(indexUtil)()
def origKeyFactoryOpt: Option[KeyFactory] = None
}
case class DefaultKeyFactory(composes: IndexUtil)(
srcIdAlias: String = "SrcId",
srcIdClass: ClName = classOf[SrcId].getName
) extends KeyFactory {
def rawKey(className: String): AssembledKey =
composes.joinKey(was = false, srcIdAlias, srcIdClass, className)
}
| wregs/c4proto | c4actor-base/src/main/scala/ee/cone/c4actor/OrigKeyFactoryApp.scala | Scala | apache-2.0 | 571 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.plugin.wcs
import java.util.{HashMap => JMap}
import org.apache.wicket.markup.html.form.validation.IFormValidator
import org.apache.wicket.markup.html.form.{Form, FormComponent}
import org.apache.wicket.model.PropertyModel
import org.geoserver.catalog.CoverageStoreInfo
import org.geotools.data.DataAccessFactory.Param
import org.locationtech.geomesa.plugin.GeoMesaStoreEditPanel
import org.locationtech.geomesa.plugin.wcs.GeoMesaCoverageReader.FORMAT
class GeoMesaCoverageStoreEditPanel(componentId: String, storeEditForm: Form[_])
extends GeoMesaStoreEditPanel(componentId, storeEditForm) {
val model = storeEditForm.getModel
setDefaultModel(model)
val storeInfo = storeEditForm.getModelObject.asInstanceOf[CoverageStoreInfo]
storeInfo.getConnectionParameters.putAll(parseConnectionParametersFromURL(storeInfo.getURL))
val paramsModel = new PropertyModel(model, "connectionParameters")
val instanceId = addTextPanel(paramsModel, new Param("instanceId", classOf[String], "The Accumulo Instance ID", true))
val zookeepers = addTextPanel(paramsModel, new Param("zookeepers", classOf[String], "Zookeepers", true))
val user = addTextPanel(paramsModel, new Param("user", classOf[String], "User", true))
val password = addPasswordPanel(paramsModel, new Param("password", classOf[String], "Password", true))
val auths = addTextPanel(paramsModel, new Param("auths", classOf[String], "Authorizations", false))
val visibilities = addTextPanel(paramsModel, new Param("visibilities", classOf[String], "Visibilities", false))
val tableName = addTextPanel(paramsModel, new Param("tableName", classOf[String], "The Accumulo Table Name", true))
val collectStats = addCheckBoxPanel(paramsModel, new Param("collectStats", classOf[String], "Collect Stats", false))
val dependentFormComponents = Array[FormComponent[_]](instanceId, zookeepers, user, password, auths, visibilities, tableName)
dependentFormComponents.map(_.setOutputMarkupId(true))
storeEditForm.add(new IFormValidator() {
def getDependentFormComponents = dependentFormComponents
def validate(form: Form[_]) {
val storeInfo = form.getModelObject.asInstanceOf[CoverageStoreInfo]
val sb = StringBuilder.newBuilder
sb.append("accumulo://").append(user.getValue)
.append(":").append(password.getValue)
.append("@").append(instanceId.getValue)
.append("/").append(tableName.getValue)
.append("#zookeepers=").append(zookeepers.getValue)
.append("#auths=").append(auths.getValue)
.append("#visibilities=").append(visibilities.getValue)
.append("#collectStats=").append(collectStats.getValue)
storeInfo.setURL(sb.toString())
}
})
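// The URL built above (and parsed back below) has the following shape, shown here with
// placeholder values:
// accumulo://user:password@instanceId/tableName#zookeepers=zoo1,zoo2#auths=A,B#visibilities=V#collectStats=true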
def parseConnectionParametersFromURL(url: String): JMap[String, String] = {
val params = new JMap[String, String]
if (url != null && url.startsWith("accumulo:")) {
val FORMAT(user, password, instanceId, table, zookeepers, auths, visibilities, collectStats) = url
params.put("user", user)
params.put("password", password)
params.put("instanceId", instanceId)
params.put("tableName", table)
params.put("zookeepers", zookeepers)
params.put("auths", auths)
params.put("visibilities", visibilities)
params.put("collectStats", collectStats)
}
params
}
}
| drackaer/geomesa | geomesa-plugin/src/main/scala/org/locationtech/geomesa/plugin/wcs/GeoMesaCoverageStoreEditPanel.scala | Scala | apache-2.0 | 3,820 |
/*
* Sonar Scoverage Plugin
* Copyright (C) 2013 Rado Buransky
* [email protected]
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package com.buransky.plugins.scoverage.language
import org.sonar.api.resources.AbstractLanguage
/**
* Scala language.
*
* @author Rado Buransky
*/
class Scala extends AbstractLanguage(Scala.key, Scala.name) {
val getFileSuffixes = Array(Scala.fileExtension)
}
object Scala {
val key = "scala"
val name = "Scala"
val fileExtension = "scala"
} | Sagacify/sonar-scala | src/main/scala/com/buransky/plugins/scoverage/language/Scala.scala | Scala | lgpl-3.0 | 1,180 |
package exceptions
class BadIntException(val msg: String) extends RuntimeException(msg)
| j-c-w/mlc | src/main/scala/exceptions/BadIntException.scala | Scala | gpl-3.0 | 89 |
package com.felixmilea.vorbit.actors
import com.mysql.jdbc.exceptions.jdbc4.MySQLTransactionRollbackException
import com.felixmilea.vorbit.data.DBConnection
class NgramProcessor extends ManagedActor {
import NgramProcessor._
private[this] lazy val db = new DBConnection(true)
private[this] lazy val addBigramsProc = db.conn.prepareCall("{CALL record_ngram(?,?)}")
def doReceive = {
case TextUnits(units) => {
for (u <- 0 until units.length) {
for (n <- minLevel to maxLevel) {
if (u + n <= units.length) {
var successful = false
do {
try {
addBigramsProc.setInt(1, n)
addBigramsProc.setString(2, units.slice(u, u + n).mkString(","))
addBigramsProc.execute()
db.conn.commit()
successful = true
} catch {
case tre: MySQLTransactionRollbackException => {
Warning("insert deadlock..will try again in 1000ms")
successful = false
Thread.sleep(1000)
}
}
} while (!successful)
}
}
}
}
}
}
object NgramProcessor {
val minLevel = 2
val maxLevel = 4
case class TextUnits(units: Seq[Int])
} | felixmc/Felix-Milea-Ciobanu-Vorbit | code/com/felixmilea/vorbit/actors/NgramProcessor.scala | Scala | mit | 1,301 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package viper.silicon.interfaces
import viper.silver.ast
import viper.silver.components.StatefulComponent
import viper.silicon.Set
import viper.silicon.state.terms.Sort
trait PreambleEmitter extends StatefulComponent {
def analyze(program: ast.Program)
def sorts: Set[Sort]
def declareSorts()
def declareSymbols()
def emitAxioms()
}
| sccblom/vercors | viper/silicon/src/main/scala/interfaces/Preamble.scala | Scala | mpl-2.0 | 555 |
package org.jetbrains.plugins.scala
package components
import java.awt.event.MouseEvent
import com.intellij.openapi.actionSystem.ActionManager
import com.intellij.openapi.keymap.KeymapUtil
import com.intellij.openapi.project.Project
import com.intellij.openapi.wm.StatusBarWidget.WidgetPresentation
import com.intellij.openapi.wm.impl.status.widget.StatusBarWidgetsManager
import com.intellij.openapi.wm.{StatusBar, StatusBarWidget, StatusBarWidgetFactory}
import com.intellij.util.Consumer
import javax.swing.{Icon, Timer}
import org.jetbrains.annotations.Nullable
import org.jetbrains.plugins.scala.actions.ToggleTypeAwareHighlightingAction
import org.jetbrains.plugins.scala.components.TypeAwareWidgetFactory.Widget
import org.jetbrains.plugins.scala.extensions.invokeLater
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.plugins.scala.project.ProjectExt
import org.jetbrains.plugins.scala.settings.ScalaProjectSettings
class TypeAwareWidgetFactory extends StatusBarWidgetFactory {
override def getId: String = "TypeAwareHighlighting"
override def getDisplayName: String = ScalaBundle.message("scala.type.aware.highlighting.indicator")
override def isAvailable(project: Project): Boolean = project.isOpen && project.hasScala
override def createWidget(project: Project): StatusBarWidget = new Widget(project, this)
override def disposeWidget(widget: StatusBarWidget): Unit = ()
override def canBeEnabledOn(statusBar: StatusBar): Boolean = isAvailable(statusBar.getProject)
}
object TypeAwareWidgetFactory {
private class Widget(project: Project, factory: TypeAwareWidgetFactory) extends StatusBarWidget with StatusBarWidget.IconPresentation {
private val statusBarWidgetsManager = project.getService(classOf[StatusBarWidgetsManager])
private var statusBar = Option.empty[StatusBar]
private def isEnabled = ScalaProjectSettings.getInstance(project).isTypeAwareHighlightingEnabled
private val myTimer = new Timer(1000, _ => {
invokeLater(updateWidget())
})
override def ID: String = "TypeAwareHighlighting"
override def getPresentation: WidgetPresentation = this
override def install(statusBar: StatusBar): Unit = {
this.statusBar = Some(statusBar)
myTimer.setRepeats(true)
myTimer.start()
subscribeToRootsChange()
}
override def dispose(): Unit = {
myTimer.stop()
statusBar = None
}
@Nullable
override def getIcon: Icon =
if (isEnabled) Icons.TYPED else Icons.UNTYPED
override def getTooltipText: String = {
val title = ScalaBundle.message("type.aware.highlighting.title")
val toChange = shortcutText match {
case Some(text) => ScalaBundle.message("click.or.press.shortcut.to.change", text)
case None => ScalaBundle.message("click.to.change")
}
val status = if (isEnabled) ScalaBundle.message("enabled.word") else ScalaBundle.message("disabled.word")
//noinspection ScalaExtractStringToBundle
s"$title: $status $toChange"
}
override def getClickConsumer: Consumer[MouseEvent] = _ => {
ToggleTypeAwareHighlightingAction.toggleSettingAndRehighlight(project)
updateWidget()
}
private def shortcutText: Option[String] = {
val action = ActionManager.getInstance().getAction("Scala.EnableErrors")
action.getShortcutSet.getShortcuts.headOption.map(KeymapUtil.getShortcutText)
}
private def updateWidget(): Unit = {
statusBarWidgetsManager.updateWidget(factory)
statusBar.foreach(_.updateWidget(ID))
}
private def subscribeToRootsChange(): Unit = {
project.subscribeToModuleRootChanged() { _ =>
invokeLater {
updateWidget()
}
}
}
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/components/TypeAwareWidgetFactory.scala | Scala | apache-2.0 | 3,752 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.Desktop
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 07/11/17.
*/
/**
* Desktop Repository
* @param session
* @param executionContext
*/
class DesktopRepository (session: JdbcSession)(implicit executionContext: ExecutionContext)
extends api.repository.DesktopRepository[Desktop , Int]
with DesktopMapping {
def getById(id: Int): Future[Desktop] = {
Future(run(queryDesktop.filter(_.desktopId == lift(id))).headOption.get)
}
def getByUUID(uuid: UUID): Future[Desktop] = {
Future(run(queryDesktop.filter(_.uuid == lift(uuid.toString))).headOption.get)
}
def getByDesktopId(id : Int) : Future[List[Desktop]] = {
Future(run(queryDesktop))
}
def getAll() : Future[List[Desktop]] = {
Future(run(queryDesktop))
}
def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[Desktop]] = {
val offset = page * pageSize
val limit = (page + 1) * pageSize
for {
count <- countDesktop()
elements <- if (offset > count) Future.successful(Nil)
else selectDesktop(offset, limit)
} yield {
PaginatedSequence(elements, page, pageSize, count)
}
}
private def countDesktop() = {
Future(run(queryDesktop.size).toInt)
}
private def selectDesktop(offset: Int, limit: Int): Future[Seq[Desktop]] = {
Future(run(queryDesktop).slice(offset, limit).toSeq)
}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/DesktopRepository.scala | Scala | gpl-3.0 | 2,669 |
/*
* Copyright 2017 Sumo Logic
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ws.epigraph.java.service
import ws.epigraph.java.JavaGenNames.{lqbct, lqbrn, lqdrn2}
import ws.epigraph.java.NewlineStringInterpolator.NewlineHelper
import ws.epigraph.java.{GenContext, JavaGenUtils, ObjectGenContext}
import ws.epigraph.lang.Qn
import ws.epigraph.schema.ResourceDeclaration
import ws.epigraph.schema.operations.ReadOperationDeclaration
/**
* @author <a href="mailto:[email protected]">Konstantin Sobolev</a>
*/
class AbstractReadOperationGen(
val baseNamespace: Qn,
val rd: ResourceDeclaration,
val op: ReadOperationDeclaration,
val ctx: GenContext) extends AbstractOperationGen {
override protected def generate: String = {
val sctx = new ObjectGenContext(ctx, namespace)
val outputType = JavaGenUtils.toCType(op.outputType())
val nsString = namespace.toString
val resultBuilderCtor = lqbct(outputType, nsString)
val rreq = sctx.use("ws.epigraph.service.operations.ReadOperationRequest")
val rresp = sctx.use("ws.epigraph.service.operations.ReadOperationResponse")
val cfut = sctx.use("java.util.concurrent.CompletableFuture")
val notnull = sctx.use("org.jetbrains.annotations.NotNull")
val outputShortName = sctx.use(outputFieldProjectionGen.fullClassName)
val shortDataType = sctx.use(lqdrn2(outputType, nsString))
val shortBuilderType = sctx.use(lqbrn(outputType, nsString))
pathProjectionGenOpt match {
case Some(pathProjectionGen) =>
val pathShortName = sctx.use(pathProjectionGen.fullClassName)
sctx.addMethod(/*@formatter:off*/sn"""\\
@Override
public @$notnull $cfut<$rresp<$shortDataType>> process(@$notnull $rreq request) {
$shortBuilderType builder = $resultBuilderCtor;
$pathShortName path = new $pathShortName(request.path());
$outputShortName projection = new $outputShortName(request.outputProjection());
return process(builder, path, projection).thenApply($rresp::new);
}
"""/*@formatter:off*/
)
sctx.addMethod(/*@formatter:off*/sn"""\\
/**
* Process read request
*
* @param resultBuilder result builder, initially empty
* @param path request path
* @param projection request projection
*
* @return future of the result
*/
protected abstract @$notnull $cfut<$shortDataType> process(@$notnull $shortBuilderType resultBuilder, @$notnull $pathShortName path, @$notnull $outputShortName projection);
"""/*@formatter:off*/
)
case None =>
sctx.addMethod(/*@formatter:off*/sn"""\\
@Override
public @$notnull $cfut<$rresp<$shortDataType>> process(@$notnull final $rreq request) {
$shortBuilderType builder = $resultBuilderCtor;
$outputShortName projection = new $outputShortName(request.outputProjection());
return process(builder, projection).thenApply($rresp::new);
}
"""/*@formatter:off*/
)
sctx.addMethod(/*@formatter:off*/sn"""\\
/**
* Process read request
*
* @param resultBuilder result builder, initially empty
* @param projection request projection
*
* @return future of the result
*/
protected abstract @$notnull $cfut<$shortDataType> process(@$notnull $shortBuilderType resultBuilder, @$notnull $outputShortName projection);
"""/*@formatter:off*/
)
}
generate(sctx)
}
}
| SumoLogic/epigraph | java/codegen/src/main/scala/ws/epigraph/java/service/AbstractReadOperationGen.scala | Scala | apache-2.0 | 3,811 |
package de.fosd.typechef
import org.junit.Test
import de.fosd.typechef.featureexpr.FeatureExprFactory
import de.fosd.typechef.parser.c._
import de.fosd.typechef.conditional.Opt
import FeatureExprFactory._
import java.io.File
class SerializationTest {
@Test
def testSerializationBDD() {
FeatureExprFactory.setDefault(FeatureExprFactory.bdd)
val fa = FeatureExprFactory.createDefinedExternal("a")
val fb = FeatureExprFactory.createDefinedExternal("b")
val ast = TranslationUnit(List(
Opt(True, EmptyExternalDef()),
Opt(fa and fb, EmptyExternalDef())
))
val file = File.createTempFile("serialize", "ast")
file.deleteOnExit()
Frontend.serializeAST(ast, file.getAbsolutePath)
val newAST = Frontend.loadSerializedAST(file.getAbsolutePath)
assert(ast == newAST)
}
@Test
def testSerializationSAT() {
FeatureExprFactory.setDefault(FeatureExprFactory.sat)
val fa = FeatureExprFactory.createDefinedExternal("a")
val fb = FeatureExprFactory.createDefinedExternal("b")
val ast = TranslationUnit(List(
Opt(True, EmptyExternalDef()),
Opt(fa and fb, EmptyExternalDef())
))
val file = File.createTempFile("serialize", "ast")
file.deleteOnExit()
Frontend.serializeAST(ast, file.getAbsolutePath)
val newAST = Frontend.loadSerializedAST(file.getAbsolutePath)
assert(ast == newAST)
}
// @Test
// def testSerializationMixed () {
// FeatureExprFactory.setDefault(FeatureExprFactory.sat)
//
// val fa=FeatureExprFactory.createDefinedExternal("a")
// val fb=FeatureExprFactory.createDefinedExternal("b")
//
// val ast = TranslationUnit(List(
// Opt(True, EmptyExternalDef()),
// Opt(fa and fb, EmptyExternalDef())
// ))
//
// val file=File.createTempFile("serialize","ast")
// file.deleteOnExit()
//
// Frontend.serializeAST(ast, file.getAbsolutePath)
//
// FeatureExprFactory.setDefault(FeatureExprFactory.bdd)
//
// val newAST = Frontend.loadSerializedAST(file.getAbsolutePath)
//
// assert(ast==newAST)
// }
}
| mbeddr/TypeChef | Frontend/src/test/scala/de/fosd/typechef/SerializationTest.scala | Scala | lgpl-3.0 | 2,341 |
import java.net.URL
import bintry.Client
import dispatch.{FunctionHandler, Http}
import org.apache.ivy.core.module.descriptor.Artifact
import org.apache.ivy.plugins.repository.{AbstractRepository, Repository}
import org.apache.ivy.plugins.resolver.IBiblioResolver
import sbt.{RawRepository, Resolver, _}
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
object bintray {
case class BintrayMavenRepository(
underlying: Repository,
bty: Client#Repo#Package,
release: Boolean)
extends AbstractRepository {
def asStatusAndBody =
new FunctionHandler({ r => (r.getStatusCode, r.getResponseBody)})
override def put(artifact: Artifact, src: File, dest: String, overwrite: Boolean): Unit =
Await.result(
bty.mvnUpload(transform(dest), src).publish(release)(asStatusAndBody),
Duration.Inf) match {
case (201, _) =>
case (_, fail) =>
println(fail)
throw new RuntimeException(s"error uploading to $dest: $fail")
}
def getResource(src: String) = underlying.getResource(src)
def get(src: String, dest: File) = underlying.get(src, dest)
def list(parent: String) = underlying.list(parent)
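// dest is an absolute URL under the bintray maven root configured below, i.e. its path
// starts with /maven/<subject>/<repo>/<repo>/ followed by the usual groupId/artifactId/version
// layout; dropping the first five segments (the empty leading segment plus that prefix)
// leaves just the Maven-relative path expected by the upload call.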
private def transform(dest: String) =
new URL(dest).getPath.split('/').drop(5).mkString("/")
}
case class BintrayMavenResolver(
name: String,
rootURL: String,
bty: Client#Repo#Package,
release: Boolean)
extends IBiblioResolver {
setName(name)
setM2compatible(true)
setRoot(rootURL)
override def setRepository(repository: Repository): Unit =
super.setRepository(BintrayMavenRepository(repository, bty, release))
}
def getPublishResolver(user: String, apiKey: String, packageName: String, isRelease: Boolean): Option[Resolver] =
{
val repo: Client#Repo = Client(user, apiKey, new Http()).repo(user, "maven")
val pkg = repo.get(packageName)
Some(new RawRepository(
BintrayMavenResolver(s"Bintray-Maven-Publish-${repo.subject}-${repo.repo}-${pkg.name}",
s"https://api.bintray.com/maven/${repo.subject}/${repo.repo}/${repo.repo}", pkg, isRelease)))
}
} | shnapz/service | project/bintray.scala | Scala | apache-2.0 | 2,430 |
package models
import skinny.DBSettings
import skinny.test._
import org.scalatest.fixture.FlatSpec
import org.scalatest._
import scalikejdbc._
import scalikejdbc.scalatest._
import org.joda.time._
class ArticlesSpec extends FlatSpec with Matchers with DBSettings with AutoRollback {
}
| yamitzky/nonplay-play | test/models/ArticlesSpec.scala | Scala | cc0-1.0 | 287 |
package uk.gov.dvla.vehicles.presentation.common.clientsidesession
case class CookieKeyValue(key: String, value: String)
| dvla/vehicles-presentation-common | app/uk/gov/dvla/vehicles/presentation/common/clientsidesession/CookieKeyValue.scala | Scala | mit | 122 |
package com.mesosphere.universe.v2.model
import io.lemonlabs.uri.Uri
import io.circe.Decoder
import io.circe.Encoder
import io.circe.JsonObject
import io.circe.generic.semiauto.deriveDecoder
import io.circe.generic.semiauto.deriveEncoder
import com.mesosphere.cosmos.circe.Encoders.encodeUri
import com.mesosphere.cosmos.circe.Decoders.decodeUri
case class PackageFiles(
revision: String,
sourceUri: Uri,
packageJson: PackageDetails,
marathonJsonMustache: String,
commandJson: Option[Command] = None,
configJson: Option[JsonObject] = None,
resourceJson: Option[Resource] = None
)
object PackageFiles {
implicit val encodeV2PackageFiles: Encoder[PackageFiles] = deriveEncoder[PackageFiles]
implicit val decodeV2PackageFiles: Decoder[PackageFiles] = deriveDecoder[PackageFiles]
}
| dcos/cosmos | cosmos-common/src/main/scala/com/mesosphere/universe/v2/model/PackageFiles.scala | Scala | apache-2.0 | 799 |
package lists
import java.util._
object test {
def singleton[T](elem: T) = new Cons[T](elem, new Nil[T])
//> singleton: [T](elem: T)lists.Cons[T]
def nth[T](n: Int, xs: List[T]): T =
if (xs.isEmpty) throw new IndexOutOfBoundsException
else if(n == 0) xs.head
else nth(n-1, xs.tail) //> nth: [T](n: Int, xs: lists.List[T])T
val list = new Cons(1, new Cons(2, new Cons(3, new Nil)))
//> list : lists.Cons[Int] = lists.Cons@46727249
nth(1,list) //> res0: Int = 2
}
object List {
def apply[T](): List[T] = new Nil[T]
def apply[T](x1: T): List[T] = new Cons(x1, new Nil)
def apply[T](x1: T, x2: T): List[T] = new Cons(x1, new Cons(x2, new Nil))
}
trait List[T] {
def isEmpty: Boolean
def head: T
def tail: List[T]
}
class Cons[T](val head: T,val tail: List[T]) extends List[T] {
def isEmpty = false
}
class Nil[T] extends List[T] {
def isEmpty: Boolean = true
def head: Nothing = throw new NoSuchElementException("Nil.head")
def tail: Nothing = throw new NoSuchElementException("Nil.tail")
}
| akxs14/TinyWeb | Classes/type_parameters/TypeParameters.scala | Scala | apache-2.0 | 1,167 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.feature.dataset
import com.intel.analytics.bigdl.dllib.feature.dataset.image.LabeledBGRImage
import org.scalatest.{FlatSpec, Matchers}
@com.intel.analytics.bigdl.tags.Parallel
class ImageSpec extends FlatSpec with Matchers {
"image with odd width" should "flip good" in {
val image = new LabeledBGRImage(
Array[Float](
1, 2, 3, 4, 5, 6, 7, 8, 9,
11, 12, 13, 14, 15, 16, 17, 18, 19,
21, 22, 23, 24, 25, 26, 27, 28, 29
),
3, 3, 1)
image.hflip()
val flippedData = Array[Float](
7, 8, 9, 4, 5, 6, 1, 2, 3,
17, 18, 19, 14, 15, 16, 11, 12, 13,
27, 28, 29, 24, 25, 26, 21, 22, 23
)
image.content should be(flippedData)
}
"image with even width" should "flip good" in {
val image = new LabeledBGRImage(
Array[Float](
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32
),
4, 3, 1)
image.hflip()
val flippedData = Array[Float](
10, 11, 12, 7, 8, 9, 4, 5, 6, 1, 2, 3,
20, 21, 22, 17, 18, 19, 14, 15, 16, 11, 12, 13,
30, 31, 32, 27, 28, 29, 24, 25, 26, 21, 22, 23
)
image.content should be(flippedData)
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/dataset/ImageSpec.scala | Scala | apache-2.0 | 1,896 |
package au.com.dius.pact.matchers
import java.text.ParseException
import com.typesafe.scalalogging.StrictLogging
import io.gatling.jsonpath.AST._
import io.gatling.jsonpath.Parser
import org.apache.commons.lang3.time.{DateFormatUtils, DateUtils}
import scala.xml.Elem
object Matchers extends StrictLogging {
def matchesToken(pathElement: String, token: PathToken) = token match {
case RootNode => if (pathElement == "$") 2 else 0
case Field(name) => if (pathElement == name) 2 else 0
case ArrayRandomAccess(indices) => if (pathElement.matches("\\d+") && indices.contains(pathElement.toInt)) 2 else 0
case ArraySlice(None, None, 1) => if (pathElement.matches("\\d+")) 1 else 0
case AnyField => 1
case _ => 0
}
def matchesPath(pathExp: String, path: Seq[String]) =
new Parser().compile(pathExp) match {
case Parser.Success(q, _) =>
val filter = path.reverse.tails.filter(l =>
l.reverse.corresponds(q)((pathElement, pathToken) => matchesToken(pathElement, pathToken) != 0))
if (filter.nonEmpty) {
filter.maxBy(seq => seq.length).length
} else {
0
}
case ns: Parser.NoSuccess =>
logger.warn(s"Path expression $pathExp is invalid, ignoring: $ns")
0
}
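// For example, matchesPath("$.animals[*].name", Seq("$", "animals", "0", "name")) returns 4
// (the number of matched path elements): the RootNode, Field and ArraySlice tokens each
// match the corresponding element of the path.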
def calculatePathWeight(pathExp: String, path: Seq[String]) = {
new Parser().compile(pathExp) match {
case Parser.Success(q, _) =>
path.zip(q).map(entry => matchesToken(entry._1, entry._2)).product
case ns: Parser.NoSuccess =>
logger.warn(s"Path expression $pathExp is invalid, ignoring: $ns")
0
}
}
def resolveMatchers(matchers: Map[String, Map[String, Any]], path: Seq[String]) =
matchers.filterKeys(p => matchesPath(p, path) > 0)
def matcherDefined(path: Seq[String], matchers: Option[Map[String, Map[String, Any]]]): Boolean =
matchers.isDefined && resolveMatchers(matchers.get, path).nonEmpty
def wildcardMatcherDefined(path: Seq[String], matchers: Option[Map[String, Map[String, Any]]]): Boolean = {
matchers match {
case Some(m) => {
val resolvedMatchers = m.filterKeys(p => matchesPath(p, path) == path.length)
resolvedMatchers.exists(entry => entry._1.endsWith(".*"))
}
case None => false
}
}
def domatch[Mismatch](matchers: Option[Map[String, Map[String, Any]]], path: Seq[String], expected: Any, actual: Any,
mismatchFn: MismatchFactory[Mismatch]) : List[Mismatch] = {
val matcherDef = selectBestMatcher(matchers, path)
matcherDef match {
case map: Map[String, Any] => matcher(map).domatch[Mismatch](map, path, expected, actual, mismatchFn)
case m =>
logger.warn(s"Matcher $m is mis-configured, defaulting to equality matching")
EqualsMatcher.domatch[Mismatch](Map[String, String](), path, expected, actual, mismatchFn)
}
}
def selectBestMatcher[Mismatch](matchers: Option[Map[String, Map[String, Any]]], path: Seq[String]): Map[String, Any] = {
resolveMatchers(matchers.get, path).maxBy(entry => calculatePathWeight(entry._1, path))._2
}
def matcher(matcherDef: Map[String, Any]) : Matcher = {
if (matcherDef.isEmpty) {
logger.warn(s"Unrecognised empty matcher, defaulting to equality matching")
EqualsMatcher
} else if (matcherDef.contains("match")) {
matcherDef("match") match {
case "regex" => RegexpMatcher
case "type" =>
if (matcherDef.contains("min")) MinimumMatcher
else if (matcherDef.contains("max")) MaximumMatcher
else TypeMatcher
case "number" => TypeMatcher
case "integer" => TypeMatcher
case "real" => TypeMatcher
case "decimal" => TypeMatcher
case "timestamp" => TypeMatcher
case "time" => TimeMatcher
case "date" => DateMatcher
case "min" => MinimumMatcher
case "max" => MaximumMatcher
}
} else matcherDef.keys.head match {
case "regex" => RegexpMatcher
case "match" => TypeMatcher
case "timestamp" => TimestampMatcher
case "time" => TimeMatcher
case "date" => DateMatcher
case "min" => MinimumMatcher
case "max" => MaximumMatcher
case m =>
logger.warn(s"Unrecognised matcher $m, defaulting to equality matching")
EqualsMatcher
}
}
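// For example, Map("match" -> "regex", "regex" -> "\\w+") selects RegexpMatcher,
// Map("match" -> "type", "min" -> 2) selects MinimumMatcher, and a definition without a
// "match" key such as Map("max" -> 5) selects MaximumMatcher.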
def safeToString(value: Any) = {
if (value == null) ""
else value match {
case elem: Elem => elem.text
case _ => value.toString
}
}
}
object EqualsMatcher extends Matcher with StrictLogging {
def domatch[Mismatch](matcherDef: Map[String, Any], path: Seq[String], expected: Any, actual: Any,
mismatchFn: MismatchFactory[Mismatch]): List[Mismatch] = {
val matches: Boolean = Matchers.safeToString(actual).equals(expected)
logger.debug(s"comparing ${valueOf(actual)} to ${valueOf(expected)} at $path -> $matches")
if (matches) {
List[Mismatch]()
} else {
List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to equal ${valueOf(actual)}", path))
}
}
}
object RegexpMatcher extends Matcher with StrictLogging {
def domatch[Mismatch](matcherDef: Map[String, Any], path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]): List[Mismatch] = {
val regex = matcherDef.get("regex").get.toString
val matches: Boolean = Matchers.safeToString(actual).matches(regex)
logger.debug(s"comparing ${valueOf(actual)} with regexp $regex at $path -> $matches")
if (matches) {
List[Mismatch]()
} else {
List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to match '$regex'", path))
}
}
}
object TypeMatcher extends Matcher with StrictLogging {
def matchType[Mismatch](path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]) = {
logger.debug(s"comparing type of ${valueOf(actual)} to ${valueOf(expected)} at $path")
(actual, expected) match {
case (a: String, e: String) => List[Mismatch]()
case (a: Number, e: Number) => List[Mismatch]()
case (a: Boolean, e: Boolean) => List[Mismatch]()
case (a: List[_], e: List[_]) => List[Mismatch]()
case (a: Map[_, _], e: Map[_, _]) => List[Mismatch]()
case (a: Elem, e: Elem) if a.label == e.label => List[Mismatch]()
case (_, null) =>
if (actual == null) {
List[Mismatch]()
} else {
List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to be null", path))
}
case default => List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to be the same type as ${valueOf(expected)}", path))
}
}
def matchNumber[Mismatch](path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]) = {
logger.debug(s"comparing type of ${valueOf(actual)} to Number at $path")
(actual, expected) match {
case (a: Number, _) => List[Mismatch]()
case (_, null) =>
if (actual == null) {
List[Mismatch]()
} else {
List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to be null", path))
}
case default => List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to be a number", path))
}
}
def matchInteger[Mismatch](path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]) = {
logger.debug(s"comparing type of ${valueOf(actual)} to Integer at $path")
(actual, expected) match {
case (a: Integer, _) => List[Mismatch]()
case (a: Long, _) => List[Mismatch]()
case (a: BigInt, _) => List[Mismatch]()
case (_, null) =>
if (actual == null) {
List[Mismatch]()
} else {
List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to be null", path))
}
case default => List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to be an integer", path))
}
}
def matchDecimal[Mismatch](path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]) = {
logger.debug(s"comparing type of ${valueOf(actual)} to Real at $path")
(actual, expected) match {
case (_: Float, _) => List[Mismatch]()
case (_: Double, _) => List[Mismatch]()
case (_: BigDecimal, _) => List[Mismatch]()
case (_: java.math.BigDecimal, _) => List[Mismatch]()
case (_, null) =>
if (actual == null) {
List[Mismatch]()
} else {
List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to be null", path))
}
case default => List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to be a decimal number", path))
}
}
def matchTimestamp[Mismatch](path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]) = {
logger.debug(s"comparing ${valueOf(actual)} as Timestamp at $path")
try {
DateUtils.parseDate(Matchers.safeToString(actual), DateFormatUtils.ISO_DATETIME_TIME_ZONE_FORMAT.getPattern,
DateFormatUtils.ISO_DATETIME_FORMAT.getPattern, DateFormatUtils.SMTP_DATETIME_FORMAT.getPattern,
"yyyy-MM-dd HH:mm:ssZZ", "yyyy-MM-dd HH:mm:ss"
)
List[Mismatch]()
}
catch {
case e: java.text.ParseException =>
logger.warn(s"failed to parse timestamp value of ${valueOf(actual)}", e)
List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to be a timestamp", path))
}
}
def matchArray[Mismatch](path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch], matcher: String, args: List[String]) = {
matcher match {
case "atleast" => actual match {
case v: List[Any] =>
if (v.asInstanceOf[List[Any]].size < args.head.toInt) List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to have at least ${args.head} elements", path))
else List[Mismatch]()
case _ => List(mismatchFn.create(expected, actual, s"Array matcher $matcher can only be applied to arrays", path))
}
case _ => List(mismatchFn.create(expected, actual, s"Array matcher $matcher is not defined", path))
}
}
def domatch[Mismatch](matcherDef: Map[String, Any], path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]): List[Mismatch] = {
if (matcherDef.contains("match")) {
matcherDef.get("match").get.toString match {
case "type" => matchType[Mismatch](path, expected, actual, mismatchFn)
case "number" => matchNumber[Mismatch](path, expected, actual, mismatchFn)
case "integer" => matchInteger[Mismatch](path, expected, actual, mismatchFn)
case "decimal" => matchDecimal[Mismatch](path, expected, actual, mismatchFn)
case "real" => matchDecimal[Mismatch](path, expected, actual, mismatchFn)
case "timestamp" => matchTimestamp[Mismatch](path, expected, actual, mismatchFn)
case _ => List(mismatchFn.create(expected, actual, "type matcher is mis-configured", path))
}
} else {
logger.warn("Matcher definition does not contain a 'match' element, defaulting to type matching")
matchType[Mismatch](path, expected, actual, mismatchFn)
}
}
}
object TimestampMatcher extends Matcher with StrictLogging {
def domatch[Mismatch](matcherDef: Map[String, Any], path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]): List[Mismatch] = {
val pattern = matcherDef.get("timestamp").get.toString
logger.debug(s"comparing ${valueOf(actual)} to timestamp pattern $pattern at $path")
try {
DateUtils.parseDate(Matchers.safeToString(actual), pattern)
List[Mismatch]()
} catch {
case e: ParseException => List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to match a timestamp of '$pattern': ${e.getMessage}", path))
}
}
}
object TimeMatcher extends Matcher with StrictLogging {
def domatch[Mismatch](matcherDef: Map[String, Any], path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]): List[Mismatch] = {
val pattern = matcherDef.get("time").get.toString
logger.debug(s"comparing ${valueOf(actual)} to time pattern $pattern at $path")
try {
DateUtils.parseDate(Matchers.safeToString(actual), pattern)
List[Mismatch]()
} catch {
case e: ParseException => List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to match a time of '$pattern': ${e.getMessage}", path))
}
}
}
object DateMatcher extends Matcher with StrictLogging {
def domatch[Mismatch](matcherDef: Map[String, Any], path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]): List[Mismatch] = {
val pattern = matcherDef.get("date").get.toString
logger.debug(s"comparing ${valueOf(actual)} to date pattern $pattern at $path")
try {
DateUtils.parseDate(Matchers.safeToString(actual), pattern)
List[Mismatch]()
} catch {
case e: ParseException => List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to match a date of '$pattern': ${e.getMessage}", path))
}
}
}
object MinimumMatcher extends Matcher with StrictLogging {
def domatch[Mismatch](matcherDef: Map[String, Any], path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]): List[Mismatch] = {
val value = matcherDef.get("min").get match {
case i: Int => i
case j: Integer => j.toInt
case o => o.toString.toInt
}
logger.debug(s"comparing ${valueOf(actual)} with minimum $value at $path")
actual match {
case v: List[Any] =>
if (v.size < value) {
List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to have minimum $value", path))
} else {
List()
}
case v: Elem =>
if (v.child.size < value) {
List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to have minimum $value", path))
} else {
List()
}
case _ => TypeMatcher.domatch[Mismatch](matcherDef, path, expected, actual, mismatchFn)
}
}
}
object MaximumMatcher extends Matcher with StrictLogging {
def domatch[Mismatch](matcherDef: Map[String, Any], path: Seq[String], expected: Any, actual: Any, mismatchFn: MismatchFactory[Mismatch]): List[Mismatch] = {
val value = matcherDef.get("max").get match {
case i: Int => i
case j: Integer => j.toInt
case o => o.toString.toInt
}
logger.debug(s"comparing ${valueOf(actual)} with maximum $value at $path")
actual match {
case v: List[Any] =>
if (v.size > value) {
List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to have maximum $value", path))
} else {
List()
}
case v: Elem =>
if (v.child.size > value) {
List(mismatchFn.create(expected, actual, s"Expected ${valueOf(actual)} to have maximum $value", path))
} else {
List()
}
case _ => TypeMatcher.domatch[Mismatch](matcherDef, path, expected, actual, mismatchFn)
}
}
}
| flaregames/pact-jvm | pact-jvm-matchers/src/main/scala/au/com/dius/pact/matchers/Matchers.scala | Scala | apache-2.0 | 15,272 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.chart.model
import java.awt.Color
import java.io.FileNotFoundException
import java.util.concurrent.ConcurrentHashMap
import com.netflix.atlas.chart.Colors
import com.netflix.atlas.core.util.Strings
case class Palette(name: String, colors: Int => Color) {
def withAlpha(alpha: Int): Palette = {
def f(i: Int): Color = {
val c = colors(i)
new Color(c.getRed, c.getGreen, c.getBlue, alpha)
}
Palette(name, f)
}
def withVisionType(vision: VisionType): Palette = {
def f(i: Int): Color = {
val c = colors(i)
vision.convert(c)
}
Palette(s"${vision.name}_$name", f)
}
/**
* Convert colors from another palette into grayscale. For information about the conversion
* see: http://www.johndcook.com/blog/2009/08/24/algorithms-convert-color-grayscale/.
*/
def asGrayscale: Palette = {
def f(i: Int): Color = {
val c = colors(i)
val v = (0.21 * c.getRed + 0.72 * c.getGreen + 0.07 * c.getBlue).toInt
new Color(v, v, v, c.getAlpha)
}
Palette(s"grayscale_$name", f)
}
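  // Worked example (using the weights above): pure red (255, 0, 0) becomes
  // v = (0.21 * 255).toInt = 53, i.e. new Color(53, 53, 53) with the original alpha preserved.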
def iterator: Iterator[Color] = new Iterator[Color] {
private var pos = -1
override def hasNext: Boolean = true
override def next(): Color = {
pos += 1
colors(pos)
}
}
}
object Palette {
val default = fromArray(
"default",
Array[Color](
Color.RED,
Color.GREEN,
Color.BLUE,
Color.MAGENTA,
Color.YELLOW,
Color.CYAN,
Color.PINK,
Color.ORANGE
)
)
private val palettes = new ConcurrentHashMap[String, Palette]
/**
* Creates a palette instance from a description string. The description can be an explicit
* list of colors or the name of a palette file in the classpath. An explicit list is specified
* as an ASL list of colors. For example:
*
* ```
* (,f00,00ff00,000000ff,)
* ```
*
* The color values will be parsed using `Strings.parseColor`.
* Otherwise the description will be used to find a palette file in the classpath named
* `palettes/{desc}_palette.txt` that has one color per line.
*/
def create(desc: String): Palette = {
// `colors:` prefix is deprecated, use list variant that is consistent between
// the url parameter and expression
if (desc.startsWith("colors:"))
fromArray("colors", parseColors(desc.substring("colors:".length)))
else if (desc.startsWith("("))
fromArray("colors", parseColors(desc))
else
fromResource(desc)
}
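  // Example (sketch): both forms accepted by create, per the scaladoc above
  //   Palette.create("(,f00,00ff00,000000ff,)")  // explicit ASL color list
  //   Palette.create("armytage")                 // resolves palettes/armytage_palette.txt on the classpath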
private def parseColors(colorsString: String): Array[Color] = {
colorsString
.split(",")
.map(_.trim)
.filterNot(s => s.isEmpty || s == "(" || s == ")")
.map(Strings.parseColor)
}
def fromArray(name: String, colors: Array[Color]): Palette = {
require(colors.nonEmpty, "palette must contain at least one color")
Palette(name, i => colors(math.abs(i) % colors.length))
}
/**
* Create a palette from a file in the classpath named `palettes/{name}_palette.txt`. The
* file should have one color per line in a format supported by `Strings.parseColor`.
*/
def fromResource(name: String): Palette = {
palettes.computeIfAbsent(name, loadFromResource)
}
private def loadFromResource(name: String): Palette = {
try {
val colors = Colors.load(s"palettes/${name}_palette.txt").toArray
Palette.fromArray(name, colors)
} catch {
case _: FileNotFoundException =>
throw new IllegalArgumentException(s"invalid palette name: '$name'")
}
}
def singleColor(c: Color): Palette = {
Palette("%08X".format(c.getRGB), _ => c)
}
def brighter(c: Color, n: Int): Palette = {
val colors = new Array[Color](n)
colors(0) = c
(1 until n).foreach { i =>
colors(i) = colors(i - 1).brighter()
}
Palette.fromArray("brighter_%08X".format(c.getRGB), colors)
}
def darker(c: Color, n: Int): Palette = {
val colors = new Array[Color](n)
colors(0) = c
(1 until n).foreach { i =>
colors(i) = colors(i - 1).darker()
}
Palette.fromArray("darker_%08X".format(c.getRGB), colors)
}
def main(args: Array[String]): Unit = {
val p = fromResource("armytage").asGrayscale
(0 until 26).foreach { i =>
println("%08X".format(p.colors(i).getRGB))
}
}
}
| brharrington/atlas | atlas-chart/src/main/scala/com/netflix/atlas/chart/model/Palette.scala | Scala | apache-2.0 | 4,933 |
/*
* Copyright 2010 Michael Fortin <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package org.brzy.webapp
import javax.servlet.{FilterChain, FilterConfig, ServletResponse, ServletRequest, Filter => SFilter}
import javax.servlet.http.{HttpServletResponse, HttpServletRequest}
import org.slf4j.LoggerFactory
import org.brzy.webapp.application.WebApp
/**
* Forwards only requests to brzy actions, lets all other requests pass through.
*
* @author Michael Fortin
*/
class BrzyFilter extends SFilter {
private[this] val log = LoggerFactory.getLogger(getClass)
def init(config: FilterConfig) {
log.debug("Init Filter: {}", config)
}
def doFilter(req: ServletRequest, res: ServletResponse, chain: FilterChain) {
try {
val request = req.asInstanceOf[HttpServletRequest]
val response = res.asInstanceOf[HttpServletResponse]
val webapp = req.getServletContext.getAttribute("application").asInstanceOf[WebApp]
// todo need to preserve http status
webapp.doFilterAction(request) match {
case ActOn(action) =>
log.debug("ActOn({})",action)
action.trans.doWith(webapp.threadLocalSessions, {()=> chain.doFilter(req,res)})
case ActOnAsync(action) =>
log.debug("ActOnAsync({})",action)
chain.doFilter(req,res)
case RedirectToSecure(path) =>
log.debug("RedirectToSecure({})",path)
response.sendRedirect(path)
case RedirectToAuthenticate(path,lastView) =>
log.debug("RedirectToAuthenticate({},{})",Array(path,lastView):_*)
request.getSession.setAttribute("last_view",lastView)
response.sendRedirect(path)
case DispatchTo(path) =>
log.debug("DispatchTo({})",path)
req.getRequestDispatcher(path).forward(req, res)
case NotAnAction =>
log.trace("NotAnAction({})",request.getRequestURI)
chain.doFilter(req,res)
case Forbidden =>
log.debug("Forbidden({})",request.getRequestURI)
response.sendError(403,"Forbidden, Not Authorized to view this resource")
}
}
catch {
case e:Throwable =>
log.error(e.getMessage,e)
throw e
}
}
/**
*
*/
def destroy() {
log.trace("Destroy")
}
}
| m410/brzy | src/main/scala/org/brzy/webapp/BrzyFilter.scala | Scala | apache-2.0 | 2,798 |
package gapt.expr.ty
object ->: {
/**
* Interprets the given type as an arrow type.
*
* @param ty The type to be interpreted as an arrow type.
* @return Returns Some( (t,r) ) if `ty` = t -> r, otherwise None is
* returned.
*/
def unapply( ty: Ty ): Option[( Ty, Ty )] =
ty match {
case TArr( a, b ) => Some( ( a, b ) )
case _ => None
}
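  // Example (sketch): the extractor lets arrow types be deconstructed in patterns, e.g.
  //   ty match {
  //     case a ->: b => ... // ty is a function type with argument type a and result type b
  //     case _       => ...
  //   }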
}
| gapt/gapt | core/src/main/scala/gapt/expr/ty/->:.scala | Scala | gpl-3.0 | 393 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.scheduler
import org.mockito.ArgumentMatchers.{eq => meq}
import org.mockito.Mockito.{never, reset, times, verify, when}
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, PrivateMethodTester}
import org.scalatest.concurrent.Eventually.{eventually, timeout}
import org.scalatest.mockito.MockitoSugar
import org.scalatest.time.SpanSugar._
import org.apache.spark.{ExecutorAllocationClient, SparkConf, SparkFunSuite}
import org.apache.spark.internal.config.{DYN_ALLOCATION_ENABLED, DYN_ALLOCATION_TESTING}
import org.apache.spark.streaming.{DummyInputDStream, Seconds, StreamingContext}
import org.apache.spark.util.{ManualClock, Utils}
class ExecutorAllocationManagerSuite extends SparkFunSuite
with BeforeAndAfter with BeforeAndAfterAll with MockitoSugar with PrivateMethodTester {
import ExecutorAllocationManager._
private val batchDurationMillis = 1000L
private var allocationClient: ExecutorAllocationClient = null
private var clock: StreamManualClock = null
before {
allocationClient = mock[ExecutorAllocationClient]
clock = new StreamManualClock()
}
test("basic functionality") {
// Test that adding batch processing time info to allocation manager
// causes executors to be requested and killed accordingly
// There is 1 receiver, and exec 1 has been allocated to it
withAllocationManager(numReceivers = 1) { case (receiverTracker, allocationManager) =>
when(receiverTracker.allocatedExecutors).thenReturn(Map(1 -> Some("1")))
/** Add data point for batch processing time and verify executor allocation */
def addBatchProcTimeAndVerifyAllocation(batchProcTimeMs: Double)(body: => Unit): Unit = {
// 2 active executors
reset(allocationClient)
when(allocationClient.getExecutorIds()).thenReturn(Seq("1", "2"))
addBatchProcTime(allocationManager, batchProcTimeMs.toLong)
val advancedTime = SCALING_INTERVAL_DEFAULT_SECS * 1000 + 1
val expectedWaitTime = clock.getTimeMillis() + advancedTime
clock.advance(advancedTime)
// Make sure ExecutorAllocationManager.manageAllocation is called
eventually(timeout(10 seconds)) {
assert(clock.isStreamWaitingAt(expectedWaitTime))
}
body
}
/** Verify that the expected number of total executor were requested */
def verifyTotalRequestedExecs(expectedRequestedTotalExecs: Option[Int]): Unit = {
if (expectedRequestedTotalExecs.nonEmpty) {
require(expectedRequestedTotalExecs.get > 0)
verify(allocationClient, times(1)).requestTotalExecutors(
meq(expectedRequestedTotalExecs.get), meq(0), meq(Map.empty))
} else {
verify(allocationClient, never).requestTotalExecutors(0, 0, Map.empty)
}
}
/** Verify that a particular executor was killed */
def verifyKilledExec(expectedKilledExec: Option[String]): Unit = {
if (expectedKilledExec.nonEmpty) {
verify(allocationClient, times(1)).killExecutor(meq(expectedKilledExec.get))
} else {
verify(allocationClient, never).killExecutor(null)
}
}
// Batch proc time = batch interval, should increase allocation by 1
addBatchProcTimeAndVerifyAllocation(batchDurationMillis) {
verifyTotalRequestedExecs(Some(3)) // one already allocated, increase allocation by 1
verifyKilledExec(None)
}
// Batch proc time = batch interval * 2, should increase allocation by 2
addBatchProcTimeAndVerifyAllocation(batchDurationMillis * 2) {
verifyTotalRequestedExecs(Some(4))
verifyKilledExec(None)
}
// Batch proc time slightly more than the scale up ratio, should increase allocation by 1
addBatchProcTimeAndVerifyAllocation(batchDurationMillis * SCALING_UP_RATIO_DEFAULT + 1) {
verifyTotalRequestedExecs(Some(3))
verifyKilledExec(None)
}
// Batch proc time slightly less than the scale up ratio, should not change allocation
addBatchProcTimeAndVerifyAllocation(batchDurationMillis * SCALING_UP_RATIO_DEFAULT - 1) {
verifyTotalRequestedExecs(None)
verifyKilledExec(None)
}
// Batch proc time slightly more than the scale down ratio, should not change allocation
addBatchProcTimeAndVerifyAllocation(batchDurationMillis * SCALING_DOWN_RATIO_DEFAULT + 1) {
verifyTotalRequestedExecs(None)
verifyKilledExec(None)
}
      // Batch proc time slightly less than the scale down ratio, should kill an executor
addBatchProcTimeAndVerifyAllocation(batchDurationMillis * SCALING_DOWN_RATIO_DEFAULT - 1) {
verifyTotalRequestedExecs(None)
verifyKilledExec(Some("2"))
}
}
}
test("requestExecutors policy") {
/** Verify that the expected number of total executor were requested */
def verifyRequestedExecs(
numExecs: Int,
numNewExecs: Int,
expectedRequestedTotalExecs: Int)(
implicit allocationManager: ExecutorAllocationManager): Unit = {
reset(allocationClient)
when(allocationClient.getExecutorIds()).thenReturn((1 to numExecs).map(_.toString))
requestExecutors(allocationManager, numNewExecs)
verify(allocationClient, times(1)).requestTotalExecutors(
meq(expectedRequestedTotalExecs), meq(0), meq(Map.empty))
}
withAllocationManager(numReceivers = 1) { case (_, allocationManager) =>
implicit val am = allocationManager
intercept[IllegalArgumentException] {
verifyRequestedExecs(numExecs = 0, numNewExecs = 0, 0)
}
verifyRequestedExecs(numExecs = 0, numNewExecs = 1, expectedRequestedTotalExecs = 1)
verifyRequestedExecs(numExecs = 1, numNewExecs = 1, expectedRequestedTotalExecs = 2)
verifyRequestedExecs(numExecs = 2, numNewExecs = 2, expectedRequestedTotalExecs = 4)
}
withAllocationManager(numReceivers = 2) { case(_, allocationManager) =>
implicit val am = allocationManager
verifyRequestedExecs(numExecs = 0, numNewExecs = 1, expectedRequestedTotalExecs = 2)
verifyRequestedExecs(numExecs = 1, numNewExecs = 1, expectedRequestedTotalExecs = 2)
verifyRequestedExecs(numExecs = 2, numNewExecs = 2, expectedRequestedTotalExecs = 4)
}
withAllocationManager(
// Test min 2 executors
new SparkConf().set("spark.streaming.dynamicAllocation.minExecutors", "2")) {
case (_, allocationManager) =>
implicit val am = allocationManager
verifyRequestedExecs(numExecs = 0, numNewExecs = 1, expectedRequestedTotalExecs = 2)
verifyRequestedExecs(numExecs = 0, numNewExecs = 3, expectedRequestedTotalExecs = 3)
verifyRequestedExecs(numExecs = 1, numNewExecs = 1, expectedRequestedTotalExecs = 2)
verifyRequestedExecs(numExecs = 1, numNewExecs = 2, expectedRequestedTotalExecs = 3)
verifyRequestedExecs(numExecs = 2, numNewExecs = 1, expectedRequestedTotalExecs = 3)
verifyRequestedExecs(numExecs = 2, numNewExecs = 2, expectedRequestedTotalExecs = 4)
}
withAllocationManager(
// Test with max 2 executors
new SparkConf().set("spark.streaming.dynamicAllocation.maxExecutors", "2")) {
case (_, allocationManager) =>
implicit val am = allocationManager
verifyRequestedExecs(numExecs = 0, numNewExecs = 1, expectedRequestedTotalExecs = 1)
verifyRequestedExecs(numExecs = 0, numNewExecs = 3, expectedRequestedTotalExecs = 2)
verifyRequestedExecs(numExecs = 1, numNewExecs = 2, expectedRequestedTotalExecs = 2)
verifyRequestedExecs(numExecs = 2, numNewExecs = 1, expectedRequestedTotalExecs = 2)
verifyRequestedExecs(numExecs = 2, numNewExecs = 2, expectedRequestedTotalExecs = 2)
}
}
test("killExecutor policy") {
/**
* Verify that a particular executor was killed, given active executors and executors
* allocated to receivers.
*/
def verifyKilledExec(
execIds: Seq[String],
receiverExecIds: Map[Int, Option[String]],
expectedKilledExec: Option[String])(
implicit x: (ReceiverTracker, ExecutorAllocationManager)): Unit = {
val (receiverTracker, allocationManager) = x
reset(allocationClient)
when(allocationClient.getExecutorIds()).thenReturn(execIds)
when(receiverTracker.allocatedExecutors).thenReturn(receiverExecIds)
killExecutor(allocationManager)
if (expectedKilledExec.nonEmpty) {
verify(allocationClient, times(1)).killExecutor(meq(expectedKilledExec.get))
} else {
verify(allocationClient, never).killExecutor(null)
}
}
withAllocationManager() { case (receiverTracker, allocationManager) =>
implicit val rcvrTrackerAndExecAllocMgr = (receiverTracker, allocationManager)
verifyKilledExec(Nil, Map.empty, None)
verifyKilledExec(Seq("1", "2"), Map.empty, None)
verifyKilledExec(Seq("1"), Map(1 -> Some("1")), None)
verifyKilledExec(Seq("1", "2"), Map(1 -> Some("1")), Some("2"))
verifyKilledExec(Seq("1", "2"), Map(1 -> Some("1"), 2 -> Some("2")), None)
}
withAllocationManager(
new SparkConf().set("spark.streaming.dynamicAllocation.minExecutors", "2")) {
case (receiverTracker, allocationManager) =>
implicit val rcvrTrackerAndExecAllocMgr = (receiverTracker, allocationManager)
verifyKilledExec(Seq("1", "2"), Map.empty, None)
verifyKilledExec(Seq("1", "2", "3"), Map(1 -> Some("1"), 2 -> Some("2")), Some("3"))
}
}
test("parameter validation") {
def validateParams(
numReceivers: Int = 1,
scalingIntervalSecs: Option[Int] = None,
scalingUpRatio: Option[Double] = None,
scalingDownRatio: Option[Double] = None,
minExecs: Option[Int] = None,
maxExecs: Option[Int] = None): Unit = {
require(numReceivers > 0)
val receiverTracker = mock[ReceiverTracker]
when(receiverTracker.numReceivers()).thenReturn(numReceivers)
val conf = new SparkConf()
if (scalingIntervalSecs.nonEmpty) {
conf.set(
"spark.streaming.dynamicAllocation.scalingInterval",
s"${scalingIntervalSecs.get}s")
}
if (scalingUpRatio.nonEmpty) {
conf.set("spark.streaming.dynamicAllocation.scalingUpRatio", scalingUpRatio.get.toString)
}
if (scalingDownRatio.nonEmpty) {
conf.set(
"spark.streaming.dynamicAllocation.scalingDownRatio",
scalingDownRatio.get.toString)
}
if (minExecs.nonEmpty) {
conf.set("spark.streaming.dynamicAllocation.minExecutors", minExecs.get.toString)
}
if (maxExecs.nonEmpty) {
conf.set("spark.streaming.dynamicAllocation.maxExecutors", maxExecs.get.toString)
}
new ExecutorAllocationManager(
allocationClient, receiverTracker, conf, batchDurationMillis, clock)
}
validateParams(numReceivers = 1)
validateParams(numReceivers = 2, minExecs = Some(1))
validateParams(numReceivers = 2, minExecs = Some(3))
validateParams(numReceivers = 2, maxExecs = Some(3))
validateParams(numReceivers = 2, maxExecs = Some(1))
validateParams(minExecs = Some(3), maxExecs = Some(3))
validateParams(scalingIntervalSecs = Some(1))
validateParams(scalingUpRatio = Some(1.1))
validateParams(scalingDownRatio = Some(0.1))
validateParams(scalingUpRatio = Some(1.1), scalingDownRatio = Some(0.1))
intercept[IllegalArgumentException] {
validateParams(minExecs = Some(0))
}
intercept[IllegalArgumentException] {
validateParams(minExecs = Some(-1))
}
intercept[IllegalArgumentException] {
validateParams(maxExecs = Some(0))
}
intercept[IllegalArgumentException] {
validateParams(maxExecs = Some(-1))
}
intercept[IllegalArgumentException] {
validateParams(minExecs = Some(4), maxExecs = Some(3))
}
intercept[IllegalArgumentException] {
validateParams(scalingIntervalSecs = Some(-1))
}
intercept[IllegalArgumentException] {
validateParams(scalingIntervalSecs = Some(0))
}
intercept[IllegalArgumentException] {
validateParams(scalingUpRatio = Some(-0.1))
}
intercept[IllegalArgumentException] {
validateParams(scalingUpRatio = Some(0))
}
intercept[IllegalArgumentException] {
validateParams(scalingDownRatio = Some(-0.1))
}
intercept[IllegalArgumentException] {
validateParams(scalingDownRatio = Some(0))
}
intercept[IllegalArgumentException] {
validateParams(scalingUpRatio = Some(0.5), scalingDownRatio = Some(0.5))
}
intercept[IllegalArgumentException] {
validateParams(scalingUpRatio = Some(0.3), scalingDownRatio = Some(0.5))
}
}
test("enabling and disabling") {
withStreamingContext(new SparkConf()) { ssc =>
ssc.start()
assert(getExecutorAllocationManager(ssc).isEmpty)
}
withStreamingContext(
new SparkConf().set("spark.streaming.dynamicAllocation.enabled", "true")) { ssc =>
ssc.start()
assert(getExecutorAllocationManager(ssc).nonEmpty)
}
val confWithBothDynamicAllocationEnabled = new SparkConf()
.set("spark.streaming.dynamicAllocation.enabled", "true")
.set(DYN_ALLOCATION_ENABLED, true)
.set(DYN_ALLOCATION_TESTING, true)
require(Utils.isDynamicAllocationEnabled(confWithBothDynamicAllocationEnabled))
withStreamingContext(confWithBothDynamicAllocationEnabled) { ssc =>
intercept[IllegalArgumentException] {
ssc.start()
}
}
}
private def withAllocationManager(
conf: SparkConf = new SparkConf,
numReceivers: Int = 1
)(body: (ReceiverTracker, ExecutorAllocationManager) => Unit): Unit = {
val receiverTracker = mock[ReceiverTracker]
when(receiverTracker.numReceivers()).thenReturn(numReceivers)
val manager = new ExecutorAllocationManager(
allocationClient, receiverTracker, conf, batchDurationMillis, clock)
try {
manager.start()
body(receiverTracker, manager)
} finally {
manager.stop()
}
}
private val _addBatchProcTime = PrivateMethod[Unit]('addBatchProcTime)
private val _requestExecutors = PrivateMethod[Unit]('requestExecutors)
private val _killExecutor = PrivateMethod[Unit]('killExecutor)
private val _executorAllocationManager =
PrivateMethod[Option[ExecutorAllocationManager]]('executorAllocationManager)
private def addBatchProcTime(manager: ExecutorAllocationManager, timeMs: Long): Unit = {
manager invokePrivate _addBatchProcTime(timeMs)
}
private def requestExecutors(manager: ExecutorAllocationManager, newExecs: Int): Unit = {
manager invokePrivate _requestExecutors(newExecs)
}
private def killExecutor(manager: ExecutorAllocationManager): Unit = {
manager invokePrivate _killExecutor()
}
private def getExecutorAllocationManager(
ssc: StreamingContext): Option[ExecutorAllocationManager] = {
ssc.scheduler invokePrivate _executorAllocationManager()
}
private def withStreamingContext(conf: SparkConf)(body: StreamingContext => Unit): Unit = {
conf.setMaster("myDummyLocalExternalClusterManager")
.setAppName(this.getClass.getSimpleName)
.set("spark.streaming.dynamicAllocation.testing", "true") // to test dynamic allocation
var ssc: StreamingContext = null
try {
ssc = new StreamingContext(conf, Seconds(1))
new DummyInputDStream(ssc).foreachRDD(_ => { })
body(ssc)
} finally {
if (ssc != null) ssc.stop()
}
}
}
/**
* A special manual clock that provide `isStreamWaitingAt` to allow the user to check if the clock
* is blocking.
*/
class StreamManualClock(time: Long = 0L) extends ManualClock(time) with Serializable {
private var waitStartTime: Option[Long] = None
override def waitTillTime(targetTime: Long): Long = synchronized {
try {
waitStartTime = Some(getTimeMillis())
super.waitTillTime(targetTime)
} finally {
waitStartTime = None
}
}
/**
* Returns if the clock is blocking and the time it started to block is the parameter `time`.
*/
def isStreamWaitingAt(time: Long): Boolean = synchronized {
waitStartTime == Some(time)
}
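  // Example (sketch): if the clock reads 5000L and another thread is blocked in waitTillTime(6000L),
  // isStreamWaitingAt(5000L) returns true until the clock is advanced to 6000L and the waiter returns.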
}
| yanboliang/spark | streaming/src/test/scala/org/apache/spark/streaming/scheduler/ExecutorAllocationManagerSuite.scala | Scala | apache-2.0 | 17,108 |
package io.mth.unfiltered.cors
import unfiltered.request.{Connection => _, _}
import unfiltered.response._
import unfiltered.kit._
import unfiltered.Cycle
case class CorsConfig(
validateOrigin: String => Boolean,
validateMethod: String => Boolean,
validateHeaders: List[String] => Boolean,
allowCredentials: Boolean,
maxAge: Option[Int],
exposeHeaders: List[String]
) {
def validate(origin: String, method: String, headers: List[String]) =
validateOrigin(origin) && validateMethod(method) && validateHeaders(headers)
}
object CorsConfig {
def origins(origins: List[String]) = CorsConfig(
origins.contains(_),
(_: String) => true,
(_: List[String]) => true,
true,
Some(120),
Nil
)
}
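// Example (sketch): a whitelist-style configuration built with the helper above
//   val cors = CorsConfig.origins(List("https://app.example.com"))
//   cors.validate("https://app.example.com", "GET", Nil) // true: origin allowed, method/headers unrestricted
//   cors.validate("https://evil.example.com", "GET", Nil) // false: origin not in the whitelist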
| rgladwell/unfiltered-cors | src/main/scala/io/mth/unfiltered/cors/CorsConfig.scala | Scala | bsd-3-clause | 731 |
package helpers
import play.api.Play
import scala.collection.immutable
import models._
import java.sql.Connection
import play.api.libs.concurrent.Akka
import play.api.Play.current
import scala.concurrent.duration._
import play.api.i18n.{Messages, MessagesProvider}
import play.api.libs.mailer._
import akka.actor._
import javax.inject._
import scala.concurrent.ExecutionContext
import play.api.Configuration
@Singleton
class ItemInquiryMail @Inject() (
system: ActorSystem, mailerClient: MailerClient,
siteItemRepo: SiteItemRepo,
orderNotificationRepo: OrderNotificationRepo,
conf: Configuration,
implicit val ec: ExecutionContext
) extends HasLogger {
val disableMailer = conf.getOptional[Boolean]("disable.mailer").getOrElse(false)
val from = conf.get[String]("user.registration.email.from")
def send(
user: StoreUser, inq: ItemInquiry, fields: immutable.Map[Symbol, String], locale: LocaleInfo
)(
implicit conn: Connection, mp: MessagesProvider
) {
val itemInfo: (Site, ItemName) = siteItemRepo.getWithSiteAndItem(inq.siteId, inq.itemId, locale).get
sendToBuyer(user, locale, itemInfo, inq, fields)
sendToStoreOwner(user, locale, itemInfo, inq, fields)
sendToAdmin(user, locale, itemInfo, inq, fields)
}
def sendToBuyer(
user: StoreUser, locale: LocaleInfo, itemInfo: (Site, ItemName), inq: ItemInquiry, fields: immutable.Map[Symbol, String]
)(
implicit conn: Connection, mp: MessagesProvider
) {
logger.info("Sending item inquiry for buyer sent to " + inq.email)
val body = inq.inquiryType match {
case ItemInquiryType.QUERY =>
views.html.mail.itemInquiryForBuyer(user, itemInfo, inq, fields).toString
case ItemInquiryType.RESERVATION =>
views.html.mail.itemReservationForBuyer(user, itemInfo, inq, fields).toString
case t =>
throw new Error("Unknown inquiry type " + t)
}
if (! disableMailer) {
system.scheduler.scheduleOnce(0.microsecond) {
val mail = Email(
subject = Messages(
inq.inquiryType match {
case ItemInquiryType.QUERY => "mail.item.inquiry.buyer.subject"
case ItemInquiryType.RESERVATION => "mail.item.reservation.buyer.subject"
}
).format(inq.id.get.id),
to = Seq(inq.email),
from = from,
bodyText = Some(body)
)
mailerClient.send(mail)
logger.info("Item inquiry notification for buyer sent to " + inq.email)
}
}
}
def sendToStoreOwner(
user: StoreUser, locale: LocaleInfo, itemInfo: (Site, ItemName), inq: ItemInquiry, fields: immutable.Map[Symbol, String]
)(
implicit conn: Connection, mp: MessagesProvider
) {
orderNotificationRepo.listBySite(inq.siteId).foreach { owner =>
logger.info("Sending item inquiry to site owner " + itemInfo._1 + " sent to " + inq.email)
val body = inq.inquiryType match {
case ItemInquiryType.QUERY =>
views.html.mail.itemInquiryForSiteOwner(user, itemInfo, inq, fields).toString
case ItemInquiryType.RESERVATION =>
views.html.mail.itemReservationForSiteOwner(user, itemInfo, inq, fields).toString
case t =>
throw new Error("Unknown inquiry type " + t)
}
if (! disableMailer) {
system.scheduler.scheduleOnce(0.microsecond) {
val mail = Email(
subject = Messages(
inq.inquiryType match {
case ItemInquiryType.QUERY => "mail.item.inquiry.site.owner.subject"
case ItemInquiryType.RESERVATION => "mail.item.reservation.site.owner.subject"
}
).format(inq.id.get.id),
to = Seq(owner.email),
from = from,
bodyText = Some(body)
)
mailerClient.send(mail)
logger.info("Item inquiry notification for site owner " + itemInfo._1 + " sent to " + inq.email)
}
}
}
}
def sendToAdmin(
user: StoreUser, locale: LocaleInfo, itemInfo: (Site, ItemName), inq: ItemInquiry, fields: immutable.Map[Symbol, String]
)(
implicit conn: Connection, mp: MessagesProvider
) {
if (! disableMailer) {
orderNotificationRepo.listAdmin.foreach { admin =>
logger.info("Sending item inquiry for admin to " + admin.email)
val body = inq.inquiryType match {
case ItemInquiryType.QUERY =>
views.html.mail.itemInquiryForAdmin(user, itemInfo, inq, fields).toString
case ItemInquiryType.RESERVATION =>
views.html.mail.itemReservationForAdmin(user, itemInfo, inq, fields).toString
case t =>
throw new Error("Unknown inquiry type " + t)
}
system.scheduler.scheduleOnce(0.microsecond) {
val mail = Email(
subject = Messages(
inq.inquiryType match {
case ItemInquiryType.QUERY => "mail.item.inquiry.site.owner.subject"
case ItemInquiryType.RESERVATION => "mail.item.reservation.site.owner.subject"
}
).format(inq.id.get.id),
to = Seq(admin.email),
from = from,
bodyText = Some(body)
)
mailerClient.send(mail)
logger.info("Item inquiry notification for admin to " + admin.email)
}
}
}
else {
logger.info("Item inquiry notification mail is not sent since mailer is disabled.")
}
}
}
| ruimo/store2 | app/helpers/ItemInquiryMail.scala | Scala | apache-2.0 | 5,491 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import java.util.Locale
import scala.language.existentials
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.expressions.PredicateHelper
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
class FilteredScanSource extends RelationProvider {
override def createRelation(
sqlContext: SQLContext,
parameters: Map[String, String]): BaseRelation = {
SimpleFilteredScan(parameters("from").toInt, parameters("to").toInt)(sqlContext.sparkSession)
}
}
case class SimpleFilteredScan(from: Int, to: Int)(@transient val sparkSession: SparkSession)
extends BaseRelation
with PrunedFilteredScan {
override def sqlContext: SQLContext = sparkSession.sqlContext
override def schema: StructType =
StructType(
StructField("a", IntegerType, nullable = false) ::
StructField("b", IntegerType, nullable = false) ::
StructField("c", StringType, nullable = false) :: Nil)
override def unhandledFilters(filters: Array[Filter]): Array[Filter] = {
def unhandled(filter: Filter): Boolean = {
filter match {
case EqualTo(col, v) => col == "b"
case EqualNullSafe(col, v) => col == "b"
case LessThan(col, v: Int) => col == "b"
case LessThanOrEqual(col, v: Int) => col == "b"
case GreaterThan(col, v: Int) => col == "b"
case GreaterThanOrEqual(col, v: Int) => col == "b"
case In(col, values) => col == "b"
case IsNull(col) => col == "b"
case IsNotNull(col) => col == "b"
case Not(pred) => unhandled(pred)
case And(left, right) => unhandled(left) || unhandled(right)
case Or(left, right) => unhandled(left) || unhandled(right)
case _ => false
}
}
filters.filter(unhandled)
}
override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
val rowBuilders = requiredColumns.map {
case "a" => (i: Int) => Seq(i)
case "b" => (i: Int) => Seq(i * 2)
case "c" => (i: Int) =>
val c = (i - 1 + 'a').toChar.toString
Seq(c * 5 + c.toUpperCase(Locale.ROOT) * 5)
}
FiltersPushed.list = filters
ColumnsRequired.set = requiredColumns.toSet
// Predicate test on integer column
def translateFilterOnA(filter: Filter): Int => Boolean = filter match {
case EqualTo("a", v) => (a: Int) => a == v
case EqualNullSafe("a", v) => (a: Int) => a == v
case LessThan("a", v: Int) => (a: Int) => a < v
case LessThanOrEqual("a", v: Int) => (a: Int) => a <= v
case GreaterThan("a", v: Int) => (a: Int) => a > v
case GreaterThanOrEqual("a", v: Int) => (a: Int) => a >= v
case In("a", values) => (a: Int) => values.map(_.asInstanceOf[Int]).toSet.contains(a)
case IsNull("a") => (a: Int) => false // Int can't be null
case IsNotNull("a") => (a: Int) => true
case Not(pred) => (a: Int) => !translateFilterOnA(pred)(a)
case And(left, right) => (a: Int) =>
translateFilterOnA(left)(a) && translateFilterOnA(right)(a)
case Or(left, right) => (a: Int) =>
translateFilterOnA(left)(a) || translateFilterOnA(right)(a)
case _ => (a: Int) => true
}
// Predicate test on string column
def translateFilterOnC(filter: Filter): String => Boolean = filter match {
case StringStartsWith("c", v) => _.startsWith(v)
case StringEndsWith("c", v) => _.endsWith(v)
case StringContains("c", v) => _.contains(v)
case EqualTo("c", v: String) => _.equals(v)
case EqualTo("c", v: UTF8String) => sys.error("UTF8String should not appear in filters")
case In("c", values) => (s: String) => values.map(_.asInstanceOf[String]).toSet.contains(s)
case _ => (c: String) => true
}
def eval(a: Int) = {
val c = (a - 1 + 'a').toChar.toString * 5 +
(a - 1 + 'a').toChar.toString.toUpperCase(Locale.ROOT) * 5
filters.forall(translateFilterOnA(_)(a)) && filters.forall(translateFilterOnC(_)(c))
}
sparkSession.sparkContext.parallelize(from to to).filter(eval).map(i =>
Row.fromSeq(rowBuilders.map(_(i)).reduceOption(_ ++ _).getOrElse(Seq.empty)))
}
}
// A hack for better error messages when filter pushdown fails.
object FiltersPushed {
var list: Seq[Filter] = Nil
}
// Used together with `SimpleFilteredScan` to check pushed columns.
object ColumnsRequired {
var set: Set[String] = Set.empty
}
class FilteredScanSuite extends DataSourceTest with SharedSQLContext with PredicateHelper {
protected override lazy val sql = spark.sql _
override def beforeAll(): Unit = {
super.beforeAll()
sql(
"""
|CREATE TEMPORARY VIEW oneToTenFiltered
|USING org.apache.spark.sql.sources.FilteredScanSource
|OPTIONS (
| from '1',
| to '10'
|)
""".stripMargin)
}
sqlTest(
"SELECT * FROM oneToTenFiltered",
(1 to 10).map(i => Row(i, i * 2, (i - 1 + 'a').toChar.toString * 5
+ (i - 1 + 'a').toChar.toString.toUpperCase(Locale.ROOT) * 5)).toSeq)
sqlTest(
"SELECT a, b FROM oneToTenFiltered",
(1 to 10).map(i => Row(i, i * 2)).toSeq)
sqlTest(
"SELECT b, a FROM oneToTenFiltered",
(1 to 10).map(i => Row(i * 2, i)).toSeq)
sqlTest(
"SELECT a FROM oneToTenFiltered",
(1 to 10).map(i => Row(i)).toSeq)
sqlTest(
"SELECT b FROM oneToTenFiltered",
(1 to 10).map(i => Row(i * 2)).toSeq)
sqlTest(
"SELECT a * 2 FROM oneToTenFiltered",
(1 to 10).map(i => Row(i * 2)).toSeq)
sqlTest(
"SELECT A AS b FROM oneToTenFiltered",
(1 to 10).map(i => Row(i)).toSeq)
sqlTest(
"SELECT x.b, y.a FROM oneToTenFiltered x JOIN oneToTenFiltered y ON x.a = y.b",
(1 to 5).map(i => Row(i * 4, i)).toSeq)
sqlTest(
"SELECT x.a, y.b FROM oneToTenFiltered x JOIN oneToTenFiltered y ON x.a = y.b",
(2 to 10 by 2).map(i => Row(i, i)).toSeq)
sqlTest(
"SELECT a, b FROM oneToTenFiltered WHERE a = 1",
Seq(1).map(i => Row(i, i * 2)))
sqlTest(
"SELECT a, b FROM oneToTenFiltered WHERE a IN (1,3,5)",
Seq(1, 3, 5).map(i => Row(i, i * 2)))
sqlTest(
"SELECT a, b FROM oneToTenFiltered WHERE A = 1",
Seq(1).map(i => Row(i, i * 2)))
sqlTest(
"SELECT a, b FROM oneToTenFiltered WHERE b = 2",
Seq(1).map(i => Row(i, i * 2)))
sqlTest(
"SELECT a, b FROM oneToTenFiltered WHERE a IS NULL",
Seq.empty[Row])
sqlTest(
"SELECT a, b FROM oneToTenFiltered WHERE a IS NOT NULL",
(1 to 10).map(i => Row(i, i * 2)).toSeq)
sqlTest(
"SELECT a, b FROM oneToTenFiltered WHERE a < 5 AND a > 1",
(2 to 4).map(i => Row(i, i * 2)).toSeq)
sqlTest(
"SELECT a, b FROM oneToTenFiltered WHERE a < 3 OR a > 8",
Seq(1, 2, 9, 10).map(i => Row(i, i * 2)))
sqlTest(
"SELECT a, b FROM oneToTenFiltered WHERE NOT (a < 6)",
(6 to 10).map(i => Row(i, i * 2)).toSeq)
sqlTest(
"SELECT a, b, c FROM oneToTenFiltered WHERE c like 'c%'",
Seq(Row(3, 3 * 2, "c" * 5 + "C" * 5)))
sqlTest(
"SELECT a, b, c FROM oneToTenFiltered WHERE c like '%D'",
Seq(Row(4, 4 * 2, "d" * 5 + "D" * 5)))
sqlTest(
"SELECT a, b, c FROM oneToTenFiltered WHERE c like '%eE%'",
Seq(Row(5, 5 * 2, "e" * 5 + "E" * 5)))
testPushDown("SELECT * FROM oneToTenFiltered WHERE A = 1", 1, Set("a", "b", "c"))
testPushDown("SELECT a FROM oneToTenFiltered WHERE A = 1", 1, Set("a"))
testPushDown("SELECT b FROM oneToTenFiltered WHERE A = 1", 1, Set("b"))
testPushDown("SELECT a, b FROM oneToTenFiltered WHERE A = 1", 1, Set("a", "b"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE a = 1", 1, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE 1 = a", 1, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE a > 1", 9, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE a >= 2", 9, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE 1 < a", 9, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE 2 <= a", 9, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE 1 > a", 0, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE 2 >= a", 2, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE a < 1", 0, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE a <= 2", 2, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE a > 1 AND a < 10", 8, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE a IN (1,3,5)", 3, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE a = 20", 0, Set("a", "b", "c"))
testPushDown(
"SELECT * FROM oneToTenFiltered WHERE b = 1",
10,
Set("a", "b", "c"),
Set(EqualTo("b", 1)))
testPushDown("SELECT * FROM oneToTenFiltered WHERE a < 5 AND a > 1", 3, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE a < 3 OR a > 8", 4, Set("a", "b", "c"))
testPushDown("SELECT * FROM oneToTenFiltered WHERE NOT (a < 6)", 5, Set("a", "b", "c"))
testPushDown("SELECT a, b, c FROM oneToTenFiltered WHERE c like 'c%'", 1, Set("a", "b", "c"))
testPushDown("SELECT a, b, c FROM oneToTenFiltered WHERE c like 'C%'", 0, Set("a", "b", "c"))
testPushDown("SELECT a, b, c FROM oneToTenFiltered WHERE c like '%D'", 1, Set("a", "b", "c"))
testPushDown("SELECT a, b, c FROM oneToTenFiltered WHERE c like '%d'", 0, Set("a", "b", "c"))
testPushDown("SELECT a, b, c FROM oneToTenFiltered WHERE c like '%eE%'", 1, Set("a", "b", "c"))
testPushDown("SELECT a, b, c FROM oneToTenFiltered WHERE c like '%Ee%'", 0, Set("a", "b", "c"))
testPushDown("SELECT c FROM oneToTenFiltered WHERE c = 'aaaaaAAAAA'", 1, Set("c"))
testPushDown("SELECT c FROM oneToTenFiltered WHERE c IN ('aaaaaAAAAA', 'foo')", 1, Set("c"))
// Filters referencing multiple columns are not convertible, all referenced columns must be
// required.
testPushDown("SELECT c FROM oneToTenFiltered WHERE A + b > 9", 10, Set("a", "b", "c"))
// A query with an inconvertible filter, an unhandled filter, and a handled filter.
testPushDown(
"""SELECT a
| FROM oneToTenFiltered
| WHERE a + b > 9
| AND b < 16
| AND c IN ('bbbbbBBBBB', 'cccccCCCCC', 'dddddDDDDD', 'foo')
""".stripMargin.split("\\n").map(_.trim).mkString(" "),
3,
Set("a", "b"),
Set(LessThan("b", 16)))
def testPushDown(
sqlString: String,
expectedCount: Int,
requiredColumnNames: Set[String]): Unit = {
testPushDown(sqlString, expectedCount, requiredColumnNames, Set.empty[Filter])
}
def testPushDown(
sqlString: String,
expectedCount: Int,
requiredColumnNames: Set[String],
expectedUnhandledFilters: Set[Filter]): Unit = {
test(s"PushDown Returns $expectedCount: $sqlString") {
// These tests check a particular plan, disable whole stage codegen.
spark.conf.set(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, false)
try {
val queryExecution = sql(sqlString).queryExecution
val rawPlan = queryExecution.executedPlan.collect {
case p: execution.DataSourceScanExec => p
} match {
case Seq(p) => p
case _ => fail(s"More than one PhysicalRDD found\\n$queryExecution")
}
val rawCount = rawPlan.execute().count()
assert(ColumnsRequired.set === requiredColumnNames)
val table = spark.table("oneToTenFiltered")
val relation = table.queryExecution.logical.collectFirst {
case LogicalRelation(r, _, _) => r
}.get
assert(
relation.unhandledFilters(FiltersPushed.list.toArray).toSet === expectedUnhandledFilters)
if (rawCount != expectedCount) {
fail(
s"Wrong # of results for pushed filter. Got $rawCount, Expected $expectedCount\\n" +
s"Filters pushed: ${FiltersPushed.list.mkString(",")}\\n" +
queryExecution)
}
} finally {
spark.conf.set(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key,
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.defaultValue.get)
}
}
}
}
| mike0sv/spark | sql/core/src/test/scala/org/apache/spark/sql/sources/FilteredScanSuite.scala | Scala | apache-2.0 | 13,139 |
package djinni
import djinni.ast._
import djinni.generatorTools._
import djinni.meta._
class ObjcMarshal(spec: Spec) extends Marshal(spec) {
override def typename(tm: MExpr): String = {
val (name, _) = toObjcType(tm)
name
}
def typename(name: String, ty: TypeDef): String = idObjc.ty(name)
override def fqTypename(tm: MExpr): String = typename(tm)
def fqTypename(name: String, ty: TypeDef): String = typename(name, ty)
def nullability(tm: MExpr): Option[String] = {
tm.base match {
case MOptional => Some("nullable")
case MPrimitive(_,_,_,_,_,_,_,_) => None
case d: MDef => d.defType match {
case DEnum => None
case DInterface => Some("nullable")
case DRecord => Some("nonnull")
}
case _ => Some("nonnull")
}
}
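  // For example (for a hypothetical type Foo, per the mapping above): an objc-extended interface
  // parameter renders as "nullable id<Foo>", while a record parameter renders as
  // "nonnull Foo *" (see paramType and toObjcParamType below).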
override def paramType(tm: MExpr): String = {
nullability(tm).fold("")(_ + " ") + toObjcParamType(tm)
}
override def fqParamType(tm: MExpr): String = paramType(tm)
override def returnType(ret: Option[TypeRef]): String = ret.fold("void")((t: TypeRef) => nullability(t.resolved).fold("")(_ + " ") + toObjcParamType(t.resolved))
override def fqReturnType(ret: Option[TypeRef]): String = returnType(ret)
override def fieldType(tm: MExpr): String = toObjcParamType(tm)
override def fqFieldType(tm: MExpr): String = toObjcParamType(tm)
override def toCpp(tm: MExpr, expr: String): String = throw new AssertionError("direct objc to cpp conversion not possible")
override def fromCpp(tm: MExpr, expr: String): String = throw new AssertionError("direct cpp to objc conversion not possible")
def references(m: Meta, exclude: String = ""): Seq[SymbolReference] = m match {
case o: MOpaque =>
List(ImportRef("<Foundation/Foundation.h>"))
case d: MDef => d.defType match {
case DEnum =>
List(ImportRef(q(spec.objcIncludePrefix + headerName(d.name))))
case DInterface =>
val ext = d.body.asInstanceOf[Interface].ext
if (ext.cpp) {
List(ImportRef("<Foundation/Foundation.h>"), DeclRef(s"@class ${typename(d.name, d.body)};", None))
}
else if (ext.objc) {
List(ImportRef("<Foundation/Foundation.h>"), DeclRef(s"@protocol ${typename(d.name, d.body)};", None))
}
else {
List()
}
case DRecord =>
val r = d.body.asInstanceOf[Record]
val prefix = if (r.ext.objc) "../" else ""
List(ImportRef(q(spec.objcIncludePrefix + prefix + headerName(d.name))))
}
case p: MParam => List()
}
def headerName(ident: String): String = idObjc.ty(ident) + "." + spec.objcHeaderExt
// Return value: (Type_Name, Is_Class_Or_Not)
def toObjcType(ty: TypeRef): (String, Boolean) = toObjcType(ty.resolved, false)
def toObjcType(ty: TypeRef, needRef: Boolean): (String, Boolean) = toObjcType(ty.resolved, needRef)
def toObjcType(tm: MExpr): (String, Boolean) = toObjcType(tm, false)
def toObjcType(tm: MExpr, needRef: Boolean): (String, Boolean) = {
def f(tm: MExpr, needRef: Boolean): (String, Boolean) = {
tm.base match {
case MOptional =>
// We use "nil" for the empty optional.
assert(tm.args.size == 1)
val arg = tm.args.head
arg.base match {
case MOptional => throw new AssertionError("nested optional?")
case m => f(arg, true)
}
case o =>
val base = o match {
case p: MPrimitive => if (needRef) (p.objcBoxed, true) else (p.objcName, false)
case MString => ("NSString", true)
case MDate => ("NSDate", true)
case MBinary => ("NSData", true)
case MOptional => throw new AssertionError("optional should have been special cased")
case MList => ("NSArray", true)
case MSet => ("NSSet", true)
case MMap => ("NSDictionary", true)
case d: MDef => d.defType match {
case DEnum => if (needRef) ("NSNumber", true) else (idObjc.ty(d.name), false)
case DRecord => (idObjc.ty(d.name), true)
case DInterface =>
val ext = d.body.asInstanceOf[Interface].ext
(idObjc.ty(d.name), true)
}
case p: MParam => throw new AssertionError("Parameter should not happen at Obj-C top level")
}
base
}
}
f(tm, needRef)
}
def toObjcParamType(tm: MExpr): String = {
val (name, needRef) = toObjcType(tm)
val param = name + (if(needRef) " *" else "")
tm.base match {
case d: MDef => d.body match {
case i: Interface => if(i.ext.objc) s"id<$name>" else param
case _ => param
}
case MOptional => tm.args.head.base match {
case d: MDef => d.body match {
case i: Interface => if(i.ext.objc) s"id<$name>" else param
case _ => param
}
case _ => param
}
case _ => param
}
}
}
| mrdomino/djinni | src/source/ObjcMarshal.scala | Scala | apache-2.0 | 4,963 |
package com.aristocrat.mandrill.requests.Webhooks
import com.aristocrat.mandrill.requests.MandrillRequest
case class Add(
key: String,
url: String,
description: String,
// @OneOf(Events)
events: Seq[String] = Seq()) extends MandrillRequest
| aristocratic/mandrill | src/main/scala/com/aristocrat/mandrill/requests/Webhooks/Add.scala | Scala | mit | 262 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.commons.utils
import org.scalatest.matchers.{BeMatcher, MatchResult}
import io.deepsense.commons.{StandardSpec, UnitTestSupport}
class VersionSpec extends StandardSpec with UnitTestSupport {
"Version" should {
"be comparable with other version" in {
val v = Version(1, 2, 3, "")
v should be (compatibleWith(v))
val versionPatchLower = v.copy(fix = v.fix - 1)
val versionPatchHigher = v.copy(fix = v.fix + 1)
v should be (compatibleWith(versionPatchLower))
v should be (compatibleWith(versionPatchHigher))
val versionMinorLower = v.copy(minor = v.minor - 1)
val versionMinorHigher = v.copy(minor = v.minor + 1)
v should be (incompatibleWith(versionMinorLower))
v should be (incompatibleWith(versionMinorHigher))
val versionMajorLower = v.copy(major = v.major - 1)
val versionMajorHigher = v.copy(major = v.major + 1)
v should be (incompatibleWith(versionMajorLower))
v should be (incompatibleWith(versionMajorHigher))
}
"be parse strings" in {
Version("1.2.3") shouldBe Version(1, 2, 3, "")
Version("1.2.3.4") shouldBe Version(1, 2, 3, ".4")
Version("1.2.3.a") shouldBe Version(1, 2, 3, ".a")
Version("1.2.3-x") shouldBe Version(1, 2, 3, "-x")
Version("1.2.3-numberhere:1") shouldBe Version(1, 2, 3, "-numberhere:1")
a [VersionException] shouldBe thrownBy (Version("1"))
a [VersionException] shouldBe thrownBy (Version("1."))
a [VersionException] shouldBe thrownBy (Version("1.2"))
a [VersionException] shouldBe thrownBy (Version("1.2."))
a [VersionException] shouldBe thrownBy (Version("1.2.x"))
a [VersionException] shouldBe thrownBy (Version("1x.2.3"))
a [VersionException] shouldBe thrownBy (Version("1.2x.3"))
a [VersionException] shouldBe thrownBy (Version("1x.2x.3"))
a [VersionException] shouldBe thrownBy (Version("foo"))
}
}
class VersionMatcher(right: Version) extends BeMatcher[Version] {
def apply(left: Version): MatchResult = {
MatchResult(left.compatibleWith(right),
s"Version '${left.humanReadable}' was compatible with '${right.humanReadable}'",
s"Version '${left.humanReadable}' was not compatible with '${right.humanReadable}'"
)
}
}
private def compatibleWith(version: Version) = new VersionMatcher(version)
private def incompatibleWith(version: Version) = not(new VersionMatcher(version))
}
| deepsense-io/seahorse-workflow-executor | commons/src/test/scala/io/deepsense/commons/utils/VersionSpec.scala | Scala | apache-2.0 | 3,076 |
package com.leeavital
import org.jboss.netty.handler.codec.http._
import collection.mutable.Map
import org.jboss.netty.buffer.ChannelBuffer
import com.twitter.util.Future
import com.leeavital.util.ChannelBufferHelper
import scala.collection.mutable
/**
* Created by lee on 10/4/14.
*/
class UnfangledResponse(val content: ChannelBuffer, val status: HttpResponseStatus = HttpResponseStatus.OK, headers: Map[String, String] = Map()) {
val cookies = mutable.Set[Cookie]()
def header(k: String, v: String) = {
headers.put(k, v)
}
def toHttpResponse(version: HttpVersion): HttpResponse = {
val r: HttpResponse = new DefaultHttpResponse(version, status)
val channelBufferedContent = content
r.setContent(channelBufferedContent)
headers.foreach {
case (k, v) =>
r.headers.add(k, v)
}
// take care of cookies
if(!cookies.isEmpty) {
val cookieEncoder = new CookieEncoder(false)
cookies.foreach(cookieEncoder.addCookie)
r.headers.add("Set-Cookie", cookieEncoder.encode + "; Path=/")
}
r
}
private def addCookie(c: Cookie) = {
cookies.add(c)
}
//TODO figure out sane defaults and add optional params
def cookie(name: String, value: String) = {
val c = new DefaultCookie(name, value)
addCookie(c)
this
}
def toFuture = {
Future.value(this)
}
}
object UnfangledResponse {
type Status = HttpResponseStatus
def html(html: HtmlString, status: Status = HttpResponseStatus.OK) = {
val channelBuffer = ChannelBufferHelper.create(html.s)
new UnfangledResponse(channelBuffer, status, Map("Content-Type" -> "text/html"))
}
def json(json: JsonString, status: Status = HttpResponseStatus.OK) = {
val channelBuffer = ChannelBufferHelper.create(json.s)
new UnfangledResponse(channelBuffer, status, Map("Content-Type" -> "application/json"))
}
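  // Example (sketch, assuming HtmlString/JsonString simply wrap a raw string as used above):
  //   UnfangledResponse.json(JsonString("""{"ok":true}""")).cookie("session", "abc123")
  // yields a 200 response with Content-Type application/json and a Set-Cookie header.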
implicit def toFuture(e: UnfangledResponse): Future[UnfangledResponse] = {
Future.value(e)
}
}
| leeavital/unfangled | src/main/scala/com/leeavital/unfangled/UnfangledResponse.scala | Scala | mit | 1,981 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.data
import java.util.concurrent.atomic.AtomicBoolean
import org.geotools.data.{FeatureReader, Query}
import org.locationtech.geomesa.accumulo.data.stats.usage.{GeoMesaUsageStats, QueryStat, QueryStatTransform}
import org.locationtech.geomesa.accumulo.index.QueryHints.RichHints
import org.locationtech.geomesa.accumulo.index._
import org.locationtech.geomesa.accumulo.iterators.BinAggregatingIterator.BIN_ATTRIBUTE_INDEX
import org.locationtech.geomesa.filter.filterToString
import org.locationtech.geomesa.security.AuditProvider
import org.locationtech.geomesa.utils.stats.{MethodProfiling, TimingsImpl}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
abstract class AccumuloFeatureReader(val query: Query, val timeout: Option[Long], val maxFeatures: Long)
extends FeatureReader[SimpleFeatureType, SimpleFeature] {
private val closed = new AtomicBoolean(false)
private lazy val start = System.currentTimeMillis()
timeout.foreach(t => ThreadManagement.register(this, start, t))
def isClosed: Boolean = closed.get()
def count: Long = -1L
protected def closeOnce(): Unit
override def getFeatureType = query.getHints.getReturnSft
override def close() = if (!closed.getAndSet(true)) {
try {
timeout.foreach(t => ThreadManagement.unregister(this, start, t))
} finally {
closeOnce()
}
}
}
object AccumuloFeatureReader {
def apply(query: Query, qp: QueryPlanner, timeout: Option[Long], stats: Option[(GeoMesaUsageStats, AuditProvider)]) = {
val maxFeatures = if (query.isMaxFeaturesUnlimited) None else Some(query.getMaxFeatures)
(stats, maxFeatures) match {
case (None, None) => new AccumuloFeatureReaderImpl(query, qp, timeout)
case (None, Some(max)) => new AccumuloFeatureReaderImpl(query, qp, timeout, max) with FeatureLimiting
case (Some((sw, ap)), None) => new AccumuloFeatureReaderWithStats(query, qp, timeout, sw, ap) with FeatureCounting
case (Some((sw, ap)), Some(max)) => new AccumuloFeatureReaderWithStats(query, qp, timeout, sw, ap, max) with FeatureLimiting
}
}
}
/**
* Basic feature reader that wraps the underlying iterator of simple features.
*/
class AccumuloFeatureReaderImpl(query: Query, qp: QueryPlanner, timeout: Option[Long], maxFeatures: Long = 0L)
extends AccumuloFeatureReader(query, timeout, maxFeatures) {
private val iter = qp.runQuery(query)
override def hasNext: Boolean = iter.hasNext
override def next(): SimpleFeature = iter.next()
override protected def closeOnce(): Unit = iter.close()
}
/**
* Basic feature reader with method profiling for stat gathering.
*/
class AccumuloFeatureReaderWithStats(query: Query,
qp: QueryPlanner,
timeout: Option[Long],
sw: GeoMesaUsageStats,
auditProvider: AuditProvider,
maxFeatures: Long = 0L)
extends AccumuloFeatureReader(query, timeout, maxFeatures) with MethodProfiling {
implicit val timings = new TimingsImpl
private val iter = profile(qp.runQuery(query), "planning")
override def next(): SimpleFeature = profile(iter.next(), "next")
override def hasNext: Boolean = profile(iter.hasNext, "hasNext")
override protected def closeOnce(): Unit = {
iter.close()
val stat = QueryStat(qp.sft.getTypeName,
System.currentTimeMillis(),
auditProvider.getCurrentUserId,
filterToString(query.getFilter),
QueryStatTransform.hintsToString(query.getHints),
timings.time("planning"),
timings.time("next") + timings.time("hasNext"),
count
)
sw.writeUsageStat(stat) // note: implementation is asynchronous
}
}
trait FeatureCounting extends AccumuloFeatureReader {
protected var counter = 0L
// because the query planner configures the query hints, we can't check for bin hints
// until after setting up the iterator
protected val sfCount: (SimpleFeature) => Int = if (query.getHints.isBinQuery) {
// bin queries pack multiple records into each feature
// to count the records, we have to count the total bytes coming back, instead of the number of features
val bytesPerHit = if (query.getHints.getBinLabelField.isDefined) 24 else 16
(sf) => sf.getAttribute(BIN_ATTRIBUTE_INDEX).asInstanceOf[Array[Byte]].length / bytesPerHit
} else {
(_) => 1
}
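  // Worked example (from the byte sizes above): with a label field each record occupies 24 bytes,
  // so a 48-byte bin attribute counts as 48 / 24 = 2 records; without a label it counts as
  // 48 / 16 = 3. Non-bin queries simply count one record per feature.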
abstract override def next(): SimpleFeature = {
val sf = super.next()
counter += sfCount(sf)
sf
}
abstract override def count = counter
}
trait FeatureLimiting extends FeatureCounting {
abstract override def hasNext: Boolean = counter < maxFeatures && super.hasNext
}
| mdzimmerman/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/data/AccumuloFeatureReader.scala | Scala | apache-2.0 | 5,256 |
package scala.reflect
import org.junit.Assert._
import org.junit.Test
class FieldAccessTest {
class TestClass {
private val x = 123
locally {
() => x
}
}
/** scala/bug#9306 */
@Test
def testFieldAccess(): Unit = {
import scala.reflect.runtime.universe._
import scala.reflect.runtime.currentMirror
val obj = new TestClass
val objType = currentMirror.reflect(obj).symbol.toType
val objFields = objType.members.collect { case ms: MethodSymbol if ms.isGetter => ms }
assertEquals(123, currentMirror.reflect(obj).reflectField(objFields.head).get)
}
}
| scala/scala | test/junit/scala/reflect/FieldAccessTest.scala | Scala | apache-2.0 | 604 |
package tastytest
object TestMethodDeps {
def test = MethodDeps.parent
}
| scala/scala | test/tasty/neg-isolated/src-2/TestMethodDeps_fail.scala | Scala | apache-2.0 | 78 |
// The MIT License (MIT)
//
// Copyright (c) 2015 David Heidrich, BowlingX <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.bowlingx.commentp
import javax.servlet.http.HttpServletRequest
import org.fusesource.scalate.TemplateEngine
import org.fusesource.scalate.layout.DefaultLayoutStrategy
import org.scalatra._
import org.scalatra.scalate.ScalateSupport
import scala.collection.mutable
trait BaseController extends ScalatraServlet with ScalateSupport {
/* wire up the precompiled templates */
override protected def defaultTemplatePath: List[String] = List("/WEB-INF/templates/views")
override protected def createTemplateEngine(config: ConfigT) = {
val engine = super.createTemplateEngine(config)
engine.layoutStrategy = new DefaultLayoutStrategy(engine,
TemplateEngine.templateTypes.map("/WEB-INF/templates/layouts/default." + _): _*)
engine.packagePrefix = "templates"
engine
}
/* end wiring up the precompiled templates */
override protected def templateAttributes(implicit request: HttpServletRequest): mutable.Map[String, Any] = {
super.templateAttributes ++ mutable.Map.empty // Add extra attributes here, they need bindings in the build file
}
notFound {
// remove content type in case it was set through an action
// scalastyle:off
contentType = null
// scalastyle:on
// Try to render a ScalateTemplate if no route matched
findTemplate(requestPath) map { path =>
contentType = "text/html"
layoutTemplate(path)
} orElse serveStaticResource() getOrElse resourceNotFound()
}
}
| BowlingX/commentp | src/main/scala/com/bowlingx/commentp/BaseController.scala | Scala | mit | 2,635 |
package se.chimps.bitziness.core.endpoints.persistence.redis.endpoint
import se.chimps.bitziness.core.Endpoint
import se.chimps.bitziness.core.endpoints.persistence.redis.RedisFactory
trait RedisEndpoint extends Endpoint with RedisFactory
| Meduzz/Bitziness | src/main/scala/se/chimps/bitziness/core/endpoints/persistence/redis/endpoint/RedisEndpoint.scala | Scala | apache-2.0 | 241 |
import org.scalatest.{FlatSpec, Matchers}
import play.api.libs.json.Json
import scala.io.Source
import scala.utils.JsonFile
class SbtJsonPluginTests extends FlatSpec with Matchers {
"JSON Object with one field" should "deserialized with correct field value" in {
import my.json.models.foo._
val json = JsonFile.readeJsonFile("foo")
val foo = Json.parse(json).as[Foo]
foo.foo shouldEqual 42
}
"Generated code from JSON document of FB post with optional field" should "contain optional field if it was marked as such" in {
import my.json.models.fbpost._
val json =
"""{
| "id":"339880699398622_371821532871205",
| "created_time":"2012-06-19T07:57:54+0000",
| "full_picture":"https:\\/\\/scontent.xx.fbcdn.net\\/v\\/t31.0-8\\/s720x720\\/469692_371821072871251_145095902_o.jpg?oh=8a1be9485002e2d25dbe396a8f1fe176&oe=5A2F45F8"
|}
|""".stripMargin
val post = Json.parse(json).as[Fbpost]
post.message shouldEqual None
}
"Generated code from JSON document of facebook site scala4beginner" should "contain optional message field" in {
import my.json.models.facebook._
val json = JsonFile.readeJsonFile("facebook")
val fb = Json.parse(json).as[Facebook]
fb.posts.data.head.message shouldEqual None
}
// "Generated code from JSON document of HPImageArchive" should "contain url" in {
// import my.json.models.hpimagearchive._
//
// val json = Source.fromURL("https://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-US").mkString
//
// val imageArchive = Json.parse(json).as[HPImageArchive]
//
// imageArchive.images.head.url should not be empty
// }
"Generated code from JSON document which contains a Scala type name" should "contain a class name with suffix" in {
import my.json.models.listmodel._
val json = JsonFile.readeJsonFile("list")
val list = Json.parse(json).as[ListModel]
list.list shouldEqual Seq(1, 2, 3)
}
"Generated code from JSON document with values of the same schema" should "contain only a single representation of the schema and correct order of play-json formats" in {
import my.json.models.geo._
val json = JsonFile.readeJsonFile("geo")
val geo = Json.parse(json).as[Geo]
geo.geometry.viewport.northeast.lat shouldBe 37.42426708029149
}
"Snake case object name" should "be transformed to camel case" in {
import my.json.models.bar._
val json = JsonFile.readeJsonFile("bar")
val bar = Json.parse(json).as[Bar]
bar shouldEqual Bar(TestObj(42))
}
}
| battermann/sbt-json | test-project/src/test/scala/SbtJsonPluginTests.scala | Scala | mit | 2,574 |
package de.choffmeister.microserviceutils.auth
import java.time.Instant
import akka.Done
import akka.http.scaladsl.model.{StatusCodes, Uri}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{Directive1, ExceptionHandler, Route, StandardRoute}
import akka.http.scaladsl.settings.RoutingSettings
import de.choffmeister.microserviceutils.auth.grants.AuthorizationCodeGrant
import de.choffmeister.microserviceutils.auth.models._
import scala.annotation.nowarn
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.implicitConversions
import scala.util.control.NonFatal
trait AuthProviderAuthorizationCodeFlow[RO, C, AT, AC] extends AuthProvider[RO, C, AT] {
val authorizationCodeGrant: AuthorizationCodeGrant[RO, C, AT, AC]
def authorizeResourceOwner: Directive1[RO]
@nowarn
def additionalAuthorizationCodeRedirectUriQuery(client: C, resourceOwner: RO): Future[Map[String, String]] =
Future.successful(Map.empty)
def authorizeRoute: Route = {
toStrictEntity(3.second, 128 * 1024) {
authorizeRouteExtractParameters { parameters =>
parameters.get("response_type") match {
case Some("code") =>
extractClient {
case Right(client) =>
verifyRedirectUri(parameters.get("redirect_uri"), client) match {
case Right(verifiedRedirectUri) =>
val state = parameters.get("state")
handleExceptions(authorizeRouteExceptionHandler(verifiedRedirectUri, state)) {
authorizeResourceOwner { resourceOwner =>
val challenge =
(parameters.get("code_challenge"), parameters.get("code_challenge_method")) match {
case (None, None) =>
Right(None)
case (None, Some(_)) =>
Left(
AuthError
.invalidRequest("Code challenge must be provided if code challenge method is present")
)
case (Some(challenge), None) =>
Right(Some(AuthorizationCodeChallenge(challenge, AuthorizationCodeChallengeMethod.Plain)))
case (Some(challenge), Some("plain")) =>
Right(Some(AuthorizationCodeChallenge(challenge, AuthorizationCodeChallengeMethod.Plain)))
case (Some(challenge), Some("S256")) =>
Right(Some(AuthorizationCodeChallenge(challenge, AuthorizationCodeChallengeMethod.S256)))
case (Some(_), Some(other)) =>
Left(
AuthError.invalidRequest(
s"Code challenge method $other is unsupported (expected one of ${List("plain", "S256")
.mkString(", ")})"
)
)
}
challenge match {
case Right(challenge) =>
val requestedScopes =
parameters.get("scope").map(_.split(" ").map(_.trim).filter(_.nonEmpty).toSet)
if (!resourceOwner.disabled) {
val scopes = evaluateScopes(resourceOwner, client, requestedScopes)
val authorizationCodeF = createAuthorizationCode(
state = state,
scopes = scopes,
resourceOwnerId = resourceOwner.id,
clientId = client.id,
challenge = challenge,
redirectUri = verifiedRedirectUri,
expiresAt = Instant.now.plusSeconds(300)
)
onSuccess(authorizationCodeF) { authorizationCode =>
onSuccess(additionalAuthorizationCodeRedirectUriQuery(client, resourceOwner)) {
additionalQuery =>
val parsedRedirectUri = Uri(authorizationCode.redirectUri)
val codeQuery =
Map("code" -> Some(authorizationCode.code), "state" -> authorizationCode.state)
.collect { case (k, Some(v)) =>
k -> v
}
val fullQuery =
Uri.Query((parsedRedirectUri.query() ++ codeQuery ++ additionalQuery): _*)
val fullRedirectUri = parsedRedirectUri.withQuery(fullQuery)
redirect(fullRedirectUri, StatusCodes.Found)
}
}
} else {
redirectWithAuthError(
AuthError.accessDenied("Resource owner has been disabled"),
verifiedRedirectUri,
state
)
}
case Left(error) =>
redirectWithAuthError(error, verifiedRedirectUri, state)
}
}
}
case Left(error) =>
completeWithAuthError(error)
}
case Left(error) =>
completeWithAuthError(error)
}
case Some(_) =>
val error = AuthError.invalidRequest("Parameter response_type is unsupported")
completeWithAuthError(error)
case None =>
val error = AuthError.invalidRequest("Parameter response_type is missing")
completeWithAuthError(error)
}
}
}
}
private def verifyRedirectUri(redirectUri: Option[String], client: C): AuthResult[String] = {
redirectUri match {
case None =>
Left(AuthError.invalidRequest("Parameter redirect_uri is missing"))
case Some(provided) if !client.redirectUris.contains(provided) =>
Left(AuthError.invalidRequest("Parameter redirect_uri is invalid"))
case Some(provided) =>
Right(provided)
}
}
private def authorizeRouteExceptionHandler(redirectUri: String, state: Option[String]): ExceptionHandler =
new ExceptionHandler {
override def withFallback(that: ExceptionHandler): ExceptionHandler = this
override def seal(settings: RoutingSettings): ExceptionHandler = this
override def isDefinedAt(exception: Throwable): Boolean = true
override def apply(exception: Throwable): Route = {
exception match {
case error: AuthError =>
redirectWithAuthError(error, redirectUri, state)
case NonFatal(error) =>
redirectWithAuthError(
AuthError.serverError(Option(error.getMessage).filter(_.nonEmpty)),
redirectUri,
state
)
case err =>
failWith(err)
}
}
}
private def authorizeRouteExtractParameters: Directive1[Map[String, String]] = {
parameterMultiMap.flatMap { rawParameters =>
rawParameters.find(_._2.size > 1) match {
case None =>
provide(rawParameters.view.map { case (k, v) => k -> v.head }.filter(_._2.nonEmpty).toMap)
case Some((key, _)) =>
completeWithAuthError(AuthError.invalidRequest(s"Parameter $key must not be provided more than once"))
}
}
}
private def redirectWithAuthError(error: AuthError, redirectUri: String, state: Option[String]): StandardRoute = {
val parsedRedirectUri = Uri(redirectUri)
val additionalQuery = Uri.Query(
Map("error" -> Some(error.error), "error_description" -> Some(error.description), "state" -> state).collect {
case (k, Some(v)) => k -> v
}
)
val fullQuery = Uri.Query((parsedRedirectUri.query() ++ additionalQuery): _*)
val fullRedirectUri = parsedRedirectUri.withQuery(fullQuery)
redirect(fullRedirectUri, StatusCodes.Found)
}
def findAuthorizationCode(code: String): Future[Option[AC]]
def createAuthorizationCode(
state: Option[String],
scopes: Set[String],
resourceOwnerId: String,
clientId: String,
challenge: Option[AuthorizationCodeChallenge],
redirectUri: String,
expiresAt: Instant
): Future[AC]
def exchangeAuthorizationCode(code: String): Future[Done]
implicit def toAuthorizationCode(value: AC): AuthorizationCode
implicit def toClientWithRedirectUris(value: C): ClientWithRedirectUris
}
| choffmeister/microservice-utils | microservice-utils-auth/src/main/scala/de/choffmeister/microserviceutils/auth/AuthProviderAuthorizationCodeFlow.scala | Scala | mit | 9,013 |
package dsmoq.services
/**
 * Case class representing a dataset access control entry.
 *
 * @param id user ID or group ID
 * @param ownerType owner type (@see dsmoq.persistence.OwnerType)
 * @param accessLevel access level (@see dsmoq.persistence.UserAccessLevel, GroupAccessLevel)
*/
case class DataSetAccessControlItem(id: String, ownerType: Int, accessLevel: Int)
| nkawa/dsmoq | server/apiServer/src/main/scala/dsmoq/services/DataSetAccessControlItem.scala | Scala | apache-2.0 | 400 |
// Copyright (c) 2010 Andrew Wild ([email protected])
// Licensed under the MIT (MIT-LICENSE.txt) licence.
package ternary;
object Primes {
implicit def bigInt2BigInteger(b: BigInt) = b.bigInteger
implicit def bigInteger2BigInt(b: java.math.BigInteger) = new BigInt(b)
  def divisors(n: Int): List[Int] = for (i <- List.range(1, n + 1) if n % i == 0) yield i
  def isPrime(n: Int) = divisors(n).length == 2
  def naivePrimes(b: Int)(e: Int): List[Int] = List.range(b, e) filter isPrime
// Stream impl
def from(n: Int): Stream[Int] = Stream.cons(n, from(n + 1))
def sieveEratosthenes(s: Stream[Int]): Stream[Int] = Stream.cons(s.head, sieveEratosthenes(s.tail filter { _ % s.head != 0 }))
def primes = sieveEratosthenes(from(2))
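  // Example (hypothetical usage): primes.take(5).toList yields List(2, 3, 5, 7, 11)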
  // Borrows Java's impl; makes use of the implicit BigInt conversions
lazy val primesJava: Stream[BigInt] = Stream.cons(2, primesJava.map(_.nextProbablePrime))
}
| akohdr/examples | ScalaTernary/src/ternary/Primes.scala | Scala | mit | 920 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
// SBT
import sbt._
import Keys._
object BuildSettings {
// Basic settings for our app
lazy val basicSettings = Seq[Setting[_]](
organization := "com.snowplowanalytics",
version := "0.4.0",
description := "Kinesis sink for Elasticsearch",
scalaVersion := "2.10.1",
scalacOptions := Seq("-deprecation", "-encoding", "utf8",
"-feature", "-target:jvm-1.7"),
scalacOptions in Test := Seq("-Yrangepos"),
resolvers ++= Dependencies.resolutionRepos
)
// Makes our SBT app settings available from within the app
lazy val scalifySettings = Seq(sourceGenerators in Compile <+= (sourceManaged in Compile, version, name, organization) map { (d, v, n, o) =>
val file = d / "settings.scala"
IO.write(file, """package com.snowplowanalytics.snowplow.storage.kinesis.elasticsearch.generated
|object Settings {
| val organization = "%s"
| val version = "%s"
| val name = "%s"
|}
|""".stripMargin.format(o, v, n))
Seq(file)
})
// sbt-assembly settings for building a fat jar
import sbtassembly.Plugin._
import AssemblyKeys._
lazy val sbtAssemblySettings = assemblySettings ++ Seq(
// Executable jarfile
assemblyOption in assembly ~= { _.copy(prependShellScript = Some(defaultShellScript)) },
// Name it as an executable
jarName in assembly := { s"${name.value}-${version.value}" }
)
lazy val buildSettings = basicSettings ++ scalifySettings ++ sbtAssemblySettings
}
| mdavid/lessig-bigdata | lib/snowplow/4-storage/kinesis-elasticsearch-sink/project/BuildSettings.scala | Scala | mit | 2,275 |
/*
* Copyright 2014 – 2018 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalax.transducers.internal
import scalax.transducers.Arbitraries
import scalaz.@@
import org.scalacheck.Arbitrary
import org.scalacheck.Arbitrary._
import org.specs2.ScalaCheck
import org.specs2.mutable.Specification
object CappedEvictingQueueSpec extends Specification with ScalaCheck with Arbitraries {
implicit val nonEmptyList = Arbitrary(arbitrary[List[String]] suchThat (_.nonEmpty))
"The capped evicting queue" should {
"consume all elements" in prop { (xs: List[String]) ⇒
val queue = new CappedEvictingQueue[String](xs.size)
val added = xs map queue.add
added must contain(beNone).forall
}
"evict the oldest elements" in prop { (xs: List[String]) ⇒
val queue = new CappedEvictingQueue[String](xs.size)
xs foreach queue.add
val evicted = xs map queue.add
xs.map(Option(_)) ==== evicted
}
"iterator all elements from old to young" in prop { (xs: List[String]) ⇒
val queue = new CappedEvictingQueue[String](xs.size)
xs foreach queue.add
queue.elements.toList ==== xs
}
"iterator only the live elements" in prop { (xs: List[String], ys: List[String]) ⇒
val newXs = Iterator.continually(ys).flatMap(identity).take(xs.size).toList
val queue = new CappedEvictingQueue[String](xs.size)
xs foreach queue.add
newXs foreach queue.add
queue.elements.toList ==== newXs
}
"iterator only added elements" in prop { (xs: List[String]) ⇒
val size = xs.size
val max = size / 2
val subset = xs.take(max)
val queue = new CappedEvictingQueue[String](size)
subset foreach queue.add
queue.elements.toList ==== subset
}
"iterator knows its size" in prop { (xs: List[String]) ⇒
val queue = new CappedEvictingQueue[String](xs.size)
xs foreach queue.add
val iter = queue.elements
iter.hasDefiniteSize ==== true
iter.size ==== xs.size
}
"have a size" in prop { (xs: List[String]) ⇒
val queue = new CappedEvictingQueue[String](xs.size)
xs foreach queue.add
queue.size ==== xs.size
}
"has isEmpty" in prop { (xs: List[String]) ⇒
val queue = new CappedEvictingQueue[String](xs.size)
xs foreach queue.add
queue.isEmpty ==== xs.isEmpty
}
"has nonEmpty" in prop { (xs: List[String]) ⇒
val queue = new CappedEvictingQueue[String](xs.size)
xs foreach queue.add
queue.nonEmpty ==== xs.nonEmpty
}
"decline non-positive capacities" in prop { (n: Int @@ Negative) ⇒
new CappedEvictingQueue[String](n) must throwA[IllegalArgumentException].like {
case e ⇒ e.getMessage must startWith("requirement failed")
}
}
"show current elements in toString" in prop { (xs: List[String], n: Int @@ NonZeroPositive) ⇒
val size1: Int = xs.size
val size2: Int = n
val capacity = size1 max size2
val overCapacity = size2 - size1
val items = if (overCapacity <= 0)
xs.take(1).map(x ⇒ s"($x)") ::: xs.drop(1)
else
xs ::: "(null)" :: List.fill(overCapacity - 1)("null")
val queue = new CappedEvictingQueue[String](capacity)
xs foreach queue.add
queue.toString ==== items.mkString("[", ", ", "]")
}
}
}
| knutwalker/transducers-scala | tests/src/test/scala/scalax/transducers/internal/CappedEvictingQueueSpec.scala | Scala | apache-2.0 | 3,904 |
package io.dylemma.spac
package example
import cats.syntax.apply._
import io.dylemma.spac.xml.JavaxSupport._
import io.dylemma.spac.xml._
import java.time.LocalDate
import java.time.format.DateTimeFormatter
object Example_FromReadme extends App {
case class Post(date: LocalDate, author: Author, stats: Stats, body: String, comments: List[Comment])
case class Author(id: String, name: String)
case class Stats(numLikes: Int, numTweets: Int)
case class Comment(date: LocalDate, author: Author, body: String)
val rawXml = """<blog>
| <post date="2015-11-16">
| <author name="dylemma" id="abc123"/>
| <stats likes="123" tweets="4"/>
| <body>Hello world!</body>
| <comments>
| <comment date="2015-11-18">
| <author name="anonymous" id="def456"/>
| <body>I'm commenting on your fake blog!</body>
| </comment>
| </comments>
| </post>
| <post date="2015-11-18">
| <author name="johndoe" id="004200"/>
| <stats likes="7" tweets="1"/>
| <body>A second blog post, huzzah!</body>
| <comments>
| <comment date="2015-11-19">
| <author name="anonymous" id="def456"/>
| <body>It's me again</body>
| </comment>
| </comments>
| </post>
|</blog>"""
val commentDateFormat = DateTimeFormatter.ofPattern("yyyy-MM-dd")
val dateAttributeParser = XmlParser.attr("date").map(LocalDate.parse(_, commentDateFormat))
implicit val AuthorParser: XmlParser[Author] = (
XmlParser.attr("id"),
XmlParser.attr("name")
).mapN(Author.apply)
val authorElementParser = Splitter.xml(* \\ "author").as[Author].parseFirst
implicit val StatsParser: XmlParser[Stats] = (
XmlParser.attr("likes").map(_.toInt),
XmlParser.attr("tweets").map(_.toInt)
).mapN(Stats.apply)
implicit val CommentParser: XmlParser[Comment] = (
dateAttributeParser,
authorElementParser,
Splitter.xml(* \\ "body").text.parseFirst
).mapN(Comment.apply)
implicit val PostParser: XmlParser[Post] = (
dateAttributeParser,
authorElementParser,
Splitter.xml(* \\ "stats").as[Stats].parseFirst,
Splitter.xml(* \\ "body").text.parseFirst,
Splitter.xml(* \\ "comments" \\ "comment").as[Comment].parseToList
).mapN(Post.apply)
val postTransformer: Transformer[XmlEvent, Post] = Splitter.xml("blog" \\ "post") joinBy PostParser
val postTransformerAlt = Splitter.xml("blog" \\ "post").as[Post] // available because PostParser is marked implicit
postTransformer.parseTap(println) parse rawXml
}
| dylemma/xml-spac | examples/src/main/scala/io/dylemma/spac/example/Example_FromReadme.scala | Scala | mit | 2,486 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.communication.socket
import org.zeromq.ZMQ
/** Represents an option to provide to a socket. */
sealed trait SocketOption
/**
* Represents the linger option used to communicate the millisecond duration
* to continue processing messages after the socket has been told to close.
*
* @note Provide -1 as the duration to wait until all messages are processed
*
* @param milliseconds The duration in milliseconds
*/
case class Linger(milliseconds: Int) extends SocketOption
/**
* Represents the subscribe option used to filter messages coming into a
* socket subscribing to a publisher. Uses the provided byte prefix to filter
* incoming messages.
*
* @param topic The array of bytes to use as a filter based on the
* bytes at the beginning of incoming messages
*/
case class Subscribe(topic: Array[Byte]) extends SocketOption
object Subscribe {
val all = Subscribe(ZMQ.SUBSCRIPTION_ALL)
}
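// Hypothetical usage sketch: Subscribe("weather.".getBytes) keeps only messages whose
// bytes start with that prefix, while Subscribe.all receives every published message.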
/**
* Represents the identity option used to identify the socket.
*
* @param identity The identity to use with the socket
*/
case class Identity(identity: Array[Byte]) extends SocketOption
/**
* Represents the bind option used to tell the socket what address to bind to.
*
* @param address The address for the socket to use
*/
case class Bind(address: String) extends SocketOption
/**
* Represents the connect option used to tell the socket what address to
* connect to.
*
* @param address The address for the socket to use
*/
case class Connect(address: String) extends SocketOption
| lresende/incubator-toree | communication/src/main/scala/org/apache/toree/communication/socket/SocketOption.scala | Scala | apache-2.0 | 2,355 |
package com.criteo.dev.cluster.s3
import com.criteo.dev.cluster.aws.AwsUtilities
import com.criteo.dev.cluster.command.SshHiveAction
import com.criteo.dev.cluster.copy.CopyUtilities
import com.criteo.dev.cluster.{Node, NodeFactory}
import org.slf4j.LoggerFactory
/**
 * Attaches a given cluster to an S3 bucket by running DDL on that cluster to create tables
* pointing to data stored in the bucket.
*/
object RunS3DdlAction {
private val logger = LoggerFactory.getLogger(RunS3DdlAction.getClass)
def apply(node: Node, bucketId: String, copiedLocally: Boolean, conf: Map[String, String]) = {
val blobStore = BucketUtilities.getBlobStore(conf)
val fileList = BucketUtilities.getSortedLogs(blobStore, bucketId)
val ddlList = fileList.filter(f => {
BucketUtilities.getDataType(f).equals(DataType.hive)
})
val ddl = new SshHiveAction(node)
ddlList.foreach(d => {
val blob = blobStore.getBlob(bucketId, d.getName())
val payload = blob.getPayload()
val content = BucketUtilities.getContent(payload)
logger.info("Running following DDL")
println
content.map(c =>
if (copiedLocally) {
c.replace("$LOCATION", "")
} else {
c.replace("$LOCATION", BucketUtilities.getS3Location(conf, bucketId, node.nodeType,
includeCredentials=true))
}).foreach(c => {
println(c)
ddl.add(c)
})
})
ddl.run
logger.info("Successfully created metadata on cluster for data on the S3 bucket")
}
}
| criteo/berilia | src/main/scala/com/criteo/dev/cluster/s3/RunS3DdlAction.scala | Scala | apache-2.0 | 1,537 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scaladl.optimization
import scaladl.layers.AnnTypes.Tensor
abstract class Updater extends Serializable {
/**
* Compute an updated value for weights given the gradient, stepSize, iteration number and
* regularization parameter. Also returns the regularization value regParam * R(w)
* computed using the *updated* weights.
*
* @param weightsOld - Column matrix of size dx1 where d is the number of features.
* @param gradient - Column matrix of size dx1 where d is the number of features.
* @param stepSize - step size across iterations
* @param iter - Iteration number
* @param regParam - Regularization parameter
* @return A tuple of 2 elements. The first element is a column matrix containing updated weights,
* and the second element is the regularization value computed using updated weights.
*/
def compute(
weightsOld: Tensor,
gradient: Tensor,
stepSize: Double,
iter: Int,
regParam: Double): (Tensor, Double)
}
| avulanov/scalable-deeplearning | src/main/scala/scaladl/optimization/Updater.scala | Scala | apache-2.0 | 1,853 |
package net.fwbrasil.activate
import net.fwbrasil.activate.migration.Migration
import net.fwbrasil.activate.entity.EntityHelper
trait StoppableActivateContext extends ActivateContext {
var running = false
def start = synchronized {
ActivateContext.clearCaches()
running = true
Migration.update(this)
}
def stop = synchronized {
running = false
}
override protected val runMigrationAtStartup = false
override def acceptEntity(entityClass: Class[_]) =
running
} | avramirez/activate | activate-core/src/main/scala/net/fwbrasil/activate/StoppableActivateContext.scala | Scala | lgpl-2.1 | 538 |
package main.scala.projectEulerScala
import scala.collection.mutable
object LatticePaths {
val memo: mutable.Map[(Int, Int), BigInt] = mutable.HashMap()
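  // Counts monotonic lattice paths from (0, 0) to (x, y) via the recurrence
  // count(x, y) = count(x - 1, y) + count(x, y - 1), memoised in `memo`;
  // the closed form is the binomial coefficient C(x + y, x).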
def count(x: Int, y: Int): BigInt = {
val key = (x, y)
if (x == 0 || y == 0) {
memo(key) = 1
} else if (!memo.contains(key)) {
memo(key) = count(x - 1, y) + count(x, y - 1)
}
memo(key)
}
def main(args: Array[String]): Unit = {
println(count(20, 20))
}
}
| rck109d/projectEuler | src/main/scala/projectEulerScala/LatticePaths.scala | Scala | lgpl-3.0 | 457 |
package com.olegych.scastie.client.components.editor
import codemirror.TextAreaEditor
import com.olegych.scastie.api
private[editor] case class EditorState(
editor: Option[TextAreaEditor] = None,
problemAnnotations: Map[api.Problem, Annotation] = Map(),
renderAnnotations: Map[api.Instrumentation, Annotation] = Map(),
runtimeErrorAnnotations: Map[api.RuntimeError, Annotation] = Map(),
folded: Boolean = false,
readOnly: Boolean = false
)
| scalacenter/scastie | client/src/main/scala/com.olegych.scastie.client/components/editor/EditorState.scala | Scala | apache-2.0 | 466 |
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.nlp.pretrained
import com.johnsnowlabs.nlp.pretrained.ResourceType.ResourceType
import com.johnsnowlabs.util.{JsonParser, Version}
import org.json4s.ext.EnumNameSerializer
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization.write
import org.json4s.{Formats, NoTypeHints}
import java.io.{FileWriter, InputStream}
import java.sql.Timestamp
import scala.io.Source
case class ResourceMetadata
(
name: String,
language: Option[String],
libVersion: Option[Version],
sparkVersion: Option[Version],
readyToUse: Boolean,
time: Timestamp,
isZipped: Boolean = false,
category: Option[ResourceType] = Some(ResourceType.NOT_DEFINED),
checksum: String = "",
annotator: Option[String] = None
) extends Ordered[ResourceMetadata] {
lazy val key: String = {
if (language.isEmpty && libVersion.isEmpty && sparkVersion.isEmpty) {
name
} else s"${name}_${s(language)}_${v(libVersion)}_${v(sparkVersion)}_${t(time)}"
}
lazy val fileName: String = {
if (isZipped) key + ".zip" else key
}
private def s(str: Option[String]): String = {
str.getOrElse("")
}
private def v(ver: Option[Version]): String = {
ver.map(v => v.toString()).getOrElse("")
}
private def t(time: Timestamp): String = {
time.getTime.toString
}
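  // Ordering used when sorting candidates in resolveResource: compares by Spark version
  // first, then library version, then upload timestamp, so the newest compatible
  // resource sorts last.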
override def compare(that: ResourceMetadata): Int = {
var value: Option[Int] = None
if (this.sparkVersion == that.sparkVersion && this.libVersion == that.libVersion) {
value = Some(0)
}
if (this.sparkVersion == that.sparkVersion) {
if (this.libVersion.get.toFloat == that.libVersion.get.toFloat) {
value = orderByTimeStamp(this.time, that.time)
} else {
if (this.libVersion.get.toFloat > that.libVersion.get.toFloat) {
value = Some(1)
} else value = Some(-1)
}
} else {
if (this.sparkVersion.get.toFloat > that.sparkVersion.get.toFloat) {
value = Some(1)
} else value = Some(-1)
}
value.get
}
private def orderByTimeStamp(thisTime: Timestamp, thatTime: Timestamp): Option[Int] = {
if (thisTime.after(thatTime)) Some(1) else Some(-1)
}
}
object ResourceMetadata {
implicit val formats: Formats = Serialization.formats(NoTypeHints) + new EnumNameSerializer(ResourceType)
def toJson(meta: ResourceMetadata): String = {
write(meta)
}
def parseJson(json: String): ResourceMetadata = {
JsonParser.formats = formats
JsonParser.parseObject[ResourceMetadata](json)
}
def resolveResource(candidates: List[ResourceMetadata],
request: ResourceRequest): Option[ResourceMetadata] = {
val compatibleCandidates = candidates
.filter(item => item.readyToUse && item.libVersion.isDefined && item.sparkVersion.isDefined
&& item.name == request.name
&& (request.language.isEmpty || item.language.isEmpty || request.language.get == item.language.get)
&& Version.isCompatible(request.libVersion, item.libVersion)
&& Version.isCompatible(request.sparkVersion, item.sparkVersion)
)
val sortedResult = compatibleCandidates.sorted
sortedResult.lastOption
}
def readResources(file: String): List[ResourceMetadata] = {
readResources(Source.fromFile(file))
}
def readResources(stream: InputStream): List[ResourceMetadata] = {
readResources(Source.fromInputStream(stream))
}
def readResources(source: Source): List[ResourceMetadata] = {
source.getLines()
.collect { case line if line.nonEmpty =>
ResourceMetadata.parseJson(line)
}
.toList
}
def addMetadataToFile(fileName: String, metadata: ResourceMetadata): Unit = {
val fw = new FileWriter(fileName, true)
try {
fw.write("\\n" + ResourceMetadata.toJson(metadata))
}
finally fw.close()
}
}
| JohnSnowLabs/spark-nlp | src/main/scala/com/johnsnowlabs/nlp/pretrained/ResourceMetadata.scala | Scala | apache-2.0 | 4,452 |
package shade.memcached
import internals._
import concurrent.{Future, ExecutionContext}
import net.spy.memcached.{FailureMode => SpyFailureMode, _}
import net.spy.memcached.ConnectionFactoryBuilder.{Protocol => SpyProtocol}
import net.spy.memcached.auth.{PlainCallbackHandler, AuthDescriptor}
import concurrent.duration._
import java.util.concurrent.TimeUnit
import shade.memcached.internals.SuccessfulResult
import shade.memcached.internals.FailedResult
import shade.{UnhandledStatusException, CancelledException, TimeoutException}
import monifu.concurrent.Scheduler
/**
* Memcached client implementation based on SpyMemcached.
*
* See the parent trait (Cache) for API docs.
*/
class MemcachedImpl(config: Configuration, ec: ExecutionContext) extends Memcached {
private[this] implicit val context = ec
/**
* Adds a value for a given key, if the key doesn't already exist in the cache store.
*
* If the key already exists in the cache, the future returned result will be false and the
* current value will not be overridden. If the key isn't there already, the value
* will be set and the future returned result will be true.
*
* The expiry time can be Duration.Inf (infinite duration).
*
* @return either true, in case the value was set, or false otherwise
*/
def add[T](key: String, value: T, exp: Duration)(implicit codec: Codec[T]): Future[Boolean] =
value match {
case null =>
Future.successful(false)
case _ =>
instance.realAsyncAdd(withPrefix(key), codec.serialize(value), 0, exp, config.operationTimeout) map {
case SuccessfulResult(givenKey, Some(_)) =>
true
case SuccessfulResult(givenKey, None) =>
false
case failure: FailedResult =>
throwExceptionOn(failure)
}
}
/**
* Sets a (key, value) in the cache store.
*
* The expiry time can be Duration.Inf (infinite duration).
*/
def set[T](key: String, value: T, exp: Duration)(implicit codec: Codec[T]): Future[Unit] =
value match {
case null =>
Future.successful(())
case _ =>
instance.realAsyncSet(withPrefix(key), codec.serialize(value), 0, exp, config.operationTimeout) map {
case SuccessfulResult(givenKey, _) =>
()
case failure: FailedResult =>
throwExceptionOn(failure)
}
}
/**
* Deletes a key from the cache store.
*
* @return true if a key was deleted or false if there was nothing there to delete
*/
def delete(key: String): Future[Boolean] =
instance.realAsyncDelete(withPrefix(key), config.operationTimeout) map {
case SuccessfulResult(givenKey, result) =>
result
case failure: FailedResult =>
throwExceptionOn(failure)
}
/**
* Fetches a value from the cache store.
*
* @return Some(value) in case the key is available, or None otherwise (doesn't throw exception on key missing)
*/
def get[T](key: String)(implicit codec: Codec[T]): Future[Option[T]] =
instance.realAsyncGet(withPrefix(key), config.operationTimeout) map {
case SuccessfulResult(givenKey, option) =>
option.map(codec.deserialize)
case failure: FailedResult =>
throwExceptionOn(failure)
}
def getOrElse[T](key: String, default: => T)(implicit codec: Codec[T]): Future[T] =
get[T](key) map {
case Some(value) => value
case None => default
}
/**
* Compare and set.
*
* @param expecting should be None in case the key is not expected, or Some(value) otherwise
* @param exp can be Duration.Inf (infinite) for not setting an expiration
* @return either true (in case the compare-and-set succeeded) or false otherwise
*/
def compareAndSet[T](key: String, expecting: Option[T], newValue: T, exp: Duration)(implicit codec: Codec[T]): Future[Boolean] =
expecting match {
case None =>
add[T](key, newValue, exp)
case Some(expectingValue) =>
instance.realAsyncGets(withPrefix(key), config.operationTimeout) flatMap {
case SuccessfulResult(givenKey, None) =>
Future.successful(false)
case SuccessfulResult(givenKey, Some((currentData, casID))) =>
if (codec.deserialize(currentData) == expectingValue)
instance.realAsyncCAS(withPrefix(key), casID, 0, codec.serialize(newValue), exp, config.operationTimeout) map {
case SuccessfulResult(_, bool) =>
bool
case failure: FailedResult =>
throwExceptionOn(failure)
}
else
Future.successful(false)
case failure: FailedResult =>
throwExceptionOn(failure)
}
}
/**
* Used by both transformAndGet and getAndTransform for code reusability.
*
* @param f is the function that dictates what gets returned (either the old or the new value)
*/
private[this] def genericTransform[T, R](key: String, exp: Duration, cb: Option[T] => T)(f: (Option[T], T) => R)(implicit codec: Codec[T]): Future[R] = {
val keyWithPrefix = withPrefix(key)
val timeoutAt = System.currentTimeMillis() + config.operationTimeout.toMillis
/*
     * Inner function used for retrying compare-and-set operations
     * until the configured operation timeout is exceeded.
     *
     * @throws TimeoutException in case the operation timeout is reached
     * before the compare-and-set succeeds
*/
def loop(retry: Int): Future[R] = {
val remainingTime = timeoutAt - System.currentTimeMillis()
if (remainingTime <= 0)
throw new TimeoutException(key)
instance.realAsyncGets(keyWithPrefix, remainingTime.millis) flatMap {
case SuccessfulResult(_, None) =>
val result = cb(None)
add(key, result, exp) flatMap {
case true =>
Future.successful(f(None, result))
case false =>
loop(retry + 1)
}
case SuccessfulResult(_, Some((current, casID))) =>
val currentOpt = Some(codec.deserialize(current))
val result = cb(currentOpt)
instance.realAsyncCAS(keyWithPrefix, casID, 0, codec.serialize(result), exp, remainingTime.millis) flatMap {
case SuccessfulResult(_, true) =>
Future.successful(f(currentOpt, result))
case SuccessfulResult(_, false) =>
loop(retry + 1)
case failure: FailedResult =>
throwExceptionOn(failure)
}
case failure: FailedResult =>
throwExceptionOn(failure)
}
}
loop(0)
}
/**
* Transforms the given key and returns the new value.
*
* The cb callback receives the current value
* (None in case the key is missing or Some(value) otherwise)
* and should return the new value to store.
*
* The method retries until the compare-and-set operation succeeds, so
* the callback should have no side-effects.
*
* This function can be used for atomic incrementers and stuff like that.
*
* @return the new value
*/
def transformAndGet[T](key: String, exp: Duration)(cb: (Option[T]) => T)(implicit codec: Codec[T]): Future[T] =
genericTransform(key, exp, cb) {
case (oldValue, newValue) => newValue
}
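  // Hypothetical usage sketch (assumes an implicit Codec[Int] is in scope):
  //   memcached.transformAndGet[Int]("counter", 1.minute) { current => current.getOrElse(0) + 1 }
  // atomically increments the value stored under "counter".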
/**
* Transforms the given key and returns the old value as an Option[T]
* (None in case the key wasn't in the cache or Some(value) otherwise).
*
* The cb callback receives the current value
* (None in case the key is missing or Some(value) otherwise)
* and should return the new value to store.
*
* The method retries until the compare-and-set operation succeeds, so
* the callback should have no side-effects.
*
* This function can be used for atomic incrementers and stuff like that.
*
* @return the old value
*/
def getAndTransform[T](key: String, exp: Duration)(cb: (Option[T]) => T)(implicit codec: Codec[T]): Future[Option[T]] =
genericTransform(key, exp, cb) {
case (oldValue, newValue) => oldValue
}
def close() {
instance.shutdown(3, TimeUnit.SECONDS)
}
private[this] def throwExceptionOn(failure: FailedResult) = failure match {
case FailedResult(k, TimedOutStatus) =>
throw new TimeoutException(withoutPrefix(k))
case FailedResult(k, CancelledStatus) =>
throw new CancelledException(withoutPrefix(k))
case FailedResult(k, unhandled) =>
throw new UnhandledStatusException(
"For key %s - %s".format(withoutPrefix(k), unhandled.getClass.getName))
}
@inline
private[this] def withPrefix(key: String): String =
if (prefix.isEmpty)
key
else
prefix + "-" + key
@inline
private[this] def withoutPrefix[T](key: String): String = {
if (!prefix.isEmpty && key.startsWith(prefix + "-"))
key.substring(prefix.length + 1)
else
key
}
private[this] val prefix = config.keysPrefix.getOrElse("")
private[this] val instance = {
System.setProperty("net.spy.log.LoggerImpl",
"shade.memcached.internals.Slf4jLogger")
val conn = {
val builder = new ConnectionFactoryBuilder()
.setProtocol(
if (config.protocol == Protocol.Binary)
SpyProtocol.BINARY
else
SpyProtocol.TEXT
)
.setDaemon(true)
.setFailureMode(config.failureMode match {
case FailureMode.Retry =>
SpyFailureMode.Retry
case FailureMode.Cancel =>
SpyFailureMode.Cancel
case FailureMode.Redistribute =>
SpyFailureMode.Redistribute
})
val withTimeout = config.operationTimeout match {
case duration: FiniteDuration =>
builder.setOpTimeout(config.operationTimeout.toMillis)
case _ =>
builder
}
val withAuth = config.authentication match {
case Some(credentials) =>
withTimeout.setAuthDescriptor(
new AuthDescriptor(Array("PLAIN"),
new PlainCallbackHandler(credentials.username, credentials.password)))
case None =>
withTimeout
}
withAuth
}
import scala.collection.JavaConverters._
val addresses = AddrUtil.getAddresses(config.addresses).asScala
new SpyMemcachedIntegration(conn.build(), addresses, Scheduler.fromContext(context))
}
}
| kazzna/shade | src/main/scala/shade/memcached/MemcachedImpl.scala | Scala | mit | 10,416 |
package rpgboss.model
import rpgboss.lib._
import rpgboss.model.battle.BattleStatus
import rpgboss.model.battle.Hit
/**
* Because effects have different meanings in different contexts, we provide
* a way to get the validity and meaning of an effect in this context.
*/
object EffectContext extends Enumeration {
val CharacterClass, Item, Equipment, Enemy, Skill, StatusEffect = Value
}
case class EffectUsability(valid: Boolean, helpMessage: String)
case class Effect(var keyId: Int, v1: Int = 0, v2: Int = 0) {
def meta = Effect.getMeta(keyId)
def applyToStats(stats: BattleStats) = meta.applyToStats(this, stats)
def applyAsSkillOrItem(target: BattleStatus) =
meta.applyAsSkillOrItem(this, target)
// TODO: Remove this statement and remove var. This is to support legacy
// mappings.
keyId = meta.id
}
trait MetaEffect {
def id: Int
// TODO: Remove the getMeta clause. Only here to support legacy mappings.
def matchesKey(keyId: Int): Boolean =
keyId == id || Effect.getMeta(keyId).id == id
def matches(effect: Effect) = matchesKey(effect.keyId)
def name: String
def usability = (context: EffectContext.Value) =>
EffectUsability(false, "TODO: Implement a help message here.")
def renderer = (pData: ProjectData, effect: Effect) => "TODO: No renderer"
def applyToStats(effect: Effect, stats: BattleStats) = stats
/**
* Although this method looks similar to applyToStats, it's very different.
* The method operates by mutating |target| directly without making a copy.
* @return The damage (or healing) performed. Used for display purposes.
*/
def applyAsSkillOrItem(
effect: Effect, target: BattleStatus): Seq[Damage] = Nil
Effect.registerMetaEffect(id, this)
}
object Effect {
import EffectContext._
private var _metaEffects = collection.mutable.Map[Int, MetaEffect]()
def registerMetaEffect(id: Int, metaEffect: MetaEffect) = {
assert(!_metaEffects.contains(id))
_metaEffects.put(id, metaEffect)
}
{
    // Serves as a static initializer to create all Effect classes and force
// them to register their ids.
// TODO: This seems hacky. There must be a better way...
Array(
RecoverHpAdd,
RecoverHpMul,
RecoverMpAdd,
RecoverMpMul,
AddStatusEffect,
RemoveStatusEffect,
RemoveAllStatusEffect,
MhpAdd,
MmpAdd,
AtkAdd,
SpdAdd,
MagAdd,
ArmAdd,
MreAdd,
ResistElement,
EscapeBattle,
LearnSkill,
UseSkill
)
}
/**
* These are the old keyIds that we will be phasing out.
* These were problematic because they were arbitrarily assigned, and have
* no room between them to add new keys.
*/
def getMeta(keyId: Int) = keyId match {
case 0 => RecoverHpAdd
case 1 => RecoverHpMul
case 2 => RecoverMpAdd
case 3 => RecoverMpMul
case 4 => AddStatusEffect
case 5 => RemoveStatusEffect
case 6 => MhpAdd
case 7 => MmpAdd
case 8 => AtkAdd
case 9 => SpdAdd
case 10 => MagAdd
case 11 => ArmAdd
case 12 => MreAdd
case 13 => ResistElement
case 14 => EscapeBattle
case 15 => LearnSkill
case 16 => UseSkill
case i => _metaEffects.getOrElse(i, InvalidEffect)
}
def pointRenderer(pData: ProjectData, effect: Effect) =
"%dp".format(effect.v1)
def percentRenderer(pData: ProjectData, effect: Effect) =
"%d%%".format(effect.v1)
/**
* Renders the value of the enum index stored in v1.
*/
def getEnumOfValue1[T <: HasName]
(getChoices: ProjectData => Array[T])
(pData: ProjectData, effect: Effect) = {
val choices = getChoices(pData)
val name =
if (effect.v1 < choices.length)
choices(effect.v1).name
else
"<Past end of array>"
StringUtils.standardIdxFormat(effect.v1, name)
}
/**
* Renders the value of the enum index stored in id, and then shows the number
* stored in value.
*/
def getEnumWithExtraValue[T <: HasName]
(getChoices: ProjectData => Array[T])
(pData: ProjectData, effect: Effect) = {
val value1string = getEnumOfValue1(getChoices)(pData, effect)
"%s. Value = %d ".format(value1string, effect.v2)
}
def recoveryHelp(context: EffectContext.Value) = context match {
case Item => EffectUsability(true, "One-time effect of item use.")
case Skill => EffectUsability(true, "One-time effect of skill use.")
case StatusEffect => EffectUsability(true, "Applies per tick.")
case _ => EffectUsability(false, "Doesn't do anything.")
}
def itemEquipSkillOnlyHelp(context: EffectContext.Value) = context match {
case Item => EffectUsability(true, "One-time effect of item use.")
case Equipment => EffectUsability(true, "Occurs once per hit.")
case Skill => EffectUsability(true, "One-time effect of skill use.")
case _ => EffectUsability(false, "Doesn't do anything.")
}
def classEquipOrStatus(context: EffectContext.Value) = context match {
case CharacterClass => EffectUsability(true, "Permanently has resistance.")
case Equipment => EffectUsability(true, "Confers resistance on equipper.")
case StatusEffect => EffectUsability(true, "Confers resistance while active.")
case _ => EffectUsability(false, "Doesn't do anything.")
}
def onItemAndSkillHelp(context: EffectContext.Value) = context match {
case Item => EffectUsability(true, "One-time effect of item use.")
case Skill => EffectUsability(true, "One-time effect of skill use.")
case _ => EffectUsability(false, "Doesn't do anything.")
}
def onItemOnlyHelp(context: EffectContext.Value) = context match {
case Item => EffectUsability(true, "One-time effect of item use.")
case _ => EffectUsability(false, "Doesn't do anything.")
}
def onItemAndEquipHelp(context: EffectContext.Value) = context match {
case Item => EffectUsability(true, "One-time effect of item use.")
case Equipment => EffectUsability(true, "Occurs once per hit.")
case _ => EffectUsability(false, "Doesn't do anything.")
}
/**
* Recovery Utility functions
*/
def recoverHp(target: BattleStatus, amount: Double): Seq[Damage] = {
if (!target.alive)
return Nil
val amountInt = amount.round.toInt
target.hp += amountInt
List(Damage(DamageType.Magic, 0, -amountInt))
}
def recoverMp(target: BattleStatus, amount: Double) = {
val amountInt = amount.round.toInt
target.mp += amountInt
List(Damage(DamageType.MPDamage, 0, -amountInt))
}
}
object InvalidEffect extends MetaEffect {
def id = -1
def name = "Invalid Effect"
}
object RecoverHpAdd extends MetaEffect {
def id = 100
def name = "Recover HP"
override def usability = Effect.recoveryHelp _
override def renderer = Effect.pointRenderer _
override def applyAsSkillOrItem(effect: Effect, target: BattleStatus) =
Effect.recoverHp(target, effect.v1)
}
object RecoverHpMul extends MetaEffect {
def id = 101
def name = "Recover percentage of HP"
override def renderer = Effect.percentRenderer _
override def usability = Effect.recoveryHelp _
override def applyAsSkillOrItem(effect: Effect, target: BattleStatus) =
Effect.recoverHp(target, effect.v1 * 0.01 * target.stats.mhp)
}
object RecoverMpAdd extends MetaEffect {
def id = 102
def name = "Recover MP"
override def renderer = Effect.pointRenderer _
override def usability = Effect.recoveryHelp _
override def applyAsSkillOrItem(effect: Effect, target: BattleStatus) =
Effect.recoverMp(target, effect.v1)
}
object RecoverMpMul extends MetaEffect {
def id = 103
def name = "Recover percentage of MP"
override def renderer = Effect.percentRenderer _
override def usability = Effect.recoveryHelp _
override def applyAsSkillOrItem(effect: Effect, target: BattleStatus) =
Effect.recoverMp(target, effect.v1 * 0.01 * target.stats.mmp)
}
object AddStatusEffect extends MetaEffect {
def id = 200
def name = "Add status effect"
override def renderer = Effect.getEnumWithExtraValue(_.enums.statusEffects) _
override def usability = Effect.itemEquipSkillOnlyHelp _
override def applyAsSkillOrItem(effect: Effect, target: BattleStatus) = {
if (Utils.randomWithPercent(effect.v2)) {
target.updateTempStatusEffectIds(target.tempStatusEffectIds :+ effect.v1)
List(Damage(DamageType.AddStatusEffect, 0, effect.v1))
} else {
Nil
}
}
}
object RemoveStatusEffect extends MetaEffect {
def id = 201
def name = "Remove status effect"
override def renderer = Effect.getEnumWithExtraValue(_.enums.statusEffects) _
override def usability = Effect.itemEquipSkillOnlyHelp _
override def applyAsSkillOrItem(effect: Effect, target: BattleStatus) = {
if (Utils.randomWithPercent(effect.v2)) {
target.updateTempStatusEffectIds(
target.tempStatusEffectIds.filter(_ != effect.v1))
List(Damage(DamageType.AddStatusEffect, 0, effect.v1))
} else {
Nil
}
}
}
object RemoveAllStatusEffect extends MetaEffect {
def id = 202
def name = "Remove all status effects"
override def usability = Effect.itemEquipSkillOnlyHelp _
override def applyAsSkillOrItem(effect: Effect, target: BattleStatus) = {
val origEffects = target.tempStatusEffectIds
if (!origEffects.isEmpty && Utils.randomWithPercent(effect.v2)) {
target.updateTempStatusEffectIds(Array())
origEffects.distinct.map(Damage(DamageType.AddStatusEffect, 0, _))
} else {
Nil
}
}
}
object MhpAdd extends MetaEffect {
def id = 300
def name = "Increase Max HP"
override def renderer = Effect.pointRenderer _
override def usability = Effect.classEquipOrStatus _
override def applyToStats(effect: Effect, stats: BattleStats) =
stats.copy(mhp = stats.mhp + effect.v1)
}
object MmpAdd extends MetaEffect {
def id = 301
def name = "Increase Max MP"
override def renderer = Effect.pointRenderer _
override def usability = Effect.classEquipOrStatus _
override def applyToStats(effect: Effect, stats: BattleStats) =
stats.copy(mmp = stats.mmp + effect.v1)
}
object AtkAdd extends MetaEffect {
def id = 302
def name = "Increase ATK"
override def renderer = Effect.pointRenderer _
override def usability = Effect.classEquipOrStatus _
override def applyToStats(effect: Effect, stats: BattleStats) =
stats.copy(atk = stats.atk + effect.v1)
}
object SpdAdd extends MetaEffect {
def id = 303
def name = "Increase SPD"
override def renderer = Effect.pointRenderer _
override def usability = Effect.classEquipOrStatus _
override def applyToStats(effect: Effect, stats: BattleStats) =
stats.copy(spd = stats.spd + effect.v1)
}
object MagAdd extends MetaEffect {
def id = 304
def name = "Increase MAG"
override def renderer = Effect.pointRenderer _
override def usability = Effect.classEquipOrStatus _
override def applyToStats(effect: Effect, stats: BattleStats) =
stats.copy(mag = stats.mag + effect.v1)
}
object ArmAdd extends MetaEffect {
def id = 305
def name = "Increase ARM"
override def renderer = Effect.pointRenderer _
override def usability = Effect.classEquipOrStatus _
override def applyToStats(effect: Effect, stats: BattleStats) =
stats.copy(arm = stats.arm + effect.v1)
}
object MreAdd extends MetaEffect {
def id = 306
def name = "Increase MRE"
override def renderer = Effect.pointRenderer _
override def usability = Effect.classEquipOrStatus _
override def applyToStats(effect: Effect, stats: BattleStats) =
stats.copy(mre = stats.mre + effect.v1)
}
object ResistElement extends MetaEffect {
def id = 400
def name = "Resist Element"
override def renderer = Effect.getEnumWithExtraValue(_.enums.statusEffects) _
override def usability = Effect.classEquipOrStatus _
override def applyToStats(effect: Effect, stats: BattleStats) = {
val newResists = stats.elementResists.updated(
effect.v1, stats.elementResists(effect.v1) + effect.v2)
stats.copy(elementResists = newResists)
}
}
object EscapeBattle extends MetaEffect {
def id = 500
def name = "Escape Battle"
override def renderer = Effect.pointRenderer _
override def usability = Effect.onItemOnlyHelp _
}
object LearnSkill extends MetaEffect {
def id = 600
def name = "Learn Skill"
override def renderer = Effect.getEnumOfValue1(_.enums.skills) _
override def usability = Effect.onItemOnlyHelp _
}
object UseSkill extends MetaEffect {
def id = 601
def name = "Use Skill"
override def renderer = Effect.getEnumOfValue1(_.enums.skills) _
override def usability = Effect.onItemAndEquipHelp _
} | DrDub/rpgboss | core/src/main/scala/rpgboss/model/Effect.scala | Scala | agpl-3.0 | 12,649 |
package composing_methods
/**
* Created by lingx on 2015/10/27.
*/
class ReplaceMethodWithMethodObject {
}
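// A minimal sketch (not part of the original class) of the refactoring this file is named
// after: a long method whose many locals make extraction hard is moved into its own class,
// with the locals promoted to fields. All names below are illustrative assumptions.
//
//   class Order(val quantity: Int, val itemPrice: Double) {
//     def price: Double = new PriceCalculator(this).compute()
//   }
//
//   class PriceCalculator(order: Order) {
//     private val basePrice = order.quantity * order.itemPrice
//     def compute(): Double = if (basePrice > 1000) basePrice * 0.95 else basePrice
//   }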
| zj-lingxin/refactoring | src/main/scala/composing_methods/ReplaceMethodWithMethodObject.scala | Scala | mit | 111 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index
import org.locationtech.geomesa.index.metadata.GeoMesaMetadata
class InMemoryMetadata[T] extends GeoMesaMetadata[T] {
import scala.collection.mutable.{ Map => mMap }
private val schemas = mMap.empty[String, mMap[String, T]]
override def getFeatureTypes: Array[String] = synchronized(schemas.keys.toArray)
override def insert(typeName: String, key: String, value: T): Unit = synchronized {
schemas.getOrElseUpdate(typeName, mMap.empty[String, T]).put(key, value)
}
override def insert(typeName: String, kvPairs: Map[String, T]): Unit = synchronized {
val m = schemas.getOrElseUpdate(typeName, mMap.empty[String, T])
kvPairs.foreach { case (k, v) => m.put(k, v) }
}
override def remove(typeName: String, key: String): Unit = synchronized {
schemas.get(typeName).foreach(_.remove(key))
}
override def remove(typeName: String, keys: Seq[String]): Unit = keys.foreach(remove(typeName, _))
override def read(typeName: String, key: String, cache: Boolean): Option[T] = synchronized {
schemas.get(typeName).flatMap(_.get(key))
}
override def scan(typeName: String, prefix: String, cache: Boolean): Seq[(String, T)] = synchronized {
schemas.get(typeName) match {
case None => Seq.empty
case Some(m) => m.filterKeys(_.startsWith(prefix)).toSeq
}
}
override def delete(typeName: String): Unit = synchronized {
schemas.remove(typeName)
}
override def invalidateCache(typeName: String, key: String): Unit = {}
override def backup(typeName: String): Unit = {}
override def close(): Unit = {}
}
| aheyne/geomesa | geomesa-index-api/src/test/scala/org/locationtech/geomesa/index/InMemoryMetadata.scala | Scala | apache-2.0 | 2,081 |
package testUtils
import org.qirx.cms.Cms
import play.api.test.FakeApplication
import play.api.GlobalSettings
import play.api.mvc.Handler
import play.api.mvc.RequestHeader
import play.api.Mode
object TestApplication {
def fakeApplication(global: Option[GlobalSettings] = None) =
new FakeApplication(withGlobal = global)
def apply(cms: Cms) = {
val global =
new GlobalSettings {
override def onRouteRequest(request: RequestHeader): Option[Handler] =
cms.handle(request, orElse = super.onRouteRequest)
}
fakeApplication(Some(global))
}
} | EECOLOR/play-cms | cms/src/test/scala/testUtils/TestApplication.scala | Scala | mit | 588 |
package mesosphere.marathon
package core.instance
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.marathon.core.condition.Condition
import mesosphere.marathon.core.pod.MesosContainer
import mesosphere.marathon.core.task.bus.MesosTaskStatusTestHelper
import mesosphere.marathon.core.task.state.{ NetworkInfo, NetworkInfoPlaceholder }
import mesosphere.marathon.core.task.update.TaskUpdateOperation
import mesosphere.marathon.core.task.{ Task, TaskCondition }
import mesosphere.marathon.state.{ PathId, Timestamp }
import mesosphere.marathon.test.MarathonTestHelper
import mesosphere.marathon.test.MarathonTestHelper.Implicits._
import org.apache.mesos
import org.slf4j.LoggerFactory
case class TestTaskBuilder(
task: Option[Task], instanceBuilder: TestInstanceBuilder, now: Timestamp = Timestamp.now()
) {
def taskFromTaskInfo(
taskInfo: mesos.Protos.TaskInfo,
offer: mesos.Protos.Offer = MarathonTestHelper.makeBasicOffer().build(),
version: Timestamp = Timestamp(10),
taskCondition: Condition = Condition.Staging) = {
val instance = instanceBuilder.getInstance()
this.copy(task = Some(TestTaskBuilder.Helper.makeTaskFromTaskInfo(taskInfo, offer, version, now, taskCondition).copy(taskId = Task.Id.forInstanceId(instance.instanceId, None))))
}
def taskForStatus(mesosState: mesos.Protos.TaskState, stagedAt: Timestamp = now, container: Option[MesosContainer] = None) = {
val instance = instanceBuilder.getInstance()
val taskId = Task.Id.forInstanceId(instance.instanceId, container)
val mesosStatus = TestTaskBuilder.Helper.statusForState(taskId.idString, mesosState)
this.copy(task = Some(TestTaskBuilder.Helper.minimalTask(taskId, stagedAt, Some(mesosStatus))))
}
def maybeMesosContainerByName(name: Option[String]): Option[MesosContainer] = name.map(n => MesosContainer(name = n, resources = raml.Resources()))
def taskLaunched(container: Option[MesosContainer] = None) =
this.copy(task = Some(TestTaskBuilder.Helper.minimalTask(instanceBuilder.getInstance().instanceId, container, now).copy(taskId = Task.Id.forInstanceId(instanceBuilder.getInstance().instanceId, None))))
def taskReserved(containerName: Option[String] = None) = {
val instance = instanceBuilder.getInstance()
val taskReservationState = Task.Reservation.State.New(timeout = None)
val localVolumeIds = Seq.empty[Task.LocalVolumeId]
val taskId = Task.Id.forInstanceId(instance.instanceId, maybeMesosContainerByName(containerName))
this.copy(task = Some(TestTaskBuilder.Helper.minimalReservedTask(instance.instanceId.runSpecId, taskReservationState, localVolumeIds, maybeTaskId = Some(taskId))))
}
def taskResidentReserved(localVolumeIds: Task.LocalVolumeId*) = {
val instance = instanceBuilder.getInstance()
val taskId = Task.Id.forInstanceId(instance.instanceId, container = None)
this.copy(task = Some(TestTaskBuilder.Helper.residentReservedTask(instance.instanceId.runSpecId, TestTaskBuilder.Helper.taskReservationStateNew, localVolumeIds.to[Seq], maybeTaskId = Some(taskId))))
}
def taskResidentReserved(taskReservationState: Task.Reservation.State) = {
val instance = instanceBuilder.getInstance()
val taskId = Task.Id.forInstanceId(instance.instanceId, None)
this.copy(task = Some(TestTaskBuilder.Helper.residentReservedTask(instance.instanceId.runSpecId, taskReservationState, Seq.empty[Task.LocalVolumeId], maybeTaskId = Some(taskId))))
}
def taskResidentLaunched(localVolumeIds: Task.LocalVolumeId*) = {
val instance = instanceBuilder.getInstance()
val taskId = Task.Id.forInstanceId(instance.instanceId, None)
this.copy(task = Some(TestTaskBuilder.Helper.residentLaunchedTask(instance.instanceId.runSpecId, localVolumeIds.to[Seq], maybeTaskId = Some(taskId))))
}
def taskResidentUnreachable(localVolumeIds: Task.LocalVolumeId*) = {
val instance = instanceBuilder.getInstance()
val taskId = Task.Id.forInstanceId(instance.instanceId, None)
this.copy(task = Some(TestTaskBuilder.Helper.residentUnreachableTask(instance.instanceId.runSpecId, localVolumeIds.to[Seq], maybeTaskId = Some(taskId))))
}
def taskRunning(containerName: Option[String] = None, stagedAt: Timestamp = now, startedAt: Timestamp = now) = {
val instance = instanceBuilder.getInstance()
this.copy(task = Some(TestTaskBuilder.Helper.runningTask(
Task.Id.forInstanceId(instance.instanceId, maybeMesosContainerByName(containerName)),
instance.runSpecVersion, stagedAt = stagedAt.millis, startedAt = startedAt.millis)))
}
/**
* Creates a task with Condition.Unreachable and Mesos status TASK_LOST for backwards compatibility tests.
*
* @param since Mesos status timestamp.
* @param containerName the name of the container
   * @return a copy of this builder holding the newly created task
*/
def taskLost(since: Timestamp = now, containerName: Option[String] = None) = {
val instance = instanceBuilder.getInstance()
val task = TestTaskBuilder.Helper.minimalLostTask(instance.instanceId.runSpecId, since = since, taskCondition = Condition.Unreachable)
this.copy(task = Some(task.copy(taskId = Task.Id.forInstanceId(instance.instanceId, maybeMesosContainerByName(containerName)))))
}
/**
* Creates a task with Condition.Unreachable and Mesos status TASK_UNREACHABLE.
*
* @param since Mesos status timestamp AND unreachable time.
   * @param containerName the name of the container
   * @return a copy of this builder holding the newly created task
*/
def taskUnreachable(since: Timestamp = now, containerName: Option[String] = None) = {
val instance = instanceBuilder.getInstance()
val task = TestTaskBuilder.Helper.minimalUnreachableTask(instance.instanceId.runSpecId, since = since)
this.copy(task = Some(task.copy(taskId = Task.Id.forInstanceId(instance.instanceId, maybeMesosContainerByName(containerName)))))
}
/**
* Creates a task with Condition.UnreachableInactive and Mesos status TASK_UNREACHABLE.
*
* @param since Mesos status timestamp AND unreachable time.
   * @param containerName the name of the container
   * @return a copy of this builder holding the newly created task
*/
def taskUnreachableInactive(since: Timestamp = now, containerName: Option[String] = None) = {
val instance = instanceBuilder.getInstance()
val taskId = Task.Id.forInstanceId(instance.instanceId, maybeMesosContainerByName(containerName))
val task = TestTaskBuilder.Helper.minimalUnreachableTask(instance.instanceId.runSpecId, Condition.UnreachableInactive, since).copy(taskId = taskId)
this.copy(task = Some(task))
}
def mesosStatusForCondition(condition: Condition, taskId: Task.Id): Option[mesos.Protos.TaskStatus] = condition match {
case Condition.Created => None
case Condition.Dropped => Some(MesosTaskStatusTestHelper.dropped(taskId))
case Condition.Error => Some(MesosTaskStatusTestHelper.error(taskId))
case Condition.Failed => Some(MesosTaskStatusTestHelper.failed(taskId))
case Condition.Finished => Some(MesosTaskStatusTestHelper.finished(taskId))
case Condition.Gone => Some(MesosTaskStatusTestHelper.gone(taskId))
case Condition.Killed => Some(MesosTaskStatusTestHelper.killed(taskId))
case Condition.Killing => Some(MesosTaskStatusTestHelper.killing(taskId))
case Condition.Reserved => None
case Condition.Running => Some(MesosTaskStatusTestHelper.running(taskId))
case Condition.Staging => Some(MesosTaskStatusTestHelper.staging(taskId))
case Condition.Starting => Some(MesosTaskStatusTestHelper.starting(taskId))
case Condition.Unknown => Some(MesosTaskStatusTestHelper.unknown(taskId))
case Condition.Unreachable => Some(MesosTaskStatusTestHelper.unreachable(taskId))
case Condition.UnreachableInactive => Some(MesosTaskStatusTestHelper.unreachable(taskId))
}
def taskError(since: Timestamp = now, containerName: Option[String] = None) = createTask(since, containerName, Condition.Error)
def taskFailed(since: Timestamp = now, containerName: Option[String] = None) = createTask(since, containerName, Condition.Failed)
def taskFinished(since: Timestamp = now, containerName: Option[String] = None) = createTask(since, containerName, Condition.Finished)
def taskKilled(since: Timestamp = now, containerName: Option[String] = None) = createTask(since, containerName, Condition.Killed)
def taskDropped(since: Timestamp = now, containerName: Option[String] = None) = createTask(since, containerName, Condition.Dropped)
def taskUnknown(since: Timestamp = now, containerName: Option[String] = None) = createTask(since, containerName, Condition.Unknown)
def taskGone(since: Timestamp = now, containerName: Option[String] = None) = createTask(since, containerName, Condition.Gone)
def taskCreated(since: Timestamp = now, containerName: Option[String] = None) = createTask(since, containerName, Condition.Created)
def taskKilling(since: Timestamp = now, containerName: Option[String] = None) = createTask(since, containerName, Condition.Killing)
def taskStaging(since: Timestamp = now, containerName: Option[String] = None) = createTask(since, containerName, Condition.Staging)
def taskStaged(containerName: Option[String] = None, stagedAt: Timestamp = now, version: Option[Timestamp] = None) = {
val instance = instanceBuilder.getInstance()
this.copy(task = Some(TestTaskBuilder.Helper.stagedTask(Task.Id.forInstanceId(instance.instanceId, maybeMesosContainerByName(containerName)), version.getOrElse(instance.runSpecVersion), stagedAt = stagedAt.millis)))
}
def taskStarting(stagedAt: Timestamp = now, containerName: Option[String] = None) = {
val instance = instanceBuilder.getInstance()
this.copy(task = Some(TestTaskBuilder.Helper.startingTaskForApp(instance.instanceId, stagedAt = stagedAt.millis, container = maybeMesosContainerByName(containerName))))
}
private def createTask(since: Timestamp, containerName: Option[String], condition: Condition) = {
val instance = instanceBuilder.getInstance()
val taskId = Task.Id.forInstanceId(instance.instanceId, maybeMesosContainerByName(containerName))
val mesosStatus = mesosStatusForCondition(condition, taskId)
this.copy(task = Some(TestTaskBuilder.Helper.minimalTask(taskId, since, mesosStatus, condition)))
}
def withNetworkInfo(networkInfo: NetworkInfo): TestTaskBuilder =
copy(task = task.map(_.withNetworkInfo(networkInfo)))
def withNetworkInfo(
hostName: Option[String] = None,
hostPorts: Seq[Int] = Nil,
networkInfos: scala.collection.Seq[mesos.Protos.NetworkInfo] = Nil): TestTaskBuilder =
copy(task = task.map(_.withNetworkInfo(hostName, hostPorts, networkInfos)))
def asHealthyTask(): TestTaskBuilder = {
import mesosphere.marathon.test.MarathonTestHelper.Implicits._
this.copy(task = task match {
case Some(t: Task) => Some(t.withStatus(status => status.copy(mesosStatus = status.mesosStatus.map(_.toBuilder.setHealthy(true).build()))))
case None => None
})
}
def applyUpdate(update: TaskUpdateOperation): TestTaskBuilder = {
val concreteTask = task.getOrElse(throw new IllegalArgumentException("No task defined for TaskBuilder"))
concreteTask.update(update)
this
}
def build(): TestInstanceBuilder = task match {
case Some(concreteTask) => instanceBuilder.addTask(concreteTask)
case None => instanceBuilder
}
}
object TestTaskBuilder {
private[this] val log = LoggerFactory.getLogger(getClass)
def newBuilder(instanceBuilder: TestInstanceBuilder) = TestTaskBuilder(None, instanceBuilder)
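  // Typical flow: obtain a builder via newBuilder, refine it with one of the task* methods
  // (e.g. taskRunning() or taskStaged()), then call build() to fold the resulting task back
  // into the enclosing TestInstanceBuilder.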
object Helper {
def makeTaskFromTaskInfo(
taskInfo: mesos.Protos.TaskInfo,
offer: mesos.Protos.Offer = MarathonTestHelper.makeBasicOffer().build(),
version: Timestamp = Timestamp(10), now: Timestamp = Timestamp(10),
taskCondition: Condition = Condition.Staging): Task.LaunchedEphemeral = {
log.debug(s"offer: $offer")
Task.LaunchedEphemeral(
taskId = Task.Id(taskInfo.getTaskId),
runSpecVersion = version,
status = Task.Status(
stagedAt = now,
condition = taskCondition,
networkInfo = NetworkInfo(hostName = "host.some", hostPorts = Seq(1, 2, 3), ipAddresses = Nil)
)
)
}
def minimalTask(appId: PathId): Task.LaunchedEphemeral = minimalTask(Task.Id.forRunSpec(appId))
def minimalTask(instanceId: Instance.Id, container: Option[MesosContainer], now: Timestamp): Task.LaunchedEphemeral =
minimalTask(Task.Id.forInstanceId(instanceId, container), now)
def minimalTask(taskId: Task.Id, now: Timestamp = Timestamp.now(), mesosStatus: Option[mesos.Protos.TaskStatus] = None): Task.LaunchedEphemeral = {
minimalTask(taskId, now, mesosStatus, if (mesosStatus.isDefined) TaskCondition(mesosStatus.get) else Condition.Created)
}
def minimalTask(taskId: Task.Id, now: Timestamp, mesosStatus: Option[mesos.Protos.TaskStatus], taskCondition: Condition): Task.LaunchedEphemeral = {
Task.LaunchedEphemeral(
taskId,
runSpecVersion = now,
status = Task.Status(
stagedAt = now,
startedAt = None,
mesosStatus = mesosStatus,
condition = taskCondition,
networkInfo = NetworkInfo("host.some", hostPorts = Nil, ipAddresses = Nil)
)
)
}
def minimalLostTask(appId: PathId, taskCondition: Condition = Condition.Gone, since: Timestamp = Timestamp.now()): Task.LaunchedEphemeral = {
val taskId = Task.Id.forRunSpec(appId)
val status = MesosTaskStatusTestHelper.lost(mesos.Protos.TaskStatus.Reason.REASON_RECONCILIATION, taskId, since)
minimalTask(
taskId = taskId,
now = since,
mesosStatus = Some(status),
taskCondition = taskCondition
)
}
def minimalUnreachableTask(appId: PathId, taskCondition: Condition = Condition.Unreachable, since: Timestamp = Timestamp.now()): Task.LaunchedEphemeral = {
val lostTask = minimalLostTask(appId = appId, since = since)
val mesosStatus = MesosTaskStatusTestHelper.unreachable(taskId = lostTask.taskId, since = since)
val status = lostTask.status.copy(condition = taskCondition, mesosStatus = Some(mesosStatus))
lostTask.copy(status = status)
}
def minimalRunning(appId: PathId, taskCondition: Condition = Condition.Running, since: Timestamp = Timestamp.now()): Task.LaunchedEphemeral = {
val taskId = Task.Id.forRunSpec(appId)
val status = MesosTaskStatusTestHelper.mesosStatus(state = mesos.Protos.TaskState.TASK_RUNNING, maybeHealthy = Option(true), taskId = taskId)
minimalTask(
taskId = taskId,
now = since,
mesosStatus = Some(status),
taskCondition = taskCondition
)
}
def minimalReservedTask(appId: PathId, taskReservationState: Task.Reservation.State, localVolumeIds: Seq[Task.LocalVolumeId], maybeTaskId: Option[Task.Id] = None): Task.Reserved = {
val taskId = maybeTaskId.getOrElse(Task.Id.forRunSpec(appId))
val reservation = Task.Reservation(localVolumeIds, taskReservationState)
Task.Reserved(
taskId = taskId,
reservation = reservation,
status = Task.Status(Timestamp.now(), condition = Condition.Reserved, networkInfo = NetworkInfoPlaceholder()),
runSpecVersion = Timestamp.now())
}
def reservationFor(localVolumeIds: Seq[Task.LocalVolumeId], taskReservationState: Task.Reservation.State) = {
Task.Reservation(localVolumeIds, taskReservationState)
}
    // Use with caution: this returns a reservation whose labels most probably don't match your task/instance ID!
def newReservation: Task.Reservation = Task.Reservation(Seq.empty, taskReservationStateNew)
def taskReservationStateNew = Task.Reservation.State.New(timeout = None)
def residentReservedTask(appId: PathId, taskReservationState: Task.Reservation.State, localVolumeIds: Seq[Task.LocalVolumeId], maybeTaskId: Option[Task.Id] = None) =
minimalReservedTask(appId, taskReservationState, localVolumeIds, maybeTaskId)
def residentLaunchedTask(appId: PathId, localVolumeIds: Seq[Task.LocalVolumeId], maybeTaskId: Option[Task.Id] = None) = {
val now = Timestamp.now()
val taskId = maybeTaskId.getOrElse(Task.Id.forRunSpec(appId))
val reservation = reservationFor(localVolumeIds, Task.Reservation.State.Launched)
Task.LaunchedOnReservation(
taskId = taskId,
runSpecVersion = now,
status = Task.Status(
stagedAt = now,
startedAt = Some(now),
mesosStatus = None,
condition = Condition.Running,
networkInfo = NetworkInfoPlaceholder()
),
reservation = reservation)
}
def residentUnreachableTask(appId: PathId, localVolumeIds: Seq[Task.LocalVolumeId], maybeTaskId: Option[Task.Id] = None) = {
val now = Timestamp.now()
val taskId = maybeTaskId.getOrElse(Task.Id.forRunSpec(appId))
val reservation = reservationFor(localVolumeIds, Task.Reservation.State.Launched)
Task.LaunchedOnReservation(
taskId = taskId,
runSpecVersion = now,
status = Task.Status(
stagedAt = now,
startedAt = Some(now),
mesosStatus = None,
condition = Condition.Unreachable,
networkInfo = NetworkInfoPlaceholder()
),
reservation = reservation)
}
def startingTaskForApp(instanceId: Instance.Id, appVersion: Timestamp = Timestamp(1), stagedAt: Long = 2, container: Option[MesosContainer] = None): Task.LaunchedEphemeral =
startingTask(
Task.Id.forInstanceId(instanceId, container),
appVersion = appVersion,
stagedAt = stagedAt
)
def startingTask(taskId: Task.Id, appVersion: Timestamp = Timestamp(1), stagedAt: Long = 2): Task.LaunchedEphemeral =
Task.LaunchedEphemeral(
taskId = taskId,
runSpecVersion = appVersion,
status = Task.Status(
stagedAt = Timestamp(stagedAt),
startedAt = None,
mesosStatus = Some(statusForState(taskId.idString, mesos.Protos.TaskState.TASK_STARTING)),
condition = Condition.Starting,
networkInfo = NetworkInfoPlaceholder()
)
)
def stagedTaskForApp(
appId: PathId = PathId("/test"), appVersion: Timestamp = Timestamp(1), stagedAt: Long = 2): Task.LaunchedEphemeral =
stagedTask(Task.Id.forRunSpec(appId), appVersion = appVersion, stagedAt = stagedAt)
def stagedTask(
taskId: Task.Id,
appVersion: Timestamp = Timestamp(1),
stagedAt: Long = 2): Task.LaunchedEphemeral =
Task.LaunchedEphemeral(
taskId = taskId,
runSpecVersion = appVersion,
status = Task.Status(
stagedAt = Timestamp(stagedAt),
startedAt = None,
mesosStatus = Some(statusForState(taskId.idString, mesos.Protos.TaskState.TASK_STAGING)),
condition = Condition.Staging,
networkInfo = NetworkInfoPlaceholder()
)
)
def statusForState(taskId: String, state: mesos.Protos.TaskState, maybeReason: Option[mesos.Protos.TaskStatus.Reason] = None): mesos.Protos.TaskStatus = {
val builder = mesos.Protos.TaskStatus
.newBuilder()
.setTaskId(mesos.Protos.TaskID.newBuilder().setValue(taskId))
.setState(state)
maybeReason.foreach(builder.setReason)
builder.buildPartial()
}
def runningTaskForApp(
appId: PathId = PathId("/test"),
appVersion: Timestamp = Timestamp(1),
stagedAt: Long = 2,
startedAt: Long = 3): Task.LaunchedEphemeral =
runningTask(
Task.Id.forRunSpec(appId),
appVersion = appVersion,
stagedAt = stagedAt,
startedAt = startedAt
)
def runningTask(
taskId: Task.Id,
appVersion: Timestamp = Timestamp(1),
stagedAt: Long = 2,
startedAt: Long = 3): Task.LaunchedEphemeral = {
import mesosphere.marathon.test.MarathonTestHelper.Implicits._
startingTask(taskId, appVersion, stagedAt)
.withStatus((status: Task.Status) =>
status.copy(
startedAt = Some(Timestamp(startedAt)),
mesosStatus = Some(statusForState(taskId.idString, mesos.Protos.TaskState.TASK_RUNNING))
)
)
}
def killedTask(
taskId: Task.Id,
appVersion: Timestamp = Timestamp(1),
stagedAt: Long = 2,
startedAt: Long = 3): Task.LaunchedEphemeral = {
startingTask(taskId, appVersion, stagedAt)
.withStatus((status: Task.Status) =>
status.copy(
condition = Condition.Killed,
startedAt = Some(Timestamp(startedAt)),
mesosStatus = Some(statusForState(taskId.idString, mesos.Protos.TaskState.TASK_KILLED))
)
)
}
def healthyTask(appId: PathId): Task.LaunchedEphemeral = healthyTask(Task.Id.forRunSpec(appId))
def healthyTask(taskId: Task.Id): Task.LaunchedEphemeral = {
import mesosphere.marathon.test.MarathonTestHelper.Implicits._
runningTask(taskId).withStatus { status =>
status.copy(mesosStatus = status.mesosStatus.map(_.toBuilder.setHealthy(true).build()))
}
}
def unhealthyTask(appId: PathId): Task.LaunchedEphemeral = unhealthyTask(Task.Id.forRunSpec(appId))
def unhealthyTask(taskId: Task.Id): Task.LaunchedEphemeral = {
import mesosphere.marathon.test.MarathonTestHelper.Implicits._
runningTask(taskId).withStatus { status =>
status.copy(mesosStatus = status.mesosStatus.map(_.toBuilder.setHealthy(false).build()))
}
}
def lostTask(id: String): MarathonTask = {
MarathonTask
.newBuilder()
.setId(id)
.setStatus(statusForState(id, mesos.Protos.TaskState.TASK_LOST))
.buildPartial()
}
}
}
| Caerostris/marathon | src/test/scala/mesosphere/marathon/core/instance/TestTaskBuilder.scala | Scala | apache-2.0 | 21,656 |
package spark.examples
import java.util.Random
import scala.math.exp
import spark.util.Vector
import spark._
/**
* Logistic regression based classification.
*/
object SparkLR {
val N = 10000 // Number of data points
  val D = 10   // Number of dimensions
val R = 0.7 // Scaling factor
val ITERATIONS = 5
val rand = new Random(42)
case class DataPoint(x: Vector, y: Double)
def generateData = {
def generatePoint(i: Int) = {
val y = if(i % 2 == 0) -1 else 1
val x = Vector(D, _ => rand.nextGaussian + y * R)
DataPoint(x, y)
}
Array.tabulate(N)(generatePoint)
}
def main(args: Array[String]) {
if (args.length == 0) {
System.err.println("Usage: SparkLR <master> [<slices>]")
System.exit(1)
}
val sc = new SparkContext(args(0), "SparkLR",
System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR")))
val numSlices = if (args.length > 1) args(1).toInt else 2
val points = sc.parallelize(generateData, numSlices).cache()
// Initialize w to a random value
var w = Vector(D, _ => 2 * rand.nextDouble - 1)
println("Initial w: " + w)
for (i <- 1 to ITERATIONS) {
println("On iteration " + i)
val gradient = points.map { p =>
(1 / (1 + exp(-p.y * (w dot p.x))) - 1) * p.y * p.x
}.reduce(_ + _)
w -= gradient
}
println("Final w: " + w)
System.exit(0)
}
}
| koeninger/spark | examples/src/main/scala/spark/examples/SparkLR.scala | Scala | bsd-3-clause | 1,411 |
/*
* Copyright (c) 2015 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package macrocompat
import scala.language.experimental.macros
import scala.reflect.macros.Context
trait MacroCompat {
val c: Context
import c.universe._
def TypeName(s: String) = newTypeName(s)
def TermName(s: String) = newTermName(s)
val termNames = nme
val typeNames = tpnme
def freshName = c.fresh
def freshName(name: String) = c.fresh(name)
def freshName[NameType <: Name](name: NameType) = c.fresh(name)
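  // Viewing the Context itself as carrying these operations lets call sites written
  // against the newer Context API (c.typecheck, c.freshName, ...) compile unchanged
  // against the 2.10 Context through this shim.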
implicit def mkContextOps(c0: c.type): this.type = this
sealed trait TypecheckMode
case object TERMmode extends TypecheckMode
case object TYPEmode extends TypecheckMode
def typecheck(
tree: Tree,
mode: TypecheckMode = TERMmode,
pt: Type = WildcardType,
silent: Boolean = false,
withImplicitViewsDisabled: Boolean = false,
withMacrosDisabled: Boolean = false
): Tree =
    // Spurious non-exhaustive match warning ... see
// https://issues.scala-lang.org/browse/SI-8068
(mode: @unchecked) match {
case TERMmode =>
c.typeCheck(tree, pt, silent, withImplicitViewsDisabled, withMacrosDisabled)
case TYPEmode =>
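        // The 2.10 typeCheck only checks terms, so TYPEmode is emulated by wrapping the
        // type in a term of that type; applying it to [Any] means this shim expects a
        // unary type constructor here.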
val term = q"null.asInstanceOf[$tree[Any]]"
c.typeCheck(term, pt, silent, withImplicitViewsDisabled, withMacrosDisabled)
}
implicit def mkTypeOps(tpe: Type): TypeOps = new TypeOps(tpe)
class TypeOps(tpe: Type) {
def typeParams = tpe.typeSymbol.asType.typeParams
}
def appliedType(tc: Type, ts: List[Type]): Type = c.universe.appliedType(tc, ts)
def appliedType(tc: Type, ts: Type*): Type = c.universe.appliedType(tc, ts.toList)
def showCode(t: Tree): String = show(t)
}
| adelbertc/macro-compat | core/src/main/scala_2.10/macrocompat/macrocompat.scala | Scala | apache-2.0 | 2,220 |
/*§
===========================================================================
Chronos
===========================================================================
Copyright (C) 2015-2016 Gianluca Costa
===========================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package info.gianlucacosta.chronos.ast.expressions
import info.gianlucacosta.chronos.ast.{AstVisitor, Expression}
case class Condition(expression: Expression) extends Expression {
override def accept[T](visitor: AstVisitor[T]): T =
visitor.visit(this)
}
| giancosta86/Chronos | src/main/scala/info/gianlucacosta/chronos/ast/expressions/Condition.scala | Scala | apache-2.0 | 1,177 |
/**
* Copyright 2009 Jorge Ortiz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package org.scala_tools.javautils.j2s
import java.util.Map
import scala.collection.mutable.{Map => SMutableMap}
trait JMutableMapWrapper[K, V] extends SMutableMap[K, V] with JMapWrapper[K, V] {
type Wrapped <: Map[K, V]
def -=(key: K): Unit =
underlying.remove(key)
def update(key: K, value: V): Unit =
underlying.put(key, value)
}
| jorgeortiz85/scala-javautils | src/main/scala/org/scala_tools/javautils/j2s/JMutableMapWrapper.scala | Scala | apache-2.0 | 949 |
package uk.gov.gds.ier.transaction.forces.nationality
import uk.gov.gds.ier.validation.ErrorTransformForm
import uk.gov.gds.ier.step.StepTemplate
import uk.gov.gds.ier.validation.constants.NationalityConstants
import uk.gov.gds.ier.transaction.forces.InprogressForces
trait NationalityMustache extends StepTemplate[InprogressForces] {
case class NationalityModel(
question:Question,
nationality: FieldSet,
britishOption: Field,
irishOption: Field,
hasOtherCountryOption: Field,
otherCountry: FieldSet,
otherCountries0: Field,
otherCountries1: Field,
otherCountries2: Field,
noNationalityReason: Field,
noNationalityReasonShowFlag: String
) extends MustacheData
val mustache = MustacheTemplate("forces/nationality") { (form, postUrl) =>
implicit val progressForm = form
val title = "What is your nationality?"
val nationalityReason = form(keys.nationality.noNationalityReason).value
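    // "-open" when a no-nationality reason has been entered, empty otherwise; the template
    // presumably uses this flag to keep the optional reason section expanded on re-render.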
val nationalityReasonClass = nationalityReason match {
case Some("") | None => ""
case _ => "-open"
}
NationalityModel(
question = Question(
postUrl = postUrl.url,
errorMessages = form.globalErrors.map{ _.message },
title = title
),
nationality = FieldSet(keys.nationality),
britishOption = CheckboxField(
key = keys.nationality.british,
value = "true"
),
irishOption = CheckboxField(
key = keys.nationality.irish,
value = "true"
),
hasOtherCountryOption = CheckboxField(
key = keys.nationality.hasOtherCountry,
value = "true"
),
otherCountry = FieldSet(keys.nationality.otherCountries),
otherCountries0 = TextField(keys.nationality.otherCountries.item(0)),
otherCountries1 = TextField(keys.nationality.otherCountries.item(1)),
otherCountries2 = TextField(keys.nationality.otherCountries.item(2)),
noNationalityReason = TextField(keys.nationality.noNationalityReason),
noNationalityReasonShowFlag = nationalityReasonClass
)
}
}
| michaeldfallen/ier-frontend | app/uk/gov/gds/ier/transaction/forces/nationality/NationalityMustache.scala | Scala | mit | 2,095 |
/*
* ProbQueryAlgorithm.scala
* Algorithms that compute conditional probabilities of queries.
*
* Created By: Avi Pfeffer ([email protected])
* Creation Date: Jan 1, 2009
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.algorithm
import com.cra.figaro.language._
/**
* Algorithms that compute conditional probabilities of queries.
*
*/
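 * A typical interaction, sketched for illustration (assuming `alg` is an active
 * algorithm whose `queryTargets` include the element `e`, and `someValue` is a value
 * of `e`'s type):
 * {{{
 * val dist = alg.distribution(e)          // Stream of (probability, value) pairs
 * val p = alg.probability(e, someValue)   // estimated P(e == someValue)
 * }}}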
trait ProbQueryAlgorithm
extends Algorithm {
val universe: Universe
  /**
   * The list of elements that can be queried after running the algorithm.
   */
val queryTargets: Seq[Element[_]]
/*
* Particular implementations of algorithm must provide the following two methods.
*/
/**
* Return an estimate of the marginal probability distribution over the target that lists each element
* with its probability. The result is a lazy stream. It is up to the algorithm how the stream is
* ordered.
*/
def computeDistribution[T](target: Element[T]): Stream[(Double, T)]
/**
* Return an estimate of the expectation of the function under the marginal probability distribution
* of the target.
*/
def computeExpectation[T](target: Element[T], function: T => Double): Double
/**
* Return an estimate of the probability of the predicate under the marginal probability distribution
* of the target.
*/
def computeProbability[T](target: Element[T], predicate: T => Boolean): Double = {
    computeExpectation(target, (t: T) => if (predicate(t)) 1.0 else 0.0)
}
/*
* The following methods are defined in either the onetime or anytime versions of this class,
* and do not need to be defined by particular algorithm implementations.
*/
protected def doDistribution[T](target: Element[T]): Stream[(Double, T)]
protected def doExpectation[T](target: Element[T], function: T => Double): Double
protected def doProbability[T](target: Element[T], predicate: T => Boolean): Double
private def check[T](target: Element[T]): Unit = {
if (!active) throw new AlgorithmInactiveException
if (!(queryTargets contains target)) throw new NotATargetException(target)
}
/**
* Return an estimate of the marginal probability distribution over the target that lists each element
* with its probability. The result is a lazy stream. It is up to the algorithm how the stream is
* ordered.
* Throws NotATargetException if called on a target that is not in the list of
* targets of the algorithm.
* Throws AlgorithmInactiveException if the algorithm is inactive.
*/
def distribution[T](target: Element[T]): Stream[(Double, T)] = {
check(target)
doDistribution(target)
}
/**
* Return an estimate of the expectation of the function under the marginal probability distribution
* of the target.
* Throws NotATargetException if called on a target that is not in the list of
* targets of the algorithm.
* Throws AlgorithmInactiveException if the algorithm is inactive.
*/
def expectation[T](target: Element[T], function: T => Double): Double = {
check(target)
doExpectation(target, function)
}
/**
* Return the mean of the probability density function for the given continuous element
*/
def mean(target: Element[Double]): Double = {
expectation(target, (d: Double) => d)
}
/**
* Return the variance of the probability density function for the given continuous element
*/
def variance(target: Element[Double]): Double = {
val m = mean(target)
val ex2 = expectation(target, (d: Double) => d * d)
ex2 - m*m
}
/**
* Return an element representing the posterior probability distribution of the given element
*/
def posteriorElement[T](target: Element[T], universe: Universe = Universe.universe): Element[T] = {
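    // distribution(target) is forced into a list here, so this assumes the target's
    // support is finite; the resulting (probability, value) pairs seed a Select element
    // in the given universe.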
Select(distribution(target).toList:_*)("", universe)
}
/**
* Return an estimate of the probability of the predicate under the marginal probability distribution
* of the target.
* Throws NotATargetException if called on a target that is not in the list of
* targets of the algorithm.
* Throws AlgorithmInactiveException if the algorithm is inactive.
*/
def probability[T](target: Element[T], predicate: T => Boolean): Double = {
check(target)
doProbability(target, predicate)
}
/**
* Return an estimate of the probability that the target produces the value.
* Throws NotATargetException if called on a target that is not in the list of
* targets of the algorithm.
* Throws AlgorithmInactiveException if the algorithm is inactive.
*/
def probability[T](target: Element[T], value: T): Double = {
check(target)
doProbability(target, (t: T) => t == value)
}
universe.registerAlgorithm(this)
}
| bruttenberg/figaro | Figaro/src/main/scala/com/cra/figaro/algorithm/ProbQueryAlgorithm.scala | Scala | bsd-3-clause | 4,911 |
package org.scaladebugger.api.profiles.java.info.events
import com.sun.jdi._
import com.sun.jdi.event._
import org.scaladebugger.api.lowlevel.JDIArgument
import org.scaladebugger.api.lowlevel.events.JDIEventArgument
import org.scaladebugger.api.lowlevel.requests.JDIRequestArgument
import org.scaladebugger.api.profiles.traits.info._
import org.scaladebugger.api.profiles.traits.info.events._
import org.scaladebugger.api.virtualmachines.ScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
class JavaAccessWatchpointEventInfoSpec extends ParallelMockFunSpec {
private val mockScalaVirtualMachine = mock[ScalaVirtualMachine]
private val mockInfoProducer = mock[InfoProducer]
private val mockAccessWatchpointEvent = mock[AccessWatchpointEvent]
private val mockJdiRequestArguments = Seq(mock[JDIRequestArgument])
private val mockJdiEventArguments = Seq(mock[JDIEventArgument])
private val mockJdiArguments =
mockJdiRequestArguments ++ mockJdiEventArguments
private val mockContainerObjectReference = Left(mock[ObjectReference])
private val mockField = mock[Field]
private val mockVirtualMachine = mock[VirtualMachine]
private val mockThreadReference = mock[ThreadReference]
private val mockThreadReferenceType = mock[ReferenceType]
private val mockLocation = mock[Location]
private val testOffsetIndex = -1
private val javaAccessWatchpointEventInfoProfile = new JavaAccessWatchpointEventInfo(
scalaVirtualMachine = mockScalaVirtualMachine,
infoProducer = mockInfoProducer,
accessWatchpointEvent = mockAccessWatchpointEvent,
jdiArguments = mockJdiArguments
)(
_container = mockContainerObjectReference,
_field = mockField,
_virtualMachine = mockVirtualMachine,
_thread = mockThreadReference,
_threadReferenceType = mockThreadReferenceType,
_location = mockLocation
)
describe("JavaAccessWatchpointEventInfo") {
describe("#toJavaInfo") {
it("should return a new instance of the Java profile representation") {
val expected = mock[AccessWatchpointEventInfo]
// Event info producer will be generated in its Java form
val mockEventInfoProducer = mock[EventInfoProducer]
(mockInfoProducer.eventProducer _).expects()
.returning(mockEventInfoProducer).once()
(mockEventInfoProducer.toJavaInfo _).expects()
.returning(mockEventInfoProducer).once()
// Java version of event info producer creates a new event instance
// NOTE: Cannot validate second set of args because they are
// call-by-name, which ScalaMock does not support presently
(mockEventInfoProducer.newAccessWatchpointEventInfo(
_: ScalaVirtualMachine,
_: AccessWatchpointEvent,
_: Seq[JDIArgument]
)(
_: Either[ObjectReference, ReferenceType],
_: Field,
_: VirtualMachine,
_: ThreadReference,
_: ReferenceType,
_: Location
)).expects(
mockScalaVirtualMachine,
mockAccessWatchpointEvent,
mockJdiArguments,
*, *, *, *, *, *
).returning(expected).once()
val actual = javaAccessWatchpointEventInfoProfile.toJavaInfo
actual should be (expected)
}
}
describe("#isJavaInfo") {
it("should return true") {
val expected = true
val actual = javaAccessWatchpointEventInfoProfile.isJavaInfo
actual should be (expected)
}
}
describe("#toJdiInstance") {
it("should return the JDI instance this profile instance represents") {
val expected = mockAccessWatchpointEvent
val actual = javaAccessWatchpointEventInfoProfile.toJdiInstance
actual should be (expected)
}
}
}
}
| ensime/scala-debugger | scala-debugger-api/src/test/scala/org/scaladebugger/api/profiles/java/info/events/JavaAccessWatchpointEventInfoSpec.scala | Scala | apache-2.0 | 3,800 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataOutput, DataOutputStream, File,
FileOutputStream, PrintStream}
import java.lang.{Double => JDouble, Float => JFloat}
import java.net.{BindException, ServerSocket, URI}
import java.nio.{ByteBuffer, ByteOrder}
import java.nio.charset.StandardCharsets
import java.text.DecimalFormatSymbols
import java.util.Locale
import java.util.concurrent.TimeUnit
import java.util.zip.GZIPOutputStream
import scala.collection.mutable.ListBuffer
import scala.util.Random
import com.google.common.io.Files
import org.apache.commons.io.IOUtils
import org.apache.commons.lang3.{JavaVersion, SystemUtils}
import org.apache.commons.math3.stat.inference.ChiSquareTest
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkConf, SparkException, SparkFunSuite, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.network.util.ByteUnit
import org.apache.spark.scheduler.SparkListener
class UtilsSuite extends SparkFunSuite with ResetSystemProperties with Logging {
test("timeConversion") {
// Test -1
assert(Utils.timeStringAsSeconds("-1") === -1)
// Test zero
assert(Utils.timeStringAsSeconds("0") === 0)
assert(Utils.timeStringAsSeconds("1") === 1)
assert(Utils.timeStringAsSeconds("1s") === 1)
assert(Utils.timeStringAsSeconds("1000ms") === 1)
assert(Utils.timeStringAsSeconds("1000000us") === 1)
assert(Utils.timeStringAsSeconds("1m") === TimeUnit.MINUTES.toSeconds(1))
assert(Utils.timeStringAsSeconds("1min") === TimeUnit.MINUTES.toSeconds(1))
assert(Utils.timeStringAsSeconds("1h") === TimeUnit.HOURS.toSeconds(1))
assert(Utils.timeStringAsSeconds("1d") === TimeUnit.DAYS.toSeconds(1))
assert(Utils.timeStringAsMs("1") === 1)
assert(Utils.timeStringAsMs("1ms") === 1)
assert(Utils.timeStringAsMs("1000us") === 1)
assert(Utils.timeStringAsMs("1s") === TimeUnit.SECONDS.toMillis(1))
assert(Utils.timeStringAsMs("1m") === TimeUnit.MINUTES.toMillis(1))
assert(Utils.timeStringAsMs("1min") === TimeUnit.MINUTES.toMillis(1))
assert(Utils.timeStringAsMs("1h") === TimeUnit.HOURS.toMillis(1))
assert(Utils.timeStringAsMs("1d") === TimeUnit.DAYS.toMillis(1))
// Test invalid strings
intercept[NumberFormatException] {
Utils.timeStringAsMs("600l")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This breaks 600s")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This breaks 600ds")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("600s This breaks")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This 123s breaks")
}
}
test("Test byteString conversion") {
// Test zero
assert(Utils.byteStringAsBytes("0") === 0)
assert(Utils.byteStringAsGb("1") === 1)
assert(Utils.byteStringAsGb("1g") === 1)
assert(Utils.byteStringAsGb("1023m") === 0)
assert(Utils.byteStringAsGb("1024m") === 1)
assert(Utils.byteStringAsGb("1048575k") === 0)
assert(Utils.byteStringAsGb("1048576k") === 1)
assert(Utils.byteStringAsGb("1k") === 0)
assert(Utils.byteStringAsGb("1t") === ByteUnit.TiB.toGiB(1))
assert(Utils.byteStringAsGb("1p") === ByteUnit.PiB.toGiB(1))
assert(Utils.byteStringAsMb("1") === 1)
assert(Utils.byteStringAsMb("1m") === 1)
assert(Utils.byteStringAsMb("1048575b") === 0)
assert(Utils.byteStringAsMb("1048576b") === 1)
assert(Utils.byteStringAsMb("1023k") === 0)
assert(Utils.byteStringAsMb("1024k") === 1)
assert(Utils.byteStringAsMb("3645k") === 3)
assert(Utils.byteStringAsMb("1024gb") === 1048576)
assert(Utils.byteStringAsMb("1g") === ByteUnit.GiB.toMiB(1))
assert(Utils.byteStringAsMb("1t") === ByteUnit.TiB.toMiB(1))
assert(Utils.byteStringAsMb("1p") === ByteUnit.PiB.toMiB(1))
assert(Utils.byteStringAsKb("1") === 1)
assert(Utils.byteStringAsKb("1k") === 1)
assert(Utils.byteStringAsKb("1m") === ByteUnit.MiB.toKiB(1))
assert(Utils.byteStringAsKb("1g") === ByteUnit.GiB.toKiB(1))
assert(Utils.byteStringAsKb("1t") === ByteUnit.TiB.toKiB(1))
assert(Utils.byteStringAsKb("1p") === ByteUnit.PiB.toKiB(1))
assert(Utils.byteStringAsBytes("1") === 1)
assert(Utils.byteStringAsBytes("1k") === ByteUnit.KiB.toBytes(1))
assert(Utils.byteStringAsBytes("1m") === ByteUnit.MiB.toBytes(1))
assert(Utils.byteStringAsBytes("1g") === ByteUnit.GiB.toBytes(1))
assert(Utils.byteStringAsBytes("1t") === ByteUnit.TiB.toBytes(1))
assert(Utils.byteStringAsBytes("1p") === ByteUnit.PiB.toBytes(1))
    // Overflow handling: 1073741824p exceeds Long.MAX_VALUE if converted straight to bytes.
    // This demonstrates that we can have e.g. 1024^3 PiB without overflowing.
assert(Utils.byteStringAsGb("1073741824p") === ByteUnit.PiB.toGiB(1073741824))
assert(Utils.byteStringAsMb("1073741824p") === ByteUnit.PiB.toMiB(1073741824))
// Run this to confirm it doesn't throw an exception
assert(Utils.byteStringAsBytes("9223372036854775807") === 9223372036854775807L)
assert(ByteUnit.PiB.toPiB(9223372036854775807L) === 9223372036854775807L)
// Test overflow exception
intercept[IllegalArgumentException] {
// This value exceeds Long.MAX when converted to bytes
Utils.byteStringAsBytes("9223372036854775808")
}
// Test overflow exception
intercept[IllegalArgumentException] {
// This value exceeds Long.MAX when converted to TiB
ByteUnit.PiB.toTiB(9223372036854775807L)
}
// Test fractional string
intercept[NumberFormatException] {
Utils.byteStringAsMb("0.064")
}
// Test fractional string
intercept[NumberFormatException] {
Utils.byteStringAsMb("0.064m")
}
// Test invalid strings
intercept[NumberFormatException] {
Utils.byteStringAsBytes("500ub")
}
// Test invalid strings
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This breaks 600b")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This breaks 600")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("600gb This breaks")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This 123mb breaks")
}
}
test("bytesToString") {
assert(Utils.bytesToString(10) === "10.0 B")
assert(Utils.bytesToString(1500) === "1500.0 B")
assert(Utils.bytesToString(2000000) === "1953.1 KiB")
assert(Utils.bytesToString(2097152) === "2.0 MiB")
assert(Utils.bytesToString(2306867) === "2.2 MiB")
assert(Utils.bytesToString(5368709120L) === "5.0 GiB")
assert(Utils.bytesToString(5L * (1L << 40)) === "5.0 TiB")
assert(Utils.bytesToString(5L * (1L << 50)) === "5.0 PiB")
assert(Utils.bytesToString(5L * (1L << 60)) === "5.0 EiB")
assert(Utils.bytesToString(BigInt(1L << 11) * (1L << 60)) === "2.36E+21 B")
}
test("copyStream") {
// input array initialization
val bytes = Array.ofDim[Byte](9000)
Random.nextBytes(bytes)
val os = new ByteArrayOutputStream()
Utils.copyStream(new ByteArrayInputStream(bytes), os)
assert(os.toByteArray.toList.equals(bytes.toList))
}
test("memoryStringToMb") {
assert(Utils.memoryStringToMb("1") === 0)
assert(Utils.memoryStringToMb("1048575") === 0)
assert(Utils.memoryStringToMb("3145728") === 3)
assert(Utils.memoryStringToMb("1024k") === 1)
assert(Utils.memoryStringToMb("5000k") === 4)
assert(Utils.memoryStringToMb("4024k") === Utils.memoryStringToMb("4024K"))
assert(Utils.memoryStringToMb("1024m") === 1024)
assert(Utils.memoryStringToMb("5000m") === 5000)
assert(Utils.memoryStringToMb("4024m") === Utils.memoryStringToMb("4024M"))
assert(Utils.memoryStringToMb("2g") === 2048)
assert(Utils.memoryStringToMb("3g") === Utils.memoryStringToMb("3G"))
assert(Utils.memoryStringToMb("2t") === 2097152)
assert(Utils.memoryStringToMb("3t") === Utils.memoryStringToMb("3T"))
}
test("splitCommandString") {
assert(Utils.splitCommandString("") === Seq())
assert(Utils.splitCommandString("a") === Seq("a"))
assert(Utils.splitCommandString("aaa") === Seq("aaa"))
assert(Utils.splitCommandString("a b c") === Seq("a", "b", "c"))
assert(Utils.splitCommandString(" a b\\t c ") === Seq("a", "b", "c"))
assert(Utils.splitCommandString("a 'b c'") === Seq("a", "b c"))
assert(Utils.splitCommandString("a 'b c' d") === Seq("a", "b c", "d"))
assert(Utils.splitCommandString("'b c'") === Seq("b c"))
assert(Utils.splitCommandString("a \\"b c\\"") === Seq("a", "b c"))
assert(Utils.splitCommandString("a \\"b c\\" d") === Seq("a", "b c", "d"))
assert(Utils.splitCommandString("\\"b c\\"") === Seq("b c"))
assert(Utils.splitCommandString("a 'b\\" c' \\"d' e\\"") === Seq("a", "b\\" c", "d' e"))
assert(Utils.splitCommandString("a\\t'b\\nc'\\nd") === Seq("a", "b\\nc", "d"))
assert(Utils.splitCommandString("a \\"b\\\\\\\\c\\"") === Seq("a", "b\\\\c"))
assert(Utils.splitCommandString("a \\"b\\\\\\"c\\"") === Seq("a", "b\\"c"))
assert(Utils.splitCommandString("a 'b\\\\\\"c'") === Seq("a", "b\\\\\\"c"))
assert(Utils.splitCommandString("'a'b") === Seq("ab"))
assert(Utils.splitCommandString("'a''b'") === Seq("ab"))
assert(Utils.splitCommandString("\\"a\\"b") === Seq("ab"))
assert(Utils.splitCommandString("\\"a\\"\\"b\\"") === Seq("ab"))
assert(Utils.splitCommandString("''") === Seq(""))
assert(Utils.splitCommandString("\\"\\"") === Seq(""))
}
test("string formatting of time durations") {
val second = 1000
val minute = second * 60
val hour = minute * 60
def str: (Long) => String = Utils.msDurationToString(_)
val sep = new DecimalFormatSymbols(Locale.US).getDecimalSeparator
assert(str(123) === "123 ms")
assert(str(second) === "1" + sep + "0 s")
assert(str(second + 462) === "1" + sep + "5 s")
assert(str(hour) === "1" + sep + "00 h")
assert(str(minute) === "1" + sep + "0 m")
assert(str(minute + 4 * second + 34) === "1" + sep + "1 m")
assert(str(10 * hour + minute + 4 * second) === "10" + sep + "02 h")
assert(str(10 * hour + 59 * minute + 59 * second + 999) === "11" + sep + "00 h")
}
def getSuffix(isCompressed: Boolean): String = {
if (isCompressed) {
".gz"
} else {
""
}
}
def writeLogFile(path: String, content: Array[Byte]): Unit = {
val outputStream = if (path.endsWith(".gz")) {
new GZIPOutputStream(new FileOutputStream(path))
} else {
new FileOutputStream(path)
}
IOUtils.write(content, outputStream)
outputStream.close()
}
private val workerConf = new SparkConf()
def testOffsetBytes(isCompressed: Boolean): Unit = {
withTempDir { tmpDir2 =>
val suffix = getSuffix(isCompressed)
val f1Path = tmpDir2 + "/f1" + suffix
writeLogFile(f1Path, "1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n".getBytes(StandardCharsets.UTF_8))
val f1Length = Utils.getFileLength(new File(f1Path), workerConf)
// Read first few bytes
assert(Utils.offsetBytes(f1Path, f1Length, 0, 5) === "1\\n2\\n3")
// Read some middle bytes
assert(Utils.offsetBytes(f1Path, f1Length, 4, 11) === "3\\n4\\n5\\n6")
// Read last few bytes
assert(Utils.offsetBytes(f1Path, f1Length, 12, 18) === "7\\n8\\n9\\n")
// Read some nonexistent bytes in the beginning
assert(Utils.offsetBytes(f1Path, f1Length, -5, 5) === "1\\n2\\n3")
// Read some nonexistent bytes at the end
assert(Utils.offsetBytes(f1Path, f1Length, 12, 22) === "7\\n8\\n9\\n")
// Read some nonexistent bytes on both ends
assert(Utils.offsetBytes(f1Path, f1Length, -3, 25) === "1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n")
}
}
test("reading offset bytes of a file") {
testOffsetBytes(isCompressed = false)
}
test("reading offset bytes of a file (compressed)") {
testOffsetBytes(isCompressed = true)
}
def testOffsetBytesMultipleFiles(isCompressed: Boolean): Unit = {
withTempDir { tmpDir =>
val suffix = getSuffix(isCompressed)
val files = (1 to 3).map(i =>
new File(tmpDir, i.toString + suffix)) :+ new File(tmpDir, "4")
writeLogFile(files(0).getAbsolutePath, "0123456789".getBytes(StandardCharsets.UTF_8))
writeLogFile(files(1).getAbsolutePath, "abcdefghij".getBytes(StandardCharsets.UTF_8))
writeLogFile(files(2).getAbsolutePath, "ABCDEFGHIJ".getBytes(StandardCharsets.UTF_8))
writeLogFile(files(3).getAbsolutePath, "9876543210".getBytes(StandardCharsets.UTF_8))
val fileLengths = files.map(Utils.getFileLength(_, workerConf))
// Read first few bytes in the 1st file
assert(Utils.offsetBytes(files, fileLengths, 0, 5) === "01234")
// Read bytes within the 1st file
assert(Utils.offsetBytes(files, fileLengths, 5, 8) === "567")
// Read bytes across 1st and 2nd file
assert(Utils.offsetBytes(files, fileLengths, 8, 18) === "89abcdefgh")
// Read bytes across 1st, 2nd and 3rd file
assert(Utils.offsetBytes(files, fileLengths, 5, 24) === "56789abcdefghijABCD")
// Read bytes across 3rd and 4th file
assert(Utils.offsetBytes(files, fileLengths, 25, 35) === "FGHIJ98765")
// Read some nonexistent bytes in the beginning
assert(Utils.offsetBytes(files, fileLengths, -5, 18) === "0123456789abcdefgh")
// Read some nonexistent bytes at the end
assert(Utils.offsetBytes(files, fileLengths, 18, 45) === "ijABCDEFGHIJ9876543210")
// Read some nonexistent bytes on both ends
assert(Utils.offsetBytes(files, fileLengths, -5, 45) ===
"0123456789abcdefghijABCDEFGHIJ9876543210")
}
}
test("reading offset bytes across multiple files") {
testOffsetBytesMultipleFiles(isCompressed = false)
}
test("reading offset bytes across multiple files (compressed)") {
testOffsetBytesMultipleFiles(isCompressed = true)
}
test("deserialize long value") {
val testval : Long = 9730889947L
val bbuf = ByteBuffer.allocate(8)
assert(bbuf.hasArray)
bbuf.order(ByteOrder.BIG_ENDIAN)
bbuf.putLong(testval)
assert(bbuf.array.length === 8)
assert(Utils.deserializeLongValue(bbuf.array) === testval)
}
test("writeByteBuffer should not change ByteBuffer position") {
// Test a buffer with an underlying array, for both writeByteBuffer methods.
val testBuffer = ByteBuffer.wrap(Array[Byte](1, 2, 3, 4))
assert(testBuffer.hasArray)
val bytesOut = new ByteBufferOutputStream(4096)
Utils.writeByteBuffer(testBuffer, bytesOut)
assert(testBuffer.position() === 0)
val dataOut = new DataOutputStream(bytesOut)
Utils.writeByteBuffer(testBuffer, dataOut: DataOutput)
assert(testBuffer.position() === 0)
// Test a buffer without an underlying array, for both writeByteBuffer methods.
val testDirectBuffer = ByteBuffer.allocateDirect(8)
assert(!testDirectBuffer.hasArray())
Utils.writeByteBuffer(testDirectBuffer, bytesOut)
assert(testDirectBuffer.position() === 0)
Utils.writeByteBuffer(testDirectBuffer, dataOut: DataOutput)
assert(testDirectBuffer.position() === 0)
}
test("get iterator size") {
val empty = Seq[Int]()
assert(Utils.getIteratorSize(empty.toIterator) === 0L)
val iterator = Iterator.range(0, 5)
assert(Utils.getIteratorSize(iterator) === 5L)
}
test("getIteratorZipWithIndex") {
val iterator = Utils.getIteratorZipWithIndex(Iterator(0, 1, 2), -1L + Int.MaxValue)
assert(iterator.toArray === Array(
(0, -1L + Int.MaxValue), (1, 0L + Int.MaxValue), (2, 1L + Int.MaxValue)
))
intercept[IllegalArgumentException] {
Utils.getIteratorZipWithIndex(Iterator(0, 1, 2), -1L)
}
}
test("doesDirectoryContainFilesNewerThan") {
// create some temporary directories and files
withTempDir { parent =>
// The parent directory has two child directories
val child1: File = Utils.createTempDir(parent.getCanonicalPath)
val child2: File = Utils.createTempDir(parent.getCanonicalPath)
val child3: File = Utils.createTempDir(child1.getCanonicalPath)
// set the last modified time of child1 to 30 secs old
child1.setLastModified(System.currentTimeMillis() - (1000 * 30))
// although child1 is old, child2 is still new so return true
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
child2.setLastModified(System.currentTimeMillis - (1000 * 30))
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
parent.setLastModified(System.currentTimeMillis - (1000 * 30))
// although parent and its immediate children are new, child3 is still old
// we expect a full recursive search for new files.
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
child3.setLastModified(System.currentTimeMillis - (1000 * 30))
assert(!Utils.doesDirectoryContainAnyNewFiles(parent, 5))
}
}
test("resolveURI") {
def assertResolves(before: String, after: String): Unit = {
// This should test only single paths
assert(before.split(",").length === 1)
def resolve(uri: String): String = Utils.resolveURI(uri).toString
assert(resolve(before) === after)
assert(resolve(after) === after)
// Repeated invocations of resolveURI should yield the same result
assert(resolve(resolve(after)) === after)
assert(resolve(resolve(resolve(after))) === after)
}
val rawCwd = System.getProperty("user.dir")
val cwd = if (Utils.isWindows) s"/$rawCwd".replace("\\\\", "/") else rawCwd
assertResolves("hdfs:/root/spark.jar", "hdfs:/root/spark.jar")
assertResolves("hdfs:///root/spark.jar#app.jar", "hdfs:///root/spark.jar#app.jar")
assertResolves("spark.jar", s"file:$cwd/spark.jar")
assertResolves("spark.jar#app.jar", s"file:$cwd/spark.jar#app.jar")
assertResolves("path to/file.txt", s"file:$cwd/path%20to/file.txt")
if (Utils.isWindows) {
assertResolves("C:\\\\path\\\\to\\\\file.txt", "file:/C:/path/to/file.txt")
assertResolves("C:\\\\path to\\\\file.txt", "file:/C:/path%20to/file.txt")
}
assertResolves("file:/C:/path/to/file.txt", "file:/C:/path/to/file.txt")
assertResolves("file:///C:/path/to/file.txt", "file:///C:/path/to/file.txt")
assertResolves("file:/C:/file.txt#alias.txt", "file:/C:/file.txt#alias.txt")
assertResolves("file:foo", "file:foo")
assertResolves("file:foo:baby", "file:foo:baby")
}
test("resolveURIs with multiple paths") {
def assertResolves(before: String, after: String): Unit = {
def resolve(uri: String): String = Utils.resolveURIs(uri)
assert(resolve(before) === after)
assert(resolve(after) === after)
// Repeated invocations of resolveURIs should yield the same result
assert(resolve(resolve(after)) === after)
assert(resolve(resolve(resolve(after))) === after)
}
val rawCwd = System.getProperty("user.dir")
val cwd = if (Utils.isWindows) s"/$rawCwd".replace("\\\\", "/") else rawCwd
assertResolves("jar1,jar2", s"file:$cwd/jar1,file:$cwd/jar2")
assertResolves("file:/jar1,file:/jar2", "file:/jar1,file:/jar2")
assertResolves("hdfs:/jar1,file:/jar2,jar3", s"hdfs:/jar1,file:/jar2,file:$cwd/jar3")
assertResolves("hdfs:/jar1,file:/jar2,jar3,jar4#jar5,path to/jar6",
s"hdfs:/jar1,file:/jar2,file:$cwd/jar3,file:$cwd/jar4#jar5,file:$cwd/path%20to/jar6")
if (Utils.isWindows) {
assertResolves("""hdfs:/jar1,file:/jar2,jar3,C:\\pi.py#py.pi,C:\\path to\\jar4""",
s"hdfs:/jar1,file:/jar2,file:$cwd/jar3,file:/C:/pi.py%23py.pi,file:/C:/path%20to/jar4")
}
assertResolves(",jar1,jar2", s"file:$cwd/jar1,file:$cwd/jar2")
// Also test resolveURIs with single paths
assertResolves("hdfs:/root/spark.jar", "hdfs:/root/spark.jar")
}
test("nonLocalPaths") {
assert(Utils.nonLocalPaths("spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("file:/spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("file:///spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("local:/spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("local:///spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/spark.jar") === Array("hdfs:/spark.jar"))
assert(Utils.nonLocalPaths("hdfs:///spark.jar") === Array("hdfs:///spark.jar"))
assert(Utils.nonLocalPaths("file:/spark.jar,local:/smart.jar,family.py") === Array.empty)
assert(Utils.nonLocalPaths("local:/spark.jar,file:/smart.jar,family.py") === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/spark.jar,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("hdfs:/spark.jar,path to/a.jar,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("hdfs:/spark.jar,s3:/smart.jar,local.py,file:/hello/pi.py") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("local.py,hdfs:/spark.jar,file:/hello/pi.py,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
// Test Windows paths
assert(Utils.nonLocalPaths("C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("file:/C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("file:///C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("local:/C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("local:///C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/a.jar,C:/my.jar,s3:/another.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
assert(Utils.nonLocalPaths("D:/your.jar,hdfs:/a.jar,s3:/another.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
assert(Utils.nonLocalPaths("hdfs:/a.jar,s3:/another.jar,e:/our.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
}
test("isBindCollision") {
// Negatives
assert(!Utils.isBindCollision(null))
assert(!Utils.isBindCollision(new Exception))
assert(!Utils.isBindCollision(new Exception(new Exception)))
assert(!Utils.isBindCollision(new Exception(new BindException)))
// Positives
val be = new BindException("Random Message")
val be1 = new Exception(new BindException("Random Message"))
val be2 = new Exception(new Exception(new BindException("Random Message")))
assert(Utils.isBindCollision(be))
assert(Utils.isBindCollision(be1))
assert(Utils.isBindCollision(be2))
// Actual bind exception
var server1: ServerSocket = null
var server2: ServerSocket = null
try {
server1 = new java.net.ServerSocket(0)
server2 = new java.net.ServerSocket(server1.getLocalPort)
} catch {
case e: Exception =>
assert(e.isInstanceOf[java.net.BindException])
assert(Utils.isBindCollision(e))
} finally {
Option(server1).foreach(_.close())
Option(server2).foreach(_.close())
}
}
// Test for using the util function to change our log levels.
test("log4j log level change") {
val current = org.apache.log4j.Logger.getRootLogger().getLevel()
try {
Utils.setLogLevel(org.apache.log4j.Level.ALL)
assert(log.isInfoEnabled())
Utils.setLogLevel(org.apache.log4j.Level.ERROR)
assert(!log.isInfoEnabled())
assert(log.isErrorEnabled())
} finally {
// Best effort at undoing changes this test made.
Utils.setLogLevel(current)
}
}
test("deleteRecursively") {
val tempDir1 = Utils.createTempDir()
assert(tempDir1.exists())
Utils.deleteRecursively(tempDir1)
assert(!tempDir1.exists())
val tempDir2 = Utils.createTempDir()
val sourceFile1 = new File(tempDir2, "foo.txt")
Files.touch(sourceFile1)
assert(sourceFile1.exists())
Utils.deleteRecursively(sourceFile1)
assert(!sourceFile1.exists())
val tempDir3 = new File(tempDir2, "subdir")
assert(tempDir3.mkdir())
val sourceFile2 = new File(tempDir3, "bar.txt")
Files.touch(sourceFile2)
assert(sourceFile2.exists())
Utils.deleteRecursively(tempDir2)
assert(!tempDir2.exists())
assert(!tempDir3.exists())
assert(!sourceFile2.exists())
}
test("loading properties from file") {
withTempDir { tmpDir =>
val outFile = File.createTempFile("test-load-spark-properties", "test", tmpDir)
System.setProperty("spark.test.fileNameLoadB", "2")
Files.write("spark.test.fileNameLoadA true\\n" +
"spark.test.fileNameLoadB 1\\n", outFile, StandardCharsets.UTF_8)
val properties = Utils.getPropertiesFromFile(outFile.getAbsolutePath)
properties
.filter { case (k, v) => k.startsWith("spark.")}
.foreach { case (k, v) => sys.props.getOrElseUpdate(k, v)}
val sparkConf = new SparkConf
assert(sparkConf.getBoolean("spark.test.fileNameLoadA", false))
assert(sparkConf.getInt("spark.test.fileNameLoadB", 1) === 2)
}
}
test("timeIt with prepare") {
var cnt = 0
val prepare = () => {
cnt += 1
Thread.sleep(1000)
}
val time = Utils.timeIt(2)({}, Some(prepare))
require(cnt === 2, "prepare should be called twice")
require(time < TimeUnit.MILLISECONDS.toNanos(500), "preparation time should not count")
}
test("fetch hcfs dir") {
withTempDir { tempDir =>
val sourceDir = new File(tempDir, "source-dir")
sourceDir.mkdir()
val innerSourceDir = Utils.createTempDir(root = sourceDir.getPath)
val sourceFile = File.createTempFile("someprefix", "somesuffix", innerSourceDir)
val targetDir = new File(tempDir, "target-dir")
Files.write("some text", sourceFile, StandardCharsets.UTF_8)
val path =
if (Utils.isWindows) {
new Path("file:/" + sourceDir.getAbsolutePath.replace("\\\\", "/"))
} else {
new Path("file://" + sourceDir.getAbsolutePath)
}
val conf = new Configuration()
val fs = Utils.getHadoopFileSystem(path.toString, conf)
assert(!targetDir.isDirectory())
Utils.fetchHcfsFile(path, targetDir, fs, new SparkConf(), conf, false)
assert(targetDir.isDirectory())
// Copy again to make sure it doesn't error if the dir already exists.
Utils.fetchHcfsFile(path, targetDir, fs, new SparkConf(), conf, false)
val destDir = new File(targetDir, sourceDir.getName())
assert(destDir.isDirectory())
val destInnerDir = new File(destDir, innerSourceDir.getName)
assert(destInnerDir.isDirectory())
val destInnerFile = new File(destInnerDir, sourceFile.getName)
assert(destInnerFile.isFile())
val filePath =
if (Utils.isWindows) {
new Path("file:/" + sourceFile.getAbsolutePath.replace("\\\\", "/"))
} else {
new Path("file://" + sourceFile.getAbsolutePath)
}
val testFileDir = new File(tempDir, "test-filename")
val testFileName = "testFName"
val testFilefs = Utils.getHadoopFileSystem(filePath.toString, conf)
Utils.fetchHcfsFile(filePath, testFileDir, testFilefs, new SparkConf(),
conf, false, Some(testFileName))
val newFileName = new File(testFileDir, testFileName)
assert(newFileName.isFile())
}
}
test("shutdown hook manager") {
val manager = new SparkShutdownHookManager()
val output = new ListBuffer[Int]()
val hook1 = manager.add(1, () => output += 1)
manager.add(3, () => output += 3)
manager.add(2, () => output += 2)
manager.add(4, () => output += 4)
manager.remove(hook1)
manager.runAll()
assert(output.toList === List(4, 3, 2))
}
test("isInDirectory") {
val tmpDir = new File(sys.props("java.io.tmpdir"))
val parentDir = new File(tmpDir, "parent-dir")
val childDir1 = new File(parentDir, "child-dir-1")
val childDir1b = new File(parentDir, "child-dir-1b")
val childFile1 = new File(parentDir, "child-file-1.txt")
val childDir2 = new File(childDir1, "child-dir-2")
val childDir2b = new File(childDir1, "child-dir-2b")
val childFile2 = new File(childDir1, "child-file-2.txt")
val childFile3 = new File(childDir2, "child-file-3.txt")
val nullFile: File = null
parentDir.mkdir()
childDir1.mkdir()
childDir1b.mkdir()
childDir2.mkdir()
childDir2b.mkdir()
childFile1.createNewFile()
childFile2.createNewFile()
childFile3.createNewFile()
// Identity
assert(Utils.isInDirectory(parentDir, parentDir))
assert(Utils.isInDirectory(childDir1, childDir1))
assert(Utils.isInDirectory(childDir2, childDir2))
// Valid ancestor-descendant pairs
assert(Utils.isInDirectory(parentDir, childDir1))
assert(Utils.isInDirectory(parentDir, childFile1))
assert(Utils.isInDirectory(parentDir, childDir2))
assert(Utils.isInDirectory(parentDir, childFile2))
assert(Utils.isInDirectory(parentDir, childFile3))
assert(Utils.isInDirectory(childDir1, childDir2))
assert(Utils.isInDirectory(childDir1, childFile2))
assert(Utils.isInDirectory(childDir1, childFile3))
assert(Utils.isInDirectory(childDir2, childFile3))
// Inverted ancestor-descendant pairs should fail
assert(!Utils.isInDirectory(childDir1, parentDir))
assert(!Utils.isInDirectory(childDir2, parentDir))
assert(!Utils.isInDirectory(childDir2, childDir1))
assert(!Utils.isInDirectory(childFile1, parentDir))
assert(!Utils.isInDirectory(childFile2, parentDir))
assert(!Utils.isInDirectory(childFile3, parentDir))
assert(!Utils.isInDirectory(childFile2, childDir1))
assert(!Utils.isInDirectory(childFile3, childDir1))
assert(!Utils.isInDirectory(childFile3, childDir2))
// Non-existent files or directories should fail
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one.txt")))
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one/two.txt")))
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one/two/three.txt")))
// Siblings should fail
assert(!Utils.isInDirectory(childDir1, childDir1b))
assert(!Utils.isInDirectory(childDir1, childFile1))
assert(!Utils.isInDirectory(childDir2, childDir2b))
assert(!Utils.isInDirectory(childDir2, childFile2))
// Null files should fail without throwing NPE
assert(!Utils.isInDirectory(parentDir, nullFile))
assert(!Utils.isInDirectory(childFile3, nullFile))
assert(!Utils.isInDirectory(nullFile, parentDir))
assert(!Utils.isInDirectory(nullFile, childFile3))
}
test("circular buffer: if nothing was written to the buffer, display nothing") {
val buffer = new CircularBuffer(4)
assert(buffer.toString === "")
}
test("circular buffer: if the buffer isn't full, print only the contents written") {
val buffer = new CircularBuffer(10)
val stream = new PrintStream(buffer, true, "UTF-8")
stream.print("test")
assert(buffer.toString === "test")
}
test("circular buffer: data written == size of the buffer") {
val buffer = new CircularBuffer(4)
val stream = new PrintStream(buffer, true, "UTF-8")
// fill the buffer to its exact size so that it just hits overflow
stream.print("test")
assert(buffer.toString === "test")
// add more data to the buffer
stream.print("12")
assert(buffer.toString === "st12")
}
test("circular buffer: multiple overflow") {
val buffer = new CircularBuffer(25)
val stream = new PrintStream(buffer, true, "UTF-8")
stream.print("test circular test circular test circular test circular test circular")
assert(buffer.toString === "st circular test circular")
}
test("nanSafeCompareDoubles") {
def shouldMatchDefaultOrder(a: Double, b: Double): Unit = {
assert(Utils.nanSafeCompareDoubles(a, b) === JDouble.compare(a, b))
assert(Utils.nanSafeCompareDoubles(b, a) === JDouble.compare(b, a))
}
shouldMatchDefaultOrder(0d, 0d)
shouldMatchDefaultOrder(0d, 1d)
shouldMatchDefaultOrder(Double.MinValue, Double.MaxValue)
assert(Utils.nanSafeCompareDoubles(Double.NaN, Double.NaN) === 0)
assert(Utils.nanSafeCompareDoubles(Double.NaN, Double.PositiveInfinity) === 1)
assert(Utils.nanSafeCompareDoubles(Double.NaN, Double.NegativeInfinity) === 1)
assert(Utils.nanSafeCompareDoubles(Double.PositiveInfinity, Double.NaN) === -1)
assert(Utils.nanSafeCompareDoubles(Double.NegativeInfinity, Double.NaN) === -1)
}
test("nanSafeCompareFloats") {
def shouldMatchDefaultOrder(a: Float, b: Float): Unit = {
assert(Utils.nanSafeCompareFloats(a, b) === JFloat.compare(a, b))
assert(Utils.nanSafeCompareFloats(b, a) === JFloat.compare(b, a))
}
shouldMatchDefaultOrder(0f, 0f)
shouldMatchDefaultOrder(1f, 1f)
shouldMatchDefaultOrder(Float.MinValue, Float.MaxValue)
assert(Utils.nanSafeCompareFloats(Float.NaN, Float.NaN) === 0)
assert(Utils.nanSafeCompareFloats(Float.NaN, Float.PositiveInfinity) === 1)
assert(Utils.nanSafeCompareFloats(Float.NaN, Float.NegativeInfinity) === 1)
assert(Utils.nanSafeCompareFloats(Float.PositiveInfinity, Float.NaN) === -1)
assert(Utils.nanSafeCompareFloats(Float.NegativeInfinity, Float.NaN) === -1)
}
test("isDynamicAllocationEnabled") {
val conf = new SparkConf()
conf.set("spark.master", "yarn")
conf.set(SUBMIT_DEPLOY_MODE, "client")
assert(Utils.isDynamicAllocationEnabled(conf) === false)
assert(Utils.isDynamicAllocationEnabled(
conf.set(DYN_ALLOCATION_ENABLED, false)) === false)
assert(Utils.isDynamicAllocationEnabled(
conf.set(DYN_ALLOCATION_ENABLED, true)))
assert(Utils.isDynamicAllocationEnabled(
conf.set("spark.executor.instances", "1")))
assert(Utils.isDynamicAllocationEnabled(
conf.set("spark.executor.instances", "0")))
assert(Utils.isDynamicAllocationEnabled(conf.set("spark.master", "local")) === false)
assert(Utils.isDynamicAllocationEnabled(conf.set(DYN_ALLOCATION_TESTING, true)))
}
test("getDynamicAllocationInitialExecutors") {
val conf = new SparkConf()
assert(Utils.getDynamicAllocationInitialExecutors(conf) === 0)
assert(Utils.getDynamicAllocationInitialExecutors(
conf.set(DYN_ALLOCATION_MIN_EXECUTORS, 3)) === 3)
assert(Utils.getDynamicAllocationInitialExecutors( // should use minExecutors
conf.set("spark.executor.instances", "2")) === 3)
assert(Utils.getDynamicAllocationInitialExecutors( // should use executor.instances
conf.set("spark.executor.instances", "4")) === 4)
assert(Utils.getDynamicAllocationInitialExecutors( // should use executor.instances
conf.set(DYN_ALLOCATION_INITIAL_EXECUTORS, 3)) === 4)
assert(Utils.getDynamicAllocationInitialExecutors( // should use initialExecutors
conf.set(DYN_ALLOCATION_INITIAL_EXECUTORS, 5)) === 5)
assert(Utils.getDynamicAllocationInitialExecutors( // should use minExecutors
conf.set(DYN_ALLOCATION_INITIAL_EXECUTORS, 2)
.set("spark.executor.instances", "1")) === 3)
}
test("Set Spark CallerContext") {
val context = "test"
new CallerContext(context).setCurrentContext()
if (CallerContext.callerContextSupported) {
val callerContext = Utils.classForName("org.apache.hadoop.ipc.CallerContext")
assert(s"SPARK_$context" ===
callerContext.getMethod("getCurrent").invoke(null).toString)
}
}
test("encodeFileNameToURIRawPath") {
assert(Utils.encodeFileNameToURIRawPath("abc") === "abc")
assert(Utils.encodeFileNameToURIRawPath("abc xyz") === "abc%20xyz")
assert(Utils.encodeFileNameToURIRawPath("abc:xyz") === "abc:xyz")
}
test("decodeFileNameInURI") {
assert(Utils.decodeFileNameInURI(new URI("files:///abc/xyz")) === "xyz")
assert(Utils.decodeFileNameInURI(new URI("files:///abc")) === "abc")
assert(Utils.decodeFileNameInURI(new URI("files:///abc%20xyz")) === "abc xyz")
}
test("Kill process") {
// Verify that we can terminate a process even if it is in a bad state. This is only run
// on UNIX since it does some OS specific things to verify the correct behavior.
if (SystemUtils.IS_OS_UNIX) {
def getPid(p: Process): Int = {
val f = p.getClass().getDeclaredField("pid")
f.setAccessible(true)
f.get(p).asInstanceOf[Int]
}
def pidExists(pid: Int): Boolean = {
val p = Runtime.getRuntime.exec(s"kill -0 $pid")
p.waitFor()
p.exitValue() == 0
}
def signal(pid: Int, s: String): Unit = {
val p = Runtime.getRuntime.exec(s"kill -$s $pid")
p.waitFor()
}
// Start up a process that runs 'sleep 10'. Terminate the process and assert it takes
// less time and the process is no longer there.
val startTimeNs = System.nanoTime()
val process = new ProcessBuilder("sleep", "10").start()
val pid = getPid(process)
try {
assert(pidExists(pid))
val terminated = Utils.terminateProcess(process, 5000)
assert(terminated.isDefined)
process.waitFor(5, TimeUnit.SECONDS)
val durationNs = System.nanoTime() - startTimeNs
assert(durationNs < TimeUnit.SECONDS.toNanos(5))
assert(!pidExists(pid))
} finally {
// Forcibly kill the test process just in case.
signal(pid, "SIGKILL")
}
if (SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_1_8)) {
// We'll make sure that forcibly terminating a process works by
// creating a very misbehaving process. It ignores SIGTERM and has been SIGSTOPed. On
// older versions of java, this will *not* terminate.
val file = File.createTempFile("temp-file-name", ".tmp")
file.deleteOnExit()
val cmd =
s"""
|#!/bin/bash
|trap "" SIGTERM
|sleep 10
""".stripMargin
Files.write(cmd.getBytes(StandardCharsets.UTF_8), file)
file.getAbsoluteFile.setExecutable(true)
val process = new ProcessBuilder(file.getAbsolutePath).start()
val pid = getPid(process)
assert(pidExists(pid))
try {
signal(pid, "SIGSTOP")
val startNs = System.nanoTime()
val terminated = Utils.terminateProcess(process, 5000)
assert(terminated.isDefined)
process.waitFor(5, TimeUnit.SECONDS)
val duration = System.nanoTime() - startNs
// add a little extra time to allow a force kill to finish
assert(duration < TimeUnit.SECONDS.toNanos(6))
assert(!pidExists(pid))
} finally {
signal(pid, "SIGKILL")
}
}
}
}
test("chi square test of randomizeInPlace") {
// Parameters
val arraySize = 10
val numTrials = 1000
val threshold = 0.05
val seed = 1L
// results(i)(j): how many times Utils.randomize moves an element from position j to position i
val results = Array.ofDim[Long](arraySize, arraySize)
// This must be seeded because even a fair random process will fail this test with
// probability equal to the value of `threshold`, which is inconvenient for a unit test.
val rand = new java.util.Random(seed)
val range = 0 until arraySize
for {
_ <- 0 until numTrials
trial = Utils.randomizeInPlace(range.toArray, rand)
i <- range
} results(i)(trial(i)) += 1L
val chi = new ChiSquareTest()
// We expect an even distribution; this array will be rescaled by `chiSquareTest`
val expected = Array.fill(arraySize * arraySize)(1.0)
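    // With these parameters each trial records arraySize placements, so the table holds
    // numTrials * arraySize = 10000 observations over arraySize * arraySize = 100 cells,
    // i.e. roughly 100 expected observations per cell under a uniform shuffle.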
val observed = results.flatten
    // Performs Pearson's chi-squared test. Using the sum-of-squares as the test statistic gives
    // the probability of a uniform distribution producing results as extreme as `observed`.
val pValue = chi.chiSquareTest(expected, observed)
assert(pValue > threshold)
}
test("redact sensitive information") {
val sparkConf = new SparkConf
// Set some secret keys
val secretKeys = Seq(
"spark.executorEnv.HADOOP_CREDSTORE_PASSWORD",
"spark.my.password",
"spark.my.sECreT")
secretKeys.foreach { key => sparkConf.set(key, "sensitive_value") }
// Set a non-secret key
sparkConf.set("spark.regular.property", "regular_value")
// Set a property with a regular key but secret in the value
sparkConf.set("spark.sensitive.property", "has_secret_in_value")
// Redact sensitive information
val redactedConf = Utils.redact(sparkConf, sparkConf.getAll).toMap
// Assert that secret information got redacted while the regular property remained the same
secretKeys.foreach { key => assert(redactedConf(key) === Utils.REDACTION_REPLACEMENT_TEXT) }
assert(redactedConf("spark.regular.property") === "regular_value")
assert(redactedConf("spark.sensitive.property") === Utils.REDACTION_REPLACEMENT_TEXT)
}
test("redact sensitive information in command line args") {
val sparkConf = new SparkConf
// Set some secret keys
val secretKeysWithSameValue = Seq(
"spark.executorEnv.HADOOP_CREDSTORE_PASSWORD",
"spark.my.password",
"spark.my.sECreT")
val cmdArgsForSecretWithSameValue = secretKeysWithSameValue.map(s => s"-D$s=sensitive_value")
val secretKeys = secretKeysWithSameValue ++ Seq("spark.your.password")
val cmdArgsForSecret = cmdArgsForSecretWithSameValue ++ Seq(
// Have '=' twice
"-Dspark.your.password=sensitive=sensitive2"
)
val ignoredArgs = Seq(
// starts with -D but no assignment
"-Ddummy",
      // secret value contained but not starting with -D (we don't care about this case for now)
"spark.my.password=sensitive_value",
// edge case: not started with -D, but matched pattern after first '-'
"--Dspark.my.password=sensitive_value")
val cmdArgs = cmdArgsForSecret ++ ignoredArgs ++ Seq(
// Set a non-secret key
"-Dspark.regular.property=regular_value",
// Set a property with a regular key but secret in the value
"-Dspark.sensitive.property=has_secret_in_value")
// Redact sensitive information
val redactedCmdArgs = Utils.redactCommandLineArgs(sparkConf, cmdArgs)
// These arguments should be left as they were:
// 1) argument without -D option is not applied
// 2) -D option without key-value assignment is not applied
assert(ignoredArgs.forall(redactedCmdArgs.contains))
val redactedCmdArgMap = redactedCmdArgs.filterNot(ignoredArgs.contains).map { cmd =>
val keyValue = cmd.substring("-D".length).split("=")
keyValue(0) -> keyValue.tail.mkString("=")
}.toMap
// Assert that secret information got redacted while the regular property remained the same
secretKeys.foreach { key =>
assert(redactedCmdArgMap(key) === Utils.REDACTION_REPLACEMENT_TEXT)
}
assert(redactedCmdArgMap("spark.regular.property") === "regular_value")
assert(redactedCmdArgMap("spark.sensitive.property") === Utils.REDACTION_REPLACEMENT_TEXT)
}
test("tryWithSafeFinally") {
var e = new Error("Block0")
val finallyBlockError = new Error("Finally Block")
var isErrorOccurred = false
// if the try and finally blocks throw different exception instances
try {
Utils.tryWithSafeFinally { throw e }(finallyBlock = { throw finallyBlockError })
} catch {
case t: Error =>
assert(t.getSuppressed.head == finallyBlockError)
isErrorOccurred = true
}
assert(isErrorOccurred)
    // if the try and finally blocks throw the same exception instance then it should not
    // try to add it to the suppressed exceptions, which would throw an IllegalArgumentException
e = new Error("Block1")
isErrorOccurred = false
try {
Utils.tryWithSafeFinally { throw e }(finallyBlock = { throw e })
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try throws the exception and finally doesn't throw exception
e = new Error("Block2")
isErrorOccurred = false
try {
Utils.tryWithSafeFinally { throw e }(finallyBlock = {})
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try and finally block don't throw exception
Utils.tryWithSafeFinally {}(finallyBlock = {})
}
test("tryWithSafeFinallyAndFailureCallbacks") {
var e = new Error("Block0")
val catchBlockError = new Error("Catch Block")
val finallyBlockError = new Error("Finally Block")
var isErrorOccurred = false
TaskContext.setTaskContext(TaskContext.empty())
// if the try, catch and finally blocks throw different exception instances
try {
Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(
catchBlock = { throw catchBlockError }, finallyBlock = { throw finallyBlockError })
} catch {
case t: Error =>
assert(t.getSuppressed.head == catchBlockError)
assert(t.getSuppressed.last == finallyBlockError)
isErrorOccurred = true
}
assert(isErrorOccurred)
    // if the try, catch and finally blocks throw the same exception instance then it should not
    // try to add it to the suppressed exceptions, which would throw an IllegalArgumentException
e = new Error("Block1")
isErrorOccurred = false
try {
Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(catchBlock = { throw e },
finallyBlock = { throw e })
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try throws the exception, catch and finally don't throw exceptions
e = new Error("Block2")
isErrorOccurred = false
try {
Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(catchBlock = {}, finallyBlock = {})
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try, catch and finally blocks don't throw exceptions
Utils.tryWithSafeFinallyAndFailureCallbacks {}(catchBlock = {}, finallyBlock = {})
TaskContext.unset
}
test("load extensions") {
val extensions = Seq(
classOf[SimpleExtension],
classOf[ExtensionWithConf],
classOf[UnregisterableExtension]).map(_.getName())
val conf = new SparkConf(false)
val instances = Utils.loadExtensions(classOf[Object], extensions, conf)
assert(instances.size === 2)
assert(instances.count(_.isInstanceOf[SimpleExtension]) === 1)
val extWithConf = instances.find(_.isInstanceOf[ExtensionWithConf])
.map(_.asInstanceOf[ExtensionWithConf])
.get
assert(extWithConf.conf eq conf)
class NestedExtension { }
val invalid = Seq(classOf[NestedExtension].getName())
intercept[SparkException] {
Utils.loadExtensions(classOf[Object], invalid, conf)
}
val error = Seq(classOf[ExtensionWithError].getName())
intercept[IllegalArgumentException] {
Utils.loadExtensions(classOf[Object], error, conf)
}
val wrongType = Seq(classOf[ListenerImpl].getName())
intercept[IllegalArgumentException] {
Utils.loadExtensions(classOf[Seq[_]], wrongType, conf)
}
}
test("check Kubernetes master URL") {
val k8sMasterURLHttps = Utils.checkAndGetK8sMasterUrl("k8s://https://host:port")
assert(k8sMasterURLHttps === "k8s://https://host:port")
val k8sMasterURLHttp = Utils.checkAndGetK8sMasterUrl("k8s://http://host:port")
assert(k8sMasterURLHttp === "k8s://http://host:port")
val k8sMasterURLWithoutScheme = Utils.checkAndGetK8sMasterUrl("k8s://127.0.0.1:8443")
assert(k8sMasterURLWithoutScheme === "k8s://https://127.0.0.1:8443")
val k8sMasterURLWithoutScheme2 = Utils.checkAndGetK8sMasterUrl("k8s://127.0.0.1")
assert(k8sMasterURLWithoutScheme2 === "k8s://https://127.0.0.1")
intercept[IllegalArgumentException] {
Utils.checkAndGetK8sMasterUrl("k8s:https://host:port")
}
intercept[IllegalArgumentException] {
Utils.checkAndGetK8sMasterUrl("k8s://foo://host:port")
}
}
test("stringHalfWidth") {
// scalastyle:off nonascii
assert(Utils.stringHalfWidth(null) == 0)
assert(Utils.stringHalfWidth("") == 0)
assert(Utils.stringHalfWidth("ab c") == 4)
assert(Utils.stringHalfWidth("1098") == 4)
assert(Utils.stringHalfWidth("mø") == 2)
assert(Utils.stringHalfWidth("γύρ") == 3)
assert(Utils.stringHalfWidth("pê") == 2)
assert(Utils.stringHalfWidth("ー") == 2)
assert(Utils.stringHalfWidth("测") == 2)
assert(Utils.stringHalfWidth("か") == 2)
assert(Utils.stringHalfWidth("걸") == 2)
assert(Utils.stringHalfWidth("à") == 1)
assert(Utils.stringHalfWidth("焼") == 2)
assert(Utils.stringHalfWidth("羍む") == 4)
assert(Utils.stringHalfWidth("뺭ᾘ") == 3)
assert(Utils.stringHalfWidth("\\u0967\\u0968\\u0969") == 3)
// scalastyle:on nonascii
}
test("trimExceptCRLF standalone") {
val crlfSet = Set("\\r", "\\n")
val nonPrintableButCRLF = (0 to 32).map(_.toChar.toString).toSet -- crlfSet
// identity for CRLF
    crlfSet.foreach { s => assert(Utils.trimExceptCRLF(s) === s) }
// empty for other non-printables
nonPrintableButCRLF.foreach { s => assert(Utils.trimExceptCRLF(s) === "") }
// identity for a printable string
assert(Utils.trimExceptCRLF("a") === "a")
// identity for strings with CRLF
crlfSet.foreach { s =>
assert(Utils.trimExceptCRLF(s"${s}a") === s"${s}a")
assert(Utils.trimExceptCRLF(s"a${s}") === s"a${s}")
assert(Utils.trimExceptCRLF(s"b${s}b") === s"b${s}b")
}
// trim nonPrintableButCRLF except when inside a string
nonPrintableButCRLF.foreach { s =>
assert(Utils.trimExceptCRLF(s"${s}a") === "a")
assert(Utils.trimExceptCRLF(s"a${s}") === "a")
assert(Utils.trimExceptCRLF(s"b${s}b") === s"b${s}b")
}
}
}
private class SimpleExtension
private class ExtensionWithConf(val conf: SparkConf)
private class UnregisterableExtension {
throw new UnsupportedOperationException()
}
private class ExtensionWithError {
throw new IllegalArgumentException()
}
private class ListenerImpl extends SparkListener
| yanboliang/spark | core/src/test/scala/org/apache/spark/util/UtilsSuite.scala | Scala | apache-2.0 | 51,198 |
package aia.testdriven
import akka.testkit.{ CallingThreadDispatcher, EventFilter, TestKit }
import akka.actor.{ Props, ActorSystem }
import com.typesafe.config.ConfigFactory
import org.scalatest.WordSpecLike
import Greeter01Test._
class Greeter01Test extends TestKit(testSystem)
with WordSpecLike
with StopSystemAfterAll {
"The Greeter" must {
"say Hello World! when a Greeting(\\"World\\") is sent to it" in {
val dispatcherId = CallingThreadDispatcher.Id
val props = Props[Greeter].withDispatcher(dispatcherId)
val greeter = system.actorOf(props)
EventFilter.info(message = "Hello World!",
occurrences = 1).intercept {
greeter ! Greeting("World")
}
}
}
}
object Greeter01Test {
val testSystem = {
val config = ConfigFactory.parseString(
"""
akka.loggers = [akka.testkit.TestEventListener]
""")
ActorSystem("testsystem", config)
}
}
| RayRoestenburg/akka-in-action | chapter-testdriven/src/test/scala/aia/testdriven/Greeter01Test.scala | Scala | mit | 939 |
/*
* Copyright (c) 2013.
* Tinkoff Credit Systems.
* All rights reserved.
*/
package ru.tcsbank.utils
import java.sql.{Timestamp, Connection, ResultSet}
import scala.collection.{immutable, mutable}
package object database {
// Simple notion of the connection pool. `Database` needn't to know more.
trait ConnectionPool {
def getConnection: Connection
}
/*
SQL string interpolation (forked from https://gist.github.com/mnesarco/4515475)
*/
case class BoundSql(sql: String, parameters: Seq[_]) {
    // Concatenates the sql statements and the parameters' sequences
    def +(another: BoundSql) = BoundSql(this.sql + another.sql, this.parameters ++ another.parameters)
    // Concatenates the statement with a `String`, keeping the parameters as they are
    def +(anotherSql: String) = BoundSql(this.sql + anotherSql, this.parameters)
}
// Implicit conversion defining custom `String` interpolator sql"select ..."
implicit class SqlStringContext(val sc: StringContext) extends AnyVal {
def sql(args : Any*) = BoundSql(sc.parts.mkString("?"), args.toSeq)
}
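  // A hypothetical usage sketch: assuming a `userId` value in scope,
  //
  //   val query = sql"select * from users where id = $userId" + sql" and active = ${true}"
  //
  // yields BoundSql("select * from users where id = ? and active = ?", Seq(userId, true)):
  // the static parts become the statement with '?' placeholders and the interpolated
  // values are collected as bind parameters.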
// Out-parameters are also supported, it is possible to call procedures like,
//
// val Seq(output: String) = database execute call (
// sql"""begin
// our_package.my_procedure('input', ${out(java.sql.Types.VARCHAR)})
// end;""")
//
// @see tests for an actual usage example.
//
case class OutParam(valueType: Int)
def out(valueType: Int) = OutParam(valueType)
  // Enriches a database operation (Connection => T) with additional combinator methods
implicit class ChargedSqlOperation[T](operation: Connection => T) {
    // Composition of database operations (having composition at all is no small thing)
def ~[T2](anotherOperation: Connection => T2): Connection => T2 = { connection: Connection =>
operation(connection)
anotherOperation(connection)
}
}
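  // An illustrative sketch: given two operations, e.g. a `createTable: Connection => Unit`
  // and a `countRows: Connection => Int` (hypothetical names), the expression
  // `createTable ~ countRows` is itself a `Connection => Int` that runs both against the
  // same connection and returns the result of the second one.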
/*
ResultSet extractor methods
   Transform all `ResultSet` records into a single convenient Scala object.
   Intended to be used with `Database.query`, @see tests for examples.
*/
// Creates native Scala `Option`
def toOption[V](valueReader: ResultSet => V) = { resultSet: ResultSet =>
Iterator.continually(resultSet).takeWhile(_.next).find(rs => true).map(valueReader)
}
// Creates native Scala `List` from `ResultSet` with list values creating-closure
def toList[V](valueReader: ResultSet => V) = { resultSet: ResultSet =>
Iterator.continually(resultSet).takeWhile(_.next).map(valueReader).toList
}
// Creates native Scala `Set` from `ResultSet` with set values creating-closure
def toSet[V](valueReader: ResultSet => V) = { resultSet: ResultSet =>
Iterator.continually(resultSet).takeWhile(_.next).map(valueReader).toSet
}
// Creates native Scala `Map` from `ResultSet` with key and value creating-closures
def toMap[K,V](keyReader: ResultSet => K, valueReader: ResultSet => V) = { resultSet: ResultSet =>
Iterator.continually(resultSet).takeWhile(_.next).map(rs => keyReader(rs) -> valueReader(rs)).toMap
}
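  // Usage sketch (assuming a `Database.query` helper as referenced above):
  //
  //   val namesById: Map[Long, String] =
  //     database.query(sql"select id, name from users")(toMap(_.getLong("id"), _.getString("name")))
  //
  // Each row contributes one key-value pair; a later duplicate of a key overwrites the earlier one.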
// Creates `Map` with `List` of multiple values from the `ResultSet`
def toListMultiMap[K,V](keyReader: ResultSet => K, valueReader: ResultSet => V) = { resultSet: ResultSet =>
val m = mutable.Map.empty[K, mutable.Builder[V,List[V]]]
for (rs <- Iterator.continually(resultSet).takeWhile(_.next)) {
val key = keyReader(rs)
val builder = m.getOrElseUpdate(key, immutable.List.newBuilder[V])
builder += valueReader(rs)
}
val b = immutable.Map.newBuilder[K, List[V]]
for ((k, v) <- m)
b += ((k, v.result()))
b.result()
}
// Creates `Map` with `Set` of multiple values from the `ResultSet`
def toSetMultiMap[K,V](keyReader: ResultSet => K, valueReader: ResultSet => V) = { resultSet: ResultSet =>
val m = mutable.Map.empty[K, mutable.Builder[V,Set[V]]]
for (rs <- Iterator.continually(resultSet).takeWhile(_.next)) {
val key = keyReader(rs)
val builder = m.getOrElseUpdate(key, immutable.Set.newBuilder[V])
builder += valueReader(rs)
}
val b = immutable.Map.newBuilder[K, Set[V]]
for ((k, v) <- m)
b += ((k, v.result()))
b.result()
}
// Creates `Map` with `Map` of multiple key-value pairs from the `ResultSet`
def toMapMultiMap[K,K2,V](keyReader: ResultSet => K, valueReader: ResultSet => (K2,V)) = { resultSet: ResultSet =>
val m = mutable.Map.empty[K, mutable.Builder[(K2,V),Map[K2,V]]]
for (rs <- Iterator.continually(resultSet).takeWhile(_.next)) {
val key = keyReader(rs)
val builder = m.getOrElseUpdate(key, immutable.Map.newBuilder[K2,V])
builder += valueReader(rs)
}
val b = immutable.Map.newBuilder[K, Map[K2,V]]
for ((k, v) <- m)
b += ((k, v.result()))
b.result()
}
/*
ResultSet Extensions
Allows to fetch native Scala types out of the `java.sql.ResultSet`.
*/
implicit class ExtendedResultSet(resultSet: ResultSet) {
/*
Reading id from result set
val id: Id[Type] = resultSet.getId("id_column")
     It is possible to get Option[Id[T]] when the id's existence is not guaranteed
     (e.g. when the id belongs to a foreign table).
*/
def getId[T](i: Int): Id[T] = {
Option(resultSet.getString(i)).map(id => Id[T](id)).getOrElse(null)
}
def getId[T](column: String): Id[T] = {
Option(resultSet.getString(column)).map(id => Id[T](id)).getOrElse(null)
}
/*
Reading `scala.Option` out of the `java.sql.ResultSet`.
*/
def getOption[T](fetcher: ResultSet => T): Option[T] = {
val value = fetcher(resultSet)
if(resultSet.wasNull) None else Option(value)
}
def getOptionalId[T](i: Int): Option[Id[T]] = getOption(rs => Id[T](rs.getString(i)))
def getOptionalId[T](column: String): Option[Id[T]] = getOption(rs => Id[T](rs.getString(column)))
def getOptionalString(i: Int): Option[String] = getOption(_.getString(i))
def getOptionalString(column: String): Option[String] = getOption(_.getString(column))
/*
Getting numbers
      These methods return options instead of null
      to handle the absence of values properly.
*/
def getOptionalLong(i: Int): Option[Long] = getOption(_.getLong(i))
def getOptionalLong(column: String): Option[Long] = getOption(_.getLong(column))
def getOptionalInt(i: Int): Option[Int] = getOption(_.getInt(i))
def getOptionalInt(column: String): Option[Int] = getOption(_.getInt(column))
def getOptionalBigDecimal(i: Int): Option[BigDecimal] = getOption(_.getBigDecimal(i))
def getOptionalBigDecimal(column: String): Option[BigDecimal] = getOption(_.getBigDecimal(column))
def getOptionalDouble(i: Int): Option[Double] = getOption(_.getDouble(i))
def getOptionalDouble(column: String): Option[Double] = getOption(_.getDouble(column))
/*
Dealing with time
*/
def getTime(i: Int): Time = wrapTimestamp(resultSet.getTimestamp(i))
def getTime(column: String): Time = wrapTimestamp(resultSet.getTimestamp(column))
def getOptionalTime(column: String) = getOption(rs => wrapTimestamp(rs.getTimestamp(column)))
def getOptionalTime(i: Int) = getOption(rs => wrapTimestamp(rs.getTimestamp(i)))
private def wrapTimestamp(timestamp: Timestamp): Time = {
if(timestamp == null) null
else Time(timestamp.getTime)
}
/* Getting `Class`
val objectClass: Class[Type] = resultSet.getClass("class_column")
*/
def getClass[T](i: Int): Class[T] = wrapClass(resultSet.getString(i))
def getClass[T](column: String): Class[T] = wrapClass(resultSet.getString(column))
private def wrapClass[T](className: String): Class[T] = {
if(className == null) null
else Class.forName(className).asInstanceOf[Class[T]]
}
}
}
| TinkoffCreditSystems/ScalaDatabase | src/main/scala/ru/tcsbank/utils/database/package.scala | Scala | mit | 7,817 |
package scredis.commands
import scredis.io.NonBlockingConnection
import scredis.protocol.Decoder
import scredis.protocol.requests.ScriptingRequests._
import scredis.serialization.{ Reader, Writer }
import scala.concurrent.Future
import scala.concurrent.duration._
/**
* This trait implements scripting commands.
*
* @define e [[scredis.exceptions.RedisErrorResponseException]]
* @define p [[scredis.exceptions.RedisProtocolException]]
* @define none `None`
* @define true '''true'''
* @define false '''false'''
*/
trait ScriptingCommands { self: NonBlockingConnection =>
/**
   * Executes a Lua script, optionally passing keys and arguments to it.
*
* @param script the LUA script
* @param keys keys to be used in the script
* @param args arguments to be used in the script
* @throws $e if an error occurs while running the script
* @throws $p if the result could not be decoded by provided `Decoder`
*
* @since 2.6.0
*/
def eval[R: Decoder, W1: Writer, W2: Writer](
script: String, keys: Seq[W1] = Nil, args: Seq[W2] = Nil
): Future[R] = send(Eval(script, keys, args))
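  // A hypothetical usage sketch: with a connected client mixing in this trait, and assuming
  // scredis provides Decoder[Long] and Writer[String] instances,
  //
  //   client.eval[Long, String, String](
  //     "return redis.call('INCRBY', KEYS[1], ARGV[1])",
  //     keys = Seq("counter"), args = Seq("5"))
  //
  // runs the script atomically on the server and completes the future with the new counter value.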
/**
   * Executes a cached Lua script by its SHA1 digest, optionally passing keys and arguments to it.
*
* @param sha1 the SHA1 digest
* @param keys keys to be used in the script
* @param args arguments to be used in the script
* @throws $e if there is no script corresponding to the provided SHA1 digest or if an error
* occurs while running the script
* @throws $p if the result could not be decoded by provided `Decoder`
*
* @since 2.6.0
*/
def evalSHA[R: Decoder, W1: Writer, W2: Writer](
sha1: String, keys: Seq[W1] = Nil, args: Seq[W2] = Nil
): Future[R] = send(EvalSHA(sha1, keys, args))
/**
* Checks existence of scripts in the script cache.
*
* @param sha1s SHA1 digest(s) to check for existence
* @return SHA1 -> Boolean `Map` where $true means the script associated to the sha1 exists
* in the cache
*
* @since 2.6.0
*/
def scriptExists(sha1s: String*): Future[Map[String, Boolean]] = send(
ScriptExists(sha1s: _*)
)
/**
* Removes all the scripts from the script cache.
*
* @since 2.6.0
*/
def scriptFlush(): Future[Unit] = send(ScriptFlush())
/**
* Kills the currently executing Lua script, assuming no write operation was yet performed by
* the script.
*
* @note If the script already performed write operations it can not be killed in this way
* because it would violate Lua script atomicity contract. In such a case only SHUTDOWN NOSAVE
   * is able to kill the script, killing the Redis process in a hard way and preventing it from
   * persisting half-written information.
*
* @since 2.6.0
*/
def scriptKill(): Future[Unit] = send(ScriptKill())
/**
   * Loads the specified Lua script into the script cache, without executing it.
*
* @note The script is guaranteed to stay in the script cache forever (unless SCRIPT FLUSH
* is called).
*
* @param script the script to be loaded into the cache
* @return the SHA1 digest of the stored script
* @throws $e if a compilation error occurs
*
* @since 2.6.0
*/
def scriptLoad(script: String): Future[String] = send(ScriptLoad(script))
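  // A sketched pairing with evalSHA (hypothetical `client`, assuming a Decoder[Option[String]]):
  // load the script once, then invoke it by digest.
  //
  //   for {
  //     sha1 <- client.scriptLoad("return redis.call('GET', KEYS[1])")
  //     value <- client.evalSHA[Option[String], String, String](sha1, keys = Seq("some-key"))
  //   } yield value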
}
| rileyberton/scredis | src/main/scala/scredis/commands/ScriptingCommands.scala | Scala | apache-2.0 | 3,268
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io._
import java.util.{GregorianCalendar, LinkedHashSet, UUID}
import java.util.concurrent.atomic.AtomicLong
import scala.Array.canBuildFrom
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.util.parsing.combinator.RegexParsers
import org.apache.spark.sql.{RuntimeConfig, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{MultiInstanceRelation, NoSuchTableException}
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics}
import org.apache.spark.sql.execution.command.Partitioner
import org.apache.spark.sql.types._
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.filesystem.CarbonFile
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.datastore.impl.FileFactory.FileType
import org.apache.carbondata.core.fileoperations.FileWriteOperation
import org.apache.carbondata.core.locks.ZookeeperInit
import org.apache.carbondata.core.metadata.{CarbonMetadata, CarbonTableIdentifier}
import org.apache.carbondata.core.metadata.converter.ThriftWrapperSchemaConverterImpl
import org.apache.carbondata.core.metadata.datatype.DataType.DECIMAL
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.metadata.schema.table.column.{CarbonColumn, CarbonDimension}
import org.apache.carbondata.core.reader.ThriftReader
import org.apache.carbondata.core.stats.{QueryStatistic, QueryStatisticsConstants}
import org.apache.carbondata.core.statusmanager.SegmentStatusManager
import org.apache.carbondata.core.util.{CarbonProperties, CarbonTimeStatisticsFactory, CarbonUtil}
import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}
import org.apache.carbondata.core.writer.ThriftWriter
import org.apache.carbondata.format.{SchemaEvolutionEntry, TableInfo}
import org.apache.carbondata.spark.merger.TableMeta
import org.apache.carbondata.spark.util.CarbonSparkUtil
case class MetaData(var tablesMeta: ArrayBuffer[TableMeta]) {
// clear the metadata
def clear(): Unit = {
tablesMeta.clear()
}
}
case class CarbonMetaData(dims: Seq[String],
msrs: Seq[String],
carbonTable: CarbonTable,
dictionaryMap: DictionaryMap)
object CarbonMetastore {
def readSchemaFileToThriftTable(schemaFilePath: String): TableInfo = {
val createTBase = new ThriftReader.TBaseCreator() {
override def create(): org.apache.thrift.TBase[TableInfo, TableInfo._Fields] = {
new TableInfo()
}
}
val thriftReader = new ThriftReader(schemaFilePath, createTBase)
var tableInfo: TableInfo = null
try {
thriftReader.open()
tableInfo = thriftReader.read().asInstanceOf[TableInfo]
} finally {
thriftReader.close()
}
tableInfo
}
def writeThriftTableToSchemaFile(schemaFilePath: String, tableInfo: TableInfo): Unit = {
val thriftWriter = new ThriftWriter(schemaFilePath, false)
try {
thriftWriter.open()
thriftWriter.write(tableInfo);
} finally {
thriftWriter.close()
}
}
}
case class DictionaryMap(dictionaryMap: Map[String, Boolean]) {
def get(name: String): Option[Boolean] = {
dictionaryMap.get(name.toLowerCase)
}
}
class CarbonMetastore(conf: RuntimeConfig, val storePath: String) {
@transient
val LOGGER = LogServiceFactory.getLogService("org.apache.spark.sql.CarbonMetastoreCatalog")
val tableModifiedTimeStore = new java.util.HashMap[String, Long]()
tableModifiedTimeStore
.put(CarbonCommonConstants.DATABASE_DEFAULT_NAME, System.currentTimeMillis())
private val nextId = new AtomicLong(0)
def nextQueryId: String = {
System.nanoTime() + ""
}
val metadata = loadMetadata(storePath, nextQueryId)
def getTableCreationTime(databaseName: String, tableName: String): Long = {
val tableMeta = metadata.tablesMeta.filter(
c => c.carbonTableIdentifier.getDatabaseName.equalsIgnoreCase(databaseName) &&
c.carbonTableIdentifier.getTableName.equalsIgnoreCase(tableName))
val tableCreationTime = tableMeta.head.carbonTable.getTableLastUpdatedTime
tableCreationTime
}
def cleanStore(): Unit = {
try {
val fileType = FileFactory.getFileType(storePath)
FileFactory.deleteFile(storePath, fileType)
metadata.clear()
} catch {
case e: Throwable => LOGGER.error(e, "clean store failed")
}
}
def lookupRelation(dbName: Option[String], tableName: String)
(sparkSession: SparkSession): LogicalPlan = {
lookupRelation(TableIdentifier(tableName, dbName))(sparkSession)
}
def lookupRelation(tableIdentifier: TableIdentifier, alias: Option[String] = None)
(sparkSession: SparkSession): LogicalPlan = {
checkSchemasModifiedTimeAndReloadTables()
val database = tableIdentifier.database.getOrElse(
sparkSession.catalog.currentDatabase
)
val tables = getTableFromMetadata(database, tableIdentifier.table)
tables match {
case Some(t) =>
CarbonRelation(database, tableIdentifier.table,
CarbonSparkUtil.createSparkMeta(tables.head.carbonTable), tables.head, alias)
case None =>
throw new NoSuchTableException(database, tableIdentifier.table)
}
}
/**
* This method will search for a table in the catalog metadata
*
* @param database
* @param tableName
* @return
*/
def getTableFromMetadata(database: String,
tableName: String): Option[TableMeta] = {
metadata.tablesMeta
.find(c => c.carbonTableIdentifier.getDatabaseName.equalsIgnoreCase(database) &&
c.carbonTableIdentifier.getTableName.equalsIgnoreCase(tableName))
}
def tableExists(
table: String,
databaseOp: Option[String] = None)(sparkSession: SparkSession): Boolean = {
checkSchemasModifiedTimeAndReloadTables()
val database = databaseOp.getOrElse(sparkSession.catalog.currentDatabase)
val tables = metadata.tablesMeta.filter(
c => c.carbonTableIdentifier.getDatabaseName.equalsIgnoreCase(database) &&
c.carbonTableIdentifier.getTableName.equalsIgnoreCase(table))
tables.nonEmpty
}
def tableExists(tableIdentifier: TableIdentifier)(sparkSession: SparkSession): Boolean = {
checkSchemasModifiedTimeAndReloadTables()
val database = tableIdentifier.database.getOrElse(sparkSession.catalog.currentDatabase)
val tables = metadata.tablesMeta.filter(
c => c.carbonTableIdentifier.getDatabaseName.equalsIgnoreCase(database) &&
c.carbonTableIdentifier.getTableName.equalsIgnoreCase(tableIdentifier.table))
tables.nonEmpty
}
def loadMetadata(metadataPath: String, queryId: String): MetaData = {
val recorder = CarbonTimeStatisticsFactory.createDriverRecorder()
val statistic = new QueryStatistic()
    // Create the zookeeper instance once if zookeeper is configured as the carbon lock type.
val zookeeperUrl: String = conf.get(CarbonCommonConstants.ZOOKEEPER_URL, null)
if (zookeeperUrl != null) {
CarbonProperties.getInstance.addProperty(CarbonCommonConstants.ZOOKEEPER_URL, zookeeperUrl)
ZookeeperInit.getInstance(zookeeperUrl)
LOGGER.info("Zookeeper url is configured. Taking the zookeeper as lock type.")
var configuredLockType = CarbonProperties.getInstance
.getProperty(CarbonCommonConstants.LOCK_TYPE)
if (null == configuredLockType) {
configuredLockType = CarbonCommonConstants.CARBON_LOCK_TYPE_ZOOKEEPER
CarbonProperties.getInstance
.addProperty(CarbonCommonConstants.LOCK_TYPE,
configuredLockType)
}
}
if (metadataPath == null) {
return null
}
val fileType = FileFactory.getFileType(metadataPath)
val metaDataBuffer = new ArrayBuffer[TableMeta]
fillMetaData(metadataPath, fileType, metaDataBuffer)
updateSchemasUpdatedTime(readSchemaFileSystemTime("", ""))
statistic.addStatistics(QueryStatisticsConstants.LOAD_META,
System.currentTimeMillis())
recorder.recordStatisticsForDriver(statistic, queryId)
MetaData(metaDataBuffer)
}
private def fillMetaData(basePath: String, fileType: FileType,
metaDataBuffer: ArrayBuffer[TableMeta]): Unit = {
val databasePath = basePath // + "/schemas"
try {
if (FileFactory.isFileExist(databasePath, fileType)) {
val file = FileFactory.getCarbonFile(databasePath, fileType)
val databaseFolders = file.listFiles()
databaseFolders.foreach(databaseFolder => {
if (databaseFolder.isDirectory) {
val dbName = databaseFolder.getName
val tableFolders = databaseFolder.listFiles()
tableFolders.foreach(tableFolder => {
if (tableFolder.isDirectory) {
val carbonTableIdentifier = new CarbonTableIdentifier(databaseFolder.getName,
tableFolder.getName, UUID.randomUUID().toString)
val carbonTablePath = CarbonStorePath.getCarbonTablePath(basePath,
carbonTableIdentifier)
val tableMetadataFile = carbonTablePath.getSchemaFilePath
if (FileFactory.isFileExist(tableMetadataFile, fileType)) {
val tableName = tableFolder.getName
val tableUniqueName = databaseFolder.getName + "_" + tableFolder.getName
val tableInfo: TableInfo = readSchemaFile(tableMetadataFile)
val schemaConverter = new ThriftWrapperSchemaConverterImpl
val wrapperTableInfo = schemaConverter
.fromExternalToWrapperTableInfo(tableInfo, dbName, tableName, basePath)
val schemaFilePath = CarbonStorePath
.getCarbonTablePath(storePath, carbonTableIdentifier).getSchemaFilePath
wrapperTableInfo.setStorePath(storePath)
wrapperTableInfo
.setMetaDataFilepath(CarbonTablePath.getFolderContainingFile(schemaFilePath))
CarbonMetadata.getInstance().loadTableMetadata(wrapperTableInfo)
val carbonTable = CarbonMetadata.getInstance().getCarbonTable(tableUniqueName)
metaDataBuffer += new TableMeta(carbonTable.getCarbonTableIdentifier, storePath,
carbonTable)
}
}
})
}
})
} else {
// Create folders and files.
FileFactory.mkdirs(databasePath, fileType)
}
} catch {
case s: java.io.FileNotFoundException =>
// Create folders and files.
FileFactory.mkdirs(databasePath, fileType)
}
}
/**
* This method will read the schema file from a given path
*
* @param schemaFilePath
* @return
*/
def readSchemaFile(schemaFilePath: String): TableInfo = {
val createTBase = new ThriftReader.TBaseCreator() {
override def create(): org.apache.thrift.TBase[TableInfo, TableInfo._Fields] = {
new TableInfo()
}
}
val thriftReader = new ThriftReader(schemaFilePath, createTBase)
thriftReader.open()
val tableInfo: TableInfo = thriftReader.read().asInstanceOf[TableInfo]
thriftReader.close()
tableInfo
}
/**
   * This method will overwrite the existing schema and update it with the given details
*
* @param carbonTableIdentifier
* @param thriftTableInfo
* @param schemaEvolutionEntry
* @param carbonStorePath
* @param sparkSession
*/
def updateTableSchema(carbonTableIdentifier: CarbonTableIdentifier,
thriftTableInfo: org.apache.carbondata.format.TableInfo,
schemaEvolutionEntry: SchemaEvolutionEntry,
carbonStorePath: String)
(sparkSession: SparkSession): String = {
val schemaConverter = new ThriftWrapperSchemaConverterImpl
val wrapperTableInfo = schemaConverter
.fromExternalToWrapperTableInfo(thriftTableInfo,
carbonTableIdentifier.getDatabaseName,
carbonTableIdentifier.getTableName,
carbonStorePath)
thriftTableInfo.fact_table.schema_evolution.schema_evolution_history.add(schemaEvolutionEntry)
createSchemaThriftFile(wrapperTableInfo,
thriftTableInfo,
carbonTableIdentifier.getDatabaseName,
carbonTableIdentifier.getTableName)(sparkSession)
}
/**
*
* Prepare Thrift Schema from wrapper TableInfo and write to Schema file.
* Load CarbonTable from wrapper tableInfo
*
*/
def createTableFromThrift(
tableInfo: org.apache.carbondata.core.metadata.schema.table.TableInfo,
dbName: String, tableName: String)
(sparkSession: SparkSession): String = {
if (tableExists(tableName, Some(dbName))(sparkSession)) {
sys.error(s"Table [$tableName] already exists under Database [$dbName]")
}
val schemaEvolutionEntry = new SchemaEvolutionEntry(tableInfo.getLastUpdatedTime)
val schemaConverter = new ThriftWrapperSchemaConverterImpl
val thriftTableInfo = schemaConverter
.fromWrapperToExternalTableInfo(tableInfo, dbName, tableName)
thriftTableInfo.getFact_table.getSchema_evolution.getSchema_evolution_history
.add(schemaEvolutionEntry)
val carbonTablePath = createSchemaThriftFile(tableInfo,
thriftTableInfo,
dbName,
tableName)(sparkSession)
LOGGER.info(s"Table $tableName for Database $dbName created successfully.")
carbonTablePath
}
/**
* This method will write the schema thrift file in carbon store and load table metadata
*
* @param tableInfo
* @param thriftTableInfo
* @param dbName
* @param tableName
* @param sparkSession
* @return
*/
private def createSchemaThriftFile(
tableInfo: org.apache.carbondata.core.metadata.schema.table.TableInfo,
thriftTableInfo: org.apache.carbondata.format.TableInfo,
dbName: String, tableName: String)
(sparkSession: SparkSession): String = {
val carbonTableIdentifier = new CarbonTableIdentifier(dbName, tableName,
tableInfo.getFactTable.getTableId)
val carbonTablePath = CarbonStorePath.getCarbonTablePath(storePath, carbonTableIdentifier)
val schemaFilePath = carbonTablePath.getSchemaFilePath
val schemaMetadataPath = CarbonTablePath.getFolderContainingFile(schemaFilePath)
tableInfo.setMetaDataFilepath(schemaMetadataPath)
tableInfo.setStorePath(storePath)
val fileType = FileFactory.getFileType(schemaMetadataPath)
if (!FileFactory.isFileExist(schemaMetadataPath, fileType)) {
FileFactory.mkdirs(schemaMetadataPath, fileType)
}
val thriftWriter = new ThriftWriter(schemaFilePath, false)
thriftWriter.open(FileWriteOperation.OVERWRITE)
thriftWriter.write(thriftTableInfo)
thriftWriter.close()
removeTableFromMetadata(dbName, tableName)
CarbonMetadata.getInstance().loadTableMetadata(tableInfo)
val tableMeta = new TableMeta(carbonTableIdentifier, storePath,
CarbonMetadata.getInstance().getCarbonTable(dbName + '_' + tableName))
metadata.tablesMeta += tableMeta
updateSchemasUpdatedTime(touchSchemaFileSystemTime(dbName, tableName))
carbonTablePath.getPath
}
/**
* This method will remove the table meta from catalog metadata array
*
* @param dbName
* @param tableName
*/
def removeTableFromMetadata(dbName: String, tableName: String): Unit = {
val metadataToBeRemoved: Option[TableMeta] = getTableFromMetadata(dbName, tableName)
metadataToBeRemoved match {
case Some(tableMeta) =>
metadata.tablesMeta -= tableMeta
CarbonMetadata.getInstance.removeTable(dbName + "_" + tableName)
case None =>
LOGGER.debug(s"No entry for table $tableName in database $dbName")
}
}
private def updateMetadataByWrapperTable(
wrapperTableInfo: org.apache.carbondata.core.metadata.schema.table.TableInfo): Unit = {
CarbonMetadata.getInstance().loadTableMetadata(wrapperTableInfo)
val carbonTable = CarbonMetadata.getInstance().getCarbonTable(
wrapperTableInfo.getTableUniqueName)
for (i <- metadata.tablesMeta.indices) {
if (wrapperTableInfo.getTableUniqueName.equals(
metadata.tablesMeta(i).carbonTableIdentifier.getTableUniqueName)) {
metadata.tablesMeta(i).carbonTable = carbonTable
}
}
}
def updateMetadataByThriftTable(schemaFilePath: String,
tableInfo: TableInfo, dbName: String, tableName: String, storePath: String): Unit = {
tableInfo.getFact_table.getSchema_evolution.getSchema_evolution_history.get(0)
.setTime_stamp(System.currentTimeMillis())
val schemaConverter = new ThriftWrapperSchemaConverterImpl
val wrapperTableInfo = schemaConverter
.fromExternalToWrapperTableInfo(tableInfo, dbName, tableName, storePath)
wrapperTableInfo
.setMetaDataFilepath(CarbonTablePath.getFolderContainingFile(schemaFilePath))
wrapperTableInfo.setStorePath(storePath)
updateMetadataByWrapperTable(wrapperTableInfo)
}
/**
* Shows all schemas which has Database name like
*/
def showDatabases(schemaLike: Option[String]): Seq[String] = {
checkSchemasModifiedTimeAndReloadTables()
metadata.tablesMeta.map { c =>
schemaLike match {
case Some(name) =>
if (c.carbonTableIdentifier.getDatabaseName.contains(name)) {
c.carbonTableIdentifier
.getDatabaseName
} else {
null
}
case _ => c.carbonTableIdentifier.getDatabaseName
}
}.filter(f => f != null)
}
/**
* Shows all tables in all schemas.
*/
def getAllTables(): Seq[TableIdentifier] = {
checkSchemasModifiedTimeAndReloadTables()
metadata.tablesMeta.map { c =>
TableIdentifier(c.carbonTableIdentifier.getTableName,
Some(c.carbonTableIdentifier.getDatabaseName))
}
}
def isTablePathExists(tableIdentifier: TableIdentifier)(sparkSession: SparkSession): Boolean = {
val dbName = tableIdentifier.database.getOrElse(sparkSession.catalog.currentDatabase)
val tableName = tableIdentifier.table.toLowerCase
val tablePath = CarbonStorePath.getCarbonTablePath(this.storePath,
new CarbonTableIdentifier(dbName, tableName, "")).getPath
val fileType = FileFactory.getFileType(tablePath)
FileFactory.isFileExist(tablePath, fileType)
}
def dropTable(tableStorePath: String, tableIdentifier: TableIdentifier)
(sparkSession: SparkSession) {
val dbName = tableIdentifier.database.get
val tableName = tableIdentifier.table
val metadataFilePath = CarbonStorePath.getCarbonTablePath(tableStorePath,
new CarbonTableIdentifier(dbName, tableName, "")).getMetadataDirectoryPath
val fileType = FileFactory.getFileType(metadataFilePath)
if (FileFactory.isFileExist(metadataFilePath, fileType)) {
      // While dropping we should refresh the schema modified time so that any change made
      // from another beeline session gets picked up.
      checkSchemasModifiedTimeAndReloadTables()
val file = FileFactory.getCarbonFile(metadataFilePath, fileType)
CarbonUtil.deleteFoldersAndFilesSilent(file.getParentFile)
val metadataToBeRemoved: Option[TableMeta] = getTableFromMetadata(dbName,
tableIdentifier.table)
metadataToBeRemoved match {
case Some(tableMeta) =>
metadata.tablesMeta -= tableMeta
          CarbonMetadata.getInstance.removeTable(dbName + "_" + tableName)
updateSchemasUpdatedTime(touchSchemaFileSystemTime(dbName, tableName))
case None =>
LOGGER.info(s"Metadata does not contain entry for table $tableName in database $dbName")
}
CarbonHiveMetadataUtil.invalidateAndDropTable(dbName, tableName, sparkSession)
// discard cached table info in cachedDataSourceTables
sparkSession.sessionState.catalog.refreshTable(tableIdentifier)
}
}
private def getTimestampFileAndType(databaseName: String, tableName: String) = {
val timestampFile = storePath + "/" + CarbonCommonConstants.SCHEMAS_MODIFIED_TIME_FILE
val timestampFileType = FileFactory.getFileType(timestampFile)
(timestampFile, timestampFileType)
}
/**
* This method will put the updated timestamp of schema file in the table modified time store map
*
* @param timeStamp
*/
def updateSchemasUpdatedTime(timeStamp: Long) {
tableModifiedTimeStore.put("default", timeStamp)
}
/**
* This method will read the timestamp of empty schema file
*
* @param databaseName
* @param tableName
* @return
*/
def readSchemaFileSystemTime(databaseName: String, tableName: String): Long = {
val (timestampFile, timestampFileType) = getTimestampFileAndType(databaseName, tableName)
if (FileFactory.isFileExist(timestampFile, timestampFileType)) {
FileFactory.getCarbonFile(timestampFile, timestampFileType).getLastModifiedTime
} else {
System.currentTimeMillis()
}
}
/**
* This method will check and create an empty schema timestamp file
*
* @param databaseName
* @param tableName
* @return
*/
def touchSchemaFileSystemTime(databaseName: String, tableName: String): Long = {
val (timestampFile, timestampFileType) = getTimestampFileAndType(databaseName, tableName)
if (!FileFactory.isFileExist(timestampFile, timestampFileType)) {
LOGGER.audit(s"Creating timestamp file for $databaseName.$tableName")
FileFactory.createNewFile(timestampFile, timestampFileType)
}
val systemTime = System.currentTimeMillis()
FileFactory.getCarbonFile(timestampFile, timestampFileType)
.setLastModifiedTime(systemTime)
systemTime
}
def checkSchemasModifiedTimeAndReloadTables() {
val (timestampFile, timestampFileType) = getTimestampFileAndType("", "")
if (FileFactory.isFileExist(timestampFile, timestampFileType)) {
if (!(FileFactory.getCarbonFile(timestampFile, timestampFileType).
getLastModifiedTime ==
tableModifiedTimeStore.get(CarbonCommonConstants.DATABASE_DEFAULT_NAME))) {
refreshCache()
}
}
}
def refreshCache() {
metadata.tablesMeta = loadMetadata(storePath, nextQueryId).tablesMeta
}
def getSchemaLastUpdatedTime(databaseName: String, tableName: String): Long = {
var schemaLastUpdatedTime = System.currentTimeMillis
val (timestampFile, timestampFileType) = getTimestampFileAndType(databaseName, tableName)
if (FileFactory.isFileExist(timestampFile, timestampFileType)) {
schemaLastUpdatedTime = FileFactory.getCarbonFile(timestampFile, timestampFileType)
.getLastModifiedTime
}
schemaLastUpdatedTime
}
def readTableMetaDataFile(tableFolder: CarbonFile,
fileType: FileFactory.FileType):
(String, String, String, String, Partitioner, Long) = {
val tableMetadataFile = tableFolder.getAbsolutePath + "/metadata"
var schema: String = ""
var databaseName: String = ""
var tableName: String = ""
var dataPath: String = ""
var partitioner: Partitioner = null
val cal = new GregorianCalendar(2011, 1, 1)
var tableCreationTime = cal.getTime.getTime
if (FileFactory.isFileExist(tableMetadataFile, fileType)) {
// load metadata
val in = FileFactory.getDataInputStream(tableMetadataFile, fileType)
var len = 0
try {
len = in.readInt()
} catch {
case others: EOFException => len = 0
}
while (len > 0) {
val databaseNameBytes = new Array[Byte](len)
in.readFully(databaseNameBytes)
databaseName = new String(databaseNameBytes, "UTF8")
val tableNameLen = in.readInt()
val tableNameBytes = new Array[Byte](tableNameLen)
in.readFully(tableNameBytes)
tableName = new String(tableNameBytes, "UTF8")
val dataPathLen = in.readInt()
val dataPathBytes = new Array[Byte](dataPathLen)
in.readFully(dataPathBytes)
dataPath = new String(dataPathBytes, "UTF8")
val versionLength = in.readInt()
val versionBytes = new Array[Byte](versionLength)
in.readFully(versionBytes)
val schemaLen = in.readInt()
val schemaBytes = new Array[Byte](schemaLen)
in.readFully(schemaBytes)
schema = new String(schemaBytes, "UTF8")
val partitionLength = in.readInt()
val partitionBytes = new Array[Byte](partitionLength)
in.readFully(partitionBytes)
val inStream = new ByteArrayInputStream(partitionBytes)
val objStream = new ObjectInputStream(inStream)
partitioner = objStream.readObject().asInstanceOf[Partitioner]
objStream.close()
try {
tableCreationTime = in.readLong()
len = in.readInt()
} catch {
case others: EOFException => len = 0
}
}
in.close()
}
(databaseName, tableName, dataPath, schema, partitioner, tableCreationTime)
}
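  // Record layout of the legacy "metadata" file consumed by readTableMetaDataFile above,
  // reconstructed from the reads the method performs (every string field is length-prefixed):
  //   int databaseNameLength, UTF-8 databaseName
  //   int tableNameLength,    UTF-8 tableName
  //   int dataPathLength,     UTF-8 dataPath
  //   int versionLength,      version bytes (read and discarded)
  //   int schemaLength,       UTF-8 schema
  //   int partitionerLength,  java-serialized Partitioner
  //   long tableCreationTime, then the next record's databaseNameLength (EOF ends the loop)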
def createDatabaseDirectory(dbName: String) {
val databasePath = storePath + File.separator + dbName
val fileType = FileFactory.getFileType(databasePath)
FileFactory.mkdirs(databasePath, fileType)
}
def dropDatabaseDirectory(dbName: String) {
val databasePath = storePath + File.separator + dbName
val fileType = FileFactory.getFileType(databasePath)
if (FileFactory.isFileExist(databasePath, fileType)) {
val dbPath = FileFactory.getCarbonFile(databasePath, fileType)
CarbonUtil.deleteFoldersAndFiles(dbPath)
}
}
}
object CarbonMetastoreTypes extends RegexParsers {
protected lazy val primitiveType: Parser[DataType] =
"string" ^^^ StringType |
"float" ^^^ FloatType |
"int" ^^^ IntegerType |
"tinyint" ^^^ ShortType |
"short" ^^^ ShortType |
"double" ^^^ DoubleType |
"long" ^^^ LongType |
"binary" ^^^ BinaryType |
"boolean" ^^^ BooleanType |
fixedDecimalType |
"decimal" ^^^ "decimal" ^^^ DecimalType(10, 0) |
"varchar\\\\((\\\\d+)\\\\)".r ^^^ StringType |
"date" ^^^ DateType |
"timestamp" ^^^ TimestampType
protected lazy val fixedDecimalType: Parser[DataType] =
"decimal" ~> "(" ~> "^[1-9]\\\\d*".r ~ ("," ~> "^[0-9]\\\\d*".r <~ ")") ^^ {
case precision ~ scale =>
DecimalType(precision.toInt, scale.toInt)
}
protected lazy val arrayType: Parser[DataType] =
"array" ~> "<" ~> dataType <~ ">" ^^ {
case tpe => ArrayType(tpe)
}
protected lazy val mapType: Parser[DataType] =
"map" ~> "<" ~> dataType ~ "," ~ dataType <~ ">" ^^ {
case t1 ~ _ ~ t2 => MapType(t1, t2)
}
protected lazy val structField: Parser[StructField] =
"[a-zA-Z0-9_]*".r ~ ":" ~ dataType ^^ {
case name ~ _ ~ tpe => StructField(name, tpe, nullable = true)
}
protected lazy val structType: Parser[DataType] =
"struct" ~> "<" ~> repsep(structField, ",") <~ ">" ^^ {
case fields => StructType(fields)
}
protected lazy val dataType: Parser[DataType] =
arrayType |
mapType |
structType |
primitiveType
def toDataType(metastoreType: String): DataType = {
parseAll(dataType, metastoreType) match {
case Success(result, _) => result
case failure: NoSuccess => sys.error(s"Unsupported dataType: $metastoreType")
}
}
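  // Usage sketch (illustrative only): round-tripping a metastore type string through toDataType
  // and toMetastoreType below. Note that toMetastoreType deliberately drops decimal precision.
  //
  //   CarbonMetastoreTypes.toDataType("struct<name:string,salary:decimal(10,2)>")
  //   //   => StructType(StructField("name", StringType), StructField("salary", DecimalType(10, 2)))
  //   CarbonMetastoreTypes.toMetastoreType(ArrayType(IntegerType))
  //   //   => "array<int>"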
def toMetastoreType(dt: DataType): String = {
dt match {
case ArrayType(elementType, _) => s"array<${ toMetastoreType(elementType) }>"
case StructType(fields) =>
s"struct<${
fields.map(f => s"${ f.name }:${ toMetastoreType(f.dataType) }")
.mkString(",")
}>"
case StringType => "string"
case FloatType => "float"
case IntegerType => "int"
case ShortType => "tinyint"
case DoubleType => "double"
case LongType => "bigint"
case BinaryType => "binary"
case BooleanType => "boolean"
case DecimalType() => "decimal"
case TimestampType => "timestamp"
case DateType => "date"
}
}
  }
}
/**
* Represents logical plan for one carbon table
*/
case class CarbonRelation(
databaseName: String,
tableName: String,
var metaData: CarbonMetaData,
tableMeta: TableMeta,
alias: Option[String])
extends LeafNode with MultiInstanceRelation {
def recursiveMethod(dimName: String, childDim: CarbonDimension): String = {
childDim.getDataType.toString.toLowerCase match {
case "array" => s"${
childDim.getColName.substring(dimName.length + 1)
}:array<${ getArrayChildren(childDim.getColName) }>"
case "struct" => s"${
childDim.getColName.substring(dimName.length + 1)
}:struct<${ getStructChildren(childDim.getColName) }>"
case dType => s"${ childDim.getColName.substring(dimName.length + 1) }:${ dType }"
}
}
def getArrayChildren(dimName: String): String = {
metaData.carbonTable.getChildren(dimName).asScala.map(childDim => {
childDim.getDataType.toString.toLowerCase match {
case "array" => s"array<${ getArrayChildren(childDim.getColName) }>"
case "struct" => s"struct<${ getStructChildren(childDim.getColName) }>"
case dType => addDecimalScaleAndPrecision(childDim, dType)
}
}).mkString(",")
}
def getStructChildren(dimName: String): String = {
metaData.carbonTable.getChildren(dimName).asScala.map(childDim => {
childDim.getDataType.toString.toLowerCase match {
case "array" => s"${
childDim.getColName.substring(dimName.length + 1)
}:array<${ getArrayChildren(childDim.getColName) }>"
case "struct" => s"${
childDim.getColName.substring(dimName.length + 1)
}:struct<${ metaData.carbonTable.getChildren(childDim.getColName)
.asScala.map(f => s"${ recursiveMethod(childDim.getColName, f) }").mkString(",")
}>"
case dType => s"${ childDim.getColName
.substring(dimName.length() + 1) }:${ addDecimalScaleAndPrecision(childDim, dType) }"
}
}).mkString(",")
}
override def newInstance(): LogicalPlan = {
CarbonRelation(databaseName, tableName, metaData, tableMeta, alias)
.asInstanceOf[this.type]
}
val dimensionsAttr = {
val sett = new LinkedHashSet(
tableMeta.carbonTable.getDimensionByTableName(tableMeta.carbonTableIdentifier.getTableName)
.asScala.asJava)
sett.asScala.toSeq.map(dim => {
val dimval = metaData.carbonTable
.getDimensionByName(metaData.carbonTable.getFactTableName, dim.getColName)
val output: DataType = dimval.getDataType
.toString.toLowerCase match {
case "array" =>
CarbonMetastoreTypes.toDataType(s"array<${ getArrayChildren(dim.getColName) }>")
case "struct" =>
CarbonMetastoreTypes.toDataType(s"struct<${ getStructChildren(dim.getColName) }>")
case dType =>
val dataType = addDecimalScaleAndPrecision(dimval, dType)
CarbonMetastoreTypes.toDataType(dataType)
}
AttributeReference(
dim.getColName,
output,
nullable = true)()
})
}
val measureAttr = {
val factTable = tableMeta.carbonTable.getFactTableName
new LinkedHashSet(
tableMeta.carbonTable.
getMeasureByTableName(tableMeta.carbonTable.getFactTableName).
asScala.asJava).asScala.toSeq
.map(x => AttributeReference(x.getColName, CarbonMetastoreTypes.toDataType(
metaData.carbonTable.getMeasureByName(factTable, x.getColName).getDataType.toString
.toLowerCase match {
case "int" => "long"
case "short" => "long"
case "decimal" => "decimal(" + x.getPrecision + "," + x.getScale + ")"
case others => others
}),
nullable = true)())
}
override val output = {
val columns = tableMeta.carbonTable.getCreateOrderColumn(tableMeta.carbonTable.getFactTableName)
.asScala
// convert each column to Attribute
columns.filter(!_.isInvisible).map { column =>
if (column.isDimesion()) {
val output: DataType = column.getDataType.toString.toLowerCase match {
case "array" =>
CarbonMetastoreTypes.toDataType(s"array<${getArrayChildren(column.getColName)}>")
case "struct" =>
CarbonMetastoreTypes.toDataType(s"struct<${getStructChildren(column.getColName)}>")
case dType =>
val dataType = addDecimalScaleAndPrecision(column, dType)
CarbonMetastoreTypes.toDataType(dataType)
}
AttributeReference(column.getColName, output, nullable = true )(
qualifier = Option(tableName + "." + column.getColName))
} else {
val output = CarbonMetastoreTypes.toDataType {
column.getDataType.toString
.toLowerCase match {
case "decimal" => "decimal(" + column.getColumnSchema.getPrecision + "," + column
.getColumnSchema.getScale + ")"
case others => others
}
}
AttributeReference(column.getColName, output, nullable = true)(
qualifier = Option(tableName + "." + column.getColName))
}
}
}
def addDecimalScaleAndPrecision(dimval: CarbonColumn, dataType: String): String = {
var dType = dataType
if (dimval.getDataType == DECIMAL) {
dType +=
"(" + dimval.getColumnSchema.getPrecision + "," + dimval.getColumnSchema.getScale + ")"
}
dType
}
// TODO: Use data from the footers.
override lazy val statistics = Statistics(sizeInBytes = this.sizeInBytes)
override def equals(other: Any): Boolean = {
other match {
case p: CarbonRelation =>
p.databaseName == databaseName && p.output == output && p.tableName == tableName
case _ => false
}
}
def addDecimalScaleAndPrecision(dimval: CarbonDimension, dataType: String): String = {
var dType = dataType
if (dimval.getDataType == DECIMAL) {
dType +=
"(" + dimval.getColumnSchema.getPrecision + "," + dimval.getColumnSchema.getScale + ")"
}
dType
}
private var tableStatusLastUpdateTime = 0L
private var sizeInBytesLocalValue = 0L
def sizeInBytes: Long = {
val tableStatusNewLastUpdatedTime = SegmentStatusManager.getTableStatusLastModifiedTime(
tableMeta.carbonTable.getAbsoluteTableIdentifier)
if (tableStatusLastUpdateTime != tableStatusNewLastUpdatedTime) {
val tablePath = CarbonStorePath.getCarbonTablePath(
tableMeta.storePath,
tableMeta.carbonTableIdentifier).getPath
val fileType = FileFactory.getFileType(tablePath)
if(FileFactory.isFileExist(tablePath, fileType)) {
tableStatusLastUpdateTime = tableStatusNewLastUpdatedTime
sizeInBytesLocalValue = FileFactory.getDirectorySize(tablePath)
}
}
sizeInBytesLocalValue
}
}
| mayunSaicmotor/incubator-carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonMetastore.scala | Scala | apache-2.0 | 35,751 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.model
import cc.factorie.variable.{DiscreteVar, TensorVar, Var}
import cc.factorie.{la, model}
import scala.reflect.ClassTag
abstract class Template2[N1<:Var,N2<:Var](implicit nm1:ClassTag[N1], nm2:ClassTag[N2]) extends Family2[N1,N2] with Template
{
val neighborClass1 = nm1.runtimeClass
val neighborClass2 = nm2.runtimeClass
def neighborClasses: Seq[Class[_]] = Seq(neighborClass1, neighborClass2)
def addLimitedDiscreteCurrentValuesIn12(variables:Iterable[Var]): Unit = {
if (classOf[DiscreteVar].isAssignableFrom(neighborClass1) && classOf[DiscreteVar].isAssignableFrom(neighborClass2))
for (variable <- variables; factor <- factors(variable)) factor.addLimitedDiscreteCurrentValues12 //limitedDiscreteValues.+=((factor._1.asInstanceOf[DiscreteVar].intValue, factor._2.asInstanceOf[DiscreteVar].intValue))
}
// override def limitDiscreteValuesIteratorAsIn(variables:Iterable[DiscreteVar]): Unit = {
// if (classOf[DiscreteVar].isAssignableFrom(neighborClass1) && classOf[DiscreteVar].isAssignableFrom(neighborClass2))
// for (variable <- variables; factor <- factors(variable)) limitedDiscreteValues.+=((factor._1.asInstanceOf[DiscreteVar].intValue, factor._2.asInstanceOf[DiscreteVar].intValue))
// }
final override def addFactors(v:Var, result:scala.collection.mutable.Set[model.Factor]): Unit = {
if (neighborClass1.isAssignableFrom(v.getClass)) result ++= unroll1(v.asInstanceOf[N1])
if (neighborClass2.isAssignableFrom(v.getClass)) result ++= unroll2(v.asInstanceOf[N2])
unroll(v) match { case fs:IterableSingleFactor[Factor] => result += fs.factor; case Nil => {}; case fs => result ++= fs }
}
  /** Override this method if you want to re-capture old unrollCascade functionality. */
def unroll(v:Var): Iterable[Factor] = Nil
def unroll1(v:N1): Iterable[FactorType]
def unroll2(v:N2): Iterable[FactorType]
def limitDiscreteValuesAsIn(vars:Iterable[DiscreteVar]): Unit = {
// Loop over vars separately from factors to avoid creating all factors for what could be very large data
(classOf[DiscreteVar].isAssignableFrom(neighborClass1), classOf[DiscreteVar].isAssignableFrom(neighborClass2)) match {
case (true, true) => {
for (v <- vars; factor <- factors(v).asInstanceOf[Iterable[Factor2[DiscreteVar,DiscreteVar]]]) {
if (limitedDiscreteValues12 eq null) limitedDiscreteValues12 = new la.SparseBinaryTensor2(factor._1.domain.dimensionSize, factor._2.domain.dimensionSize)
limitedDiscreteValues12.+=(factor._1.intValue, factor._2.intValue)
if (limitedDiscreteValues1 eq null) limitedDiscreteValues1 = new la.SparseBinaryTensor1(factor._1.domain.dimensionSize)
limitedDiscreteValues1.+=(factor._1.intValue)
}
}
case (true, false) => {
for (v <- vars; factor <- factors(v).asInstanceOf[Iterable[Factor2[DiscreteVar,DiscreteVar]]]) {
if (limitedDiscreteValues1 eq null) limitedDiscreteValues1 = new la.SparseBinaryTensor1(factor._1.domain.dimensionSize)
limitedDiscreteValues1.+=(factor._1.intValue)
}
}
case (false, true) => {
throw new Error("Not yet implemented.")
}
case (false, false) => {}
}
}
}
abstract class TupleTemplate2[N1<:Var:ClassTag,N2<:Var:ClassTag] extends Template2[N1,N2] with TupleFamily2[N1,N2]
abstract class TupleTemplateWithStatistics2[N1<:Var:ClassTag,N2<:Var:ClassTag] extends Template2[N1,N2] with TupleFamilyWithStatistics2[N1,N2]
abstract class TensorTemplate2[N1<:Var:ClassTag,N2<:Var:ClassTag] extends Template2[N1,N2] with TensorFamily2[N1,N2]
abstract class TensorTemplateWithStatistics2[N1<:TensorVar:ClassTag,N2<:TensorVar:ClassTag] extends Template2[N1,N2] with TensorFamilyWithStatistics2[N1,N2]
abstract class DotTemplate2[N1<:Var:ClassTag,N2<:Var:ClassTag] extends Template2[N1,N2] with DotFamily2[N1,N2]
abstract class DotTemplateWithStatistics2[N1<:TensorVar:ClassTag,N2<:TensorVar:ClassTag] extends Template2[N1,N2] with DotFamilyWithStatistics2[N1,N2]
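// Illustrative sketch (not part of the original source): a concrete binary template wires the two
// neighbor types together by implementing unroll1/unroll2. The `Label`/`Token` variable classes,
// their domains and the Weights(...) call are assumptions about the surrounding application code,
// not something defined in this file.
//
//   class LabelTokenTemplate extends DotTemplateWithStatistics2[Label, Token] {
//     val weights = Weights(new la.DenseTensor2(LabelDomain.size, TokenDomain.dimensionSize))
//     def unroll1(label: Label) = Factor(label, label.token) // factors reached from a Label
//     def unroll2(token: Token) = Factor(token.label, token) // factors reached from a Token
//   }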
/*
trait DiscreteFactorSettings2 extends Template {
this: VectorTemplate {
type TemplateType <: DotTemplate
type Neighbor1Type <: DiscreteVar
type Neighbor2Type <: DiscreteVar
type FactorType <: { def _1:DiscreteVariable ; def _2:DiscreteVariable }
} =>
lazy val ndd1: DiscreteDomain = throw new Error // TODO nd1.asInstanceOf[DiscreteDomain[DiscreteVar]]
lazy val ndsize1 = ndd1.size
lazy val ndd2: DiscreteDomain = throw new Error // TODO nd2.asInstanceOf[DiscreteDomain[DiscreteVar]]
lazy val ndsize2 = ndd2.size
// Managing settings iteration
override def hasSettingsIterator: Boolean = true
override def forSettings(factor:FactorType)(f: =>Unit): Unit = {
if (settingsSparsified) {
forIndex(sparseSettings1.length)(i => {
factor._1.set(i)(null)
forIndex(sparseSettings1(i).length)(j => {
factor._2.set(j)(null)
f
})
})
} else {
var i = 0
while (i < ndsize1) {
factor._1.set(i)(null)
var j = 0
while (j < ndsize2) {
factor._2.set(j)(null)
f
j += 1
}
}
}
}
// Call function f for each valid (possibly sparsified) variable value setting
// of the neighboring variables specified in 'vs'.
override def forSettingsOf(factor:FactorType, vs:Seq[Variable])(f: =>Unit): Unit = {
if (vs.size == 1) {
val v = vs.head
if (factor._1 eq v) {
// vary v1, keep v2 constant
val v = factor._1 // Get it with the correct type
if (settingsSparsified) {
val sparseSettings = sparseSettings2(factor._2.intValue)
forIndex(sparseSettings.length)(i => { v.set(sparseSettings(i))(null); f })
} else forIndex(ndsize1)(i => { v.set(i)(null); f })
} else if (factor._2 eq v) {
// vary v2, keep v1 constant
val v = factor._2 // Get it with the correct type
if (settingsSparsified) {
val sparseSettings = sparseSettings1(factor._1.intValue)
forIndex(sparseSettings.length)(i => { v.set(sparseSettings(i))(null); f })
} else forIndex(ndsize2)(i => { v.set(i)(null); f })
}
} else if (vs.size == 2) {
throw new Error("Not yet implemented.")
} else throw new Error("Asked to vary settings of too many variables.")
}
private var settingsSparsified = false
// Redundant storage of valid v1,v2 value pairs
private var sparseSettings1: Array[Array[Int]] = null // first index=v1, second index=v2
private var sparseSettings2: Array[Array[Int]] = null // first index=v2, second index=v1
// Initialize sparseSettings1 and sparseSettings2 to cover all values in factors touching the variables in 'vs'.
override def sparsifySettingsFor(vs:Iterable[Variable]): Unit = {
println("Template sparsifySettingsFor ndsize1="+ndsize1+" ndsize2="+ndsize2)
assert (ndsize1 > 0, "sparsifySettingsFor before Domain size properly set.")
assert (ndsize2 > 0, "sparsifySettingsFor before Domain size properly set.")
val sparse1 = new HashMap[Int,scala.collection.mutable.Set[Int]]
val sparse2 = new HashMap[Int,scala.collection.mutable.Set[Int]]
vs.foreach(v => {
this.factors(v).foreach(f => {
sparse1.getOrElseUpdate(f._1.intValue, new HashSet[Int]) += f._2.intValue
sparse2.getOrElseUpdate(f._2.intValue, new HashSet[Int]) += f._1.intValue
})
})
sparseSettings1 = new Array[Array[Int]](ndsize1)
sparseSettings2 = new Array[Array[Int]](ndsize2)
forIndex(sparseSettings1.length)(i => sparseSettings1(i) = sparse1.getOrElse(i, new HashSet[Int]).toArray)
forIndex(sparseSettings2.length)(i => sparseSettings2(i) = sparse2.getOrElse(i, new HashSet[Int]).toArray)
settingsSparsified = true
}
}
*/
| Craigacp/factorie | src/main/scala/cc/factorie/model/Template2.scala | Scala | apache-2.0 | 8,566 |
package cobase.user
import javax.inject.Inject
import cobase.DBTableDefinitions
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.api.util.PasswordInfo
import com.mohiva.play.silhouette.impl.daos.DelegableAuthInfoDAO
import play.api.db.slick.{DatabaseConfigProvider, HasDatabaseConfigProvider}
import play.api.libs.concurrent.Execution.Implicits._
import slick.driver.JdbcProfile
import scala.concurrent.Future
/**
* The DAO to store the password information.
*/
class PasswordInfoDAO @Inject() (protected val dbConfigProvider: DatabaseConfigProvider) extends DelegableAuthInfoDAO[PasswordInfo] with HasDatabaseConfigProvider[JdbcProfile] with DBTableDefinitions {
import driver.api._
protected def passwordInfoQuery(loginInfo: LoginInfo) = for {
dbLoginInfo <- loginInfoQuery(loginInfo)
dbPasswordInfo <- slickPasswordInfos if dbPasswordInfo.loginInfoId === dbLoginInfo.id
} yield dbPasswordInfo
// Use subquery workaround instead of join to get authinfo because slick only supports selecting
// from a single table for update/delete queries (https://github.com/slick/slick/issues/684).
protected def passwordInfoSubQuery(loginInfo: LoginInfo) =
slickPasswordInfos.filter(_.loginInfoId in loginInfoQuery(loginInfo).map(_.id))
protected def addAction(loginInfo: LoginInfo, authInfo: PasswordInfo) =
loginInfoQuery(loginInfo).result.head.flatMap { dbLoginInfo =>
slickPasswordInfos +=
DBPasswordInfo(authInfo.hasher, authInfo.password, authInfo.salt, dbLoginInfo.id.get)
}.transactionally
protected def updateAction(loginInfo: LoginInfo, authInfo: PasswordInfo) =
passwordInfoSubQuery(loginInfo).
map(dbPasswordInfo => (dbPasswordInfo.hasher, dbPasswordInfo.password, dbPasswordInfo.salt)).
update((authInfo.hasher, authInfo.password, authInfo.salt))
/**
* Finds the auth info which is linked with the specified login info.
*
* @param loginInfo The linked login info.
* @return The retrieved auth info or None if no auth info could be retrieved for the given login info.
*/
def find(loginInfo: LoginInfo): Future[Option[PasswordInfo]] = {
db.run(passwordInfoQuery(loginInfo).result.headOption).map { dbPasswordInfoOption =>
dbPasswordInfoOption.map(dbPasswordInfo =>
PasswordInfo(dbPasswordInfo.hasher, dbPasswordInfo.password, dbPasswordInfo.salt))
}
}
/**
* Adds new auth info for the given login info.
*
* @param loginInfo The login info for which the auth info should be added.
* @param authInfo The auth info to add.
* @return The added auth info.
*/
def add(loginInfo: LoginInfo, authInfo: PasswordInfo): Future[PasswordInfo] =
db.run(addAction(loginInfo, authInfo)).map(_ => authInfo)
/**
* Updates the auth info for the given login info.
*
* @param loginInfo The login info for which the auth info should be updated.
* @param authInfo The auth info to update.
* @return The updated auth info.
*/
def update(loginInfo: LoginInfo, authInfo: PasswordInfo): Future[PasswordInfo] =
db.run(updateAction(loginInfo, authInfo)).map(_ => authInfo)
/**
* Saves the auth info for the given login info.
*
* This method either adds the auth info if it doesn't exists or it updates the auth info
* if it already exists.
*
* @param loginInfo The login info for which the auth info should be saved.
* @param authInfo The auth info to save.
* @return The saved auth info.
*/
def save(loginInfo: LoginInfo, authInfo: PasswordInfo): Future[PasswordInfo] = {
val query = loginInfoQuery(loginInfo).joinLeft(slickPasswordInfos).on(_.id === _.loginInfoId)
val action = query.result.head.flatMap {
case (dbLoginInfo, Some(dbPasswordInfo)) => updateAction(loginInfo, authInfo)
case (dbLoginInfo, None) => addAction(loginInfo, authInfo)
}
db.run(action).map(_ => authInfo)
}
/**
* Removes the auth info for the given login info.
*
* @param loginInfo The login info for which the auth info should be removed.
* @return A future to wait for the process to be completed.
*/
def remove(loginInfo: LoginInfo): Future[Unit] =
db.run(passwordInfoSubQuery(loginInfo).delete).map(_ => ())
}
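// Usage sketch (illustrative, not part of the original file): the DAO is normally bound behind
// Silhouette's auth-info abstraction in the DI module; the exact binding and repository names
// below are assumptions about the application's wiring.
//
//   bind[DelegableAuthInfoDAO[PasswordInfo]].to[PasswordInfoDAO]
//   // later, e.g. during sign-up:
//   //   authInfoRepository.add(loginInfo, passwordHasher.hash(plainPassword))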
| Cobase/cobase-pro | app/cobase/user/PasswordInfoDAO.scala | Scala | mit | 4,254 |
package io.getquill.context.jdbc
import java.sql.{ Date, Timestamp, Types }
import java.time.{ LocalDate, LocalDateTime }
import java.util.{ Calendar, TimeZone }
import java.{ sql, util }
trait Encoders {
this: JdbcContextBase[_, _] =>
type Encoder[T] = JdbcEncoder[T]
protected val dateTimeZone = TimeZone.getDefault
case class JdbcEncoder[T](sqlType: Int, encoder: BaseEncoder[T]) extends BaseEncoder[T] {
override def apply(index: Index, value: T, row: PrepareRow) =
encoder(index + 1, value, row)
}
def encoder[T](sqlType: Int, f: (Index, T, PrepareRow) => Unit): Encoder[T] =
JdbcEncoder(sqlType, (index: Index, value: T, row: PrepareRow) => {
f(index, value, row)
row
})
def encoder[T](sqlType: Int, f: PrepareRow => (Index, T) => Unit): Encoder[T] =
encoder(sqlType, (index: Index, value: T, row: PrepareRow) => f(row)(index, value))
implicit def mappedEncoder[I, O](implicit mapped: MappedEncoding[I, O], e: Encoder[O]): Encoder[I] =
JdbcEncoder(e.sqlType, mappedBaseEncoder(mapped, e.encoder))
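  // Usage sketch (illustrative): mappedEncoder derives an Encoder for a user-defined wrapper type
  // from an existing one. `UserId` below is a hypothetical value class, not part of this trait.
  //
  //   case class UserId(value: String) extends AnyVal
  //   implicit val userIdEncoding: MappedEncoding[UserId, String] = MappedEncoding(_.value)
  //   // with the encoding in scope, Encoder[UserId] is derived from stringEncoder via mappedEncoder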
private[this] val nullEncoder: Encoder[Int] = encoder(Types.INTEGER, _.setNull)
implicit def optionEncoder[T](implicit d: Encoder[T]): Encoder[Option[T]] =
JdbcEncoder(
d.sqlType,
(index, value, row) =>
value match {
case Some(v) => d.encoder(index, v, row)
case None => nullEncoder.encoder(index, d.sqlType, row)
}
)
implicit val stringEncoder: Encoder[String] = encoder(Types.VARCHAR, _.setString)
implicit val bigDecimalEncoder: Encoder[BigDecimal] =
encoder(Types.NUMERIC, (index, value, row) => row.setBigDecimal(index, value.bigDecimal))
implicit val byteEncoder: Encoder[Byte] = encoder(Types.TINYINT, _.setByte)
implicit val shortEncoder: Encoder[Short] = encoder(Types.SMALLINT, _.setShort)
implicit val intEncoder: Encoder[Int] = encoder(Types.INTEGER, _.setInt)
implicit val longEncoder: Encoder[Long] = encoder(Types.BIGINT, _.setLong)
implicit val floatEncoder: Encoder[Float] = encoder(Types.FLOAT, _.setFloat)
implicit val doubleEncoder: Encoder[Double] = encoder(Types.DOUBLE, _.setDouble)
implicit val byteArrayEncoder: Encoder[Array[Byte]] = encoder(Types.VARBINARY, _.setBytes)
implicit val dateEncoder: Encoder[util.Date] =
encoder(Types.TIMESTAMP, (index, value, row) =>
row.setTimestamp(index, new sql.Timestamp(value.getTime), Calendar.getInstance(dateTimeZone)))
implicit val localDateEncoder: Encoder[LocalDate] =
encoder(Types.DATE, (index, value, row) =>
row.setDate(index, Date.valueOf(value), Calendar.getInstance(dateTimeZone)))
implicit val localDateTimeEncoder: Encoder[LocalDateTime] =
encoder(Types.TIMESTAMP, (index, value, row) =>
row.setTimestamp(index, Timestamp.valueOf(value), Calendar.getInstance(dateTimeZone)))
}
| mentegy/quill | quill-jdbc/src/main/scala/io/getquill/context/jdbc/Encoders.scala | Scala | apache-2.0 | 2,844 |
package com.twitter.finagle.memcached.unit
import com.twitter.concurrent.Broker
import com.twitter.conversions.time._
import com.twitter.finagle._
import com.twitter.finagle.memcached._
import com.twitter.finagle.service.Backoff
import com.twitter.finagle.service.exp.FailureAccrualPolicy
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.util.{Await, Future, MockTimer, Time}
import org.junit.runner.RunWith
import org.mockito.Mockito.{times, verify, when}
import org.mockito.Matchers
import org.mockito.Matchers._
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
class KetamaFailureAccrualFactoryTest extends FunSuite with MockitoSugar {
class Helper(
ejectFailedHost: Boolean,
serviceRep: Future[Int] = Future.exception(new Exception),
underlyingStatus: Status = Status.Open)
{
val underlyingService = mock[Service[Int, Int]]
when(underlyingService.close(any[Time])) thenReturn Future.Done
when(underlyingService.status) thenReturn underlyingStatus
when(underlyingService(Matchers.anyInt)) thenReturn serviceRep
val underlying = mock[ServiceFactory[Int, Int]]
when(underlying.close(any[Time])) thenReturn Future.Done
when(underlying.status) thenReturn underlyingStatus
when(underlying()) thenReturn Future.value(underlyingService)
val key = mock[KetamaClientKey]
val broker = new Broker[NodeHealth]
val timer = new MockTimer
val factory =
new KetamaFailureAccrualFactory[Int, Int](
underlying, FailureAccrualPolicy.consecutiveFailures(3, Backoff.const(10.seconds)), timer, key, broker, ejectFailedHost, NullStatsReceiver)
val service = Await.result(factory())
verify(underlying)()
}
test("fail immediately after consecutive failures, revive after markDeadFor duration") {
val h = new Helper(false)
import h._
Time.withCurrentTimeFrozen { timeControl =>
intercept[Exception] {
Await.result(service(123))
}
intercept[Exception] {
Await.result(service(123))
}
assert(factory.isAvailable)
assert(service.isAvailable)
// triggers markDead
intercept[Exception] {
Await.result(service(123))
}
assert(!factory.isAvailable)
assert(!service.isAvailable)
assert(broker.recv.sync().isDefined == false)
// skips dispatch
intercept[FailureAccrualException] {
Await.result(factory())
}
verify(underlyingService, times(3))(123)
timeControl.advance(10.seconds)
timer.tick()
// revives after duration
assert(factory.isAvailable)
assert(service.isAvailable)
assert(broker.recv.sync().isDefined == false)
when(underlyingService(123)) thenReturn Future.value(123)
assert(Await.result(service(123)) == 123)
// failures # is reset to 0
intercept[Exception] {
Await.result(service(456))
}
assert(factory.isAvailable)
assert(service.isAvailable)
verify(underlyingService, times(4))(123)
verify(underlyingService, times(1))(456)
}
}
test("busy state of the underlying serviceFactory does not trigger FailureAccrualException") {
val h = new Helper(false, Future.exception(new Exception), Status.Busy)
import h._
Time.withCurrentTimeFrozen { timeControl =>
intercept[Exception] {
Await.result(service(123))
}
intercept[Exception] {
Await.result(service(123))
}
assert(!factory.isAvailable)
assert(!service.isAvailable)
// still dispatches
verify(underlyingService, times(2))(123)
// triggers markDead by the 3rd failure
intercept[Exception] {
Await.result(service(123))
}
assert(!factory.isAvailable)
assert(!service.isAvailable)
assert(broker.recv.sync().isDefined == false)
// skips dispatch after consecutive failures
intercept[FailureAccrualException] {
Await.result(factory())
}
verify(underlyingService, times(3))(123)
}
}
test("eject and revive failed host when ejectFailedHost=true") {
val h = new Helper(true)
import h._
Time.withCurrentTimeFrozen { timeControl =>
intercept[Exception] {
Await.result(service(123))
}
intercept[Exception] {
Await.result(service(123))
}
assert(factory.isAvailable)
assert(service.isAvailable)
// triggers markDead
intercept[Exception] {
Await.result(service(123))
}
assert(!factory.isAvailable)
assert(!service.isAvailable)
// ejects
val recv = broker.recv.sync()
assert(Await.result(recv) == NodeMarkedDead(key))
timeControl.advance(10.seconds)
timer.tick()
// Probing, not revived yet.
assert(factory.isAvailable)
assert(service.isAvailable)
when(underlyingService(123)) thenReturn Future.value(321)
Await.result(service(123))
// A good dispatch; revived
assert(factory.isAvailable)
assert(service.isAvailable)
val recv2 = broker.recv.sync()
assert(Await.result(recv2) == NodeRevived(key))
}
}
test("treat successful response and cancelled exceptions as success") {
val successes =
Seq(
Future.value(123),
Future.exception(new CancelledRequestException(new Exception)),
Future.exception(new CancelledConnectionException(new Exception)),
Future.exception(ChannelWriteException(new CancelledRequestException(new Exception))),
Future.exception(ChannelWriteException(new CancelledConnectionException(new Exception))))
successes.foreach { rep =>
val h = new Helper(false, rep)
import h._
      def assertResponse(rep: Future[Int]) {
if (rep.isReturn) assert(Await.result(service(123)) == rep.get)
else intercept[Exception](Await.result(service(123)))
}
Time.withCurrentTimeFrozen { _ =>
        assertResponse(rep)
        assertResponse(rep)
assert(factory.isAvailable)
assert(service.isAvailable)
// not trigger markDead
        assertResponse(rep)
assert(factory.isAvailable)
assert(service.isAvailable)
}
}
}
}
| a-manumohan/finagle | finagle-memcached/src/test/scala/com/twitter/finagle/memcached/unit/KetamaFailureAccrualFactoryTest.scala | Scala | apache-2.0 | 6,315 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.scalar
import org.apache.ignite.{IgniteCache, Ignite}
import org.apache.ignite.cluster.ClusterGroup
import org.apache.ignite.compute.ComputeJob
import org.apache.ignite.internal.util.lang._
import org.apache.ignite.lang._
import org.apache.ignite.scalar.lang._
import org.apache.ignite.scalar.pimps._
import org.jetbrains.annotations._
import java.util.TimerTask
import scala.collection._
import scala.util.control.Breaks._
/**
* ==Overview==
* Mixin for `scalar` object providing `implicit` and `explicit` conversions between
* Java and Scala Ignite components.
*
* It is very important to review this class as it defines what `implicit` conversions
* will take place when using Scalar. Note that object `scalar` mixes in this
* trait and therefore brings with it all implicits into the scope.
*/
trait ScalarConversions {
/**
* Helper transformer from Java collection to Scala sequence.
*
* @param c Java collection to transform.
* @param f Transforming function.
*/
def toScalaSeq[A, B](@Nullable c: java.util.Collection[A], f: A => B): Seq[B] = {
assert(f != null)
if (c == null)
return null
val iter = c.iterator
val lst = new mutable.ListBuffer[B]
while (iter.hasNext) lst += f(iter.next)
lst.toSeq
}
/**
* Helper transformer from Java iterator to Scala sequence.
*
* @param i Java iterator to transform.
* @param f Transforming function.
*/
def toScalaSeq[A, B](@Nullable i: java.util.Iterator[A], f: A => B): Seq[B] = {
assert(f != null)
if (i == null)
return null
val lst = new mutable.ListBuffer[B]
while (i.hasNext) lst += f(i.next)
lst.toSeq
}
/**
* Helper converter from Java iterator to Scala sequence.
*
* @param i Java iterator to convert.
*/
def toScalaSeq[A](@Nullable i: java.util.Iterator[A]): Seq[A] =
toScalaSeq(i, (e: A) => e)
/**
* Helper transformer from Java iterable to Scala sequence.
*
* @param i Java iterable to transform.
* @param f Transforming function.
*/
def toScalaSeq[A, B](@Nullable i: java.lang.Iterable[A], f: A => B): Seq[B] = {
assert(f != null)
if (i == null) return null
toScalaSeq(i.iterator, f)
}
/**
* Helper converter from Java iterable to Scala sequence.
*
* @param i Java iterable to convert.
*/
def toScalaSeq[A](@Nullable i: java.lang.Iterable[A]): Seq[A] =
toScalaSeq(i, (e: A) => e)
// /**
// * Helper converter from Java collection to Scala sequence.
// *
// * @param c Java collection to convert.
// */
// def toScalaSeq[A](@Nullable c: java.util.Collection[A]): Seq[A] =
// toScalaSeq(c, (e: A) => e)
/**
* Helper converter from Java entry collection to Scala iterable of pair.
*
* @param c Java collection to convert.
*/
def toScalaItr[K, V](@Nullable c: java.util.Collection[java.util.Map.Entry[K, V]]): Iterable[(K, V)] = {
val lst = new mutable.ListBuffer[(K, V)]
c.toArray().foreach {
case f: java.util.Map.Entry[K, V] => lst += Tuple2(f.getKey(), f.getValue())
}
lst
}
/**
* Helper transformer from Scala sequence to Java collection.
*
* @param s Scala sequence to transform.
* @param f Transforming function.
*/
def toJavaCollection[A, B](@Nullable s: Seq[A], f: A => B): java.util.Collection[B] = {
assert(f != null)
if (s == null) return null
val lst = new java.util.ArrayList[B](s.length)
s.foreach(a => lst.add(f(a)))
lst
}
/**
* Helper converter from Scala sequence to Java collection.
*
* @param s Scala sequence to convert.
*/
def toJavaCollection[A](@Nullable s: Seq[A]): java.util.Collection[A] =
toJavaCollection(s, (e: A) => e)
/**
* Helper transformer from Scala iterator to Java collection.
*
* @param i Scala iterator to transform.
* @param f Transforming function.
*/
def toJavaCollection[A, B](@Nullable i: Iterator[A], f: A => B): java.util.Collection[B] = {
assert(f != null)
if (i == null) return null
val lst = new java.util.ArrayList[B]
i.foreach(a => lst.add(f(a)))
lst
}
/**
* Converts from `Symbol` to `String`.
*
* @param s Symbol to convert.
*/
implicit def fromSymbol(s: Symbol): String =
if (s == null)
null
else
s.toString().substring(1)
/**
* Introduction of `^^` operator for `Any` type that will call `break`.
*
* @param v `Any` value.
*/
implicit def toReturnable(v: Any) = new {
// Ignore the warning below.
def ^^ {
break()
}
}
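    // Usage sketch (illustrative): `^^` just delegates to break(), so it is only meaningful inside
    // a `breakable` block (scala.util.control.Breaks is imported at the top of this file).
    //
    //   breakable {
    //       for (e <- elems)
    //           if (matches(e))
    //               e ^^ // exits the enclosing breakable block
    //   }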
/**
* Explicit converter for `TimerTask`. Note that since `TimerTask` implements `Runnable`
* we can't use the implicit conversion.
*
* @param f Closure to convert.
* @return Time task instance.
*/
def timerTask(f: => Unit): TimerTask = new TimerTask {
def run() {
f
}
}
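    // Usage sketch (illustrative): scheduling a closure with java.util.Timer through the explicit
    // `timerTask` converter above.
    //
    //   val timer = new java.util.Timer()
    //   timer.schedule(timerTask { println("tick") }, 1000, 1000)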
/**
* Extension for `Tuple2`.
*
* @param t Tuple to improve.
*/
implicit def toTuple2x[T1, T2](t: (T1, T2)) = new {
def isSome: Boolean =
t._1 != null || t._2 != null
def isNone: Boolean =
!isSome
def isAll: Boolean =
t._1 != null && t._2 != null
def opt1: Option[T1] =
Option(t._1)
def opt2: Option[T2] =
Option(t._2)
}
/**
* Extension for `Tuple3`.
*
* @param t Tuple to improve.
*/
implicit def toTuple3x[T1, T2, T3](t: (T1, T2, T3)) = new {
def isSome: Boolean =
t._1 != null || t._2 != null || t._3 != null
def isNone: Boolean =
!isSome
def isAll: Boolean =
t._1 != null && t._2 != null && t._3 != null
def opt1: Option[T1] =
Option(t._1)
def opt2: Option[T2] =
Option(t._2)
def opt3: Option[T3] =
Option(t._3)
}
// /**
// * Implicit converter from cache KV-pair predicate to cache entry predicate. Note that predicate
// * will use peek()
// *
// * @param p Cache KV-pair predicate to convert.
// */
// implicit def toEntryPred[K, V](p: (K, V) => Boolean): (_ >: Cache.Entry[K, V]) => Boolean =
// (e: Cache.Entry[K, V]) => p(e.getKey, e.getValue)
/**
* Implicit converter from vararg of one-argument Scala functions to Java `GridPredicate`s.
*
* @param s Sequence of one-argument Scala functions to convert.
*/
implicit def toVarArgs[T](s: Seq[T => Boolean]): Seq[IgnitePredicate[_ >: T]] =
s.map((f: T => Boolean) => toPredicate(f))
/**
* Implicit converter from vararg of two-argument Scala functions to Java `GridPredicate2`s.
*
* @param s Sequence of two-argument Scala functions to convert.
*/
implicit def toVarArgs2[T1, T2](s: Seq[(T1, T2) => Boolean]): Seq[IgniteBiPredicate[_ >: T1, _ >: T2]] =
s.map((f: (T1, T2) => Boolean) => toPredicate2(f))
/**
* Implicit converter from vararg of three-argument Scala functions to Java `GridPredicate3`s.
*
* @param s Sequence of three-argument Scala functions to convert.
*/
implicit def toVarArgs3[T1, T2, T3](s: Seq[(T1, T2, T3) => Boolean]):
Seq[GridPredicate3[_ >: T1, _ >: T2, _ >: T3]] =
s.map((f: (T1, T2, T3) => Boolean) => toPredicate3(f))
/**
* Implicit converter from Scala function and Java `GridReducer`.
*
* @param r Scala function to convert.
*/
implicit def toReducer[E, R](r: Seq[E] => R): IgniteReducer[E, R] =
new ScalarReducer(r)
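    // Usage sketch (illustrative): any `Seq[E] => R` can be used where an IgniteReducer is
    // expected, because the implicit above wraps it in a ScalarReducer.
    //
    //   val sum: Seq[Int] => Int = _.sum
    //   val rdc: IgniteReducer[Int, Int] = sum // implicit toReducer applies here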
/**
* Implicit converter from Java `GridReducer` to Scala function.
*
* @param r Java `GridReducer` to convert.
*/
implicit def fromReducer[E, R](r: IgniteReducer[E, R]): Seq[E] => R =
new ScalarReducerFunction[E, R](r)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param r Java-side reducer to pimp.
*/
implicit def reducerDotScala[E, R](r: IgniteReducer[E, R]) = new {
def scala: Seq[E] => R =
fromReducer(r)
}
/**
* Implicit converter from Scala function and Java `GridReducer2`.
*
* @param r Scala function to convert.
*/
implicit def toReducer2[E1, E2, R](r: (Seq[E1], Seq[E2]) => R): IgniteReducer2[E1, E2, R] =
new ScalarReducer2(r)
/**
* Implicit converter from Java `GridReducer2` to Scala function.
*
* @param r Java `GridReducer2` to convert.
*/
implicit def fromReducer2[E1, E2, R](r: IgniteReducer2[E1, E2, R]): (Seq[E1], Seq[E2]) => R =
new ScalarReducer2Function[E1, E2, R](r)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param r Java-side reducer to pimp.
*/
implicit def reducer2DotScala[E1, E2, R](r: IgniteReducer2[E1, E2, R]) = new {
def scala: (Seq[E1], Seq[E2]) => R =
fromReducer2(r)
}
/**
* Implicit converter from Scala function and Java `GridReducer3`.
*
* @param r Scala function to convert.
*/
implicit def toReducer3[E1, E2, E3, R](r: (Seq[E1], Seq[E2], Seq[E3]) => R): IgniteReducer3[E1, E2, E3, R] =
new ScalarReducer3(r)
/**
* Implicit converter from Java `GridReducer3` to Scala function.
*
* @param r Java `GridReducer3` to convert.
*/
implicit def fromReducer3[E1, E2, E3, R](r: IgniteReducer3[E1, E2, E3, R]): (Seq[E1], Seq[E2], Seq[E3]) => R =
new ScalarReducer3Function[E1, E2, E3, R](r)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param r Java-side reducer to pimp.
*/
implicit def reducer3DotScala[E1, E2, E3, R](r: IgniteReducer3[E1, E2, E3, R]) = new {
def scala: (Seq[E1], Seq[E2], Seq[E3]) => R =
fromReducer3(r)
}
/**
* Implicit converter from `Grid` to `ScalarGridPimp` "pimp".
*
* @param impl Grid to convert.
*/
implicit def toScalarGrid(impl: Ignite): ScalarGridPimp =
ScalarGridPimp(impl)
/**
* Implicit converter from `GridProjection` to `ScalarProjectionPimp` "pimp".
*
* @param impl Grid projection to convert.
*/
implicit def toScalarProjection(impl: ClusterGroup): ScalarProjectionPimp[ClusterGroup] =
ScalarProjectionPimp(impl)
/**
* Implicit converter from `Cache` to `ScalarCachePimp` "pimp".
*
* @param impl Grid cache to convert.
*/
implicit def toScalarCache[K, V](impl: IgniteCache[K, V]): ScalarCachePimp[K, V] =
ScalarCachePimp[K, V](impl)
/**
* Implicit converter from Scala function to `ComputeJob`.
*
* @param f Scala function to convert.
*/
implicit def toJob(f: () => Any): ComputeJob =
new ScalarJob(f)
/**
* Implicit converter from Scala tuple to `GridTuple2`.
*
* @param t Scala tuple to convert.
*/
implicit def toTuple2[A, B](t: (A, B)): IgniteBiTuple[A, B] =
new IgniteBiTuple[A, B](t._1, t._2)
/**
* Implicit converter from `GridTuple2` to Scala tuple.
*
* @param t `GridTuple2` to convert.
*/
implicit def fromTuple2[A, B](t: IgniteBiTuple[A, B]): (A, B) =
(t.get1, t.get2)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param t Java-side tuple to pimp.
*/
implicit def tuple2DotScala[A, B](t: IgniteBiTuple[A, B]) = new {
def scala: (A, B) =
fromTuple2(t)
}
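    // Usage sketch (illustrative): Scala tuples and IgniteBiTuple convert in both directions,
    // either implicitly or via the explicit `.scala` pimp above.
    //
    //   val jt: IgniteBiTuple[String, Int] = ("a", 1) // via toTuple2
    //   val st: (String, Int) = jt.scala              // via tuple2DotScala / fromTuple2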
/**
* Implicit converter from Scala tuple to `GridTuple3`.
*
* @param t Scala tuple to convert.
*/
implicit def toTuple3[A, B, C](t: (A, B, C)): GridTuple3[A, B, C] =
new GridTuple3[A, B, C](t._1, t._2, t._3)
/**
* Implicit converter from `GridTuple3` to Scala tuple.
*
* @param t `GridTuple3` to convert.
*/
implicit def fromTuple3[A, B, C](t: GridTuple3[A, B, C]): (A, B, C) =
(t.get1, t.get2, t.get3)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param t Java-side tuple to pimp.
*/
implicit def tuple3DotScala[A, B, C](t: GridTuple3[A, B, C]) = new {
def scala: (A, B, C) =
fromTuple3(t)
}
/**
* Implicit converter from Scala tuple to `GridTuple4`.
*
* @param t Scala tuple to convert.
*/
implicit def toTuple4[A, B, C, D](t: (A, B, C, D)): GridTuple4[A, B, C, D] =
new GridTuple4[A, B, C, D](t._1, t._2, t._3, t._4)
/**
* Implicit converter from `GridTuple4` to Scala tuple.
*
* @param t `GridTuple4` to convert.
*/
implicit def fromTuple4[A, B, C, D](t: GridTuple4[A, B, C, D]): (A, B, C, D) =
(t.get1, t.get2, t.get3, t.get4)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param t Java-side tuple to pimp.
*/
implicit def tuple4DotScala[A, B, C, D](t: GridTuple4[A, B, C, D]) = new {
def scala: (A, B, C, D) =
fromTuple4(t)
}
/**
* Implicit converter from Scala tuple to `GridTuple5`.
*
* @param t Scala tuple to convert.
*/
implicit def toTuple5[A, B, C, D, E](t: (A, B, C, D, E)): GridTuple5[A, B, C, D, E] =
new GridTuple5[A, B, C, D, E](t._1, t._2, t._3, t._4, t._5)
/**
* Implicit converter from `GridTuple5` to Scala tuple.
*
* @param t `GridTuple5` to convert.
*/
implicit def fromTuple5[A, B, C, D, E](t: GridTuple5[A, B, C, D, E]): (A, B, C, D, E) =
(t.get1, t.get2, t.get3, t.get4, t.get5)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param t Java-side tuple to pimp.
*/
implicit def tuple5DotScala[A, B, C, D, E](t: GridTuple5[A, B, C, D, E]) = new {
def scala: (A, B, C, D, E) =
fromTuple5(t)
}
/**
* Implicit converter from Scala function to `GridInClosure`.
*
* @param f Scala function to convert.
*/
implicit def toInClosure[T](f: T => Unit): IgniteInClosure[T] =
f match {
case (p: ScalarInClosureFunction[T]) => p.inner
case _ => new ScalarInClosure[T](f)
}
/**
* Implicit converter from Scala function to `GridInClosureX`.
*
* @param f Scala function to convert.
*/
def toInClosureX[T](f: T => Unit): IgniteInClosureX[T] =
f match {
case (p: ScalarInClosureXFunction[T]) => p.inner
case _ => new ScalarInClosureX[T](f)
}
/**
* Implicit converter from `GridInClosure` to Scala wrapping function.
*
* @param f Grid closure to convert.
*/
implicit def fromInClosure[T](f: IgniteInClosure[T]): T => Unit =
new ScalarInClosureFunction[T](f)
/**
* Implicit converter from `GridInClosureX` to Scala wrapping function.
*
* @param f Grid closure to convert.
*/
implicit def fromInClosureX[T](f: IgniteInClosureX[T]): T => Unit =
new ScalarInClosureXFunction[T](f)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def inClosureDotScala[T](f: IgniteInClosure[T]) = new {
def scala: T => Unit =
fromInClosure(f)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def inClosureXDotScala[T](f: IgniteInClosureX[T]) = new {
def scala: T => Unit =
fromInClosureX(f)
}
/**
* Implicit converter from Scala function to `GridInClosure2`.
*
* @param f Scala function to convert.
*/
implicit def toInClosure2[T1, T2](f: (T1, T2) => Unit): IgniteBiInClosure[T1, T2] =
f match {
case (p: ScalarInClosure2Function[T1, T2]) => p.inner
case _ => new ScalarInClosure2[T1, T2](f)
}
/**
* Implicit converter from Scala function to `GridInClosure2X`.
*
* @param f Scala function to convert.
*/
implicit def toInClosure2X[T1, T2](f: (T1, T2) => Unit): IgniteInClosure2X[T1, T2] =
f match {
case (p: ScalarInClosure2XFunction[T1, T2]) => p.inner
case _ => new ScalarInClosure2X[T1, T2](f)
}
/**
* Implicit converter from `GridInClosure2` to Scala wrapping function.
*
* @param f Grid closure to convert.
*/
implicit def fromInClosure2[T1, T2](f: IgniteBiInClosure[T1, T2]): (T1, T2) => Unit =
new ScalarInClosure2Function(f)
/**
* Implicit converter from `GridInClosure2X` to Scala wrapping function.
*
* @param f Grid closure to convert.
*/
implicit def fromInClosure2X[T1, T2](f: IgniteInClosure2X[T1, T2]): (T1, T2) => Unit =
new ScalarInClosure2XFunction(f)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def inClosure2DotScala[T1, T2](f: IgniteBiInClosure[T1, T2]) = new {
def scala: (T1, T2) => Unit =
fromInClosure2(f)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def inClosure2XDotScala[T1, T2](f: IgniteInClosure2X[T1, T2]) = new {
def scala: (T1, T2) => Unit =
fromInClosure2X(f)
}
/**
* Implicit converter from Scala function to `GridInClosure3`.
*
* @param f Scala function to convert.
*/
implicit def toInClosure3[T1, T2, T3](f: (T1, T2, T3) => Unit): GridInClosure3[T1, T2, T3] =
f match {
case (p: ScalarInClosure3Function[T1, T2, T3]) => p.inner
case _ => new ScalarInClosure3[T1, T2, T3](f)
}
/**
* Implicit converter from Scala function to `GridInClosure3X`.
*
* @param f Scala function to convert.
*/
def toInClosure3X[T1, T2, T3](f: (T1, T2, T3) => Unit): GridInClosure3X[T1, T2, T3] =
f match {
case (p: ScalarInClosure3XFunction[T1, T2, T3]) => p.inner
case _ => new ScalarInClosure3X[T1, T2, T3](f)
}
/**
* Implicit converter from `GridInClosure3` to Scala wrapping function.
*
* @param f Grid closure to convert.
*/
implicit def fromInClosure3[T1, T2, T3](f: GridInClosure3[T1, T2, T3]): (T1, T2, T3) => Unit =
new ScalarInClosure3Function(f)
/**
* Implicit converter from `GridInClosure3X` to Scala wrapping function.
*
* @param f Grid closure to convert.
*/
implicit def fromInClosure3X[T1, T2, T3](f: GridInClosure3X[T1, T2, T3]): (T1, T2, T3) => Unit =
new ScalarInClosure3XFunction(f)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def inClosure3DotScala[T1, T2, T3](f: GridInClosure3[T1, T2, T3]) = new {
def scala: (T1, T2, T3) => Unit =
fromInClosure3(f)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def inClosure3XDotScala[T1, T2, T3](f: GridInClosure3X[T1, T2, T3]) = new {
def scala: (T1, T2, T3) => Unit =
fromInClosure3X(f)
}
/**
* Implicit converter from Scala function to `GridOutClosure`.
*
* @param f Scala function to convert.
*/
implicit def toCallable[R](f: () => R): IgniteCallable[R] =
f match {
case p: ScalarOutClosureFunction[R] => p.inner
case _ => new ScalarOutClosure[R](f)
}
/**
* Implicit converter from Scala function to `GridOutClosureX`.
*
* @param f Scala function to convert.
*/
def toOutClosureX[R](f: () => R): IgniteOutClosureX[R] =
f match {
case (p: ScalarOutClosureXFunction[R]) => p.inner
case _ => new ScalarOutClosureX[R](f)
}
/**
* Implicit converter from `GridOutClosure` to Scala wrapping function.
*
* @param f Grid closure to convert.
*/
implicit def fromOutClosure[R](f: IgniteCallable[R]): () => R =
new ScalarOutClosureFunction[R](f)
/**
* Implicit converter from `GridOutClosureX` to Scala wrapping function.
*
* @param f Grid closure to convert.
*/
implicit def fromOutClosureX[R](f: IgniteOutClosureX[R]): () => R =
new ScalarOutClosureXFunction[R](f)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def outClosureDotScala[R](f: IgniteCallable[R]) = new {
def scala: () => R =
fromOutClosure(f)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def outClosureXDotScala[R](f: IgniteOutClosureX[R]) = new {
def scala: () => R =
fromOutClosureX(f)
}
/**
* Implicit converter from Scala function to `GridAbsClosure`.
*
* @param f Scala function to convert.
*/
implicit def toRunnable(f: () => Unit): IgniteRunnable =
f match {
case (f: ScalarAbsClosureFunction) => f.inner
case _ => new ScalarAbsClosure(f)
}
/**
* Implicit converter from Scala function to `GridAbsClosureX`.
*
* @param f Scala function to convert.
*/
def toAbsClosureX(f: () => Unit): GridAbsClosureX =
f match {
case (f: ScalarAbsClosureXFunction) => f.inner
case _ => new ScalarAbsClosureX(f)
}
/**
* Implicit converter from `GridAbsClosure` to Scala wrapping function.
*
* @param f Grid closure to convert.
*/
implicit def fromAbsClosure(f: GridAbsClosure): () => Unit =
new ScalarAbsClosureFunction(f)
/**
* Implicit converter from `GridAbsClosureX` to Scala wrapping function.
*
* @param f Grid closure to convert.
*/
implicit def fromAbsClosureX(f: GridAbsClosureX): () => Unit =
new ScalarAbsClosureXFunction(f)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side absolute closure to pimp.
*/
implicit def absClosureDotScala(f: GridAbsClosure) = new {
def scala: () => Unit =
fromAbsClosure(f)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side absolute closure to pimp.
*/
implicit def absClosureXDotScala(f: GridAbsClosureX) = new {
def scala: () => Unit =
fromAbsClosureX(f)
}
/**
* Implicit converter from Scala predicate to `GridAbsPredicate`.
*
* @param f Scala predicate to convert.
*/
implicit def toAbsPredicate(f: () => Boolean): GridAbsPredicate =
f match {
case (p: ScalarAbsPredicateFunction) => p.inner
case _ => new ScalarAbsPredicate(f)
}
/**
* Implicit converter from Scala predicate to `GridAbsPredicateX`.
*
* @param f Scala predicate to convert.
*/
implicit def toAbsPredicateX(f: () => Boolean): GridAbsPredicateX =
f match {
case (p: ScalarAbsPredicateXFunction) => p.inner
case _ => new ScalarAbsPredicateX(f)
}
/**
* Implicit converter from `GridAbsPredicate` to Scala wrapping predicate.
*
* @param p Grid predicate to convert.
*/
implicit def fromAbsPredicate(p: GridAbsPredicate): () => Boolean =
new ScalarAbsPredicateFunction(p)
/**
* Implicit converter from `GridAbsPredicateX` to Scala wrapping predicate.
*
* @param p Grid predicate to convert.
*/
implicit def fromAbsPredicateX(p: GridAbsPredicateX): () => Boolean =
new ScalarAbsPredicateXFunction(p)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param p Java-side predicate to pimp.
*/
implicit def absPredicateDotScala(p: GridAbsPredicate) = new {
def scala: () => Boolean =
fromAbsPredicate(p)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param p Java-side predicate to pimp.
*/
implicit def absPredicateXDotScala(p: GridAbsPredicateX) = new {
def scala: () => Boolean =
fromAbsPredicateX(p)
}
/**
* Implicit converter from `java.lang.Runnable` to `GridAbsClosure`.
*
* @param r Java runnable to convert.
*/
implicit def toAbsClosure2(r: java.lang.Runnable): GridAbsClosure =
GridFunc.as(r)
/**
* Implicit converter from Scala predicate to Scala wrapping predicate.
*
* @param f Scala predicate to convert.
*/
implicit def toPredicate[T](f: T => Boolean) =
f match {
case null => null
case (p: ScalarPredicateFunction[T]) => p.inner
case _ => new ScalarPredicate[T](f)
}
/**
* Implicit converter from Scala predicate to Scala wrapping predicate.
*
* @param f Scala predicate to convert.
*/
def toPredicateX[T](f: T => Boolean) =
f match {
case (p: ScalarPredicateXFunction[T]) => p.inner
case _ => new ScalarPredicateX[T](f)
}
/**
* Implicit converter from `GridPredicate` to Scala wrapping predicate.
*
* @param p Grid predicate to convert.
*/
implicit def fromPredicate[T](p: IgnitePredicate[T]): T => Boolean =
new ScalarPredicateFunction[T](p)
/**
* Implicit converter from `GridPredicate` to Scala wrapping predicate.
*
* @param p Grid predicate to convert.
*/
implicit def fromPredicateX[T](p: IgnitePredicateX[T]): T => Boolean =
new ScalarPredicateXFunction[T](p)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param p Java-side predicate to pimp.
*/
implicit def predicateDotScala[T](p: IgnitePredicate[T]) = new {
def scala: T => Boolean =
fromPredicate(p)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param p Java-side predicate to pimp.
*/
implicit def predicateXDotScala[T](p: IgnitePredicateX[T]) = new {
def scala: T => Boolean =
fromPredicateX(p)
}
/**
* Implicit converter from Scala predicate to Scala wrapping predicate.
*
* @param f Scala predicate to convert.
*/
implicit def toPredicate2[T1, T2](f: (T1, T2) => Boolean) =
f match {
case (p: ScalarPredicate2Function[T1, T2]) => p.inner
case _ => new ScalarPredicate2[T1, T2](f)
}
/**
* Implicit converter from Scala predicate to Scala wrapping predicate.
*
* @param f Scala predicate to convert.
*/
def toPredicate2X[T1, T2](f: (T1, T2) => Boolean) =
f match {
case (p: ScalarPredicate2XFunction[T1, T2]) => p.inner
case _ => new ScalarPredicate2X[T1, T2](f)
}
/**
* Implicit converter from `GridPredicate2X` to Scala wrapping predicate.
*
* @param p Grid predicate to convert.
*/
implicit def fromPredicate2[T1, T2](p: IgniteBiPredicate[T1, T2]): (T1, T2) => Boolean =
new ScalarPredicate2Function[T1, T2](p)
/**
* Implicit converter from `GridPredicate2X` to Scala wrapping predicate.
*
* @param p Grid predicate to convert.
*/
implicit def fromPredicate2X[T1, T2](p: IgnitePredicate2X[T1, T2]): (T1, T2) => Boolean =
new ScalarPredicate2XFunction[T1, T2](p)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param p Java-side predicate to pimp.
*/
implicit def predicate2DotScala[T1, T2](p: IgniteBiPredicate[T1, T2]) = new {
def scala: (T1, T2) => Boolean =
fromPredicate2(p)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param p Java-side predicate to pimp.
*/
implicit def predicate2XDotScala[T1, T2](p: IgnitePredicate2X[T1, T2]) = new {
def scala: (T1, T2) => Boolean =
fromPredicate2X(p)
}
/**
* Implicit converter from Scala predicate to Scala wrapping predicate.
*
* @param f Scala predicate to convert.
*/
implicit def toPredicate3[T1, T2, T3](f: (T1, T2, T3) => Boolean) =
f match {
case (p: ScalarPredicate3Function[T1, T2, T3]) => p.inner
case _ => new ScalarPredicate3[T1, T2, T3](f)
}
/**
* Implicit converter from Scala predicate to Scala wrapping predicate.
*
* @param f Scala predicate to convert.
*/
def toPredicate32[T1, T2, T3](f: (T1, T2, T3) => Boolean) =
f match {
case (p: ScalarPredicate3XFunction[T1, T2, T3]) => p.inner
case _ => new ScalarPredicate3X[T1, T2, T3](f)
}
/**
* Implicit converter from `GridPredicate3X` to Scala wrapping predicate.
*
* @param p Grid predicate to convert.
*/
implicit def fromPredicate3[T1, T2, T3](p: GridPredicate3[T1, T2, T3]): (T1, T2, T3) => Boolean =
new ScalarPredicate3Function[T1, T2, T3](p)
/**
* Implicit converter from `GridPredicate3X` to Scala wrapping predicate.
*
* @param p Grid predicate to convert.
*/
implicit def fromPredicate3X[T1, T2, T3](p: GridPredicate3X[T1, T2, T3]): (T1, T2, T3) => Boolean =
new ScalarPredicate3XFunction[T1, T2, T3](p)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param p Java-side predicate to pimp.
*/
implicit def predicate3DotScala[T1, T2, T3](p: GridPredicate3[T1, T2, T3]) = new {
def scala: (T1, T2, T3) => Boolean =
fromPredicate3(p)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param p Java-side predicate to pimp.
*/
implicit def predicate3XDotScala[T1, T2, T3](p: GridPredicate3X[T1, T2, T3]) = new {
def scala: (T1, T2, T3) => Boolean =
fromPredicate3X(p)
}
/**
* Implicit converter from Scala closure to `GridClosure`.
*
* @param f Scala closure to convert.
*/
implicit def toClosure[A, R](f: A => R): IgniteClosure[A, R] =
f match {
case (c: ScalarClosureFunction[A, R]) => c.inner
case _ => new ScalarClosure[A, R](f)
}
/**
* Implicit converter from Scala closure to `GridClosureX`.
*
* @param f Scala closure to convert.
*/
def toClosureX[A, R](f: A => R): IgniteClosureX[A, R] =
f match {
case (c: ScalarClosureXFunction[A, R]) => c.inner
case _ => new ScalarClosureX[A, R](f)
}
/**
* Implicit converter from `GridClosure` to Scala wrapping closure.
*
* @param f Grid closure to convert.
*/
implicit def fromClosure[A, R](f: IgniteClosure[A, R]): A => R =
new ScalarClosureFunction[A, R](f)
/**
 * Implicit converter from `IgniteClosureX` to Scala wrapping closure.
*
* @param f Grid closure to convert.
*/
implicit def fromClosureX[A, R](f: IgniteClosureX[A, R]): A => R =
new ScalarClosureXFunction[A, R](f)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def closureDotScala[A, R](f: IgniteClosure[A, R]) = new {
def scala: A => R =
fromClosure(f)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def closureXDotScala[A, R](f: IgniteClosureX[A, R]) = new {
def scala: A => R =
fromClosureX(f)
}
/**
 * Implicit converter from Scala closure to `IgniteBiClosure`.
*
* @param f Scala closure to convert.
*/
implicit def toClosure2[A1, A2, R](f: (A1, A2) => R): IgniteBiClosure[A1, A2, R] =
f match {
case (p: ScalarClosure2Function[A1, A2, R]) => p.inner
case _ => new ScalarClosure2[A1, A2, R](f)
}
/**
 * Converter from Scala closure to `IgniteClosure2X`.
*
* @param f Scala closure to convert.
*/
def toClosure2X[A1, A2, R](f: (A1, A2) => R): IgniteClosure2X[A1, A2, R] =
f match {
case (p: ScalarClosure2XFunction[A1, A2, R]) => p.inner
case _ => new ScalarClosure2X[A1, A2, R](f)
}
/**
 * Implicit converter from `IgniteBiClosure` to Scala wrapping closure.
*
* @param f Grid closure to convert.
*/
implicit def fromClosure2[A1, A2, R](f: IgniteBiClosure[A1, A2, R]): (A1, A2) => R =
new ScalarClosure2Function[A1, A2, R](f)
/**
 * Implicit converter from `IgniteClosure2X` to Scala wrapping closure.
*
* @param f Grid closure to convert.
*/
implicit def fromClosure2X[A1, A2, R](f: IgniteClosure2X[A1, A2, R]): (A1, A2) => R =
new ScalarClosure2XFunction[A1, A2, R](f)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def closure2DotScala[A1, A2, R](f: IgniteBiClosure[A1, A2, R]) = new {
def scala: (A1, A2) => R =
fromClosure2(f)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def closure2XDotScala[A1, A2, R](f: IgniteClosure2X[A1, A2, R]) = new {
def scala: (A1, A2) => R =
fromClosure2X(f)
}
/**
 * Implicit converter from Scala closure to `GridClosure3`.
*
* @param f Scala closure to convert.
*/
implicit def toClosure3[A1, A2, A3, R](f: (A1, A2, A3) => R): GridClosure3[A1, A2, A3, R] =
f match {
case (p: ScalarClosure3Function[A1, A2, A3, R]) => p.inner
case _ => new ScalarClosure3[A1, A2, A3, R](f)
}
/**
 * Converter from Scala closure to `GridClosure3X`.
*
* @param f Scala closure to convert.
*/
def toClosure3X[A1, A2, A3, R](f: (A1, A2, A3) => R): GridClosure3X[A1, A2, A3, R] =
f match {
case (p: ScalarClosure3XFunction[A1, A2, A3, R]) => p.inner
case _ => new ScalarClosure3X[A1, A2, A3, R](f)
}
/**
* Implicit converter from `GridClosure3` to Scala wrapping closure.
*
* @param f Grid closure to convert.
*/
implicit def fromClosure3[A1, A2, A3, R](f: GridClosure3[A1, A2, A3, R]): (A1, A2, A3) => R =
new ScalarClosure3Function[A1, A2, A3, R](f)
/**
* Implicit converter from `GridClosure3X` to Scala wrapping closure.
*
* @param f Grid closure to convert.
*/
implicit def fromClosure3X[A1, A2, A3, R](f: GridClosure3X[A1, A2, A3, R]): (A1, A2, A3) => R =
new ScalarClosure3XFunction[A1, A2, A3, R](f)
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def closure3DotScala[A1, A2, A3, R](f: GridClosure3[A1, A2, A3, R]) = new {
def scala: (A1, A2, A3) => R =
fromClosure3(f)
}
/**
* Pimp for adding explicit conversion method `scala`.
*
* @param f Java-side closure to pimp.
*/
implicit def closure3XDotScala[A1, A2, A3, R](f: GridClosure3X[A1, A2, A3, R]) = new {
def scala: (A1, A2, A3) => R =
fromClosure3X(f)
}
}
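// Minimal usage sketch (hypothetical values; assumes the conversions above are brought
// into scope, e.g. by importing the enclosing scalar object):
//
//   import org.apache.ignite.lang.{IgniteBiPredicate, IgniteClosure}
//
//   val inc: IgniteClosure[Int, Int] = (x: Int) => x + 1            // via implicit toClosure
//   val sameSign: IgniteBiPredicate[Int, Int] =
//     (a: Int, b: Int) => a.signum == b.signum                      // via implicit toPredicate2
//
//   val f: Int => Int = inc.scala                                   // back to a Scala function
//   val ok: Boolean = sameSign.scala(2, 4)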
| vldpyatkov/ignite | modules/scalar/src/main/scala/org/apache/ignite/scalar/ScalarConversions.scala | Scala | apache-2.0 | 37,024 |
package com.github.aselab.activerecord.inner
import com.github.aselab.activerecord._
import com.github.aselab.activerecord.dsl._
import org.specs2.specification.Scope
package onetomany {
case class User(name: String) extends ActiveRecord {
val groupId: Long = 0
val otherKey: Option[Long] = None
lazy val group = belongsTo[Group]
lazy val groupByOtherKey = belongsTo[Group](foreignKey = "otherKey")
}
case class Group(name: String) extends ActiveRecord {
lazy val users = hasMany[User]
lazy val usersByOtherKey = hasMany[User](foreignKey = "otherKey")
}
object User extends ActiveRecordCompanion[User]
object Group extends ActiveRecordCompanion[Group]
object Tables extends ActiveRecordTables {
val users = table[User]
val groups = table[Group]
}
trait TestData extends Scope {
val user = User("user1").create
val group = Group("group1").create
}
}
object OneToManyAssociationSpec extends DatabaseSpecification {
import onetomany._
override def schema = Tables
"BelongsToAssociation" should {
"assign persisted record" >> new TestData {
user.group := group
user.group.isLoaded must beTrue
user.group.cache mustEqual List(group)
user.groupId mustEqual(group.id)
}
"assign non-persisted record" >> new TestData {
val newGroup = Group("group2")
user.group.assign(newGroup) must throwA(ActiveRecordException.recordMustBeSaved)
}
"associate persisted record" >> new TestData {
user.group.associate(group)
user.group.toOption must beSome(group)
}
"find by primary key" >> new TestData {
user.group.associate(group)
user.group.find(group.id) must beSome(group)
}
"configure foreignKey" >> new TestData {
user.groupByOtherKey.associate(group)
user.groupByOtherKey.toOption must beSome(group)
user.otherKey must beSome(group.id)
}
"implicit conversions" >> new TestData {
user.group.associate(group)
user.group.count mustEqual 1
user.group.where(_.id === group.id).toList mustEqual List(group)
user.group.name mustEqual group.name
}
}
"HasManyAssociation" should {
"assign to non-persisted record" >> new TestData {
val newGroup = Group("group2")
newGroup.users.assign(user) must throwA(ActiveRecordException.recordMustBeSaved)
}
"assign persisted record" >> new TestData {
group.users.assign(user)
user.groupId mustEqual(group.id)
}
"associate persisted record" >> new TestData {
group.users.associate(user)
group.users.toList mustEqual List(user)
}
"associate non-persisted record" >> new TestData {
val newUser = User("user2")
group.users.associate(newUser)
newUser.isPersisted must beTrue
group.users.toList mustEqual List(newUser)
}
"remove" >> new TestData {
val user2 = User("user2").create
group.usersByOtherKey := List(user, user2)
val removed = group.usersByOtherKey.remove(user)
removed must beSome(user)
group.usersByOtherKey.toList mustEqual List(user2)
group.usersByOtherKey.reload mustEqual List(user2)
}
"remove with not null constraint" >> new TestData {
group.users << user
group.users.remove(user) must throwA(ActiveRecordException.notNullConstraint("groupId"))
}
"removeAll" >> new TestData {
val user2 = User("user2").create
group.usersByOtherKey << user
group.usersByOtherKey += user2
val removed = group.usersByOtherKey.removeAll
removed mustEqual List(user, user2)
removed.forall(m => m.otherKey == None && m.isPersisted) must beTrue
group.usersByOtherKey must beEmpty
group.usersByOtherKey.reload must beEmpty
}
"removeAll with not null constraint" >> new TestData {
group.users << user
group.users.removeAll must throwA(ActiveRecordException.notNullConstraint("groupId"))
}
"deleteAll" >> new TestData {
val user2 = User("user2").create
val user3 = User("user3").create
group.users << Seq(user, user2)
group.users.deleteAll mustEqual List(user, user2)
group.users must beEmpty
User.exists(_.id === user.id) must beFalse
User.exists(_.id === user2.id) must beFalse
User.exists(_.id === user3.id) must beTrue
}
"append records" >> new TestData {
val user2 = User("user2").create
val user3 = User("user3").create
group.users << user
group.users ++= Seq(user2, user3)
group.users.toList must contain(exactly(user, user2, user3))
}
"replace records" >> new TestData {
val user2 = User("user2").create
val user3 = User("user3").create
group.usersByOtherKey << user
group.usersByOtherKey := List(user2, user3)
User.exists(_.id === user.id) must beTrue
group.usersByOtherKey.toList mustEqual List(user2, user3)
}
"replace records with not null constraint" >> new TestData {
val user2 = User("user2").create
val user3 = User("user3").create
group.users << user
group.users := List(user2, user3)
User.exists(_.id === user.id) must beFalse
group.users.toList mustEqual List(user2, user3)
}
"implicit conversions" >> new TestData {
group.users.associate(user)
group.users.map(_.name).toList mustEqual List(user.name)
group.users.where(_.id === user.id).toList mustEqual List(user)
}
}
}
| xdougx/scala-activerecord | activerecord/src/test/scala/inner/OneToManyAssociationSpec.scala | Scala | mit | 5,483 |
package vault
import scalaz.stream._
import java.sql.{Connection, SQLException, PreparedStatement, ResultSet}
import scalaz._, Scalaz._, effect._, Effect._, concurrent._
import DbValue.db
import Db._
object Execute {
def list[A: ToDb, B: FromDb](sql: String, a: A): Db[List[B]] =
process[A, B](sql, a).runLog.map(_.toList)
def list_[A: FromDb](sql: String): Db[List[A]] =
list[Unit, A](sql, ())
def get[A: ToDb, B: FromDb](sql: String, a: A): Db[Option[B]] =
process[A, B](sql, a).runLog.map(_.headOption)
def get_[A: FromDb](sql: String): Db[Option[A]] =
get[Unit, A](sql, ())
def update_(sql: String): Db[Int] =
update[Unit](sql, ())
def update[A: ToDb](sql: String, a: A): Db[Int] = Db.withConnectionX(conn => Task.delay {
DbValue.db({
val stmt = conn.prepareStatement(sql)
ToDb.execute[A](Sql.jdbc(stmt), a);
stmt.executeUpdate
}) })
def execute_(sql: String): Db[Boolean] =
execute[Unit](sql, ())
def execute[A: ToDb](sql: String, a: A): Db[Boolean] =
Db.withConnectionX(conn => Task.delay {
DbValue.db({
val stmt = conn.prepareStatement(sql);
ToDb.execute[A](Sql.jdbc(stmt), a);
stmt.execute
}) })
import Process._
/* this is horrible, and I need to fix it, but the hackery is currently required to get anything near decent performance out... */
def process[A: ToDb, B: FromDb](sql: String, a: A): Process[Db, B] = {
def statement(sql: String): Db[PreparedStatement] =
Db.withConnectionX(conn => Task.delay { DbValue.ok(conn.prepareStatement(sql)) })
def run[A: ToDb](statement: PreparedStatement, a: A): Db[ResultSet] =
Db.delay { ToDb.execute[A](Sql.jdbc(statement), a); statement.executeQuery }
def close[A <: java.lang.AutoCloseable]: A => Db[Unit] =
a => Db.liftTask(Task.delay { a.close })
await(statement(sql))(s =>
await(run(s, a))(rs =>
await(Db.getChunkSize)(size => {
import scala.collection.mutable._
val buffer: ArrayBuffer[B] = new ArrayBuffer[B](size)
var failure: DbFailure = null
var done = false
def chunk: ArrayBuffer[B] = {
if (done)
throw Process.End
buffer.clear
var i = 0; while (i < size && !done) {
if (rs.next) {
FromDb.perform[B](Row.jdbc(rs)) match {
case DbOk(b) => buffer += b
case DbErr(f) => done = true; failure = f
}
} else done = true
i += 1
}
buffer
}
def next = Db.delayDbValue {
val r = chunk
if (failure != null)
DbErr(failure)
else
DbOk(r)
}
def go: Process[Db, B] =
await(next)(bs => emitAll(bs) ++ go)
go
}
), eval(close(s)).drain, eval(close(s)).drain))
}
}
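// Minimal usage sketch (hypothetical table and columns; running the resulting `Db` actions
// still requires a live connection, which is outside the scope of this file):
//
//   val names: Db[List[String]] =
//     Execute.list[Int, String]("select name from person where age > ?", 18)
//
//   val first: Db[Option[String]] =
//     Execute.get[Int, String]("select name from person where id = ?", 1)
//
//   val inserted: Db[Int] =
//     Execute.update[(Int, String)]("insert into person (id, name) values (?, ?)", (1, "alice"))
//
// The tuple form assumes a ToDb instance for pairs is available; if not, the single-parameter
// variants above illustrate the same pattern.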
| markhibberd/vault | src/main/scala/vault/Execute.scala | Scala | bsd-3-clause | 2,964 |
/*
* Copyright (c) 2016, Innoave.com
* All rights reserved.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL INNOAVE.COM OR ITS CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.innoave.soda.l10n.format
import java.text.{MessageFormat => JMessageFormat}
import com.innoave.soda.l10n.Locale
import com.innoave.soda.l10n.MessageFormat
trait JavaMessageFormatProducer {
final def messageFormatFor(pattern: String, locale: Locale): MessageFormat =
JavaMessageFormat(pattern, locale)
}
final class JavaMessageFormat(val delegate: JMessageFormat) extends MessageFormat {
override def locale: Locale =
Locale.fromJavaLocale(delegate.getLocale)
override def format(args: Any*): String =
delegate.format(args.map(_.asInstanceOf[java.lang.Object]).toArray, new StringBuffer(), null).toString
override def format(args: Array[_]): String =
delegate.format(args.map(_.asInstanceOf[java.lang.Object]), new StringBuffer(), null).toString
}
object JavaMessageFormat {
def apply(pattern: String, locale: Locale): JavaMessageFormat =
new JavaMessageFormat(new JMessageFormat(pattern, locale.asJavaLocale))
}
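// Minimal usage sketch (assuming, as its use above suggests, that `Locale.fromJavaLocale`
// wraps a plain `java.util.Locale`):
object JavaMessageFormatSketch {
  // greet("Alice", 3) == "Alice has 3 new messages"
  def greet(name: String, count: Int): String =
    JavaMessageFormat(
      "{0} has {1,number,integer} new messages",
      Locale.fromJavaLocale(java.util.Locale.ENGLISH)
    ).format(name, count)
}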
| innoave/soda | l10n/src/main/scala/com/innoave/soda/l10n/format/JavaMessageFormat.scala | Scala | apache-2.0 | 1,816 |
/*
* Copyright (c) 2013-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow
package collectors
package scalastream
// Akka and Spray
import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.io.IO
import spray.can.Http
// Scala Futures
import scala.concurrent.duration._
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Success, Failure}
// Java
import java.io.File
import java.nio.ByteBuffer
import java.util.concurrent.ScheduledThreadPoolExecutor
import java.util.concurrent.TimeUnit
// Argot
import org.clapper.argot._
// Config
import com.typesafe.config.{ConfigFactory,Config,ConfigException}
// Logging
import org.slf4j.LoggerFactory
// Snowplow
import sinks._
// Main entry point of the Scala collector.
object ScalaCollector extends App {
lazy val log = LoggerFactory.getLogger(getClass())
import log.{error, debug, info, trace}
import ArgotConverters._ // Argument specifications
val parser = new ArgotParser(
programName = generated.Settings.name,
compactUsage = true,
preUsage = Some("%s: Version %s. Copyright (c) 2015, %s.".format(
generated.Settings.name,
generated.Settings.version,
generated.Settings.organization)
)
)
// Mandatory config argument
val config = parser.option[Config](List("config"), "filename",
"Configuration file.") { (c, opt) =>
val file = new File(c)
if (file.exists) {
ConfigFactory.parseFile(file)
} else {
parser.usage("Configuration file \\"%s\\" does not exist".format(c))
ConfigFactory.empty()
}
}
parser.parse(args)
val rawConf = config.value.getOrElse(throw new RuntimeException("--config option must be provided"))
val collectorConfig = new CollectorConfig(rawConf)
implicit val system = ActorSystem.create("scala-stream-collector", rawConf)
lazy val executorService = new ScheduledThreadPoolExecutor(collectorConfig.threadpoolSize)
val sinks = collectorConfig.sinkEnabled match {
case Sink.Kinesis => {
val good = KinesisSink.createAndInitialize(collectorConfig, InputType.Good, executorService)
val bad = KinesisSink.createAndInitialize(collectorConfig, InputType.Bad, executorService)
CollectorSinks(good, bad)
}
case Sink.Stdout => {
val good = new StdoutSink(InputType.Good)
val bad = new StdoutSink(InputType.Bad)
CollectorSinks(good, bad)
}
}
// The handler actor replies to incoming HttpRequests.
val handler = system.actorOf(
Props(classOf[CollectorServiceActor], collectorConfig, sinks),
name = "handler"
)
val bind = Http.Bind(
handler,
interface=collectorConfig.interface,
port=collectorConfig.port)
val bindResult = IO(Http).ask(bind)(5.seconds) flatMap {
case b: Http.Bound => Future.successful(())
case failed: Http.CommandFailed => Future.failed(new RuntimeException(failed.toString))
}
bindResult onComplete {
case Success(_) =>
case Failure(f) => {
error("Failure binding to port", f)
System.exit(1)
}
}
}
// Return Options from the configuration.
object Helper {
implicit class RichConfig(val underlying: Config) extends AnyVal {
def getOptionalString(path: String): Option[String] = try {
Some(underlying.getString(path))
} catch {
case e: ConfigException.Missing => None
}
}
}
// Instead of comparing strings and validating every time
// the sink is accessed, validate the string here and
// store this enumeration.
object Sink extends Enumeration {
type Sink = Value
val Kinesis, Stdout, Test = Value
}
// How a collector should set cookies
case class CookieConfig(name: String, expiration: Long, domain: Option[String])
// Load the configuration file eagerly here so that errors surface when
// the collector process starts rather than later.
class CollectorConfig(config: Config) {
import Helper.RichConfig
private val collector = config.getConfig("collector")
val interface = collector.getString("interface")
val port = collector.getInt("port")
val production = collector.getBoolean("production")
private val p3p = collector.getConfig("p3p")
val p3pPolicyRef = p3p.getString("policyref")
val p3pCP = p3p.getString("CP")
private val cookie = collector.getConfig("cookie")
val cookieConfig = if (cookie.getBoolean("enabled")) {
Some(CookieConfig(
cookie.getString("name"),
cookie.getDuration("expiration", TimeUnit.MILLISECONDS),
cookie.getOptionalString("domain")))
} else None
private val sink = collector.getConfig("sink")
// TODO: either change this to ADTs or switch to withName generation
val sinkEnabled = sink.getString("enabled") match {
case "kinesis" => Sink.Kinesis
case "stdout" => Sink.Stdout
case "test" => Sink.Test
case _ => throw new RuntimeException("collector.sink.enabled unknown.")
}
private val kinesis = sink.getConfig("kinesis")
private val aws = kinesis.getConfig("aws")
val awsAccessKey = aws.getString("access-key")
val awsSecretKey = aws.getString("secret-key")
private val stream = kinesis.getConfig("stream")
val streamGoodName = stream.getString("good")
val streamBadName = stream.getString("bad")
private val streamRegion = stream.getString("region")
val streamEndpoint = s"https://kinesis.${streamRegion}.amazonaws.com"
val threadpoolSize = kinesis.hasPath("thread-pool-size") match {
case true => kinesis.getInt("thread-pool-size")
case _ => 10
}
val buffer = kinesis.getConfig("buffer")
val byteLimit = buffer.getInt("byte-limit")
val recordLimit = buffer.getInt("record-limit")
val timeLimit = buffer.getInt("time-limit")
val backoffPolicy = kinesis.getConfig("backoffPolicy")
val minBackoff = backoffPolicy.getLong("minBackoff")
val maxBackoff = backoffPolicy.getLong("maxBackoff")
val useIpAddressAsPartitionKey = kinesis.hasPath("useIpAddressAsPartitionKey") && kinesis.getBoolean("useIpAddressAsPartitionKey")
def cookieName = cookieConfig.map(_.name)
def cookieDomain = cookieConfig.flatMap(_.domain)
def cookieExpiration = cookieConfig.map(_.expiration)
}
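// Illustrative configuration sketch (field names inferred from the accessors above;
// values are placeholders rather than a recommended setup):
//
//   collector {
//     interface = "0.0.0.0"
//     port = 8080
//     production = true
//     p3p {
//       policyref = "/w3c/p3p.xml"
//       CP = "NOI DSP COR NID PSA OUR IND COM NAV STA"
//     }
//     cookie {
//       enabled = true
//       name = "sp"
//       expiration = "365 days"
//     }
//     sink {
//       enabled = "stdout"                    # or "kinesis"
//       kinesis {
//         thread-pool-size = 10
//         aws { access-key = "iam", secret-key = "iam" }
//         stream { good = "good-events", bad = "bad-events", region = "us-east-1" }
//         buffer { byte-limit = 4500000, record-limit = 500, time-limit = 60000 }
//         backoffPolicy { minBackoff = 3000, maxBackoff = 600000 }
//       }
//     }
//   }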
| bigdecisions/snowplow | 2-collectors/scala-stream-collector/src/main/scala/com.snowplowanalytics.snowplow.collectors/scalastream/ScalaCollectorApp.scala | Scala | apache-2.0 | 6,820 |
/**
* Enumerate graphs based on a given set of nodes and their connectivity
*/
package uk.ac.cdrc.mintsearch.graph
import org.neo4j.graphdb.Path
import uk.ac.cdrc.mintsearch._
import uk.ac.cdrc.mintsearch.neo4j.GraphDBContext
import scala.annotation.tailrec
import scala.collection.JavaConverters._
/**
* This class defines a procedure to assemble embeddings from the ranking lists of nodes.
 * The principle in this procedure is to find all connected components of the nodes in
 * the graph store. The connection is defined by the traversalDescription, which may not
 * require all the nodes on the connecting path to be part of the embeddings. So the assembled
 * subgraphs may have extra nodes, beyond the embeddings themselves, to indicate that the nodes
 * in an embedding are connected.
*/
trait SubGraphEnumeratorContext {
self: GraphDBContext with TraversalStrategy with NeighbourAwareContext =>
/**
 * Assemble graphs from dangling nodes.
 * @param dangled is a set of nodeIds not yet assigned to a sub graph
 * @param acc is the accumulator of sub graphs assembled so far
 * @return a sequence of assembled sub graphs covering the dangling nodes
*/
@tailrec
final def composeGraphs(dangled: Set[NodeId], acc: Seq[GraphSnippet] = Seq.empty): Seq[GraphSnippet] = {
dangled.toList match {
case x :: xs =>
val seed = dangled.take(1)
val subGraph = expandingSubGraph(seed, dangled)
composeGraphs(dangled -- subGraph.nodeIds, acc :+ subGraph)
case Nil =>
acc
}
}
/**
 * Expand a seed set of nodes to the largest sub graph reachable within the graph store.
 * @param seedNodes is a set of nodes to expand from
 * @param range is a set of nodes marking the boundary of neighbour searching; only
 *              nodes within the range will be considered in the returned sub graph
 * @return the largest sub graph expanded from the seed nodes within the range
*/
def expandingSubGraph(seedNodes: Set[NodeId], range: Set[NodeId]): GraphSnippet = {
val (nodeIds, path) = stepExpandingSubGraph(seedNodes, Map.empty, range).reduce((_, b) => b)
val nodes = for (n <- nodeIds) yield db.getNodeById(n)
val relationships = (for (p <- path.values; r <- p.relationships().asScala) yield r.getId) map db.getRelationshipById
GraphSnippet(nodes.toList, relationships.toList)
}
/**
 * A method defining the intermediate step for expanding a sub graph from a seed set of nodes
* @param seedNodes is a set of nodes to start from
* @param seedPaths is a set of paths carried forward for later assembling
* @param range is the range for sub graph boundaries
 * @return a series of expansion steps towards the maximum extent of the sub graph
*/
def stepExpandingSubGraph(seedNodes: Set[NodeId], seedPaths: Map[NodeId, Path], range: Set[NodeId]): Stream[(Set[NodeId], Map[NodeId, Path])] = {
val pathToNeighbours = (for {
nid <- seedNodes & range
p <- db.getNodeById(nid).NeighboursIn(range)
} yield p.endNode().getId -> p).toMap
if (pathToNeighbours.isEmpty)
(seedNodes, seedPaths) #:: Stream.empty
else
(seedNodes, seedPaths) #:: stepExpandingSubGraph(seedNodes ++ pathToNeighbours.keySet, seedPaths ++ pathToNeighbours, range -- seedNodes)
}
}
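/**
 * A minimal, self-contained analogue of `composeGraphs` above, for illustration only: it runs
 * the same "pick a seed, expand, drop what was reached, repeat" loop over a plain adjacency
 * map, without any Neo4j traversal involved.
 */
private[graph] object ConnectedComponentSketch {
  def components(adj: Map[Long, Set[Long]], nodes: Set[Long]): Seq[Set[Long]] = {
    // Grow a frontier until no new neighbours inside the range can be reached
    @tailrec
    def expand(frontier: Set[Long], range: Set[Long]): Set[Long] = {
      val next = frontier.flatMap(n => adj.getOrElse(n, Set.empty[Long])) & range
      if ((next -- frontier).isEmpty) frontier
      else expand(frontier ++ next, range)
    }
    // Peel off one component at a time, mirroring the recursion in composeGraphs
    @tailrec
    def loop(remaining: Set[Long], acc: Seq[Set[Long]]): Seq[Set[Long]] =
      remaining.headOption match {
        case Some(seed) =>
          val reached = expand(Set(seed), remaining)
          loop(remaining -- reached, acc :+ reached)
        case None => acc
      }
    loop(nodes, Seq.empty)
  }
}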
| spacelis/mint-search | neo4j-plugin/src/main/scala/uk/ac/cdrc/mintsearch/graph/SubGraphEnumeratorContext.scala | Scala | apache-2.0 | 3,178 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package model.assessmentscores
import model.UniqueIdentifier
import model.fsacscores.AssessmentScoresFinalFeedbackExamples
object AssessmentScoresAllExercisesExamples {
val AppId1 = UniqueIdentifier.randomUniqueIdentifier
val AppId2 = UniqueIdentifier.randomUniqueIdentifier
val AppId3 = UniqueIdentifier.randomUniqueIdentifier
val AppId4 = UniqueIdentifier.randomUniqueIdentifier
val AppId5 = UniqueIdentifier.randomUniqueIdentifier
val AppId6 = UniqueIdentifier.randomUniqueIdentifier
val AssessorOnlyAnalysisExercise = AssessmentScoresAllExercises(
AppId1,
Some(AssessmentScoresExerciseExamples.Example1),
None,
None
)
val AssessorOnlyGroupExercise = AssessmentScoresAllExercises(
AppId2,
None,
Some(AssessmentScoresExerciseExamples.Example2),
None
)
val AssessorOnlyLeadershipExercise = AssessmentScoresAllExercises(
AppId3,
None,
None,
Some(AssessmentScoresExerciseExamples.Example3)
)
val AssessorAllButAnalysisExercise = AssessmentScoresAllExercises(
AppId4,
None,
Some(AssessmentScoresExerciseExamples.Example2),
Some(AssessmentScoresExerciseExamples.Example3),
Some(AssessmentScoresFinalFeedbackExamples.Example1)
)
val NoExercises = AssessmentScoresAllExercises(
AppId5,
None,
None,
None,
None
)
val AllExercises = AssessmentScoresAllExercises(
AppId6,
Some(AssessmentScoresExerciseExamples.Example1),
Some(AssessmentScoresExerciseExamples.Example2),
Some(AssessmentScoresExerciseExamples.Example3),
Some(AssessmentScoresFinalFeedbackExamples.Example1)
)
val AllExercisesButFinalFeedback = AssessmentScoresAllExercises(
AppId6,
Some(AssessmentScoresExerciseExamples.Example1),
Some(AssessmentScoresExerciseExamples.Example2),
Some(AssessmentScoresExerciseExamples.Example3),
None
)
}
| hmrc/fset-faststream | test/model/assessmentscores/AssessmentScoresAllExercisesExamples.scala | Scala | apache-2.0 | 2,477 |
package scadla.assembly
import scadla._
import squants.space.Millimeters
case class Frame(translation: Vector, orientation: Quaternion) {
def compose(f: Frame): Frame = {
val t = translation + orientation.rotate(f.translation)
val o = orientation * f.orientation //TODO is that the right order
Frame(t, o)
}
def inverse: Frame = {
val o = orientation.inverse
val t = o.rotate(translation)
Frame(t, o) //TODO does that make sense ?
}
def toRefence(s: Solid): Solid = {
Rotate(orientation, Translate(translation, s))
}
def fromReference(s: Solid): Solid = {
Translate(translation * -1, Rotate(orientation.inverse, s))
}
def directTo(p: Point): Point = ((p.toVector + translation).rotateBy(orientation)).toPoint
def directTo(p: Polyhedron): Polyhedron = {
Polyhedron(p.faces.map{ case Face(p1, p2, p3) =>
Face(directTo(p1), directTo(p2), directTo(p3))
})
}
def directFrom(p: Polyhedron): Polyhedron = inverse.directTo(p)
}
object Frame {
def apply(t: Vector): Frame = Frame(t, Quaternion(1,0,0,0, t.unit))
def apply(q: Quaternion): Frame = Frame(Vector(0,0,0, q.unit), q)
def apply(): Frame = Frame(Vector(0,0,0,Millimeters), Quaternion(1,0,0,0,Millimeters))
}
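// Minimal sketch using only the constructors above: composing two pure translations yields a
// frame whose offset is the sum of the two offsets, since the identity orientation leaves the
// second translation unrotated.
object FrameCompositionSketch {
  // Resulting translation is (1 mm, 2 mm, 0 mm) with identity orientation
  def composedTranslation: Frame =
    Frame(Vector(1, 0, 0, Millimeters)) compose Frame(Vector(0, 2, 0, Millimeters))
}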
| dzufferey/scadla | src/main/scala/scadla/assembly/Frame.scala | Scala | apache-2.0 | 1,249 |
object Test extends App {
def testList = {
val list = new java.util.ArrayList[Int]
list.add(1)
list.add(2)
list.add(3)
import scala.collection.JavaConverters._
val next = list.asScala ++ List(4,5,6)
assert(next != list.asScala)
val raw = list.asScala
val cloned = raw.clone
list.add(1)
assert(raw != cloned)
}
def testSet = {
val set = new java.util.HashSet[Int]
set.add(1)
set.add(2)
set.add(3)
import scala.collection.JavaConverters._
val next = set.asScala ++ Set(4,5,6)
assert(next != set.asScala)
val raw = set.asScala
val cloned = raw.clone
set.add(4)
assert(raw != cloned)
}
def testMap = {
val map = new java.util.HashMap[Int,Int]
map.put(1,1)
map.put(2,2)
map.put(3,3)
import scala.collection.JavaConverters._
val next = map.asScala ++ Map(4->4,5->5,6->6)
assert(next != map.asScala)
val raw = map.asScala
val cloned = raw.clone
map.put(4,4)
assert(raw != cloned)
}
def testCollection = {
val list: java.util.Collection[Int] = new java.util.ArrayDeque[Int]
list.add(1)
list.add(2)
list.add(3)
import scala.collection.JavaConverters._
val next = list.asScala ++ List(4,5,6)
assert(next != list.asScala)
// Note: Clone is hidden at this level, so no overridden cloning.
}
testList
testSet
testMap
testCollection
}
| som-snytt/dotty | tests/run/t6114.scala | Scala | apache-2.0 | 1,415 |
package io.buoyant.linkerd
package protocol
import com.twitter.finagle.buoyant.linkerd.Headers
import com.twitter.finagle.http.{Method, Response, Request}
import com.twitter.finagle.stats.{InMemoryStatsReceiver, NullStatsReceiver}
import com.twitter.finagle._
import com.twitter.io.Buf
import com.twitter.util.{Future, Time, Var}
import com.twitter.finagle.tracing.NullTracer
import io.buoyant.test.FunSuite
import java.net.InetSocketAddress
class RetriesEndToEndTest extends FunSuite {
case class Downstream(name: String, service: Service[Request, Response]) {
val stack = Http.server.stack.remove(Headers.Ctx.serverModule.role)
val server = Http.server.withStack(stack)
.configured(param.Label(name))
.configured(param.Tracer(NullTracer))
.serve(":*", service)
val address = server.boundAddress.asInstanceOf[InetSocketAddress]
val port = address.getPort
}
def upstream(server: ListeningServer) = {
val address = Address(server.boundAddress.asInstanceOf[InetSocketAddress])
val name = Name.Bound(Var.value(Addr.Bound(address)), address)
val stack = Http.client.stack.remove(Headers.Ctx.clientModule.role)
Http.client.withStack(stack)
.configured(param.Stats(NullStatsReceiver))
.configured(param.Tracer(NullTracer))
.newClient(name, "upstream").toService
}
val writeException = new WriteException {}
test("requeues") {
var i = 0
val downstream = Downstream("ds", Service.mk { req =>
val rsp = i match {
case 0 => Future.value(Response()) // first request
case 1 => Future.exception(writeException) // second request
case 2 => Future.value(Response()) // second request (requeue)
case 3 => Future.exception(writeException) // third request (budget exceeded)
}
i += 1
rsp
})
val config =
s"""|routers:
|- protocol: http
| dtab: /svc/* => /$$/inet/127.1/${downstream.port}
| service:
| retries:
| budget:
| minRetriesPerSec: 0
| percentCanRetry: 0.0
| client:
| failFast: false
| failureAccrual:
| kind: none
| requeueBudget:
| minRetriesPerSec: 0
| # each request may generate 0.5 retries, on average
| percentCanRetry: 0.5
| ttlSecs: 10
| servers:
| - port: 0
|""".stripMargin
val stats = new InMemoryStatsReceiver
val linker = Linker.load(config).configured(param.Stats(stats))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
try {
Time.withCurrentTimeFrozen { tc =>
def budget = stats.gauges(Seq("http", "client", s"$$/inet/127.1/${downstream.port}", "retries", "budget"))
def requeues = stats.counters.getOrElse(
Seq("http", "client", s"$$/inet/127.1/${downstream.port}", "retries", "requeues"),
0
)
def requestLimit = stats.counters.getOrElse(
Seq("http", "client", s"$$/inet/127.1/${downstream.port}", "retries", "request_limit"),
0
)
val req = Request()
req.host = "foo"
assert(await(client(req)).statusCode == 200) // first request (success)
assert(budget() == 0) // (0.5).toInt
assert(requeues == 0)
assert(requestLimit == 0)
assert(await(client(req)).statusCode == 200) // second request (success after a requeue)
assert(budget() == 0)
assert(requeues == 1)
assert(requestLimit == 0)
assert(await(client(req)).statusCode == 502) // third request (failed, no budget available for requeue)
assert(budget() == 0) // (0.5).toInt
assert(requeues == 1)
assert(requestLimit == 1)
// close the downstream
await(downstream.server.close())
assert(await(client(req)).statusCode == 502) // service acquisition failed
assert(requeues == 26) // tried 25 times for service acquisition
}
} finally {
await(client.close())
await(downstream.server.close())
await(server.close())
await(router.close())
}
}
test("per-client requeue policy") {
class FailEveryN(n: Int) extends Service[Request, Response] {
private[this] var i = 0
def apply(req: Request): Future[Response] = {
if (i%n == 0) {
i += 1
Future.exception(writeException)
} else {
i += 1
Future.value(Response())
}
}
}
val downstreamA = Downstream("a", new FailEveryN(2))
val downstreamB = Downstream("b", new FailEveryN(2))
val config =
s"""|routers:
|- protocol: http
| dtab: |
| /svc/a => /$$/inet/127.1/${downstreamA.port} ;
| /svc/b => /$$/inet/127.1/${downstreamB.port} ;
| client:
| kind: io.l5d.static
| configs:
| - prefix: /
| failFast: false
| failureAccrual:
| kind: none
| - prefix: /$$/inet/127.1/${downstreamA.port}
| requeueBudget:
| minRetriesPerSec: 0
| # each request may generate 1.0 requeue, on average
| percentCanRetry: 1.0
| ttlSecs: 10
| - prefix: /$$/inet/127.1/${downstreamB.port}
| requeueBudget:
| minRetriesPerSec: 0
| # each request may generate 0.5 requeues, on average
| percentCanRetry: 0.5
| ttlSecs: 10
| servers:
| - port: 0
|""".stripMargin
val stats = new InMemoryStatsReceiver
val linker = Linker.load(config).configured(param.Stats(stats))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
try {
Time.withCurrentTimeFrozen { tc =>
def budget(ds: Downstream) =
stats.gauges(Seq("http", "client", s"$$/inet/127.1/${ds.port}", "retries", "budget"))
def requeues(ds: Downstream) = stats.counters.getOrElse(
Seq("http", "client", s"$$/inet/127.1/${ds.port}", "retries", "requeues"),
0
)
def requestLimit(ds: Downstream) = stats.counters.getOrElse(
Seq("http", "client", s"$$/inet/127.1/${ds.port}", "retries", "request_limit"),
0
)
val req = Request()
req.host = "a"
for (i <- 1 to 10) {
// each request initially fails and then succeeds after 1 requeue
assert(await(client(req)).statusCode == 200)
assert(budget(downstreamA)() == 0)
assert(requeues(downstreamA) == i)
assert(requestLimit(downstreamA) == 0)
}
req.host = "b"
// budget == 0
assert(await(client(req)).statusCode == 502) // failure, no budget to requeue
assert(budget(downstreamB)() == 0)
assert(requeues(downstreamB) == 0)
assert(requestLimit(downstreamB) == 1)
// budget == 0.5
assert(await(client(req)).statusCode == 200) // success on first try
assert(budget(downstreamB)() == 1)
assert(requeues(downstreamB) == 0)
assert(requestLimit(downstreamB) == 1)
// budget == 1
assert(await(client(req)).statusCode == 200) // success after a requeue
assert(budget(downstreamB)() == 0)
assert(requeues(downstreamB) == 1)
assert(requestLimit(downstreamB) == 1)
// budget == 0.5
assert(await(client(req)).statusCode == 200) // success after a requeue
assert(budget(downstreamB)() == 0)
assert(requeues(downstreamB) == 2)
assert(requestLimit(downstreamB) == 1)
// budget == 0
assert(await(client(req)).statusCode == 502) // failure, no budget to requeue
assert(budget(downstreamB)() == 0)
assert(requeues(downstreamB) == 2)
assert(requestLimit(downstreamB) == 2)
}
} finally {
await(client.close())
await(downstreamA.server.close())
await(downstreamB.server.close())
await(server.close())
await(router.close())
}
}
test("retries") {
val success = Future.value(Response())
val failure = Future.value {
val rsp = Response()
rsp.statusCode = 500
rsp
}
var i = 0
val downstream = Downstream("ds", Service.mk { req =>
val rsp = i match {
case 0 => success // first request
case 1 => failure // second request
case 2 => success // second request (retry)
case 3 => failure // third request (budget exceeded)
}
i += 1
rsp
})
val config =
s"""|routers:
|- protocol: http
| dtab: /svc/* => /$$/inet/127.1/${downstream.port}
| client:
| failFast: false
| failureAccrual:
| kind: none
| service:
| responseClassifier:
| kind: io.l5d.http.retryableRead5XX
| retries:
| budget:
| minRetriesPerSec: 0
| # each request may generate 0.5 retries, on average
| percentCanRetry: 0.5
| ttlSecs: 10
| servers:
| - port: 0
|""".stripMargin
val stats = new InMemoryStatsReceiver
val linker = Linker.load(config).configured(param.Stats(stats))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
try {
Time.withCurrentTimeFrozen { tc =>
def budget = stats.gauges(Seq("http", "service", "svc/foo", "retries", "budget"))
def retries = stats.counters.getOrElse(
Seq("http", "service", "svc/foo", "retries", "total"),
0
)
def budgetExhausted = stats.counters.getOrElse(
Seq("http", "service", "svc/foo", "retries", "budget_exhausted"),
0
)
val req = Request()
req.host = "foo"
assert(await(client(req)).statusCode == 200) // first request (success)
assert(budget() == 0) // (0.5).toInt
assert(retries == 0)
assert(budgetExhausted == 0)
assert(await(client(req)).statusCode == 200) // second request (success after a retry)
assert(budget() == 0)
assert(retries == 1)
assert(budgetExhausted == 0)
assert(await(client(req)).statusCode == 500) // third request (failed, no budget available for retry)
assert(budget() == 0) // (0.5).toInt
assert(retries == 1)
assert(budgetExhausted == 1)
}
} finally {
await(client.close())
await(downstream.server.close())
await(server.close())
await(router.close())
}
}
test("per-service retry policy") {
class FailEveryN(n: Int) extends Service[Request, Response] {
private[this] var i = 0
def apply(req: Request): Future[Response] = {
if (i%n == 0) {
i += 1
val rsp = Response()
rsp.statusCode = 500
Future.value(rsp)
} else {
i += 1
Future.value(Response())
}
}
}
val downstreamA = Downstream("a", new FailEveryN(2))
val downstreamB = Downstream("b", new FailEveryN(2))
val config =
s"""|routers:
|- protocol: http
| dtab: |
| /svc/a => /$$/inet/127.1/${downstreamA.port} ;
| /svc/b => /$$/inet/127.1/${downstreamB.port} ;
| client:
| failFast: false
| failureAccrual:
| kind: none
| service:
| kind: io.l5d.static
| configs:
| - prefix: /svc/a
| retries:
| budget:
| minRetriesPerSec: 0
| # each request may generate 1.0 requeue, on average
| percentCanRetry: 1.0
| ttlSecs: 10
| - prefix: /svc/b
| retries:
| budget:
| minRetriesPerSec: 0
| # each request may generate 0.5 requeues, on average
| percentCanRetry: 0.5
| ttlSecs: 10
| - prefix: /
| responseClassifier:
| kind: io.l5d.http.retryableRead5XX
| servers:
| - port: 0
|""".stripMargin
val stats = new InMemoryStatsReceiver
val linker = Linker.load(config).configured(param.Stats(stats))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
try {
Time.withCurrentTimeFrozen { tc =>
def budget(ds: Downstream) =
stats.gauges(Seq("http", "service", s"svc/${ds.name}", "retries", "budget"))
def retries(ds: Downstream) = stats.counters.getOrElse(
Seq("http", "service", s"svc/${ds.name}", "retries", "total"),
0
)
def budgetExhausted(ds: Downstream) = stats.counters.getOrElse(
Seq("http", "service", s"svc/${ds.name}", "retries", "budget_exhausted"),
0
)
val req = Request()
req.host = "a"
for (i <- 1 to 10) {
// each request initially fails and then succeeds after 1 retry
assert(await(client(req)).statusCode == 200)
assert(budget(downstreamA)() == 0)
assert(retries(downstreamA) == i)
assert(budgetExhausted(downstreamA) == 0)
}
req.host = "b"
// budget == 0
assert(await(client(req)).statusCode == 500) // failure, no budget to retry
assert(budget(downstreamB)() == 0)
assert(retries(downstreamB) == 0)
assert(budgetExhausted(downstreamB) == 1)
// budget == 0.5
assert(await(client(req)).statusCode == 200) // success on first try
assert(budget(downstreamB)() == 1)
assert(retries(downstreamB) == 0)
assert(budgetExhausted(downstreamB) == 1)
// budget == 1
assert(await(client(req)).statusCode == 200) // success after a retry
assert(budget(downstreamB)() == 0)
assert(retries(downstreamB) == 1)
assert(budgetExhausted(downstreamB) == 1)
// budget == 0.5
assert(await(client(req)).statusCode == 200) // success after a retry
assert(budget(downstreamB)() == 0)
assert(retries(downstreamB) == 2)
assert(budgetExhausted(downstreamB) == 1)
// budget == 0
assert(await(client(req)).statusCode == 500) // failure, no budget to retry
assert(budget(downstreamB)() == 0)
assert(retries(downstreamB) == 2)
assert(budgetExhausted(downstreamB) == 2)
}
} finally {
await(client.close())
await(downstreamA.server.close())
await(downstreamB.server.close())
await(server.close())
await(router.close())
}
}
test("budgets for each service and client are independent") {
val downstreamA = Downstream("a", Service.mk { req => Future.value(Response()) })
val downstreamB = Downstream("b", Service.mk { req => Future.value(Response()) })
val config =
s"""|routers:
|- protocol: http
| dtab: |
| /svc/a => /$$/inet/127.1/${downstreamA.port} ;
| /svc/b => /$$/inet/127.1/${downstreamB.port} ;
| servers:
| - port: 0
|""".stripMargin
val stats = new InMemoryStatsReceiver
val linker = Linker.load(config).configured(param.Stats(stats))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
try {
Time.withCurrentTimeFrozen { tc =>
val req = Request()
req.host = "a"
for (i <- 1 to 10) {
await(client(req))
}
// Each budget starts with a balance of 100 from minRetriesPerSec
// we subtract that off to see just the deposits
def retryBudget(svc: String): Int =
stats.gauges.get(Seq("http", "service", svc, "retries", "budget")).map(_() - 100).getOrElse(0.0f).toInt
def requeueBudget(clnt: String): Int =
stats.gauges.get(Seq("http", "client", clnt, "retries", "budget")).map(_() - 100).getOrElse(0.0f).toInt
// 20% budget
assert(retryBudget("svc/a") == 2)
assert(retryBudget("svc/b") == 0)
assert(requeueBudget(s"$$/inet/127.1/${downstreamA.port}") == 2)
assert(requeueBudget(s"$$/inet/127.1/${downstreamB.port}") == 0)
req.host = "b"
for (i <- 1 to 10) {
await(client(req))
}
assert(retryBudget("svc/a") == 2)
assert(retryBudget("svc/b") == 2)
assert(requeueBudget(s"$$/inet/127.1/${downstreamA.port}") == 2)
assert(requeueBudget(s"$$/inet/127.1/${downstreamB.port}") == 2)
}
} finally {
await(client.close())
await(downstreamA.server.close())
await(downstreamB.server.close())
await(server.close())
await(router.close())
}
}
test("l5d-retryable header is respected by default") {
var i = 0
val downstream = Downstream("ds", Service.mk { req =>
val rsp = i match {
case 0 => Future.value {
val rsp = Response()
rsp.statusCode = 500
Headers.Retryable.set(rsp.headerMap, retryable = true)
rsp
}
case 1 => Future.value(Response())
}
i += 1
rsp
})
val config =
s"""|routers:
|- protocol: http
| dtab: /svc/* => /$$/inet/127.1/${downstream.port}
| servers:
| - port: 0
|""".stripMargin
val stats = new InMemoryStatsReceiver
val linker = Linker.load(config).configured(param.Stats(stats))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
try {
Time.withCurrentTimeFrozen { tc =>
def retries = stats.counters.getOrElse(
Seq("http", "service", "svc/foo", "retries", "total"),
0
)
val req = Request()
req.host = "foo"
assert(await(client(req)).statusCode == 200)
assert(retries == 1)
}
} finally {
await(client.close())
await(downstream.server.close())
await(server.close())
await(router.close())
}
}
test("l5d-retryable header takes precedence over repsonse classifier") {
var i = 0
val downstream = Downstream("ds", Service.mk { req =>
val rsp = i match {
case 0 => Future.value {
val rsp = Response()
rsp.statusCode = 500
Headers.Retryable.set(rsp.headerMap, retryable = true)
rsp
}
case 1 => Future.value(Response())
}
i += 1
rsp
})
val config =
s"""|routers:
|- protocol: http
| dtab: /svc/* => /$$/inet/127.1/${downstream.port}
| service:
| responseClassifier:
| kind: io.l5d.http.retryableRead5XX
| servers:
| - port: 0
|""".stripMargin
val stats = new InMemoryStatsReceiver
val linker = Linker.load(config).configured(param.Stats(stats))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
try {
Time.withCurrentTimeFrozen { tc =>
def retries = stats.counters.getOrElse(
Seq("http", "service", "svc/foo", "retries", "total"),
0
)
val req = Request()
req.method = Method.Post
req.host = "foo"
// POST should not usually be retryable, but l5d-retryable indicates
// the request can be retried anyway
assert(await(client(req)).statusCode == 200)
assert(retries == 1)
}
} finally {
await(client.close())
await(downstream.server.close())
await(server.close())
await(router.close())
}
}
test("chunked error responses should not leak connections on retries") {
val stats = new InMemoryStatsReceiver
val tracer = NullTracer
val downstream = Downstream("dog", Service.mk { req =>
val rsp = Response()
rsp.statusCode = 500
rsp.setChunked(true)
rsp.close()
Future.value(rsp)
})
val label = s"$$/inet/127.1/${downstream.port}"
val dtab = Dtab.read(s"/svc/dog => /$label;")
val yaml =
s"""|routers:
|- protocol: http
| dtab: ${dtab.show}
| service:
| responseClassifier:
| kind: io.l5d.http.retryableRead5XX
| servers:
| - port: 0
|""".stripMargin
val linker = Linker.load(yaml)
.configured(param.Stats(stats))
.configured(param.Tracer(tracer))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
try {
val req = Request()
req.host = "dog"
val errrsp = await(client(req))
assert(errrsp.statusCode == 500)
assert(stats.counters.get(Seq("http", "server", "127.0.0.1/0", "requests")) == Some(1))
assert(stats.counters.get(Seq("http", "client", label, "requests")) == Some(101))
assert(stats.gauges.get(Seq("http", "client", label, "connections")).map(_.apply.toInt) == Some(1))
} finally {
await(client.close())
await(downstream.server.close())
await(server.close())
await(router.close())
}
}
}
| pawelprazak/linkerd | linkerd/protocol/http/src/e2e/scala/io/buoyant/linkerd/protocol/RetriesEndToEndTest.scala | Scala | apache-2.0 | 21,875 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable.{HashMap, HashSet}
import scala.concurrent.Future
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.{ExecutorAllocationClient, SparkEnv, SparkException, TaskState}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
import org.apache.spark.executor.ExecutorLogUrlHandler
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Network._
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.rpc._
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend.ENDPOINT_NAME
import org.apache.spark.util.{RpcUtils, SerializableBuffer, ThreadUtils, Utils}
/**
* A scheduler backend that waits for coarse-grained executors to connect.
* This backend holds onto each executor for the duration of the Spark job rather than relinquishing
* executors whenever a task is done and asking the scheduler to launch a new executor for
* each new task. Executors may be launched in a variety of ways, such as Mesos tasks for the
* coarse-grained Mesos mode or standalone processes for Spark's standalone deploy mode
* (spark.deploy.*).
*/
private[spark]
class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: RpcEnv)
extends ExecutorAllocationClient with SchedulerBackend with Logging {
// Use an atomic variable to track total number of cores in the cluster for simplicity and speed
protected val totalCoreCount = new AtomicInteger(0)
// Total number of executors that are currently registered
protected val totalRegisteredExecutors = new AtomicInteger(0)
protected val conf = scheduler.sc.conf
private val maxRpcMessageSize = RpcUtils.maxMessageSizeBytes(conf)
private val defaultAskTimeout = RpcUtils.askRpcTimeout(conf)
// Submit tasks only after (registered resources / total expected resources)
  // is at least this value, which is a double between 0 and 1.
private val _minRegisteredRatio =
math.min(1, conf.get(SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO).getOrElse(0.0))
// Submit tasks after maxRegisteredWaitingTime milliseconds
// if minRegisteredRatio has not yet been reached
private val maxRegisteredWaitingTimeNs = TimeUnit.MILLISECONDS.toNanos(
conf.get(SCHEDULER_MAX_REGISTERED_RESOURCE_WAITING_TIME))
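  // For example (illustrative values only): setting
  //   spark.scheduler.minRegisteredResourcesRatio=0.8
  //   spark.scheduler.maxRegisteredResourcesWaitingTime=30s
  // holds back task submission until 80% of the expected resources have registered, or
  // 30 seconds have passed since the backend was created, whichever comes first.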
private val createTimeNs = System.nanoTime()
// Accessing `executorDataMap` in the inherited methods from ThreadSafeRpcEndpoint doesn't need
// any protection. But accessing `executorDataMap` out of the inherited methods must be
// protected by `CoarseGrainedSchedulerBackend.this`. Besides, `executorDataMap` should only
// be modified in the inherited methods from ThreadSafeRpcEndpoint with protection by
// `CoarseGrainedSchedulerBackend.this`.
private val executorDataMap = new HashMap[String, ExecutorData]
  // Number of executors for each ResourceProfile requested by the cluster
  // manager (see [[ExecutorAllocationManager]])
@GuardedBy("CoarseGrainedSchedulerBackend.this")
private val requestedTotalExecutorsPerResourceProfile = new HashMap[ResourceProfile, Int]
private val listenerBus = scheduler.sc.listenerBus
// Executors we have requested the cluster manager to kill that have not died yet; maps
// the executor ID to whether it was explicitly killed by the driver (and thus shouldn't
// be considered an app-related failure). Visible for testing only.
@GuardedBy("CoarseGrainedSchedulerBackend.this")
private[scheduler] val executorsPendingToRemove = new HashMap[String, Boolean]
// Executors that have been lost, but for which we don't yet know the real exit reason.
private val executorsPendingLossReason = new HashSet[String]
// Executors which are being decommissioned
protected val executorsPendingDecommission = new HashSet[String]
// A map of ResourceProfile id to map of hostname with its possible task number running on it
@GuardedBy("CoarseGrainedSchedulerBackend.this")
protected var rpHostToLocalTaskCount: Map[Int, Map[String, Int]] = Map.empty
// The number of pending tasks per ResourceProfile id which is locality required
@GuardedBy("CoarseGrainedSchedulerBackend.this")
protected var numLocalityAwareTasksPerResourceProfileId = Map.empty[Int, Int]
// The num of current max ExecutorId used to re-register appMaster
@volatile protected var currentExecutorIdCounter = 0
// Current set of delegation tokens to send to executors.
private val delegationTokens = new AtomicReference[Array[Byte]]()
// The token manager used to create security tokens.
private var delegationTokenManager: Option[HadoopDelegationTokenManager] = None
private val reviveThread =
ThreadUtils.newDaemonSingleThreadScheduledExecutor("driver-revive-thread")
class DriverEndpoint extends IsolatedRpcEndpoint with Logging {
override val rpcEnv: RpcEnv = CoarseGrainedSchedulerBackend.this.rpcEnv
protected val addressToExecutorId = new HashMap[RpcAddress, String]
// Spark configuration sent to executors. This is a lazy val so that subclasses of the
// scheduler can modify the SparkConf object before this view is created.
private lazy val sparkProperties = scheduler.sc.conf.getAll
.filter { case (k, _) => k.startsWith("spark.") }
.toSeq
private val logUrlHandler: ExecutorLogUrlHandler = new ExecutorLogUrlHandler(
conf.get(UI.CUSTOM_EXECUTOR_LOG_URL))
override def onStart(): Unit = {
// Periodically revive offers to allow delay scheduling to work
val reviveIntervalMs = conf.get(SCHEDULER_REVIVE_INTERVAL).getOrElse(1000L)
reviveThread.scheduleAtFixedRate(() => Utils.tryLogNonFatalError {
Option(self).foreach(_.send(ReviveOffers))
}, 0, reviveIntervalMs, TimeUnit.MILLISECONDS)
}
override def receive: PartialFunction[Any, Unit] = {
case StatusUpdate(executorId, taskId, state, data, resources) =>
scheduler.statusUpdate(taskId, state, data.value)
if (TaskState.isFinished(state)) {
executorDataMap.get(executorId) match {
case Some(executorInfo) =>
val rpId = executorInfo.resourceProfileId
val prof = scheduler.sc.resourceProfileManager.resourceProfileFromId(rpId)
val taskCpus = ResourceProfile.getTaskCpusOrDefaultForProfile(prof, conf)
executorInfo.freeCores += taskCpus
resources.foreach { case (k, v) =>
executorInfo.resourcesInfo.get(k).foreach { r =>
r.release(v.addresses)
}
}
makeOffers(executorId)
case None =>
// Ignoring the update since we don't know about the executor.
logWarning(s"Ignored task status update ($taskId state $state) " +
s"from unknown executor with ID $executorId")
}
}
case ReviveOffers =>
makeOffers()
case KillTask(taskId, executorId, interruptThread, reason) =>
executorDataMap.get(executorId) match {
case Some(executorInfo) =>
executorInfo.executorEndpoint.send(
KillTask(taskId, executorId, interruptThread, reason))
case None =>
// Ignoring the task kill since the executor is not registered.
logWarning(s"Attempted to kill task $taskId for unknown executor $executorId.")
}
case KillExecutorsOnHost(host) =>
scheduler.getExecutorsAliveOnHost(host).foreach { exec =>
killExecutors(exec.toSeq, adjustTargetNumExecutors = false, countFailures = false,
force = true)
}
case UpdateDelegationTokens(newDelegationTokens) =>
updateDelegationTokens(newDelegationTokens)
case RemoveExecutor(executorId, reason) =>
// We will remove the executor's state and cannot restore it. However, the connection
// between the driver and the executor may be still alive so that the executor won't exit
// automatically, so try to tell the executor to stop itself. See SPARK-13519.
executorDataMap.get(executorId).foreach(_.executorEndpoint.send(StopExecutor))
removeExecutor(executorId, reason)
case DecommissionExecutor(executorId, decommissionInfo) =>
logError(s"Received decommission executor message ${executorId}: $decommissionInfo")
decommissionExecutor(executorId, decommissionInfo, adjustTargetNumExecutors = false)
case RemoveWorker(workerId, host, message) =>
removeWorker(workerId, host, message)
case LaunchedExecutor(executorId) =>
executorDataMap.get(executorId).foreach { data =>
data.freeCores = data.totalCores
}
makeOffers(executorId)
case e =>
logError(s"Received unexpected message. ${e}")
}
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
case RegisterExecutor(executorId, executorRef, hostname, cores, logUrls,
attributes, resources, resourceProfileId) =>
if (executorDataMap.contains(executorId)) {
context.sendFailure(new IllegalStateException(s"Duplicate executor ID: $executorId"))
} else if (scheduler.nodeBlacklist.contains(hostname) ||
isBlacklisted(executorId, hostname)) {
// If the cluster manager gives us an executor on a blacklisted node (because it
// already started allocating those resources before we informed it of our blacklist,
// or if it ignored our blacklist), then we reject that executor immediately.
logInfo(s"Rejecting $executorId as it has been blacklisted.")
context.sendFailure(new IllegalStateException(s"Executor is blacklisted: $executorId"))
} else {
// If the executor's rpc env is not listening for incoming connections, `hostPort`
// will be null, and the client connection should be used to contact the executor.
val executorAddress = if (executorRef.address != null) {
executorRef.address
} else {
context.senderAddress
}
logInfo(s"Registered executor $executorRef ($executorAddress) with ID $executorId, " +
s" ResourceProfileId $resourceProfileId")
addressToExecutorId(executorAddress) = executorId
totalCoreCount.addAndGet(cores)
totalRegisteredExecutors.addAndGet(1)
val resourcesInfo = resources.map { case (rName, info) =>
// tell the executor it can schedule resources up to numSlotsPerAddress times,
// as configured by the user, or set to 1 as that is the default (1 task/resource)
val numParts = scheduler.sc.resourceProfileManager
.resourceProfileFromId(resourceProfileId).getNumSlotsPerAddress(rName, conf)
(info.name, new ExecutorResourceInfo(info.name, info.addresses, numParts))
}
val data = new ExecutorData(executorRef, executorAddress, hostname,
0, cores, logUrlHandler.applyPattern(logUrls, attributes), attributes,
resourcesInfo, resourceProfileId)
// This must be synchronized because variables mutated
// in this block are read when requesting executors
CoarseGrainedSchedulerBackend.this.synchronized {
executorDataMap.put(executorId, data)
if (currentExecutorIdCounter < executorId.toInt) {
currentExecutorIdCounter = executorId.toInt
}
}
listenerBus.post(
SparkListenerExecutorAdded(System.currentTimeMillis(), executorId, data))
// Note: some tests expect the reply to come after we put the executor in the map
context.reply(true)
}
case StopDriver =>
context.reply(true)
stop()
case StopExecutors =>
logInfo("Asking each executor to shut down")
for ((_, executorData) <- executorDataMap) {
executorData.executorEndpoint.send(StopExecutor)
}
context.reply(true)
case RemoveWorker(workerId, host, message) =>
removeWorker(workerId, host, message)
context.reply(true)
case DecommissionExecutor(executorId, decommissionInfo) =>
logError(s"Received decommission executor message ${executorId}: ${decommissionInfo}.")
context.reply(decommissionExecutor(executorId, decommissionInfo,
adjustTargetNumExecutors = false))
case RetrieveSparkAppConfig(resourceProfileId) =>
val rp = scheduler.sc.resourceProfileManager.resourceProfileFromId(resourceProfileId)
val reply = SparkAppConfig(
sparkProperties,
SparkEnv.get.securityManager.getIOEncryptionKey(),
Option(delegationTokens.get()),
rp)
context.reply(reply)
case IsExecutorAlive(executorId) => context.reply(isExecutorActive(executorId))
case e =>
logError(s"Received unexpected ask ${e}")
}
// Make fake resource offers on all executors
private def makeOffers(): Unit = {
// Make sure no executor is killed while some task is launching on it
val taskDescs = withLock {
// Filter out executors under killing
val activeExecutors = executorDataMap.filterKeys(isExecutorActive)
val workOffers = activeExecutors.map {
case (id, executorData) =>
new WorkerOffer(id, executorData.executorHost, executorData.freeCores,
Some(executorData.executorAddress.hostPort),
executorData.resourcesInfo.map { case (rName, rInfo) =>
(rName, rInfo.availableAddrs.toBuffer)
}, executorData.resourceProfileId)
}.toIndexedSeq
scheduler.resourceOffers(workOffers, true)
}
if (taskDescs.nonEmpty) {
launchTasks(taskDescs)
}
}
override def onDisconnected(remoteAddress: RpcAddress): Unit = {
addressToExecutorId
.get(remoteAddress)
.foreach(removeExecutor(_,
ExecutorProcessLost("Remote RPC client disassociated. Likely due to " +
"containers exceeding thresholds, or network issues. Check driver logs for WARN " +
"messages.")))
}
// Make fake resource offers on just one executor
private def makeOffers(executorId: String): Unit = {
// Make sure no executor is killed while some task is launching on it
val taskDescs = withLock {
// Filter out executors under killing
if (isExecutorActive(executorId)) {
val executorData = executorDataMap(executorId)
val workOffers = IndexedSeq(
new WorkerOffer(executorId, executorData.executorHost, executorData.freeCores,
Some(executorData.executorAddress.hostPort),
executorData.resourcesInfo.map { case (rName, rInfo) =>
(rName, rInfo.availableAddrs.toBuffer)
}, executorData.resourceProfileId))
scheduler.resourceOffers(workOffers, false)
} else {
Seq.empty
}
}
if (taskDescs.nonEmpty) {
launchTasks(taskDescs)
}
}
// Launch tasks returned by a set of resource offers
private def launchTasks(tasks: Seq[Seq[TaskDescription]]): Unit = {
for (task <- tasks.flatten) {
val serializedTask = TaskDescription.encode(task)
if (serializedTask.limit() >= maxRpcMessageSize) {
Option(scheduler.taskIdToTaskSetManager.get(task.taskId)).foreach { taskSetMgr =>
try {
var msg = "Serialized task %s:%d was %d bytes, which exceeds max allowed: " +
s"${RPC_MESSAGE_MAX_SIZE.key} (%d bytes). Consider increasing " +
s"${RPC_MESSAGE_MAX_SIZE.key} or using broadcast variables for large values."
msg = msg.format(task.taskId, task.index, serializedTask.limit(), maxRpcMessageSize)
taskSetMgr.abort(msg)
} catch {
case e: Exception => logError("Exception in error callback", e)
}
}
}
else {
val executorData = executorDataMap(task.executorId)
        // Do resource allocation here. The allocated resources will get released after the task
// finishes.
val rpId = executorData.resourceProfileId
val prof = scheduler.sc.resourceProfileManager.resourceProfileFromId(rpId)
val taskCpus = ResourceProfile.getTaskCpusOrDefaultForProfile(prof, conf)
executorData.freeCores -= taskCpus
task.resources.foreach { case (rName, rInfo) =>
assert(executorData.resourcesInfo.contains(rName))
executorData.resourcesInfo(rName).acquire(rInfo.addresses)
}
logDebug(s"Launching task ${task.taskId} on executor id: ${task.executorId} hostname: " +
s"${executorData.executorHost}.")
executorData.executorEndpoint.send(LaunchTask(new SerializableBuffer(serializedTask)))
}
}
}
// Remove a disconnected executor from the cluster
private def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
logDebug(s"Asked to remove executor $executorId with reason $reason")
executorDataMap.get(executorId) match {
case Some(executorInfo) =>
// This must be synchronized because variables mutated
// in this block are read when requesting executors
val killed = CoarseGrainedSchedulerBackend.this.synchronized {
addressToExecutorId -= executorInfo.executorAddress
executorDataMap -= executorId
executorsPendingLossReason -= executorId
executorsPendingDecommission -= executorId
executorsPendingToRemove.remove(executorId).getOrElse(false)
}
totalCoreCount.addAndGet(-executorInfo.totalCores)
totalRegisteredExecutors.addAndGet(-1)
scheduler.executorLost(executorId, if (killed) ExecutorKilled else reason)
listenerBus.post(
SparkListenerExecutorRemoved(System.currentTimeMillis(), executorId, reason.toString))
case None =>
// SPARK-15262: If an executor is still alive even after the scheduler has removed
// its metadata, we may receive a heartbeat from that executor and tell its block
// manager to reregister itself. If that happens, the block manager master will know
// about the executor, but the scheduler will not. Therefore, we should remove the
// executor from the block manager when we hit this case.
scheduler.sc.env.blockManager.master.removeExecutorAsync(executorId)
logInfo(s"Asked to remove non-existent executor $executorId")
}
}
// Remove a lost worker from the cluster
private def removeWorker(workerId: String, host: String, message: String): Unit = {
logDebug(s"Asked to remove worker $workerId with reason $message")
scheduler.workerRemoved(workerId, host, message)
}
/**
* Stop making resource offers for the given executor. The executor is marked as lost with
* the loss reason still pending.
*
* @return Whether executor should be disabled
*/
protected def disableExecutor(executorId: String): Boolean = {
val shouldDisable = CoarseGrainedSchedulerBackend.this.synchronized {
if (isExecutorActive(executorId)) {
executorsPendingLossReason += executorId
true
} else {
        // Return true for explicitly killed executors: we still need to get their pending loss
        // reasons. For all others, return false.
executorsPendingToRemove.contains(executorId)
}
}
if (shouldDisable) {
logInfo(s"Disabling executor $executorId.")
scheduler.executorLost(executorId, LossReasonPending)
}
shouldDisable
}
}
val driverEndpoint = rpcEnv.setupEndpoint(ENDPOINT_NAME, createDriverEndpoint())
protected def minRegisteredRatio: Double = _minRegisteredRatio
/**
* Request that the cluster manager decommission the specified executors.
*
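   * A sketch of a call site (illustrative only; `backend`, `execId` and `decomInfo` are assumed
   * values that are not defined in this file):
   * {{{
   * // decommission one executor and shrink the target executor count accordingly
   * val acked: Seq[String] = backend.decommissionExecutors(
   *   Array((execId, decomInfo)), adjustTargetNumExecutors = true)
   * }}}
   *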
* @param executorsAndDecomInfo Identifiers of executors & decommission info.
* @param adjustTargetNumExecutors whether the target number of executors will be adjusted down
* after these executors have been decommissioned.
* @return the ids of the executors acknowledged by the cluster manager to be removed.
*/
override def decommissionExecutors(
executorsAndDecomInfo: Array[(String, ExecutorDecommissionInfo)],
adjustTargetNumExecutors: Boolean): Seq[String] = {
val executorsToDecommission = executorsAndDecomInfo.filter { case (executorId, _) =>
CoarseGrainedSchedulerBackend.this.synchronized {
// Only bother decommissioning executors which are alive.
if (isExecutorActive(executorId)) {
executorsPendingDecommission += executorId
true
} else {
false
}
}
}
    // If we don't want to replace the executors we are decommissioning, sync the lowered
    // target number of executors with the cluster manager.
if (adjustTargetNumExecutors) {
adjustExecutors(executorsToDecommission.map(_._1))
}
executorsToDecommission.filter { case (executorId, decomInfo) =>
doDecommission(executorId, decomInfo)
}.map(_._1)
}
private def doDecommission(executorId: String,
decomInfo: ExecutorDecommissionInfo): Boolean = {
logInfo(s"Asking executor $executorId to decommissioning.")
try {
scheduler.executorDecommission(executorId, decomInfo)
if (driverEndpoint != null) {
logInfo("Propagating executor decommission to driver.")
driverEndpoint.send(DecommissionExecutor(executorId, decomInfo))
}
} catch {
case e: Exception =>
logError(s"Unexpected error during decommissioning ${e.toString}", e)
return false
}
// Send decommission message to the executor (it could have originated on the executor
    // but not necessarily).
CoarseGrainedSchedulerBackend.this.synchronized {
executorDataMap.get(executorId) match {
case Some(executorInfo) =>
executorInfo.executorEndpoint.send(DecommissionSelf)
case None =>
// Ignoring the executor since it is not registered.
logWarning(s"Attempted to decommission unknown executor $executorId.")
return false
}
}
logInfo(s"Asked executor $executorId to decommission.")
if (conf.get(STORAGE_DECOMMISSION_ENABLED)) {
try {
logInfo(s"Asking block manager corresponding to executor $executorId to decommission.")
scheduler.sc.env.blockManager.master.decommissionBlockManagers(Seq(executorId))
} catch {
case e: Exception =>
logError("Unexpected error during block manager " +
s"decommissioning for executor $executorId: ${e.toString}", e)
return false
}
logInfo(s"Acknowledged decommissioning block manager corresponding to $executorId.")
}
true
}
override def start(): Unit = {
if (UserGroupInformation.isSecurityEnabled()) {
delegationTokenManager = createTokenManager()
delegationTokenManager.foreach { dtm =>
val ugi = UserGroupInformation.getCurrentUser()
val tokens = if (dtm.renewalEnabled) {
dtm.start()
} else {
val creds = ugi.getCredentials()
dtm.obtainDelegationTokens(creds)
if (creds.numberOfTokens() > 0 || creds.numberOfSecretKeys() > 0) {
SparkHadoopUtil.get.serialize(creds)
} else {
null
}
}
if (tokens != null) {
updateDelegationTokens(tokens)
}
}
}
}
protected def createDriverEndpoint(): DriverEndpoint = new DriverEndpoint()
def stopExecutors(): Unit = {
try {
if (driverEndpoint != null) {
logInfo("Shutting down all executors")
driverEndpoint.askSync[Boolean](StopExecutors)
}
} catch {
case e: Exception =>
throw new SparkException("Error asking standalone scheduler to shut down executors", e)
}
}
override def stop(): Unit = {
reviveThread.shutdownNow()
stopExecutors()
delegationTokenManager.foreach(_.stop())
try {
if (driverEndpoint != null) {
driverEndpoint.askSync[Boolean](StopDriver)
}
} catch {
case e: Exception =>
throw new SparkException("Error stopping standalone scheduler's driver endpoint", e)
}
}
/**
* Reset the state of CoarseGrainedSchedulerBackend to the initial state. Currently it will only
* be called in the yarn-client mode when AM re-registers after a failure.
* Visible for testing only.
   */
protected[scheduler] def reset(): Unit = {
val executors: Set[String] = synchronized {
requestedTotalExecutorsPerResourceProfile.clear()
executorDataMap.keys.toSet
}
    // Remove all the lingering executors that should have been removed but have not been yet.
    // This may happen because (1) the disconnect event has not yet been received, or
    // (2) the executor died silently.
executors.foreach { eid =>
removeExecutor(eid,
ExecutorProcessLost("Stale executor after cluster manager re-registered."))
}
}
override def reviveOffers(): Unit = Utils.tryLogNonFatalError {
driverEndpoint.send(ReviveOffers)
}
override def killTask(
taskId: Long, executorId: String, interruptThread: Boolean, reason: String): Unit = {
driverEndpoint.send(KillTask(taskId, executorId, interruptThread, reason))
}
override def defaultParallelism(): Int = {
conf.getInt("spark.default.parallelism", math.max(totalCoreCount.get(), 2))
}
/**
* Called by subclasses when notified of a lost worker. It just fires the message and returns
* at once.
*/
protected def removeExecutor(executorId: String, reason: ExecutorLossReason): Unit = {
driverEndpoint.send(RemoveExecutor(executorId, reason))
}
protected def removeWorker(workerId: String, host: String, message: String): Unit = {
driverEndpoint.send(RemoveWorker(workerId, host, message))
}
def sufficientResourcesRegistered(): Boolean = true
override def isReady(): Boolean = {
if (sufficientResourcesRegistered) {
logInfo("SchedulerBackend is ready for scheduling beginning after " +
s"reached minRegisteredResourcesRatio: $minRegisteredRatio")
return true
}
if ((System.nanoTime() - createTimeNs) >= maxRegisteredWaitingTimeNs) {
logInfo("SchedulerBackend is ready for scheduling beginning after waiting " +
s"maxRegisteredResourcesWaitingTime: $maxRegisteredWaitingTimeNs(ns)")
return true
}
false
}
/**
* Return the number of executors currently registered with this backend.
*/
private def numExistingExecutors: Int = synchronized { executorDataMap.size }
override def getExecutorIds(): Seq[String] = synchronized {
executorDataMap.keySet.toSeq
}
override def isExecutorActive(id: String): Boolean = synchronized {
executorDataMap.contains(id) &&
!executorsPendingToRemove.contains(id) &&
!executorsPendingLossReason.contains(id) &&
!executorsPendingDecommission.contains(id)
}
/**
   * Get the max number of tasks that can be launched concurrently based on the given
   * ResourceProfile, counting all slots even if some of them are currently in use.
   * Note: don't cache the value returned by this method, because the number can change
   * when executors are added or removed.
   *
   * @param rp The ResourceProfile to use when calculating the max number of concurrent tasks.
   * @return The max number of tasks that can currently be launched concurrently.
*/
override def maxNumConcurrentTasks(rp: ResourceProfile): Int = synchronized {
val (rpIds, cpus, resources) = {
executorDataMap
.filter { case (id, _) => isExecutorActive(id) }
.values.toArray.map { executor =>
(
executor.resourceProfileId,
executor.totalCores,
executor.resourcesInfo.map { case (name, rInfo) => (name, rInfo.totalAddressAmount) }
)
}.unzip3
}
TaskSchedulerImpl.calculateAvailableSlots(scheduler, conf, rp.id, rpIds, cpus, resources)
}
// this function is for testing only
def getExecutorAvailableResources(
executorId: String): Map[String, ExecutorResourceInfo] = synchronized {
executorDataMap.get(executorId).map(_.resourcesInfo).getOrElse(Map.empty)
}
// this function is for testing only
def getExecutorResourceProfileId(executorId: String): Int = synchronized {
val execDataOption = executorDataMap.get(executorId)
execDataOption.map(_.resourceProfileId).getOrElse(ResourceProfile.UNKNOWN_RESOURCE_PROFILE_ID)
}
/**
* Request an additional number of executors from the cluster manager. This is
* requesting against the default ResourceProfile, we will need an API change to
* allow against other profiles.
* @return whether the request is acknowledged.
*/
final override def requestExecutors(numAdditionalExecutors: Int): Boolean = {
if (numAdditionalExecutors < 0) {
throw new IllegalArgumentException(
"Attempted to request a negative number of additional executor(s) " +
s"$numAdditionalExecutors from the cluster manager. Please specify a positive number!")
}
logInfo(s"Requesting $numAdditionalExecutors additional executor(s) from the cluster manager")
val response = synchronized {
val defaultProf = scheduler.sc.resourceProfileManager.defaultResourceProfile
val numExisting = requestedTotalExecutorsPerResourceProfile.getOrElse(defaultProf, 0)
requestedTotalExecutorsPerResourceProfile(defaultProf) = numExisting + numAdditionalExecutors
// Account for executors pending to be added or removed
doRequestTotalExecutors(requestedTotalExecutorsPerResourceProfile.toMap)
}
defaultAskTimeout.awaitResult(response)
}
/**
* Update the cluster manager on our scheduling needs. Three bits of information are included
* to help it make decisions.
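   *
   * A sketch of a call site (illustrative only; `backend` and the profile id and counts below
   * are made-up values, not taken from this file):
   * {{{
   * // ask for 4 executors for the default profile (id 0), with 6 locality-aware tasks,
   * // 3 of which prefer "host1" and 3 of which prefer "host2"
   * backend.requestTotalExecutors(
   *   Map(0 -> 4),
   *   Map(0 -> 6),
   *   Map(0 -> Map("host1" -> 3, "host2" -> 3)))
   * }}}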
* @param resourceProfileIdToNumExecutors The total number of executors we'd like to have per
* ResourceProfile. The cluster manager shouldn't kill any
* running executor to reach this number, but, if all
* existing executors were to die, this is the number
* of executors we'd want to be allocated.
   * @param numLocalityAwareTasksPerResourceProfileId The number of tasks in all active stages
   *                                                  that have locality preferences, per
   *                                                  ResourceProfile. This includes running,
   *                                                  pending, and completed tasks.
   * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
   *                             that would like to run on that host.
   *                             This includes running, pending, and completed tasks.
* @return whether the request is acknowledged by the cluster manager.
*/
final override def requestTotalExecutors(
resourceProfileIdToNumExecutors: Map[Int, Int],
numLocalityAwareTasksPerResourceProfileId: Map[Int, Int],
hostToLocalTaskCount: Map[Int, Map[String, Int]]
): Boolean = {
val totalExecs = resourceProfileIdToNumExecutors.values.sum
if (totalExecs < 0) {
throw new IllegalArgumentException(
"Attempted to request a negative number of executor(s) " +
s"$totalExecs from the cluster manager. Please specify a positive number!")
}
val resourceProfileToNumExecutors = resourceProfileIdToNumExecutors.map { case (rpid, num) =>
(scheduler.sc.resourceProfileManager.resourceProfileFromId(rpid), num)
}
val response = synchronized {
this.requestedTotalExecutorsPerResourceProfile.clear()
this.requestedTotalExecutorsPerResourceProfile ++= resourceProfileToNumExecutors
this.numLocalityAwareTasksPerResourceProfileId = numLocalityAwareTasksPerResourceProfileId
this.rpHostToLocalTaskCount = hostToLocalTaskCount
doRequestTotalExecutors(requestedTotalExecutorsPerResourceProfile.toMap)
}
defaultAskTimeout.awaitResult(response)
}
/**
* Request executors from the cluster manager by specifying the total number desired,
* including existing pending and running executors.
*
* The semantics here guarantee that we do not over-allocate executors for this application,
* since a later request overrides the value of any prior request. The alternative interface
* of requesting a delta of executors risks double counting new executors when there are
* insufficient resources to satisfy the first request. We make the assumption here that the
* cluster manager will eventually fulfill all requests when resources free up.
*
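   * A minimal sketch of an override in a concrete backend (hypothetical; `clusterManagerClient`
   * and its `requestTotalExecutors` call are assumptions, not part of this file):
   * {{{
   * protected override def doRequestTotalExecutors(
   *     resourceProfileToTotalExecs: Map[ResourceProfile, Int]): Future[Boolean] = {
   *   val totalExecs = resourceProfileToTotalExecs.values.sum
   *   Future.successful(clusterManagerClient.requestTotalExecutors(totalExecs))
   * }
   * }}}
   *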
* @return a future whose evaluation indicates whether the request is acknowledged.
*/
protected def doRequestTotalExecutors(
resourceProfileToTotalExecs: Map[ResourceProfile, Int]): Future[Boolean] =
Future.successful(false)
/**
* Adjust the number of executors being requested to no longer include the provided executors.
*/
private def adjustExecutors(executorIds: Seq[String]) = {
if (executorIds.nonEmpty) {
executorIds.foreach { exec =>
withLock {
val rpId = executorDataMap(exec).resourceProfileId
val rp = scheduler.sc.resourceProfileManager.resourceProfileFromId(rpId)
if (requestedTotalExecutorsPerResourceProfile.isEmpty) {
// Assume that we are killing an executor that was started by default and
// not through the request api
requestedTotalExecutorsPerResourceProfile(rp) = 0
} else {
val requestedTotalForRp = requestedTotalExecutorsPerResourceProfile(rp)
requestedTotalExecutorsPerResourceProfile(rp) = math.max(requestedTotalForRp - 1, 0)
}
}
}
doRequestTotalExecutors(requestedTotalExecutorsPerResourceProfile.toMap)
} else {
Future.successful(true)
}
}
/**
* Request that the cluster manager kill the specified executors.
*
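   * A sketch of a call site (illustrative only; `backend` and the executor ids are made-up):
   * {{{
   * // forcefully kill two executors without replacing them, not counting task failures
   * val killed: Seq[String] = backend.killExecutors(
   *   Seq("1", "7"), adjustTargetNumExecutors = true, countFailures = false, force = true)
   * }}}
   *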
* @param executorIds identifiers of executors to kill
   * @param adjustTargetNumExecutors whether the target number of executors should be adjusted
   *                                 down after these executors have been killed
   * @param countFailures if there are tasks running on the executors when they are killed, whether
   *                      those failures should be counted against the task failure limits
* @param force whether to force kill busy executors, default false
* @return the ids of the executors acknowledged by the cluster manager to be removed.
*/
final override def killExecutors(
executorIds: Seq[String],
adjustTargetNumExecutors: Boolean,
countFailures: Boolean,
force: Boolean): Seq[String] = {
logInfo(s"Requesting to kill executor(s) ${executorIds.mkString(", ")}")
val response = withLock {
val (knownExecutors, unknownExecutors) = executorIds.partition(executorDataMap.contains)
unknownExecutors.foreach { id =>
logWarning(s"Executor to kill $id does not exist!")
}
// If an executor is already pending to be removed, do not kill it again (SPARK-9795)
// If this executor is busy, do not kill it unless we are told to force kill it (SPARK-9552)
val executorsToKill = knownExecutors
.filter { id => !executorsPendingToRemove.contains(id) }
.filter { id => force || !scheduler.isExecutorBusy(id) }
executorsToKill.foreach { id => executorsPendingToRemove(id) = !countFailures }
logInfo(s"Actual list of executor(s) to be killed is ${executorsToKill.mkString(", ")}")
// If we do not wish to replace the executors we kill, sync the target number of executors
// with the cluster manager to avoid allocating new ones. When computing the new target,
// take into account executors that are pending to be added or removed.
val adjustTotalExecutors =
if (adjustTargetNumExecutors) {
adjustExecutors(executorsToKill)
} else {
Future.successful(true)
}
val killExecutors: Boolean => Future[Boolean] =
if (executorsToKill.nonEmpty) {
_ => doKillExecutors(executorsToKill)
} else {
_ => Future.successful(false)
}
val killResponse = adjustTotalExecutors.flatMap(killExecutors)(ThreadUtils.sameThread)
killResponse.flatMap(killSuccessful =>
Future.successful (if (killSuccessful) executorsToKill else Seq.empty[String])
)(ThreadUtils.sameThread)
}
defaultAskTimeout.awaitResult(response)
}
/**
* Kill the given list of executors through the cluster manager.
* @return whether the kill request is acknowledged.
*/
protected def doKillExecutors(executorIds: Seq[String]): Future[Boolean] =
Future.successful(false)
/**
* Request that the cluster manager kill all executors on a given host.
* @return whether the kill request is acknowledged.
*/
final override def killExecutorsOnHost(host: String): Boolean = {
logInfo(s"Requesting to kill any and all executors on host ${host}")
// A potential race exists if a new executor attempts to register on a host
    // that is on the blacklist and is no longer valid. To avoid this race,
// all executor registration and killing happens in the event loop. This way, either
// an executor will fail to register, or will be killed when all executors on a host
// are killed.
// Kill all the executors on this host in an event loop to ensure serialization.
driverEndpoint.send(KillExecutorsOnHost(host))
true
}
/**
* Create the delegation token manager to be used for the application. This method is called
* once during the start of the scheduler backend (so after the object has already been
* fully constructed), only if security is enabled in the Hadoop configuration.
*/
protected def createTokenManager(): Option[HadoopDelegationTokenManager] = None
/**
* Called when a new set of delegation tokens is sent to the driver. Child classes can override
* this method but should always call this implementation, which handles token distribution to
* executors.
*/
protected def updateDelegationTokens(tokens: Array[Byte]): Unit = {
SparkHadoopUtil.get.addDelegationTokens(tokens, conf)
delegationTokens.set(tokens)
executorDataMap.values.foreach { ed =>
ed.executorEndpoint.send(UpdateDelegationTokens(tokens))
}
}
protected def currentDelegationTokens: Array[Byte] = delegationTokens.get()
/**
* Checks whether the executor is blacklisted. This is called when the executor tries to
* register with the scheduler, and will deny registration if this method returns true.
*
* This is in addition to the blacklist kept by the task scheduler, so custom implementations
* don't need to check there.
*/
protected def isBlacklisted(executorId: String, hostname: String): Boolean = false
// SPARK-27112: We need to ensure that there is ordering of lock acquisition
// between TaskSchedulerImpl and CoarseGrainedSchedulerBackend objects in order to fix
// the deadlock issue exposed in SPARK-27112
private def withLock[T](fn: => T): T = scheduler.synchronized {
CoarseGrainedSchedulerBackend.this.synchronized { fn }
}
}
private[spark] object CoarseGrainedSchedulerBackend {
val ENDPOINT_NAME = "CoarseGrainedScheduler"
}
| rednaxelafx/apache-spark | core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala | Scala | apache-2.0 | 41,095 |
package mr.merc.image
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.BeforeAndAfter
class MImageCacheTest extends AnyFunSuite with BeforeAndAfter {
before {
MImageCache.clear();
}
test("can load image") {
val image = MImageCache.get("/testImages/testImage.png")
assert(image.width.value === 97)
assert(image.height.value === 19)
}
test("doesn't load image for the second time") {
val image1 = MImageCache.get("/testImages/testImage.png")
val image2 = MImageCache.get("/testImages/testImage.png")
assert(image1 === image2)
assert(MImageCache.cache.size === 1)
}
test("throw exception when image doesn't exist") {
intercept[NullPointerException] {
val image = MImageCache.get("/testImages/noExistingImage.png")
}
}
} | RenualdMarch/merc | src/test/scala/mr/merc/image/MImageCacheTest.scala | Scala | gpl-3.0 | 798 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import scala.collection._
import java.util.LinkedList
import java.util.concurrent._
import java.util.concurrent.atomic._
import kafka.network._
import kafka.utils._
import com.yammer.metrics.core.Gauge
import kafka.metrics.KafkaMetricsGroup
/**
* A request whose processing needs to be delayed for at most the given delayMs
 * The associated keys are used for bookkeeping, and represent the "trigger" that causes this request to check if it is satisfied,
* for example a key could be a (topic, partition) pair.
*/
class DelayedRequest(val keys: Seq[Any], val request: RequestChannel.Request, delayMs: Long) extends DelayedItem[RequestChannel.Request](request, delayMs) {
val satisfied = new AtomicBoolean(false)
}
/**
* A helper class for dealing with asynchronous requests with a timeout. A DelayedRequest has a request to delay
* and also a list of keys that can trigger the action. Implementations can add customized logic to control what it means for a given
* request to be satisfied. For example it could be that we are waiting for user-specified number of acks on a given (topic, partition)
* to be able to respond to a request or it could be that we are waiting for a given number of bytes to accumulate on a given request
* to be able to respond to that request (in the simple case we might wait for at least one byte to avoid busy waiting).
*
* For us the key is generally a (topic, partition) pair.
* By calling
* watch(delayedRequest)
* we will add triggers for each of the given keys. It is up to the user to then call
* val satisfied = update(key, request)
 * when a request relevant to the given key occurs. This triggers bookkeeping logic and returns any requests satisfied by this
* new request.
*
 * An implementation provides two helper functions:
* def checkSatisfied(request: R, delayed: T): Boolean
* this function returns true if the given request (in combination with whatever previous requests have happened) satisfies the delayed
* request delayed. This method will likely also need to do whatever bookkeeping is necessary.
*
* The second function is
* def expire(delayed: T)
* this function handles delayed requests that have hit their time limit without being satisfied.
*
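 * As a minimal, hypothetical sketch (the subclass name and behaviour below are illustrative and
 * not part of Kafka's API), an implementation that is satisfied by the first update on any of
 * its watched keys could look like:
 * {{{
 * class FirstUpdatePurgatory extends RequestPurgatory[DelayedRequest, RequestChannel.Request]() {
 *   protected def checkSatisfied(request: RequestChannel.Request, delayed: DelayedRequest) = true
 *   protected def expire(delayed: DelayedRequest) {
 *     // the request timed out before being satisfied; send an empty or error response here
 *   }
 * }
 * }}}
 *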
*/
abstract class RequestPurgatory[T <: DelayedRequest, R](brokerId: Int = 0) extends Logging with KafkaMetricsGroup {
/* a list of requests watching each key */
private val watchersForKey = new Pool[Any, Watchers](Some((key: Any) => new Watchers))
newGauge(
"NumDelayedRequests",
new Gauge[Int] {
def getValue = expiredRequestReaper.unsatisfied.get()
}
)
/* background thread expiring requests that have been waiting too long */
private val expiredRequestReaper = new ExpiredRequestReaper
private val expirationThread = Utils.daemonThread("request-expiration-task", expiredRequestReaper)
expirationThread.start()
/**
* Add a new delayed request watching the contained keys
*/
def watch(delayedRequest: T) {
for(key <- delayedRequest.keys) {
      val lst = watchersFor(key)
lst.add(delayedRequest)
}
expiredRequestReaper.enqueue(delayedRequest)
}
/**
* Update any watchers and return a list of newly satisfied requests.
*/
def update(key: Any, request: R): Seq[T] = {
val w = watchersForKey.get(key)
if(w == null)
Seq.empty
else
w.collectSatisfiedRequests(request)
}
private def watchersFor(key: Any) = watchersForKey.getAndMaybePut(key)
/**
* Check if this request satisfied this delayed request
*/
protected def checkSatisfied(request: R, delayed: T): Boolean
/**
* Handle an expired delayed request
*/
protected def expire(delayed: T)
/**
   * Shut down the expiry thread
*/
def shutdown() {
expiredRequestReaper.shutdown()
}
/**
* A linked list of DelayedRequests watching some key with some associated
* bookkeeping logic.
*/
private class Watchers {
/* a few magic parameters to help do cleanup to avoid accumulating old watchers */
private val CleanupThresholdSize = 100
private val CleanupThresholdPrct = 0.5
private val requests = new LinkedList[T]
/* you can only change this if you have added something or marked something satisfied */
var liveCount = 0.0
def add(t: T) {
synchronized {
requests.add(t)
liveCount += 1
maybePurge()
}
}
private def maybePurge() {
if(requests.size > CleanupThresholdSize && liveCount / requests.size < CleanupThresholdPrct) {
val iter = requests.iterator()
while(iter.hasNext) {
val curr = iter.next
if(curr.satisfied.get())
iter.remove()
}
}
}
def decLiveCount() {
synchronized {
liveCount -= 1
}
}
def collectSatisfiedRequests(request: R): Seq[T] = {
val response = new mutable.ArrayBuffer[T]
synchronized {
val iter = requests.iterator()
while(iter.hasNext) {
val curr = iter.next
if(curr.satisfied.get) {
// another thread has satisfied this request, remove it
iter.remove()
} else {
// synchronize on curr to avoid any race condition with expire
// on client-side.
val satisfied = curr synchronized checkSatisfied(request, curr)
if(satisfied) {
iter.remove()
val updated = curr.satisfied.compareAndSet(false, true)
            if(updated) {
response += curr
liveCount -= 1
expiredRequestReaper.satisfyRequest()
}
}
}
}
}
response
}
}
/**
   * Runnable to expire requests that have sat unfulfilled past their deadline
*/
private class ExpiredRequestReaper extends Runnable with Logging {
this.logIdent = "ExpiredRequestReaper-%d ".format(brokerId)
/* a few magic parameters to help do cleanup to avoid accumulating old watchers */
private val CleanupThresholdSize = 100
private val CleanupThresholdPrct = 0.5
private val delayed = new DelayQueue[T]
private val running = new AtomicBoolean(true)
private val shutdownLatch = new CountDownLatch(1)
private val needsPurge = new AtomicBoolean(false)
/* The count of elements in the delay queue that are unsatisfied */
private [kafka] val unsatisfied = new AtomicInteger(0)
/** Main loop for the expiry thread */
def run() {
while(running.get) {
try {
val curr = pollExpired()
curr synchronized {
expire(curr)
}
} catch {
case ie: InterruptedException =>
if(needsPurge.getAndSet(false)) {
val purged = purgeSatisfied()
debug("Forced purge of " + purged + " requests from delay queue.")
}
case e: Exception =>
error("Error in long poll expiry thread: ", e)
}
}
shutdownLatch.countDown()
}
/** Add a request to be expired */
def enqueue(t: T) {
delayed.add(t)
unsatisfied.incrementAndGet()
if(unsatisfied.get > CleanupThresholdSize && unsatisfied.get / delayed.size.toDouble < CleanupThresholdPrct)
forcePurge()
}
private def forcePurge() {
needsPurge.set(true)
expirationThread.interrupt()
}
    /** Shut down the expiry thread */
def shutdown() {
debug("Shutting down.")
running.set(false)
expirationThread.interrupt()
shutdownLatch.await()
debug("Shut down complete.")
}
/** Record the fact that we satisfied a request in the stats for the expiry queue */
def satisfyRequest(): Unit = unsatisfied.getAndDecrement()
/**
* Get the next expired event
*/
private def pollExpired(): T = {
while(true) {
val curr = delayed.take()
val updated = curr.satisfied.compareAndSet(false, true)
if(updated) {
unsatisfied.getAndDecrement()
for(key <- curr.keys)
watchersFor(key).decLiveCount()
return curr
}
}
throw new RuntimeException("This should not happen")
}
/**
* Delete all expired events from the delay queue
*/
private def purgeSatisfied(): Int = {
var purged = 0
val iter = delayed.iterator()
while(iter.hasNext) {
val curr = iter.next()
if(curr.satisfied.get) {
iter.remove()
purged += 1
}
}
purged
}
}
} | dchenbecker/kafka-sbt | core/src/main/scala/kafka/server/RequestPurgatory.scala | Scala | apache-2.0 | 9,442 |
package temportalist.esotericraft.galvanization.common.task.ai.world
import net.minecraft.block.state.IBlockState
import net.minecraft.entity.EntityCreature
import net.minecraft.util.EnumFacing
import net.minecraft.util.EnumFacing.Axis
import net.minecraft.util.math.BlockPos
import net.minecraft.world.World
import temportalist.esotericraft.api.galvanize.ai.EnumTaskType
import temportalist.esotericraft.galvanization.common.task.ai.core.TaskBase
import temportalist.esotericraft.galvanization.common.task.ai.interfaces.ITaskSized
import temportalist.origin.api.common.lib.Vect
/**
*
* Created by TheTemportalist on 5/26/2016.
*
* @author TheTemportalist
*/
abstract class TaskHarvest(
pos: BlockPos, face: EnumFacing
) extends TaskBase(pos, face) with ITaskSized {
// ~~~~~ Task Info ~~~~~
override def getTaskType: EnumTaskType = EnumTaskType.WORLD_INTERACTION
// ~~~~~ Bounding Box ~~~~~
override def getRadius(axis: Axis): Double = axis match {
case Axis.X => 4.5
case Axis.Y => 0.5
case Axis.Z => 4.5
case _ => 0
}
// ~~~~~ AI ~~~~~
private var destinationPos: Vect = null
def isBlockValid(world: World, pos: BlockPos, state: IBlockState): Boolean
def harvestState(world: World, pos: BlockPos, state: IBlockState, entity: EntityCreature): Unit
override def shouldExecute(entity: EntityCreature): Boolean = {
val world = entity.getEntityWorld
val aabb = this.getBoundingBox
for {
x <- aabb.minX.toInt to aabb.maxX.toInt
y <- aabb.minY.toInt to aabb.maxY.toInt
z <- aabb.minZ.toInt to aabb.maxZ.toInt
} {
val pos = new BlockPos(x, y, z)
if (this.isBlockValid(world, pos, world.getBlockState(pos)))
return true
}
false
}
override def updateTask(entity: EntityCreature): Unit = {
if (this.destinationPos == null)
this.getClosestValidBlockToOrigin(entity.getEntityWorld) match {
case pos: BlockPos => this.destinationPos = new Vect(pos)
case _ => // null pos
}
if (this.destinationPos == null) return
val distToDestination = (new Vect(entity) - this.destinationPos).length
if (distToDestination <= 2.0) {
val pos = this.destinationPos.toBlockPos
val targetState = entity.getEntityWorld.getBlockState(pos)
if (this.isBlockValid(entity.getEntityWorld, pos, targetState))
this.harvestState(entity.getEntityWorld, pos, targetState, entity)
this.destinationPos = null
}
else {
this.moveEntityTowards(entity,
this.destinationPos.x, this.destinationPos.y, this.destinationPos.z,
1F, this.getCanFly)
}
}
def getClosestValidBlockToOrigin(world: World): BlockPos = {
val originBlockPos = new Vect(this.pos)
val aabb = this.getBoundingBox
// the distance from the target block to the origin
var leastDistance = -1D
// the block pos of the target block
var posTarget: BlockPos = null
for {
x <- aabb.minX.toInt to aabb.maxX.toInt
y <- aabb.minY.toInt to aabb.maxY.toInt
z <- aabb.minZ.toInt to aabb.maxZ.toInt
} {
val posBlock = new BlockPos(x, y, z)
if (this.isBlockValid(world, posBlock, world.getBlockState(posBlock))) {
val posBlockVect = new Vect(posBlock) + Vect.CENTER
val posDiff = originBlockPos - posBlockVect
val dist = posDiff.length
if (leastDistance < 0 || dist < leastDistance) {
leastDistance = dist
posTarget = posBlock
}
}
}
posTarget
}
// ~~~~~ End ~~~~~
}
| TheTemportalist/EsoTeriCraft | src/main/scala/temportalist/esotericraft/galvanization/common/task/ai/world/TaskHarvest.scala | Scala | apache-2.0 | 3,376 |
/*§
===========================================================================
Chronos
===========================================================================
Copyright (C) 2015-2016 Gianluca Costa
===========================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package info.gianlucacosta.chronos.interpreter.io
import java.io.{BufferedReader, InputStreamReader}
import info.gianlucacosta.chronos.interpreter.atoms.StringAtom
import info.gianlucacosta.chronos.interpreter.exceptions.FailedReadException
class ConsoleInput extends BasicInput {
private val lineReader = new BufferedReader(new InputStreamReader(System.in))
override def readString(prompt: String): StringAtom = {
print(prompt)
val line = lineReader.readLine()
if (line == null) {
throw new FailedReadException("Interrupted input")
}
StringAtom(line)
}
}
| giancosta86/Chronos | src/main/scala/info/gianlucacosta/chronos/interpreter/io/ConsoleInput.scala | Scala | apache-2.0 | 1,510 |
package org.scalaide.core.quickassist
import org.junit.Test
class CreateClassTests {
import UiQuickAssistTests._
@Test
def createClassQuickFixes(): Unit = {
withQuickFixes("createclass/UsesMissingClass.scala")("Create class 'ThisClassDoesNotExist'")
}
}
| Kwestor/scala-ide | org.scala-ide.sdt.core.tests/src/org/scalaide/core/quickassist/CreateClassTests.scala | Scala | bsd-3-clause | 269 |
package dk.gp.gp
import dk.bayes.dsl.infer
import dk.bayes.dsl.variable.gaussian.multivariate.MultivariateGaussian
import dk.bayes.dsl.variable.Gaussian
import breeze.linalg.DenseMatrix
import breeze.linalg.DenseVector
import dk.gp.cov.CovFunc
import breeze.linalg.cholesky
/**
* Returns p(y) = integral of p(f)*p(y|f)df
*
* p(f) - Gaussian process latent variable
* p(y|f) - Gaussian process conditional latent variable
*
*
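 * A usage sketch (illustrative only: `f`, `x`, `covFunc` and `covFuncParams` are assumed to be
 * defined elsewhere, with `f` a prior over the latent function values at the training inputs `x`):
 * {{{
 * val model = GPPredictSingle(f, x, covFunc, covFuncParams)
 * val t = DenseMatrix((1.2, 3.4)) // one test point whose dimensionality must match x (1 x 2)
 * val pY = model.predictSingle(t) // Gaussian p(y) at the test point
 * }}}
 *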
*/
case class GPPredictSingle(f: dk.bayes.math.gaussian.MultivariateGaussian, x: DenseMatrix[Double], covFunc: CovFunc, covFuncParams: DenseVector[Double], mean: Double = 0d) {
private val condGPFactory = ConditionalGPFactory(x, covFunc, covFuncParams, mean)
private val fvchol = cholesky(f.v)
/**
    * @param t A single N-dimensional variable (a matrix of input points) for which p(y) is computed
*
* @return p(y)
*/
def predictSingle(t: DenseMatrix[Double]): dk.bayes.math.gaussian.MultivariateGaussian = {
val (a, b, v) = condGPFactory.create(t)
val skillMean = a * f.m + b
val al = a*fvchol
val skillVar = v + al*al.t
val predicted = dk.bayes.math.gaussian.MultivariateGaussian(skillMean, skillVar)
predicted
}
} | danielkorzekwa/bayes-scala-gp | src/main/scala/dk/gp/gp/GPPredictSingle.scala | Scala | bsd-2-clause | 1,261 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.tests
package models
// #scalatest-models
case class Role(name:String)
case class User(id: String, name: String, email:String)
// #scalatest-models
| wsargent/playframework | documentation/manual/working/scalaGuide/main/tests/code/models/User.scala | Scala | apache-2.0 | 247 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kinesis
import java.util.List
import scala.util.Random
import scala.util.control.NonFatal
import com.amazonaws.services.kinesis.clientlibrary.exceptions.{InvalidStateException, KinesisClientLibDependencyException, ShutdownException, ThrottlingException}
import com.amazonaws.services.kinesis.clientlibrary.interfaces.{IRecordProcessor, IRecordProcessorCheckpointer}
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason
import com.amazonaws.services.kinesis.model.Record
import org.apache.spark.internal.Logging
/**
* Kinesis-specific implementation of the Kinesis Client Library (KCL) IRecordProcessor.
* This implementation operates on the Array[Byte] from the KinesisReceiver.
* The Kinesis Worker creates an instance of this KinesisRecordProcessor for each
* shard in the Kinesis stream upon startup. This is normally done in separate threads,
* but the KCLs within the KinesisReceivers will balance themselves out if you create
* multiple Receivers.
*
* @param receiver Kinesis receiver
* @param workerId for logging purposes
*/
private[kinesis] class KinesisRecordProcessor[T](receiver: KinesisReceiver[T], workerId: String)
extends IRecordProcessor with Logging {
// shardId populated during initialize()
@volatile
private var shardId: String = _
/**
* The Kinesis Client Library calls this method during IRecordProcessor initialization.
*
* @param shardId assigned by the KCL to this particular RecordProcessor.
*/
override def initialize(shardId: String) {
this.shardId = shardId
logInfo(s"Initialized workerId $workerId with shardId $shardId")
}
/**
* This method is called by the KCL when a batch of records is pulled from the Kinesis stream.
* This is the record-processing bridge between the KCL's IRecordProcessor.processRecords()
* and Spark Streaming's Receiver.store().
*
* @param batch list of records from the Kinesis stream shard
* @param checkpointer used to update Kinesis when this batch has been processed/stored
* in the DStream
*/
override def processRecords(batch: List[Record], checkpointer: IRecordProcessorCheckpointer) {
if (!receiver.isStopped()) {
try {
// Limit the number of processed records from Kinesis stream. This is because the KCL cannot
// control the number of aggregated records to be fetched even if we set `MaxRecords`
        // in `KinesisClientLibConfiguration`. For example, if we set the max number of records
        // for a worker to 10 and a producer aggregates two records into one message, the worker
        // may receive 20 records each time this callback is called.
val maxRecords = receiver.getCurrentLimit
for (start <- 0 until batch.size by maxRecords) {
val miniBatch = batch.subList(start, math.min(start + maxRecords, batch.size))
receiver.addRecords(shardId, miniBatch)
logDebug(s"Stored: Worker $workerId stored ${miniBatch.size} records " +
s"for shardId $shardId")
}
receiver.setCheckpointer(shardId, checkpointer)
} catch {
case NonFatal(e) =>
/*
* If there is a failure within the batch, the batch will not be checkpointed.
* This will potentially cause records since the last checkpoint to be processed
* more than once.
*/
logError(s"Exception: WorkerId $workerId encountered and exception while storing " +
s" or checkpointing a batch for workerId $workerId and shardId $shardId.", e)
/* Rethrow the exception to the Kinesis Worker that is managing this RecordProcessor. */
throw e
}
} else {
/* RecordProcessor has been stopped. */
logInfo(s"Stopped: KinesisReceiver has stopped for workerId $workerId" +
s" and shardId $shardId. No more records will be processed.")
}
}
/**
* Kinesis Client Library is shutting down this Worker for 1 of 2 reasons:
* 1) the stream is resharding by splitting or merging adjacent shards
* (ShutdownReason.TERMINATE)
* 2) the failed or latent Worker has stopped sending heartbeats for whatever reason
* (ShutdownReason.ZOMBIE)
*
* @param checkpointer used to perform a Kinesis checkpoint for ShutdownReason.TERMINATE
* @param reason for shutdown (ShutdownReason.TERMINATE or ShutdownReason.ZOMBIE)
*/
override def shutdown(
checkpointer: IRecordProcessorCheckpointer,
reason: ShutdownReason): Unit = {
logInfo(s"Shutdown: Shutting down workerId $workerId with reason $reason")
// null if not initialized before shutdown:
if (shardId == null) {
logWarning(s"No shardId for workerId $workerId?")
} else {
reason match {
/*
* TERMINATE Use Case. Checkpoint.
* Checkpoint to indicate that all records from the shard have been drained and processed.
* It's now OK to read from the new shards that resulted from a resharding event.
*/
case ShutdownReason.TERMINATE => receiver.removeCheckpointer(shardId, checkpointer)
/*
* ZOMBIE Use Case or Unknown reason. NoOp.
* No checkpoint because other workers may have taken over and already started processing
* the same records.
* This may lead to records being processed more than once.
* Return null so that we don't checkpoint
*/
case _ => receiver.removeCheckpointer(shardId, null)
}
}
}
}
private[kinesis] object KinesisRecordProcessor extends Logging {
/**
* Retry the given amount of times with a random backoff time (millis) less than the
* given maxBackOffMillis
*
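   * For example, checkpointing code might wrap a KCL call like this (illustrative only;
   * `checkpointer` is assumed to be an IRecordProcessorCheckpointer already in scope):
   * {{{
   * KinesisRecordProcessor.retryRandom(checkpointer.checkpoint(), numRetriesLeft = 4,
   *   maxBackOffMillis = 100)
   * }}}
   *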
* @param expression expression to evaluate
* @param numRetriesLeft number of retries left
* @param maxBackOffMillis: max millis between retries
*
* @return evaluation of the given expression
* @throws Unretryable exception, unexpected exception,
* or any exception that persists after numRetriesLeft reaches 0
*/
@annotation.tailrec
def retryRandom[T](expression: => T, numRetriesLeft: Int, maxBackOffMillis: Int): T = {
util.Try { expression } match {
/* If the function succeeded, evaluate to x. */
case util.Success(x) => x
/* If the function failed, either retry or throw the exception */
case util.Failure(e) => e match {
/* Retry: Throttling or other Retryable exception has occurred */
case _: ThrottlingException | _: KinesisClientLibDependencyException
if numRetriesLeft > 1 =>
val backOffMillis = Random.nextInt(maxBackOffMillis)
Thread.sleep(backOffMillis)
logError(s"Retryable Exception: Random backOffMillis=${backOffMillis}", e)
retryRandom(expression, numRetriesLeft - 1, maxBackOffMillis)
/* Throw: Shutdown has been requested by the Kinesis Client Library. */
case _: ShutdownException =>
logError(s"ShutdownException: Caught shutdown exception, skipping checkpoint.", e)
throw e
/* Throw: Non-retryable exception has occurred with the Kinesis Client Library */
case _: InvalidStateException =>
logError(s"InvalidStateException: Cannot save checkpoint to the DynamoDB table used" +
s" by the Amazon Kinesis Client Library. Table likely doesn't exist.", e)
throw e
/* Throw: Unexpected exception has occurred */
case _ =>
logError(s"Unexpected, non-retryable exception.", e)
throw e
}
}
}
}
| bravo-zhang/spark | external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisRecordProcessor.scala | Scala | apache-2.0 | 8,427 |
package se.lu.nateko.cp.meta.ingestion.badm
import scala.concurrent.Future
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.HttpMethods
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.RequestEntity
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import spray.json.JsObject
import spray.json.JsValue
import scala.concurrent.ExecutionContext
object EtcEntriesFetcher {
def getJson(uri: Uri, payload: JsObject)(implicit system: ActorSystem, m: Materializer): Future[JsObject] = {
import system.dispatcher
for(
entity <- Marshal(payload).to[RequestEntity];
request = HttpRequest(HttpMethods.POST, uri, Nil, entity);
resp <- Http().singleRequest(request);
json <- responseToJson(resp).collect{
case obj: JsObject => obj
}
) yield json
}
def responseToJson(resp: HttpResponse)(implicit m: Materializer, ctxt: ExecutionContext): Future[JsValue] = {
resp.status match {
case StatusCodes.OK => Unmarshal(resp.entity).to[JsValue]
case _ =>
resp.discardEntityBytes()
Future.failed(new Exception(s"Got ${resp.status} from the ETC metadata server"))
}
}
}
| ICOS-Carbon-Portal/meta | src/main/scala/se/lu/nateko/cp/meta/ingestion/badm/EtcEntriesFetcher.scala | Scala | gpl-3.0 | 1,441 |
package org.dele.text.maen
import org.dele.text.maen.matchers.TMatcher
/**
* Created by jiaji on 2016-08-15.
*/
sealed trait MatchFromMagnet {
type Result = AtomSeqMatch
def apply():Result
}
object MatchFromMagnet {
implicit def fromRange(tp:(TMatchResultPool, Range, TMatcher)) = new MatchFromMagnet {
def apply():Result = {
val resultPool = tp._1
val range = tp._2
val matcher = tp._3
new AtomSeqMatch(resultPool, range, matcher, AtomSeqMatch.EmptySubMatches)
}
}
implicit def fromIndex(tp:(TMatchResultPool, Int, TMatcher)) = new MatchFromMagnet {
def apply():Result = {
val resultPool = tp._1
val index = tp._2
val matcher = tp._3
new AtomSeqMatch(resultPool, index to index, matcher, AtomSeqMatch.EmptySubMatches)
}
}
implicit def fromIndexWithSubMatches(tp:(TMatchResultPool, TMatcher, List[AtomSeqMatch])) = new MatchFromMagnet {
def apply():Result = {
val resultPool = tp._1
val matcher = tp._2
val subMatches = tp._3
val start = subMatches.map(_.range.start).min
val end = subMatches.map(_.range.end).max
new AtomSeqMatch(resultPool, start to end, matcher, subMatches)
}
}
}
| new2scala/text-util | maen/src/main/scala/org/dele/text/maen/MatchFromMagnet.scala | Scala | apache-2.0 | 1,218 |