code stringlengths 5-1M | repo_name stringlengths 5-109 | path stringlengths 6-208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5-1M |
---|---|---|---|---|---|
/*
* Copyright 2016-2020 47 Degrees Open Source <https://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalaexercises.evaluator
import cats.effect.Sync
import io.circe.{Decoder, Encoder}
import org.http4s._
import org.http4s.circe._
/**
* Provides Json serialization codecs for the http4s services
*/
trait Http4sCodecInstances {
implicit def entityDecoderOf[F[_]: Sync, A: Decoder]: EntityDecoder[F, A] = jsonOf[F, A]
implicit def entityEncoderOf[F[_]: Sync, A: Encoder]: EntityEncoder[F, A] = jsonEncoderOf[F, A]
}
object codecs extends Http4sCodecInstances
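// Hedged usage sketch (added for illustration, not part of the original file): with these
// instances, any type that has circe Decoder/Encoder instances can be read from a request
// body or written as a JSON response. `Ping` and the explicit calls below are assumptions
// made for the example; in real code the instances are normally pulled in implicitly,
// e.g. via `import codecs._` followed by `req.as[Ping]`.
object CodecsUsageSketch {
  import cats.effect.IO
  final case class Ping(message: String)
  implicit val pingDecoder: Decoder[Ping] = Decoder.forProduct1("message")(Ping.apply)
  implicit val pingEncoder: Encoder[Ping] = Encoder.forProduct1("message")((p: Ping) => p.message)
  // Resolved through entityDecoderOf / entityEncoderOf defined above (Sync[IO] comes from cats-effect).
  val pingEntityDecoder: EntityDecoder[IO, Ping] = codecs.entityDecoderOf[IO, Ping]
  val pingEntityEncoder: EntityEncoder[IO, Ping] = codecs.entityEncoderOf[IO, Ping]
}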
| scala-exercises/evaluator | server/src/main/scala/org/scalaexercises/evaluator/codecs.scala | Scala | apache-2.0 | 1,117 |
package core
import io.apibuilder.spec.v0.models.{Application, Organization, Import}
import org.scalatest.{FunSpec, Matchers}
class ImporterSpec extends FunSpec with Matchers {
describe("with an invalid service") {
val json = """
{
"name": "Import Shared",
"apidoc": { "version": "0.9.6" }
}
"""
val path = TestHelper.writeToTempFile(json)
val imp = Importer(FileServiceFetcher(), s"file://$path")
imp.validate.size should be > 0
}
describe("with a valid service") {
val json = """
{
"name": "Import Shared",
"apidoc": { "version": "0.9.6" },
"organization": { "key": "test" },
"application": { "key": "import-shared" },
"namespace": "test.apibuilder.import-shared",
"version": "1.0.0",
"attributes": [],
"imports": [],
"headers": [],
"unions": [],
"enums": [],
"resources": [],
"info": {},
"models": [
{
"name": "user",
"plural": "users",
"fields": [
{ "name": "id", "type": "long", "required": true, "attributes": [] }
],
"attributes": []
}
]
}
"""
it("parses service") {
val path = TestHelper.writeToTempFile(json)
val imp = Importer(FileServiceFetcher(), s"file://$path")
imp.validate should be(Seq.empty)
val service = imp.service
service.name should be("Import Shared")
service.organization should be(Organization(key = "test"))
service.application should be(Application(key = "import-shared"))
service.namespace should be("test.apibuilder.import-shared")
service.models.map(_.name) should be(Seq("user"))
val user = service.models.find(_.name == "user").get
user.fields.map(_.name) should be(Seq("id"))
}
}
}
| mbryzek/apidoc | core/src/test/scala/core/ImporterSpec.scala | Scala | mit | 1,834 |
package org.littlewings.tweetbot.standard.lyrics.cachestore
import java.util.UUID
import org.infinispan.commons.configuration.ConfiguredBy
import org.infinispan.commons.persistence.Store
import org.infinispan.persistence.spi.InitializationContext
import org.littlewings.tweetbot.cachestore.AutoReloadableInMemoryCacheStore
import org.littlewings.tweetbot.standard.lyrics.{Lyrics, LyricsFactory}
@Store
@ConfiguredBy(classOf[LyricsCacheStoreConfiguration])
class LyricsCacheStore extends AutoReloadableInMemoryCacheStore[String, Lyrics] {
private[cachestore] var artistNameAlias: String = _
override protected def reload: Map[String, Lyrics] = {
val lyrics = LyricsFactory.buildLyrics(artistNameAlias)
lyrics
.map(l => (UUID.randomUUID.toString -> l))
.toMap
}
override def init(ctx: InitializationContext): Unit = {
super.init(ctx)
artistNameAlias = context.getConfiguration[LyricsCacheStoreConfiguration].artistNameAlias
}
}
| kazuhira-r/tweet-bot | src/main/scala/org/littlewings/tweetbot/standard/lyrics/cachestore/LyricsCacheStore.scala | Scala | mit | 971 |
////////////////////////////////////////////////////////////////////////////////
// //
// OpenSolid is a generic library for the representation and manipulation //
// of geometric objects such as points, curves, surfaces, and volumes. //
// //
// Copyright 2007-2015 by Ian Mackenzie //
// [email protected] //
// //
// This Source Code Form is subject to the terms of the Mozilla Public //
// License, v. 2.0. If a copy of the MPL was not distributed with this file, //
// you can obtain one at http://mozilla.org/MPL/2.0/. //
// //
////////////////////////////////////////////////////////////////////////////////
package org.opensolid.core
final case class LineSegment2d(startPoint: Point2d, endPoint: Point2d)
extends Scalable2d[LineSegment2d]
with Bounded[Bounds2d]
with GeometricallyComparable[LineSegment2d]
with Curve2d {
def endpoints: (Point2d, Point2d) =
(startPoint, endPoint)
def vector: Vector2d =
startPoint.vectorTo(endPoint)
def direction: Option[Direction2d] =
vector.direction
def normalDirection: Option[Direction2d] =
direction.map(_.normalDirection)
def axis: Option[Axis2d] =
direction.map(Axis2d(startPoint, _))
def reversed: LineSegment2d =
LineSegment2d(endPoint, startPoint)
def length: Double =
vector.length
def squaredLength: Double =
vector.squaredLength
def midpoint: Point2d =
Point2d.midpoint(startPoint, endPoint)
override def bounds: Bounds2d =
startPoint.hull(endPoint)
override def isEqualTo(that: LineSegment2d, tolerance: Double): Boolean =
this.startPoint.isEqualTo(that.startPoint, tolerance) &&
this.endPoint.isEqualTo(that.endPoint, tolerance)
override def scaledAbout(point: Point2d, scale: Double): LineSegment2d = {
require(scale > 0.0)
LineSegment2d(startPoint.scaledAbout(point, scale), endPoint.scaledAbout(point, scale))
}
override def transformedBy(transformation: Transformation2d): LineSegment2d =
LineSegment2d(
startPoint.transformedBy(transformation),
endPoint.transformedBy(transformation)
)
def projectedOnto(axis: Axis2d): LineSegment2d =
LineSegment2d(startPoint.projectedOnto(axis), endPoint.projectedOnto(axis))
def placedOnto(plane: Plane3d): LineSegment3d =
LineSegment3d(startPoint.placedOnto(plane), endPoint.placedOnto(plane))
def parameterized: ParametricCurve2d =
parameterizedBy(startPoint + CurveParameter * vector, Interval.Unit)
}
object LineSegment2d {
def fromEndpoints(endpoints: (Point2d, Point2d)): LineSegment2d = endpoints match {
case (startPoint, endPoint) => LineSegment2d(startPoint, endPoint)
}
}
| ianmackenzie/opensolid-core | src/main/scala/org/opensolid/core/LineSegment2d.scala | Scala | mpl-2.0 | 3,077 |
object Dependencies {
import Dependency._
val sparkExtSql =
Seq(
sparkSql % "provided"
, Test.scalaTest
)
val sparkExtMllib =
Seq(
sparkMLLib % "provided"
, s2Geometry
, Test.scalaTest
)
val sparkExtTest =
Seq(
sparkSql % "provided"
, Test.scalaTest
)
val sparkExtExample =
Seq(
sparkMLLib
)
}
| collectivemedia/spark-ext | project/Dependencies.scala | Scala | apache-2.0 | 397 |
package org.bitcoins.core.script.constant
import org.bitcoins.core.number.Int64
import org.bitcoins.core.script.ScriptOperationFactory
import org.bitcoins.core.util.{ BitcoinSUtil, BitcoinScriptUtil, Factory }
import scala.util.{ Failure, Success, Try }
/**
* Created by chris on 1/6/16.
*/
/**
* This is the root class of Script. Every element in the Script language is a
* ScriptToken - think of this the same way you think about Object in Java.
*/
sealed trait ScriptToken {
/** The hexadecimal representation of this [[ScriptToken]]. */
def hex: String
/** The byte representation of this [[ScriptToken]]. */
def bytes: Seq[Byte] = BitcoinSUtil.decodeHex(hex)
/** The conversion from the byte representation of a [[ScriptToken]] to a number. */
def toLong = ScriptNumberUtil.toLong(hex)
}
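// Hedged illustration (added for this write-up, not part of the original file): every
// ScriptToken exposes hex, byte and numeric views of the same data. For the script number
// one, the shortest encoding is assumed to be the single byte 0x01.
object ScriptTokenViewsSketch {
  val one: ScriptNumber = ScriptNumber.one
  val hexView: String = one.hex       // expected to be "01" under the shortest encoding
  val byteView: Seq[Byte] = one.bytes // the decoded hex, i.e. Seq(0x01.toByte)
  val longView: Long = one.toLong     // 1L
}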
/**
* A script operation is an instruction that takes an input and gives an output
* Think of these as functions.
*/
trait ScriptOperation extends ScriptToken {
def opCode: Int
override def hex: String = BitcoinSUtil.encodeHex(opCode.toByte)
}
/** A constant in the Script language for instance as String or a number. */
sealed abstract class ScriptConstant extends ScriptToken {
/** Returns if the [[ScriptConstant]] is encoded in the shortest possible way. */
def isShortestEncoding: Boolean = BitcoinScriptUtil.isShortestEncoding(this)
}
/** Represents a [[ScriptNumber]] in the Script language. */
sealed abstract class ScriptNumber extends ScriptConstant {
def +(that: ScriptNumber): ScriptNumber = ScriptNumber(underlying + that.underlying)
def unary_- = ScriptNumber(-underlying)
def -(that: ScriptNumber): ScriptNumber = ScriptNumber(underlying - that.underlying)
def *(that: ScriptNumber): ScriptNumber = ScriptNumber(underlying * that.underlying)
def <(that: ScriptNumber): Boolean = underlying < that.underlying
def <=(that: ScriptNumber): Boolean = underlying <= that.underlying
def >(that: ScriptNumber): Boolean = underlying > that.underlying
def >=(that: ScriptNumber): Boolean = underlying >= that.underlying
def <(that: Int64): Boolean = underlying < that.toLong
def <=(that: Int64): Boolean = underlying <= that.toLong
def >(that: Int64): Boolean = underlying > that.toLong
def >=(that: Int64): Boolean = underlying >= that.toLong
def &(that: ScriptNumber): ScriptNumber = ScriptNumber(underlying & that.underlying)
def &(that: Int64): ScriptNumber = ScriptNumber(underlying & that.toLong)
def |(that: ScriptNumber): ScriptNumber = ScriptNumber(underlying | that.underlying)
/**
* This equality just checks that the underlying scala numbers are equivalent, NOT if the numbers
* are bitwise equivalent in Script. For instance ScriptNumber(0x01).numEqual(ScriptNumber(0x00000000001)) == true
* but (ScriptNumber(0x01) == (ScriptNumber(0x00000000001))) == false.
*/
def numEqual(that: ScriptNumber): Boolean = underlying == that.underlying
def toInt = {
val l = toLong
require(l <= Int.MaxValue && l >= Int.MinValue)
l.toInt
}
override def toLong = underlying
/** The underlying number of the [[ScriptNumber]]. */
protected def underlying: Long
}
object ScriptNumber extends Factory[ScriptNumber] {
/** Represents the number zero inside of bitcoin's script language. */
lazy val zero: ScriptNumber = ScriptNumberImpl(0, "")
/** Represents the number one inside of bitcoin's script language. */
lazy val one: ScriptNumber = ScriptNumberImpl(1)
/** Represents the number negative one inside of bitcoin's script language. */
lazy val negativeOne: ScriptNumber = ScriptNumberImpl(-1)
/** Bitcoin has a numbering system which has a negative zero. */
lazy val negativeZero: ScriptNumber = fromHex("80")
def fromBytes(bytes: Seq[Byte]) = {
if (bytes.isEmpty) zero
else ScriptNumberImpl(ScriptNumberUtil.toLong(bytes), BitcoinSUtil.encodeHex(bytes))
}
def apply(underlying: Long): ScriptNumber = {
if (underlying == 0) zero else apply(ScriptNumberUtil.longToHex(underlying))
}
def apply(bytes: Seq[Byte], requireMinimal: Boolean): Try[ScriptNumber] = apply(BitcoinSUtil.encodeHex(bytes), requireMinimal)
def apply(hex: String, requireMinimal: Boolean): Try[ScriptNumber] = {
if (requireMinimal && !BitcoinScriptUtil.isShortestEncoding(hex)) {
Failure(new IllegalArgumentException("The given hex was not the shortest encoding for the script number: " + hex))
} else {
val number = apply(hex)
Success(number)
}
}
/**
* This represents a [[ScriptNumber]] inside of bitcoin
*
* @param underlying the number being represented
* @param hex the hex representation of the number - this can be different than the obvious value for
* the number. For instance we could have padded the number with another word of zeros
*/
private case class ScriptNumberImpl(underlying: Long, override val hex: String) extends ScriptNumber
/**
* Companion object for [[ScriptNumberImpl]] that gives us access to more constructor types for the
* [[ScriptNumberImpl]] case class.
*/
private object ScriptNumberImpl {
def apply(hex: String): ScriptNumber = ScriptNumberImpl(ScriptNumberUtil.toLong(hex), hex)
def apply(bytes: Seq[Byte]): ScriptNumber = ScriptNumberImpl(ScriptNumberUtil.toLong(bytes))
def apply(underlying: Long): ScriptNumber = ScriptNumberImpl(underlying, ScriptNumberUtil.longToHex(underlying))
def apply(int64: Int64): ScriptNumber = ScriptNumberImpl(int64.toLong)
}
}
/** The next byte contains the number of bytes to be pushed onto the stack. */
case object OP_PUSHDATA1 extends ScriptOperation {
override def opCode = 76
/** The maximum amount of bytes OP_PUSHDATA1 can push onto the stack. */
def max = 255
}
/** The next two bytes contain the number of bytes to be pushed onto the stack. */
case object OP_PUSHDATA2 extends ScriptOperation {
override def opCode = 77
/** The max amount of data that OP_PUSHDATA2 can push onto the stack. */
def max = 65535
}
/** The next four bytes contain the number of bytes to be pushed onto the stack. */
case object OP_PUSHDATA4 extends ScriptOperation {
override def opCode = 78
  /** The maximum amount of data that OP_PUSHDATA4 can push onto the stack. */
def max = 4294967295L
}
/**
 * Represents a [[ScriptNumberOperation]] where the number in the operation is pushed onto the stack,
 * i.e. OP_0 pushes 0 onto the stack, OP_1 pushes 1 onto the stack.
*/
sealed abstract class ScriptNumberOperation extends ScriptNumber with ScriptOperation {
override def hex = opCode.toHexString
}
/** An empty array of bytes is pushed onto the stack. (This is not a no-op: an item is added to the stack.) */
case object OP_0 extends ScriptNumberOperation {
override def opCode = 0
override def hex = "00"
override def underlying = 0
}
/** An empty array of bytes is pushed onto the stack. (This is not a no-op: an item is added to the stack.) */
case object OP_FALSE extends ScriptNumberOperation {
override def opCode = OP_0.opCode
override def hex = OP_0.hex
override def underlying = OP_0.underlying
override def bytes = OP_0.bytes
}
/** The number 1 is pushed onto the stack. */
case object OP_TRUE extends ScriptNumberOperation {
override def opCode = 81
override def underlying = 1
}
/** The number -1 is pushed onto the stack. */
case object OP_1NEGATE extends ScriptNumberOperation {
override def opCode = 79
override def underlying = -1
}
/** The number 1 is pushed onto the stack. */
case object OP_1 extends ScriptNumberOperation {
override def opCode = OP_TRUE.opCode
override def underlying = OP_TRUE.underlying
}
/** The number 2 is pushed onto the stack. */
case object OP_2 extends ScriptNumberOperation {
override def opCode = 82
override def underlying = 2
}
/** The number 3 is pushed onto the stack. */
case object OP_3 extends ScriptNumberOperation {
override def opCode = 83
override def underlying = 3
}
/** The number 4 is pushed onto the stack. */
case object OP_4 extends ScriptNumberOperation {
override def opCode = 84
override def underlying = 4
}
/** The number 5 is pushed onto the stack. */
case object OP_5 extends ScriptNumberOperation {
override def opCode = 85
override def underlying = 5
}
/** The number 6 is pushed onto the stack. */
case object OP_6 extends ScriptNumberOperation {
override def opCode = 86
override def underlying = 6
}
/** The number 7 is pushed onto the stack. */
case object OP_7 extends ScriptNumberOperation {
override def opCode = 87
override def underlying = 7
}
/** The number 8 is pushed onto the stack. */
case object OP_8 extends ScriptNumberOperation {
override def opCode = 88
override def underlying = 8
}
/** The number 9 is pushed onto the stack. */
case object OP_9 extends ScriptNumberOperation {
override def opCode = 89
override def underlying = 9
}
/** The number 10 is pushed onto the stack. */
case object OP_10 extends ScriptNumberOperation {
override def opCode = 90
override def underlying = 10
}
/** The number 11 is pushed onto the stack. */
case object OP_11 extends ScriptNumberOperation {
override def opCode = 91
override def underlying = 11
}
/** The number 12 is pushed onto the stack. */
case object OP_12 extends ScriptNumberOperation {
override def opCode = 92
override def underlying = 12
}
/** The number 13 is pushed onto the stack. */
case object OP_13 extends ScriptNumberOperation {
override def opCode = 93
override def underlying = 13
}
/** The number 14 is pushed onto the stack. */
case object OP_14 extends ScriptNumberOperation {
override def opCode = 94
override def underlying = 14
}
/** The number 15 is pushed onto the stack. */
case object OP_15 extends ScriptNumberOperation {
override def opCode = 95
override def underlying = 15
}
/** The number 16 is pushed onto the stack. */
case object OP_16 extends ScriptNumberOperation {
override def opCode = 96
override def underlying = 16
}
object ScriptNumberOperation extends ScriptOperationFactory[ScriptNumberOperation] {
/** Finds the [[ScriptNumberOperation]] based on the given integer. */
def fromNumber(underlying: Long): Option[ScriptNumberOperation] = operations.find(_.underlying == underlying)
def operations = Seq(OP_0, OP_1, OP_1NEGATE, OP_2, OP_3, OP_4, OP_5, OP_6, OP_7, OP_8, OP_9, OP_10, OP_11, OP_12, OP_13, OP_14, OP_15, OP_16)
}
object ScriptConstant extends Factory[ScriptConstant] {
lazy val zero = ScriptConstant("00")
lazy val negativeZero = ScriptConstant("80")
lazy val negativeOne = ScriptConstant("81")
/** Creates a [[ScriptConstant]] from a sequence of bytes. */
def fromBytes(bytes: Seq[Byte]): ScriptConstant = ScriptConstantImpl(BitcoinSUtil.encodeHex(bytes))
  /** Represents a public key or hash of a public key on our stack. */
private case class ScriptConstantImpl(hex: String) extends ScriptConstant {
def this(bytes: List[Byte]) = this(BitcoinSUtil.encodeHex(bytes))
}
}
| Christewart/bitcoin-s-core | src/main/scala/org/bitcoins/core/script/constant/Constants.scala | Scala | mit | 11,084 |
/***
* Copyright 2014 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker.cli
import javax.xml.transform.stream._
import org.clapper.argot.ArgotUsageException
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class Wadl2CheckerSuite extends FunSuite {
test ("--help should generate usage info") {
Wadl2Checker.parser.reset()
intercept[ArgotUsageException] {
Wadl2Checker.handleArgs(Array("--help"))
}
}
test ("-h should generate usage info") {
Wadl2Checker.parser.reset()
intercept[ArgotUsageException] {
Wadl2Checker.handleArgs(Array("-h"))
}
}
test ("-bad_data should generate usage info") {
Wadl2Checker.parser.reset()
intercept[ArgotUsageException] {
Wadl2Checker.handleArgs(Array("-bad_data"))
}
}
test ("Too many params should generate usage info") {
Wadl2Checker.parser.reset()
intercept[ArgotUsageException] {
Wadl2Checker.handleArgs(Array("input","output","junk"))
}
}
test ("-d should set removeDups") {
Wadl2Checker.parser.reset()
assert (Wadl2Checker.removeDups.value == None)
Wadl2Checker.handleArgs(Array("-d"))
assert (Wadl2Checker.removeDups.value.get == true)
}
test ("-r should set raxRoles") {
Wadl2Checker.parser.reset()
assert (Wadl2Checker.raxRoles.value == None)
Wadl2Checker.handleArgs(Array("-r"))
assert (Wadl2Checker.raxRoles.value.get == true)
}
test ("--remove-dups should set removeDups") {
Wadl2Checker.parser.reset()
assert (Wadl2Checker.removeDups.value == None)
Wadl2Checker.handleArgs(Array("--remove-dups"))
assert (Wadl2Checker.removeDups.value.get == true)
}
test ("-v should set validate") {
Wadl2Checker.parser.reset()
assert (Wadl2Checker.validate.value == None)
Wadl2Checker.handleArgs(Array("-v"))
assert (Wadl2Checker.validate.value.get == true)
}
test ("--validate should set validate") {
Wadl2Checker.parser.reset()
assert (Wadl2Checker.validate.value == None)
Wadl2Checker.handleArgs(Array("--validate"))
assert (Wadl2Checker.validate.value.get == true)
}
test ("no params should set source and result with input/output stream"){
Wadl2Checker.parser.reset()
Wadl2Checker.handleArgs(Array())
assert (Wadl2Checker.getSource.asInstanceOf[StreamSource].getInputStream() != null)
assert (Wadl2Checker.getSource.asInstanceOf[StreamSource].getSystemId() == null)
assert (Wadl2Checker.getResult.asInstanceOf[StreamResult].getOutputStream() != null)
assert (Wadl2Checker.getResult.asInstanceOf[StreamResult].getSystemId() == null)
}
test ("one params should set source to systemid and result to stream"){
Wadl2Checker.parser.reset()
Wadl2Checker.handleArgs(Array("test.wadl"))
assert (Wadl2Checker.getSource.asInstanceOf[StreamSource].getInputStream() == null)
assert (Wadl2Checker.getSource.asInstanceOf[StreamSource].getSystemId() != null)
assert (Wadl2Checker.getResult.asInstanceOf[StreamResult].getOutputStream() != null)
assert (Wadl2Checker.getResult.asInstanceOf[StreamResult].getSystemId() == null)
}
test ("two params should set source and result to systemid"){
Wadl2Checker.parser.reset()
Wadl2Checker.handleArgs(Array("test.wadl", "out.xml"))
assert (Wadl2Checker.getSource.asInstanceOf[StreamSource].getInputStream() == null)
assert (Wadl2Checker.getSource.asInstanceOf[StreamSource].getSystemId() != null)
assert (Wadl2Checker.getResult.asInstanceOf[StreamResult].getOutputStream() == null)
assert (Wadl2Checker.getResult.asInstanceOf[StreamResult].getSystemId() != null)
}
}
| tylerroyal/api-checker | cli/wadl2checker/src/test/scala/com/rackspace/com/papi/components/checker/cli/Wadl2CheckerSuite.scala | Scala | apache-2.0 | 4,300 |
package lila.setup
import chess.Mode
private[setup] trait HumanConfig extends Config {
// casual or rated
val mode: Mode
}
private[setup] trait BaseHumanConfig extends BaseConfig {
val modes = Mode.all map (_.id)
val modeChoices = Mode.all map { e =>
e.id.toString -> e.toString
}
}
| luanlv/lila | modules/setup/src/main/HumanConfig.scala | Scala | mit | 302 |
package org.scalameter
package deprecatedjapi
import org.scalameter.japi.SerializableMethod
import scala.collection.JavaConverters._
abstract class JavaPerformanceTest[U] extends BasePerformanceTest[U] with Serializable {
import BasePerformanceTest._
private val Group = classOf[org.scalameter.deprecatedjapi.Group]
private val UsingInterface = classOf[org.scalameter.deprecatedjapi.Using[Object, Object]]
final def warmer: org.scalameter.Warmer = javaWarmer.get
final def aggregator: org.scalameter.Aggregator[U] = javaAggregator.get
final def executor: org.scalameter.Executor[U] = javaExecutor.get
final def measurer: org.scalameter.Measurer[U] = javaMeasurer.get
final def reporter: org.scalameter.Reporter[U] = javaReporter.get
final def persistor: org.scalameter.Persistor = javaPersistor.get
def javaWarmer: org.scalameter.deprecatedjapi.Warmer
def javaAggregator: org.scalameter.deprecatedjapi.Aggregator[U]
def javaExecutor: org.scalameter.deprecatedjapi.Executor[U]
def javaMeasurer: org.scalameter.deprecatedjapi.Measurer[U]
def javaPersistor: org.scalameter.deprecatedjapi.Persistor
def javaReporter: org.scalameter.deprecatedjapi.Reporter[U]
type SameType
constructScope(this, this.getClass)
def getClassInstance(enclosing: Object, s: String): Object = {
val clss = Class.forName(s)
var outerClasses = clss.getEnclosingClass()
if (outerClasses == null) {
Class.forName(s).newInstance.asInstanceOf[Object]
} else {
val ctor = Class.forName(s).getDeclaredConstructors()(0)
ctor.newInstance(enclosing).asInstanceOf[Object]
}
}
def config(instance: Object, c: Class[_]): List[KeyValue] = {
val fields = c.getDeclaredFields
fields.find(_.getName == "config") match {
case None =>
// println(s"no config found in $c")
List()
case Some(f) =>
val jcontext = f.get(instance).asInstanceOf[JContext]
val kvs = for ((kname, value) <- jcontext.getKeyMap.asScala) yield {
val key = org.scalameter.Key.parseKey(kname)
(key, value).asInstanceOf[KeyValue]
}
kvs.toList
}
}
def isGroupOrUsing(c: Class[_]) = Group.isAssignableFrom(c) || UsingInterface.isAssignableFrom(c)
def constructScope(instance: Object, c: Class[_]): Unit = {
for (clzz <- c.getClasses() if isGroupOrUsing(clzz)) {
classScope(getClassInstance(this, clzz.getName), clzz)
}
}
def classScope(instance: Object, c: Class[_]): Unit = {
for (interface <- c.getInterfaces) {
interface match {
case Group =>
val classGroupName = c.getName
val kvs = config(instance, c)
val s = Scope(classGroupName, setupzipper.value.current.context).config(kvs: _*)
val oldscope = s.context(Key.dsl.scope)
val ct = s.context + (Key.dsl.scope -> (c.getSimpleName() :: oldscope))
setupzipper.value = setupzipper.value.descend.setContext(ct)
for (clzz <- c.getClasses if isGroupOrUsing(clzz)) {
classScope(getClassInstance(instance, clzz.getName), clzz)
}
setupzipper.value = setupzipper.value.ascend
case UsingInterface =>
val kvs = config(instance, c)
val snippetMethod = new SerializableMethod(c.getMethod("snippet", classOf[Object]))
var setupbeforeall: Option[() => Unit] = None
var teardownafterall: Option[() => Unit] = None
var setp: Option[Object => Any] = None
var teardown: Option[Object => Any] = None
val gen = c.getMethod("generator").invoke(instance).asInstanceOf[JavaGenerator[Any]]
for (ms <- c.getMethods) {
val m = new SerializableMethod(ms)
ms.getName match {
case "beforeTests" => {
setupbeforeall = Some(() => { m.invoke(instance) })
}
case "afterTests" => teardownafterall = Some(() => { m.invoke(instance) })
case "setup" => {
if (classOf[org.scalameter.deprecatedjapi.VoidGen] isAssignableFrom gen.getClass) {
setp = Some((v: Object) => { m.invokeA(instance, null) })
} else {
setp = Some((v: Object) => { m.invokeA(instance, v) })
}
}
case "teardown" => {
if (classOf[org.scalameter.deprecatedjapi.VoidGen] isAssignableFrom gen.getClass) {
teardown = Some((v: Object) => { m.invokeA(instance, null) })
} else {
teardown = Some((v: Object) => { m.invokeA(instance, v) })
}
}
case _ =>
}
}
var snippet = (s: Object) => { snippetMethod.invokeA(instance, s) }
if (classOf[org.scalameter.deprecatedjapi.VoidGen] isAssignableFrom gen.getClass) {
snippet = (s: Object) => { snippetMethod.invokeA(instance, null) }
}
val generator = gen.get
val context = setupzipper.value.current.context ++ kvs
val setup = Setup(context, generator.asInstanceOf[Gen[Object]], setupbeforeall, teardownafterall, setp, teardown, None, snippet)
setupzipper.value = setupzipper.value.addItem(setup)
case _ =>
// ignore, does not contain any benchmark-related information
}
}
}
}
abstract class QuickBenchmark extends JavaPerformanceTest[Double] {
def javaReporter: org.scalameter.deprecatedjapi.Reporter[Double] = new org.scalameter.deprecatedjapi.LoggingReporter
def javaPersistor: org.scalameter.deprecatedjapi.Persistor = new org.scalameter.deprecatedjapi.NonePersistor
def javaExecutor: org.scalameter.deprecatedjapi.Executor[Double] =
new org.scalameter.deprecatedjapi.LocalExecutor(javaWarmer, javaAggregator, javaMeasurer)
def javaMeasurer: org.scalameter.deprecatedjapi.Measurer[Double] = new DefaultMeasurer
def javaWarmer = new org.scalameter.deprecatedjapi.Warmer {
def get = new org.scalameter.Executor.Warmer.Default()
}
def javaAggregator: org.scalameter.deprecatedjapi.Aggregator[Double] = new MinAggregator
}
abstract class Microbenchmark extends JavaPerformanceTest[Double] {
def javaWarmer = new org.scalameter.deprecatedjapi.Warmer {
def get = new org.scalameter.Warmer.Default
}
def javaAggregator: org.scalameter.deprecatedjapi.Aggregator[Double] = new org.scalameter.deprecatedjapi.MinAggregator[Double]
def javaMeasurer: org.scalameter.deprecatedjapi.Measurer[Double] = new org.scalameter.deprecatedjapi.Measurer[Double] {
def get = new org.scalameter.Measurer.IgnoringGC with org.scalameter.Measurer.PeriodicReinstantiation[Double] {
override val defaultFrequency = 12
override val defaultFullGC = true
}.asInstanceOf[Executor.Measurer[Double]]
}
def javaExecutor: org.scalameter.deprecatedjapi.Executor[Double] =
new org.scalameter.deprecatedjapi.SeparateJvmsExecutor(javaWarmer, javaAggregator, javaMeasurer)
def javaReporter: org.scalameter.deprecatedjapi.Reporter[Double] = new org.scalameter.deprecatedjapi.LoggingReporter[Double]
def javaPersistor: org.scalameter.deprecatedjapi.Persistor = new org.scalameter.deprecatedjapi.NonePersistor
}
abstract class HTMLReport extends JavaPerformanceTest[Double] {
import Executor.Measurer
import reporting._
def javaPersistor: org.scalameter.deprecatedjapi.Persistor = new org.scalameter.deprecatedjapi.GZIPJSONSerializationPersistor
def javaWarmer = new org.scalameter.deprecatedjapi.Warmer {
def get = new org.scalameter.Warmer.Default
}
def javaAggregator: org.scalameter.deprecatedjapi.Aggregator[Double] = new org.scalameter.deprecatedjapi.AverageAggregator
def javaMeasurer: org.scalameter.deprecatedjapi.Measurer[Double] = new org.scalameter.deprecatedjapi.Measurer[Double] {
def get = new Measurer.IgnoringGC with Measurer.PeriodicReinstantiation[Double]
with Measurer.OutlierElimination[Double] with Measurer.RelativeNoise {
def numeric: Numeric[Double] = implicitly[Numeric[Double]]
}
}
def javaExecutor: org.scalameter.deprecatedjapi.Executor[Double] =
new org.scalameter.deprecatedjapi.SeparateJvmsExecutor(javaWarmer, javaAggregator, javaMeasurer)
def online: Boolean
def javaTester: org.scalameter.deprecatedjapi.RegressionReporterTester
def javaHistorian: org.scalameter.deprecatedjapi.RegressionReporterHistorian
def javaReporter: org.scalameter.deprecatedjapi.Reporter[Double] = new org.scalameter.deprecatedjapi.Reporter[Double] {
def get = new org.scalameter.Reporter.Composite(
new RegressionReporter[Double](javaTester.get, javaHistorian.get),
HtmlReporter(false)
)
}
}
abstract class OnlineRegressionReport extends HTMLReport {
def javaTester: org.scalameter.deprecatedjapi.RegressionReporterTester = new org.scalameter.deprecatedjapi.OverlapIntervalsTester()
def javaHistorian: org.scalameter.deprecatedjapi.RegressionReporterHistorian = new org.scalameter.deprecatedjapi.ExponentialBackoffHistorian()
def online = true
}
abstract class OfflineRegressionReport extends HTMLReport {
def javaTester: org.scalameter.deprecatedjapi.RegressionReporterTester = new org.scalameter.deprecatedjapi.OverlapIntervalsTester()
def javaHistorian: org.scalameter.deprecatedjapi.RegressionReporterHistorian = new org.scalameter.deprecatedjapi.ExponentialBackoffHistorian()
def online = false
}
abstract class OfflineReport extends HTMLReport {
def javaTester: org.scalameter.deprecatedjapi.RegressionReporterTester = new org.scalameter.deprecatedjapi.AccepterTester()
def javaHistorian: org.scalameter.deprecatedjapi.RegressionReporterHistorian = new org.scalameter.deprecatedjapi.ExponentialBackoffHistorian()
def online = false
}
| kjanosz/scalameter | src/main/scala/org/scalameter/deprecatedjapi/JavaPerformanceTest.scala | Scala | bsd-3-clause | 9,808 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.linkagerule.input
import de.fuberlin.wiwiss.silk.entity.Entity
import de.fuberlin.wiwiss.silk.config.Prefixes
import de.fuberlin.wiwiss.silk.linkagerule.Operator
import xml.Node
import de.fuberlin.wiwiss.silk.util.{ValidationException, Identifier, DPair}
import de.fuberlin.wiwiss.silk.runtime.resource.ResourceLoader
/**
* A TransformInput applies a transform to input values.
*/
case class TransformInput(id: Identifier = Operator.generateId, transformer: Transformer, inputs: Seq[Input]) extends Input {
require(inputs.size > 0, "Number of inputs must be > 0.")
def apply(entities: DPair[Entity]): Set[String] = {
val values = for (input <- inputs) yield input(entities)
transformer(values)
}
override def toString = transformer match {
case Transformer(name, params) => "Transformer(type=" + name + ", params=" + params + ", inputs=" + inputs + ")"
}
override def toXML(implicit prefixes: Prefixes) = transformer match {
case Transformer(plugin, params) => {
<TransformInput id={id} function={plugin.id}>
{ inputs.map { input => input.toXML } }
{ params.map { case (name, value) => <Param name={name} value={value}/> } }
</TransformInput>
}
}
}
object TransformInput {
def fromXML(node: Node, resourceLoader: ResourceLoader)(implicit prefixes: Prefixes) = {
val id = Operator.readId(node)
val inputs = Input.fromXML(node.child, resourceLoader)
if(inputs.isEmpty) throw new ValidationException("No input defined", id, "Transformation")
try {
val transformer = Transformer((node \\ "@function").text, Operator.readParams(node), resourceLoader)
TransformInput(id, transformer, inputs)
} catch {
      case ex: Exception => throw new ValidationException(ex.getMessage, id, "Transformation")
}
}
}
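// Hedged illustration (added for this write-up, not part of the original file): based on
// toXML above, a transform input serializes to XML of roughly the shape below. The id,
// function name and parameter are made-up values, and each child input is rendered by
// Input.toXML, whose element names are not visible in this file.
//
//   <TransformInput id="lowerCase1" function="lowerCase">
//     <!-- one child element per input, produced by Input.toXML -->
//     <Param name="regex" value="\s+"/>
//   </TransformInput>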
| fusepoolP3/p3-silk | silk-core/src/main/scala/de/fuberlin/wiwiss/silk/linkagerule/input/TransformInput.scala | Scala | apache-2.0 | 2,406 |
import scala.swing.SimpleSwingApplication
import scala.swing.Swing._
import scala.swing.{MainFrame, Panel}
import scala.swing.event._
import javax.swing.Timer
import java.awt.{Color, Graphics2D, Point, geom}
import java.awt.event.{ActionEvent, ActionListener}
import java.nio.file._
import scala.collection.JavaConversions._
import scala.sys.process._
import java.io.File
object TurtleShape extends java.awt.geom.Path2D.Double {
moveTo(0, 0)
lineTo(20, 0)
lineTo(20, 20)
lineTo(0, 20)
closePath()
}
class FileWatcher(targetFilePath: String, onFileWatcher: LogoParser.OnFileWatcher) extends Runnable {
def run:Unit = {
val targetFile = new File(targetFilePath)
val watcher = FileSystems.getDefault.newWatchService
val file = Paths.get(targetFile.getParentFile().getAbsolutePath() + "/")
file.register(watcher, StandardWatchEventKinds.ENTRY_MODIFY)
val key = watcher.take
val events = key.pollEvents
val e = events.toList
.map(a => a.context().asInstanceOf[Path].toAbsolutePath.endsWith(targetFile.getName()))
.exists(_ == true)
onFileWatcher(e)
}
}
object LogoParser extends SimpleSwingApplication {
type OnFileWatcher = (Boolean) => Unit
var parsedTurtles:List[Turtle] = List.empty[Turtle]
var inputFile:String = "/home/ashish/Downloads/logo_test.txt"
override def main(args: Array[String]) = {
if( args.length == 1 ){
inputFile = args(0)
parseInput
super.main(args)
val watcher = new FileWatcher(inputFile, this.onFileWatcher)
(new Thread(watcher)).start
}else{
println("Sorry! You need to specificy one argument which is a path to an input file.")
}
}
def parseInput: Unit = {
val input = scala.io.Source.fromFile(inputFile).mkString
this.parsedTurtles = parseInput(input)
}
def onFileWatcher(didFileChange: Boolean): Unit = {
if(didFileChange){
parseInput
ui.reset
}
val watcher = new FileWatcher(inputFile, this.onFileWatcher)
(new Thread(watcher)).start
}
def parseInput(input:String): List[Turtle] = {
val turtleActions = TurtleScriptParser.parseAll(TurtleScriptParser.parseTurtleExpressions, input)
val parsedTurtles = if( turtleActions.successful ) {
turtleActions.get.foldLeft( List[Turtle](Turtle()) )((list, turtleExpression) => {
list ::: turtleExpression.updateTurtle(list.last)
})
}else{
println("Sorry! Could not parse input")
List.empty[Turtle]
}
println(parsedTurtles)
parsedTurtles
}
lazy val ui = new Panel with ActionListener {
background = Color.white
preferredSize = (800, 600)
focusable = true
var actionIndex = 1
def reset: Unit = {
actionIndex = 1
repaint
}
def actionPerformed(e: ActionEvent) {
actionIndex += 1
if( actionIndex <= parsedTurtles.size ){
repaint
}
}
override def paintComponent(g: Graphics2D) = {
super.paintComponent(g)
val currentTurtles = parsedTurtles.take(actionIndex)
val pathTurtles = currentTurtles.foldLeft((new geom.GeneralPath(), List[Turtle]()))((p, turtle) => {
if( p._2.size == 0 || p._2.last.penDown == false ){
p._1.moveTo(turtle.position._1, turtle.position._2)
}else{
p._1.lineTo(turtle.position._1, turtle.position._2)
}
(p._1, p._2 :+ turtle)
})
g.setColor(Color.BLACK)
g.draw(pathTurtles._1)
if(currentTurtles.length > 0){
g.setColor(Color.BLUE)
g.translate(currentTurtles.last.position._1, currentTurtles.last.position._2)
g.fill(TurtleShape)
}
g.dispose()
}
}
def top = new MainFrame {
title = "Turtle Logo Demo"
contents = ui
}
val timer = new Timer(200, ui)
timer.start()
}
| adatta02/turtle-graphics | src/LogoParser.scala | Scala | mit | 4,079 |
package com.outr.iconsole
import scala.language.experimental.macros
import scala.language.implicitConversions
import scala.reflect.macros.blackbox
object ProcessorGenerator {
def registerFromObject[T](c: blackbox.Context)(module: c.Expr[Option[String]], obj: c.Expr[T])(implicit t: c.WeakTypeTag[T]): c.Expr[List[CommandProcessor]] = {
import c.universe._
val processors = processorsFor[T](c)(module, obj)(t)
c.Expr[List[CommandProcessor]](
q"""
$processors.foreach { p =>
com.outr.iconsole.CommandProcessor.register(p)
}
$processors
""")
}
def processorsFor[T](c: blackbox.Context)(module: c.Expr[Option[String]], obj: c.Expr[T])(implicit t: c.WeakTypeTag[T]): c.Expr[List[CommandProcessor]] = {
import c.universe._
val members = weakTypeOf[T].decls
val methods = members.filter { m =>
val term = m.asTerm
term.isMethod && !term.isConstructor && term.isPublic && !m.name.decodedName.toString.contains("$default$")
}
val defaultArgs = members.filter { m =>
val term = m.asTerm
term.isMethod && !term.isConstructor && term.isPublic && m.name.decodedName.toString.contains("$default$")
}.map(m => m.name.decodedName.toString -> m).toMap
val commandProcessors = methods.map { m =>
val description = m.annotations.find(_.tree.tpe <:< typeOf[description]).flatMap { a =>
a.tree.children.tail.collect({ case Literal(Constant(value: String)) => value }).headOption
}.getOrElse("")
val shortDescription = m.annotations.find(_.tree.tpe <:< typeOf[shortDescription]).flatMap { a =>
a.tree.children.tail.collect({ case Literal(Constant(value: String)) => value }).headOption
}.getOrElse("")
val name = m.name.decodedName.toString
val paramList = m.info.paramLists.head
var arguments = List.empty[c.Tree]
val params = paramList.zipWithIndex.map {
case (param, index) => {
val paramName = param.name.decodedName.toString
val paramType = param.info.resultType
val defaultArg: Option[c.Tree] = defaultArgs.get(s"$name$$default$$${index + 1}").map(m => q"$m")
def extractString: c.Tree = q"com.outr.iconsole.ProcessorGenerator.extractArg[String](command, $paramName, $index, $defaultArg)"
val a = q"new com.outr.iconsole.Argument($paramName, $index, ${paramType.toString}, $defaultArg)"
arguments = a :: arguments
if (paramType =:= typeOf[Boolean]) {
q"$extractString.toBoolean"
} else if (paramType =:= typeOf[Int]) {
q"$extractString.toInt"
} else if (paramType =:= typeOf[Long]) {
q"$extractString.toLong"
} else if (paramType =:= typeOf[Float]) {
q"$extractString.toFloat"
} else if (paramType =:= typeOf[Double]) {
q"$extractString.toDouble"
} else if (paramType =:= typeOf[Option[Boolean]]) {
q"Option($extractString).map(_.toBoolean)"
} else if (paramType =:= typeOf[Option[Int]]) {
q"Option($extractString).map(_.toInt)"
} else if (paramType =:= typeOf[Option[Long]]) {
q"Option($extractString).map(_.toLong)"
} else if (paramType =:= typeOf[Option[Float]]) {
q"Option($extractString).map(_.toFloat)"
} else if (paramType =:= typeOf[Option[Double]]) {
q"Option($extractString).map(_.toDouble)"
} else {
q"""
import com.outr.iconsole.DefaultConversions._ // Default implicits
com.outr.iconsole.ProcessorGenerator.extractArg[$paramType](command, $paramName, $index, $defaultArg)
"""
}
}
}
arguments = arguments.reverse
q"""
import com.outr.iconsole._
CommandProcessor($name, $module, $shortDescription, $description, Vector(..$arguments), autoRegister = false) { command =>
$m(..$params)
}
"""
}
c.Expr[List[CommandProcessor]](q"List(..$commandProcessors)")
}
def extractArg[T](command: Command, name: String, index: Int, default: => Option[T])(implicit string2T: String => T): T = {
val s = command.args.get(name).orElse(command.args.get(s"arg${index + 1}"))
s.map(string2T).orElse(default).getOrElse(throw new RuntimeException(s"No argument provided for parameter `$name` (index: $index)."))
}
}
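// Hedged illustration (added for this write-up, not part of the original file): extractArg
// above looks for a named argument, then falls back to the positional key "arg<index+1>",
// then to the parameter's default, and finally throws. For a hypothetical parameter `count`
// at index 0 with default 10 (Command construction itself is not shown, since its shape is
// not visible in this file):
//
//   args = Map("count" -> "5")  =>  5   // matched by name
//   args = Map("arg1" -> "5")   =>  5   // matched by position
//   args = Map.empty            =>  10  // default used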
class Argument(val name: String, val index: Int, val `type`: String, defaultGetter: => Option[Any]) {
def default: Option[Any] = defaultGetter
override def toString: String = default match {
case Some(d) => {
val defaultString = if (`type` == "String") {
s""""$d""""
} else {
d.toString
}
s"$name: ${`type`} = $defaultString"
}
case None => s"$name: ${`type`}"
}
}
| outr/iconsole | core/js/src/main/scala/com/outr/iconsole/ProcessorGenerator.scala | Scala | apache-2.0 | 4,867 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
/*
TODO:
- index all available AutoPlugins to get the tasks that will be added
- error message when a task doesn't exist that it would be provided by plugin x, enabled by natures y,z, blocked by a, b
*/
import sbt.librarymanagement.Configuration
import sbt.internal.util.logic.{ Atom, Clause, Clauses, Formula, Literal, Logic, Negated }
import Logic.{ CyclicNegation, InitialContradictions, InitialOverlap, LogicException }
import Def.Setting
import Plugins._
import annotation.tailrec
import sbt.util.Logger
import PluginTrigger._
/**
* An AutoPlugin defines a group of settings and the conditions where the settings are automatically added to a build (called "activation").
* The `requires` and `trigger` methods together define the conditions, and a method like `projectSettings` defines the settings to add.
*
* Steps for plugin authors:
*
* 1. Determine if the `AutoPlugin` should automatically be activated when all requirements are met, or should be opt-in.
* 1. Determine the `AutoPlugin`s that, when present (or absent), act as the requirements for the `AutoPlugin`.
* 1. Determine the settings/configurations to that the `AutoPlugin` injects when activated.
* 1. Determine the keys and other names to be automatically imported to `*.sbt` scripts.
*
* For example, the following will automatically add the settings in `projectSettings`
* to a project that has both the `Web` and `Javascript` plugins enabled.
*
* {{{
* object MyPlugin extends sbt.AutoPlugin {
* override def requires = Web && Javascript
* override def trigger = allRequirements
* override def projectSettings = Seq(...)
*
* object autoImport {
* lazy val obfuscate = taskKey[Seq[File]]("Obfuscates the source.")
* }
* }
* }}}
*
* Steps for users:
*
* 1. Add dependencies on plugins in `project/plugins.sbt` as usual with `addSbtPlugin`
* 1. Add key plugins to projects, which will automatically select the plugin + dependent plugin settings to add for those projects.
* 1. Exclude plugins, if desired.
*
* For example, given plugins Web and Javascript (perhaps provided by plugins added with `addSbtPlugin`),
*
* {{{
* myProject.enablePlugins(Web && Javascript)
* }}}
*
* will activate `MyPlugin` defined above and have its settings automatically added. If the user instead defines
* {{{
* myProject.enablePlugins(Web && Javascript).disablePlugins(MyPlugin)
* }}}
*
* then the `MyPlugin` settings (and anything that activates only when `MyPlugin` is activated) will not be added.
*/
abstract class AutoPlugin extends Plugins.Basic with PluginsFunctions {
/**
* Determines whether this AutoPlugin will be activated for this project when the `requires` clause is satisfied.
*
* When this method returns `allRequirements`, and `requires` method returns `Web && Javascript`, this plugin
* instance will be added automatically if the `Web` and `Javascript` plugins are enabled.
*
* When this method returns `noTrigger`, and `requires` method returns `Web && Javascript`, this plugin
* instance will be added only if the build user enables it, but it will automatically add both `Web` and `Javascript`.
*/
def trigger: PluginTrigger = noTrigger
/**
* This AutoPlugin requires the plugins the Plugins matcher returned by this method. See [[trigger]].
*/
def requires: Plugins = plugins.JvmPlugin
val label: String = getClass.getName.stripSuffix("$")
override def toString: String = label
/** The `Configuration`s to add to each project that activates this AutoPlugin.*/
def projectConfigurations: Seq[Configuration] = Nil
/** The `Setting`s to add in the scope of each project that activates this AutoPlugin. */
def projectSettings: Seq[Setting[_]] = Nil
/**
   * The `Setting`s to add to the build scope for each project that activates this AutoPlugin.
* The settings returned here are guaranteed to be added to a given build scope only once
* regardless of how many projects for that build activate this AutoPlugin.
*/
def buildSettings: Seq[Setting[_]] = Nil
/** The `Setting`s to add to the global scope exactly once if any project activates this AutoPlugin. */
def globalSettings: Seq[Setting[_]] = Nil
// TODO?: def commands: Seq[Command]
/** The [[Project]]s to add to the current build. */
def extraProjects: Seq[Project] = Nil
/** The [[Project]]s to add to the current build based on an existing project. */
def derivedProjects(@deprecated("unused", "") proj: ProjectDefinition[_]): Seq[Project] = Nil
private[sbt] def unary_! : Exclude = Exclude(this)
/** If this plugin does not have any requirements, it means it is actually a root plugin. */
private[sbt] final def isRoot: Boolean =
requires match {
case Empty => true
case _ => false
}
/** If this plugin does not have any requirements, it means it is actually a root plugin. */
private[sbt] final def isAlwaysEnabled: Boolean =
isRoot && (trigger == AllRequirements)
}
/**
* An error that occurs when auto-plugins aren't configured properly.
* It translates the error from the underlying logic system to be targeted at end users.
*/
final class AutoPluginException private (val message: String, val origin: Option[LogicException])
extends RuntimeException(message) {
/** Prepends `p` to the error message derived from `origin`. */
def withPrefix(p: String) = new AutoPluginException(p + message, origin)
}
object AutoPluginException {
def apply(msg: String): AutoPluginException = new AutoPluginException(msg, None)
def apply(origin: LogicException): AutoPluginException =
new AutoPluginException(Plugins.translateMessage(origin), Some(origin))
}
/** An expression that matches `AutoPlugin`s. */
sealed trait Plugins {
def &&(o: Basic): Plugins
}
sealed trait PluginsFunctions {
/** [[Plugins]] instance that doesn't require any [[Plugins]]s. */
def empty: Plugins = Plugins.Empty
/** This plugin is activated when all required plugins are present. */
def allRequirements: PluginTrigger = AllRequirements
/** This plugin is activated only when it is manually activated. */
def noTrigger: PluginTrigger = NoTrigger
}
object Plugins extends PluginsFunctions {
/**
* Given the available auto plugins `defined`, returns a function that selects [[AutoPlugin]]s for the provided [[AutoPlugin]]s.
* The [[AutoPlugin]]s are topologically sorted so that a required [[AutoPlugin]] comes before its requiring [[AutoPlugin]].
*/
def deducer(defined0: List[AutoPlugin]): (Plugins, Logger) => Seq[AutoPlugin] =
if (defined0.isEmpty)(_, _) => Nil
else {
// TODO: defined should return all the plugins
val allReqs = (defined0 flatMap { asRequirements }).toSet
val diff = allReqs diff defined0.toSet
val defined =
if (diff.nonEmpty) diff.toList ::: defined0
else defined0
val byAtom = defined map { x =>
(Atom(x.label), x)
}
val byAtomMap = byAtom.toMap
if (byAtom.size != byAtomMap.size) duplicateProvidesError(byAtom)
// Ignore clauses for plugins that does not require anything else.
// Avoids the requirement for pure Nature strings *and* possible
// circular dependencies in the logic.
val allRequirementsClause =
defined.filterNot(_.isRoot).flatMap(d => asRequirementsClauses(d))
val allEnabledByClause = defined.filterNot(_.isRoot).flatMap(d => asEnabledByClauses(d))
// Note: Here is where the function begins. We're given a list of plugins now.
(requestedPlugins, log) => {
timed("Plugins.deducer#function", log) {
def explicitlyDisabled(p: AutoPlugin): Boolean = hasExclude(requestedPlugins, p)
val alwaysEnabled: List[AutoPlugin] =
defined.filter(_.isAlwaysEnabled).filterNot(explicitlyDisabled)
val knowledge0: Set[Atom] = ((flatten(requestedPlugins) ++ alwaysEnabled) collect {
case x: AutoPlugin => Atom(x.label)
}).toSet
val clauses = Clauses((allRequirementsClause ::: allEnabledByClause) filterNot {
_.head subsetOf knowledge0
})
// println(s"allRequirementsClause = $allRequirementsClause")
// println(s"allEnabledByClause = $allEnabledByClause")
// println(s"clauses = $clauses")
// println("")
log.debug(
s"deducing auto plugins based on known facts ${knowledge0.toString} and clauses ${clauses.toString}"
)
Logic.reduce(
clauses,
(flattenConvert(requestedPlugins) ++ convertAll(alwaysEnabled)).toSet
) match {
case Left(problem) => throw AutoPluginException(problem)
case Right(results) =>
log.debug(s" :: deduced result: ${results}")
val selectedAtoms: List[Atom] = results.ordered
val selectedPlugins = selectedAtoms map { a =>
byAtomMap.getOrElse(
a,
throw AutoPluginException(s"${a} was not found in atom map.")
)
}
val forbidden: Set[AutoPlugin] =
(selectedPlugins flatMap { Plugins.asExclusions }).toSet
val c = selectedPlugins.toSet & forbidden
if (c.nonEmpty) {
exclusionConflictError(requestedPlugins, selectedPlugins, c.toSeq sortBy {
_.label
})
}
val retval = topologicalSort(selectedPlugins)
// log.debug(s" :: sorted deduced result: ${retval.toString}")
retval
}
}
}
}
private[sbt] def topologicalSort(ns: List[AutoPlugin]): List[AutoPlugin] = {
@tailrec
def doSort(
found0: List[AutoPlugin],
notFound0: List[AutoPlugin],
limit0: Int
): List[AutoPlugin] = {
if (limit0 < 0) throw AutoPluginException(s"Failed to sort ${ns} topologically")
else if (notFound0.isEmpty) found0
else {
val (found1, notFound1) = notFound0 partition { n =>
asRequirements(n).toSet subsetOf found0.toSet
}
doSort(found0 ::: found1, notFound1, limit0 - 1)
}
}
val (roots, nonRoots) = ns partition (_.isRoot)
doSort(roots, nonRoots, ns.size * ns.size + 1)
}
private[sbt] def translateMessage(e: LogicException) = e match {
case ic: InitialContradictions =>
s"Contradiction in selected plugins. These plugins were both included and excluded: ${literalsString(ic.literals.toSeq)}"
case io: InitialOverlap =>
s"Cannot directly enable plugins. Plugins are enabled when their required plugins are satisfied. The directly selected plugins were: ${literalsString(io.literals.toSeq)}"
case cn: CyclicNegation =>
s"Cycles in plugin requirements cannot involve excludes. The problematic cycle is: ${literalsString(cn.cycle)}"
}
private[this] def literalsString(lits: Seq[Literal]): String =
lits map { case Atom(l) => l; case Negated(Atom(l)) => l } mkString (", ")
private[this] def duplicateProvidesError(byAtom: Seq[(Atom, AutoPlugin)]): Unit = {
val dupsByAtom = Map(byAtom.groupBy(_._1).toSeq.map {
case (k, v) =>
k -> v.map(_._2)
}: _*)
val dupStrings =
for ((atom, dups) <- dupsByAtom if dups.size > 1)
yield s"${atom.label} by ${dups.mkString(", ")}"
    val (ns, nl) = if (dupStrings.size > 1) ("s", "\n\t") else ("", " ")
val message = s"Plugin$ns provided by multiple AutoPlugins:$nl${dupStrings.mkString(nl)}"
throw AutoPluginException(message)
}
private[this] def exclusionConflictError(
requested: Plugins,
selected: Seq[AutoPlugin],
conflicting: Seq[AutoPlugin]
): Unit = {
def listConflicts(ns: Seq[AutoPlugin]) =
(ns map { c =>
val reasons = (if (flatten(requested) contains c) List("requested")
else Nil) ++
(if (c.requires != empty && c.trigger == allRequirements)
List(s"enabled by ${c.requires.toString}")
else Nil) ++ {
val reqs = selected filter { x =>
asRequirements(x) contains c
}
if (reqs.nonEmpty) List(s"""required by ${reqs.mkString(", ")}""")
else Nil
} ++ {
val exs = selected filter { x =>
asExclusions(x) contains c
}
if (exs.nonEmpty) List(s"""excluded by ${exs.mkString(", ")}""")
else Nil
}
s""" - conflict: ${c.label} is ${reasons.mkString("; ")}"""
      }).mkString("\n")
throw AutoPluginException(s"""Contradiction in enabled plugins:
- requested: ${requested.toString}
- enabled: ${selected.mkString(", ")}
${listConflicts(conflicting)}""")
}
private[sbt] final object Empty extends Plugins {
def &&(o: Basic): Plugins = o
override def toString = "<none>"
}
/** An included or excluded Nature/Plugin. */
// TODO: better name than Basic. Also, can we dump this class
sealed abstract class Basic extends Plugins {
def &&(o: Basic): Plugins = And(this :: o :: Nil)
}
private[sbt] final case class Exclude(n: AutoPlugin) extends Basic {
override def toString = s"!$n"
}
private[sbt] final case class And(plugins: List[Basic]) extends Plugins {
def &&(o: Basic): Plugins = And(o :: plugins)
override def toString = plugins.mkString(" && ")
}
private[sbt] def and(a: Plugins, b: Plugins) = b match {
case Empty => a
case And(ns) => ns.foldLeft(a)(_ && _)
case b: Basic => a && b
}
private[sbt] def remove(a: Plugins, del: Set[Basic]): Plugins = a match {
case b: Basic => if (del(b)) Empty else b
case Empty => Empty
case And(ns) =>
val removed = ns.filterNot(del)
if (removed.isEmpty) Empty else And(removed)
}
/** Defines enabled-by clauses for `ap`. */
private[sbt] def asEnabledByClauses(ap: AutoPlugin): List[Clause] =
// `ap` is the head and the required plugins for `ap` is the body.
if (ap.trigger == AllRequirements) Clause(convert(ap.requires), Set(Atom(ap.label))) :: Nil
else Nil
/** Defines requirements clauses for `ap`. */
private[sbt] def asRequirementsClauses(ap: AutoPlugin): List[Clause] =
// required plugin is the head and `ap` is the body.
asRequirements(ap) map { x =>
Clause(convert(ap), Set(Atom(x.label)))
}
private[sbt] def asRequirements(ap: AutoPlugin): List[AutoPlugin] =
flatten(ap.requires).toList collect {
case x: AutoPlugin => x
}
private[sbt] def asExclusions(ap: AutoPlugin): List[AutoPlugin] =
flatten(ap.requires).toList collect {
case Exclude(x) => x
}
// TODO - This doesn't handle nested AND boolean logic...
private[sbt] def hasExclude(n: Plugins, p: AutoPlugin): Boolean = n match {
case `p` => false
case Exclude(`p`) => true
// TODO - This is stupidly advanced. We do a nested check through possible and-ed
// lists of plugins exclusions to see if the plugin ever winds up in an excluded=true case.
// This would handle things like !!p or !(p && z)
case Exclude(n) => hasInclude(n, p)
case And(ns) => ns.forall(n => hasExclude(n, p))
case _: Basic => false
case Empty => false
}
private[sbt] def hasInclude(n: Plugins, p: AutoPlugin): Boolean = n match {
case `p` => true
case Exclude(n) => hasExclude(n, p)
case And(ns) => ns.forall(n => hasInclude(n, p))
case _: Basic => false
case Empty => false
}
private[this] def flattenConvert(n: Plugins): Seq[Literal] = n match {
case And(ns) => convertAll(ns)
case b: Basic => convertBasic(b) :: Nil
case Empty => Nil
}
private[sbt] def flatten(n: Plugins): Seq[Basic] = n match {
case And(ns) => ns
case b: Basic => b :: Nil
case Empty => Nil
}
private[this] def convert(n: Plugins): Formula = n match {
case And(ns) => convertAll(ns).reduce[Formula](_ && _)
case b: Basic => convertBasic(b)
case Empty => Formula.True
}
private[this] def convertBasic(b: Basic): Literal = b match {
case Exclude(n) => !convertBasic(n)
case a: AutoPlugin => Atom(a.label)
}
private[this] def convertAll(ns: Seq[Basic]): Seq[Literal] = ns map convertBasic
/** True if the trigger clause `n` is satisfied by `model`. */
def satisfied(n: Plugins, model: Set[AutoPlugin]): Boolean =
flatten(n) forall {
case Exclude(a) => !model(a)
case ap: AutoPlugin => model(ap)
}
private val autoImport = "autoImport"
/** Determines whether a plugin has a stable autoImport member by:
*
* 1. Checking whether there exists a public field.
* 2. Checking whether there exists a public object.
*
* The above checks work for inherited members too.
*
* @param ap The found plugin.
* @param loader The plugin loader.
* @return True if plugin has a stable member `autoImport`, otherwise false.
*/
private[sbt] def hasAutoImportGetter(ap: AutoPlugin, loader: ClassLoader): Boolean = {
import java.lang.reflect.Field
import scala.util.control.Exception.catching
// Make sure that we don't detect user-defined methods called autoImport
def existsAutoImportVal(clazz: Class[_]): Option[Field] = {
catching(classOf[NoSuchFieldException])
.opt(clazz.getDeclaredField(autoImport))
.orElse(Option(clazz.getSuperclass).flatMap(existsAutoImportVal))
}
val pluginClazz = ap.getClass
existsAutoImportVal(pluginClazz)
.orElse(
catching(classOf[ClassNotFoundException])
.opt(Class.forName(s"${pluginClazz.getName}$autoImport$$", false, loader))
)
.isDefined
}
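  // Illustrative sketch, not part of sbt itself: a plugin shape that the check above
  // would detect. `MyPlugin` and `greeting` are hypothetical names.
  //
  //   object MyPlugin extends AutoPlugin {
  //     object autoImport {
  //       val greeting = settingKey[String]("greeting shown at startup")
  //     }
  //   }
  //
  // hasAutoImportGetter(MyPlugin, loader) should return true: the nested object compiles
  // to a `...$autoImport$` class, which the Class.forName fallback above finds.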
/** Debugging method to time how long it takes to run various compilation tasks. */
private[this] def timed[T](label: String, log: Logger)(t: => T): T = {
val start = System.nanoTime
val result = t
val elapsed = System.nanoTime - start
log.debug(label + " took " + (elapsed / 1e6) + " ms")
result
}
}
| sbt/sbt | main/src/main/scala/sbt/Plugins.scala | Scala | apache-2.0 | 18,242 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.util.concurrent._
import atomic._
import org.apache.kafka.common.utils.KafkaThread
/**
* A scheduler for running jobs
*
* This interface controls a job scheduler that allows scheduling either repeating background jobs
* that execute periodically or delayed one-time actions that are scheduled in the future.
*/
trait Scheduler {
/**
* Initialize this scheduler so it is ready to accept scheduling of tasks
*/
def startup()
/**
* Shutdown this scheduler. When this method is complete no more executions of background tasks will occur.
* This includes tasks scheduled with a delayed execution.
*/
def shutdown()
/**
* Check if the scheduler has been started
*/
def isStarted: Boolean
/**
* Schedule a task
* @param name The name of this task
* @param delay The amount of time to wait before the first execution
* @param period The period with which to execute the task. If < 0 the task will execute only once.
* @param unit The unit for the preceding times.
*/
def schedule(name: String, fun: ()=>Unit, delay: Long = 0, period: Long = -1, unit: TimeUnit = TimeUnit.MILLISECONDS)
}
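/*
 * Illustrative usage sketch (names and intervals are arbitrary): a periodic job plus a
 * one-shot job. With the default period = -1 (< 0) the task runs exactly once.
 *
 *   val scheduler = new KafkaScheduler(threads = 1)
 *   scheduler.startup()
 *   scheduler.schedule("log-cleanup", () => println("cleaning"), delay = 0, period = 60000)
 *   scheduler.schedule("one-shot", () => println("once"), delay = 1000)
 *   scheduler.shutdown()
 */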
/**
* A scheduler based on java.util.concurrent.ScheduledThreadPoolExecutor
*
 * It has a pool of threads (named with the kafka-scheduler- prefix by default) that do the actual work.
*
* @param threads The number of threads in the thread pool
* @param threadNamePrefix The name to use for scheduler threads. This prefix will have a number appended to it.
* @param daemon If true the scheduler threads will be "daemon" threads and will not block jvm shutdown.
*/
@threadsafe
class KafkaScheduler(val threads: Int,
val threadNamePrefix: String = "kafka-scheduler-",
daemon: Boolean = true) extends Scheduler with Logging {
private var executor: ScheduledThreadPoolExecutor = null
private val schedulerThreadId = new AtomicInteger(0)
override def startup() {
debug("Initializing task scheduler.")
this synchronized {
if(isStarted)
throw new IllegalStateException("This scheduler has already been started!")
executor = new ScheduledThreadPoolExecutor(threads)
executor.setContinueExistingPeriodicTasksAfterShutdownPolicy(false)
executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false)
executor.setThreadFactory(new ThreadFactory() {
def newThread(runnable: Runnable): Thread =
new KafkaThread(threadNamePrefix + schedulerThreadId.getAndIncrement(), runnable, daemon)
})
}
}
override def shutdown() {
debug("Shutting down task scheduler.")
    // We use the local variable to avoid NullPointerException if another thread shuts down the scheduler at the same time.
val cachedExecutor = this.executor
if (cachedExecutor != null) {
this synchronized {
cachedExecutor.shutdown()
this.executor = null
}
cachedExecutor.awaitTermination(1, TimeUnit.DAYS)
}
}
def scheduleOnce(name: String, fun: () => Unit): Unit = {
schedule(name, fun, delay = 0L, period = -1L, unit = TimeUnit.MILLISECONDS)
}
def schedule(name: String, fun: () => Unit, delay: Long, period: Long, unit: TimeUnit) {
debug("Scheduling task %s with initial delay %d ms and period %d ms."
.format(name, TimeUnit.MILLISECONDS.convert(delay, unit), TimeUnit.MILLISECONDS.convert(period, unit)))
this synchronized {
ensureRunning()
val runnable = CoreUtils.runnable {
try {
trace("Beginning execution of scheduled task '%s'.".format(name))
fun()
} catch {
case t: Throwable => error("Uncaught exception in scheduled task '" + name +"'", t)
} finally {
trace("Completed execution of scheduled task '%s'.".format(name))
}
}
if(period >= 0)
executor.scheduleAtFixedRate(runnable, delay, period, unit)
else
executor.schedule(runnable, delay, unit)
}
}
def resizeThreadPool(newSize: Int): Unit = {
executor.setCorePoolSize(newSize)
}
def isStarted: Boolean = {
this synchronized {
executor != null
}
}
private def ensureRunning(): Unit = {
if (!isStarted)
throw new IllegalStateException("Kafka scheduler is not running.")
}
}
| ollie314/kafka | core/src/main/scala/kafka/utils/KafkaScheduler.scala | Scala | apache-2.0 | 5,202 |
package com.auginte.scarango.response.raw.query.simple
import spray.json.{JsObject, JsString, JsValue}
/**
* Model for raw response of Document
*/
class Document(fields: Map[String, JsValue]) extends JsObject(fields) {
def id = fieldToString("_id")
def revision = fieldToString("_rev")
def key = fieldToString("_key")
private def fieldToString(key: String) = Document.jsToString(fields(key))
}
object Document {
def jsToString(value: JsValue): String = js2String(value)
def js2String: JsValue => String = {
case JsString(s) => s
case other => other.toString
}
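  // Example (illustrative): Document.jsToString(JsString("persons/123")) yields "persons/123";
  // any other JsValue falls back to its toString rendering.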
} | aurelijusb/scarango | src/main/scala/com/auginte/scarango/response/raw/query/simple/Document.scala | Scala | apache-2.0 | 592 |
/**
* Copyright 2011-2012 eBusiness Information, Groupe Excilys (www.excilys.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.excilys.ebi.gatling.core.result.writer
import scala.collection.mutable.Map
import org.joda.time.DateTime
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import com.excilys.ebi.gatling.core.util.StringHelper.END_OF_LINE
@RunWith(classOf[JUnitRunner])
class ConsoleDataWriterSpec extends Specification {
val time = new DateTime().withDate(2012, 8, 24).withTime(13, 37, 0, 0)
def progressBar(summary: ConsoleSummary) = summary.toString.split(END_OF_LINE)(3)
"console summary progress bar" should {
"handle it correctly when all the users are waiting" in {
val counters = new UserCounters(11)
val summary = ConsoleSummary(10000, Map("request1" -> counters), Map.empty, time)
summary.complete must beFalse
progressBar(summary) must beEqualTo("Users : [ ] 0%")
}
"handle it correctly when all the users are running" in {
val counters = new UserCounters(11)
for (i <- 1 to 11) counters.userStart
val summary = ConsoleSummary(10000, Map("request1" -> counters), Map.empty, time)
summary.complete must beFalse
progressBar(summary) must beEqualTo("Users : [-----------------------------------------------------------------] 0%")
}
"handle it correctly when all the users are done" in {
val counters = new UserCounters(11)
for (i <- 1 to 11) counters.userStart
for (i <- 1 to 11) counters.userDone
val summary = ConsoleSummary(10000, Map("request1" -> counters), Map.empty, time)
summary.complete must beTrue
progressBar(summary) must beEqualTo("Users : [#################################################################]100%")
}
"handle it correctly when there are running and done users" in {
val counters = new UserCounters(11)
for (i <- 1 to 11) counters.userStart
for (i <- 1 to 10) counters.userDone
val summary = ConsoleSummary(10000, Map("request1" -> counters), Map.empty, time)
summary.complete must beFalse
progressBar(summary) must beEqualTo("Users : [###########################################################------] 90%")
}
}
}
| Tjoene/thesis | Case_Programs/gatling-1.4.0/gatling-core/src/test/scala/com/excilys/ebi/gatling/core/result/writer/ConsoleDataWriterSpec.scala | Scala | gpl-2.0 | 2,821 |
package reactivemongo.core.protocol
import scala.util.{ Failure, Success }
import scala.concurrent.{ ExecutionContext, Future }
import reactivemongo.io.netty.buffer.{ ByteBuf, Unpooled }
import reactivemongo.core.errors._
/**
 * A Mongo Wire Protocol response message.
*
* @param header the header of this response
* @param reply the reply operation contained in this response
* @param documents the body of this response, a [[http://static.netty.io/3.5/api/org/jboss/netty/buffer/ByteBuf.html ByteBuf]] containing 0, 1, or many documents
* @param info some meta information about this response
*/
@deprecated("Internal: will be made private", "0.16.0")
sealed abstract class Response(
val header: MessageHeader,
val reply: Reply,
val documents: ByteBuf,
val info: ResponseInfo) extends Product4[MessageHeader, Reply, ByteBuf, ResponseInfo] with Serializable {
@inline def _1 = header
@inline def _2 = reply
@inline def _3 = documents
@inline def _4 = info
def canEqual(that: Any): Boolean = that match {
case _: Response => true
case _ => false
}
/** If this response is in error, explain this error. */
lazy val error: Option[DatabaseException] = {
if (reply.inError) {
val bson = Response.parse(this)
if (bson.hasNext) Some(DatabaseException(bson.next))
else None
} else None
}
private[reactivemongo] def cursorID(id: Long): Response
private[reactivemongo] def startingFrom(offset: Int): Response
override def toString = s"Response($header, $reply, $info)"
}
@deprecated("Internal: will be made private", "0.16.0")
object Response {
import reactivemongo.api.{ BSONSerializationPack, SerializationPack }
import reactivemongo.bson.BSONDocument
def apply(
header: MessageHeader,
reply: Reply,
documents: ByteBuf,
info: ResponseInfo): Response = Successful(header, reply, documents, info)
@inline def parse(response: Response): Iterator[BSONDocument] =
parse(BSONSerializationPack)(response)
private[reactivemongo] def parse[P <: SerializationPack](pack: P)(
response: Response): Iterator[pack.Document] =
ReplyDocumentIterator.parse(pack)(response)(pack.IdentityReader)
def unapply(response: Response): Option[(MessageHeader, Reply, ByteBuf, ResponseInfo)] = Some((response.header, response.reply, response.documents, response.info))
private[reactivemongo] def preload(response: Response)(
implicit
ec: ExecutionContext): Future[(Response, BSONDocument)] =
response match {
case r @ WithCursor(_, _, _, _, _, cursorDoc, _) =>
Future.successful(r -> cursorDoc)
case CommandError(_, _, _, cause) =>
Future.failed(cause)
case Successful(_, Reply(_, _, _, 0), _, _) =>
Future.failed(ReactiveMongoException(
s"Cannot preload empty response: $response"))
case Successful(header, reply, docs, info) => {
val buf = docs.duplicate()
ResponseDecoder.first(buf) match {
case Success(first) => Future {
buf.resetReaderIndex()
val other = Successful(header, reply, buf, info)
other.first = Option(first)
other -> first
}
case Failure(cause) => Future.failed(cause)
}
}
}
// ---
private[reactivemongo] final case class Successful(
_header: MessageHeader,
_reply: Reply,
_documents: ByteBuf,
_info: ResponseInfo) extends Response(
_header, _reply, _documents, _info) {
@volatile private[reactivemongo] var first = Option.empty[BSONDocument]
private[reactivemongo] def cursorID(id: Long): Response =
copy(_reply = this._reply.copy(cursorID = id))
private[reactivemongo] def startingFrom(offset: Int): Response =
copy(_reply = this._reply.copy(startingFrom = offset))
}
// For MongoDB 3.2+ response with cursor
private[reactivemongo] final case class WithCursor(
_header: MessageHeader,
_reply: Reply,
_documents: ByteBuf,
_info: ResponseInfo,
ns: String,
    private[core] cursor: BSONDocument,
    private[core] preloaded: Seq[BSONDocument]) extends Response(
_header, _reply, _documents, _info) {
private[reactivemongo] def cursorID(id: Long): Response =
copy(_reply = this._reply.copy(cursorID = id))
private[reactivemongo] def startingFrom(offset: Int): Response =
copy(_reply = this._reply.copy(startingFrom = offset))
}
private[reactivemongo] final case class CommandError(
_header: MessageHeader,
_reply: Reply,
_info: ResponseInfo,
    private[reactivemongo] cause: DatabaseException) extends Response(_header, _reply, Unpooled.EMPTY_BUFFER, _info) {
override lazy val error: Option[DatabaseException] = Some(cause)
private[reactivemongo] def cursorID(id: Long): Response =
copy(_reply = this._reply.copy(cursorID = id))
private[reactivemongo] def startingFrom(offset: Int): Response =
copy(_reply = this._reply.copy(startingFrom = offset))
}
}
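/*
 * Illustrative sketch (assumes an already-received `response: Response`):
 *
 *   response.error match {
 *     case Some(err) => println(s"query failed: ${err.getMessage}")
 *     case None      => Response.parse(response).foreach(println) // Iterator[BSONDocument]
 *   }
 */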
| ornicar/ReactiveMongo | core/src/main/scala/core/protocol/Response.scala | Scala | apache-2.0 | 5,016 |
/* Copyright (c) 2010 Richard Searle
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cognitiveentity.xml.combinators
import _root_.org.specs2.mutable._
import scala.xml.PrettyPrinter
import Picklers._
/**
* @author Richard Searle
*/
class NestedTest extends PicklerAsserts{
val pInternal = Internal("tagged",123)
val pContained = Contained("tagged",123)
val inVariantInternal = """<variant xmlns="testing-uri">
<value kind="internal"/>
<internal xmlns="nested-uri">
<tag>tagged</tag>
<value>123</value>
</internal>
</variant>
"""
val inVariantContained = """<variant xmlns="testing-uri">
<value kind="contained"/>
<tag xmlns="contained-uri">tagged</tag>
<value xmlns="contained-uri">123</value>
</variant>
"""
"parseVariantInternal" in {
val result:PicklerResult[Common] = Variant.pickler.unpickle(LinearStore(inVariantInternal))
result match {
case Success(v:Internal, _) =>pInternal must beEqualTo(v)
case f: NoSuccess => failure(f toString)
}
}
"unparseVariantInternal" in {
val xml = Variant.pickler.pickle(pInternal)
inVariantInternal must beEqualTo(normalize(xml.document))
}
"unparseVariantContaned" in {
val xml = Variant.pickler.pickle(pContained)
inVariantContained must beEqualTo(normalize(xml.document))
}
"parseInternal" in {
val in = """<rating xmlns="http://schemas.google.com/g/2005">
<name xmlns="testing-uri">name</name>
<internal xmlns="nested-uri">
<tag>tagged</tag>
<value>123</value>
</internal>
</rating>"""
val result = Nested.pickler(Internal.internalPickler).unpickle(LinearStore(in))
result match {
case Success(v, _) => Nested("name",Internal("tagged",123),Nil) must beEqualTo(v)
case f: NoSuccess => failure(f toString)
}
}
"parseVariantContained" in {
val result = Variant.pickler.unpickle(LinearStore(inVariantContained))
result match {
case Success(v, _) => pContained must beEqualTo(v)
case f: NoSuccess => failure(f toString)
}
}
"parseContained" in {
val in = """<rating xmlns="http://schemas.google.com/g/2005">
<name xmlns="testing-uri">name</name>
<tag xmlns="contained-uri">tagged</tag>
<value xmlns="contained-uri">123</value>
</rating>"""
val result = Nested.pickler(Contained.pickler).unpickle(LinearStore(in))
result match {
case Success(v, _) => Nested("name",Contained("tagged",123),Nil) must beEqualTo(v)
case f: NoSuccess => failure(f toString)
}
}
"parseSingle" in {
val in = """<rating xmlns="http://schemas.google.com/g/2005">
<name xmlns="testing-uri">name</name>
<tag xmlns="contained-uri">tagged</tag>
</rating>"""
val result = Nested.pickler(Single.pickler).unpickle(LinearStore(in))
result match {
case Success(v, _) => Nested("name",Single("tagged"),Nil) must beEqualTo(v)
case f: NoSuccess => failure(f toString)
}
}
"unparseInternal" in {
val r = Nested("name",pInternal,Internal("l1",111)::Nil)
val xml = Nested.pickler(Internal.internalPickler).pickle(r)
"""<rating xmlns="http://schemas.google.com/g/2005">
<name xmlns="testing-uri">name</name>
<internal xmlns="nested-uri">
<tag>tagged</tag>
<value>123</value>
</internal>
<internal xmlns="nested-uri">
<tag>l1</tag>
<value>111</value>
</internal>
</rating>
""" must beEqualTo(normalize(xml.document))
}
"unparseContained" in {
val r = Nested("name",pContained,Contained("l1",111)::Nil)
val xml = Nested.pickler(Contained.pickler).pickle(r)
"""<rating xmlns="http://schemas.google.com/g/2005">
<name xmlns="testing-uri">name</name>
<tag xmlns="contained-uri">tagged</tag>
<value xmlns="contained-uri">123</value>
<tag xmlns="contained-uri">l1</tag>
<value xmlns="contained-uri">111</value>
</rating>
""" must beEqualTo(normalize(xml.document))
}
"unparseSingle" in {
val r = Nested("name",Single("xx"),Single("l1")::Nil)
val xml = Nested.pickler(Single.pickler).pickle(r)
"""<rating xmlns="http://schemas.google.com/g/2005">
<name xmlns="testing-uri">name</name>
<tag xmlns="contained-uri">xx</tag>
<tag xmlns="contained-uri">l1</tag>
</rating>
""" must beEqualTo(normalize(xml.document))
}
} | searler/ScalaXML | src/test/scala/cognitiveentity/xml/combinators/NestedTest.scala | Scala | apache-2.0 | 5,154 |
package objektwerks
object DeltaLakeApp extends App {
import SparkInstance._
import Person.personStructType
batch()
structuredStreaming()
def batch(): Unit = {
val personsPath = "./target/delta/persons"
val personsDataframe = sparkSession.read.json("./data/person/person.json")
personsDataframe.write.format("delta").mode("overwrite").save(personsPath)
val personsDelta = sparkSession.read.format("delta").load(personsPath)
personsDelta.select("*").show
assert( personsDelta.select("*").count == 4 )
}
def structuredStreaming(): Unit = {
val rolesPath = "./target/delta/roles"
sparkSession
.readStream
.schema(personStructType)
.json("./data/person")
.groupBy("role", "name")
.count
.writeStream
.format("delta")
.outputMode("complete")
.option("checkpointLocation", "./target/delta/roles/checkpoints")
.start(rolesPath)
.awaitTermination(30000) // Time-dependent due to slow Delta Lake IO!
sparkSession
.readStream
.format("delta")
.load(rolesPath)
.writeStream
.format("console")
.outputMode("append")
.start
.awaitTermination(30000)
val rolesDelta = sparkSession.read.format("delta").load(rolesPath)
rolesDelta.select("*").show
assert( rolesDelta.select("*").count == 4 )
}
} | objektwerks/spark | src/main/scala/objektwerks/DeltaLakeApp.scala | Scala | apache-2.0 | 1,362 |
package org.crudible.core.binding
import org.crudible.core.model.FormElement
case class Section protected (protected val _label: Option[String], protected val _children: Seq[FormElement]) extends FormElement {
def attach(node: FormElement*) = {
new Section(label, _children ++ node)
}
def label(newLabel: String) = {
new Section(Some(newLabel), _children)
}
def children() = {
_children
}
def label() = {
_label
}
} | rehei/crudible | crudible-core/src/main/scala/org/crudible/core/binding/Section.scala | Scala | apache-2.0 | 460 |
package org.bitcoins.core.serializers.p2p.messages
import org.bitcoins.core.p2p._
import org.bitcoins.core.protocol.CompactSizeUInt
import org.bitcoins.core.serializers.{RawBitcoinSerializer, RawSerializerHelper}
import org.bitcoins.crypto.DoubleSha256Digest
import scodec.bits.ByteVector
import scala.annotation.tailrec
/** This trait is responsible for the serialization and deserialization of
 * getblocks messages on the p2p network
* @see https://bitcoin.org/en/developer-reference#getblocks
*/
trait RawGetBlocksMessageSerializer
extends RawBitcoinSerializer[GetBlocksMessage] {
def read(bytes: ByteVector): GetBlocksMessage = {
val version = ProtocolVersion(bytes.take(4))
val hashCount =
CompactSizeUInt.parseCompactSizeUInt(bytes.slice(4, bytes.size))
val blockHeaderStartByte = (hashCount.byteSize + 4).toInt
val blockHeaderBytesStopHash = bytes.slice(blockHeaderStartByte, bytes.size)
val (blockHashHeaders, remainingBytes) =
parseBlockHeaders(blockHeaderBytesStopHash, hashCount)
val stopHash = DoubleSha256Digest(remainingBytes.slice(0, 32))
GetBlocksMessage(version, hashCount, blockHashHeaders, stopHash)
}
def write(getBlocksMessage: GetBlocksMessage): ByteVector = {
getBlocksMessage.protocolVersion.bytes ++
getBlocksMessage.hashCount.bytes ++
RawSerializerHelper.writeNetworkElements(
getBlocksMessage.blockHeaderHashes) ++
getBlocksMessage.stopHash.bytes
}
/** Helper function to parse block headers from a sequence of bytes
* Hashes are 32 bytes
* @param bytes the bytes which need to be parsed into BlockHeader hashes
* @param compactSizeUInt the p2p network object used to indicate how many block header hashes there are
* @return the sequence of hashes and the remaining bytes that need to be parsed
*/
private def parseBlockHeaders(
bytes: ByteVector,
compactSizeUInt: CompactSizeUInt): (
List[DoubleSha256Digest],
ByteVector) = {
@tailrec
def loop(
remainingHeaders: Long,
accum: List[DoubleSha256Digest],
remainingBytes: ByteVector): (List[DoubleSha256Digest], ByteVector) = {
if (remainingHeaders <= 0) (accum.reverse, remainingBytes)
else {
val dsha256 = DoubleSha256Digest(remainingBytes.slice(0, 32))
val rem = remainingBytes.slice(32, remainingBytes.size)
loop(remainingHeaders = remainingHeaders - 1,
accum = dsha256 :: accum,
remainingBytes = rem)
}
}
loop(compactSizeUInt.num.toInt, List.empty, bytes)
}
}
object RawGetBlocksMessageSerializer extends RawGetBlocksMessageSerializer
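/*
 * Illustrative round-trip sketch (assumes some already-built `msg: GetBlocksMessage`):
 *
 *   val bytes = RawGetBlocksMessageSerializer.write(msg)
 *   assert(RawGetBlocksMessageSerializer.read(bytes) == msg)
 */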
| bitcoin-s/bitcoin-s | core/src/main/scala/org/bitcoins/core/serializers/p2p/messages/RawGetBlocksMessageSerializer.scala | Scala | mit | 2,671 |
package org.questions.strings
import org.specs2.mutable.Specification
/**
* @author maximn
* @since 14-Nov-2015
*/
class WildCardMatchingTest extends Specification {
val matcher = new WildCardMatching
"same string" should {
"be matching" in {
val string = "abc"
matcher.isMatching(string, string) must beTrue
}
}
"different string" should {
"not match" in {
matcher.isMatching("ab", "bc") must beFalse
}
}
"question mark" should {
"allow to skip one char" in {
matcher.isMatching("abc", "a?c") must beTrue
}
}
"star" should {
"replace many chars" in {
matcher.isMatching("abcd", "a*d") must beTrue
}
"replace zero chars" in {
matcher.isMatching("abc", "ab*c") must beTrue
}
"replace one char" in {
matcher.isMatching("abc", "a*c") must beTrue
}
"match if star is in the end" in {
matcher.isMatching("abc", "a*") must beTrue
}
}
}
| maximn/coding-interview-questions | src/test/scala/org/questions/strings/WildCardMatchingTest.scala | Scala | apache-2.0 | 965 |
/*
* Copyright 2014 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.anormdb
import anorm.SqlParser._
import anorm._
import com.twitter.util.{Duration, Future, FuturePool, Time}
import com.twitter.zipkin.Constants
import com.twitter.zipkin.common._
import com.twitter.zipkin.storage.{IndexedTraceId, SpanStore, TraceIdDuration}
import com.twitter.zipkin.util.Util
import java.nio.ByteBuffer
import java.sql.{Connection, PreparedStatement}
// TODO: connection pooling for real parallelism
class AnormSpanStore(
db: SpanStoreDB,
openCon: Option[Connection] = None,
pool: FuturePool = FuturePool.unboundedPool
) extends SpanStore {
// Database connection object
private[this] implicit val conn = openCon match {
case None => db.getConnection()
case Some(con) => con
}
def close(deadline: Time): Future[Unit] = pool {
conn.close()
}
implicit object byteArrayToStatement extends ToStatement[Array[Byte]] {
def set(s: PreparedStatement, i: Int, b: Array[Byte]): Unit = s.setBytes(i, b)
}
private[this] val spanInsertSql = SQL("""
|INSERT INTO zipkin_spans
| (span_id, parent_id, trace_id, span_name, debug, duration, created_ts)
|VALUES
| ({span_id}, {parent_id}, {trace_id}, {span_name}, {debug}, {duration}, {created_ts})
""".stripMargin).asBatch
private[this] val annInsertSql = SQL("""
|INSERT INTO zipkin_annotations
| (span_id, trace_id, span_name, service_name, value, ipv4, port,
| a_timestamp, duration)
|VALUES
| ({span_id}, {trace_id}, {span_name}, {service_name}, {value},
| {ipv4}, {port}, {timestamp}, {duration})
""".stripMargin).asBatch
private[this] val binAnnInsertSql = SQL("""
|INSERT INTO zipkin_binary_annotations
| (span_id, trace_id, span_name, service_name, annotation_key,
| annotation_value, annotation_type_value, ipv4, port)
|VALUES
| ({span_id}, {trace_id}, {span_name}, {service_name}, {key}, {value},
| {annotation_type_value}, {ipv4}, {port})
""".stripMargin).asBatch
// store a list of spans
def apply(spans: Seq[Span]): Future[Unit] = {
var hasSpans = false
var hasAnns = false
var hasBinAnns = false
val init = (spanInsertSql, annInsertSql, binAnnInsertSql)
val (spanBatch, annBatch, binAnnBatch) =
spans.foldLeft(init) { case ((sb, ab, bb), span) =>
hasSpans = true
val sbp = sb.addBatch(
("span_id" -> span.id),
("parent_id" -> span.parentId),
("trace_id" -> span.traceId),
("span_name" -> span.name),
("debug" -> (if (span.debug) 1 else 0)),
("duration" -> span.duration),
("created_ts" -> span.firstAnnotation.map(_.timestamp))
)
if (!shouldIndex(span)) (sbp, ab, bb) else {
val abp = span.annotations.foldLeft(ab) { (ab, a) =>
hasAnns = true
ab.addBatch(
("span_id" -> span.id),
("trace_id" -> span.traceId),
("span_name" -> span.name),
("service_name" -> a.serviceName),
("value" -> a.value),
("ipv4" -> a.host.map(_.ipv4)),
("port" -> a.host.map(_.port)),
("timestamp" -> a.timestamp),
("duration" -> a.duration.map(_.inNanoseconds)))
}
val bbp = span.binaryAnnotations.foldLeft(bb) { (bb, b) =>
hasBinAnns = true
bb.addBatch(
("span_id" -> span.id),
("trace_id" -> span.traceId),
("span_name" -> span.name),
("service_name" -> b.host.map(_.serviceName).getOrElse("unknown")), // from Annotation
("key" -> b.key),
("value" -> Util.getArrayFromBuffer(b.value)),
("annotation_type_value" -> b.annotationType.value),
("ipv4" -> b.host.map(_.ipv4)),
("port" -> b.host.map(_.ipv4)))
}
(sbp, abp, bbp)
}
}
// This parallelism is a lie. There's only one DB connection (for now anyway).
Future.join(Seq(
if (hasSpans) pool { spanBatch.execute() } else Future.Done,
if (hasAnns) pool { annBatch.execute() } else Future.Done,
if (hasBinAnns) pool { binAnnBatch.execute() } else Future.Done
))
}
def setTimeToLive(traceId: String, ttl: Duration): Future[Unit] =
Future.Done
def getTimeToLive(traceId: String): Future[Duration] =
Future.value(Duration.Top)
private[this] def tracesExistSql(ids: Seq[String]) = SQL("""
SELECT trace_id FROM zipkin_spans WHERE trace_id IN (%s)
""".stripMargin.format(ids.map { id => "'"+id+"'"}.mkString(",")))
// using string interpolation
//""".stripMargin.format(ids.map { id => s"'", $id, "'"}.mkString(",")))
//""".format(ids.mkString(",")))
def tracesExist(traceIds: Seq[String]): Future[Set[String]] = pool {
tracesExistSql(traceIds)
.as(str("trace_id") *)
.toSet
}
private[this] def ep(ipv4: Option[Int], port: Option[Int], name: String) =
(ipv4, port) match {
case (Some(ipv4), Some(port)) => Some(Endpoint(ipv4, port.toShort, name))
case _ => None
}
private[this] def spansSql(ids: Seq[String]) = SQL("""
|SELECT span_id, parent_id, trace_id, span_name, debug
|FROM zipkin_spans
|WHERE trace_id IN (%s)
""".stripMargin.format(ids.map { id => "'"+id+"'"}.mkString(",")))
private[this] val spansResults = (
str("span_id") ~
get[Option[String]]("parent_id") ~
str("trace_id") ~
str("span_name") ~
int("debug")
) map { case sId~pId~tId~sn~d =>
Span(tId, sn, sId, pId, List.empty, List.empty, d > 0)
}
private[this] def annsSql(ids: Seq[String]) = SQL("""
|SELECT span_id, trace_id, span_name, service_name, value, ipv4, port, a_timestamp, duration
|FROM zipkin_annotations
|WHERE trace_id IN (%s)
""".stripMargin.format(ids.map { id => "'"+id+"'"}.mkString(",")))
private[this] val annsResults = (
str("span_id") ~
str("trace_id") ~
str("span_name") ~
str("service_name") ~
str("value") ~
get[Option[Int]]("ipv4") ~
get[Option[Int]]("port") ~
long("a_timestamp") ~
get[Option[Long]]("duration")
) map { case sId~tId~spN~svcN~v~ip~p~ts~d =>
(tId, sId) -> Annotation(ts, v, ep(ip, p, svcN), d map Duration.fromNanoseconds)
}
private[this] def binAnnsSql(ids: Seq[String]) = SQL("""
|SELECT span_id, trace_id, span_name, service_name, annotation_key,
| annotation_value, annotation_type_value, ipv4, port
|FROM zipkin_binary_annotations
|WHERE trace_id IN (%s)
""".stripMargin.format(ids.map { id => "'"+id+"'"}.mkString(",")))
private[this] val binAnnsResults = (
str("span_id") ~
str("trace_id") ~
str("span_name") ~
str("service_name") ~
str("annotation_key") ~
db.bytes("annotation_value") ~
int("annotation_type_value") ~
get[Option[Int]]("ipv4") ~
get[Option[Int]]("port")
) map { case sId~tId~spN~svcN~key~annV~annTV~ip~p =>
val annVal = ByteBuffer.wrap(annV)
val annType = AnnotationType.fromInt(annTV)
(tId, sId) -> BinaryAnnotation(key, annVal, annType, ep(ip, p, svcN))
}
// parallel queries here are also a lie (see above).
def getSpansByTraceIds(ids: Seq[String]): Future[Seq[Seq[Span]]] = {
    val spans = pool {
      spansSql(ids).as(spansResults *)
    } map { _.distinct.groupBy(_.traceId) }
val anns = pool {
annsSql(ids).as(annsResults *)
} map { _.groupBy(_._1) }
val binAnns = pool {
binAnnsSql(ids).as(binAnnsResults *)
} map { _.groupBy(_._1) }
Future.join(spans, anns, binAnns) map { case (spans, anns, binAnns) =>
ids map { id =>
for (tSpans <- spans.get(id).toSeq; tSpan <- tSpans) yield {
val tsAnns = anns.get((id, tSpan.id)).map(_.map(_._2)).toList.flatten
val tsBinAnns = binAnns.get((id, tSpan.id)).map(_.map(_._2)).toList.flatten
tSpan.copy(annotations = tsAnns, binaryAnnotations = tsBinAnns)
}
} filter { _.nonEmpty }
}
}
def getSpansByTraceId(traceId: String): Future[Seq[Span]] =
getSpansByTraceIds(Seq(traceId)).map(_.head)
private[this] val idsByNameSql = SQL("""
|SELECT trace_id, MAX(a_timestamp)
|FROM zipkin_annotations
|WHERE service_name = {service_name}
| AND (span_name = {span_name} OR {span_name} = '')
| AND a_timestamp < {end_ts}
|GROUP BY trace_id
|ORDER BY a_timestamp DESC
|LIMIT {limit}
""".stripMargin)
private[this] val idsByNameResults = (
str("trace_id") ~
long("MAX(a_timestamp)")
) map { case a~b => IndexedTraceId(a, b) }
def getTraceIdsByName(
serviceName: String,
spanName: Option[String],
endTs: Long,
limit: Int
): Future[Seq[IndexedTraceId]] = pool {
idsByNameSql
.on("service_name" -> serviceName)
.on("span_name" -> spanName.getOrElse(""))
.on("end_ts" -> endTs)
.on("limit" -> limit)
.as(idsByNameResults *)
}
private[this] val byAnnValSql = SQL("""
|SELECT zba.trace_id, s.created_ts
|FROM zipkin_binary_annotations AS zba
|LEFT JOIN zipkin_spans AS s
| ON zba.trace_id = s.trace_id
|WHERE zba.service_name = {service_name}
| AND zba.annotation_key = {annotation}
| AND zba.annotation_value = {value}
| AND s.created_ts < {end_ts}
| AND s.created_ts IS NOT NULL
|GROUP BY zba.trace_id
|ORDER BY s.created_ts DESC
|LIMIT {limit}
""".stripMargin)
private[this] val byAnnValResult = (
str("trace_id") ~
long("created_ts")
) map { case a~b => IndexedTraceId(a, b) }
private[this] val byAnnSql = SQL("""
|SELECT trace_id, MAX(a_timestamp)
|FROM zipkin_annotations
|WHERE service_name = {service_name}
| AND value = {annotation}
| AND a_timestamp < {end_ts}
|GROUP BY trace_id
|ORDER BY a_timestamp DESC
|LIMIT {limit}
""".stripMargin)
private[this] val byAnnResult = (
str("trace_id") ~
long("MAX(a_timestamp)")
) map { case a~b => IndexedTraceId(a, b) }
def getTraceIdsByAnnotation(
serviceName: String,
annotation: String,
value: Option[ByteBuffer],
endTs: Long,
limit: Int
): Future[Seq[IndexedTraceId]] =
if (Constants.CoreAnnotations.contains(annotation))
Future.value(Seq.empty)
else pool {
val sql = value
.map(_ => byAnnValSql)
.getOrElse(byAnnSql)
.on("service_name" -> serviceName)
.on("annotation" -> annotation)
.on("end_ts" -> endTs)
.on("limit" -> limit)
value match {
case Some(bytes) =>
sql.on("value" -> Util.getArrayFromBuffer(bytes)).as(byAnnValResult *)
case None =>
sql.as(byAnnResult *)
}
}
private[this] def byDurationSql(ids: Seq[String]) = SQL("""
|SELECT trace_id, duration, created_ts
|FROM zipkin_spans
|WHERE trace_id IN (%s) AND created_ts IS NOT NULL
|GROUP BY trace_id
""".stripMargin.format(ids.map { id => "'"+id+"'"}.mkString(",")))
//""".stripMargin.format(ids.mkString(",")))
private[this] val byDurationResults = (
str("trace_id") ~
get[Option[Long]]("duration") ~
long("created_ts")
) map { case a~b~c => TraceIdDuration(a, b.getOrElse(0), c) }
def getTracesDuration(traceIds: Seq[String]): Future[Seq[TraceIdDuration]] = pool {
byDurationSql(traceIds)
.as(byDurationResults *)
}
private[this] val svcNamesSql = SQL("""
|SELECT service_name
|FROM zipkin_annotations
|GROUP BY service_name
|ORDER BY service_name ASC
""".stripMargin)
def getAllServiceNames: Future[Set[String]] = pool {
svcNamesSql.as(str("service_name") *).toSet
}
private[this] val spanNamesSql = SQL("""
|SELECT span_name
|FROM zipkin_annotations
|WHERE service_name = {service} AND span_name <> ''
|GROUP BY span_name
|ORDER BY span_name ASC
""".stripMargin)
def getSpanNames(service: String): Future[Set[String]] = pool {
spanNamesSql.on("service" -> service).as(str("span_name") *).toSet
}
}
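/*
 * Illustrative usage sketch (the DB handle and `span` value are assumptions, not the
 * project's documented setup):
 *
 *   val store = new AnormSpanStore(db)      // db: SpanStoreDB
 *   store(Seq(span))                        // write span + annotations in batches
 *   store.getSpansByTraceId(span.traceId)   // Future[Seq[Span]] read back
 */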
| cogitate/twitter-zipkin-uuid | zipkin-anormdb/src/main/scala/com/twitter/zipkin/storage/anormdb/AnormSpanStore.scala | Scala | apache-2.0 | 12,783 |
_oprot.{{protocolWriteMethod}}({{name}})
| elipoz/scrooge | scrooge-generator/src/main/resources/scalagen/writeBase.scala | Scala | apache-2.0 | 41 |
package java.util.logging
import java.lang.StringBuilder
abstract class Formatter protected () {
def format(record: LogRecord): String
def getHead(h: Handler): String = ""
def getTail(h: Handler): String = ""
def formatMessage(record: LogRecord): String = {
val msg = record.getMessage
val params = record.getParameters
if (params != null && params.length > 0) {
// The Java spec uses java.text formatting not available in Scala.js
// Instead we'll do simple text replacement, very imperative
var msgAccumulator = new StringBuilder()
var inParam = false
var paramInFlight: StringBuilder = null
var substitutionFailure = false // track failure to break the loop
var i = 0
// Do one run over msg keeping track if a param needs replacement
while (i < msg.length && !substitutionFailure) {
val currentChar = msg.charAt(i)
i = i + 1
if (currentChar == '{' && !inParam) {
// Beginning of param
inParam = true
paramInFlight = new StringBuilder()
} else if (inParam && currentChar != '}') {
// accumulate the param
paramInFlight.append(currentChar)
} else if (currentChar == '}') {
// end of param, replace placeholder by value if possible
inParam = false
val (failed, replacement) = {
try {
val index = paramInFlight.toString().toInt
if (index >= 0 && index < params.length) {
(false, params(index).toString)
} else if (index > 0) {
(false, "{" + index + "}")
} else {
// Negative indexes break substitution on the JVM
(true, "")
}
} catch {
case e: Exception =>
// The JVM will halt replacing if it cannot parse one param
(true, "")
}
}
          // Like the JVM, if any parameter reference is bogus we stop substituting
          // altogether and fall back to the raw message
if (failed)
substitutionFailure = failed
else
msgAccumulator.append(replacement)
} else {
msgAccumulator.append(currentChar)
}
}
if (substitutionFailure || inParam) msg
else msgAccumulator.toString()
} else {
msg
}
}
}
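/*
 * Illustrative sketch of the substitution implemented above (values are arbitrary):
 *
 *   val record = new LogRecord(Level.INFO, "user {0} logged in from {1}")
 *   record.setParameters(Array[AnyRef]("alice", "10.0.0.1"))
 *   // someFormatter.formatMessage(record) == "user alice logged in from 10.0.0.1"
 *
 * An out-of-range positive index such as {5} is kept literally, while a negative or
 * non-numeric index aborts substitution and the raw message is returned.
 */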
| scala-js/scala-js-java-logging | src/main/scala/java/util/logging/Formatter.scala | Scala | bsd-3-clause | 2,395 |
package aia.persistence.rest
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.Try
import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import akka.stream._
import akka.stream.scaladsl._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import spray.json._
import aia.persistence._
class ShoppersService(val shoppers: ActorRef, val system: ActorSystem, val requestTimeout: Timeout) extends ShoppersRoutes {
val executionContext = system.dispatcher
}
trait ShoppersRoutes extends ShopperMarshalling {
def routes =
deleteItem ~
updateItem ~
getBasket ~
updateBasket ~
deleteBasket ~
pay
def shoppers: ActorRef
implicit def requestTimeout: Timeout
implicit def executionContext: ExecutionContext
def pay = {
post {
pathPrefix("shopper" / ShopperIdSegment / "pay") { shopperId =>
shoppers ! Shopper.PayBasket(shopperId)
complete(OK)
}
}
}
def getBasket = {
get {
pathPrefix("shopper" / ShopperIdSegment / "basket") { shopperId =>
pathEnd {
onSuccess(shoppers.ask(Basket.GetItems(shopperId)).mapTo[Items]) {
case Items(Nil) => complete(NotFound)
case items: Items => complete(items)
}
}
}
}
}
def updateBasket = {
post {
pathPrefix("shopper" / ShopperIdSegment / "basket") { shopperId =>
pathEnd {
entity(as[Items]) { items =>
shoppers ! Basket.Replace(items, shopperId)
complete(OK)
} ~
entity(as[Item]) { item =>
shoppers ! Basket.Add(item, shopperId)
complete(OK)
}
}
}
}
}
def deleteBasket = {
delete {
pathPrefix("shopper" / ShopperIdSegment / "basket") { shopperId =>
pathEnd {
shoppers ! Basket.Clear(shopperId)
complete(OK)
}
}
}
}
def updateItem = {
post {
pathPrefix("shopper" / ShopperIdSegment / "basket" / ProductIdSegment) {
(shopperId, productId) =>
pathEnd {
entity(as[ItemNumber]) { itemNumber =>
val ItemNumber(number) = itemNumber
val updateItem = Basket.UpdateItem(productId, number, shopperId)
onSuccess(shoppers.ask(updateItem)
.mapTo[Option[Basket.ItemUpdated]]) {
case Some(_) => complete(OK)
case None => complete(NotFound)
}
}
}
}
}
}
def deleteItem = {
delete {
pathPrefix("shopper" / ShopperIdSegment / "basket" / ProductIdSegment) {
(shopperId, productId) =>
pathEnd {
val removeItem = Basket.RemoveItem(productId, shopperId)
onSuccess(shoppers.ask(removeItem)
.mapTo[Option[Basket.ItemRemoved]]) {
case Some(_) => complete(OK)
case None => complete(NotFound)
}
}
}
}
}
val ShopperIdSegment = Segment.flatMap(id => Try(id.toLong).toOption)
val ProductIdSegment = Segment.flatMap(id => if(!id.isEmpty) Some(id) else None)
}
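/*
 * Illustrative wiring sketch (actor props, timeout and port are assumptions):
 *
 *   implicit val system = ActorSystem("shoppers")
 *   implicit val mat = ActorMaterializer()
 *   val shoppers = system.actorOf(Props[Shoppers], "shoppers")
 *   val service = new ShoppersService(shoppers, system, Timeout(10.seconds))
 *   Http().bindAndHandle(service.routes, "0.0.0.0", 5000)
 */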
| RayRoestenburg/akka-in-action | chapter-persistence/src/main/scala/aia/persistence/rest/ShopperService.scala | Scala | mit | 3,388 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.ast.convert.plannerQuery
import org.neo4j.cypher.internal.compiler.v2_3.helpers.CollectionSupport
import org.neo4j.cypher.internal.compiler.v2_3.planner.logical.plans.IdName
import org.neo4j.cypher.internal.compiler.v2_3.planner.{Selections, PlannerQuery, QueryGraph, QueryHorizon}
import org.neo4j.cypher.internal.frontend.v2_3.helpers.NonEmptyList._
case class PlannerQueryBuilder(private val q: PlannerQuery, returns: Seq[IdName] = Seq.empty)
extends CollectionSupport {
def withReturns(returns: Seq[IdName]): PlannerQueryBuilder = copy(returns = returns)
def updateGraph(f: QueryGraph => QueryGraph): PlannerQueryBuilder =
copy(q = q.updateTailOrSelf(_.updateGraph(f)))
def withHorizon(horizon: QueryHorizon): PlannerQueryBuilder =
copy(q = q.updateTailOrSelf(_.withHorizon(horizon)))
def withTail(newTail: PlannerQuery): PlannerQueryBuilder = {
copy(q = q.updateTailOrSelf(_.withTail(newTail)))
}
def currentlyAvailableIdentifiers: Set[IdName] =
currentQueryGraph.coveredIds
def currentQueryGraph: QueryGraph = {
var current = q
while (current.tail.nonEmpty) {
current = current.tail.get
}
current.graph
}
def build(): PlannerQuery = {
def fixArgumentIdsOnOptionalMatch(plannerQuery: PlannerQuery): PlannerQuery = {
val optionalMatches = plannerQuery.graph.optionalMatches
val (_, newOptionalMatches) = optionalMatches.foldMap(plannerQuery.graph.coveredIds) { case (args, qg) =>
(args ++ qg.allCoveredIds, qg.withArgumentIds(args intersect qg.allCoveredIds))
}
plannerQuery
.updateGraph(_.withOptionalMatches(newOptionalMatches))
.updateTail(fixArgumentIdsOnOptionalMatch)
}
val fixedArgumentIds = q.foldMap {
case (head, tail) =>
val symbols = head.horizon.exposedSymbols(head.graph)
val newTailGraph = tail.graph.withArgumentIds(symbols)
tail.withGraph(newTailGraph)
}
def groupInequalities(plannerQuery: PlannerQuery): PlannerQuery = {
plannerQuery
.updateGraph(_.mapSelections {
case Selections(predicates) =>
val optPredicates = predicates.toNonEmptyListOption
val newPredicates = optPredicates.map { predicates =>
groupInequalityPredicates(predicates).toList.toSet
}.getOrElse(predicates)
Selections(newPredicates)
})
.updateTail(groupInequalities)
}
val withFixedArgumentIds = fixArgumentIdsOnOptionalMatch(fixedArgumentIds)
val withGroupedInequalities = groupInequalities(withFixedArgumentIds)
withGroupedInequalities
}
}
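/*
 * Illustrative sketch of how the builder is typically driven (the graph updates shown
 * are placeholders for the clauses produced by the statement converters):
 *
 *   val plannerQuery = PlannerQueryBuilder.empty
 *     .updateGraph(_.addPatternNodes(IdName("n")))
 *     .withReturns(Seq(IdName("n")))
 *     .build()
 */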
object PlannerQueryBuilder {
val empty = new PlannerQueryBuilder(PlannerQuery.empty)
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/ast/convert/plannerQuery/PlannerQueryBuilder.scala | Scala | apache-2.0 | 3,557 |
import sbt._
import Keys._
import android.Keys._
object General {
val settings = Defaults.defaultSettings ++ Seq (
name := "webSilvia",
version := "0.1",
scalaVersion := "2.10.4",
resolvers ++= Seq(
"sonatype-s" at "http://oss.sonatype.org/content/repositories/snapshots",
"audiobox" at "http://audiobox.keytwo.net"
),
libraryDependencies ++= Seq(
"org.scalaz" %% "scalaz-core" % "7.1.0-RC1",
"org.scalaz" %% "scalaz-effect" % "7.1.0-RC1",
"org.scalaz" %% "scalaz-concurrent" % "7.1.0-RC1",
"com.google.zxing" % "android-integration" % "3.1.0",
"io.socket" % "socket.io-client" % "0.2.1"
),
scalacOptions := Seq(
"-encoding", "utf8",
"-target:jvm-1.6",
"-feature",
"-unchecked",
"-deprecation",
"-optimise",
"-Ywarn-value-discard"
),
javacOptions ++= Seq(
"-encoding", "utf8",
"-source", "1.6",
"-target", "1.6"
)
)
val proguardSettings = Seq (
useProguard in Android := true,
proguardOptions in Android += "-keep class scala.Function1",
proguardOptions in Android += "-keep class scala.PartialFunction",
proguardOptions in Android += "-keep class scala.util.parsing.combinator.Parsers",
proguardOptions in Android += "-dontwarn javax.swing.SwingWorker",
proguardOptions in Android += "-dontwarn javax.swing.SwingUtilities",
proguardCache in Android += ProguardCache("scalaz") % "org.scalaz"
)
lazy val fullAndroidSettings =
General.settings ++
android.Plugin.androidBuild ++
proguardSettings
}
object AndroidBuild extends Build {
lazy val main = Project (
"webSilvia",
file("."),
settings = General.fullAndroidSettings ++ Seq(
platformTarget in Android := "android-15"
)
)
lazy val tests = Project (
"tests",
file("tests"),
settings = General.settings
) dependsOn main
}
| relrod/webSilvia-android | project/Build.scala | Scala | bsd-3-clause | 1,948 |
package uk.gov.dvla.vehicles.retain.gatling
import io.gatling.core.Predef._
import io.gatling.core.feeder._
object Scenarios {
// Happy paths
def assetsAreAccessible = {
val data = RecordSeqFeederBuilder[String](records = Array[Record[String]]())
val chain = new Chains(data)
scenario("Assets Are accessible")
.exec(
chain.assetsAreAccessible
)
}
def registeredKeeperAndFullKeeperAddress = {
val data = csv("data/happy/RegisteredKeeperAndFullKeeperAddress.csv").circular
val chain = new Chains(data)
endToEnd(
scenarioName = "Single retention from start to finish with Registered Keeper And Full Keeper Address",
chain
)
}
def registeredKeeperAndPartialKeeperAddress = {
val data = csv("data/happy/RegisteredKeeperAndPartialKeeperAddress.csv").circular
val chain = new Chains(data)
endToEnd(
scenarioName = "Single retention from start to finish with Registered Keeper And Partial Keeper Address",
chain
)
}
def registeredKeeperAndMakeNoModel = {
val data = csv("data/happy/RegisteredKeeperAndMakeNoModel.csv").circular
val chain = new Chains(data)
endToEnd(
scenarioName = "Single retention from start to finish with Registered Keeper And make no model",
chain
)
}
def registeredKeeperAndModelNoMake = {
val data = csv("data/happy/RegisteredKeeperAndModelNoMake.csv").circular
val chain = new Chains(data)
endToEnd(
scenarioName = "Single retention from start to finish with Registered Keeper And model no make",
chain
)
}
def registeredKeeperVeryLongMakeAndModel = {
val data = csv("data/happy/RegisteredKeeperVeryLongMakeAndModel.csv").circular
val chain = new Chains(data)
endToEnd(
scenarioName = "Single retention from start to finish with Registered Keeper with very long make and model",
chain
)
}
def notRegisteredKeeperAndFullKeeperAddress = {
val data = csv("data/happy/NotRegisteredKeeperAndFullKeeperAddress.csv").circular
val chain = new Chains(data)
scenario("Single retention from start to finish with not Registered Keeper And Full Keeper Address")
.exitBlockOnFail(
exec(
chain.beforeYouStart,
chain.beforeYouStartToVehicleLookup,
chain.vehicleLookupToSetupBusinessDetails,
chain.setupBusinessDetailsToBusinessChooseYourAddress,
chain.businessChooseYourAddressToConfirmBusiness,
chain.confirmBusinessToIframePayment,
chain.paymentCallbackToRetainToSuccess
)
)
}
// Sad paths
def vrmNotFound = {
val data = csv("data/sad/VrmNotFound.csv").circular
val chain = new Chains(data)
scenario("Vrm not found")
.exitBlockOnFail(
exec(
chain.beforeYouStart,
chain.beforeYouStartToVehicleLookup,
chain.vehicleLookupToVehicleLookupFailure
)
)
}
def eligibilityCheckDirectToPaper = {
val data = csv("data/sad/EligibilityCheckDirectToPaper.csv").circular
val chain = new Chains(data)
scenario("Eligibility Check - Direct to Paper")
.exitBlockOnFail(
exec(
chain.beforeYouStart,
chain.beforeYouStartToVehicleLookup,
chain.vehicleLookupToDirectToPaper
)
)
}
def notEligibleToTransact = {
val data = csv("data/sad/NotEligibleToTransact.csv").circular
val chain = new Chains(data)
scenario("Eligibility Check - Not Eligible To Transact")
.exitBlockOnFail(
exec(
chain.beforeYouStart,
chain.beforeYouStartToVehicleLookup,
chain.vehicleLookupToNotEligibleToTransact
)
)
}
private def endToEnd(scenarioName: String, chain: Chains) =
scenario(scenarioName)
.exitBlockOnFail(
exec(
chain.beforeYouStart,
chain.beforeYouStartToVehicleLookup,
chain.vehicleLookupToConfirm//,
// chain.confirmToIframePayment,
// chain.paymentCallbackToRetainToSuccess
)
)
}
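/*
 * Illustrative sketch of plugging one of these scenarios into a Gatling simulation
 * (base URL and injection profile are assumptions):
 *
 *   class RetentionSimulation extends Simulation {
 *     setUp(
 *       Scenarios.registeredKeeperAndFullKeeperAddress.inject(atOnceUsers(1))
 *     ).protocols(http.baseURL("http://localhost:9000"))
 *   }
 */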
| dvla/vrm-retention-online | gatling-tests/src/test/scala/uk/gov/dvla/vehicles/retain/gatling/Scenarios.scala | Scala | mit | 4,080 |
/*
* Seldon -- open source prediction engine
* =======================================
* Copyright 2011-2015 Seldon Technologies Ltd and Rummble Ltd (http://www.seldon.io/)
*
**********************************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************************************
*/
package io.seldon.spark.graph
import org.apache.log4j.Level
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.graphx.Edge
import org.apache.spark.graphx.Graph
import org.apache.spark.graphx.lib.TriangleCountEdge
import org.apache.spark.graphx.VertexRDD
import org.apache.commons.lang.StringEscapeUtils
import scala.collection.mutable.ListBuffer
import org.apache.spark.graphx.GraphLoader
case class Config(
local : Boolean = false,
inputPath : String = "",
outputPath : String = "",
awsKey : String = "",
awsSecret : String = "",
minNumTrianglesEdge : Int = 1)
case class SemanticAttrs(name : Int,triangles : Map[Long,Int],numTriangles : Int,degrees : Int,clusterCoef : Float)
class SemanticCohesion(private val sc : SparkContext,config : Config) {
def run()
{
// create graph from vertices and edges
val graph = GraphLoader.edgeListFile(sc,config.inputPath)
// do traingle counting on graph removing traingles with low support
val triCounts = TriangleCountEdge.run(graph,config.minNumTrianglesEdge).vertices.filter(_._2._1.size > 0)
// join to original graph the triangle result
val graph2 = graph.outerJoinVertices(triCounts){(id,oldAttr,tri) =>
tri match {
case Some((mapOfTriangles,numTriangles)) => (oldAttr,mapOfTriangles,numTriangles)
case None => (oldAttr,Map(id->0),0)
}
}
val degrees: VertexRDD[Int] = graph.degrees
// calc cluster coefficient
val graph3 = graph2.outerJoinVertices(degrees){(id,oldAttr,degOpt) =>
val (name,mapOfTriangles,numOfTriangles) = oldAttr
degOpt match {
      case Some(deg) =>
        if (deg > 1) SemanticAttrs(name, mapOfTriangles, numOfTriangles, deg, 2 * numOfTriangles / (deg * (deg - 1)).floatValue())
        else SemanticAttrs(name, mapOfTriangles, numOfTriangles, 0, 0)
case None => SemanticAttrs(name,mapOfTriangles,numOfTriangles,0,0)
}
}
val semEdges = graph3.vertices.flatMap{v =>
val map = v._2.triangles
val buf = new ListBuffer[Edge[Float]]()
for ((vid,triangles) <- map)
{
if (v._2.numTriangles > 0)
{
//buf.append(Edge(v._1,vid,triangles.toFloat/(v._2.numTriangles.toFloat * 2.0F) * v._2.clusterCoef))
buf.append(Edge(v._1,vid,triangles))
}
}
buf
}
val semVertices = graph3.mapVertices((vid,vd) => (vd.name,vd.numTriangles)).vertices.filter(_._2._2 > 0);
val graph4 = Graph(semVertices,semEdges)
graph4.edges.saveAsTextFile(config.outputPath+"/edges")
graph4.vertices.saveAsTextFile(config.outputPath+"/vertices")
//graph3.vertices.saveAsTextFile(config.outputPath+"/graph")
}
}
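// Local clustering coefficient used in graph3 above: C(v) = 2 * T(v) / (deg(v) * (deg(v) - 1)),
// where T(v) is the number of triangles through v. For example, a vertex with degree 4 and
// 3 triangles gets C = 2 * 3 / (4 * 3) = 0.5.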
object SemanticCohesion
{
def main(args: Array[String])
{
Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
    val parser = new scopt.OptionParser[Config]("SemanticCohesion") {
    head("SemanticCohesion", "1.x")
opt[Unit]('l', "local") action { (_, c) => c.copy(local = true) } text("debug mode - use local Master")
opt[String]('i', "input-path") required() valueName("path url") action { (x, c) => c.copy(inputPath = x) } text("path prefix for input")
opt[String]('o', "output-path") required() valueName("path url") action { (x, c) => c.copy(outputPath = x) } text("path prefix for output")
opt[String]('a', "awskey") required() valueName("aws access key") action { (x, c) => c.copy(awsKey = x) } text("aws key")
opt[String]('s', "awssecret") required() valueName("aws secret") action { (x, c) => c.copy(awsSecret = x) } text("aws secret")
opt[Int]('m', "min-triangles") required() valueName("min number triangles") action { (x, c) => c.copy(minNumTrianglesEdge = x) } text("min triangles")
}
parser.parse(args, Config()) map { config =>
val conf = new SparkConf()
        .setAppName("SemanticCohesion")
if (config.local)
conf.setMaster("local")
.set("spark.executor.memory", "8g")
val sc = new SparkContext(conf)
try
{
sc.hadoopConfiguration.set("fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem")
sc.hadoopConfiguration.set("fs.s3n.awsAccessKeyId", config.awsKey)
sc.hadoopConfiguration.set("fs.s3n.awsSecretAccessKey", config.awsSecret)
val cByd = new SemanticCohesion(sc,config)
cByd.run()
}
finally
{
println("Shutting down job")
sc.stop()
}
} getOrElse
{
}
// set up environment
}
} | smrjan/seldon-server | offline-jobs/spark/src/main/scala/io/seldon/spark/graph/SemanticCohesion.scala | Scala | apache-2.0 | 5,594 |
package worker
import java.util.UUID
import scala.concurrent.forkjoin.ThreadLocalRandom
import scala.concurrent.duration._
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
object WorkProducer {
case object Tick
}
class WorkProducer(frontend: ActorRef) extends Actor with ActorLogging {
import WorkProducer._
import context.dispatcher
def scheduler = context.system.scheduler
def rnd = ThreadLocalRandom.current
def nextWorkId(): String = UUID.randomUUID().toString
var n = 0
override def preStart(): Unit =
scheduler.scheduleOnce(5.seconds, self, Tick)
// override postRestart so we don't call preStart and schedule a new Tick
override def postRestart(reason: Throwable): Unit = ()
def receive = {
case Tick =>
n += 1
log.info("Produced work: {}", n)
val work = Work(nextWorkId(), n)
frontend ! work
context.become(waitAccepted(work), discardOld = false)
}
def waitAccepted(work: Work): Actor.Receive = {
case Frontend.Ok =>
context.unbecome()
scheduler.scheduleOnce(rnd.nextInt(3, 10).seconds, self, Tick)
case Frontend.NotOk =>
log.info("Work not accepted, retry after a while")
scheduler.scheduleOnce(3.seconds, frontend, work)
}
} | typesafehub/activator-akka-distributed-workers | src/main/scala/worker/WorkProducer.scala | Scala | cc0-1.0 | 1,275 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.parser
import org.antlr.v4.runtime._
import org.antlr.v4.runtime.atn.PredictionMode
import org.antlr.v4.runtime.misc.{Interval, ParseCancellationException}
import org.antlr.v4.runtime.tree.TerminalNodeImpl
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.trees.Origin
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{DataType, StructType}
/**
* Base SQL parsing infrastructure.
*/
abstract class AbstractSqlParser extends ParserInterface with Logging {
/** Creates/Resolves DataType for a given SQL string. */
override def parseDataType(sqlText: String): DataType = parse(sqlText) { parser =>
astBuilder.visitSingleDataType(parser.singleDataType())
}
/** Creates Expression for a given SQL string. */
override def parseExpression(sqlText: String): Expression = parse(sqlText) { parser =>
astBuilder.visitSingleExpression(parser.singleExpression())
}
/** Creates TableIdentifier for a given SQL string. */
override def parseTableIdentifier(sqlText: String): TableIdentifier = parse(sqlText) { parser =>
astBuilder.visitSingleTableIdentifier(parser.singleTableIdentifier())
}
/** Creates FunctionIdentifier for a given SQL string. */
override def parseFunctionIdentifier(sqlText: String): FunctionIdentifier = {
parse(sqlText) { parser =>
astBuilder.visitSingleFunctionIdentifier(parser.singleFunctionIdentifier())
}
}
/** Creates a multi-part identifier for a given SQL string */
override def parseMultipartIdentifier(sqlText: String): Seq[String] = {
parse(sqlText) { parser =>
astBuilder.visitSingleMultipartIdentifier(parser.singleMultipartIdentifier())
}
}
/**
* Creates StructType for a given SQL string, which is a comma separated list of field
* definitions which will preserve the correct Hive metadata.
*/
override def parseTableSchema(sqlText: String): StructType = parse(sqlText) { parser =>
astBuilder.visitSingleTableSchema(parser.singleTableSchema())
}
/** Creates LogicalPlan for a given SQL string. */
override def parsePlan(sqlText: String): LogicalPlan = parse(sqlText) { parser =>
astBuilder.visitSingleStatement(parser.singleStatement()) match {
case plan: LogicalPlan => plan
case _ =>
val position = Origin(None, None)
throw new ParseException(Option(sqlText), "Unsupported SQL statement", position, position)
}
}
/** Get the builder (visitor) which converts a ParseTree into an AST. */
protected def astBuilder: AstBuilder
protected def parse[T](command: String)(toResult: SqlBaseParser => T): T = {
logDebug(s"Parsing command: $command")
val lexer = new SqlBaseLexer(new UpperCaseCharStream(CharStreams.fromString(command)))
lexer.removeErrorListeners()
lexer.addErrorListener(ParseErrorListener)
lexer.legacy_setops_precedence_enbled = SQLConf.get.setOpsPrecedenceEnforced
lexer.ansi = SQLConf.get.ansiEnabled
val tokenStream = new CommonTokenStream(lexer)
val parser = new SqlBaseParser(tokenStream)
parser.addParseListener(PostProcessor)
parser.removeErrorListeners()
parser.addErrorListener(ParseErrorListener)
parser.legacy_setops_precedence_enbled = SQLConf.get.setOpsPrecedenceEnforced
parser.ansi = SQLConf.get.ansiEnabled
try {
try {
// first, try parsing with potentially faster SLL mode
parser.getInterpreter.setPredictionMode(PredictionMode.SLL)
toResult(parser)
}
catch {
case e: ParseCancellationException =>
// if we fail, parse with LL mode
tokenStream.seek(0) // rewind input stream
parser.reset()
// Try Again.
parser.getInterpreter.setPredictionMode(PredictionMode.LL)
toResult(parser)
}
}
catch {
case e: ParseException if e.command.isDefined =>
throw e
case e: ParseException =>
throw e.withCommand(command)
case e: AnalysisException =>
val position = Origin(e.line, e.startPosition)
throw new ParseException(Option(command), e.message, position, position)
}
}
}
/**
* Concrete SQL parser for Catalyst-only SQL statements.
*/
class CatalystSqlParser(conf: SQLConf) extends AbstractSqlParser {
val astBuilder = new AstBuilder(conf)
}
/** For test-only. */
object CatalystSqlParser extends AbstractSqlParser {
val astBuilder = new AstBuilder(SQLConf.get)
}
/**
* This string stream provides the lexer with upper case characters only. This greatly simplifies
* lexing the stream, while we can maintain the original command.
*
* This is based on Hive's org.apache.hadoop.hive.ql.parse.ParseDriver.ANTLRNoCaseStringStream
*
* The comment below (taken from the original class) describes the rationale for doing this:
*
 * This class provides an implementation for a case insensitive token checker for the lexical
* analysis part of antlr. By converting the token stream into upper case at the time when lexical
* rules are checked, this class ensures that the lexical rules need to just match the token with
* upper case letters as opposed to combination of upper case and lower case characters. This is
* purely used for matching lexical rules. The actual token text is stored in the same way as the
* user input without actually converting it into an upper case. The token values are generated by
* the consume() function of the super class ANTLRStringStream. The LA() function is the lookahead
* function and is purely used for matching lexical rules. This also means that the grammar will
* only accept capitalized tokens in case it is run from other tools like antlrworks which do not
* have the UpperCaseCharStream implementation.
*/
private[parser] class UpperCaseCharStream(wrapped: CodePointCharStream) extends CharStream {
override def consume(): Unit = wrapped.consume
override def getSourceName(): String = wrapped.getSourceName
override def index(): Int = wrapped.index
override def mark(): Int = wrapped.mark
override def release(marker: Int): Unit = wrapped.release(marker)
override def seek(where: Int): Unit = wrapped.seek(where)
override def size(): Int = wrapped.size
override def getText(interval: Interval): String = {
// ANTLR 4.7's CodePointCharStream implementations have bugs when
// getText() is called with an empty stream, or intervals where
// the start > end. See
// https://github.com/antlr/antlr4/commit/ac9f7530 for one fix
// that is not yet in a released ANTLR artifact.
if (size() > 0 && (interval.b - interval.a >= 0)) {
wrapped.getText(interval)
} else {
""
}
}
override def LA(i: Int): Int = {
val la = wrapped.LA(i)
if (la == 0 || la == IntStream.EOF) la
else Character.toUpperCase(la)
}
}
/**
* The ParseErrorListener converts parse errors into AnalysisExceptions.
*/
case object ParseErrorListener extends BaseErrorListener {
override def syntaxError(
recognizer: Recognizer[_, _],
offendingSymbol: scala.Any,
line: Int,
charPositionInLine: Int,
msg: String,
e: RecognitionException): Unit = {
val (start, stop) = offendingSymbol match {
case token: CommonToken =>
val start = Origin(Some(line), Some(token.getCharPositionInLine))
val length = token.getStopIndex - token.getStartIndex + 1
val stop = Origin(Some(line), Some(token.getCharPositionInLine + length))
(start, stop)
case _ =>
val start = Origin(Some(line), Some(charPositionInLine))
(start, start)
}
throw new ParseException(None, msg, start, stop)
}
}
/**
* A [[ParseException]] is an [[AnalysisException]] that is thrown during the parse process. It
* contains fields and an extended error message that make reporting and diagnosing errors easier.
*/
class ParseException(
val command: Option[String],
message: String,
val start: Origin,
val stop: Origin) extends AnalysisException(message, start.line, start.startPosition) {
def this(message: String, ctx: ParserRuleContext) = {
this(Option(ParserUtils.command(ctx)),
message,
ParserUtils.position(ctx.getStart),
ParserUtils.position(ctx.getStop))
}
override def getMessage: String = {
val builder = new StringBuilder
builder ++= "\\n" ++= message
start match {
case Origin(Some(l), Some(p)) =>
builder ++= s"(line $l, pos $p)\\n"
command.foreach { cmd =>
          val (above, below) = cmd.split("\n").splitAt(l)
          builder ++= "\n== SQL ==\n"
          above.foreach(builder ++= _ += '\n')
          builder ++= (0 until p).map(_ => "-").mkString("") ++= "^^^\n"
          below.foreach(builder ++= _ += '\n')
}
case _ =>
command.foreach { cmd =>
builder ++= "\\n== SQL ==\\n" ++= cmd
}
}
builder.toString
}
def withCommand(cmd: String): ParseException = {
new ParseException(Option(cmd), message, start, stop)
}
}
/**
* The post-processor validates & cleans-up the parse tree during the parse process.
*/
case object PostProcessor extends SqlBaseBaseListener {
  /** Throws an error message when exiting an explicitly captured wrong identifier rule */
override def exitErrorIdent(ctx: SqlBaseParser.ErrorIdentContext): Unit = {
val ident = ctx.getParent.getText
throw new ParseException(s"Possibly unquoted identifier $ident detected. " +
s"Please consider quoting it with back-quotes as `$ident`", ctx)
}
/** Remove the back ticks from an Identifier. */
override def exitQuotedIdentifier(ctx: SqlBaseParser.QuotedIdentifierContext): Unit = {
replaceTokenByIdentifier(ctx, 1) { token =>
// Remove the double back ticks in the string.
token.setText(token.getText.replace("``", "`"))
token
}
}
/** Treat non-reserved keywords as Identifiers. */
override def exitNonReserved(ctx: SqlBaseParser.NonReservedContext): Unit = {
replaceTokenByIdentifier(ctx, 0)(identity)
}
private def replaceTokenByIdentifier(
ctx: ParserRuleContext,
stripMargins: Int)(
f: CommonToken => CommonToken = identity): Unit = {
val parent = ctx.getParent
parent.removeLastChild()
val token = ctx.getChild(0).getPayload.asInstanceOf[Token]
val newToken = new CommonToken(
new org.antlr.v4.runtime.misc.Pair(token.getTokenSource, token.getInputStream),
SqlBaseParser.IDENTIFIER,
token.getChannel,
token.getStartIndex + stripMargins,
token.getStopIndex - stripMargins)
parent.addChild(new TerminalNodeImpl(f(newToken)))
}
}
| bdrillard/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParseDriver.scala | Scala | apache-2.0 | 11,729 |
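A small usage sketch of the test-only CatalystSqlParser object defined above; the SQL fragments are arbitrary examples, and a ParseException is raised on malformed input.
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
object CatalystParserSketch {
  def main(args: Array[String]): Unit = {
    val dataType = CatalystSqlParser.parseDataType("array<int>")        // DataType
    val table    = CatalystSqlParser.parseTableIdentifier("db.events")  // TableIdentifier
    val expr     = CatalystSqlParser.parseExpression("a + 1")           // Expression AST
    println(s"$dataType / $table / $expr")
  }
}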
package com.mesosphere.cosmos
import org.scalatest.FreeSpec
import org.scalatest.mock.MockitoSugar
import org.scalatest.prop.TableDrivenPropertyChecks
abstract class UnitSpec extends FreeSpec with TableDrivenPropertyChecks with MockitoSugar
| movicha/cosmos | cosmos-server/src/test/scala/com/mesosphere/cosmos/UnitSpec.scala | Scala | apache-2.0 | 243 |
package jp.pigumer.sbt.cloud.aws.ecr
import jp.pigumer.sbt.cloud.aws.cloudformation.AwscfSettings
import com.amazonaws.services.ecr.{AmazonECR, AmazonECRClientBuilder}
trait Ecr {
lazy val ecr: (AwscfSettings) => AmazonECR = settings ⇒
AmazonECRClientBuilder.standard.withCredentials(settings.credentialsProvider).withRegion(settings.region).build
}
| PigumerGroup/sbt-aws-cloudformation | src/main/scala/jp/pigumer/sbt/cloud/aws/ecr/Ecr.scala | Scala | mit | 362 |
package com.arcusys.learn.models.response.users
import com.arcusys.learn.models.response.certificates.{ CertificateResponseContract }
case class UserWithCertificatesResponse(id: Long,
name: String,
picture: String = "",
pageUrl: String = "",
certificates: Seq[CertificateResponseContract])
| ViLPy/Valamis | learn-portlet/src/main/scala/com/arcusys/learn/models/response/users/UserWithCertificatesResponse.scala | Scala | lgpl-3.0 | 300 |
package com.twitter.zipkin.collector.processor
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
import com.twitter.zipkin.gen
import com.twitter.zipkin.common.Span
import com.twitter.ostrich.stats.Stats
import com.twitter.util.Future
/**
* Adds server side duration data to ostrich, which
* in turn can be sent to a monitoring system where it can be queried.
*/
class OstrichProcessor(serviceStatsPrefix: String) extends Processor {
def processSpan(span: Span): Future[Unit] = {
for {
start <- span.getAnnotation(gen.Constants.SERVER_RECV)
end <- span.getAnnotation(gen.Constants.SERVER_SEND)
} {
span.serviceNames.foreach(serviceName => {
Stats.addMetric(serviceStatsPrefix + serviceName, (end - start).toInt)
Stats.addMetric(serviceStatsPrefix + serviceName + "." + span.name, (end - start).toInt)
})
}
Future.Unit
}
def shutdown() {}
} | lanrion/zipkin | zipkin-server/src/main/scala/com/twitter/zipkin/collector/processor/OstrichProcessor.scala | Scala | apache-2.0 | 1,466 |
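A hedged usage sketch: `span` is assumed to arrive from the collector pipeline with SERVER_RECV and SERVER_SEND annotations already attached; constructing a Span is not shown in this file.
import com.twitter.zipkin.collector.processor.OstrichProcessor
import com.twitter.zipkin.common.Span
object OstrichProcessorSketch {
  val processor = new OstrichProcessor("zipkin.")
  def record(span: Span) =
    // Emits Ostrich metrics named "zipkin.<service>" and "zipkin.<service>.<spanName>"
    processor.processSpan(span)
}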
/*
* Copyright (C) 2015 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.hook.file
import org.openmole.core.highlight.HighLight
import org.openmole.core.pluginmanager._
import org.openmole.core.pluginregistry.{ PluginInfo, PluginRegistry }
import org.openmole.core.workflow.hook.CSVHook
import org.osgi.framework.{ BundleActivator, BundleContext }
class Activator extends BundleActivator {
override def stop(context: BundleContext): Unit =
PluginRegistry.unregister(this)
override def start(context: BundleContext): Unit = {
import org.openmole.core.highlight.HighLight._
val keyWords: Vector[HighLight] =
Vector(
HookHighLight(CSVHook.getClass),
HookHighLight(AppendToFileHook.getClass),
HookHighLight(classOf[CopyFileHook]),
HookHighLight(classOf[SaveHook]),
HookHighLight(classOf[MatrixHook])
)
PluginRegistry.register(
this,
nameSpaces = Vector(this.getClass.getPackage),
nameSpaceTraits = Vector(classOf[FilePackage]),
highLight = keyWords)
}
}
| openmole/openmole | openmole/plugins/org.openmole.plugin.hook.file/src/main/scala/org/openmole/plugin/hook/file/Activator.scala | Scala | agpl-3.0 | 1,725 |
/*
* Copyright (C) 2011 Lalit Pant <[email protected]>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo
package picture
import core.Picture
import java.awt.Color
import java.awt.Paint
import java.awt.geom.AffineTransform
import net.kogics.kojo.kgeom.PolyLine
import util.Utils
import kgeom.PolyLine
trait Transformer extends Picture with CorePicOps2 {
val tpic: Picture
def bounds = tpic.bounds
def dumpInfo() = tpic.dumpInfo()
def rotate(angle: Double) = tpic.rotate(angle)
def rotateAboutPoint(angle: Double, x: Double, y: Double) = tpic.rotateAboutPoint(angle, x, y)
def scale(factor: Double) = tpic.scale(factor)
def scale(x: Double, y: Double) = tpic.scale(x, y)
def opacityMod(f: Double) = tpic.opacityMod(f)
def hueMod(f: Double) = tpic.hueMod(f)
def satMod(f: Double) = tpic.satMod(f)
def britMod(f: Double) = tpic.britMod(f)
def translate(x: Double, y: Double) = tpic.translate(x, y)
def offset(x: Double, y: Double) = tpic.offset(x, y)
def flipX() = tpic.flipX()
def flipY() = tpic.flipY()
def transformBy(trans: AffineTransform) = tpic.transformBy(trans)
def decorateWith(painter: Painter) = tpic.decorateWith(painter)
def tnode = tpic.tnode
def axesOn() = tpic.axesOn()
def axesOff() = tpic.axesOff()
def toggleV() = tpic.toggleV()
def position = tpic.position
def setPosition(x: Double, y: Double) = tpic.setPosition(x, y)
def heading = tpic.heading
def setHeading(angle: Double) = tpic.setHeading(angle)
def setPenColor(color: Color) = tpic.setPenColor(color)
def setPenThickness(th: Double) = tpic.setPenThickness(th)
def setFillColor(color: Paint) = tpic.setFillColor(color)
def morph(fn: Seq[PolyLine] => Seq[PolyLine]) = tpic.morph(fn)
def foreachPolyLine(fn: PolyLine => Unit) = tpic.foreachPolyLine(fn)
def intersects(other: Picture) = {
if (this == other) {
false
}
else {
tpic.intersects(other)
}
}
def intersection(other: Picture) = {
if (this == other) {
Impl.Gf.createGeometryCollection(null)
}
else {
tpic.intersection(other)
}
}
def distanceTo(other: Picture) = tpic.distanceTo(other)
def area = tpic.area
def perimeter = tpic.perimeter
def picGeom = tpic.picGeom
def visible() = tpic.visible()
def invisible() = tpic.invisible()
def isDrawn() = tpic.isDrawn()
def myCanvas = tpic.myCanvas
def erase() = tpic.erase()
}
abstract class Transform(pic: Picture) extends Transformer {
val tpic = pic
}
case class Rot(angle: Double)(pic: Picture) extends Transform(pic) {
def draw() {
pic.rotate(angle)
pic.draw()
}
def copy = Rot(angle)(pic.copy)
}
case class Rotp(angle: Double, x: Double, y: Double)(pic: Picture) extends Transform(pic) {
def draw() {
pic.rotateAboutPoint(angle, x, y)
pic.draw()
}
def copy = Rotp(angle, x, y)(pic.copy)
}
case class Scale(factor: Double)(pic: Picture) extends Transform(pic) {
def draw() {
pic.scale(factor)
pic.draw()
}
def copy = Scale(factor)(pic.copy)
}
case class ScaleXY(x: Double, y: Double)(pic: Picture) extends Transform(pic) {
def draw() {
pic.scale(x, y)
pic.draw()
}
def copy = ScaleXY(x, y)(pic.copy)
}
case class Trans(x: Double, y: Double)(pic: Picture) extends Transform(pic) {
def draw() {
pic.translate(x, y)
pic.draw()
}
def copy = Trans(x, y)(pic.copy)
}
case class Offset(x: Double, y: Double)(pic: Picture) extends Transform(pic) {
def draw() {
pic.offset(x, y)
pic.draw()
}
  def copy = Offset(x, y)(pic.copy)
}
case class FlipY(pic: Picture) extends Transform(pic) {
def draw() {
pic.flipY()
pic.draw()
}
def copy = FlipY(pic.copy)
}
case class FlipX(pic: Picture) extends Transform(pic) {
def draw() {
pic.flipX()
pic.draw()
}
def copy = FlipX(pic.copy)
}
case class AxesOn(pic: Picture) extends Transform(pic) {
def draw() {
pic.draw()
pic.axesOn()
}
def copy = AxesOn(pic.copy)
}
case class Opac(f: Double)(pic: Picture) extends Transform(pic) {
def draw() {
pic.opacityMod(f)
pic.draw()
}
def copy = Opac(f)(pic.copy)
}
case class Hue(f: Double)(pic: Picture) extends Transform(pic) {
Utils.checkHsbModFactor(f)
def draw() {
pic.draw()
pic.hueMod(f)
}
def copy = Hue(f)(pic.copy)
}
case class Sat(f: Double)(pic: Picture) extends Transform(pic) {
Utils.checkHsbModFactor(f)
def draw() {
pic.draw()
pic.satMod(f)
}
def copy = Sat(f)(pic.copy)
}
case class Brit(f: Double)(pic: Picture) extends Transform(pic) {
Utils.checkHsbModFactor(f)
def draw() {
pic.draw()
pic.britMod(f)
}
def copy = Brit(f)(pic.copy)
}
object Deco {
  def apply(pic: Picture)(painter: Painter): Deco = new Deco(pic)(painter)
}
class Deco(pic: Picture)(painter: Painter) extends Transform(pic) {
def draw() {
pic.decorateWith(painter)
pic.draw()
}
def copy = Deco(pic.copy)(painter)
}
import java.awt.Color
import java.awt.Paint
case class Fill(color: Paint)(pic: Picture) extends Deco(pic)({ t =>
t.setFillColor(color)
}) {
override def copy = Fill(color)(pic.copy)
}
case class Stroke(color: Color)(pic: Picture) extends Deco(pic)({ t =>
t.setPenColor(color)
}) {
override def copy = Stroke(color)(pic.copy)
}
case class StrokeWidth(w: Double)(pic: Picture) extends Deco(pic)({ t =>
t.setPenThickness(w)
}) {
override def copy = StrokeWidth(w)(pic.copy)
}
abstract class ComposableTransformer extends Function1[Picture,Picture] {outer =>
def apply(p: Picture): Picture
def ->(p: Picture) = apply(p)
def * (other: ComposableTransformer) = new ComposableTransformer {
def apply(p: Picture): Picture = {
outer.apply(other.apply(p))
}
}
}
case class Rotc(angle: Double) extends ComposableTransformer {
def apply(p: Picture) = Rot(angle)(p)
}
case class Rotpc(angle: Double, x: Double, y: Double) extends ComposableTransformer {
def apply(p: Picture) = Rotp(angle, x, y)(p)
}
case class Scalec(factor: Double) extends ComposableTransformer {
def apply(p: Picture) = Scale(factor)(p)
}
case class ScaleXYc(x: Double, y: Double) extends ComposableTransformer {
def apply(p: Picture) = ScaleXY(x, y)(p)
}
case class Opacc(f: Double) extends ComposableTransformer {
def apply(p: Picture) = Opac(f)(p)
}
case class Huec(f: Double) extends ComposableTransformer {
def apply(p: Picture) = Hue(f)(p)
}
case class Satc(f: Double) extends ComposableTransformer {
def apply(p: Picture) = Sat(f)(p)
}
case class Britc(f: Double) extends ComposableTransformer {
def apply(p: Picture) = Brit(f)(p)
}
case class Transc(x: Double, y: Double) extends ComposableTransformer {
def apply(p: Picture) = Trans(x, y)(p)
}
case class Offsetc(x: Double, y: Double) extends ComposableTransformer {
def apply(p: Picture) = Offset(x, y)(p)
}
case object FlipYc extends ComposableTransformer {
def apply(p: Picture) = FlipY(p)
}
case object FlipXc extends ComposableTransformer {
def apply(p: Picture) = FlipX(p)
}
case object AxesOnc extends ComposableTransformer {
def apply(p: Picture) = AxesOn(p)
}
case class Fillc(color: Paint) extends ComposableTransformer {
def apply(p: Picture) = Fill(color)(p)
}
case class Strokec(color: Color) extends ComposableTransformer {
def apply(p: Picture) = Stroke(color)(p)
}
case class StrokeWidthc(w: Double) extends ComposableTransformer {
def apply(p: Picture) = StrokeWidth(w)(p)
}
case class Decoc(painter: Painter) extends ComposableTransformer {
def apply(p: Picture) = Deco(p)(painter)
}
| vnkmr7620/kojo | KojoEnv/src/net/kogics/kojo/picture/transforms.scala | Scala | gpl-3.0 | 8,030 |
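The composable variants are designed to be chained with `*` and applied with `->`; a sketch under the assumption that `pic` is a Picture obtained from the Kojo drawing API.
import java.awt.Color
import net.kogics.kojo.core.Picture
import net.kogics.kojo.picture._
object TransformSketch {
  // Equivalent to Rot(45)(Scale(2)(Fill(Color.green)(Stroke(Color.black)(pic))))
  def decorate(pic: Picture): Picture =
    (Rotc(45) * Scalec(2) * Fillc(Color.green) * Strokec(Color.black)) -> pic
}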
package concrete.constraint.extension
import java.util
import concrete.Domain
trait Relation extends Iterable[Array[Int]] {
type Self2 <: Relation
def filterTrie(doms: Array[Domain], modified: List[Int]): Self2
def supported(domains: Array[Domain]): Array[util.HashSet[Int]]
def contains(t: Array[Int]): Boolean
def +(t: Seq[Int]): Self2
def -(t: Seq[Int]): Self2
def ++(t: Iterable[Seq[Int]]): Relation = t.foldLeft(Relation.this)(_ + _)
def --(t: Iterable[Seq[Int]]): Relation = t.foldLeft(Relation.this)(_ - _)
def edges: Int
// def copy: Self2
def findSupport(scope: Array[Domain], p: Int, i: Int): Option[Array[Int]]
def lambda: BigInt
def depth: Int
} | concrete-cp/concrete | src/main/scala/concrete/constraint/extension/Relation.scala | Scala | lgpl-2.1 | 689 |
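A small helper sketch showing how the bulk operators compose for any concrete Relation implementation; the concrete trie/MDD classes are assumed to live elsewhere in the project.
import concrete.constraint.extension.Relation
object RelationSketch {
  // Add the allowed tuples, then drop the forbidden ones; both operators fold over + / -.
  def amend(rel: Relation, allowed: Iterable[Seq[Int]], forbidden: Iterable[Seq[Int]]): Relation =
    (rel ++ allowed) -- forbidden
}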
/**
* Copyright 2011-2012 eBusiness Information, Groupe Excilys (www.excilys.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.excilys.ebi.gatling.recorder.http.handler
import java.net.URI
import scala.collection.JavaConversions.asScalaBuffer
import org.jboss.netty.channel.{ ChannelFuture, ChannelFutureListener, ChannelHandlerContext, ExceptionEvent, MessageEvent, SimpleChannelHandler }
import org.jboss.netty.handler.codec.http.{ DefaultHttpRequest, HttpRequest }
import com.excilys.ebi.gatling.http.Headers
import com.excilys.ebi.gatling.recorder.config.ProxyConfig
import com.excilys.ebi.gatling.recorder.controller.RecorderController
import com.ning.http.util.Base64
import grizzled.slf4j.Logging
abstract class AbstractBrowserRequestHandler(controller: RecorderController, proxyConfig: ProxyConfig) extends SimpleChannelHandler with Logging {
override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) {
event.getMessage match {
case request: HttpRequest =>
proxyConfig.host.map { _ =>
for {
username <- proxyConfig.username
password <- proxyConfig.password
} {
val proxyAuth = "Basic " + Base64.encode((username + ":" + password).getBytes)
request.setHeader(Headers.Names.PROXY_AUTHORIZATION, proxyAuth)
}
}.getOrElse(request.removeHeader("Proxy-Connection")) // remove Proxy-Connection header if it's not significant
val future = connectToServerOnBrowserRequestReceived(ctx, request)
controller.receiveRequest(request)
sendRequestToServerAfterConnection(future, request);
case _ => // whatever
}
}
def connectToServerOnBrowserRequestReceived(ctx: ChannelHandlerContext, request: HttpRequest): ChannelFuture
override def exceptionCaught(ctx: ChannelHandlerContext, e: ExceptionEvent) {
error("Exception caught", e.getCause)
// Properly closing
val future = ctx.getChannel.getCloseFuture
future.addListener(new ChannelFutureListener {
def operationComplete(future: ChannelFuture) = future.getChannel.close
})
ctx.sendUpstream(e)
}
private def sendRequestToServerAfterConnection(future: ChannelFuture, request: HttpRequest) {
Option(future).map { future =>
future.addListener(new ChannelFutureListener {
def operationComplete(future: ChannelFuture) = future.getChannel.write(buildRequestWithRelativeURI(request))
})
}
}
private def buildRequestWithRelativeURI(request: HttpRequest) = {
val uri = new URI(request.getUri)
val newUri = new URI(null, null, null, -1, uri.getPath, uri.getQuery, uri.getFragment).toString
val newRequest = new DefaultHttpRequest(request.getProtocolVersion, request.getMethod, newUri)
newRequest.setChunked(request.isChunked)
newRequest.setContent(request.getContent)
for (header <- request.getHeaders)
newRequest.addHeader(header.getKey, header.getValue)
newRequest
}
}
| Tjoene/thesis | Case_Programs/gatling-1.4.0/gatling-recorder/src/main/scala/com/excilys/ebi/gatling/recorder/http/handler/AbstractBrowserRequestHandler.scala | Scala | gpl-2.0 | 3,393 |
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.carlomicieli.scalakoans
import org.scalatest._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class AboutUniformAccessPrinciple extends FunSuite with ShouldMatchers {
class CalculatesAgeUsingMethod(var currentYear: Int, birthYear: Int) {
def age = currentYear - birthYear
// calculated when method is called
}
class CalculatesAgeUsingProperty(var currentYear: Int, birthYear: Int) {
val ageAsVal = currentYear - birthYear
var ageAsVar = currentYear - birthYear
lazy val ageAsLazy = currentYear - birthYear
// calculated at instantiation, returns property when called
}
test("Can access age as parameterless method") {
val me = new CalculatesAgeUsingMethod(2010, 2003)
me.age should be(7)
//! @#?! "Int does not take parameters"
//! me.age() should be (7)
}
test("What happens when I update current year using method") {
val me = new CalculatesAgeUsingMethod(2010, 2003)
me.currentYear = 2011
me.age should be(8)
}
test("Can access age as property") {
val me = new CalculatesAgeUsingProperty(2010, 2003)
me.ageAsVal should be(7)
me.ageAsVar should be(7)
}
test("What happens when I update current year using property") {
val me = new CalculatesAgeUsingProperty(2010, 2003)
me.currentYear = 2011
me.ageAsVal should be(7)
me.ageAsVar should be(7)
me.ageAsLazy should be(8)
}
} | CarloMicieli/first-steps-with-scala | src/test/scala/io/github/carlomicieli/scalakoans/AboutUniformAccessPrinciple.scala | Scala | apache-2.0 | 2,081 |
package tierney.core
import cats.~>
import cats.free.Free
import cats.free.FreeApplicative
import cats.data.EitherK
/**
* Higher-kinded functor
*/
trait FunctorK[S[_[_], _]] {
self =>
def map[F[_], G[_]](f: F ~> G): S[F, ?] ~> S[G, ?]
final def andThen[T[_[_], _]](other: FunctorK[T]): FunctorK[Lambda[(F[_], A) => T[S[F, ?], A]]] =
new FunctorK[Lambda[(F[_], A) => T[S[F, ?], A]]] {
override def map[F[_], G[_]](f: F ~> G): T[S[F, ?], ?] ~> T[S[G, ?], ?] =
other.map[S[F, ?], S[G, ?]](self.map(f))
}
}
object FunctorK {
implicit def freeFunctorK: FunctorK[Free] = new FunctorK[Free] {
override def map[F[_], G[_]](f: F ~> G): Free[F, ?] ~> Free[G, ?] =
Lambda[Free[F, ?] ~> Free[G, ?]](_.foldMap[Free[G, ?]](f andThen[Free[G, ?]] Lambda[G ~> Free[G, ?]](Free.liftF(_))))
}
implicit def freeApplicativeFunctorK: FunctorK[FreeApplicative] = new FunctorK[FreeApplicative] {
override def map[F[_], G[_]](f: F ~> G): FreeApplicative[F, ?] ~> FreeApplicative[G, ?] =
Lambda[FreeApplicative[F, ?] ~> FreeApplicative[G, ?]](_.foldMap[FreeApplicative[G, ?]](f andThen[FreeApplicative[G, ?]] Lambda[G ~> FreeApplicative[G, ?]](FreeApplicative.lift(_))))
}
implicit def coproductFunctorK[F[_]]: FunctorK[Lambda[(G[_], A) => EitherK[F, G, A]]] = new FunctorK[Lambda[(G[_], A) => EitherK[F, G, A]]] {
override def map[G[_], H[_]](f: G ~> H): EitherK[F, G, ?] ~> EitherK[F, H, ?] =
Lambda[EitherK[F, G, ?] ~> EitherK[F, H, ?]](_.fold[EitherK[F, H, ?]](Lambda[F ~> EitherK[F, H, ?]](EitherK.left(_)), f andThen[EitherK[F, H, ?]] Lambda[H ~> EitherK[F, H, ?]](EitherK.right(_))))
}
} | m50d/tierney | core/src/main/scala/tierney/core/FunctorK.scala | Scala | apache-2.0 | 1,666 |
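An illustration of freeFunctorK, assuming the kind-projector plugin (already relied on by the `?` syntax above) is enabled: a natural transformation Option ~> List is lifted through Free.
import cats.~>
import cats.free.Free
import tierney.core.FunctorK
object FunctorKSketch {
  val optionToList: Option ~> List = Lambda[Option ~> List](_.toList)
  // map lifts the plain natural transformation through the Free structure
  val liftedFree: Free[Option, ?] ~> Free[List, ?] = FunctorK.freeFunctorK.map(optionToList)
  val program: Free[Option, Int] = Free.liftF(Option(42))
  val translated: Free[List, Int] = liftedFree(program)
}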
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.sql.readers
/**
* Parser for parsing lines in bulk. Use this when efficiency is desired.
*
* @param iter iterator over lines in the file
* @param fieldSep the delimiter used to separate fields in a line
* @param lineSep the delimiter used to separate lines
* @param quote character used to quote fields
* @param escape character used to escape the quote character
* @param ignoreLeadingSpace ignore white space before a field
* @param ignoreTrailingSpace ignore white space after a field
* @param headers headers for the columns
* @param inputBufSize size of buffer to use for parsing input, tune for performance
* @param maxCols maximum number of columns allowed, for safety against bad inputs
*/
class CarbonBulkCsvReader (iter: Iterator[String],
split: Int,
fieldSep: Char = ',',
    lineSep: String = "\n",
    quote: Char = '"',
    escape: Char = '\\',
commentMarker: Char = '#',
ignoreLeadingSpace: Boolean = true,
ignoreTrailingSpace: Boolean = true,
headers: Seq[String],
inputBufSize: Int = 128,
maxCols: Int = 20480)
extends CsvReader(fieldSep,
lineSep,
quote,
escape,
commentMarker,
ignoreLeadingSpace,
ignoreTrailingSpace,
headers,
inputBufSize,
maxCols)
with Iterator[Array[String]] {
private val reader = new CarbonStringIteratorReader(iter)
parser.beginParsing(reader)
private var nextRecord = parser.parseNext()
/**
* get the next parsed line.
*
* @return array of strings where each string is a field in the CSV record
*/
def next: Array[String] = {
val curRecord = nextRecord
if(curRecord != null) {
nextRecord = parser.parseNext()
} else {
throw new NoSuchElementException("next record is null")
}
curRecord
}
def hasNext: Boolean = nextRecord != null
}
/**
* A Reader that "reads" from a sequence of lines. Spark's textFile method removes newlines at
* end of each line Univocity parser requires a Reader that provides access to the data to be
* parsed and needs the newlines to be present
* @param iter iterator over RDD[String]
*/
private class CarbonStringIteratorReader(val iter: Iterator[String]) extends java.io.Reader {
private var next: Long = 0
private var length: Long = 0 // length of input so far
private var start: Long = 0
private var str: String = null // current string from iter
/**
* fetch next string from iter, if done with current one
* pretend there is a new line at the end of every string we get from from iter
*/
private def refill(): Unit = {
if (length == next) {
if (iter.hasNext) {
str = iter.next
start = length
        // add a space to every line except the last one to store '\n'
if (iter.hasNext) {
length += (str.length + 1) // allowance for newline removed by SparkContext.textFile()
} else {
length += str.length
}
} else {
str = null
}
}
}
/**
* read the next character, if at end of string pretend there is a new line
*/
override def read(): Int = {
refill()
if(next >= length) {
-1
} else {
val cur = next - start
next += 1
      if (cur == str.length) '\n' else str.charAt(cur.toInt)
}
}
/**
* read from str into cbuf
*/
def read(cbuf: Array[Char], off: Int, len: Int): Int = {
refill()
var n = 0
if ((off < 0) || (off > cbuf.length) || (len < 0) ||
((off + len) > cbuf.length) || ((off + len) < 0)) {
throw new IndexOutOfBoundsException()
} else if (len == 0) {
n = 0
} else {
if (next >= length) { // end of input
n = -1
} else {
n = Math.min(length - next, len).toInt // lesser of amount of input available or buf size
        // add a '\n' to every line except the last one
        if (n == length - next && iter.hasNext) {
          str.getChars((next - start).toInt, (next - start + n - 1).toInt, cbuf, off)
          cbuf(off + n - 1) = '\n'
} else {
str.getChars((next - start).toInt, (next - start + n).toInt, cbuf, off)
}
next += n
if (n < len) {
val m = read(cbuf, off + n, len - n) // have more space, fetch more input from iter
if(m != -1) n += m
}
}
}
n
}
override def skip(ns: Long): Long = {
throw new IllegalArgumentException("Skip not implemented")
}
override def ready: Boolean = {
refill()
true
}
override def markSupported: Boolean = false
override def mark(readAheadLimit: Int): Unit = {
throw new IllegalArgumentException("Mark not implemented")
}
override def reset(): Unit = {
throw new IllegalArgumentException("Mark and hence reset not implemented")
}
def close(): Unit = { }
}
| foryou2030/incubator-carbondata | integration/spark/src/main/scala/org/apache/carbondata/spark/csv/CarbonCsvReader.scala | Scala | apache-2.0 | 5,653 |
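A hedged sketch that drives the bulk reader over an in-memory iterator; the header names and records are made up for illustration.
import com.databricks.spark.sql.readers.CarbonBulkCsvReader
object BulkCsvSketch {
  def main(args: Array[String]): Unit = {
    val lines = Iterator("1,apple,0.5", "2,\"banana, ripe\",0.25")
    val reader = new CarbonBulkCsvReader(
      iter = lines,
      split = 0, // split id supplied by the caller; unused in the code shown above
      headers = Seq("id", "name", "price"))
    reader.foreach(record => println(record.mkString("|"))) // each record is an Array[String]
  }
}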
package com.agoda.kafka.connector.jdbc.models
import enumeratum._
import scala.collection.immutable.IndexedSeq
/**
* Mode of operation
* ~~~~~~~~~~~~~~~~~
*
* Timestamp Mode :: creation timestamp of a record is stored as offset.
*
* Incrementing Mode :: unique (auto) incrementing integral id of a record is stored as offset.
*
* Timestamp + Incrementing Mode :: pair of creation timestamp and unique (auto) incrementing integral id of a record
* is stored as offset.
*/
sealed abstract class Mode(override val entryName: String) extends EnumEntry
object Mode extends Enum[Mode] {
val values: IndexedSeq[Mode] = findValues
case object TimestampMode extends Mode("timestamp")
case object IncrementingMode extends Mode("incrementing")
case object TimestampIncrementingMode extends Mode("timestamp+incrementing")
}
| arpanchaudhury/kafka-jdbc-connector | src/main/scala/com/agoda/kafka/connector/jdbc/models/Mode.scala | Scala | apache-2.0 | 906 |
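A round-trip through the enumeratum entry names declared above; withName and withNameOption come from the Enum base trait.
import com.agoda.kafka.connector.jdbc.models.Mode
import com.agoda.kafka.connector.jdbc.models.Mode._
object ModeSketch extends App {
  assert(Mode.withName("timestamp") == TimestampMode)
  assert(Mode.withName("incrementing") == IncrementingMode)
  assert(Mode.withName("timestamp+incrementing") == TimestampIncrementingMode)
  assert(Mode.withNameOption("bogus").isEmpty) // non-throwing lookup for unknown names
}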
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.react
import scala.scalajs.js
import scala.scalajs.js._
import org.scalajs.dom.html
import js.{ UndefOr, Any, Function => JFn }
import js.annotation.{ JSBracketAccess, JSName }
import js.{ Any => jAny }
// https://github.com/DefinitelyTyped/DefinitelyTyped/blob/master/react/react.d.ts
@js.native
abstract class ComponentLifecycle[P, S] extends js.Any {
def componentWillMount(): Unit = js.native
def componentDidMount(): Unit = js.native
def componentWillReceiveProps(nextProps: P, nextContext: Any): Unit = js.native
def shouldComponentUpdate(nextProps: P, nextState: js.Any): Boolean = js.native
def componentWillUpdate(nextProps: P, nextState: js.Any): Unit = js.native
def componentDidUpdate(prevProps: P, prevState: js.Any): Unit = js.native
def componentWillUnmount(): Unit = js.native
}
| glipka/Easy-React-With-ScalaJS | src/main/scala/com/glipka/easyReactJS/react/ComponentLifecycle.scala | Scala | apache-2.0 | 1,429 |
trait M[X]
trait N[X]
class Foo[A](a: N[A]) {
}
val nb: N[Boolean] = null
/*start*/new Foo(nb)/*end*/
// Foo[Boolean] | ilinum/intellij-scala | testdata/typeInference/bugs4/SCL3510C.scala | Scala | apache-2.0 | 119 |
import sbt._
import Import._
import Keys._
object MultiPublishTest extends Build
{
override lazy val settings = super.settings ++ Seq(
organization := "A",
version := "1.0",
ivyPaths <<= baseDirectory( dir => new IvyPaths(dir, Some(dir / "ivy" / "cache")) ),
externalResolvers <<= baseDirectory map { base => Resolver.file("local", base / "ivy" / "local" asFile)(Resolver.ivyStylePatterns) :: Nil }
)
lazy val root = Project("root", file(".")) dependsOn(sub) aggregate(sub) settings( mavenStyle, interProject, name := "Publish Test" )
lazy val sub = Project("sub", file("sub")) settings( mavenStyle, name := "Sub Project" )
lazy val mavenStyle = publishMavenStyle <<= baseDirectory { base => (base / "mavenStyle") exists }
def interProject =
projectDependencies <<= (publishMavenStyle, publishMavenStyle in sub, projectDependencies) map { (style, subStyle, pd) => if(style == subStyle) pd else Nil }
}
| dansanduleac/sbt | sbt/src/sbt-test/dependency-management/publish-local/project/MultiPublishTest.scala | Scala | bsd-3-clause | 926 |
package com.cdegroot.sgame
/**
* This class is a very minimal replacement of java.awt.Point
*/
case class SGPoint(x: Int, y: Int) {
override def toString = "SGPoint(" + x + "," + y + ")"
// Helper methods to make client code more readable when point is used as dimension
def width = x
def height = y
}
// Companion object for some syntactic sugar
object SGPoint {
def apply(x: Double, y: Double): SGPoint = SGPoint(x.toInt, y.toInt)
def apply(point: SGPoint): SGPoint = SGPoint(point.x, point.y)
val None = SGPoint(0, 0)
} | cdegroot/sgame | src/main/scala/com/cdegroot/sgame/SGPoint.scala | Scala | bsd-3-clause | 539 |
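A quick sketch of the companion helpers above.
import com.cdegroot.sgame.SGPoint
object SGPointSketch extends App {
  val p = SGPoint(3.7, 4.2)   // Double overload truncates to SGPoint(3, 4)
  val q = SGPoint(p)          // copy-style apply
  assert(p == SGPoint(3, 4) && q.width == 3 && q.height == 4)
}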
/**
* Copyright 2017 https://github.com/sndnv
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package noisecluster.jvm
package object control {
sealed trait ServiceLevel
object ServiceLevel {
case object Audio extends ServiceLevel
case object Transport extends ServiceLevel
case object Application extends ServiceLevel
case object Host extends ServiceLevel
}
sealed trait ServiceAction
object ServiceAction {
case object Start extends ServiceAction
case object Stop extends ServiceAction
case object Restart extends ServiceAction
}
sealed trait ServiceState
object ServiceState {
case object Starting extends ServiceState
case object Active extends ServiceState
case object Stopping extends ServiceState
case object Stopped extends ServiceState
case object Restarting extends ServiceState
}
}
| sndnv/noisecluster | noisecluster-jvm/src/main/scala/noisecluster/jvm/control/package.scala | Scala | apache-2.0 | 1,407 |
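A purely illustrative mapping over the ADTs above; the real transition logic lives in the service handlers.
import noisecluster.jvm.control._
object ControlSketch {
  def transitionFor(action: ServiceAction): ServiceState = action match {
    case ServiceAction.Start   => ServiceState.Starting
    case ServiceAction.Stop    => ServiceState.Stopping
    case ServiceAction.Restart => ServiceState.Restarting
  }
}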
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import junit.framework.Assert._
import collection.mutable.ArrayBuffer
import kafka.common.InvalidTopicException
import org.junit.Test
class TopicTest {
@Test
def testInvalidTopicNames() {
val invalidTopicNames = new ArrayBuffer[String]()
invalidTopicNames += ("", ".", "..")
var longName = "ATCG"
for (i <- 1 to 6)
longName += longName
invalidTopicNames += longName
    val badChars = Array('/', '\\', ',', '\0', ':', "\"", '\'', ';', '*', '?', '.')
for (weirdChar <- badChars) {
invalidTopicNames += "Is" + weirdChar + "funny"
}
for (i <- 0 until invalidTopicNames.size) {
try {
Topic.validate(invalidTopicNames(i))
fail("Should throw InvalidTopicException.")
}
catch {
case e: InvalidTopicException => "This is good."
}
}
val validTopicNames = new ArrayBuffer[String]()
validTopicNames += ("valid", "TOPIC", "nAmEs", "ar6", "VaL1d", "_0-9_")
for (i <- 0 until validTopicNames.size) {
try {
Topic.validate(validTopicNames(i))
}
catch {
case e: Exception => fail("Should not throw exception.")
}
}
}
}
| dchenbecker/kafka-sbt | core/src/test/scala/unit/kafka/utils/TopicTest.scala | Scala | apache-2.0 | 1,988 |
/*************************************************************************
* *
* This file is part of the 20n/act project. *
* 20n/act enables DNA prediction for synthetic biology/bioengineering. *
* Copyright (C) 2017 20n Labs, Inc. *
* *
* Please direct all queries to [email protected]. *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
* *
*************************************************************************/
package com.act.similarity
import java.io.File
import chemaxon.calculations.clean.Cleaner
import chemaxon.formats.MolImporter
import chemaxon.license.LicenseManager
import chemaxon.marvin.alignment.{AlignmentMolecule, AlignmentMoleculeFactory, AlignmentProperties, PairwiseAlignment, PairwiseSimilarity3D}
import chemaxon.struc.Molecule
import com.act.utils.{TSVParser, TSVWriter}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.JavaConverters._
/**
* This class distributes Chemaxon similarity computation across a set of Spark workers. Note that this is still slow
* even when running in parallel on a single machine, and you need to place the Chemaxon license file in
* `/home/spark/.chemaxon/license.cxl` on every machine (Chemaxon's license manager doesn't actually seem to handle
* licenses imported from strings, which is an incredible pain).
*
* Instead of this, you should probably be using SMARTS queries found in com.act.analysis.similarity. These are fast
* and get the job done for sub-structure searches.
*/
object compute {
val ALIGNMENT_MOLECULE_FACTORY = new AlignmentMoleculeFactory()
def run(license_file : String, inchi1: String, inchi2: String): Map[String, Double] = {
try {
LicenseManager.setLicenseFile(license_file)
} catch {
case e: Exception => throw new RuntimeException(e.getMessage)
}
try {
val queryMol: Molecule = MolImporter.importMol(inchi1)
Cleaner.clean(queryMol, 3)
val queryFragment = findLargestFragment(queryMol.convertToFrags)
val am: AlignmentMolecule = ALIGNMENT_MOLECULE_FACTORY.create(
queryFragment, AlignmentProperties.DegreeOfFreedomType.TRANSLATE_ROTATE)
val alignment = new PairwiseAlignment
alignment.setQuery(am)
val pairwise3d = new PairwiseSimilarity3D()
pairwise3d.setQuery(queryFragment)
val targetMol: Molecule = MolImporter.importMol(inchi2)
Cleaner.clean(targetMol, 3)
val targetFragment = findLargestFragment(targetMol.convertToFrags())
val targetAm: AlignmentMolecule = ALIGNMENT_MOLECULE_FACTORY.create(
targetFragment, AlignmentProperties.DegreeOfFreedomType.TRANSLATE_ROTATE)
val alignment_score = alignment.similarity(targetAm)
var threed_score: Double = 0.0
var threed_tanimoto: Double = 0.0
try {
threed_score = pairwise3d.similarity(targetFragment)
threed_tanimoto = pairwise3d.getShapeTanimoto
} catch {
case e: Exception => println(s"Caught exception: ${e.getMessage}")
}
Map(
"alignment_score" -> alignment_score, "alignment_tanimoto" -> alignment.getShapeTanimoto,
"3d_score" -> threed_score, "3d_tanimoto" -> threed_tanimoto
)
} catch {
// Abandon molecules that throw exceptions.
case e: Exception =>
System.err.println(s"Caught exception: ${e.getMessage}")
Map("alignment_score" -> 0.0, "alignment_tanimoto" -> 0.0,
"3d_score" -> 0.0, "3d_tanimoto" -> 0.0
)
}
}
def findLargestFragment(fragments: Array[Molecule]): Molecule = {
fragments.foldLeft(null: Molecule) { (a, m) => if (a == null || a.getAtomCount < m.getAtomCount) m else a}
}
}
object similarity {
def main(args: Array[String]) {
if (args.length != 4) {
System.err.println("Usage: license_file query_inchi target_tsv output_tsv")
System.exit(-1)
}
val license_file = args(0)
val query_inchi = args(1) // TODO: make this take a TSV
val target_tsv = args(2)
LicenseManager.setLicenseFile(license_file)
val tsv_parser = new TSVParser
tsv_parser.parse(new File(target_tsv))
val id_inchi_pairs = tsv_parser.getResults.asScala.map(m => (m.get("id"), m.get("inchi")))
val conf = new SparkConf().setAppName("Spark Similarity Computation")
conf.getAll.foreach(x => println(s"${x._1}: ${x._2}"))
val spark = new SparkContext(conf)
val chems: RDD[(String, String)] = spark.makeRDD(id_inchi_pairs, Math.min(1000, id_inchi_pairs.size))
val resultsRDD: RDD[(String, Map[String, Double])] =
chems.map(t => (t._1, compute.run(license_file, query_inchi, t._2)))
val results = resultsRDD.collect()
val header: List[String] = List("id", "alignment_score", "alignment_tanimoto", "3d_score", "3d_tanimoto")
val tsvWriter = new TSVWriter[String, String](header.asJava)
tsvWriter.open(new File(args(3)))
try {
results.foreach(v => {
val row: Map[String, String] = Map("id" -> v._1,
"alignment_score" -> v._2("alignment_score").toString,
"alignment_tanimoto" -> v._2("alignment_tanimoto").toString,
"3d_score" -> v._2("3d_score").toString,
"3d_tanimoto" -> v._2("3d_tanimoto").toString
)
tsvWriter.append(row.asJava)
})
} finally {
tsvWriter.close()
}
}
}
| 20n/act | reachables/src/main/scala/similarity.scala | Scala | gpl-3.0 | 6,576 |
import scalaxb.compiler.wsdl11.{Driver}
import java.io.{File}
import scalaxb.compiler.{Config}
object Wsdl11Soap11Test extends TestBase {
override val module = new Driver // with Verbose
lazy val generated = module.process(inFile,
Config(packageNames = Map(None -> Some(packageName)),
packageDir = true, outdir = tmp, async = false))
val packageName = "genericbarcode"
val inFile = new File("integration/src/test/resources/genericbarcode.wsdl")
"stockquote.scala file must compile" in {
(List("""import genericbarcode._""",
"""val service = (new BarCodeSoapBindings with scalaxb.Soap11Clients with scalaxb.DispatchHttpClients {}).service
val data = BarCodeData(120, 120, 0, 1, 1, 20, 20, true, None, None, None, 10.0f, Both, CodeEAN128B, NoneType, BottomCenter, PNG)
println(scalaxb.toXML(data, "BarCodeParam", defaultScope))
val response = service.generateBarCode(data, Some("1234"))""",
"""response.right.get.toString.contains("iVB")"""), generated) must evaluateTo(true,
outdir = "./tmp", usecurrentcp = true)
}
}
| Banno/scalaxb | integration/src/test/scala/Wsdl11Soap11Test.scala | Scala | mit | 1,088 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.template
import java.nio.charset.Charset
import io.gatling.commons.util.StringHelper
import com.dongxiguo.fastring.Fastring.Implicits._
import StringHelper.RichString
import io.gatling.charts.FileNamingConventions
import io.gatling.charts.component.RequestStatistics
import io.gatling.charts.report.GroupContainer
import io.gatling.charts.report.Container.{ Group, Request }
private[charts] class StatsJsTemplate(stats: GroupContainer, outputJson: Boolean) {
private def fieldName(field: String) = if (outputJson) '"' + field + '"' else field
def getOutput(charset: Charset): Fastring = {
def renderStats(request: RequestStatistics, path: String): Fastring = {
val jsonStats = new GlobalStatsJsonTemplate(request, outputJson).getOutput
fast"""${fieldName("name")}: "${request.name.escapeJsIllegalChars}",
${fieldName("path")}: "${request.path.escapeJsIllegalChars}",
${fieldName("pathFormatted")}: "$path",
${fieldName("stats")}: $jsonStats"""
}
def renderSubGroups(group: GroupContainer): Iterable[Fastring] =
group.groups.values.map { subGroup =>
fast""""${subGroup.name.toGroupFileName(charset)}": {
${renderGroup(subGroup)}
}"""
}
def renderSubRequests(group: GroupContainer): Iterable[Fastring] =
group.requests.values.map { request =>
fast""""${request.name.toRequestFileName(charset)}": {
${fieldName("type")}: "$Request",
${renderStats(request.stats, request.stats.path.toRequestFileName(charset))}
}"""
}
def renderGroup(group: GroupContainer): Fastring =
fast"""${fieldName("type")}: "$Group",
${renderStats(group.stats, group.stats.path.toGroupFileName(charset))},
${fieldName("contents")}: {
${(renderSubGroups(group) ++ renderSubRequests(group)).mkFastring(",")}
}
"""
if (outputJson)
fast"""{
${renderGroup(stats)}
}"""
else
fast"""var stats = {
${renderGroup(stats)}
}
function fillStats(stat){
$$("#numberOfRequests").append(stat.numberOfRequests.total);
$$("#numberOfRequestsOK").append(stat.numberOfRequests.ok);
$$("#numberOfRequestsKO").append(stat.numberOfRequests.ko);
$$("#minResponseTime").append(stat.minResponseTime.total);
$$("#minResponseTimeOK").append(stat.minResponseTime.ok);
$$("#minResponseTimeKO").append(stat.minResponseTime.ko);
$$("#maxResponseTime").append(stat.maxResponseTime.total);
$$("#maxResponseTimeOK").append(stat.maxResponseTime.ok);
$$("#maxResponseTimeKO").append(stat.maxResponseTime.ko);
$$("#meanResponseTime").append(stat.meanResponseTime.total);
$$("#meanResponseTimeOK").append(stat.meanResponseTime.ok);
$$("#meanResponseTimeKO").append(stat.meanResponseTime.ko);
$$("#standardDeviation").append(stat.standardDeviation.total);
$$("#standardDeviationOK").append(stat.standardDeviation.ok);
$$("#standardDeviationKO").append(stat.standardDeviation.ko);
$$("#percentiles1").append(stat.percentiles1.total);
$$("#percentiles1OK").append(stat.percentiles1.ok);
$$("#percentiles1KO").append(stat.percentiles1.ko);
$$("#percentiles2").append(stat.percentiles2.total);
$$("#percentiles2OK").append(stat.percentiles2.ok);
$$("#percentiles2KO").append(stat.percentiles2.ko);
$$("#percentiles3").append(stat.percentiles3.total);
$$("#percentiles3OK").append(stat.percentiles3.ok);
$$("#percentiles3KO").append(stat.percentiles3.ko);
$$("#percentiles4").append(stat.percentiles4.total);
$$("#percentiles4OK").append(stat.percentiles4.ok);
$$("#percentiles4KO").append(stat.percentiles4.ko);
$$("#meanNumberOfRequestsPerSecond").append(stat.meanNumberOfRequestsPerSecond.total);
$$("#meanNumberOfRequestsPerSecondOK").append(stat.meanNumberOfRequestsPerSecond.ok);
$$("#meanNumberOfRequestsPerSecondKO").append(stat.meanNumberOfRequestsPerSecond.ko);
}
"""
}
}
| MykolaB/gatling | gatling-charts/src/main/scala/io/gatling/charts/template/StatsJsTemplate.scala | Scala | apache-2.0 | 4,566 |
package org.zbritva.graph.tree
/**
* Created by iigaliev on 20.05.2016.
*/
class TreeNode() {
var node_columns: List[String] = List[String]()
  // List of child nodes with their costs of computing.
  // The first cost applies if the current node is properly sorted (wrong),
  // the second cost applies if the current node is NOT properly sorted (wrong).
  // TODO: track two costs per child instead: one for when the parent is properly
  // sorted for that child, and one for when it is not.
  // These costs are obsolete.
var node_childs: List[(Int, Int, TreeNode)] = List[(Int, Int, TreeNode)]()
//If current node is properly sorted
var cost_with_sorting: Int = Int.MaxValue
//if current node not properly sorted
var cost_without_sorting: Int = Int.MinValue
def getCostOfSorting(): Int = {
cost_with_sorting
}
def getCostOfWitoutSorting(): Int = {
cost_without_sorting
}
def setCostOfSorting(value: Int): Unit = {
cost_with_sorting = value
}
def setCostOfWitoutSorting(value: Int): Unit = {
cost_without_sorting = value
}
def setNodeColumns(columns: List[String]): Unit = {
node_columns = columns.sorted(Ordering.String)
}
def getNodeColumns(): List[String] = {
node_columns
}
def addChild(node: TreeNode, sortedcost: Int, unsortedcost: Int): Unit = {
val relation = (sortedcost, unsortedcost, node)
node_childs = node_childs.::(relation)
}
def addChild(node: TreeNode): Unit = {
val relation = (Int.MaxValue, Int.MinValue, node)
node_childs = node_childs.::(relation)
}
def getChildren(): List[(Int, Int, TreeNode)] = {
node_childs
}
// override def equals(o: Any) = super.equals(o)
// override def hashCode = super.hashCode
override def hashCode: Int = {
var strHash: String = ""
for (columns <- node_columns.sorted(Ordering.String)) {
strHash += columns
}
strHash.hashCode
}
override def equals(other: Any) = other match {
case that: TreeNode =>
var strHash: String = ""
for (columns <- that.node_columns.sorted(Ordering.String)) {
strHash += columns
}
var strHashOther: String = ""
for (columns <- this.node_columns.sorted(Ordering.String)) {
strHashOther += columns
}
strHash.equals(strHashOther)
case _ => false
}
def checkColumns(columns: List[String]): Boolean = {
val this_node_columns_set = getNodeColumns().toSet
val current_node_set = columns.toSet
    val intersection = this_node_columns_set.intersect(current_node_set)
if (intersection.size == columns.length && intersection.size == this_node_columns_set.size) {
return true
}
false
}
}
| zBritva/SparkCUBE | src/org/zbritva/graph/tree/TreeNode.scala | Scala | apache-2.0 | 2,837 |
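A small sketch exercising the node API above; column lists are compared as sets, so order does not matter.
import org.zbritva.graph.tree.TreeNode
object TreeNodeSketch extends App {
  val parent = new TreeNode()
  parent.setNodeColumns(List("b", "a"))            // stored sorted as List("a", "b")
  val child = new TreeNode()
  child.setNodeColumns(List("a"))
  parent.addChild(child, sortedcost = 10, unsortedcost = 25)
  assert(parent.checkColumns(List("a", "b")))      // set-based comparison
  assert(parent.getChildren().head._3 == child)
}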
package com.twitter.finatra.http.integration.doeverything.main
import com.twitter.finagle.Filter
import com.twitter.finagle.http.{Request, Response}
import com.twitter.finatra.http.filters.CommonFilters
import com.twitter.finatra.http.integration.doeverything.main.controllers.{DoEverythingController, DoNothingController, NonGuiceController}
import com.twitter.finatra.http.integration.doeverything.main.domain.DomainTestUserReader
import com.twitter.finatra.http.integration.doeverything.main.exceptions.{BarExceptionMapper, FooExceptionMapper}
import com.twitter.finatra.http.integration.doeverything.main.filters.IdentityFilter
import com.twitter.finatra.http.integration.doeverything.main.modules.DoEverythingModule
import com.twitter.finatra.http.routing.HttpRouter
import com.twitter.finatra.http.{Controller, HttpServer}
object DoEverythingServerMain extends DoEverythingServer
class DoEverythingServer extends HttpServer {
override val name = "example-server"
flag("magicNum", "26", "Magic number")
override val modules = Seq(
DoEverythingModule)
override def configureHttp(router: HttpRouter) {
router.
register[DomainTestUserReader].
filter[CommonFilters].
filter(Filter.identity[Request, Response]).
add[DoEverythingController].
add(new NonGuiceController).
add(Filter.identity[Request, Response], new Controller {}).
exceptionMapper[FooExceptionMapper].
exceptionMapper(injector.instance[BarExceptionMapper]).
add[IdentityFilter, DoNothingController].
add[IdentityFilter, IdentityFilter, DoNothingController].
add[IdentityFilter, IdentityFilter, IdentityFilter, DoNothingController].
add[IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, DoNothingController].
add[IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, DoNothingController].
add[IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, DoNothingController].
add[IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, DoNothingController].
add[IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, DoNothingController].
add[IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, DoNothingController].
add[IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, IdentityFilter, DoNothingController]
}
override def warmup() {
run[DoEverythingWarmupHandler]()
}
}
| joecwu/finatra | http/src/test/scala/com/twitter/finatra/http/integration/doeverything/main/DoEverythingServer.scala | Scala | apache-2.0 | 2,767 |
package me.flygare.utils
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
trait HttpConnection extends HttpConfig{
implicit val actorSystem = ActorSystem("system")
implicit val actorMaterializer = ActorMaterializer()
}
| flygare/Minopt | RestService/src/main/scala/me.flygare/utils/HttpConnection.scala | Scala | mit | 244 |
/*
* Copyright 2017 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.mongodb
import java.net.URI
import laws.discipline._, arbitrary._
class UriCodecTests extends DisciplineSuite {
checkAll("BsonValueCodec[URI]", BsonValueCodecTests[URI].codec[String, Float])
}
| nrinaudo/kantan.mongodb | core/src/test/scala/kantan/mongodb/UriCodecTests.scala | Scala | apache-2.0 | 812 |
package me.frmr.wepay.api {
import org.scalatest.FunSpec
import net.liftweb.common._
import me.frmr.wepay._
import WePayTestHelpers._
import me.frmr.wepay.api._
class CreditCardSpec extends FunSpec {
implicit val authorizationToken = testAuthorizationToken
describe("A Credit Card") {
var creditCardId : Long = 0l
it("should be createable") {
val theAddress = CreditCardAddress("75 5th St NW", None, "Atlanta", "GA", "US", "30308")
val saveResponse = CreditCard("Burt Reynolds", "[email protected]",
Some("4003830171874018"), Some(1234),
Some(10), Some(2019), Some(theAddress)).save
assert(saveResponse match {
case Full(CreditCardResponse(ccId, _)) =>
creditCardId = ccId
true
case _ =>
false
}, saveResponse)
}
it("should be authorizeable") {
val authorizeResult = CreditCard.authorize(creditCardId)
assert(authorizeResult match {
case Full(CreditCardResponse(_, _)) =>
true
case _ =>
false
}, authorizeResult)
}
it("should be able to authorize a checkout") {
val authorization = CheckoutAuthorization(None, Some(creditCardId), Some("credit_card"))
val checkoutResponse = Checkout(testAccountId, "Text CC Checkout", "PERSONAL", 1.0,
authorization = Some(authorization)).save
assert(checkoutResponse match {
case Full(resp:CheckoutResponse) if resp.state == Some("authorized") =>
Checkout.cancel(resp.checkout_id, "Just a unit test.")
true
case _ =>
false
}, checkoutResponse)
}
}
}
}
| farmdawgnation/wepay-scala | src/test/scala/me/frmr/wepay/api/CreditCardSpec.scala | Scala | apache-2.0 | 1,828 |
package mesosphere.marathon.util
import mesosphere.AkkaUnitTest
import scala.concurrent.Future
import scala.concurrent.duration._
class TimeoutTest extends AkkaUnitTest {
"Timeout" when {
"async" should {
"complete" in {
Timeout(1.second)(Future.successful(1)).futureValue should equal(1)
}
"fail if the method fails" in {
val failure = Timeout(1.second)(Future.failed(new IllegalArgumentException())).failed.futureValue
failure shouldBe a[IllegalArgumentException]
}
"fail with a timeout exception if the method took too long" in {
val failure = Timeout(1.milli)(Future(Thread.sleep(1000))).failed.futureValue
failure shouldBe a[TimeoutException]
}
}
"blocking" should {
"complete" in {
Timeout.blocking(1.second)(1).futureValue should equal(1)
}
"fail if the method fails" in {
val failure = Timeout.blocking(1.second)(throw new IllegalArgumentException).failed.futureValue
failure shouldBe a[IllegalArgumentException]
}
"fail with a timeout if the method took too long" in {
val failure = Timeout.blocking(1.milli)(Thread.sleep(1000)).failed.futureValue
failure shouldBe a[TimeoutException]
}
}
"unsafe" should {
"complete" in {
Timeout.unsafe(1.second)(Future.successful(1)).futureValue should equal(1)
}
"fail if the method fails" in {
val failure = Timeout.unsafe(1.second)(Future.failed(new IllegalArgumentException)).failed.futureValue
failure shouldBe a[IllegalArgumentException]
}
"fail with a timeout if the method took too long" in {
val failure = Timeout.unsafe(1.milli)(Future(Thread.sleep(1000))).failed.futureValue
failure shouldBe a[TimeoutException]
}
}
"unsafe blocking" should {
"complete" in {
Timeout.unsafeBlocking(1.second)(1).futureValue should equal(1)
}
"fail if the method fails" in {
val failure = Timeout.unsafeBlocking(1.second)(throw new IllegalArgumentException).failed.futureValue
failure shouldBe a[IllegalArgumentException]
}
"fail with a timeout if the method took too long" in {
val failure = Timeout.unsafeBlocking(1.milli)(Thread.sleep(1000)).failed.futureValue
failure shouldBe a[TimeoutException]
}
}
}
}
| timcharper/marathon | src/test/scala/mesosphere/marathon/util/TimeoutTest.scala | Scala | apache-2.0 | 2,390 |
package com.clackjones.connectivitymap.service
case class Experiment (
var id : String,
querySignatureId : String,
randomSignatureCount : Int
)
case class ExperimentResult(
experimentId : String,
scores: Iterable[ConnectionScoreResult]
)
case class ConnectionScoreResult(referenceSetName: String, connectionScore: Float, pValue: Float, setSize: Integer) {
override def toString(): String = {
/* for outputting to tab file */
List(referenceSetName, connectionScore, pValue).mkString("\\t")
}
}
| hiraethus/scala-connectivity-map | src/main/scala/com/clackjones/connectivitymap/service/Experiment.scala | Scala | gpl-3.0 | 517 |
package com.github.mdr.ascii.layout.drawing
import com.github.mdr.ascii.util.Utils._
import com.github.mdr.ascii.common.Direction._
import com.github.mdr.ascii.common.Region
import scala.annotation.tailrec
/**
* Raise edges if there are no conflicting diagram elements. For example:
*
* ╭───────╮ ╭───────╮
* │ A │ │ A │
* ╰─┬─┬─┬─╯ ╰─┬─┬─┬─╯
* │ │ │ │ │ │
* │ │ ╰────╮ ╭──╯ ╰╮╰────╮
* │ ╰╮ │ => │ │ │
* ╭──╯ │ │ │ │ │
* │ │ │ │ │ │
* v v v v v v
* ╭───╮ ╭───╮ ╭───╮ ╭───╮ ╭───╮ ╭───╮
* │ B │ │ C │ │ D │ │ B │ │ C │ │ D │
* ╰───╯ ╰───╯ ╰───╯ ╰───╯ ╰───╯ ╰───╯
*
*/
object EdgeElevator {
def elevateEdges(drawing: Drawing): Drawing = {
val edgeTracker = new EdgeTracker(drawing)
var currentDrawing = drawing
val segmentInfos = for {
edgeElement ← drawing.edgeElements
triple @ (segment1, segment2, segment3) ← adjacentTriples(edgeElement.segments)
if segment2.direction.isHorizontal
} yield EdgeSegmentInfo(edgeElement, segment1, segment2, segment3)
var segmentUpdates: Map[EdgeDrawingElement, List[(EdgeSegment, EdgeSegment)]] = Map()
for {
segmentInfo ← segmentInfos.sortBy(_.row)
updatedEdgeSegment ← elevate(segmentInfo, edgeTracker)
} segmentUpdates = addToMultimap(segmentUpdates, segmentInfo.edgeElement, segmentInfo.segment2 -> updatedEdgeSegment)
for ((edge, updates) ← segmentUpdates)
currentDrawing = currentDrawing.replaceElement(edge, updateEdge(edge, updates))
currentDrawing
}
@tailrec
private def updateEdge(edge: EdgeDrawingElement, updates: List[(EdgeSegment, EdgeSegment)]): EdgeDrawingElement =
updates match {
case Nil ⇒ edge
case (oldSegment, newSegment) :: rest ⇒ updateEdge(edge.replaceSegment(oldSegment, newSegment), rest)
}
private def elevate(segmentInfo: EdgeSegmentInfo, edgeTracker: EdgeTracker): Option[EdgeSegment] = {
import segmentInfo._
val firstRow = segmentInfo.segment1.start.row + 1 /* 2 */
val lastRow = segmentInfo.segment2.start.row - 1
for {
row ← firstRow to lastRow
segment ← elevate(row, segmentInfo, edgeTracker)
} return Some(segment)
None
}
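  // Added for clarity: the overload below tentatively moves the horizontal segment to `row`.
  // The segment is first removed from the tracker, the candidate position is checked for
  // collisions, and the original position is restored if the candidate collides.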
private def elevate(row: Int, segmentInfo: EdgeSegmentInfo, edgeTracker: EdgeTracker): Option[EdgeSegment] = {
edgeTracker.removeEdgeSegments(segmentInfo)
val newSegmentInfo = segmentInfo.withRow(row)
if (edgeTracker collidesWith newSegmentInfo) {
edgeTracker.addEdgeSegments(segmentInfo)
None
} else {
edgeTracker.addEdgeSegments(newSegmentInfo)
Some(newSegmentInfo.segment2)
}
}
}
| jlmauduy/ascii-graphs | src/main/scala/com/github/mdr/ascii/layout/drawing/EdgeElevator.scala | Scala | mit | 3,152 |
package vk.scout.auth.oauth2.browser
import javafx.application.Platform
import javafx.embed.swing.JFXPanel
import javafx.scene.Scene
import javafx.scene.web.{WebEngine, WebView}
import javax.swing._
import java.awt._
import java.net.{MalformedURLException, URL}
import java.awt.event.{WindowAdapter, WindowEvent}
import javafx.beans.value.{ObservableValue, ChangeListener}
abstract class WebBrowser extends JQueryWebView {
// inspired by http://docs.oracle.com/javafx/2/swing/SimpleSwingBrowser.java.htm
protected[this] val frame: JFrame = new JFrame
private[this] val jfxPanel: JFXPanel = new JFXPanel
protected[this] var engine: WebEngine = null
private[this] val panel: JPanel = new JPanel(new BorderLayout)
// https://forums.oracle.com/thread/2395986
Platform.setImplicitExit(false)
frame.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE)
frame.setPreferredSize(new Dimension(1024, 600))
createScene()
panel.add(jfxPanel, BorderLayout.CENTER)
frame.getContentPane.add(panel)
frame.pack()
// frame.setVisible(true)
protected[this] def modifyEngineListeners(): Unit
private[this] def createScene() {
Platform.runLater(new Runnable {
def run() {
val view: WebView = new WebView
engine = view.getEngine
engine.locationProperty.addListener(new ChangeListener[String] {
def changed(ov: ObservableValue[_ <: String], oldValue: String, newValue: String) {
SwingUtilities.invokeLater(new Runnable {
def run() {
frame.setTitle(newValue)
}
})
}
})
modifyEngineListeners()
jfxPanel.setScene(new Scene(view))
}
})
}
protected[this] def loadURL(url: String) {
Platform.runLater(new Runnable {
def run() {
var tmp: String = toURL(url)
if (tmp == null) {
tmp = toURL("http://" + url)
}
engine.load(tmp)
}
})
}
private[this] def toURL(str: String): String = {
try {
new URL(str).toExternalForm
}
catch {
case exception: MalformedURLException => {
return null
}
}
}
protected[this] def close() = frame.dispatchEvent(new WindowEvent(frame, WindowEvent.WINDOW_CLOSING))
protected[this] def notifyOnClose(lock: AnyRef) {
// inspired by http://stackoverflow.com/questions/1341699/how-do-i-make-a-thread-wait-for-jframe-to-close-in-java
frame.addWindowListener(new WindowAdapter {
override def windowClosing(arg0: WindowEvent) {
lock synchronized {
lock.notify()
}
}
})
}
}
| ipostanogov/vk-scout | src/main/scala/vk/scout/auth/oauth2/browser/WebBrowser.scala | Scala | mit | 2,635 |
/*
ASIB - A Scala IRC Bot
Copyright (C) 2012 Iain Cambridge
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package asib.command.user
import asib.Asib
import asib.util.Config
import asib.util.Uri
import dispatch._
import net.liftweb.json.JsonParser
import net.liftweb.json.JsonAST._
class Weather extends AbstractUserCommand {
val helpMessage = "Fetches the local time usage - !weather <location>"
def execute(username: String, channel: String, args: String) = {
val http = new Http()
val weatherApiKey = Config.getString("weatherApiKey")
    // Took me a while to figure out, but if the JSON value isn't there Config.getString returns
    // a string containing "None". This is because lift's JSON lib returns None and I turn it
    // into a string.
if (weatherApiKey == "None" || weatherApiKey == "") {
Asib.sendMsg(channel, username + ", the weather api key is required. Goto " +
"http://www.worldweatheronline.com/ to sign up and get one")
} else {
      // Uses the url object's apply functionality.
val u = url("http://free.worldweatheronline.com/feed/weather.ashx?key=" + weatherApiKey +
"&q=" + Uri.encode(args) + "&format=json&num_of_days=1")
val json = http(u >- JsonParser.parse)
val desc = (json \\ "data" \\ "current_condition" \\ "weatherDesc" \\ "value" values)
val temp = (json \\ "data" \\ "current_condition" \\ "temp_C" values)
val location = (json \\ "data" \\ "request" \\ "query" values)
Asib.sendMsg(channel, username + " The weather in " + location + " is " + desc +
" and temperature is " + temp + "°C")
}
}
}
| icambridge-old/asib | src/main/scala/asib/command/user/Weather.scala | Scala | gpl-3.0 | 2,198 |
package de.markschaefer.lib.eventflow
import java.io.BufferedReader
import java.text.SimpleDateFormat
import java.text.DateFormat
import java.util.Calendar
class EventFlow[T](val name: String, val flow: List[Event[T]]) {
override def toString = name + "\\n" + flow.toString
}
abstract class Converter[T] {
def fromString(s: String): T;
}
object EventFlow {
private val df: DateFormat = new SimpleDateFormat("dd.MM.yyyy;HH:mm:ss")
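  // Input format handled by loadFromReader (inferred from the parsing code below; shown as an
  // illustration, not a formal spec):
  //   dd.MM.yyyy;HH:mm:ss - <event text>      a single timestamped event
  //   repeat <n> <years|months|days|hours>    repeats the events up to the matching "end"
  //                                           n times, copy i being shifted by i units
  //   end                                     terminates a repeat block (or the whole flow)
  //   # ...                                   comment, ignored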
def loadFromReader[T](reader: BufferedReader, converter: Converter[T]): List[Event[T]] = {
var events: List[Event[T]] = Nil
var line = reader.readLine()
while (line != null) {
line = cleanLine(line)
if (line.startsWith("end")) {
return events
}
if (!line.isEmpty()) {
if (line.startsWith("repeat")) {
events = repeat(line, reader, converter, events)
} else {
events = parse(line, events, converter)
}
}
line = reader.readLine()
}
events
}
private def parse[T](line: String, events: List[Event[T]], converter: Converter[T]) = {
val firstDash = line.indexOf("-")
if (firstDash == -1) {
throw new RuntimeException();
}
val date = line.substring(0, firstDash).trim()
val eventString = line.substring(firstDash + 1).trim()
val calendar = Calendar.getInstance()
calendar.setTime(df.parse(date));
events :+ new Event(calendar, converter.fromString(eventString))
}
private def repeat[T](line: String, reader: BufferedReader, converter: Converter[T], events: List[Event[T]]) = {
var ne = events
val split = line.split("\\\\s")
if (split.length != 3) {
throw new RuntimeException();
}
val repetitions = Integer.parseInt(split(1).trim())
val unit = split(2).trim()
var u = 0
if ("years".equals(unit)) {
u = Calendar.YEAR;
} else if ("months".equals(unit)) {
u = Calendar.MONTH;
} else if ("days".equals(unit)) {
u = Calendar.DATE;
} else if ("hours".equals(unit)) {
u = Calendar.HOUR;
} else {
throw new RuntimeException("Unknown unit for repeat: " + unit);
}
val eventsToBeRepeated = loadFromReader(reader, converter)
for (i ← 0 until repetitions) {
for (event ← eventsToBeRepeated) {
val date = event.date.clone().asInstanceOf[Calendar]
date.add(u, i);
ne = ne :+ new Event(date, event.event)
}
}
ne
}
private def cleanLine(line: String): String = {
line.replaceAll("#.*", "").trim()
}
} | mark--/eventflow | de.markschaefer.lib.eventflow/src/de/markschaefer/lib/eventflow/EventFlow.scala | Scala | gpl-2.0 | 2,543 |
package org.hello.common
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class WorldCommonSuite extends FunSuite {
test("fooCommon") {
new WorldCommon().fooCommon()
}
} | scoverage/gradle-scoverage | src/functionalTest/resources/projects/scala-multi-module/common/src/test/scala/org/hello/common/WorldCommonSuite.scala | Scala | apache-2.0 | 263 |
package com.bobwilsonsgarage.carrepair
import akka.actor.SupervisorStrategy.{Restart, Stop}
import akka.actor._
import com.bobwilsonsgarage.carrepair.CarRepairServiceEndpointInternalProtocol.InitializeCarRepairServiceEndpoint
import common.protocol.CarRepairService
import common.protocol.CarRepairServiceProtocol._
import scala.concurrent.duration._
/**
* Companion of CarRepairServiceEndpointOverseer.
*/
object CarRepairServiceEndpointOverseer {
def props() = Props[CarRepairServiceEndpointOverseer]
}
/**
* Supervisor of CarRepairServiceEndpoint
*
* @author dbolene
*/
class CarRepairServiceEndpointOverseer extends Actor with ActorLogging {
import CarRepairServiceEndpointOverseerProtocol._
override def receive = {
case csse: CreateCarRepairServiceEndpoint =>
try {
val carRepairServiceActor = context.actorOf(CarRepairServiceEndpoint.props, CarRepairService.endpointName)
carRepairServiceActor ! InitializeCarRepairServiceEndpoint(csse.registry)
sender() ! CarRepairServiceEndpointCreated(carRepairServiceActor)
} catch {
case ex: InvalidActorNameException =>
sender () ! CarRepairServiceEndpointCreateFailed
log.error("Attempt to create already instantiated CarRepairServiceEndpoint", ex)
}
case msg =>
log.info(s"received unknown message: $msg")
}
override val supervisorStrategy =
OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case e: InvalidActorNameException =>
log.error("Stop after InvalidActorNameException: " + e)
Stop
case e: Exception =>
log.error("initiating Restart after Exception: " + e)
Restart
}
}
object CarRepairServiceEndpointOverseerProtocol {
case class CreateCarRepairServiceEndpoint(registry: ActorRef)
case class CarRepairServiceEndpointCreated(actorRef: ActorRef)
case object CarRepairServiceEndpointCreateFailed
} | dbolene/BobWilsonsGarage | carRepair/src/main/scala/com/bobwilsonsgarage/carrepair/CarRepairServiceEndpointOverseer.scala | Scala | apache-2.0 | 1,937 |
/*
* Copyright (c) 2008-2009, Matthias Mann
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Matthias Mann nor the names of its contributors may
* be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.dbpedia.spotlight.util
import org.dbpedia.spotlight.log.SpotlightLog
/**
From: http://www.matthiasmann.de/content/view/25/26/
In nearly every project you have the need to do some simple profiling. Scala makes this very easy and flexible.
Let's assume we load a potentially large XML file:
def loadDataFile(name:String) = ....
Now instead of adding a lot of timing code to your application we can simple do:
import Profiling._
val data=timed(printTime("loaded XML in ")){
loadDataFile(name)
}
*/
object Profiling {
def timed[T](report: Long=>Unit)(body: =>T) = {
val start = System.nanoTime
val r = body
report(System.nanoTime - start)
r
}
private val timeUnits = List("ns", "us", "ms", "s")
def formatTime(delta:Long) = {
def formatTime(v:Long, units:List[String], tail:List[String]):List[String] = {
def makeTail(what:Long) = (what + units.head) :: tail
if(!units.tail.isEmpty && v >= 1000)
formatTime(v / 1000, units.tail, makeTail(v % 1000))
else
makeTail(v)
}
formatTime(delta, timeUnits, Nil).mkString(" ")
}
def printTime(msg:String) = (delta:Long) => {
SpotlightLog.info(this.getClass, "%s %s", msg, formatTime(delta))
}
} | Skunnyk/dbpedia-spotlight-model | core/src/main/scala/org/dbpedia/spotlight/util/Profiling.scala | Scala | apache-2.0 | 2,922 |
package org.dsa.iot.rx.core
import org.dsa.iot.rx.RxTransformer
import org.dsa.iot.scala.Having
/**
* Caches the items emitted by the source, so that future subscriptions can "replay" them from the
* beginning.
*
* <img width="640" height="410" src="https://raw.githubusercontent.com/wiki/ReactiveX/RxJava/images/rx-operators/cache.png" alt="" />
*/
class Cache[T] extends RxTransformer[T, T] {
def capacity(size: Int): Cache[T] = this having (capacity <~ Some(size))
val capacity = Port[Option[Int]]("capacity")
protected def compute = capacity.in flatMap {
case Some(size) => source.in.cacheWithInitialCapacity(size)
case _ => source.in.cache
}
}
/**
* Factory for [[Cache]] instances.
*/
object Cache {
/**
* Creates a new Cache instance with unlimited cache capacity.
*/
def apply[T]: Cache[T] = create(None)
  /**
   * Creates a new Cache instance with the specified initial cache capacity (used as a sizing
   * hint for the underlying cache, not as a hard limit on the number of cached items).
   */
def apply[T](capacity: Int): Cache[T] = create(Some(capacity))
private def create[T](capacity: Option[Int]) = {
val block = new Cache[T]
block.capacity <~ capacity
block
}
} | IOT-DSA/dslink-scala-ignition | src/main/scala/org/dsa/iot/rx/core/Cache.scala | Scala | apache-2.0 | 1,130 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import java.util.TimeZone
import java.text.SimpleDateFormat
import scala.util.matching.Regex
/**
 * Holds some conversion functions for dealing with strings as RichDate objects
 */
object DateOps extends java.io.Serializable {
val PACIFIC = TimeZone.getTimeZone("America/Los_Angeles")
val UTC = TimeZone.getTimeZone("UTC")
val DATE_WITHOUT_DASH = "yyyyMMdd"
val DATE_WITH_DASH = "yyyy-MM-dd"
val DATEHOUR_WITHOUT_DASH = "yyyyMMddHH"
val DATEHOUR_WITH_DASH = "yyyy-MM-dd HH"
val DATETIME_WITHOUT_DASH = "yyyyMMddHHmm"
val DATETIME_WITH_DASH = "yyyy-MM-dd HH:mm"
val DATETIME_HMS_WITHOUT_DASH = "yyyyMMddHHmmss"
val DATETIME_HMS_WITH_DASH = "yyyy-MM-dd HH:mm:ss"
val DATETIME_HMSM_WITH_DASH = "yyyy-MM-dd HH:mm:ss.SSS"
private[scalding] sealed abstract class Format(val pattern: String, val validator: Regex) {
def matches(s: String): Boolean = validator.findFirstIn(s).isDefined
}
private[scalding] object Format {
private val date = """\\d{4}-\\d{2}-\\d{2}"""
private val sep = """(T?|\\s*)"""
private val emptyBegin = """^\\s*"""
private val emptyEnd = """\\s*$"""
case object DATE_WITHOUT_DASH extends Format(DateOps.DATE_WITHOUT_DASH, new Regex(emptyBegin + """\\d{8}""" + emptyEnd))
case object DATE_WITH_DASH extends Format(DateOps.DATE_WITH_DASH, new Regex(emptyBegin + date + emptyEnd))
case object DATEHOUR_WITHOUT_DASH extends Format(DateOps.DATEHOUR_WITHOUT_DASH, new Regex(emptyBegin + """\\d{10}""" + emptyEnd))
case object DATEHOUR_WITH_DASH extends Format(DateOps.DATEHOUR_WITH_DASH, new Regex(emptyBegin + date + sep + """\\d\\d""" + emptyEnd))
case object DATETIME_WITHOUT_DASH extends Format(DateOps.DATETIME_WITHOUT_DASH, new Regex(emptyBegin + """\\d{12}""" + emptyEnd))
case object DATETIME_WITH_DASH extends Format(DateOps.DATETIME_WITH_DASH, new Regex(emptyBegin + date + sep + """\\d\\d:\\d\\d""" + emptyEnd))
case object DATETIME_HMS_WITHOUT_DASH extends Format(DateOps.DATETIME_HMS_WITHOUT_DASH, new Regex(emptyBegin + """\\d{14}""" + emptyEnd))
case object DATETIME_HMS_WITH_DASH extends Format(DateOps.DATETIME_HMS_WITH_DASH, new Regex(emptyBegin + date + sep + """\\d\\d:\\d\\d:\\d\\d""" + emptyEnd))
case object DATETIME_HMSM_WITH_DASH extends Format(DateOps.DATETIME_HMSM_WITH_DASH, new Regex(emptyBegin + date + sep + """\\d\\d:\\d\\d:\\d\\d\\.\\d{1,3}""" + emptyEnd))
}
private val prepare: String => String = { (str: String) =>
str.replace("T", " ") //We allow T to separate dates and times, just remove it and then validate
.replaceAll("[/_]", "-") // Allow for slashes and underscores
}
/**
* Return the guessed format for this datestring
*/
private[scalding] def getFormatObject(s: String): Option[Format] = {
val formats: List[Format] = List(
Format.DATE_WITH_DASH,
Format.DATEHOUR_WITH_DASH,
Format.DATETIME_WITH_DASH,
Format.DATETIME_HMS_WITH_DASH,
Format.DATETIME_HMSM_WITH_DASH,
Format.DATE_WITHOUT_DASH,
Format.DATEHOUR_WITHOUT_DASH,
Format.DATETIME_WITHOUT_DASH,
Format.DATETIME_HMS_WITHOUT_DASH)
formats.find { _.matches(prepare(s)) }
}
/**
* Return the guessed format for this datestring
*/
def getFormat(s: String): Option[String] = getFormatObject(s).map(_.pattern)
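  // For example (consistent with the Format definitions above):
  //   getFormat("2011-12-01") == Some(DATE_WITH_DASH)
  //   getFormat("2011-12-01 23:59:59") == Some(DATETIME_HMS_WITH_DASH)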
/**
* The DateParser returned here is based on SimpleDateFormat, which is not thread-safe.
* Do not share the result across threads.
*/
def getDateParser(s: String): Option[DateParser] =
getFormat(s).map { fmt => DateParser.from(new SimpleDateFormat(fmt)).contramap(prepare) }
}
| tglstory/scalding | scalding-date/src/main/scala/com/twitter/scalding/DateOps.scala | Scala | apache-2.0 | 4,159 |
package com.ldaniels528.ricochet
import java.awt.event.{MouseEvent, MouseMotionListener}
import java.awt.{Cursor, Color, Dimension, Graphics2D}
import javax.swing.{JFrame, JPanel}
/**
* Ricochet Application
* @author "Lawrence Daniels" <[email protected]>
*/
class RicochetApp() extends JFrame("Ricochet") {
super.setContentPane(new ViewPanel())
super.pack()
super.setVisible(true)
super.setCursor(Cursor.getPredefinedCursor(Cursor.HAND_CURSOR))
super.addMouseMotionListener(new MouseMotionListener {
override def mouseMoved(e: MouseEvent) {
val pt = e.getLocationOnScreen
GameManager.player_? foreach { player =>
player.targetX = Some(pt.x)
}
}
override def mouseDragged(e: MouseEvent) = ()
})
// graphics-related values
private val contentPane = getContentPane
private val buffer = createImage(contentPane.getWidth, contentPane.getHeight)
private val offScreen = buffer.getGraphics.asInstanceOf[Graphics2D]
private val theScreen = contentPane.getGraphics.asInstanceOf[Graphics2D]
// game-related values
private val frameCounter = new FrameCounter()
private var alive = true
private var hz: Double = 0
/**
* Called to end the execution of the main loop
*/
def die(): Unit = alive = false
/**
* Main loop
*/
def run() {
while (alive) {
// capture the start time of the frame
val startTime = System.currentTimeMillis()
// clear the off-screen buffer
offScreen.setColor(Color.BLACK)
offScreen.fillRect(0, 0, contentPane.getWidth, contentPane.getHeight)
// update the game manager
GameManager.update(offScreen, hz, contentPane.getWidth, contentPane.getHeight)
// compute the delta time factor and frames/second
frameCounter.render(offScreen)
// update the display
theScreen.drawImage(buffer, 0, 0, this)
      // capture the elapsed frame time in seconds (used as the delta-time factor on the next update)
hz = (System.currentTimeMillis() - startTime).toDouble / 1000d
}
}
/**
* Game View Panel
* @author "Lawrence Daniels" <[email protected]>
*/
class ViewPanel() extends JPanel(false) {
super.setPreferredSize(new Dimension(1024, 768))
}
}
/**
* Radius Application Singleton
* @author "Lawrence Daniels" <[email protected]>
*/
object RicochetApp {
/**
* Application entry-point
* @param args the given command line arguments
*/
def main(args: Array[String]) = new RicochetApp().run()
}
| ldaniels528/ricochet | src/main/scala/com/ldaniels528/ricochet/RicochetApp.scala | Scala | apache-2.0 | 2,478 |
//
// FreqMineData.scala -- Scala benchmark data classes for FreqMine
// Project OrcTests
//
// Copyright (c) 2018 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.test.item.scalabenchmarks.fpgrowth
import scala.util.Random
import scala.collection.JavaConverters._
import java.util.ArrayList
import orc.test.item.scalabenchmarks.BenchmarkConfig
import orc.test.item.scalabenchmarks.ExpectedBenchmarkResult
import orc.test.item.scalabenchmarks.UnorderedHash
import scala.util.hashing.MurmurHash3
case class FrequentItemSet(items: Iterable[Long], support: Long) {
override def hashCode(): Int = MurmurHash3.unorderedHash(items) + (support.## * 37)
override def toString(): String = s"<${items.mkString(",")}:${support}>"
}
object FrequentItemSet {
def apply(items: Iterable[Long], support: Long) = new FrequentItemSet(items, support)
def apply(items: java.util.Collection[Long], support: Long) = new FrequentItemSet(items.asScala, support)
def apply(items: Array[Long], support: Long) = new FrequentItemSet(items, support)
}
object FreqMineData extends ExpectedBenchmarkResult[ArrayList[ArrayList[Long]]] with UnorderedHash[ArrayList[ArrayList[Long]]] {
import BenchmarkConfig._
val (nTransactions, dataSize, uniqueItems) = {
val data = generate()
val dataSize = data.asScala.map(_.size).sum
val uniqueItems = data.asScala.foldLeft(Set[Long]())((acc, t) => acc ++ t.asScala).size
(data.size, dataSize, uniqueItems)
}
def generate(): ArrayList[ArrayList[Long]] = {
generate(problemSizeScaledInt(1000))
}
def generate(nTrans: Int): ArrayList[ArrayList[Long]] = {
val rnd = new Random(10) // Fixed seed PRNG
def normalInt(mean: Double, stddev: Double): Long = {
val v = rnd.nextGaussian() * stddev + mean
(v.abs.ceil max 1).toLong
}
def logNormalInt(mean: Double, stddev: Double): Long = {
val v = math.exp(rnd.nextGaussian() * stddev + mean)
(v.abs.ceil max 1).toLong
}
val out = new ArrayList[ArrayList[Long]](nTrans)
for (_ <- 0 until nTrans) {
val n = logNormalInt(1.8, 0.8)
val mean = normalInt(1, 2) + 1
val is = for (_ <- 0 until n.toInt) yield {
logNormalInt(mean, 2)
}
out.add(new ArrayList[Long](is.toSet.asJava))
}
out
}
def main(args: Array[String]): Unit = {
for(t <- generate().asScala.take(1000)) {
println(t.asScala.mkString(" "))
}
println(s"nTransactions = $nTransactions, dataSize = $dataSize, uniqueItems = $uniqueItems")
}
val expectedMap: Map[Int, Int] = Map(
1 -> 0x5bc57a67,
10 -> 0x93078f48,
100 -> 0x2b2a5cdf,
)
}
| orc-lang/orc | OrcTests/src/orc/test/item/scalabenchmarks/fpgrowth/FreqMineData.scala | Scala | bsd-3-clause | 2,909 |
package paperdoll.core.layer
import shapeless.{ Coproduct, :+:, CNil }
import scalaz.Leibniz
/**
* Typeclass representing that the layer stack T is a subset of the layer stack S, and bridging between the
* layer stack world and the effectful value world.
* This probably duplicates some functionality that's present in more general form in shapeless.
* However, if so, I can't understand that general form well enough to express this in terms of it.
*/
sealed trait Subset[S <: Coproduct, T <: Coproduct] {
type LS <: Layers[S]
type LT <: Layers[T]
def inject[X](value: LT#O[X]): LS#O[X]
}
object Subset {
implicit def nilSubset[S <: Coproduct](implicit l: Layers[S]) = new Subset[S, CNil] {
override type LS = Layers.Aux[S, l.O]
override type LT = Layers[CNil] {
type O[X] = CNil
}
override def inject[X](value: CNil) = value.impossible
}
implicit def consSubset[S <: Coproduct, TH <: Layer, L1 <: Layers[_], TT <: Coproduct, L2 <: Layers[_]](
implicit m: Member[S, TH] { type L = L1 }, tl: Subset[S, TT] { type LS = L2 }, le: Leibniz[Nothing, Any, L1, L2]) =
new Subset[S, TH :+: TT] {
override type LS = Layers.Aux[S, L2#O]
override type LT = Layers[TH :+: TT] {
type O[X] = TH#F[X] :+: tl.LT#O[X]
}
override def inject[X](value: TH#F[X] :+: tl.LT#O[X]) =
value.eliminate(x ⇒ le.subst[({type K[LL] = Member[S, TH]{type L = LL}})#K](m).inject(x), tl.inject(_))
}
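  // Illustration (layer names are assumed for the sake of example, not part of the original
  // source): an instance Subset[L1 :+: L2 :+: CNil, L2 :+: CNil] can be summoned because every
  // layer of the second stack is a Member of the first, and its `inject` widens an effect value
  // of the smaller stack into the larger one.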
def apply[S <: Coproduct, T <: Coproduct](implicit s: Subset[S, T]): Subset[S, T] {type LS = s.LS; type LT = s.LT} = s
} | m50d/paperdoll | core/src/main/scala/paperdoll/core/layer/Subset.scala | Scala | apache-2.0 | 1,582 |
package java.io
class PrintStream(_out: OutputStream, autoFlush: Boolean, ecoding: String)
extends FilterOutputStream(_out) with Appendable {
import java.util.Locale
def this(out: OutputStream) = this(out, false, "")
def this(out: OutputStream, autoFlush: Boolean) = this(out, autoFlush, "")
override def write(b: Int) = {
_out.write(b)
if (autoFlush && b == 10) flush()
}
def append(c: Char) = this
def append(csq: CharSequence) = this
def append(csq: CharSequence, start: Int, end: Int) = this
var hasError = false
def checkError() = hasError
def setError() { hasError = true }
def clearError() { hasError = false }
def print(b: Boolean): Unit = print(b.toString)
def print(c: Char): Unit = print(c.toString)
def print(i: Int): Unit = print(i.toString)
def print(l: Long): Unit = print(l.toString)
def print(f: Float): Unit = print(f.toString)
def print(d: Double): Unit = print(d.toString)
def print(s: Array[Char]): Unit = print("character array")
def print(s: String): Unit = if (s eq null) print("null") else writeString(s)
def print(o: Object): Unit = if (o eq null) print("null") else print(o.toString)
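  // Note (added for clarity): writeString below keeps only the low 8 bits of each character,
  // i.e. the string is written as if it were Latin-1/ASCII encoded.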
private def writeString(s: String) = {
val bytes = new Array[Byte](s.length)
var i = 0
while (i < s.length) {
bytes(i) = s.charAt(i).toByte
i += 1
}
write(bytes)
}
def println(): Unit = write(10)
def println(x: Boolean): Unit = { print(x); println() }
def println(x: Char): Unit = { print(x); println() }
def println(x: Int): Unit = { print(x); println() }
def println(x: Long): Unit = { print(x); println() }
def println(x: Float): Unit = { print(x); println() }
def println(x: Double): Unit = { print(x); println() }
def println(x: String): Unit = { print(x); println() }
def println(x: Object): Unit = { print(x); println() }
def printf(format: String, args: Array[Object]): Unit = print("printf")
def printf(l: Locale, format: String, args: Array[Object]): Unit = print("printf")
def format(format: String, args: Array[Object]): Unit = print("printf")
def format(l: Locale, format: String, args: Array[Object]): Unit = print("printf")
}
| swhgoon/scala-js | javalib/source/src/java/io/PrintStream.scala | Scala | bsd-3-clause | 2,177 |
package org.bidpulse.example
import akka.actor.{Actor, ActorLogging, ActorRef}
import org.bidpulse.pipeline.{Channel, Pipeline, Publisher}
class DummyPublisher(pipeline: ActorRef) extends Actor with Publisher with ActorLogging {
pipeline ! Pipeline.Subscribe
override def receive: Receive = {
case msg @ (_: Channel.ProjectsPublished | _: Channel.ProjectPublished | _: Channel.ProjectUpdatePublished) =>
log.info(s"Message published\\n>>>> $msg")
}
}
| EugenyLoy/BidPulse | src/main/scala/org/bidpulse/example/DummyPublisher.scala | Scala | mit | 472 |
package com.twitter.finagle.netty4.http.handler
import com.twitter.conversions.StorageUnitOps._
import com.twitter.util.StorageUnit.zero
import io.netty.buffer.Unpooled
import io.netty.channel.embedded.EmbeddedChannel
import io.netty.handler.codec.http._
import org.scalatest.FunSuite
class FixedLengthMessageAggregatorTest extends FunSuite {
test("full messages pass through") {
val agg = new FixedLengthMessageAggregator(10.megabytes)
val channel: EmbeddedChannel = new EmbeddedChannel(new HttpRequestEncoder(), agg)
val content = Unpooled.wrappedBuffer(new Array[Byte](11))
val req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content)
assert(channel.writeInbound(req))
val reqObserved = channel.readInbound[FullHttpRequest]()
assert(reqObserved.method == HttpMethod.POST)
assert(reqObserved.content == req.content)
}
test("chunked messages aren't aggregated") {
val agg = new FixedLengthMessageAggregator(10.megabytes)
val channel: EmbeddedChannel = new EmbeddedChannel(new HttpRequestEncoder(), agg)
val content = Unpooled.wrappedBuffer(new Array[Byte](11))
val head = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/")
HttpUtil.setTransferEncodingChunked(head, true)
val body = new DefaultLastHttpContent(content)
assert(channel.writeInbound(head))
assert(channel.writeInbound(body))
val reqObserved = channel.readInbound[HttpRequest]()
assert(reqObserved.method == HttpMethod.POST)
val bodyObserved = channel.readInbound[HttpContent]()
assert(bodyObserved.content == content)
}
test(
"fixed length messages which are chunked and smaller than " +
"the specified length are aggregated"
) {
val agg = new FixedLengthMessageAggregator(12.bytes)
val channel: EmbeddedChannel = new EmbeddedChannel(new HttpRequestEncoder(), agg)
val content = Unpooled.wrappedBuffer(new Array[Byte](11))
val head = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/")
HttpUtil.setContentLength(head, content.readableBytes)
val body = new DefaultLastHttpContent(content)
assert(!channel.writeInbound(head))
assert(channel.writeInbound(body))
val reqObserved = channel.readInbound[FullHttpRequest]()
assert(reqObserved.method == HttpMethod.POST)
assert(reqObserved.content == content)
}
test("fixed length messages which are chunked and equal to the specified length are aggregated") {
val agg = new FixedLengthMessageAggregator(11.bytes)
val channel: EmbeddedChannel = new EmbeddedChannel(new HttpRequestEncoder(), agg)
val content = Unpooled.wrappedBuffer(new Array[Byte](11))
val head = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/")
HttpUtil.setContentLength(head, content.readableBytes)
val body = new DefaultLastHttpContent(content)
assert(!channel.writeInbound(head))
assert(channel.writeInbound(body))
val reqObserved = channel.readInbound[FullHttpRequest]()
assert(reqObserved.method == HttpMethod.POST)
assert(reqObserved.content == content)
}
test("fixed length messages that don't have body are aggregated by zero-length aggregator") {
val agg = new FixedLengthMessageAggregator(zero)
val channel: EmbeddedChannel = new EmbeddedChannel(new HttpRequestEncoder(), agg)
val head = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/")
HttpUtil.setContentLength(head, 0)
assert(!channel.writeInbound(head))
assert(channel.writeInbound(new DefaultLastHttpContent()))
val reqObserved = channel.readInbound[FullHttpRequest]()
assert(reqObserved.method == HttpMethod.POST)
assert(reqObserved.content == Unpooled.EMPTY_BUFFER)
}
test(
"fixed length messages which are chunked and larger than than the " +
"specified size remain chunked"
) {
val agg = new FixedLengthMessageAggregator(11.byte)
val channel: EmbeddedChannel = new EmbeddedChannel(new HttpRequestEncoder(), agg)
val content = Unpooled.wrappedBuffer(new Array[Byte](12))
val head = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/")
HttpUtil.setContentLength(head, content.readableBytes)
val body = new DefaultLastHttpContent(content)
assert(channel.writeInbound(head))
assert(channel.writeInbound(body))
val reqObserved = channel.readInbound[HttpRequest]()
assert(reqObserved.method == HttpMethod.POST)
val bodyObserved = channel.readInbound[HttpContent]()
assert(bodyObserved.content == content)
}
test("fixed length messages that have body are not aggregated by zero-length aggregator") {
val agg = new FixedLengthMessageAggregator(zero)
val channel: EmbeddedChannel = new EmbeddedChannel(new HttpRequestEncoder(), agg)
val content = Unpooled.wrappedBuffer(new Array[Byte](11))
val head = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/")
HttpUtil.setContentLength(head, content.readableBytes)
val body = new DefaultLastHttpContent(content)
assert(channel.writeInbound(head))
assert(channel.writeInbound(body))
val reqObserved = channel.readInbound[HttpRequest]()
assert(reqObserved.method == HttpMethod.POST)
val bodyObserved = channel.readInbound[HttpContent]()
assert(bodyObserved.content == content)
}
test("requests with no content-length and transfer-encoding are aggregated") {
val agg = new FixedLengthMessageAggregator(zero)
val channel: EmbeddedChannel = new EmbeddedChannel(new HttpRequestEncoder(), agg)
val head = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/")
assert(!channel.writeInbound(head)) // shouldn't pass through
assert(channel.writeInbound(new DefaultLastHttpContent()))
val reqObserved = channel.readInbound[FullHttpRequest]()
assert(reqObserved.method == HttpMethod.POST)
assert(reqObserved.content == Unpooled.EMPTY_BUFFER)
}
test("responses that will not have a body are aggregated") {
Set(
HttpResponseStatus.NO_CONTENT,
HttpResponseStatus.NOT_MODIFIED,
HttpResponseStatus.CONTINUE,
HttpResponseStatus.SWITCHING_PROTOCOLS,
HttpResponseStatus.PROCESSING
).foreach { status =>
val agg = new FixedLengthMessageAggregator(zero)
val channel: EmbeddedChannel = new EmbeddedChannel(new HttpRequestEncoder(), agg)
val head = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status)
assert(!channel.writeInbound(head)) // shouldn't pass through
assert(channel.writeInbound(new DefaultLastHttpContent()))
val reqObserved = channel.readInbound[FullHttpResponse]()
assert(reqObserved.status == status)
}
}
}
| luciferous/finagle | finagle-netty4-http/src/test/scala/com/twitter/finagle/netty4/http/handler/FixedLengthMessageAggregatorTest.scala | Scala | apache-2.0 | 6,727 |
package com.datastax.spark.connector.types
import org.apache.commons.lang3.SerializationUtils
import org.junit.Assert._
import org.junit.Test
class TypeSerializationTest {
private def testSerialization(t: ColumnType[_]) {
assertEquals(t, SerializationUtils.roundtrip(t))
}
@Test
def testSerializationOfPrimitiveTypes() {
testSerialization(AsciiType)
testSerialization(TextType)
testSerialization(IntType)
testSerialization(BigIntType)
testSerialization(DoubleType)
testSerialization(FloatType)
testSerialization(BooleanType)
testSerialization(UUIDType)
testSerialization(TimeUUIDType)
testSerialization(TimestampType)
testSerialization(DecimalType)
testSerialization(BigIntType)
testSerialization(InetType)
testSerialization(CounterType)
testSerialization(SmallIntType)
testSerialization(TinyIntType)
testSerialization(DateType)
}
@Test
def testSerializationOfCollectionTypes() {
testSerialization(ListType(IntType))
testSerialization(ListType(ListType(IntType)))
testSerialization(SetType(TextType))
testSerialization(MapType(BigIntType, TimestampType))
}
}
| maasg/spark-cassandra-connector | spark-cassandra-connector/src/test/scala/com/datastax/spark/connector/types/TypeSerializationTest.scala | Scala | apache-2.0 | 1,170 |
package io.cumulus.models.sharing
import akka.util.ByteString
import io.cumulus.json.JsonFormat._
import io.cumulus.utils.Crypto
import play.api.libs.json.{Format, Json}
/**
 * Security information of a sharing: the secret code is stored as a double scrypt hash
 * (a hash of the hash) along with the two random salts used to derive it.
 */
case class SharingSecurity(
secretCodeHash: ByteString,
salt1: ByteString,
salt2: ByteString
) {
/**
* Test if the provided secret code is the secret code of the sharing.
*/
def checkSecretCode(toTest: ByteString): Boolean = {
// To test the password, we need to generate the hash then the second hash, and compare the results
val toTestHash = Crypto.scrypt(toTest, salt1)
val toTestHashHash = Crypto.scrypt(toTestHash, salt2)
toTestHashHash == secretCodeHash
}
}
object SharingSecurity {
def create(secretCode: ByteString): SharingSecurity = {
// Hash of the secret code to get a 256Bit AES key
val salt = Crypto.randomBytes(16)
val secretCodeHash = Crypto.scrypt(secretCode, salt)
// Hash the hash of the secret code
val salt2 = Crypto.randomBytes(16)
val secretCodeHashHash = Crypto.scrypt(secretCodeHash, salt2)
SharingSecurity(
secretCodeHash = secretCodeHashHash,
salt1 = salt,
salt2 = salt2
)
}
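  // Illustrative round trip (not part of the original source):
  //   val security = SharingSecurity.create(secretCode)
  //   security.checkSecretCode(secretCode)  // true
  //   security.checkSecretCode(otherCode)   // false (barring a hash collision)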
implicit val format: Format[SharingSecurity] =
Json.format[SharingSecurity]
}
| Cumulus-Cloud/cumulus | server/cumulus-core/src/main/scala/io/cumulus/models/sharing/SharingSecurity.scala | Scala | mit | 1,294 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.compiler.codegenerator.opencl
import cogx.compiler.parser.op._
import cogx.compiler.CompilerError
import cogx.platform.types.Opcode
/** Translates an opcode to an OpenCL function name.
*
* @author Greg Snider
*/
private[cogx]
object OpcodeToFunction extends CompilerError {
/** Get the name of the OpenCL function corresponding to `opcode`. */
def apply(opcode: Opcode): String = {
opcode match {
case op: ComplexBinaryOp =>
op match {
case ComplexAddOp => "complexAdd"
case ComplexSubtractOp => "complexSubtract"
case ComplexMultiplyOp => "complexMultiply"
case ComplexDivideOp => "complexDivide"
case PolarToComplexOp => "polarToComplex"
case RealImaginaryToComplexOp => "realImaginaryToComplex"
case _ => internalError("opcode " + op + " has no function"); ""
}
case op: ComplexBinaryComplexConstOpcode =>
op match {
case ComplexAddConstOp(value) => "complexAdd"
case ComplexSubtractConstOp(value) => "complexSubtract"
case ComplexMultiplyConstOp(value) => "complexMultiply"
case ComplexDivideConstOp(value) => "complexDivide"
case _ => internalError("opcode " + op + " has no function"); ""
}
case op: ComplexBinaryRealConstOp =>
op match {
case ComplexAddRealConstOp(value) => "complexAddReal"
case ComplexSubtractRealConstOp(value) => "complexSubtractReal"
case ComplexMultiplyRealConstOp(value) => "complexMultiplyReal"
case ComplexDivideRealConstOp(value) => "complexDivideReal"
case _ => internalError("opcode " + op + " has no function"); ""
}
case op: ComplexUnaryOp =>
op match {
case ComplexExpOp => "complexExp"
case ConjugateOp => "conjugate"
case ComplexReciprocalOp => "complexReciprocal"
case ComplexUnaryMinusOp => "negate"
case RealToComplexOp => "realToComplex"
case ComplexCopyOp(uniqueId) => "copy"
case _ => internalError("opcode " + op + " has no function"); ""
}
// The ComplexToRealOp ops are defined here so that the
// ComplexToRealHyperKernel could be made better through use of
// OpcodeToFunction() in the future.
case op: ComplexToRealOp =>
op match {
case PhaseOp => "phase"
case OrientationOp => "orientation"
case MagnitudeOp => "magnitude"
case RealPartOp => "realPart"
case ImaginaryPartOp => "imaginaryPart"
case _ => internalError("opcode " + op + " has no function"); ""
}
case op: BinaryConstOpcode =>
op match {
case AddConstOp(value) => "add"
case SubtractConstOp(value) => "subtract"
case MultiplyConstOp(value) => "multiply"
case DivideConstOp(value) => "divide"
case ModuloConstOp(value) => "fmod"
case GreaterThanConstOp(value) => "greaterThan"
case GreaterThanEqualsConstOp(value) => "greaterThanEqual"
case LessThanConstOp(value) => "lessThan"
case LessThanEqualsConstOp(value) => "lessThanEqual"
case EqualsConstOp(value) => "equals"
case NotEqualsConstOp(value) => "notEquals"
case MaxConstOp(value) => "fmax"
case MinConstOp(value) => "fmin"
case PowConstOp(exponent) => "pow"
case PownConstOp(exponent) => "powInt"
case _ => internalError("opcode " + op + " has no function"); ""
}
case op: BinaryOpcode =>
op match {
case AddOp => "add"
case SubtractOp => "subtract"
case MultiplyOp => "multiply"
case DivideOp => "divide"
case ModuloOp => "fmod"
case GreaterThanOp => "greaterThan"
case GreaterThanEqualsOp => "greaterThanEqual"
case LessThanOp => "lessThan"
case LessThanEqualsOp => "lessThanEqual"
case EqualsOp => "equals"
case NotEqualsOp => "notEquals"
case MaxOp => "fmax"
case MinOp => "fmin"
case Atan2Op => "atan2"
case _ => internalError("opcode " + op + " has no function"); ""
}
case op: UnaryOpcode =>
op match {
case AbsOp => "fabs"
case AcosOp => "acos"
case AcoshOp => "acosh"
case AsinOp => "asin"
case CosOp => "native_cos"
case CoshOp => "cosh"
case ExpOp => "native_exp"
case FloorOp => "floor"
case LogOp => "native_log"
case ReciprocalOp => "reciprocal"
case RectifyOp => "rectify"
case SignumOp => "signum"
case SinOp => "native_sin"
case SinhOp => "sinh"
case SqOp => "sq"
case SqrtOp => "sqrt"
case TanOp => "native_tan"
case TanhOp => "tanh"
case UnaryMinusOp => "negate"
case CopyOp(uniqueId) => "copy"
// Field Reductions
case FieldReduceMaxOp => "max"
case FieldReduceMinOp => "min"
case FieldReduceSumOp => "add"
// Tensor Reductions
case x: TensorReduceMaxOp => "fmax"
case x: TensorReduceMinOp => "fmin"
case x: TensorReduceSumOp => "add"
case _ => internalError("opcode " + op + " has no function"); ""
}
case op: NulleryOpcode =>
op match {
case op: ConstantOp =>
internalError("opcode " + op + " has no function"); ""
case op: SensorOp =>
internalError("opcode " + op + " has no function"); ""
case op: VectorSensorOp =>
internalError("opcode " + op + " has no function"); ""
case op: ColorSensorOp =>
internalError("opcode " + op + " has no function"); ""
case NullOp =>
internalError("opcode " + op + " has no function"); ""
case InputProxyOp =>
internalError("opcode " + op + " has no function"); ""
}
}
}
} | hpe-cct/cct-core | src/main/scala/cogx/compiler/codegenerator/opencl/OpcodeToFunction.scala | Scala | apache-2.0 | 6,724 |
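  // e.g. loadURL("example.com") fails the first toURL call (no protocol), so it falls back to
  // loading "http://example.com".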
package athena.util
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
//stolen from spray
private[athena] trait ObjectRegistry[K, V <: AnyRef] {
private[this] val _registry = new AtomicReference(Map.empty[K, V])
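  // register retries its compare-and-set until the update wins, so concurrent registrations are
  // never lost (a simple lock-free copy-on-write map).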
@tailrec
protected final def register(key: K, obj: V): obj.type = {
val reg = registry
val updated = reg.updated(key, obj)
if (_registry.compareAndSet(reg, updated)) obj
else register(key, obj)
}
protected def registry: Map[K, V] = _registry.get
  def getForKey(key: K): Option[V] =
    registry.get(key)
}
| vast-engineering/athena | src/main/scala/athena/util/ObjectRegistry.scala | Scala | apache-2.0 | 608 |
package org.jetbrains.plugins.scala
package lang
package completion
package filters.definitions
import com.intellij.psi._
import com.intellij.psi.filters.ElementFilter
import org.jetbrains.annotations.NonNls
import org.jetbrains.plugins.scala.lang.completion.ScalaCompletionUtil._
import org.jetbrains.plugins.scala.lang.lexer._
import org.jetbrains.plugins.scala.lang.parser._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScCaseClause
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates._
/**
* @author Alexander Podkhalyuzin
* Date: 28.05.2008
*/
class DefTypeFilter extends ElementFilter {
def isAcceptable(element: Object, context: PsiElement): Boolean = {
if (context.isInstanceOf[PsiComment]) return false
val leaf = getLeafByOffset(context.getTextRange.getStartOffset, context)
if (leaf != null) {
val parent = leaf.getParent
parent match {
case _: ScReferenceExpression =>
case _ => return false
}
parent.getParent match {
case parent@(_: ScBlock | _: ScCaseClause | _: ScTemplateBody | _: ScClassParameter | _: ScalaFile)
if !parent.isInstanceOf[ScalaFile] || parent.asInstanceOf[ScalaFile].isScriptFile() =>
if ((leaf.getPrevSibling == null || leaf.getPrevSibling.getPrevSibling == null ||
leaf.getPrevSibling.getPrevSibling.getNode.getElementType != ScalaTokenTypes.kDEF) &&
(parent.getPrevSibling == null || parent.getPrevSibling.getPrevSibling == null ||
(parent.getPrevSibling.getPrevSibling.getNode.getElementType != ScalaElementTypes.MATCH_STMT ||
!parent.getPrevSibling.getPrevSibling.getLastChild.isInstanceOf[PsiErrorElement])))
return true
case _ =>
}
}
false
}
def isClassAcceptable(hintClass: java.lang.Class[_]): Boolean = {
true
}
@NonNls
override def toString: String = {
"'def', 'type' keyword filter"
}
} | whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/completion/filters/definitions/DefTypeFilter.scala | Scala | apache-2.0 | 2,139 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.util.CharVarcharUtils
import org.apache.spark.sql.connector.SchemaRequiredDataSource
import org.apache.spark.sql.connector.catalog.InMemoryPartitionTableCatalog
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.SimpleInsertSource
import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils}
import org.apache.spark.sql.types._
// The base trait for char/varchar tests that need to be run with different table implementations.
trait CharVarcharTestSuite extends QueryTest with SQLTestUtils {
def format: String
def checkColType(f: StructField, dt: DataType): Unit = {
assert(f.dataType == CharVarcharUtils.replaceCharVarcharWithString(dt))
assert(CharVarcharUtils.getRawType(f.metadata) == Some(dt))
}
def checkPlainResult(df: DataFrame, dt: String, insertVal: String): Unit = {
val dataType = CatalystSqlParser.parseDataType(dt)
checkColType(df.schema(1), dataType)
dataType match {
case CharType(len) =>
// char value will be padded if (<= len) or trimmed if (> len)
val fixLenStr = if (insertVal != null) {
insertVal.take(len).padTo(len, " ").mkString
} else null
checkAnswer(df, Row("1", fixLenStr))
case VarcharType(len) =>
// varchar value will be remained if (<= len) or trimmed if (> len)
val varLenStrWithUpperBound = if (insertVal != null) {
insertVal.take(len)
} else null
checkAnswer(df, Row("1", varLenStrWithUpperBound))
}
}
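  // Illustration of checkPlainResult (consistent with the branches above): with dt = "CHAR(5)"
  // and insertVal = "a", the expected value is "a    " (padded to 5 chars), while with
  // dt = "VARCHAR(5)" it stays "a"; values longer than the length keep only the first 5 chars.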
test("apply char padding/trimming and varchar trimming: top-level columns") {
Seq("CHAR(5)", "VARCHAR(5)").foreach { typ =>
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c $typ) USING $format")
(0 to 5).map(n => "a" + " " * n).foreach { v =>
sql(s"INSERT OVERWRITE t VALUES ('1', '$v')")
checkPlainResult(spark.table("t"), typ, v)
}
sql("INSERT OVERWRITE t VALUES ('1', null)")
checkPlainResult(spark.table("t"), typ, null)
}
}
}
test("char type values should be padded or trimmed: partitioned columns") {
// via dynamic partitioned columns
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY (c)")
(0 to 5).map(n => "a" + " " * n).foreach { v =>
sql(s"INSERT OVERWRITE t VALUES ('1', '$v')")
checkPlainResult(spark.table("t"), "CHAR(5)", v)
}
}
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY (c)")
(0 to 5).map(n => "a" + " " * n).foreach { v =>
// via dynamic partitioned columns with drop partition command
sql(s"INSERT INTO t VALUES ('1', '$v')")
checkPlainResult(spark.table("t"), "CHAR(5)", v)
sql(s"ALTER TABLE t DROP PARTITION(c='a')")
checkAnswer(spark.table("t"), Nil)
// via static partitioned columns with drop partition command
sql(s"INSERT INTO t PARTITION (c ='$v') VALUES ('1')")
checkPlainResult(spark.table("t"), "CHAR(5)", v)
sql(s"ALTER TABLE t DROP PARTITION(c='a')")
checkAnswer(spark.table("t"), Nil)
}
}
}
test("varchar type values length check and trim: partitioned columns") {
(0 to 5).foreach { n =>
      // SPARK-34192: we need to create a new table for each round of the test because
      // trailing spaces in a partition column are treated differently across databases.
      // This is because MySQL and Derby (used in tests) consider 'a' = 'a ',
      // whereas others (Postgres, Oracle) don't exhibit this problem.
// see more at:
// https://issues.apache.org/jira/browse/HIVE-13618
// https://issues.apache.org/jira/browse/SPARK-34192
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c VARCHAR(5)) USING $format PARTITIONED BY (c)")
val v = "a" + " " * n
// via dynamic partitioned columns
sql(s"INSERT INTO t VALUES ('1', '$v')")
checkPlainResult(spark.table("t"), "VARCHAR(5)", v)
sql(s"ALTER TABLE t DROP PARTITION(c='$v')")
checkAnswer(spark.table("t"), Nil)
// via static partitioned columns
sql(s"INSERT INTO t PARTITION (c='$v') VALUES ('1')")
checkPlainResult(spark.table("t"), "VARCHAR(5)", v)
sql(s"ALTER TABLE t DROP PARTITION(c='$v')")
checkAnswer(spark.table("t"), Nil)
}
}
}
test("oversize char/varchar values for alter table partition operations") {
Seq("CHAR(5)", "VARCHAR(5)").foreach { typ =>
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c $typ) USING $format PARTITIONED BY (c)")
Seq("ADD", "DROP").foreach { op =>
val e = intercept[RuntimeException](sql(s"ALTER TABLE t $op PARTITION(c='abcdef')"))
assert(e.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
}
val e1 = intercept[RuntimeException] {
sql(s"ALTER TABLE t PARTITION (c='abcdef') RENAME TO PARTITION (c='2')")
}
assert(e1.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
val e2 = intercept[RuntimeException] {
sql(s"ALTER TABLE t PARTITION (c='1') RENAME TO PARTITION (c='abcdef')")
}
assert(e2.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
}
}
}
test("SPARK-34233: char/varchar with null value for partitioned columns") {
Seq("CHAR(5)", "VARCHAR(5)").foreach { typ =>
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c $typ) USING $format PARTITIONED BY (c)")
sql("INSERT INTO t VALUES ('1', null)")
checkPlainResult(spark.table("t"), typ, null)
sql("INSERT OVERWRITE t VALUES ('1', null)")
checkPlainResult(spark.table("t"), typ, null)
sql("INSERT OVERWRITE t PARTITION (c=null) VALUES ('1')")
checkPlainResult(spark.table("t"), typ, null)
sql("ALTER TABLE t DROP PARTITION(c=null)")
checkAnswer(spark.table("t"), Nil)
}
}
}
test("char/varchar type values length check: partitioned columns of other types") {
Seq("CHAR(5)", "VARCHAR(5)").foreach { typ =>
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c $typ) USING $format PARTITIONED BY (c)")
Seq(1, 10, 100, 1000, 10000).foreach { v =>
sql(s"INSERT OVERWRITE t VALUES ('1', $v)")
checkPlainResult(spark.table("t"), typ, v.toString)
sql(s"ALTER TABLE t DROP PARTITION(c=$v)")
checkAnswer(spark.table("t"), Nil)
}
val e1 = intercept[SparkException](sql(s"INSERT OVERWRITE t VALUES ('1', 100000)"))
assert(e1.getCause.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
val e2 = intercept[RuntimeException](sql("ALTER TABLE t DROP PARTITION(c=100000)"))
assert(e2.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
}
}
}
test("char type values should be padded: nested in struct") {
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c STRUCT<c: CHAR(5)>) USING $format")
sql("INSERT INTO t VALUES ('1', struct('a'))")
checkAnswer(spark.table("t"), Row("1", Row("a" + " " * 4)))
checkColType(spark.table("t").schema(1), new StructType().add("c", CharType(5)))
sql("INSERT OVERWRITE t VALUES ('1', null)")
checkAnswer(spark.table("t"), Row("1", null))
sql("INSERT OVERWRITE t VALUES ('1', struct(null))")
checkAnswer(spark.table("t"), Row("1", Row(null)))
}
}
test("char type values should be padded: nested in array") {
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c ARRAY<CHAR(5)>) USING $format")
sql("INSERT INTO t VALUES ('1', array('a', 'ab'))")
checkAnswer(spark.table("t"), Row("1", Seq("a" + " " * 4, "ab" + " " * 3)))
checkColType(spark.table("t").schema(1), ArrayType(CharType(5)))
sql("INSERT OVERWRITE t VALUES ('1', null)")
checkAnswer(spark.table("t"), Row("1", null))
sql("INSERT OVERWRITE t VALUES ('1', array(null))")
checkAnswer(spark.table("t"), Row("1", Seq(null)))
}
}
test("char type values should be padded: nested in map key") {
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c MAP<CHAR(5), STRING>) USING $format")
sql("INSERT INTO t VALUES ('1', map('a', 'ab'))")
checkAnswer(spark.table("t"), Row("1", Map(("a" + " " * 4, "ab"))))
checkColType(spark.table("t").schema(1), MapType(CharType(5), StringType))
sql("INSERT OVERWRITE t VALUES ('1', null)")
checkAnswer(spark.table("t"), Row("1", null))
}
}
test("char type values should be padded: nested in map value") {
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c MAP<STRING, CHAR(5)>) USING $format")
sql("INSERT INTO t VALUES ('1', map('a', 'ab'))")
checkAnswer(spark.table("t"), Row("1", Map(("a", "ab" + " " * 3))))
checkColType(spark.table("t").schema(1), MapType(StringType, CharType(5)))
sql("INSERT OVERWRITE t VALUES ('1', null)")
checkAnswer(spark.table("t"), Row("1", null))
sql("INSERT OVERWRITE t VALUES ('1', map('a', null))")
checkAnswer(spark.table("t"), Row("1", Map("a" -> null)))
}
}
test("char type values should be padded: nested in both map key and value") {
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c MAP<CHAR(5), CHAR(10)>) USING $format")
sql("INSERT INTO t VALUES ('1', map('a', 'ab'))")
checkAnswer(spark.table("t"), Row("1", Map(("a" + " " * 4, "ab" + " " * 8))))
checkColType(spark.table("t").schema(1), MapType(CharType(5), CharType(10)))
sql("INSERT OVERWRITE t VALUES ('1', null)")
checkAnswer(spark.table("t"), Row("1", null))
}
}
test("char type values should be padded: nested in struct of array") {
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c STRUCT<c: ARRAY<CHAR(5)>>) USING $format")
sql("INSERT INTO t VALUES ('1', struct(array('a', 'ab')))")
checkAnswer(spark.table("t"), Row("1", Row(Seq("a" + " " * 4, "ab" + " " * 3))))
checkColType(spark.table("t").schema(1),
new StructType().add("c", ArrayType(CharType(5))))
sql("INSERT OVERWRITE t VALUES ('1', null)")
checkAnswer(spark.table("t"), Row("1", null))
sql("INSERT OVERWRITE t VALUES ('1', struct(null))")
checkAnswer(spark.table("t"), Row("1", Row(null)))
sql("INSERT OVERWRITE t VALUES ('1', struct(array(null)))")
checkAnswer(spark.table("t"), Row("1", Row(Seq(null))))
}
}
test("char type values should be padded: nested in array of struct") {
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c ARRAY<STRUCT<c: CHAR(5)>>) USING $format")
sql("INSERT INTO t VALUES ('1', array(struct('a'), struct('ab')))")
checkAnswer(spark.table("t"), Row("1", Seq(Row("a" + " " * 4), Row("ab" + " " * 3))))
checkColType(spark.table("t").schema(1),
ArrayType(new StructType().add("c", CharType(5))))
sql("INSERT OVERWRITE t VALUES ('1', null)")
checkAnswer(spark.table("t"), Row("1", null))
sql("INSERT OVERWRITE t VALUES ('1', array(null))")
checkAnswer(spark.table("t"), Row("1", Seq(null)))
sql("INSERT OVERWRITE t VALUES ('1', array(struct(null)))")
checkAnswer(spark.table("t"), Row("1", Seq(Row(null))))
}
}
test("char type values should be padded: nested in array of array") {
withTable("t") {
sql(s"CREATE TABLE t(i STRING, c ARRAY<ARRAY<CHAR(5)>>) USING $format")
sql("INSERT INTO t VALUES ('1', array(array('a', 'ab')))")
checkAnswer(spark.table("t"), Row("1", Seq(Seq("a" + " " * 4, "ab" + " " * 3))))
checkColType(spark.table("t").schema(1), ArrayType(ArrayType(CharType(5))))
sql("INSERT OVERWRITE t VALUES ('1', null)")
checkAnswer(spark.table("t"), Row("1", null))
sql("INSERT OVERWRITE t VALUES ('1', array(null))")
checkAnswer(spark.table("t"), Row("1", Seq(null)))
sql("INSERT OVERWRITE t VALUES ('1', array(array(null)))")
checkAnswer(spark.table("t"), Row("1", Seq(Seq(null))))
}
}
private def testTableWrite(f: String => Unit): Unit = {
withTable("t") { f("char") }
withTable("t") { f("varchar") }
}
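  // Usage sketch: testTableWrite { typeName => sql(s"CREATE TABLE t(c $typeName(5)) USING $format"); ... }
  // runs the same body twice, once with "char" and once with "varchar", each against a fresh table "t".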
test("length check for input string values: top-level columns") {
testTableWrite { typeName =>
sql(s"CREATE TABLE t(c $typeName(5)) USING $format")
sql("INSERT INTO t VALUES (null)")
checkAnswer(spark.table("t"), Row(null))
val e = intercept[SparkException](sql("INSERT INTO t VALUES ('123456')"))
assert(e.getCause.getMessage.contains(s"Exceeds char/varchar type length limitation: 5"))
}
}
test("length check for input string values: partitioned columns") {
    // DS V2 doesn't support partitioned tables.
if (!conf.contains(SQLConf.DEFAULT_CATALOG.key)) {
testTableWrite { typeName =>
sql(s"CREATE TABLE t(i INT, c $typeName(5)) USING $format PARTITIONED BY (c)")
sql("INSERT INTO t VALUES (1, null)")
checkAnswer(spark.table("t"), Row(1, null))
val e = intercept[SparkException](sql("INSERT INTO t VALUES (1, '123456')"))
assert(e.getCause.getMessage.contains(s"Exceeds char/varchar type length limitation: 5"))
}
}
}
test("length check for input string values: nested in struct") {
testTableWrite { typeName =>
sql(s"CREATE TABLE t(c STRUCT<c: $typeName(5)>) USING $format")
sql("INSERT INTO t SELECT struct(null)")
checkAnswer(spark.table("t"), Row(Row(null)))
val e = intercept[SparkException](sql("INSERT INTO t SELECT struct('123456')"))
assert(e.getCause.getMessage.contains(s"Exceeds char/varchar type length limitation: 5"))
}
}
test("length check for input string values: nested in array") {
testTableWrite { typeName =>
sql(s"CREATE TABLE t(c ARRAY<$typeName(5)>) USING $format")
sql("INSERT INTO t VALUES (array(null))")
checkAnswer(spark.table("t"), Row(Seq(null)))
val e = intercept[SparkException](sql("INSERT INTO t VALUES (array('a', '123456'))"))
assert(e.getCause.getMessage.contains(s"Exceeds char/varchar type length limitation: 5"))
}
}
test("length check for input string values: nested in map key") {
testTableWrite { typeName =>
sql(s"CREATE TABLE t(c MAP<$typeName(5), STRING>) USING $format")
val e = intercept[SparkException](sql("INSERT INTO t VALUES (map('123456', 'a'))"))
assert(e.getCause.getMessage.contains(s"Exceeds char/varchar type length limitation: 5"))
}
}
test("length check for input string values: nested in map value") {
testTableWrite { typeName =>
sql(s"CREATE TABLE t(c MAP<STRING, $typeName(5)>) USING $format")
sql("INSERT INTO t VALUES (map('a', null))")
checkAnswer(spark.table("t"), Row(Map("a" -> null)))
val e = intercept[SparkException](sql("INSERT INTO t VALUES (map('a', '123456'))"))
assert(e.getCause.getMessage.contains(s"Exceeds char/varchar type length limitation: 5"))
}
}
test("length check for input string values: nested in both map key and value") {
testTableWrite { typeName =>
sql(s"CREATE TABLE t(c MAP<$typeName(5), $typeName(5)>) USING $format")
val e1 = intercept[SparkException](sql("INSERT INTO t VALUES (map('123456', 'a'))"))
assert(e1.getCause.getMessage.contains(s"Exceeds char/varchar type length limitation: 5"))
val e2 = intercept[SparkException](sql("INSERT INTO t VALUES (map('a', '123456'))"))
assert(e2.getCause.getMessage.contains(s"Exceeds char/varchar type length limitation: 5"))
}
}
test("length check for input string values: nested in struct of array") {
testTableWrite { typeName =>
sql(s"CREATE TABLE t(c STRUCT<c: ARRAY<$typeName(5)>>) USING $format")
sql("INSERT INTO t SELECT struct(array(null))")
checkAnswer(spark.table("t"), Row(Row(Seq(null))))
val e = intercept[SparkException](sql("INSERT INTO t SELECT struct(array('123456'))"))
assert(e.getCause.getMessage.contains(s"Exceeds char/varchar type length limitation: 5"))
}
}
test("length check for input string values: nested in array of struct") {
testTableWrite { typeName =>
sql(s"CREATE TABLE t(c ARRAY<STRUCT<c: $typeName(5)>>) USING $format")
sql("INSERT INTO t VALUES (array(struct(null)))")
checkAnswer(spark.table("t"), Row(Seq(Row(null))))
val e = intercept[SparkException](sql("INSERT INTO t VALUES (array(struct('123456')))"))
assert(e.getCause.getMessage.contains(s"Exceeds char/varchar type length limitation: 5"))
}
}
test("length check for input string values: nested in array of array") {
testTableWrite { typeName =>
sql(s"CREATE TABLE t(c ARRAY<ARRAY<$typeName(5)>>) USING $format")
sql("INSERT INTO t VALUES (array(array(null)))")
checkAnswer(spark.table("t"), Row(Seq(Seq(null))))
val e = intercept[SparkException](sql("INSERT INTO t VALUES (array(array('123456')))"))
assert(e.getCause.getMessage.contains(s"Exceeds char/varchar type length limitation: 5"))
}
}
test("length check for input string values: with trailing spaces") {
withTable("t") {
sql(s"CREATE TABLE t(c1 CHAR(5), c2 VARCHAR(5)) USING $format")
sql("INSERT INTO t VALUES ('12 ', '12 ')")
sql("INSERT INTO t VALUES ('1234 ', '1234 ')")
checkAnswer(spark.table("t"), Seq(
Row("12" + " " * 3, "12 "),
Row("1234 ", "1234 ")))
}
}
test("length check for input string values: with implicit cast") {
withTable("t") {
sql(s"CREATE TABLE t(c1 CHAR(5), c2 VARCHAR(5)) USING $format")
sql("INSERT INTO t VALUES (1234, 1234)")
checkAnswer(spark.table("t"), Row("1234 ", "1234"))
val e1 = intercept[SparkException](sql("INSERT INTO t VALUES (123456, 1)"))
assert(e1.getCause.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
val e2 = intercept[SparkException](sql("INSERT INTO t VALUES (1, 123456)"))
assert(e2.getCause.getMessage.contains("Exceeds char/varchar type length limitation: 5"))
}
}
private def testConditions(df: DataFrame, conditions: Seq[(String, Boolean)]): Unit = {
checkAnswer(df.selectExpr(conditions.map(_._1): _*), Row.fromSeq(conditions.map(_._2)))
}
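  // For example, testConditions(df, Seq(("c1 = 'a'", true))) asserts that evaluating the
  // expression "c1 = 'a'" over df returns Row(true).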
test("char type comparison: top-level columns") {
withTable("t") {
sql(s"CREATE TABLE t(c1 CHAR(2), c2 CHAR(5)) USING $format")
sql("INSERT INTO t VALUES ('a', 'a')")
testConditions(spark.table("t"), Seq(
("c1 = 'a'", true),
("'a' = c1", true),
("c1 = 'a '", true),
("c1 > 'a'", false),
("c1 IN ('a', 'b')", true),
("c1 = c2", true),
("c1 < c2", false),
("c1 IN (c2)", true),
("c1 <=> null", false)))
}
}
test("char type comparison: partitioned columns") {
withTable("t") {
sql(s"CREATE TABLE t(i INT, c1 CHAR(2), c2 CHAR(5)) USING $format PARTITIONED BY (c1, c2)")
sql("INSERT INTO t VALUES (1, 'a', 'a')")
testConditions(spark.table("t"), Seq(
("c1 = 'a'", true),
("'a' = c1", true),
("c1 = 'a '", true),
("c1 > 'a'", false),
("c1 IN ('a', 'b')", true),
("c1 = c2", true),
("c1 < c2", false),
("c1 IN (c2)", true),
("c1 <=> null", false)))
}
}
private def testNullConditions(df: DataFrame, conditions: Seq[String]): Unit = {
conditions.foreach { cond =>
checkAnswer(df.selectExpr(cond), Row(null))
}
}
test("SPARK-34233: char type comparison with null values") {
val conditions = Seq("c = null", "c IN ('e', null)", "c IN (null)")
withTable("t") {
sql(s"CREATE TABLE t(c CHAR(2)) USING $format")
sql("INSERT INTO t VALUES ('a')")
testNullConditions(spark.table("t"), conditions)
}
withTable("t") {
sql(s"CREATE TABLE t(i INT, c CHAR(2)) USING $format PARTITIONED BY (c)")
sql("INSERT INTO t VALUES (1, 'a')")
testNullConditions(spark.table("t"), conditions)
}
}
test("char type comparison: partition pruning") {
withTable("t") {
sql(s"CREATE TABLE t(i INT, c1 CHAR(2), c2 VARCHAR(5)) USING $format PARTITIONED BY (c1, c2)")
sql("INSERT INTO t VALUES (1, 'a', 'a')")
Seq(("c1 = 'a'", true),
("'a' = c1", true),
("c1 = 'a '", true),
("c1 > 'a'", false),
("c1 IN ('a', 'b')", true),
("c2 = 'a '", false),
("c2 = 'a'", true),
("c2 IN ('a', 'b')", true)).foreach { case (con, res) =>
val df = spark.table("t")
withClue(con) {
checkAnswer(df.where(con), df.where(res.toString))
}
}
}
}
test("char type comparison: join") {
withTable("t1", "t2") {
sql(s"CREATE TABLE t1(c CHAR(2)) USING $format")
sql(s"CREATE TABLE t2(c CHAR(5)) USING $format")
sql("INSERT INTO t1 VALUES ('a')")
sql("INSERT INTO t2 VALUES ('a')")
checkAnswer(sql("SELECT t1.c FROM t1 JOIN t2 ON t1.c = t2.c"), Row("a "))
}
}
test("char type comparison: nested in struct") {
withTable("t") {
sql(s"CREATE TABLE t(c1 STRUCT<c: CHAR(2)>, c2 STRUCT<c: CHAR(5)>) USING $format")
sql("INSERT INTO t VALUES (struct('a'), struct('a'))")
testConditions(spark.table("t"), Seq(
("c1 = c2", true),
("c1 < c2", false),
("c1 IN (c2)", true)))
}
}
test("char type comparison: nested in array") {
withTable("t") {
sql(s"CREATE TABLE t(c1 ARRAY<CHAR(2)>, c2 ARRAY<CHAR(5)>) USING $format")
sql("INSERT INTO t VALUES (array('a', 'b'), array('a', 'b'))")
testConditions(spark.table("t"), Seq(
("c1 = c2", true),
("c1 < c2", false),
("c1 IN (c2)", true)))
}
}
test("char type comparison: nested in struct of array") {
withTable("t") {
sql("CREATE TABLE t(c1 STRUCT<a: ARRAY<CHAR(2)>>, c2 STRUCT<a: ARRAY<CHAR(5)>>) " +
s"USING $format")
sql("INSERT INTO t VALUES (struct(array('a', 'b')), struct(array('a', 'b')))")
testConditions(spark.table("t"), Seq(
("c1 = c2", true),
("c1 < c2", false),
("c1 IN (c2)", true)))
}
}
test("char type comparison: nested in array of struct") {
withTable("t") {
sql("CREATE TABLE t(c1 ARRAY<STRUCT<c: CHAR(2)>>, c2 ARRAY<STRUCT<c: CHAR(5)>>) " +
s"USING $format")
sql("INSERT INTO t VALUES (array(struct('a')), array(struct('a')))")
testConditions(spark.table("t"), Seq(
("c1 = c2", true),
("c1 < c2", false),
("c1 IN (c2)", true)))
}
}
test("char type comparison: nested in array of array") {
withTable("t") {
sql("CREATE TABLE t(c1 ARRAY<ARRAY<CHAR(2)>>, c2 ARRAY<ARRAY<CHAR(5)>>) " +
s"USING $format")
sql("INSERT INTO t VALUES (array(array('a')), array(array('a')))")
testConditions(spark.table("t"), Seq(
("c1 = c2", true),
("c1 < c2", false),
("c1 IN (c2)", true)))
}
}
test("SPARK-33892: DESCRIBE TABLE w/ char/varchar") {
withTable("t") {
sql(s"CREATE TABLE t(v VARCHAR(3), c CHAR(5)) USING $format")
checkAnswer(sql("desc t").selectExpr("data_type").where("data_type like '%char%'"),
Seq(Row("char(5)"), Row("varchar(3)")))
}
}
test("SPARK-34003: fix char/varchar fails w/ both group by and order by ") {
withTable("t") {
sql(s"CREATE TABLE t(v VARCHAR(3), i INT) USING $format")
sql("INSERT INTO t VALUES ('c', 1)")
checkAnswer(sql("SELECT v, sum(i) FROM t GROUP BY v ORDER BY v"), Row("c", 1))
}
}
test("SPARK-34003: fix char/varchar fails w/ order by functions") {
withTable("t") {
sql(s"CREATE TABLE t(v VARCHAR(3), i INT) USING $format")
sql("INSERT INTO t VALUES ('c', 1)")
checkAnswer(sql("SELECT substr(v, 1, 2), sum(i) FROM t GROUP BY v ORDER BY substr(v, 1, 2)"),
Row("c", 1))
checkAnswer(sql("SELECT sum(i) FROM t GROUP BY v ORDER BY substr(v, 1, 2)"),
Row(1))
}
}
test("SPARK-34114: varchar type will strip tailing spaces to certain length at write time") {
withTable("t") {
sql(s"CREATE TABLE t(v VARCHAR(3)) USING $format")
sql("INSERT INTO t VALUES ('c ')")
checkAnswer(spark.table("t"), Row("c "))
}
}
test("SPARK-34114: varchar type will remain the value length with spaces at read time") {
withTable("t") {
sql(s"CREATE TABLE t(v VARCHAR(3)) USING $format")
sql("INSERT INTO t VALUES ('c ')")
checkAnswer(spark.table("t"), Row("c "))
}
}
test("SPARK-34833: right-padding applied correctly for correlated subqueries - join keys") {
withTable("t1", "t2") {
sql(s"CREATE TABLE t1(v VARCHAR(3), c CHAR(5)) USING $format")
sql(s"CREATE TABLE t2(v VARCHAR(5), c CHAR(8)) USING $format")
sql("INSERT INTO t1 VALUES ('c', 'b')")
sql("INSERT INTO t2 VALUES ('a', 'b')")
Seq("t1.c = t2.c", "t2.c = t1.c",
"t1.c = 'b'", "'b' = t1.c", "t1.c = 'b '", "'b ' = t1.c",
"t1.c = 'b '", "'b ' = t1.c").foreach { predicate =>
checkAnswer(sql(
s"""
|SELECT v FROM t1
|WHERE 'a' IN (SELECT v FROM t2 WHERE $predicate)
""".stripMargin),
Row("c"))
}
}
}
test("SPARK-34833: right-padding applied correctly for correlated subqueries - other preds") {
withTable("t") {
sql(s"CREATE TABLE t(c0 INT, c1 CHAR(5), c2 CHAR(7)) USING $format")
sql("INSERT INTO t VALUES (1, 'abc', 'abc')")
Seq("c1 = 'abc'", "'abc' = c1", "c1 = 'abc '", "'abc ' = c1",
"c1 = 'abc '", "'abc ' = c1", "c1 = c2", "c2 = c1",
"c1 IN ('xxx', 'abc', 'xxxxx')", "c1 IN ('xxx', 'abc ', 'xxxxx')",
"c1 IN ('xxx', 'abc ', 'xxxxx')",
"c1 IN (c2)", "c2 IN (c1)").foreach { predicate =>
checkAnswer(sql(
s"""
|SELECT c0 FROM t t1
|WHERE (
| SELECT count(*) AS c
| FROM t
| WHERE c0 = t1.c0 AND $predicate
|) > 0
""".stripMargin),
Row(1))
}
}
}
}
// Some basic char/varchar tests which don't rely on a table implementation.
class BasicCharVarcharTestSuite extends QueryTest with SharedSparkSession {
import testImplicits._
test("user-specified schema in cast") {
def assertNoCharType(df: DataFrame): Unit = {
checkAnswer(df, Row("0"))
assert(df.schema.map(_.dataType) == Seq(StringType))
}
val logAppender = new LogAppender("The Spark cast operator does not support char/varchar" +
" type and simply treats them as string type. Please use string type directly to avoid" +
" confusion.")
withLogAppender(logAppender) {
assertNoCharType(spark.range(1).select($"id".cast("char(5)")))
assertNoCharType(spark.range(1).select($"id".cast(CharType(5))))
assertNoCharType(spark.range(1).selectExpr("CAST(id AS CHAR(5))"))
assertNoCharType(sql("SELECT CAST(id AS CHAR(5)) FROM range(1)"))
}
}
def failWithInvalidCharUsage[T](fn: => T): Unit = {
val e = intercept[AnalysisException](fn)
assert(e.getMessage contains "char/varchar type can only be used in the table schema")
}
test("invalidate char/varchar in functions") {
failWithInvalidCharUsage(sql("""SELECT from_json('{"a": "str"}', 'a CHAR(5)')"""))
withSQLConf((SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING.key, "true")) {
val df = sql("""SELECT from_json('{"a": "str"}', 'a CHAR(5)')""")
checkAnswer(df, Row(Row("str")))
val schema = df.schema.head.dataType.asInstanceOf[StructType]
assert(schema.map(_.dataType) == Seq(StringType))
}
}
test("invalidate char/varchar in SparkSession createDataframe") {
val df = spark.range(10).map(_.toString).toDF()
val schema = new StructType().add("id", CharType(5))
failWithInvalidCharUsage(spark.createDataFrame(df.collectAsList(), schema))
failWithInvalidCharUsage(spark.createDataFrame(df.rdd, schema))
failWithInvalidCharUsage(spark.createDataFrame(df.toJavaRDD, schema))
withSQLConf((SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING.key, "true")) {
val df1 = spark.createDataFrame(df.collectAsList(), schema)
checkAnswer(df1, df)
assert(df1.schema.head.dataType === StringType)
}
}
test("invalidate char/varchar in spark.read.schema") {
failWithInvalidCharUsage(spark.read.schema(new StructType().add("id", CharType(5))))
failWithInvalidCharUsage(spark.read.schema("id char(5)"))
withSQLConf((SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING.key, "true")) {
val ds = spark.range(10).map(_.toString)
val df1 = spark.read.schema(new StructType().add("id", CharType(5))).csv(ds)
assert(df1.schema.map(_.dataType) == Seq(StringType))
val df2 = spark.read.schema("id char(5)").csv(ds)
assert(df2.schema.map(_.dataType) == Seq(StringType))
def checkSchema(df: DataFrame): Unit = {
val schemas = df.queryExecution.analyzed.collect {
case l: LogicalRelation => l.relation.schema
case d: DataSourceV2Relation => d.table.schema()
}
assert(schemas.length == 1)
assert(schemas.head.map(_.dataType) == Seq(StringType))
}
// user-specified schema in DataFrameReader: DSV1
checkSchema(spark.read.schema(new StructType().add("id", CharType(5)))
.format(classOf[SimpleInsertSource].getName).load())
checkSchema(spark.read.schema("id char(5)")
.format(classOf[SimpleInsertSource].getName).load())
// user-specified schema in DataFrameReader: DSV2
checkSchema(spark.read.schema(new StructType().add("id", CharType(5)))
.format(classOf[SchemaRequiredDataSource].getName).load())
checkSchema(spark.read.schema("id char(5)")
.format(classOf[SchemaRequiredDataSource].getName).load())
}
}
test("invalidate char/varchar in udf's result type") {
failWithInvalidCharUsage(spark.udf.register("testchar", () => "B", VarcharType(1)))
failWithInvalidCharUsage(spark.udf.register("testchar2", (x: String) => x, VarcharType(1)))
withSQLConf((SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING.key, "true")) {
spark.udf.register("testchar", () => "B", VarcharType(1))
spark.udf.register("testchar2", (x: String) => x, VarcharType(1))
val df1 = spark.sql("select testchar()")
checkAnswer(df1, Row("B"))
assert(df1.schema.head.dataType === StringType)
val df2 = spark.sql("select testchar2('abc')")
checkAnswer(df2, Row("abc"))
assert(df2.schema.head.dataType === StringType)
}
}
test("invalidate char/varchar in spark.readStream.schema") {
failWithInvalidCharUsage(spark.readStream.schema(new StructType().add("id", CharType(5))))
failWithInvalidCharUsage(spark.readStream.schema("id char(5)"))
withSQLConf((SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING.key, "true")) {
withTempPath { dir =>
spark.range(2).write.save(dir.toString)
val df1 = spark.readStream.schema(new StructType().add("id", CharType(5)))
.load(dir.toString)
assert(df1.schema.map(_.dataType) == Seq(StringType))
val df2 = spark.readStream.schema("id char(5)").load(dir.toString)
assert(df2.schema.map(_.dataType) == Seq(StringType))
}
}
}
}
class FileSourceCharVarcharTestSuite extends CharVarcharTestSuite with SharedSparkSession {
override def format: String = "parquet"
override protected def sparkConf: SparkConf = {
super.sparkConf.set(SQLConf.USE_V1_SOURCE_LIST, "parquet")
}
test("create table w/ location and fit length values") {
Seq("char", "varchar").foreach { typ =>
withTempPath { dir =>
withTable("t") {
sql("SELECT '12' as col").write.format(format).save(dir.toString)
sql(s"CREATE TABLE t (col $typ(2)) using $format LOCATION '$dir'")
val df = sql("select * from t")
checkAnswer(sql("select * from t"), Row("12"))
}
}
}
}
test("create table w/ location and over length values") {
Seq("char", "varchar").foreach { typ =>
withTempPath { dir =>
withTable("t") {
sql("SELECT '123456' as col").write.format(format).save(dir.toString)
sql(s"CREATE TABLE t (col $typ(2)) using $format LOCATION '$dir'")
checkAnswer(sql("select * from t"), Row("123456"))
}
}
}
}
test("SPARK-35359: create table and insert data over length values") {
Seq("char", "varchar").foreach { typ =>
withSQLConf((SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING.key, "true")) {
withTable("t") {
sql(s"CREATE TABLE t (col $typ(2)) using $format")
sql("INSERT INTO t SELECT 'aaa'")
checkAnswer(sql("select * from t"), Row("aaa"))
}
}
}
}
test("alter table set location w/ fit length values") {
Seq("char", "varchar").foreach { typ =>
withTempPath { dir =>
withTable("t") {
sql("SELECT '12' as col").write.format(format).save(dir.toString)
sql(s"CREATE TABLE t (col $typ(2)) using $format")
sql(s"ALTER TABLE t SET LOCATION '$dir'")
checkAnswer(spark.table("t"), Row("12"))
}
}
}
}
test("alter table set location w/ over length values") {
Seq("char", "varchar").foreach { typ =>
withTempPath { dir =>
withTable("t") {
sql("SELECT '123456' as col").write.format(format).save(dir.toString)
sql(s"CREATE TABLE t (col $typ(2)) using $format")
sql(s"ALTER TABLE t SET LOCATION '$dir'")
checkAnswer(spark.table("t"), Row("123456"))
}
}
}
}
test("SPARK-34114: should not trim right for read-side length check and char padding") {
Seq("char", "varchar").foreach { typ =>
withTempPath { dir =>
withTable("t") {
sql("SELECT '12 ' as col").write.format(format).save(dir.toString)
sql(s"CREATE TABLE t (col $typ(2)) using $format LOCATION '$dir'")
checkAnswer(spark.table("t"), Row("12 "))
}
}
}
}
}
class DSV2CharVarcharTestSuite extends CharVarcharTestSuite
with SharedSparkSession {
override def format: String = "foo"
protected override def sparkConf = {
super.sparkConf
.set("spark.sql.catalog.testcat", classOf[InMemoryPartitionTableCatalog].getName)
.set(SQLConf.DEFAULT_CATALOG.key, "testcat")
}
}
| holdenk/spark | sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala | Scala | apache-2.0 | 35,623 |
/*
* Copyright 2012 Jahziah Wagner <jahziah[dot]wagner[at]gmail[dot]com>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.openlobby.battle
trait BattleModelObserver {
}
| jahwag/OpenLobby | modules/Battle/src/main/scala/com/openlobby/battle/BattleModelObserver.scala | Scala | apache-2.0 | 700 |
package org.scalawiki.bots.stat
import org.scalawiki.dto.filter.{AllRevisionsFilter, RevisionFilter}
import org.scalawiki.dto.{Page, Revision}
import org.xwiki.blame.AnnotatedElement
class RevisionAnnotation(val page: Page, revFilter: RevisionFilter = AllRevisionsFilter) {
val revisions = revFilter(page.revisions)
val annotation: Option[Annotation] = Annotation.create(page)
def pageAnnotatedElements: Seq[AnnotatedElement[Revision, String]] =
annotation.fold(Seq.empty[AnnotatedElement[Revision, String]])(_.annotatedElements)
val annotatedElements = pageAnnotatedElements
.filter(element => revFilter.predicate(element.getRevision))
val byRevisionContent: Map[Revision, Seq[String]] = annotatedElements.groupBy(_.getRevision).mapValues(_.map(_.getElement)).toMap
val byUserContent: Map[String, Seq[String]] = annotatedElements.groupBy(_.getRevision.user.flatMap(_.name) getOrElse "").mapValues(_.map(_.getElement)).toMap
}
| intracer/scalawiki | scalawiki-bots/src/main/scala/org/scalawiki/bots/stat/RevisionAnnotation.scala | Scala | apache-2.0 | 955 |
package edu.depauw.util
class Hello(thing: String) {
def greet = "Hello, " + thing
}
object Main {
def main(args: Array[String]) {
val hw = new Hello("World")
println(hw.greet)
}
}
| bhoward/EscalatorOld | Prototype/src/main/scala/edu/depauw/util/Hello.scala | Scala | apache-2.0 | 197 |
package org.openmole.core.workflow.puzzle
import org.openmole.core.context.Val
import org.openmole.core.workflow.dsl._
import org.openmole.core.workflow.execution.LocalEnvironment
import org.openmole.core.workflow.mole._
import org.openmole.core.workflow.task._
import org.openmole.core.workflow.test.TestHook
import org.openmole.core.workflow.validation.Validation
import org.scalatest._
class PuzzleSpec extends FlatSpec with Matchers {
import org.openmole.core.workflow.test.Stubs._
"A single task" should "be a valid mole" in {
val t = EmptyTask()
t.run()
}
"HList containing dsl container" should "be usable like a dsl container" in {
import shapeless._
val task = EmptyTask()
val test = DSLContainer(task, ()) :: 9 :: HNil
(test: DSLContainer[_]).run()
(test: MoleExecution).run()
test.run()
test on LocalEnvironment(1)
}
"Strain" should "pass a val through a single of task" in {
@volatile var lastExecuted = false
val i = Val[Int]
val first = EmptyTask() set (outputs += i, i := 1)
val last = FromContextTask("last") { p ⇒
import p._
context(i) should equal(1)
lastExecuted = true
context
} set (inputs += i)
val mole = first -- Strain(EmptyTask()) -- last
mole run
lastExecuted should equal(true)
}
"Strain" should "pass a val through a sequence of tasks" in {
@volatile var lastExecuted = false
val i = Val[Int]
val first = EmptyTask() set (outputs += i, i := 1)
val last = FromContextTask("last") { p ⇒
import p._
context(i) should equal(1)
lastExecuted = true
context
} set (inputs += i)
val mole = first -- Strain(EmptyTask() -- EmptyTask()) -- last
mole run
lastExecuted should equal(true)
}
"outputs method" should "return the dsl outputs" in {
val i = Val[Int]
val j = Val[String]
val t = EmptyTask() set (outputs += (i, j))
val o = (EmptyTask() -- t).outputs.toSet
o.contains(i) should equal(true)
o.contains(j) should equal(true)
}
"DSL container" should "be hookable" in {
@volatile var hookExecuted = false
val i = Val[Int]
val first = EmptyTask() set (outputs += i, i := 1)
val last = EmptyTask()
val container = DSLContainer(first, (), output = Some(first))
val h = TestHook { context ⇒ hookExecuted = true }
(container hook h) run
hookExecuted should equal(true)
}
"DSL" should "be compatible with script generation" in {
def dsl(i: Int): DSL = EmptyTask()
val wf = EmptyTask() -- (0 until 2).map(dsl)
Validation(wf).isEmpty should be(true)
}
"By" should "be convertible to DSL" in {
val t = EmptyTask()
val m = DSLContainer(t, ())
val e = LocalEnvironment(1)
val dsl1: DSL = (t by 2)
val dsl2: DSL = (t on e by 2)
val dsl3: DSL = (t by 2 on e)
val dsl4: DSL = (t by 2 on e by 2)
val dsl5: DSL = (m by 2)
val dsl6: DSL = (m on e)
val dsl7: DSL = (m on e by 2)
val dsl8: DSL = (m by 2 on e)
val dsl9: DSL = (m by 2 on e by 2)
}
}
| openmole/openmole | openmole/core/org.openmole.core.workflow/src/test/scala/org/openmole/core/workflow/puzzle/PuzzleSpec.scala | Scala | agpl-3.0 | 3,093 |
package thurloe.service
import akka.actor.{Actor, Props}
import com.typesafe.config.{Config, ConfigFactory}
import org.parboiled.common.FileUtils
import spray.http.StatusCodes._
import spray.http._
import spray.routing.Route
import thurloe.dataaccess.HttpSendGridDAO
import thurloe.database.ThurloeDatabaseConnector
import scala.language.postfixOps
object ThurloeServiceActor {
def props(config: Config) = Props(new ThurloeServiceActor(config))
}
class ThurloeServiceActor(config: Config) extends Actor with FireCloudProtectedServices with StatusService {
val authConfig = ConfigFactory.load().getConfig("auth")
override val dataAccess = ThurloeDatabaseConnector
override def actorRefFactory = context
override val sendGridDAO = new HttpSendGridDAO
protected val swaggerUiPath = "META-INF/resources/webjars/swagger-ui/3.25.0"
override def receive = runRoute(
swaggerUiService ~
fireCloudProtectedRoutes ~
statusRoute
)
def withResourceFileContents(path: String)(innerRoute: String => Route): Route =
innerRoute( FileUtils.readAllTextFromResource(path) )
val swaggerUiService = {
path("") {
get {
serveIndex()
}
} ~
path("thurloe.yaml") {
get {
withResourceFileContents("swagger/thurloe.yaml") { apiDocs =>
complete(apiDocs)
}
}
} ~
// We have to be explicit about the paths here since we're matching at the root URL and we don't
// want to catch all paths lest we circumvent Spray's not-found and method-not-allowed error
// messages.
(pathPrefixTest("swagger-ui") | pathPrefixTest("oauth2") | pathSuffixTest("js")
| pathSuffixTest("css") | pathPrefixTest("favicon")) {
get {
getFromResourceDirectory(swaggerUiPath)
}
}
}
private def serveIndex(): Route = {
withResourceFileContents(swaggerUiPath + "/index.html") { indexHtml =>
complete {
val swaggerOptions =
"""
| validatorUrl: null,
| apisSorter: "alpha",
| operationsSorter: "alpha"
""".stripMargin
HttpEntity(ContentType(MediaTypes.`text/html`),
indexHtml
.replace("""url: "https://petstore.swagger.io/v2/swagger.json"""", "url: '/thurloe.yaml'")
.replace("""layout: "StandaloneLayout"""", s"""layout: "StandaloneLayout", $swaggerOptions""")
.replace("window.ui = ui", s"""ui.initOAuth({
| clientId: "${authConfig.getString("googleClientId")}",
| appName: "thurloe",
| scopeSeparator: " ",
| additionalQueryStringParams: {}
| })
| window.ui = ui
| """.stripMargin))
}
}
}
}
| broadinstitute/thurloe | src/main/scala/thurloe/service/ThurloeServiceActor.scala | Scala | bsd-3-clause | 3,035 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import java.util
import java.util.concurrent.{TimeUnit, CountDownLatch}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
import java.util.{Collections, Properties}
import com.yammer.metrics.core.Gauge
import joptsimple.OptionParser
import kafka.consumer.{Blacklist, ConsumerConfig, ConsumerThreadId, ConsumerTimeoutException, TopicFilter, Whitelist, ZookeeperConsumerConnector}
import kafka.javaapi.consumer.ConsumerRebalanceListener
import kafka.message.MessageAndMetadata
import kafka.metrics.KafkaMetricsGroup
import kafka.serializer.DefaultDecoder
import kafka.utils.{CommandLineUtils, Logging, CoreUtils}
import org.apache.kafka.clients.producer.internals.ErrorLoggingCallback
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord, RecordMetadata}
import org.apache.kafka.common.utils.Utils
import scala.collection.JavaConversions._
/**
* The mirror maker has the following architecture:
 * - There are N mirror maker threads sharing one ZookeeperConsumerConnector, and each owns a Kafka stream.
* - All the mirror maker threads share one producer.
* - Each mirror maker thread periodically flushes the producer and then commits all offsets.
*
* @note For mirror maker, the following settings are set by default to make sure there is no data loss:
* 1. use new producer with following settings
* acks=all
* retries=max integer
* block.on.buffer.full=true
* max.in.flight.requests.per.connection=1
* 2. Consumer Settings
* auto.commit.enable=false
* 3. Mirror Maker Setting:
* abort.on.send.failure=true
*/
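// Illustrative invocation (a sketch; the properties file names are hypothetical, the option
// names are those defined by the parser below, and the launcher script may vary by distribution):
//   kafka-run-class.sh kafka.tools.MirrorMaker \
//     --consumer.config source-cluster-consumer.properties \
//     --producer.config target-cluster-producer.properties \
//     --whitelist ".*" --num.streams 2 --abort.on.send.failure true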
object MirrorMaker extends Logging with KafkaMetricsGroup {
private var connectors: Seq[ZookeeperConsumerConnector] = null
private var producer: MirrorMakerProducer = null
private var mirrorMakerThreads: Seq[MirrorMakerThread] = null
private val isShuttingdown: AtomicBoolean = new AtomicBoolean(false)
// Track the messages not successfully sent by mirror maker.
private var numDroppedMessages: AtomicInteger = new AtomicInteger(0)
private var messageHandler: MirrorMakerMessageHandler = null
private var offsetCommitIntervalMs = 0
private var abortOnSendFailure: Boolean = true
@volatile private var exitingOnSendFailure: Boolean = false
  // If a message send failed after retries are exhausted, the offset of the message will also be removed from
  // the unacked offset list to avoid the offset commit being stuck on that offset. In this case, the offset of that
  // message was not really acked, but was skipped. This metric records the number of skipped offsets.
newGauge("MirrorMaker-numDroppedMessages",
new Gauge[Int] {
def value = numDroppedMessages.get()
})
def main(args: Array[String]) {
info("Starting mirror maker")
val parser = new OptionParser
val consumerConfigOpt = parser.accepts("consumer.config",
"Embedded consumer config for consuming from the source cluster.")
.withRequiredArg()
.describedAs("config file")
.ofType(classOf[String])
val producerConfigOpt = parser.accepts("producer.config",
"Embedded producer config.")
.withRequiredArg()
.describedAs("config file")
.ofType(classOf[String])
val numStreamsOpt = parser.accepts("num.streams",
"Number of consumption streams.")
.withRequiredArg()
.describedAs("Number of threads")
.ofType(classOf[java.lang.Integer])
.defaultsTo(1)
val whitelistOpt = parser.accepts("whitelist",
"Whitelist of topics to mirror.")
.withRequiredArg()
.describedAs("Java regex (String)")
.ofType(classOf[String])
val blacklistOpt = parser.accepts("blacklist",
"Blacklist of topics to mirror.")
.withRequiredArg()
.describedAs("Java regex (String)")
.ofType(classOf[String])
val offsetCommitIntervalMsOpt = parser.accepts("offset.commit.interval.ms",
"Offset commit interval in ms")
.withRequiredArg()
.describedAs("offset commit interval in millisecond")
.ofType(classOf[java.lang.Integer])
.defaultsTo(60000)
val consumerRebalanceListenerOpt = parser.accepts("consumer.rebalance.listener",
"The consumer rebalance listener to use for mirror maker consumer.")
.withRequiredArg()
.describedAs("A custom rebalance listener of type ConsumerRebalanceListener")
.ofType(classOf[String])
val rebalanceListenerArgsOpt = parser.accepts("rebalance.listener.args",
"Arguments used by custom rebalance listener for mirror maker consumer")
.withRequiredArg()
.describedAs("Arguments passed to custom rebalance listener constructor as a string.")
.ofType(classOf[String])
val messageHandlerOpt = parser.accepts("message.handler",
"The consumer rebalance listener to use for mirror maker consumer.")
.withRequiredArg()
.describedAs("A custom rebalance listener of type MirrorMakerMessageHandler")
.ofType(classOf[String])
val messageHandlerArgsOpt = parser.accepts("message.handler.args",
"Arguments used by custom rebalance listener for mirror maker consumer")
.withRequiredArg()
.describedAs("Arguments passed to message handler constructor.")
.ofType(classOf[String])
val abortOnSendFailureOpt = parser.accepts("abort.on.send.failure",
"Configure the mirror maker to exit on a failed send.")
.withRequiredArg()
.describedAs("Stop the entire mirror maker when a send failure occurs")
.ofType(classOf[String])
.defaultsTo("true")
val helpOpt = parser.accepts("help", "Print this message.")
if (args.length == 0)
CommandLineUtils.printUsageAndDie(parser, "Continuously copy data between two Kafka clusters.")
val options = parser.parse(args: _*)
if (options.has(helpOpt)) {
parser.printHelpOn(System.out)
System.exit(0)
}
CommandLineUtils.checkRequiredArgs(parser, options, consumerConfigOpt, producerConfigOpt)
if (List(whitelistOpt, blacklistOpt).count(options.has) != 1) {
println("Exactly one of whitelist or blacklist is required.")
System.exit(1)
}
abortOnSendFailure = options.valueOf(abortOnSendFailureOpt).toBoolean
offsetCommitIntervalMs = options.valueOf(offsetCommitIntervalMsOpt).intValue()
val numStreams = options.valueOf(numStreamsOpt).intValue()
Runtime.getRuntime.addShutdownHook(new Thread("MirrorMakerShutdownHook") {
override def run() {
cleanShutdown()
}
})
// create producer
val producerProps = Utils.loadProps(options.valueOf(producerConfigOpt))
// Defaults to no data loss settings.
maybeSetDefaultProperty(producerProps, ProducerConfig.RETRIES_CONFIG, Int.MaxValue.toString)
maybeSetDefaultProperty(producerProps, ProducerConfig.BLOCK_ON_BUFFER_FULL_CONFIG, "true")
maybeSetDefaultProperty(producerProps, ProducerConfig.ACKS_CONFIG, "all")
maybeSetDefaultProperty(producerProps, ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1")
producer = new MirrorMakerProducer(producerProps)
// Create consumer connector
val consumerConfigProps = Utils.loadProps(options.valueOf(consumerConfigOpt))
    // Disable consumer auto offset commit to prevent data loss.
maybeSetDefaultProperty(consumerConfigProps, "auto.commit.enable", "false")
    // Set the consumer timeout so we will not block for low volume pipelines. The timeout is necessary to make sure
    // offsets are still committed for those low volume pipelines.
maybeSetDefaultProperty(consumerConfigProps, "consumer.timeout.ms", "10000")
// The default client id is group id, we manually set client id to groupId-index to avoid metric collision
val groupIdString = consumerConfigProps.getProperty("group.id")
connectors = (0 until numStreams) map { i =>
consumerConfigProps.setProperty("client.id", groupIdString + "-" + i.toString)
val consumerConfig = new ConsumerConfig(consumerConfigProps)
new ZookeeperConsumerConnector(consumerConfig)
}
// Set consumer rebalance listener.
// Custom rebalance listener will be invoked after internal listener finishes its work.
val customRebalanceListenerClass = options.valueOf(consumerRebalanceListenerOpt)
val rebalanceListenerArgs = options.valueOf(rebalanceListenerArgsOpt)
val customRebalanceListener = {
if (customRebalanceListenerClass != null) {
if (rebalanceListenerArgs != null)
Some(CoreUtils.createObject[ConsumerRebalanceListener](customRebalanceListenerClass, rebalanceListenerArgs))
else
Some(CoreUtils.createObject[ConsumerRebalanceListener](customRebalanceListenerClass))
} else {
None
}
}
connectors.foreach {
connector =>
val consumerRebalanceListener = new InternalRebalanceListener(connector, customRebalanceListener)
connector.setConsumerRebalanceListener(consumerRebalanceListener)
}
// create filters
val filterSpec = if (options.has(whitelistOpt))
new Whitelist(options.valueOf(whitelistOpt))
else
new Blacklist(options.valueOf(blacklistOpt))
// Create mirror maker threads
mirrorMakerThreads = (0 until numStreams) map ( i =>
new MirrorMakerThread(connectors(i), filterSpec, i)
)
// Create and initialize message handler
val customMessageHandlerClass = options.valueOf(messageHandlerOpt)
val messageHandlerArgs = options.valueOf(messageHandlerArgsOpt)
messageHandler = {
if (customMessageHandlerClass != null) {
if (messageHandlerArgs != null)
CoreUtils.createObject[MirrorMakerMessageHandler](customMessageHandlerClass, messageHandlerArgs)
else
CoreUtils.createObject[MirrorMakerMessageHandler](customMessageHandlerClass)
} else {
defaultMirrorMakerMessageHandler
}
}
mirrorMakerThreads.foreach(_.start())
mirrorMakerThreads.foreach(_.awaitShutdown())
}
def commitOffsets(connector: ZookeeperConsumerConnector) {
if (!exitingOnSendFailure) {
trace("Committing offsets.")
connector.commitOffsets
} else {
info("Exiting on send failure, skip committing offsets.")
}
}
def cleanShutdown() {
if (isShuttingdown.compareAndSet(false, true)) {
info("Start clean shutdown.")
// Shutdown consumer threads.
info("Shutting down consumer threads.")
if (mirrorMakerThreads != null) {
mirrorMakerThreads.foreach(_.shutdown())
mirrorMakerThreads.foreach(_.awaitShutdown())
}
info("Closing producer.")
producer.close()
info("Kafka mirror maker shutdown successfully")
}
}
private def maybeSetDefaultProperty(properties: Properties, propertyName: String, defaultValue: String) {
val propertyValue = properties.getProperty(propertyName)
properties.setProperty(propertyName, Option(propertyValue).getOrElse(defaultValue))
if (properties.getProperty(propertyName) != defaultValue)
info("Property %s is overridden to %s - data loss or message reordering is possible.".format(propertyName, propertyValue))
}
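  // For example, maybeSetDefaultProperty(producerProps, ProducerConfig.ACKS_CONFIG, "all") keeps a
  // user-supplied acks value if one is present (logging that the no-data-loss default was overridden)
  // and otherwise falls back to "all".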
class MirrorMakerThread(connector: ZookeeperConsumerConnector,
filterSpec: TopicFilter,
val threadId: Int) extends Thread with Logging with KafkaMetricsGroup {
private val threadName = "mirrormaker-thread-" + threadId
private val shutdownLatch: CountDownLatch = new CountDownLatch(1)
private var lastOffsetCommitMs = System.currentTimeMillis()
@volatile private var shuttingDown: Boolean = false
this.logIdent = "[%s] ".format(threadName)
setName(threadName)
override def run() {
info("Starting mirror maker thread " + threadName)
try {
// Creating one stream per each connector instance
val streams = connector.createMessageStreamsByFilter(filterSpec, 1, new DefaultDecoder(), new DefaultDecoder())
require(streams.size == 1)
val stream = streams(0)
val iter = stream.iterator()
while (!exitingOnSendFailure && !shuttingDown) {
try {
while (!exitingOnSendFailure && !shuttingDown && iter.hasNext()) {
val data = iter.next()
trace("Sending message with value size %d".format(data.message().size))
val records = messageHandler.handle(data)
records.foreach(producer.send)
maybeFlushAndCommitOffsets()
}
} catch {
case e: ConsumerTimeoutException =>
trace("Caught ConsumerTimeoutException, continue iteration.")
}
maybeFlushAndCommitOffsets()
}
} catch {
case t: Throwable =>
fatal("Mirror maker thread failure due to ", t)
} finally {
info("Flushing producer.")
producer.flush()
info("Committing consumer offsets.")
commitOffsets(connector)
info("Shutting down consumer connectors.")
connector.shutdown()
shutdownLatch.countDown()
info("Mirror maker thread stopped")
// if it exits accidentally, stop the entire mirror maker
if (!isShuttingdown.get()) {
fatal("Mirror maker thread exited abnormally, stopping the whole mirror maker.")
System.exit(-1)
}
}
}
def maybeFlushAndCommitOffsets() {
if (System.currentTimeMillis() - lastOffsetCommitMs > offsetCommitIntervalMs) {
producer.flush()
commitOffsets(connector)
lastOffsetCommitMs = System.currentTimeMillis()
}
}
def shutdown() {
try {
info(threadName + " shutting down")
shuttingDown = true
}
catch {
case ie: InterruptedException =>
warn("Interrupt during shutdown of the mirror maker thread")
}
}
def awaitShutdown() {
try {
shutdownLatch.await()
info("Mirror maker thread shutdown complete")
} catch {
case ie: InterruptedException =>
warn("Shutdown of the mirror maker thread interrupted")
}
}
}
private class MirrorMakerProducer(val producerProps: Properties) {
val sync = producerProps.getProperty("producer.type", "async").equals("sync")
val producer = new KafkaProducer[Array[Byte], Array[Byte]](producerProps)
def send(record: ProducerRecord[Array[Byte], Array[Byte]]) {
if (sync) {
this.producer.send(record).get()
} else {
this.producer.send(record,
new MirrorMakerProducerCallback(record.topic(), record.key(), record.value()))
}
}
def flush() {
this.producer.flush()
}
def close() {
this.producer.close()
}
def close(timeout: Long) {
this.producer.close(timeout, TimeUnit.MILLISECONDS)
}
}
private class MirrorMakerProducerCallback (topic: String, key: Array[Byte], value: Array[Byte])
extends ErrorLoggingCallback(topic, key, value, false) {
override def onCompletion(metadata: RecordMetadata, exception: Exception) {
if (exception != null) {
        // Use the default callback to log the error. This means the producer's max retries have been reached and the
        // message still could not be sent.
super.onCompletion(metadata, exception)
// If abort.on.send.failure is set, stop the mirror maker. Otherwise log skipped message and move on.
if (abortOnSendFailure) {
info("Closing producer due to send failure.")
exitingOnSendFailure = true
producer.close(0)
}
numDroppedMessages.incrementAndGet()
}
}
}
private class InternalRebalanceListener(connector: ZookeeperConsumerConnector,
customRebalanceListener: Option[ConsumerRebalanceListener])
extends ConsumerRebalanceListener {
override def beforeReleasingPartitions(partitionOwnership: java.util.Map[String, java.util.Set[java.lang.Integer]]) {
producer.flush()
commitOffsets(connector)
// invoke custom consumer rebalance listener
if (customRebalanceListener.isDefined)
customRebalanceListener.get.beforeReleasingPartitions(partitionOwnership)
}
override def beforeStartingFetchers(consumerId: String,
partitionAssignment: java.util.Map[String, java.util.Map[java.lang.Integer, ConsumerThreadId]]) {
if (customRebalanceListener.isDefined)
customRebalanceListener.get.beforeStartingFetchers(consumerId, partitionAssignment)
}
}
/**
   * If message.handler.args is specified, a constructor that takes in a String as argument must exist.
*/
trait MirrorMakerMessageHandler {
def handle(record: MessageAndMetadata[Array[Byte], Array[Byte]]): util.List[ProducerRecord[Array[Byte], Array[Byte]]]
}
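  // A minimal custom handler might look like the following sketch (illustrative only; the
  // "prefix" constructor argument is hypothetical and would be supplied via message.handler.args):
  //   class TopicPrefixingHandler(prefix: String) extends MirrorMaker.MirrorMakerMessageHandler {
  //     override def handle(record: MessageAndMetadata[Array[Byte], Array[Byte]]) =
  //       Collections.singletonList(new ProducerRecord[Array[Byte], Array[Byte]](
  //         prefix + record.topic, record.key(), record.message()))
  //   }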
private object defaultMirrorMakerMessageHandler extends MirrorMakerMessageHandler {
override def handle(record: MessageAndMetadata[Array[Byte], Array[Byte]]): util.List[ProducerRecord[Array[Byte], Array[Byte]]] = {
Collections.singletonList(new ProducerRecord[Array[Byte], Array[Byte]](record.topic, record.key(), record.message()))
}
}
}
| kss160992/kafka-latest | core/src/main/scala/kafka/tools/MirrorMaker.scala | Scala | apache-2.0 | 18,123 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.tstreams.testutils
import java.util.concurrent.CountDownLatch
import com.bwsw.tstreamstransactionserver.netty.server.Server
import com.bwsw.tstreamstransactionserver.options.CommonOptions.ZookeeperOptions
import com.bwsw.tstreamstransactionserver.options.ServerBuilder
import com.bwsw.tstreamstransactionserver.options.ServerOptions.{AuthOptions, CommitLogOptions, StorageOptions}
/**
* Created by Ivan Kudryavtsev on 29.01.17.
*/
object TestStorageServer {
private val serverBuilder = new ServerBuilder()
.withZookeeperOptions(new ZookeeperOptions(endpoints = s"127.0.0.1:${TestUtils.ZOOKEEPER_PORT}"))
private var tempDir: String = TestUtils.getTmpDir()
def getNewClean(): Server = {
tempDir = TestUtils.getTmpDir()
get()
}
def get(): Server = {
val transactionServer = serverBuilder
.withAuthOptions(new AuthOptions(key = TestUtils.AUTH_KEY))
.withServerStorageOptions(new StorageOptions(path = tempDir))
.withCommitLogOptions(new CommitLogOptions(commitLogCloseDelayMs = 100))
.build()
val l = new CountDownLatch(1)
new Thread(() => transactionServer.start(l.countDown())).start()
l.await()
transactionServer
}
def dispose(transactionServer: Server) = {
transactionServer.shutdown()
}
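  // Typical test usage (sketch): obtain a server with TestStorageServer.get(), run assertions
  // against it, then call TestStorageServer.dispose(server) to shut it down.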
}
| bwsw/t-streams | src/test/scala/com/bwsw/tstreams/testutils/TestStorageServer.scala | Scala | apache-2.0 | 2,108 |
/*
* Copyright 2015-2020 Noel Welsh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package doodle
package examples
import cats.instances.list._
import doodle.algebra.Picture
import doodle.core._
import doodle.language.Basic
import doodle.syntax._
object Polygons {
def picture[F[_]]: Picture[Basic, F, Unit] = {
def polygon(sides: Int, radius: Double): Picture[Basic, F, Unit] = {
val centerAngle = 360.degrees / sides.toDouble
val shape = (0 until sides).foldLeft(ClosedPath.empty) {
(path, index) =>
val point = Point.polar(radius, centerAngle * index.toDouble)
if (index == 0) path.moveTo(point) else path.lineTo(point)
}
shape
.path[Basic, F]
.strokeWidth(3)
.strokeColor(Color.hsl(centerAngle, 1, .5))
}
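    // For example, polygon(3, 200) places vertices at 0, 120 and 240 degrees on a circle of
    // radius 200 and closes the path, yielding an equilateral triangle.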
((3 to 10) map (polygon(_, 200))).toList.allOn
}
}
| underscoreio/doodle | core/shared/src/main/scala/doodle/examples/Polygons.scala | Scala | apache-2.0 | 1,370 |
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.provider.form
import javax.inject.Inject
import silhouette.LoginInfo
import silhouette.provider.Provider
import silhouette.provider.form.EmailLoginProvider._
import scala.concurrent.Future
/**
* A provider for authenticating with an email address.
*/
class EmailLoginProvider @Inject() () extends Provider {
/**
* Gets the provider ID.
*
* @return The provider ID.
*/
override def id: String = ID
/**
* Authenticates a user with an email address.
*
* @param email The email to authenticate with.
* @return The login info if the authentication was successful, otherwise a failure.
*/
def authenticate(email: String): Future[LoginInfo] = ???
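  // Usage sketch (the result shape is an assumption, since the method is not yet implemented):
  // provider.authenticate("user@example.com") is expected to yield a Future[LoginInfo] tying the
  // "email-login" provider ID to the given address.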
}
/**
* The companion object.
*/
object EmailLoginProvider {
/**
* The provider ID.
*/
val ID = "email-login"
}
| mohiva/silhouette | modules/provider-form/src/main/scala/silhouette/provider/form/EmailLoginProvider.scala | Scala | apache-2.0 | 1,592 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.indexer
import java.util.concurrent.Semaphore
import scala.concurrent._
import scala.concurrent.duration._
import scala.util.{ Failure, Properties, Success }
import akka.actor._
import akka.event.slf4j.SLF4JLogging
import org.apache.commons.vfs2._
import org.ensime.api._
import org.ensime.indexer.database._
import org.ensime.indexer.database.DatabaseService._
import org.ensime.util.file._
import org.ensime.vfs._
/**
* Provides methods to perform ENSIME-specific indexing tasks,
* receives events that require an index update, and provides
* searches against the index.
*
* We have an H2 database for storing relational information
* and Lucene for advanced indexing.
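 *
 * A rough usage sketch (the `config` and `resolver` values, plus the implicit actor system
 * and VFS, are assumed to come from the surrounding ENSIME setup):
 *
 * {{{
 *   val search = new SearchService(config, resolver)
 *   search.refresh()                          // (re)index targets and dependency jars
 *   val hits = search.searchClasses("Foo", 5) // query the Lucene/H2 index
 * }}}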
*/
class SearchService(
config: EnsimeConfig,
resolver: SourceResolver
)(
implicit
actorSystem: ActorSystem,
vfs: EnsimeVFS
) extends ClassfileIndexer
with FileChangeListener
with SLF4JLogging {
private[indexer] def isUserFile(file: FileName): Boolean = {
(config.allTargets map (vfs.vfile)) exists (file isAncestor _.getName)
}
private val QUERY_TIMEOUT = 30 seconds
/**
* Changelog:
*
* 1.4 - remove redundant descriptors, doh!
*
* 1.3 - methods include descriptors in (now unique) FQNs
*
* 1.2 - added foreign key to FqnSymbols.file with cascade delete
*
* 1.0 - reverted index due to negative impact to startup time. The
* workaround to large scale deletions is to just nuke the
* .ensime_cache.
*
* 1.1 - added index to FileCheck.file to speed up delete.
*
* 1.0 - initial schema
*/
private val version = "1.4"
private val index = new IndexService(config.cacheDir / ("index-" + version))
private val db = new DatabaseService(config.cacheDir / ("sql-" + version))
import ExecutionContext.Implicits.global
// each jar / directory must acquire a permit, released when the
// data is persisted. This is to keep the heap usage down and is a
// poor man's backpressure.
val semaphore = new Semaphore(Properties.propOrElse("ensime.index.parallel", "10").toInt, true)
private def scan(f: FileObject) = f.findFiles(ClassfileSelector) match {
case null => Nil
case res => res.toList
}
/**
* Indexes everything, making best endeavours to avoid scanning what
* is unnecessary (e.g. we already know that a jar or classfile has
* been indexed).
*
* @return the number of rows (removed, indexed) from the database.
*/
def refresh(): Future[(Int, Int)] = {
// it is much faster during startup to obtain the full list of
    // known files from the DB and then check them against the disk, than
    // to check each file against DatabaseService.outOfDate
def findStaleFileChecks(checks: Seq[FileCheck]): List[FileCheck] = {
log.debug("findStaleFileChecks")
for {
check <- checks
name = check.file.getName.getURI
if !check.file.exists || check.changed
} yield check
}.toList
// delete the stale data before adding anything new
// returns number of rows deleted
def deleteReferences(checks: List[FileCheck]): Future[Int] = {
log.debug(s"removing ${checks.size} stale files from the index")
deleteInBatches(checks.map(_.file))
}
// a snapshot of everything that we want to index
def findBases(): Set[FileObject] = {
config.modules.flatMap {
case (name, m) =>
m.targets.flatMap {
case d if !d.exists() => Nil
case d if d.isJar => List(vfs.vfile(d))
case d => scan(vfs.vfile(d))
} ::: m.testTargets.flatMap {
case d if !d.exists() => Nil
case d if d.isJar => List(vfs.vfile(d))
case d => scan(vfs.vfile(d))
} :::
m.compileJars.map(vfs.vfile) ::: m.testJars.map(vfs.vfile)
}
}.toSet ++ config.javaLibs.map(vfs.vfile)
def indexBase(base: FileObject, fileCheck: Option[FileCheck]): Future[Int] = {
val outOfDate = fileCheck.map(_.changed).getOrElse(true)
if (!outOfDate) Future.successful(0)
else {
val boost = isUserFile(base.getName())
val check = FileCheck(base)
val indexed = extractSymbolsFromClassOrJar(base).flatMap(persist(check, _, commitIndex = false, boost = boost))
indexed.onComplete { _ => semaphore.release() }
indexed
}
}
// index all the given bases and return number of rows written
def indexBases(bases: Set[FileObject], checks: Seq[FileCheck]): Future[Int] = {
log.debug("Indexing bases...")
val checksLookup: Map[String, FileCheck] = checks.map(check => (check.filename -> check)).toMap
val basesWithChecks: Set[(FileObject, Option[FileCheck])] = bases.map { base =>
(base, checksLookup.get(base.getName().getURI()))
}
Future.sequence(basesWithChecks.map { case (file, check) => indexBase(file, check) }).map(_.sum)
}
def commitIndex(): Future[Unit] = {
log.debug("committing index to disk...")
val i = Future { blocking { index.commit() } }
val g = db.commit()
for {
_ <- i
_ <- g
} yield {
log.debug("...done committing index")
}
}
// chain together all the future tasks
for {
checks <- db.knownFiles()
stale = findStaleFileChecks(checks)
deletes <- deleteReferences(stale)
bases = findBases()
added <- indexBases(bases, checks)
_ <- commitIndex()
} yield (deletes, added)
}
def refreshResolver(): Unit = resolver.update()
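  // Persist the extracted symbols to both the Lucene index and the H2 database;
  // the returned Future yields the number of database rows written.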
def persist(check: FileCheck, symbols: List[FqnSymbol], commitIndex: Boolean, boost: Boolean): Future[Int] = {
val iwork = Future { blocking { index.persist(check, symbols, commitIndex, boost) } }
val dwork = db.persist(check, symbols)
iwork.flatMap { _ => dwork }
}
  // this method acquires a semaphore permit on every call, which must be released
  // when the List[FqnSymbol] has been processed (even if it is empty)
def extractSymbolsFromClassOrJar(file: FileObject): Future[List[FqnSymbol]] = {
    def global: ExecutionContext = null // shadow the imported global ExecutionContext so the dedicated dispatcher below is used
val ec = actorSystem.dispatchers.lookup("akka.search-service-dispatcher")
Future {
blocking {
semaphore.acquire()
file match {
case classfile if classfile.getName.getExtension == "class" =>
// too noisy to log
val check = FileCheck(classfile)
try extractSymbols(classfile, classfile)
finally classfile.close()
case jar =>
log.debug(s"indexing $jar")
val check = FileCheck(jar)
val vJar = vfs.vjar(jar)
try scan(vJar) flatMap (extractSymbols(jar, _))
finally vfs.nuke(vJar)
}
}
}(ec)
}
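  // Skip JDK-internal packages and synthetic Scala artifacts when extracting symbols.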
private val blacklist = Set("sun/", "sunw/", "com/sun/")
private val ignore = Set("$$", "$worker$")
import org.ensime.util.RichFileObject._
private def extractSymbols(container: FileObject, f: FileObject): List[FqnSymbol] = {
f.pathWithinArchive match {
case Some(relative) if blacklist.exists(relative.startsWith) => Nil
case _ =>
val name = container.getName.getURI
val path = f.getName.getURI
val (clazz, refs) = indexClassfile(f)
val depickler = new ClassfileDepickler(f)
val source = resolver.resolve(clazz.name.pack, clazz.source)
val sourceUri = source.map(_.getName.getURI)
if (clazz.access != Public) Nil
else {
FqnSymbol(None, name, path, clazz.name.fqnString, None, sourceUri, clazz.source.line) ::
clazz.methods.toList.filter(_.access == Public).map { method =>
FqnSymbol(None, name, path, method.name.fqnString, None, sourceUri, method.line)
} ::: clazz.fields.toList.filter(_.access == Public).map { field =>
val internal = field.clazz.internalString
FqnSymbol(None, name, path, field.name.fqnString, Some(internal), sourceUri, clazz.source.line)
} ::: depickler.getTypeAliases.toList.filter(_.access == Public).filterNot(_.fqn.contains("<refinement>")).map { rawType =>
// this is a hack, we shouldn't be storing Scala names in the JVM name space
// in particular, it creates fqn names that clash with the above ones
FqnSymbol(None, name, path, rawType.fqn, None, sourceUri, None)
}
}
}
}.filterNot(sym => ignore.exists(sym.fqn.contains))
/** free-form search for classes */
def searchClasses(query: String, max: Int): List[FqnSymbol] = {
val fqns = index.searchClasses(query, max)
Await.result(db.find(fqns), QUERY_TIMEOUT) take max
}
/** free-form search for classes and methods */
def searchClassesMethods(terms: List[String], max: Int): List[FqnSymbol] = {
val fqns = index.searchClassesMethods(terms, max)
Await.result(db.find(fqns), QUERY_TIMEOUT) take max
}
/** only for exact fqns */
def findUnique(fqn: String): Option[FqnSymbol] = Await.result(db.find(fqn), QUERY_TIMEOUT)
/* DELETE then INSERT in H2 is ridiculously slow, so we put all modifications
* into a blocking queue and dedicate a thread to block on draining the queue.
* This has the effect that we always react to a single change on disc but we
* will work through backlogs in bulk.
*
* We always do a DELETE, even if the entries are new, but only INSERT if
* the list of symbols is non-empty.
*/
val backlogActor = actorSystem.actorOf(Props(new IndexingQueueActor(this)), "ClassfileIndexer")
// deletion in both Lucene and H2 is really slow, batching helps
def deleteInBatches(
files: List[FileObject],
batchSize: Int = 1000
): Future[Int] = {
val removing = files.grouped(batchSize).map(delete)
Future.sequence(removing).map(_.sum)
}
// returns number of rows removed
def delete(files: List[FileObject]): Future[Int] = {
// this doesn't speed up Lucene deletes, but it means that we
// don't wait for Lucene before starting the H2 deletions.
val iwork = Future { blocking { index.remove(files) } }
val dwork = db.removeFiles(files)
iwork.flatMap(_ => dwork)
}
def fileChanged(f: FileObject): Unit = backlogActor ! IndexFile(f)
def fileRemoved(f: FileObject): Unit = fileChanged(f)
def fileAdded(f: FileObject): Unit = fileChanged(f)
def shutdown(): Future[Unit] = {
db.shutdown()
}
}
final case class IndexFile(f: FileObject)
class IndexingQueueActor(searchService: SearchService) extends Actor with ActorLogging {
import context.system
import scala.concurrent.duration._
case object Process
// De-dupes files that have been updated since we were last told to
// index them. No need to aggregate values: the latest wins. Key is
// the URI because FileObject doesn't implement equals
var todo = Map.empty[String, FileObject]
// debounce and give us a chance to batch (which is *much* faster)
var worker: Cancellable = _
private val advice = "If the problem persists, you may need to restart ensime."
private def debounce(): Unit = {
Option(worker).foreach(_.cancel())
import context.dispatcher
worker = system.scheduler.scheduleOnce(5 seconds, self, Process)
}
override def receive: Receive = {
case IndexFile(f) =>
todo += f.getName.getURI -> f
debounce()
case Process if todo.isEmpty => // nothing to do
case Process =>
val (batch, remaining) = todo.splitAt(500)
todo = remaining
if (remaining.nonEmpty)
debounce()
import ExecutionContext.Implicits.global
log.debug(s"Indexing ${batch.size} files")
def retry(): Unit = {
        batch.foreach { case (_, f) => self ! IndexFile(f) }
}
Future.sequence(batch.map {
case (url, f) =>
val filename = f.getName.getPath
// I don't trust VFS's f.exists()
if (!File(filename).exists()) {
Future {
searchService.semaphore.acquire() // nasty, but otherwise we leak
f -> Nil
}
          } else searchService.extractSymbolsFromClassOrJar(f).map(syms => f -> syms)
}).onComplete {
case Failure(t) =>
searchService.semaphore.release()
log.error(t, s"failed to index batch of ${batch.size} files. $advice")
retry()
case Success(indexed) =>
searchService.delete(indexed.map(_._1)(collection.breakOut)).onComplete {
case Failure(t) =>
searchService.semaphore.release()
log.error(t, s"failed to remove stale entries in ${batch.size} files. $advice")
retry()
case Success(_) => indexed.foreach {
case (file, syms) =>
val boost = searchService.isUserFile(file.getName)
val persisting = searchService.persist(FileCheck(file), syms, commitIndex = true, boost = boost)
persisting.onComplete {
case _ => searchService.semaphore.release()
}
persisting.onComplete {
case Failure(t) =>
log.error(t, s"failed to persist entries in $file. $advice")
retry()
case Success(_) =>
}
}
}
}
}
}
| mwielocha/ensime-server | core/src/main/scala/org/ensime/indexer/SearchService.scala | Scala | gpl-3.0 | 13,436 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalautils
/**
* Trait containing an implicit conversion that adds an <code>asAny</code> method to
* anything, which returns the same object as type <code>Any</code>.
*
* <p>
* The purpose of this method is to appease the type checker when necessary. For example,
* in ScalaTest's matchers DSL the type passed to <code>contain</code> must be consistent
* with the element type of the collection on which <code>should</code> is invoked. So
* this type checks:
* </p>
*
* <pre>
* Set(1, 2) should contain (2)
* </pre>
*
* <p>
* But this does not type check:
* </p>
*
* <pre>
* Set(1, 2) should contain ("2")
* </pre>
*
* <p>
 * That is all well and good, but it turns out that this also does not type check, because the element type of
* the collection (<code>Any</code>) is a supertype of the type passed to contain (<code>String</code>):
* </p>
*
* <pre>
* Set(1, "2") should contain ("2") // Does not compile
* </pre>
*
* <p>
* You can appease the type checker by casting the type of <code>"2"</code> to <code>Any</code>, a cast that
* will always succeed. Using <code>asAny</code> makes this prettier:
* </p>
*
* <pre>
* Set(1, "2") should contain ("2".asAny)
* </pre>
*
*/
trait AsAny {
/**
* Wrapper class with an <code>asAny</code> method that returns the passed object
* as type <code>Any</code>.
*
* @param o the object to return from <code>asAny</code>
*
* @author Bill Venners
*/
class AsAnyWrapper(o: Any) {
/**
* Returns the object, <code>o</code>, passed to the constructor.
*
* @return the object passed to the constructor
*/
def asAny: Any = o
}
/**
* Implicit conversion that adds an <code>asAny</code> method to an object, which returns
* the exact same object but as type <code>Any</code>.
*/
implicit def convertToAsAnyWrapper(o: Any): AsAnyWrapper = new AsAnyWrapper(o)
}
/**
* Companion object to trait <code>AsAny</code> that facilitates the importing of <code>AsAny</code> members as
* an alternative to mixing it in. One use case is to import <code>AsAny</code> members so you can use
* them in the Scala interpreter:
*
* <pre class="stREPL">
* $ scala -classpath scalatest.jar
* Welcome to Scala version 2.10.0
* Type in expressions to have them evaluated.
* Type :help for more information.
*
* scala> import org.scalatest._
* import org.scalatest._
*
* scala> import Matchers._
* import Matchers._
*
* scala> Set(1, "2") should contain (1)
* <console>:14: error: overloaded method value should with alternatives:
* [R](inv: org.scalautils.TripleEqualsInvocation[R])(implicit constraint: org.scalautils.EqualityConstraint[scala.collection.immutable.Set[Any],R])Unit <and>
* (notWord: org.scalatest.Matchers.NotWord)org.scalatest.Matchers.ResultOfNotWordForTraversable[Any,scala.collection.immutable.Set] <and>
* (beWord: org.scalatest.Matchers.BeWord)org.scalatest.Matchers.ResultOfBeWordForAnyRef[scala.collection.GenTraversable[Any]] <and>
* (containMatcher: org.scalatest.ContainMatcher[Any])Unit <and>
* (containWord: org.scalatest.Matchers.ContainWord)org.scalatest.Matchers.ResultOfContainWordForTraversable[Any] <and>
* (haveWord: org.scalatest.Matchers.HaveWord)org.scalatest.Matchers.ResultOfHaveWordForTraversable[Any] <and>
* (rightMatcherGen1: org.scalatest.Matchers.MatcherGen1[scala.collection.immutable.Set[Any],org.scalautils.Equality])(implicit equality: org.scalautils.Equality[scala.collection.immutable.Set[Any]])Unit <and>
* (rightMatcherX6: org.scalatest.matchers.Matcher[scala.collection.GenTraversable[Any]])Unit
*cannot be applied to (org.scalatest.matchers.Matcher[scala.collection.GenTraversable[Int]])
* Set(1, "2") should contain (1)
* ^
*
* scala> Set(1, "2") should contain (1.asAny)
*
* scala>
* </pre>
*/
object AsAny extends AsAny
| svn2github/scalatest | src/main/scala/org/scalautils/AsAny.scala | Scala | apache-2.0 | 4,511 |
/*
* DragAndDrop.scala
* (Mellite)
*
* Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.mellite
import java.awt.datatransfer.{DataFlavor, Transferable, UnsupportedFlavorException}
import de.sciss.equal
import de.sciss.file.File
import scala.collection.JavaConverters._
object DragAndDrop {
sealed trait Flavor[A] extends DataFlavor
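  /** Creates a JVM-local data flavor for transferring instances of `A` within the same process. */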
def internalFlavor[A](implicit ct: reflect.ClassTag[A]): Flavor[A] =
new DataFlavor(s"""${DataFlavor.javaJVMLocalObjectMimeType};class="${ct.runtimeClass.getName}"""") with Flavor[A]
def getTransferData[A](t: Transferable, f: Flavor[A]): A =
t.getTransferData(f).asInstanceOf[A]
object Transferable {
/** Creates a transferable for one particular flavor. */
def apply[A](flavor: Flavor[A])(data: A): Transferable = new Transferable {
override def toString = s"Transferable($data)"
// private val flavor = internalFlavor[A]
// println(s"My flavor is $flavor")
def getTransferDataFlavors: Array[DataFlavor] = Array(flavor) // flavors.toArray
def isDataFlavorSupported(_flavor: DataFlavor): Boolean = {
import equal.Implicits._
_flavor === flavor
}
def getTransferData(_flavor: DataFlavor): AnyRef = {
if (!isDataFlavorSupported(_flavor)) throw new UnsupportedFlavorException(_flavor)
data /* .getOrElse(throw new IOException()) */ .asInstanceOf[AnyRef]
}
}
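    /** Creates a transferable carrying the given files via the standard `javaFileListFlavor`. */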
def files(f: File*): Transferable = new Transferable {
private[this] val data: java.util.List[File] = f.asJava
def getTransferDataFlavors: Array[DataFlavor] = Array(DataFlavor.javaFileListFlavor)
def isDataFlavorSupported (flavor: DataFlavor): Boolean = flavor == DataFlavor.javaFileListFlavor
def getTransferData (flavor: DataFlavor): AnyRef = data
}
/** Creates a transferable by wrapping a sequence of existing transferables. */
def seq(xs: Transferable*): Transferable = new Transferable {
def getTransferDataFlavors: Array[DataFlavor] = xs.iterator.flatMap(_.getTransferDataFlavors).toArray
def isDataFlavorSupported(_flavor: DataFlavor): Boolean = xs.exists(_.isDataFlavorSupported(_flavor))
def getTransferData(_flavor: DataFlavor): AnyRef = {
val peer = xs.find(_.isDataFlavorSupported(_flavor)).getOrElse(throw new UnsupportedFlavorException(_flavor))
peer.getTransferData(_flavor)
}
}
}
} | Sciss/Mellite | core/src/main/scala/de/sciss/mellite/DragAndDrop.scala | Scala | agpl-3.0 | 2,620 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import kafka.consumer.BaseConsumerRecord
import org.apache.kafka.common.record.{RecordBatch, TimestampType}
import scala.jdk.CollectionConverters._
import org.junit.Assert._
import org.junit.Test
import scala.annotation.nowarn
@nowarn("cat=deprecation")
class MirrorMakerTest {
@Test
def testDefaultMirrorMakerMessageHandler(): Unit = {
val now = 12345L
val consumerRecord = BaseConsumerRecord("topic", 0, 1L, now, TimestampType.CREATE_TIME, "key".getBytes, "value".getBytes)
val result = MirrorMaker.defaultMirrorMakerMessageHandler.handle(consumerRecord)
assertEquals(1, result.size)
val producerRecord = result.get(0)
assertEquals(now, producerRecord.timestamp)
assertEquals("topic", producerRecord.topic)
assertNull(producerRecord.partition)
assertEquals("key", new String(producerRecord.key))
assertEquals("value", new String(producerRecord.value))
}
@Test
def testDefaultMirrorMakerMessageHandlerWithNoTimestampInSourceMessage(): Unit = {
val consumerRecord = BaseConsumerRecord("topic", 0, 1L, RecordBatch.NO_TIMESTAMP, TimestampType.CREATE_TIME,
"key".getBytes, "value".getBytes)
val result = MirrorMaker.defaultMirrorMakerMessageHandler.handle(consumerRecord)
assertEquals(1, result.size)
val producerRecord = result.get(0)
assertNull(producerRecord.timestamp)
assertEquals("topic", producerRecord.topic)
assertNull(producerRecord.partition)
assertEquals("key", new String(producerRecord.key))
assertEquals("value", new String(producerRecord.value))
}
@Test
def testDefaultMirrorMakerMessageHandlerWithHeaders(): Unit = {
val now = 12345L
val consumerRecord = BaseConsumerRecord("topic", 0, 1L, now, TimestampType.CREATE_TIME, "key".getBytes,
"value".getBytes)
consumerRecord.headers.add("headerKey", "headerValue".getBytes)
val result = MirrorMaker.defaultMirrorMakerMessageHandler.handle(consumerRecord)
assertEquals(1, result.size)
val producerRecord = result.get(0)
assertEquals(now, producerRecord.timestamp)
assertEquals("topic", producerRecord.topic)
assertNull(producerRecord.partition)
assertEquals("key", new String(producerRecord.key))
assertEquals("value", new String(producerRecord.value))
assertEquals("headerValue", new String(producerRecord.headers.lastHeader("headerKey").value))
assertEquals(1, producerRecord.headers.asScala.size)
}
}
| sslavic/kafka | core/src/test/scala/unit/kafka/tools/MirrorMakerTest.scala | Scala | apache-2.0 | 3,257 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.serializer
import java.io._
import java.nio.ByteBuffer
import scala.reflect.ClassTag
import org.apache.spark.SparkConf
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.util.ByteBufferInputStream
import org.apache.spark.util.Utils
private[spark] class JavaSerializationStream(
out: OutputStream, counterReset: Int, extraDebugInfo: Boolean)
extends SerializationStream {
private val objOut = new ObjectOutputStream(out)
private var counter = 0
/**
* Calling reset to avoid memory leak:
* http://stackoverflow.com/questions/1281549/memory-leak-traps-in-the-java-standard-api
* But only call it every 100th time to avoid bloated serialization streams (when
* the stream 'resets' object class descriptions have to be re-written)
*/
def writeObject[T: ClassTag](t: T): SerializationStream = {
try {
objOut.writeObject(t)
} catch {
case e: NotSerializableException if extraDebugInfo =>
throw SerializationDebugger.improveException(t, e)
}
counter += 1
if (counterReset > 0 && counter >= counterReset) {
objOut.reset()
counter = 0
}
this
}
def flush() { objOut.flush() }
def close() { objOut.close() }
}
private[spark] class JavaDeserializationStream(in: InputStream, loader: ClassLoader)
extends DeserializationStream {
private val objIn = new ObjectInputStream(in) {
override def resolveClass(desc: ObjectStreamClass): Class[_] = {
// scalastyle:off classforname
Class.forName(desc.getName, false, loader)
// scalastyle:on classforname
}
}
def readObject[T: ClassTag](): T = objIn.readObject().asInstanceOf[T]
def close() { objIn.close() }
}
private[spark] class JavaSerializerInstance(
counterReset: Int, extraDebugInfo: Boolean, defaultClassLoader: ClassLoader)
extends SerializerInstance {
override def serialize[T: ClassTag](t: T): ByteBuffer = {
val bos = new ByteArrayOutputStream()
val out = serializeStream(bos)
out.writeObject(t)
out.close()
ByteBuffer.wrap(bos.toByteArray)
}
override def deserialize[T: ClassTag](bytes: ByteBuffer): T = {
val bis = new ByteBufferInputStream(bytes)
val in = deserializeStream(bis)
in.readObject()
}
override def deserialize[T: ClassTag](bytes: ByteBuffer, loader: ClassLoader): T = {
val bis = new ByteBufferInputStream(bytes)
val in = deserializeStream(bis, loader)
in.readObject()
}
override def serializeStream(s: OutputStream): SerializationStream = {
new JavaSerializationStream(s, counterReset, extraDebugInfo)
}
override def deserializeStream(s: InputStream): DeserializationStream = {
new JavaDeserializationStream(s, defaultClassLoader)
}
def deserializeStream(s: InputStream, loader: ClassLoader): DeserializationStream = {
new JavaDeserializationStream(s, loader)
}
}
/**
* :: DeveloperApi ::
 * A Spark serializer that uses Java's built-in serialization.
 *
 * Note that this serializer is not guaranteed to be wire-compatible across different versions of
 * Spark. It is intended to be used to serialize/de-serialize data within a single
 * Spark application.
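 *
 * A minimal usage sketch (the `spark.serializer` setting shown is the standard key for
 * selecting this serializer):
 *
 * {{{
 *   val conf = new SparkConf().set("spark.serializer", "org.apache.spark.serializer.JavaSerializer")
 *   val instance = new JavaSerializer(conf).newInstance()
 *   val bytes = instance.serialize("hello")          // ByteBuffer
 *   val back  = instance.deserialize[String](bytes)  // "hello"
 * }}}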
*/
@DeveloperApi
class JavaSerializer(conf: SparkConf) extends Serializer with Externalizable {
private var counterReset = conf.getInt("spark.serializer.objectStreamReset", 100)
private var extraDebugInfo = conf.getBoolean("spark.serializer.extraDebugInfo", true)
protected def this() = this(new SparkConf()) // For deserialization only
override def newInstance(): SerializerInstance = {
    // Thread.currentThread() returns the current thread; getContextClassLoader returns its context class loader.
val classLoader = defaultClassLoader.getOrElse(Thread.currentThread.getContextClassLoader)
new JavaSerializerInstance(counterReset, extraDebugInfo, classLoader)
}
override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
out.writeInt(counterReset)
out.writeBoolean(extraDebugInfo)
}
override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
counterReset = in.readInt()
extraDebugInfo = in.readBoolean()
}
}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/serializer/JavaSerializer.scala | Scala | apache-2.0 | 5,262 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.eventhubs
import java.io.{BufferedWriter, FileInputStream, OutputStream, OutputStreamWriter}
import java.nio.charset.StandardCharsets.UTF_8
import java.time.Duration
import java.util.concurrent.atomic.AtomicInteger
import org.apache.qpid.proton.amqp.{Binary, Decimal128, Decimal32, Decimal64, DescribedType, Symbol, UnknownDescribedType, UnsignedByte, UnsignedInteger, UnsignedLong, UnsignedShort}
import org.apache.spark.eventhubs.utils.{EventHubsTestUtils, SimpleThrottlingStatusPlugin, SimulatedClient, SimulatedPartitionStatusTracker}
import org.apache.spark.eventhubs.{EventHubsConf, EventPosition, NameAndPartition}
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.functions.{count, window}
import org.apache.spark.sql.streaming.util.StreamManualClock
import org.apache.spark.sql.streaming.{ProcessingTime, StreamTest}
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.util.Utils
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.SpanSugar._
abstract class EventHubsSourceTest extends StreamTest with SharedSQLContext {
protected var testUtils: EventHubsTestUtils = _
implicit val formats = Serialization.formats(NoTypeHints)
override def beforeAll: Unit = {
super.beforeAll
testUtils = new EventHubsTestUtils
}
override def afterAll(): Unit = {
if (testUtils != null) {
testUtils.destroyAllEventHubs()
testUtils = null
}
super.afterAll()
}
override val streamingTimeout = 30.seconds
protected def makeSureGetOffsetCalled = AssertOnQuery { q =>
// Because EventHubsSource's initialPartitionOffsets is set lazily, we need to make sure
// its "getOffset" is called before pushing any data. Otherwise, because of the race condition,
// we don't know which data should be fetched when `startingOffsets` is latest.
q.processAllAvailable()
true
}
case class AddEventHubsData(conf: EventHubsConf, data: Int*)(implicit concurrent: Boolean = false,
message: String = "")
extends AddData {
override def addData(query: Option[StreamExecution]): (Source, Offset) = {
if (query.get.isActive) {
query.get.processAllAvailable()
}
val sources = query.get.logicalPlan.collect {
case StreamingExecutionRelation(source, _) if source.isInstanceOf[EventHubsSource] =>
source.asInstanceOf[EventHubsSource]
}
if (sources.isEmpty) {
throw new Exception(
"Could not find EventHubs source in the StreamExecution logical plan to add data to")
} else if (sources.size > 1) {
throw new Exception(
"Could not select the EventHubs source in the StreamExecution logical plan as there" +
"are multiple EventHubs sources:\\n\\t" + sources.mkString("\\n\\t"))
}
val ehSource = sources.head
testUtils.send(conf.name, data = data)
val seqNos = testUtils.getLatestSeqNos(conf)
require(seqNos.size == testUtils.getEventHubs(conf.name).partitionCount)
val offset = EventHubsSourceOffset(seqNos)
logInfo(s"Added data, expected offset $offset")
(ehSource, offset)
}
override def toString: String = {
s"AddEventHubsData(data: $data)"
}
}
}
class EventHubsSourceSuite extends EventHubsSourceTest {
import EventHubsTestUtils._
import testImplicits._
private val eventHubsId = new AtomicInteger(0)
def newEventHubs(): String = {
s"eh-${eventHubsId.getAndIncrement()}"
}
private def getEventHubsConf(ehName: String): EventHubsConf = testUtils.getEventHubsConf(ehName)
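  // Feeds simulated per-partition performance samples (partition, batch start seqNo, event count,
  // elapsed receive time) into the status tracker used by the slow-partition adjustment tests.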
case class PartitionsStatusTrackerUpdate(updates: List[(NameAndPartition, Long, Int, Long)]) extends ExternalAction {
override def runAction(): Unit = {
updates.foreach{ u =>
SimulatedPartitionStatusTracker.updatePartitionPerformance(u._1, u._2, u._3, u._4)}
}
}
testWithUninterruptibleThread("deserialization of initial offset with Spark 2.1.0") {
val eventHub = testUtils.createEventHubs(newEventHubs(), DefaultPartitionCount)
testUtils.populateUniformly(eventHub.name, 5000)
withTempDir { metadataPath =>
val parameters =
getEventHubsConf(eventHub.name).toMap
val source = new EventHubsSource(sqlContext, parameters, metadataPath.getAbsolutePath)
source.getOffset.get // Write initial offset
// Make sure Spark 2.1.0 will throw an exception when reading the new log
intercept[java.lang.IllegalArgumentException] {
// Simulate how Spark 2.1.0 reads the log
Utils.tryWithResource(new FileInputStream(metadataPath.getAbsolutePath + "/0")) { in =>
val length = in.read()
val bytes = new Array[Byte](length)
in.read(bytes)
EventHubsSourceOffset(SerializedOffset(new String(bytes, UTF_8)))
}
}
}
}
testWithUninterruptibleThread("deserialization of initial offset written by future version") {
withTempDir { metadataPath =>
val futureMetadataLog =
new HDFSMetadataLog[EventHubsSourceOffset](sqlContext.sparkSession,
metadataPath.getAbsolutePath) {
override def serialize(metadata: EventHubsSourceOffset, out: OutputStream): Unit = {
out.write(0)
val writer = new BufferedWriter(new OutputStreamWriter(out, UTF_8))
writer.write(s"v99999\\n${metadata.json}")
writer.flush()
}
}
val eh = newEventHubs()
testUtils.createEventHubs(eh, DefaultPartitionCount)
testUtils.populateUniformly(eh, 5000)
val parameters = getEventHubsConf(eh).toMap
val offset = EventHubsSourceOffset((eh, 0, 0L), (eh, 1, 0L), (eh, 2, 0L))
futureMetadataLog.add(0, offset)
val source = new EventHubsSource(sqlContext, parameters, metadataPath.getAbsolutePath)
val e = intercept[java.lang.IllegalStateException] {
source.getOffset.get // Read initial offset
}
Seq(
s"maximum supported log version is v${EventHubsSource.VERSION}, but encountered v99999",
"produced by a newer version of Spark and cannot be read by this version"
).foreach { message =>
assert(e.getMessage.contains(message))
}
}
}
test("(de)serialization of initial offsets") {
val eventHub = testUtils.createEventHubs(newEventHubs(), DefaultPartitionCount)
testUtils.populateUniformly(eventHub.name, 5000)
val parameters = getEventHubsConf(eventHub.name).toMap
val reader = spark.readStream
.format("eventhubs")
.options(parameters)
testStream(reader.load())(makeSureGetOffsetCalled, StopStream, StartStream(), StopStream)
}
test("maxSeqNosPerTrigger") {
val eventHub = testUtils.createEventHubs(newEventHubs(), DefaultPartitionCount)
testUtils.populateUniformly(eventHub.name, 5000)
val parameters =
getEventHubsConf(eventHub.name)
.setMaxEventsPerTrigger(4)
.toMap
val reader = spark.readStream
.format("eventhubs")
.options(parameters)
val eventhubs = reader
.load()
.select("body")
.as[String]
val mapped: org.apache.spark.sql.Dataset[_] = eventhubs.map(_.toInt)
val clock = new StreamManualClock
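    // Blocks until the manual-clock-driven micro-batch has been processed, rethrowing any query failure.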
val waitUntilBatchProcessed = AssertOnQuery { q =>
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}
testStream(mapped)(
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
// we'll get one event per partition per trigger
CheckAnswer(0, 0, 0, 0),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// four additional events
CheckAnswer(0, 0, 0, 0, 1, 1, 1, 1),
StopStream,
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
// four additional events
CheckAnswer(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// four additional events
CheckAnswer(0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3)
)
}
test("Partitions number increased") {
val name = newEventHubs()
var eventHub = testUtils.createEventHubs(name, DefaultPartitionCount)
testUtils.send(name, partition = Some(0), data = 0 to 9)
testUtils.send(name, partition = Some(1), data = 10 to 19)
testUtils.send(name, partition = Some(2), data = 20 to 29)
testUtils.send(name, partition = Some(3), data = 30 to 39)
val parameters = testUtils
.getEventHubsConfWithoutStartingPositions(eventHub.name)
.setMaxEventsPerTrigger(8)
.setStartingPosition(EventPosition.fromStartOfStream)
.toMap
val reader = spark.readStream
.format("eventhubs")
.options(parameters)
val eventhubs = reader
.load()
.select("body")
.as[String]
val mapped: org.apache.spark.sql.Dataset[_] = eventhubs.map(_.toInt)
val clock = new StreamManualClock
val waitUntilBatchProcessed = AssertOnQuery { q =>
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}
val defaultCheckpointLocation =
Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
testStream(mapped)(
StartStream(ProcessingTime(100), clock, checkpointLocation = defaultCheckpointLocation),
waitUntilBatchProcessed,
// we'll get one event per partition per trigger
CheckAnswer(0, 1, 10, 11, 20, 21, 30, 31),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// four additional events
CheckAnswer(0, 1, 10, 11, 20, 21, 30, 31, 2, 3, 12, 13, 22, 23, 32, 33),
StopStream
)
// Add partitions to eventhub
eventHub = testUtils.createEventHubs(name, DefaultPartitionCount * 2)
testUtils.send(name, partition = Some(0), data = 0 to 9)
testUtils.send(name, partition = Some(1), data = 10 to 19)
testUtils.send(name, partition = Some(2), data = 20 to 29)
testUtils.send(name, partition = Some(3), data = 30 to 39)
testUtils.send(name, partition = Some(4), data = 40 to 49)
testUtils.send(name, partition = Some(5), data = 50 to 59)
testUtils.send(name, partition = Some(6), data = 60 to 69)
testUtils.send(name, partition = Some(7), data = 70 to 79)
testStream(mapped)(
StartStream(ProcessingTime(100), clock, checkpointLocation = defaultCheckpointLocation),
waitUntilBatchProcessed,
// four additional events
CheckAnswer(4, 14, 24, 34, 40, 50, 60, 70),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// four additional events
CheckAnswer(4, 14, 24, 34, 40, 50, 60, 70, 5, 15, 25, 35, 41, 51, 61, 71)
)
}
test("maxOffsetsPerTrigger with non-uniform partitions") {
val name = newEventHubs()
val eventHub = testUtils.createEventHubs(name, DefaultPartitionCount)
testUtils.send(name, partition = Some(0), data = 100 to 200)
testUtils.send(name, partition = Some(1), data = 10 to 20)
testUtils.send(name, partition = Some(2), data = Seq(1))
// partition 3 of 3 remains empty.
val parameters =
getEventHubsConf(name)
.setMaxEventsPerTrigger(10)
.toMap
val reader = spark.readStream
.format("eventhubs")
.options(parameters)
val eventhubs = reader
.load()
.select("body")
.as[String]
val mapped: org.apache.spark.sql.Dataset[_] = eventhubs.map(e => e.toInt)
val clock = new StreamManualClock
val waitUntilBatchProcessed = AssertOnQuery { q =>
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}
testStream(mapped)(
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
// 1 from smallest, 1 from middle, 8 from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107, 11, 108, 109, 110, 111, 112, 113,
114, 115, 116),
StopStream,
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107, 11, 108, 109, 110, 111, 112, 113,
114, 115, 116, 12, 117, 118, 119, 120, 121, 122, 123, 124, 125),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107, 11, 108, 109, 110, 111, 112, 113,
114, 115, 116, 12, 117, 118, 119, 120, 121, 122, 123, 124, 125, 13, 126, 127, 128, 129, 130,
131, 132, 133, 134)
)
}
test("cannot stop EventHubs stream") {
val eh = newEventHubs()
val eventHub = testUtils.createEventHubs(eh, DefaultPartitionCount)
testUtils.populateUniformly(eh, 5000)
val parameters = getEventHubsConf(eh).toMap
val reader = spark.readStream
.format("eventhubs")
.options(parameters)
val eventhubs = reader
.load()
.select("body")
.as[String]
val mapped: org.apache.spark.sql.Dataset[_] = eventhubs.map(_.toInt + 1)
testStream(mapped)(
makeSureGetOffsetCalled,
StopStream
)
}
test(s"assign from latest offsets") {
val eh = newEventHubs()
testFromLatestSeqNos(eh)
}
test(s"assign from earliest offsets") {
val eh = newEventHubs()
testFromEarliestSeqNos(eh)
}
test(s"assign from specific offsets") {
val eh = newEventHubs()
testFromSpecificSeqNos(eh)
}
private def testFromLatestSeqNos(eh: String): Unit = {
val eventHub = testUtils.createEventHubs(eh, DefaultPartitionCount)
testUtils.send(eh, partition = Some(0), data = Seq(-1))
require(testUtils.getEventHubs(eh).getPartitions.size === 4)
// In practice, we would use Position.fromEndOfStream which would
// translate to the configuration below.
val positions = Map(
NameAndPartition(eh, 0) -> EventPosition.fromSequenceNumber(1L),
NameAndPartition(eh, 1) -> EventPosition.fromSequenceNumber(0L),
NameAndPartition(eh, 2) -> EventPosition.fromSequenceNumber(0L),
NameAndPartition(eh, 3) -> EventPosition.fromSequenceNumber(0L)
)
val conf = getEventHubsConf(eh)
.setStartingPositions(positions)
val reader = spark.readStream
.format("eventhubs")
.options(conf.toMap)
val eventhubs = reader
.load()
.selectExpr("body")
.as[String]
val mapped: Dataset[Int] = eventhubs.map(_.toInt + 1)
testStream(mapped)(
makeSureGetOffsetCalled,
AddEventHubsData(conf, 1, 2, 3),
CheckAnswer(2, 3, 4),
StopStream,
StartStream(),
CheckAnswer(2, 3, 4), // Should get the data back on recovery
StopStream,
AddEventHubsData(conf, 4, 5, 6), // Add data when stream is stopped
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7), // Should get the added data
AddEventHubsData(conf, 7, 8),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9),
AddEventHubsData(conf, 9, 10, 11, 12, 13, 14, 15, 16),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
)
}
private def testFromEarliestSeqNos(eh: String): Unit = {
val eventHub = testUtils.createEventHubs(eh, DefaultPartitionCount)
require(testUtils.getEventHubs(eh).getPartitions.size === 4)
testUtils.send(eh, data = 1 to 3) // round robin events across partitions
val conf = getEventHubsConf(eh)
val reader = spark.readStream
reader
.format(classOf[EventHubsSourceProvider].getCanonicalName.stripSuffix("$"))
.options(conf.toMap)
val eventhubs = reader
.load()
.select("body")
.as[String]
val mapped = eventhubs.map(e => e.toInt + 1)
testStream(mapped)(
AddEventHubsData(conf, 4, 5, 6), // Add data when stream is stopped
CheckAnswer(2, 3, 4, 5, 6, 7),
StopStream,
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7),
StopStream,
AddEventHubsData(conf, 7, 8),
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9),
AddEventHubsData(conf, 9, 10, 11, 12, 13, 14, 15, 16),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
)
}
private def testFromSpecificSeqNos(eh: String): Unit = {
testUtils.createEventHubs(eh, partitionCount = 5)
require(testUtils.getEventHubs(eh).getPartitions.size === 5)
val positions = Map(
NameAndPartition(eh, 0) -> EventPosition.fromSequenceNumber(0L),
NameAndPartition(eh, 1) -> EventPosition.fromSequenceNumber(3L),
NameAndPartition(eh, 2) -> EventPosition.fromSequenceNumber(0L),
NameAndPartition(eh, 3) -> EventPosition.fromSequenceNumber(1L),
NameAndPartition(eh, 4) -> EventPosition.fromSequenceNumber(2L)
)
val conf = getEventHubsConf(eh)
.setStartingPositions(positions)
// partition 0 starts at the earliest sequence numbers, these should all be seen
testUtils.send(eh, partition = Some(0), data = Seq(-20, -21, -22))
// partition 1 starts at the latest sequence numbers, these should all be skipped
testUtils.send(eh, partition = Some(1), data = Seq(-10, -11, -12))
// partition 2 starts at 0, these should all be seen
testUtils.send(eh, partition = Some(2), data = Seq(0, 1, 2))
// partition 3 starts at 1, first should be skipped
testUtils.send(eh, partition = Some(3), data = Seq(10, 11, 12))
// partition 4 starts at 2, first and second should be skipped
testUtils.send(eh, partition = Some(4), data = Seq(20, 21, 22))
val reader = spark.readStream
.format("eventhubs")
.options(conf.toMap)
val eventhubs = reader
.load()
.select("body")
.as[String]
val mapped: Dataset[Int] = eventhubs.map(_.toInt)
testStream(mapped)(
makeSureGetOffsetCalled,
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22),
StopStream,
StartStream(),
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22), // Should get the data back on recovery
AddEventHubsData(conf, 30, 31, 32, 33, 34),
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22, 30, 31, 32, 33, 34),
StopStream
)
}
test("with application properties") {
val properties: Option[Map[String, Object]] = Some(
Map(
"A" -> "Hello, world.",
"B" -> Map.empty,
"C" -> "432".getBytes,
"D" -> null,
"E" -> Boolean.box(true),
"F" -> Int.box(1),
"G" -> Long.box(1L),
"H" -> Char.box('a'),
"I" -> new Binary("1".getBytes),
"J" -> Symbol.getSymbol("x-opt-partition-key"),
"K" -> new Decimal128(Array[Byte](0, 1, 2, 3, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 0, 0)),
"L" -> new Decimal32(12),
"M" -> new Decimal64(13),
"N" -> new UnsignedByte(1.toByte),
"O" -> new UnsignedLong(987654321L),
"P" -> new UnsignedShort(Short.box(1)),
"Q" -> new UnknownDescribedType("descriptor", "described")
))
// The expected serializes to:
// [Map(E -> true, N -> "1", J -> "x-opt-partition-key", F -> 1, A -> "Hello, world.",
// M -> 13, I -> [49], G -> 1, L -> 12, B -> {}, P -> "1", C -> [52,51,50], H -> "a",
// K -> [0,1,2,3,0,0,0,0,0,1,2,3,0,0,0,0], O -> "987654321", D -> null)]
val expected = properties.get
.mapValues {
case b: Binary =>
val buf = b.asByteBuffer()
val arr = new Array[Byte](buf.remaining)
buf.get(arr)
arr.asInstanceOf[AnyRef]
case d128: Decimal128 => d128.asBytes.asInstanceOf[AnyRef]
case d32: Decimal32 => d32.getBits.asInstanceOf[AnyRef]
case d64: Decimal64 => d64.getBits.asInstanceOf[AnyRef]
case s: Symbol => s.toString.asInstanceOf[AnyRef]
case ub: UnsignedByte => ub.toString.asInstanceOf[AnyRef]
case ui: UnsignedInteger => ui.toString.asInstanceOf[AnyRef]
case ul: UnsignedLong => ul.toString.asInstanceOf[AnyRef]
case us: UnsignedShort => us.toString.asInstanceOf[AnyRef]
case c: Character => c.toString.asInstanceOf[AnyRef]
case d: DescribedType => d.getDescribed
case default => default
}
.map { p =>
p._2 match {
case s: String => p._1 -> s
case default => p._1 -> Serialization.write(p._2)
}
}
val eventHub = testUtils.createEventHubs(newEventHubs(), partitionCount = 1)
testUtils.populateUniformly(eventHub.name, 5000, properties)
val parameters =
getEventHubsConf(eventHub.name)
.setMaxEventsPerTrigger(1)
.toMap
val reader = spark.readStream
.format("eventhubs")
.options(parameters)
val eventhubs = reader
.load()
.select("properties")
.as[Map[String, String]]
val clock = new StreamManualClock
val waitUntilBatchProcessed = AssertOnQuery { q =>
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}
testStream(eventhubs)(
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
// we'll get one event per partition per trigger
CheckAnswer(expected)
)
}
test("input row metrics") {
val eh = newEventHubs()
val eventHub = testUtils.createEventHubs(eh, DefaultPartitionCount)
testUtils.send(eh, data = Seq(-1))
require(testUtils.getEventHubs(eh).getPartitions.size === 4)
val positions = Map(
NameAndPartition(eh, 0) -> EventPosition.fromSequenceNumber(1L),
NameAndPartition(eh, 1) -> EventPosition.fromSequenceNumber(0L),
NameAndPartition(eh, 2) -> EventPosition.fromSequenceNumber(0L),
NameAndPartition(eh, 3) -> EventPosition.fromSequenceNumber(0L)
)
val conf = getEventHubsConf(eh)
.setStartingPositions(positions)
val eventhubs = spark.readStream
.format("eventhubs")
.options(conf.toMap)
.load()
.select("body")
.as[String]
val mapped = eventhubs.map(e => e.toInt + 1)
testStream(mapped)(
StartStream(trigger = ProcessingTime(1)),
makeSureGetOffsetCalled,
AddEventHubsData(conf, 1, 2, 3),
CheckAnswer(2, 3, 4),
AssertOnQuery { query =>
val recordsRead = query.recentProgress.map(_.numInputRows).sum
recordsRead == 3
}
)
}
test("EventHubs column types") {
val now = System.currentTimeMillis()
val eh = newEventHubs()
testUtils.createEventHubs(eh, partitionCount = 1)
val conf = getEventHubsConf(eh)
.setStartingPositions(Map.empty)
.setStartingPosition(EventPosition.fromSequenceNumber(0L))
require(testUtils.getEventHubs(eh).getPartitions.size === 1)
testUtils.send(eh, data = Seq(1))
val eventhubs = spark.readStream
.format("eventhubs")
.options(conf.toMap)
.load()
val query = eventhubs.writeStream
.format("memory")
.outputMode("append")
.queryName("eventhubsColumnTypes")
.start()
query.processAllAvailable()
val rows = spark.table("eventhubsColumnTypes").collect()
assert(rows.length === 1, s"Unexpected results: ${rows.toList}")
val row = rows(0)
assert(row.getAs[Array[Byte]]("body") === "1".getBytes(UTF_8), s"Unexpected results: $row")
assert(row.getAs[String]("partition") === "0", s"Unexpected results: $row")
assert(row.getAs[String]("offset") === "0", s"Unexpected results: $row")
assert(row.getAs[Long]("sequenceNumber") === 0, s"Unexpected results: $row")
assert(row.getAs[String]("publisher") === null, s"Unexpected results: $row")
assert(row.getAs[String]("partitionKey") === null, s"Unexpected results: $row")
// We cannot check the exact timestamp as it's the time that messages were inserted by the
// producer. So here we just use a low bound to make sure the internal conversion works.
assert(row.getAs[java.sql.Timestamp]("enqueuedTime").getTime >= now,
s"Unexpected results: $row")
assert(row.getAs[Map[String, String]]("properties") === Map(), s"Unexpected results: $row")
query.stop()
}
test("EventHubsSource with watermark") {
val now = System.currentTimeMillis()
val eh = newEventHubs()
testUtils.createEventHubs(eh, partitionCount = 1)
val conf = getEventHubsConf(eh)
.setStartingPositions(Map.empty)
.setStartingPosition(EventPosition.fromSequenceNumber(0L))
require(testUtils.getEventHubs(eh).getPartitions.size === 1)
testUtils.send(eh, data = Seq(1))
val eventhubs = spark.readStream
.format("eventhubs")
.options(conf.toMap)
.load()
val windowedAggregation = eventhubs
.withWatermark("enqueuedTime", "10 seconds")
.groupBy(window($"enqueuedTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start") as 'window, $"count")
val query = windowedAggregation.writeStream
.format("memory")
.outputMode("complete")
.queryName("eventhubsWatermark")
.start()
query.processAllAvailable()
val rows = spark.table("eventhubsWatermark").collect()
assert(rows.length === 1, s"Unexpected results: ${rows.toList}")
val row = rows(0)
// We cannot check the exact window start time as it depends on the time that messages were
// inserted by the producer. So here we just use a low bound to make sure the internal
// conversion works.
assert(row.getAs[java.util.Date]("window").getTime >= now - 5 * 1000,
s"Unexpected results: $row")
assert(row.getAs[Int]("count") === 1, s"Unexpected results: $row")
query.stop()
}
test("setSlowPartitionAdjustment without any slow partition") {
val eventHub = testUtils.createEventHubs(newEventHubs(), DefaultPartitionCount)
testUtils.populateUniformly(eventHub.name, 5000)
val partitions: List[NameAndPartition] = List(NameAndPartition(eventHub.name, 0),
NameAndPartition(eventHub.name, 1),
NameAndPartition(eventHub.name, 2),
NameAndPartition(eventHub.name, 3))
val parameters =
getEventHubsConf(eventHub.name)
.setMaxEventsPerTrigger(20)
.setSlowPartitionAdjustment(true)
.setMaxAcceptableBatchReceiveTime(Duration.ofMillis(4))
.setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin)
.setStartingPosition(EventPosition.fromSequenceNumber(0L))
.toMap
val reader = spark.readStream
.format("eventhubs")
.options(parameters)
val eventhubs = reader
.load()
.select("body")
.as[String]
val mapped: org.apache.spark.sql.Dataset[_] = eventhubs.map(_.toInt)
val clock = new StreamManualClock
val waitUntilBatchProcessed = AssertOnQuery { q =>
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}
val noSlowPartition: Map[NameAndPartition, Double] =
Map(partitions(0) -> 1.0, partitions(1) -> 1.0, partitions(2) -> 1.0, partitions(3) -> 1.0)
testStream(mapped)(
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
// we'll get 5 events per partition per trigger
Assert(Set[Long](0).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4),
PartitionsStatusTrackerUpdate(List( (partitions(0), 0L, 5, 9L), (partitions(1), 0L, 5, 11L),
(partitions(2), 0L, 5, 9L), (partitions(3), 0L, 5, 11L))),
//Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty),
Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// the difference between max and min time per event is less than the acceptable time difference (1 MS)
// we should get 5 events per partition per trigger
Assert(Set[Long](0, 1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5, 6, 7, 8, 9),
PartitionsStatusTrackerUpdate(List( (partitions(0), 5L, 5, 16L), (partitions(1), 5L, 5, 13L),
(partitions(2), 5L, 5, 16L), (partitions(3), 5L, 5, 15L))),
Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
//Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// the difference between max and min time per event is less than the acceptable time difference (1 MS)
// we should get 5 events per partition per trigger
Assert(Set[Long](0, 1, 2).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10, 11, 12, 13, 14),
      // miss the performance update for this batch. Next round every partition is considered normal speed
Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
//Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// we should get 5 events per partition per trigger
Assert(Set[Long](1, 2, 3).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15, 16, 17, 18, 19),
// get update for three partitions (missing partition 1)
PartitionsStatusTrackerUpdate(List( (partitions(0), 15L, 5, 55L),
(partitions(2), 15L, 5, 52L), (partitions(3), 15L, 5, 43L))),
Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
//Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// all partitions have receiveTimePerEvent <= avg + stdDev
// we should get 5 events per partition per trigger
Assert(Set[Long](2, 3, 4).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20, 21, 22, 23, 24),
StopStream,
StartStream(ProcessingTime(100), clock),
// get update for the last batch before stopping the stream. It should be ignored because the tracker
// state should be clean at the start of the stream
PartitionsStatusTrackerUpdate(List( (partitions(0), 20L, 5, 100L), (partitions(1), 20L, 5, 13L),
(partitions(2), 20L, 5, 16L), (partitions(3), 20L, 5, 15L))),
Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty),
waitUntilBatchProcessed,
// last received status update should be ignored since it belongs to a batch before restarting the stream
// we should get 5 events per partition per trigger
Assert(Set[Long](0, 1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(25, 26, 27, 28, 29, 25, 26, 27, 28, 29, 25, 26, 27, 28, 29, 25, 26, 27, 28, 29),
PartitionsStatusTrackerUpdate(List( (partitions(0), 25L, 5, 73L), (partitions(1), 25L, 5, 72L),
(partitions(2), 25L, 5, 66L), (partitions(3), 25L, 5, 73L))),
Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
//Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// all partitions have receiveTimePerEvent <= avg + stdDev
// we should get 5 events per partition per trigger
Assert(Set[Long](0, 1, 2).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(30, 31, 32, 33, 34, 30, 31, 32, 33, 34, 30, 31, 32, 33, 34, 30, 31, 32, 33, 34)
)
}
test("setSlowPartitionAdjustment with slow partitions") {
val eventHub = testUtils.createEventHubs(newEventHubs(), DefaultPartitionCount)
testUtils.populateUniformly(eventHub.name, 10000)
val partitions: List[NameAndPartition] = List(NameAndPartition(eventHub.name, 0),
NameAndPartition(eventHub.name, 1),
NameAndPartition(eventHub.name, 2),
NameAndPartition(eventHub.name, 3))
val parameters =
getEventHubsConf(eventHub.name)
.setMaxEventsPerTrigger(20)
.setSlowPartitionAdjustment(true)
.setMaxAcceptableBatchReceiveTime(Duration.ofMillis(3))
.setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin)
.setStartingPosition(EventPosition.fromSequenceNumber(0L))
.toMap
val reader = spark.readStream
.format("eventhubs")
.options(parameters)
val eventhubs = reader
.load()
.select("body")
.as[String]
val mapped: org.apache.spark.sql.Dataset[_] = eventhubs.map(_.toInt)
val clock = new StreamManualClock
val waitUntilBatchProcessed = AssertOnQuery { q =>
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}
testStream(mapped)(
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
// we'll get 5 events per partition per trigger
Assert(Set[Long](0).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4),
// for the next batch, let's make partition 2 slow
PartitionsStatusTrackerUpdate(List( (partitions(0), 0L, 5, 18L), (partitions(1), 0L, 5, 21L),
(partitions(2), 0L, 5, 42L), (partitions(3), 0L, 5, 25L))),
Assert(Map(partitions(0) -> 1.0, partitions(1) -> 1.0, partitions(2) -> 0.63, partitions(3) -> 1.0)
.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// we should get 3 events for partition 2, 5 events for other partitions
Assert(Set[Long](0, 1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5, 6, 7, 5, 6, 7, 8, 9),
// for the next batch, let's make partition 1 slow and recover partition 2 from being slow
PartitionsStatusTrackerUpdate(List( (partitions(0), 5L, 5, 18L), (partitions(1), 5L, 5, 163L),
(partitions(2), 5L, 3, 10L), (partitions(3), 5L, 5, 15L))),
Assert(Map(partitions(0) -> 1.0, partitions(1) -> 0.33, partitions(2) -> 1.0, partitions(3) -> 1.0)
.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// we should get 4 events for partitions 0 and 3, 5 events for partition 2, and just 1 event for partition 1
// partitions 0 and 3 get 4 events because those partitions have fewer remaining events (this is not related to the adjustment logic)
Assert(Set[Long](0, 1, 2).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(10, 11, 12, 13, 10, 8, 9, 10, 11, 12, 10, 11, 12, 13),
// for the next batch, let's only have 2 updates (one slow partition, one fast partition)
// since we don't have enough updated partitions, we should continue with the previous partition performance
PartitionsStatusTrackerUpdate(List( (partitions(0), 10L, 4, 13L), (partitions(3), 10L, 4, 168L))),
Assert(Map(partitions(0) -> 1.0, partitions(1) -> 0.33, partitions(2) -> 1.0, partitions(3) -> 1.0)
.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// we should get 4 events for partitions 0 and 3, 5 events for partition 2, and just 1 event for partition 1
Assert(Set[Long](1, 2, 3).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(14, 15, 16, 17, 11, 13, 14, 15, 16, 17, 14, 15, 16, 17),
// let's get back to normal for all partitions
PartitionsStatusTrackerUpdate(List( (partitions(0), 14L, 4, 12L), (partitions(1), 11L, 1, 3L),
(partitions(2), 13L, 5, 14L), (partitions(3), 14L, 4, 11L))),
Assert( Map(partitions(0) -> 1.0, partitions(1) -> 1.0, partitions(2) -> 1.0, partitions(3) -> 1.0)
.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// all partitions have receiveTimePerEvent <= avg + stdDev
// Since partition 1 is behind, the prorate logic (independent of the slow-partition adjustment) tries to catch it up,
// so partition 1 gets 5 events while the other partitions get 4 each
Assert(Set[Long](2, 3, 4).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(18, 19, 20, 21, 12, 13, 14, 15, 16, 18, 19, 20, 21, 18, 19, 20, 21)
)
}
test("setSlowPartitionAdjustment with more than one slow partitions") {
val eventHub = testUtils.createEventHubs(newEventHubs(), 5)
testUtils.populateUniformly(eventHub.name, 1000)
val partitions: List[NameAndPartition] = List(NameAndPartition(eventHub.name, 0),
NameAndPartition(eventHub.name, 1),
NameAndPartition(eventHub.name, 2),
NameAndPartition(eventHub.name, 3),
NameAndPartition(eventHub.name, 4))
val parameters =
getEventHubsConf(eventHub.name)
.setMaxEventsPerTrigger(50)
.setSlowPartitionAdjustment(true)
.setMaxAcceptableBatchReceiveTime(Duration.ofMillis(4))
.setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin)
.setStartingPosition(EventPosition.fromSequenceNumber(0L))
.toMap
val reader = spark.readStream
.format("eventhubs")
.options(parameters)
val eventhubs = reader
.load()
.select("body")
.as[String]
val mapped: org.apache.spark.sql.Dataset[_] = eventhubs.map(_.toInt)
val clock = new StreamManualClock
val waitUntilBatchProcessed = AssertOnQuery { q =>
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}
testStream(mapped)(
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
// we'll get 10 events per partition per trigger
Assert(Set[Long](0).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
// for the next batch, let's make partitions 0 and 4 slow
PartitionsStatusTrackerUpdate(List( (partitions(0), 0L, 10, 62L), (partitions(1), 0L, 10, 21L),
(partitions(2), 0L, 10, 20L), (partitions(3), 0L, 10, 40L), (partitions(4), 0L, 10, 65L))),
Assert(Map(partitions(0) -> 0.67, partitions(1) -> 1.0, partitions(2) -> 1.0, partitions(3) -> 1.0, partitions(4) -> 0.64)
.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// we should get 10 events for partitions 1, 2, 3 and 6 events for partitions 0 and 4
Assert(Set[Long](0, 1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
CheckLastBatch(10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 11, 12, 13, 14, 15)
)
}
}
| hdinsight/spark-eventhubs | core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala | Scala | apache-2.0 | 42,283 |
package me.eax.examples.statsd.client
import scala.util._
import scala.concurrent._
import me.eax.examples.statsd.client.utils._
import scala.concurrent.ExecutionContext.Implicits.global
object StatsDClientExample extends App {
val client = new MetricsClientImpl
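// emit 500 rounds of randomized counter, timer and value metrics, timing a 100 ms sleep both inside a Future and synchronously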
for(i <- 1 to 500) {
val inc = (1 + Random.nextInt(5)).toLong
val time = (1 + Random.nextInt(100)).toLong
val value = (1 + Random.nextInt(1000)).toLong
client.incrementCounter("test.counter", inc)
client.recordTime("test.time", time)
client.recordValue("test.value", value)
recordTimeF("thread.sleep.future") { Future { Thread.sleep(100) } }
recordTime("thread.sleep") { Thread.sleep(100) }
}
}
| afiskon/scala-statsd-example | src/main/scala/me/eax/examples/statsd/client/StatsDClientExample.scala | Scala | mit | 699 |
import io.simao.lobster.{FeedItem, Feed, BotTwitterStatus}
import org.joda.time.DateTime
import org.scalamock.scalatest.MockFactory
import org.scalatest.FunSuite
import twitter4j.Status
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
class BotTwitterStatusTest extends FunSuite with MockFactory {
implicit val ec = ExecutionContext.global
val item1 = FeedItem("g0", "Title 1", "link 1", "link 2", DateTime.now().minusDays(1), List("tag0", "tag1"), Some(11))
val item2 = FeedItem("g1", "Title 1", "link 1", "link 2", DateTime.now().minusDays(2), List(), Some(11))
val feed = Feed(List(item1, item2))
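// item2 is older than item1, so updates are expected to be tweeted oldest-first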
test("returns a list of Future[FeedItem], ordered by date") {
val updateTwitterFn = (_: String) ⇒ Future.successful("Tweeted Text")
val subject = new BotTwitterStatus(updateTwitterFn)
val updates = Future.sequence(subject.update(feed, DateTime.now().minusDays(10), 10))
val result = Await.result(updates, 2.seconds)
assert(result === List(item2, item1))
}
test("tags are built properly") {
val updateTwitterFn = mockFunction[String, Future[String]]
updateTwitterFn.expects("Title 1 link 1 link 2 #tag0 #tag1")
.once()
.returning(Future.successful("Tweeted Text"))
updateTwitterFn.expects("Title 1 link 1 link 2")
.once()
.returning(Future.successful("Tweeted Text"))
val subject = new BotTwitterStatus(updateTwitterFn)
val updates = Future.sequence(subject.update(feed, DateTime.now().minusDays(10), 10))
Await.ready(updates, 2.seconds)
}
}
| simao/lobsters | src/test/scala/BotTwitterStatusTest.scala | Scala | mit | 1,584 |
/**
Open Bank Project - API
Copyright (C) 2011, 2013, TESOBE / Music Pictures Ltd
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Email: [email protected]
TESOBE / Music Pictures Ltd
Osloerstrasse 16/17
Berlin 13359, Germany
This product includes software developed at
TESOBE (http://www.tesobe.com/)
by
Ayoub Benali: ayoub AT tesobe DOT com
Nina Gänsdorfer: nina AT tesobe DOT com
*/
package com.tesobe.model
import net.liftweb.mapper._
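// Bank account credentials (account number, owning user, bank national identifier, PIN code),
// uniquely indexed by (accountNumber, bankNationalIdentifier)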
class BankAccountDetails extends LongKeyedMapper[BankAccountDetails] with CreatedUpdated{
def getSingleton = BankAccountDetails
def primaryKeyField = id
object id extends MappedLongIndex(this)
object accountNumber extends MappedString(this, 32)
object userId extends MappedString(this, 64){
override def defaultValue = ""
}
object bankNationalIdentifier extends MappedString(this, 32)
object pinCode extends MappedString(this, 2048)
}
object BankAccountDetails extends BankAccountDetails with LongKeyedMetaMapper[BankAccountDetails]{
override def dbIndexes = UniqueIndex(accountNumber, bankNationalIdentifier) ::super.dbIndexes
}
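// Per-bank log entry recording whether transactions were fetched for the bank identified by nationalIdentifier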
class BankLog extends LongKeyedMapper[BankLog] with CreatedTrait{
def getSingleton = BankLog
def primaryKeyField = id
object id extends MappedLongIndex(this)
object nationalIdentifier extends MappedString(this, 32)
object transactionsFetched extends MappedBoolean(this)
}
object BankLog extends BankLog with LongKeyedMetaMapper[BankLog] | OpenBankProject/Bank-account-data-storage | src/main/scala/com/tesobe/model/BankAccountDetails.scala | Scala | agpl-3.0 | 2,045 |
package com.faacets.qalg
package impl
import scala.{specialized => sp}
import scala.annotation.tailrec
import spire.algebra._
import spire.syntax.cfor._
import spire.syntax.field._
import algebra._
import indup.algebra._
object VecDense {
val seed = 0xFAACE73
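// hash only entries that can be non-zero, mixing (index, value) pairs commutatively in the style of MurmurHash3's unordered hashing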
@inline def hash[@sp(Double, Long) A: Zero](v: math.Vector[A, _]): Int = {
import scala.util.hashing.MurmurHash3._
var a = 0
var b = 1L
var n = 0
cforRange(0 until v.size) { k =>
val x = v(k)
if (Zero[A].canBeNonZero(x)) {
val h = k * 41 + x.##
a += h
if (h != 0) b *= h
n += 1
}
}
var h = seed
h = mix(h, v.size)
h = mix(h, a)
h = mix(h, b.toInt)
h = mixLast(h, (b >> 32).toInt)
finalizeHash(h, n)
}
@inline def hash[V, @sp(Double, Long) A](v: V)(implicit V: Vec[V, A]): Int = {
import V._
import scala.util.hashing.MurmurHash3._
var a = 0
var b = 1L
var n = 0
cforRange(0 until length(v)) { k =>
val x = apply(v, k)
if (zeroA.canBeNonZero(x)) {
val h = k * 41 + x.##
a += h
if (h != 0) b *= h
n += 1
}
}
var h = seed
h = mix(h, length(v))
h = mix(h, a)
h = mix(h, b.toInt)
h = mixLast(h, (b >> 32).toInt)
finalizeHash(h, n)
}
@inline def equal[@sp(Double, Long) A](lhs: math.Vector[A, _], rhs: math.Vector[A, _]): Boolean = {
val n = lhs.size
if (rhs.size != n) return false
cforRange(0 until n) { k =>
if (lhs(k) != rhs(k)) return false
}
true
}
@inline def eqv[V, @sp(Double, Long) A: Eq](lhs: V, rhs: V)(implicit V: Vec[V, A]): Boolean = {
val n = V.length(lhs)
if (V.length(rhs) != n) return false
cforRange(0 until n) { k =>
if (Eq[A].neqv(V.apply(lhs, k), V.apply(rhs, k))) return false
}
true
}
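// element-wise map from a vector V over A to a vector W over B, built via the supplied builder factory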
final class MapperImpl[@sp(Double, Long) A, @sp(Double, Long) B, V, W](val builder: V => VecBuilder[W, B])(implicit val V: VecBuild[V, A], val W: VecBuild[W, B]) extends Mapper[A, B, V, W] {
def map(v: V)(f: A => B): W = {
val d = V.length(v)
val b = builder(v)
cforRange(0 until d) { i =>
b.add(i, f(V.apply(v, i)))
}
b.result()
}
}
@inline def feedTo[V, @sp(Double, Long) A](v: V, b: VecBuilder[_, A])(implicit V: VecBuild[V, A]): Unit = {
import V._
cforRange(0 until length(v)) { i =>
b.add(i, apply(v, i))
}
}
@inline def plus[V, @sp(Double, Long) A](x: V, y: V)(implicit V: VecRing[V, A]): V = {
import V._
val d = length(x)
require(d == length(y))
val b = builder(d, options(x))
cforRange(0 until d) { i =>
b.add(i, apply(x, i) + apply(y, i))
}
b.result()
}
@inline def minus[V, @sp(Double, Long) A](x: V, y: V)(implicit V: VecRing[V, A]): V = {
import V._
val d = length(x)
require(d == length(y))
val b = builder(d, options(x))
cforRange(0 until d) { i =>
b.add(i, apply(x, i) - apply(y, i))
}
b.result()
}
@inline def negate[V, @sp(Double, Long) A](v: V)(implicit V: VecRing[V, A]): V = {
import V._
val d = length(v)
val b = builder(d, options(v))
cforRange(0 until d) { i =>
b.add(i, -apply(v, i))
}
b.result()
}
@inline def timesl[V, @sp(Double, Long) A](a: A, v: V)(implicit V: VecRing[V, A]): V = {
import V._
val d = length(v)
val b = builder(d, options(v))
cforRange(0 until d) { i =>
b.add(i, a * apply(v, i))
}
b.result()
}
@inline def timesr[V, @sp(Double, Long) A](v: V, a: A)(implicit V: VecRing[V, A]): V = {
import V._
val d = length(v)
val b = builder(d, options(v))
cforRange(0 until d) { i =>
b.add(i, apply(v, i) * a)
}
b.result()
}
@inline def divr[V, @sp(Double, Long) A](v: V, a: A)(implicit V: VecField[V, A]): V = {
import V._
val d = length(v)
val b = builder(d, options(v))
cforRange(0 until d) { i =>
b.add(i, apply(v, i) / a)
}
b.result()
}
@inline def dot[V, @sp(Double, Long) A](x: V, y: V)(implicit V: VecRing[V, A]): A = {
import V._
val d = length(x)
require(d == length(y))
var a = scalar.zero
cforRange(0 until d) { i =>
a += apply(x, i) * apply(y, i)
}
a
}
}
| denisrosset/qalg | core/src/main/scala/qalg/impl/VecDense.scala | Scala | mit | 4,279 |