| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5 to 1M | stringlengths 5 to 109 | stringlengths 6 to 208 | stringclasses 1 value | stringclasses 15 values | int64 5 to 1M |
package org.http4s
import java.io.{File, InputStream, Reader}
import java.nio.{ByteBuffer, CharBuffer}
import java.nio.file.Path
import scala.concurrent.{ExecutionContext, Future}
import scala.language.implicitConversions
import org.http4s.EntityEncoder._
import org.http4s.headers.{`Transfer-Encoding`, `Content-Type`}
import org.http4s.multipart.{Multipart, MultipartEncoder}
import scalaz._
import scalaz.concurrent.Task
import scalaz.std.option._
import scalaz.stream.{Process0, Channel, Process, io}
import scalaz.stream.nio.file
import scalaz.stream.Cause.{End, Terminated}
import scalaz.stream.Process.emit
import scalaz.syntax.apply._
import scodec.bits.ByteVector
trait EntityEncoder[A] { self =>
/** Convert the type `A` to an [[EntityEncoder.Entity]] in the `Task` monad */
def toEntity(a: A): Task[EntityEncoder.Entity]
/** Headers that may be added to a [[Message]]
*
* Examples of such headers would be Content-Type.
* __NOTE:__ The Content-Length header will be generated from the resulting Entity and thus should not be added.
*/
def headers: Headers
/** Make a new [[EntityEncoder]] using this type as a foundation */
def contramap[B](f: B => A): EntityEncoder[B] = new EntityEncoder[B] {
override def toEntity(a: B): Task[Entity] = self.toEntity(f(a))
override def headers: Headers = self.headers
}
/** Get the [[org.http4s.headers.Content-Type]] of the body encoded by this [[EntityEncoder]], if defined in the headers */
def contentType: Option[`Content-Type`] = headers.get(`Content-Type`)
/** Get the [[Charset]] of the body encoded by this [[EntityEncoder]], if defined in the headers */
def charset: Option[Charset] = headers.get(`Content-Type`).flatMap(_.charset)
/** Generate a new EntityEncoder that will contain the `Content-Type` header */
def withContentType(tpe: `Content-Type`): EntityEncoder[A] = new EntityEncoder[A] {
override def toEntity(a: A): Task[Entity] = self.toEntity(a)
override val headers: Headers = self.headers.put(tpe)
}
}
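/**
 * Hypothetical usage sketch (not part of the original file): an encoder for a custom
 * type is normally derived from an existing one with `contramap`, which keeps the
 * underlying headers. `Person` and `PersonEncoderSketch` are illustrative names only.
 */
private object PersonEncoderSketch {
final case class Person(name: String, age: Int)
def personEncoder(implicit W: EntityEncoder[String]): EntityEncoder[Person] =
W.contramap[Person](p => s"${p.name}, ${p.age}")
}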
object EntityEncoder extends EntityEncoderInstances {
final case class Entity(body: EntityBody, length: Option[Long] = None)
/** summon an implicit [[EntityEncoder]] */
def apply[A](implicit ev: EntityEncoder[A]): EntityEncoder[A] = ev
object Entity {
implicit val entityInstance: Monoid[Entity] = Monoid.instance(
(a, b) => Entity(a.body ++ b.body, (a.length |@| b.length) { _ + _ }),
empty
)
lazy val empty = Entity(EmptyBody, Some(0L))
}
/** Create a new [[EntityEncoder]] */
def encodeBy[A](hs: Headers)(f: A => Task[Entity]): EntityEncoder[A] = new EntityEncoder[A] {
override def toEntity(a: A): Task[Entity] = f(a)
override def headers: Headers = hs
}
/** Create a new [[EntityEncoder]] */
def encodeBy[A](hs: Header*)(f: A => Task[Entity]): EntityEncoder[A] = {
val hdrs = if(hs.nonEmpty) Headers(hs.toList) else Headers.empty
encodeBy(hdrs)(f)
}
/** Create a new [[EntityEncoder]]
*
* This constructor is a helper for types that can be serialized synchronously, for example a String.
*/
def simple[A](hs: Header*)(toChunk: A => ByteVector): EntityEncoder[A] = encodeBy(hs:_*){ a =>
val c = toChunk(a)
Task.now(Entity(emit(c), Some(c.length)))
}
}
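/**
 * Hypothetical sketch of the `simple` constructor above: a synchronously rendered
 * encoder for a list of fields. The media type and the CSV-style rendering are
 * illustrative choices, not something http4s prescribes.
 */
private object SimpleEncoderSketch {
val csvLineEncoder: EntityEncoder[List[String]] =
EntityEncoder.simple(`Content-Type`(MediaType.`text/plain`)) { fields =>
ByteVector.view(fields.mkString(",").getBytes(DefaultCharset.nioCharset))
}
}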
trait EntityEncoderInstances0 {
/** Encodes a value from its Show instance. Too broad to be implicit, too useful to not exist. */
def showEncoder[A](implicit charset: Charset = DefaultCharset, show: Show[A]): EntityEncoder[A] = {
val hdr = `Content-Type`(MediaType.`text/plain`).withCharset(charset)
simple[A](hdr)(a => ByteVector.view(show.shows(a).getBytes(charset.nioCharset)))
}
implicit def futureEncoder[A](implicit W: EntityEncoder[A], ec: ExecutionContext): EntityEncoder[Future[A]] =
new EntityEncoder[Future[A]] {
override def toEntity(a: Future[A]): Task[Entity] = util.task.futureToTask(a).flatMap(W.toEntity)
override def headers: Headers = W.headers
}
implicit def naturalTransformationEncoder[F[_], A](implicit N: ~>[F, Task], W: EntityEncoder[A]): EntityEncoder[F[A]] =
taskEncoder[A](W).contramap { f: F[A] => N(f) }
/**
* A process encoder is intended for streaming and therefore cannot compute its
* body, or the Content-Length, in advance. It is meant for use with chunked
* transfer encoding.
*/
implicit def sourceEncoder[A](implicit W: EntityEncoder[A]): EntityEncoder[Process[Task, A]] =
new EntityEncoder[Process[Task, A]] {
override def toEntity(a: Process[Task, A]): Task[Entity] = {
Task.now(Entity(a.flatMap(a => Process.await(W.toEntity(a))(_.body)), None))
}
override def headers: Headers =
W.headers.get(`Transfer-Encoding`) match {
case Some(transferCoding) if transferCoding.hasChunked =>
W.headers
case _ =>
W.headers.put(`Transfer-Encoding`(TransferCoding.chunked))
}
}
implicit def process0Encoder[A](implicit W: EntityEncoder[A]): EntityEncoder[Process0[A]] =
sourceEncoder[A].contramap(_.toSource)
}
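/**
 * Hypothetical sketch of `sourceEncoder` above: a streaming body gets no
 * Content-Length (Entity.length is None) and the headers advertise chunked
 * transfer encoding. The string chunks here are illustrative.
 */
private object SourceEncoderSketch {
def streamedEntity(implicit W: EntityEncoder[Process[Task, String]]): Task[Entity] =
W.toEntity(Process("hello, ", "world").toSource)
}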
trait EntityEncoderInstances extends EntityEncoderInstances0 {
private val DefaultChunkSize = 4096
implicit def stringEncoder(implicit charset: Charset = DefaultCharset): EntityEncoder[String] = {
val hdr = `Content-Type`(MediaType.`text/plain`).withCharset(charset)
simple(hdr)(s => ByteVector.view(s.getBytes(charset.nioCharset)))
}
implicit def charBufferEncoder(implicit charset: Charset = DefaultCharset): EntityEncoder[CharBuffer] =
stringEncoder.contramap(_.toString)
implicit def charArrayEncoder(implicit charset: Charset = DefaultCharset): EntityEncoder[Array[Char]] =
stringEncoder.contramap(new String(_))
implicit val byteVectorEncoder: EntityEncoder[ByteVector] =
simple(`Content-Type`(MediaType.`application/octet-stream`))(identity)
implicit val byteArrayEncoder: EntityEncoder[Array[Byte]] = byteVectorEncoder.contramap(ByteVector.apply)
implicit val byteBufferEncoder: EntityEncoder[ByteBuffer] = byteVectorEncoder.contramap(ByteVector.apply)
implicit def taskEncoder[A](implicit W: EntityEncoder[A]): EntityEncoder[Task[A]] = new EntityEncoder[Task[A]] {
override def toEntity(a: Task[A]): Task[Entity] = a.flatMap(W.toEntity)
override def headers: Headers = W.headers
}
// TODO parameterize chunk size
// TODO if Header moves to Entity, can add a Content-Disposition with the filename
implicit val fileEncoder: EntityEncoder[File] =
chunkedEncoder { f: File => file.chunkR(f.getAbsolutePath) }
// TODO parameterize chunk size
// TODO if Header moves to Entity, can add a Content-Disposition with the filename
implicit val filePathEncoder: EntityEncoder[Path] = fileEncoder.contramap(_.toFile)
// TODO parameterize chunk size
implicit def inputStreamEncoder[A <: InputStream]: EntityEncoder[A] =
chunkedEncoder { is: InputStream => io.chunkR(is) }
// TODO parameterize chunk size
implicit def readerEncoder[A <: Reader](implicit charset: Charset = DefaultCharset): EntityEncoder[A] =
// TODO polish and contribute back to scalaz-stream
sourceEncoder[Array[Char]].contramap { r: Reader =>
val unsafeChunkR = io.resource(Task.delay(r))(
src => Task.delay(src.close())) { src =>
Task.now { buf: Array[Char] => Task.delay {
val m = src.read(buf)
m match {
case l if l == buf.length => buf
case -1 => throw Terminated(End)
case _ => buf.slice(0, m)
}
}}
}
val chunkR = unsafeChunkR.map(f => (n: Int) => {
val buf = new Array[Char](n)
f(buf)
})
Process.constant(DefaultChunkSize).toSource.through(chunkR)
}
def chunkedEncoder[A](f: A => Channel[Task, Int, ByteVector], chunkSize: Int = DefaultChunkSize): EntityEncoder[A] =
sourceEncoder[ByteVector].contramap { a => Process.constant(chunkSize).toSource.through(f(a)) }
implicit val multipartEncoder: EntityEncoder[Multipart] =
MultipartEncoder
implicit val entityEncoderContravariant: Contravariant[EntityEncoder] = new Contravariant[EntityEncoder] {
override def contramap[A, B](r: EntityEncoder[A])(f: (B) => A): EntityEncoder[B] = r.contramap(f)
}
implicit val serverSentEventEncoder: EntityEncoder[EventStream] =
sourceEncoder[ByteVector].contramap[EventStream] { _.pipe(ServerSentEvent.encoder) }
.withContentType(MediaType.`text/event-stream`)
}
| hvesalai/http4s | core/src/main/scala/org/http4s/EntityEncoder.scala | Scala | apache-2.0 | 8,522 |
package mesosphere.marathon.core.launcher
import org.rogach.scallop.ScallopConf
trait OfferProcessorConfig extends ScallopConf {
lazy val offerMatchingTimeout = opt[Int](
"offer_matching_timeout",
descr = "Offer matching timeout (ms). Stop trying to match additional tasks for this offer after this time.",
default = Some(1000))
lazy val saveTasksToLaunchTimeout = opt[Int](
"save_tasks_to_launch_timeout",
descr = "Timeout (ms) after matching an offer for saving all matched tasks that we are about to launch. " +
"When reaching the timeout, only the tasks that we could save within the timeout are also launched. " +
"All other task launches are temporarily rejected and retried later.",
default = Some(3000))
lazy val declineOfferDuration = opt[Long](
"decline_offer_duration",
descr = "(Default: 120 seconds) " +
"The duration (milliseconds) for which to decline offers by default",
default = Some(120000))
}
| timcharper/marathon | src/main/scala/mesosphere/marathon/core/launcher/OfferProcessorConfig.scala | Scala | apache-2.0 | 978 |
/*
* Copyright 2011-2018 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.nta.ntarule.rules_2_02
import java.net.URI
import scala.collection.immutable
import eu.cdevreeze.nta.common.taxonomy.Taxonomy
import eu.cdevreeze.nta.common.validator.Result
import eu.cdevreeze.nta.common.validator.TaxonomyDocumentValidator
import eu.cdevreeze.nta.common.validator.TaxonomyValidatorFactory
import eu.cdevreeze.nta.common.validator.ValidationScope
import eu.cdevreeze.nta.ntarule.NtaRules
import eu.cdevreeze.tqa.base.dom.TaxonomyDocument
import eu.cdevreeze.tqa.base.dom.XsdSchema
import eu.cdevreeze.yaidom.core.EName
import eu.cdevreeze.nta.ntarule.NtaRuleConfigWrapper
/**
* Validator of rule 2.02.00.10. The rule says that the schema document must not have blockDefault, finalDefault, or
* version attributes.
*
* @author Chris de Vreeze
*/
final class Validator_2_02_00_10(val excludedDocumentUris: Set[URI]) extends TaxonomyDocumentValidator {
import Validator_2_02_00_10._
def ruleName: String = NtaRules.extractRuleName(getClass)
def validateDocument(
doc: TaxonomyDocument,
taxonomy: Taxonomy,
validationScope: ValidationScope): immutable.IndexedSeq[Result] = {
require(isTypeOfDocumentToValidate(doc, taxonomy), s"Document ${doc.uri} should not be validated")
val blockDefaultOption = doc.documentElement.attributeOption(BlockDefaultEName)
val finalDefaultOption = doc.documentElement.attributeOption(FinalDefaultEName)
val versionOption = doc.documentElement.attributeOption(VersionEName)
val blockDefaultErrors = blockDefaultOption.toIndexedSeq.map(_ => Result.makeErrorResult(
ruleName,
"block-default-not-allowed",
s"Attribute blockDefault not allowed in '${doc.uri}'"))
val finalDefaultErrors = finalDefaultOption.toIndexedSeq.map(_ => Result.makeErrorResult(
ruleName,
"final-default-not-allowed",
s"Attribute finalDefault not allowed in '${doc.uri}'"))
val versionErrors = versionOption.toIndexedSeq.map(_ => Result.makeErrorResult(
ruleName,
"version-not-allowed",
s"Attribute version not allowed in '${doc.uri}'"))
blockDefaultErrors ++ finalDefaultErrors ++ versionErrors
}
def isTypeOfDocumentToValidate(doc: TaxonomyDocument, taxonomy: Taxonomy): Boolean = {
doc.documentElement.isInstanceOf[XsdSchema]
}
}
object Validator_2_02_00_10 extends TaxonomyValidatorFactory {
type Validator = Validator_2_02_00_10
type CfgWrapper = NtaRuleConfigWrapper
def ruleName: String = {
NtaRules.extractRuleName(classOf[Validator_2_02_00_10])
}
def create(configWrapper: NtaRuleConfigWrapper): Validator_2_02_00_10 = {
new Validator_2_02_00_10(
configWrapper.excludedDocumentUrisForRule(ruleName))
}
private val BlockDefaultEName = EName("blockDefault")
private val FinalDefaultEName = EName("finalDefault")
private val VersionEName = EName("version")
}
| dvreeze/nta | src/main/scala/eu/cdevreeze/nta/ntarule/rules_2_02/Validator_2_02_00_10.scala | Scala | apache-2.0 | 3,528 |
package spark.streaming.examples
import spark.streaming.{Seconds, StreamingContext}
import spark.storage.StorageLevel
import com.twitter.algebird._
import spark.streaming.StreamingContext._
import spark.SparkContext._
/**
* Illustrates the use of the Count-Min Sketch, from Twitter's Algebird library, to compute
* windowed and global Top-K estimates of user IDs occurring in a Twitter stream.
* <br>
* <strong>Note</strong> that since Algebird's implementation currently only supports Long inputs,
* the example operates on Long IDs. Once the implementation supports other inputs (such as String),
* the same approach could be used for computing popular topics for example.
* <p>
* <p>
* <a href="http://highlyscalable.wordpress.com/2012/05/01/probabilistic-structures-web-analytics-data-mining/">
* This blog post</a> has a good overview of the Count-Min Sketch (CMS). The CMS is a data structure
* for approximate frequency estimation in data streams (e.g. Top-K elements, frequency of any given element, etc),
* that uses space sub-linear in the number of elements in the stream. Once elements are added to the CMS, the
* estimated count of an element can be computed, as well as "heavy-hitters" that occur more than a threshold
* percentage of the overall total count.
* <p><p>
* Algebird's implementation is a monoid, so we can succinctly merge two CMS instances in the reduce operation.
*/
object TwitterAlgebirdCMS {
def main(args: Array[String]) {
if (args.length < 1) {
System.err.println("Usage: TwitterAlgebirdCMS <master>" +
" [filter1] [filter2] ... [filter n]")
System.exit(1)
}
// CMS parameters
val DELTA = 1E-3
val EPS = 0.01
val SEED = 1
val PERC = 0.001
// K highest frequency elements to take
val TOPK = 10
val (master, filters) = (args.head, args.tail)
val ssc = new StreamingContext(master, "TwitterAlgebirdCMS", Seconds(10),
System.getenv("SPARK_HOME"), Seq(System.getenv("SPARK_EXAMPLES_JAR")))
val stream = ssc.twitterStream(None, filters, StorageLevel.MEMORY_ONLY_SER)
val users = stream.map(status => status.getUser.getId)
val cms = new CountMinSketchMonoid(EPS, DELTA, SEED, PERC)
var globalCMS = cms.zero
val mm = new MapMonoid[Long, Int]()
var globalExact = Map[Long, Int]()
val approxTopUsers = users.mapPartitions(ids => {
ids.map(id => cms.create(id))
}).reduce(_ ++ _)
val exactTopUsers = users.map(id => (id, 1))
.reduceByKey((a, b) => a + b)
approxTopUsers.foreach(rdd => {
if (rdd.count() != 0) {
val partial = rdd.first()
val partialTopK = partial.heavyHitters.map(id =>
(id, partial.frequency(id).estimate)).toSeq.sortBy(_._2).reverse.slice(0, TOPK)
globalCMS ++= partial
val globalTopK = globalCMS.heavyHitters.map(id =>
(id, globalCMS.frequency(id).estimate)).toSeq.sortBy(_._2).reverse.slice(0, TOPK)
println("Approx heavy hitters at %2.2f%% threshold this batch: %s".format(PERC,
partialTopK.mkString("[", ",", "]")))
println("Approx heavy hitters at %2.2f%% threshold overall: %s".format(PERC,
globalTopK.mkString("[", ",", "]")))
}
})
exactTopUsers.foreach(rdd => {
if (rdd.count() != 0) {
val partialMap = rdd.collect().toMap
val partialTopK = rdd.map(
{case (id, count) => (count, id)})
.sortByKey(ascending = false).take(TOPK)
globalExact = mm.plus(globalExact.toMap, partialMap)
val globalTopK = globalExact.toSeq.sortBy(_._2).reverse.slice(0, TOPK)
println("Exact heavy hitters this batch: %s".format(partialTopK.mkString("[", ",", "]")))
println("Exact heavy hitters overall: %s".format(globalTopK.mkString("[", ",", "]")))
}
})
ssc.start()
}
}
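/**
 * Standalone sketch (not part of the streaming example above): the same Algebird
 * operations used by TwitterAlgebirdCMS, namely `create`, the monoid `zero` and `++`,
 * `heavyHitters` and `frequency(_).estimate`, applied to a plain in-memory Seq.
 * The parameter values mirror those above; the ID data is made up.
 */
object CMSQuickCheck {
def main(args: Array[String]) {
val cms = new CountMinSketchMonoid(0.01, 1E-3, 1, 0.001)
val ids: Seq[Long] = Seq(1L, 1L, 1L, 2L, 3L, 1L, 2L)
val sketch = ids.map(id => cms.create(id)).foldLeft(cms.zero)(_ ++ _)
val top = sketch.heavyHitters.toSeq.map(id => (id, sketch.frequency(id).estimate)).sortBy(_._2).reverse
println("Approx counts: " + top.mkString("[", ",", "]"))
}
}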
| pxgao/spark-0.7.3 | examples/src/main/scala/spark/streaming/examples/TwitterAlgebirdCMS.scala | Scala | bsd-3-clause | 3,867 |
package amailp.intellij.robot.psi
import com.intellij.psi._
import com.intellij.lang.ASTNode
import com.intellij.extapi.psi.ASTWrapperPsiElement
import amailp.intellij.robot.findUsage.UsageFindable
import amailp.intellij.robot.psi.reference.KeywordToDefinitionReference
import amailp.intellij.robot.psi.utils.RobotPsiUtils
import com.intellij.psi.impl.source.resolve.reference.ReferenceProvidersRegistry
/**
* An instance of a keyword at the point where it is used
*/
class Keyword(node: ASTNode)
extends ASTWrapperPsiElement(node)
with RobotPsiUtils
with UsageFindable
with PsiNameIdentifierOwner {
override def getReferences: Array[PsiReference] =
Array[PsiReference](new KeywordToDefinitionReference(this)) ++
ReferenceProvidersRegistry.getReferencesFromProviders(this)
def getTextStrippedFromIgnoredPrefixes = {
val textLowerCase = getText.toLowerCase
for {
prefix <- Keyword.lowerCaseIgnoredPrefixes
if textLowerCase.startsWith(prefix)
stripped = textLowerCase.replaceFirst(prefix, "").trim
} yield stripped
} ensuring { _.size < 2 }
override def setName(name: String): PsiElement = {
val dummyKeyword = createKeyword(name)
this.getNode.getTreeParent.replaceChild(this.getNode, dummyKeyword.getNode)
this
}
def getType: String = "Keyword"
def getDescriptiveName: String = getNode.getText
override def getNameIdentifier: PsiElement = this
}
object Keyword {
val ignoredPrefixes = List("Given", "When", "Then", "And")
val lowerCaseIgnoredPrefixes = ignoredPrefixes.map(_.toLowerCase)
}
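// Illustration of the prefix handling above (hypothetical cell text): for
// "Given user is logged in" the lower-cased text starts with the ignored prefix
// "given", so getTextStrippedFromIgnoredPrefixes yields List("user is logged in");
// for "Log In User" no prefix matches and the result is empty. Since a text can
// start with at most one of the four prefixes, the `ensuring { _.size < 2 }`
// postcondition always holds.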
| AmailP/robot-plugin | src/main/scala/amailp/intellij/robot/psi/Keyword.scala | Scala | gpl-3.0 | 1,569 |
package com.twitter.finagle.mux
import com.twitter.concurrent.AsyncQueue
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.mux.lease.exp.Lessor
import com.twitter.finagle.netty3.{ChannelBufferBuf, BufChannelBuffer}
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.finagle.tracing._
import com.twitter.finagle.transport.QueueTransport
import com.twitter.finagle.{Filter, SimpleFilter, Service, Status, Path, Failure}
import com.twitter.io.Buf
import com.twitter.util.{Await, Future, Promise, Return, Throw, Time, TimeControl}
import java.util.concurrent.atomic.AtomicInteger
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
import org.junit.runner.RunWith
import org.mockito.Matchers.any
import org.mockito.Mockito.{never, verify, when}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.junit.{AssertionsForJUnit, JUnitRunner}
import org.scalatest.mock.MockitoSugar
import org.scalatest.{OneInstancePerTest, FunSuite, Tag}
private object TestContext {
val testContext = new Contexts.broadcast.Key[Buf]("com.twitter.finagle.mux.MuxContext") {
def marshal(buf: Buf) = buf
def tryUnmarshal(buf: Buf) = Return(buf)
}
}
private[mux] class ClientServerTest(canDispatch: Boolean)
extends FunSuite
with OneInstancePerTest
with MockitoSugar
with AssertionsForJUnit
with Eventually
with IntegrationPatience {
import TestContext._
val tracer = new BufferingTracer
class Ctx {
val clientToServer = new AsyncQueue[ChannelBuffer]
val serverToClient = new AsyncQueue[ChannelBuffer]
val serverTransport =
new QueueTransport(writeq=serverToClient, readq=clientToServer)
val clientTransport =
new QueueTransport(writeq=clientToServer, readq=serverToClient)
val service = mock[Service[Request, Response]]
val client = new ClientDispatcher("test", clientTransport, NullStatsReceiver)
val nping = new AtomicInteger(0)
val pingReq, pingRep = new Latch
def ping() = {
nping.incrementAndGet()
val f = pingRep.get
pingReq.flip()
f
}
val filter = new SimpleFilter[Message, Message] {
def apply(req: Message, service: Service[Message, Message]): Future[Message] = req match {
case Message.Tdispatch(tag, _, _, _, _) if !canDispatch =>
Future.value(Message.Rerr(tag, "Tdispatch not enabled"))
case Message.Tping(tag) =>
ping() before Future.value(Message.Rping(tag))
case req => service(req)
}
}
val server = new ServerDispatcher(
serverTransport, filter andThen Processor andThen service,
Lessor.nil, tracer, NullStatsReceiver)
}
// Push a tracer for the client.
override def test(testName: String, testTags: Tag*)(f: => Unit): Unit =
super.test(testName, testTags:_*) {
Trace.letTracer(tracer)(f)
}
def buf(b: Byte*) = Buf.ByteArray(b:_*)
test("handle concurrent requests, handling out of order replies") {
val ctx = new Ctx
import ctx._
val p1, p2, p3 = new Promise[Response]
val reqs = (1 to 3) map { i => Request(Path.empty, buf(i.toByte)) }
when(service(reqs(0))).thenReturn(p1)
when(service(reqs(1))).thenReturn(p2)
when(service(reqs(2))).thenReturn(p3)
val f1 = client(reqs(0))
val f2 = client(reqs(1))
val f3 = client(reqs(2))
for (i <- 0 to 2)
verify(service)(reqs(i))
for (f <- Seq(f1, f2, f3))
assert(f.poll === None)
val reps = Seq(10, 20, 9) map { i => Response(buf(i.toByte)) }
p2.setValue(reps(1))
assert(f1.poll === None)
assert(f2.poll === Some(Return(reps(1))))
assert(f3.poll === None)
p1.setValue(reps(0))
assert(f1.poll === Some(Return(reps(0))))
assert(f3.poll === None)
p3.setValue(reps(2))
assert(f3.poll === Some(Return(reps(2))))
}
test("server respond to pings") {
val ctx = new Ctx
import ctx._
for (i <- 0 until 5) {
assert(nping.get === i)
val pinged = client.ping()
assert(!pinged.isDefined)
assert(nping.get === i+1)
pingRep.flip()
assert(pinged.isDefined && Await.result(pinged) == ())
}
}
test("concurrent pings") {
val ctx = new Ctx
import ctx._
val pinged = (client.ping() join client.ping()).unit
assert(!pinged.isDefined)
assert(nping.get === 2)
pingRep.flip()
assert(pinged.isDefined && Await.result(pinged) == ())
}
test("server nacks new requests after draining") {
val ctx = new Ctx
import ctx._
val req1 = Request(Path.empty, buf(1))
val p1 = new Promise[Response]
when(service(req1)).thenReturn(p1)
val f1 = client(req1)
verify(service)(req1)
server.close(Time.now)
assert(f1.poll === None)
val req2 = Request(Path.empty, buf(2))
client(req2).poll match {
case Some(Throw(f: Failure)) => assert(f.isFlagged(Failure.Restartable))
case _ => fail()
}
verify(service, never)(req2)
val rep1 = Response(buf(123))
p1.setValue(rep1)
assert(f1.poll === Some(Return(rep1)))
}
test("requeueable failures transit server-to-client") {
val ctx = new Ctx
import ctx._
val req1 = Request(Path.empty, buf(1))
val p1 = new Promise[Response]
when(service(req1)).thenReturn(Future.exception(
Failure.rejected("come back tomorrow")))
client(req1).poll match {
case Some(Throw(f: Failure)) => assert(f.isFlagged(Failure.Restartable))
case bad => fail(s"got $bad")
}
}
test("handle errors") {
val ctx = new Ctx
import ctx._
val req = Request(Path.empty, buf(1))
when(service(req)).thenReturn(Future.exception(new Exception("sad panda")))
assert(client(req).poll === Some(
Throw(ServerApplicationError("java.lang.Exception: sad panda"))))
}
test("propagate interrupts") {
val ctx = new Ctx
import ctx._
val req = Request(Path.empty, buf(1))
val p = new Promise[Response]
when(service(req)).thenReturn(p)
val f = client(req)
assert(f.poll === None)
assert(p.isInterrupted === None)
val exc = new Exception("sad panda")
f.raise(exc)
assert(p.isInterrupted === Some(
ClientDiscardedRequestException("java.lang.Exception: sad panda")))
assert(f.poll === Some(Throw(exc)))
}
test("propagate trace ids") {
val ctx = new Ctx
import ctx._
when(service(any[Request])).thenAnswer(
new Answer[Future[Response]]() {
def answer(invocation: InvocationOnMock) =
Future.value(Response(Buf.Utf8(Trace.id.toString)))
}
)
val id = Trace.nextId
val resp = Trace.letId(id) {
client(Request(Path.empty, buf(1)))
}
assert(resp.poll.isDefined)
val Buf.Utf8(respStr) = Await.result(resp).body
assert(respStr === id.toString)
}
test("propagate trace flags") {
val ctx = new Ctx
import ctx._
when(service(any[Request])).thenAnswer(
new Answer[Future[Response]] {
def answer(invocation: InvocationOnMock) = {
val buf = ChannelBuffers.directBuffer(8)
buf.writeLong(Trace.id.flags.toLong)
Future.value(Response(ChannelBufferBuf.Owned(buf)))
}
}
)
val flags = Flags().setDebug
val id = Trace.nextId.copy(flags=flags)
val resp = Trace.letId(id) {
val p = client(Request(Path.empty, buf(1)))
p
}
assert(resp.poll.isDefined)
val respCb = BufChannelBuffer(Await.result(resp).body)
assert(respCb.readableBytes === 8)
val respFlags = Flags(respCb.readLong())
assert(respFlags === flags)
}
test("failure detection") {
sessionFailureDetector.let("threshold:10.milliseconds:2") {
val ctx = new Ctx
import ctx._
assert(nping.get === 1)
assert(client.status == Status.Busy)
pingRep.flip()
Status.awaitOpen(client.status)
// This is technically racy, but would require a pretty
// pathological test environment.
assert(client.status === Status.Open)
eventually { assert(client.status == Status.Busy) }
// Now begin replying.
def loop(): Future[Unit] = {
val f = pingReq.get
pingRep.flip()
f before loop()
}
loop()
eventually {
assert(client.status === Status.Open)
}
}
}
}
@RunWith(classOf[JUnitRunner])
class ClientServerTestNoDispatch extends ClientServerTest(false) {
test("does not dispatch destinations") {
val ctx = new Ctx
import ctx._
val withDst = Request(Path.read("/dst/name"), buf(123))
val withoutDst = Request(Path.empty, buf(123))
val rep = Response(buf(23))
when(service(withoutDst)).thenReturn(Future.value(rep))
assert(Await.result(client(withDst)) === rep)
verify(service)(withoutDst)
}
}
@RunWith(classOf[JUnitRunner])
class ClientServerTestDispatch extends ClientServerTest(true) {
import TestContext._
// Note: We test trace propagation here, too,
// since it's a default request context.
test("Transmits request contexts") {
val ctx = new Ctx
import ctx._
when(service(any[Request])).thenAnswer(
new Answer[Future[Response]] {
def answer(invocation: InvocationOnMock) =
Future.value(Response(
Contexts.broadcast.get(testContext)
.getOrElse(Buf.Empty)))
}
)
// No context set
assert(Await.result(client(Request(Path.empty, Buf.Empty))).body.isEmpty)
val f = Contexts.broadcast.let(testContext, Buf.Utf8("My context!")) {
client(Request.empty)
}
assert(Await.result(f).body === Buf.Utf8("My context!"))
}
test("dispatches destinations") {
val ctx = new Ctx
import ctx._
val req = Request(Path.read("/dst/name"), buf(123))
val rep = Response(buf(23))
when(service(req)).thenReturn(Future.value(rep))
assert(Await.result(client(req)) === rep)
verify(service)(req)
}
}
| cogitate/twitter-finagle-uuid | finagle-mux/src/test/scala/com/twitter/finagle/mux/ClientServerTest.scala | Scala | apache-2.0 | 10,009 |
package io.udash.demos.jquery.views.functions
import io.udash.demos.jquery.views.FunctionView
import io.udash.wrappers.jquery._
/** Based on examples from: <a href="http://api.jquery.com/animate/">jQuery Docs</a>. */
object AppendPrependView extends FunctionView {
import scalatags.JsDom.all._
override protected val content = div(
h3(".append()"),
p(id := "append")("I would like to say: "),
h3(".prepend()"),
p(id := "prepend")("amigo!")
).render
override protected def script = () => {
jQ("#append", content).append("<b>hello</b>")
jQ("#prepend", content).prepend("<i>Hello</i> ")
}
} | UdashFramework/scala-js-jquery | example/src/main/scala/io/udash/demos/jquery/views/functions/AppendPrependView.scala | Scala | apache-2.0 | 626 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.atlas.typesystem.builders
import org.apache.atlas.AtlasException
import org.apache.atlas.typesystem.types.{ClassType, Multiplicity, TypeSystem}
import org.testng.annotations.{BeforeMethod,Test}
class MultiplicityTest {
@BeforeMethod
def beforeAll {
TypeSystem.getInstance().reset()
val b = new TypesBuilder
import b._
val tDef = types {
_trait("Dimension") {}
_trait("PII") {}
_trait("Metric") {}
_trait("ETL") {}
_trait("JdbcAccess") {}
_class("DB") {
"name" ~ (string, required, indexed, unique)
"owner" ~ (string)
"createTime" ~ (int)
}
_class("StorageDesc") {
"inputFormat" ~ (string, required)
"outputFormat" ~ (string, required)
}
_class("Column") {
"name" ~ (string, required)
"dataType" ~ (string, required)
"sd" ~ ("StorageDesc", required)
}
_class("Table", List()) {
"name" ~ (string, required, indexed)
"db" ~ ("DB", required)
"sd" ~ ("StorageDesc", required)
}
_class("LoadProcess") {
"name" ~ (string, required)
"inputTables" ~ (array("Table"), collection)
"outputTable" ~ ("Table", required)
}
_class("View") {
"name" ~ (string, required)
"inputTables" ~ (array("Table"), collection)
}
_class("AT") {
"name" ~ (string, required)
"stringSet" ~ (array("string"), multiplicty(0, Int.MaxValue, true))
}
}
TypeSystem.getInstance().defineTypes(tDef)
}
@Test
def test1 {
val b = new InstanceBuilder
import b._
val instances = b create {
val a = instance("AT") { // use instance to create Referenceables. use closure to
// set attributes of instance
'name ~ "A1" // use '~' to set attributes. Use a Symbol (names starting with ') for
'stringSet ~ Seq("a", "a")
}
}
val ts = TypeSystem.getInstance()
import scala.collection.JavaConversions._
val typedInstances = instances.map { i =>
val iTyp = ts.getDataType(classOf[ClassType], i.getTypeName)
iTyp.convert(i, Multiplicity.REQUIRED)
}
typedInstances.foreach { i =>
println(i)
}
}
@Test(expectedExceptions = Array(classOf[AtlasException]) , expectedExceptionsMessageRegExp = "A multiplicty of more than one requires a collection type for attribute 'stringSet'")
def WrongMultiplicity {
val b = new TypesBuilder
import b._
val tDef = types {
_class("Wrong") {
"name" ~ (string, required)
"stringSet" ~ (string, multiplicty(0, Int.MaxValue, true))
}
}
TypeSystem.getInstance().defineTypes(tDef)
}
}
| jnhagelberg/incubator-atlas | typesystem/src/test/scala/org/apache/atlas/typesystem/builders/MultiplicityTest.scala | Scala | apache-2.0 | 3,562 |
/**
* Copyright 2015 Devon Miller
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package im
package vdom
package backend
/**
* Adler32 checksum.
*
* @see http://patterngazer.blogspot.com/2012/01/naive-adler32-example-in-clojure-and.html
*/
private[backend] trait Adler32 {
val base = 65521
def rebased(value: Int) = value % base
def cumulated(acc: (Int, Int), item: Byte): (Int, Int) = {
val a = rebased(acc._1 + (item & 0xff))
(a, (a + acc._2) % base)
}
def checksum(data: Traversable[Byte]): Int
def checksumText(data: Traversable[Char]): Int
}
protected[backend] object Adler32 extends Adler32 {
override def checksum(data: Traversable[Byte]): Int = {
val result = data.foldLeft((1, 0)) { cumulated(_, _) }
(result._2 << 16) | result._1
}
def checksumText(data: Traversable[Char]) = {
checksum(data.toSeq.map(_.toByte))
}
}
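/**
 * Sanity sketch (not part of scala-vdom): the classic reference value
 * Adler32("Wikipedia") == 0x11E60398 from the algorithm's literature can be
 * reproduced with the helpers above. The object and values here are illustrative.
 */
private[backend] object Adler32Sketch {
def main(args: Array[String]): Unit = {
val sum = Adler32.checksumText("Wikipedia")
// Expected: 300286872, i.e. 0x11E60398
println("Adler32(\"Wikipedia\") = " + sum + " (0x" + sum.toHexString.toUpperCase + ")")
}
}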
protected[backend] trait Utils {
/**
* Return Adler32 checksum.
*/
def adler32(str: String): Int = Adler32.checksumText(str)
/**
* Name of the custom data attribute that stores a checksum.
*/
val ChecksumAttrName = "data-scala-vdom-checksum"
/**
* Add a checksum to the markup's first tag, using an ad hoc string regex
* to find the end of that tag. Assumes the first set of characters
* is the start of an element and the tag ends in ">" or "/>".
*/
def addChecksumToMarkup(markup: String): String = {
val checksum = adler32(markup)
markup.replaceFirst("(/?>)", " " + ChecksumAttrName + "=\\"" + checksum + "\\"$1")
}
/** Append two strings */
def stringAppend(left: String, right: String) = left + right
/** Filter for filtering attribute lists. Keeps StyleKey attributes. */
def keepStyles(el: KeyValue[_]) = el match {
case KeyValue(StyleKey(_), _) => true
case _ => false
}
/** Filter for filtering attribute lists. Keeps AttrKey attributes. */
def keepAttributes(el: KeyValue[_]) = el match {
case KeyValue(AttrKey(_, _), _) => true
case _ => false
}
/**
* Take a value and convert it to a quoted string suitable for use
* as an attribute's value in markup.
*/
def quoteValueForBrowser[T](v: T): String =
"\\"" + escapeTextForBrowser(v) + "\\""
private[this] val escapeTable = Seq(
("&".r, "&amp;"),
(">".r, "&gt;"),
("<".r, "&lt;"),
("\"".r, "&quot;"),
("'".r, "&#x27;"))
/**
* Escape certain char sequences to avoid scripting attacks.
*
* TODO: Horribly inefficient!
*/
def escapeTextForBrowser[T](v: T): String = {
var rval = v.toString
for (p <- escapeTable)
rval = p._1.replaceAllIn(rval, p._2)
rval
}
/**
* Check the hint structure and decide whether this value should be
* ignored, i.e. does not need to be set on a DOM object.
*/
def ignoreValue[T](key: KeyPart, hintopt: Option[AttrHint], value: T): Boolean = {
(hintopt.map(_.values).getOrElse(Hints.EmptyHints), value) match {
case (hints, false) if (hints(Hints.HasBooleanValue)) => true
case _ => false
}
}
/**
* Create markup for a key value pair. It considers both the hint and the value
* when generating markup.
*
* If no hint is found, generate a simple `name = 'value'` and convert the
* value to a quoted string. If the value is None then it
* generates a None return value.
*
* @return None if no markup was generated or a string of markup.
*/
def createMarkupForProperty(kv: KeyValue[_], hintopt: Option[AttrHint]): Option[String] = {
kv.value.
filterNot(ignoreValue(kv.key, hintopt, _)).
map { v =>
(hintopt.map(_.values).getOrElse(Hints.EmptyHints), v) match {
case (hints, true) if (hints(Hints.HasBooleanValue) || hints(Hints.HasOverloadedBooleanValue)) =>
kv.key.name + """="""""
case _ => kv.key.name + "=" + quoteValueForBrowser(v)
}
}
}
/**
* Take a style value and prepare it to be inserted into markup.
*/
def quoteStyleValueForBrowser[T](hintopt: Option[StyleHint], v: T) = {
(hintopt.map(_.values).getOrElse(collection.BitSet.empty), v) match {
case (_, null) => ""
case (_, true) => ""
case (_, false) => ""
case (_, "") => ""
case (hints, x@_) if (hints(Hints.Unitless)) => v.toString
case _ => v.toString.trim + "px"
}
}
/** Convert camel cased to a hyphenated name. */
def hyphenate(name: String) = name.replaceAll("([A-Z])", "-$1").toLowerCase
/**
* Process a style name into proper CSS form: hyphenate camelCase and fix
* up a few special cases.
*
* For "ms-" prefix convert to "-ms-" per react.
*/
def processStyleName(name: String) = {
hyphenate(name).trim.replaceAll("^ms-", "-ms-")
}
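// For example, hyphenate("backgroundColor") yields "background-color", and
// processStyleName("msTransform") yields "-ms-transform" (the vendor prefix is
// re-normalized as noted in the comment above).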
/**
* Create style markup. This does NOT include `style=` or
* surrounding quotes. None values conceptually indicate
* we should ignore the value so no markup is generated for
* keys with None values.
*
* @return None if no markup was generated or a string of markup.
*
* TODO Allow the specification of a hint source.
*/
def createMarkupForStyles(kv: KeyValue[_], hintopt: Option[StyleHint]): Option[String] =
kv.value.map { v => processStyleName(kv.key.name) + ":" + quoteStyleValueForBrowser(hintopt, v) }
/**
* Normalize the string for comparison purposes: collapse repeated whitespace,
* remove whitespace adjacent to non-word characters, and trim leading/trailing
* whitespace. Note that newlines are stripped as well, so this cannot verify
* correctness when a newline MUST appear in the normalized string.
*/
def norm(input: String): String = {
input.trim().
replaceAll("(\\\\s)+", " ").
replaceAll("\\\\s(\\\\W)", "$1").
replaceAll("(\\\\W)\\\\s", "$1").
toUpperCase
}
}
protected[backend] object Utils extends Utils | aappddeevv/scala-vdom | shared/src/main/scala/im/vdom/backend/Utils.scala | Scala | apache-2.0 | 6,316 |
package controller
import skinny.filter.TxPerRequestFilter
class MustacheController extends ApplicationController with TxPerRequestFilter {
def index = {
set("echo" -> params.get("echo"))
render("/mustache/index")
}
}
| BlackPrincess/skinny-framework | example/src/main/scala/controller/MustacheController.scala | Scala | mit | 234 |
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package relite
import r._
import r.data._
import r.data.internal._
import r.builtins.{ CallFactory, Primitives }
import r.nodes.ast._
import r.nodes.exec.{ BaseR, RNode }
import r.runtime._;
import org.antlr.runtime._
import java.io._
import scala.collection.JavaConversions._
object NBody {
def main(args: Array[String]): Unit = {
DeliteBridge.install()
def test(prog: String): Unit = {
val res = RContext.eval(RContext.parseFile(
new ANTLRInputStream(new ByteArrayInputStream(prog.getBytes))))
println(res.pretty)
println(res)
// assert(res.pretty == "1, 2, 3")
}
test("""
Delite({
pi <- 3.141592653589793
solar_mass <- 4 * pi * pi #ok
days_per_year <- 365.24
n_bodies <- 5
body_x <- c(
0, # sun
4.84143144246472090e+00, # jupiter
8.34336671824457987e+00, # saturn
1.28943695621391310e+01, # uranus
1.53796971148509165e+01 # neptune
)
body_y <- c(
0, # sun
-1.16032004402742839e+00, # jupiter #ok
4.12479856412430479e+00, # saturn
-1.51111514016986312e+01, # uranus #ok
-2.59193146099879641e+01 # neptune #ok
)
body_z <- c(
0, # sun
-1.03622044471123109e-01, # jupiter #ok
-4.03523417114321381e-01, # saturn #ok
-2.23307578892655734e-01, # uranus #ok
1.79258772950371181e-01 # neptune
)
body_vx <- c(
0, # sun
1.66007664274403694e-03 * days_per_year, # jupiter #ok
-2.76742510726862411e-03 * days_per_year, # saturn #ok
2.96460137564761618e-03 * days_per_year, # uranus #ok
2.68067772490389322e-03 * days_per_year # neptune #ok
)
body_vy <- c(
0, # sun
7.69901118419740425e-03 * days_per_year, # jupiter #ok
4.99852801234917238e-03 * days_per_year, # saturn #ok
2.37847173959480950e-03 * days_per_year, # uranus #ok
1.62824170038242295e-03 * days_per_year # neptune #ok
)
body_vz <- c(
0, # sun
-6.90460016972063023e-05 * days_per_year, # jupiter #ok
2.30417297573763929e-05 * days_per_year, # saturn #ok
-2.96589568540237556e-05 * days_per_year, # uranus #ok
-9.51592254519715870e-05 * days_per_year # neptune #ok
)
body_mass <- c(
solar_mass, # sun
9.54791938424326609e-04 * solar_mass, # jupiter #ok
2.85885980666130812e-04 * solar_mass, # saturn #ok
4.36624404335156298e-05 * solar_mass, # uranus #ok
5.15138902046611451e-05 * solar_mass # neptune #ok
)
offset_momentum <- function() {
body_vx[[1]] <<- -sum(body_vx * body_mass) / solar_mass
body_vy[[1]] <<- -sum(body_vy * body_mass) / solar_mass
body_vz[[1]] <<- -sum(body_vz * body_mass) / solar_mass
}
advance <- function(dt) {
dxx <- outer(body_x, body_x, "-") # ~2x faster then nested for loops
dyy <- outer(body_y, body_y, "-")
dzz <- outer(body_z, body_z, "-")
distance <- sqrt(dxx * dxx + dyy * dyy + dzz * dzz)
mag <- dt / (distance * distance * distance) # ~fast as distance^3
diag(mag) <- 0
body_vx <<- body_vx - as.vector((dxx * mag) %*% body_mass)
body_vy <<- body_vy - as.vector((dyy * mag) %*% body_mass)
body_vz <<- body_vz - as.vector((dzz * mag) %*% body_mass)
body_x <<- body_x + dt * body_vx
body_y <<- body_y + dt * body_vy
body_z <<- body_z + dt * body_vz
}
energy <- function() {
dxx <- outer(body_x, body_x, "-")
dyy <- outer(body_y, body_y, "-")
dzz <- outer(body_z, body_z, "-")
distance <- sqrt(dxx * dxx + dyy * dyy + dzz * dzz)
q <- (body_mass %o% body_mass) / distance
return(sum(0.5 * body_mass *
(body_vx * body_vx + body_vy * body_vy + body_vz * body_vz)) -
sum(q[upper.tri(q)]))
}
nbody <- function(args) {
n = if (length(args)) as.integer(args[[1]]) else 1000L
# options(digits=9)
offset_momentum()
cat(energy(), "\n")
for (i in 1:n){
advance(0.01)
print(body_x)
print(body_y)
print(body_z)
print(body_vx)
print(body_vy)
print(body_vz)
}
cat(energy(), "\n")
}
nbody(1)
})
""")
}
}
| lidijaf/Relite | test-src/Nbody.scala | Scala | agpl-3.0 | 5,203 |
import akka.dispatch._
import akka.actor._
import java.util.concurrent.Executors
import java.util.concurrent.CountDownLatch
object Main extends App {
def waitABit(latch: CountDownLatch) = new Runnable() {
override def run() = {
Future.blocking()
Thread.sleep(100)
// use some CPU somehow
/* var j = 1
for (i <- 1 to 3000)
j = (j * i * 1.8).toInt */
latch.countDown()
}
}
val unboundedPool = Executors.newFixedThreadPool(1000)
val system = ActorSystem("Foo")
val numWaits = 200
// warm up threads
for (i <- 1 to numWaits) {
unboundedPool.execute(waitABit(new CountDownLatch(1)))
}
val unboundedLatch = new CountDownLatch(numWaits)
val startUnbounded = System.currentTimeMillis()
for (i <- 1 to numWaits) {
unboundedPool.execute(waitABit(unboundedLatch))
}
unboundedLatch.await()
val endUnbounded = System.currentTimeMillis()
val akkaDefaultLatch = new CountDownLatch(numWaits)
val startAkkaDefault = System.currentTimeMillis()
for (i <- 1 to numWaits) {
system.dispatcher.execute(waitABit(akkaDefaultLatch))
}
akkaDefaultLatch.await()
val endAkkaDefault = System.currentTimeMillis()
println("unbounded pool waited " + (endUnbounded - startUnbounded))
println("akka default fork-join waited " + (endAkkaDefault - startAkkaDefault))
System.exit(0)
}
| havocp/beaucatcher | channel/src/test/scala/Sleeps.scala | Scala | apache-2.0 | 1,475 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import org.apache.spark.TaskContext
class FakeTask(stageId: Int, prefLocs: Seq[TaskLocation] = Nil) extends Task[Int](stageId, 0) {
override def runTask(context: TaskContext): Int = 0
override def preferredLocations: Seq[TaskLocation] = prefLocs
}
object FakeTask {
/**
* Utility method to create a TaskSet, potentially setting a particular sequence of preferred
* locations for each task (given as varargs) if this sequence is not empty.
*/
def createTaskSet(numTasks: Int, prefLocs: Seq[TaskLocation]*): TaskSet = {
if (prefLocs.size != 0 && prefLocs.size != numTasks) {
throw new IllegalArgumentException("Wrong number of task locations")
}
val tasks = Array.tabulate[Task[_]](numTasks) { i =>
new FakeTask(i, if (prefLocs.size != 0) prefLocs(i) else Nil)
}
new TaskSet(tasks, 0, 0, 0, null)
}
}
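// Illustrative call of createTaskSet above (hypothetical host names): a two-task
// set where each task prefers a different host, e.g.
//   FakeTask.createTaskSet(2, Seq(TaskLocation("host1")), Seq(TaskLocation("host2")))
// Omitting the varargs leaves every task without locality preferences.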
| Dax1n/spark-core | core/src/test/scala/org/apache/spark/scheduler/FakeTask.scala | Scala | apache-2.0 | 1,691 |
package org.jetbrains.plugins.hocon.highlight
import com.intellij.lexer.{LayeredLexer, StringLiteralLexer}
import com.intellij.openapi.editor.colors.TextAttributesKey
import com.intellij.openapi.fileTypes.{SyntaxHighlighter, SyntaxHighlighterFactory}
import com.intellij.openapi.project.Project
import com.intellij.openapi.vfs.VirtualFile
import com.intellij.psi.StringEscapesTokenTypes
import com.intellij.psi.tree.IElementType
import org.jetbrains.plugins.hocon.highlight.{HoconHighlighterColors => HHC}
import org.jetbrains.plugins.hocon.lexer.HoconLexer
class HoconSyntaxHighlighterFactory extends SyntaxHighlighterFactory {
def getSyntaxHighlighter(project: Project, virtualFile: VirtualFile) =
HoconSyntaxHighlighter
}
object HoconSyntaxHighlighter extends SyntaxHighlighter {
import org.jetbrains.plugins.hocon.lexer.HoconTokenType._
private val tokenHighlights = Map[IElementType, Array[TextAttributesKey]](
BadCharacter -> Array(HHC.BadCharacter),
QuotedString -> Array(HHC.QuotedString),
MultilineString -> Array(HHC.MultilineString),
HashComment -> Array(HHC.HashComment),
DoubleSlashComment -> Array(HHC.DoubleSlashComment),
LBrace -> Array(HHC.Braces),
RBrace -> Array(HHC.Braces),
LBracket -> Array(HHC.Brackets),
RBracket -> Array(HHC.Brackets),
SubLBrace -> Array(HHC.SubBraces),
SubRBrace -> Array(HHC.SubBraces),
Comma -> Array(HHC.Comma),
Equals -> Array(HHC.KeyValueSeparator),
Colon -> Array(HHC.KeyValueSeparator),
PlusEquals -> Array(HHC.KeyValueSeparator),
Dollar -> Array(HHC.SubstitutionSign),
QMark -> Array(HHC.OptionalSubstitutionSign),
UnquotedChars -> Array(HHC.UnquotedString),
Period -> Array(HHC.UnquotedString),
LParen -> Array(HHC.UnquotedString),
RParen -> Array(HHC.UnquotedString),
StringEscapesTokenTypes.VALID_STRING_ESCAPE_TOKEN -> Array(HHC.ValidStringEscape),
StringEscapesTokenTypes.INVALID_CHARACTER_ESCAPE_TOKEN -> Array(HHC.InvalidStringEscape),
StringEscapesTokenTypes.INVALID_UNICODE_ESCAPE_TOKEN -> Array(HHC.InvalidStringEscape)
)
def getTokenHighlights(tokenType: IElementType): Array[TextAttributesKey] =
tokenHighlights.getOrElse(tokenType, Array.empty)
def getHighlightingLexer = new LayeredLexer(new HoconLexer) {
registerSelfStoppingLayer(new StringLiteralLexer('\"', QuotedString), Array(QuotedString), IElementType.EMPTY_ARRAY)
}
}
| ghik/intellij-hocon | src/org/jetbrains/plugins/hocon/highlight/HoconSyntaxHighlighter.scala | Scala | apache-2.0 | 2,421 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package patterns
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.expressions.Literal
import org.jetbrains.plugins.scala.lang.parser.parsing.types.StableId
import org.jetbrains.plugins.scala.lang.parser.parsing.xml.pattern.XmlPattern
import org.jetbrains.plugins.scala.lang.parser.util.ParserUtils
/**
* @author Alexander Podkhalyuzin
* Date: 29.02.2008
*/
/*
* SimplePattern ::= '_'
* | varid
* | Literal
* | StableId
* | StableId '(' [Patterns [',']] ')'
* | StableId '(' [Patterns ','] [(varid | '_' ) '@'] '_' '*'')'
* |'(' [Patterns [',']] ')'
* | XmlPattern
*/
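/*
 * Concrete Scala pattern forms for the productions above (illustrative only):
 * '_' -> case _ => ; varid -> case x => ; Literal -> case 42 =>
 * StableId -> case Nil => ; StableId '(' Patterns ')' -> case Some(value) =>
 * StableId '(' ... '_' '*' ')' -> case List(head, _*) =>
 * '(' Patterns ')' -> case (left, right) => ; XmlPattern -> case <a>{b}</a> =>
 */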
object SimplePattern extends SimplePattern {
override protected def literal = Literal
override protected def interpolationPattern = InterpolationPattern
override protected def pattern = Pattern
override protected def patterns = Patterns
}
trait SimplePattern extends ParserNode {
protected def literal: Literal
protected def pattern: Pattern
protected def interpolationPattern: InterpolationPattern
protected def patterns: Patterns
def parse(builder: ScalaPsiBuilder): Boolean = {
def isVarId = builder.getTokenText.substring(0, 1).toLowerCase ==
builder.getTokenText.substring(0, 1) && !(
builder.getTokenText.apply(0) == '`' && builder.getTokenText.apply(builder.getTokenText.length - 1) == '`'
)
val simplePatternMarker = builder.mark
builder.getTokenType match {
case ScalaTokenTypes.tUNDER =>
builder.advanceLexer() //Ate _
builder.getTokenText match {
case "*" =>
simplePatternMarker.rollbackTo()
return false
case _ =>
}
simplePatternMarker.done(ScalaElementTypes.WILDCARD_PATTERN)
return true
case ScalaTokenTypes.tLPARENTHESIS =>
builder.advanceLexer() //Ate (
builder.disableNewlines
builder.getTokenType match {
case ScalaTokenTypes.tRPARENTHESIS =>
builder.advanceLexer() //Ate )
builder.restoreNewlinesState
simplePatternMarker.done(ScalaElementTypes.TUPLE_PATTERN)
return true
case _ =>
}
if (patterns parse builder) {
builder.getTokenType match {
case ScalaTokenTypes.tRPARENTHESIS =>
builder.advanceLexer() //Ate )
builder.restoreNewlinesState
simplePatternMarker.done(ScalaElementTypes.TUPLE_PATTERN)
return true
case _ =>
builder error ScalaBundle.message("rparenthesis.expected")
builder.restoreNewlinesState
simplePatternMarker.done(ScalaElementTypes.TUPLE_PATTERN)
return true
}
}
if (pattern parse builder) {
builder.getTokenType match {
case ScalaTokenTypes.tRPARENTHESIS =>
builder.advanceLexer() //Ate )
case _ =>
builder error ScalaBundle.message("rparenthesis.expected")
}
builder.restoreNewlinesState
simplePatternMarker.done(ScalaElementTypes.PATTERN_IN_PARENTHESIS)
return true
}
case _ =>
}
if (interpolationPattern parse builder) {
simplePatternMarker.done(ScalaElementTypes.INTERPOLATION_PATTERN)
return true
}
if (literal parse builder) {
simplePatternMarker.done(ScalaElementTypes.LITERAL_PATTERN)
return true
}
if (XmlPattern.parse(builder)) {
simplePatternMarker.drop()
return true
}
if (lookAhead(builder, ScalaTokenTypes.tIDENTIFIER) &&
!lookAhead(builder, ScalaTokenTypes.tIDENTIFIER, ScalaTokenTypes.tDOT) &&
!lookAhead(builder, ScalaTokenTypes.tIDENTIFIER, ScalaTokenTypes.tLPARENTHESIS) &&
isVarId) {
val rpm = builder.mark
builder.getTokenText
builder.advanceLexer()
rpm.done(ScalaElementTypes.REFERENCE_PATTERN)
simplePatternMarker.drop()
return true
}
val rb1 = builder.mark
if (StableId parse (builder, ScalaElementTypes.REFERENCE_EXPRESSION)) {
builder.getTokenType match {
case ScalaTokenTypes.tLPARENTHESIS =>
rb1.rollbackTo()
StableId parse (builder, ScalaElementTypes.REFERENCE)
val args = builder.mark
builder.advanceLexer() //Ate (
builder.disableNewlines
def parseSeqWildcard(withComma: Boolean): Boolean = {
if (if (withComma)
lookAhead(builder, ScalaTokenTypes.tCOMMA, ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER)
else lookAhead(builder, ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER)) {
val wild = builder.mark
if (withComma) builder.advanceLexer()
builder.getTokenType
builder.advanceLexer()
if (builder.getTokenType == ScalaTokenTypes.tIDENTIFIER && "*".equals(builder.getTokenText)) {
builder.advanceLexer()
wild.done(ScalaElementTypes.SEQ_WILDCARD)
true
} else {
wild.rollbackTo()
false
}
} else {
false
}
}
def parseSeqWildcardBinding(withComma: Boolean): Boolean = {
if (if (withComma) lookAhead(builder, ScalaTokenTypes.tCOMMA, ScalaTokenTypes.tIDENTIFIER, ScalaTokenTypes.tAT,
ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER) || lookAhead(builder, ScalaTokenTypes.tCOMMA, ScalaTokenTypes.tUNDER, ScalaTokenTypes.tAT,
ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER)
else lookAhead(builder, ScalaTokenTypes.tIDENTIFIER, ScalaTokenTypes.tAT,
ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER) || lookAhead(builder, ScalaTokenTypes.tUNDER, ScalaTokenTypes.tAT,
ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER)) {
val wild = builder.mark
if (withComma) builder.advanceLexer() // ,
builder.getTokenType
if (isVarId) {
builder.advanceLexer() // id
} else {
wild.rollbackTo()
return false
}
builder.getTokenType
builder.advanceLexer() // @
builder.getTokenType
if (ParserUtils.eatSeqWildcardNext(builder)) {
wild.done(ScalaElementTypes.NAMING_PATTERN)
return true
}
else {
wild.rollbackTo()
return false
}
}
false
}
if (!parseSeqWildcard(withComma = false) && !parseSeqWildcardBinding(withComma = false) && pattern.parse(builder)) {
while (builder.getTokenType == ScalaTokenTypes.tCOMMA) {
builder.advanceLexer() // eat comma
if (!parseSeqWildcard(withComma = false) && !parseSeqWildcardBinding(withComma = false)) pattern.parse(builder)
}
}
builder.getTokenType match {
case ScalaTokenTypes.tRPARENTHESIS =>
builder.advanceLexer() //Ate )
case _ =>
builder error ErrMsg("rparenthesis.expected")
}
builder.restoreNewlinesState
args.done(ScalaElementTypes.PATTERN_ARGS)
simplePatternMarker.done(ScalaElementTypes.CONSTRUCTOR_PATTERN)
return true
case _ =>
rb1.drop()
simplePatternMarker.done(ScalaElementTypes.STABLE_REFERENCE_PATTERN)
return true
}
}
simplePatternMarker.rollbackTo()
false
}
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/parser/parsing/patterns/SimplePattern.scala | Scala | apache-2.0 | 8,010 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.plugin.transformation.json
import com.stratio.sparta.sdk.pipeline.transformation.Parser
import com.stratio.sparta.sdk.properties.ValidatingPropertyMap._
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
import java.io.{Serializable => JSerializable}
import org.joda.time.DateTime
import scala.util.Try
class CustomDateParser(order: Integer,
inputField: Option[String],
outputFields: Seq[String],
schema: StructType,
properties: Map[String, JSerializable])
extends Parser(order, inputField, outputFields, schema, properties) {
val dateField = propertiesWithCustom.getString("dateField", "date")
val hourField = propertiesWithCustom.getString("hourField", "hourRounded")
val dayField = propertiesWithCustom.getString("dayField", "dayRounded")
val weekField = propertiesWithCustom.getString("weekField", "week")
val hourDateField = propertiesWithCustom.getString("hourDateField", "hourDate")
val yearPrefix = propertiesWithCustom.getString("yearPrefix", "20")
//scalastyle:off
override def parse(row: Row): Seq[Row] = {
val inputValue = Try(row.get(schema.fieldIndex(dateField))).toOption
val newData = Try {
inputValue match {
case Some(value) =>
val valueStr = {
value match {
case valueCast: Array[Byte] => new Predef.String(valueCast)
case valueCast: String => valueCast
case _ => value.toString
}
}
val valuesParsed = Map(
hourField -> getDateWithBeginYear(valueStr).concat(valueStr.substring(4, valueStr.length)),
hourDateField -> getHourDate(valueStr),
dayField -> getDateWithBeginYear(valueStr).concat(valueStr.substring(4, valueStr.length - 2)),
weekField -> getWeek(valueStr)
)
outputFields.map { outputField =>
val outputSchemaValid = outputFieldsSchema.find(field => field.name == outputField)
outputSchemaValid match {
case Some(outSchema) =>
valuesParsed.get(outSchema.name) match {
case Some(valueParsed) =>
parseToOutputType(outSchema, valueParsed)
case None =>
returnWhenError(new IllegalStateException(
s"The values parsed don't contain the schema field: ${outSchema.name}"))
}
case None =>
returnWhenError(new IllegalStateException(
s"Impossible to parse outputField: $outputField in the schema"))
}
}
case None =>
returnWhenError(new IllegalStateException(s"The input value is null or empty"))
}
}
returnData(newData, removeInputField(row))
}
def getDateWithBeginYear(inputDate: String): String =
inputDate.substring(0, inputDate.length - 4).concat(yearPrefix)
def getHourDate(inputDate: String): Long = {
val day = inputDate.substring(0, 2).toInt
val month = inputDate.substring(2, 4).toInt
val year = yearPrefix.concat(inputDate.substring(4, 6)).toInt
val hour = inputDate.substring(6, inputDate.length).toInt
val date = new DateTime(year, month, day, hour, 0)
date.getMillis
}
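// The parsing above implies an input layout of ddMMyyHH, with yearPrefix
// supplying the century: for example, a hypothetical value "01021514" is read
// as 1 February 2015, 14:00, i.e. new DateTime(2015, 2, 1, 14, 0).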
def getWeek(inputDate: String): Int = {
val day = inputDate.substring(0, 2).toInt
val month = inputDate.substring(2, 4).toInt
val year = yearPrefix.concat(inputDate.substring(4, 6)).toInt
val date = new DateTime(year, month, day, 0, 0)
date.getWeekOfWeekyear
}
//scalastyle:on
}
| diegohurtado/sparta | examples/parser/src/main/scala/com/stratio/sparta/CustomDateParser.scala | Scala | apache-2.0 | 4,294 |
/*
* Licensed to STRATIO (C) under one or more contributor license agreements.
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. The STRATIO (C) licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.stratio.connector.sparksql.core.engine.query
import com.stratio.connector.sparksql.core.Configuration
import com.stratio.connector.sparksql.core.`package`.SparkSQLContext
import com.stratio.connector.sparksql.core.connection.ConnectionHandler
import com.stratio.crossdata.common.result.QueryResult
import com.stratio.connector.commons.timer
import org.apache.spark.sql.types.StructType
import scala.concurrent.duration._
import akka.actor.{Props, Actor}
import com.stratio.connector.commons.{Loggable, Metrics}
import com.stratio.crossdata.common.logicalplan.LogicalWorkflow
import com.stratio.connector.sparksql.CrossdataConverters._
import org.apache.spark.sql.{Row, DataFrame}
import QueryEngine.toColumnMetadata
import QueryExecutor._
/**
 * Minimum query execution unit.
 *
 * @param sqlContext The SQLContext
 * @param defaultChunkSize Maximum number of rows in a chunk
 * @param provider SparkSQL Data source provider
 * @param connectionHandler Handler for the connections used while executing the query
 * @param asyncStoppable Whether its tasks can be stopped or not
 */
class QueryExecutor(
sqlContext: SparkSQLContext,
defaultChunkSize: Int,
provider: DataFrameProvider,
connectionHandler: ConnectionHandler,
asyncStoppable: Boolean = true) extends Actor
with Loggable
with Metrics
with Configuration {
import QueryManager._
import timer._
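  // A Chunk pairs a block of rows with its sequential index: for example, with a page
  // size of 1000 and 2500 result rows, `startNewJob` produces chunks indexed 0, 1 and 2
  // (1000/1000/500 rows), and `keepProcessingJob` flags the last one via `isLast` before
  // handing it to `processChunk`.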
type Chunk = (Iterator[Row], Int)
var currentJob: Option[AsyncJob] = None
var currentSchema: Option[StructType] = None
/** Current job chunks iterator */
var rddChunks: Iterator[Chunk] = List.empty[Chunk].iterator
/** Maximum time for waiting at count approx in chunk split */
val timeoutCountApprox = connectorConfig.get.getInt(CountApproxTimeout).seconds
override def receive = {
case job@SyncExecute(_, workflow) =>
val requester = sender()
val dataFrame = timeFor(s"Processed sync. job request: $job") {
QueryEngine.executeQuery(workflow, sqlContext, connectionHandler)
}
val result = timeFor(s"Unique query result processed.") {
QueryResult.createQueryResult(
toResultSet(dataFrame, toColumnMetadata(workflow)), 0, true)
}
requester ! result
case job@PagedExecute(_, _, _, pageSize) =>
timeFor(s"$me Processed paged job request : $job") {
startNewJob(job, pageSize)
}
case job: AsyncExecute =>
timeFor(s"$me Processed async. job request : $job") {
startNewJob(job)
}
case ProcessNextChunk(queryId) if currentJob.exists(_.queryId == queryId) =>
timeFor(s"$me Processed 'ProcessNextChunk' request (query: $queryId") {
keepProcessingJob()
}
case Stop(queryId) if currentJob.exists(_.queryId == queryId) =>
timeFor(s"$me Stopped request (query: $queryId") {
stopCurrentJob()
}
case other => logger.error(s"[${other.getClass} Unhandled message : $other]")
}
// Helpers
  def me: String = s"[QueryExecutor#${context.self}]"
  /**
   * Start a new async query job. This will execute the given query
   * on SparkSQL and then split the results into smaller pieces
   * called chunks. Chunk size should be small enough to fit in
   * driver memory.
   *
   * @param job Query to be asynchronously executed.
   * @param pageSize Maximum number of rows per chunk (defaults to `defaultChunkSize`).
   */
def startNewJob(job: AsyncJob, pageSize: Int = defaultChunkSize): Unit =
timeFor(s"$me Job ${job.queryId} is started") {
// Update current job
currentJob = Option(job)
      // Create DataFrame from the query
val dataFrame = provider(job.workflow, connectionHandler, sqlContext)
// Update current schema
currentSchema = Option(dataFrame.schema)
if (asyncStoppable) {
val repartitioned = dataFrame.rdd
rddChunks = repartitioned
.toLocalIterator
.grouped(pageSize)
.map(_.iterator)
.zipWithIndex
if (repartitioned.partitions.length == 0) {
val result = QueryResult.createQueryResult(
toResultSet(List().toIterator, dataFrame.schema, toColumnMetadata(job.workflow)),
0,
true)
result.setQueryId(job.queryId)
job.resultHandler.processResult(result)
}
}
else {
timeFor(s"$me Processed ${job.queryId} as unstoppable...") {
// Prepare query as an only chunk, omitting stop messages
rddChunks = List(dataFrame.rdd.toLocalIterator -> 0).iterator
}
}
// Begin processing current job
keepProcessingJob()
}
/**
* Process or set as finished current job if there are no chunks left.
*/
def keepProcessingJob(): Unit =
for {
job <- currentJob
schema <- currentSchema
} {
if (!rddChunks.hasNext) {
logger.debug(s"$me Job ${job.queryId} has " +
s"no chunks left to process")
context.parent ! Finished(job.queryId)
}
else {
logger.debug(s"$me Preparing to process " +
s"next chunk of ${job.queryId}")
val chunk = rddChunks.next()
val isLast = !rddChunks.hasNext
processChunk(chunk, isLast, schema)
self ! ProcessNextChunk(job.queryId)
}
}
  /**
   * Process the given chunk as a query result set.
   *
   * @param chunk The chunk to be processed
   * @param isLast Whether the given chunk is the last one.
   * @param schema Schema of the rows contained in the chunk.
   */
def processChunk(
chunk: => Chunk,
isLast: => Boolean,
schema: StructType): Unit = {
val (rows, idx) = chunk
currentJob.foreach { job =>
job.resultHandler.processResult {
logger.debug(s"Preparing query result [$idx] for query ${job.queryId}")
val result = QueryResult.createQueryResult(
toResultSet(rows, schema, toColumnMetadata(job.workflow)),
idx,
isLast)
result.setQueryId(job.queryId)
logger.info(s"Query result [$idx] for query ${job.queryId} sent to resultHandler")
result
}
}
}
/**
* Stop processing current asynchronous query job.
*/
def stopCurrentJob(): Unit = {
currentJob = None
currentSchema = None
rddChunks = List().iterator
}
}
object QueryExecutor {
type DataFrameProvider = (LogicalWorkflow, ConnectionHandler, SparkSQLContext) => DataFrame
def apply(
sqlContext: SparkSQLContext,
defaultChunkSize: Int,
provider: DataFrameProvider,
connectionHandler: ConnectionHandler,
asyncStoppable: Boolean = true): Props =
Props(new QueryExecutor(
sqlContext,
defaultChunkSize,
provider,
connectionHandler,
asyncStoppable))
case class ProcessNextChunk(queryId: QueryManager#QueryId)
}
| Stratio/stratio-connector-sparkSQL | connector-sparksql/src/main/scala/com/stratio/connector/sparksql/core/engine/query/QueryExecutor.scala | Scala | apache-2.0 | 7,613 |
package io.toolsplus.atlassian.connect.play.auth.jwt
import io.toolsplus.atlassian.jwt.api.CanonicalHttpRequest
import play.api.mvc.Request
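/**
 * Adapts a Play [[Request]] to the [[CanonicalHttpRequest]] shape expected by the
 * Atlassian JWT library, e.g. when computing the query string hash of an incoming
 * request during JWT validation.
 */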
case class CanonicalPlayHttpRequest[A](request: Request[A])
extends CanonicalHttpRequest {
override def method: String = request.method
override def relativePath: String = request.path
override def parameterMap: Map[String, Seq[String]] = request.queryString
}
| toolsplus/atlassian-connect-play | modules/core/app/io/toolsplus/atlassian/connect/play/auth/jwt/CanonicalPlayHttpRequest.scala | Scala | apache-2.0 | 415 |
package bot.application
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{Directive0, Route, ValidationRejection}
import bot.line.client.SignatureVerifier
import bot.line.json.EventsJsonSupport
import bot.line.model.event.{Event, Events}
trait BaseLineBot[T] extends EventsJsonSupport {
val channelSecret: String
val signatureVerifier: SignatureVerifier
def verifySignature: Directive0 =
(headerValueByName("X-Line-Signature") & entity(as[String])).tflatMap {
case (signature, body) if signatureVerifier.isValid(channelSecret, body, signature) => pass
case _ => reject(ValidationRejection("Invalid signature"))
}
def routes: Route = {
(path("line" / "callback") & post & verifySignature & entity(as[Events])) { entity =>
receive(entity.events)
complete {
"OK"
}
}
}
def receive(events: List[Event]): T
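  // Usage sketch (illustrative only; `EchoBot` is not part of this codebase): a concrete
  // bot supplies the channel secret, a signature verifier and a `receive` implementation,
  // and binds `routes` to an Akka HTTP server, e.g.
  //
  //   class EchoBot(val channelSecret: String,
  //                 val signatureVerifier: SignatureVerifier) extends BaseLineBot[Unit] {
  //     override def receive(events: List[Event]): Unit = events.foreach(println)
  //   }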
}
| xoyo24/akka-http-line-bot | src/main/scala/bot/application/BaseLineBot.scala | Scala | mit | 906 |
case class SpaceAge(seconds: Long) {
  // Seconds in one Earth year (365.25 days).
  private val EarthYearSeconds = 31557600.0

  // Years spent on a planet with the given orbital period (in Earth years),
  // rounded to two decimal places; keeps the original +0.005 adjustment.
  private def yearsOn(orbitalPeriod: Double): Double =
    ((seconds + 0.005) / EarthYearSeconds / orbitalPeriod * 100).round / 100.0

  def onEarth(): Double = yearsOn(1.0)
  def onMercury(): Double = yearsOn(0.2408467)
  def onVenus(): Double = yearsOn(0.61519726)
  def onMars(): Double = yearsOn(1.8808158)
  def onJupiter(): Double = yearsOn(11.862615)
  def onSaturn(): Double = yearsOn(29.447498)
  def onUranus(): Double = yearsOn(84.016846)
  def onNeptune(): Double = yearsOn(164.79132)
}
| stanciua/exercism | scala/space-age/src/main/scala/SpaceAge.scala | Scala | mit | 783 |
package chess
import chess.Pos._
import chess.variant.FromPosition
class CastleTest extends ChessTest {
"king side" should {
val goodHist = """
PPPPPPPP
R QK R"""
val badHist = goodHist updateHistory (_ withoutCastles White)
"impossible" in {
"standard chess" in {
"near bishop in the way" in {
goodHist place (White.bishop, F1) flatMap (_ destsFrom E1) must bePoss()
}
"distant knight in the way" in {
goodHist place (White.knight, G1) flatMap (_ destsFrom E1) must bePoss(F1)
}
"not allowed by history" in {
badHist destsFrom E1 must bePoss(F1)
}
}
"chess960" in {
val board960 = """
PPPPPPPP
RQK R """.chess960 withHistory History.castle(White, kingSide = true, queenSide = true)
"near bishop in the way" in {
board960 place (White.bishop, D1) flatMap (_ destsFrom C1) must bePoss()
}
"distant knight in the way" in {
board960 place (White.knight, F1) flatMap (_ destsFrom C1) must bePoss(D1)
}
}
}
"possible" in {
"standard" in {
val game = Game(goodHist, White)
"viable moves" in {
game.board destsFrom E1 must bePoss(F1, G1, H1)
}
"correct new board" in {
game.playMove(E1, G1) must beGame("""
PPPPPPPP
R Q RK """)
}
}
"chess960 close kingside" in {
val board: Board = """
PPPPP
B KR""".chess960
val game = Game(board, White)
"viable moves" in {
board destsFrom G1 must bePoss(F1, H1)
}
"correct new board" in {
game.playMove(G1, H1) must beGame("""
PPPPP
B RK """)
}
}
"chess960 close kingside with 2 rooks around" in {
val board: Board = """
PPPPPPPP
RKRBB """.chess960
"viable moves" in {
board destsFrom B1 must bePoss()
}
}
"chess960 close queenside" in {
val board: Board = """
PPPPPPPP
RK B""".chess960
val game = Game(board, White)
"viable moves" in {
board destsFrom B1 must bePoss(A1, C1)
}
"correct new board" in {
game.playMove(B1, A1) must beGame("""
PPPPPPPP
KR B""")
}
}
"chess960 close queenside as black" in {
val game = Game(
"""
b rkr q
p pppppp
p n
K""".chess960,
Black
)
"viable moves" in {
game.board destsFrom E8 must bePoss(D8, F8)
}
"correct new board" in {
game.playMove(E8, D8) must beGame("""
bkr r q
p pppppp
p n
K""")
}
}
"from position with chess960 castling" in {
val game = Game(
makeBoard(
"""rk r
pppbnppp
p n
P Pp
P q
R NP
PP PP
KNQRB""",
FromPosition
),
Black
)
"dests" in {
game.board destsFrom B8 must bePoss(A8, C8, E8)
}
}
}
}
"queen side" should {
val goodHist = """
PPPPPPPP
R KB R"""
val badHist = goodHist updateHistory (_ withoutCastles White)
"impossible" in {
"near queen in the way" in {
goodHist place (White.queen, D1) flatMap (_ destsFrom E1) must bePoss()
}
"bishop in the way" in {
goodHist place (White.bishop, C1) flatMap (_ destsFrom E1) must bePoss(D1)
}
"distant knight in the way" in {
goodHist place (White.knight, C1) flatMap (_ destsFrom E1) must bePoss(D1)
}
"not allowed by history" in {
badHist destsFrom E1 must bePoss(D1)
}
}
"possible" in {
val game = Game(goodHist, White)
"viable moves" in {
game.board destsFrom E1 must bePoss(A1, C1, D1)
}
"correct new board" in {
game.playMove(E1, C1) must beGame("""
PPPPPPPP
KR B R""")
}
}
}
"impact history" in {
val board = """
PPPPPPPP
R K R""" withHistory History.castle(White, kingSide = true, queenSide = true)
val game = Game(board, White)
"if king castles kingside" in {
val g2 = game.playMove(E1, G1)
"correct new board" in {
g2 must beGame("""
PPPPPPPP
R RK """)
}
"cannot castle queenside anymore" in {
g2.toOption flatMap (_.board destsFrom G1) must bePoss(H1)
}
"cannot castle kingside anymore even if the position looks good" in {
g2.toOption flatMap (_.board.seq(
_ move (F1, H1),
_ move (G1, E1)
)) flatMap (_ destsFrom E1) must bePoss(D1, F1)
}
}
"if king castles queenside" in {
val g2 = game.playMove(E1, C1)
"correct new board" in {
g2 must beGame("""
PPPPPPPP
KR R""")
}
"cannot castle kingside anymore" in {
g2.toOption flatMap (_.board destsFrom C1) must bePoss(B1)
}
"cannot castle queenside anymore even if the position looks good" in {
g2.toOption flatMap (_.board.seq(
_ move (D1, A1),
_ move (C1, E1)
)) flatMap (_ destsFrom E1) must bePoss(D1, F1)
}
}
"if king moves" in {
"to the right" in {
val g2 = game.playMove(E1, F1) map (_ as White)
"cannot castle anymore" in {
g2.toOption flatMap (_.board destsFrom F1) must bePoss(E1, G1)
}
"neither if the king comes back" in {
val g3 = g2 flatMap (_.playMove(F1, E1)) map (_ as White)
g3.toOption flatMap (_.board destsFrom E1) must bePoss(D1, F1)
}
}
"to the left" in {
val g2 = game.playMove(E1, D1) map (_ as White)
"cannot castle anymore" in {
g2.toOption flatMap (_.board destsFrom D1) must bePoss(C1, E1)
}
"neither if the king comes back" in {
val g3 = g2 flatMap (_.playMove(D1, E1)) map (_ as White)
g3.toOption flatMap (_.board destsFrom E1) must bePoss(D1, F1)
}
}
}
"if kingside rook moves" in {
val g2 = game.playMove(H1, G1) map (_ as White)
"can only castle queenside" in {
g2.toOption flatMap (_.board destsFrom E1) must bePoss(C1, D1, F1, A1)
}
"if queenside rook moves" in {
val g3 = g2 flatMap (_.playMove(A1, B1))
"can not castle at all" in {
g3.toOption flatMap (_.board destsFrom E1) must bePoss(D1, F1)
}
}
}
"if queenside rook moves" in {
val g2 = game.playMove(A1, B1) map (_ as White)
"can only castle kingside" in {
g2.toOption flatMap (_.board destsFrom E1) must bePoss(D1, F1, G1, H1)
}
"if kingside rook moves" in {
val g3 = g2 flatMap (_.playMove(H1, G1))
"can not castle at all" in {
g3.toOption flatMap (_.board destsFrom E1) must bePoss(D1, F1)
}
}
}
}
"threat on king prevents castling" in {
val board: Board = """R K R"""
"by a rook" in {
board place (Black.rook, E3) flatMap (_ destsFrom E1) must bePoss(D1, D2, F2, F1)
}
"by a knight" in {
board place (Black.knight, D3) flatMap (_ destsFrom E1) must bePoss(D1, D2, E2, F1)
}
}
"threat on castle trip prevents castling" in {
"king side" in {
val board: Board = """R QK R"""
"close" in {
board place (Black.rook, F3) flatMap (_ destsFrom E1) must bePoss(D2, E2)
}
"far" in {
board place (Black.rook, G3) flatMap (_ destsFrom E1) must bePoss(D2, E2, F2, F1)
}
}
"queen side" in {
val board: Board = """R KB R"""
"close" in {
board place (Black.rook, D3) flatMap (_ destsFrom E1) must bePoss(E2, F2)
}
"far" in {
board place (Black.rook, C3) flatMap (_ destsFrom E1) must bePoss(D1, D2, E2, F2)
}
}
"chess 960" in {
"far kingside" in {
val board: Board = """BK R"""
"rook threat" in {
board place (Black.rook, F3) flatMap (_ destsFrom B1) must bePoss(A2, B2, C2, C1)
}
"enemy king threat" in {
board place (Black.king, E2) flatMap (_ destsFrom B1) must bePoss(A2, B2, C2, C1)
}
}
}
}
"threat on rook does not prevent castling" in {
"king side" in {
val board: Board = """R QK R"""
board place (Black.rook, H3) flatMap (_ destsFrom E1) must bePoss(
D2,
E2,
F1,
F2,
G1,
H1
)
}
"queen side" in {
val board: Board = """R KB R"""
board place (Black.rook, A3) flatMap (_ destsFrom E1) must bePoss(
A1,
C1,
D1,
D2,
E2,
F2
)
}
}
}
| ornicar/scalachess | src/test/scala/CastleTest.scala | Scala | mit | 8,670 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.batch.table
import java.io.File
import org.apache.flink.api.scala.util.CollectionDataSets
import org.apache.flink.api.scala.{ExecutionEnvironment, _}
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.api.scala._
import org.apache.flink.table.runtime.utils.TableProgramsCollectionTestBase
import org.apache.flink.table.runtime.utils.TableProgramsTestBase.TableConfigMode
import org.apache.flink.table.sinks.CsvTableSink
import org.apache.flink.test.util.TestBaseUtils
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
@RunWith(classOf[Parameterized])
class TableSinkITCase(
configMode: TableConfigMode)
extends TableProgramsCollectionTestBase(configMode) {
@Test
def testBatchTableSink(): Unit = {
val tmpFile = File.createTempFile("flink-table-sink-test", ".tmp")
tmpFile.deleteOnExit()
val path = tmpFile.toURI.toString
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
env.setParallelism(4)
val input = CollectionDataSets.get3TupleDataSet(env)
.map(x => x).setParallelism(4) // increase DOP to 4
val results = input.toTable(tEnv, 'a, 'b, 'c)
.where('a < 5 || 'a > 17)
.select('c, 'b)
.writeToSink(new CsvTableSink(path, fieldDelim = "|"))
env.execute()
val expected = Seq(
"Hi|1", "Hello|2", "Hello world|2", "Hello world, how are you?|3",
"Comment#12|6", "Comment#13|6", "Comment#14|6", "Comment#15|6").mkString("\\n")
TestBaseUtils.compareResultsByLinesInMemory(expected, path)
}
}
| yew1eb/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/batch/table/TableSinkITCase.scala | Scala | apache-2.0 | 2,474 |
package net.gadgil.finance.portfolio
import org.specs2.mutable._
class PortfolioToolsSpecification extends Specification {
"portfolio parser" should {
val theDefinition = """
portfolio pf1
long THRM USD 10000 on 2010-12-14 at 11.14
long LKQ USD 10000 on 2010-12-14 at 23.20 stop loss 50%
long DORM USD 10000 on 2010-12-14 at 38.86
short CWEI USD 10000 on 2010-12-14 at 12.91 stop loss 100%
short TEN USD 10000 on 2010-12-14 at 41.19 stop loss 100%
"""
"parse test" in {
val x = PortfolioDefinition.parseAll(PortfolioDefinition.portfolioStruct, theDefinition)
println(x)
x.get.name must be equalTo "pf1"
x.get.positions(0).symbol must be equalTo "THRM"
}
}
}
| navaidya/optionpricing | finance.portfolio/src/test/scala/PortfolioTools.scala | Scala | mit | 747 |
/*
* Copyright 2016 University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.sampling.evaluators
import scalismo.sampling.DistributionEvaluator
import scala.language.implicitConversions
/**
* evaluate a product of distributions
*
* @param evaluators Sequence of distributions to evaluate
*/
class ProductEvaluator[A](evaluators: Seq[DistributionEvaluator[A]]) extends DistributionEvaluator[A] {
override def logValue(sample: A): Double = {
evaluators.iterator.map(e => e.logValue(sample)).sum
}
}
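/*
 * Usage sketch: given any two evaluators `prior` and `likelihood` of type
 * DistributionEvaluator[A], they can be combined either directly
 *
 *   val posterior = ProductEvaluator(prior, likelihood)
 *
 * or through the implicit builder syntax
 *
 *   import ProductEvaluator.implicits._
 *   val posterior = ProductEvaluator(prior * likelihood)
 */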
object ProductEvaluator {
def apply[A](evaluators: DistributionEvaluator[A]*) = new ProductEvaluator[A](evaluators.toSeq)
def apply[A](builder: implicits.ProductBuilder[A]) = builder.toProductEvaluator
/** implicit builder for ProductEvaluator */
object implicits {
implicit def toProductBuilder[A](eval: DistributionEvaluator[A]): ProductBuilder[A] = new ProductBuilder[A](eval)
implicit def toProductEvaluator[A](builder: ProductBuilder[A]): ProductEvaluator[A] = builder.toProductEvaluator
class ProductBuilder[A](evals: DistributionEvaluator[A]*) {
def toProductEvaluator: ProductEvaluator[A] = new ProductEvaluator[A](evals.toSeq)
def *(other: DistributionEvaluator[A]): ProductBuilder[A] = new ProductBuilder[A](evals :+ other: _*)
}
}
}
| unibas-gravis/scalismo | src/main/scala/scalismo/sampling/evaluators/ProductEvaluator.scala | Scala | apache-2.0 | 1,881 |
/*
* Copyright (C) 2016 Language Technology Group and Interactive Graphics Systems Group, Technische Universität Darmstadt, Germany
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package models
import org.joda.time.LocalDateTime
import scalikejdbc.WrappedResultSet
/**
* Document representation.
*
* @param id unique document identifier.
* @param content document body containing raw text.
* @param created creation date and time of the document.
* @param highlightedContent document content enriched with tags (<em> [...] </em>) for highlighting. This field is used
* to highlight full-text search results.
*/
case class Document(id: Long, content: String, created: LocalDateTime, highlightedContent: Option[String] = None)
/** Companion object for [[models.Document]] instances. */
object Document {
/** Factory method to create documents from database result sets. */
def apply(rs: WrappedResultSet): Document = Document(
rs.int("id"),
rs.string("content"),
rs.jodaLocalDateTime("created")
)
}
| tudarmstadt-lt/newsleak-frontend | app/models/Document.scala | Scala | agpl-3.0 | 1,656 |
package com.verisign.hio
import com.typesafe.config.ConfigFactory
import com.verisign.hio.commands.{Acat, Ahead}
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hdfs.HdfsConfiguration
import scala.language.reflectiveCalls
object Main {
private val appConf = ConfigFactory.load()
private val hadoopConf: HdfsConfiguration = {
val hadoopConfDir =
Option(System.getenv("HADOOP_CONF_DIR")).getOrElse(appConf.getString("hadoop.envs.hadoop-conf-dir"))
val c = new HdfsConfiguration()
c.addResource(new Path(hadoopConfDir, "core-default.xml"))
c.addResource(new Path(hadoopConfDir, "core-site.xml"))
c.addResource(new Path(hadoopConfDir, "hdfs-default.xml"))
c.addResource(new Path(hadoopConfDir, "hdfs-site.xml"))
c
}
def cmdNames = new {
val acat = appConf.getString("hio.commands.acat.name")
val ahead = appConf.getString("hio.commands.ahead.name")
}
def main(args: Array[String]) {
val appConf = ConfigFactory.load()
if (args != null && args.nonEmpty) {
val cmd = args(0)
val restArgs = args drop 1
cmd match {
case s if s == cmdNames.acat => Acat(restArgs, hadoopConf, appConf)
case s if s == cmdNames.ahead => Ahead(restArgs, hadoopConf, appConf)
case _ => printMainHelpAndExit()
}
}
else {
printMainHelpAndExit()
}
}
private def printMainHelpAndExit(exitCode: Int = 1): Unit = {
Console.err.println(s"${appConf.getString("hio.meta.name")}: ${appConf.getString("hio.meta.summary")}")
Console.err.println()
Console.err.println("Commands:")
Console.err.println(f" ${cmdNames.acat}%-10s -- ${appConf.getString("hio.commands.acat.summary")}")
Console.err.println(f" ${cmdNames.ahead}%-10s -- ${appConf.getString("hio.commands.ahead.summary")}")
System.exit(exitCode)
}
} | verisign/hio | src/main/scala/com/verisign/hio/Main.scala | Scala | apache-2.0 | 1,844 |
package parsing.ir
import parsing.multi.Lexer._
import parsing.multi.LookaheadLexer._
/**
* Created by hongdi.ren.
*/
abstract class Lexer(input: String) {
var p: Int = 0
var c: Char = input.charAt(p)
def consume(): Unit = {
p = p + 1
if (p >= input.length) c = EOF
else c = input.charAt(p)
}
def `match`(x: Char): Unit = {
if (c == x) consume()
else throw new IllegalArgumentException(s"expecting $x; found $c")
}
def nextToken(): Token
def getTokenName(tokenType: Int): String
}
object Lexer {
val EOF: Char = (-1).toChar
val EOF_TYPE: Int = 1
}
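// Example: tokenizing the input "[a, b] = [c]" with this lexer yields
// LBRACK, NAME(a), COMMA, NAME(b), RBRACK, EQUALS, LBRACK, NAME(c), RBRACK,
// followed by the <EOF> token; whitespace is skipped by WS().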
class IRLexer(input: String) extends Lexer(input) {
override def getTokenName(x: Int): String = tokenNames(x)
override def nextToken(): Token = {
while (c != EOF) {
c match {
case _ if blank(c) => WS()
case ',' =>
consume()
return Token(COMMA, ",")
case '[' =>
consume()
return Token(LBRACK, "[")
case ']' =>
consume()
return Token(RBRACK, "]")
case '=' =>
consume()
return Token(EQUALS, "=")
case _ if isLETTER() => return NAME()
case _ => throw new IllegalArgumentException("invalid character: " + c)
}
}
Token(EOF_TYPE, "<EOF>")
}
def isLETTER(): Boolean = c.isLetter
def NAME(): Token = {
val sb = new StringBuilder
do {
sb.append(c)
consume()
} while (isLETTER())
Token(IRLexer.NAME, sb.toString())
}
def WS(): Unit = while (blank(c)) consume()
}
object IRLexer {
val NAME: Int = 2
val COMMA: Int = 3
val LBRACK: Int = 4
val RBRACK: Int = 5
val EQUALS: Int = 6
val tokenNames: IndexedSeq[String] = Vector("n/a", "<EOF>", "NAME", "COMMA", "LBRACK", "RBRACK", "EQUALS")
val blank: Set[Char] = Set(' ', '\\t', '\\n', '\\r')
}
| Ryan-Git/LangImplPatterns | src/main/scala/parsing/ir/IRLexer.scala | Scala | apache-2.0 | 1,859 |
package net.revenj.serialization
import scala.collection.mutable
import scala.util.Try
object Queries {
trait CommandQuery[T <: net.revenj.patterns.Command] {
def from(input: Array[Byte], len: Int, contentType: String, arguments: Map[String, String]): Try[T]
def from(input: java.io.InputStream, contentType: String, arguments: Map[String, String]): Try[T]
def to(command: T, contentType: String, headers: mutable.Map[String, String], output: java.io.OutputStream): Try[_]
}
case class QueryInfo(commandName: String, query: CommandQuery[_ <: net.revenj.patterns.Command])
}
trait Queries {
def find(name: String): Option[Queries.QueryInfo]
} | ngs-doo/revenj | scala/revenj-core/src/main/scala/net/revenj/serialization/Queries.scala | Scala | bsd-3-clause | 668 |
// Copyright 2012 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.hfile.writer.concrete
import io.fsq.hfile.writer.service.CompressionAlgorithm
import java.io.{BufferedOutputStream, FilterOutputStream, OutputStream}
import org.apache.hadoop.conf.{Configurable, Configuration}
import org.apache.hadoop.io.compress.{CodecPool, CompressionCodec, CompressionOutputStream, Compressor}
import org.apache.hadoop.util.ReflectionUtils
class FinishOnFlushCompressionStream(val cout: CompressionOutputStream) extends FilterOutputStream(cout) {
override def write(b: Array[Byte], off: Int, len: Int): Unit = {
cout.write(b, off, len)
}
  override def flush(): Unit = {
cout.finish()
cout.flush()
cout.resetState()
}
}
abstract class ConcreteCompressionAlgorithm extends CompressionAlgorithm {
val conf = new Configuration
conf.setBoolean("hadoop.native.lib", true)
val DataOBufSize: Int = 4 * 1024
def codecOpt(): Option[CompressionCodec]
def createCompressionStream(downStream: OutputStream, compressorOpt: Option[Compressor]): OutputStream = {
val compressor = compressorOpt.getOrElse(throw new Exception("no compressor passed to createCompressionStream"))
val cos = createPlainCompressionStream(downStream, compressor)
new BufferedOutputStream(new FinishOnFlushCompressionStream(cos), DataOBufSize)
}
def createPlainCompressionStream(downStream: OutputStream, compressor: Compressor): CompressionOutputStream = {
val codec = codecOpt().getOrElse(throw new Exception("no codec received"))
codec.asInstanceOf[Configurable].getConf().setInt("io.file.buffer.size", 32 * 1024)
codec.createOutputStream(downStream, compressor)
}
def compressorOpt(): Option[Compressor] = {
codecOpt().map(c => {
val compressor: Compressor = CodecPool.getCompressor(c)
if (compressor.finished) {
compressor.reset()
}
compressor
})
}
def returnCompressor(compressor: Compressor): Unit = {
CodecPool.returnCompressor(compressor)
}
}
object ConcreteCompressionAlgorithm {
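  // Usage sketch (`rawOut` stands for any java.io.OutputStream): callers look an
  // algorithm up by name and wrap their stream with it, e.g.
  //   val algo = ConcreteCompressionAlgorithm.algorithmByName("snappy")
  //   val out = algo.createCompressionStream(rawOut, algo.compressorOpt())
  // Only "none" and "snappy" are recognised; any other name throws.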
def algorithmByName(algorithm: String): CompressionAlgorithm = {
algorithm match {
case "none" => new NoneCompressionAlgorithm
case "snappy" => new SnappyCompressionAlgorithm
case _ => throw new Exception("compression algorithm '%s' not supported".format(algorithm))
}
}
}
class NoneCompressionAlgorithm extends ConcreteCompressionAlgorithm {
def codecOpt(): Option[CompressionCodec] = None
override def createCompressionStream(downStream: OutputStream, compressorOpt: Option[Compressor]): OutputStream = {
new BufferedOutputStream(downStream)
}
val compressionName: String = "none"
val compressionId: Int = 2
}
class SnappyCompressionAlgorithm extends ConcreteCompressionAlgorithm {
lazy val codecOpt: Option[CompressionCodec] = {
val externalCodec = ClassLoader.getSystemClassLoader().loadClass("org.apache.hadoop.io.compress.SnappyCodec")
Some(ReflectionUtils.newInstance(externalCodec, conf).asInstanceOf[CompressionCodec])
}
val compressionName: String = "snappy"
val compressionId: Int = 3
}
| foursquare/fsqio | src/jvm/io/fsq/hfile/writer/concrete/ConcreteCompressionAlgorithm.scala | Scala | apache-2.0 | 3,148 |
/*
* Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twosigma.beakerx.scala.chart.xychart.plotitem
import com.twosigma.beakerx.chart.Color
import org.junit.Test
import org.scalatest.Matchers._
class BarsTest {
@Test
@throws[Exception]
def widthsEmpty(): Unit = {
val bars = new Bars()
bars.widths shouldBe empty
}
@Test
@throws[Exception]
def widths(): Unit = {
val bars = new Bars()
bars.width = List(1, 2, 3)
bars.widths should equal (Seq(1, 2, 3))
bars.width = Array(1.0, 3.0)
bars.widths should equal (Seq(1, 3))
}
@Test
@throws[Exception]
def widthEmpty(): Unit = {
val bars = new Bars()
bars.width shouldBe empty
}
@Test
@throws[Exception]
def width(): Unit = {
val bars = new Bars()
bars.width = 2.5
bars.width should contain(2.5)
}
@Test
@throws[Exception]
def outlineColor(): Unit = {
val bars = new Bars()
bars.outlineColor shouldBe empty
bars.outlineColor = Color.RED
bars.outlineColor should contain(Color.RED)
}
@Test
@throws[Exception]
def outlineColors(): Unit = {
val bars = new Bars()
bars.outlineColors shouldBe empty
bars.outlineColor = Seq(Color.RED, Color.GREEN)
bars.outlineColors shouldBe Seq(Color.RED, Color.GREEN)
}
} | jpallas/beakerx | kernel/scala/src/test/scala/com/twosigma/beakerx/scala/chart/xychart/plotitem/BarsTest.scala | Scala | apache-2.0 | 1,856 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.reflect.reify
package phases
import scala.annotation.tailrec
import scala.tools.nsc.symtab.Flags._
trait Reshape {
self: Reifier =>
import global._
import definitions._
import treeInfo.Unapplied
private val runDefinitions = currentRun.runDefinitions
import runDefinitions._
/**
* Rolls back certain changes that were introduced during typechecking of the reifee.
*
* These include:
* * Undoing macro expansions
* * Replacing type trees with TypeTree(tpe)
* * Reassembling CompoundTypeTrees into reifiable form
* * Transforming Modifiers.annotations into Symbol.annotations
* * Transforming Annotated annotations into AnnotatedType annotations
* * Transforming Annotated(annot, expr) into Typed(expr, TypeTree(Annotated(annot, _))
* * Non-idempotencies of the typechecker: https://github.com/scala/bug/issues/5464
*/
val reshape = new Transformer {
var currentSymbol: Symbol = NoSymbol
override def transform(tree0: Tree) = {
val tree = undoMacroExpansion(tree0)
currentSymbol = tree.symbol
val preTyper = tree match {
case tree if tree.isErroneous =>
tree
case tt @ TypeTree() =>
toPreTyperTypeTree(tt)
case ctt @ CompoundTypeTree(_) =>
toPreTyperCompoundTypeTree(ctt)
case toa @ TypedOrAnnotated(_) =>
toPreTyperTypedOrAnnotated(toa)
case ta @ TypeApply(_, _) if isCrossStageTypeBearer(ta) =>
if (reifyDebug) println("cross-stage type bearer, retaining: " + tree)
ta
case ta @ TypeApply(hk, ts) =>
val discard = ts collect { case tt: TypeTree => tt } exists isDiscarded
if (reifyDebug && discard) println("discarding TypeApply: " + tree)
if (discard) hk else ta
case classDef @ ClassDef(mods, name, params, impl) =>
val Template(parents, self, body) = impl
var body1 = trimAccessors(classDef, body)
body1 = trimSyntheticCaseClassMembers(classDef, body1)
val impl1 = Template(parents, self, body1).copyAttrs(impl)
ClassDef(mods, name, params, impl1).copyAttrs(classDef)
case moduledef @ ModuleDef(mods, name, impl) =>
val Template(parents, self, body) = impl
var body1 = trimAccessors(moduledef, body)
body1 = trimSyntheticCaseClassMembers(moduledef, body1)
val impl1 = Template(parents, self, body1).copyAttrs(impl)
ModuleDef(mods, name, impl1).copyAttrs(moduledef)
case template @ Template(parents, self, body) =>
val discardedParents = parents collect { case tt: TypeTree => tt } filter isDiscarded
if (reifyDebug && discardedParents.length > 0) println("discarding parents in Template: " + discardedParents.mkString(", "))
val parents1 = parents diff discardedParents
val body1 = trimSyntheticCaseClassCompanions(body)
Template(parents1, self, body1).copyAttrs(template)
case block @ Block(stats, expr) =>
val stats1 = trimSyntheticCaseClassCompanions(stats)
Block(stats1, expr).copyAttrs(block)
case unapply @ UnApply(Unapplied(Select(fun, nme.unapply | nme.unapplySeq)), args) =>
if (reifyDebug) println("unapplying unapply: " + tree)
Apply(fun, args).copyAttrs(unapply)
case _ =>
tree
}
super.transform(preTyper)
}
private def undoMacroExpansion(tree: Tree): Tree =
tree.attachments.get[analyzer.MacroExpansionAttachment] match {
case Some(analyzer.MacroExpansionAttachment(original, _)) =>
def mkImplicitly(tp: Type) = atPos(tree.pos)(
gen.mkNullaryCall(Predef_implicitly, List(tp))
)
val sym = original.symbol
original match {
// this hack is necessary until I fix implicit macros
// so far tag materialization is implemented by sneaky macros hidden in scala-compiler.jar
            // hence we cannot reify references to them, because no one will be able to see them later
// when implicit macros are fixed, these sneaky macros will move to corresponding companion objects
// of, say, ClassTag or TypeTag
case Apply(TypeApply(_, List(tt)), _) if sym == materializeClassTag => mkImplicitly(appliedType(ClassTagClass, tt.tpe))
case Apply(TypeApply(_, List(tt)), List(pre)) if sym == materializeWeakTypeTag => mkImplicitly(typeRef(pre.tpe, WeakTypeTagClass, List(tt.tpe)))
case Apply(TypeApply(_, List(tt)), List(pre)) if sym == materializeTypeTag => mkImplicitly(typeRef(pre.tpe, TypeTagClass, List(tt.tpe)))
case _ => original
}
case _ => tree
}
override def transformModifiers(mods: Modifiers) = {
val mods1 = toPreTyperModifiers(mods, currentSymbol)
super.transformModifiers(mods1)
}
private def toPreTyperModifiers(mods: Modifiers, sym: Symbol) = {
if (!sym.annotations.isEmpty) {
val postTyper = sym.annotations filter (_.original != EmptyTree)
if (reifyDebug && !postTyper.isEmpty) println("reify symbol annotations for: " + sym)
if (reifyDebug && !postTyper.isEmpty) println("originals are: " + sym.annotations)
val preTyper = postTyper map toPreTyperAnnotation
mods.withAnnotations(preTyper)
} else {
mods
}
}
/** Restore pre-typer representation of a type.
*
* NB: This is the trickiest part of reification!
*
* In most cases, we're perfectly fine to reify a Type itself (see `reifyType`).
* However if the type involves a symbol declared inside the quasiquote (i.e. registered in `boundSyms`),
* then we cannot reify it, or otherwise subsequent reflective compilation will fail.
*
* Why will it fail? Because reified deftrees (e.g. ClassDef(...)) will generate fresh symbols during that compilation,
* so naively reified symbols will become out of sync, which brings really funny compilation errors and/or crashes, e.g.:
* https://github.com/scala/bug/issues/5230
*
* To deal with this unpleasant fact, we need to fall back from types to equivalent trees (after all, parser trees don't contain any types, just trees, so it should be possible).
* Luckily, these original trees get preserved for us in the `original` field when Trees get transformed into TypeTrees.
* And if an original of a type tree is empty, we can safely assume that this type is non-essential (e.g. was inferred/generated by the compiler).
* In that case the type can be omitted (e.g. reified as an empty TypeTree), since it will be inferred again later on.
*
* An important property of the original is that it isn't just a pre-typer tree.
* It's actually kind of a post-typer tree with symbols assigned to its Idents (e.g. Ident("List") will contain a symbol that points to immutable.this.List).
* This is very important, since subsequent reflective compilation won't have to resolve these symbols.
   * In the general case, such resolution cannot be performed, since reification doesn't preserve lexical context,
   * which means that reflective compilation won't be aware of, say, imports that were provided when the reifee has been compiled.
   * This workaround worked surprisingly well and allowed me to fix several important reification bugs, until the abstraction leaked.
   * Suddenly I found out that in certain contexts original trees do not contain symbols, but are just parser trees.
   * So far I know of only one such situation: typedAnnotations does not typecheck the annotation in-place, but rather creates new trees and typechecks them, so the original remains symless.
* To the moment I know only one such situation: typedAnnotations does not typecheck the annotation in-place, but rather creates new trees and typechecks them, so the original remains symless.
* Thus we apply a workaround for that in typedAnnotated. I hope this will be the only workaround in this department.
* upd. There are also problems with CompoundTypeTrees. I had to use attachments to retain necessary information.
*
* upd. Recently I went ahead and started using original for all TypeTrees, regardless of whether they refer to local symbols or not.
* As a result, `reifyType` is never called directly by tree reification (and, wow, it seems to work great!).
* The only usage of `reifyType` now is for servicing typetags, however, I have some ideas how to get rid of that as well.
*/
private def isDiscarded(tt: TypeTree) = tt.original == null
private def toPreTyperTypeTree(tt: TypeTree): Tree = {
if (!isDiscarded(tt)) {
// here we rely on the fact that the originals that reach this point
// have all necessary symbols attached to them (i.e. that they can be recompiled in any lexical context)
// if this assumption fails, please, don't be quick to add postprocessing here (like I did before)
// but rather try to fix this in Typer, so that it produces quality originals (like it's done for typedAnnotated)
if (reifyDebug) println("TypeTree, essential: %s (%s)".format(tt.tpe, tt.tpe.kind))
if (reifyDebug) println("verdict: rolled back to original %s".format(tt.original.toString.replaceAll("\\\\s+", " ")))
transform(tt.original)
} else {
// type is deemed to be non-essential
// erase it and hope that subsequent reflective compilation will be able to recreate it again
if (reifyDebug) println("TypeTree, non-essential: %s (%s)".format(tt.tpe, tt.tpe.kind))
if (reifyDebug) println("verdict: discarded")
TypeTree()
}
}
private def toPreTyperCompoundTypeTree(ctt: CompoundTypeTree): Tree = {
val CompoundTypeTree(tmpl @ Template(parents, self, stats)) = ctt
if (stats.nonEmpty) CannotReifyCompoundTypeTreeWithNonEmptyBody(ctt)
assert(self eq noSelfType, self)
val att = tmpl.attachments.get[CompoundTypeTreeOriginalAttachment]
val CompoundTypeTreeOriginalAttachment(parents1, stats1) = att.getOrElse(CompoundTypeTreeOriginalAttachment(parents, stats))
CompoundTypeTree(Template(parents1, self, stats1))
}
@tailrec
private def toPreTyperTypedOrAnnotated(tree: Tree): Tree = tree match {
case ty @ Typed(expr1, tpt) =>
if (reifyDebug) println("reify typed: " + tree)
val original = tpt match {
case tt @ TypeTree() => tt.original
case tpt => tpt
}
val annotatedArg = {
@tailrec
def loop(tree: Tree): Tree = tree match {
case annotated1 @ Annotated(ann, annotated2 @ Annotated(_, _)) => loop(annotated2)
case annotated1 @ Annotated(ann, arg) => arg
case _ => EmptyTree
}
loop(original)
}
if (annotatedArg != EmptyTree) {
if (annotatedArg.isType) {
if (reifyDebug) println("verdict: was an annotated type, reify as usual")
ty
} else {
if (reifyDebug) println("verdict: was an annotated value, equivalent is " + original)
toPreTyperTypedOrAnnotated(original)
}
} else {
if (reifyDebug) println("verdict: wasn't annotated, reify as usual")
ty
}
case at @ Annotated(annot, arg) =>
if (reifyDebug) println("reify type annotations for: " + tree)
assert(at.tpe.isInstanceOf[AnnotatedType], "%s (%s)".format(at.tpe, at.tpe.kind))
val annot1 = toPreTyperAnnotation(at.tpe.asInstanceOf[AnnotatedType].annotations(0))
if (reifyDebug) println("originals are: " + annot1)
Annotated(annot1, arg).copyAttrs(at)
}
/** Restore pre-typer representation of an annotation.
* The trick here is to retain the symbols that have been populated during typechecking of the annotation.
* If we do not do that, subsequent reflective compilation will fail.
*/
private def toPreTyperAnnotation(ann: AnnotationInfo): Tree = {
val args = if (ann.assocs.isEmpty) {
ann.args
} else {
def toScalaAnnotation(jann: ClassfileAnnotArg): Tree = (jann: @unchecked) match {
case LiteralAnnotArg(const) => Literal(const)
case ArrayAnnotArg(arr) => Apply(Ident(definitions.ArrayModule), arr.toList map toScalaAnnotation)
case NestedAnnotArg(ann) => toPreTyperAnnotation(ann)
}
ann.assocs map { case (nme, arg) => NamedArg(Ident(nme), toScalaAnnotation(arg)) }
}
def extractOriginal: PartialFunction[Tree, Tree] = { case Apply(Select(New(tpt), _), _) => tpt }
assert(extractOriginal.isDefinedAt(ann.original), s"$ann has unexpected original ${showRaw(ann.original)}" )
New(TypeTree(ann.atp) setOriginal extractOriginal(ann.original), List(args))
}
private def trimAccessors(deff: Tree, stats: List[Tree]): List[Tree] = {
val symdefs = (stats collect { case vodef: ValOrDefDef => vodef } map (vodeff => vodeff.symbol -> vodeff)).toMap
val accessors = scala.collection.mutable.Map[ValDef, List[DefDef]]()
stats collect { case ddef: DefDef => ddef } foreach (defdef => {
val valdef = symdefs get defdef.symbol.accessedOrSelf collect { case vdef: ValDef => vdef } getOrElse null
if (valdef != null) accessors(valdef) = accessors.getOrElse(valdef, Nil) :+ defdef
def detectBeanAccessors(prefix: String): Unit = {
if (defdef.name.startsWith(prefix)) {
val name = defdef.name.toString.substring(prefix.length)
def uncapitalize(s: String) = if (s.length == 0) "" else { val chars = s.toCharArray; chars(0) = chars(0).toLower; new String(chars) }
def findValDef(name: String) = symdefs.values collectFirst {
case vdef: ValDef if vdef.name.dropLocal string_== name => vdef
}
val valdef = findValDef(name).orElse(findValDef(uncapitalize(name))).orNull
if (valdef != null) accessors(valdef) = accessors.getOrElse(valdef, Nil) :+ defdef
}
}
detectBeanAccessors("get")
detectBeanAccessors("set")
detectBeanAccessors("is")
})
val stats1 = stats flatMap {
case vdef @ ValDef(mods, name, tpt, rhs) if !mods.isLazy =>
val mods1 = if (accessors.contains(vdef)) {
val ddef = accessors(vdef)(0) // any accessor will do
val Modifiers(flags, _, annotations) = mods
var flags1 = flags & ~LOCAL
if (!ddef.symbol.isPrivate) flags1 = flags1 & ~PRIVATE
val privateWithin1 = ddef.mods.privateWithin
val annotations1 =
accessors(vdef).foldLeft(annotations){ (curr, acc) =>
curr ++ (acc.symbol.annotations.filterNot(_ == UnmappableAnnotation ).map(toPreTyperAnnotation))
}
Modifiers(flags1, privateWithin1, annotations1) setPositions mods.positions
} else {
mods
}
val mods2 = toPreTyperModifiers(mods1, vdef.symbol)
val name1 = name.dropLocal
val vdef1 = ValDef(mods2, name1.toTermName, tpt, rhs)
if (reifyDebug) println("resetting visibility of field: %s => %s".format(vdef, vdef1))
Some(vdef1) // no copyAttrs here, because new ValDef and old symbols are now out of sync
case ddef: DefDef if !ddef.mods.isLazy =>
// lazy val accessors are removed in reshapeLazyVals
// as they are needed to recreate lazy vals
if (accessors.values.exists(_.contains(ddef))) {
if (reifyDebug) println("discarding accessor method: " + ddef)
None
} else {
Some(ddef)
}
case tree =>
Some(tree)
}
stats1
}
private def trimSyntheticCaseClassMembers(deff: Tree, stats: List[Tree]): List[Tree] =
stats filterNot (memberDef => memberDef.isDef && {
val isSynthetic = memberDef.symbol.isSynthetic
// this doesn't work for local classes, e.g. for ones that are top-level to a quasiquote (see comments to companionClass)
// that's why I replace the check with an assumption that all synthetic members are, in fact, generated of case classes
// val isCaseMember = deff.symbol.isCaseClass || deff.symbol.companionClass.isCaseClass
val isCaseMember = true
if (isSynthetic && isCaseMember && reifyDebug) println("discarding case class synthetic def: " + memberDef)
isSynthetic && isCaseMember
})
private def trimSyntheticCaseClassCompanions(stats: List[Tree]): List[Tree] =
stats diff (stats collect { case moddef: ModuleDef => moddef } filter (moddef => {
val isSynthetic = moddef.symbol.isSynthetic
// this doesn't work for local classes, e.g. for ones that are top-level to a quasiquote (see comments to companionClass)
// that's why I replace the check with an assumption that all synthetic modules are, in fact, companions of case classes
// val isCaseCompanion = moddef.symbol.companionClass.isCaseClass
val isCaseCompanion = true
if (isSynthetic && isCaseCompanion && reifyDebug) println("discarding synthetic case class companion: " + moddef)
isSynthetic && isCaseCompanion
}))
}
}
| martijnhoekstra/scala | src/compiler/scala/reflect/reify/phases/Reshape.scala | Scala | apache-2.0 | 17,675 |
object Test {
val m = scala.collection.mutable.MultiMap(1, 2, 3)
}
| folone/dotty | tests/untried/neg/noMember2.scala | Scala | bsd-3-clause | 69 |
package collins.controllers.actions.state
import scala.concurrent.Future
import play.api.data.Form
import play.api.data.Forms.ignored
import play.api.data.Forms.tuple
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import collins.controllers.Api
import collins.controllers.SecureController
import collins.controllers.actions.RequestDataHolder
import collins.controllers.actions.SecureAction
import collins.controllers.validators.ParamValidation
import collins.models.State
import collins.models.{Status => AStatus}
import collins.util.MessageHelper
import collins.util.security.SecuritySpecification
import collins.validation.StringUtil
import CreateAction.Messages.fuck
object CreateAction {
object Messages extends MessageHelper("controllers.AssetStateApi.createState") {
def invalidName = messageWithDefault("invalidName", "The specified name is invalid")
def invalidDescription = messageWithDefault("invalidDescription",
"The specified description is invalid")
def invalidStatus = rootMessage("asset.status.invalid")
def invalidLabel = messageWithDefault("invalidLabel", "The specified label is invalid")
}
}
/**
* Create a new asset state
*
* @apigroup AssetState
* @apimethod PUT
* @apiurl /api/state/:name
* @apiparam name String A unique name between 2 and 32 characters, must be upper case
* @apiparam status Option[String] Status name to bind this state to, or Any to bind to all status
* @apiparam label String A friendly display label between 2 and 32 characters
* @apiparam description String A longer description of the state between 2 and 255 characters
* @apirespond 201 success
* @apirespond 400 invalid input
* @apirespond 409 name already in use
* @apirespond 500 error saving state
* @apiperm controllers.AssetStateApi.createState
* @collinsshell {{{
* collins-shell state create --name=NAME --label=LABEL --description='DESCRIPTION' [--status=Status]
* }}}
* @curlexample {{{
* curl -v -u blake:admin:first --basic \\
* -d label='Test Label' \\
* -d description='This is for testing' \\
* http://localhost:9000/api/state/TESTING
* }}}
*/
case class CreateAction(
name: String,
spec: SecuritySpecification,
handler: SecureController
) extends SecureAction(spec, handler) with ParamValidation {
import CreateAction.Messages._
case class ActionDataHolder(state: State) extends RequestDataHolder
val stateForm = Form(tuple(
"id" -> ignored(0:Int),
"status" -> validatedOptionalText(2),
"label" -> validatedText(2, 32),
"description" -> validatedText(2, 255)
))
override def validate(): Validation = stateForm.bindFromRequest()(request).fold(
err => Left(RequestDataHolder.error400(fieldError(err))),
form => {
val (id, statusOpt, label, description) = form
val validatedName = StringUtil.trim(name)
.filter(s => s.length > 1 && s.length <= 32)
.map(_.toUpperCase)
val statusId = getStatusId(statusOpt)
if (statusOpt.isDefined && !statusId.isDefined) {
Left(RequestDataHolder.error400(invalidStatus))
} else if (!validatedName.isDefined) {
Left(RequestDataHolder.error400(invalidName))
} else if (State.findByName(validatedName.get).isDefined) {
Left(RequestDataHolder.error409(invalidName))
} else {
Right(
ActionDataHolder(State(0, statusId.getOrElse(State.ANY_STATUS), validatedName.get, label, description))
)
}
}
)
override def execute(rdh: RequestDataHolder) = Future {
rdh match {
case ActionDataHolder(state) => try {
State.create(state) match {
case ok if ok.id > 0 =>
Api.statusResponse(true, Status.Created)
case bad =>
Api.statusResponse(false, Status.InternalServerError)
}
} catch {
case e: Throwable =>
Api.errorResponse("Failed to add state", Status.InternalServerError, Some(e))
}
}
}
protected def fieldError(f: Form[_]) = f match {
case e if e.error("name").isDefined => invalidName
case e if e.error("label").isDefined => invalidLabel
case e if e.error("description").isDefined => invalidDescription
case e if e.error("status").isDefined => invalidStatus
case n => fuck
}
protected def getStatusId(status: Option[String]): Option[Int] = status.flatMap { s =>
(s.toUpperCase == State.ANY_NAME.toUpperCase) match {
case true => Some(State.ANY_STATUS)
case false => AStatus.findByName(s).map(_.id)
}
}
}
| funzoneq/collins | app/collins/controllers/actions/state/CreateAction.scala | Scala | apache-2.0 | 4,585 |
package l1distopt.utils
import breeze.linalg.{DenseVector, NumericOps, SparseVector, Vector}
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext
import scala.math._
object OptUtils {
/**
* Loads data stored in LIBSVM format columnwise (i.e., by feature)
* Used for storing training dataset
*
* @param sc SparkContext
* @param filename location of data
* @param numSplits number of data splits
* @param numFeats number of features in the dataset
* @return
*/
def loadLIBSVMDataColumn(
sc: SparkContext,
filename: String,
numSplits: Int,
numFeats: Int): (RDD[(Int, SparseVector[Double])], DenseVector[Double]) = {
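    // Each input line is expected in LIBSVM format, "label index1:value1 index2:value2 ...",
    // with 1-based feature indices, e.g. "1.0 3:0.5 7:1.2". Indices are shifted to 0-based
    // below and the data is regrouped by feature (column) rather than by example (row).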
// read in text file
val data = sc.textFile(filename,numSplits).coalesce(numSplits) // note: coalesce can result in data being sent over the network. avoid this for large datasets
val numEx = data.count().toInt
// find number of elements per partition
val numParts = data.partitions.size
val sizes = data.mapPartitionsWithSplit{ case(i,lines) =>
Iterator(i -> lines.length)
}.collect().sortBy(_._1)
val offsets = sizes.map(x => x._2).scanLeft(0)(_+_).toArray
// parse input
val parsedData = data.mapPartitionsWithSplit { case(partition, lines) =>
lines.zipWithIndex.flatMap{ case(line, idx) =>
// calculate index for line
val index = offsets(partition) + idx
if(index < numEx) {
// parse label
val parts = line.trim().split(' ')
var label = parts(0).toDouble
// parse features
val featureArray = parts.slice(1,parts.length)
.map(_.split(':')
match { case Array(i,j) => (i.toInt-1, (index, j.toDouble))}).toArray
Iterator((label, featureArray))
}
else {
Iterator()
}
}
}
// collect all of the labels
val y = new DenseVector[Double](parsedData.map(x => x._1).collect())
// arrange RDD by feature
val feats = parsedData.flatMap(x => x._2.iterator)
.groupByKey().map(x => (x._1, x._2.toArray)).map(x => (x._1, new SparseVector[Double](x._2.map(y => y._1), x._2.map(y => y._2), numEx)))
// return data and labels
println("successfully loaded training data")
return (feats,y)
}
/**
* Loads data stored in LIBSVM format
* Used for storing test dataset
*
* @param sc SparkContext
* @param filename location of data
* @param numSplits number of data splits
* @param numFeats number of features in the dataset
* @return
*/
def loadLIBSVMData(
sc: SparkContext,
filename: String,
numSplits: Int,
numFeats: Int): RDD[LabeledPoint] = {
// read in text file
val data = sc.textFile(filename,numSplits).coalesce(numSplits)
val numEx = data.count()
// find number of elements per partition
val numParts = data.partitions.size
val sizes = data.mapPartitionsWithSplit{ case(i,lines) =>
Iterator(i -> lines.length)
}.collect().sortBy(_._1)
val offsets = sizes.map(x => x._2).scanLeft(0)(_+_).toArray
// parse input
data.mapPartitionsWithSplit { case(partition, lines) =>
lines.zipWithIndex.flatMap{ case(line, idx) =>
// calculate index for line
val index = offsets(partition) + idx
if(index < numEx){
// parse label
val parts = line.trim().split(' ')
var label = parts(0).toDouble
// parse features
val featureArray = parts.slice(1,parts.length)
.map(_.split(':')
match { case Array(i,j) => (i.toInt-1,j.toDouble)}).toArray
var features = new SparseVector[Double](featureArray.map(x=>x._1),
featureArray.map(x=>x._2), numFeats)
// create classification point
Iterator(LabeledPoint(label, features))
}
else{
Iterator()
}
}
}
}
/**
* Computes the primal objective function value for elastic net regression:
* 1/(2n)||A * alpha - b||_2^2 + \\lambda * (eta*||alpha||_1 + (1-eta)*.5*||alpha||_2^2)
*
* @param alpha primal variable vector
* @param w residual vector w = A * alpha - b
* @param lambda regularization parameter
* @param eta elastic net parameter
* @return
*/
def computeElasticNetObjective(
alpha: Vector[Double],
w: Vector[Double],
lambda: Double,
eta: Double): Double = {
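    // err = ||w||_2 with w = A * alpha - b, so err * err / (2 * w.size) is the data-fit
    // term 1/(2n)||A * alpha - b||_2^2; `regularization` adds the elastic net penalty
    // lambda * (eta * ||alpha||_1 + (1 - eta) * 0.5 * ||alpha||_2^2).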
val err = w.norm(2)
val twonorm = alpha.norm(2)
val regularization = lambda * (eta * alpha.norm(1) + (1 - eta) * .5 * twonorm * twonorm)
return err * err / (2 * w.size) + regularization
}
/**
* Computes the RMSE on a test dataset
*
* @param testData RDD of labeledPoints
* @param alpha primal variable vector
* @return
*/
def computeRMSE(testData: RDD[LabeledPoint], alpha: Vector[Double]): Double = {
val squared_err = testData.map(pt => pow(((pt.features dot alpha) - pt.label), 2)).mean()
return sqrt(squared_err)
}
} | gingsmith/proxcocoa | src/main/scala/utils/OptUtils.scala | Scala | apache-2.0 | 5,055 |
import generic.*
import Tree.*
import List.*
import java.io.*
import Shapes.*
import SearchResult.*
object Test {
import Serialization.*
private var lCount, tCount, sCount = 0
// ------- Code that will eventually be produced by macros -------------
implicit def ListSerializable[Elem](implicit es: Serializable[Elem]): Serializable[List[Elem]] = {
implicit lazy val lsElem: Serializable[List[Elem]] = {
lCount += 1 // test code to verify we create bounded number of Serializables
RecSerializable[List[Elem], List.Shape[Elem]]
}
lsElem
}
implicit def TreeSerializable[R]: Serializable[Tree[R]] = {
implicit lazy val tR: Serializable[Tree[R]] = {
tCount += 1 // test code to verify we create bounded number of Serializables
RecSerializable[Tree[R], Tree.Shape[R]]
}
tR
}
implicit lazy val tsInt: Serializable[Tree[Int]] = TreeSerializable[Int]
implicit lazy val tsBoolean: Serializable[Tree[Boolean]] = TreeSerializable[Boolean]
implicit lazy val SearchResultSerializable: Serializable[SearchResult] = {
sCount += 1
RecSerializable[SearchResult, SearchResult.Shape]
}
// ------- Test code --------------------------------------------------------
/** Serialize data, then deserialize it back and check that it is the same. */
def sds[D](data: D)(implicit ser: Serializable[D]) = {
val outBytes = new ByteArrayOutputStream
val out = new DataOutputStream(outBytes)
ser.write(data, out)
out.flush()
val inBytes = new ByteArrayInputStream(outBytes.toByteArray)
val in = new DataInputStream(inBytes)
val result = ser.read(in)
assert(data == result, s"$data != $result")
}
val data1 =
Cons(1, Cons(2, Cons(3, Nil)))
val data2 =
If(IsZero(Pred(Succ(Zero))), Succ(Succ(Zero)), Pred(Pred(Zero)))
val data3 = Cons(Color.Red, Cons(Color.Green, Cons(Color.Blue, Nil)))
val data4 = Ambiguous(Success(Color.Green), Diverging)
def main(args: Array[String]) = {
sds(data1)
assert(lCount == 1, lCount)
sds(data2)
assert(tCount == 2, tCount)
sds(data3)
assert(lCount == 2, lCount)
sds(data4)
assert(sCount == 1, sCount)
}
}
| dotty-staging/dotty | tests/run/generic/Test.scala | Scala | apache-2.0 | 2,182 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.writers
import javax.xml.xpath.{ XPath, XPathConstants, XPathFactory }
import com.wegtam.tensei.adt.DFASDL
import com.wegtam.tensei.agent.DefaultSpec
import org.dfasdl.utils.ElementNames
import org.w3c.dom.{ Element, NodeList }
class DatabaseWriterFunctionsTest extends DefaultSpec with DatabaseWriterFunctions {
describe("DatabaseWriterFunctions") {
describe("createForeignKeyStatements") {
describe("given a table without foreign keys") {
it("should return an empty list") {
val targetDfasdl =
DFASDL(
id = "T",
content =
"""
|<dfasdl xmlns="http://www.dfasdl.org/DFASDL" default-encoding="utf-8" semantic="niem">
| <seq id="target1" keepID="true">
| <elem id="target1-row">
| <num id="A" db-column-name="id"/>
| <str id="B" db-column-name="name"/>
| </elem>
| </seq>
| <seq id="target2" keepID="true">
| <elem id="target2-row">
| <num id="C" db-column-name="id" db-auto-inc="true"/>
| <str id="D" db-column-name="firstname" db-foreign-key="I"/>
| <num id="E" db-column-name="my_name" db-foreign-key="F"/>
| </elem>
| </seq>
| <seq id="target3" keepID="true">
| <elem id="target3-row">
| <num id="F" db-column-name="id"/>
| <str id="G" db-column-name="name" db-foreign-key="L"/>
| </elem>
| </seq>
| <seq id="target4" keepID="true">
| <elem id="target4-row">
| <num id="H" db-column-name="id"/>
| <str id="I" db-column-name="name"/>
| <num id="J" db-column-name="another_id"/>
| <str id="J2" db-column-name="yet_another_foreigner"/>
| </elem>
| </seq>
| <seq id="target5" keepID="true">
| <elem id="target5-row">
| <num id="K" db-column-name="id" db-foreign-key="A"/>
| <str id="L" db-column-name="name"/>
| </elem>
| </seq>
|</dfasdl>
| """.stripMargin
)
val docWithSchema = createNormalizedDocument(targetDfasdl.content)
val table = docWithSchema.getElementById("target1")
createForeignKeyStatements(table)(docWithSchema) should be(Seq.empty[String])
}
}
describe("given a table with foreign keys") {
it("should return the correct statements") {
val targetDfasdl =
DFASDL(
id = "T",
content =
"""
|<dfasdl xmlns="http://www.dfasdl.org/DFASDL" default-encoding="utf-8" semantic="niem">
| <seq id="target1" keepID="true">
| <elem id="target1-row">
| <num id="A" db-column-name="id"/>
| <str id="B" db-column-name="name"/>
| </elem>
| </seq>
| <seq id="target2" keepID="true">
| <elem id="target2-row">
| <num id="C" db-column-name="id" db-auto-inc="true"/>
| <str id="D" db-column-name="firstname" db-foreign-key="I"/>
| <num id="E" db-column-name="my_name" db-foreign-key="F"/>
| </elem>
| </seq>
| <seq id="target3" keepID="true">
| <elem id="target3-row">
| <num id="F" db-column-name="id"/>
| <str id="G" db-column-name="name" db-foreign-key="L"/>
| </elem>
| </seq>
| <seq id="target4" keepID="true">
| <elem id="target4-row">
| <num id="H" db-column-name="id"/>
| <str id="I" db-column-name="name"/>
| <num id="J" db-column-name="another_id"/>
| <str id="J2" db-column-name="yet_another_foreigner"/>
| </elem>
| </seq>
| <seq id="target5" keepID="true">
| <elem id="target5-row">
| <num id="K" db-column-name="id" db-foreign-key="A"/>
| <str id="L" db-column-name="name"/>
| </elem>
| </seq>
|</dfasdl>
| """.stripMargin
)
val docWithSchema = createNormalizedDocument(targetDfasdl.content)
val table = docWithSchema.getElementById("target2")
val expectedStatements = List(
"ALTER TABLE target2 ADD FOREIGN KEY (firstname) REFERENCES target4(name)",
"ALTER TABLE target2 ADD FOREIGN KEY (my_name) REFERENCES target3(id)"
)
createForeignKeyStatements(table)(docWithSchema) should be(expectedStatements)
}
}
}
describe("sortTables") {
describe("given no foreign keys") {
it("should return the original list") {
val targetDfasdl =
DFASDL(
id = "T",
content =
"""
|<dfasdl xmlns="http://www.dfasdl.org/DFASDL" default-encoding="utf-8" semantic="niem">
| <seq id="target1" keepID="true">
| <elem id="target1-row">
| <num id="A" db-column-name="id"/>
| <str id="B" db-column-name="name"/>
| </elem>
| </seq>
| <seq id="target2" keepID="true">
| <elem id="target2-row">
| <num id="C" db-column-name="id" db-auto-inc="true"/>
| <str id="D" db-column-name="firstname"/>
| <num id="E" db-column-name="my_name"/>
| </elem>
| </seq>
| <seq id="target3" keepID="true">
| <elem id="target3-row">
| <num id="F" db-column-name="id"/>
| <str id="G" db-column-name="name"/>
| </elem>
| </seq>
| <seq id="target4" keepID="true">
| <elem id="target4-row">
| <num id="H" db-column-name="id"/>
| <str id="I" db-column-name="name"/>
| <num id="J" db-column-name="another_id"/>
| <str id="J2" db-column-name="yet_another_foreigner"/>
| </elem>
| </seq>
| <seq id="target5" keepID="true">
| <elem id="target5-row">
| <num id="K" db-column-name="id"/>
| <str id="L" db-column-name="name"/>
| </elem>
| </seq>
|</dfasdl>
| """.stripMargin
)
val doc = createNormalizedDocument(targetDfasdl.content, useSchema = false)
val docWithSchema = createNormalizedDocument(targetDfasdl.content)
val xpath: XPath = XPathFactory.newInstance().newXPath()
val ts = xpath
.evaluate(
s"/${ElementNames.ROOT}/${ElementNames.SEQUENCE} | /${ElementNames.ROOT}/${ElementNames.FIXED_SEQUENCE}",
doc.getDocumentElement,
XPathConstants.NODESET
)
.asInstanceOf[NodeList]
val tables = for (idx <- 0 until ts.getLength) yield ts.item(idx).asInstanceOf[Element]
sortTables(tables)(docWithSchema) should be(tables)
}
}
describe("given foreign keys") {
it("should sort the tables correctly") {
val targetDfasdl =
DFASDL(
id = "T",
content =
"""
|<dfasdl xmlns="http://www.dfasdl.org/DFASDL" default-encoding="utf-8" semantic="niem">
| <seq id="target1" keepID="true">
| <elem id="target1-row">
| <num id="A" db-column-name="id"/>
| <str id="B" db-column-name="name"/>
| </elem>
| </seq>
| <seq id="target2" keepID="true">
| <elem id="target2-row">
| <num id="C" db-column-name="id" db-auto-inc="true"/>
| <str id="D" db-column-name="firstname" db-foreign-key="I"/>
| <num id="E" db-column-name="my_name" db-foreign-key="F"/>
| </elem>
| </seq>
| <seq id="target3" keepID="true">
| <elem id="target3-row">
| <num id="F" db-column-name="id"/>
| <str id="G" db-column-name="name" db-foreign-key="L"/>
| </elem>
| </seq>
| <seq id="target4" keepID="true">
| <elem id="target4-row">
| <num id="H" db-column-name="id"/>
| <str id="I" db-column-name="name"/>
| <num id="J" db-column-name="another_id"/>
| <str id="J2" db-column-name="yet_another_foreigner"/>
| </elem>
| </seq>
| <seq id="target5" keepID="true">
| <elem id="target5-row">
| <num id="K" db-column-name="id" db-foreign-key="A"/>
| <str id="L" db-column-name="name"/>
| </elem>
| </seq>
|</dfasdl>
| """.stripMargin
)
val doc = createNormalizedDocument(targetDfasdl.content, useSchema = false)
val xpath: XPath = XPathFactory.newInstance().newXPath()
val ts = xpath
.evaluate(
s"/${ElementNames.ROOT}/${ElementNames.SEQUENCE} | /${ElementNames.ROOT}/${ElementNames.FIXED_SEQUENCE}",
doc.getDocumentElement,
XPathConstants.NODESET
)
.asInstanceOf[NodeList]
val tables = for (idx <- 0 until ts.getLength) yield ts.item(idx).asInstanceOf[Element]
val docWithSchema = createNormalizedDocument(targetDfasdl.content)
val sortedTables =
for (id <- List("target1", "target4", "target5", "target3", "target2"))
yield docWithSchema.getElementById(id)
sortTables(tables)(docWithSchema).map(_.getAttribute("id")) should be(
sortedTables.map(_.getAttribute("id"))
)
}
}
describe("given foreign keys with cross references") {
it("should sort the tables correctly") {
val targetDfasdl =
DFASDL(
id = "T",
content =
"""
|<dfasdl xmlns="http://www.dfasdl.org/DFASDL" default-encoding="utf-8" semantic="niem">
| <seq id="target1" keepID="true">
| <elem id="target1-row">
| <num id="A" db-column-name="id"/>
| <str id="B" db-column-name="name"/>
| </elem>
| </seq>
| <seq id="target2" keepID="true">
| <elem id="target2-row">
| <num id="C" db-column-name="id" db-auto-inc="true"/>
| <str id="D" db-column-name="firstname" db-foreign-key="I"/>
| <num id="E" db-column-name="my_name" db-foreign-key="F"/>
| </elem>
| </seq>
| <seq id="target3" keepID="true">
| <elem id="target3-row">
| <num id="F" db-column-name="id"/>
| <str id="G" db-column-name="name" db-foreign-key="L"/>
| </elem>
| </seq>
| <seq id="target4" keepID="true">
| <elem id="target4-row">
| <num id="H" db-column-name="id"/>
| <str id="I" db-column-name="name"/>
| <num id="J" db-column-name="another_id" db-foreign-key="K"/>
| <str id="J2" db-column-name="yet_another_foreigner" db-foreign-key="G"/>
| </elem>
| </seq>
| <seq id="target5" keepID="true">
| <elem id="target5-row">
| <num id="K" db-column-name="id"/>
| <str id="L" db-column-name="name"/>
| </elem>
| </seq>
|</dfasdl>
| """.stripMargin
)
val doc = createNormalizedDocument(targetDfasdl.content, useSchema = false)
val xpath: XPath = XPathFactory.newInstance().newXPath()
val ts = xpath
.evaluate(
s"/${ElementNames.ROOT}/${ElementNames.SEQUENCE} | /${ElementNames.ROOT}/${ElementNames.FIXED_SEQUENCE}",
doc.getDocumentElement,
XPathConstants.NODESET
)
.asInstanceOf[NodeList]
val tables = for (idx <- 0 until ts.getLength) yield ts.item(idx).asInstanceOf[Element]
val docWithSchema = createNormalizedDocument(targetDfasdl.content)
val sortedTables =
for (id <- List("target1", "target5", "target3", "target4", "target2"))
yield docWithSchema.getElementById(id)
sortTables(tables)(docWithSchema).map(_.getAttribute("id")) should be(
sortedTables.map(_.getAttribute("id"))
)
}
}
}
}
}
| Tensei-Data/tensei-agent | src/test/scala/com/wegtam/tensei/agent/writers/DatabaseWriterFunctionsTest.scala | Scala | agpl-3.0 | 15,110 |
package models
import org.joda.time.DateTime
import scalikejdbc._
case class CronLine(
id: Long,
cronId: Long,
line: Option[Int] = None,
body: Option[String] = None,
command: Option[String] = None,
month: Option[String] = None,
day: Option[String] = None,
week: Option[String] = None,
hour: Option[String] = None,
minute: Option[String] = None,
createdAt: Option[DateTime] = None,
updatedAt: Option[DateTime] = None) {
def save()(implicit session: DBSession = CronLine.autoSession): CronLine = CronLine.save(this)(session)
def destroy()(implicit session: DBSession = CronLine.autoSession): Unit = CronLine.destroy(this)(session)
}
object CronLine extends SQLSyntaxSupport[CronLine] {
override val schemaName = Some("ketsuco")
override val tableName = "cron_line"
override val columns = Seq("id", "cron_id", "line", "body", "command", "month", "day", "week", "hour", "minute", "created_at", "updated_at")
def apply(cl: SyntaxProvider[CronLine])(rs: WrappedResultSet): CronLine = apply(cl.resultName)(rs)
def apply(cl: ResultName[CronLine])(rs: WrappedResultSet): CronLine = new CronLine(
id = rs.get(cl.id),
cronId = rs.get(cl.cronId),
line = rs.get(cl.line),
body = rs.get(cl.body),
command = rs.get(cl.command),
month = rs.get(cl.month),
day = rs.get(cl.day),
week = rs.get(cl.week),
hour = rs.get(cl.hour),
minute = rs.get(cl.minute),
createdAt = rs.get(cl.createdAt),
updatedAt = rs.get(cl.updatedAt)
)
val cl = CronLine.syntax("cl")
override val autoSession = AutoSession
def find(id: Long)(implicit session: DBSession = autoSession): Option[CronLine] = {
withSQL {
select.from(CronLine as cl).where.eq(cl.id, id)
}.map(CronLine(cl.resultName)).single.apply()
}
def findAll()(implicit session: DBSession = autoSession): List[CronLine] = {
withSQL(select.from(CronLine as cl)).map(CronLine(cl.resultName)).list.apply()
}
def countAll()(implicit session: DBSession = autoSession): Long = {
withSQL(select(sqls.count).from(CronLine as cl)).map(rs => rs.long(1)).single.apply().get
}
def findBy(where: SQLSyntax)(implicit session: DBSession = autoSession): Option[CronLine] = {
withSQL {
select.from(CronLine as cl).where.append(where)
}.map(CronLine(cl.resultName)).single.apply()
}
def findAllBy(where: SQLSyntax)(implicit session: DBSession = autoSession): List[CronLine] = {
withSQL {
select.from(CronLine as cl).where.append(where)
}.map(CronLine(cl.resultName)).list.apply()
}
def countBy(where: SQLSyntax)(implicit session: DBSession = autoSession): Long = {
withSQL {
select(sqls.count).from(CronLine as cl).where.append(where)
}.map(_.long(1)).single.apply().get
}
def create(
cronId: Long,
line: Option[Int] = None,
body: Option[String] = None,
command: Option[String] = None,
month: Option[String] = None,
day: Option[String] = None,
week: Option[String] = None,
hour: Option[String] = None,
minute: Option[String] = None,
createdAt: Option[DateTime] = None,
updatedAt: Option[DateTime] = None)(implicit session: DBSession = autoSession): CronLine = {
val generatedKey = withSQL {
insert.into(CronLine).columns(
column.cronId,
column.line,
column.body,
column.command,
column.month,
column.day,
column.week,
column.hour,
column.minute,
column.createdAt,
column.updatedAt
).values(
cronId,
line,
body,
command,
month,
day,
week,
hour,
minute,
createdAt,
updatedAt
)
}.updateAndReturnGeneratedKey.apply()
CronLine(
id = generatedKey,
cronId = cronId,
line = line,
body = body,
command = command,
month = month,
day = day,
week = week,
hour = hour,
minute = minute,
createdAt = createdAt,
updatedAt = updatedAt)
}
def batchInsert(entities: Seq[CronLine])(implicit session: DBSession = autoSession): Seq[Int] = {
val params: Seq[Seq[(Symbol, Any)]] = entities.map(entity =>
Seq(
'cronId -> entity.cronId,
'line -> entity.line,
'body -> entity.body,
'command -> entity.command,
'month -> entity.month,
'day -> entity.day,
'week -> entity.week,
'hour -> entity.hour,
'minute -> entity.minute,
'createdAt -> entity.createdAt,
'updatedAt -> entity.updatedAt))
SQL("""insert into cron_line(
cron_id,
line,
body,
command,
month,
day,
week,
hour,
minute,
created_at,
updated_at
) values (
{cronId},
{line},
{body},
{command},
{month},
{day},
{week},
{hour},
{minute},
{createdAt},
{updatedAt}
)""").batchByName(params: _*).apply()
}
def save(entity: CronLine)(implicit session: DBSession = autoSession): CronLine = {
withSQL {
update(CronLine).set(
column.id -> entity.id,
column.cronId -> entity.cronId,
column.line -> entity.line,
column.body -> entity.body,
column.command -> entity.command,
column.month -> entity.month,
column.day -> entity.day,
column.week -> entity.week,
column.hour -> entity.hour,
column.minute -> entity.minute,
column.createdAt -> entity.createdAt,
column.updatedAt -> entity.updatedAt
).where.eq(column.id, entity.id)
}.update.apply()
entity
}
def destroy(entity: CronLine)(implicit session: DBSession = autoSession): Unit = {
withSQL { delete.from(CronLine).where.eq(column.id, entity.id) }.update.apply()
}
}
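// Example usage (values are illustrative; assumes a scalikejdbc ConnectionPool has been initialized):
//   DB.localTx { implicit session =>
//     val created = CronLine.create(cronId = 1L, line = Some(1), command = Some("/usr/bin/backup"))
//     CronLine.find(created.id)
//   }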
| akyao/ketsuco | app/models/CronLine.scala | Scala | mit | 5,904 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers.oauth1
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.exceptions.ProfileRetrievalException
import com.mohiva.play.silhouette.impl.providers.SocialProfileBuilder._
import com.mohiva.play.silhouette.impl.providers._
import com.mohiva.play.silhouette.impl.providers.oauth1.LinkedInProvider._
import play.api.libs.ws.{ WSRequest, WSResponse }
import play.api.test.WithApplication
import test.Helper
import scala.concurrent.Future
/**
* Test case for the [[com.mohiva.play.silhouette.impl.providers.oauth1.LinkedInProvider]] class.
*/
class LinkedInProviderSpec extends OAuth1ProviderSpec {
"The `withSettings` method" should {
"create a new instance with customized settings" in new WithApplication with Context {
val s = provider.withSettings { s =>
s.copy("new-request-token-url")
}
s.settings.requestTokenURL must be equalTo "new-request-token-url"
}
}
"The `retrieveProfile` method" should {
"fail with ProfileRetrievalException if API returns error" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
requestHolder.sign(any) returns requestHolder
requestHolder.get() returns Future.successful(response)
response.json returns Helper.loadJson("providers/oauth1/linkedin.error.json")
httpLayer.url(API) returns requestHolder
failed[ProfileRetrievalException](provider.retrieveProfile(oAuthInfo)) {
case e => e.getMessage must equalTo(SpecifiedProfileError.format(
provider.id,
0,
Some("Unknown authentication scheme"),
Some("LY860UAC5U"),
Some(401),
Some(1390421660154L)))
}
}
"fail with ProfileRetrievalException if an unexpected error occurred" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
requestHolder.sign(any) returns requestHolder
requestHolder.get() returns Future.successful(response)
response.json throws new RuntimeException("")
httpLayer.url(API) returns requestHolder
failed[ProfileRetrievalException](provider.retrieveProfile(oAuthInfo)) {
case e => e.getMessage must equalTo(UnspecifiedProfileError.format(provider.id))
}
}
"return the social profile" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
requestHolder.sign(any) returns requestHolder
requestHolder.get() returns Future.successful(response)
response.json returns Helper.loadJson("providers/oauth1/linkedin.success.json")
httpLayer.url(API) returns requestHolder
profile(provider.retrieveProfile(oAuthInfo)) {
case p =>
p must be equalTo new CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "NhZXBl_O6f"),
firstName = Some("Apollonia"),
lastName = Some("Vanova"),
fullName = Some("Apollonia Vanova"),
email = Some("[email protected]"),
avatarURL = Some("http://media.linkedin.com/mpr/mprx/0_fsPnURNRhLhk_Ue2fjKLUZkB2FL6TOe2S4bdUZz61GA9Ysxu_y_sz4THGW5JGJWhaMleN0F61-Dg")
)
}
}
}
/**
* Defines the context for the abstract OAuth1 provider spec.
*
* @return The Context to use for the abstract OAuth1 provider spec.
*/
override protected def context: OAuth1ProviderSpecContext = new Context {}
/**
* The context.
*/
trait Context extends OAuth1ProviderSpecContext {
/**
* The OAuth1 settings.
*/
lazy val oAuthSettings = spy(OAuth1Settings(
requestTokenURL = "https://api.linkedin.com/uas/oauth/requestToken",
accessTokenURL = "https://api.linkedin.com/uas/oauth/accessToken",
authorizationURL = "https://api.linkedin.com/uas/oauth/authenticate",
callbackURL = "https://www.mohiva.com",
consumerKey = "my.consumer.key",
consumerSecret = "my.consumer.secret"))
/**
* The provider to test.
*/
lazy val provider = new LinkedInProvider(httpLayer, oAuthService, oAuthTokenSecretProvider, oAuthSettings)
}
}
| rfranco/play-silhouette | silhouette/test/com/mohiva/play/silhouette/impl/providers/oauth1/LinkedInProviderSpec.scala | Scala | apache-2.0 | 4,890 |
/**
* Copyright 2014 Gianluca Amato <[email protected]>
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.parsers
import org.scalatest.FunSuite
import it.unich.jandom.targets.Environment
import it.unich.jandom.domains.numerical.LinearForm
import it.unich.jandom.targets.NumericExpression._
/**
* Test suite for NumericExpressionParser
*/
class NumericExpressionParserSuite extends FunSuite {
val parser = new TempParser
val x = VariableExpression[Int](0)
val y = VariableExpression[Int](1)
val z = VariableExpression[Int](2)
class TempParser extends NumericExpressionParser {
val env = Environment()
val variable = ident ^^ { env.getBindingOrAdd(_) }
def parseExpr(s: String) = parseAll(numexpr, s)
}
test("constant") {
assertResult(LinearExpression(0)) { parser.parseExpr("0").get }
assertResult(LinearExpression(-2)) { parser.parseExpr("-2").get }
}
test("unary minus") {
assertResult(- x) { parser.parseExpr("- x").get }
assertResult(-x+y) { parser.parseExpr("- x + y").get }
}
test("linear expression") {
val expParsed = parser.parseExpr("3*x + y - z").get
val expBuild = 3 * x + y - z
assertResult(expBuild) { expParsed }
assert(expParsed.isInstanceOf[LinearExpression[_]])
}
test("non-linear expression") {
val expParsed = parser.parseExpr("(3*x + z*y - z)/x").get
val expBuild = (3 * x + z*y - z)/x
assertResult(expBuild) { expParsed }
}
}
| rubino22/JDBeta | core/src/test/scala/it/unich/jandom/parsers/NumericExpressionParserSuite.scala | Scala | lgpl-3.0 | 2,130 |
/*
Copyright 2016 ScalABM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.economicsl.agora
package object orderbooks
| EconomicSL/agora | src/main/scala/org/economicsl/agora/markets/auctions/orderbooks/package.scala | Scala | apache-2.0 | 610 |
package eu.daxiongmao.training.scala.chp2
import scala.io.Source
object ReadFile {
def main (args: Array[String] ) {
// Read content of the file provided by args(0)
if (args.length > 0) {
readFileContent(args(0))
} else {
Console.err.println("You must provide a filename (absolute path) to read")
}
}
def readFileContent(filePath: String): Unit = {
for (line <- Source.fromFile(filePath).getLines()) {
println(" | " + line)
}
}
}
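// Example invocation (the path is illustrative); the first argument is the file to print:
//   sbt "runMain eu.daxiongmao.training.scala.chp2.ReadFile /tmp/notes.txt"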
| guihome-diaz/training | progInScala/src/main/scala/eu/daxiongmao/training/scala/chp2/ReadFile.scala | Scala | gpl-2.0 | 485 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.json
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.sources.{EqualTo, Filter, StringStartsWith}
import org.apache.spark.sql.types.StructType
import org.apache.spark.unsafe.types.UTF8String
class JacksonParserSuite extends SparkFunSuite {
test("skipping rows using pushdown filters") {
def check(
input: String = """{"i":1, "s": "a"}""",
schema: StructType = StructType.fromDDL("i INTEGER"),
filters: Seq[Filter],
expected: Seq[InternalRow]): Unit = {
val options = new JSONOptions(Map.empty[String, String], "GMT", "")
val parser = new JacksonParser(schema, options, false, filters)
val createParser = CreateJacksonParser.string _
val actual = parser.parse(input, createParser, UTF8String.fromString)
assert(actual === expected)
}
check(filters = Seq(), expected = Seq(InternalRow(1)))
check(filters = Seq(EqualTo("i", 1)), expected = Seq(InternalRow(1)))
check(filters = Seq(EqualTo("i", 2)), expected = Seq.empty)
check(
schema = StructType.fromDDL("s STRING"),
filters = Seq(StringStartsWith("s", "b")),
expected = Seq.empty)
check(
schema = StructType.fromDDL("i INTEGER, s STRING"),
filters = Seq(StringStartsWith("s", "a")),
expected = Seq(InternalRow(1, UTF8String.fromString("a"))))
check(
input = """{"i":1,"s": "a", "d": 3.14}""",
schema = StructType.fromDDL("i INTEGER, d DOUBLE"),
filters = Seq(EqualTo("d", 3.14)),
expected = Seq(InternalRow(1, 3.14)))
}
}
| shaneknapp/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/json/JacksonParserSuite.scala | Scala | apache-2.0 | 2,431 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.projections
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.projections.RecordGroupField._
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.rdd.TestSaveArgs
import org.bdgenomics.adam.util.ADAMFunSuite
import org.bdgenomics.formats.avro.RecordGroup
class RecordGroupFieldSuite extends ADAMFunSuite {
sparkTest("Use projection when reading parquet record group metadata") {
val path = tmpFile("recordGroupMetadata.parquet")
val rdd = sc.parallelize(Seq(RecordGroup.newBuilder()
.setName("name")
.setSequencingCenter("sequencing_center")
.setDescription("description")
.setRunDateEpoch(42L)
.setFlowOrder("flow_order")
.setKeySequence("key_sequence")
.setLibrary("library")
.setPredictedMedianInsertSize(99)
.setPlatform("platform")
.setPlatformUnit("platform_unit")
.build()))
rdd.saveAsParquet(TestSaveArgs(path))
val projection = Projection(
name,
sample,
sequencingCenter,
description,
runDateEpoch,
flowOrder,
keySequence,
library,
predictedMedianInsertSize,
platform,
platformUnit
)
val recordGroupMetadata: RDD[RecordGroup] = sc.loadParquet(path, optProjection = Some(projection))
assert(recordGroupMetadata.count() === 1)
assert(recordGroupMetadata.first.getName === "name")
assert(recordGroupMetadata.first.getSequencingCenter === "sequencing_center")
assert(recordGroupMetadata.first.getDescription === "description")
assert(recordGroupMetadata.first.getRunDateEpoch === 42L)
assert(recordGroupMetadata.first.getFlowOrder === "flow_order")
assert(recordGroupMetadata.first.getKeySequence === "key_sequence")
assert(recordGroupMetadata.first.getLibrary === "library")
assert(recordGroupMetadata.first.getPredictedMedianInsertSize === 99)
assert(recordGroupMetadata.first.getPlatform === "platform")
assert(recordGroupMetadata.first.getPlatformUnit === "platform_unit")
}
}
| laserson/adam | adam-core/src/test/scala/org/bdgenomics/adam/projections/RecordGroupMetadataFieldSuite.scala | Scala | apache-2.0 | 2,845 |
package org.jetbrains.plugins.scala.project
import java.util.concurrent.ConcurrentHashMap
import com.intellij.openapi.components.AbstractProjectComponent
import com.intellij.openapi.project.Project
/**
* @author Pavel Fatin
*/
class ScalaProjectCache(project: Project, events: ScalaProjectEvents) extends AbstractProjectComponent(project) {
private val cache = new ConcurrentHashMap[AnyRef, AnyRef]()
events.addScalaProjectListener(new ScalaProjectListener {
def onScalaProjectChanged() {
cache.clear()
}
})
def getOrUpdate[K <: AnyRef, V <: AnyRef](key: K)(value: => V): V = {
Option(cache.get(key).asInstanceOf[V]).getOrElse {
val result = value
cache.put(key, result)
result
}
}
}
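// Hypothetical usage from code that already has a Project in scope (key and computation are illustrative):
//   val cache = ScalaProjectCache.instanceIn(project)
//   val sdk = cache.getOrUpdate(module)(computeScalaSdkFor(module))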
object ScalaProjectCache {
def instanceIn(project: Project): ScalaProjectCache =
project.getComponent(classOf[ScalaProjectCache])
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/project/ScalaProjectCache.scala | Scala | apache-2.0 | 879 |
package fr.njin.playoauth.common.domain
import fr.njin.playoauth.common.request.AuthzRequest
import scala.concurrent.Future
/**
* Represents the permission granted by the resource owner to a client.
*
 * When issuing a code or a token, the authorization server asks the resource owner
 * for permission. The resource owner's answer is represented by this permission.
*/
trait OauthPermission {
/**
* @return the id of the client of the permission
*/
def clientId: String
/**
* @return the scopes accepted by the resource owner
*/
def scopes: Option[Seq[String]]
/**
* @return the redirect url of the request
*/
  def redirectUri: Option[String]
/**
* @param request the authorization request
   * @return true if the resource owner authorizes the request
*/
def authorized(request: AuthzRequest): Boolean
}
class BasicOAuthPermission(val accepted: Boolean,
val clientId: String,
val scopes: Option[Seq[String]],
val redirectUri: Option[String]) extends OauthPermission {
def authorized(request: AuthzRequest): Boolean = accepted && request.redirectUri == redirectUri
}
/**
 * Repository used to retrieve the permission, if any, granted by a resource owner to a client.
 *
 * The authorization endpoint looks up the permission when a client requests a code or a token.
 * Return None if there is no permission.
 *
 * The parameters of the apply method are (ownerId, clientId).
* @tparam P Type of the permission
*/
trait OauthResourceOwnerPermission[P <: OauthPermission]
extends ((String, String) => Future[Option[P]])
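/**
 * A minimal in-memory repository sketch (illustrative only, not part of the original API):
 * permissions are looked up by the (ownerId, clientId) pair.
 */
class InMemoryResourceOwnerPermission[P <: OauthPermission](store: Map[(String, String), P])
  extends OauthResourceOwnerPermission[P] {
  def apply(ownerId: String, clientId: String): Future[Option[P]] =
    Future.successful(store.get((ownerId, clientId)))
}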
| giabao/play-oauth | common/src/main/scala/fr/njin/playoauth/common/domain/OauthPermission.scala | Scala | apache-2.0 | 1,654 |
package fpinscala.parsing
import org.scalatest.{FlatSpec, Matchers}
class ParserSpec extends FlatSpec with Matchers {
}
| ailveen/fpinscala | exercises/src/test/scala/fpinscala/parsing/ParserSpec.scala | Scala | mit | 123 |
import sbt._
import Keys._
import Project.Initialize
trait Marker
{
final lazy val Mark = TaskKey[Unit]("mark")
final def mark: Initialize[Task[Unit]] = mark(baseDirectory)
final def mark(project: Reference): Initialize[Task[Unit]] = mark(baseDirectory in project)
final def mark(baseKey: SettingKey[File]): Initialize[Task[Unit]] = baseKey map { base =>
val toMark = base / "ran"
if(toMark.exists)
error("Already ran (" + toMark + " exists)")
else
IO touch toMark
}
} | mdedetrich/sbt | sbt/src/sbt-test/actions/aggregate/project/Marker.scala | Scala | bsd-3-clause | 486 |
package eventstore
package akka
import io.circe._
import ProjectionsClient.{ProjectionMode, ProjectionStatus}
import scala.util.Try
object ProjectionDetails {
implicit val decoderForProjectionStatus: Decoder[ProjectionStatus] =
Decoder[String].map(s => ProjectionStatus(s))
implicit val decoderForProjectionMode: Decoder[ProjectionMode] =
Decoder[String].emapTry(s => Try(ProjectionMode(s)))
implicit val decoderForProjectionDetails: Decoder[ProjectionDetails] =
Decoder.forProduct11(
"name",
"effectiveName",
"version",
"epoch",
"status",
"stateReason",
"mode",
"writesInProgress",
"readsInProgress",
"progress",
"bufferedEvents"
)(ProjectionDetails.apply)
}
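// Hypothetical decoding example (assumes circe's parser module is on the classpath):
//   io.circe.parser.decode[ProjectionDetails](jsonString)
// picks up decoderForProjectionDetails above together with the status and mode decoders.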
final case class ProjectionDetails(
name: String,
effectiveName: String,
version: Int,
epoch: Int,
status: ProjectionStatus,
stateReason: String,
mode: ProjectionMode,
writesInProgress: Int,
readsInProgress: Int,
progress: Double,
bufferedEvents: Int
) | EventStore/EventStore.JVM | client/src/main/scala/eventstore/akka/ProjectionDetails.scala | Scala | bsd-3-clause | 1,103 |
package eu.brosbit.opos.snippet.view
import _root_.net.liftweb.util._
import Helpers._
import net.liftweb.json.JsonDSL._
import eu.brosbit.opos.lib.{Formater, ZeroObjectId}
import java.util.Date
import eu.brosbit.opos.model.edu.{Exam, ExamAnswer, Work, WorkAnswer}
class ExamsSn extends BaseSnippet {
def showExams():CssSel = {
val now = new Date()
val nowString = Formater.formatTime(now)
val nowL = now.getTime
val exams = Exam.findAll(("classId" -> user.classId.get)~( "start" -> ("$lt" -> nowL ))~("end" -> ("$gt" -> nowL)))
"#nowDate *" #> nowString &
".col-lg-6" #> exams.map(ex => {
mkExamDiv(ex)
})
}
def showChecked():CssSel = {
val now = new Date()
val nowString = Formater.formatTime(now)
val nowL = now.getTime
val exams = Exam.findAll(("classId" -> user.classId.get)~( "end" -> ("$lt" -> nowL )))
val answers = ExamAnswer.findAll({"authorId" -> user.id.get})
"#nowDate *" #> nowString &
".col-lg-6" #> exams.map(ex => {
mkAnsDiv(ex, answers.find(a => a.exam == ex._id).getOrElse(ExamAnswer.create))
})
}
private def mkExamDiv(ex:Exam) = {
<div class="col-lg-6">
<h4 class="text-info"> {ex.description} </h4>
<p>Przedmiot: <strong>{ex.subjectName}</strong></p>
<p>kod: <strong> {if(ex.quizzes.length > 1) "TAK" else "NIE"} </strong> </p>
<p><em>Start: </em> { Formater.strForDateTimePicker(new Date(ex.start))} <br/>
<em>Koniec: </em> { Formater.strForDateTimePicker(new Date(ex.end))}</p>
<p>
<a href={"/view/showquiz/" + ex._id.toString}> <span
class="btn btn-info" > <span
class="glyphicon glyphicon-edit"></span> Otwórz
</span></a>
</p>
</div>
}
private def mkAnsDiv(ex: Exam, an: ExamAnswer) = {
val points = an.answers.map(_.p).sum
val percent = if(an.max == 0) 0.0F else (100.0F* points.toFloat) / an.max.toFloat
<div class="col-lg-6">
<h4 class="text-info"> {ex.description} </h4>
<p>Przedmiot: <strong>{ex.subjectName}</strong></p>
<p><em>Start: </em> { Formater.strForDateTimePicker(new Date(ex.start))} <br/>
<em>Koniec: </em> { Formater.strForDateTimePicker(new Date(ex.end))}</p>
<div class="alert alert-info"> Wynik: {points.toString + " / " +
an.max.toString + " : " + scala.math.round(percent).toString} %
<hr/>
{an.info}
</div>
</div>
}
} | mikolajs/osp | src/main/scala/eu/brosbit/opos/snippet/view/ExamsSn.scala | Scala | agpl-3.0 | 2,449 |
package supertaggedtests.tagged
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec
import shapeless.test.illTyped
class Implicits extends AnyFlatSpec with Matchers {
"Implicit Ops" should "work without imports" in {
val step1 = Step1(5)
step1.next() shouldBe 6
}
"Implicit conversion" should "work without imports" in {
val step1 = Step1(5)
val step2:Step2 = step1
illTyped("step2.next()","value next is not a member of.+")
step2 shouldBe 5
}
}
| Rudogma/scala-supertagged | tests/src/test/scala/supertaggedtests/tagged/Implicits.scala | Scala | mit | 524 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.protocol.v5.content
import org.scalatest.{FunSpec, Matchers}
import play.api.data.validation.ValidationError
import play.api.libs.json._
class InspectRequestSpec extends FunSpec with Matchers {
val inspectRequestJson: JsValue = Json.parse("""
{
"code": "<STRING>",
"cursor_pos": 999,
"detail_level": 1
}
""")
val inspectRequest: InspectRequest = InspectRequest(
"<STRING>", 999, 1
)
describe("InspectRequest") {
describe("#toTypeString") {
it("should return correct type") {
InspectRequest.toTypeString should be ("inspect_request")
}
}
describe("implicit conversions") {
it("should implicitly convert from valid json to a InspectRequest instance") {
// This is the least safe way to convert as an error is thrown if it fails
inspectRequestJson.as[InspectRequest] should be (inspectRequest)
}
it("should also work with asOpt") {
// This is safer, but we lose the error information as it returns
// None if the conversion fails
val newInspectRequest = inspectRequestJson.asOpt[InspectRequest]
newInspectRequest.get should be (inspectRequest)
}
it("should also work with validate") {
// This is the safest as it collects all error information (not just first error) and reports it
val InspectRequestResults = inspectRequestJson.validate[InspectRequest]
InspectRequestResults.fold(
(invalid: Seq[(JsPath, Seq[ValidationError])]) => println("Failed!"),
(valid: InspectRequest) => valid
) should be (inspectRequest)
}
it("should implicitly convert from a InspectRequest instance to valid json") {
Json.toJson(inspectRequest) should be (inspectRequestJson)
}
}
}
}
| Myllyenko/incubator-toree | protocol/src/test/scala/org/apache/toree/kernel/protocol/v5/content/InspectRequestSpec.scala | Scala | apache-2.0 | 2,640 |
package debop4s.timeperiod.timerange
import debop4s.timeperiod._
import debop4s.timeperiod.tests.AbstractTimeFunSuite
import debop4s.timeperiod.utils.Times
class DayRangeFunSuite extends AbstractTimeFunSuite {
test("initValues") {
val currentTime = Times.now
val firstDay = Times.startTimeOfDay(currentTime)
val dr = DayRange(currentTime, TimeSpec.EmptyOffsetTimeCalendar)
dr.start should equal(firstDay)
dr.end should equal(firstDay.plusDays(1))
}
test("defaultCalendar") {
val yearStart = Times.startTimeOfYear(Times.now)
(1 to TimeSpec.MonthsPerYear).par.foreach {
m =>
val monthStart = Times.asDate(yearStart.getYear, m, 1)
val monthEnd = Times.endTimeOfMonth(monthStart)
(1 until monthEnd.getDayOfMonth).foreach {
day =>
val dayRange = DayRange(monthStart.plusDays(day - 1))
dayRange.year should equal(yearStart.getYear)
dayRange.monthOfYear should equal(monthStart.getMonthOfYear)
}
}
}
test("construct test") {
val dayRange = DayRange(Times.now)
dayRange.start should equal(Times.today)
val dayRange2 = DayRange(Times.now.getYear, Times.now.getMonthOfYear, Times.now.getDayOfMonth)
dayRange2.start shouldEqual Times.today
}
test("dayOfWeek") {
val dayRange = DayRange(Times.now)
dayRange.dayOfWeek shouldEqual TimeSpec.DefaultTimeCalendar.dayOfWeek(Times.now)
}
test("addDays") {
val time = Times.now
val day = Times.today
val dayRange = DayRange(time)
dayRange.previousDay.start should equal(day.plusDays(-1))
dayRange.nextDay.start should equal(day.plusDays(1))
dayRange.addDays(0) should equal(dayRange)
Range(-60, 120).par.foreach { i =>
dayRange.addDays(i).start shouldEqual day.plusDays(i)
}
}
test("get hoursView") {
val dayRange = DayRange()
val hours = dayRange.hoursView
var index = 0
hours.foreach { h =>
h.start should equal(dayRange.start.plusHours(index))
h.end should equal(h.calendar.mapEnd(h.start.plusHours(1)))
index += 1
}
index shouldEqual TimeSpec.HoursPerDay
}
}
| debop/debop4s | debop4s-timeperiod/src/test/scala/debop4s/timeperiod/timerange/DayRangeFunSuite.scala | Scala | apache-2.0 | 2,155 |
package lila.opening
import org.goochjs.glicko2._
import org.joda.time.DateTime
import reactivemongo.bson.{ BSONDocument, BSONInteger, BSONDouble }
import lila.db.Types.Coll
import lila.rating.{ Glicko, Perf }
import lila.user.{ User, UserRepo }
private[opening] final class Finisher(
api: OpeningApi,
openingColl: Coll) {
def apply(opening: Opening, user: User, win: Boolean): Fu[(Attempt, Option[Boolean])] = {
api.attempt.find(opening.id, user.id) flatMap {
case Some(a) => fuccess(a -> win.some)
case None =>
val userRating = user.perfs.opening.toRating
val openingRating = opening.perf.toRating
updateRatings(userRating, openingRating, win.fold(Glicko.Result.Win, Glicko.Result.Loss))
val date = DateTime.now
val userPerf = user.perfs.opening.add(userRating, date)
val openingPerf = opening.perf.add(openingRating, date)
val a = new Attempt(
id = Attempt.makeId(opening.id, user.id),
openingId = opening.id,
userId = user.id,
date = DateTime.now,
win = win,
openingRating = opening.perf.intRating,
openingRatingDiff = openingPerf.intRating - opening.perf.intRating,
userRating = user.perfs.opening.intRating,
userRatingDiff = userPerf.intRating - user.perfs.opening.intRating)
((api.attempt add a) >> {
openingColl.update(
BSONDocument("_id" -> opening.id),
BSONDocument("$inc" -> BSONDocument(
Opening.BSONFields.attempts -> BSONInteger(1),
Opening.BSONFields.wins -> BSONInteger(win ? 1 | 0)
)) ++ BSONDocument("$set" -> BSONDocument(
Opening.BSONFields.perf -> Perf.perfBSONHandler.write(openingPerf)
))) zip UserRepo.setPerf(user.id, "opening", userPerf)
}) recover {
case e: reactivemongo.core.commands.LastError if e.getMessage.contains("duplicate key error") => ()
} inject (a -> none)
}
}
private val VOLATILITY = Glicko.default.volatility
private val TAU = 0.75d
private val system = new RatingCalculator(VOLATILITY, TAU)
private def mkRating(perf: Perf) = new Rating(
math.max(1000, perf.glicko.rating),
perf.glicko.deviation,
perf.glicko.volatility, perf.nb)
private def updateRatings(u1: Rating, u2: Rating, result: Glicko.Result) {
val results = new RatingPeriodResults()
result match {
case Glicko.Result.Draw => results.addDraw(u1, u2)
case Glicko.Result.Win => results.addResult(u1, u2)
case Glicko.Result.Loss => results.addResult(u2, u1)
}
try {
system.updateRatings(results)
}
catch {
case e: Exception => play.api.Logger("Opening trainer").error(e.getMessage)
}
}
}
| danilovsergey/i-bur | modules/opening/src/main/Finisher.scala | Scala | mit | 2,793 |
package org.scaladebugger.api.profiles.traits.info.events
import com.sun.jdi.event.MethodExitEvent
import org.scaladebugger.api.profiles.traits.info.{MethodInfo, ValueInfo}
/**
* Represents the interface that needs to be implemented to provide
* an abstraction over the JDI method exit event interface.
*/
trait MethodExitEventInfo extends LocatableEventInfo {
/**
* Returns the JDI representation this profile instance wraps.
*
* @return The JDI instance
*/
override def toJdiInstance: MethodExitEvent
/**
* Returns the method that was exited.
*
* @return The information profile about the method
*/
def method: MethodInfo
/**
* Returns the value that the method will return.
*
* @return The information profile about the value
*/
def returnValue: ValueInfo
}
| ensime/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/profiles/traits/info/events/MethodExitEventInfo.scala | Scala | apache-2.0 | 816 |
/**
* Copyright 2014 Gianluca Amato <[email protected]>
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.parsers
import scala.util.parsing.combinator.JavaTokenParsers
import it.unich.jandom.domains.numerical.LinearForm
import it.unich.jandom.targets.NumericExpression
import it.unich.jandom.targets.NumericExpression._
import scala.util.parsing.combinator.PackratParsers
/**
* A trait for parsing numeric expressions. To be inherited by real parsers. An implementation
* should define a parser ''variable'' of type ''Parser[Int]'' and provide a variable ''env''
* of type ''Environment''. The result of variable is the id of the variable in the environment
* ''env''. It provides a parser ''numexpr'' for expressions.
* @author Gianluca Amato <[email protected]>
*/
trait NumericExpressionParser extends JavaTokenParsers with PackratParsers {
/**
* Parser for variables.
*/
protected val variable: Parser[Int]
/**
   * Parser for multiplication operator. Normally "*", may be overridden in subclasses.
*/
protected val mulExpr: Parser[Any] = "*"
/**
   * Parser for division operator. Normally "/", may be overridden in subclasses.
*/
protected val divExpr: Parser[Any] = "/"
private val factor: Parser[NumericExpression] =
"?" ^^ { _ => NonDeterministicExpression } |
"(" ~> numexpr <~ ")" |
variable ^^ { v => LinearExpression(LinearForm.v[Double](v)) } |
wholeNumber ^^ { c => LinearExpression(c.toDouble) } |
"-" ~> factor ^^ { e => - e }
private val term: PackratParser[NumericExpression] =
(term <~ mulExpr) ~ factor ^^ { case t ~ f => t * f } |
(term <~ divExpr) ~ factor ^^ { case t ~ f => t / f } |
factor
/**
* Parser for numeric expressions.
*/
protected val numexpr: PackratParser[NumericExpression] =
(numexpr <~ "+") ~ term ^^ { case e ~ t => e + t } |
(numexpr <~ "-") ~ term ^^ { case e ~ t => e - t } |
term
}
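// A minimal concrete parser, mirroring the pattern used in the accompanying test suite
// (Environment lives in it.unich.jandom.targets; the class and method names are illustrative):
//   class SimpleNumericParser extends NumericExpressionParser {
//     val env = Environment()
//     val variable = ident ^^ { env.getBindingOrAdd(_) }
//     def parseExpr(s: String) = parseAll(numexpr, s)
//   }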
| rubino22/JDBeta | core/src/main/scala/it/unich/jandom/parsers/NumericExpressionParser.scala | Scala | lgpl-3.0 | 2,620 |
package com.oradian.autofuture
import scala.meta._
import scala.meta.parsers.Parsed.{Error, Success}
import scala.meta.tokens.Token
object AdaptTupleArgumentsExplicitly extends AutoFuture {
private[this] case class Injection(open: Int, close: Int) {
    require(open < close, "Open offset must be lower than close offset")
}
private[this] def locateInjection(tree: Tree): Injection = {
val open = tree.tokens.find {
case _: Token.LeftParen => true
case _ => false
}.getOrElse(sys.error("Could not find first open parenthesis")).pos.end.offset
val close = tree.tokens.reverse.find {
case _: Token.RightParen => true
case _ => false
}.getOrElse(sys.error("Could not find last closed parenthesis")).pos.start.offset
Injection(open, close)
}
def apply(source: String): AutoFuture.Result = {
source.parse[Source] match {
case Success(parsed) =>
val injections = parsed collect {
/* Transform Some.apply(<multiple parameters>) */
case tree @ Term.Apply(Term.Name("Some"), args) if args.size > 1 =>
locateInjection(tree)
/* Transform Option.apply(<multiple parameters>) */
case tree @ Term.Apply(Term.Name("Option"), args) if args.size > 1 =>
locateInjection(tree)
/* Transform arrow association */
case tree @ Term.ApplyInfix(_, Term.Name("->"), _, args) if args.size > 1 =>
locateInjection(tree)
}
if (injections.isEmpty) {
AutoFuture.Result.Noop
} else {
val sb = new StringBuilder
var last = 0
val opens = injections map { injection => injection.open -> '(' }
val closes = injections map { injection => injection.close -> ')' }
for ((offset, paren) <- (opens ++ closes).sortBy(_._1)) {
(sb ++= source.substring(last, offset)
+= paren)
last = offset
}
sb ++= source.substring(last)
AutoFuture.Result.Success(sb.toString)
}
case Error(pos, message, details) =>
AutoFuture.Result.Error(s"At line ${pos.start.line}: $message")
}
}
}
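// Illustrative before/after: feeding the source "object T { val x = Some(1, 2) }" through
// AdaptTupleArgumentsExplicitly should yield AutoFuture.Result.Success("object T { val x = Some((1, 2)) }"),
// i.e. the auto-tupled argument list is wrapped in an explicit tuple.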
| oradian/sbt-auto-future | core/src/main/scala/com/oradian/autofuture/AdaptTupleArgumentsExplicitly.scala | Scala | mit | 2,189 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.logical
import org.apache.calcite.plan.RelOptRule._
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall}
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rel.logical._
import org.apache.calcite.rex.{RexBuilder, RexNode}
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.tools.RelBuilder
import org.apache.calcite.util.ImmutableBitSet
import scala.collection.JavaConversions._
class DecomposeGroupingSetRule
extends RelOptRule(
operand(classOf[LogicalAggregate], any),
"DecomposeGroupingSetRule") {
override def matches(call: RelOptRuleCall): Boolean = {
val agg: LogicalAggregate = call.rel(0).asInstanceOf[LogicalAggregate]
!agg.getGroupSets.isEmpty &&
DecomposeGroupingSetRule.getGroupIdExprIndexes(agg.getAggCallList).nonEmpty
}
override def onMatch(call: RelOptRuleCall): Unit = {
val agg: LogicalAggregate = call.rel(0).asInstanceOf[LogicalAggregate]
val groupIdExprs = DecomposeGroupingSetRule.getGroupIdExprIndexes(agg.getAggCallList).toSet
val subAggs = agg.groupSets.map(set =>
DecomposeGroupingSetRule.decompose(call.builder(), agg, groupIdExprs, set))
val union = subAggs.reduce((l, r) => new LogicalUnion(
agg.getCluster,
agg.getTraitSet,
Seq(l, r),
true
))
call.transformTo(union)
}
}
object DecomposeGroupingSetRule {
val INSTANCE = new DecomposeGroupingSetRule
private def getGroupIdExprIndexes(aggCalls: Seq[AggregateCall]): Seq[Int] = {
aggCalls.zipWithIndex.filter { case (call, _) =>
call.getAggregation.getKind match {
case SqlKind.GROUP_ID | SqlKind.GROUPING | SqlKind.GROUPING_ID =>
true
case _ =>
false
}
}.map { case (_, idx) => idx}
}
private def decompose(
relBuilder: RelBuilder,
agg: LogicalAggregate,
groupExprIndexes : Set[Int],
groupSet: ImmutableBitSet) = {
val aggsWithIndexes = agg.getAggCallList.zipWithIndex
// construct aggregate without groupExpressions
val subAgg = new LogicalAggregate(
agg.getCluster,
agg.getTraitSet,
agg.getInput,
false,
groupSet,
Seq(),
aggsWithIndexes.collect{ case (call, idx) if !groupExprIndexes.contains(idx) => call }
)
relBuilder.push(subAgg)
val rexBuilder = relBuilder.getRexBuilder
// get names of grouping fields
val groupingFieldsName = Seq.range(0, agg.getGroupCount)
.map(x => agg.getRowType.getFieldNames.get(x))
// create null literals for all grouping fields
val groupingFields: Array[RexNode] = Seq.range(0, agg.getGroupCount)
.map(x => rexBuilder.makeNullLiteral(agg.getRowType.getFieldList.get(x).getType)).toArray
// override null literals with field access for grouping fields of current aggregation
groupSet.toList.zipWithIndex.foreach { case (group, idx) =>
groupingFields(group) = rexBuilder.makeInputRef(relBuilder.peek(), idx)
}
var aggCnt = 0
val aggFields = aggsWithIndexes.map {
case (call, idx) if groupExprIndexes.contains(idx) =>
// create literal for group expression
lowerGroupExpr(rexBuilder, call, groupSet)
case _ =>
// create access to aggregation result
val aggResult = rexBuilder.makeInputRef(subAgg, subAgg.getGroupCount + aggCnt)
aggCnt += 1
aggResult
}
// add a projection to establish the result schema and set the values of the group expressions.
relBuilder.project(
groupingFields.toSeq ++ aggFields,
groupingFieldsName ++ agg.getAggCallList.map(_.name))
// return aggregation + projection
relBuilder.build()
}
/** Returns a literal for a given group expression. */
private def lowerGroupExpr(
builder: RexBuilder,
call: AggregateCall,
groupSet: ImmutableBitSet) : RexNode = {
val groups = groupSet.asSet()
call.getAggregation.getKind match {
case SqlKind.GROUP_ID =>
val id = groupSet.asList().map(x => 1 << x).sum
builder.makeLiteral(id, call.getType, false)
case SqlKind.GROUPING | SqlKind.GROUPING_ID =>
val res = call.getArgList.foldLeft(0)((res, arg) =>
(res << 1) + (if (groups.contains(arg)) 1 else 0)
)
builder.makeLiteral(res, call.getType, false)
case _ => builder.constantNull()
}
}
}
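// Illustrative decomposition: a query such as
//   SELECT a, b, COUNT(*) FROM t GROUP BY GROUPING SETS ((a), (b))
// is rewritten into a UNION ALL of two plain aggregates (one grouped by a, one by b); grouping
// columns absent from a sub-aggregate are filled with NULL literals, and GROUPING()/GROUPING_ID()/
// GROUP_ID() calls are replaced by the constants computed in lowerGroupExpr.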
| hequn8128/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/rules/logical/DecomposeGroupingSetRule.scala | Scala | apache-2.0 | 5,219 |
package io.getquill.context.sql.dsl
import io.getquill.Query
import io.getquill.context.sql.SqlContext
trait SqlDsl {
this: SqlContext[_, _] =>
implicit class Like(s1: String) {
def like(s2: String) = quote(infix"$s1 like $s2".as[Boolean])
}
implicit class ForUpdate[T](q: Query[T]) {
def forUpdate() = quote(infix"$q FOR UPDATE".as[Query[T]])
}
}
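// Hypothetical usage inside a context that mixes in SqlDsl (the Person schema is illustrative):
//   val jos = quote { query[Person].filter(p => p.name like "Jo%") }
//   val locked = quote { query[Person].forUpdate() }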
| getquill/quill | quill-sql/src/main/scala/io/getquill/context/sql/dsl/SqlDsl.scala | Scala | apache-2.0 | 370 |
package com.monsanto.stats.tables.models
/**
* Sparse vector of counts per user.
*/
case class ClusteredUserVector(userId: Long, counts: Map[Long,Long], belongProbability: Double)
| MonsantoCo/chinese-restaurant-process | src/main/scala/com/monsanto/stats/tables/models/ClusteredUserVector.scala | Scala | bsd-3-clause | 183 |
import fpinscala.state._
import org.scalatest._
import org.scalatest.prop._
import org.scalacheck._
class StateSpec extends FlatSpec with Matchers with PropertyChecks {
"RNG's nonNegativeInt" should "have produce a nonNegativeInt and a new RNG" in {
def generateNrandomNumber(n: Int, rng: RNG): (Int, RNG) = {
val value = RNG.nonNegativeInt(rng)
assert(value._1 >= 0 && value._1 <= Int.MaxValue)
if (n > 0) generateNrandomNumber(n - 1, value._2)
else value
}
generateNrandomNumber(10000, RNG.Simple(0))
}
"RNG's double" should "have produce a Double between 0 and 1, not including 1" in {
def generateNrandomNumber(n: Int, rng: RNG): (Double, RNG) = {
val value = RNG.double(rng)
assert(value._1 >= 0.0 && value._1 < 1.0)
if (n > 0) generateNrandomNumber(n - 1, value._2)
else value
}
generateNrandomNumber(10000, RNG.Simple(0))
}
"RNG's doubleNew" should "have produce a Double between 0 and 1, not including 1" in {
def generateNrandomNumber(n: Int, rng: RNG): (Double, RNG) = {
val value = RNG.doubleNew(rng)
assert(value._1 >= 0.0 && value._1 < 1.0)
if (n > 0) generateNrandomNumber(n - 1, value._2)
else value
}
generateNrandomNumber(10000, RNG.Simple(0))
}
}
| enpassant/fpinscala | exercises/src/test/scala/fpinscala/state/StateSpec.scala | Scala | mit | 1,290 |
package com.twitter.finagle.util
import com.twitter.util.TimeConversions._
import com.twitter.util.{Timer, TimerTask}
import java.util.Collections
import java.util.concurrent.atomic.AtomicInteger
import org.jboss.netty.{util => nu}
import org.junit.runner.RunWith
import org.mockito.ArgumentCaptor
import org.mockito.Matchers._
import org.mockito.Mockito.{atMost, verify, when, never}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
class HashedWheelTimerTest extends FunSuite with MockitoSugar {
test("HashedWheelTimer should Support cancelling recurring tasks") {
val timer = mock[nu.Timer]
val nstop = new AtomicInteger(0)
@volatile var running = true
when(timer.stop()) thenAnswer {
new Answer[java.util.Set[Nothing]] {
override def answer(invocation: InvocationOnMock): java.util.Set[Nothing] = {
running = false
nstop.incrementAndGet()
Collections.emptySet()
}
}
}
val t = new HashedWheelTimer(timer)
val taskCaptor = ArgumentCaptor.forClass(classOf[nu.TimerTask])
val firstTimeout = mock[nu.Timeout]
when(firstTimeout.isCancelled) thenReturn false
when(timer.newTimeout(taskCaptor.capture(), any[Long], any[java.util.concurrent.TimeUnit])) thenReturn firstTimeout
var task: TimerTask = null
task = t.schedule(1.second) {
task.cancel()
}
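    // fire the captured netty task once: the scheduled body cancels itself, so the underlying
    // timer should see at most one newTimeout call (verified below)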
taskCaptor.getValue.run(firstTimeout)
verify(timer, atMost(1)).newTimeout(any[org.jboss.netty.util.TimerTask], any[Long], any[java.util.concurrent.TimeUnit])
}
test("HashedWheelTimer.Default should ignore stop()") {
val underlying = mock[Timer]
val nonStop = HashedWheelTimer.unstoppable(underlying)
nonStop.stop()
verify(underlying, never()).stop()
}
test("HashedWheelTimer.Default.toString") {
val str = HashedWheelTimer.Default.toString
assert("UnstoppableTimer(HashedWheelTimer.Default)" == str)
}
}
| koshelev/finagle | finagle-netty3/src/test/scala/com/twitter/finagle/util/HashedWheelTimerTest.scala | Scala | apache-2.0 | 2,091 |
/*
** Copyright [2013-2016] [Megam Systems]
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
package models.json.analytics
import scalaz._
import scalaz.NonEmptyList._
import scalaz.Validation
import scalaz.Validation._
import Scalaz._
import net.liftweb.json._
import net.liftweb.json.scalaz.JsonScalaz._
import java.util.Date
import java.nio.charset.Charset
import io.megam.auth.funnel.FunnelErrors._
import controllers.Constants._
import models.analytics._
import models.analytics.{ Connectors }
/**
* @author ranjitha
*
*/
class ConnectorsSerialization(charset: Charset = UTF8Charset) extends io.megam.json.SerializationBase[Connectors] {
protected val SourceKey = "source"
protected val EndpointKey = "endpoint"
protected val PortKey = "port"
protected val DbnameKey = "dbname"
protected val InputsKey = "inputs"
protected val TablesKey = "tables"
override implicit val writer = new JSONW[Connectors] {
import models.json.analytics.KeyValueListSerialization.{ writer => KeyValueListWriter }
import TablesListSerialization.{ writer => TablesListWriter }
override def write(h: Connectors): JValue = {
JObject(
JField(SourceKey, toJSON(h.source)) ::
JField(EndpointKey, toJSON(h.endpoint)) ::
JField(PortKey, toJSON(h.port)) ::
JField(DbnameKey, toJSON(h.dbname)) ::
JField(InputsKey, toJSON(h.inputs)(KeyValueListWriter)) ::
JField(TablesKey, toJSON(h.tables)(TablesListWriter))::
Nil)
}
}
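  // The writer above emits JSON of (roughly) this shape, with inputs/tables rendered by their
  // own serializers:
  //   {"source": ..., "endpoint": ..., "port": ..., "dbname": ..., "inputs": [...], "tables": [...]}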
override implicit val reader = new JSONR[Connectors] {
import models.json.analytics.KeyValueListSerialization.{ reader => KeyValueListReader }
import TablesListSerialization.{ reader => TablesListReader}
override def read(json: JValue): Result[Connectors] = {
val sourceField = field[String](SourceKey)(json)
val endpointField = field[String](EndpointKey)(json)
val portField = field[String](PortKey)(json)
val dbnameField = field[String](DbnameKey)(json)
val inputsField = field[KeyValueList](InputsKey)(json)(KeyValueListReader)
val tablesField = field[TablesList](TablesKey)(json)(TablesListReader)
(sourceField |@| endpointField |@| portField |@| dbnameField |@| inputsField |@| tablesField) {
(source: String, endpoint: String, port: String, dbname: String, inputs: KeyValueList, tables: TablesList) =>
new Connectors(source, endpoint, port, dbname, inputs, tables)
}
}
}
}
| meglytics/bidi | app/models/json/analytics/ConnectorsSerialization.scala | Scala | mit | 2,996 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.catalyst.plans.logical.{Filter, LeafNode, LogicalPlan, Project}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.types.{ArrayType, DataType, MapType, StructType}
/**
* Prunes unnecessary physical columns given a [[PhysicalOperation]] over a data source relation.
* By "physical column", we mean a column as defined in the data source format like Parquet format
* or ORC format. For example, in Spark SQL, a root-level Parquet column corresponds to a SQL
* column, and a nested Parquet column corresponds to a [[StructField]].
*/
object SchemaPruning extends Rule[LogicalPlan] {
import org.apache.spark.sql.catalyst.expressions.SchemaPruning._
override def apply(plan: LogicalPlan): LogicalPlan =
if (conf.nestedSchemaPruningEnabled) {
apply0(plan)
} else {
plan
}
private def apply0(plan: LogicalPlan): LogicalPlan =
plan transformDown {
case op @ PhysicalOperation(projects, filters,
l @ LogicalRelation(hadoopFsRelation: HadoopFsRelation, _, _, _))
if canPruneRelation(hadoopFsRelation) =>
prunePhysicalColumns(l.output, projects, filters, hadoopFsRelation.dataSchema,
prunedDataSchema => {
val prunedHadoopRelation =
hadoopFsRelation.copy(dataSchema = prunedDataSchema)(hadoopFsRelation.sparkSession)
buildPrunedRelation(l, prunedHadoopRelation)
}).getOrElse(op)
}
/**
 * This method returns an optional logical plan. `None` is returned if no nested field is required or
* all nested fields are required.
*/
private def prunePhysicalColumns(
output: Seq[AttributeReference],
projects: Seq[NamedExpression],
filters: Seq[Expression],
dataSchema: StructType,
leafNodeBuilder: StructType => LeafNode): Option[LogicalPlan] = {
val (normalizedProjects, normalizedFilters) =
normalizeAttributeRefNames(output, projects, filters)
val requestedRootFields = identifyRootFields(normalizedProjects, normalizedFilters)
// If requestedRootFields includes a nested field, continue. Otherwise,
// return op
if (requestedRootFields.exists { root: RootField => !root.derivedFromAtt }) {
val prunedDataSchema = pruneDataSchema(dataSchema, requestedRootFields)
// If the data schema is different from the pruned data schema, continue. Otherwise,
// return op. We effect this comparison by counting the number of "leaf" fields in
    // each schema, assuming the fields in prunedDataSchema are a subset of the fields
// in dataSchema.
if (countLeaves(dataSchema) > countLeaves(prunedDataSchema)) {
val prunedRelation = leafNodeBuilder(prunedDataSchema)
val projectionOverSchema = ProjectionOverSchema(prunedDataSchema)
Some(buildNewProjection(normalizedProjects, normalizedFilters, prunedRelation,
projectionOverSchema))
} else {
None
}
} else {
None
}
}
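  // Illustrative sketch (hypothetical query): selecting only `person.name.first` from a Parquet
  // relation whose `person` column is struct<name: struct<first: string, last: string>, age: int>
  // prunes the data schema down to person: struct<name: struct<first: string>>, so only that
  // leaf column is read from the files.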
/**
* Checks to see if the given relation can be pruned. Currently we support Parquet and ORC v1.
*/
private def canPruneRelation(fsRelation: HadoopFsRelation) =
fsRelation.fileFormat.isInstanceOf[ParquetFileFormat] ||
fsRelation.fileFormat.isInstanceOf[OrcFileFormat]
/**
* Normalizes the names of the attribute references in the given projects and filters to reflect
* the names in the given logical relation. This makes it possible to compare attributes and
* fields by name. Returns a tuple with the normalized projects and filters, respectively.
*/
private def normalizeAttributeRefNames(
output: Seq[AttributeReference],
projects: Seq[NamedExpression],
filters: Seq[Expression]): (Seq[NamedExpression], Seq[Expression]) = {
val normalizedAttNameMap = output.map(att => (att.exprId, att.name)).toMap
val normalizedProjects = projects.map(_.transform {
case att: AttributeReference if normalizedAttNameMap.contains(att.exprId) =>
att.withName(normalizedAttNameMap(att.exprId))
}).map { case expr: NamedExpression => expr }
val normalizedFilters = filters.map(_.transform {
case att: AttributeReference if normalizedAttNameMap.contains(att.exprId) =>
att.withName(normalizedAttNameMap(att.exprId))
})
(normalizedProjects, normalizedFilters)
}
/**
* Builds the new output [[Project]] Spark SQL operator that has the `leafNode`.
*/
private def buildNewProjection(
projects: Seq[NamedExpression],
filters: Seq[Expression],
leafNode: LeafNode,
projectionOverSchema: ProjectionOverSchema): Project = {
// Construct a new target for our projection by rewriting and
// including the original filters where available
val projectionChild =
if (filters.nonEmpty) {
val projectedFilters = filters.map(_.transformDown {
case projectionOverSchema(expr) => expr
})
val newFilterCondition = projectedFilters.reduce(And)
Filter(newFilterCondition, leafNode)
} else {
leafNode
}
// Construct the new projections of our Project by
// rewriting the original projections
val newProjects = projects.map(_.transformDown {
case projectionOverSchema(expr) => expr
}).map { case expr: NamedExpression => expr }
if (log.isDebugEnabled) {
logDebug(s"New projects:\n${newProjects.map(_.treeString).mkString("\n")}")
}
Project(newProjects, projectionChild)
}
/**
* Builds a pruned logical relation from the output of the output relation and the schema of the
* pruned base relation.
*/
private def buildPrunedRelation(
outputRelation: LogicalRelation,
prunedBaseRelation: HadoopFsRelation) = {
val prunedOutput = getPrunedOutput(outputRelation.output, prunedBaseRelation.schema)
outputRelation.copy(relation = prunedBaseRelation, output = prunedOutput)
}
// Prune the given output to make it consistent with `requiredSchema`.
private def getPrunedOutput(
output: Seq[AttributeReference],
requiredSchema: StructType): Seq[AttributeReference] = {
// We need to replace the expression ids of the pruned relation output attributes
// with the expression ids of the original relation output attributes so that
// references to the original relation's output are not broken
val outputIdMap = output.map(att => (att.name, att.exprId)).toMap
requiredSchema
.toAttributes
.map {
case att if outputIdMap.contains(att.name) =>
att.withExprId(outputIdMap(att.name))
case att => att
}
}
/**
* Counts the "leaf" fields of the given dataType. Informally, this is the
* number of fields of non-complex data type in the tree representation of
* [[DataType]].
*/
private def countLeaves(dataType: DataType): Int = {
dataType match {
case array: ArrayType => countLeaves(array.elementType)
case map: MapType => countLeaves(map.keyType) + countLeaves(map.valueType)
case struct: StructType =>
struct.map(field => countLeaves(field.dataType)).sum
case _ => 1
}
}
}
| maropu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SchemaPruning.scala | Scala | apache-2.0 | 8,284 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
// scalastyle:off line.size.limit
/*
* Ported by Alistair Johnson from
* https://github.com/gwtproject/gwt/blob/master/user/test/com/google/gwt/emultest/java/math/BigIntegerDivideTest.java
*/
// scalastyle:on line.size.limit
package org.scalajs.testsuite.javalib.math
import java.math.BigInteger
import org.junit.Test
import org.junit.Assert._
class BigIntegerDivideTest {
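  // Each test builds its operands with BigInteger's sign-magnitude constructor (signum, magnitude
  // bytes), applies divide/remainder/mod, and checks the result's two's-complement toByteArray()
  // output against the expected rBytes as well as the expected signum.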
@Test def testCase1(): Unit = {
val aBytes = Array[Byte](1, 2, 3, 4, 5, 6, 7)
val bBytes = Array[Byte](0)
val aSign = 1
val bSign = 0
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
try {
aNumber.divide(bNumber)
fail()
} catch {
case _: Throwable => // As expected
}
}
@Test def testCase10(): Unit = {
val aBytes = Array[Byte](1, 100, 56, 7, 98, -1, 39, -128, 127, 5, 6, 7, 8, 9)
val bBytes = Array[Byte](15, 48, -29, 7, 98, -1, 39, -128)
val aSign = -1
val bSign = -1
val rBytes = Array[Byte](23, 115, 11, 78, 35, -11)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testCase11(): Unit = {
val aBytes = Array[Byte](0)
val bBytes = Array[Byte](15, 48, -29, 7, 98, -1, 39, -128)
val aSign = 0
val bSign = -1
val rBytes = Array[Byte](0)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(0, result.signum())
}
@Test def testCase12(): Unit = {
val bBytes = Array[Byte](15, 48, -29, 7, 98, -1, 39, -128)
val bSign = -1
val rBytes = Array[Byte](0)
val aNumber = BigInteger.ZERO
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(0, result.signum())
}
@Test def testCase13(): Unit = {
val aBytes = Array[Byte](15, 48, -29, 7, 98, -1, 39, -128)
val aSign = 1
val rBytes = Array[Byte](15, 48, -29, 7, 98, -1, 39, -128)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = BigInteger.ONE
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testCase14(): Unit = {
val rBytes = Array[Byte](1)
val aNumber = BigInteger.ONE
val bNumber = BigInteger.ONE
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testCase15(): Unit = {
val aBytes = Array[Byte](1, 2, 3, 4, 5, 6, 7)
val bBytes = Array[Byte](0)
val aSign = 1
val bSign = 0
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
try {
aNumber.remainder(bNumber)
fail()
} catch {
case _: Throwable => // As expected
}
}
@Test def testCase16(): Unit = {
val aBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127)
val bBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127)
val aSign = 1
val bSign = 1
val rBytes = Array[Byte](0)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.remainder(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(0, result.signum())
}
@Test def testCase17(): Unit = {
val aBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127, 75)
val bBytes = Array[Byte](27, -15, 65, 39, 100)
val aSign = 1
val bSign = 1
val rBytes = Array[Byte](12, -21, 73, 56, 27)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.remainder(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testCase18(): Unit = {
val aBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127, 75)
val bBytes = Array[Byte](27, -15, 65, 39, 100)
val aSign = -1
val bSign = -1
val rBytes = Array[Byte](-13, 20, -74, -57, -27)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.remainder(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(-1, result.signum())
}
@Test def testCase19(): Unit = {
val aBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127, 75)
val bBytes = Array[Byte](27, -15, 65, 39, 100)
val aSign = 1
val bSign = -1
val rBytes = Array[Byte](12, -21, 73, 56, 27)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.remainder(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testCase2(): Unit = {
val aBytes = Array[Byte](1, 2, 3, 4, 5, 6, 7)
val aSign = 1
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = BigInteger.ZERO
try {
aNumber.divide(bNumber)
fail()
} catch {
case _: Throwable => // As expected
}
}
@Test def testCase20(): Unit = {
val aBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127, 75)
val bBytes = Array[Byte](27, -15, 65, 39, 100)
val aSign = -1
val bSign = 1
val rBytes = Array[Byte](-13, 20, -74, -57, -27)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.remainder(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(-1, result.signum())
}
@Test def testCase21(): Unit = {
val aBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127, 75)
val bBytes = Array[Byte](27, -15, 65, 39, 100)
val aSign = -1
val bSign = 1
val rBytes = Array[Array[Byte]](Array[Byte](-5, 94, -115, -74, -85, 84), Array[Byte](-13, 20, -74, -57, -27))
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divideAndRemainder(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result(0).toByteArray()
for (i <- 0 until resBytes.length){
assertEquals(rBytes(0)(i), resBytes(i))
}
assertEquals(-1, result(0).signum())
resBytes = result(1).toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(1)(i), resBytes(i))
assertEquals(-1, result(1).signum())
}
}
@Test def testCase22(): Unit = {
val aBytes = Array[Byte](1, 2, 3, 4, 5, 6, 7)
val bBytes = Array[Byte](1, 30, 40, 56, -1, 45)
val aSign = 1
val bSign = -1
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
try {
aNumber.mod(bNumber)
fail()
} catch {
case _: Throwable => // As expected
}
}
@Test def testCase23(): Unit = {
val aBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127, 75)
val bBytes = Array[Byte](27, -15, 65, 39, 100)
val aSign = 1
val bSign = 1
val rBytes = Array[Byte](12, -21, 73, 56, 27)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.mod(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testCase24(): Unit = {
val aBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127, 75)
val bBytes = Array[Byte](27, -15, 65, 39, 100)
val aSign = -1
val bSign = 1
val rBytes = Array[Byte](15, 5, -9, -17, 73)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.mod(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testCase3(): Unit = {
val aBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127)
val bBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127)
val aSign = 1
val bSign = 1
val rBytes = Array[Byte](1)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testCase4(): Unit = {
val aBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127)
val bBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127)
val aSign = -1
val bSign = 1
val rBytes = Array[Byte](-1)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(-1, result.signum())
}
@Test def testCase5(): Unit = {
val aBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127)
val bBytes = Array[Byte](-127, 100, 56, 7, 98, -1, 39, -128, 127, 1, 2, 3, 4, 5)
val aSign = -1
val bSign = 1
val rBytes = Array[Byte](0)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(0, result.signum())
}
@Test def testCase6(): Unit = {
val aBytes = Array[Byte](1, 100, 56, 7, 98, -1, 39, -128, 127)
val bBytes = Array[Byte](15, 100, 56, 7, 98, -1, 39, -128, 127)
val aSign = 1
val bSign = 1
val rBytes = Array[Byte](0)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(0, result.signum())
}
@Test def testCase7(): Unit = {
val aBytes = Array[Byte](1, 100, 56, 7, 98, -1, 39, -128, 127, 5, 6, 7, 8, 9)
val bBytes = Array[Byte](15, 48, -29, 7, 98, -1, 39, -128)
val aSign = 1
val bSign = 1
val rBytes = Array[Byte](23, 115, 11, 78, 35, -11)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testCase8(): Unit = {
val aBytes = Array[Byte](1, 100, 56, 7, 98, -1, 39, -128, 127, 5, 6, 7, 8, 9)
val bBytes = Array[Byte](15, 48, -29, 7, 98, -1, 39, -128)
val aSign = 1
val bSign = -1
val rBytes = Array[Byte](-24, -116, -12, -79, -36, 11)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(-1, result.signum())
}
@Test def testCase9(): Unit = {
val aBytes = Array[Byte](1, 100, 56, 7, 98, -1, 39, -128, 127, 5, 6, 7, 8, 9)
val bBytes = Array[Byte](15, 48, -29, 7, 98, -1, 39, -128)
val aSign = -1
val bSign = 1
val rBytes = Array[Byte](-24, -116, -12, -79, -36, 11)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(-1, result.signum())
}
@Test def testDivisionKnuth1(): Unit = {
val aBytes = Array[Byte](-7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7)
val bBytes = Array[Byte](-3, -3, -3, -3)
val aSign = 1
val bSign = 1
val rBytes = Array[Byte](0, -5, -12, -33, -96, -36, -105, -56, 92, 15, 48, -109)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testDivisionKnuthFirstDigitsEqual(): Unit = {
val aBytes = Array[Byte](2, -3, -4, -5, -1, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)
val bBytes = Array[Byte](2, -3, -4, -5, -1, -1, -1, -1)
val aSign = -1
val bSign = -1
val rBytes = Array[Byte](0, -1, -1, -1, -1, -2, -88, -60, 41)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testDivisionKnuthIsNormalized(): Unit = {
val aBytes = Array[Byte](-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)
val bBytes = Array[Byte](-1, -1, -1, -1, -1, -1, -1, -1)
val aSign = -1
val bSign = -1
val rBytes = Array[Byte](0, -9, -8, -7, -6, -5, -4, -3)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testDivisionKnuthMultiDigitsByOneDigit(): Unit = {
val aBytes = Array[Byte](113, -83, 123, -5, 18, -34, 67, 39, -29)
val bBytes = Array[Byte](2, -3, -4, -5)
val aSign = 1
val bSign = -1
val rBytes = Array[Byte](-38, 2, 7, 30, 109, -43)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(-1, result.signum())
}
@Test def testDivisionKnuthOneDigitByOneDigit(): Unit = {
val aBytes = Array[Byte](113, -83, 123, -5)
val bBytes = Array[Byte](2, -3, -4, -5)
val aSign = 1
val bSign = -1
val rBytes = Array[Byte](-37)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.divide(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(-1, result.signum())
}
@Test def testRemainderKnuth1(): Unit = {
val aBytes = Array[Byte](-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1)
val bBytes = Array[Byte](0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
val aSign = 1
val bSign = 1
val rBytes = Array[Byte](1, 2, 3, 4, 5, 6, 7, 7, 18, -89)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.remainder(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testRemainderKnuthMultiDigitsByOneDigit(): Unit = {
val aBytes = Array[Byte](113, -83, 123, -5, 18, -34, 67, 39, -29)
val bBytes = Array[Byte](2, -3, -4, -50)
val aSign = 1
val bSign = -1
val rBytes = Array[Byte](2, -37, -60, 59)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.remainder(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
@Test def testRemainderKnuthOneDigitByOneDigit(): Unit = {
val aBytes = Array[Byte](113, -83, 123, -5)
val bBytes = Array[Byte](2, -3, -4, -50)
val aSign = 1
val bSign = -1
val rBytes = Array[Byte](2, -9, -14, 53)
val aNumber = new BigInteger(aSign, aBytes)
val bNumber = new BigInteger(bSign, bBytes)
val result = aNumber.remainder(bNumber)
var resBytes = Array.ofDim[Byte](rBytes.length)
resBytes = result.toByteArray()
for (i <- 0 until resBytes.length) {
assertEquals(rBytes(i), resBytes(i))
}
assertEquals(1, result.signum())
}
}
| scala-js/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/math/BigIntegerDivideTest.scala | Scala | apache-2.0 | 19,076 |
/*
* Copyright 2012-2013 Eligotech BV.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eligosource.eventsourced.journal.leveldb
import java.io.File
import org.eligosource.eventsourced.core._
import org.eligosource.eventsourced.core.JournalProtocol._
import org.eligosource.eventsourced.journal.common._
abstract class LeveldbJournalSpec extends PersistentJournalSpec {
import JournalSpec._
"persist input messages with a custom event serializer" in { fixture =>
import fixture._
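    // the CustomEvent serializer wired into these specs evidently upper-cases the payload,
    // hence the "TEST-n" (rather than "test-n") expectations on replay below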
journal ! WriteInMsg(1, Message(CustomEvent("test-1")), writeTarget)
journal ! WriteInMsg(1, Message(CustomEvent("test-2")), writeTarget)
journal ! ReplayInMsgs(1, 0, replayTarget)
dequeue(replayQueue) { m => m must be(Message(CustomEvent("TEST-1"), sequenceNr = 1, timestamp = m.timestamp)) }
dequeue(replayQueue) { m => m must be(Message(CustomEvent("TEST-2"), sequenceNr = 2, timestamp = m.timestamp)) }
}
"persist output messages with a custom event serializer" in { fixture =>
import fixture._
journal ! WriteOutMsg(1, Message(CustomEvent("test-3")), 1, SkipAck, writeTarget)
journal ! WriteOutMsg(1, Message(CustomEvent("test-4")), 1, SkipAck, writeTarget)
journal ! ReplayOutMsgs(1, 0, replayTarget)
dequeue(replayQueue) { m => m must be(Message(CustomEvent("TEST-3"), sequenceNr = 1, timestamp = 0L)) }
dequeue(replayQueue) { m => m must be(Message(CustomEvent("TEST-4"), sequenceNr = 2, timestamp = 0L)) }
}
}
object LeveldbJournalSpec {
val journalDir = new File("es-journal/es-journal-leveldb/target/journal")
}
class LeveldbJournalPSNativeSpec extends LeveldbJournalSpec with LeveldbCleanup {
def journalProps = LeveldbJournalProps(LeveldbJournalSpec.journalDir)
}
class LeveldbJournalSSNativeSpec extends JournalSpec with LeveldbCleanup {
def journalProps = LeveldbJournalProps(LeveldbJournalSpec.journalDir).withSequenceStructure
}
class LeveldbJournalPSJavaSpec extends LeveldbJournalSpec with LeveldbCleanup {
def journalProps = LeveldbJournalProps(LeveldbJournalSpec.journalDir).withNative(false)
}
class LeveldbJournalSSJavaSpec extends JournalSpec with LeveldbCleanup {
def journalProps = LeveldbJournalProps(LeveldbJournalSpec.journalDir).withSequenceStructure.withNative(false)
}
| eligosource/eventsourced | es-journal/es-journal-leveldb/src/test/scala/org/eligosource/eventsourced/journal/leveldb/LeveldbJournalSpec.scala | Scala | apache-2.0 | 2,786 |
///*
// * (C) 2014 Jacob Lorensen, [email protected]
// */
//
//package org.zapto.jablo.myml
//
//import org.junit._
//import Assert._
//import TestHelper.calc
//import TestHelper.e
//import TestHelper.check
//import TestHelper.reparse
//
//class RatExpressionTest {
// @Before
// def setUp: Unit = {
// }
//
// @After
// def tearDown: Unit = {
// }
//
// @Test
// def ratTest1 = {
// val p = check(calc.parseAll(calc.expr, "2/3*x^2"))
// assertEquals(Mul(Q(2, 3), Pot(Var("x"), Z(2))), p.get)
// assertEquals(Q(32, 3), p.get.eval(Map("x" -> Z(4))));
// assertEquals(p.get, reparse(p))
// assertEquals(Q(32,3), ByteCodeMachine.interp(p.get, Map("x"->Z(4))))
// }
//
// @Test
// def ratTest2 = {
// val p = check(calc.parseAll(calc.expr, "2/3+4/5+6/7+8/9"))
// println("ratTets2 - parse: "+ p.get)
// println(" - infix: "+ p.get.infix)
// val ev = p.get.eval(Map())
// println(" - ev : "+ ev)
// println(" - evinf: "+ ev.infix)
// }
//
//
//}
| jablo/myml | src/test/scala/org/zapto/jablo/myml_later/RatExpressionTest.scala | Scala | artistic-2.0 | 1,019 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.util.{Properties, UUID}
import scala.collection.JavaConverters._
import scala.collection.Map
import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.json4s.DefaultFormats
import org.json4s.JsonAST._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark._
import org.apache.spark.executor._
import org.apache.spark.metrics.ExecutorMetricType
import org.apache.spark.rdd.RDDOperationScope
import org.apache.spark.resource.{ExecutorResourceRequest, ResourceInformation, ResourceProfile, TaskResourceRequest}
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.storage._
/**
* Serializes SparkListener events to/from JSON. This protocol provides strong backwards-
* and forwards-compatibility guarantees: any version of Spark should be able to read JSON output
* written by any other version, including newer versions.
*
* JsonProtocolSuite contains backwards-compatibility tests which check that the current version of
* JsonProtocol is able to read output written by earlier versions. We do not currently have tests
* for reading newer JSON output with older Spark versions.
*
* To ensure that we provide these guarantees, follow these rules when modifying these methods:
*
* - Never delete any JSON fields.
* - Any new JSON fields should be optional; use `jsonOption` when reading these fields
* in `*FromJson` methods.
*/
private[spark] object JsonProtocol {
// TODO: Remove this file and put JSON serialization into each individual class.
private implicit val format = DefaultFormats
private val mapper = new ObjectMapper().registerModule(DefaultScalaModule)
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
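  // Compatibility sketch (illustrative): fields added in later releases are read defensively in
  // the *FromJson methods, e.g. something like
  //   jsonOption(json \ "Maximum Onheap Memory").map(_.extract[Long])
  // so event logs written by older Spark versions, which lack the field, still deserialize.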
/** ------------------------------------------------- *
* JSON serialization methods for SparkListenerEvents |
* -------------------------------------------------- */
def sparkEventToJson(event: SparkListenerEvent): JValue = {
event match {
case stageSubmitted: SparkListenerStageSubmitted =>
stageSubmittedToJson(stageSubmitted)
case stageCompleted: SparkListenerStageCompleted =>
stageCompletedToJson(stageCompleted)
case taskStart: SparkListenerTaskStart =>
taskStartToJson(taskStart)
case taskGettingResult: SparkListenerTaskGettingResult =>
taskGettingResultToJson(taskGettingResult)
case taskEnd: SparkListenerTaskEnd =>
taskEndToJson(taskEnd)
case jobStart: SparkListenerJobStart =>
jobStartToJson(jobStart)
case jobEnd: SparkListenerJobEnd =>
jobEndToJson(jobEnd)
case environmentUpdate: SparkListenerEnvironmentUpdate =>
environmentUpdateToJson(environmentUpdate)
case blockManagerAdded: SparkListenerBlockManagerAdded =>
blockManagerAddedToJson(blockManagerAdded)
case blockManagerRemoved: SparkListenerBlockManagerRemoved =>
blockManagerRemovedToJson(blockManagerRemoved)
case unpersistRDD: SparkListenerUnpersistRDD =>
unpersistRDDToJson(unpersistRDD)
case applicationStart: SparkListenerApplicationStart =>
applicationStartToJson(applicationStart)
case applicationEnd: SparkListenerApplicationEnd =>
applicationEndToJson(applicationEnd)
case executorAdded: SparkListenerExecutorAdded =>
executorAddedToJson(executorAdded)
case executorRemoved: SparkListenerExecutorRemoved =>
executorRemovedToJson(executorRemoved)
case logStart: SparkListenerLogStart =>
logStartToJson(logStart)
case metricsUpdate: SparkListenerExecutorMetricsUpdate =>
executorMetricsUpdateToJson(metricsUpdate)
case stageExecutorMetrics: SparkListenerStageExecutorMetrics =>
stageExecutorMetricsToJson(stageExecutorMetrics)
case blockUpdate: SparkListenerBlockUpdated =>
blockUpdateToJson(blockUpdate)
case resourceProfileAdded: SparkListenerResourceProfileAdded =>
resourceProfileAddedToJson(resourceProfileAdded)
case _ => parse(mapper.writeValueAsString(event))
}
}
def stageSubmittedToJson(stageSubmitted: SparkListenerStageSubmitted): JValue = {
val stageInfo = stageInfoToJson(stageSubmitted.stageInfo)
val properties = propertiesToJson(stageSubmitted.properties)
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.stageSubmitted) ~
("Stage Info" -> stageInfo) ~
("Properties" -> properties)
}
def stageCompletedToJson(stageCompleted: SparkListenerStageCompleted): JValue = {
val stageInfo = stageInfoToJson(stageCompleted.stageInfo)
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.stageCompleted) ~
("Stage Info" -> stageInfo)
}
def taskStartToJson(taskStart: SparkListenerTaskStart): JValue = {
val taskInfo = taskStart.taskInfo
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.taskStart) ~
("Stage ID" -> taskStart.stageId) ~
("Stage Attempt ID" -> taskStart.stageAttemptId) ~
("Task Info" -> taskInfoToJson(taskInfo))
}
def taskGettingResultToJson(taskGettingResult: SparkListenerTaskGettingResult): JValue = {
val taskInfo = taskGettingResult.taskInfo
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.taskGettingResult) ~
("Task Info" -> taskInfoToJson(taskInfo))
}
def taskEndToJson(taskEnd: SparkListenerTaskEnd): JValue = {
val taskEndReason = taskEndReasonToJson(taskEnd.reason)
val taskInfo = taskEnd.taskInfo
val executorMetrics = taskEnd.taskExecutorMetrics
val taskMetrics = taskEnd.taskMetrics
val taskMetricsJson = if (taskMetrics != null) taskMetricsToJson(taskMetrics) else JNothing
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.taskEnd) ~
("Stage ID" -> taskEnd.stageId) ~
("Stage Attempt ID" -> taskEnd.stageAttemptId) ~
("Task Type" -> taskEnd.taskType) ~
("Task End Reason" -> taskEndReason) ~
("Task Info" -> taskInfoToJson(taskInfo)) ~
("Task Executor Metrics" -> executorMetricsToJson(executorMetrics)) ~
("Task Metrics" -> taskMetricsJson)
}
def jobStartToJson(jobStart: SparkListenerJobStart): JValue = {
val properties = propertiesToJson(jobStart.properties)
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.jobStart) ~
("Job ID" -> jobStart.jobId) ~
("Submission Time" -> jobStart.time) ~
("Stage Infos" -> jobStart.stageInfos.map(stageInfoToJson)) ~ // Added in Spark 1.2.0
("Stage IDs" -> jobStart.stageIds) ~
("Properties" -> properties)
}
def jobEndToJson(jobEnd: SparkListenerJobEnd): JValue = {
val jobResult = jobResultToJson(jobEnd.jobResult)
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.jobEnd) ~
("Job ID" -> jobEnd.jobId) ~
("Completion Time" -> jobEnd.time) ~
("Job Result" -> jobResult)
}
def environmentUpdateToJson(environmentUpdate: SparkListenerEnvironmentUpdate): JValue = {
val environmentDetails = environmentUpdate.environmentDetails
val jvmInformation = mapToJson(environmentDetails("JVM Information").toMap)
val sparkProperties = mapToJson(environmentDetails("Spark Properties").toMap)
val hadoopProperties = mapToJson(environmentDetails("Hadoop Properties").toMap)
val systemProperties = mapToJson(environmentDetails("System Properties").toMap)
val classpathEntries = mapToJson(environmentDetails("Classpath Entries").toMap)
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.environmentUpdate) ~
("JVM Information" -> jvmInformation) ~
("Spark Properties" -> sparkProperties) ~
("Hadoop Properties" -> hadoopProperties) ~
("System Properties" -> systemProperties) ~
("Classpath Entries" -> classpathEntries)
}
def blockManagerAddedToJson(blockManagerAdded: SparkListenerBlockManagerAdded): JValue = {
val blockManagerId = blockManagerIdToJson(blockManagerAdded.blockManagerId)
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.blockManagerAdded) ~
("Block Manager ID" -> blockManagerId) ~
("Maximum Memory" -> blockManagerAdded.maxMem) ~
("Timestamp" -> blockManagerAdded.time) ~
("Maximum Onheap Memory" -> blockManagerAdded.maxOnHeapMem) ~
("Maximum Offheap Memory" -> blockManagerAdded.maxOffHeapMem)
}
def blockManagerRemovedToJson(blockManagerRemoved: SparkListenerBlockManagerRemoved): JValue = {
val blockManagerId = blockManagerIdToJson(blockManagerRemoved.blockManagerId)
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.blockManagerRemoved) ~
("Block Manager ID" -> blockManagerId) ~
("Timestamp" -> blockManagerRemoved.time)
}
def unpersistRDDToJson(unpersistRDD: SparkListenerUnpersistRDD): JValue = {
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.unpersistRDD) ~
("RDD ID" -> unpersistRDD.rddId)
}
def applicationStartToJson(applicationStart: SparkListenerApplicationStart): JValue = {
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.applicationStart) ~
("App Name" -> applicationStart.appName) ~
("App ID" -> applicationStart.appId.map(JString(_)).getOrElse(JNothing)) ~
("Timestamp" -> applicationStart.time) ~
("User" -> applicationStart.sparkUser) ~
("App Attempt ID" -> applicationStart.appAttemptId.map(JString(_)).getOrElse(JNothing)) ~
("Driver Logs" -> applicationStart.driverLogs.map(mapToJson).getOrElse(JNothing)) ~
("Driver Attributes" -> applicationStart.driverAttributes.map(mapToJson).getOrElse(JNothing))
}
def applicationEndToJson(applicationEnd: SparkListenerApplicationEnd): JValue = {
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.applicationEnd) ~
("Timestamp" -> applicationEnd.time)
}
def resourceProfileAddedToJson(profileAdded: SparkListenerResourceProfileAdded): JValue = {
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.resourceProfileAdded) ~
("Resource Profile Id" -> profileAdded.resourceProfile.id) ~
("Executor Resource Requests" ->
executorResourceRequestMapToJson(profileAdded.resourceProfile.executorResources)) ~
("Task Resource Requests" ->
taskResourceRequestMapToJson(profileAdded.resourceProfile.taskResources))
}
def executorAddedToJson(executorAdded: SparkListenerExecutorAdded): JValue = {
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.executorAdded) ~
("Timestamp" -> executorAdded.time) ~
("Executor ID" -> executorAdded.executorId) ~
("Executor Info" -> executorInfoToJson(executorAdded.executorInfo))
}
def executorRemovedToJson(executorRemoved: SparkListenerExecutorRemoved): JValue = {
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.executorRemoved) ~
("Timestamp" -> executorRemoved.time) ~
("Executor ID" -> executorRemoved.executorId) ~
("Removed Reason" -> executorRemoved.reason)
}
def logStartToJson(logStart: SparkListenerLogStart): JValue = {
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.logStart) ~
("Spark Version" -> SPARK_VERSION)
}
def executorMetricsUpdateToJson(metricsUpdate: SparkListenerExecutorMetricsUpdate): JValue = {
val execId = metricsUpdate.execId
val accumUpdates = metricsUpdate.accumUpdates
val executorUpdates = metricsUpdate.executorUpdates
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.metricsUpdate) ~
("Executor ID" -> execId) ~
("Metrics Updated" -> accumUpdates.map { case (taskId, stageId, stageAttemptId, updates) =>
("Task ID" -> taskId) ~
("Stage ID" -> stageId) ~
("Stage Attempt ID" -> stageAttemptId) ~
("Accumulator Updates" -> JArray(updates.map(accumulableInfoToJson).toList))
}) ~
("Executor Metrics Updated" -> executorUpdates.map {
case ((stageId, stageAttemptId), metrics) =>
("Stage ID" -> stageId) ~
("Stage Attempt ID" -> stageAttemptId) ~
("Executor Metrics" -> executorMetricsToJson(metrics))
})
}
def stageExecutorMetricsToJson(metrics: SparkListenerStageExecutorMetrics): JValue = {
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.stageExecutorMetrics) ~
("Executor ID" -> metrics.execId) ~
("Stage ID" -> metrics.stageId) ~
("Stage Attempt ID" -> metrics.stageAttemptId) ~
("Executor Metrics" -> executorMetricsToJson(metrics.executorMetrics))
}
def blockUpdateToJson(blockUpdate: SparkListenerBlockUpdated): JValue = {
val blockUpdatedInfo = blockUpdatedInfoToJson(blockUpdate.blockUpdatedInfo)
("Event" -> SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES.blockUpdate) ~
("Block Updated Info" -> blockUpdatedInfo)
}
/** ------------------------------------------------------------------- *
* JSON serialization methods for classes SparkListenerEvents depend on |
* -------------------------------------------------------------------- */
def stageInfoToJson(stageInfo: StageInfo): JValue = {
val rddInfo = JArray(stageInfo.rddInfos.map(rddInfoToJson).toList)
val parentIds = JArray(stageInfo.parentIds.map(JInt(_)).toList)
val submissionTime = stageInfo.submissionTime.map(JInt(_)).getOrElse(JNothing)
val completionTime = stageInfo.completionTime.map(JInt(_)).getOrElse(JNothing)
val failureReason = stageInfo.failureReason.map(JString(_)).getOrElse(JNothing)
("Stage ID" -> stageInfo.stageId) ~
("Stage Attempt ID" -> stageInfo.attemptNumber) ~
("Stage Name" -> stageInfo.name) ~
("Number of Tasks" -> stageInfo.numTasks) ~
("RDD Info" -> rddInfo) ~
("Parent IDs" -> parentIds) ~
("Details" -> stageInfo.details) ~
("Submission Time" -> submissionTime) ~
("Completion Time" -> completionTime) ~
("Failure Reason" -> failureReason) ~
("Accumulables" -> accumulablesToJson(stageInfo.accumulables.values)) ~
("Resource Profile Id" -> stageInfo.resourceProfileId)
}
def taskInfoToJson(taskInfo: TaskInfo): JValue = {
("Task ID" -> taskInfo.taskId) ~
("Index" -> taskInfo.index) ~
("Attempt" -> taskInfo.attemptNumber) ~
("Launch Time" -> taskInfo.launchTime) ~
("Executor ID" -> taskInfo.executorId) ~
("Host" -> taskInfo.host) ~
("Locality" -> taskInfo.taskLocality.toString) ~
("Speculative" -> taskInfo.speculative) ~
("Getting Result Time" -> taskInfo.gettingResultTime) ~
("Finish Time" -> taskInfo.finishTime) ~
("Failed" -> taskInfo.failed) ~
("Killed" -> taskInfo.killed) ~
("Accumulables" -> accumulablesToJson(taskInfo.accumulables))
}
private lazy val accumulableExcludeList = Set("internal.metrics.updatedBlockStatuses")
def accumulablesToJson(accumulables: Iterable[AccumulableInfo]): JArray = {
JArray(accumulables
.filterNot(_.name.exists(accumulableExcludeList.contains))
.toList.sortBy(_.id).map(accumulableInfoToJson))
}
def accumulableInfoToJson(accumulableInfo: AccumulableInfo): JValue = {
val name = accumulableInfo.name
("ID" -> accumulableInfo.id) ~
("Name" -> name) ~
("Update" -> accumulableInfo.update.map { v => accumValueToJson(name, v) }) ~
("Value" -> accumulableInfo.value.map { v => accumValueToJson(name, v) }) ~
("Internal" -> accumulableInfo.internal) ~
("Count Failed Values" -> accumulableInfo.countFailedValues) ~
("Metadata" -> accumulableInfo.metadata)
}
/**
* Serialize the value of an accumulator to JSON.
*
* For accumulators representing internal task metrics, this looks up the relevant
* [[AccumulatorParam]] to serialize the value accordingly. For all other accumulators,
* this will simply serialize the value as a string.
*
* The behavior here must match that of [[accumValueFromJson]]. Exposed for testing.
*/
private[util] def accumValueToJson(name: Option[String], value: Any): JValue = {
if (name.exists(_.startsWith(InternalAccumulator.METRICS_PREFIX))) {
value match {
case v: Int => JInt(v)
case v: Long => JInt(v)
        // We only have 3 kinds of internal accumulator types, so if it's not an int or long, it must be
// the blocks accumulator, whose type is `java.util.List[(BlockId, BlockStatus)]`
case v: java.util.List[_] =>
JArray(v.asScala.toList.flatMap {
case (id: BlockId, status: BlockStatus) =>
Some(
("Block ID" -> id.toString) ~
("Status" -> blockStatusToJson(status))
)
case _ =>
// Ignore unsupported types. A user may put `METRICS_PREFIX` in the name. We should
// not crash.
None
})
case _ =>
// Ignore unsupported types. A user may put `METRICS_PREFIX` in the name. We should not
// crash.
JNothing
}
} else {
// For all external accumulators, just use strings
JString(value.toString)
}
}
def taskMetricsToJson(taskMetrics: TaskMetrics): JValue = {
val shuffleReadMetrics: JValue =
("Remote Blocks Fetched" -> taskMetrics.shuffleReadMetrics.remoteBlocksFetched) ~
("Local Blocks Fetched" -> taskMetrics.shuffleReadMetrics.localBlocksFetched) ~
("Fetch Wait Time" -> taskMetrics.shuffleReadMetrics.fetchWaitTime) ~
("Remote Bytes Read" -> taskMetrics.shuffleReadMetrics.remoteBytesRead) ~
("Remote Bytes Read To Disk" -> taskMetrics.shuffleReadMetrics.remoteBytesReadToDisk) ~
("Local Bytes Read" -> taskMetrics.shuffleReadMetrics.localBytesRead) ~
("Total Records Read" -> taskMetrics.shuffleReadMetrics.recordsRead)
val shuffleWriteMetrics: JValue =
("Shuffle Bytes Written" -> taskMetrics.shuffleWriteMetrics.bytesWritten) ~
("Shuffle Write Time" -> taskMetrics.shuffleWriteMetrics.writeTime) ~
("Shuffle Records Written" -> taskMetrics.shuffleWriteMetrics.recordsWritten)
val inputMetrics: JValue =
("Bytes Read" -> taskMetrics.inputMetrics.bytesRead) ~
("Records Read" -> taskMetrics.inputMetrics.recordsRead)
val outputMetrics: JValue =
("Bytes Written" -> taskMetrics.outputMetrics.bytesWritten) ~
("Records Written" -> taskMetrics.outputMetrics.recordsWritten)
val updatedBlocks =
JArray(taskMetrics.updatedBlockStatuses.toList.map { case (id, status) =>
("Block ID" -> id.toString) ~
("Status" -> blockStatusToJson(status))
})
("Executor Deserialize Time" -> taskMetrics.executorDeserializeTime) ~
("Executor Deserialize CPU Time" -> taskMetrics.executorDeserializeCpuTime) ~
("Executor Run Time" -> taskMetrics.executorRunTime) ~
("Executor CPU Time" -> taskMetrics.executorCpuTime) ~
("Peak Execution Memory" -> taskMetrics.peakExecutionMemory) ~
("Result Size" -> taskMetrics.resultSize) ~
("JVM GC Time" -> taskMetrics.jvmGCTime) ~
("Result Serialization Time" -> taskMetrics.resultSerializationTime) ~
("Memory Bytes Spilled" -> taskMetrics.memoryBytesSpilled) ~
("Disk Bytes Spilled" -> taskMetrics.diskBytesSpilled) ~
("Shuffle Read Metrics" -> shuffleReadMetrics) ~
("Shuffle Write Metrics" -> shuffleWriteMetrics) ~
("Input Metrics" -> inputMetrics) ~
("Output Metrics" -> outputMetrics) ~
("Updated Blocks" -> updatedBlocks)
}
/** Convert executor metrics to JSON. */
def executorMetricsToJson(executorMetrics: ExecutorMetrics): JValue = {
val metrics = ExecutorMetricType.metricToOffset.map { case (m, _) =>
JField(m, executorMetrics.getMetricValue(m))
}
JObject(metrics.toSeq: _*)
}
def taskEndReasonToJson(taskEndReason: TaskEndReason): JValue = {
val reason = Utils.getFormattedClassName(taskEndReason)
val json: JObject = taskEndReason match {
case fetchFailed: FetchFailed =>
val blockManagerAddress = Option(fetchFailed.bmAddress).
map(blockManagerIdToJson).getOrElse(JNothing)
("Block Manager Address" -> blockManagerAddress) ~
("Shuffle ID" -> fetchFailed.shuffleId) ~
("Map ID" -> fetchFailed.mapId) ~
("Map Index" -> fetchFailed.mapIndex) ~
("Reduce ID" -> fetchFailed.reduceId) ~
("Message" -> fetchFailed.message)
case exceptionFailure: ExceptionFailure =>
val stackTrace = stackTraceToJson(exceptionFailure.stackTrace)
val accumUpdates = accumulablesToJson(exceptionFailure.accumUpdates)
("Class Name" -> exceptionFailure.className) ~
("Description" -> exceptionFailure.description) ~
("Stack Trace" -> stackTrace) ~
("Full Stack Trace" -> exceptionFailure.fullStackTrace) ~
("Accumulator Updates" -> accumUpdates)
case taskCommitDenied: TaskCommitDenied =>
("Job ID" -> taskCommitDenied.jobID) ~
("Partition ID" -> taskCommitDenied.partitionID) ~
("Attempt Number" -> taskCommitDenied.attemptNumber)
case ExecutorLostFailure(executorId, exitCausedByApp, reason) =>
("Executor ID" -> executorId) ~
("Exit Caused By App" -> exitCausedByApp) ~
("Loss Reason" -> reason.map(_.toString))
case taskKilled: TaskKilled =>
val accumUpdates = JArray(taskKilled.accumUpdates.map(accumulableInfoToJson).toList)
("Kill Reason" -> taskKilled.reason) ~
("Accumulator Updates" -> accumUpdates)
case _ => emptyJson
}
("Reason" -> reason) ~ json
}
def blockManagerIdToJson(blockManagerId: BlockManagerId): JValue = {
("Executor ID" -> blockManagerId.executorId) ~
("Host" -> blockManagerId.host) ~
("Port" -> blockManagerId.port)
}
def jobResultToJson(jobResult: JobResult): JValue = {
val result = Utils.getFormattedClassName(jobResult)
val json = jobResult match {
case JobSucceeded => emptyJson
case jobFailed: JobFailed =>
JObject("Exception" -> exceptionToJson(jobFailed.exception))
}
("Result" -> result) ~ json
}
def rddInfoToJson(rddInfo: RDDInfo): JValue = {
val storageLevel = storageLevelToJson(rddInfo.storageLevel)
val parentIds = JArray(rddInfo.parentIds.map(JInt(_)).toList)
("RDD ID" -> rddInfo.id) ~
("Name" -> rddInfo.name) ~
("Scope" -> rddInfo.scope.map(_.toJson)) ~
("Callsite" -> rddInfo.callSite) ~
("Parent IDs" -> parentIds) ~
("Storage Level" -> storageLevel) ~
("Barrier" -> rddInfo.isBarrier) ~
("Number of Partitions" -> rddInfo.numPartitions) ~
("Number of Cached Partitions" -> rddInfo.numCachedPartitions) ~
("Memory Size" -> rddInfo.memSize) ~
("Disk Size" -> rddInfo.diskSize)
}
def storageLevelToJson(storageLevel: StorageLevel): JValue = {
("Use Disk" -> storageLevel.useDisk) ~
("Use Memory" -> storageLevel.useMemory) ~
("Deserialized" -> storageLevel.deserialized) ~
("Replication" -> storageLevel.replication)
}
def blockStatusToJson(blockStatus: BlockStatus): JValue = {
val storageLevel = storageLevelToJson(blockStatus.storageLevel)
("Storage Level" -> storageLevel) ~
("Memory Size" -> blockStatus.memSize) ~
("Disk Size" -> blockStatus.diskSize)
}
def executorInfoToJson(executorInfo: ExecutorInfo): JValue = {
("Host" -> executorInfo.executorHost) ~
("Total Cores" -> executorInfo.totalCores) ~
("Log Urls" -> mapToJson(executorInfo.logUrlMap)) ~
("Attributes" -> mapToJson(executorInfo.attributes)) ~
("Resources" -> resourcesMapToJson(executorInfo.resourcesInfo)) ~
("Resource Profile Id" -> executorInfo.resourceProfileId)
}
def resourcesMapToJson(m: Map[String, ResourceInformation]): JValue = {
val jsonFields = m.map {
case (k, v) => JField(k, v.toJson)
}
JObject(jsonFields.toList)
}
def blockUpdatedInfoToJson(blockUpdatedInfo: BlockUpdatedInfo): JValue = {
("Block Manager ID" -> blockManagerIdToJson(blockUpdatedInfo.blockManagerId)) ~
("Block ID" -> blockUpdatedInfo.blockId.toString) ~
("Storage Level" -> storageLevelToJson(blockUpdatedInfo.storageLevel)) ~
("Memory Size" -> blockUpdatedInfo.memSize) ~
("Disk Size" -> blockUpdatedInfo.diskSize)
}
def executorResourceRequestToJson(execReq: ExecutorResourceRequest): JValue = {
("Resource Name" -> execReq.resourceName) ~
("Amount" -> execReq.amount) ~
("Discovery Script" -> execReq.discoveryScript) ~
("Vendor" -> execReq.vendor)
}
def executorResourceRequestMapToJson(m: Map[String, ExecutorResourceRequest]): JValue = {
val jsonFields = m.map {
case (k, execReq) =>
JField(k, executorResourceRequestToJson(execReq))
}
JObject(jsonFields.toList)
}
def taskResourceRequestToJson(taskReq: TaskResourceRequest): JValue = {
("Resource Name" -> taskReq.resourceName) ~
("Amount" -> taskReq.amount)
}
def taskResourceRequestMapToJson(m: Map[String, TaskResourceRequest]): JValue = {
val jsonFields = m.map {
case (k, taskReq) =>
JField(k, taskResourceRequestToJson(taskReq))
}
JObject(jsonFields.toList)
}
/** ------------------------------ *
* Util JSON serialization methods |
* ------------------------------- */
def mapToJson(m: Map[String, String]): JValue = {
val jsonFields = m.map { case (k, v) => JField(k, JString(v)) }
JObject(jsonFields.toList)
}
def propertiesToJson(properties: Properties): JValue = {
Option(properties).map { p =>
mapToJson(p.asScala)
}.getOrElse(JNothing)
}
def UUIDToJson(id: UUID): JValue = {
("Least Significant Bits" -> id.getLeastSignificantBits) ~
("Most Significant Bits" -> id.getMostSignificantBits)
}
def stackTraceToJson(stackTrace: Array[StackTraceElement]): JValue = {
JArray(stackTrace.map { case line =>
("Declaring Class" -> line.getClassName) ~
("Method Name" -> line.getMethodName) ~
("File Name" -> line.getFileName) ~
("Line Number" -> line.getLineNumber)
}.toList)
}
def exceptionToJson(exception: Exception): JValue = {
("Message" -> exception.getMessage) ~
("Stack Trace" -> stackTraceToJson(exception.getStackTrace))
}
/** --------------------------------------------------- *
* JSON deserialization methods for SparkListenerEvents |
* ---------------------------------------------------- */
private object SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES {
val stageSubmitted = Utils.getFormattedClassName(SparkListenerStageSubmitted)
val stageCompleted = Utils.getFormattedClassName(SparkListenerStageCompleted)
val taskStart = Utils.getFormattedClassName(SparkListenerTaskStart)
val taskGettingResult = Utils.getFormattedClassName(SparkListenerTaskGettingResult)
val taskEnd = Utils.getFormattedClassName(SparkListenerTaskEnd)
val jobStart = Utils.getFormattedClassName(SparkListenerJobStart)
val jobEnd = Utils.getFormattedClassName(SparkListenerJobEnd)
val environmentUpdate = Utils.getFormattedClassName(SparkListenerEnvironmentUpdate)
val blockManagerAdded = Utils.getFormattedClassName(SparkListenerBlockManagerAdded)
val blockManagerRemoved = Utils.getFormattedClassName(SparkListenerBlockManagerRemoved)
val unpersistRDD = Utils.getFormattedClassName(SparkListenerUnpersistRDD)
val applicationStart = Utils.getFormattedClassName(SparkListenerApplicationStart)
val applicationEnd = Utils.getFormattedClassName(SparkListenerApplicationEnd)
val executorAdded = Utils.getFormattedClassName(SparkListenerExecutorAdded)
val executorRemoved = Utils.getFormattedClassName(SparkListenerExecutorRemoved)
val logStart = Utils.getFormattedClassName(SparkListenerLogStart)
val metricsUpdate = Utils.getFormattedClassName(SparkListenerExecutorMetricsUpdate)
val stageExecutorMetrics = Utils.getFormattedClassName(SparkListenerStageExecutorMetrics)
val blockUpdate = Utils.getFormattedClassName(SparkListenerBlockUpdated)
val resourceProfileAdded = Utils.getFormattedClassName(SparkListenerResourceProfileAdded)
}
def sparkEventFromJson(json: JValue): SparkListenerEvent = {
import SPARK_LISTENER_EVENT_FORMATTED_CLASS_NAMES._
(json \\ "Event").extract[String] match {
case `stageSubmitted` => stageSubmittedFromJson(json)
case `stageCompleted` => stageCompletedFromJson(json)
case `taskStart` => taskStartFromJson(json)
case `taskGettingResult` => taskGettingResultFromJson(json)
case `taskEnd` => taskEndFromJson(json)
case `jobStart` => jobStartFromJson(json)
case `jobEnd` => jobEndFromJson(json)
case `environmentUpdate` => environmentUpdateFromJson(json)
case `blockManagerAdded` => blockManagerAddedFromJson(json)
case `blockManagerRemoved` => blockManagerRemovedFromJson(json)
case `unpersistRDD` => unpersistRDDFromJson(json)
case `applicationStart` => applicationStartFromJson(json)
case `applicationEnd` => applicationEndFromJson(json)
case `executorAdded` => executorAddedFromJson(json)
case `executorRemoved` => executorRemovedFromJson(json)
case `logStart` => logStartFromJson(json)
case `metricsUpdate` => executorMetricsUpdateFromJson(json)
case `stageExecutorMetrics` => stageExecutorMetricsFromJson(json)
case `blockUpdate` => blockUpdateFromJson(json)
case `resourceProfileAdded` => resourceProfileAddedFromJson(json)
case other => mapper.readValue(compact(render(json)), Utils.classForName(other))
.asInstanceOf[SparkListenerEvent]
}
}
def stageSubmittedFromJson(json: JValue): SparkListenerStageSubmitted = {
val stageInfo = stageInfoFromJson(json \\ "Stage Info")
val properties = propertiesFromJson(json \\ "Properties")
SparkListenerStageSubmitted(stageInfo, properties)
}
def stageCompletedFromJson(json: JValue): SparkListenerStageCompleted = {
val stageInfo = stageInfoFromJson(json \\ "Stage Info")
SparkListenerStageCompleted(stageInfo)
}
def taskStartFromJson(json: JValue): SparkListenerTaskStart = {
val stageId = (json \\ "Stage ID").extract[Int]
val stageAttemptId =
jsonOption(json \\ "Stage Attempt ID").map(_.extract[Int]).getOrElse(0)
val taskInfo = taskInfoFromJson(json \\ "Task Info")
SparkListenerTaskStart(stageId, stageAttemptId, taskInfo)
}
def taskGettingResultFromJson(json: JValue): SparkListenerTaskGettingResult = {
val taskInfo = taskInfoFromJson(json \\ "Task Info")
SparkListenerTaskGettingResult(taskInfo)
}
/** Extract the executor metrics from JSON. */
def executorMetricsFromJson(json: JValue): ExecutorMetrics = {
val metrics =
ExecutorMetricType.metricToOffset.map { case (metric, _) =>
metric -> jsonOption(json \\ metric).map(_.extract[Long]).getOrElse(0L)
}
new ExecutorMetrics(metrics.toMap)
}
def taskEndFromJson(json: JValue): SparkListenerTaskEnd = {
val stageId = (json \\ "Stage ID").extract[Int]
val stageAttemptId =
jsonOption(json \\ "Stage Attempt ID").map(_.extract[Int]).getOrElse(0)
val taskType = (json \\ "Task Type").extract[String]
val taskEndReason = taskEndReasonFromJson(json \\ "Task End Reason")
val taskInfo = taskInfoFromJson(json \\ "Task Info")
val executorMetrics = executorMetricsFromJson(json \\ "Task Executor Metrics")
val taskMetrics = taskMetricsFromJson(json \\ "Task Metrics")
SparkListenerTaskEnd(stageId, stageAttemptId, taskType, taskEndReason, taskInfo,
executorMetrics, taskMetrics)
}
def jobStartFromJson(json: JValue): SparkListenerJobStart = {
val jobId = (json \\ "Job ID").extract[Int]
val submissionTime =
jsonOption(json \\ "Submission Time").map(_.extract[Long]).getOrElse(-1L)
val stageIds = (json \\ "Stage IDs").extract[List[JValue]].map(_.extract[Int])
val properties = propertiesFromJson(json \\ "Properties")
// The "Stage Infos" field was added in Spark 1.2.0
val stageInfos = jsonOption(json \\ "Stage Infos")
.map(_.extract[Seq[JValue]].map(stageInfoFromJson)).getOrElse {
stageIds.map { id =>
new StageInfo(id, 0, "unknown", 0, Seq.empty, Seq.empty, "unknown",
resourceProfileId = ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID)
}
}
SparkListenerJobStart(jobId, submissionTime, stageInfos, properties)
}
def jobEndFromJson(json: JValue): SparkListenerJobEnd = {
val jobId = (json \\ "Job ID").extract[Int]
val completionTime =
jsonOption(json \\ "Completion Time").map(_.extract[Long]).getOrElse(-1L)
val jobResult = jobResultFromJson(json \\ "Job Result")
SparkListenerJobEnd(jobId, completionTime, jobResult)
}
def resourceProfileAddedFromJson(json: JValue): SparkListenerResourceProfileAdded = {
val profId = (json \\ "Resource Profile Id").extract[Int]
val executorReqs = executorResourceRequestMapFromJson(json \\ "Executor Resource Requests")
val taskReqs = taskResourceRequestMapFromJson(json \\ "Task Resource Requests")
val rp = new ResourceProfile(executorReqs.toMap, taskReqs.toMap)
rp.setResourceProfileId(profId)
SparkListenerResourceProfileAdded(rp)
}
def executorResourceRequestFromJson(json: JValue): ExecutorResourceRequest = {
val rName = (json \\ "Resource Name").extract[String]
val amount = (json \\ "Amount").extract[Int]
val discoveryScript = (json \\ "Discovery Script").extract[String]
val vendor = (json \\ "Vendor").extract[String]
new ExecutorResourceRequest(rName, amount, discoveryScript, vendor)
}
def taskResourceRequestFromJson(json: JValue): TaskResourceRequest = {
val rName = (json \\ "Resource Name").extract[String]
val amount = (json \\ "Amount").extract[Int]
new TaskResourceRequest(rName, amount)
}
def taskResourceRequestMapFromJson(json: JValue): Map[String, TaskResourceRequest] = {
val jsonFields = json.asInstanceOf[JObject].obj
jsonFields.map { case JField(k, v) =>
val req = taskResourceRequestFromJson(v)
(k, req)
}.toMap
}
def executorResourceRequestMapFromJson(json: JValue): Map[String, ExecutorResourceRequest] = {
val jsonFields = json.asInstanceOf[JObject].obj
jsonFields.map { case JField(k, v) =>
val req = executorResourceRequestFromJson(v)
(k, req)
}.toMap
}
def environmentUpdateFromJson(json: JValue): SparkListenerEnvironmentUpdate = {
    // For compatibility with previous event logs
val hadoopProperties = jsonOption(json \\ "Hadoop Properties").map(mapFromJson(_).toSeq)
.getOrElse(Seq.empty)
val environmentDetails = Map[String, Seq[(String, String)]](
"JVM Information" -> mapFromJson(json \\ "JVM Information").toSeq,
"Spark Properties" -> mapFromJson(json \\ "Spark Properties").toSeq,
"Hadoop Properties" -> hadoopProperties,
"System Properties" -> mapFromJson(json \\ "System Properties").toSeq,
"Classpath Entries" -> mapFromJson(json \\ "Classpath Entries").toSeq)
SparkListenerEnvironmentUpdate(environmentDetails)
}
def blockManagerAddedFromJson(json: JValue): SparkListenerBlockManagerAdded = {
val blockManagerId = blockManagerIdFromJson(json \\ "Block Manager ID")
val maxMem = (json \\ "Maximum Memory").extract[Long]
val time = jsonOption(json \\ "Timestamp").map(_.extract[Long]).getOrElse(-1L)
val maxOnHeapMem = jsonOption(json \\ "Maximum Onheap Memory").map(_.extract[Long])
val maxOffHeapMem = jsonOption(json \\ "Maximum Offheap Memory").map(_.extract[Long])
SparkListenerBlockManagerAdded(time, blockManagerId, maxMem, maxOnHeapMem, maxOffHeapMem)
}
def blockManagerRemovedFromJson(json: JValue): SparkListenerBlockManagerRemoved = {
val blockManagerId = blockManagerIdFromJson(json \\ "Block Manager ID")
val time = jsonOption(json \\ "Timestamp").map(_.extract[Long]).getOrElse(-1L)
SparkListenerBlockManagerRemoved(time, blockManagerId)
}
def unpersistRDDFromJson(json: JValue): SparkListenerUnpersistRDD = {
SparkListenerUnpersistRDD((json \\ "RDD ID").extract[Int])
}
def applicationStartFromJson(json: JValue): SparkListenerApplicationStart = {
val appName = (json \\ "App Name").extract[String]
val appId = jsonOption(json \\ "App ID").map(_.extract[String])
val time = (json \\ "Timestamp").extract[Long]
val sparkUser = (json \\ "User").extract[String]
val appAttemptId = jsonOption(json \\ "App Attempt ID").map(_.extract[String])
val driverLogs = jsonOption(json \\ "Driver Logs").map(mapFromJson)
val driverAttributes = jsonOption(json \\ "Driver Attributes").map(mapFromJson)
SparkListenerApplicationStart(appName, appId, time, sparkUser, appAttemptId, driverLogs,
driverAttributes)
}
def applicationEndFromJson(json: JValue): SparkListenerApplicationEnd = {
SparkListenerApplicationEnd((json \\ "Timestamp").extract[Long])
}
def executorAddedFromJson(json: JValue): SparkListenerExecutorAdded = {
val time = (json \\ "Timestamp").extract[Long]
val executorId = (json \\ "Executor ID").extract[String]
val executorInfo = executorInfoFromJson(json \\ "Executor Info")
SparkListenerExecutorAdded(time, executorId, executorInfo)
}
def executorRemovedFromJson(json: JValue): SparkListenerExecutorRemoved = {
val time = (json \\ "Timestamp").extract[Long]
val executorId = (json \\ "Executor ID").extract[String]
val reason = (json \\ "Removed Reason").extract[String]
SparkListenerExecutorRemoved(time, executorId, reason)
}
def logStartFromJson(json: JValue): SparkListenerLogStart = {
val sparkVersion = (json \\ "Spark Version").extract[String]
SparkListenerLogStart(sparkVersion)
}
def executorMetricsUpdateFromJson(json: JValue): SparkListenerExecutorMetricsUpdate = {
val execInfo = (json \\ "Executor ID").extract[String]
val accumUpdates = (json \\ "Metrics Updated").extract[List[JValue]].map { json =>
val taskId = (json \\ "Task ID").extract[Long]
val stageId = (json \\ "Stage ID").extract[Int]
val stageAttemptId = (json \\ "Stage Attempt ID").extract[Int]
val updates =
(json \\ "Accumulator Updates").extract[List[JValue]].map(accumulableInfoFromJson)
(taskId, stageId, stageAttemptId, updates)
}
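    // "Executor Metrics Updated" may be missing from event logs written by older Spark
    // versions, so treat JNothing as an empty map instead of failing.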
val executorUpdates = (json \\ "Executor Metrics Updated") match {
case JNothing => Map.empty[(Int, Int), ExecutorMetrics]
case value: JValue => value.extract[List[JValue]].map { json =>
val stageId = (json \\ "Stage ID").extract[Int]
val stageAttemptId = (json \\ "Stage Attempt ID").extract[Int]
val executorMetrics = executorMetricsFromJson(json \\ "Executor Metrics")
((stageId, stageAttemptId) -> executorMetrics)
}.toMap
}
SparkListenerExecutorMetricsUpdate(execInfo, accumUpdates, executorUpdates)
}
def stageExecutorMetricsFromJson(json: JValue): SparkListenerStageExecutorMetrics = {
val execId = (json \\ "Executor ID").extract[String]
val stageId = (json \\ "Stage ID").extract[Int]
val stageAttemptId = (json \\ "Stage Attempt ID").extract[Int]
val executorMetrics = executorMetricsFromJson(json \\ "Executor Metrics")
SparkListenerStageExecutorMetrics(execId, stageId, stageAttemptId, executorMetrics)
}
def blockUpdateFromJson(json: JValue): SparkListenerBlockUpdated = {
val blockUpdatedInfo = blockUpdatedInfoFromJson(json \\ "Block Updated Info")
SparkListenerBlockUpdated(blockUpdatedInfo)
}
/** --------------------------------------------------------------------- *
* JSON deserialization methods for classes SparkListenerEvents depend on |
* ---------------------------------------------------------------------- */
def stageInfoFromJson(json: JValue): StageInfo = {
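    // Several of the fields read below are wrapped in jsonOption because they were added in
    // later Spark releases (or may be absent); they fall back to defaults so that event logs
    // written by older versions can still be replayed.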
val stageId = (json \\ "Stage ID").extract[Int]
val attemptId = jsonOption(json \\ "Stage Attempt ID").map(_.extract[Int]).getOrElse(0)
val stageName = (json \\ "Stage Name").extract[String]
val numTasks = (json \\ "Number of Tasks").extract[Int]
val rddInfos = (json \\ "RDD Info").extract[List[JValue]].map(rddInfoFromJson)
val parentIds = jsonOption(json \\ "Parent IDs")
.map { l => l.extract[List[JValue]].map(_.extract[Int]) }
.getOrElse(Seq.empty)
val details = jsonOption(json \\ "Details").map(_.extract[String]).getOrElse("")
val submissionTime = jsonOption(json \\ "Submission Time").map(_.extract[Long])
val completionTime = jsonOption(json \\ "Completion Time").map(_.extract[Long])
val failureReason = jsonOption(json \\ "Failure Reason").map(_.extract[String])
val accumulatedValues = {
jsonOption(json \\ "Accumulables").map(_.extract[List[JValue]]) match {
case Some(values) => values.map(accumulableInfoFromJson)
case None => Seq.empty[AccumulableInfo]
}
}
val rpId = jsonOption(json \\ "Resource Profile Id").map(_.extract[Int])
val stageProf = rpId.getOrElse(ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID)
val stageInfo = new StageInfo(stageId, attemptId, stageName, numTasks, rddInfos,
parentIds, details, resourceProfileId = stageProf)
stageInfo.submissionTime = submissionTime
stageInfo.completionTime = completionTime
stageInfo.failureReason = failureReason
for (accInfo <- accumulatedValues) {
stageInfo.accumulables(accInfo.id) = accInfo
}
stageInfo
}
def taskInfoFromJson(json: JValue): TaskInfo = {
val taskId = (json \\ "Task ID").extract[Long]
val index = (json \\ "Index").extract[Int]
val attempt = jsonOption(json \\ "Attempt").map(_.extract[Int]).getOrElse(1)
val launchTime = (json \\ "Launch Time").extract[Long]
val executorId = (json \\ "Executor ID").extract[String].intern()
val host = (json \\ "Host").extract[String].intern()
val taskLocality = TaskLocality.withName((json \\ "Locality").extract[String])
val speculative = jsonOption(json \\ "Speculative").exists(_.extract[Boolean])
val gettingResultTime = (json \\ "Getting Result Time").extract[Long]
val finishTime = (json \\ "Finish Time").extract[Long]
val failed = (json \\ "Failed").extract[Boolean]
val killed = jsonOption(json \\ "Killed").exists(_.extract[Boolean])
val accumulables = jsonOption(json \\ "Accumulables").map(_.extract[Seq[JValue]]) match {
case Some(values) => values.map(accumulableInfoFromJson)
case None => Seq.empty[AccumulableInfo]
}
val taskInfo =
new TaskInfo(taskId, index, attempt, launchTime, executorId, host, taskLocality, speculative)
taskInfo.gettingResultTime = gettingResultTime
taskInfo.finishTime = finishTime
taskInfo.failed = failed
taskInfo.killed = killed
taskInfo.setAccumulables(accumulables)
taskInfo
}
def accumulableInfoFromJson(json: JValue): AccumulableInfo = {
val id = (json \\ "ID").extract[Long]
val name = jsonOption(json \\ "Name").map(_.extract[String])
val update = jsonOption(json \\ "Update").map { v => accumValueFromJson(name, v) }
val value = jsonOption(json \\ "Value").map { v => accumValueFromJson(name, v) }
val internal = jsonOption(json \\ "Internal").exists(_.extract[Boolean])
val countFailedValues =
jsonOption(json \\ "Count Failed Values").exists(_.extract[Boolean])
val metadata = jsonOption(json \\ "Metadata").map(_.extract[String])
new AccumulableInfo(id, name, update, value, internal, countFailedValues, metadata)
}
/**
* Deserialize the value of an accumulator from JSON.
*
* For accumulators representing internal task metrics, this looks up the relevant
* [[AccumulatorParam]] to deserialize the value accordingly. For all other
* accumulators, this will simply deserialize the value as a string.
*
* The behavior here must match that of [[accumValueToJson]]. Exposed for testing.
*/
private[util] def accumValueFromJson(name: Option[String], value: JValue): Any = {
if (name.exists(_.startsWith(InternalAccumulator.METRICS_PREFIX))) {
value match {
case JInt(v) => v.toLong
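        // An array holds the internal "updated blocks" metric: a list of (Block ID, Status)
        // pairs, rebuilt here as a Java list to match the accumulator's value type.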
case JArray(v) =>
v.map { blockJson =>
val id = BlockId((blockJson \\ "Block ID").extract[String])
val status = blockStatusFromJson(blockJson \\ "Status")
(id, status)
}.asJava
case _ => throw new IllegalArgumentException(s"unexpected json value $value for " +
"accumulator " + name.get)
}
} else {
value.extract[String]
}
}
def taskMetricsFromJson(json: JValue): TaskMetrics = {
val metrics = TaskMetrics.empty
if (json == JNothing) {
return metrics
}
metrics.setExecutorDeserializeTime((json \\ "Executor Deserialize Time").extract[Long])
metrics.setExecutorDeserializeCpuTime((json \\ "Executor Deserialize CPU Time") match {
case JNothing => 0
case x => x.extract[Long]
})
metrics.setExecutorRunTime((json \\ "Executor Run Time").extract[Long])
metrics.setExecutorCpuTime((json \\ "Executor CPU Time") match {
case JNothing => 0
case x => x.extract[Long]
})
metrics.setPeakExecutionMemory((json \\ "Peak Execution Memory") match {
case JNothing => 0
case x => x.extract[Long]
})
metrics.setResultSize((json \\ "Result Size").extract[Long])
metrics.setJvmGCTime((json \\ "JVM GC Time").extract[Long])
metrics.setResultSerializationTime((json \\ "Result Serialization Time").extract[Long])
metrics.incMemoryBytesSpilled((json \\ "Memory Bytes Spilled").extract[Long])
metrics.incDiskBytesSpilled((json \\ "Disk Bytes Spilled").extract[Long])
// Shuffle read metrics
jsonOption(json \\ "Shuffle Read Metrics").foreach { readJson =>
val readMetrics = metrics.createTempShuffleReadMetrics()
readMetrics.incRemoteBlocksFetched((readJson \\ "Remote Blocks Fetched").extract[Int])
readMetrics.incLocalBlocksFetched((readJson \\ "Local Blocks Fetched").extract[Int])
readMetrics.incRemoteBytesRead((readJson \\ "Remote Bytes Read").extract[Long])
jsonOption(readJson \\ "Remote Bytes Read To Disk")
.foreach { v => readMetrics.incRemoteBytesReadToDisk(v.extract[Long])}
readMetrics.incLocalBytesRead(
jsonOption(readJson \\ "Local Bytes Read").map(_.extract[Long]).getOrElse(0L))
readMetrics.incFetchWaitTime((readJson \\ "Fetch Wait Time").extract[Long])
readMetrics.incRecordsRead(
jsonOption(readJson \\ "Total Records Read").map(_.extract[Long]).getOrElse(0L))
metrics.mergeShuffleReadMetrics()
}
// Shuffle write metrics
// TODO: Drop the redundant "Shuffle" since it's inconsistent with related classes.
jsonOption(json \\ "Shuffle Write Metrics").foreach { writeJson =>
val writeMetrics = metrics.shuffleWriteMetrics
writeMetrics.incBytesWritten((writeJson \\ "Shuffle Bytes Written").extract[Long])
writeMetrics.incRecordsWritten(
jsonOption(writeJson \\ "Shuffle Records Written").map(_.extract[Long]).getOrElse(0L))
writeMetrics.incWriteTime((writeJson \\ "Shuffle Write Time").extract[Long])
}
// Output metrics
jsonOption(json \\ "Output Metrics").foreach { outJson =>
val outputMetrics = metrics.outputMetrics
outputMetrics.setBytesWritten((outJson \\ "Bytes Written").extract[Long])
outputMetrics.setRecordsWritten(
jsonOption(outJson \\ "Records Written").map(_.extract[Long]).getOrElse(0L))
}
// Input metrics
jsonOption(json \\ "Input Metrics").foreach { inJson =>
val inputMetrics = metrics.inputMetrics
inputMetrics.incBytesRead((inJson \\ "Bytes Read").extract[Long])
inputMetrics.incRecordsRead(
jsonOption(inJson \\ "Records Read").map(_.extract[Long]).getOrElse(0L))
}
// Updated blocks
jsonOption(json \\ "Updated Blocks").foreach { blocksJson =>
metrics.setUpdatedBlockStatuses(blocksJson.extract[List[JValue]].map { blockJson =>
val id = BlockId((blockJson \\ "Block ID").extract[String])
val status = blockStatusFromJson(blockJson \\ "Status")
(id, status)
})
}
metrics
}
private object TASK_END_REASON_FORMATTED_CLASS_NAMES {
val success = Utils.getFormattedClassName(Success)
val resubmitted = Utils.getFormattedClassName(Resubmitted)
val fetchFailed = Utils.getFormattedClassName(FetchFailed)
val exceptionFailure = Utils.getFormattedClassName(ExceptionFailure)
val taskResultLost = Utils.getFormattedClassName(TaskResultLost)
val taskKilled = Utils.getFormattedClassName(TaskKilled)
val taskCommitDenied = Utils.getFormattedClassName(TaskCommitDenied)
val executorLostFailure = Utils.getFormattedClassName(ExecutorLostFailure)
val unknownReason = Utils.getFormattedClassName(UnknownReason)
}
def taskEndReasonFromJson(json: JValue): TaskEndReason = {
import TASK_END_REASON_FORMATTED_CLASS_NAMES._
(json \\ "Reason").extract[String] match {
case `success` => Success
case `resubmitted` => Resubmitted
case `fetchFailed` =>
val blockManagerAddress = blockManagerIdFromJson(json \\ "Block Manager Address")
val shuffleId = (json \\ "Shuffle ID").extract[Int]
val mapId = (json \\ "Map ID").extract[Long]
val mapIndex = json \\ "Map Index" match {
case JNothing =>
// Note, we use the invalid value Int.MinValue here to fill the map index for backward
// compatibility. Otherwise, the fetch failed event will be dropped when the history
// server loads the event log written by the Spark version before 3.0.
Int.MinValue
case x => x.extract[Int]
}
val reduceId = (json \\ "Reduce ID").extract[Int]
val message = jsonOption(json \\ "Message").map(_.extract[String])
new FetchFailed(blockManagerAddress, shuffleId, mapId, mapIndex, reduceId,
message.getOrElse("Unknown reason"))
case `exceptionFailure` =>
val className = (json \\ "Class Name").extract[String]
val description = (json \\ "Description").extract[String]
val stackTrace = stackTraceFromJson(json \\ "Stack Trace")
val fullStackTrace =
jsonOption(json \\ "Full Stack Trace").map(_.extract[String]).orNull
// Fallback on getting accumulator updates from TaskMetrics, which was logged in Spark 1.x
val accumUpdates = jsonOption(json \\ "Accumulator Updates")
.map(_.extract[List[JValue]].map(accumulableInfoFromJson))
.getOrElse(taskMetricsFromJson(json \\ "Metrics").accumulators().map(acc => {
acc.toInfo(Some(acc.value), None)
}))
ExceptionFailure(className, description, stackTrace, fullStackTrace, None, accumUpdates)
case `taskResultLost` => TaskResultLost
case `taskKilled` =>
val killReason = jsonOption(json \\ "Kill Reason")
.map(_.extract[String]).getOrElse("unknown reason")
val accumUpdates = jsonOption(json \\ "Accumulator Updates")
.map(_.extract[List[JValue]].map(accumulableInfoFromJson))
.getOrElse(Seq[AccumulableInfo]())
TaskKilled(killReason, accumUpdates)
case `taskCommitDenied` =>
// Unfortunately, the `TaskCommitDenied` message was introduced in 1.3.0 but the JSON
// de/serialization logic was not added until 1.5.1. To provide backward compatibility
// for reading those logs, we need to provide default values for all the fields.
val jobId = jsonOption(json \\ "Job ID").map(_.extract[Int]).getOrElse(-1)
val partitionId = jsonOption(json \\ "Partition ID").map(_.extract[Int]).getOrElse(-1)
val attemptNo = jsonOption(json \\ "Attempt Number").map(_.extract[Int]).getOrElse(-1)
TaskCommitDenied(jobId, partitionId, attemptNo)
case `executorLostFailure` =>
val exitCausedByApp = jsonOption(json \\ "Exit Caused By App").map(_.extract[Boolean])
val executorId = jsonOption(json \\ "Executor ID").map(_.extract[String])
val reason = jsonOption(json \\ "Loss Reason").map(_.extract[String])
ExecutorLostFailure(
executorId.getOrElse("Unknown"),
exitCausedByApp.getOrElse(true),
reason)
case `unknownReason` => UnknownReason
}
}
def blockManagerIdFromJson(json: JValue): BlockManagerId = {
// On metadata fetch fail, block manager ID can be null (SPARK-4471)
if (json == JNothing) {
return null
}
val executorId = (json \\ "Executor ID").extract[String].intern()
val host = (json \\ "Host").extract[String].intern()
val port = (json \\ "Port").extract[Int]
BlockManagerId(executorId, host, port)
}
private object JOB_RESULT_FORMATTED_CLASS_NAMES {
val jobSucceeded = Utils.getFormattedClassName(JobSucceeded)
val jobFailed = Utils.getFormattedClassName(JobFailed)
}
def jobResultFromJson(json: JValue): JobResult = {
import JOB_RESULT_FORMATTED_CLASS_NAMES._
(json \\ "Result").extract[String] match {
case `jobSucceeded` => JobSucceeded
case `jobFailed` =>
val exception = exceptionFromJson(json \\ "Exception")
new JobFailed(exception)
}
}
def rddInfoFromJson(json: JValue): RDDInfo = {
val rddId = (json \\ "RDD ID").extract[Int]
val name = (json \\ "Name").extract[String]
val scope = jsonOption(json \\ "Scope")
.map(_.extract[String])
.map(RDDOperationScope.fromJson)
val callsite = jsonOption(json \\ "Callsite").map(_.extract[String]).getOrElse("")
val parentIds = jsonOption(json \\ "Parent IDs")
.map { l => l.extract[List[JValue]].map(_.extract[Int]) }
.getOrElse(Seq.empty)
val storageLevel = storageLevelFromJson(json \\ "Storage Level")
val isBarrier = jsonOption(json \\ "Barrier").map(_.extract[Boolean]).getOrElse(false)
val numPartitions = (json \\ "Number of Partitions").extract[Int]
val numCachedPartitions = (json \\ "Number of Cached Partitions").extract[Int]
val memSize = (json \\ "Memory Size").extract[Long]
val diskSize = (json \\ "Disk Size").extract[Long]
val rddInfo =
new RDDInfo(rddId, name, numPartitions, storageLevel, isBarrier, parentIds, callsite, scope)
rddInfo.numCachedPartitions = numCachedPartitions
rddInfo.memSize = memSize
rddInfo.diskSize = diskSize
rddInfo
}
def storageLevelFromJson(json: JValue): StorageLevel = {
val useDisk = (json \\ "Use Disk").extract[Boolean]
val useMemory = (json \\ "Use Memory").extract[Boolean]
val deserialized = (json \\ "Deserialized").extract[Boolean]
val replication = (json \\ "Replication").extract[Int]
StorageLevel(useDisk, useMemory, deserialized, replication)
}
def blockStatusFromJson(json: JValue): BlockStatus = {
val storageLevel = storageLevelFromJson(json \\ "Storage Level")
val memorySize = (json \\ "Memory Size").extract[Long]
val diskSize = (json \\ "Disk Size").extract[Long]
BlockStatus(storageLevel, memorySize, diskSize)
}
def executorInfoFromJson(json: JValue): ExecutorInfo = {
val executorHost = (json \\ "Host").extract[String]
val totalCores = (json \\ "Total Cores").extract[Int]
val logUrls = mapFromJson(json \\ "Log Urls").toMap
val attributes = jsonOption(json \\ "Attributes") match {
case Some(attr) => mapFromJson(attr).toMap
case None => Map.empty[String, String]
}
val resources = jsonOption(json \\ "Resources") match {
case Some(resources) => resourcesMapFromJson(resources).toMap
case None => Map.empty[String, ResourceInformation]
}
val resourceProfileId = jsonOption(json \\ "Resource Profile Id") match {
case Some(id) => id.extract[Int]
case None => ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID
}
new ExecutorInfo(executorHost, totalCores, logUrls, attributes.toMap, resources.toMap,
resourceProfileId)
}
def blockUpdatedInfoFromJson(json: JValue): BlockUpdatedInfo = {
val blockManagerId = blockManagerIdFromJson(json \\ "Block Manager ID")
val blockId = BlockId((json \\ "Block ID").extract[String])
val storageLevel = storageLevelFromJson(json \\ "Storage Level")
val memorySize = (json \\ "Memory Size").extract[Long]
val diskSize = (json \\ "Disk Size").extract[Long]
BlockUpdatedInfo(blockManagerId, blockId, storageLevel, memorySize, diskSize)
}
def resourcesMapFromJson(json: JValue): Map[String, ResourceInformation] = {
val jsonFields = json.asInstanceOf[JObject].obj
jsonFields.map { case JField(k, v) =>
val resourceInfo = ResourceInformation.parseJson(v)
(k, resourceInfo)
}.toMap
}
/** -------------------------------- *
* Util JSON deserialization methods |
* --------------------------------- */
def mapFromJson(json: JValue): Map[String, String] = {
val jsonFields = json.asInstanceOf[JObject].obj
jsonFields.map { case JField(k, JString(v)) => (k, v) }.toMap
}
def propertiesFromJson(json: JValue): Properties = {
jsonOption(json).map { value =>
val properties = new Properties
mapFromJson(json).foreach { case (k, v) => properties.setProperty(k, v) }
properties
}.getOrElse(null)
}
def UUIDFromJson(json: JValue): UUID = {
val leastSignificantBits = (json \\ "Least Significant Bits").extract[Long]
val mostSignificantBits = (json \\ "Most Significant Bits").extract[Long]
new UUID(leastSignificantBits, mostSignificantBits)
}
def stackTraceFromJson(json: JValue): Array[StackTraceElement] = {
json.extract[List[JValue]].map { line =>
val declaringClass = (line \\ "Declaring Class").extract[String]
val methodName = (line \\ "Method Name").extract[String]
val fileName = (line \\ "File Name").extract[String]
val lineNumber = (line \\ "Line Number").extract[Int]
new StackTraceElement(declaringClass, methodName, fileName, lineNumber)
}.toArray
}
def exceptionFromJson(json: JValue): Exception = {
val e = new Exception((json \\ "Message").extract[String])
e.setStackTrace(stackTraceFromJson(json \\ "Stack Trace"))
e
}
/** Return an option that translates JNothing to None */
private def jsonOption(json: JValue): Option[JValue] = {
json match {
case JNothing => None
case value: JValue => Some(value)
}
}
private def emptyJson: JObject = JObject(List[JField]())
}
| dbtsai/spark | core/src/main/scala/org/apache/spark/util/JsonProtocol.scala | Scala | apache-2.0 | 58,986 |
/*
* Artificial Intelligence for Humans
* Volume 2: Nature Inspired Algorithms
* Java Version
* http://www.aifh.org
* http://www.jeffheaton.com
*
* Code repository:
* https://github.com/jeffheaton/aifh
*
* Copyright 2014 by Jeff Heaton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information on Heaton Research copyrights, licenses
* and trademarks visit:
* http://www.heatonresearch.com/copyright
*/
package com.heatonresearch.aifh.genetic.trees
import com.heatonresearch.aifh.evolutionary.genome.Genome
import com.heatonresearch.aifh.evolutionary.opp.EvolutionaryOperator
import com.heatonresearch.aifh.evolutionary.train.EvolutionaryAlgorithm
import com.heatonresearch.aifh.randomize.GenerateRandom
class MutateTree(owner: EvolutionaryAlgorithm, theMaxGraftLength: Int) extends EvolutionaryOperator(owner, 1, 1) {
private val maxGraftLength = Math.max(1, theMaxGraftLength)
override def performOperation(rnd: GenerateRandom, parents: Array[Genome], parentIndex: Int,
offspring: Array[Genome], offspringIndex: Int) {
val parent1 = parents(parentIndex).asInstanceOf[TreeGenome]
val eval = parent1.evaluator
val off1 = owner.population.genomeFactory.factor(parent1).asInstanceOf[TreeGenome]
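    // Pick a random graft point in the copied tree and grow a random replacement subtree
    // (of length between 1 and maxGraftLength) to splice in at that point.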
val off1Point = eval.sampleRandomNode(rnd, off1.root)
val len = rnd.nextInt(1, this.maxGraftLength + 1)
val randomSequence = eval.grow(rnd, len)
if (off1Point.parent == null) {
off1.root = randomSequence
}
else {
val idx = off1Point.parent.getChildren.indexOf(off1Point.child)
off1Point.parent.getChildren.set(idx, randomSequence)
}
// TODO is this right - offspringIndex appears to be unused
offspring(0) = off1
}
} | PeterLauris/aifh | vol2/vol2-scala-examples/src/main/scala/com/heatonresearch/aifh/genetic/trees/MutateTree.scala | Scala | apache-2.0 | 2,255 |
package colossus.metrics
import akka.actor.SupervisorStrategy._
import akka.actor.{OneForOneStrategy, _}
import colossus.metrics.IntervalAggregator.{RegisterReporter, ReportMetrics}
import colossus.metrics.logging.ColossusLogging
import scala.concurrent.duration._
trait TagGenerator {
def tags: TagMap
}
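
/**
 * Editor's sketch (not part of the upstream file): a static [[TagGenerator]]. TagMap is a
 * plain Map[String, String], so an implementation only needs to return the tags to attach
 * to every reported metric.
 */
object ExampleStaticTags extends TagGenerator {
  def tags: TagMap = Map("service" -> "example", "datacenter" -> "us-east")
}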
/**
* Configuration class for the metric reporter
* @param metricSenders A list of [[MetricSender]] instances that the reporter will use to send metrics
* @param globalTags A map of tags to be used throughout the [[MetricReporter]].
* @param filters Tells the [[MetricReporter]] how to filter its Metrics before handing off to a Sender.
* @param includeHostInGlobalTags Whether to include the Host in the global tags.
*/
case class MetricReporterConfig(
metricSenders: Seq[MetricSender],
globalTags: Option[TagGenerator] = None,
filters: MetricReporterFilter = MetricReporterFilter.All,
includeHostInGlobalTags: Boolean = true
)
class MetricReporter(intervalAggregator: ActorRef, config: MetricReporterConfig, metricSystemName: String)
extends Actor
with ColossusLogging {
import MetricReporter._
import config._
val localHostname = java.net.InetAddress.getLocalHost.getHostName
override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 1, withinTimeRange = 3 seconds) {
case _: NullPointerException => Escalate
case _: Exception => Restart
}
private def createSender(sender: MetricSender) =
context.actorOf(sender.props, name = s"$metricSystemName-${sender.name}-sender")
private var reporters = Seq[ActorRef]()
private def compiledGlobalTags() = {
val userTags = globalTags.map { _.tags }.getOrElse(Map())
val added = if (includeHostInGlobalTags) Map("host" -> localHostname) else Map()
userTags ++ added
}
def receive = {
case ReportMetrics(m) => {
val s = MetricSender.Send(filterMetrics(m), compiledGlobalTags(), System.currentTimeMillis())
sendToReporters(s)
}
case ResetSender => {
info("resetting stats senders")
sendToReporters(PoisonPill)
reporters = metricSenders.map(createSender)
}
}
private def filterMetrics(m: MetricMap): MetricMap = {
filters match {
case MetricReporterFilter.All => m
case MetricReporterFilter.WhiteList(x) => m.filterKeys(k => x.exists(_.matches(k)))
case MetricReporterFilter.BlackList(x) => m.filterKeys(k => !x.exists(_.matches(k)))
}
}
private def sendToReporters(a: Any) {
reporters.foreach(_ ! a)
}
override def preStart() {
reporters = metricSenders.map(createSender)
intervalAggregator ! RegisterReporter(self)
}
}
object MetricReporter {
case object ResetSender
def apply(config: MetricReporterConfig, intervalAggregator: ActorRef, name: String)(
implicit fact: ActorRefFactory): ActorRef = {
fact.actorOf(Props(classOf[MetricReporter], intervalAggregator, config, name))
}
}
trait MetricSender {
def name: String
def props: Props
}
object MetricSender {
case class Send(metrics: MetricMap, globalTags: TagMap, timestamp: Long) {
def fragments = metrics.fragments(globalTags)
}
}
/**
* Tells a MetricReporter how to filter its Metrics before handing off to a Sender.
*/
sealed trait MetricReporterFilter
object MetricReporterFilter {
/**
* Do no filtering, pass all metrics through
*/
case object All extends MetricReporterFilter
/**
* Only allow metrics for the specified MetricAddresses
* @param addresses The MetricAddresses to whitelist.
*/
case class WhiteList(addresses: Seq[MetricAddress]) extends MetricReporterFilter
/**
* Allow all other metrics except for the ones in the specified MetricAddresses
* @param addresses The MetricAddresses to blacklist.
*/
case class BlackList(addresses: Seq[MetricAddress]) extends MetricReporterFilter
}
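
// Editor's note (sketch, not part of the upstream file): a reporter that only forwards a
// subset of metrics would pass a WhiteList filter in its config, e.g. (assuming
// `myAddresses: Seq[MetricAddress]` and `mySender: MetricSender` are defined elsewhere):
//
//   MetricReporterConfig(
//     metricSenders = Seq(mySender),
//     filters = MetricReporterFilter.WhiteList(myAddresses)
//   )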
| tumblr/colossus | colossus-metrics/src/main/scala/colossus/metrics/StatReporter.scala | Scala | apache-2.0 | 3,913 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.api.common.ExecutionConfig
import org.apache.flink.api.dag.Transformation
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.operators.DamBehavior
import org.apache.flink.streaming.api.transformations.{PartitionTransformation, ShuffleMode}
import org.apache.flink.streaming.runtime.partitioner.{BroadcastPartitioner, GlobalPartitioner, RebalancePartitioner}
import org.apache.flink.table.api.config.ExecutionConfigOptions
import org.apache.flink.table.data.RowData
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.codegen.{CodeGeneratorContext, HashCodeGenerator}
import org.apache.flink.table.planner.delegation.BatchPlanner
import org.apache.flink.table.planner.plan.nodes.common.CommonPhysicalExchange
import org.apache.flink.table.planner.plan.nodes.exec.{BatchExecNode, ExecNode}
import org.apache.flink.table.planner.plan.utils.FlinkRelOptUtil
import org.apache.flink.table.runtime.partitioner.BinaryHashPartitioner
import org.apache.flink.table.runtime.typeutils.RowDataTypeInfo
import org.apache.flink.table.types.logical.RowType
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.{RelDistribution, RelNode, RelWriter}
import java.util
import org.apache.flink.streaming.api.graph.GlobalDataExchangeMode
import scala.collection.JavaConversions._
/**
* This RelNode represents a change of partitioning of the input elements.
*
 * Note: range partitioning (RelDistribution.Type.RANGE_DISTRIBUTED) is not supported yet, so
 * no physical transformation can be created for such a distribution.
*/
class BatchExecExchange(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
relDistribution: RelDistribution)
extends CommonPhysicalExchange(cluster, traitSet, inputRel, relDistribution)
with BatchPhysicalRel
with BatchExecNode[RowData] {
// TODO reuse PartitionTransformation
  // currently, an Exchange's input transformation will be reused if it is reusable,
  // and different PartitionTransformation objects will be created which share the same input.
// cache input transformation to reuse
private var reusedInput: Option[Transformation[RowData]] = None
  // the required shuffle mode for a reusable BatchExecExchange
// if it's None, use value from getShuffleMode
private var requiredShuffleMode: Option[ShuffleMode] = None
override def copy(
traitSet: RelTraitSet,
newInput: RelNode,
newDistribution: RelDistribution): BatchExecExchange = {
new BatchExecExchange(cluster, traitSet, newInput, relDistribution)
}
override def explainTerms(pw: RelWriter): RelWriter = {
super.explainTerms(pw)
.itemIf("shuffle_mode", requiredShuffleMode.orNull,
requiredShuffleMode.contains(ShuffleMode.BATCH))
}
//~ ExecNode methods -----------------------------------------------------------
def setRequiredShuffleMode(shuffleMode: ShuffleMode): Unit = {
require(shuffleMode != null)
requiredShuffleMode = Some(shuffleMode)
}
private[flink] def getShuffleMode(tableConf: Configuration): ShuffleMode = {
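    // A required BATCH shuffle mode (set via setRequiredShuffleMode) always wins; otherwise
    // the mode is derived from the table configuration.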
requiredShuffleMode match {
case Some(mode) if mode eq ShuffleMode.BATCH => mode
case _ =>
if (tableConf.getString(ExecutionConfigOptions.TABLE_EXEC_SHUFFLE_MODE)
.equalsIgnoreCase(GlobalDataExchangeMode.ALL_EDGES_BLOCKING.toString)) {
ShuffleMode.BATCH
} else {
ShuffleMode.UNDEFINED
}
}
}
override def getDamBehavior: DamBehavior = {
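    // A blocking (BATCH) shuffle fully dams the data flow; otherwise only range-distributed
    // exchanges do, and everything else stays pipelined.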
val tableConfig = FlinkRelOptUtil.getTableConfigFromContext(this)
val shuffleMode = getShuffleMode(tableConfig.getConfiguration)
if (shuffleMode eq ShuffleMode.BATCH) {
return DamBehavior.FULL_DAM
}
distribution.getType match {
case RelDistribution.Type.RANGE_DISTRIBUTED => DamBehavior.FULL_DAM
case _ => DamBehavior.PIPELINED
}
}
override def getInputNodes: util.List[ExecNode[BatchPlanner, _]] =
getInputs.map(_.asInstanceOf[ExecNode[BatchPlanner, _]])
override def replaceInputNode(
ordinalInParent: Int,
newInputNode: ExecNode[BatchPlanner, _]): Unit = {
replaceInput(ordinalInParent, newInputNode.asInstanceOf[RelNode])
}
override protected def translateToPlanInternal(
planner: BatchPlanner): Transformation[RowData] = {
val input = reusedInput match {
case Some(transformation) => transformation
case None =>
val input = getInputNodes.get(0).translateToPlan(planner)
.asInstanceOf[Transformation[RowData]]
reusedInput = Some(input)
input
}
val inputType = input.getOutputType.asInstanceOf[RowDataTypeInfo]
val outputRowType = RowDataTypeInfo.of(FlinkTypeFactory.toLogicalRowType(getRowType))
val conf = planner.getTableConfig
val shuffleMode = getShuffleMode(conf.getConfiguration)
relDistribution.getType match {
case RelDistribution.Type.ANY =>
val transformation = new PartitionTransformation(
input,
null,
shuffleMode)
transformation.setOutputType(outputRowType)
transformation.setParallelism(ExecutionConfig.PARALLELISM_DEFAULT)
transformation
case RelDistribution.Type.SINGLETON =>
val transformation = new PartitionTransformation(
input,
new GlobalPartitioner[RowData],
shuffleMode)
transformation.setOutputType(outputRowType)
transformation.setParallelism(1)
transformation
case RelDistribution.Type.RANDOM_DISTRIBUTED =>
val transformation = new PartitionTransformation(
input,
new RebalancePartitioner[RowData],
shuffleMode)
transformation.setOutputType(outputRowType)
transformation.setParallelism(ExecutionConfig.PARALLELISM_DEFAULT)
transformation
case RelDistribution.Type.BROADCAST_DISTRIBUTED =>
val transformation = new PartitionTransformation(
input,
new BroadcastPartitioner[RowData],
shuffleMode)
transformation.setOutputType(outputRowType)
transformation.setParallelism(ExecutionConfig.PARALLELISM_DEFAULT)
transformation
case RelDistribution.Type.HASH_DISTRIBUTED =>
// TODO Eliminate duplicate keys
val keys = relDistribution.getKeys
val partitioner = new BinaryHashPartitioner(
HashCodeGenerator.generateRowHash(
CodeGeneratorContext(planner.getTableConfig),
RowType.of(inputType.getLogicalTypes: _*),
"HashPartitioner",
keys.map(_.intValue()).toArray),
keys.map(getInput.getRowType.getFieldNames.get(_)).toArray
)
val transformation = new PartitionTransformation(
input,
partitioner,
shuffleMode)
transformation.setOutputType(outputRowType)
transformation.setParallelism(ExecutionConfig.PARALLELISM_DEFAULT)
transformation
case _ =>
throw new UnsupportedOperationException(
s"not support RelDistribution: ${relDistribution.getType} now!")
}
}
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecExchange.scala | Scala | apache-2.0 | 8,025 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail.batches
/** [[Batch]] implementation specialized for `Byte`.
*
* Under the hood it uses an [[monix.tail.batches.ArrayBatch ArrayBatch]]
* implementation, which is `@specialized`. Using `BytesBatch`
* might be desirable instead for `isInstanceOf` checks.
*/
final class BytesBatch(underlying: ArrayBatch[Byte]) extends Batch[Byte] {
override def cursor(): BytesCursor =
new BytesCursor(underlying.cursor())
override def take(n: Int): BytesBatch =
new BytesBatch(underlying.take(n))
override def drop(n: Int): BytesBatch =
new BytesBatch(underlying.drop(n))
override def slice(from: Int, until: Int): BytesBatch =
new BytesBatch(underlying.slice(from, until))
override def filter(p: (Byte) => Boolean): BytesBatch =
new BytesBatch(underlying.filter(p))
override def map[B](f: (Byte) => B): ArrayBatch[B] =
underlying.map(f)
override def collect[B](pf: PartialFunction[Byte, B]): ArrayBatch[B] =
underlying.collect(pf)
override def foldLeft[R](initial: R)(op: (R, Byte) => R): R =
underlying.foldLeft(initial)(op)
}
| alexandru/monifu | monix-tail/shared/src/main/scala/monix/tail/batches/BytesBatch.scala | Scala | apache-2.0 | 1,770 |
/**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.appjet.ajstdlib;
import scala.collection.mutable.{HashMap,ListBuffer};
import java.util.concurrent.locks.ReentrantLock;
object timer {
var _timings = new HashMap[String,ListBuffer[Double]];
var _lock = new ReentrantLock;
var _callstack = new ThreadLocal[ListBuffer[String]];
def start(opname: String) = {
var _localcallstack = _callstack.get();
if (_localcallstack == null) {
_callstack.set(new ListBuffer[String]);
_localcallstack = _callstack.get();
}
_localcallstack += opname;
var _oplabel = _localcallstack.mkString(".");
val startTime: Long = System.nanoTime();
new {
def done() {
val elapsedTimeMs: Double = (System.nanoTime() - startTime) / 1.0e6;
_lock.lock();
try {
var times = _timings.getOrElse(_oplabel, new ListBuffer[Double]);
/*
if (times.size > 100000) {
times = new ListBuffer[double];
}*/
times += elapsedTimeMs;
_timings.put(_oplabel, times);
_localcallstack.remove(_localcallstack.length-1);
} finally {
_lock.unlock();
}
}
}
}
def getOpNames(): Array[String] = {
_lock.lock();
try {
return _timings.keys.toList.toArray;
} finally {
_lock.unlock();
}
}
def getStats(opname: String): Array[Double] = {
_lock.lock();
try {
var times:ListBuffer[Double] = _timings(opname);
var total = times.foldRight(0.0)(_ + _);
return Array(times.size, total, (total / times.size));
} finally {
_lock.unlock();
}
}
def reset() {
_lock.lock();
_timings = new HashMap[String,ListBuffer[Double]];
_lock.unlock();
}
}
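
// Example usage (editor's sketch, not part of the original file): time a named operation
// and read back (count, total ms, average ms) for it.
//
//   val op = timer.start("db.query")
//   try {
//     // ... do the work being measured ...
//   } finally {
//     op.done()
//   }
//   val Array(count, totalMs, avgMs) = timer.getStats("db.query")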
| whackpad/whackpad | infrastructure/net.appjet.ajstdlib/timer.scala | Scala | apache-2.0 | 2,239 |
package com.twitter.scalding.typed
import org.scalatest.FunSuite
import com.twitter.scalding.typed.functions.EqTypes
class ResolverTest extends FunSuite {
class Key[A]
class Value[A]
val k1 = new Key[Int]
val k2 = new Key[Int]
val k3 = new Key[Int]
val v1 = new Value[Int]
val v2 = new Value[Int]
val v3 = new Value[Int]
// if they are eq, they have the same type
def keq[A, B](ka: Key[A], kb: Key[B]): Option[EqTypes[A, B]] =
if (ka == null || kb == null) None
else if (ka eq kb) Some(EqTypes.reflexive[A].asInstanceOf[EqTypes[A, B]])
else None
val custom = new Resolver[Key, Value] {
def apply[A](k: Key[A]) =
keq(k1, k).map { eqtypes =>
eqtypes.subst[Value](v3)
}
}
import Resolver.pair
test("orElse order is correct") {
assert((pair(k1, v1).orElse(pair(k1, v2)))(k1) == Some(v1))
assert((pair(k1, v2).orElse(pair(k1, v1)))(k1) == Some(v2))
assert((pair(k2, v1).orElse(pair(k1, v2)))(k1) == Some(v2))
assert((pair(k2, v2).orElse(pair(k1, v1)))(k1) == Some(v1))
assert(((pair(k1, v1).orElse(pair(k1, v2))).orElse(pair(k1, v3)))(k1) == Some(v1))
assert(((pair(k1, v2).orElse(pair(k1, v1))).orElse(pair(k1, v3)))(k1) == Some(v2))
assert(((pair(k1, v1).orElse(pair(k1, v2))).orElse(pair(k2, v3)))(k2) == Some(v3))
assert(custom(k1) == Some(v3))
assert(custom(k2) == None)
assert((custom.orElse(pair(k1, v2)))(k1) == Some(v3))
assert((custom.orElse(pair(k2, v2)))(k2) == Some(v2))
assert((pair(k1, v2).orElse(custom))(k1) == Some(v2))
assert((pair(k2, v2).orElse(custom))(k1) == Some(v3))
assert((pair(k2, v2).orElse(custom))(k2) == Some(v2))
}
test("test remapping with andThen") {
val remap = Resolver.pair(k1, k2).orElse(Resolver.pair(k2, k3)).orElse(Resolver.pair(k3, k1))
assert((remap.andThen(custom.orElse(pair(k1, v2))))(k1) == None)
assert((remap.andThen(custom.orElse(pair(k2, v2))))(k2) == None)
assert((remap.andThen(pair(k1, v2).orElse(custom)))(k3) == Some(v2))
assert((remap.andThen(pair(k2, v2).orElse(custom)))(k3) == Some(v3))
assert((remap.andThen(pair(k2, v2).orElse(custom)))(k1) == Some(v2))
}
}
| twitter/scalding | scalding-core/src/test/scala/com/twitter/scalding/typed/ResolverTest.scala | Scala | apache-2.0 | 2,182 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.datastream
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan._
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.core.Values
import org.apache.calcite.rex.RexLiteral
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.table.api.{StreamQueryConfig, StreamTableEnvironment}
import org.apache.flink.table.codegen.CodeGenerator
import org.apache.flink.table.plan.schema.RowSchema
import org.apache.flink.table.runtime.io.CRowValuesInputFormat
import org.apache.flink.table.runtime.types.{CRow, CRowTypeInfo}
import scala.collection.JavaConverters._
/**
* DataStream RelNode for LogicalValues.
*/
class DataStreamValues(
cluster: RelOptCluster,
traitSet: RelTraitSet,
schema: RowSchema,
tuples: ImmutableList[ImmutableList[RexLiteral]],
ruleDescription: String)
extends Values(cluster, schema.logicalType, tuples, traitSet)
with DataStreamRel {
override def deriveRowType() = schema.logicalType
override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
new DataStreamValues(
cluster,
traitSet,
schema,
getTuples,
ruleDescription
)
}
override def translateToPlan(
tableEnv: StreamTableEnvironment,
queryConfig: StreamQueryConfig): DataStream[CRow] = {
val config = tableEnv.getConfig
val returnType = CRowTypeInfo(schema.physicalTypeInfo)
val generator = new CodeGenerator(config)
// generate code for every record
val generatedRecords = getTuples.asScala.map { r =>
generator.generateResultExpression(
schema.physicalTypeInfo,
schema.physicalFieldNames,
r.asScala)
}
// generate input format
val generatedFunction = generator.generateValuesInputFormat(
ruleDescription,
generatedRecords.map(_.code),
schema.physicalTypeInfo)
val inputFormat = new CRowValuesInputFormat(
generatedFunction.name,
generatedFunction.code,
returnType)
tableEnv.execEnv.createInput(inputFormat, returnType)
}
}
| hongyuhong/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/datastream/DataStreamValues.scala | Scala | apache-2.0 | 2,946 |
package com.stripe.bonsai
trait FullBinaryTreeOps[T, BL, LL] extends TreeOps[T, Either[BL, LL]] {
override def reduce[A](node: Node)(f: (Either[BL, LL], Iterable[A]) => A): A =
foldNode(node)({ (lbl, lc, rc) =>
f(Left(lbl), reduce(lc)(f) :: reduce(rc)(f) :: Nil)
}, lbl => f(Right(lbl), Nil))
def foldNode[A](node: Node)(f: (BL, Node, Node) => A, g: LL => A): A
def reduceNode[A](node: Node)(f: (BL, A, A) => A, g: LL => A): A =
foldNode(node)((lbl, rc, lc) => f(lbl, reduceNode(lc)(f, g), reduceNode(rc)(f, g)), g)
def label(node: Node): Either[BL, LL] =
foldNode(node)((bl, _, _) => Left(bl), ll => Right(ll))
def children(node: Node): Iterable[Node] =
foldNode(node)((_, lc, rc) => lc :: rc :: Nil, _ => Nil)
def collectLeafLabelsF[A](node: Node)(f: LL => A): Set[A] =
reduceNode[Set[A]](node)((_, lc, rc) => lc ++ rc, ll => Set(f(ll)))
def collectLeafLabels(node: Node): Set[LL] = collectLeafLabelsF(node)(identity)
}
object FullBinaryTreeOps {
final def apply[T, BL, LL](implicit ops: FullBinaryTreeOps[T, BL, LL]): FullBinaryTreeOps[T, BL, LL] = ops
}
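
/**
 * Editor's sketch (not part of the upstream file): a small example of reduceNode, assuming
 * the inherited TreeOps exposes `root(t): Option[Node]` as elsewhere in this library.
 */
object FullBinaryTreeOpsExample {
  /** Depth of a full binary tree: a leaf counts as 1, a branch as 1 plus its deeper child. */
  def depth[T, BL, LL](tree: T)(implicit ops: FullBinaryTreeOps[T, BL, LL]): Int =
    ops.root(tree).fold(0) { node =>
      ops.reduceNode[Int](node)((_, l, r) => 1 + math.max(l, r), _ => 1)
    }
}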
| stripe/bonsai | bonsai-core/src/main/scala/com/stripe/bonsai/FullBinaryTreeOps.scala | Scala | mit | 1,113 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.cli
import java.io._
import org.bdgenomics.adam.projections.Projection
import org.bdgenomics.adam.projections.FeatureField._
import org.bdgenomics.adam.util.ADAMFunSuite
import org.bdgenomics.utils.cli.Args4j
import org.bdgenomics.formats.avro.Feature
class TransformFeaturesSuite extends ADAMFunSuite {
sparkTest("can convert a simple BED file") {
val loader = Thread.currentThread().getContextClassLoader
val inputPath = loader.getResource("gencode.v7.annotation.trunc10.bed").getPath
val outputFile = File.createTempFile("adam-cli.TransformFeaturesSuite", ".adam")
val outputPath = outputFile.getAbsolutePath
    val argLine = "%s %s".format(inputPath, outputPath).split("\\s+")
    // We have to do this, since features2adam won't work if the file already exists,
    // but the "createTempFile" method actually creates the file (on some systems?)
assert(outputFile.delete(), "Couldn't delete (empty) temp file")
val args: TransformFeaturesArgs = Args4j.apply[TransformFeaturesArgs](argLine)
val features2Adam = new TransformFeatures(args)
features2Adam.run(sc)
val schema = Projection(featureId, contigName, start, strand)
val lister = new ParquetLister[Feature](Some(schema))
val converted = lister.materialize(outputPath).toSeq
assert(converted.size === 10)
assert(converted.find(_.getContigName != "chr1").isEmpty)
}
}
| massie/adam | adam-cli/src/test/scala/org/bdgenomics/adam/cli/TransformFeaturesSuite.scala | Scala | apache-2.0 | 2,219 |
package org.sbtidea.test.util
import sbt._
import org.apache.commons.io.FileUtils.listFiles
import org.apache.commons.io.FilenameUtils.removeExtension
import scala.xml.Utility.trim
import xml._
import collection.JavaConverters._
import xml.transform.{RewriteRule, RuleTransformer}
import xml.Node
import org.sbtidea.SystemProps
import scala.language.implicitConversions
import scala.language.reflectiveCalls
abstract class AbstractScriptedTestBuild(projectName : String) extends Build {
import XmlAttributesCopy._
lazy val assertExpectedXmlFiles = TaskKey[Unit]("assert-expected-xml-files")
lazy val scriptedTestSettings = Seq(assertExpectedXmlFiles := assertXmlsTask)
private def assertXmlsTask {
val expectedFiles = listFiles(file("."), Array("expected"), true).asScala
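    // Keep only the first failure message (if any) across all expected files and report it.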
expectedFiles.map(assertExpectedXml).foldLeft[Option[String]](None) {
(acc, fileResult) => if (acc.isDefined) acc else fileResult
} foreach sys.error
}
private def assertExpectedXml(expectedFile: File):Option[String] = {
val actualFile = new File(removeExtension(expectedFile.getAbsolutePath))
if (actualFile.exists) assertExpectedXml(expectedFile, actualFile)
else Some("Expected file " + actualFile.getAbsolutePath + " does not exist.")
}
private def assertExpectedXml(expectedFile: File, actualFile: File): Option[String] = {
/* Make generated files OS independent and strip the suffix that is randomly generated from content url so that comparisons can work */
val actualFileTransformers = Map("" -> Seq(WindowsPathRewriteRule), ".iml" -> Seq(TmpPathRewriteRule, IvyCachePathRewriteRule))
/* Take current jdk version into consideration */
val expectedFileTransformers = Map("misc.xml.expected" -> Seq(JDKVersionRewriteRule))
def transformNode(fileName: String, transformerMap: Map[String, Seq[RewriteRule]], node: xml.Node): xml.Node = {
val transformers = transformerMap.keys.foldLeft(Seq[RewriteRule]()) { (acc, key) =>
if (fileName.endsWith(key)) acc ++ transformerMap(key) else acc
}
new RuleTransformer(transformers:_*).transform(node).head
}
def processActual(node: xml.Node): xml.Node = transformNode(actualFile.getName, actualFileTransformers, node)
def processExpected(node: xml.Node): xml.Node = transformNode(expectedFile.getName, expectedFileTransformers, node)
val actualXml = trim(processActual(XML.loadFile(actualFile)))
val expectedXml = trim(processExpected(XML.loadFile(expectedFile)))
if (actualXml != expectedXml) Some(formatErrorMessage(actualFile, actualXml, expectedXml)) else None
}
private def formatErrorMessage(actualFile: File, actualXml: Node, expectedXml: Node): String = {
val pp = new PrettyPrinter(1000, 2)
val msg = new StringBuilder
msg.append("Xml file " + actualFile.getName + " does not equal expected:")
    msg.append("\n********** Expected **********\n ")
    pp.format(expectedXml, msg)
    msg.append("\n*********** Actual ***********\n ")
pp.format(actualXml, msg)
msg.toString
}
object XmlAttributesCopy {
implicit def addGoodCopyToAttribute(attr: Attribute) = new {
def goodcopy(key: String = attr.key, value: Any = attr.value): Attribute =
Attribute(attr.pre, key, Text(value.toString), attr.next)
}
implicit def iterableToMetaData(items: Iterable[MetaData]): MetaData = items match {
case Nil => Null
case head :: tail => head.copy(next = iterableToMetaData(tail))
}
}
object WindowsPathRewriteRule extends RewriteRule {
override def transform(n: Node): Seq[Node] =
n match {
        case e: Elem if (e.attributes.asAttrMap.values.exists(_.contains("\\"))) => {
          e.copy(attributes = for (attr <- e.attributes) yield attr match {
            case a@Attribute(_, v, _) if v.text.contains("\\") => a.goodcopy(value = v.text.replaceAll("\\\\", "/"))
case other => other
})
}
case _ => n
}
}
object JDKVersionRewriteRule extends RewriteRule {
override def transform(n: Node): Seq[Node] =
n match {
case e: Elem if (e.attributes.asAttrMap.values.exists(_ == "ProjectRootManager")) => {
e.copy(attributes = for (attr <- e.attributes) yield attr match {
case a@Attribute(k, _, _) if k == "languageLevel" => a.goodcopy(value = SystemProps.languageLevel)
case a@Attribute(k, _, _) if k == "project-jdk-name" => a.goodcopy(value = SystemProps.jdkName)
case other => other
})
}
case _ => n
}
}
object TmpPathRewriteRule extends RewriteRule {
def elementMatches(e: Node): Boolean = {
      val url = (e \ "@url").text
url.matches("file://.*/sbt_[a-f[0-9]]+/" + projectName + "$")
}
override def transform(n: Node): Seq[Node] = n match {
case e: Elem if elementMatches(e) => {
<content url={"file:///tmp/sbt_/" + projectName}>
{e.child}
</content>
}
case _ => n
}
}
object IvyCachePathRewriteRule extends RewriteRule {
override def transform(n: Node): Seq[Node] =
n match {
case e: Elem if (e.attributes.asAttrMap.keys.exists(_ == "value")) => {
e.copy(attributes = for (attr <- e.attributes) yield attr match {
case a@Attribute(k, Text(v), _) if (k == "value" && v.contains("/.ivy2/")) => a.goodcopy(value = "~" + v.substring(v.indexOf("/.ivy2/")))
case other => other
})
}
case _ => n
}
}
} | mpeltonen/sbt-idea | src/main/scala/org/sbtidea/test/util/AbstractScriptedTestBuild.scala | Scala | bsd-3-clause | 5,531 |
package org.openmole.core.workflow.domain
import scala.annotation.implicitNotFound
/**
* Property of having a size for a domain
* @tparam D
*/
@implicitNotFound("${D} is not a sized variation domain")
trait DomainSize[-D] {
def apply(domain: D): Int
}
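/*
 * A minimal, hypothetical example (not part of the original file): any type with an obvious
 * notion of size can provide a DomainSize instance, e.g. scala.Range.
 */
object DomainSizeExample {

  implicit val rangeDomainSize: DomainSize[Range] = new DomainSize[Range] {
    def apply(domain: Range): Int = domain.size
  }

  /** Summons the implicit instance, e.g. sizeOf(1 to 10) == 10. */
  def sizeOf[D](domain: D)(implicit domainSize: DomainSize[D]): Int = domainSize(domain)
}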
| openmole/openmole | openmole/core/org.openmole.core.workflow/src/main/scala/org/openmole/core/workflow/domain/DomainSize.scala | Scala | agpl-3.0 | 259 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.io.{File}
import java.util.{List => JList}
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.SparkContext
import org.apache.spark.api.java.{JavaSparkContext, JavaRDD}
private[spark] object PythonUtils {
/** Get the PYTHONPATH for PySpark, either from SPARK_HOME, if it is set, or from our JAR */
def sparkPythonPath: String = {
val pythonPath = new ArrayBuffer[String]
for (sparkHome <- sys.env.get("SPARK_HOME")) {
pythonPath += Seq(sparkHome, "python").mkString(File.separator)
pythonPath += Seq(sparkHome, "python", "lib", "py4j-0.8.2.1-src.zip").mkString(File.separator)
}
pythonPath ++= SparkContext.jarOfObject(this)
pythonPath.mkString(File.pathSeparator)
}
/** Merge PYTHONPATHS with the appropriate separator. Ignores blank strings. */
def mergePythonPaths(paths: String*): String = {
paths.filter(_ != "").mkString(File.pathSeparator)
}
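  // e.g. on Unix, where File.pathSeparator is ":":
  //   mergePythonPaths("/opt/spark/python", "", "/extra/libs") == "/opt/spark/python:/extra/libs"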
def generateRDDWithNull(sc: JavaSparkContext): JavaRDD[String] = {
sc.parallelize(List("a", null, "b"))
}
/**
* Convert list of T into seq of T (for calling API with varargs)
*/
def toSeq[T](cols: JList[T]): Seq[T] = {
cols.toList.toSeq
}
}
| Dax1n/spark-core | core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala | Scala | apache-2.0 | 2,077 |
// import fpinscala.datastructures._
//instead... copying all the boilerplate code.
sealed trait List[+A] // `List` data type, parameterized on a type, `A`
case object Nil extends List[Nothing] // A `List` data constructor representing the empty list
case class Cons[+A](head: A, tail: List[A]) extends List[A] // Another data constructor, representing nonempty lists. Note that `tail` is another `List[A]`, which may be `Nil` or another `Cons`.
object List { // `List` companion object. Contains functions for creating and working with lists.
def sum(ints: List[Int]): Int = ints match { // A function that uses pattern matching to add up a list of integers
case Nil => 0 // The sum of the empty list is 0.
case Cons(x,xs) => x + sum(xs) // The sum of a list starting with `x` is `x` plus the sum of the rest of the list.
}
def product(ds: List[Double]): Double = ds match {
case Nil => 1.0
case Cons(0.0, _) => 0.0
case Cons(x,xs) => x * product(xs)
}
def apply[A](as: A*): List[A] = // Variadic function syntax
if (as.isEmpty) Nil
else Cons(as.head, apply(as.tail: _*))
val x = List(1,2,3,4,5) match {
case Cons(x, Cons(2, Cons(4, _))) => x
case Nil => 42
case Cons(x, Cons(y, Cons(3, Cons(4, _)))) => x + y
case Cons(h, t) => h + sum(t)
case _ => 101
}
def append[A](a1: List[A], a2: List[A]): List[A] =
a1 match {
case Nil => a2
case Cons(h,t) => Cons(h, append(t, a2))
}
def foldRight[A,B](as: List[A], z: B)(f: (A, B) => B): B = // Utility functions
as match {
case Nil => z
case Cons(x, xs) => f(x, foldRight(xs, z)(f))
}
def sum2(ns: List[Int]) =
foldRight(ns, 0)((x,y) => x + y)
def product2(ns: List[Double]) =
foldRight(ns, 1.0)(_ * _) // `_ * _` is more concise notation for `(x,y) => x * y`; see sidebar
def tail[A](l: List[A]): List[A] = sys.error("todo")
def setHead[A](l: List[A], h: A): List[A] = sys.error("todo")
def drop[A](l: List[A], n: Int): List[A] = sys.error("todo")
def dropWhile[A](l: List[A], f: A => Boolean): List[A] = sys.error("todo")
def init[A](l: List[A]): List[A] = sys.error("todo")
def length[A](l: List[A]): Int = sys.error("todo")
def foldLeft[A,B](l: List[A], z: B)(f: (B, A) => B): B = sys.error("todo")
def map[A,B](l: List[A])(f: A => B): List[B] = sys.error("todo")
}
def sum(ints: List[Int]): Int = ints match { // A function that uses pattern matching to add up a list of integers
case Nil => 0 // The sum of the empty list is 0.
case Cons(x,xs) => x + sum(xs) // The sum of a list starting with `x` is `x` plus the sum of the rest of the list.
}
//////////begin my own stuff
var x = List(1,2,3,4,5) match {
case Cons(x, Cons(2, Cons(4,_))) => x
case Nil => 42
case Cons(x, Cons(y, Cons(3, Cons(4,_)))) => x + y
case Cons(h, t) => h + sum(t)
case _ => 101
}
println(x)
//answer is '3', but actually. stack overflow. bleh | scottleedavis/scala-musings | match_expression.scala | Scala | mit | 2,966 |
package com.twitter.finatra.http.tests.integration.messagebody.main.domain
import com.twitter.finatra.http.annotations.QueryParam
case class GreetingRequest(@QueryParam name: String)
| twitter/finatra | http-server/src/test/scala/com/twitter/finatra/http/tests/integration/messagebody/main/domain/GreetingRequest.scala | Scala | apache-2.0 | 185 |
package com.wavesplatform.network
import com.wavesplatform.block.Block
import com.wavesplatform.history.History
import com.wavesplatform.network.HistoryReplier._
import com.wavesplatform.settings.SynchronizationSettings
import com.wavesplatform.utils.ScorexLogging
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}
@Sharable
class HistoryReplier(score: => BigInt, history: History, settings: SynchronizationSettings)(implicit ec: ExecutionContext)
extends ChannelInboundHandlerAdapter
with ScorexLogging {
private def respondWith(ctx: ChannelHandlerContext, value: Future[Message]): Unit =
value.onComplete {
case Failure(e) => log.debug(s"${id(ctx)} Error processing request", e)
case Success(value) =>
if (ctx.channel().isOpen) {
ctx.writeAndFlush(value)
} else {
log.trace(s"${id(ctx)} Channel is closed")
}
}
override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef): Unit = msg match {
case GetSignatures(otherSigs) =>
respondWith(ctx, Future(Signatures(history.blockIdsAfter(otherSigs, settings.maxRollback))))
case GetBlock(sig) =>
respondWith(
ctx,
Future(history.loadBlockBytes(sig))
.map {
case Some((blockVersion, bytes)) =>
RawBytes(if (blockVersion < Block.ProtoBlockVersion) BlockSpec.messageCode else PBBlockSpec.messageCode, bytes)
case _ => throw new NoSuchElementException(s"Error loading block $sig")
}
)
case MicroBlockRequest(microBlockId) =>
respondWith(
ctx,
Future(history.loadMicroBlock(microBlockId)).map {
case Some(microBlock) => RawBytes.fromMicroBlock(MicroBlockResponse(microBlock, microBlockId))
case _ => throw new NoSuchElementException(s"Error loading microblock $microBlockId")
}
)
case _: Handshake =>
respondWith(ctx, Future(LocalScoreChanged(score)))
case _ => super.channelRead(ctx, msg)
}
def cacheSizes: CacheSizes = CacheSizes(0, 0)
}
object HistoryReplier {
case class CacheSizes(blocks: Long, microBlocks: Long)
}
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/network/HistoryReplier.scala | Scala | mit | 2,313 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import org.apache.spark.{Partition, SharedSparkContext, SparkFunSuite, TaskContext}
class PartitionPruningRDDSuite extends SparkFunSuite with SharedSparkContext {
test("Pruned Partitions inherit locality prefs correctly") {
val rdd = new RDD[Int](sc, Nil) {
override protected def getPartitions = {
Array[Partition](
new TestPartition(0, 1),
new TestPartition(1, 1),
new TestPartition(2, 1))
}
def compute(split: Partition, context: TaskContext) = {
Iterator()
}
}
val prunedRDD = PartitionPruningRDD.create(rdd, _ == 2)
assert(prunedRDD.partitions.length == 1)
val p = prunedRDD.partitions(0)
assert(p.index == 0)
assert(p.asInstanceOf[PartitionPruningRDDPartition].parentSplit.index == 2)
}
test("Pruned Partitions can be unioned ") {
val rdd = new RDD[Int](sc, Nil) {
override protected def getPartitions = {
Array[Partition](
new TestPartition(0, 4),
new TestPartition(1, 5),
new TestPartition(2, 6))
}
def compute(split: Partition, context: TaskContext) = {
List(split.asInstanceOf[TestPartition].testValue).iterator
}
}
val prunedRDD1 = PartitionPruningRDD.create(rdd, _ == 0)
val prunedRDD2 = PartitionPruningRDD.create(rdd, _ == 2)
val merged = prunedRDD1 ++ prunedRDD2
assert(merged.count() == 2)
val take = merged.take(2)
assert(take.apply(0) == 4)
assert(take.apply(1) == 6)
}
}
class TestPartition(i: Int, value: Int) extends Partition with Serializable {
def index: Int = i
def testValue: Int = this.value
}
| mike0sv/spark | core/src/test/scala/org/apache/spark/rdd/PartitionPruningRDDSuite.scala | Scala | apache-2.0 | 2,481 |
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: [email protected]
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package org.openapitools.client.model
case class PipelineRunartifacts(
name: Option[String] = None,
size: Option[Int] = None,
url: Option[String] = None,
`class`: Option[String] = None
)
| cliffano/swaggy-jenkins | clients/scala-sttp/generated/src/main/scala/org/openapitools/client/model/PipelineRunartifacts.scala | Scala | mit | 545 |
//
// $Id$
//
// Wiggle - a 2D game development library - http://code.google.com/p/wiggle/
// Copyright 2008-2010 Michael Bayne
// Distributed under the "Simplified BSD License" in LICENSE.txt
package wiggle.input
import wiggle.app.Entity
import wiggle.util.Task
/**
* Maintains groups of key bindings where only one key can be active at a time in a group, like a
* directional pad. A traditional dpad has horizontal bindings where only one of left or right can
* be active and vertical bindings where only one of up or down can be active. Multiple key bindings
* are supported for each group to allow left and right handed variations (WASD and arrow keys, for
* example).
*/
class DPad (kdb :Keyboard, target :Entity)
{
/** A group of mappings from keys to tasks. */
class Group ()
{
/** Binds a key code to a particular task. */
def bind (code :Int, task :Task) :Group = {
val key = kdb.key(code)
_keymap += (key -> task)
key.addOnPress(onPress)
key.addOnRelease(onRelease)
this
}
/** Binds a list of key codes to a particular task. Each key will be mapped to the same task. */
def bind (keys :List[Int], task :Task) :Group = {
keys.foreach(bind(_, task))
this
}
/** Convenience function for binding left arrow key and a (for wasd controls). */
def bindLeft (task :Task) = bind(List(Keyboard.KEY_LEFT, Keyboard.KEY_A), task)
/** Convenience function for binding right arrow key and d (for wasd controls). */
def bindRight (task :Task) = bind(List(Keyboard.KEY_RIGHT, Keyboard.KEY_D), task)
/** Convenience function for binding up arrow key and w (for wasd controls). */
def bindUp (task :Task) = bind(List(Keyboard.KEY_UP, Keyboard.KEY_W), task)
/** Convenience function for binding down arrow key and s (for wasd controls). */
def bindDown (task :Task) = bind(List(Keyboard.KEY_DOWN, Keyboard.KEY_S), task)
/** Activates or deactivates first-wins mode. In first-wins mode, the first key pressed will
* remain active until it is released. No other key in the group will take effect. In last-wins
* mode (the default) any key pressed will override any already pressed key. If the original key
* remains down when the overriding key is released, the original task will be resumed. */
def setFirstWins (firstWins :Boolean) = {
      _firstWins = firstWins
this
}
protected def onPress (key :Keyboard#Key) = {
if (!_firstWins || _pressed.isEmpty) {
_pressed = key :: _pressed
activate(key)
}
true
}
protected def onRelease (key :Keyboard#Key) = {
if (!_pressed.isEmpty) {
if (_pressed.head == key) {
if (_pressed.tail.isEmpty) deactivate()
else activate(_pressed.tail.head)
}
_pressed = _pressed.filterNot(key.==)
}
true
}
protected def activate (key :Keyboard#Key) = _keymap.get(key) match {
case None => // nada
case Some(task) => {
deactivate()
target.add(task)
_active = Some(task)
}
}
protected def deactivate () = _active match {
case None => // nada
case Some(task) => {
target.remove(task)
_active = None
}
}
private[this] var _firstWins = false
private[this] var _keymap = Map[Keyboard#Key, Task]()
private[this] var _pressed :List[Keyboard#Key] = Nil
private[this] var _active :Option[Task] = None
}
  /** Creates a mutually exclusive group. */
def group () = new Group
}
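/*
 * Hypothetical usage sketch; `keyboard`, `hero` and the movement tasks are assumed to exist
 * elsewhere and are not part of this file:
 *
 *   val dpad = new DPad(keyboard, hero)
 *   dpad.group().bindLeft(moveLeft).bindRight(moveRight)              // horizontal group
 *   dpad.group().bindUp(moveUp).bindDown(moveDown).setFirstWins(true) // vertical group, first key wins
 */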
| zdevzee/wiggle | src/main/scala/wiggle/input/DPad.scala | Scala | bsd-3-clause | 3,558 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2015 Matthias Langer ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.modules
import edu.latrobe._
import edu.latrobe.blaze._
import scala.collection._
import scala.util.hashing._
/**
* A variant of Sequence that takes the time required for executing
* the underlying modules.
*/
final class Benchmark(override val builder: BenchmarkBuilder,
override val inputHints: BuildHints,
override val seed: InstanceSeed,
override val weightBufferBuilder: ValueTensorBufferBuilder)
extends SequentialContainer[BenchmarkBuilder]
with BenchmarkEnabled {
override val (children, outputHints)
: (Seq[Module], BuildHints) = {
var tmpHints = inputHints
val modules = builder.children.map(child => {
val tmp = child.build(tmpHints, seed, weightBufferBuilder)
tmpHints = tmp.outputHints
tmp
})
(modules, tmpHints)
}
override protected def doClose()
: Unit = {
children.foreach(
_.close()
)
super.doClose()
}
val caption
: String = builder.caption
// ---------------------------------------------------------------------------
// Forward propagation related.
// ---------------------------------------------------------------------------
override val requiresMaintainingInputDuringForwardPropagation
: Boolean = false
override protected def doPredict(mode: Mode,
inPlaceAllowed: Boolean,
input: Tensor,
reference: Tensor,
onEnter: OnEnterPredict,
onLeave: OnLeavePredict)
: (Tensor, PredictContext) = {
doBenchmark(
s"$caption.predict(${input.layout})",
super.doPredict(
mode,
inPlaceAllowed,
input,
reference,
onEnter,
onLeave
)
)
}
override protected def doPredictInv(output: Tensor,
context: PredictContext,
onLeave: OnLeavePredict,
contexts: mutable.Stack[PredictContext])
: Tensor = {
doBenchmark(
s"$caption.predictInv(${output.layout})",
super.doPredictInv(
output,
context,
onLeave,
contexts
)
)
}
// ---------------------------------------------------------------------------
// Backward propagation related.
// ---------------------------------------------------------------------------
override val backpropagationRequirementsForInput
: TensorDependency = TensorDependency.NotRequired
override val backpropagationRequirementsForOutput
: TensorDependency = TensorDependency.NotRequired
override protected def doDeriveGradients(context: PredictContext,
error: NextError,
sink: ValueTensorBuffer,
onEnter: OnEnterDeriveGradients,
onLeave: OnLeaveDeriveGradients,
tensors: mutable.Stack[Tensor],
contexts: mutable.Stack[PredictContext])
: NextError = {
doBenchmark(
s"$caption.deriveGradients()",
super.doDeriveGradients(
context,
error,
sink,
onEnter,
onLeave,
tensors,
contexts
)
)
}
// ---------------------------------------------------------------------------
// State backup and retrieval.
// ---------------------------------------------------------------------------
override def state
: BenchmarkState = BenchmarkState(
super.state,
children.map(_.state)
)
override def restoreState(state: InstanceState)
: Unit = {
super.restoreState(state.parent)
state match {
case state: BenchmarkState =>
SeqEx.foreach(
children,
state.children
)(_.restoreState(_))
case _ =>
throw new MatchError(state)
}
}
}
final class BenchmarkBuilder
extends SequentialContainerBuilder[BenchmarkBuilder] {
override def repr
: BenchmarkBuilder = this
private var _caption
: String = ""
def caption
: String = _caption
def caption_=(value: String)
: Unit = {
require(value != null)
_caption = value
}
def setCaption(value: String)
: BenchmarkBuilder = {
caption_=(value)
this
}
override protected def doToString()
: List[Any] = _caption :: super.doToString()
override def hashCode()
: Int = MurmurHash3.mix(super.hashCode(), _caption.hashCode())
override def canEqual(that: Any)
: Boolean = that.isInstanceOf[BenchmarkBuilder]
override protected def doEquals(other: Equatable)
: Boolean = super.doEquals(other) && (other match {
case other: BenchmarkBuilder =>
_caption == other._caption
case _ =>
false
})
override protected def doCopy()
: BenchmarkBuilder = BenchmarkBuilder()
override def copyTo(other: InstanceBuilder)
: Unit = {
super.copyTo(other)
other match {
case other: BenchmarkBuilder =>
other._caption = _caption
case _ =>
}
}
// ---------------------------------------------------------------------------
// Weights / binding related
// ---------------------------------------------------------------------------
override def build(hints: BuildHints,
seed: InstanceSeed,
weightsBuilder: ValueTensorBufferBuilder)
: Benchmark = new Benchmark(this, hints, seed, weightsBuilder)
}
object BenchmarkBuilder {
final def apply()
: BenchmarkBuilder = new BenchmarkBuilder
final def apply(caption: String)
: BenchmarkBuilder = apply().setCaption(caption)
final def apply(caption: String,
module0: ModuleBuilder)
: BenchmarkBuilder = apply(caption) += module0
final def apply(caption: String,
module0: ModuleBuilder,
modules: ModuleBuilder*)
: BenchmarkBuilder = apply(caption, module0) ++= modules
final def apply(caption: String,
modules: TraversableOnce[ModuleBuilder])
: BenchmarkBuilder = apply(caption) ++= modules
final def apply(caption: String,
modules: Array[ModuleBuilder])
: BenchmarkBuilder = apply(caption) ++= modules
}
final case class BenchmarkState(override val parent: InstanceState,
children: Seq[InstanceState])
extends ModuleState {
}
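/*
 * Hypothetical usage sketch; the wrapped module builders are assumptions, not part of this
 * file. Wrapping part of a network in a Benchmark reports the time spent in its forward and
 * backward passes under the given caption:
 *
 *   val timedEncoder = BenchmarkBuilder("encoder", convBuilder, poolBuilder, denseBuilder)
 */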
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/modules/Benchmark.scala | Scala | apache-2.0 | 7,407 |
package scife.enumeration.testcases
import org.scalatest._
import org.scalatest.prop._
import org.scalatest.matchers._
import scife.enumeration._
import dependent._
import scife.{ enumeration => e }
import memoization._
import scife.util._
import structures.BSTrees._
import org.scalatest._
import org.scalameter.api._
import scala.language.postfixOps
import scala.language.existentials
class ClassInterfaceDAGTest extends FunSuite with Matchers with GeneratorDrivenPropertyChecks with
HasLogger with ProfileLogger {
import Checks._
import structures._
import BSTrees._
import Util._
import Common._
// (size, Id, #class, #interface, #overridableMethods, map(node->sealed))
type Input = (Int, Int, Set[Int], Set[Int], List[Int], Predef.Map[Int, Set[Int]])
// list of (extends - -1 for trait, implementing, overrides, seals)
type Output = List[(Int, List[Int], List[Int], List[Int])]
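  // e.g. the entry (0, List(2), List(1), List(1)) describes a class that extends nothing,
  // implements interface 2, overrides method 1 and seals it; -1 in the first slot marks a trait.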
type EnumType = Depend[Input, Output]
val defMap = Predef.Map( -1 -> Set[Int](), 0 -> Set[Int]() )
test("checkGraph") {
val l1 =
List(
( (-1, List(), List(1, 2), List(2)) ),
( (0, List(1), List(1), List(1) )) ,
( (-1, List(1), List(), List() )) ,
( (-1, List(3), List(), List() ) )
)
// println( toGraph( (4, , 0, List(1, 2), defMap), l1) )
}
def toGraph(i: Input, o: Output) = {
import scife.enumeration.testcases.classinterfacedag._
val (size, myId, classes, interfaces, overridableMethods, sealedMap) = i
val graph = new DAG
graph.setSize(size)
graph.setMethodsNum(overridableMethods.size)
val parentMap = scala.collection.mutable.Map[Int, Array[Boolean]]()
for (i <- 1 to size)
parentMap(i) = new Array(size)
val nodes =
for ((myNode, ind_) <- o.zipWithIndex) yield {
val ind = ind_ + 1
val (ext, implementing, overrides, seals) = myNode
val dagNode = new DAGNode
dagNode.numChildren = size
dagNode.isClass = ext >= 0
dagNode.allBools = Array.fill(size + overridableMethods.size * 2)(false)
for (overrid <- overrides)
dagNode.allBools(size + overrid -1 ) = true
for (fin <- seals)
dagNode.allBools(size + overridableMethods.size + fin - 1) = true
if (ext > 0) parentMap(ext)(ind-1) = true
for (impl <- implementing) {
parentMap(impl)(ind-1) = true
}
dagNode
}
for ((node, nid) <- nodes.zipWithIndex) {
for (cid <- 0 until size)
node.allBools(cid) = parentMap(nid + 1)(cid)
}
import scala.collection.JavaConversions._
graph.setNodes(nodes)
graph
}
test("enumeration") {
val checkerHelper = new CheckerHelper[Output]
import checkerHelper._
def rangeList(m: Int) = m to 0 by -1 toArray
val enum = constructEnumerator
// can take a long time for >= 4
for (c <- 1 to 3) {
val input = (c, 1, Set[Int](), Set[Int](), 1 to 2 toList, defMap)
res = enum.getEnum(input)
info("size for (%d,2) id %d".format(c, res.size))
}
// (size, id, #class, #interface, #overridableMethods, sealedMap)
res = enum.getEnum((1, 1, Set(), Set(), Nil, defMap))
res shouldBe a[Map[_, _]]
res.size should be(2)
res = enum.getEnum((1, 1, Set(1), Set(), Nil, defMap + (1 -> Set())))
res shouldBe a[Map[_, _]]
res.size should be(3)
res = enum.getEnum((1, 1, Set(), Set(1), Nil, defMap + (1 -> Set())))
res shouldBe a[Map[_, _]]
res.size should be(4)
res = enum.getEnum((1, 1, Set(), Set(), List(1), defMap))
res.size should be(6)
res = enum.getEnum((1, 1, Set(1), Set(), List(1), defMap + (1 -> Set())))
res.size should be(9)
res = enum.getEnum((1, 1, Set(), Set(1), List(1), defMap + (1 -> Set(), 2 -> Set[Int]())))
res.size should be(12)
res = enum.getEnum((1, 1, Set(1), Set(2), List(1), defMap + (1 -> Set(), 2 -> Set[Int]())))
res.size should be(18)
res = enum.getEnum((2, 1, Set(), Set(), List(1), defMap))
res.size should be(57)
res = enum.getEnum((1, 1, Set(), Set(), List(1, 2), defMap))
res.size should be(18)
res = enum.getEnum((2, 1, Set(), Set(), List(1, 2), defMap))
res.size should be(471)
for (c <- 1 to 3; m <- 0 to 2) {
val input = (c, 1, Set[Int](), Set[Int](), 1 to m toList, defMap)
res = enum.getEnum(input)
res.distinct.size should be(res.size)
for (el <- res; g = toGraph(input, el))
        withClue(el + "\nGraph:\n" + g) {
g.repOK() should be(true)
}
}
{
val input = (3, 1, Set[Int](), Set[Int](), List(1), defMap)
res = enum.getEnum( input )
      info(res.map( toGraph(input, _) ).toList.mkString("\n"))
res.size should be(862)
}
for (c <- 1 to 3; m <- 0 to 2) {
val input = (c, 1, Set[Int](), Set[Int](), 1 to m toList, defMap)
res = enum.getEnum(input)
val message = "(c = %d, m = %d)".format(c, m)
withClue(message) {
info(message + res.size)
}
}
}
test("subListChooser") {
val checkerHelper = new CheckerHelper[List[Int]]
import checkerHelper._
def rangeList(m: Int) = m to 0 by -1 toArray
val enum = constructEnumerator
withLazyClue("Elements are: " + clue) {
for(s <- 1 to 5; m <- 1 to s) {
addMessage = "m=%d and s=%d".format(m, s)
res = subListChooser.getEnum( (m, 1 to s toList) )
val listCombinations: List[List[Int]] =
((1 to s toList) combinations m) toList
res.size should be (listCombinations.size)
elements should contain theSameElementsAs (listCombinations)
}
}
}
val subListChooser: DependFinite[(Int, List[Int]), List[Int]] = Depend.memoizedFin(
(self: DependFinite[(Int, List[Int]), List[Int]], pair: (Int, List[Int])) => {
val (size, range) = pair
if (size <= 0) e.Singleton(Nil): Finite[List[Int]]
else if (size == 1) e.Enum(range map {List(_)}): Finite[List[Int]]
else if (size <= range.size) {
val temp = self.getEnum( (size - 1, range.tail) )
val kept = Map( temp , { range.head :: (_: List[Int]) })
val leftOut = self.getEnum( (size, range.tail) )
val allNodes = e.Concat(kept, leftOut)
allNodes: Finite[List[Int]]
} else e.Empty: Finite[List[Int]]
})
// given a list, pick n methods to override
def overrid(implicit overridableMethods: List[Int]): DependFinite[Int, List[Int]] =
Depend.fin( (nMethods: Int) => subListChooser( (nMethods, overridableMethods) ): Finite[List[Int]] )
// given n and list, pick n methods to seal
def seal: DependFinite[(Int, List[Int]), List[Int]] = Depend.fin( ( p: (Int, List[Int]) ) => {
val (nMethods, overrides) = p
subListChooser( (nMethods, overrides) ): Finite[List[Int]]
} )
// pick nMethods to override and seal
def overrideAndSeal(implicit overridableMethods: List[Int]): DependFinite[Int, ( (Int, List[Int]), List[Int] )] =
Depend.fin( (nMethods: Int) =>
e.dependent.Chain[(Int, List[Int]), List[Int]](
e.Product(e.Enum( 0 to nMethods), overrid(overridableMethods)(nMethods)): Finite[(Int, List[Int])], // to seal ** overrides
seal: DependFinite[(Int, List[Int]), List[Int]]
)
)
// pick a combination of override and seal
def allOverrideAndSeal(allMethods: List[Int], sealedMap: Predef.Map[Int, Set[Int]]) =
// ( #overrides, ((#seals, overrides), seals) )
Depend.fin( (p: (List[Int], Int)) => {
val (implements, extend) = p
val overridableMethods: List[Int] = allMethods.diff(
((extend :: implements).flatMap { sealedMap(_) }): List[Int] )
e.dependent.Chain(
e.Enum( 0 to overridableMethods.size ): Finite[Int],
overrideAndSeal(overridableMethods): DependFinite[Int, ((Int, List[Int]), List[Int])]
)
}
)
test("override and seals") {
e.Enum( 0 to 1 ).size should be (2)
for(s <- 1 to 5) {
val list = 1 to s toList;
withClue( "list is: " + list ) {
for (i1 <- 0 to s; comb1 <- list.combinations(i1))
// overrid(list)(i1).toList should contain ( (i1, comb1) )
overrid(list)(i1).toList should contain ( comb1 )
val map = Predef.Map(1 -> Set[Int]())
val res = allOverrideAndSeal(list, map)(Nil, 1)
val resList: List[(Int, ((Int, List[Int]), List[Int]))] = res.toList
for (i1 <- 0 to s; comb1 <- list.combinations(i1);
i2 <- 0 to i1; comb2 <- comb1.combinations(i2)) {
resList should contain ( (i1, ((i2, comb1), comb2)): (Int, ((Int, List[Int]), List[Int])) )
}
}
withClue( "list is: " + list ) {
for (c1 <- 0 until list.size; forbidden <- list.combinations(c1)) {
val map = Predef.Map(1 -> Set[Int](forbidden: _*))
val res = allOverrideAndSeal(list, map)(List(1), 1)
val resList: List[(Int, ((Int, List[Int]), List[Int]))] = res.toList
for (i1 <- 0 to s; comb1 <- list.diff(forbidden).combinations(i1);
i2 <- 0 to i1; comb2 <- comb1.combinations(i2)) {
resList should contain ( (i1, ((i2, comb1), comb2)): (Int, ((Int, List[Int]), List[Int])) )
}
}
}
}
}
// pick which to implement
def implements_(implicit interfaces: Set[Int])=
Map(e.dependent.Chain(
// sizes
Map(e.Enum(0 to interfaces.size): Finite[Int], { (_: Int, interfaces.toList) }),
subListChooser
), { (_: (_, List[Int]))._2 })
// pick which to extend
def extends_(implicit classes: Set[Int]) =
// -1 interface, 0 class that does not extend anything, 1 to #classes which to extend
e.Enum((classes + (-1) + (0)).toArray): Finite[Int]
test("extends and implements") {
for(c <- 0 to 10; i <- 0 to (10 - c)) {
withClue( "(c, i):" + (c, i) ) {
extends_(-1 to c toSet).size should be (c + 2)
implements_(1 to i toSet).size should be (math.pow(2, i).toInt)
}
}
extends_(Set()).size should be (2)
implements_(Set()).size should be (1)
e.Product( implements_(Set()), extends_(Set()) ).size should be (2)
}
def makeAll(size: Int, classes: Set[Int], interfaces: Set[Int],
overridableMethods: List[Int], map: Predef.Map[Int, Set[Int]]):
Finite[((List[Int], Int), (Int, ((Int, List[Int]), List[Int])))] =
e.dependent.Chain(
e.Product( implements_(interfaces), extends_(classes) ): Finite[(List[Int], Int)],
allOverrideAndSeal(overridableMethods, map)
)
def makeList( p: ((List[Int], Int), (Int, ((Int, List[Int]), List[Int]))) ) = {
val ( (impl, ext), (_, ((_, overriden), sealed_) ) ) = p
(ext, impl, overriden, sealed_) :: Nil
}
def constructEnumerator(implicit ms: MemoizationScope = null) = {
Depend.memoized(
(self: EnumType, par: Input) => {
// list sorted descendingly
implicit val (size, myId, classes, interfaces, overridableMethods, sealedMap) = par
// if (size <= 0) e.Singleton(Nil): Finite[Output]
// else
if (size == 1) {
Map( makeAll(size, classes, interfaces, overridableMethods, sealedMap), makeList ): Finite[Output]
}
else {
val rest: Depend[((List[Int], Int), (Int, ((Int, List[Int]), List[Int]))), Output] =
InMap(self, { (par: ((List[Int], Int), (Int, ((Int, List[Int]), List[Int])))) =>
val lastAdded = par
val ((impl, ext), (_, ((_, overriden), sealed_) )) = lastAdded
val newClasses = if (ext >= 0) classes + myId else classes
val newInterfaces = if (ext < 0) interfaces + myId else interfaces
// val newMethods = overridableMethods.diff(sealed_)
// collect all sealed from parents
val allParents =
if (ext > 0) ext :: impl else impl
val parentsSealed =
( Set[Int]() /: allParents ) { case (res, parent) => res union sealedMap(parent) }
val newMap = sealedMap + ( myId -> (parentsSealed union sealed_.toSet) )
(size - 1, myId + 1, newClasses, newInterfaces, overridableMethods, newMap)
})
e.dependent.Chain[((List[Int], Int), (Int, ((Int, List[Int]), List[Int]))), Output, Output] (
makeAll(size, classes, interfaces, overridableMethods, sealedMap): Enum[((List[Int], Int), (Int, ((Int, List[Int]), List[Int])))],
rest: Depend[((List[Int], Int), (Int, ((Int, List[Int]), List[Int]))), Output],
(r: ((List[Int], Int), (Int, ((Int, List[Int]), List[Int]))), o: Output ) => { makeList(r) ::: o }
): Finite[Output]
}
})
}
}
| kaptoxic/SciFe | src/test/scala/scife/enumeration/testcases/ClassInterfaceDAGTest.scala | Scala | gpl-2.0 | 12,673 |
package com.kakashi.simpleservices
import spray.httpx.Json4sJacksonSupport
import org.json4s._
import java.util.UUID
object Json4sSupport extends Json4sJacksonSupport {
implicit def json4sJacksonFormats: Formats = jackson.Serialization.formats(NoTypeHints) + new UUIDFormat
//so you don't need to import
//jackson everywhere
val jsonMethods = org.json4s.jackson.JsonMethods
class UUIDFormat extends Serializer[UUID] {
val UUIDClass = classOf[UUID]
def deserialize(implicit format: Formats): PartialFunction[(TypeInfo, JValue), UUID] = {
case (TypeInfo(UUIDClass, _), JString(x)) => UUID.fromString(x)
}
def serialize(implicit format: Formats): PartialFunction[Any, JValue] = {
case x: UUID => JString(x.toString)
}
}
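  // With this serializer registered, UUID fields are written and read as plain JSON strings,
  // e.g. "123e4567-e89b-12d3-a456-426614174000".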
def toJValue[T](value: T): JValue = {
Extraction.decompose(value)
}
}
| freeservices/simpleservices | src/main/scala/com/kakashi/simpleservices/Json4sSupport.scala | Scala | mit | 848 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
class WordSpecMixedInMatchersSpec extends WordSpec with Matchers {
"This spec" should {
"work OK" in {
"hello" should startWith ("he")
"hello" should endWith ("lo")
"hello" should include ("el")
"hello" should startWith regex ("h*")
"hello" should endWith regex (".*o")
"hello" should include regex ("l*")
}
"still work OK" in {
"dude" should not startWith ("he")
"dude" should not endWith ("lo")
"dude" should not include ("el")
"dude" should not startWith regex ("h*l")
"dude" should not endWith regex ("e*o")
"dude" should not include regex ("e*l")
}
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/WordSpecMixedInMatchersSpec.scala | Scala | apache-2.0 | 1,275 |
package scorex.wallet
import java.io.File
import com.google.common.primitives.{Bytes, Ints}
import org.h2.mvstore.{MVMap, MVStore}
import scorex.account.PrivateKeyAccount
import scorex.crypto.encode.Base58
import scorex.crypto.hash.SecureCryptographicHash
import scorex.utils.{LogMVMapBuilder, ScorexLogging, randomBytes}
import scala.collection.JavaConverters._
import scala.collection.concurrent.TrieMap
//todo: add accs txs?
class Wallet(walletFileOpt: Option[File], password: String, seedOpt: Option[Array[Byte]]) extends ScorexLogging {
private val NonceFieldName = "nonce"
private val database: MVStore = walletFileOpt match {
case Some(walletFile) =>
//create parent folders then check their existence
walletFile.getParentFile.mkdirs().ensuring(walletFile.getParentFile.exists())
new MVStore.Builder().fileName(walletFile.getAbsolutePath).encryptionKey(password.toCharArray).compress().open()
case None => new MVStore.Builder().open()
}
private val accountsPersistence: MVMap[Int, Array[Byte]] = database.openMap("privkeys", new LogMVMapBuilder[Int, Array[Byte]])
private val seedPersistence: MVMap[String, Array[Byte]] = database.openMap("seed", new LogMVMapBuilder[String, Array[Byte]])
private val noncePersistence: MVMap[String, Int] = database.openMap("nonce", new LogMVMapBuilder[String, Int])
if (Option(seedPersistence.get("seed")).isEmpty) {
val seed = seedOpt.getOrElse {
val Attempts = 10
val SeedSize = 64
lazy val randomSeed = randomBytes(SeedSize)
lazy val encodedSeed = Base58.encode(randomSeed)
      println(s"Your randomly generated seed is $encodedSeed")
randomSeed
}
seedPersistence.put("seed", seed)
}
val seed: Array[Byte] = seedPersistence.get("seed")
private val accountsCache: TrieMap[String, PrivateKeyAccount] = {
val accounts = accountsPersistence.asScala.keys.map(k => accountsPersistence.get(k)).map(seed => new PrivateKeyAccount(seed))
TrieMap(accounts.map(acc => acc.address -> acc).toSeq: _*)
}
def privateKeyAccounts(): List[PrivateKeyAccount] = accountsCache.values.toList
def generateNewAccounts(howMany: Int): Seq[PrivateKeyAccount] =
(1 to howMany).flatMap(_ => generateNewAccount())
def generateNewAccount(): Option[PrivateKeyAccount] = synchronized {
val nonce = getAndIncrementNonce()
val account = Wallet.generateNewAccount(seed, nonce)
val address = account.address
val created = if (!accountsCache.contains(address)) {
accountsCache += account.address -> account
accountsPersistence.put(accountsPersistence.lastKey() + 1, account.seed)
database.commit()
true
} else false
if (created) {
log.info("Added account #" + privateKeyAccounts().size)
Some(account)
} else None
}
def deleteAccount(account: PrivateKeyAccount): Boolean = synchronized {
val res = accountsPersistence.asScala.keys.find { k =>
if (accountsPersistence.get(k) sameElements account.seed) {
accountsPersistence.remove(k)
true
} else false
}
database.commit()
accountsCache -= account.address
res.isDefined
}
def exportAccountSeed(address: String): Option[Array[Byte]] = privateKeyAccount(address).map(_.seed)
def privateKeyAccount(address: String): Option[PrivateKeyAccount] = accountsCache.get(address)
def close(): Unit = if (!database.isClosed) {
database.commit()
database.close()
accountsCache.clear()
}
def exists(): Boolean = walletFileOpt.forall(_.exists())
def nonce(): Int = Option(noncePersistence.get(NonceFieldName)).getOrElse(0)
private def getAndIncrementNonce(): Int = synchronized {
noncePersistence.put(NonceFieldName, nonce() + 1)
}
}
object Wallet {
def generateNewAccount(seed: Array[Byte], nonce: Int): PrivateKeyAccount = {
val accountSeed = generateAccountSeed(seed, nonce)
new PrivateKeyAccount(accountSeed)
}
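  // The account seed below is a hash over the 4-byte big-endian nonce followed by the wallet
  // seed, so each nonce deterministically derives a distinct account from the same wallet seed.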
def generateAccountSeed(seed: Array[Byte], nonce: Int): Array[Byte] =
SecureCryptographicHash(Bytes.concat(Ints.toByteArray(nonce), seed))
}
| B83YPoj/Waves | src/main/scala/scorex/wallet/Wallet.scala | Scala | apache-2.0 | 4,093 |
// Wei Chen - Regression
// 2018-09-12
package com.scalaml.algorithm
trait Regression extends Algorithm {
val algotype: String = "Regression"
def train(data: Array[(Double, Array[Double])]): Boolean
def predict(data: Array[Array[Double]]): Array[Double]
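    // Hypothetical implementation sketch: a trivial regressor could store the mean label in
    // train() and have predict() return that constant for every input row.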
} | Wei-1/Scala-Machine-Learning | src/main/scala/algorithm/regression/Regression.scala | Scala | mit | 269 |
package tech.sda.arcana.spark.classification.cnn
import com.intel.analytics.bigdl.nn._
import com.intel.analytics.bigdl._
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import com.intel.analytics.bigdl.utils.Engine
/** A class to build or initialize SparkContext depending on BigDl configurations (instead of SparkConf)
* @constructor create a new initializer with a maxFailures and master
* @param maxFailures the number of maximum failures allowed
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
*/
class SparkBigDlInitializer(val maxFailures: String = "1", val master: String = "local[1]") {
/** Initialize SparkContext depending on BigDl predefined configurations
* @param model the neural network application name (purpose)
* @return SparkContext object, which tells Spark how to access
* a cluster
*/
def initialize(model:String):SparkContext={
//initiate spark using the engine
val conf = Engine.createSparkConf()
.setAppName(model)
.set("spark.task.maxFailures", maxFailures)
//.setMaster(master)
val sc = new SparkContext(conf)
Engine.init
return sc
}
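  // Hypothetical usage sketch:
  //   val sc = new SparkBigDlInitializer(maxFailures = "2").initialize("lenet-training")
  //   // ... build and train a BigDL model with this SparkContext ...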
} | SmartDataAnalytics/ARCANA | src/main/scala/tech/sda/arcana/spark/classification/cnn/SparkBigDlInitializer.scala | Scala | gpl-3.0 | 1,283 |
/**
 * Print the names of all images in an XHTML file, i.e. print the value of the
 * src attribute of every img element.
 */
import scala.xml.XML
var html = "<html><head><title>First Page</title></head><body><p><img alt='a'><img src='1'></img></p></body></html>"
val images = (html \\ "img").flatMap(_.attributes("src"))
val images1 = html match{
case n @ <img/> => Some(n.attributes("src"))
}
println(images.mkString("\n"));
println(images1.mkString("\n"));
| vernonzheng/scala-for-the-Impatient | src/Chapter16/exercise05.scala | Scala | mit | 447 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.store.paxos
import com.treode.async.Async
import com.treode.async.stubs.StubScheduler
import com.treode.async.stubs.implicits._
import com.treode.cluster.{EphemeralPort, Peer, PortId}
import com.treode.cluster.stubs.{MessageCaptor, StubNetwork, StubPeer}
import com.treode.store.{Bytes, StoreTestTools, TxClock}
import org.scalatest.FreeSpec
import StoreTestTools._
class AcceptorSpec extends FreeSpec {
private implicit class RicStubPeer (peer: StubPeer) {
def ask (key: Long, ballot: Long, default: Int, from: MessageCaptor): Unit =
from.send (Acceptor.ask, peer) (1, key, ballot, default)
}
private case class Grant (key: Long, ballot: Long, proposed: Option [(Long, Int)])
private implicit class RichMessageCaptor (c: MessageCaptor) {
def expectGrant (peer: StubPeer) (implicit s: StubScheduler): Grant = {
val ((key, b1, proposed), from) = c.expect (Proposer.grant)
assertResult (peer.localId) (from.id)
proposed match {
case Some ((b2, value)) => Grant (key.long, b1, Some (b2.number, value.int))
case None => Grant (key.long, b1, None)
}}}
val k1 = 0xB91DBC0E0EE50880L
val v1 = 0xCCEA074C
"The Acceptor should" - {
"work" in {
implicit val (random, scheduler, network) = newKit()
val captor = MessageCaptor.install()
val h = StubPaxosHost .install() .expectPass()
h.ask (k1, 1, v1, captor)
assertResult (Grant (k1, 1, None)) (captor.expectGrant (h))
}}}
| Treode/store | store/test/com/treode/store/paxos/AcceptorSpec.scala | Scala | apache-2.0 | 2,094 |
package objsets
object TweetReader {
object ParseTweets {
import scala.util.parsing.json._
def getList[T](s: String): List[T] =
JSON.parseFull(s).get.asInstanceOf[List[T]]
def getMap(s: String): Map[String, Any] =
JSON.parseFull(s).get.asInstanceOf[Map[String, Any]]
def getTweets(user: String, json: String): List[Tweet] =
for (map <- getList[Map[String, Any]](json)) yield {
val text = map("text")
val retweets = map("retweet_count")
new Tweet(user, text.toString, retweets.toString.toDouble.toInt)
};
def getTweetData(user: String, json: String): List[Tweet] = {
// is list
val l = getList[Map[String, Any]](json)
for (map <- l) yield {
val text = map("text")
val retweets = map("retweets")
new Tweet(user, text.toString, retweets.toString.toDouble.toInt)
}
}
};
def toTweetSet(l: List[Tweet]): TweetSet = {
l.foldLeft(new Empty: TweetSet)(_.incl(_))
};
def unparseToData(tws: List[Tweet]): String = {
val buf = new StringBuffer
for (tw <- tws) {
      val json = "{ \"user\": \"" + tw.user + "\", \"text\": \"" +
        tw.text.replaceAll(""""""", "\\\\\\\"") + "\", \"retweets\": " +
        tw.retweets + ".0 }"
      buf.append(json + ",\n")
}
buf.toString;
};
val sites = List("gizmodo", "TechCrunch", "engadget", "amazondeals", "CNET", "gadgetlab", "mashable");
private val gizmodoTweets = TweetReader.ParseTweets.getTweetData("gizmodo", TweetData.gizmodo);
private val techCrunchTweets = TweetReader.ParseTweets.getTweetData("TechCrunch", TweetData.TechCrunch);
private val engadgetTweets = TweetReader.ParseTweets.getTweetData("engadget", TweetData.engadget);
private val amazondealsTweets = TweetReader.ParseTweets.getTweetData("amazondeals", TweetData.amazondeals);
private val cnetTweets = TweetReader.ParseTweets.getTweetData("CNET", TweetData.CNET);
private val gadgetlabTweets = TweetReader.ParseTweets.getTweetData("gadgetlab", TweetData.gadgetlab);
private val mashableTweets = TweetReader.ParseTweets.getTweetData("mashable", TweetData.mashable);
private val sources = List(gizmodoTweets, techCrunchTweets, engadgetTweets, amazondealsTweets, cnetTweets, gadgetlabTweets, mashableTweets)
val tweetMap: Map[String, List[Tweet]] =
Map() ++ Seq((sites(0) -> gizmodoTweets),
(sites(1) -> techCrunchTweets),
(sites(2) -> engadgetTweets),
(sites(3) -> amazondealsTweets),
(sites(4) -> cnetTweets),
(sites(5) -> gadgetlabTweets),
(sites(6) -> mashableTweets));
val tweetSets: List[TweetSet] = sources.map(tweets => toTweetSet(tweets))
private val siteTweetSetMap: Map[String, TweetSet] =
Map() ++ (sites zip tweetSets)
  private def unionOfAllTweetSets(curSets: List[TweetSet], acc: TweetSet): TweetSet = {
    if (curSets.isEmpty) acc
    else {
      print("Running union of all sets with " + curSets.head + "\n")
      unionOfAllTweetSets(curSets.tail, acc.union(curSets.head))
    }
};
val allTweets: TweetSet = unionOfAllTweetSets(tweetSets, new Empty)
};
| rranelli/RRFPPScala | @Assignments/week3/objsets/src/main/scala/objsets/TweetReader.scala | Scala | unlicense | 3,175 |
package reforest.data.tree
/**
* It represents how a node can be recognized in all the forests
* @param forestId the forest identifier of the node
* @param treeId the tree identifier in the forest of the node
* @param nodeId the node identifier in the tree of the node
*/
case class NodeId(val forestId : Int, val treeId : Int, val nodeId : Int) {
}
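// Example: NodeId(forestId = 0, treeId = 3, nodeId = 7) identifies node 7 of tree 3 in forest 0.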
| alessandrolulli/reforest | src/main/scala/reforest/data/tree/NodeId.scala | Scala | apache-2.0 | 361 |
package net.sansa_stack.rdf.spark.model.df
import net.sansa_stack.rdf.spark.io.fromRow
import org.apache.jena.graph.Triple
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
/**
* Spark/DataFrame based implementation of DataFrame of triples.
*
* @author Gezim Sejdiu
* @author Lorenz Buehmann
*/
object TripleOps {
/**
* Convert a [[DataFrame]] into a RDD[Triple].
*
* @param triples DataFrame of triples (as string).
* @return a RDD of triples.
*/
def toRDD(triples: DataFrame): RDD[Triple] = {
triples.rdd.map(fromRow)
}
/**
* Convert a DataFrame of Triple into a Dataset of Triple.
*
* @param triples DataFrame of triples.
* @return a Dataset of triples.
*/
def toDS(triples: DataFrame): Dataset[Triple] = {
val spark: SparkSession = SparkSession.builder().getOrCreate()
implicit val encoder: Encoder[Triple] = Encoders.kryo[Triple]
// triples.as[Triple]
spark.createDataset[Triple](toRDD(triples))
}
/**
* Get triples.
*
* @param triples DataFrame of triples.
* @return DataFrame which contains list of the triples.
*/
def getTriples(triples: DataFrame): DataFrame =
triples
/**
* Get subjects.
*
* @param triples DataFrame of triples.
* @return DataFrame which contains list of the subjects.
*/
def getSubjects(triples: DataFrame): DataFrame =
triples.select("s")
/**
* Get predicates.
*
* @param triples DataFrame of triples.
* @return DataFrame which contains list of the predicates.
*/
def getPredicates(triples: DataFrame): DataFrame =
triples.select("p")
/**
* Get objects.
*
* @param triples DataFrame of triples.
* @return DataFrame which contains list of the objects.
*/
def getObjects(triples: DataFrame): DataFrame =
triples.select("o")
/**
* Returns an DataFrame of triples that match with the given input.
*
* @param triples DataFrame of triples
* @param subject the subject
* @param predicate the predicate
* @param object the object
* @return DataFrame of triples
*/
def find(triples: DataFrame, subject: Option[String] = None, predicate: Option[String] = None, `object`: Option[String] = None): DataFrame = {
val sql = getSQL(subject, predicate, `object`)
triples.sqlContext.sql(sql)
}
/**
* Generate the translated SQL statement from the triple pattern.
*
* @param subject the subject
* @param predicate the predicate
* @param object the object
* @return the translated SQL statement as a string
*/
def getSQL(subject: Option[String] = None, predicate: Option[String] = None, `object`: Option[String] = None): String = {
var sql = s"SELECT s, p, o FROM TRIPLES"
if (subject.isDefined || predicate.isDefined || `object`.isDefined) {
sql += " WHERE "
val conditions = scala.collection.mutable.ListBuffer[String]()
if (subject.isDefined) conditions += s"s = '${subject.get}'"
if (predicate.isDefined) conditions += s"p = '${predicate.get}'"
if (`object`.isDefined) conditions += s"o = '${`object`.get}'"
sql += conditions.mkString(" AND ")
}
sql
}
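  // e.g. getSQL(Some("s1"), None, Some("o1")) yields
  //   "SELECT s, p, o FROM TRIPLES WHERE s = 's1' AND o = 'o1'"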
/**
* Returns an DataFrame of triples that match with the given input.
*
* @param triples DataFrame of triples
* @param triple the triple to be checked
* @return DataFrame of triples that match the given input
*/
def find(triples: DataFrame, triple: Triple): DataFrame = {
find(
triples,
if (triple.getSubject.isVariable) None else Option(triple.getSubject.getURI),
if (triple.getPredicate.isVariable) None else Option(triple.getPredicate.getURI),
if (triple.getObject.isVariable) None else {
Option(if (triple.getObject.isLiteral) {
triple.getObject.getLiteralLexicalForm
} else triple.getObject.getURI)
})
}
/**
* Return the union all of RDF graphs.
*
* @param triples DataFrame of RDF graph
* @param others sequence of DataFrames of other RDF graph
* @return graph (union of all)
*/
def unionAll(triples: DataFrame, others: Seq[DataFrame]): DataFrame = {
val df: Option[DataFrame] = others match {
case g :: Nil => Some(g.toDF())
case g :: _ =>
Some(
g.toDF()
.sqlContext
.createDataFrame(
g.toDF().sqlContext.sparkContext.union(others.map(_.toDF().rdd)),
g.toDF().schema))
case _ => None
}
df.get
}
/**
* Determine whether this RDF graph contains any triples
* with a given (subject, predicate, object) pattern.
*
* @param triples DataFrame of triples
* @param subject the subject (None for any)
* @param predicate the predicate (None for any)
* @param object the object (None for any)
* @return true if there exists within this RDF graph
* a triple with (S, P, O) pattern, false otherwise
*/
def contains(triples: DataFrame, subject: Option[String] = None, predicate: Option[String] = None, `object`: Option[String] = None): Boolean = {
!find(triples, subject, predicate, `object`).isEmpty
}
/**
* Determine if a triple is present in this RDF graph.
*
* @param triples DataFrame of triples
* @param triple the triple to be checked
* @return true if the statement s is in this RDF graph, false otherwise
*/
def contains(triples: DataFrame, triple: Triple): Boolean = {
!find(triples, triple).isEmpty
}
/**
* Determine if any of the triples in an RDF graph are also contained in this RDF graph.
*
* @param triples DataFrame of triples
* @param other the other RDF graph containing the statements to be tested
* @return true if any of the statements in RDF graph are also contained
* in this RDF graph and false otherwise.
*/
def containsAny(triples: DataFrame, other: DataFrame): Boolean = {
    !triples.intersect(other).isEmpty
}
/**
* Determine if all of the statements in an RDF graph are also contained in this RDF graph.
*
* @param triples DataFrame of triples
* @param other the other RDF graph containing the statements to be tested
* @return true if all of the statements in RDF graph are also contained
* in this RDF graph and false otherwise.
*/
def containsAll(triples: DataFrame, other: DataFrame): Boolean = {
    other.except(triples).isEmpty // .exceptAll()?
}
/**
* Add a statement to the current RDF graph.
*
* @param triples DataFrame of RDF graph
* @param triple the triple to be added.
* @return new DataFrame of triples containing this statement.
*/
def add(triples: DataFrame, triple: Triple): DataFrame = {
import net.sansa_stack.rdf.spark.model._
val statement = triples.sparkSession.sparkContext.parallelize(Seq(triple))
triples.union(statement.toDF())
}
/**
* Add a list of statements to the current RDF graph.
*
* @param triples DataFrame of RDF graph
* @param triple the list of triples to be added.
* @return new DataFrame of triples containing this list of statements.
*/
def addAll(triples: DataFrame, triple: Seq[Triple]): DataFrame = {
import net.sansa_stack.rdf.spark.model._
val statements = triples.sparkSession.sparkContext.parallelize(triple)
triples.union(statements.toDF())
}
/**
* Removes a statement from the current RDF graph.
* The statement with the same subject, predicate and
* object as that supplied will be removed from the model.
*
* @param triples DataFrame of RDF graph
* @param triple the statement to be removed.
* @return new DataFrame of triples without this statement.
*/
def remove(triples: DataFrame, triple: Triple): DataFrame = {
import net.sansa_stack.rdf.spark.model._
val statement = triples.sparkSession.sparkContext.parallelize(Seq(triple))
triples.except(statement.toDF())
}
/**
* Removes all the statements from the current RDF graph.
* The statements with the same subject, predicate and
* object as those supplied will be removed from the model.
*
* @param triples DataFrame of RDF graph
* @param triple the list of statements to be removed.
* @return new DataFrame of triples without these statements.
*/
def removeAll(triples: DataFrame, triple: Seq[Triple]): DataFrame = {
import net.sansa_stack.rdf.spark.model._
val statements = triples.sparkSession.sparkContext.parallelize(triple)
triples.except(statements.toDF())
}
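  // Illustrative sketch (not part of the original API): adding a statement and then removing
  // it yields a graph without that statement.
  private def removeExample(triples: DataFrame): DataFrame = {
    import org.apache.jena.graph.NodeFactory
    val stmt = Triple.create(
      NodeFactory.createURI("http://example.org/alice"),
      NodeFactory.createURI("http://xmlns.com/foaf/0.1/knows"),
      NodeFactory.createURI("http://example.org/bob"))
    remove(add(triples, stmt), stmt)
  }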
/**
* Write N-Triples from a given DataFrame of triples
*
* @param triples DataFrame of RDF graph
   * @param path path of the file to which the N-Triples are written
*/
def saveAsNTriplesFile(triples: DataFrame, path: String): Unit = {
net.sansa_stack.rdf.spark.model.ds.TripleOps.saveAsNTriplesFile(toDS(triples), path)
}
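  // Minimal usage sketch (illustrative only; the output path is an assumption): persist the
  // graph as N-Triples. The DataFrame is converted to a Dataset internally before writing.
  private def saveExample(triples: DataFrame): Unit =
    saveAsNTriplesFile(triples, "/tmp/graph.nt")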
}
| SANSA-Stack/SANSA-RDF | sansa-rdf/sansa-rdf-spark/src/main/scala/net/sansa_stack/rdf/spark/model/df/TripleOps.scala | Scala | apache-2.0 | 8,816 |
/* Slf4jSupportSpec.scala
*
* Copyright (c) 2013-2014 linkedin.com
* Copyright (c) 2013-2014 zman.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package atmos.dsl
import scala.concurrent.duration._
import org.slf4j.Logger
import atmos.monitor._
import Slf4jSupport._
import org.scalatest._
import org.scalamock.scalatest.MockFactory
/**
* Test suite for [[atmos.dsl.Slf4jSupport]].
*/
class Slf4jSupportSpec extends FlatSpec with Matchers with MockFactory {
"Slf4jSupport" should "support viewing Slf4j-logging-compatible objects as event monitors" in {
val logger = mock[Logger]
val monitor = logger: LogEventsWithSlf4j
monitor shouldBe LogEventsWithSlf4j(logger)
(monitor: LogEventsWithSlf4jExtensions) shouldBe LogEventsWithSlf4jExtensions(monitor)
(logger: LogEventsWithSlf4jExtensions) shouldBe LogEventsWithSlf4jExtensions(LogEventsWithSlf4j(logger))
}
it should "return Slf4j log levels in response to generic level queries" in {
import LogEventsWithSlf4j.Slf4jLevel
    Slf4jEventLogLevels.errorLevel shouldBe Slf4jLevel.Error
    Slf4jEventLogLevels.warningLevel shouldBe Slf4jLevel.Warn
    Slf4jEventLogLevels.infoLevel shouldBe Slf4jLevel.Info
    Slf4jEventLogLevels.debugLevel shouldBe Slf4jLevel.Debug
}
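  /**
   * Minimal usage sketch (illustrative only, not part of the original spec): a plain Slf4j
   * logger obtained from `LoggerFactory` can be passed directly wherever an atmos event
   * monitor is expected, relying on the implicit views exercised above. The logger name is
   * an assumption chosen for illustration.
   */
  object UsageSketch {
    import org.slf4j.LoggerFactory
    val logger: Logger = LoggerFactory.getLogger("retries")
    val monitor: LogEventsWithSlf4j = logger
  }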
}
| zmanio/atmos | src/test/scala/atmos/dsl/Slf4jSupportSpec.scala | Scala | apache-2.0 | 1,857
package com.krux.hyperion.activity
import com.krux.hyperion.HyperionContext
import com.krux.hyperion.adt.{HBoolean, HS3Uri, HString}
import com.krux.hyperion.common.{BaseFields, PipelineObjectId}
import com.krux.hyperion.expression.RunnableObject
import com.krux.hyperion.resource.{Ec2Resource, Resource}
/**
* The server-side PGP decryption activity decrypts files from the input location to the output location using the
* private decryption key.
*
* @param baseFields the pipeline base fields
* @param activityFields the activity setup fields
* @param shellCommandActivityFields the shell command setup fields
* @param key the file containing the private decryption key
* @param markSuccessfulJobs add a _SUCCESS file to the output location on success
*/
case class PgpDecryptActivity private(
baseFields: BaseFields,
activityFields: ActivityFields[Ec2Resource],
shellCommandActivityFields: ShellCommandActivityFields,
key: HS3Uri,
markSuccessfulJobs: HBoolean
) extends PgpActivity {
type Self = PgpDecryptActivity
def updateBaseFields(fields: BaseFields) = copy(baseFields = fields)
def updateActivityFields(fields: ActivityFields[Ec2Resource]) = copy(activityFields = fields)
def updateShellCommandActivityFields(fields: ShellCommandActivityFields) = copy(shellCommandActivityFields = fields)
def markOnSuccess = copy(markSuccessfulJobs = true)
override def scriptArguments = Seq(
if (markSuccessfulJobs) Option("--mark-successful-jobs") else None,
Option(key.serialize)
).flatten
}
object PgpDecryptActivity
extends RunnableObject {
def apply(key: HS3Uri)(runsOn: Resource[Ec2Resource])(implicit hc: HyperionContext): PgpDecryptActivity =
new PgpDecryptActivity(
baseFields = BaseFields(PipelineObjectId(PgpDecryptActivity.getClass)),
activityFields = ActivityFields(runsOn),
shellCommandActivityFields = ShellCommandActivityFields(PgpActivity.decryptScript),
key = key,
markSuccessfulJobs = HBoolean.False
)
}
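/**
 * Minimal usage sketch (illustrative only, not part of the original source): the decryption
 * key location and the EC2 resource are assumed to be supplied by the surrounding pipeline
 * definition.
 */
private[activity] object PgpDecryptActivityExample {
  def build(key: HS3Uri, ec2: Resource[Ec2Resource])(implicit hc: HyperionContext): PgpDecryptActivity =
    PgpDecryptActivity(key)(ec2).markOnSuccess
}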
| sethyates/hyperion | contrib/activity/definition/src/main/scala/com/krux/hyperion/activity/PgpDecryptActivity.scala | Scala | apache-2.0 | 2,018 |
package flowlib
/** A bounded buffer retaining at most `2 * backlog` recent values, counting all values offered. */
class RingBuffer[T](backlog: Int) {
  private def maxBacklog = backlog * 2
  // retained values plus the total number of values offered so far
  private case class State(buffer: Vector[T], seen: Long)
  private val state = Transactor(State(Vector.empty, 0L))
  /** Append a value, dropping the oldest entries once the buffer exceeds twice the backlog. */
  def offer(t: T): Unit =
    state.transact {
      case State(buffer, seen) =>
        val trimmed =
          if (buffer.length >= maxBacklog) buffer drop (buffer.length - backlog - 1)
          else buffer
        State(trimmed :+ t, seen + 1)
    } { _ => }
  /** Deliver to `k` the value at `offset` (or the oldest retained value if that offset has
   *  already been dropped) once at least `offset + 1` values have been offered; `k` also
   *  receives the offset actually delivered. */
  def take(offset: Long)(k: (Long, T) => Unit): Unit =
    state.transact {
      case s @ State(_, seen) if offset < seen => s
    } {
      case State(buffer, seen) =>
        val ix = ((offset - seen + buffer.length) max 0L).toInt
        k(ix - buffer.length + seen, buffer(ix))
    }
}
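/**
 * Minimal usage sketch (illustrative only, not part of the original source): offer a few
 * values and request the one at logical offset 0. The callback receives the offset actually
 * delivered, which may be later than requested if older entries were dropped; whether it runs
 * synchronously depends on the Transactor's execution model.
 */
object RingBufferExample {
  def main(args: Array[String]): Unit = {
    val ring = new RingBuffer[String](backlog = 4)
    ring.offer("a")
    ring.offer("b")
    ring.take(0L) { (offset, value) =>
      println(s"delivered $value at offset $offset")
    }
  }
}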
| arnolddevos/FlowLib | src/main/scala/flowlib/RingBuffer.scala | Scala | lgpl-2.1 | 750 |