code | repo_name | path | language | license | size
---|---|---|---|---|---
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt
package internal
import java.io.File
import java.nio.file.Path
import org.apache.ivy.core.module.descriptor.{ DefaultArtifact, Artifact => IArtifact }
import org.apache.ivy.core.report.DownloadStatus
import org.apache.ivy.core.resolve.DownloadOptions
import org.apache.ivy.plugins.resolver.DependencyResolver
import sbt.Defaults.prefix
import sbt.Keys._
import sbt.Project._
import sbt.ScopeFilter.Make._
import sbt.SlashSyntax0._
import sbt.coursierint.LMCoursier
import sbt.internal.inc.{ HashUtil, JarUtils }
import sbt.internal.librarymanagement._
import sbt.internal.remotecache._
import sbt.io.IO
import sbt.io.syntax._
import sbt.librarymanagement._
import sbt.librarymanagement.ivy.{ Credentials, IvyPaths, UpdateOptions }
import sbt.librarymanagement.syntax._
import sbt.nio.FileStamp
import sbt.nio.Keys.{ inputFileStamps, outputFileStamps }
import sbt.std.TaskExtra._
import sbt.util.InterfaceUtil.toOption
import sbt.util.Logger
import scala.annotation.nowarn
object RemoteCache {
final val cachedCompileClassifier = "cached-compile"
final val cachedTestClassifier = "cached-test"
final val commitLength = 10
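// Helpers for deriving a cache id from git history; toVersion (below) turns such an id into the
// synthetic artifact version "0.0.0-<id>".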
def gitCommitId: String =
scala.sys.process.Process("git rev-parse HEAD").!!.trim.take(commitLength)
def gitCommitIds(n: Int): List[String] =
scala.sys.process
.Process("git log -n " + n.toString + " --format=%H")
.!!
.linesIterator
.toList
.map(_.take(commitLength))
lazy val defaultCacheLocation: File = SysProp.globalLocalCache
lazy val globalSettings: Seq[Def.Setting[_]] = Seq(
remoteCacheId := "",
remoteCacheIdCandidates := Nil,
pushRemoteCacheTo :== None,
localCacheDirectory :== defaultCacheLocation,
pushRemoteCache / ivyPaths := {
val app = appConfiguration.value
val base = app.baseDirectory.getCanonicalFile
// base is used only to resolve relative paths, which should never happen
IvyPaths(base, localCacheDirectory.value)
},
)
lazy val projectSettings: Seq[Def.Setting[_]] = (Seq(
pushRemoteCache := (Def.taskDyn {
val arts = (pushRemoteCacheConfiguration / remoteCacheArtifacts).value
val configs = arts flatMap { art =>
art.packaged.scopedKey.scope match {
case Scope(_, Select(c), _, _) => Some(c)
case _ => None
}
}
val filter = ScopeFilter(configurations = inConfigurationsByKeys(configs: _*))
Def.task {
val _ = pushRemoteCache.all(filter).value
()
}
}).value,
pullRemoteCache := (Def.taskDyn {
val arts = (pushRemoteCacheConfiguration / remoteCacheArtifacts).value
val configs = arts flatMap { art =>
art.packaged.scopedKey.scope match {
case Scope(_, Select(c), _, _) => Some(c)
case _ => None
}
}
val filter = ScopeFilter(configurations = inConfigurationsByKeys(configs: _*))
Def.task {
val _ = pullRemoteCache.all(filter).value
()
}
}).value,
pushRemoteCacheConfiguration / remoteCacheArtifacts := {
enabledOnly(remoteCacheArtifact.toSettingKey, defaultArtifactTasks).apply(_.join).value
},
pushRemoteCacheConfiguration / publishMavenStyle := true,
Compile / packageCache / pushRemoteCacheArtifact := true,
Test / packageCache / pushRemoteCacheArtifact := true,
Compile / packageCache / artifact := Artifact(moduleName.value, cachedCompileClassifier),
Test / packageCache / artifact := Artifact(moduleName.value, cachedTestClassifier),
remoteCachePom / pushRemoteCacheArtifact := true,
remoteCachePom := {
val s = streams.value
val config = (remoteCachePom / makePomConfiguration).value
val publisher = Keys.publisher.value
publisher.makePomFile((pushRemoteCache / ivyModule).value, config, s.log)
config.file.get
},
remoteCachePom / artifactPath := {
Defaults.prefixArtifactPathSetting(makePom / artifact, "remote-cache").value
},
remoteCachePom / makePomConfiguration := {
val config = makePomConfiguration.value
config.withFile((remoteCachePom / artifactPath).value)
},
remoteCachePom / remoteCacheArtifact := {
PomRemoteCacheArtifact((makePom / artifact).value, remoteCachePom)
},
remoteCacheResolvers := pushRemoteCacheTo.value.toVector,
) ++ inTask(pushRemoteCache)(
Seq(
ivyPaths := (Scope.Global / pushRemoteCache / ivyPaths).value,
ivyConfiguration := {
val config0 = Classpaths.mkIvyConfiguration.value
config0
.withResolvers(remoteCacheResolvers.value.toVector)
.withOtherResolvers(pushRemoteCacheTo.value.toVector)
.withResolutionCacheDir(crossTarget.value / "alt-resolution")
.withPaths(ivyPaths.value)
.withUpdateOptions(UpdateOptions().withGigahorse(true))
},
ivySbt := {
Credentials.register(credentials.value, streams.value.log)
val config0 = ivyConfiguration.value
new IvySbt(config0, sbt.internal.CustomHttp.okhttpClient.value)
},
)
) ++ inTask(pullRemoteCache)(
Seq(
dependencyResolution := Defaults.dependencyResolutionTask.value,
csrConfiguration := {
val rs = pushRemoteCacheTo.value.toVector ++ remoteCacheResolvers.value.toVector
LMCoursier.scalaCompilerBridgeConfigurationTask.value
.withResolvers(rs)
}
)
) ++ inConfig(Compile)(configCacheSettings(compileArtifact(Compile, cachedCompileClassifier)))
++ inConfig(Test)(configCacheSettings(testArtifact(Test, cachedTestClassifier))))
def getResourceFilePaths() = Def.task {
val syncDir = crossTarget.value / (prefix(configuration.value.name) + "sync")
val file = syncDir / "copy-resource"
file
}
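// Wires the packageCache task for a configuration: the cache jar is the regular packageBin output
// plus the Zinc analysis file (META-INF/inc_compile.zip) and, when present, the copy-resources
// listing (META-INF/copy-resources.txt); the accompanying pushRemoteCache settings publish it.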
@nowarn
def configCacheSettings[A <: RemoteCacheArtifact](
cacheArtifactTask: Def.Initialize[Task[A]]
): Seq[Def.Setting[_]] =
inTask(packageCache)(
Seq(
packageCache.in(Defaults.TaskZero) := {
val original = packageBin.in(Defaults.TaskZero).value
val artp = artifactPath.value
val af = compileAnalysisFile.value
IO.copyFile(original, artp)
// skip zip manipulation if the artp is a blank file
if (af.exists && artp.length() > 0) {
JarUtils.includeInJar(artp, Vector(af -> s"META-INF/inc_compile.zip"))
}
val rf = getResourceFilePaths.value
if (rf.exists) {
JarUtils.includeInJar(artp, Vector(rf -> s"META-INF/copy-resources.txt"))
}
// val testStream = (test / streams).?.value
// testStream foreach { s =>
// val sf = Defaults.succeededFile(s.cacheDirectory)
// if (sf.exists) {
// JarUtils.includeInJar(artp, Vector(sf -> s"META-INF/succeeded_tests"))
// }
// }
artp
},
pushRemoteCacheArtifact := true,
remoteCacheArtifact := cacheArtifactTask.value,
packagedArtifact := (artifact.value -> packageCache.value),
artifactPath := Defaults.artifactPathSetting(artifact).value
)
) ++ inTask(pushRemoteCache)(
Seq(
moduleSettings := {
val smi = scalaModuleInfo.value
ModuleDescriptorConfiguration(remoteCacheProjectId.value, projectInfo.value)
.withScalaModuleInfo(smi)
},
pushRemoteCache.in(Defaults.TaskZero) := (Def.task {
val s = streams.value
val config = pushRemoteCacheConfiguration.value
val is = (pushRemoteCache / ivySbt).value
val m = new is.Module(moduleSettings.value)
IvyActions.publish(m, config, s.log)
} tag (Tags.Publish, Tags.Network)).value,
)
) ++ Seq(
remoteCacheIdCandidates := List(remoteCacheId.value),
remoteCacheProjectId := {
val o = organization.value
val m = moduleName.value
val id = remoteCacheId.value
val c = (projectID / crossVersion).value
val v = toVersion(id)
ModuleID(o, m, v).cross(c)
},
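// Default cache id: a farm hash over the stamps of unmanaged sources, the external dependency
// classpath and any extra incremental-compiler options, so a change in any of these inputs yields
// a different cache key.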
remoteCacheId := {
val inputs = (unmanagedSources / inputFileStamps).value
val cp = (externalDependencyClasspath / outputFileStamps).?.value.getOrElse(Nil)
val extraInc = (extraIncOptions.value) flatMap {
case (k, v) =>
Vector(k, v)
}
combineHash(extractHash(inputs) ++ extractHash(cp) ++ extraInc)
},
pushRemoteCacheConfiguration := {
Classpaths.publishConfig(
(pushRemoteCacheConfiguration / publishMavenStyle).value,
Classpaths.deliverPattern(crossTarget.value),
if (isSnapshot.value) "integration" else "release",
ivyConfigurations.value.map(c => ConfigRef(c.name)).toVector,
(pushRemoteCacheConfiguration / packagedArtifacts).value.toVector,
(pushRemoteCacheConfiguration / checksums).value.toVector,
Classpaths.getPublishTo(pushRemoteCacheTo.value).name,
ivyLoggingLevel.value,
isSnapshot.value
)
},
pushRemoteCacheConfiguration / packagedArtifacts := Def.taskDyn {
val artifacts = (pushRemoteCacheConfiguration / remoteCacheArtifacts).value
artifacts
.map(a => a.packaged.map(file => (a.artifact, file)))
.join
.apply(_.join.map(_.toMap))
}.value,
pushRemoteCacheConfiguration / remoteCacheArtifacts := {
List((packageCache / remoteCacheArtifact).value)
},
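// Try each candidate cache id in order against the first remote cache resolver; on the first hit,
// extract every non-POM artifact into its extract directory and stop looking at further ids.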
pullRemoteCache := {
import scala.collection.JavaConverters._
val log = streams.value.log
val r = remoteCacheResolvers.value.head
val p = remoteCacheProjectId.value
val ids = remoteCacheIdCandidates.value
val is = (pushRemoteCache / ivySbt).value
val m = new is.Module((pushRemoteCache / moduleSettings).value)
val smi = scalaModuleInfo.value
val artifacts = (pushRemoteCacheConfiguration / remoteCacheArtifacts).value
val nonPom = artifacts.filterNot(isPomArtifact).toVector
val copyResources = getResourceFilePaths.value
m.withModule(log) {
case (ivy, md, _) =>
val resolver = ivy.getSettings.getResolver(r.name)
if (resolver eq null) sys.error(s"undefined resolver '${r.name}'")
val cross = CrossVersion(p, smi)
val crossf: String => String = cross.getOrElse(identity _)
var found = false
ids foreach {
id: String =>
val v = toVersion(id)
val modId = p.withRevision(v).withName(crossf(p.name))
val ivyId = IvySbt.toID(modId)
if (found) ()
else {
val rawa = nonPom map { _.artifact }
val seqa = CrossVersion.substituteCross(rawa, cross)
val as = seqa map { a =>
val extra = a.classifier match {
case Some(c) => Map("e:classifier" -> c)
case None => Map.empty
}
new DefaultArtifact(ivyId, null, a.name, a.`type`, a.extension, extra.asJava)
}
pullFromMavenRepo0(as, resolver, log) match {
case Right(xs0) =>
val jars = xs0.distinct
nonPom.foreach { art =>
val classifier = art.artifact.classifier
findJar(classifier, v, jars) match {
case Some(jar) =>
extractJar(art, jar, copyResources)
log.info(s"remote cache artifact extracted for $p $classifier")
case None =>
log.info(s"remote cache artifact not found for $p $classifier")
}
}
found = true
case Left(e) =>
log.info(s"remote cache not found for ${v}")
log.debug(e.getMessage)
}
}
}
()
}
},
)
def isPomArtifact(artifact: RemoteCacheArtifact): Boolean =
artifact match {
case _: PomRemoteCacheArtifact => true
case _ => false
}
def compileArtifact(
configuration: Configuration,
classifier: String
): Def.Initialize[Task[CompileRemoteCacheArtifact]] = Def.task {
CompileRemoteCacheArtifact(
Artifact(moduleName.value, classifier),
configuration / packageCache,
(configuration / classDirectory).value,
(configuration / compileAnalysisFile).value
)
}
def testArtifact(
configuration: Configuration,
classifier: String
): Def.Initialize[Task[TestRemoteCacheArtifact]] = Def.task {
TestRemoteCacheArtifact(
Artifact(moduleName.value, classifier),
configuration / packageCache,
(configuration / classDirectory).value,
(configuration / compileAnalysisFile).value,
Defaults.succeededFile((configuration / test / streams).value.cacheDirectory)
)
}
private def toVersion(v: String): String = s"0.0.0-$v"
private lazy val doption = new DownloadOptions
private def pullFromMavenRepo0(
artifacts: Vector[IArtifact],
r: DependencyResolver,
log: Logger
): Either[Throwable, Vector[File]] = {
try {
val files = r.download(artifacts.toArray, doption).getArtifactsReports.toVector map {
report =>
if (report == null) sys.error(s"failed to download $artifacts: " + r.toString)
else
report.getDownloadStatus match {
case DownloadStatus.NO =>
val o = report.getArtifactOrigin
if (o.isLocal) {
val localFile = new File(o.getLocation)
if (!localFile.exists) sys.error(s"$localFile doesn't exist")
else localFile
} else report.getLocalFile
case DownloadStatus.SUCCESSFUL =>
report.getLocalFile
case DownloadStatus.FAILED =>
sys.error(s"failed to download $artifacts: " + r.toString)
}
}
Right(files)
} catch {
case e: Throwable => Left(e)
}
}
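// Picks the downloaded jar whose name ends with the expected version (plus "-<classifier>.jar"
// when the artifact has a classifier).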
private def findJar(classifier: Option[String], ver: String, jars: Vector[File]): Option[File] = {
val suffix = classifier.fold(ver)(c => s"$ver-$c.jar")
jars.find(_.toString.endsWith(suffix))
}
private def extractJar(
cacheArtifact: RemoteCacheArtifact,
jar: File,
copyResources: File
): Unit =
cacheArtifact match {
case a: CompileRemoteCacheArtifact =>
extractCache(jar, a.extractDirectory, preserveLastModified = true) { output =>
extractAnalysis(output, a.analysisFile)
extractResourceList(output, copyResources)
}
case a: TestRemoteCacheArtifact =>
extractCache(jar, a.extractDirectory, preserveLastModified = true) { output =>
extractAnalysis(output, a.analysisFile)
extractTestResult(output, a.testResult)
}
case a: CustomRemoteCacheArtifact =>
extractCache(jar, a.extractDirectory, a.preserveLastModified)(_ => ())
case _ =>
()
}
private def extractCache(jar: File, output: File, preserveLastModified: Boolean)(
processOutput: File => Unit
): Unit = {
IO.delete(output)
IO.unzip(jar, output, preserveLastModified = preserveLastModified)
processOutput(output)
// preserve semanticdb dir
// https://github.com/scalameta/scalameta/blob/a7927ee8e012cfff/semanticdb/scalac/library/src/main/scala/scala/meta/internal/semanticdb/scalac/SemanticdbPaths.scala#L9
Option((output / "META-INF").listFiles).foreach(
_.iterator.filterNot(_.getName == "semanticdb").foreach(IO.delete)
)
}
private def extractAnalysis(output: File, analysisFile: File): Unit = {
val metaDir = output / "META-INF"
val expandedAnalysis = metaDir / "inc_compile.zip"
if (expandedAnalysis.exists) {
IO.move(expandedAnalysis, analysisFile)
}
}
private def extractResourceList(output: File, copyResources: File): Unit = {
val metaDir = output / "META-INF"
val extractedCopyResources = metaDir / "copy-resources.txt"
if (extractedCopyResources.exists) {
IO.move(extractedCopyResources, copyResources)
}
}
private def extractTestResult(output: File, testResult: File): Unit = {
//val expandedTestResult = output / "META-INF" / "succeeded_tests"
//if (expandedTestResult.exists) {
// IO.move(expandedTestResult, testResult)
//}
}
private def defaultArtifactTasks: Seq[TaskKey[File]] =
Seq(Compile / packageCache, Test / packageCache)
private def enabledOnly[A](
key: SettingKey[A],
pkgTasks: Seq[TaskKey[File]]
): Def.Initialize[Seq[A]] =
(Classpaths.forallIn(key, pkgTasks) zipWith
Classpaths.forallIn(pushRemoteCacheArtifact, pkgTasks))(_ zip _ collect {
case (a, true) => a
})
private def extractHash(inputs: Seq[(Path, FileStamp)]): Vector[String] =
inputs.toVector map {
case (_, stamp0) => toOption(stamp0.stamp.getHash).getOrElse("cafe")
}
private def combineHash(vs: Vector[String]): String = {
val hashValue = HashUtil.farmHash(vs.sorted.mkString("").getBytes("UTF-8"))
java.lang.Long.toHexString(hashValue)
}
}
| xuwei-k/xsbt | main/src/main/scala/sbt/internal/RemoteCache.scala | Scala | apache-2.0 | 17,714 |
/*§
===========================================================================
Chronos
===========================================================================
Copyright (C) 2015-2016 Gianluca Costa
===========================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package info.gianlucacosta.chronos.ast.expressions
import info.gianlucacosta.chronos.ast.{AstVisitor, Expression}
case class ExponentialRandom(averageValue: Expression) extends Expression {
override def accept[T](visitor: AstVisitor[T]): T =
visitor.visit(this)
}
| giancosta86/Chronos | src/main/scala/info/gianlucacosta/chronos/ast/expressions/ExponentialRandom.scala | Scala | apache-2.0 | 1,187 |
package doobie.contrib.postgresql.hi
import org.postgresql.{ PGConnection, PGNotification }
import doobie.contrib.postgresql.free.pgconnection.PGConnectionIO
import doobie.contrib.postgresql.free.copymanager.CopyManagerIO
import doobie.contrib.postgresql.free.fastpath.FastpathIO
import doobie.contrib.postgresql.free.largeobjectmanager.LargeObjectManagerIO
import doobie.contrib.postgresql.hi.{ pgconnection => HPGC }
import doobie.imports._
import scalaz.syntax.functor._
/** Module of safe `PGConnectionIO` operations lifted into `ConnectionIO`. */
object connection {
val pgGetBackendPID: ConnectionIO[Int] =
pgGetConnection(HPGC.getBackendPID)
def pgGetConnection[A](k: PGConnectionIO[A]): ConnectionIO[A] =
FC.unwrap(classOf[PGConnection]) >>= k.transK[ConnectionIO]
def pgGetCopyAPI[A](k: CopyManagerIO[A]): ConnectionIO[A] =
pgGetConnection(HPGC.getCopyAPI(k))
def pgGetFastpathAPI[A](k: FastpathIO[A]): ConnectionIO[A] =
pgGetConnection(HPGC.getFastpathAPI(k))
def pgGetLargeObjectAPI[A](k: LargeObjectManagerIO[A]): ConnectionIO[A] =
pgGetConnection(HPGC.getLargeObjectAPI(k))
val pgGetNotifications: ConnectionIO[List[PGNotification]] =
pgGetConnection(HPGC.getNotifications)
val pgGetPrepareThreshold: ConnectionIO[Int] =
pgGetConnection(HPGC.getPrepareThreshold)
def pgSetPrepareThreshold(threshold: Int): ConnectionIO[Unit] =
pgGetConnection(HPGC.setPrepareThreshold(threshold))
/**
* Construct a program that notifies on the given channel. Note that the channel is NOT sanitized;
* it cannot be passed as a parameter and is simply interpolated into the statement. DO NOT pass
* user input here.
*/
def pgNotify(channel: String): ConnectionIO[Unit] =
execVoid("NOTIFY " + channel)
/**
* Construct a program that notifies on the given channel, with a payload. Note that neither the
* channel nor the payload are sanitized; neither can be passed as parameters and are simply
* interpolated into the statement. DO NOT pass user input here.
*/
def pgNotify(channel: String, payload: String): ConnectionIO[Unit] =
execVoid(s"NOTIFY $channel, '$payload'")
/**
* Construct a program that starts listening on the given channel. Note that the channel is NOT
* sanitized; it cannot be passed as a parameter and is simply interpolated into the statement.
* DO NOT pass user input here.
*/
def pgListen(channel: String): ConnectionIO[Unit] =
execVoid("LISTEN " + channel)
/**
* Construct a program that stops listening on the given channel. Note that the channel is NOT
* sanitized; it cannot be passed as a parameter and is simply interpolated into the statement.
* DO NOT pass user input here.
*/
def pgUnlisten(channel: String): ConnectionIO[Unit] =
execVoid("UNLISTEN " + channel)
// a helper
private def execVoid(sql: String): ConnectionIO[Unit] =
HC.prepareStatement(sql)(HPS.executeUpdate).void
}
| jamescway/doobie | contrib/postgresql/src/main/scala/doobie/contrib/postgresql/hi/connection.scala | Scala | mit | 2,978 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.expressions
import org.apache.spark.sql.catalyst.encoders.encoderFor
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, Complete}
import org.apache.spark.sql.execution.aggregate.TypedAggregateExpression
import org.apache.spark.sql.{DataFrame, Dataset, Encoder, TypedColumn}
/**
* A base class for user-defined aggregations, which can be used in [[DataFrame]] and [[Dataset]]
* operations to take all of the elements of a group and reduce them to a single value.
*
* For example, the following aggregator extracts an `int` from a specific class and adds them up:
* {{{
* case class Data(i: Int)
*
* val customSummer = new Aggregator[Data, Int, Int] {
* def zero: Int = 0
* def reduce(b: Int, a: Data): Int = b + a.i
* def merge(b1: Int, b2: Int): Int = b1 + b2
* def finish(r: Int): Int = r
* }.toColumn()
*
* val ds: Dataset[Data] = ...
* val aggregated = ds.select(customSummer)
* }}}
*
* Based loosely on Aggregator from Algebird: https://github.com/twitter/algebird
*
* @tparam I The input type for the aggregation.
* @tparam B The type of the intermediate value of the reduction.
* @tparam O The type of the final output result.
*
* @since 1.6.0
*/
abstract class Aggregator[-I, B, O] extends Serializable {
/**
* A zero value for this aggregation. Should satisfy the property that any b + zero = b.
* @since 1.6.0
*/
def zero: B
/**
* Combine two values to produce a new value. For performance, the function may modify `b` and
* return it instead of constructing new object for b.
* @since 1.6.0
*/
def reduce(b: B, a: I): B
/**
* Merge two intermediate values.
* @since 1.6.0
*/
def merge(b1: B, b2: B): B
/**
* Transform the output of the reduction.
* @since 1.6.0
*/
def finish(reduction: B): O
/**
* Returns this `Aggregator` as a [[TypedColumn]] that can be used in [[Dataset]] or [[DataFrame]]
* operations.
* @since 1.6.0
*/
def toColumn(
implicit bEncoder: Encoder[B],
cEncoder: Encoder[O]): TypedColumn[I, O] = {
val expr =
new AggregateExpression(
TypedAggregateExpression(this),
Complete,
false)
new TypedColumn[I, O](expr, encoderFor[O])
}
}
| chenc10/Spark-PAF | sql/core/src/main/scala/org/apache/spark/sql/expressions/Aggregator.scala | Scala | apache-2.0 | 3,105 |
package monocle.law.discipline.function
import monocle.function.FilterIndex._
import monocle.function._
import monocle.law.discipline.TraversalTests
import org.scalacheck.Arbitrary
import org.typelevel.discipline.Laws
import monocle.catssupport.Implicits._
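/** Law checks for `FilterIndex`: `filterIndex(predicate)` must form a lawful `Traversal`. */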
object FilterIndexTests extends Laws {
def apply[S: Equal : Arbitrary, I, A: Equal : Arbitrary](implicit evFilterIndex: FilterIndex[S, I, A],
arbAA: Arbitrary[A => A], arbIB: Arbitrary[I => Boolean]): RuleSet =
new SimpleRuleSet("FilterIndex", TraversalTests(filterIndex(_: I => Boolean)(evFilterIndex)).props: _*)
} | fkz/Monocle | law/shared/src/main/scala/monocle/law/discipline/function/FilterIndexTests.scala | Scala | mit | 653 |
import language.experimental.macros
import reflect.macros.Context
import macrame.{ internal ⇒ fn }
package object macrame {
/**
* Logs the source code of the given expression to the console during
* compilation.
*/
def trace[A](a : A) : A = macro Impl.trace[A]
/** A list of all members of type `T` in the given object. */
def members[T](obj : Object) : List[T] = macro Impl.members[T]
/**
* A map of all members of type `T` in the given object, keyed by the name of the member.
*/
def memberMap[F](obj : Object) : Map[String, F] = macro Impl.memberMap[F]
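/**
* Provides the `r"..."` string interpolator: the regular expression is validated at compile time
* and interpolated arguments are wrapped in `Pattern.quote` so they are matched literally.
*/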
implicit class RegexStringContext(sc : StringContext) {
def r(args : Any*) : scala.util.matching.Regex = macro Impl.regex
}
private object Impl {
def trace[A](c : Context)(a : c.Expr[A]) : c.Expr[A] = {
import c.universe._
c.info(
a.tree.pos,
"trace output\\n " + show(a.tree) + "\\nfor position:\\n",
true)
a
}
def regex(c : Context)(args : c.Expr[Any]*) : c.Expr[scala.util.matching.Regex] = {
import c.universe._
val s = c.prefix.tree match {
case Apply(_, List(Apply(_, rawParts))) ⇒ rawParts
case x ⇒ c.abort(c.enclosingPosition, "unexpected tree: " + show(x))
}
val parts = s map {
case Literal(Constant(const : String)) ⇒ const
}
def getPoint(msg : String) : Int =
msg.split("\\n")(2).indexOf('^')
try {
val emptyString : Tree = Literal(Constant(""))
val placeHolders = List.fill(args.length)(
java.util.regex.Pattern.quote("placeholder"))
// Check if the regex is valid.
try {
val regex = parts.zipAll(placeHolders, "", "").foldLeft("") {
case (a, (b, c)) ⇒ a + b + c
}
regex.r
} catch {
case e : java.util.regex.PatternSyntaxException ⇒
val pos = s.head.pos.withPoint(getPoint(e.getMessage) + s.head.pos.point)
c.abort(pos, "Invalid Regex: " + e.getMessage.split("\\n").head)
}
val mixed : List[(Tree, Tree)] = s.zipAll(
args.map(p ⇒ q"java.util.regex.Pattern.quote(${p.tree})"),
emptyString,
emptyString)
val regexString = mixed.foldLeft(emptyString) {
case (a, (b, c)) ⇒ q"$a + $b + $c"
}
c.Expr[scala.util.matching.Regex](q"""($regexString).r""")
} catch {
case e : Throwable ⇒
c.abort(c.prefix.tree.pos, e.getMessage)
}
}
def members[T : c.WeakTypeTag](c : Context)(obj : c.Expr[Object]) : c.Expr[List[T]] =
fn.sequenceExpr(c)(
fn.members[T](c)(obj)
.map(s ⇒ fn.renderName(s.name))
.map(n ⇒ c.Expr[T](c.universe.Select(obj.tree, c.universe.newTermName(n))))
)
def memberMap[T : c.WeakTypeTag](c : Context)(obj : c.Expr[Object]) : c.Expr[Map[String, T]] = {
import c.universe._
val tups = fn.sequenceExpr(c)(fn.members(c)(obj)
.map(_.name.decodedName.toString.trim)
.map(n ⇒
// ("n", obj.n)
c.Expr[(String, T)](
Apply(Select(Ident(newTermName("Tuple2")), newTermName("apply")), List(
Literal(Constant(n)),
Select(obj.tree, newTermName(n)))
)
)
)
)
// List(("a", obj.a), ("b", obj.b), ...).toMap
reify { tups.splice.toMap }
}
}
}
| ChrisNeveu/macrame | macrame/src/main/scala/macrame/package.scala | Scala | bsd-3-clause | 3,759 |
package uk.co.rajaconsulting.sbtjna
import sbt._
import org.junit.Test
import org.junit.Assert._
import org.scalatest.junit.JUnitSuite
import java.io._;
/**
* @author sumitraja
*
*/
class JNAeratorTest extends JUnitSuite {
@Test
def testJNAerate() {
new JNAerator("target/generated-sources/java", "src/test/headers", "target/generated-sources/scala", null, true, true, List("/opt/local/include"),
Nil, List(("simple", List("simple.h"))), true, true, true, true, true, JNAeratorRuntime.BridJ)
checkFile("target/generated-sources/java/simple/Pair.java")
checkFile("target/generated-sources/java/simple/BiggerStruct.java")
checkFile("target/generated-sources/java/simple/SimpleLibrary.java")
checkFile("target/generated-sources/scala/simple/Simple.scala")
}
def checkFile(path:String) = {
val file = new File(path);
assertTrue(file.length > 0)
}
} | sumo/sbt-jna | src/test/scala/uk/co/rajaconsulting/sbtjna/JNAeratorTest.scala | Scala | bsd-2-clause | 916 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.recorder.har
import scala.collection.mutable
import scala.concurrent.duration.DurationInt
import io.gatling.BaseSpec
import io.gatling.commons.util.Io.withCloseable
import io.gatling.recorder.config.ConfigKeys.http.InferHtmlResources
import io.gatling.recorder.config.RecorderConfiguration.fakeConfig
import io.gatling.recorder.scenario.{ ResponseBodyBytes, PauseElement, RequestElement }
class HarReaderSpec extends BaseSpec {
def resourceAsStream(p: String) = getClass.getClassLoader.getResourceAsStream(p)
val configWithResourcesFiltering = fakeConfig(mutable.Map(InferHtmlResources -> true))
// By default, we assume that we don't want to filter out the HTML resources
implicit val config = fakeConfig(mutable.Map(InferHtmlResources -> false))
"HarReader" should "work with empty JSON" in {
withCloseable(resourceAsStream("har/empty.har"))(HarReader(_) shouldBe empty)
}
val scn = withCloseable(resourceAsStream("har/www.kernel.org.har"))(HarReader(_))
val elts = scn.elements
val pauseElts = elts.collect { case PauseElement(duration) => duration }
it should "return the correct number of Pause elements" in {
pauseElts.size shouldBe <(elts.size / 2)
}
it should "return an appropriate pause duration" in {
val pauseDuration = pauseElts.reduce(_ + _)
// The total duration of the HAR record is of 6454ms
pauseDuration shouldBe <=(88389 milliseconds)
pauseDuration shouldBe >(80000 milliseconds)
}
it should "return the appropriate request elements" in {
val (googleFontUris, uris) = elts
.collect { case req: RequestElement => req.uri }
.partition(_.contains("google"))
all(uris) should startWith("https://www.kernel.org")
uris.size shouldBe 41
googleFontUris.size shouldBe 16
}
it should "have the approriate first requests" in {
// The first element can't be a pause.
elts.head shouldBe a[RequestElement]
elts.head.asInstanceOf[RequestElement].uri shouldBe "https://www.kernel.org/"
elts.head.asInstanceOf[RequestElement].responseBody should not be empty
elts(1) shouldBe a[RequestElement]
elts(1).asInstanceOf[RequestElement].uri shouldBe "https://www.kernel.org/theme/css/main.css"
elts(1).asInstanceOf[RequestElement].responseBody should not be empty
}
it should "have the headers correctly set" in {
val el0 = elts.head.asInstanceOf[RequestElement]
val el1 = elts(1).asInstanceOf[RequestElement]
val a = el0.headers shouldBe empty
el1.headers should not be empty
for {
header <- List("User-Agent", "Host", "Accept-Encoding", "Accept-Language")
} el1.headers should contain key header
}
it should "have requests with valid headers" in {
// Extra headers can be added by Chrome
val headerNames = elts.iterator.collect { case req: RequestElement => req.headers.keys }.flatten.toSet
all(headerNames) should not include regex(":.*")
}
it should "have the embedded HTML resources filtered out" in {
val scn2 = HarReader(resourceAsStream("har/www.kernel.org.har"))(configWithResourcesFiltering)
val elts2 = scn2.elements
elts2.size shouldBe <(elts.size)
elts2 should not contain "https://www.kernel.org/theme/css/main.css"
}
it should "deal correctly with file having a websockets record" in {
withCloseable(resourceAsStream("har/play-chat.har")) { is =>
val scn = HarReader(is)(configWithResourcesFiltering)
val requests = scn.elements.collect { case req: RequestElement => req.uri }
scn.elements should have size 3
requests shouldBe List("http://localhost:9000/room", "http://localhost:9000/room?username=robert")
}
}
it should "deal correctly with HTTP CONNECT requests" in {
withCloseable(resourceAsStream("har/charles_https.har")) { is =>
val scn = HarReader(is)
scn.elements shouldBe empty
}
}
it should "deal correctly with HTTP requests having a status=0" in {
withCloseable(resourceAsStream("har/null_status.har")) { is =>
val scn = HarReader(is)
val requests = scn.elements.collect { case req: RequestElement => req }
val statuses = requests.map(_.statusCode)
requests should have size 3
statuses should not contain 0
}
}
it should "decode base64-encoded bodies" in {
withCloseable(resourceAsStream("har/base64_encoded.har")) { is =>
val scn = HarReader(is)
scn.elements.head.asInstanceOf[RequestElement].responseBody should not be empty
scn.elements.head.asInstanceOf[RequestElement].responseBody.get shouldBe a[ResponseBodyBytes]
val responseBodyBytes = scn.elements.head.asInstanceOf[RequestElement].responseBody.get.asInstanceOf[ResponseBodyBytes]
new String(responseBodyBytes.bytes) should include("stats-semi-blind")
}
}
}
| GabrielPlassard/gatling | gatling-recorder/src/test/scala/io/gatling/recorder/har/HarReaderSpec.scala | Scala | apache-2.0 | 5,433 |
package com.twitter.finagle.http.codec
import com.twitter.finagle.http.{Request, Response}
import com.twitter.finagle.http.netty3.Bijections
import com.twitter.finagle.http.netty3.Bijections.responseFromNetty
import java.nio.charset.{StandardCharsets => Charsets}
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
import org.jboss.netty.handler.codec.embedder.{DecoderEmbedder, EncoderEmbedder}
import org.jboss.netty.handler.codec.http.{
HttpRequest,
HttpRequestDecoder,
HttpRequestEncoder,
HttpResponse,
HttpResponseDecoder,
HttpResponseEncoder
}
/**
* Testing utilities for encoding/decoding [[Request]]s and [[Response]]s to/from Strings
* and byte arrays.
*/
object HttpCodec {
/** Encode a [[Request]] to a String. */
def encodeRequestToString(request: Request): String = {
new String(encodeRequestToBytes(request), "UTF-8")
}
/** Encode a [[Request]] to a byte array */
def encodeRequestToBytes(request: Request): Array[Byte] = {
val encoder = new EncoderEmbedder[ChannelBuffer](new HttpRequestEncoder)
encoder.offer(Bijections.requestToNetty(request))
val buffer = encoder.poll()
val bytes = new Array[Byte](buffer.readableBytes())
buffer.readBytes(bytes)
bytes
}
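// Illustrative round trip (the request URI is made up):
// val wire = HttpCodec.encodeRequestToString(Request("/search?q=finagle"))
// val back = HttpCodec.decodeStringToRequest(wire)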
/** Decode a [[Request]] from a String */
def decodeStringToRequest(s: String): Request = {
decodeBytesToRequest(s.getBytes(Charsets.UTF_8))
}
/** Decode a [[Request]] from a byte array */
def decodeBytesToRequest(b: Array[Byte]): Request = {
val decoder = new DecoderEmbedder(
new HttpRequestDecoder(Int.MaxValue, Int.MaxValue, Int.MaxValue)
)
decoder.offer(ChannelBuffers.wrappedBuffer(b))
val req = decoder.poll().asInstanceOf[HttpRequest]
assert(req ne null)
Bijections.requestFromNetty(req)
}
/** Encode a [[Response]] to a String */
def encodeResponseToString(response: Response): String = {
val encoder = new EncoderEmbedder[ChannelBuffer](new HttpResponseEncoder)
encoder.offer(Bijections.responseToNetty(response))
val buffer = encoder.poll()
buffer.toString(Charsets.UTF_8)
}
/** Decode a [[Response]] from a String */
def decodeStringToResponse(s: String): Response = {
decodeBytesToResponse(s.getBytes(Charsets.UTF_8))
}
/** Decode a [[Response]] from a byte array */
def decodeBytesToResponse(b: Array[Byte]): Response = {
val decoder = new DecoderEmbedder(
new HttpResponseDecoder(Int.MaxValue, Int.MaxValue, Int.MaxValue)
)
decoder.offer(ChannelBuffers.wrappedBuffer(b))
val res = decoder.poll().asInstanceOf[HttpResponse]
assert(res ne null)
responseFromNetty(res)
}
}
| mkhq/finagle | finagle-base-http/src/main/scala/com/twitter/finagle/http/codec/HttpCodec.scala | Scala | apache-2.0 | 2,648 |
package org.example.implicitplay
/**
* Created by kailianghe on 1/22/15.
*/
trait Comparable[T] {
def comp(o:T) : Int
}
class A
object B {
implicit class ExA(a : A) extends Comparable[A] {
override def comp(o: A): Int = a.hashCode - o.hashCode
}
}
object C {
implicit def int2A(v : Int) = new A
}
class D {
def call[T : Comparable](a: T) = {
// the compiler supplies the Comparable[T] instance found implicitly in the context
val cp = implicitly[Comparable[T]]
cp.comp(a)
}
}
object E {
implicit val cp = new Comparable[A] {
override def comp(o: A): Int = 123
}
}
object ImplicitPlay extends App {
val a = new A
val b = new A
// 1. extend an existing class A
import org.example.implicitplay.B.ExA
println(a.comp(b))
// 2. automatically convert the parameter for me
import org.example.implicitplay.C.int2A
println(a.comp(1))
// 3. find an implicit Comparable value in the context
import org.example.implicitplay.E.cp
println((new D).call(new A))
} | hekailiang/akka-play | actor-samples/src/main/scala/org/example/implicitplay/ImplicitPlay.scala | Scala | apache-2.0 | 1,000 |
package com.pygmalios.reactiveinflux.examples
import java.net.URI
import com.pygmalios.reactiveinflux._
import org.joda.time.DateTime
import scala.concurrent.duration._
/**
* Example of blocking, synchronous usage of SyncReactiveInflux.
*
* It assumes that you have InfluxDB running locally on port 8086. How to install InfluxDB:
* https://docs.influxdata.com/influxdb/v0.11/introduction/installation/
*/
object SyncExample extends App {
// You have to specify how much are you willing to wait results of individual blocking calls
implicit val awaitAtMost = 10.seconds
// Use Influx at the provided URL and database "example1"
syncInfluxDb(new URI("http://localhost:8086/"), "example1") { db =>
// Synchronously create the "example1" database
db.create()
// Synchronously write a single point to "measurement1"
val point = Point(
time = DateTime.now(),
measurement = "measurement1",
tags = Map("t1" -> "A", "t2" -> "B"),
fields = Map(
"f1" -> 10.3, // BigDecimal field
"f2" -> "x", // String field
"f3" -> -1, // Long field
"f4" -> true) // Boolean field
)
db.write(point)
// Synchronously read the written point
val queryResult = db.query("SELECT * FROM measurement1")
// Print the single point to the console
println(queryResult.row.mkString)
// Synchronously drop the "example1" database.
db.drop()
}
}
| pygmalios/reactiveinflux | examples/src/main/scala/com/pygmalios/reactiveinflux/examples/SyncExample.scala | Scala | apache-2.0 | 1,443 |
package im.actor.server.session
import java.util.concurrent.TimeUnit
import akka.actor.{ ActorLogging, ActorRef, Cancellable, Props }
import akka.stream.actor._
import com.typesafe.config.Config
import im.actor.server.mtproto.codecs.protocol.MessageBoxCodec
import im.actor.server.mtproto.protocol._
import im.actor.server.mtproto.transport.MTPackage
import scala.annotation.tailrec
import scala.collection.immutable
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.control.NoStackTrace
private[session] sealed trait ReSenderMessage
private[session] object ReSenderMessage {
case class NewClient(client: ActorRef) extends ReSenderMessage
case class IncomingAck(messageIds: Seq[Long]) extends ReSenderMessage
case class IncomingRequestResend(messageId: Long) extends ReSenderMessage
case class OutgoingMessage(msg: ProtoMessage, reduceKey: Option[String]) extends ReSenderMessage
}
private[session] case class ReSenderConfig(ackTimeout: FiniteDuration, maxResendSize: Long, maxBufferSize: Long)
private[session] object ReSenderConfig {
def fromConfig(config: Config): ReSenderConfig = {
ReSenderConfig(
ackTimeout = config.getDuration("ack-timeout", TimeUnit.SECONDS).seconds,
maxResendSize = config.getBytes("max-resend-size"),
maxBufferSize = config.getBytes("max-buffer-size")
)
}
}
private[session] object ReSender {
private case class ScheduledResend(messageId: Long)
def props(authId: Long, sessionId: Long)(implicit config: ReSenderConfig) =
Props(classOf[ReSender], authId, sessionId, config)
}
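/**
* Buffers resendable outgoing messages until the client acknowledges them: on ack timeout a
* message is re-enqueued, or replaced by an UnsentResponse/UnsentMessage stub when its body
* exceeds the configured max resend size, and messages sharing a reduceKey supersede older ones
* in the buffer.
*/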
private[session] class ReSender(authId: Long, sessionId: Long)(implicit config: ReSenderConfig)
extends ActorSubscriber with ActorPublisher[MTPackage] with ActorLogging with MessageIdHelper {
import ActorPublisherMessage._
import ActorSubscriberMessage._
import ReSender._
import ReSenderMessage._
// TODO: configurable
private val AckTimeout = config.ackTimeout
private val MaxBufferSize = config.maxBufferSize
private val MaxResendSize = config.maxResendSize
implicit val ec: ExecutionContext = context.dispatcher
def receive = waitingForFirstClient
def waitingForFirstClient: Receive = subscriber.orElse(publisher).orElse {
case NewClient(_) ⇒
context.become(resendingToNewClients)
case unmatched ⇒
log.error("Unmatched msg {}", unmatched)
}
def resendingToNewClients: Receive = subscriber.orElse(publisher).orElse {
case NewClient(actorRef) ⇒
log.debug("New client, sending all scheduled for resend")
resendBuffer foreach {
case (messageId, (msg, reduceKey, scheduledResend)) ⇒
scheduledResend.cancel()
enqueueProtoMessageWithResend(messageId, msg, reduceKey)
}
case unmatched ⇒
log.error("Unmatched msg {}", unmatched)
}
private[this] var resendBufferSize = 0L
private[this] var resendBuffer = immutable.SortedMap.empty[Long, (ProtoMessage with ResendableProtoMessage, Option[String], Cancellable)]
// Provides mapping from reduceKey to the last message with the reduceKey
private[this] var reduceMap = immutable.Map.empty[String, Long]
// Subscriber-related
def subscriber: Receive = {
case OnNext(IncomingAck(messageIds)) ⇒
// TODO: #perf possibly can be optimized
messageIds foreach { messageId ⇒
resendBuffer.get(messageId) foreach {
case (message, reduceKeyOpt, scheduledResend) ⇒
resendBufferSize -= message.bodySize
log.debug("Received Ack {}, cancelling resend", messageId)
scheduledResend.cancel()
reduceKeyOpt foreach (cleanReduceKey(_, messageId))
}
}
resendBuffer --= messageIds
case OnNext(OutgoingMessage(msg: ProtoMessage with OutgoingProtoMessage with ResendableProtoMessage, reduceKey: Option[String])) ⇒
enqueueProtoMessageWithResend(msg, reduceKey)
case OnNext(OutgoingMessage(msg: ProtoMessage with OutgoingProtoMessage, _)) ⇒ enqueueProtoMessage(msg)
case OnNext(IncomingRequestResend(messageId)) ⇒
resendBuffer.get(messageId) map {
case (msg, reduceKey, scheduledResend) ⇒
// should be already completed because RequestResend is sent by client only after receiving Unsent notification
scheduledResend.cancel()
enqueueProtoMessageWithResend(messageId, msg, None)
}
case OnComplete ⇒
log.debug("Stopping due to stream completion")
cleanup()
context.stop(self)
case OnError(cause) ⇒
log.error(cause, "Stopping due to stream error")
cleanup()
context.stop(self)
case ScheduledResend(messageId) ⇒
log.debug("Scheduled resend for messageId: {}", messageId)
resendBuffer.get(messageId) map {
case (message, reduceKey, _) ⇒
log.debug("Resending {}: {}", messageId, message)
resendBufferSize -= message.bodySize
message match {
case rspBox @ RpcResponseBox(requestMessageId, bodyBytes) ⇒
if (message.bodySize <= MaxResendSize) {
enqueueProtoMessageWithResend(messageId, rspBox, reduceKey)
} else {
scheduleResend(messageId, rspBox, reduceKey)
enqueueProtoMessage(nextMessageId(), UnsentResponse(messageId, requestMessageId, message.bodySize))
}
case ub @ UpdateBox(bodyBytes) ⇒
if (message.bodySize <= MaxResendSize) {
enqueueProtoMessageWithResend(messageId, ub, reduceKey)
} else {
scheduleResend(messageId, ub, reduceKey)
enqueueProtoMessage(nextMessageId(), UnsentMessage(messageId, message.bodySize))
}
case msg ⇒
enqueueProtoMessageWithResend(messageId, message, reduceKey)
}
}
}
// Publisher-related
override val requestStrategy = WatermarkRequestStrategy(10) // TODO: configurable
// Publisher-related
private[this] var packageQueue = immutable.Queue.empty[MTPackage]
def publisher: Receive = {
case Request(_) ⇒
deliverBuf()
case Cancel ⇒
context.stop(self)
}
private def enqueueProtoMessageWithResend(message: ProtoMessage with ResendableProtoMessage, reduceKeyOpt: Option[String]): Unit = {
enqueueProtoMessageWithResend(nextMessageId(), message, reduceKeyOpt)
}
private def enqueueProtoMessageWithResend(messageId: Long, message: ProtoMessage with ResendableProtoMessage, reduceKeyOpt: Option[String]): Unit = {
scheduleResend(messageId, message, reduceKeyOpt)
enqueueProtoMessage(messageId, message)
}
private def scheduleResend(messageId: Long, message: ProtoMessage with ResendableProtoMessage, reduceKeyOpt: Option[String]): Unit = {
log.debug("Scheduling resend of messageId: {}, timeout: {}", messageId, AckTimeout)
resendBufferSize += message.bodySize
if (resendBufferSize <= MaxBufferSize) {
val scheduledResend = context.system.scheduler.scheduleOnce(AckTimeout, self, ScheduledResend(messageId))
resendBuffer = resendBuffer.updated(messageId, (message, reduceKeyOpt, scheduledResend))
reduceKeyOpt foreach { reduceKey ⇒
for {
msgId ← reduceMap.get(reduceKey)
(msg, _, _) ← resendBuffer.get(msgId)
} yield {
resendBuffer -= msgId
resendBufferSize -= msg.bodySize
}
reduceMap += (reduceKey → messageId)
}
} else {
val msg = "Completing stream due to maximum buffer size reached"
log.warning(msg)
onErrorThenStop(new Exception(msg) with NoStackTrace)
}
}
private def enqueueProtoMessage(message: ProtoMessage): (MTPackage, Long) =
enqueueProtoMessage(nextMessageId(), message)
private def enqueueProtoMessage(messageId: Long, message: ProtoMessage): (MTPackage, Long) = {
val pkg = packProtoMessage(messageId, message)
if (packageQueue.isEmpty && totalDemand > 0) {
onNext(pkg)
} else {
packageQueue = packageQueue.enqueue(pkg)
deliverBuf()
}
(pkg, messageId)
}
@tailrec final def deliverBuf(): Unit = {
if (isActive && totalDemand > 0)
packageQueue.dequeueOption match {
case Some((el, q)) ⇒
packageQueue = q
onNext(el)
deliverBuf()
case None ⇒
}
}
/**
* Removes the mapping from reduceMap if messageId equals the one currently stored for the key.
* @param reduceKey the reduce key whose mapping may be removed
* @param messageId the message id expected to be stored for that key
*/
private def cleanReduceKey(reduceKey: String, messageId: Long): Unit = {
if (reduceMap.get(reduceKey).contains(messageId))
reduceMap -= reduceKey
}
private def packProtoMessage(messageId: Long, message: ProtoMessage): MTPackage = {
val mb = boxProtoMessage(messageId, message)
packMessageBox(mb)
}
private def packMessageBox(mb: MessageBox): MTPackage = {
val bytes = MessageBoxCodec.encode(mb).require
MTPackage(authId, sessionId, bytes)
}
private def boxProtoMessage(messageId: Long, message: ProtoMessage): MessageBox = {
MessageBox(messageId, message)
}
private def cleanup(): Unit = {
resendBuffer foreach {
case (_, (_, _, scheduledResend)) ⇒
scheduledResend.cancel()
}
}
}
| jamesbond12/actor-platform | actor-server/actor-session/src/main/scala/im/actor/server/session/Resender.scala | Scala | mit | 9,326 |
package de.tudarmstadt.lt.flinkdt.tasks
import de.tudarmstadt.lt.flinkdt.types.CT2
import org.apache.flink.api.common.operators.Order
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.scala._
import org.apache.flink.util.Collector
import scala.reflect.ClassTag
/**
* Created by Steffen Remus
*/
object FilterSortDT {
def apply[C <: CT2 : ClassTag : TypeInformation, T1 : ClassTag : TypeInformation, T2 : ClassTag : TypeInformation](valfun:C => Double, order:Order = Order.DESCENDING, sort_B_desc_by_string:Boolean = false) = new FilterSortDT[C, T1, T2](valfun, order, sort_B_desc_by_string)
}
class FilterSortDT[C <: CT2 : ClassTag : TypeInformation, T1 : ClassTag : TypeInformation, T2 : ClassTag : TypeInformation](valfun:(C => Double), order:Order, sort_B_desc_by_string:Boolean) extends DSTask[C, C] {
// TODO: this can surely be optimized
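// Pipeline sketch: drop entries scoring below param_min_sim, keep only "a" keys with at least
// param_min_sim_distinct surviving co-occurrences, score the rest with valfun, then per "a" emit
// the param_topn_sim best entries (optionally breaking ties by the string form of "b").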
override def process(ds: DataSet[C]): DataSet[C] = {
val ds_f = ds.filter(valfun(_) >= DSTaskConfig.param_min_sim)
val dt_count = ds_f
.map(ct => (ct.a.asInstanceOf[T1], 1)) // o1dot
.groupBy(0)
.sum(1)
val dt_val_grouped_a = ds_f
.join(dt_count)
.where("a").equalTo(0)((l, r) => (l, r._2))
.filter(_._2 >= DSTaskConfig.param_min_sim_distinct) // number of distinct co-occurrences (o1dot)
.map(t => (t._1, valfun(t._1))) // apply valfun
.groupBy("_1.a")
val dt_sort_val =
if(sort_B_desc_by_string) {
dt_val_grouped_a
.reduceGroup((iter, out: Collector[(C, Double)]) => {
val l = iter.toSeq
l.sortBy(t => (if(order == Order.DESCENDING) -t._2 else t._2, t._1.b.toString)) // sort by value and ascending by B.toString
.take(DSTaskConfig.param_topn_sim)
.foreach(out.collect)
})
}else {
dt_val_grouped_a
.sortGroup(1, order)
.first(DSTaskConfig.param_topn_sim)
}
dt_sort_val.map(_._1)
}
}
| remstef/flinkfun | src/main/scala/de/tudarmstadt/lt/flinkdt/tasks/FilterSortDT.scala | Scala | apache-2.0 | 1,987 |
package nak.stats
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import nak.classify.Classifier
import nak.data._
import breeze.linalg._
import breeze.stats._
import ContingencyStats._
/**
* Provides precision, recall and f-score for labellings.
* @author dlwh
*/
class ContingencyStats[L] private (private val classWise: Map[L,Table]) {
def this() = this(Map[L,Table]().withDefaultValue(Table(0,0,0)))
/**
* Add an observation.
*/
def +(l :(L,L)) = apply(l._1,l._2)
/** Takes a (guess, gold) pair of labels. */
def apply(guessgold: (L,L)):ContingencyStats[L] = this(guessgold._1,guessgold._2)
/** Takes a guess and a gold standard set of labelings. */
def apply(guess: Set[L], gold: Set[L]):ContingencyStats[L] = {
val tps = (guess intersect gold).foldLeft(classWise) { (m,l) => m + (l -> m(l).incTP)}
val fps = (guess -- gold).foldLeft(tps) { (m,l) => m + (l -> m(l).incFP)}
val fns = (gold -- guess).foldLeft(fps) { (m,l) => m + (l -> m(l).incFN)}
new ContingencyStats(fns)
}
def apply(guess: L, gold: L): ContingencyStats[L] = {
if(guess == gold) {
val tbl = classWise(guess)
new ContingencyStats[L](classWise + (guess->tbl.incTP))
} else {
val tbl = classWise(guess)
val tblL = classWise(gold)
new ContingencyStats[L](classWise + (guess->tbl.incFP) + (gold->tblL.incFN))
}
}
def precision(l:L) = classWise(l).precision
def recall(l:L) = classWise(l).recall
/** F1 score for this label. */
def f(l:L) = classWise(l).f
def f(l:L,beta:Double) = classWise(l).f(beta)
lazy val microaveraged = new {
private val tbl = classWise.valuesIterator.foldLeft(Table(0,0,0))(_+_)
val precision = tbl.precision
val recall = tbl.recall
val f = tbl.f
def f(beta:Double) = tbl f beta
}
lazy val macroaveraged = new {
val precision = mean(classWise.valuesIterator.map(_.precision))
val recall = mean(classWise.valuesIterator.map(_.recall))
val f = mean(classWise.valuesIterator.map(_.f))
def f(beta:Double) = mean(classWise.valuesIterator.map(_.f(beta)))
}
private def r(x:Double) = "%.4f" format x
override def toString() = {
val buf = new StringBuilder
buf ++= "Contingency Statistics:\n"
buf ++= "==========================\n"
buf ++= "Macro: Prec " + r(macroaveraged.precision) + " Recall: " + r(macroaveraged.recall) + " F1: " + r(macroaveraged.f) + "\n"
buf ++= "Micro: Prec " + r(microaveraged.precision) + " Recall: " + r(microaveraged.recall) + " F1: " + r(microaveraged.f) + "\n"
buf ++= "==========================\n"
for( (l,tbl) <- classWise) {
buf ++= l + ": Prec " + r(tbl.precision) + " Recall: " + r(tbl.recall) + " F1: " + r(tbl.f) + "\n"
}
buf.toString
}
}
object ContingencyStats {
def apply[L]():ContingencyStats[L] = new ContingencyStats[L]
/**
* Classify every example and compute its statistics
*/
def apply[L,T](classifier: Classifier[L,T], dataset: Seq[Example[L,T]]):ContingencyStats[L] = {
apply(dataset.map(Example.lift(classifier)) map (_.features),dataset.map(_.label))
}
def apply[L](guessed: Seq[L], gold: Seq[L]):ContingencyStats[L] = {
require(guessed.length == gold.length)
(guessed.iterator zip gold.iterator).foldLeft(ContingencyStats[L]())(_+_)
}
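// Illustrative usage (labels are made up):
// val stats = ContingencyStats(Seq("a", "b", "a"), Seq("a", "a", "a"))
// println(stats.microaveraged.f)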
class Accuracy(val numRight: Int, val numTotal: Int) {
def this() = this(0,0)
def accuracy = if(numTotal == 0) 0.0 else numRight.asInstanceOf[Double]/ numTotal
def + (b:Boolean) = new Accuracy(if(b) numRight + 1 else numRight, numTotal + 1)
def ++(b:Iterator[Boolean]) = b.foldLeft(this)(_+_)
def ++(b:Iterable[Boolean]) = b.foldLeft(this)(_+_)
override def toString = "Accuracy: " + accuracy
}
// true positive, false positive, false negative. TN is only used
// in Accuracy, which is unreliable and requires access to the label
// set.
private[stats] case class Table(tp: Int, fp:Int, fn: Int) {
def incTP = Table(tp+1,fp,fn)
def incFP = Table(tp,fp+1,fn)
def incFN = Table(tp,fp,fn+1)
def +(t :Table) = {
val Table(otp, ofp, ofn) = t
Table(tp + otp, fp + ofp, fn + ofn)
}
def precision = {
val denom = tp + fp
if(denom == 0) 0.0
else tp * 1.0 / denom
}
def recall = {
val denom = tp + fn
if(denom == 0) 1.0
else tp * 1.0 / denom
}
/** F-Score
*/
def f:Double = f(1.0)
/** F-\beta = (1+beta^2) * (precision * recall) / (\beta^2 * pr + re)
*/
def f(beta: Double) = {
val pr = precision
val re = recall
(1 + beta * beta) * (pr * re) / (beta * beta * pr + re)
}
}
}
| scalanlp/nak | src/main/scala/nak/stats/ContingencyStats.scala | Scala | apache-2.0 | 5,202 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.util.concurrent.LinkedBlockingQueue
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashSet
import scala.collection.mutable.Queue
import io.netty.buffer.ByteBuf
import org.apache.spark.{Logging, SparkException}
import org.apache.spark.network.BufferMessage
import org.apache.spark.network.ConnectionManagerId
import org.apache.spark.network.netty.ShuffleCopier
import org.apache.spark.serializer.Serializer
import org.apache.spark.util.Utils
/**
* A block fetcher iterator interface. There are two implementations:
*
* BasicBlockFetcherIterator: uses a custom-built NIO communication layer.
* NettyBlockFetcherIterator: uses Netty (OIO) as the communication layer.
*
* Eventually we would like the two to converge and use a single NIO-based communication layer,
* but extensive tests show that under some circumstances (e.g. large shuffles with lots of cores),
* NIO would perform poorly and thus the need for the Netty OIO one.
*/
private[storage]
trait BlockFetcherIterator extends Iterator[(BlockId, Option[Iterator[Any]])] with Logging {
def initialize()
def totalBlocks: Int
def numLocalBlocks: Int
def numRemoteBlocks: Int
def fetchWaitTime: Long
def remoteBytesRead: Long
}
private[storage]
object BlockFetcherIterator {
// A request to fetch one or more blocks, complete with their sizes
class FetchRequest(val address: BlockManagerId, val blocks: Seq[(BlockId, Long)]) {
val size = blocks.map(_._2).sum
}
// A result of a fetch. Includes the block ID, size in bytes, and a function to deserialize
// the block (since we want all deserialization to happen in the calling thread); can also
// represent a fetch failure if size == -1.
class FetchResult(val blockId: BlockId, val size: Long, val deserialize: () => Iterator[Any]) {
def failed: Boolean = size == -1
}
class BasicBlockFetcherIterator(
private val blockManager: BlockManager,
val blocksByAddress: Seq[(BlockManagerId, Seq[(BlockId, Long)])],
serializer: Serializer)
extends BlockFetcherIterator {
import blockManager._
private var _remoteBytesRead = 0L
private var _fetchWaitTime = 0L
if (blocksByAddress == null) {
throw new IllegalArgumentException("BlocksByAddress is null")
}
// Total number blocks fetched (local + remote). Also number of FetchResults expected
protected var _numBlocksToFetch = 0
protected var startTime = System.currentTimeMillis
// This represents the number of local blocks, also counting zero-sized blocks
private var numLocal = 0
// BlockIds for local blocks that need to be fetched. Excludes zero-sized blocks
protected val localBlocksToFetch = new ArrayBuffer[BlockId]()
// This represents the number of remote blocks, also counting zero-sized blocks
private var numRemote = 0
// BlockIds for remote blocks that need to be fetched. Excludes zero-sized blocks
protected val remoteBlocksToFetch = new HashSet[BlockId]()
// A queue to hold our results.
protected val results = new LinkedBlockingQueue[FetchResult]
// Queue of fetch requests to issue; we'll pull requests off this gradually to make sure that
// the number of bytes in flight is limited to maxBytesInFlight
private val fetchRequests = new Queue[FetchRequest]
// Current bytes in flight from our requests
private var bytesInFlight = 0L
protected def sendRequest(req: FetchRequest) {
logDebug("Sending request for %d blocks (%s) from %s".format(
req.blocks.size, Utils.bytesToString(req.size), req.address.hostPort))
val cmId = new ConnectionManagerId(req.address.host, req.address.port)
val blockMessageArray = new BlockMessageArray(req.blocks.map {
case (blockId, size) => BlockMessage.fromGetBlock(GetBlock(blockId))
})
bytesInFlight += req.size
val sizeMap = req.blocks.toMap // so we can look up the size of each blockID
val future = connectionManager.sendMessageReliably(cmId, blockMessageArray.toBufferMessage)
future.onSuccess {
case Some(message) => {
val bufferMessage = message.asInstanceOf[BufferMessage]
val blockMessageArray = BlockMessageArray.fromBufferMessage(bufferMessage)
for (blockMessage <- blockMessageArray) {
if (blockMessage.getType != BlockMessage.TYPE_GOT_BLOCK) {
throw new SparkException(
"Unexpected message " + blockMessage.getType + " received from " + cmId)
}
val blockId = blockMessage.getId
val networkSize = blockMessage.getData.limit()
results.put(new FetchResult(blockId, sizeMap(blockId),
() => dataDeserialize(blockId, blockMessage.getData, serializer)))
_remoteBytesRead += networkSize
logDebug("Got remote block " + blockId + " after " + Utils.getUsedTimeMs(startTime))
}
}
case None => {
logError("Could not get block(s) from " + cmId)
for ((blockId, size) <- req.blocks) {
results.put(new FetchResult(blockId, -1, null))
}
}
}
}
protected def splitLocalRemoteBlocks(): ArrayBuffer[FetchRequest] = {
// Make remote requests at most maxBytesInFlight / 5 in length; the reason to keep them
// smaller than maxBytesInFlight is to allow multiple, parallel fetches from up to 5
// nodes, rather than blocking on reading output from one node.
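      // For example (illustrative numbers only): with a maxBytesInFlight of 48 MB, a common
      // default for spark.reducer.maxMbInFlight at the time, targetRequestSize would be about
      // 9.6 MB, so roughly five fetch requests can be outstanding at once.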
val targetRequestSize = math.max(maxBytesInFlight / 5, 1L)
logInfo("maxBytesInFlight: " + maxBytesInFlight + ", targetRequestSize: " + targetRequestSize)
// Split local and remote blocks. Remote blocks are further split into FetchRequests of size
// at most maxBytesInFlight in order to limit the amount of data in flight.
val remoteRequests = new ArrayBuffer[FetchRequest]
for ((address, blockInfos) <- blocksByAddress) {
if (address == blockManagerId) {
numLocal = blockInfos.size
// Filter out zero-sized blocks
localBlocksToFetch ++= blockInfos.filter(_._2 != 0).map(_._1)
_numBlocksToFetch += localBlocksToFetch.size
} else {
numRemote += blockInfos.size
val iterator = blockInfos.iterator
var curRequestSize = 0L
var curBlocks = new ArrayBuffer[(BlockId, Long)]
while (iterator.hasNext) {
val (blockId, size) = iterator.next()
// Skip empty blocks
if (size > 0) {
curBlocks += ((blockId, size))
remoteBlocksToFetch += blockId
_numBlocksToFetch += 1
curRequestSize += size
} else if (size < 0) {
throw new BlockException(blockId, "Negative block size " + size)
}
if (curRequestSize >= targetRequestSize) {
// Add this FetchRequest
remoteRequests += new FetchRequest(address, curBlocks)
            logDebug(s"Creating fetch request of $curRequestSize at $address")
            curRequestSize = 0
            curBlocks = new ArrayBuffer[(BlockId, Long)]
}
}
// Add in the final request
if (!curBlocks.isEmpty) {
remoteRequests += new FetchRequest(address, curBlocks)
}
}
}
logInfo("Getting " + _numBlocksToFetch + " non-empty blocks out of " +
totalBlocks + " blocks")
remoteRequests
}
protected def getLocalBlocks() {
// Get the local blocks while remote blocks are being fetched. Note that it's okay to do
// these all at once because they will just memory-map some files, so they won't consume
// any memory that might exceed our maxBytesInFlight
for (id <- localBlocksToFetch) {
getLocalFromDisk(id, serializer) match {
case Some(iter) => {
// Pass 0 as size since it's not in flight
results.put(new FetchResult(id, 0, () => iter))
logDebug("Got local block " + id)
}
case None => {
throw new BlockException(id, "Could not get block " + id + " from local machine")
}
}
}
}
override def initialize() {
// Split local and remote blocks.
val remoteRequests = splitLocalRemoteBlocks()
// Add the remote requests into our queue in a random order
fetchRequests ++= Utils.randomize(remoteRequests)
// Send out initial requests for blocks, up to our maxBytesInFlight
while (!fetchRequests.isEmpty &&
(bytesInFlight == 0 || bytesInFlight + fetchRequests.front.size <= maxBytesInFlight)) {
sendRequest(fetchRequests.dequeue())
}
val numFetches = remoteRequests.size - fetchRequests.size
      logInfo("Started " + numFetches + " remote fetches in " + Utils.getUsedTimeMs(startTime))
// Get Local Blocks
startTime = System.currentTimeMillis
getLocalBlocks()
logDebug("Got local blocks in " + Utils.getUsedTimeMs(startTime) + " ms")
}
override def totalBlocks: Int = numLocal + numRemote
override def numLocalBlocks: Int = numLocal
override def numRemoteBlocks: Int = numRemote
override def fetchWaitTime: Long = _fetchWaitTime
override def remoteBytesRead: Long = _remoteBytesRead
// Implementing the Iterator methods with an iterator that reads fetched blocks off the queue
// as they arrive.
@volatile protected var resultsGotten = 0
override def hasNext: Boolean = resultsGotten < _numBlocksToFetch
override def next(): (BlockId, Option[Iterator[Any]]) = {
resultsGotten += 1
val startFetchWait = System.currentTimeMillis()
val result = results.take()
val stopFetchWait = System.currentTimeMillis()
_fetchWaitTime += (stopFetchWait - startFetchWait)
      if (!result.failed) bytesInFlight -= result.size
while (!fetchRequests.isEmpty &&
(bytesInFlight == 0 || bytesInFlight + fetchRequests.front.size <= maxBytesInFlight)) {
sendRequest(fetchRequests.dequeue())
}
(result.blockId, if (result.failed) None else Some(result.deserialize()))
}
}
// End of BasicBlockFetcherIterator
class NettyBlockFetcherIterator(
blockManager: BlockManager,
blocksByAddress: Seq[(BlockManagerId, Seq[(BlockId, Long)])],
serializer: Serializer)
extends BasicBlockFetcherIterator(blockManager, blocksByAddress, serializer) {
import blockManager._
val fetchRequestsSync = new LinkedBlockingQueue[FetchRequest]
private def startCopiers(numCopiers: Int): List[_ <: Thread] = {
      (for (i <- Range(0, numCopiers)) yield {
        val copier = new Thread {
          override def run() {
            try {
              while (!isInterrupted && !fetchRequestsSync.isEmpty) {
sendRequest(fetchRequestsSync.take())
}
} catch {
case x: InterruptedException => logInfo("Copier Interrupted")
// case _ => throw new SparkException("Exception Throw in Shuffle Copier")
}
}
}
        copier.start()
copier
}).toList
}
// keep this to interrupt the threads when necessary
private def stopCopiers() {
for (copier <- copiers) {
copier.interrupt()
}
}
override protected def sendRequest(req: FetchRequest) {
def putResult(blockId: BlockId, blockSize: Long, blockData: ByteBuf) {
val fetchResult = new FetchResult(blockId, blockSize,
() => dataDeserialize(blockId, blockData.nioBuffer, serializer))
results.put(fetchResult)
}
logDebug("Sending request for %d blocks (%s) from %s".format(
req.blocks.size, Utils.bytesToString(req.size), req.address.host))
val cmId = new ConnectionManagerId(req.address.host, req.address.nettyPort)
val cpier = new ShuffleCopier(blockManager.conf)
cpier.getBlocks(cmId, req.blocks, putResult)
      logDebug("Sent request for remote blocks " + req.blocks + " from " + req.address.host)
}
private var copiers: List[_ <: Thread] = null
override def initialize() {
// Split Local Remote Blocks and set numBlocksToFetch
val remoteRequests = splitLocalRemoteBlocks()
// Add the remote requests into our queue in a random order
for (request <- Utils.randomize(remoteRequests)) {
fetchRequestsSync.put(request)
}
copiers = startCopiers(conf.getInt("spark.shuffle.copier.threads", 6))
logInfo("Started " + fetchRequestsSync.size + " remote fetches in " +
Utils.getUsedTimeMs(startTime))
// Get Local Blocks
startTime = System.currentTimeMillis
getLocalBlocks()
logDebug("Got local blocks in " + Utils.getUsedTimeMs(startTime) + " ms")
}
override def next(): (BlockId, Option[Iterator[Any]]) = {
resultsGotten += 1
val result = results.take()
      // If all the results have been retrieved, the copiers will exit automatically
(result.blockId, if (result.failed) None else Some(result.deserialize()))
}
}
// End of NettyBlockFetcherIterator
}
| yelshater/hadoop-2.3.0 | spark-core_2.10-1.0.0-cdh5.1.0/src/main/scala/org/apache/spark/storage/BlockFetcherIterator.scala | Scala | apache-2.0 | 14,053 |
package slate
package app
import cats.Applicative
import cats.data.Validated
import cats.implicits._
import monix.eval.Task
import org.atnos.eff._
import org.atnos.eff.syntax.all._
import org.scalajs.dom.XMLHttpRequest
import qq.Json
import qq.Platform.Rec._
import qq.cc.{CompiledFilter, CompiledFilterStack, OrCompilationError, OrRuntimeErr, Prelude, QQRuntimeError, QQRuntimeException, RuntimeErrs, TypeError}
import qq.data.{CompiledDefinition, JSON}
import qq.util.Recursion.RecursionEngine
import qq.util._
import slate.ajax.{Ajax, AjaxMethod}
import scala.concurrent.duration._
import scala.scalajs.js
object SlatePrelude extends Prelude {
import CompiledDefinition.noParamDefinition
import QQRuntimeException._
def googleAuth: CompiledDefinition =
noParamDefinition("googleAuth",
CompiledFilter.constE(identify.getAuthToken(interactive = true).map[Vector[JSON]](JSON.Str(_) +: Vector.empty).parallel.send[CompiledFilterStack]))
def launchAuth: CompiledDefinition =
CompiledDefinition("launchAuth", 2, CompiledDefinition.standardEffectDistribution {
params => _ =>
val urlRaw = params.head
val queryParamsRaw = params.tail.head
val urlVerified: Validated[RuntimeErrs, String] = urlRaw match {
case JSON.Str(s) => s.validNel
          case k => (TypeError("ajax", "string" -> k): QQRuntimeError).invalidNel
}
val queryParamsVerified: Validated[RuntimeErrs, JSON.ObjList] = queryParamsRaw match {
case o: JSON.ObjMap => JSON.ObjList(o.value.toVector).validNel
case o: JSON.ObjList => o.validNel
case k => (TypeError("ajax", "object" -> k): QQRuntimeError).invalidNel
}
val urlWithQueryParams = Applicative[Validated[RuntimeErrs, ?]].map2(urlVerified, queryParamsVerified)(Ajax.addQueryParams)
for {
webAuthResult <-
Eff.send[OrRuntimeErr, CompiledFilterStack, String](urlWithQueryParams.toEither)
.flatMap(identify.launchWebAuthFlow(interactive = true, _).parallel.send[CompiledFilterStack])
accessToken = webAuthResult.substring(webAuthResult.indexOf("&code=") + "&code=".length)
} yield JSON.obj("code" -> JSON.Str(accessToken)) +: Vector.empty
})
private def makeAjaxDefinition(name: String, ajaxMethod: AjaxMethod) = CompiledDefinition(name, 4,
CompiledDefinition.standardEffectDistribution {
params => _ =>
val urlRaw = params.head
val queryParamsRaw = params.tail.head
val dataRaw = params.tail.tail.head
val headersRaw = params.tail.tail.tail.head
type Stack = Fx.fx2[TaskParallel, OrRuntimeErr]
implicit val ajaxTimeout = Ajax.Timeout(2000.millis)
val urlValidated: Validated[RuntimeErrs, String] = urlRaw match {
case JSON.Str(s) => s.validNel
case k => (TypeError("ajax", "string" -> k): QQRuntimeError).invalidNel
}
val queryParamsValidated: Validated[RuntimeErrs, JSON.ObjList] = queryParamsRaw match {
case o: JSON.ObjMap => JSON.ObjList(o.value.toVector).validNel
case o: JSON.ObjList => o.validNel
case k => (TypeError("ajax", "object" -> k): QQRuntimeError).invalidNel
}
val dataValidated: Validated[RuntimeErrs, String] = dataRaw match {
case JSON.Str(s) => s.validNel
case o: JSON.Obj => JSON.render(o).validNel
case k => (TypeError("ajax", "string | object" -> k): QQRuntimeError).invalidNel
}
val headersValidated: Validated[RuntimeErrs, Map[String, String]] = headersRaw match {
case o: JSON.ObjList if o.value.forall(_._2.isInstanceOf[JSON.Str]) => o.toMap.value.mapValues(_.asInstanceOf[JSON.Str].value).validNel
case o: JSON.ObjMap if o.value.forall(_._2.isInstanceOf[JSON.Str]) => o.toMap.value.mapValues(_.asInstanceOf[JSON.Str].value).validNel
case k => (TypeError("ajax", "object" -> k): QQRuntimeError).invalidNel
}
Eff.collapse[Stack, TaskParallel, Vector[JSON]](for {
resp <-
Eff.collapse[Stack, OrRuntimeErr, XMLHttpRequest](
(urlValidated |@| dataValidated |@| queryParamsValidated |@| headersValidated).map(
Ajax(ajaxMethod, _, _, _, _, withCredentials = false, "")
.onErrorRestart(1)
.map(Either.right)
.onErrorHandle[OrRuntimeErr[XMLHttpRequest]] {
case e: QQRuntimeException => Either.left[RuntimeErrs, XMLHttpRequest](e.errors)
}.parallel
).toEither.sequence[TaskParallel, OrRuntimeErr[XMLHttpRequest]].map(_.flatten).parallel.send[Stack]
)
asJson = Json.stringToJSON(resp.responseText).fold(Task.raiseError(_), t => Task.now(t +: Vector.empty)).parallel
} yield asJson).into[CompiledFilterStack]
})
def httpDelete: CompiledDefinition = makeAjaxDefinition("httpDelete", AjaxMethod.DELETE)
def httpGet: CompiledDefinition = makeAjaxDefinition("httpGet", AjaxMethod.GET)
def httpPost: CompiledDefinition = makeAjaxDefinition("httpPost", AjaxMethod.POST)
def httpPatch: CompiledDefinition = makeAjaxDefinition("httpPatch", AjaxMethod.PATCH)
def httpPut: CompiledDefinition = makeAjaxDefinition("httpPut", AjaxMethod.PUT)
final def toRFC3339(d: js.Date): String = {
def pad(n: Int): String = {
val toStr = n.toString
if (n < 10) "0" + toStr else toStr
}
d.getUTCFullYear() + "-" +
pad(d.getUTCMonth() + 1) + "-" + pad(d.getUTCDate()) + "T" +
pad(d.getUTCHours()) + ":" +
pad(d.getUTCMinutes()) + ":" +
pad(d.getUTCSeconds()) + "Z"
}
def nowRFC3339: CompiledDefinition =
noParamDefinition("nowRFC3339",
CompiledFilter.constE(Task.eval(JSON.str(toRFC3339(new js.Date())) +: Vector.empty).parallel.send[CompiledFilterStack]))
// TODO: remove from here and AppView
def formatDatetimeFriendlyImpl(d: js.Date): String = {
// Make a fuzzy time
val delta = Math.round((d.getTime() - new js.Date().getTime()) / 1000)
val minute = 60
val hour = minute * 60
val day = hour * 24
val week = day * 7
if (delta < 30) {
"just then"
} else if (delta < minute) {
delta.toString + " seconds ago"
} else if (delta < 2 * minute) {
"in a minute"
} else if (delta < hour) {
Math.floor(delta / minute).toString + " minutes ago"
} else if (Math.floor(delta / hour) == 1) {
"in 1 hour"
} else if (delta < day) {
"in " + Math.floor(delta / hour).toString + " hours"
} else if (delta < day * 2) {
"tomorrow"
} else if (delta < week) {
"in " + Math.floor(delta / day) + " days"
} else {
"in " + Math.floor(delta / week) + " weeks"
}
}
def formatDatetimeFriendly: CompiledDefinition = noParamDefinition("formatDatetimeFriendly", CompiledFilter.singleton {
case JSON.Str(s) =>
val asDate = js.Date.parse(s)
val fuzzy = formatDatetimeFriendlyImpl(new js.Date(asDate))
(JSON.str(fuzzy) +: Vector.empty).pureEff[CompiledFilterStack]
case k =>
typeErrorE[CompiledFilterStack, Vector[JSON]]("formatDatetimeFriendly", "string" -> k)
})
def randomHex: CompiledDefinition = noParamDefinition("randomHex", CompiledFilter.constE {
Eff.send[TaskParallel, CompiledFilterStack, Vector[JSON]](Task.eval(JSON.str(Vector.fill(6) {
java.lang.Integer.toHexString(scala.util.Random.nextInt(256))
}.mkString) +: Vector.empty).parallel)
})
override def all(implicit rec: RecursionEngine): OrCompilationError[Vector[CompiledDefinition]] =
Right(
Vector(googleAuth, launchAuth, randomHex, httpDelete, httpGet, httpPost, httpPatch, httpPut, nowRFC3339, formatDatetimeFriendly)
)
}
| edmundnoble/dashboarder | ui/src/main/scala/slate/app/SlatePrelude.scala | Scala | mit | 7,750 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui
import scala.xml.Elem
import org.apache.spark.SparkFunSuite
class UIUtilsSuite extends SparkFunSuite {
import UIUtils._
test("makeDescription") {
verify(
"""test <a href="/link"> text </a>""",
<span class="description-input">test <a href="/link"> text </a></span>,
"Correctly formatted text with only anchors and relative links should generate HTML"
)
verify(
"""test <a href="/link" text </a>""",
      <span class="description-input">{"""test <a href="/link" text </a>"""}</span>,
      "Badly formatted text should make the description be treated as a string instead of HTML"
)
verify(
"""test <a href="link"> text </a>""",
<span class="description-input">{"""test <a href="link"> text </a>"""}</span>,
"Non-relative links should make the description be treated as a string instead of HTML"
)
verify(
"""test<a><img></img></a>""",
<span class="description-input">{"""test<a><img></img></a>"""}</span>,
"Non-anchor elements should make the description be treated as a string instead of HTML"
)
verify(
"""test <a href="/link"> text </a>""",
<span class="description-input">test <a href="base/link"> text </a></span>,
baseUrl = "base",
errorMsg = "Base URL should be prepended to html links"
)
}
test("SPARK-11906: Progress bar should not overflow because of speculative tasks") {
val generated = makeProgressBar(2, 3, 0, 0, 4).head.child.filter(_.label == "div")
val expected = Seq(
<div class="bar bar-completed" style="width: 75.0%"></div>,
<div class="bar bar-running" style="width: 25.0%"></div>
)
    assert(generated.sameElements(expected),
      s"\nRunning progress bar should round down\n\nExpected:\n$expected\nGenerated:\n$generated")
}
test("decodeURLParameter (SPARK-12708: Sorting task error in Stages Page when yarn mode.)") {
val encoded1 = "%252F"
val decoded1 = "/"
val encoded2 = "%253Cdriver%253E"
val decoded2 = "<driver>"
assert(decoded1 === decodeURLParameter(encoded1))
assert(decoded2 === decodeURLParameter(encoded2))
// verify that no affect to decoded URL.
assert(decoded1 === decodeURLParameter(decoded1))
assert(decoded2 === decodeURLParameter(decoded2))
}
private def verify(
desc: String, expected: Elem, errorMsg: String = "", baseUrl: String = ""): Unit = {
val generated = makeDescription(desc, baseUrl)
    assert(generated.sameElements(expected),
      s"\n$errorMsg\n\nExpected:\n$expected\nGenerated:\n$generated")
}
}
| chenc10/Spark-PAF | core/src/test/scala/org/apache/spark/ui/UIUtilsSuite.scala | Scala | apache-2.0 | 3,418 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform
import _root_.scalariform.parser._
import _root_.scalariform.lexer._
import _root_.scalariform.lexer.Tokens._
import org.scalastyle.{CombinedAst, CombinedChecker, ScalastyleError, PositionError, Lines}
import VisitorHelper.visit
/**
 * According to Effective Scala ("http://twitter.github.io/effectivescala/index.html#Formatting-Braces"),
 * braces are used to create compound expressions; using braces around a simple expression makes the
 * code harder to read, so Effective Scala suggests avoiding braces for simple expressions.
 *
 * This checker detects simple expressions that are wrapped in braces, like the one below:
 *
 * def foo() = {
 *   bar()
 * }
 *
 * The method above contains only one expression, "bar()", so the braces can be removed:
 *
 * def foo() = bar()
 *
 * Configuration.
 * 1. To configure which tokens to check, pass a parameter to the checker in XML, like this:
 *
 * <parameters>
 *  <parameter name="targetTokens">def,for,while</parameter>
 * </parameters>
 *
 * To specify multiple tokens, list all the token names separated by ",".
 * Available tokens are def,val,var,if,else,for,while,do,case.
 *
 * 2. To allow nested expressions:
 *
 * <parameter name="nestedAllowed">true</parameter>
 *
 * By default, the value is false.
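 *
 * For reference, a combined configuration sketch (illustrative only; the enclosing <check>
 * element follows the usual scalastyle configuration format and is not part of the original
 * documentation):
 *
 * <check class="org.scalastyle.scalariform.SimpleExprWithBraceChecker" level="warning" enabled="true">
 *  <parameters>
 *   <parameter name="targetTokens">def,val,if,else</parameter>
 *   <parameter name="nestedAllowed">true</parameter>
 *  </parameters>
 * </check>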
*/
class SimpleExprWithBraceChecker extends CombinedChecker {
val errorKey = "simple.expression.with.brace"
val ParamTargetTokens = "targetTokens"
val ParamTargetTokensDefValue = ""
val ParamNestedAllowed = "nestedAllowed"
val ParamNestedAllowedDefValue = false
val TargetTokensSeparator = ","
case class SourceContext(lines: Lines, targetTokens: Set[TokenType], nestedAllowed: Boolean)
override def verify(cAst: CombinedAst): List[ScalastyleError] = {
val targetTokensString = getString(ParamTargetTokens, ParamTargetTokensDefValue)
val targetTokens = (for (
tokenString <- targetTokensString.split(TargetTokensSeparator)
) yield TokenType(tokenString.toUpperCase())).toSet
val nestedAllowed = getBoolean(ParamNestedAllowed, ParamNestedAllowedDefValue)
val sourceContext = SourceContext(cAst.lines, targetTokens, nestedAllowed)
val it = for (Some(a) <- visit(cAst.compilationUnit, localvisit(_, sourceContext))) yield a
it.toList
}
private def localvisit(ast: Any, sc: SourceContext): List[Option[ScalastyleError]] = ast match {
case e: ExprElement => visitExprElement(e, sc)
case d: DefOrDcl => visitDefOrDcl(d, sc)
case c: CaseClause => visitCaseClause(c, sc)
case a: Any => visit(a, localvisit(_, sc))
}
private def checkBlockExprHasSingleExpr(b: BlockExpr, nestedAllowed: Boolean): Option[ScalastyleError] =
b.caseClausesOrStatSeq match {
case Right(s: StatSeq) if s.otherStats.isEmpty && checkStatSeq(s, nestedAllowed) => Some(PositionError(b.lbrace.offset))
case a: Any => None
}
private def checkStatSeq(ss: StatSeq, nestedAllowed: Boolean): Boolean = {
val length = expandExpr(ss).length
length <= 1 || (length > 1 && !nestedAllowed)
}
private def expandExpr(a: Any): List[CallExpr] = a match {
case e: CallExpr => e :: visit(e, expandExpr)
case a: Any => visit(a, expandExpr)
}
private def visitCaseClause(c: CaseClause, sc: SourceContext): List[Option[ScalastyleError]] = {
val blockExprOrEmpty = c.statSeq.firstStatOpt match {
case Some(x: Expr) => x.contents.take(1)
case _ => List()
}
blockExprOrEmpty.flatMap {
case x: BlockExpr => checkBlockExprHasSingleExpr(x, sc.nestedAllowed) :: visit(x, localvisit(_, sc))
case _ => visit(c, localvisit(_, sc))
}
}
private def visitDefOrDcl(d: DefOrDcl, sc: SourceContext): List[Option[ScalastyleError]] = d match {
case f: FunDefOrDcl if sc.targetTokens.contains(DEF) => f.funBodyOpt match {
case Some(e: ExprFunBody) => visitExprFunBody(e, sc)
case _ => visit(f, localvisit(_, sc))
}
case p: PatDefOrDcl if sc.targetTokens.contains(p.valOrVarToken.tokenType) => visitPatDefOrDcl(p, sc)
case a: Any => visit(a, localvisit(_, sc))
}
private def visitExprFunBody(e: ExprFunBody, sc: SourceContext): List[Option[ScalastyleError]] = e.body.contents match {
case (x: BlockExpr) :: xs => checkBlockExprHasSingleExpr(x, sc.nestedAllowed) :: visit(e.body, localvisit(_, sc))
case _ => visit(e.body, localvisit(_, sc))
}
private def visitPatDefOrDcl(p: PatDefOrDcl, sc: SourceContext): List[Option[ScalastyleError]] = p.equalsClauseOption match {
case Some((_, e: Expr)) => e.contents match {
case (x: BlockExpr) :: xs => checkBlockExprHasSingleExpr(x, sc.nestedAllowed) :: visit(e, localvisit(_, sc))
case _ => visit(e, localvisit(_, sc))
}
case _ => visit(p, localvisit(_, sc))
}
private def visitExprElement(e: ExprElement, sc: SourceContext): List[Option[ScalastyleError]] = e match {
case f: ForExpr => visitExpr(FOR, f.body, sc)
case d: DoExpr => visitExpr(DO, d.body, sc)
case w: WhileExpr => visitExpr(WHILE, w.body, sc)
case i: IfExpr => visitIfExpr(i, sc)
case _ => visit(e, localvisit(_, sc))
}
private def visitExpr(t: TokenType, e: Expr, sc: SourceContext): List[Option[ScalastyleError]] = e.contents match {
case (x: BlockExpr) :: xs if sc.targetTokens.contains(t) => checkBlockExprHasSingleExpr(x, sc.nestedAllowed) :: visit(e, localvisit(_, sc))
case _ => visit(e, localvisit(_, sc))
}
private def visitIfExpr(i: IfExpr, sc: SourceContext): List[Option[ScalastyleError]] = {
val ifExprErrors = i.body.contents match {
case (x: BlockExpr) :: xs if sc.targetTokens.contains(IF) =>
checkBlockExprHasSingleExpr(x, sc.nestedAllowed) :: visit(i.body, localvisit(_, sc))
case _ => visit(i.body, localvisit(_, sc))
}
val elseClauseErrors = i.elseClause match {
case Some(x) => visitElseOrElseif(x, sc)
case _ => List()
}
ifExprErrors ::: elseClauseErrors
}
private def visitElseOrElseif(e: ElseClause, sc: SourceContext): List[Option[ScalastyleError]] = e.elseBody.contents match {
case (x: IfExpr) :: xs => visitIfExpr(x, sc) ::: visit(xs, localvisit(_, sc))
case (x: BlockExpr) :: xs if sc.targetTokens.contains(ELSE) =>
checkBlockExprHasSingleExpr(x, sc.nestedAllowed) :: visit(e.elseBody, localvisit(_, sc))
case a: Any => visit(a, localvisit(_, sc))
}
}
| dwango/scalastyle | src/main/scala/org/scalastyle/scalariform/SimpleExprWithBraceChecker.scala | Scala | apache-2.0 | 7,066 |
package com.twitter.finagle.channel
import com.twitter.collection.BucketGenerationalQueue
import com.twitter.finagle.service.FailedService
import com.twitter.finagle.{param, Stack, Stackable}
import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver}
import com.twitter.finagle.{ConnectionRefusedException, SimpleFilter, Service, ClientConnection}
import com.twitter.finagle.{ServiceFactory, ServiceFactoryProxy}
import com.twitter.util.{Future, Duration}
import java.util.concurrent.atomic.AtomicInteger
case class OpenConnectionsThresholds(
lowWaterMark: Int,
highWaterMark: Int,
idleTimeout: Duration) {
require(lowWaterMark <= highWaterMark, "lowWaterMark must be <= highWaterMark")
}
private[finagle] object IdleConnectionFilter {
val role = Stack.Role("IdleConnectionAssasin")
/**
* A class eligible for configuring a [[com.twitter.finagle.Stackable]]
* [[com.twitter.finagle.channel.IdleConnectionFilter]].
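   *
   * A configuration sketch (illustrative only; the threshold values are assumptions and the
   * commented line assumes a stack-based server or client that accepts `configured` parameters):
   * {{{
   * val thresholds = OpenConnectionsThresholds(lowWaterMark = 100, highWaterMark = 200, idleTimeout = 30.seconds)
   * // myStackServer.configured(IdleConnectionFilter.Param(Some(thresholds)))
   * }}}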
*/
case class Param(thres: Option[OpenConnectionsThresholds])
implicit object Param extends Stack.Param[Param] {
val default = Param(None)
}
/**
* Creates a [[com.twitter.finagle.Stackable]] [[com.twitter.finagle.channel.IdleConnectionFilter]].
*/
def module[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
new Stack.Simple[ServiceFactory[Req, Rep]] {
val role = IdleConnectionFilter.role
val description = "Refuse requests and try to close idle connections " +
"based on the number of active connections"
def make(next: ServiceFactory[Req, Rep])(implicit params: Stack.Params) = {
get[IdleConnectionFilter.Param] match {
case IdleConnectionFilter.Param(Some(thres)) =>
val param.Stats(sr) = get[param.Stats]
new IdleConnectionFilter(next, thres, sr.scope("idle"))
case _ => next
}
}
}
}
/**
 * Filter responsible for tracking idle connections; it will refuse requests and try to close idle
 * connections based on the number of active connections.
 *
 * Each time a message from a new connection arrives (based on the number of connections):
 * - if below the low watermark: accept the connection.
 * - if above the low watermark: collect (close) idle connections, but accept the connection.
 * - if above the high watermark: collect (close) idle connections, and refuse or accept the
 *   connection depending on whether we managed to close an idle connection.
 *
 * NB: the connection is tracked after the server response, so that the server processing time is
 * not counted in the idle timeout.
*
* Note: this will not properly handle multiple outstanding messages per connection and should not
* be used for duplex protocols such as finagle-mux.
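 *
 * A minimal wiring sketch (illustrative only; the underlying service factory and the threshold
 * values are assumptions, not taken from the original source):
 * {{{
 * val thresholds = OpenConnectionsThresholds(lowWaterMark = 100, highWaterMark = 200, idleTimeout = 30.seconds)
 * val guarded = new IdleConnectionFilter(underlyingFactory, thresholds)
 * }}}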
*/
class IdleConnectionFilter[Req, Rep](
self: ServiceFactory[Req, Rep],
threshold: OpenConnectionsThresholds,
statsReceiver: StatsReceiver = NullStatsReceiver
) extends ServiceFactoryProxy[Req, Rep](self) {
private[this] val queue = new BucketGenerationalQueue[ClientConnection](threshold.idleTimeout)
private[this] val connectionCounter = new AtomicInteger(0)
private[this] val idle = statsReceiver.addGauge("idle") {
queue.collectAll(threshold.idleTimeout).size
}
private[this] val refused = statsReceiver.counter("refused")
private[this] val closed = statsReceiver.counter("closed")
def openConnections = connectionCounter.get()
override def apply(c: ClientConnection) = {
c.onClose ensure { connectionCounter.decrementAndGet() }
if (accept(c)) {
queue.add(c)
c.onClose ensure {
queue.remove(c)
}
self(c) map { filterFactory(c) andThen _ }
} else {
refused.incr()
val address = c.remoteAddress
c.close()
Future.value(new FailedService(new ConnectionRefusedException(address)))
}
}
// This filter is responsible for adding/removing a connection to/from the idle tracking
// system during the phase when the server is computing the result.
  // So if a request takes a long time to be processed, we will never detect it as idle.
  // NB: private[channel] for testing purposes only
// TODO: this should be connection (service acquire/release) based, not request based.
private[channel] def filterFactory(c: ClientConnection) = new SimpleFilter[Req, Rep] {
def apply(request: Req, service: Service[Req, Rep]) = {
queue.remove(c)
service(request) ensure {
queue.touch(c)
}
}
}
private[channel] def closeIdleConnections() =
queue.collect(threshold.idleTimeout) match {
case Some(conn) =>
conn.close()
closed.incr()
true
case None =>
false
}
private[this] def accept(c: ClientConnection): Boolean = {
val connectionCount = connectionCounter.incrementAndGet()
if (connectionCount <= threshold.lowWaterMark)
true
else if (connectionCount <= threshold.highWaterMark) {
closeIdleConnections() // whatever the result of this, we accept the connection
true
} else {
// Try to close idle connections, if we don't find any, then we refuse the connection
closeIdleConnections()
}
}
}
| yancl/finagle-6.22.0 | finagle-core/src/main/scala/com/twitter/finagle/channel/IdleConnectionFilter.scala | Scala | apache-2.0 | 5,121 |
package info.mukel.telegrambot4s.methods
import info.mukel.telegrambot4s.models.{InputFile, Message, ReplyMarkup}
/** Use this method to send audio files, if you want Telegram clients to display the file as a playable voice message. For this to work, your audio must be in an .ogg file encoded with OPUS (other formats may be sent as Audio or Document). On success, the sent Message is returned. Bots can currently send voice messages of up to 50 MB in size, this limit may be changed in the future.
*
* @param chatId Integer or String Unique identifier for the target chat or username of the target channel (in the format @channelusername)
* @param voice InputFile or String Audio file to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data.
* @param caption String Optional Voice message caption, 0-200 characters
* @param duration Integer Optional Duration of sent audio in seconds
* @param disableNotification Boolean Optional Sends the message silently. iOS users will not receive a notification, Android users will receive a notification with no sound.
* @param replyToMessageId Integer Optional If the message is a reply, ID of the original message
* @param replyMarkup InlineKeyboardMarkup or ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply Optional Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to hide reply keyboard or to force a reply from the user.
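  *
  * A usage sketch (illustrative only; the chat id and file_id values are made up, and the
  * file_id form assumes a voice file that already exists on the Telegram servers):
  * {{{
  * SendVoice(Left(123456789L), Right("EXISTING_VOICE_FILE_ID"), caption = Some("status update"))
  * }}}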
*/
case class SendVoice(
chatId : Long Either String,
voice : InputFile Either String,
caption : Option[String] = None,
                       duration            : Option[Int] = None,
disableNotification : Option[Boolean] = None,
replyToMessageId : Option[Long] = None,
replyMarkup : Option[ReplyMarkup] = None
) extends ApiRequestMultipart[Message]
| hugemane/telegrambot4s | src/main/scala/info/mukel/telegrambot4s/methods/SendVoice.scala | Scala | apache-2.0 | 2,247 |
package Pacman
import Chisel._
class NetLedHarness(
layers: List[LayerData],
testInput: Array[Array[Int]]
) extends Module {
if (layers.last.parameters.NumberOfCores != 1) {
    throw new AssertionError("NetLedHarness only supports a single output")
}
val waitCycles = 100e6.toInt
val inputWordSize = layers(0).parameters.K
val numberOfInputWords = layers(0).parameters.MatrixWidth / inputWordSize
val inputCores = layers(0).parameters.NumberOfCores
val io = new Bundle {
val leds = Vec.fill(4)(Bits(width = 1)).asOutput
}
val net = Module(new Net(layers))
val bitBuffer = Module(new BitToWord(layers.last.parameters.MatrixHeight))
bitBuffer.io.enable := net.io.xsOutValid
bitBuffer.io.bit := net.io.xsOut(0)
io.leds := OHToUInt(bitBuffer.io.word)
val inputCycleCounter = Module(
new CounterWithSyncAndAsyncReset(0, numberOfInputWords))
val waitCounter = Module(new Counter(0, waitCycles))
val signalNewInput = waitCounter.io.value === UInt(waitCycles - 1)
inputCycleCounter.io.enable := Bool(true)
inputCycleCounter.io.asyncRst := signalNewInput
inputCycleCounter.io.syncRst := inputCycleCounter.io.value === UInt(
numberOfInputWords - 1)
val numberOfInputChunks = testInput.length / inputCores
val currentInput = Module(new AsyncCounter(0, numberOfInputChunks))
val singleImageChunks = Vec(
testInput.map(
input =>
Vec(
input
.map(bit => Bits(bit))
.grouped(layers(0).parameters.K)
.map(_.reverse.reduceLeft((a, b) => Cat(a, b)))
.toArray
)
)
)
for (i <- 0 until 40) {
println(
      testInput(i).grouped(28).map(_.mkString).mkString("\n")
)
println()
}
val testInputROMs = Vec(
singleImageChunks
.grouped(inputCores)
.map(group =>
Vec(group.transpose.map(_.reduceLeft((a, b) => Cat(b, a))).toArray))
.toArray)
val nextData = Reg(init = Bits(width = inputWordSize * inputCores))
when(currentInput.io.value === UInt(0)) {
nextData := testInputROMs(0)(inputCycleCounter.io.value)
}.elsewhen(currentInput.io.value === UInt(1)) {
nextData := testInputROMs(1)(inputCycleCounter.io.value)
}
.elsewhen(currentInput.io.value === UInt(2)) {
nextData := testInputROMs(2)(inputCycleCounter.io.value)
}
.elsewhen(currentInput.io.value === UInt(3)) {
nextData := testInputROMs(3)(inputCycleCounter.io.value)
}
.elsewhen(currentInput.io.value === UInt(4)) {
nextData := testInputROMs(4)(inputCycleCounter.io.value)
}
.elsewhen(currentInput.io.value === UInt(5)) {
nextData := testInputROMs(5)(inputCycleCounter.io.value)
}
.elsewhen(currentInput.io.value === UInt(6)) {
nextData := testInputROMs(6)(inputCycleCounter.io.value)
}
.elsewhen(currentInput.io.value === UInt(7)) {
nextData := testInputROMs(7)(inputCycleCounter.io.value)
}
.elsewhen(currentInput.io.value === UInt(8)) {
nextData := testInputROMs(8)(inputCycleCounter.io.value)
}
.elsewhen(currentInput.io.value === UInt(9)) {
nextData := testInputROMs(9)(inputCycleCounter.io.value)
}
.otherwise {
nextData := nextData
}
waitCounter.io.enable := Bool(true)
waitCounter.io.rst := signalNewInput
currentInput.io.enable := signalNewInput
net.io.start := Reg(init = Bool(false), next = signalNewInput)
net.io.xsIn := Vec(
Range(0, inputCores)
.map(i => {
val upper = (i + 1) * inputWordSize - 1
val lower = i * inputWordSize
nextData(upper, lower)
})
.toArray
)
net.io.pipeReady := Bool(true)
}
| martinhath/bnn | src/main/scala/NetLedHarness.scala | Scala | mit | 3,659 |
package mesosphere.marathon.api.v2
import java.util.Collections
import mesosphere.marathon.api.{ TaskKiller, TestAuthFixture }
import mesosphere.marathon.health.HealthCheckManager
import mesosphere.marathon.state.{ GroupManager, PathId, Timestamp }
import mesosphere.marathon.tasks.{ MarathonTasks, TaskIdUtil, TaskTracker }
import mesosphere.marathon.{ MarathonConf, MarathonSchedulerService, MarathonSpec }
import mesosphere.mesos.protos.Implicits._
import mesosphere.mesos.protos._
import mesosphere.util.Mockito
import org.mockito.Matchers.{ eq => equalTo }
import org.scalatest.{ GivenWhenThen, Matchers }
import scala.concurrent.duration._
class TasksResourceTest extends MarathonSpec with GivenWhenThen with Matchers with Mockito {
test("killTasks") {
val body = """{"ids": ["task-1", "task-2"]}"""
val bodyBytes = body.toCharArray.map(_.toByte)
val taskId1 = "task-1"
val taskId2 = "task-2"
val slaveId = SlaveID("some slave ID")
val now = Timestamp.now()
val task1 = MarathonTasks.makeTask(
taskId1, "host", ports = Nil, attributes = Nil, version = Timestamp.now(), now = now,
slaveId = slaveId
)
val task2 = MarathonTasks.makeTask(
taskId2, "host", ports = Nil, attributes = Nil, version = Timestamp.now(), now = now,
slaveId = slaveId
)
val app1 = PathId("/my/app-1")
val app2 = PathId("/my/app-2")
config.zkTimeoutDuration returns 5.seconds
taskIdUtil.appId(taskId1) returns app1
taskIdUtil.appId(taskId2) returns app2
taskTracker.fetchTask(app1, taskId1) returns Some(task1)
taskTracker.fetchTask(app2, taskId2) returns Some(task2)
val response = taskResource.killTasks(scale = false, body = bodyBytes, auth.request, auth.response)
response.getStatus shouldEqual 200
verify(taskIdUtil, atLeastOnce).appId(taskId1)
verify(taskIdUtil, atLeastOnce).appId(taskId2)
verify(taskKiller).kill(eq(app1), any, force = eq(true))
verify(taskKiller).kill(eq(app2), any, force = eq(true))
noMoreInteractions(taskKiller)
}
test("access without authentication is denied") {
Given("An unauthenticated request")
auth.authenticated = false
val req = auth.request
val resp = auth.response
val body = """{ "ids": ["a", "b", "c"] }""".getBytes
When(s"the index as json is fetched")
val running = taskResource.indexJson("status", Collections.emptyList(), req, resp)
Then("we receive a NotAuthenticated response")
running.getStatus should be(auth.NotAuthenticatedStatus)
When(s"one index as txt is fetched")
val cancel = taskResource.indexTxt(req, resp)
Then("we receive a NotAuthenticated response")
cancel.getStatus should be(auth.NotAuthenticatedStatus)
When(s"kill task is called")
val killTasks = taskResource.killTasks(true, body, req, resp)
Then("we receive a NotAuthenticated response")
killTasks.getStatus should be(auth.NotAuthenticatedStatus)
}
test("access without authorization is denied") {
Given("An unauthorized request")
auth.authenticated = true
auth.authorized = false
val req = auth.request
val resp = auth.response
val body = """{ "ids": ["a", "b", "c"] }""".getBytes
When(s"kill task is called")
val killTasks = taskResource.killTasks(true, body, req, resp)
Then("we receive a not authorized response")
killTasks.getStatus should be(auth.UnauthorizedStatus)
}
var service: MarathonSchedulerService = _
var taskTracker: TaskTracker = _
var taskKiller: TaskKiller = _
var config: MarathonConf = _
var groupManager: GroupManager = _
var healthCheckManager: HealthCheckManager = _
var taskResource: TasksResource = _
var taskIdUtil: TaskIdUtil = _
var auth: TestAuthFixture = _
before {
auth = new TestAuthFixture
service = mock[MarathonSchedulerService]
taskTracker = mock[TaskTracker]
taskKiller = mock[TaskKiller]
config = mock[MarathonConf]
groupManager = mock[GroupManager]
healthCheckManager = mock[HealthCheckManager]
taskIdUtil = mock[TaskIdUtil]
taskResource = new TasksResource(
service,
taskTracker,
taskKiller,
config,
groupManager,
healthCheckManager,
taskIdUtil,
auth.auth,
auth.auth
)
}
}
| matsluni/marathon | src/test/scala/mesosphere/marathon/api/v2/TasksResourceTest.scala | Scala | apache-2.0 | 4,282 |
package com.adamsresearch.mbs.fanniemae.issuancefiles
import java.text.SimpleDateFormat
import org.scalatest.FunSuite
/**
* Created by wma on 1/22/15.
*/
class NewIssuePoolStatisticsRecordTest extends FunSuite {
val recordType1Example = "AL6280|01|3138EN6S3|12/01/2014||$63,676,686.00|4.0|||||||10/01/2044|||||||4.69|||||355||||||CL |||"
val recordType2Example = "AV2422|02|75%|225040.0|4.875|97.0|736|360|2|359"
val recordType3Example = "AV2421|03|PURCHASE|59|100.0|$11,322,173.96"
val recordType4Example = "AV2421|04|1|59|100.0|$11,322,173.96"
val recordType5Example = "AV2422|05|PRINCIPAL RESIDENCE|6|100.0|$1,173,375.26"
val recordType6Example = "AX4930|06|RELOCATION|177|100.0|$50,890,104.07"
val recordType7Example = "AM7706|07|02/01/17|1|0.0|$17,880,000.00"
val recordType8Example = "AM7706|08|2014|1|100.0|$17,880,000.00"
val recordType9Example = "AV2421|09|COLORADO|59|100.0|$11,322,173.96"
val recordType10Example = "AY1918|10|UMPQUA BANK|6|100.0|$1,704,507.31"
val recordType11Example = "AY1918|11|11/01/2014|BELOW - 5.00|18.21|$310,457.31"
val recordType12Example = "AY1918|12|BELOW - 5.00|6|$1,704,507.31"
val recordType13Example = "AM7651|13|2.05|1|$11,700,000.00"
val recordType14Example = "AM7651|14|02/01/2015|100.0|0.37|0.37|0.37|0.532|0.532|0.532|99.999|99.999|0.37|0.37"
val recordType15Example = "AM7689|15|0.95|1.111|6.0|0.95"
val recordType16Example = "AM7655|16|Declining Premium|09/30/2021|81.000"
val recordType17Example = "AV2421|17|CORRESPONDENT|59|100.0|$11,322,173.96"
test("Pool Statistic (record type 1) parses correctly") {
val psOption = NewIssuePoolStatisticsRecord.parsePoolStatisticsRecord(recordType1Example)
psOption match {
case Some(ps) =>
assert(ps.poolNumber == "AL6280" &&
ps.recordType == "01" &&
ps.cusip == "3138EN6S3" &&
ps.poolIssueDate.compareTo(new SimpleDateFormat("MM/dd/yyyy").parse("12/01/2014")) == 0 &&
ps.poolIssueAmount.compareTo(63676686.00f) == 0 &&
ps.percentPassThroughRate.compareTo(4.0f) == 0 &&
ps.maturityDate.compareTo(new SimpleDateFormat("MM/dd/yyyy").parse("10/01/2044")) == 0 &&
ps.wACoupon.compareTo(4.69f) == 0 &&
ps.waRemainingMaturityAtIssuance == 355 &&
ps.prefix == "CL")
case None => fail("Did not successfully parse as a Pool Statistic record")
}
}
test("Quartile (record type 2) parses correctly") {
val qOption = NewIssuePoolStatisticsRecord.parseQuartileRecord(recordType2Example)
qOption match {
case Some(x) =>
assert(x.poolNumber == "AV2422" &&
x.recordType == "02" &&
x.quartileLevel == "75%" &&
x.loanSize.compareTo(225040.0f) == 0 &&
x.coupon.compareTo(4.875f) == 0 &&
x.loanToValue.compareTo(97.0f) == 0 &&
x.creditScore == 736 &&
x.loanTerm == 360 &&
x.loanAge == 2 &&
x.remainingMaturity == 359)
case None => fail("Did not successfully parse as a Quartile record")
}
}
test("Loan Purpose (record type 3) parses correctly") {
val lpOption = NewIssuePoolStatisticsRecord.parseLoanPurposeRecord(recordType3Example)
lpOption match {
case Some(x) =>
assert(x.poolNumber == "AV2421" &&
x.recordType == "03" &&
x.loanPurposeType == "PURCHASE" &&
x.numberOfLoans == 59 &&
x.percentageUPB.compareTo(100.0f) == 0 &&
x.aggregateUPB.compareTo(11322173.96f) == 0)
case None => fail("Did not successfully parse as a Loan Purpose record")
}
}
test("Property Type (record type 4) parses correctly") {
val ptOption = NewIssuePoolStatisticsRecord.parsePropertyTypeRecord(recordType4Example)
ptOption match {
case Some(x) =>
assert(x.poolNumber == "AV2421" &&
x.recordType == "04" &&
x.numberOfUnits == 1 &&
x.numberOfLoans == 59 &&
x.percentageUPB.compareTo(100.0f) == 0 &&
x.aggregateUPB.compareTo(11322173.96f) == 0)
case None => fail("Did not successfully parse as a Property Type record")
}
}
test("Occupancy Type (record type 5) parses correctly") {
val otOption = NewIssuePoolStatisticsRecord.parseOccupancyTypeRecord(recordType5Example)
otOption match {
case Some(x) =>
assert(x.poolNumber == "AV2422" &&
x.recordType == "05" &&
x.occupancyType == "PRINCIPAL RESIDENCE" &&
x.numberOfLoans == 6 &&
x.percentageUPB.compareTo(100.0f) == 0 &&
x.aggregateUPB.compareTo(1173375.26f) == 0)
case None => fail("Did not successfully parse as an Occupancy Type record")
}
}
test("Non Standard Loans (record type 6) parses correctly") {
val nslOption = NewIssuePoolStatisticsRecord.parseNonStandardLoansRecord(recordType6Example)
nslOption match {
case Some(x) =>
assert(x.poolNumber == "AX4930" &&
x.recordType == "06" &&
x.nonStandardLoanType == "RELOCATION" &&
x.numberOfLoans == 177 &&
x.percentageUPB.compareTo(100.0f) == 0 &&
x.aggregateUPB.compareTo(50890104.07f) == 0)
case None => fail("Did not successfully parse as a Non Standard Loans record")
}
}
test("First Scheduled Amortization (record type 7) parses correctly") {
val fsaOption = NewIssuePoolStatisticsRecord.parseFirstScheduledAmortizationRecord(recordType7Example)
fsaOption match {
case Some(x) =>
assert(x.poolNumber == "AM7706" &&
x.recordType == "07" &&
x.date.compareTo(new SimpleDateFormat("MM/dd/yyyy").parse("02/01/2017")) == 0 &&
x.numberOfLoans == 1 &&
x.percentageUPB.compareTo(0.0f) == 0 &&
x.aggregateUPB.compareTo(17880000.00f) == 0)
case None => fail("Did not successfully parse as a First Scheduled Amortization record")
}
}
test("Origination Year (record type 8) parses correctly") {
val oyOption = NewIssuePoolStatisticsRecord.parseOriginationYearRecord(recordType8Example)
oyOption match {
case Some(x) =>
assert(x.poolNumber == "AM7706" &&
x.recordType == "08" &&
x.year == 2014 &&
x.numberOfLoans == 1 &&
x.percentageUPB.compareTo(100.0f) == 0 &&
x.aggregateUPB.compareTo(17880000.00f) == 0)
case None => fail("Did not successfully parse as an Origination Year record")
}
}
test("Geographic Distribution (record type 9) parses correctly") {
val gdOption = NewIssuePoolStatisticsRecord.parseGeographicDistributionRecord(recordType9Example)
gdOption match {
case Some(x) =>
assert(x.poolNumber == "AV2421" &&
x.recordType == "09" &&
x.state == "COLORADO" &&
x.numberOfLoans == 59 &&
x.percentageUPB.compareTo(100.0f) == 0 &&
x.aggregateUPB.compareTo(11322173.96f) == 0)
case None => fail("Did not successfully parse as a Geographic Distribution record")
}
}
test("Servicer (record type 10) parses correctly") {
val sOption = NewIssuePoolStatisticsRecord.parseServicerRecord(recordType10Example)
sOption match {
case Some(x) =>
assert(x.poolNumber == "AY1918" &&
x.recordType == "10" &&
x.servicerName == "UMPQUA BANK" &&
x.numberOfLoans == 6 &&
x.percentageUPB.compareTo(100.0f) == 0 &&
x.aggregateUPB.compareTo(1704507.31f) == 0)
case None => fail("Did not successfully parse as a Servicer record")
}
}
test("Distribution of Loans by First Payment Date (record type 11) parses correctly") {
val dlfpdOption = NewIssuePoolStatisticsRecord.parseDistributionOfLoansByFirstPaymentDateRecord(recordType11Example)
dlfpdOption match {
case Some(x) =>
assert(x.poolNumber == "AY1918" &&
x.recordType == "11" &&
x.date.compareTo(new SimpleDateFormat("MM/dd/yyyy").parse("11/01/2014")) == 0 &&
x.originalInterestRate == "BELOW - 5.00" &&
x.percentageOfLoans.compareTo(18.21f) == 0 &&
x.aggregateUPB.compareTo(310457.31f) == 0)
case None => fail("Did not successfully parse as a Distribution of Loans by First Payment Date record")
}
}
test("Current Interest Rate (record type 12) parses correctly") {
val cirOption = NewIssuePoolStatisticsRecord.parseCurrentInterestRateRecord(recordType12Example)
cirOption match {
case Some(x) =>
assert(x.poolNumber == "AY1918" &&
x.recordType == "12" &&
x.currentMortgageInterestRate == "BELOW - 5.00" &&
x.numberOfLoans == 6 &&
x.aggregateUPB.compareTo(1704507.31f) == 0)
case None => fail("Did not successfully parse as a Current Interest Rate record")
}
}
test("Gross Margin (record type 13) parses correctly") {
val gmOption = NewIssuePoolStatisticsRecord.parseGrossMarginRecord(recordType13Example)
gmOption match {
case Some(x) =>
assert(x.poolNumber == "AM7651" &&
x.recordType == "13" &&
x.grossMargins.compareTo(2.05f) == 0 &&
x.numberOfLoans == 1 &&
x.aggregateUPB.compareTo(11700000.00f) == 0)
case None => fail("Did not successfully parse as a Gross Margin record")
}
}
test("Next Rate Change Date (record type 14) parses correctly") {
val nrcdOption = NewIssuePoolStatisticsRecord.parseNextRateChangeDateRecord(recordType14Example)
nrcdOption match {
case Some(x) =>
assert(x.poolNumber == "AM7651" &&
x.recordType == "14" &&
x.date.compareTo(new SimpleDateFormat("MM/dd/yyyy").parse("02/01/2015")) == 0 &&
x.percentageOfBalance.compareTo(100.0f) == 0 &&
x.mbsMarginHigh.compareTo(0.37f) == 0 &&
x.mbsMarginLow.compareTo(0.37f) == 0 &&
x.mbsMargin.compareTo(0.37f) == 0 &&
x.netCouponHigh.compareTo(0.532f) == 0 &&
x.netCouponLow.compareTo(0.532f) == 0 &&
x.waNetCoupon.compareTo(0.532f) == 0 &&
x.netLifeCapsHigh.compareTo(99.999f) == 0 &&
x.netLifeCapsLow.compareTo(99.999f) == 0 &&
x.netLifeFloorHigh.compareTo(0.37f) == 0 &&
x.netLifeFloorLow.compareTo(0.37f) == 0)
case None => fail("Did not successfully parse as a Next Rate Change Date record")
}
}
test ("Weighted Average for Next Rate Change Date (record type 15) parses correctly") {
val wanrcdOption = NewIssuePoolStatisticsRecord.parseWeightedAverageForNextRateChangeDateRecord(recordType15Example)
wanrcdOption match {
case Some(x) =>
assert(x.poolNumber == "AM7689" &&
x.recordType == "15" &&
x.waMbsMargin.compareTo(0.95f) == 0 &&
x.waNetCoupon.compareTo(1.111f) == 0 &&
x.waNetLifeCaps.compareTo(6.0f) == 0 &&
x.waNetLifeFloor.compareTo(0.95f) == 0)
case None => fail("Did not successfully parse as a Weighted Average for Next Rate Change Date record")
}
}
test ("Aggregate Prepayment Premium (record type 16) parses correctly") {
val appOption = NewIssuePoolStatisticsRecord.parseAggregatePrepaymentPremiumRecord(recordType16Example)
appOption match {
case Some(x) =>
assert(x.poolNumber == "AM7655" &&
x.recordType == "16" &&
x.prepaymentPremiumOption == "Declining Premium" &&
x.latestPrepaymentPremiumEndDate.compareTo(new SimpleDateFormat("MM/dd/yyyy").parse("09/30/2021")) == 0 &&
x.waPrepaymentPremiumTerm.compareTo(81.000f) == 0)
case None => fail("Did not successfully parse as a Aggregate Prepayment Premium record")
}
}
test ("Origination Type (record type 17) parses correctly") {
val otOption = NewIssuePoolStatisticsRecord.parseOriginationTypeRecord(recordType17Example)
otOption match {
case Some(x) =>
assert(x.poolNumber == "AV2421" &&
x.recordType == "17" &&
x.originationType == "CORRESPONDENT" &&
x.numberOfLoans == 59 &&
x.percentageUPB.compareTo(100.0f) == 0 &&
x.aggregateUPB.compareTo(11322173.96f) == 0)
case None => fail("Did not successfully parse as an Origination Type record")
}
}
}
| waynemadams/mbs-parser | src/test/scala/com/adamsresearch/mbs/fanniemae/issuancefiles/NewIssuePoolStatisticsRecordTest.scala | Scala | apache-2.0 | 12,371 |
package sampler.samplable
import cats.data.State
import sampler.maths.Random
case class Draw[R, T](remainder: R, drawnCounts: Map[T, Int])
/** Trait for sampling without replacement
*
* Wraps some kind of collection [R] of items of type [T], and allows them to be sampled
* without replacement.
*
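 * A minimal usage sketch (illustrative only; how the implicit Random instance is obtained and
 * how the implicit classes are brought into scope are assumptions, not taken from the source):
 * {{{
 * // implicit val rng: Random = ...   (some Random instance from sampler.maths)
 * // import the implicit classes from WithoutReplacementImplicits, e.g. via a package object
 * val bag = Map("red" -> 5, "blue" -> 2)
 * val Draw(remaining, drawn) = bag.draw(3)
 * }}}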
*/
trait WithoutReplacement[R, T] {
type Counts = Map[T, Int]
val items: R
def numRemaining: Int
def empty: State[R, Counts]
/** Draw without replacement
*
* @return A [[sampler.samplable.Draw[R,T]] containing the draw counts and remainder left
*/
def draw(n: Int = 1)(implicit r: Random): Draw[R, T] = {
val state = drawState(n)
val (remainder, drawn) = state.run(items).value
Draw(remainder, drawn)
}
/*
* State which removes a single item
*/
protected def removeOne(soFar: Counts)(implicit r: Random): State[R, Counts]
/*
* State which removes n items
*/
protected def drawState(n: Int = 1)(implicit r: Random): State[R, Counts] = for (
selection <- {
assert(numRemaining >= n, "Not enough items in the collection to sample the desired quantity")
(1 to n).foldLeft(empty)((accState, _) => accState.flatMap(removeOne))
}
) yield selection
}
trait WithoutReplacementImplicits {
implicit class WithoutReplacemenMap[T](val items: Map[T, Int]) extends WithoutReplacement[Map[T, Int], T] {
def empty = State.pure(Map[T, Int]())
def numRemaining = items.values.sum
def removeOne(soFar: Counts)(implicit r: Random): State[Counts, Counts] =
for (
item <- State[Counts, T] { s =>
val (items, counts) = s.toIndexedSeq.unzip
val countIndex = r.nextInt(s.values.sum) // TODO use counts?
val selectedIndex = counts
.view
.scanLeft(0)(_ + _)
.drop(1)
.indexWhere(_ > countIndex)
val selected: T = items(selectedIndex)
(s.updated(selected, s(selected) - 1), selected)
}
) yield soFar.updated(item, soFar.getOrElse(item, 0) + 1)
}
implicit class WithoutReplacemenSeq[T](val items: IndexedSeq[T]) extends WithoutReplacement[IndexedSeq[T], T] {
type Remain = IndexedSeq[T]
def empty = State.pure(Map[T, Int]())
def numRemaining = items.size
def removeOne(soFar: Counts)(implicit r: Random): State[Remain, Counts] =
for {
remaining <- State.get[Remain] // Fresh State[Remain, Remain], representing remaining => (remaining, remaining)
index = r.nextInt(remaining.size) // Choose random index to get an item
item <- State.pure(remaining(index)) // Equivalent to item = remaining(index)?
_ <- State.set(remaining.patch(index, Nil, 1)) // Put the remainder after sampling into a new State[Remaining, Unit]
} yield soFar.updated(item, soFar.getOrElse(item, 0) + 1) // Change the Unit (result part) into the counts
}
} | tearne/Sampler | sampler-core/src/main/scala/sampler/samplable/WithoutReplacement.scala | Scala | apache-2.0 | 2,874 |
package cbb.cloudphylo
abstract class BaseApp extends App {
val parser = new scopt.OptionParser[Config]("Cloudphylo") {
head("Cloudphylo", "1.0")
opt[String]('i', "in") required() valueName "<fasta input>" action { (x, c) =>
c.copy(in = x) } text "Sequences fasta file"
opt[String]('o', "out") action { (x, c) =>
c.copy(out = x) } text "Output file prefix"
opt[String]('c', "charset") required() valueName "<charset>" action { (x, c) =>
c.copy(charset = x) } text "Charset, dna: DNA or aa: amino acid"
opt[Int]('k', "kmer-size") action { (x, c) =>
c.copy(k = x) } text "K-mer size"
opt[Int]('C', "output-cv") action { (x, c) =>
c.copy(k = x) } text "Output CV File"
opt[String]("phylip-neighbor-bin") action { (x, c) =>
c.copy(phylipNeighborBin = x) } text "Phylip Neighbor Bin Path"
}
parser.parse(args, Config()) match {
case Some(config) => run(config)
case None =>
}
def run(config: Config)
}
| xingjianxu/cloudphylo | src/main/scala/cbb/cloudphylo/BaseApp.scala | Scala | apache-2.0 | 992 |
/*
* Copyright 2017 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.featran.transformers
import org.scalacheck._
object BinarizerSpec extends TransformerProp("Binarizer") {
property("default") = Prop.forAll { (xs: List[Double]) =>
val expected = xs.map(x => Seq(if (x > 0.0) 1.0 else 0.0))
test(Binarizer("id"), xs, Seq("id"), expected, Seq(0.0))
}
property("threshold") = Prop.forAll { (xs: List[Double], threshold: Double) =>
val expected = xs.map(x => Seq(if (x > threshold) 1.0 else 0.0))
test(Binarizer("id", threshold), xs, Seq("id"), expected, Seq(0.0))
}
}
| spotify/featran | core/src/test/scala/com/spotify/featran/transformers/BinarizerSpec.scala | Scala | apache-2.0 | 1,144 |
package org.labrad
import io.netty.channel.EventLoopGroup
import java.io.File
import org.labrad.data._
import scala.concurrent.ExecutionContext
class Client(
val name: String = Client.defaults.name,
val host: String = Client.defaults.host,
val port: Int = Client.defaults.port,
val credential: Credential = Client.defaults.credential,
val tls: TlsMode = Client.defaults.tls,
val tlsCerts: Map[String, File] = Map(),
val workerGroup: EventLoopGroup = Connection.defaultWorkerGroup
)(
implicit val executionContext: ExecutionContext = ExecutionContext.global
) extends Connection {
protected def loginData = Cluster(UInt(Client.PROTOCOL_VERSION), Str(name))
}
object Client {
val PROTOCOL_VERSION = 1L
object defaults {
def name: String = "Scala Client"
def host: String = sys.env.getOrElse("LABRADHOST", "localhost")
def username: String = sys.env.getOrElse("LABRADUSER", "")
def password: Array[Char] = sys.env.getOrElse("LABRADPASSWORD", "").toCharArray
def credential: Credential = Password(username, password)
def port: Int = tls match {
case TlsMode.ON => sys.env.getOrElse("LABRAD_TLS_PORT", "7643").toInt
case _ => sys.env.getOrElse("LABRADPORT", "7682").toInt
}
def tls: TlsMode = sys.env.get("LABRAD_TLS").map(TlsMode.fromString).getOrElse(TlsMode.STARTTLS)
}
}
| labrad/scalabrad | core/src/main/scala/org/labrad/Client.scala | Scala | mit | 1,351 |
package org.vitrivr.adampro.web.datastructures
/**
* ADAMpro
*
* Ivan Giangreco
* June 2016
*/
private[web] object Entity {}
private[web] case class EntityListResponse(code: Int, entities: Seq[String])
private[web] case class EntityDetailResponse(code: Int, entityname: String, attribute : String, details: Map[String, String])
private[web] case class EntityCreateRequest(entityname: String, attributes: Seq[EntityField])
private[web] case class EntityField(name: String, datatype: String, storagehandler : String, params : Map[String, String])
private[web] case class EntityFillRequest(entityname: String, ntuples: Int, ndims: Int)
private[web] case class EntityImportRequest(host: String, database: String, username: String, password: String)
private[web] case class EntityReadResponse(code: Int, entityname: String, details: Seq[Map[String, String]])
private[web] case class EntityPartitionRequest(entityname: String, npartitions: Int, materialize: Boolean, replace: Boolean, attribute:String)
private[web] case class EntityStorageRequest(entityname: String, attributes: Seq[String], newhandler : String)
private[web] case class StorageHandlerResponse(code: Int, handlers : Map[String, Seq[String]])
| dbisUnibas/ADAMpro | web/src/main/scala/org/vitrivr/adampro/web/datastructures/Entity.scala | Scala | mit | 1,226 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.sdk
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import com.stratio.sparta.sdk.test.DimensionTypeMock
@RunWith(classOf[JUnitRunner])
class DimensionTest extends WordSpec with Matchers {
"Dimension" should {
val defaultDimensionType = new DimensionTypeMock(Map())
val dimension = Dimension("dim1", "eventKey", "identity", defaultDimensionType)
val dimensionIdentity = Dimension("dim1", "identity", "identity", defaultDimensionType)
val dimensionNotIdentity = Dimension("dim1", "key", "key", defaultDimensionType)
"Return the associated identity precision name" in {
val expected = "identity"
val result = dimensionIdentity.getNamePrecision
result should be(expected)
}
"Return the associated name precision name" in {
val expected = "key"
val result = dimensionNotIdentity.getNamePrecision
result should be(expected)
}
"Return the associated precision name" in {
val expected = "eventKey"
val result = dimension.getNamePrecision
result should be(expected)
}
"Compare function with other dimension must be less" in {
val dimension2 = Dimension("dim2", "eventKey", "identity", defaultDimensionType)
val expected = -1
val result = dimension.compare(dimension2)
result should be(expected)
}
"Compare function with other dimension must be equal" in {
val dimension2 = Dimension("dim1", "eventKey", "identity", defaultDimensionType)
val expected = 0
val result = dimension.compare(dimension2)
result should be(expected)
}
"Compare function with other dimension must be higher" in {
val dimension2 = Dimension("dim0", "eventKey", "identity", defaultDimensionType)
val expected = 1
val result = dimension.compare(dimension2)
result should be(expected)
}
"classSuffix must be " in {
val expected = "Field"
val result = Dimension.FieldClassSuffix
result should be(expected)
}
}
}
| danielcsant/sparta | sdk/src/test/scala/com/stratio/sparta/sdk/DimensionTest.scala | Scala | apache-2.0 | 2,696 |
import scala.util.control.NonLocalReturns._
inline def [T, E <: Throwable](op: => T) rescue (fallback: PartialFunction[E, T]) =
try op
catch {
case ex: ReturnThrowable[_] => throw ex
case ex: E =>
if (fallback.isDefinedAt(ex)) fallback(ex) else throw ex
}
def test: Unit = {
9 / 0 rescue { case _: ArithmeticException => 10 }
} | som-snytt/dotty | tests/pos/i7041.scala | Scala | apache-2.0 | 352 |
/*
*
* * Licensed to the Apache Software Foundation (ASF) under one or more
* * contributor license agreements. See the NOTICE file distributed with
* * this work for additional information regarding copyright ownership.
* * The ASF licenses this file to You under the Apache License, Version 2.0
* * (the "License"); you may not use this file except in compliance with
* * the License. You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.apache.eagle.datastream
import java.util
import com.typesafe.config.Config
import org.apache.eagle.alert.dao.AlertDefinitionDAOImpl
import org.apache.eagle.executor.AlertExecutorCreationUtils
import org.jgrapht.experimental.dag.DirectedAcyclicGraph
import org.slf4j.LoggerFactory
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
/**
 * The constraints for alert are:
 * 1. only 3 kinds of StreamProducer can be put immediately before an AlertStreamSink: MapProducer, FlatMapProducer and StreamUnionProducer
 * 2. for StreamUnionProducer, the only supported unioned producers are MapProducer and FlatMapProducer
 * 3. the output of MapProducer and FlatMapProducer is a 2-field tuple of key and value; the key is a string and the value has to be a SortedMap
 * 4. the framework will wrap the original MapProducer and FlatMapProducer so that they emit a 3-field tuple {key, streamName, value}
 * 5. the framework will automatically partition traffic by the first field
 *
 *
 * Two steps are involved:
 * step 1: wrap the previous StreamProducer with one more field, "streamName"
 * step 2: partition the alert executor by its policy partitioner class
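 *
 * A minimal sketch of the rewrapping performed in step 1 (the literal values are
 * illustrative assumptions, not taken from the code below):
 * {{{
 * // a MapProducer originally emits a 2-field tuple (key, value)
 * val original: (String, java.util.SortedMap[AnyRef, AnyRef]) = ("host1", new java.util.TreeMap[AnyRef, AnyRef]())
 * // after step 1 the same event is re-emitted as (key, streamName, value)
 * val wrapped: (String, String, java.util.SortedMap[AnyRef, AnyRef]) = (original._1, "eventStream", original._2)
 * }}}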
*/
class StreamAlertExpansion(config: Config) extends StreamDAGExpansion(config) {
val LOG = LoggerFactory.getLogger(classOf[StreamAlertExpansion])
override def expand(dag: DirectedAcyclicGraph[StreamProducer, StreamConnector]): Unit ={
val iter = dag.iterator()
val toBeAddedEdges = new ListBuffer[StreamConnector]
val toBeRemovedVertex = new ListBuffer[StreamProducer]
while(iter.hasNext) {
val current = iter.next()
dag.outgoingEdgesOf(current).foreach(edge => {
val child = edge.to
onIteration(toBeAddedEdges, toBeRemovedVertex, dag, current, child)
})
}
// add back edges
toBeAddedEdges.foreach(e => {
dag.addVertex(e.from)
dag.addVertex(e.to)
dag.addEdge(e.from, e.to, e)
})
toBeRemovedVertex.foreach(v => dag.removeVertex(v))
}
def onIteration(toBeAddedEdges: ListBuffer[StreamConnector], toBeRemovedVertex: ListBuffer[StreamProducer],
dag: DirectedAcyclicGraph[StreamProducer, StreamConnector], current: StreamProducer, child: StreamProducer): Unit = {
child match {
case AlertStreamSink(id, upStreamNames, alertExecutorId, withConsumer) => {
/**
         * step 1: wrap the previous StreamProducer with one more field, "streamName"
         * for AlertStreamSink, we check the previous StreamProducer and replace it
*/
val newStreamProducers = new ListBuffer[StreamProducer]
current match {
case StreamUnionProducer(id, others) => {
val incomingEdges = dag.incomingEdgesOf(current)
incomingEdges.foreach(e => newStreamProducers += replace(toBeAddedEdges, toBeRemovedVertex, dag, e.from, upStreamNames.get(0)))
var i: Int = 1
others.foreach(o => {
newStreamProducers += replace(toBeAddedEdges, toBeRemovedVertex, dag, o, upStreamNames.get(i))
i += 1
})
}
case _: FlatMapProducer[AnyRef, AnyRef] => {
newStreamProducers += replace(toBeAddedEdges, toBeRemovedVertex, dag, current, upStreamNames.get(0))
}
case _: MapProducer => {
newStreamProducers += replace(toBeAddedEdges, toBeRemovedVertex, dag, current, upStreamNames.get(0))
}
case s: StreamProducer if dag.inDegreeOf(s) == 0 => {
newStreamProducers += replace(toBeAddedEdges, toBeRemovedVertex, dag, current, upStreamNames.get(0))
}
case p@_ => throw new IllegalStateException(s"$p can not be put before AlertStreamSink, only StreamUnionProducer,FlatMapProducer and MapProducer are supported")
}
/**
         * step 2: partition the alert executor by its policy partitioner class
*/
val alertExecutors = AlertExecutorCreationUtils.createAlertExecutors(config, new AlertDefinitionDAOImpl(config), upStreamNames, alertExecutorId)
var alertProducers = new scala.collection.mutable.MutableList[StreamProducer]
alertExecutors.foreach(exec => {
val t = FlatMapProducer(UniqueId.incrementAndGetId(), exec).withName(exec.getAlertExecutorId() + "_" + exec.getPartitionSeq())
t.setConfig(config)
t.setGraph(dag)
alertProducers += t
newStreamProducers.foreach(newsp => toBeAddedEdges += StreamConnector(newsp, t).groupBy(Seq(0)))
})
// remove AlertStreamSink
toBeRemovedVertex += child
// add alert consumer if necessary
if (withConsumer) {
AlertExecutorConsumerUtils.setupAlertConsumers(toBeAddedEdges, alertProducers.toList)
}
}
case _ =>
}
}
private def replace(toBeAddedEdges: ListBuffer[StreamConnector], toBeRemovedVertex: ListBuffer[StreamProducer],
dag: DirectedAcyclicGraph[StreamProducer, StreamConnector], current: StreamProducer, upStreamName: String) : StreamProducer= {
var newsp: StreamProducer = null
current match {
case _: FlatMapProducer[AnyRef, AnyRef] => {
val mapper = current.asInstanceOf[FlatMapProducer[_, _]].mapper
mapper match {
case a: JavaStormStreamExecutor[EagleTuple] => {
val newmapper = new JavaStormExecutorForAlertWrapper(a.asInstanceOf[JavaStormStreamExecutor[Tuple2[String, util.SortedMap[AnyRef, AnyRef]]]], upStreamName)
newsp = FlatMapProducer(UniqueId.incrementAndGetId(), newmapper)
newsp.setGraph(dag)
newsp.setConfig(config)
}
case b: StormStreamExecutor[EagleTuple] => {
val newmapper = StormExecutorForAlertWrapper(b.asInstanceOf[StormStreamExecutor[Tuple2[String, util.SortedMap[AnyRef, AnyRef]]]], upStreamName)
newsp = FlatMapProducer(UniqueId.incrementAndGetId(), newmapper)
newsp.setGraph(dag)
newsp.setConfig(config)
}
case _ => throw new IllegalArgumentException
}
// remove old StreamProducer and replace that with new StreamProducer
val incomingEdges = dag.incomingEdgesOf(current)
incomingEdges.foreach(e => toBeAddedEdges += StreamConnector(e.from, newsp))
val outgoingEdges = dag.outgoingEdgesOf(current)
outgoingEdges.foreach(e => toBeAddedEdges += StreamConnector(newsp, e.to))
toBeRemovedVertex += current
}
case _: MapProducer => {
val mapper = current.asInstanceOf[MapProducer].fn
val newfun: (AnyRef => AnyRef) = {
a => mapper(a) match {
case scala.Tuple2(x1, x2) => (x1, upStreamName, x2)
case _ => throw new IllegalArgumentException
}
}
current match {
case MapProducer(id, 2, fn) => newsp = MapProducer(UniqueId.incrementAndGetId(), 3, newfun)
case _ => throw new IllegalArgumentException
}
val incomingEdges = dag.incomingEdgesOf(current)
incomingEdges.foreach(e => toBeAddedEdges += StreamConnector(e.from, newsp))
val outgoingEdges = dag.outgoingEdgesOf(current)
outgoingEdges.foreach(e => toBeAddedEdges += StreamConnector(newsp, e.to))
toBeRemovedVertex += current
}
case s: StreamProducer if dag.inDegreeOf(s) == 0 => {
val fn:(AnyRef => AnyRef) = {
n => {
n match {
            case _: scala.Tuple3[_, _, _] => n // already a 3-field tuple, pass it through unchanged
case scala.Tuple2(x1,x2) => (x1,upStreamName,x2)
case scala.Tuple1(x1) => (if(x1 == null) null else x1.hashCode(),upStreamName,x1)
case _ => (if(n == null) null else n.hashCode(),upStreamName,n)
}
}
}
newsp = MapProducer(UniqueId.incrementAndGetId(),3,fn)
toBeAddedEdges += StreamConnector(current,newsp)
val outgoingEdges = dag.outgoingEdgesOf(current)
outgoingEdges.foreach(e => toBeAddedEdges += StreamConnector(newsp,e.to))
}
case _ => throw new IllegalArgumentException("Only FlatMapProducer and MapProducer can be replaced before AlertStreamSink")
}
newsp
}
} | eBay/Eagle | eagle-core/eagle-data-process/eagle-stream-process-api/src/main/scala/org/apache/eagle/datastream/StreamAlertExpansion.scala | Scala | apache-2.0 | 8,979 |
package com.github.j5ik2o.forseti.adaptor.validator
import com.github.j5ik2o.forseti.domain.auhtorizationCode.AuthorizationCode
import com.github.j5ik2o.forseti.domain.exception.{InvalidGrantException, OAuthException}
import com.github.j5ik2o.forseti.domain.pkce.{CodeChallenge, CodeVerifier}
import com.github.j5ik2o.forseti.infrastructure.util.Base64UrlSafeString
import com.github.j5ik2o.forseti.infrastructure.util.EitherTUtil._
import scala.concurrent.{ExecutionContext, Future}
import scalaz.{EitherT, Maybe}
trait CodeVerifierValidator {
def validate(authorizationCode: AuthorizationCode, codeVerifierOpt: Maybe[String])(
implicit ec: ExecutionContext
): EitherT[Future, OAuthException, Unit]
}
object CodeVerifierValidator {
def ofDefault: CodeVerifierValidator = new Default
private class Default extends CodeVerifierValidator {
def validate(authorizationCode: AuthorizationCode, codeVerifierOpt: Maybe[String])(
implicit ec: ExecutionContext
): EitherT[Future, OAuthException, Unit] = {
lazy val success = ().toRightTFuture[OAuthException]
authorizationCode.codeChallengeWithMethodType.map { cct =>
codeVerifierOpt.map { codeVerifier =>
val ccl = CodeChallenge(cct.methodType, CodeVerifier(codeVerifier)).value
if (ccl == cct.value.value)
success
else
createLeftOfEitherT[OAuthException, Unit](new InvalidGrantException(Maybe.just(
s"Invalid code_verifier: [(method = ${cct.methodType}, request code_verifier = $codeVerifier) " +
s" = registered code_challenge = ${cct.value.value}] != calculated code_challenge = $ccl"
)))
}.getOrElse {
createLeftOfEitherT[OAuthException, Unit](
new InvalidGrantException(Maybe.just("Required code_verifier"))
)
}
}.getOrElse(success)
}
}
}
| j5ik2o/forseti | server/server-use-case-port/src/main/scala/com/github/j5ik2o/forseti/adaptor/validator/CodeVerifierValidator.scala | Scala | mit | 1,903 |
package eggman89.extra
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkContext, SparkConf}
import org.joda.time.DateTime
/**
* Created by snehasis on 12/14/2015.
*/
object combineAndMakeCsv {
def main(args: Array[String]) {
val path = "C:/Users/sneha/Google Drive/Project/Dataset/"
val startTime = new DateTime()
//remove logging from console
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
Logger.getLogger("INFO").setLevel(Level.OFF)
System.setProperty("hadoop.home.dir", "c:/winutil/")
val conf = new SparkConf().setAppName("MusicReco").set("spark.serializer", "org.apache.spark.serializer.KryoSerializer").set("spark.executor.memory","4g").setMaster("local[*]")
val sc = new SparkContext(conf)
//setting up sql context to query the data later on
val sqlContext = new SQLContext(sc)
import sqlContext.implicits._
println("Spark Context started")
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
Logger.getLogger("INFO").setLevel(Level.OFF)
val df_song_metadata = sqlContext.load("com.databricks.spark.csv", Map("path" -> (path + "track_metadata_without_dup.csv"), "header" -> "true"))
val df_song_attributes = sqlContext.load("com.databricks.spark.csv", Map("path" -> (path + "song_attributes.csv"), "header" -> "true"))
df_song_metadata.join(df_song_attributes, df_song_attributes("track_id" ) === df_song_metadata("track_id"))
.toDF("track_id","title","song_id","release","artist_id","artist_mbid","artist_name","duration","artist_familiarity","artist_hotttnesss","year","track_7digitalid","shs_perf","shs_work","track_id1","danceability","energy","key","loudness","tempo","time_signature")
.select("track_id","title","song_id","release","artist_id","artist_mbid","artist_name","duration","artist_familiarity","artist_hotttnesss","year","track_7digitalid","shs_perf","shs_work","danceability","energy","key","loudness","tempo","time_signature").write
.format("com.databricks.spark.csv")
.option("header", "true")
.save("newcars.csv")
}
}
| eggman89/TagGenerationAndMusicReco | src/main/scala-2.10/eggman89/extra/combineAndMakeCsv.scala | Scala | apache-2.0 | 2,207 |
/*
* tswrdb - a program and API to export the TSW resource database
*
* Copyright (C) 2013 Joakim Bjørnstad <[email protected]>
*
* Licensed under the GNU General Public License version 2.
* Please see the LICENSE file for the license text in verbatim.
*/
package com.joakibj.tswrdb
import java.io.File
import rdb.strings.{StringExportFormat, StringLanguage}
object ListRdbTypesMode extends Enumeration {
val None, All, Understood = Value
}
case class Config(tswDirectory: File = new File("."),
rdbType: Int = 0,
command: String = "",
subCommand: String = "",
listMode: Enumeration#Value = ListRdbTypesMode.None,
language: StringLanguage.Value = StringLanguage.English,
stringExportFormat: StringExportFormat.Value = StringExportFormat.Xml)
object TswRdb extends App {
Console.setErr(Console.out)
val parser = new scopt.OptionParser[Config]("tswrdb") {
head("tswrdb", "0.0.1")
opt[File]("tsw") required() valueName ("<directory>") action {
(file, config) =>
config.copy(tswDirectory = file)
} text ("tsw points to the TSW install directory and is required.")
note("")
cmd("list") action {
(_, config) =>
config.copy(command = "list", listMode = ListRdbTypesMode.Understood)
} text ("Lists the valid rdb types available. Per default and to keep the user sane, only well understood RdbTypes are listed.") children (
opt[Unit]("all") abbr ("a") action {
(_, config) =>
config.copy(listMode = ListRdbTypesMode.All)
} text ("List all rdbtypes, regardless. Note that some are highly mysterious and/or esoteric. You will have to make sense of them yourself")
)
note("")
cmd("export") action {
(_, config) =>
config.copy(command = "export")
} children(
cmd("rdbtype") action {
(_, config) =>
config.copy(subCommand = "rdbtype")
} text ("Export any RdbType as they appear in the resource database.") children (
arg[Int]("<rdbType>") required() action {
(rdbType, config) =>
config.copy(rdbType = rdbType)
} text ("rdbType of the data that is going to be exported.")
),
cmd("strings") action {
(_, config) =>
config.copy(subCommand = "strings")
} text ("Export strings (RdbType 1030002). XML is output per default, this can be overriden with Option --json. ") children(
opt[String]("lang") abbr ("l") required() action {
(lang, config) =>
config.copy(language = StringLanguage.values.find(_.toString == lang).get)
} validate {
lang => if (StringLanguage.values.map(_.toString).contains(lang)) success else failure("Option --lang must be en, fr or de")
} text ("Exports all strings for the language. Valid options are en, fr or de. Required."),
opt[Unit]("json") optional() action {
(lang, config) =>
config.copy(stringExportFormat = StringExportFormat.Json)
} text ("Strings are exported as JSON.")
)
)
note("")
cmd("index") action {
(_, config) =>
config.copy(command = "index")
} children (
cmd("info") action {
(_, config) =>
config.copy(subCommand = "info")
} text ("Show information about index file: version, hash, number of entries")
)
note("")
help("help") text ("prints this usage text.")
version("version") text ("prints the version")
}
parser.parse(args, Config()) map {
config =>
CommandDispatcher(config).dispatch()
} getOrElse {
}
}
| joakibj/tswrdb | tswrdb-cmdui/src/main/scala/com/joakibj/tswrdb/TswRdb.scala | Scala | gpl-2.0 | 3,687 |
package skinny.controller
import org.scalatra.test.scalatest._
import scalikejdbc._, SQLInterpolation._
import skinny.orm.SkinnyCRUDMapper
import skinny.validator._
import skinny.ParamType
class SkinnyApiResourceSpec extends ScalatraFlatSpec {
behavior of "SkinnyApiResource"
Class.forName("org.h2.Driver")
GlobalSettings.loggingSQLAndTime = LoggingSQLAndTimeSettings(singleLineMode = true)
ConnectionPool.add('SkinnyApiResource, "jdbc:h2:mem:SkinnyApiResource", "", "")
NamedDB('SkinnyApiResource).localTx { implicit s =>
sql"create table api (id serial primary key, name varchar(64) not null, url varchar(128) not null);"
.execute.apply()
}
case class Api(id: Long, name: String, url: String)
object Api extends SkinnyCRUDMapper[Api] {
override def connectionPoolName = 'SkinnyApiResource
override def defaultAlias = createAlias("api")
override def extract(rs: WrappedResultSet, n: ResultName[Api]) = new Api(
id = rs.get(n.id), name = rs.get(n.name), url = rs.get(n.url))
}
object ApisController extends SkinnyApiResource {
override def resourceName = "api"
override def resourcesName = "apis"
override def model = Api
override def resourcesBasePath = "/api/apis"
override def createForm = validation(createParams,
paramKey("name") is required & maxLength(64),
paramKey("url") is required & maxLength(128))
override def createFormStrongParameters = Seq("name" -> ParamType.String, "url" -> ParamType.String)
override def updateForm = validation(updateParams,
paramKey("name") is maxLength(64),
paramKey("url") is maxLength(128))
override def updateFormStrongParameters = createFormStrongParameters
}
addFilter(ApisController, "/*")
it should "have list APIs" in {
get("/api/apis.json") {
status should equal(200)
}
get("/api/apis.xml") {
status should equal(200)
}
}
it should "have create API" in {
post("/api/apis.json", "name" -> "Twitter APi") {
status should equal(400)
body should equal("""{"name":[],"url":["url is required"]}""")
}
post("/api/apis.json", "name" -> "Twitter APi", "url" -> "https://dev.twitter.com/") {
status should equal(201)
}
}
it should "have update API" in {
val id = Api.createWithAttributes('name -> "Twitter", 'url -> "https://dev.twitter.com")
put(s"/api/apis/${id}.json", "name" -> "Twitter API") {
status should equal(200)
}
Api.findById(id).get.name should equal("Twitter API")
}
it should "have delete API" in {
val id = Api.createWithAttributes('name -> "Twitter", 'url -> "https://dev.twitter.com")
delete(s"/api/apis/${id}.json") {
status should equal(200)
}
Api.findById(id).isDefined should equal(false)
}
}
| BlackPrincess/skinny-framework | framework/src/test/scala/skinny/controller/SkinnyApiResourceSpec.scala | Scala | mit | 2,798 |
package mgoeminne.scalaggplot.geom
import mgoeminne.scalaggplot.position.Position
import mgoeminne.scalaggplot.stat.Statistic
import mgoeminne.scalaggplot.stat
import mgoeminne.scalaggplot.position
import org.saddle.Frame
/**
* Points, jittered to reduce overplotting.
*
 * The jitter geom is a convenient default for the point geom with position = 'Position.jitter'.
 * See [[mgoeminne.scalaggplot.position.jitter]] to learn how to adjust the amount of jittering.
*
* == Aesthetics ==
*
* This function understands the following aesthetics:
*
* - '''[[aes.x]]'''
* - '''[[aes.y]]'''
* - [[aes.alpha]]
* - [[aes.colour]]
* - [[aes.fill]]
* - [[aes.shape]]
* - [[aes.size]]
*
* == Examples ==
*
* TODO
*
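 * A hypothetical usage sketch (it relies only on the defaults defined in this file;
 * the surrounding plotting DSL is assumed rather than shown):
 *
 * {{{
 * // jittered points with the default identity stat and jitter position adjustment
 * val layer = jitter[Double]()
 *
 * // the amount of jittering is controlled through the position adjustment
 * val custom = jitter[Double](position = position.jitter())
 * }}}
 *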
* @param mapping The aesthetic mapping, usually constructed with [[mgoeminne.scalaggplot.aes.aes]] or [[mgoeminne.scalaggplot.aes.string]].
* Only needs to be set at the layer level if you are overriding the plot defaults.
* @param data A layer specific dataset - only needed if you want to override the plot defaults.
* @param stat The statistical transformation to use on the data for this layer.
 * @param position The position adjustment to use for overlapping points on this layer.
 * @param removeNA If false (the default), removes missing values with a warning. If true, silently removes missing values.
 * @tparam T the type of the underlying data values
*/
case class jitter[T]( mapping: Option[(Seq[Numeric[T]], Seq[Numeric[T]])] = None,
data: Option[Frame[Any,Any,T]] = None,
stat: Statistic = jitterUtil.defaultStat,
position: Position = jitterUtil.defaultPos,
removeNA: Boolean = false) extends Geom
object jitterUtil
{
val defaultStat = stat.identity
val defaultPos = position.jitter()
} | mgoeminne/scala-ggplot | src/main/scala/mgoeminne/scalaggplot/geom/jitter.scala | Scala | lgpl-3.0 | 1,825 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.worker
import java.lang.management.ManagementFactory
import org.apache.spark.util.{IntParam, MemoryParam, Utils}
/**
* Command-line parser for the worker.
*/
private[spark] class WorkerArguments(args: Array[String]) {
var host = Utils.localHostName()
var port = 0
var webUiPort = 8081
var cores = inferDefaultCores()
var memory = inferDefaultMemory()
var masters: Array[String] = null
var workDir: String = null
// Check for settings in environment variables
if (System.getenv("SPARK_WORKER_PORT") != null) {
port = System.getenv("SPARK_WORKER_PORT").toInt
}
if (System.getenv("SPARK_WORKER_CORES") != null) {
cores = System.getenv("SPARK_WORKER_CORES").toInt
}
if (System.getenv("SPARK_WORKER_MEMORY") != null) {
memory = Utils.memoryStringToMb(System.getenv("SPARK_WORKER_MEMORY"))
}
if (System.getenv("SPARK_WORKER_WEBUI_PORT") != null) {
webUiPort = System.getenv("SPARK_WORKER_WEBUI_PORT").toInt
}
if (System.getenv("SPARK_WORKER_DIR") != null) {
workDir = System.getenv("SPARK_WORKER_DIR")
}
parse(args.toList)
def parse(args: List[String]): Unit = args match {
case ("--ip" | "-i") :: value :: tail =>
Utils.checkHost(value, "ip no longer supported, please use hostname " + value)
host = value
parse(tail)
case ("--host" | "-h") :: value :: tail =>
Utils.checkHost(value, "Please use hostname " + value)
host = value
parse(tail)
case ("--port" | "-p") :: IntParam(value) :: tail =>
port = value
parse(tail)
case ("--cores" | "-c") :: IntParam(value) :: tail =>
cores = value
parse(tail)
case ("--memory" | "-m") :: MemoryParam(value) :: tail =>
memory = value
parse(tail)
case ("--work-dir" | "-d") :: value :: tail =>
workDir = value
parse(tail)
case "--webui-port" :: IntParam(value) :: tail =>
webUiPort = value
parse(tail)
case ("--help" | "-h") :: tail =>
printUsageAndExit(0)
case value :: tail =>
if (masters != null) { // Two positional arguments were given
printUsageAndExit(1)
}
masters = value.stripPrefix("spark://").split(",").map("spark://" + _)
parse(tail)
case Nil =>
if (masters == null) { // No positional argument was given
printUsageAndExit(1)
}
case _ =>
printUsageAndExit(1)
}
/**
* Print usage and exit JVM with the given exit code.
*/
def printUsageAndExit(exitCode: Int) {
    System.err.println(
      "Usage: Worker [options] <master>\n" +
      "\n" +
      "Master must be a URL of the form spark://hostname:port\n" +
      "\n" +
      "Options:\n" +
      "  -c CORES, --cores CORES  Number of cores to use\n" +
      "  -m MEM, --memory MEM     Amount of memory to use (e.g. 1000M, 2G)\n" +
      "  -d DIR, --work-dir DIR   Directory to run apps in (default: SPARK_HOME/work)\n" +
      "  -i HOST, --ip IP         Hostname to listen on (deprecated, please use --host or -h)\n" +
      "  -h HOST, --host HOST     Hostname to listen on\n" +
      "  -p PORT, --port PORT     Port to listen on (default: random)\n" +
" --webui-port PORT Port for web UI (default: 8081)")
System.exit(exitCode)
}
def inferDefaultCores(): Int = {
Runtime.getRuntime.availableProcessors()
}
def inferDefaultMemory(): Int = {
val ibmVendor = System.getProperty("java.vendor").contains("IBM")
var totalMb = 0
try {
val bean = ManagementFactory.getOperatingSystemMXBean()
if (ibmVendor) {
val beanClass = Class.forName("com.ibm.lang.management.OperatingSystemMXBean")
val method = beanClass.getDeclaredMethod("getTotalPhysicalMemory")
totalMb = (method.invoke(bean).asInstanceOf[Long] / 1024 / 1024).toInt
} else {
val beanClass = Class.forName("com.sun.management.OperatingSystemMXBean")
val method = beanClass.getDeclaredMethod("getTotalPhysicalMemorySize")
totalMb = (method.invoke(bean).asInstanceOf[Long] / 1024 / 1024).toInt
}
} catch {
case e: Exception => {
totalMb = 2*1024
System.out.println("Failed to get total physical memory. Using " + totalMb + " MB")
}
}
// Leave out 1 GB for the operating system, but don't return a negative memory size
math.max(totalMb - 1024, 512)
}
}
| yelshater/hadoop-2.3.0 | spark-core_2.10-1.0.0-cdh5.1.0/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala | Scala | apache-2.0 | 5,198 |
package scorex.utils
import java.net.InetAddress
import org.apache.commons.net.ntp.NTPUDPClient
object NTP extends ScorexLogging {
private val TimeTillUpdate = 1000 * 60 * 10L
private val NtpServer = "pool.ntp.org"
private var lastUpdate = 0L
private var offset = 0L
def correctedTime() = {
//CHECK IF OFFSET NEEDS TO BE UPDATED
if (System.currentTimeMillis() > lastUpdate + TimeTillUpdate) {
updateOffSet()
lastUpdate = System.currentTimeMillis()
log.info("Adjusting time with " + offset + " milliseconds.")
}
//CALCULATE CORRECTED TIME
System.currentTimeMillis() + offset
}
private def updateOffSet() {
val client = new NTPUDPClient()
client.setDefaultTimeout(10000)
try {
client.open()
val info = client.getTime(InetAddress.getByName(NtpServer))
info.computeDetails()
if (info.getOffset != null) offset = info.getOffset
} catch {
case t: Throwable => log.warn("Problems with NTP: ", t)
} finally {
client.close()
}
}
} | Pole-he/Scorex-Lagonaki | src/main/scala/scorex/utils/NTP.scala | Scala | cc0-1.0 | 1,046 |
package org.jetbrains.plugins.scala.lang.psi
import org.jetbrains.plugins.scala.lang.psi.types.ScSubstitutor
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
/**
* @author Nikolay.Tropin
*/
package object implicits {
type Candidate = (ScalaResolveResult, ScSubstitutor)
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/implicits/package.scala | Scala | apache-2.0 | 298 |
/**
* Copyright (c) 2013-2015 Patrick Nicolas - Scala for Machine Learning - All rights reserved
*
* The source code in this file is provided by the author for the sole purpose of illustrating the
* concepts and algorithms presented in "Scala for Machine Learning". It should not be used to
* build commercial applications.
* ISBN: 978-1-783355-874-2 Packt Publishing.
* Unless required by applicable law or agreed to in writing, software is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* Version 0.98
*/
package org.scalaml.supervised
import org.scalaml.core.XTSeries
import org.scalaml.core.Types.ScalaMl.DblVector
/**
 * <p>Trait that defines the interface to a supervised learning algorithm.
 * The trait requires developers to create a validation routine for a parameterized
 * multidimensional time series of tuples (observation, class label).</p>
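 *
 * <p>A hypothetical usage sketch (the concrete classifier and labelled data are assumptions,
 * not defined in this file):</p>
 * {{{
 * val model: Supervised[Double] = trainClassifier(trainingSet) // any implementation of this trait
 * // F1 measure against labelled observations, with class index 1 as the true-positive class
 * val f1: Option[Double] = model.validate(labelledSeries, tpClass = 1)
 * }}}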
* @author Patrick Nicolas
* @since March 4, 2014
* @note Scala for Machine Learning
*/
trait Supervised[T] {
/**
* validation method for supervised learning algorithm
* @param xt parameterized multidimensional time series of tuple (observation, class label)
* @param tpClass index of the class that contains the True positive labels
* @return F1 measure
*/
def validate(xt: XTSeries[(Array[T], Int)], tpClass: Int): Option[Double]
def crossValidation: Option[DblVector] = None
}
// -------------------------------- EOF ------------------------------------------ | batermj/algorithm-challenger | books/cs/machine-learning/scala-for-machine-learning/1rst-edition/original-src-from-the-book/src/main/scala/org/scalaml/supervised/Supervised.scala | Scala | apache-2.0 | 1,533 |
package org.bitcoins.core.wallet.fee
import org.bitcoins.core.currency.{CurrencyUnit, Satoshis}
import org.bitcoins.core.number.Int64
import org.bitcoins.core.protocol.transaction.Transaction
/**
* This is meant to be an abstract type that represents different fee unit measurements for
* blockchains
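 *
 * A worked sketch of the arithmetic performed by `calc` below (the 250-vbyte
 * transaction is an illustrative assumption, not taken from this file):
 * {{{
 * // a fee rate of 2 satoshis per virtual byte applied to a 250-vbyte transaction:
 * // fee = tx.vsize * toLong = 250 * 2 = 500 satoshis
 * val rate = SatoshisPerVirtualByte(Satoshis(Int64(2)))
 * // rate.calc(tx) == Satoshis(Int64(500)) when tx.vsize == 250
 * }}}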
*/
sealed abstract class FeeUnit {
def currencyUnit: CurrencyUnit
def *(tx: Transaction): CurrencyUnit = calc(tx)
def calc(tx: Transaction): CurrencyUnit = Satoshis(Int64(tx.vsize * toLong))
def toLong: Long = currencyUnit.satoshis.toLong
}
/**
* Meant to represent the different fee unit types for the bitcoin protocol
* @see [[https://en.bitcoin.it/wiki/Weight_units]]
*/
sealed abstract class BitcoinFeeUnit extends FeeUnit
case class SatoshisPerByte(currencyUnit: CurrencyUnit) extends BitcoinFeeUnit {
def toSatPerKb: SatoshisPerKiloByte = {
SatoshisPerKiloByte(currencyUnit.satoshis * Satoshis(Int64(1000)))
}
}
case class SatoshisPerKiloByte(currencyUnit: CurrencyUnit)
extends BitcoinFeeUnit {
def toSatPerByte: SatoshisPerByte = {
val conversionOpt = (currencyUnit.toBigDecimal * 0.001).toBigIntExact
conversionOpt match {
case Some(conversion) =>
val sat = Satoshis(Int64(conversion))
SatoshisPerByte(sat)
case None =>
throw new RuntimeException(
s"Failed to convert sat/kb -> sat/byte for ${currencyUnit}")
}
}
}
/**
* A 'virtual byte' (also known as virtual size) is a new weight measurement that
* was created with segregated witness (BIP141). Now 1 'virtual byte'
* has the weight of 4 bytes in the [[org.bitcoins.core.protocol.transaction.TransactionWitness]]
* of a [[org.bitcoins.core.protocol.transaction.WitnessTransaction]]
*/
case class SatoshisPerVirtualByte(currencyUnit: CurrencyUnit)
extends BitcoinFeeUnit
| bitcoin-s/bitcoin-s-core | core/src/main/scala/org/bitcoins/core/wallet/fee/FeeUnit.scala | Scala | mit | 1,851 |
package org.ensime.core.javac
import akka.event.slf4j.SLF4JLogging
import com.sun.source.tree.Scope
import com.sun.source.tree.Tree
import com.sun.source.tree.{ IdentifierTree, MemberSelectTree }
import com.sun.source.util.{ JavacTask, TreePath, Trees }
import java.io.{ File, FileInputStream, InputStream }
import java.net.URI
import java.nio.charset.Charset
import java.util.Locale
import java.util.concurrent.ConcurrentHashMap
import javax.lang.model.`type`.TypeKind
import javax.lang.model.`type`.TypeMirror
import javax.tools._
import org.ensime.api._
import org.ensime.core.DocSigPair
import org.ensime.model.LineSourcePositionHelper
import org.ensime.indexer.{ EnsimeVFS, SearchService }
import org.ensime.util.ReportHandler
import org.ensime.util.file._
import scala.collection.JavaConverters._
import scala.reflect.internal.util.{ BatchSourceFile, RangePosition, SourceFile }
import scala.tools.nsc.Settings
import scala.tools.nsc.interactive.CompilerControl
import scala.tools.nsc.io.AbstractFile
import scala.tools.nsc.reporters.Reporter
import scala.tools.refactoring.analysis.GlobalIndexes
import javax.lang.model.element.ElementKind
import javax.lang.model.element.ExecutableElement
import javax.lang.model.element.TypeElement
class JavaCompiler(
val config: EnsimeConfig,
val reportHandler: ReportHandler,
val search: SearchService,
val vfs: EnsimeVFS
) extends JavaDocFinding with JavaCompletion with JavaSourceFinding with Helpers with SLF4JLogging {
private val listener = new JavaDiagnosticListener()
private val silencer = new SilencedDiagnosticListener()
private val cp = (config.allJars ++ config.targetClasspath).mkString(File.pathSeparator)
private var workingSet = new ConcurrentHashMap[String, JavaFileObject]()
// needs to be recreated in JDK6. JDK7 seems more capable of reuse.
def getTask(
lint: String,
listener: DiagnosticListener[JavaFileObject],
files: java.lang.Iterable[JavaFileObject]
): JavacTask = {
// TODO: take a charset for each invocation
val compiler = ToolProvider.getSystemJavaCompiler()
val fileManager = compiler.getStandardFileManager(listener, null, DefaultCharset)
compiler.getTask(null, fileManager, listener, List(
"-cp", cp, "-Xlint:" + lint, "-proc:none"
).asJava, null, files).asInstanceOf[JavacTask]
}
def internSource(sf: SourceFileInfo): JavaFileObject = {
val jfo = getJavaFileObject(sf)
workingSet.put(sf.file.getAbsolutePath, jfo)
jfo
}
def askTypecheckFiles(files: List[SourceFileInfo]): Unit = {
reportHandler.clearAllJavaNotes()
for (sf <- files) {
internSource(sf)
}
typecheckAll()
}
def askLinkPos(fqn: JavaFqn, file: SourceFileInfo): Option[SourcePosition] = {
val infos = typecheckForUnits(List(file))
infos.headOption.flatMap { info => findInCompiledUnit(info, fqn) }
}
def askTypeAtPoint(file: SourceFileInfo, offset: Int): Option[TypeInfo] = {
pathToPoint(file, offset) flatMap {
case (info: CompilationInfo, path: TreePath) =>
getTypeMirror(info, offset).map(typeMirrorToTypeInfo)
}
}
def askSymbolAtPoint(file: SourceFileInfo, offset: Int): Option[SymbolInfo] = {
pathToPoint(file, offset) flatMap {
case (info: CompilationInfo, path: TreePath) =>
def withName(name: String): Option[SymbolInfo] = {
val tpeMirror = Option(info.getTrees().getTypeMirror(path))
val nullTpe = new BasicTypeInfo("NA", -1, DeclaredAs.Nil, "NA", List.empty, List.empty, None, None)
Some(SymbolInfo(
fqn(info, path).map(_.toFqnString).getOrElse(name),
name,
findDeclPos(info, path),
tpeMirror.map(typeMirrorToTypeInfo).getOrElse(nullTpe),
tpeMirror.map(_.getKind == TypeKind.EXECUTABLE).getOrElse(false),
None
))
}
path.getLeaf match {
case t: IdentifierTree => withName(t.getName.toString)
case t: MemberSelectTree => withName(t.getIdentifier.toString)
case _ => None
}
}
}
def askDocSignatureAtPoint(file: SourceFileInfo, offset: Int): Option[DocSigPair] = {
pathToPoint(file, offset) flatMap {
case (info: CompilationInfo, path: TreePath) =>
docSignature(info, path)
}
}
def askCompletionsAtPoint(
file: SourceFileInfo, offset: Int, maxResults: Int, caseSens: Boolean
): CompletionInfoList = {
completionsAt(file, offset, maxResults, caseSens)
}
protected def pathToPoint(file: SourceFileInfo, offset: Int): Option[(CompilationInfo, TreePath)] = {
val infos = typecheckForUnits(List(file))
infos.headOption.flatMap { info =>
val path = Option(new TreeUtilities(info).pathFor(offset))
path.map { p => (info, p) }
}
}
protected def scopeForPoint(file: SourceFileInfo, offset: Int): Option[(CompilationInfo, Scope)] = {
val infos = typecheckForUnits(List(file))
infos.headOption.flatMap { info =>
val path = Option(new TreeUtilities(info).scopeFor(offset))
path.map { p => (info, p) }
}
}
private def typeMirrorToTypeInfo(tm: TypeMirror): TypeInfo = {
BasicTypeInfo(tm.toString, -1, DeclaredAs.Class, tm.toString, List(), List(), Some(EmptySourcePosition()), None)
}
private def getTypeMirror(info: CompilationInfo, offset: Int): Option[TypeMirror] = {
val path = Option(new TreeUtilities(info).pathFor(offset))
// Uncomment to debug the AST path.
//for (p <- path) { for (t <- p) { System.err.println(t.toString()) } }
path.flatMap { p => Option(info.getTrees().getTypeMirror(p)) }
}
private def typecheckAll(): Unit = {
val task = getTask("all", listener, workingSet.values)
val t = System.currentTimeMillis()
task.parse()
task.analyze()
log.info("Parsed and analyzed: " + (System.currentTimeMillis() - t) + "ms")
}
private def typecheckForUnits(inputs: List[SourceFileInfo]): Vector[CompilationInfo] = {
// We only want the compilation units for inputs, but we need to typecheck them w.r.t
// the full working set.
val inputJfos = inputs.map { sf => internSource(sf).toUri }.toSet
val task = getTask("none", silencer, workingSet.values)
val t = System.currentTimeMillis()
val units = task.parse().asScala.filter { unit => inputJfos.contains(unit.getSourceFile.toUri) }
.map(new CompilationInfo(task, _)).toVector
task.analyze()
log.info("Parsed and analyzed for trees: " + (System.currentTimeMillis() - t) + "ms")
units
}
private class JavaObjectWithContents(val f: File, val contents: String)
extends SimpleJavaFileObject(f.toURI, JavaFileObject.Kind.SOURCE) {
override def getCharContent(ignoreEncodingErrors: Boolean): CharSequence = contents
}
private class JavaObjectFromFile(val f: File)
extends SimpleJavaFileObject(f.toURI, JavaFileObject.Kind.SOURCE) {
override def getCharContent(ignoreEncodingErrors: Boolean): CharSequence = f.readString
override def openInputStream(): InputStream = new FileInputStream(f)
}
private def getJavaFileObject(sf: SourceFileInfo): JavaFileObject = sf match {
case SourceFileInfo(f, None, None) => new JavaObjectFromFile(f)
case SourceFileInfo(f, Some(contents), None) => new JavaObjectWithContents(f, contents)
case SourceFileInfo(f, None, Some(contentsIn)) => new JavaObjectWithContents(f, contentsIn.readString)
}
private class JavaDiagnosticListener extends DiagnosticListener[JavaFileObject] with ReportHandler {
def report(diag: Diagnostic[_ <: JavaFileObject]): Unit = {
reportHandler.reportJavaNotes(List(
Note(
diag.getSource().getName(),
diag.getMessage(Locale.ENGLISH),
diag.getKind() match {
case Diagnostic.Kind.ERROR => NoteError
case Diagnostic.Kind.WARNING => NoteWarn
case Diagnostic.Kind.MANDATORY_WARNING => NoteWarn
case _ => NoteInfo
},
diag.getStartPosition() match {
case x if x > -1 => x.toInt
case _ => diag.getPosition().toInt
},
diag.getEndPosition().toInt,
diag.getLineNumber().toInt,
diag.getColumnNumber().toInt
)
))
}
}
private class SilencedDiagnosticListener extends DiagnosticListener[JavaFileObject] with ReportHandler {
def report(diag: Diagnostic[_ <: JavaFileObject]): Unit = {}
}
}
| jacobono/ensime-server | core/src/main/scala/org/ensime/core/javac/JavaCompiler.scala | Scala | gpl-3.0 | 8,432 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.aggregate
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.TypeUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the sum calculated from values of a group.",
examples = """
Examples:
> SELECT _FUNC_(col) FROM VALUES (5), (10), (15) AS tab(col);
30
> SELECT _FUNC_(col) FROM VALUES (NULL), (10), (15) AS tab(col);
25
> SELECT _FUNC_(col) FROM VALUES (NULL), (NULL) AS tab(col);
NULL
""",
since = "1.0.0")
case class Sum(child: Expression) extends DeclarativeAggregate with ImplicitCastInputTypes {
override def children: Seq[Expression] = child :: Nil
override def nullable: Boolean = true
// Return data type.
override def dataType: DataType = resultType
override def inputTypes: Seq[AbstractDataType] = Seq(NumericType)
override def checkInputDataTypes(): TypeCheckResult =
TypeUtils.checkForNumericExpr(child.dataType, "function sum")
private lazy val resultType = child.dataType match {
case DecimalType.Fixed(precision, scale) =>
DecimalType.bounded(precision + 10, scale)
case _: IntegralType => LongType
case _ => DoubleType
}
private lazy val sumDataType = resultType
private lazy val sum = AttributeReference("sum", sumDataType)()
private lazy val zero = Cast(Literal(0), sumDataType)
override lazy val aggBufferAttributes = sum :: Nil
override lazy val initialValues: Seq[Expression] = Seq(
/* sum = */ Literal.create(null, sumDataType)
)
override lazy val updateExpressions: Seq[Expression] = {
if (child.nullable) {
Seq(
/* sum = */
coalesce(coalesce(sum, zero) + child.cast(sumDataType), sum)
)
} else {
Seq(
/* sum = */
coalesce(sum, zero) + child.cast(sumDataType)
)
}
}
override lazy val mergeExpressions: Seq[Expression] = {
Seq(
/* sum = */
coalesce(coalesce(sum.left, zero) + sum.right, sum.left)
)
}
override lazy val evaluateExpression: Expression = resultType match {
case d: DecimalType => CheckOverflow(sum, d, SQLConf.get.decimalOperationsNullOnOverflow)
case _ => sum
}
}
| techaddict/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Sum.scala | Scala | apache-2.0 | 3,233 |
package monocle.std
import monocle.function._
import monocle.{Iso, Lens}
import scalaz.Tree
import scala.annotation.tailrec
import scala.collection.immutable.Stream.Empty
object tree extends TreeFunctions with TreeInstances
trait TreeFunctions {
def rootLabel[A]: Lens[Tree[A], A] =
Lens[Tree[A], A](_.rootLabel)(l => tree => Tree.node(l, tree.subForest))
def subForest[A]: Lens[Tree[A], Stream[Tree[A]]] =
Lens[Tree[A], Stream[Tree[A]]](_.subForest)(children => tree => Tree.node(tree.rootLabel, children))
def leftMostLabel[A]: Lens[Tree[A], A] = {
@tailrec
def _get(tree: Tree[A]): A = tree.subForest match {
case Empty => tree.rootLabel
case x #:: xs => _get(x)
}
def _set(newLeaf: A)(tree: Tree[A]): Tree[A] = tree.subForest match {
case Empty => Tree.leaf(newLeaf)
case xs => Tree.node(tree.rootLabel, headOption[Stream[Tree[A]], Tree[A]].modify(_set(newLeaf))(xs) )
}
Lens(_get)(_set)
}
def rightMostLabel[A]: Lens[Tree[A], A] = {
@tailrec
def _get(tree: Tree[A]): A = tree.subForest match {
case Empty => tree.rootLabel
case xs => _get(xs.last)
}
def _set(newLeaf: A)(tree: Tree[A]): Tree[A] = tree.subForest match {
case Empty => Tree.leaf(newLeaf)
case xs => Tree.node(tree.rootLabel, lastOption[Stream[Tree[A]], Tree[A]].modify(_set(newLeaf))(xs) )
}
Lens(_get)(_set)
}
}
trait TreeInstances {
implicit def treeEach[A]: Each[Tree[A], A] = Each.traverseEach[Tree, A]
implicit def treeReverse[A]: Reverse[Tree[A], Tree[A]] = new Reverse[Tree[A], Tree[A]] {
def reverse = Iso[Tree[A], Tree[A]](reverseTree)(reverseTree)
private def reverseTree(tree: Tree[A]): Tree[A] = Tree.node(tree.rootLabel, tree.subForest.reverse.map(reverseTree))
}
}
| CapeSepias/Monocle | core/src/main/scala/monocle/std/Tree.scala | Scala | mit | 1,811 |
package model
import grizzled.slf4j.Logging
import io._
import io.config.ConfigMappings._
import io.config.Configuration
import maths.RandomNumberGenerator
import maths.integration.RungeKuttaIntegration
import physical.Turbulence
import physical.flow.Flow
import utilities.SimpleTimer
class CoupledBiophysicalModel(val config: Configuration, val name: String)
extends Logging {
setConfiguredLogLevel()
if (config.settings.randomSeed.isValidInt) {
RandomNumberGenerator.setSeed(config.settings.randomSeed)
}
val flow: Flow = config.flow
val clock = new SimulationClock(flow.period, flow.timeStep)
  val turbulence: Option[Turbulence] =
    if (config.turbulence.applyTurbulence)
      Some(
        new Turbulence(
          config.turbulence.horizontalDiffusionCoefficient,
          config.turbulence.verticalDiffusionCoefficient,
          flow.timeStep.totalSeconds
        )
      )
    else None
val ocean = new PhysicalModel(config)
val integrator = new RungeKuttaIntegration(
ocean.flowController,
turbulence,
flow.timeStep.totalSeconds
)
val biology = new BiologicalModel(config, clock, integrator)
def run(): Unit = {
try {
val simulationTimer = new SimpleTimer()
simulationTimer.start()
info("Simulation run started")
val stepTimer = new SimpleTimer()
stepTimer.start()
while (clock.stillTime && biology.isDispersing(clock.now)) {
biology()
if (clock.isMidnight) {
if (config.larva.isMortal.getOrElse(false)) {
biology.applyMortality()
}
info(
"Day " + clock.now.toLocalDate + " has been completed in " + stepTimer
.stop() + " secs with " + biology.pelagicLarvae.size + " larvae."
)
stepTimer.start()
ocean.circulate()
}
clock.tick()
}
val time: Double = simulationTimer.stop() / 60.0
info(f"Simulation run completed in $time%.2f minutes")
val still = clock.stillTime
val disperse = biology.isDispersing(clock.now)
debug(s"Still movin': $still and dispersin': $disperse")
ocean.shutdown()
val resultsWriter =
new ResultsWriter(biology.stationaryLarvae.toArray, config.output, name)
resultsWriter.write()
} catch {
case e: Exception => e.printStackTrace()
} finally {
ocean.shutdown()
}
}
private def setConfiguredLogLevel(): Unit = {
//val logFile = config.output.logFile
if (config.output.logFile.nonEmpty) {
System.setProperty(
"org.slf4j.simpleLogger.logFile",
config.output.logFile
)
}
config.output.logLevel match {
case "debug" =>
System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "debug")
case "trace" =>
System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "trace")
case "error" =>
System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "error")
case "off" =>
System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "off")
case "all" =>
System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "all")
case _ =>
System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "info")
}
}
}
| shawes/zissou | src/main/scala/model/CoupledBiophysicalModel.scala | Scala | mit | 3,313 |
package de.hyronx.matter.compiler.types
import org.scalatest._
class TypesTest extends FlatSpec with Matchers {
"The standard types" should "correctly show up" in {
Types.root.printTree(true, 0, true)
succeed
}
}
| hyronx/matter | src/test/scala/de/hyronx/matter/compiler/types/TypesTest.scala | Scala | apache-2.0 | 227 |
package shield.implicits
import org.specs2.mutable.Specification
import spray.http.HttpHeaders.RawHeader
import spray.http.{HttpResponse, HttpRequest}
import shield.implicits.HttpImplicits._
class ImplicitsSpec extends Specification {
"HttpImplicits" should {
"Replace a request header" in {
var request = HttpRequest().withHeaders(
RawHeader("sample", "header")
)
request = request.withReplacedHeaders(RawHeader("sample","newHeader"))
request.headers.length must be equalTo 1
request.headers(0).name must be equalTo "sample"
request.headers(0).value must be equalTo "newHeader"
}
"Add a request header" in {
var request = HttpRequest().withHeaders(
RawHeader("sample", "header")
)
request = request.withAdditionalHeaders(
RawHeader("additional", "testHeader")
)
request.headers.length must be equalTo 2
request.headers.find(_.lowercaseName == "additional").get.value must be equalTo "testHeader"
request.headers.find(_.lowercaseName == "sample").get.value must be equalTo "header"
}
"Strip a request header" in {
var request = HttpRequest().withHeaders(
RawHeader("sample", "header"),
RawHeader("additional", "testHeader")
)
request = request.withStrippedHeaders(Set("sample"))
request.headers.length must be equalTo 1
request.headers(0).name must be equalTo "additional"
request.headers(0).value must be equalTo "testHeader"
}
"Replace a response header" in {
var response = HttpResponse().withHeaders(
RawHeader("sample", "header")
)
response = response.withReplacedHeaders(RawHeader("sample","newHeader"))
response.headers.length must be equalTo 1
response.headers(0).name must be equalTo "sample"
response.headers(0).value must be equalTo "newHeader"
}
"Add a response header" in {
var response = HttpResponse().withHeaders(
RawHeader("sample", "header")
)
response = response.withAdditionalHeaders(
RawHeader("additional", "testHeader")
)
response.headers.length must be equalTo 2
response.headers.find(_.lowercaseName == "additional").get.value must be equalTo "testHeader"
response.headers.find(_.lowercaseName == "sample").get.value must be equalTo "header"
}
"Strip a response header" in {
var response = HttpResponse().withHeaders(
RawHeader("sample", "header"),
RawHeader("additional", "testHeader")
)
response = response.withStrippedHeaders(Set("sample"))
response.headers.length must be equalTo 1
response.headers(0).name must be equalTo "additional"
response.headers(0).value must be equalTo "testHeader"
}
"Strip a response header that is capitalized" in {
var response = HttpResponse().withHeaders(
RawHeader("X-Cache", "header"),
RawHeader("additional","testHeader")
)
response = response.withStrippedHeaders(Set("X-Cache"))
response.headers.length must be equalTo 1
response.headers(0).name must be equalTo "additional"
response.headers(0).value must be equalTo "testHeader"
}
}
}
| RetailMeNot/shield | src/test/scala/shield/implicits/ImplicitsSpec.scala | Scala | mit | 3,233 |
package controllers
import java.util.concurrent.TimeUnit
import javax.inject._
import org.pac4j.core.config.Config
import org.pac4j.core.profile.{CommonProfile, ProfileManager}
import org.pac4j.play.scala.{Security, SecurityComponents}
import org.pac4j.play.store.PlaySessionStore
import play.api._
import play.api.mvc._
import play.libs.concurrent.HttpExecutionContext
import play.api.routing._
import _root_.controllers.support.RequireAccess
import _root_.controllers.support.Consented
import dao.user.UserDAO
import models.{Non, OrganizationId}
import org.pac4j.play.PlayWebContext
@Singleton
class ApplicationController @Inject()(/*override val config: Config, override val playSessionStore: PlaySessionStore, override val ec: HttpExecutionContext,*/ val controllerComponents: SecurityComponents, userDAO: UserDAO) extends BaseController with Security[CommonProfile] {
def index = Action { implicit request =>
Ok(views.html.index())
}
// def secure = RequireAccess(Non, to=OrganizationId(0)) { Secure("RedirectUnauthenticatedClient", "Access").async{ authenticatedRequest => Consented(authenticatedRequest, userDAO) { implicit user => Action { implicit request =>
// val webContext = new PlayWebContext(request, playSessionStore)
// val profileManager = new ProfileManager[CommonProfile](webContext)
// val profile = profileManager.get(true)
//
// Ok(views.html.secure())
// } } } }
def javascriptRoutes = Action { implicit request =>
Ok(
JavaScriptReverseRouter("jsRoutes")(
_root_.controllers.library.routes.javascript.LibraryController.viewQuestion,
_root_.controllers.library.routes.javascript.LibraryController.createQuestionCopyView,
_root_.controllers.library.routes.javascript.LibraryController.questionListAjax,
_root_.controllers.quiz.routes.javascript.QuizController.attachAjax,
_root_.controllers.quiz.routes.javascript.QuestionController.view,
_root_.controllers.quiz.routes.javascript.QuestionController.removeAjax,
_root_.controllers.organization.routes.javascript.CourseController.studentSelfQuestion
)
).as("text/javascript")
}
} | kristiankime/calc-tutor | app/controllers/ApplicationController.scala | Scala | mit | 2,206 |
/*
Copyright (c) 2016, Robby, Kansas State University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sireum.intellij.logika
import javax.swing.Icon
import com.intellij.openapi.util.IconLoader
import com.intellij.openapi.fileTypes.{PlainTextLanguage, LanguageFileType, FileTypeConsumer, FileTypeFactory}
class LogikaFileTypeFactory extends FileTypeFactory {
override def createFileTypes(fileTypeConsumer: FileTypeConsumer): Unit = {
for (e <- LogikaFileType.extensions) {
fileTypeConsumer.consume(LogikaFileType, e)
}
}
}
object LogikaFileType extends LanguageFileType(PlainTextLanguage.INSTANCE) {
final val extensions = Set("logika", "lgk")
final val icon = IconLoader.getIcon("/logika/icon/logika-4.png")
override val getDefaultExtension: String = "logika"
override val getName: String = "Logika"
override val getIcon: Icon = icon
override val getDescription: String = "Sireum Logika files"
}
| sireum/v3-intellij | src/org/sireum/intellij/logika/LogikaFileTypeFactory.scala | Scala | bsd-2-clause | 2,188 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import java.util.Locale
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalog.v2.{CatalogNotFoundException, CatalogPlugin, LookupCatalog}
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.encoders.OuterScopes
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.SubExprUtils._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.catalyst.trees.TreeNodeRef
import org.apache.spark.sql.catalyst.util.toPrettySQL
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
* A trivial [[Analyzer]] with a dummy [[SessionCatalog]] and [[EmptyFunctionRegistry]].
* Used for testing when all relations are already filled in and the analyzer needs only
* to resolve attribute references.
*/
object SimpleAnalyzer extends Analyzer(
new SessionCatalog(
new InMemoryCatalog,
EmptyFunctionRegistry,
new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)) {
    override def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean): Unit = {}
},
new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
/**
 * Provides a way to keep state during the analysis; this enables us to decouple the concerns
 * of the analysis environment from the catalog.
* The state that is kept here is per-query.
*
* Note this is thread local.
*
 * @param defaultDatabase The default database used in the view resolution; it overrules the
 *                        current catalog database.
 * @param nestedViewDepth The nested depth in the view resolution; it enables us to limit the
 *                        depth of nested views.
*/
case class AnalysisContext(
defaultDatabase: Option[String] = None,
nestedViewDepth: Int = 0)
object AnalysisContext {
private val value = new ThreadLocal[AnalysisContext]() {
override def initialValue: AnalysisContext = AnalysisContext()
}
def get: AnalysisContext = value.get()
def reset(): Unit = value.remove()
private def set(context: AnalysisContext): Unit = value.set(context)
def withAnalysisContext[A](database: Option[String])(f: => A): A = {
val originContext = value.get()
val context = AnalysisContext(defaultDatabase = database,
nestedViewDepth = originContext.nestedViewDepth + 1)
set(context)
try f finally { set(originContext) }
}
}
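// A minimal usage sketch (illustrative only; `childPlan` is a hypothetical, already parsed plan,
// and executeSameContext is the Analyzer-private helper used later in ResolveRelations): view
// resolution wraps the recursive analysis in `withAnalysisContext` so that relations inside the
// view are looked up against the view's default database, and the previous context (including
// the nested view depth) is restored afterwards.
//
//   AnalysisContext.withAnalysisContext(Some("db1")) {
//     // here AnalysisContext.get.defaultDatabase == Some("db1"), nestedViewDepth is incremented
//     executeSameContext(childPlan)
//   }
//   // the original AnalysisContext is restored at this point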
/**
* Provides a logical query plan analyzer, which translates [[UnresolvedAttribute]]s and
* [[UnresolvedRelation]]s into fully typed objects using information in a [[SessionCatalog]].
*/
class Analyzer(
catalog: SessionCatalog,
conf: SQLConf,
maxIterations: Int)
extends RuleExecutor[LogicalPlan] with CheckAnalysis with LookupCatalog {
def this(catalog: SessionCatalog, conf: SQLConf) = {
this(catalog, conf, conf.optimizerMaxIterations)
}
override protected def lookupCatalog(name: String): CatalogPlugin =
throw new CatalogNotFoundException("No catalog lookup function")
def executeAndCheck(plan: LogicalPlan, tracker: QueryPlanningTracker): LogicalPlan = {
AnalysisHelper.markInAnalyzer {
val analyzed = executeAndTrack(plan, tracker)
try {
checkAnalysis(analyzed)
analyzed
} catch {
case e: AnalysisException =>
val ae = new AnalysisException(e.message, e.line, e.startPosition, Option(analyzed))
ae.setStackTrace(e.getStackTrace)
throw ae
}
}
}
override def execute(plan: LogicalPlan): LogicalPlan = {
AnalysisContext.reset()
try {
executeSameContext(plan)
} finally {
AnalysisContext.reset()
}
}
private def executeSameContext(plan: LogicalPlan): LogicalPlan = super.execute(plan)
def resolver: Resolver = conf.resolver
protected val fixedPoint = FixedPoint(maxIterations)
/**
* Override to provide additional rules for the "Resolution" batch.
*/
val extendedResolutionRules: Seq[Rule[LogicalPlan]] = Nil
/**
* Override to provide rules to do post-hoc resolution. Note that these rules will be executed
 * in an individual batch. This batch runs right after the normal resolution batch and
 * executes its rules in one pass.
*/
val postHocResolutionRules: Seq[Rule[LogicalPlan]] = Nil
lazy val batches: Seq[Batch] = Seq(
Batch("Hints", fixedPoint,
new ResolveHints.ResolveJoinStrategyHints(conf),
ResolveHints.ResolveCoalesceHints,
new ResolveHints.RemoveAllHints(conf)),
Batch("Simple Sanity Check", Once,
LookupFunctions),
Batch("Substitution", fixedPoint,
CTESubstitution,
WindowsSubstitution,
EliminateUnions,
new SubstituteUnresolvedOrdinals(conf)),
Batch("Resolution", fixedPoint,
ResolveTableValuedFunctions ::
ResolveTables ::
ResolveRelations ::
ResolveReferences ::
ResolveCreateNamedStruct ::
ResolveDeserializer ::
ResolveNewInstance ::
ResolveUpCast ::
ResolveGroupingAnalytics ::
ResolvePivot ::
ResolveOrdinalInOrderByAndGroupBy ::
ResolveAggAliasInGroupBy ::
ResolveMissingReferences ::
ExtractGenerator ::
ResolveGenerate ::
ResolveFunctions ::
ResolveAliases ::
ResolveSubquery ::
ResolveSubqueryColumnAliases ::
ResolveWindowOrder ::
ResolveWindowFrame ::
ResolveNaturalAndUsingJoin ::
ResolveOutputRelation ::
ExtractWindowExpressions ::
GlobalAggregates ::
ResolveAggregateFunctions ::
TimeWindowing ::
ResolveInlineTables(conf) ::
ResolveHigherOrderFunctions(catalog) ::
ResolveLambdaVariables(conf) ::
ResolveTimeZone(conf) ::
ResolveRandomSeed ::
TypeCoercion.typeCoercionRules(conf) ++
extendedResolutionRules : _*),
Batch("Post-Hoc Resolution", Once, postHocResolutionRules: _*),
Batch("View", Once,
AliasViewChild(conf)),
Batch("Nondeterministic", Once,
PullOutNondeterministic),
Batch("UDF", Once,
HandleNullInputsForUDF),
Batch("UpdateNullability", Once,
UpdateAttributeNullability),
Batch("Subquery", Once,
UpdateOuterReferences),
Batch("Cleanup", fixedPoint,
CleanupAliases)
)
/**
 * Analyze CTE definitions and substitute the child plan with the analyzed CTE definitions.
*/
object CTESubstitution extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case With(child, relations) =>
// substitute CTE expressions right-to-left to resolve references to previous CTEs:
// with a as (select * from t), b as (select * from a) select * from b
relations.foldRight(child) {
case ((cteName, ctePlan), currentPlan) =>
substituteCTE(currentPlan, cteName, ctePlan)
}
case other => other
}
def substituteCTE(plan: LogicalPlan, cteName: String, ctePlan: LogicalPlan): LogicalPlan = {
plan resolveOperatorsUp {
case UnresolvedRelation(Seq(table)) if resolver(cteName, table) =>
ctePlan
case u: UnresolvedRelation =>
u
case other =>
// This cannot be done in ResolveSubquery because ResolveSubquery does not know the CTE.
other transformExpressions {
case e: SubqueryExpression =>
e.withNewPlan(substituteCTE(e.plan, cteName, ctePlan))
}
}
}
}
/**
* Substitute child plan with WindowSpecDefinitions.
*/
object WindowsSubstitution extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// Lookup WindowSpecDefinitions. This rule works with unresolved children.
case WithWindowDefinition(windowDefinitions, child) => child.resolveExpressions {
case UnresolvedWindowExpression(c, WindowSpecReference(windowName)) =>
val errorMessage =
s"Window specification $windowName is not defined in the WINDOW clause."
val windowSpecDefinition =
windowDefinitions.getOrElse(windowName, failAnalysis(errorMessage))
WindowExpression(c, windowSpecDefinition)
}
}
}
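  // Illustrative example (table and column names are hypothetical): for a query such as
  //   SELECT sum(x) OVER w FROM t WINDOW w AS (PARTITION BY y ORDER BY z)
  // the rule above replaces the UnresolvedWindowExpression that references `w` with a
  // WindowExpression carrying the WindowSpecDefinition from the WINDOW clause, and fails
  // analysis if `w` is not defined there.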
/**
* Replaces [[UnresolvedAlias]]s with concrete aliases.
*/
object ResolveAliases extends Rule[LogicalPlan] {
private def assignAliases(exprs: Seq[NamedExpression]) = {
exprs.map(_.transformUp { case u @ UnresolvedAlias(child, optGenAliasFunc) =>
child match {
case ne: NamedExpression => ne
case go @ GeneratorOuter(g: Generator) if g.resolved => MultiAlias(go, Nil)
case e if !e.resolved => u
case g: Generator => MultiAlias(g, Nil)
case c @ Cast(ne: NamedExpression, _, _) => Alias(c, ne.name)()
case e: ExtractValue => Alias(e, toPrettySQL(e))()
case e if optGenAliasFunc.isDefined =>
Alias(child, optGenAliasFunc.get.apply(e))()
case e => Alias(e, toPrettySQL(e))()
}
}
).asInstanceOf[Seq[NamedExpression]]
}
private def hasUnresolvedAlias(exprs: Seq[NamedExpression]) =
exprs.exists(_.find(_.isInstanceOf[UnresolvedAlias]).isDefined)
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case Aggregate(groups, aggs, child) if child.resolved && hasUnresolvedAlias(aggs) =>
Aggregate(groups, assignAliases(aggs), child)
case g: GroupingSets if g.child.resolved && hasUnresolvedAlias(g.aggregations) =>
g.copy(aggregations = assignAliases(g.aggregations))
case Pivot(groupByOpt, pivotColumn, pivotValues, aggregates, child)
if child.resolved && groupByOpt.isDefined && hasUnresolvedAlias(groupByOpt.get) =>
Pivot(Some(assignAliases(groupByOpt.get)), pivotColumn, pivotValues, aggregates, child)
case Project(projectList, child) if child.resolved && hasUnresolvedAlias(projectList) =>
Project(assignAliases(projectList), child)
}
}
object ResolveGroupingAnalytics extends Rule[LogicalPlan] {
/*
* GROUP BY a, b, c WITH ROLLUP
* is equivalent to
* GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (a), ( ) ).
* Group Count: N + 1 (N is the number of group expressions)
*
   * We need to get all of its subsets for the rule described above; each subset is
   * represented as a sequence of expressions.
*/
def rollupExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.inits.toIndexedSeq
/*
* GROUP BY a, b, c WITH CUBE
* is equivalent to
* GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (b, c), (a, c), (a), (b), (c), ( ) ).
* Group Count: 2 ^ N (N is the number of group expressions)
*
   * We need to get all of its subsets for a given GROUP BY expression; the subsets are
   * represented as sequences of expressions.
*/
def cubeExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = {
// `cubeExprs0` is recursive and returns a lazy Stream. Here we call `toIndexedSeq` to
// materialize it and avoid serialization problems later on.
cubeExprs0(exprs).toIndexedSeq
}
def cubeExprs0(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.toList match {
case x :: xs =>
val initial = cubeExprs0(xs)
initial.map(x +: _) ++ initial
case Nil =>
Seq(Seq.empty)
}
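    // Illustrative expansions (a sketch, not executed here): for grouping expressions a and b,
    //   rollupExprs(Seq(a, b)) == Seq(Seq(a, b), Seq(a), Seq())
    //   cubeExprs(Seq(a, b))   == Seq(Seq(a, b), Seq(a), Seq(b), Seq())
    // matching the N + 1 prefixes for ROLLUP and the 2^N subsets for CUBE described above.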
private[analysis] def hasGroupingFunction(e: Expression): Boolean = {
e.collectFirst {
case g: Grouping => g
case g: GroupingID => g
}.isDefined
}
private def replaceGroupingFunc(
expr: Expression,
groupByExprs: Seq[Expression],
gid: Expression): Expression = {
expr transform {
case e: GroupingID =>
if (e.groupByExprs.isEmpty || e.groupByExprs == groupByExprs) {
Alias(gid, toPrettySQL(e))()
} else {
throw new AnalysisException(
s"Columns of grouping_id (${e.groupByExprs.mkString(",")}) does not match " +
s"grouping columns (${groupByExprs.mkString(",")})")
}
case e @ Grouping(col: Expression) =>
val idx = groupByExprs.indexWhere(_.semanticEquals(col))
if (idx >= 0) {
Alias(Cast(BitwiseAnd(ShiftRight(gid, Literal(groupByExprs.length - 1 - idx)),
Literal(1)), ByteType), toPrettySQL(e))()
} else {
throw new AnalysisException(s"Column of grouping ($col) can't be found " +
s"in grouping columns ${groupByExprs.mkString(",")}")
}
}
}
/*
* Create new alias for all group by expressions for `Expand` operator.
*/
private def constructGroupByAlias(groupByExprs: Seq[Expression]): Seq[Alias] = {
groupByExprs.map {
case e: NamedExpression => Alias(e, e.name)()
case other => Alias(other, other.toString)()
}
}
/*
* Construct [[Expand]] operator with grouping sets.
*/
private def constructExpand(
selectedGroupByExprs: Seq[Seq[Expression]],
child: LogicalPlan,
groupByAliases: Seq[Alias],
gid: Attribute): LogicalPlan = {
// Change the nullability of group by aliases if necessary. For example, if we have
// GROUPING SETS ((a,b), a), we do not need to change the nullability of a, but we
      // should change the nullability of b to be TRUE.
// TODO: For Cube/Rollup just set nullability to be `true`.
val expandedAttributes = groupByAliases.map { alias =>
if (selectedGroupByExprs.exists(!_.contains(alias.child))) {
alias.toAttribute.withNullability(true)
} else {
alias.toAttribute
}
}
val groupingSetsAttributes = selectedGroupByExprs.map { groupingSetExprs =>
groupingSetExprs.map { expr =>
val alias = groupByAliases.find(_.child.semanticEquals(expr)).getOrElse(
failAnalysis(s"$expr doesn't show up in the GROUP BY list $groupByAliases"))
// Map alias to expanded attribute.
expandedAttributes.find(_.semanticEquals(alias.toAttribute)).getOrElse(
alias.toAttribute)
}
}
Expand(groupingSetsAttributes, groupByAliases, expandedAttributes, gid, child)
}
/*
* Construct new aggregate expressions by replacing grouping functions.
*/
private def constructAggregateExprs(
groupByExprs: Seq[Expression],
aggregations: Seq[NamedExpression],
groupByAliases: Seq[Alias],
groupingAttrs: Seq[Expression],
gid: Attribute): Seq[NamedExpression] = aggregations.map {
      // Collect all the found AggregateExpressions, so we can check whether an expression is
      // part of any AggregateExpression or not.
val aggsBuffer = ArrayBuffer[Expression]()
// Returns whether the expression belongs to any expressions in `aggsBuffer` or not.
def isPartOfAggregation(e: Expression): Boolean = {
aggsBuffer.exists(a => a.find(_ eq e).isDefined)
}
replaceGroupingFunc(_, groupByExprs, gid).transformDown {
// AggregateExpression should be computed on the unmodified value of its argument
// expressions, so we should not replace any references to grouping expression
// inside it.
case e: AggregateExpression =>
aggsBuffer += e
e
case e if isPartOfAggregation(e) => e
case e =>
// Replace expression by expand output attribute.
val index = groupByAliases.indexWhere(_.child.semanticEquals(e))
if (index == -1) {
e
} else {
groupingAttrs(index)
}
}.asInstanceOf[NamedExpression]
}
/*
* Construct [[Aggregate]] operator from Cube/Rollup/GroupingSets.
*/
private def constructAggregate(
selectedGroupByExprs: Seq[Seq[Expression]],
groupByExprs: Seq[Expression],
aggregationExprs: Seq[NamedExpression],
child: LogicalPlan): LogicalPlan = {
val gid = AttributeReference(VirtualColumn.groupingIdName, IntegerType, false)()
// In case of ANSI-SQL compliant syntax for GROUPING SETS, groupByExprs is optional and
// can be null. In such case, we derive the groupByExprs from the user supplied values for
// grouping sets.
val finalGroupByExpressions = if (groupByExprs == Nil) {
selectedGroupByExprs.flatten.foldLeft(Seq.empty[Expression]) { (result, currentExpr) =>
          // Only unique expressions are included in the group by expressions, where uniqueness
          // is determined by semantic equality. For example, GROUPING SETS ((a * b), (b * a))
          // results in the single grouping expression (a * b).
if (result.find(_.semanticEquals(currentExpr)).isDefined) {
result
} else {
result :+ currentExpr
}
}
} else {
groupByExprs
}
// Expand works by setting grouping expressions to null as determined by the
// `selectedGroupByExprs`. To prevent these null values from being used in an aggregate
// instead of the original value we need to create new aliases for all group by expressions
// that will only be used for the intended purpose.
val groupByAliases = constructGroupByAlias(finalGroupByExpressions)
val expand = constructExpand(selectedGroupByExprs, child, groupByAliases, gid)
val groupingAttrs = expand.output.drop(child.output.length)
val aggregations = constructAggregateExprs(
finalGroupByExpressions, aggregationExprs, groupByAliases, groupingAttrs, gid)
Aggregate(groupingAttrs, aggregations, expand)
}
private def findGroupingExprs(plan: LogicalPlan): Seq[Expression] = {
plan.collectFirst {
case a: Aggregate =>
// this Aggregate should have grouping id as the last grouping key.
val gid = a.groupingExpressions.last
if (!gid.isInstanceOf[AttributeReference]
|| gid.asInstanceOf[AttributeReference].name != VirtualColumn.groupingIdName) {
failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup")
}
a.groupingExpressions.take(a.groupingExpressions.length - 1)
}.getOrElse {
failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup")
}
}
    // This requires transformUp to replace grouping()/grouping_id() in resolved Filter/Sort.
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsUp {
case a if !a.childrenResolved => a // be sure all of the children are resolved.
// Ensure group by expressions and aggregate expressions have been resolved.
case Aggregate(Seq(c @ Cube(groupByExprs)), aggregateExpressions, child)
if (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
constructAggregate(cubeExprs(groupByExprs), groupByExprs, aggregateExpressions, child)
case Aggregate(Seq(r @ Rollup(groupByExprs)), aggregateExpressions, child)
if (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
constructAggregate(rollupExprs(groupByExprs), groupByExprs, aggregateExpressions, child)
// Ensure all the expressions have been resolved.
case x: GroupingSets if x.expressions.forall(_.resolved) =>
constructAggregate(x.selectedGroupByExprs, x.groupByExprs, x.aggregations, x.child)
// We should make sure all expressions in condition have been resolved.
case f @ Filter(cond, child) if hasGroupingFunction(cond) && cond.resolved =>
val groupingExprs = findGroupingExprs(child)
// The unresolved grouping id will be resolved by ResolveMissingReferences
val newCond = replaceGroupingFunc(cond, groupingExprs, VirtualColumn.groupingIdAttribute)
f.copy(condition = newCond)
// We should make sure all [[SortOrder]]s have been resolved.
case s @ Sort(order, _, child)
if order.exists(hasGroupingFunction) && order.forall(_.resolved) =>
val groupingExprs = findGroupingExprs(child)
val gid = VirtualColumn.groupingIdAttribute
// The unresolved grouping id will be resolved by ResolveMissingReferences
val newOrder = order.map(replaceGroupingFunc(_, groupingExprs, gid).asInstanceOf[SortOrder])
s.copy(order = newOrder)
}
}
object ResolvePivot extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case p: Pivot if !p.childrenResolved || !p.aggregates.forall(_.resolved)
|| (p.groupByExprsOpt.isDefined && !p.groupByExprsOpt.get.forall(_.resolved))
|| !p.pivotColumn.resolved || !p.pivotValues.forall(_.resolved) => p
case Pivot(groupByExprsOpt, pivotColumn, pivotValues, aggregates, child) =>
if (!RowOrdering.isOrderable(pivotColumn.dataType)) {
throw new AnalysisException(
s"Invalid pivot column '${pivotColumn}'. Pivot columns must be comparable.")
}
// Check all aggregate expressions.
aggregates.foreach(checkValidAggregateExpression)
// Check all pivot values are literal and match pivot column data type.
val evalPivotValues = pivotValues.map { value =>
val foldable = value match {
case Alias(v, _) => v.foldable
case _ => value.foldable
}
if (!foldable) {
throw new AnalysisException(
s"Literal expressions required for pivot values, found '$value'")
}
if (!Cast.canCast(value.dataType, pivotColumn.dataType)) {
throw new AnalysisException(s"Invalid pivot value '$value': " +
s"value data type ${value.dataType.simpleString} does not match " +
s"pivot column data type ${pivotColumn.dataType.catalogString}")
}
Cast(value, pivotColumn.dataType, Some(conf.sessionLocalTimeZone)).eval(EmptyRow)
}
// Group-by expressions coming from SQL are implicit and need to be deduced.
val groupByExprs = groupByExprsOpt.getOrElse {
val pivotColAndAggRefs = pivotColumn.references ++ AttributeSet(aggregates)
child.output.filterNot(pivotColAndAggRefs.contains)
}
val singleAgg = aggregates.size == 1
def outputName(value: Expression, aggregate: Expression): String = {
val stringValue = value match {
case n: NamedExpression => n.name
case _ =>
val utf8Value =
Cast(value, StringType, Some(conf.sessionLocalTimeZone)).eval(EmptyRow)
Option(utf8Value).map(_.toString).getOrElse("null")
}
if (singleAgg) {
stringValue
} else {
val suffix = aggregate match {
case n: NamedExpression => n.name
case _ => toPrettySQL(aggregate)
}
stringValue + "_" + suffix
}
}
if (aggregates.forall(a => PivotFirst.supportsDataType(a.dataType))) {
          // Since evaluating |pivotValues| if statements for each input row can get slow, this
          // is an alternate plan that instead uses two steps of aggregation.
val namedAggExps: Seq[NamedExpression] = aggregates.map(a => Alias(a, a.sql)())
val namedPivotCol = pivotColumn match {
case n: NamedExpression => n
case _ => Alias(pivotColumn, "__pivot_col")()
}
val bigGroup = groupByExprs :+ namedPivotCol
val firstAgg = Aggregate(bigGroup, bigGroup ++ namedAggExps, child)
val pivotAggs = namedAggExps.map { a =>
Alias(PivotFirst(namedPivotCol.toAttribute, a.toAttribute, evalPivotValues)
.toAggregateExpression()
, "__pivot_" + a.sql)()
}
val groupByExprsAttr = groupByExprs.map(_.toAttribute)
val secondAgg = Aggregate(groupByExprsAttr, groupByExprsAttr ++ pivotAggs, firstAgg)
val pivotAggAttribute = pivotAggs.map(_.toAttribute)
val pivotOutputs = pivotValues.zipWithIndex.flatMap { case (value, i) =>
aggregates.zip(pivotAggAttribute).map { case (aggregate, pivotAtt) =>
Alias(ExtractValue(pivotAtt, Literal(i), resolver), outputName(value, aggregate))()
}
}
Project(groupByExprsAttr ++ pivotOutputs, secondAgg)
} else {
val pivotAggregates: Seq[NamedExpression] = pivotValues.flatMap { value =>
def ifExpr(e: Expression) = {
If(
EqualNullSafe(
pivotColumn,
Cast(value, pivotColumn.dataType, Some(conf.sessionLocalTimeZone))),
e, Literal(null))
}
aggregates.map { aggregate =>
val filteredAggregate = aggregate.transformDown {
              // Assumption is the aggregate function ignores nulls. This is true for all current
              // AggregateFunctions with the exception of First and Last in their default mode
              // (which we handle) and possibly some Hive UDAFs.
case First(expr, _) =>
First(ifExpr(expr), Literal(true))
case Last(expr, _) =>
Last(ifExpr(expr), Literal(true))
case a: AggregateFunction =>
a.withNewChildren(a.children.map(ifExpr))
}.transform {
// We are duplicating aggregates that are now computing a different value for each
// pivot value.
// TODO: Don't construct the physical container until after analysis.
case ae: AggregateExpression => ae.copy(resultId = NamedExpression.newExprId)
}
Alias(filteredAggregate, outputName(value, aggregate))()
}
}
Aggregate(groupByExprs, groupByExprs ++ pivotAggregates, child)
}
}
// Support any aggregate expression that can appear in an Aggregate plan except Pandas UDF.
// TODO: Support Pandas UDF.
private def checkValidAggregateExpression(expr: Expression): Unit = expr match {
case _: AggregateExpression => // OK and leave the argument check to CheckAnalysis.
case expr: PythonUDF if PythonUDF.isGroupedAggPandasUDF(expr) =>
failAnalysis("Pandas UDF aggregate expressions are currently not supported in pivot.")
case e: Attribute =>
failAnalysis(
s"Aggregate expression required for pivot, but '${e.sql}' " +
s"did not appear in any aggregate function.")
case e => e.children.foreach(checkValidAggregateExpression)
}
}
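  // Rough shape of the rewrite above (illustrative; table and column names are hypothetical):
  //   SELECT * FROM sales PIVOT (sum(earnings) FOR course IN ('dotNET', 'Java'))
  // becomes either
  //   - a two-step aggregation using PivotFirst (aggregate per pivot-column value first, then
  //     collect the per-value results and project them out), when every aggregate's data type
  //     is supported by PivotFirst, or
  //   - a single Aggregate whose aggregate functions are wrapped so that they only see rows
  //     where the pivot column null-safely equals the pivot value (the if/EqualNullSafe branch).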
/**
* Resolve table relations with concrete relations from v2 catalog.
*
* [[ResolveRelations]] still resolves v1 tables.
*/
object ResolveTables extends Rule[LogicalPlan] {
import org.apache.spark.sql.catalog.v2.utils.CatalogV2Util._
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case u @ UnresolvedRelation(CatalogObjectIdentifier(Some(catalogPlugin), ident)) =>
loadTable(catalogPlugin, ident).map(DataSourceV2Relation.create).getOrElse(u)
}
}
/**
* Replaces [[UnresolvedRelation]]s with concrete relations from the catalog.
*/
object ResolveRelations extends Rule[LogicalPlan] {
    // If the unresolved relation is running directly on files, we just return the original
    // UnresolvedRelation; the plan will get resolved later. Otherwise we look up the table from
    // the catalog and change the default database name (in AnalysisContext) if it is a view.
    // We usually look up a table from the default database if the table identifier has an empty
    // database part, but for a view the default database should be the currentDb when the view
    // was created. When resolving a nested view, the view may have a different default database
    // than the views it references, so we need to use `AnalysisContext.defaultDatabase` to track
    // the current default database.
    // When the relation we resolve is a view, we fetch the view.desc (which is a CatalogTable),
    // copy its `viewDefaultDatabase` into `AnalysisContext.defaultDatabase`, and then look up
    // the relations that the view references using that default database.
// For example:
// |- view1 (defaultDatabase = db1)
// |- operator
// |- table2 (defaultDatabase = db1)
// |- view2 (defaultDatabase = db2)
// |- view3 (defaultDatabase = db3)
// |- view4 (defaultDatabase = db4)
// In this case, the view `view1` is a nested view, it directly references `table2`, `view2`
// and `view4`, the view `view2` references `view3`. On resolving the table, we look up the
// relations `table2`, `view2`, `view4` using the default database `db1`, and look up the
// relation `view3` using the default database `db2`.
//
    // Note this is compatible with views defined by older versions of Spark (before 2.2), which
    // have an empty defaultDatabase and whose relations in viewText all have the database part
    // defined.
def resolveRelation(plan: LogicalPlan): LogicalPlan = plan match {
case u @ UnresolvedRelation(AsTableIdentifier(ident)) if !isRunningDirectlyOnFiles(ident) =>
val defaultDatabase = AnalysisContext.get.defaultDatabase
val foundRelation = lookupTableFromCatalog(ident, u, defaultDatabase)
if (foundRelation != u) {
resolveRelation(foundRelation)
} else {
u
}
      // The view's child should be a logical plan parsed from the `desc.viewText`; the variable
      // `viewText` should be defined, or else we throw an error when generating the View
      // operator.
case view @ View(desc, _, child) if !child.resolved =>
// Resolve all the UnresolvedRelations and Views in the child.
val newChild = AnalysisContext.withAnalysisContext(desc.viewDefaultDatabase) {
if (AnalysisContext.get.nestedViewDepth > conf.maxNestedViewDepth) {
view.failAnalysis(s"The depth of view ${view.desc.identifier} exceeds the maximum " +
s"view resolution depth (${conf.maxNestedViewDepth}). Analysis is aborted to " +
s"avoid errors. Increase the value of ${SQLConf.MAX_NESTED_VIEW_DEPTH.key} to work " +
"around this.")
}
executeSameContext(child)
}
view.copy(child = newChild)
case p @ SubqueryAlias(_, view: View) =>
val newChild = resolveRelation(view)
p.copy(child = newChild)
case _ => plan
}
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case i @ InsertIntoTable(u @ UnresolvedRelation(AsTableIdentifier(ident)), _, child, _, _)
if child.resolved =>
EliminateSubqueryAliases(lookupTableFromCatalog(ident, u)) match {
case v: View =>
u.failAnalysis(s"Inserting into a view is not allowed. View: ${v.desc.identifier}.")
case other => i.copy(table = other)
}
case u: UnresolvedRelation => resolveRelation(u)
}
    // Look up the table with the given name from the catalog. The database we use is decided by
    // the following precedence:
    // 1. Use the database part of the table identifier, if it is defined;
    // 2. Use defaultDatabase, if it is defined (in this case, no temporary objects can be used,
    //    and the default database is only used to look up a view);
// 3. Use the currentDb of the SessionCatalog.
private def lookupTableFromCatalog(
tableIdentifier: TableIdentifier,
u: UnresolvedRelation,
defaultDatabase: Option[String] = None): LogicalPlan = {
val tableIdentWithDb = tableIdentifier.copy(
database = tableIdentifier.database.orElse(defaultDatabase))
try {
catalog.lookupRelation(tableIdentWithDb)
} catch {
case _: NoSuchTableException | _: NoSuchDatabaseException =>
u
}
}
// If the database part is specified, and we support running SQL directly on files, and
// it's not a temporary view, and the table does not exist, then let's just return the
// original UnresolvedRelation. It is possible we are matching a query like "select *
// from parquet.`/path/to/query`". The plan will get resolved in the rule `ResolveDataSource`.
// Note that we are testing (!db_exists || !table_exists) because the catalog throws
// an exception from tableExists if the database does not exist.
private def isRunningDirectlyOnFiles(table: TableIdentifier): Boolean = {
table.database.isDefined && conf.runSQLonFile && !catalog.isTemporaryTable(table) &&
(!catalog.databaseExists(table.database.get) || !catalog.tableExists(table))
}
}
/**
* Replaces [[UnresolvedAttribute]]s with concrete [[AttributeReference]]s from
* a logical plan node's children.
*/
object ResolveReferences extends Rule[LogicalPlan] {
/**
* Generate a new logical plan for the right child with different expression IDs
* for all conflicting attributes.
*/
private def dedupRight (left: LogicalPlan, right: LogicalPlan): LogicalPlan = {
val conflictingAttributes = left.outputSet.intersect(right.outputSet)
logDebug(s"Conflicting attributes ${conflictingAttributes.mkString(",")} " +
s"between $left and $right")
right.collect {
// Handle base relations that might appear more than once.
case oldVersion: MultiInstanceRelation
if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
val newVersion = oldVersion.newInstance()
(oldVersion, newVersion)
case oldVersion: SerializeFromObject
if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
(oldVersion, oldVersion.copy(serializer = oldVersion.serializer.map(_.newInstance())))
// Handle projects that create conflicting aliases.
case oldVersion @ Project(projectList, _)
if findAliases(projectList).intersect(conflictingAttributes).nonEmpty =>
(oldVersion, oldVersion.copy(projectList = newAliases(projectList)))
case oldVersion @ Aggregate(_, aggregateExpressions, _)
if findAliases(aggregateExpressions).intersect(conflictingAttributes).nonEmpty =>
(oldVersion, oldVersion.copy(aggregateExpressions = newAliases(aggregateExpressions)))
case oldVersion @ FlatMapGroupsInPandas(_, _, output, _)
if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
(oldVersion, oldVersion.copy(output = output.map(_.newInstance())))
case oldVersion: Generate
if oldVersion.producedAttributes.intersect(conflictingAttributes).nonEmpty =>
val newOutput = oldVersion.generatorOutput.map(_.newInstance())
(oldVersion, oldVersion.copy(generatorOutput = newOutput))
case oldVersion @ Window(windowExpressions, _, _, child)
if AttributeSet(windowExpressions.map(_.toAttribute)).intersect(conflictingAttributes)
.nonEmpty =>
(oldVersion, oldVersion.copy(windowExpressions = newAliases(windowExpressions)))
}
// Only handle first case, others will be fixed on the next pass.
.headOption match {
case None =>
/*
* No result implies that there is a logical plan node that produces new references
* that this rule cannot handle. When that is the case, there must be another rule
* that resolves these conflicts. Otherwise, the analysis will fail.
*/
right
case Some((oldRelation, newRelation)) =>
val attributeRewrites = AttributeMap(oldRelation.output.zip(newRelation.output))
right transformUp {
case r if r == oldRelation => newRelation
} transformUp {
case other => other transformExpressions {
case a: Attribute =>
dedupAttr(a, attributeRewrites)
case s: SubqueryExpression =>
s.withNewPlan(dedupOuterReferencesInSubquery(s.plan, attributeRewrites))
}
}
}
}
private def dedupAttr(attr: Attribute, attrMap: AttributeMap[Attribute]): Attribute = {
val exprId = attrMap.getOrElse(attr, attr).exprId
attr.withExprId(exprId)
}
/**
* The outer plan may have been de-duplicated and the function below updates the
* outer references to refer to the de-duplicated attributes.
*
* For example (SQL):
* {{{
* SELECT * FROM t1
* INTERSECT
* SELECT * FROM t1
* WHERE EXISTS (SELECT 1
* FROM t2
* WHERE t1.c1 = t2.c1)
* }}}
* Plan before resolveReference rule.
* 'Intersect
* :- Project [c1#245, c2#246]
* : +- SubqueryAlias t1
* : +- Relation[c1#245,c2#246] parquet
* +- 'Project [*]
* +- Filter exists#257 [c1#245]
* : +- Project [1 AS 1#258]
* : +- Filter (outer(c1#245) = c1#251)
* : +- SubqueryAlias t2
* : +- Relation[c1#251,c2#252] parquet
* +- SubqueryAlias t1
* +- Relation[c1#245,c2#246] parquet
* Plan after the resolveReference rule.
* Intersect
* :- Project [c1#245, c2#246]
* : +- SubqueryAlias t1
* : +- Relation[c1#245,c2#246] parquet
* +- Project [c1#259, c2#260]
* +- Filter exists#257 [c1#259]
* : +- Project [1 AS 1#258]
* : +- Filter (outer(c1#259) = c1#251) => Updated
* : +- SubqueryAlias t2
* : +- Relation[c1#251,c2#252] parquet
* +- SubqueryAlias t1
* +- Relation[c1#259,c2#260] parquet => Outer plan's attributes are de-duplicated.
*/
private def dedupOuterReferencesInSubquery(
plan: LogicalPlan,
attrMap: AttributeMap[Attribute]): LogicalPlan = {
plan transformDown { case currentFragment =>
currentFragment transformExpressions {
case OuterReference(a: Attribute) =>
OuterReference(dedupAttr(a, attrMap))
case s: SubqueryExpression =>
s.withNewPlan(dedupOuterReferencesInSubquery(s.plan, attrMap))
}
}
}
/**
     * Resolves the attribute and extract value expression(s) by traversing the
     * input expression in a top-down manner. The traversal is done top-down because
     * we need to skip over unbound lambda function expressions. The lambda expressions are
     * resolved in a different rule, [[ResolveLambdaVariables]].
     *
     * Example:
     * SELECT transform(array(1, 2, 3), (x, i) -> x + i)
     *
     * In the case above, x and i are resolved as lambda variables in [[ResolveLambdaVariables]].
*
* Note : In this routine, the unresolved attributes are resolved from the input plan's
* children attributes.
*/
private def resolveExpressionTopDown(e: Expression, q: LogicalPlan): Expression = {
if (e.resolved) return e
e match {
case f: LambdaFunction if !f.bound => f
case u @ UnresolvedAttribute(nameParts) =>
// Leave unchanged if resolution fails. Hopefully will be resolved next round.
val result =
withPosition(u) {
q.resolveChildren(nameParts, resolver)
.orElse(resolveLiteralFunction(nameParts, u, q))
.getOrElse(u)
}
logDebug(s"Resolving $u to $result")
result
case UnresolvedExtractValue(child, fieldExpr) if child.resolved =>
ExtractValue(child, fieldExpr, resolver)
case _ => e.mapChildren(resolveExpressionTopDown(_, q))
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p: LogicalPlan if !p.childrenResolved => p
// If the projection list contains Stars, expand it.
case p: Project if containsStar(p.projectList) =>
p.copy(projectList = buildExpandedProjectList(p.projectList, p.child))
// If the aggregate function argument contains Stars, expand it.
case a: Aggregate if containsStar(a.aggregateExpressions) =>
if (a.groupingExpressions.exists(_.isInstanceOf[UnresolvedOrdinal])) {
failAnalysis(
"Star (*) is not allowed in select list when GROUP BY ordinal position is used")
} else {
a.copy(aggregateExpressions = buildExpandedProjectList(a.aggregateExpressions, a.child))
}
// If the script transformation input contains Stars, expand it.
case t: ScriptTransformation if containsStar(t.input) =>
t.copy(
input = t.input.flatMap {
case s: Star => s.expand(t.child, resolver)
case o => o :: Nil
}
)
case g: Generate if containsStar(g.generator.children) =>
failAnalysis("Invalid usage of '*' in explode/json_tuple/UDTF")
// To resolve duplicate expression IDs for Join and Intersect
case j @ Join(left, right, _, _, _) if !j.duplicateResolved =>
j.copy(right = dedupRight(left, right))
case i @ Intersect(left, right, _) if !i.duplicateResolved =>
i.copy(right = dedupRight(left, right))
case e @ Except(left, right, _) if !e.duplicateResolved =>
e.copy(right = dedupRight(left, right))
case u @ Union(children) if !u.duplicateResolved =>
// Use projection-based de-duplication for Union to avoid breaking the checkpoint sharing
// feature in streaming.
val newChildren = children.foldRight(Seq.empty[LogicalPlan]) { (head, tail) =>
head +: tail.map {
case child if head.outputSet.intersect(child.outputSet).isEmpty =>
child
case child =>
val projectList = child.output.map { attr =>
Alias(attr, attr.name)()
}
Project(projectList, child)
}
}
u.copy(children = newChildren)
      // When resolving `SortOrder`s in Sort based on its child, don't report errors, as we
      // still have a chance to resolve them based on its descendants.
case s @ Sort(ordering, global, child) if child.resolved && !s.resolved =>
val newOrdering =
ordering.map(order => resolveExpressionBottomUp(order, child).asInstanceOf[SortOrder])
Sort(newOrdering, global, child)
// A special case for Generate, because the output of Generate should not be resolved by
// ResolveReferences. Attributes in the output will be resolved by ResolveGenerate.
case g @ Generate(generator, _, _, _, _, _) if generator.resolved => g
case g @ Generate(generator, join, outer, qualifier, output, child) =>
val newG = resolveExpressionBottomUp(generator, child, throws = true)
if (newG.fastEquals(generator)) {
g
} else {
Generate(newG.asInstanceOf[Generator], join, outer, qualifier, output, child)
}
// Skips plan which contains deserializer expressions, as they should be resolved by another
// rule: ResolveDeserializer.
case plan if containsDeserializer(plan.expressions) => plan
// SPARK-25942: Resolves aggregate expressions with `AppendColumns`'s children, instead of
// `AppendColumns`, because `AppendColumns`'s serializer might produce conflict attribute
// names leading to ambiguous references exception.
case a @ Aggregate(groupingExprs, aggExprs, appendColumns: AppendColumns) =>
a.mapExpressions(resolveExpressionTopDown(_, appendColumns))
case o: OverwriteByExpression if !o.outputResolved =>
        // Do not resolve expression attributes until the query attributes are resolved against
        // the table by ResolveOutputRelation. That rule will alias the attributes to the
        // table's names.
o
case q: LogicalPlan =>
logTrace(s"Attempting to resolve ${q.simpleString(SQLConf.get.maxToStringFields)}")
q.mapExpressions(resolveExpressionTopDown(_, q))
}
def newAliases(expressions: Seq[NamedExpression]): Seq[NamedExpression] = {
expressions.map {
case a: Alias => Alias(a.child, a.name)()
case other => other
}
}
def findAliases(projectList: Seq[NamedExpression]): AttributeSet = {
AttributeSet(projectList.collect { case a: Alias => a.toAttribute })
}
/**
* Build a project list for Project/Aggregate and expand the star if possible
*/
private def buildExpandedProjectList(
exprs: Seq[NamedExpression],
child: LogicalPlan): Seq[NamedExpression] = {
exprs.flatMap {
// Using Dataframe/Dataset API: testData2.groupBy($"a", $"b").agg($"*")
case s: Star => s.expand(child, resolver)
// Using SQL API without running ResolveAlias: SELECT * FROM testData2 group by a, b
case UnresolvedAlias(s: Star, _) => s.expand(child, resolver)
case o if containsStar(o :: Nil) => expandStarExpression(o, child) :: Nil
case o => o :: Nil
}.map(_.asInstanceOf[NamedExpression])
}
/**
* Returns true if `exprs` contains a [[Star]].
*/
def containsStar(exprs: Seq[Expression]): Boolean =
exprs.exists(_.collect { case _: Star => true }.nonEmpty)
/**
* Expands the matching attribute.*'s in `child`'s output.
*/
def expandStarExpression(expr: Expression, child: LogicalPlan): Expression = {
expr.transformUp {
case f1: UnresolvedFunction if containsStar(f1.children) =>
f1.copy(children = f1.children.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
case c: CreateNamedStruct if containsStar(c.valExprs) =>
val newChildren = c.children.grouped(2).flatMap {
case Seq(k, s : Star) => CreateStruct(s.expand(child, resolver)).children
case kv => kv
}
c.copy(children = newChildren.toList )
case c: CreateArray if containsStar(c.children) =>
c.copy(children = c.children.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
case p: Murmur3Hash if containsStar(p.children) =>
p.copy(children = p.children.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
case p: XxHash64 if containsStar(p.children) =>
p.copy(children = p.children.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
// count(*) has been replaced by count(1)
case o if containsStar(o.children) =>
failAnalysis(s"Invalid usage of '*' in expression '${o.prettyName}'")
}
}
}
private def containsDeserializer(exprs: Seq[Expression]): Boolean = {
exprs.exists(_.find(_.isInstanceOf[UnresolvedDeserializer]).isDefined)
}
/**
     * Literal functions do not require the user to specify parentheses when calling them.
     * When an attribute is not resolvable, we try to resolve it as a literal function.
*/
private def resolveLiteralFunction(
nameParts: Seq[String],
attribute: UnresolvedAttribute,
plan: LogicalPlan): Option[Expression] = {
if (nameParts.length != 1) return None
val isNamedExpression = plan match {
case Aggregate(_, aggregateExpressions, _) => aggregateExpressions.contains(attribute)
case Project(projectList, _) => projectList.contains(attribute)
case Window(windowExpressions, _, _, _) => windowExpressions.contains(attribute)
case _ => false
}
val wrapper: Expression => Expression =
if (isNamedExpression) f => Alias(f, toPrettySQL(f))() else identity
// support CURRENT_DATE and CURRENT_TIMESTAMP
val literalFunctions = Seq(CurrentDate(), CurrentTimestamp())
val name = nameParts.head
val func = literalFunctions.find(e => caseInsensitiveResolution(e.prettyName, name))
func.map(wrapper)
}
/**
     * Resolves the attribute, column value and extract value expression(s) by traversing the
     * input expression in a bottom-up manner. In order to resolve the nested complex type fields
     * correctly, this function makes use of the `throws` parameter to control when to raise an
* AnalysisException.
*
* Example :
* SELECT a.b FROM t ORDER BY b[0].d
*
     * In the above example, b needs to be resolved before d can be resolved. Given we are
     * doing a bottom-up traversal, it will first attempt to resolve d and fail because b has not
     * been resolved yet. If `throws` is false, this function handles the exception by
     * returning the original attribute. In this case `d` will be resolved in subsequent passes
     * after `b` is resolved.
*/
protected[sql] def resolveExpressionBottomUp(
expr: Expression,
plan: LogicalPlan,
throws: Boolean = false): Expression = {
if (expr.resolved) return expr
      // Resolve the expression in one round.
      // If `throws` is false, or the desired attribute simply doesn't exist (e.g. trying to
      // resolve `a.b` when `a` doesn't exist), return the original expression.
      // If `throws` is true, any AnalysisException raised during resolution is propagated.
try {
expr transformUp {
case GetColumnByOrdinal(ordinal, _) => plan.output(ordinal)
case u @ UnresolvedAttribute(nameParts) =>
val result =
withPosition(u) {
plan.resolve(nameParts, resolver)
.orElse(resolveLiteralFunction(nameParts, u, plan))
.getOrElse(u)
}
logDebug(s"Resolving $u to $result")
result
case UnresolvedExtractValue(child, fieldName) if child.resolved =>
ExtractValue(child, fieldName, resolver)
}
} catch {
case a: AnalysisException if !throws => expr
}
}
/**
* In many dialects of SQL it is valid to use ordinal positions in order/sort by and group by
   * clauses. This rule converts ordinal positions to the corresponding expressions in the
   * select list. This support was introduced in Spark 2.0.
*
* - When the sort references or group by expressions are not integer but foldable expressions,
* just ignore them.
* - When spark.sql.orderByOrdinal/spark.sql.groupByOrdinal is set to false, ignore the position
* numbers too.
*
   * Before the release of Spark 2.0, the literals in order/sort by and group by clauses
   * had no effect on the results.
*/
object ResolveOrdinalInOrderByAndGroupBy extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
// Replace the index with the related attribute for ORDER BY,
      // which is a 1-based position in the projection list.
case Sort(orders, global, child)
if orders.exists(_.child.isInstanceOf[UnresolvedOrdinal]) =>
val newOrders = orders map {
case s @ SortOrder(UnresolvedOrdinal(index), direction, nullOrdering, _) =>
if (index > 0 && index <= child.output.size) {
SortOrder(child.output(index - 1), direction, nullOrdering, Set.empty)
} else {
s.failAnalysis(
s"ORDER BY position $index is not in select list " +
s"(valid range is [1, ${child.output.size}])")
}
case o => o
}
Sort(newOrders, global, child)
      // Replace the index with the corresponding expression in aggregateExpressions. The index
      // is a 1-based position in aggregateExpressions, which are the output columns (select
      // expressions).
case Aggregate(groups, aggs, child) if aggs.forall(_.resolved) &&
groups.exists(_.isInstanceOf[UnresolvedOrdinal]) =>
val newGroups = groups.map {
case u @ UnresolvedOrdinal(index) if index > 0 && index <= aggs.size =>
aggs(index - 1)
case ordinal @ UnresolvedOrdinal(index) =>
ordinal.failAnalysis(
s"GROUP BY position $index is not in select list " +
s"(valid range is [1, ${aggs.size}])")
case o => o
}
Aggregate(newGroups, aggs, child)
}
}
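  // Illustrative example (table and column names are hypothetical): with
  // spark.sql.groupByOrdinal and spark.sql.orderByOrdinal enabled,
  //   SELECT dept, count(*) FROM emp GROUP BY 1 ORDER BY 2
  // is rewritten by the rule above into GROUP BY dept ORDER BY count(*), i.e. each ordinal is
  // replaced by the corresponding 1-based entry of the select list, and an out-of-range ordinal
  // fails analysis.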
/**
   * Replace unresolved expressions in grouping keys with resolved ones from the SELECT clause.
   * This rule is expected to run after [[ResolveReferences]] is applied.
*/
object ResolveAggAliasInGroupBy extends Rule[LogicalPlan] {
    // This is a strict check: we apply the rule only if the expression is not resolvable by the
    // child.
private def notResolvableByChild(attrName: String, child: LogicalPlan): Boolean = {
!child.output.exists(a => resolver(a.name, attrName))
}
private def mayResolveAttrByAggregateExprs(
exprs: Seq[Expression], aggs: Seq[NamedExpression], child: LogicalPlan): Seq[Expression] = {
exprs.map { _.transform {
case u: UnresolvedAttribute if notResolvableByChild(u.name, child) =>
aggs.find(ne => resolver(ne.name, u.name)).getOrElse(u)
}}
}
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case agg @ Aggregate(groups, aggs, child)
if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) &&
groups.exists(!_.resolved) =>
agg.copy(groupingExpressions = mayResolveAttrByAggregateExprs(groups, aggs, child))
case gs @ GroupingSets(selectedGroups, groups, child, aggs)
if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) &&
groups.exists(_.isInstanceOf[UnresolvedAttribute]) =>
gs.copy(
selectedGroupByExprs = selectedGroups.map(mayResolveAttrByAggregateExprs(_, aggs, child)),
groupByExprs = mayResolveAttrByAggregateExprs(groups, aggs, child))
}
}
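  // Illustrative example (hypothetical columns): with spark.sql.groupByAliases enabled, in
  //   SELECT a + b AS ab, count(*) FROM t GROUP BY ab
  // the grouping key `ab` is not resolvable from t's output, so the rule above replaces it with
  // the matching aggregate-list expression a + b AS ab.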
/**
* In many dialects of SQL it is valid to sort by attributes that are not present in the SELECT
* clause. This rule detects such queries and adds the required attributes to the original
* projection, so that they will be available during sorting. Another projection is added to
* remove these attributes after sorting.
*
   * The HAVING clause could also use grouping columns that are not present in the SELECT.
*/
object ResolveMissingReferences extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// Skip sort with aggregate. This will be handled in ResolveAggregateFunctions
case sa @ Sort(_, _, child: Aggregate) => sa
case s @ Sort(order, _, child)
if (!s.resolved || s.missingInput.nonEmpty) && child.resolved =>
val (newOrder, newChild) = resolveExprsAndAddMissingAttrs(order, child)
val ordering = newOrder.map(_.asInstanceOf[SortOrder])
if (child.output == newChild.output) {
s.copy(order = ordering)
} else {
// Add missing attributes and then project them away.
val newSort = s.copy(order = ordering, child = newChild)
Project(child.output, newSort)
}
case f @ Filter(cond, child) if (!f.resolved || f.missingInput.nonEmpty) && child.resolved =>
val (newCond, newChild) = resolveExprsAndAddMissingAttrs(Seq(cond), child)
if (child.output == newChild.output) {
f.copy(condition = newCond.head)
} else {
// Add missing attributes and then project them away.
val newFilter = Filter(newCond.head, newChild)
Project(child.output, newFilter)
}
}
/**
     * This method tries to resolve expressions and find missing attributes recursively.
     * Specifically, when the expressions used in `Sort` or `Filter` contain unresolved
     * attributes, or resolved attributes that are missing from the child's output, this method
     * tries to find those missing attributes and add them to the projection.
*/
private def resolveExprsAndAddMissingAttrs(
exprs: Seq[Expression], plan: LogicalPlan): (Seq[Expression], LogicalPlan) = {
// Missing attributes can be unresolved attributes or resolved attributes which are not in
// the output attributes of the plan.
if (exprs.forall(e => e.resolved && e.references.subsetOf(plan.outputSet))) {
(exprs, plan)
} else {
plan match {
case p: Project =>
// Resolving expressions against current plan.
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, p))
// Recursively resolving expressions on the child of current plan.
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, p.child)
// If some attributes used by expressions are resolvable only on the rewritten child
// plan, we need to add them into original projection.
val missingAttrs = (AttributeSet(newExprs) -- p.outputSet).intersect(newChild.outputSet)
(newExprs, Project(p.projectList ++ missingAttrs, newChild))
case a @ Aggregate(groupExprs, aggExprs, child) =>
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, a))
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, child)
val missingAttrs = (AttributeSet(newExprs) -- a.outputSet).intersect(newChild.outputSet)
if (missingAttrs.forall(attr => groupExprs.exists(_.semanticEquals(attr)))) {
// All the missing attributes are grouping expressions, valid case.
(newExprs, a.copy(aggregateExpressions = aggExprs ++ missingAttrs, child = newChild))
} else {
// Need to add non-grouping attributes, invalid case.
(exprs, a)
}
case g: Generate =>
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, g))
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, g.child)
(newExprs, g.copy(unrequiredChildIndex = Nil, child = newChild))
// For `Distinct` and `SubqueryAlias`, we can't recursively resolve and add attributes
// via its children.
case u: UnaryNode if !u.isInstanceOf[Distinct] && !u.isInstanceOf[SubqueryAlias] =>
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, u))
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, u.child)
(newExprs, u.withNewChildren(Seq(newChild)))
// For other operators, we can't recursively resolve and add attributes via its children.
case other =>
(exprs.map(resolveExpressionBottomUp(_, other)), other)
}
}
}
}
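  // Illustrative example (hypothetical table/columns): for
  //   SELECT a FROM t ORDER BY b
  // the rule above rewrites the plan to Project [a] over Sort [b] over Project [a, b] over t,
  // i.e. the missing sort attribute b is added to the child projection for the duration of the
  // sort and projected away again on top.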
/**
* Checks whether a function identifier referenced by an [[UnresolvedFunction]] is defined in the
* function registry. Note that this rule doesn't try to resolve the [[UnresolvedFunction]]. It
   * only performs a simple existence check based on the function identifier to quickly identify
   * undefined functions without triggering relation resolution, which may incur a potentially
   * expensive partition/schema discovery process in some cases.
   * In order to avoid duplicate external function lookups, the external function identifiers are
   * stored in the local hash set externalFunctionNameSet.
* @see [[ResolveFunctions]]
* @see https://issues.apache.org/jira/browse/SPARK-19737
*/
object LookupFunctions extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = {
val externalFunctionNameSet = new mutable.HashSet[FunctionIdentifier]()
plan.resolveExpressions {
case f: UnresolvedFunction
if externalFunctionNameSet.contains(normalizeFuncName(f.name)) => f
case f: UnresolvedFunction if catalog.isRegisteredFunction(f.name) => f
case f: UnresolvedFunction if catalog.isPersistentFunction(f.name) =>
externalFunctionNameSet.add(normalizeFuncName(f.name))
f
case f: UnresolvedFunction =>
withPosition(f) {
throw new NoSuchFunctionException(f.name.database.getOrElse(catalog.getCurrentDatabase),
f.name.funcName)
}
}
}
def normalizeFuncName(name: FunctionIdentifier): FunctionIdentifier = {
val funcName = if (conf.caseSensitiveAnalysis) {
name.funcName
} else {
name.funcName.toLowerCase(Locale.ROOT)
}
val databaseName = name.database match {
case Some(a) => formatDatabaseName(a)
case None => catalog.getCurrentDatabase
}
FunctionIdentifier(funcName, Some(databaseName))
}
protected def formatDatabaseName(name: String): String = {
if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
}
/**
* Replaces [[UnresolvedFunction]]s with concrete [[Expression]]s.
*/
object ResolveFunctions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case q: LogicalPlan =>
q transformExpressions {
case u if !u.childrenResolved => u // Skip until children are resolved.
case u: UnresolvedAttribute if resolver(u.name, VirtualColumn.hiveGroupingIdName) =>
withPosition(u) {
Alias(GroupingID(Nil), VirtualColumn.hiveGroupingIdName)()
}
case u @ UnresolvedGenerator(name, children) =>
withPosition(u) {
catalog.lookupFunction(name, children) match {
case generator: Generator => generator
case other =>
failAnalysis(s"$name is expected to be a generator. However, " +
s"its class is ${other.getClass.getCanonicalName}, which is not a generator.")
}
}
case u @ UnresolvedFunction(funcId, children, isDistinct) =>
withPosition(u) {
catalog.lookupFunction(funcId, children) match {
// AggregateWindowFunctions are AggregateFunctions that can only be evaluated within
// the context of a Window clause. They do not need to be wrapped in an
// AggregateExpression.
case wf: AggregateWindowFunction =>
if (isDistinct) {
failAnalysis(s"${wf.prettyName} does not support the modifier DISTINCT")
} else {
wf
}
// We get an aggregate function, we need to wrap it in an AggregateExpression.
case agg: AggregateFunction => AggregateExpression(agg, Complete, isDistinct)
// This function is not an aggregate function, just return the resolved one.
case other =>
if (isDistinct) {
failAnalysis(s"${other.prettyName} does not support the modifier DISTINCT")
} else {
other
}
}
}
}
}
}
/**
* This rule resolves and rewrites subqueries inside expressions.
*
* Note: CTEs are handled in CTESubstitution.
*/
object ResolveSubquery extends Rule[LogicalPlan] with PredicateHelper {
/**
     * Resolve the correlated expressions in a subquery by using the outer plan's references.
     * All resolved outer references are wrapped in an [[OuterReference]].
*/
private def resolveOuterReferences(plan: LogicalPlan, outer: LogicalPlan): LogicalPlan = {
plan resolveOperatorsDown {
case q: LogicalPlan if q.childrenResolved && !q.resolved =>
q transformExpressions {
case u @ UnresolvedAttribute(nameParts) =>
withPosition(u) {
try {
outer.resolve(nameParts, resolver) match {
case Some(outerAttr) => OuterReference(outerAttr)
case None => u
}
} catch {
case _: AnalysisException => u
}
}
}
}
}
/**
     * Resolves the subquery plan that is referenced in a subquery expression. The normal
     * attribute references are resolved using the regular analyzer, and the outer references
     * are resolved from the outer plans using the resolveOuterReferences method.
*
* Outer references from the correlated predicates are updated as children of
* Subquery expression.
*/
private def resolveSubQuery(
e: SubqueryExpression,
plans: Seq[LogicalPlan])(
f: (LogicalPlan, Seq[Expression]) => SubqueryExpression): SubqueryExpression = {
// Step 1: Resolve the outer expressions.
var previous: LogicalPlan = null
var current = e.plan
do {
// Try to resolve the subquery plan using the regular analyzer.
previous = current
current = executeSameContext(current)
// Use the outer references to resolve the subquery plan if it isn't resolved yet.
val i = plans.iterator
val afterResolve = current
while (!current.resolved && current.fastEquals(afterResolve) && i.hasNext) {
current = resolveOuterReferences(current, i.next())
}
} while (!current.resolved && !current.fastEquals(previous))
// Step 2: If the subquery plan is fully resolved, pull the outer references and record
// them as children of SubqueryExpression.
if (current.resolved) {
// Record the outer references as children of subquery expression.
f(current, SubExprUtils.getOuterReferences(current))
} else {
e.withNewPlan(current)
}
}
/**
     * Resolves the subquery. Apart from resolving the subquery and outer references (if any)
* in the subquery plan, the children of subquery expression are updated to record the
* outer references. This is needed to make sure
* (1) The column(s) referred from the outer query are not pruned from the plan during
* optimization.
* (2) Any aggregate expression(s) that reference outer attributes are pushed down to
* outer plan to get evaluated.
*/
private def resolveSubQueries(plan: LogicalPlan, plans: Seq[LogicalPlan]): LogicalPlan = {
plan transformExpressions {
case s @ ScalarSubquery(sub, _, exprId) if !sub.resolved =>
resolveSubQuery(s, plans)(ScalarSubquery(_, _, exprId))
case e @ Exists(sub, _, exprId) if !sub.resolved =>
resolveSubQuery(e, plans)(Exists(_, _, exprId))
case InSubquery(values, l @ ListQuery(_, _, exprId, _))
if values.forall(_.resolved) && !l.resolved =>
val expr = resolveSubQuery(l, plans)((plan, exprs) => {
ListQuery(plan, exprs, exprId, plan.output)
})
InSubquery(values, expr.asInstanceOf[ListQuery])
}
}
/**
     * Resolve and rewrite all subqueries in an operator tree.
*/
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// In case of HAVING (a filter after an aggregate) we use both the aggregate and
// its child for resolution.
case f @ Filter(_, a: Aggregate) if f.childrenResolved =>
resolveSubQueries(f, Seq(a, a.child))
// Only a few unary nodes (Project/Filter/Aggregate) can contain subqueries.
case q: UnaryNode if q.childrenResolved =>
resolveSubQueries(q, q.children)
}
}
/**
* Replaces unresolved column aliases for a subquery with projections.
*/
object ResolveSubqueryColumnAliases extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case u @ UnresolvedSubqueryColumnAliases(columnNames, child) if child.resolved =>
// Resolves output attributes if a query has alias names in its subquery:
// e.g., SELECT * FROM (SELECT 1 AS a, 1 AS b) t(col1, col2)
val outputAttrs = child.output
// Checks whether the number of aliases matches the number of output columns
// in the subquery.
if (columnNames.size != outputAttrs.size) {
u.failAnalysis("Number of column aliases does not match number of columns. " +
s"Number of column aliases: ${columnNames.size}; " +
s"number of columns: ${outputAttrs.size}.")
}
val aliases = outputAttrs.zip(columnNames).map { case (attr, aliasName) =>
Alias(attr, aliasName)()
}
Project(aliases, child)
}
}
/**
* Turns projections that contain aggregate expressions into aggregations.
*/
object GlobalAggregates extends Rule[LogicalPlan] {
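// For example, `SELECT sum(a) FROM t` is initially planned as Project([sum(a)], t); since the
// project list contains an aggregate expression, it is rewritten here into
// Aggregate(groupingExpressions = Nil, [sum(a)], t).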
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
case Project(projectList, child) if containsAggregates(projectList) =>
Aggregate(Nil, projectList, child)
}
def containsAggregates(exprs: Seq[Expression]): Boolean = {
// Collect all Windowed Aggregate Expressions.
val windowedAggExprs = exprs.flatMap { expr =>
expr.collect {
case WindowExpression(ae: AggregateExpression, _) => ae
}
}.toSet
// Find the first Aggregate Expression that is not Windowed.
exprs.exists(_.collectFirst {
case ae: AggregateExpression if !windowedAggExprs.contains(ae) => ae
}.isDefined)
}
}
/**
* This rule finds aggregate expressions that are not in an aggregate operator. For example,
* those in a HAVING clause or ORDER BY clause. These expressions are pushed down to the
* underlying aggregate operator and then projected away after the original operator.
*/
object ResolveAggregateFunctions extends Rule[LogicalPlan] {
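// For example, in `SELECT a FROM t GROUP BY a HAVING count(b) > 0`, the `count(b)` in the
// HAVING clause is not part of the Aggregate's output. It is resolved against the aggregate,
// appended (aliased) to the aggregate expressions, referenced from the Filter via its
// attribute, and finally projected away by the Project(agg.output, ...) added on top.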
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case f @ Filter(cond, agg @ Aggregate(grouping, originalAggExprs, child)) if agg.resolved =>
// Try resolving the condition of the filter as though it is in the aggregate clause
try {
val aggregatedCondition =
Aggregate(
grouping,
Alias(cond, "havingCondition")() :: Nil,
child)
val resolvedOperator = executeSameContext(aggregatedCondition)
def resolvedAggregateFilter =
resolvedOperator
.asInstanceOf[Aggregate]
.aggregateExpressions.head
// If resolution was successful and we see the filter has an aggregate in it, add it to
// the original aggregate operator.
if (resolvedOperator.resolved) {
// Try to replace all aggregate expressions in the filter by an alias.
val aggregateExpressions = ArrayBuffer.empty[NamedExpression]
val transformedAggregateFilter = resolvedAggregateFilter.transform {
case ae: AggregateExpression =>
val alias = Alias(ae, ae.toString)()
aggregateExpressions += alias
alias.toAttribute
// Grouping functions are handled in the rule [[ResolveGroupingAnalytics]].
case e: Expression if grouping.exists(_.semanticEquals(e)) &&
!ResolveGroupingAnalytics.hasGroupingFunction(e) &&
!agg.output.exists(_.semanticEquals(e)) =>
e match {
case ne: NamedExpression =>
aggregateExpressions += ne
ne.toAttribute
case _ =>
val alias = Alias(e, e.toString)()
aggregateExpressions += alias
alias.toAttribute
}
}
// Push the aggregate expressions into the aggregate (if any).
if (aggregateExpressions.nonEmpty) {
Project(agg.output,
Filter(transformedAggregateFilter,
agg.copy(aggregateExpressions = originalAggExprs ++ aggregateExpressions)))
} else {
f
}
} else {
f
}
} catch {
// Attempting to resolve in the aggregate can result in ambiguity. When this happens,
// just return the original plan.
case ae: AnalysisException => f
}
case sort @ Sort(sortOrder, global, aggregate: Aggregate) if aggregate.resolved =>
// Try resolving the ordering as though it is in the aggregate clause.
try {
// If a sort order is unresolved, contains references not in the aggregate, or contains an
// `AggregateExpression`, we need to push it down to the underlying aggregate operator.
val unresolvedSortOrders = sortOrder.filter { s =>
!s.resolved || !s.references.subsetOf(aggregate.outputSet) || containsAggregate(s)
}
val aliasedOrdering =
unresolvedSortOrders.map(o => Alias(o.child, "aggOrder")())
val aggregatedOrdering = aggregate.copy(aggregateExpressions = aliasedOrdering)
val resolvedAggregate: Aggregate =
executeSameContext(aggregatedOrdering).asInstanceOf[Aggregate]
val resolvedAliasedOrdering: Seq[Alias] =
resolvedAggregate.aggregateExpressions.asInstanceOf[Seq[Alias]]
// If we pass the analysis check, then the ordering expressions should only reference
// aggregate expressions or grouping expressions, and it's safe to push them down to
// Aggregate.
checkAnalysis(resolvedAggregate)
val originalAggExprs = aggregate.aggregateExpressions.map(
CleanupAliases.trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
// If the ordering expression is the same as an original aggregate expression, we don't need
// to push down this ordering expression and can reference the original aggregate
// expression instead.
val needsPushDown = ArrayBuffer.empty[NamedExpression]
val evaluatedOrderings = resolvedAliasedOrdering.zip(unresolvedSortOrders).map {
case (evaluated, order) =>
val index = originalAggExprs.indexWhere {
case Alias(child, _) => child semanticEquals evaluated.child
case other => other semanticEquals evaluated.child
}
if (index == -1) {
needsPushDown += evaluated
order.copy(child = evaluated.toAttribute)
} else {
order.copy(child = originalAggExprs(index).toAttribute)
}
}
val sortOrdersMap = unresolvedSortOrders
.map(new TreeNodeRef(_))
.zip(evaluatedOrderings)
.toMap
val finalSortOrders = sortOrder.map(s => sortOrdersMap.getOrElse(new TreeNodeRef(s), s))
// Since we don't rely on sort.resolved as the stop condition for this rule,
// we need to check this and prevent applying this rule multiple times
if (sortOrder == finalSortOrders) {
sort
} else {
Project(aggregate.output,
Sort(finalSortOrders, global,
aggregate.copy(aggregateExpressions = originalAggExprs ++ needsPushDown)))
}
} catch {
// Attempting to resolve in the aggregate can result in ambiguity. When this happens,
// just return the original plan.
case ae: AnalysisException => sort
}
}
def containsAggregate(condition: Expression): Boolean = {
condition.find(_.isInstanceOf[AggregateExpression]).isDefined
}
}
/**
* Extracts [[Generator]] from the projectList of a [[Project]] operator and creates [[Generate]]
* operator under [[Project]].
*
* This rule will throw [[AnalysisException]] for following cases:
* 1. [[Generator]] is nested in expressions, e.g. `SELECT explode(list) + 1 FROM tbl`
* 2. more than one [[Generator]] is found in projectList,
* e.g. `SELECT explode(list), explode(list) FROM tbl`
* 3. [[Generator]] is found in other operators that are not [[Project]] or [[Generate]],
* e.g. `SELECT * FROM tbl SORT BY explode(list)`
*/
object ExtractGenerator extends Rule[LogicalPlan] {
private def hasGenerator(expr: Expression): Boolean = {
expr.find(_.isInstanceOf[Generator]).isDefined
}
private def hasNestedGenerator(expr: NamedExpression): Boolean = {
CleanupAliases.trimNonTopLevelAliases(expr) match {
case UnresolvedAlias(_: Generator, _) => false
case Alias(_: Generator, _) => false
case MultiAlias(_: Generator, _) => false
case other => hasGenerator(other)
}
}
private def trimAlias(expr: NamedExpression): Expression = expr match {
case UnresolvedAlias(child, _) => child
case Alias(child, _) => child
case MultiAlias(child, _) => child
case _ => expr
}
private object AliasedGenerator {
/**
* Extracts a [[Generator]] expression, any names assigned by aliases to the outputs
* and the outer flag. The outer flag is used when joining the generator output.
* @param e the [[Expression]]
* @return (the [[Generator]], seq of output names, outer flag)
*/
def unapply(e: Expression): Option[(Generator, Seq[String], Boolean)] = e match {
case Alias(GeneratorOuter(g: Generator), name) if g.resolved => Some((g, name :: Nil, true))
case MultiAlias(GeneratorOuter(g: Generator), names) if g.resolved => Some((g, names, true))
case Alias(g: Generator, name) if g.resolved => Some((g, name :: Nil, false))
case MultiAlias(g: Generator, names) if g.resolved => Some((g, names, false))
case _ => None
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case Project(projectList, _) if projectList.exists(hasNestedGenerator) =>
val nestedGenerator = projectList.find(hasNestedGenerator).get
throw new AnalysisException("Generators are not supported when it's nested in " +
"expressions, but got: " + toPrettySQL(trimAlias(nestedGenerator)))
case Project(projectList, _) if projectList.count(hasGenerator) > 1 =>
val generators = projectList.filter(hasGenerator).map(trimAlias)
throw new AnalysisException("Only one generator allowed per select clause but found " +
generators.size + ": " + generators.map(toPrettySQL).mkString(", "))
case p @ Project(projectList, child) =>
// Holds the resolved generator, if one exists in the project list.
var resolvedGenerator: Generate = null
val newProjectList = projectList
.map(CleanupAliases.trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
.flatMap {
case AliasedGenerator(generator, names, outer) if generator.childrenResolved =>
// This is a sanity check; it should not happen, as the previous case would have thrown
// an exception earlier.
assert(resolvedGenerator == null, "More than one generator found in SELECT.")
resolvedGenerator =
Generate(
generator,
unrequiredChildIndex = Nil,
outer = outer,
qualifier = None,
generatorOutput = ResolveGenerate.makeGeneratorOutput(generator, names),
child)
resolvedGenerator.generatorOutput
case other => other :: Nil
}
if (resolvedGenerator != null) {
Project(newProjectList, resolvedGenerator)
} else {
p
}
case g: Generate => g
case p if p.expressions.exists(hasGenerator) =>
throw new AnalysisException("Generators are not supported outside the SELECT clause, but " +
"got: " + p.simpleString(SQLConf.get.maxToStringFields))
}
}
/**
* Rewrites table generating expressions that need one or more of the following in order
* to be resolved:
* - concrete attribute references for their output.
* - to be relocated from a SELECT clause (i.e. from a [[Project]]) into a [[Generate]].
*
* Names for the output [[Attribute]]s are extracted from [[Alias]] or [[MultiAlias]] expressions
* that wrap the [[Generator]].
*/
object ResolveGenerate extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case g: Generate if !g.child.resolved || !g.generator.resolved => g
case g: Generate if !g.resolved =>
g.copy(generatorOutput = makeGeneratorOutput(g.generator, g.generatorOutput.map(_.name)))
}
/**
* Construct the output attributes for a [[Generator]], given a list of names. If the list of
* names is empty, names are assigned from the field names of the generator's element schema.
*/
private[analysis] def makeGeneratorOutput(
generator: Generator,
names: Seq[String]): Seq[Attribute] = {
val elementAttrs = generator.elementSchema.toAttributes
if (names.length == elementAttrs.length) {
names.zip(elementAttrs).map {
case (name, attr) => attr.withName(name)
}
} else if (names.isEmpty) {
elementAttrs
} else {
failAnalysis(
"The number of aliases supplied in the AS clause does not match the number of columns " +
s"output by the UDTF expected ${elementAttrs.size} aliases but got " +
s"${names.mkString(",")} ")
}
}
}
/**
* Extracts [[WindowExpression]]s from the projectList of a [[Project]] operator and
* aggregateExpressions of an [[Aggregate]] operator and creates individual [[Window]]
* operators for every distinct [[WindowSpecDefinition]].
*
* This rule handles three cases:
* - A [[Project]] having [[WindowExpression]]s in its projectList;
* - An [[Aggregate]] having [[WindowExpression]]s in its aggregateExpressions.
* - A [[Filter]]->[[Aggregate]] pattern representing GROUP BY with a HAVING
* clause and the [[Aggregate]] has [[WindowExpression]]s in its aggregateExpressions.
* Note: If there is a GROUP BY clause in the query, aggregations and corresponding
* filters (expressions in the HAVING clause) should be evaluated before any
* [[WindowExpression]]. If a query has SELECT DISTINCT, the DISTINCT part should be
* evaluated after all [[WindowExpression]]s.
*
* For every case, the transformation works as follows:
* 1. For a list of [[Expression]]s (a projectList or an aggregateExpressions), partitions
* it into two lists of [[Expression]]s, one for all [[WindowExpression]]s and another for
* all regular expressions.
* 2. For all [[WindowExpression]]s, groups them based on their [[WindowSpecDefinition]]s
* and [[WindowFunctionType]]s.
* 3. For every distinct [[WindowSpecDefinition]] and [[WindowFunctionType]], creates a
* [[Window]] operator and inserts it into the plan tree.
*/
object ExtractWindowExpressions extends Rule[LogicalPlan] {
private def hasWindowFunction(exprs: Seq[Expression]): Boolean =
exprs.exists(hasWindowFunction)
private def hasWindowFunction(expr: Expression): Boolean = {
expr.find {
case window: WindowExpression => true
case _ => false
}.isDefined
}
/**
* From a Seq of [[NamedExpression]]s, extract expressions containing window expressions and
* other regular expressions that do not contain any window expression. For example, for
* `col1, Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5)`, we will extract
* `col1`, `col2 + col3`, `col4`, and `col5` out and replace their appearances in
* the window expression as attribute references. So, the first returned value will be
* `[Sum(_w0) OVER (PARTITION BY _w1 ORDER BY _w2)]` and the second returned value will be
* [col1, col2 + col3 as _w0, col4 as _w1, col5 as _w2].
*
* @return (seq of expressions containing at least one window expression,
* seq of non-window expressions)
*/
private def extract(
expressions: Seq[NamedExpression]): (Seq[NamedExpression], Seq[NamedExpression]) = {
// First, we partition the input expressions into two parts. Every expression in the
// first part contains at least one WindowExpression; expressions in the second part
// do not contain any WindowExpression.
val (expressionsWithWindowFunctions, regularExpressions) =
expressions.partition(hasWindowFunction)
// Then, we need to extract those regular expressions used in the WindowExpression.
// For example, when we have col1 - Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5),
// we need to make sure that col1 to col5 are all projected from the child of the Window
// operator.
val extractedExprBuffer = new ArrayBuffer[NamedExpression]()
def extractExpr(expr: Expression): Expression = expr match {
case ne: NamedExpression =>
// If a named expression is not in regularExpressions, add it to
// extractedExprBuffer and replace it with an AttributeReference.
val missingExpr =
AttributeSet(Seq(expr)) -- (regularExpressions ++ extractedExprBuffer)
if (missingExpr.nonEmpty) {
extractedExprBuffer += ne
}
// alias will be cleaned in the rule CleanupAliases
ne
case e: Expression if e.foldable =>
e // No need to create an attribute reference if it will be evaluated as a Literal.
case e: Expression =>
// For other expressions, we extract it and replace it with an AttributeReference (with
// an internal column name, e.g. "_w0").
val withName = Alias(e, s"_w${extractedExprBuffer.length}")()
extractedExprBuffer += withName
withName.toAttribute
}
// Now, we extract regular expressions from expressionsWithWindowFunctions
// by using extractExpr.
val seenWindowAggregates = new ArrayBuffer[AggregateExpression]
val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
_.transform {
// Extracts children expressions of a WindowFunction (input parameters of
// a WindowFunction).
case wf: WindowFunction =>
val newChildren = wf.children.map(extractExpr)
wf.withNewChildren(newChildren)
// Extracts expressions from the partition spec and order spec.
case wsc @ WindowSpecDefinition(partitionSpec, orderSpec, _) =>
val newPartitionSpec = partitionSpec.map(extractExpr)
val newOrderSpec = orderSpec.map { so =>
val newChild = extractExpr(so.child)
so.copy(child = newChild)
}
wsc.copy(partitionSpec = newPartitionSpec, orderSpec = newOrderSpec)
// Extract Windowed AggregateExpression
case we @ WindowExpression(
ae @ AggregateExpression(function, _, _, _),
spec: WindowSpecDefinition) =>
val newChildren = function.children.map(extractExpr)
val newFunction = function.withNewChildren(newChildren).asInstanceOf[AggregateFunction]
val newAgg = ae.copy(aggregateFunction = newFunction)
seenWindowAggregates += newAgg
WindowExpression(newAgg, spec)
case AggregateExpression(aggFunc, _, _, _) if hasWindowFunction(aggFunc.children) =>
failAnalysis("It is not allowed to use a window function inside an aggregate " +
"function. Please use the inner window function in a sub-query.")
// Extracts AggregateExpression. For example, for SUM(x) - Sum(y) OVER (...),
// we need to extract SUM(x).
case agg: AggregateExpression if !seenWindowAggregates.contains(agg) =>
val withName = Alias(agg, s"_w${extractedExprBuffer.length}")()
extractedExprBuffer += withName
withName.toAttribute
// Extracts other attributes
case attr: Attribute => extractExpr(attr)
}.asInstanceOf[NamedExpression]
}
(newExpressionsWithWindowFunctions, regularExpressions ++ extractedExprBuffer)
} // end of extract
/**
* Adds operators for Window Expressions. Every Window operator handles a single Window Spec.
*/
private def addWindow(
expressionsWithWindowFunctions: Seq[NamedExpression],
child: LogicalPlan): LogicalPlan = {
// First, we need to extract all WindowExpressions from expressionsWithWindowFunctions
// and put those extracted WindowExpressions to extractedWindowExprBuffer.
// This step is needed because it is possible that an expression contains multiple
// WindowExpressions with different Window Specs.
// After extracting WindowExpressions, we need to construct a project list to generate
// expressionsWithWindowFunctions based on extractedWindowExprBuffer.
// For example, for "sum(a) over (...) / sum(b) over (...)", we will first extract
// "sum(a) over (...)" and "sum(b) over (...)" out, and assign "_we0" as the alias to
// "sum(a) over (...)" and "_we1" as the alias to "sum(b) over (...)".
// Then, the projectList will be [_we0/_we1].
val extractedWindowExprBuffer = new ArrayBuffer[NamedExpression]()
val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
// We need to use transformDown because we want to trigger
// "case alias @ Alias(window: WindowExpression, _)" first.
_.transformDown {
case alias @ Alias(window: WindowExpression, _) =>
// If a WindowExpression has an assigned alias, just use it.
extractedWindowExprBuffer += alias
alias.toAttribute
case window: WindowExpression =>
// If there is no alias assigned to the WindowExpression, we create an
// internal column.
val withName = Alias(window, s"_we${extractedWindowExprBuffer.length}")()
extractedWindowExprBuffer += withName
withName.toAttribute
}.asInstanceOf[NamedExpression]
}
// Second, we group extractedWindowExprBuffer based on their Partition and Order Specs.
val groupedWindowExpressions = extractedWindowExprBuffer.groupBy { expr =>
val distinctWindowSpec = expr.collect {
case window: WindowExpression => window.windowSpec
}.distinct
// We do a final check to see if we only have a single Window Spec defined in an
// expression.
if (distinctWindowSpec.isEmpty) {
failAnalysis(s"$expr does not have any WindowExpression.")
} else if (distinctWindowSpec.length > 1) {
// newExpressionsWithWindowFunctions only has expressions with a single
// WindowExpression. If we reach here, we have a bug.
failAnalysis(s"$expr has multiple Window Specifications ($distinctWindowSpec)." +
s"Please file a bug report with this error message, stack trace, and the query.")
} else {
val spec = distinctWindowSpec.head
(spec.partitionSpec, spec.orderSpec, WindowFunctionType.functionType(expr))
}
}.toSeq
// Third, we add a Window operator for each Window Spec, with each previously created
// operator becoming the child of the next Window operator.
val windowOps =
groupedWindowExpressions.foldLeft(child) {
case (last, ((partitionSpec, orderSpec, _), windowExpressions)) =>
Window(windowExpressions, partitionSpec, orderSpec, last)
}
// Finally, we create a Project to output windowOps's output plus
// newExpressionsWithWindowFunctions.
Project(windowOps.output ++ newExpressionsWithWindowFunctions, windowOps)
} // end of addWindow
// We have to use transformDown here to make sure the
// "Aggregate with Having clause" case is triggered first.
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsDown {
case Filter(condition, _) if hasWindowFunction(condition) =>
failAnalysis("It is not allowed to use window functions inside WHERE and HAVING clauses")
// Aggregate with Having clause. This rule works with an unresolved Aggregate because
// a resolved Aggregate will not have Window Functions.
case f @ Filter(condition, a @ Aggregate(groupingExprs, aggregateExprs, child))
if child.resolved &&
hasWindowFunction(aggregateExprs) &&
a.expressions.forall(_.resolved) =>
val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
// Create an Aggregate operator to evaluate aggregation functions.
val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
// Add a Filter operator for conditions in the Having clause.
val withFilter = Filter(condition, withAggregate)
val withWindow = addWindow(windowExpressions, withFilter)
// Finally, generate output columns according to the original projectList.
val finalProjectList = aggregateExprs.map(_.toAttribute)
Project(finalProjectList, withWindow)
case p: LogicalPlan if !p.childrenResolved => p
// Aggregate without Having clause.
case a @ Aggregate(groupingExprs, aggregateExprs, child)
if hasWindowFunction(aggregateExprs) &&
a.expressions.forall(_.resolved) =>
val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
// Create an Aggregate operator to evaluate aggregation functions.
val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
// Add Window operators.
val withWindow = addWindow(windowExpressions, withAggregate)
// Finally, generate output columns according to the original projectList.
val finalProjectList = aggregateExprs.map(_.toAttribute)
Project(finalProjectList, withWindow)
// We only extract Window Expressions after all expressions of the Project
// have been resolved.
case p @ Project(projectList, child)
if hasWindowFunction(projectList) && !p.expressions.exists(!_.resolved) =>
val (windowExpressions, regularExpressions) = extract(projectList)
// We add a project to get all needed expressions for window expressions from the child
// of the original Project operator.
val withProject = Project(regularExpressions, child)
// Add Window operators.
val withWindow = addWindow(windowExpressions, withProject)
// Finally, generate output columns according to the original projectList.
val finalProjectList = projectList.map(_.toAttribute)
Project(finalProjectList, withWindow)
}
}
/**
* Pulls out nondeterministic expressions from a LogicalPlan that is not a Project or Filter,
* puts them into an inner Project, and finally projects them away at the outer Project.
*/
object PullOutNondeterministic extends Rule[LogicalPlan] {
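// For example, an Aggregate that groups by a nondeterministic expression such as rand(0) is
// rewritten so that rand(0) is evaluated once per row in a Project added below the Aggregate,
// and the grouping expression then references the resulting attribute.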
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.resolved => p // Skip unresolved nodes.
case p: Project => p
case f: Filter => f
case a: Aggregate if a.groupingExpressions.exists(!_.deterministic) =>
val nondeterToAttr = getNondeterToAttr(a.groupingExpressions)
val newChild = Project(a.child.output ++ nondeterToAttr.values, a.child)
a.transformExpressions { case e =>
nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e)
}.copy(child = newChild)
// TODO: It's hard to write a general rule to pull out nondeterministic expressions
// from a LogicalPlan; currently we only do it for a UnaryNode that has the same output
// schema as its child.
case p: UnaryNode if p.output == p.child.output && p.expressions.exists(!_.deterministic) =>
val nondeterToAttr = getNondeterToAttr(p.expressions)
val newPlan = p.transformExpressions { case e =>
nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e)
}
val newChild = Project(p.child.output ++ nondeterToAttr.values, p.child)
Project(p.output, newPlan.withNewChildren(newChild :: Nil))
}
private def getNondeterToAttr(exprs: Seq[Expression]): Map[Expression, NamedExpression] = {
exprs.filterNot(_.deterministic).flatMap { expr =>
val leafNondeterministic = expr.collect { case n: Nondeterministic => n }
leafNondeterministic.distinct.map { e =>
val ne = e match {
case n: NamedExpression => n
case _ => Alias(e, "_nondeterministic")()
}
e -> ne
}
}.toMap
}
}
/**
* Set the seed for random number generation.
*/
object ResolveRandomSeed extends Rule[LogicalPlan] {
private lazy val random = new Random()
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if p.resolved => p
case p => p transformExpressionsUp {
case Uuid(None) => Uuid(Some(random.nextLong()))
case Shuffle(child, None) => Shuffle(child, Some(random.nextLong()))
}
}
}
/**
* Correctly handles null primitive inputs for a UDF by adding an extra [[If]] expression to do
* the null check. When a user defines a UDF with primitive parameters, there is no way to tell
* whether the primitive parameter is null or not, so here we assume the primitive input is
* null-propagatable and return null if the input is null.
*/
object HandleNullInputsForUDF extends Rule[LogicalPlan] {
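// For example, a Scala UDF of type Int => Int applied to a nullable integer column `c` is
// rewritten to If(IsNull(c), null, udf(KnownNotNull(c))), so a null input yields null instead
// of being unboxed into a primitive default value before reaching the UDF.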
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.resolved => p // Skip unresolved nodes.
case p => p transformExpressionsUp {
case udf @ ScalaUDF(_, _, inputs, inputPrimitives, _, _, _, _)
if inputPrimitives.contains(true) =>
// Otherwise, add special handling of null for fields that can't accept null.
// The result of operations like this, when passed null, is generally to return null.
assert(inputPrimitives.length == inputs.length)
val inputPrimitivesPair = inputPrimitives.zip(inputs)
val inputNullCheck = inputPrimitivesPair.collect {
case (isPrimitive, input) if isPrimitive && input.nullable =>
IsNull(input)
}.reduceLeftOption[Expression](Or)
if (inputNullCheck.isDefined) {
// Once we add an `If` check above the udf, it is safe to mark those checked inputs
// as null-safe (i.e., wrap with `KnownNotNull`), because the null-returning
// branch of `If` will be called if any of these checked inputs is null. Thus we can
// prevent this rule from being applied repeatedly.
val newInputs = inputPrimitivesPair.map {
case (isPrimitive, input) =>
if (isPrimitive && input.nullable) {
KnownNotNull(input)
} else {
input
}
}
val newUDF = udf.copy(children = newInputs)
If(inputNullCheck.get, Literal.create(null, udf.dataType), newUDF)
} else {
udf
}
}
}
}
/**
* Check and add proper window frames for all window functions.
*/
object ResolveWindowFrame extends Rule[LogicalPlan] {
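// For example, `sum(a) OVER (PARTITION BY b ORDER BY c)` with no explicit frame gets
// RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW, while the same window expression
// without an ORDER BY gets ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING.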
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
case WindowExpression(wf: WindowFunction, WindowSpecDefinition(_, _, f: SpecifiedWindowFrame))
if wf.frame != UnspecifiedFrame && wf.frame != f =>
failAnalysis(s"Window Frame $f must match the required frame ${wf.frame}")
case WindowExpression(wf: WindowFunction, s @ WindowSpecDefinition(_, _, UnspecifiedFrame))
if wf.frame != UnspecifiedFrame =>
WindowExpression(wf, s.copy(frameSpecification = wf.frame))
case we @ WindowExpression(e, s @ WindowSpecDefinition(_, o, UnspecifiedFrame))
if e.resolved =>
val frame = if (o.nonEmpty) {
SpecifiedWindowFrame(RangeFrame, UnboundedPreceding, CurrentRow)
} else {
SpecifiedWindowFrame(RowFrame, UnboundedPreceding, UnboundedFollowing)
}
we.copy(windowSpec = s.copy(frameSpecification = frame))
}
}
/**
* Check and add order to [[AggregateWindowFunction]]s.
*/
object ResolveWindowOrder extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
case WindowExpression(wf: WindowFunction, spec) if spec.orderSpec.isEmpty =>
failAnalysis(s"Window function $wf requires window to be ordered, please add ORDER BY " +
s"clause. For example SELECT $wf(value_expr) OVER (PARTITION BY window_partition " +
s"ORDER BY window_ordering) from table")
case WindowExpression(rank: RankLike, spec) if spec.resolved =>
val order = spec.orderSpec.map(_.child)
WindowExpression(rank.withOrder(order), spec)
}
}
/**
* Removes natural or using joins by calculating output columns based on the output from the two
* sides, then applies a Project on top of a normal Join to eliminate the natural or using join.
*/
object ResolveNaturalAndUsingJoin extends Rule[LogicalPlan] {
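// For example, `a JOIN b USING (id)` is rewritten into
// Project([id, <other columns of a>, <other columns of b>], Join(a, b, Inner, Some(a.id = b.id))),
// so the USING column appears only once in the output.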
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case j @ Join(left, right, UsingJoin(joinType, usingCols), _, hint)
if left.resolved && right.resolved && j.duplicateResolved =>
commonNaturalJoinProcessing(left, right, joinType, usingCols, None, hint)
case j @ Join(left, right, NaturalJoin(joinType), condition, hint)
if j.resolvedExceptNatural =>
// find common column names from both sides
val joinNames = left.output.map(_.name).intersect(right.output.map(_.name))
commonNaturalJoinProcessing(left, right, joinType, joinNames, condition, hint)
}
}
/**
* Resolves columns of an output table from the data in a logical plan. This rule will:
*
* - Reorder columns when the write is by name
* - Insert safe casts when data types do not match
* - Insert aliases when column names do not match
* - Detect plans that are not compatible with the output table and throw AnalysisException
*/
object ResolveOutputRelation extends Rule[LogicalPlan] {
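// For example, a by-name write (isByName = true) matches each table column to a query column
// with the same name, while a positional write zips the query output with the table schema;
// in both cases each matched column is wrapped in an UpCast and an Alias carrying the table
// column's name and metadata (see checkField below).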
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
case append @ AppendData(table, query, isByName)
if table.resolved && query.resolved && !append.outputResolved =>
val projection = resolveOutputColumns(table.name, table.output, query, isByName)
if (projection != query) {
append.copy(query = projection)
} else {
append
}
case overwrite @ OverwriteByExpression(table, _, query, isByName)
if table.resolved && query.resolved && !overwrite.outputResolved =>
val projection = resolveOutputColumns(table.name, table.output, query, isByName)
if (projection != query) {
overwrite.copy(query = projection)
} else {
overwrite
}
case overwrite @ OverwritePartitionsDynamic(table, query, isByName)
if table.resolved && query.resolved && !overwrite.outputResolved =>
val projection = resolveOutputColumns(table.name, table.output, query, isByName)
if (projection != query) {
overwrite.copy(query = projection)
} else {
overwrite
}
}
def resolveOutputColumns(
tableName: String,
expected: Seq[Attribute],
query: LogicalPlan,
byName: Boolean): LogicalPlan = {
if (expected.size < query.output.size) {
throw new AnalysisException(
s"""Cannot write to '$tableName', too many data columns:
|Table columns: ${expected.map(c => s"'${c.name}'").mkString(", ")}
|Data columns: ${query.output.map(c => s"'${c.name}'").mkString(", ")}""".stripMargin)
}
val errors = new mutable.ArrayBuffer[String]()
val resolved: Seq[NamedExpression] = if (byName) {
expected.flatMap { tableAttr =>
query.resolveQuoted(tableAttr.name, resolver) match {
case Some(queryExpr) =>
checkField(tableAttr, queryExpr, byName, err => errors += err)
case None =>
errors += s"Cannot find data for output column '${tableAttr.name}'"
None
}
}
} else {
if (expected.size > query.output.size) {
throw new AnalysisException(
s"""Cannot write to '$tableName', not enough data columns:
|Table columns: ${expected.map(c => s"'${c.name}'").mkString(", ")}
|Data columns: ${query.output.map(c => s"'${c.name}'").mkString(", ")}"""
.stripMargin)
}
query.output.zip(expected).flatMap {
case (queryExpr, tableAttr) =>
checkField(tableAttr, queryExpr, byName, err => errors += err)
}
}
if (errors.nonEmpty) {
throw new AnalysisException(
s"Cannot write incompatible data to table '$tableName':\\n- ${errors.mkString("\\n- ")}")
}
Project(resolved, query)
}
private def checkField(
tableAttr: Attribute,
queryExpr: NamedExpression,
byName: Boolean,
addError: String => Unit): Option[NamedExpression] = {
// run the type check first so that any type errors are collected before the nullability check
val canWrite = DataType.canWrite(
queryExpr.dataType, tableAttr.dataType, byName, resolver, tableAttr.name, addError)
if (queryExpr.nullable && !tableAttr.nullable) {
addError(s"Cannot write nullable values to non-null column '${tableAttr.name}'")
None
} else if (!canWrite) {
None
} else {
// Always add an UpCast; it will be removed in the optimizer if it is unnecessary.
Some(Alias(
UpCast(queryExpr, tableAttr.dataType), tableAttr.name
)(
explicitMetadata = Option(tableAttr.metadata)
))
}
}
}
private def commonNaturalJoinProcessing(
left: LogicalPlan,
right: LogicalPlan,
joinType: JoinType,
joinNames: Seq[String],
condition: Option[Expression],
hint: JoinHint) = {
val leftKeys = joinNames.map { keyName =>
left.output.find(attr => resolver(attr.name, keyName)).getOrElse {
throw new AnalysisException(s"USING column `$keyName` cannot be resolved on the left " +
s"side of the join. The left-side columns: [${left.output.map(_.name).mkString(", ")}]")
}
}
val rightKeys = joinNames.map { keyName =>
right.output.find(attr => resolver(attr.name, keyName)).getOrElse {
throw new AnalysisException(s"USING column `$keyName` cannot be resolved on the right " +
s"side of the join. The right-side columns: [${right.output.map(_.name).mkString(", ")}]")
}
}
val joinPairs = leftKeys.zip(rightKeys)
val newCondition = (condition ++ joinPairs.map(EqualTo.tupled)).reduceOption(And)
// columns not in joinPairs
val lUniqueOutput = left.output.filterNot(att => leftKeys.contains(att))
val rUniqueOutput = right.output.filterNot(att => rightKeys.contains(att))
// the output list looks like: join keys, columns from left, columns from right
val projectList = joinType match {
case LeftOuter =>
leftKeys ++ lUniqueOutput ++ rUniqueOutput.map(_.withNullability(true))
case LeftExistence(_) =>
leftKeys ++ lUniqueOutput
case RightOuter =>
rightKeys ++ lUniqueOutput.map(_.withNullability(true)) ++ rUniqueOutput
case FullOuter =>
// in a full outer join, the join columns are coalesced so they are non-null whenever either side is.
val joinedCols = joinPairs.map { case (l, r) => Alias(Coalesce(Seq(l, r)), l.name)() }
joinedCols ++
lUniqueOutput.map(_.withNullability(true)) ++
rUniqueOutput.map(_.withNullability(true))
case _ : InnerLike =>
leftKeys ++ lUniqueOutput ++ rUniqueOutput
case _ =>
sys.error("Unsupported natural join type " + joinType)
}
// use Project to trim unnecessary fields
Project(projectList, Join(left, right, joinType, newCondition, hint))
}
/**
* Replaces [[UnresolvedDeserializer]] with the deserialization expression that has been resolved
* to the given input attributes.
*/
object ResolveDeserializer extends Rule[LogicalPlan] {
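// For example, the deserializer of a Dataset[(Int, String)] refers to its columns with
// GetColumnByOrdinal(0, ...) and GetColumnByOrdinal(1, ...); this rule binds those ordinals
// to the child plan's output and fails if the number of columns does not match the tuple
// arity (see validateTopLevelTupleFields below).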
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
case p if p.resolved => p
case p => p transformExpressions {
case UnresolvedDeserializer(deserializer, inputAttributes) =>
val inputs = if (inputAttributes.isEmpty) {
p.children.flatMap(_.output)
} else {
inputAttributes
}
validateTopLevelTupleFields(deserializer, inputs)
val resolved = resolveExpressionBottomUp(
deserializer, LocalRelation(inputs), throws = true)
val result = resolved transformDown {
case UnresolvedMapObjects(func, inputData, cls) if inputData.resolved =>
inputData.dataType match {
case ArrayType(et, cn) =>
MapObjects(func, inputData, et, cn, cls) transformUp {
case UnresolvedExtractValue(child, fieldName) if child.resolved =>
ExtractValue(child, fieldName, resolver)
}
case other =>
throw new AnalysisException("need an array field but got " + other.catalogString)
}
case u: UnresolvedCatalystToExternalMap if u.child.resolved =>
u.child.dataType match {
case _: MapType =>
CatalystToExternalMap(u) transformUp {
case UnresolvedExtractValue(child, fieldName) if child.resolved =>
ExtractValue(child, fieldName, resolver)
}
case other =>
throw new AnalysisException("need a map field but got " + other.catalogString)
}
}
validateNestedTupleFields(result)
result
}
}
private def fail(schema: StructType, maxOrdinal: Int): Unit = {
throw new AnalysisException(s"Try to map ${schema.catalogString} to Tuple${maxOrdinal + 1}" +
", but failed as the number of fields does not line up.")
}
/**
* For each top-level Tuple field, we use [[GetColumnByOrdinal]] to get its corresponding column
* by position. However, the actual number of columns may be different from the number of Tuple
* fields. This method is used to check the number of columns and fields, and throw an
* exception if they do not match.
*/
private def validateTopLevelTupleFields(
deserializer: Expression, inputs: Seq[Attribute]): Unit = {
val ordinals = deserializer.collect {
case GetColumnByOrdinal(ordinal, _) => ordinal
}.distinct.sorted
if (ordinals.nonEmpty && ordinals != inputs.indices) {
fail(inputs.toStructType, ordinals.last)
}
}
/**
* For each nested Tuple field, we use [[GetStructField]] to get its corresponding struct field
* by position. However, the actual number of struct fields may be different from the number
* of nested Tuple fields. This method is used to check the number of struct fields and nested
* Tuple fields, and throw an exception if they do not match.
*/
private def validateNestedTupleFields(deserializer: Expression): Unit = {
val structChildToOrdinals = deserializer
// There are 2 kinds of `GetStructField`:
// 1. resolved from `UnresolvedExtractValue`, and it will have a `name` property.
// 2. created when we build deserializer expression for nested tuple, no `name` property.
// Here we want to validate the ordinals of nested tuple, so we should only catch
// `GetStructField` without the name property.
.collect { case g: GetStructField if g.name.isEmpty => g }
.groupBy(_.child)
.mapValues(_.map(_.ordinal).distinct.sorted)
structChildToOrdinals.foreach { case (expr, ordinals) =>
val schema = expr.dataType.asInstanceOf[StructType]
if (ordinals != schema.indices) {
fail(schema, ordinals.last)
}
}
}
}
/**
* Resolves [[NewInstance]] by finding and adding the outer scope to it if the object being
* constructed is an inner class.
*/
object ResolveNewInstance extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
case p if p.resolved => p
case p => p transformExpressions {
case n: NewInstance if n.childrenResolved && !n.resolved =>
val outer = OuterScopes.getOuterScope(n.cls)
if (outer == null) {
throw new AnalysisException(
s"Unable to generate an encoder for inner class `${n.cls.getName}` without " +
"access to the scope that this class was defined in.\\n" +
"Try moving this class out of its parent class.")
}
n.copy(outerPointer = Some(outer))
}
}
}
/**
* Replace the [[UpCast]] expression by [[Cast]], and throw exceptions if the cast may truncate.
*/
object ResolveUpCast extends Rule[LogicalPlan] {
private def fail(from: Expression, to: DataType, walkedTypePath: Seq[String]) = {
val fromStr = from match {
case l: LambdaVariable => "array element"
case e => e.sql
}
throw new AnalysisException(s"Cannot up cast $fromStr from " +
s"${from.dataType.catalogString} to ${to.catalogString}.\\n" +
"The type path of the target object is:\\n" + walkedTypePath.mkString("", "\\n", "\\n") +
"You can either add an explicit cast to the input data or choose a higher precision " +
"type of the field in the target object")
}
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
case p if p.resolved => p
case p => p transformExpressions {
case u @ UpCast(child, _, _) if !child.resolved => u
case UpCast(child, dt: AtomicType, _)
if SQLConf.get.getConf(SQLConf.LEGACY_LOOSE_UPCAST) &&
child.dataType == StringType =>
Cast(child, dt.asNullable)
case UpCast(child, dataType, walkedTypePath) if !Cast.canUpCast(child.dataType, dataType) =>
fail(child, dataType, walkedTypePath)
case UpCast(child, dataType, _) => Cast(child, dataType.asNullable)
}
}
}
}
/**
* Removes [[SubqueryAlias]] operators from the plan. Subqueries are only required to provide
* scoping information for attributes and can be removed once analysis is complete.
*/
object EliminateSubqueryAliases extends Rule[LogicalPlan] {
// This is also called in the beginning of the optimization phase, and as a result
// is using transformUp rather than resolveOperators.
def apply(plan: LogicalPlan): LogicalPlan = AnalysisHelper.allowInvokingTransformsInAnalyzer {
plan transformUp {
case SubqueryAlias(_, child) => child
}
}
}
/**
* Removes [[Union]] operators from the plan if it just has one child.
*/
object EliminateUnions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case Union(children) if children.size == 1 => children.head
}
}
/**
* Cleans up unnecessary Aliases inside the plan. Basically we only need Alias as a top level
* expression in Project(project list) or Aggregate(aggregate expressions) or
* Window(window expressions). Notice that if an expression has other expression parameters which
* are not in its `children`, e.g. `RuntimeReplaceable`, the transformation for Aliases in this
* rule can't work for those parameters.
*/
object CleanupAliases extends Rule[LogicalPlan] {
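// For example, a project list entry Alias(Alias(b, "c") + 1, "x") is rewritten to
// Alias(b + 1, "x"): the top-level alias is kept while the nested one is stripped.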
private def trimAliases(e: Expression): Expression = {
e.transformDown {
case Alias(child, _) => child
case MultiAlias(child, _) => child
}
}
def trimNonTopLevelAliases(e: Expression): Expression = e match {
case a: Alias =>
a.copy(child = trimAliases(a.child))(
exprId = a.exprId,
qualifier = a.qualifier,
explicitMetadata = Some(a.metadata))
case a: MultiAlias =>
a.copy(child = trimAliases(a.child))
case other => trimAliases(other)
}
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case Project(projectList, child) =>
val cleanedProjectList =
projectList.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
Project(cleanedProjectList, child)
case Aggregate(grouping, aggs, child) =>
val cleanedAggs = aggs.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
Aggregate(grouping.map(trimAliases), cleanedAggs, child)
case Window(windowExprs, partitionSpec, orderSpec, child) =>
val cleanedWindowExprs =
windowExprs.map(e => trimNonTopLevelAliases(e).asInstanceOf[NamedExpression])
Window(cleanedWindowExprs, partitionSpec.map(trimAliases),
orderSpec.map(trimAliases(_).asInstanceOf[SortOrder]), child)
// Operators that operate on objects should only have expressions from encoders, which should
// never have extra aliases.
case o: ObjectConsumer => o
case o: ObjectProducer => o
case a: AppendColumns => a
case other =>
other transformExpressionsDown {
case Alias(child, _) => child
}
}
}
/**
* Ignores the event time watermark in batch queries, since watermarks are only supported in Structured Streaming.
* TODO: add this rule into analyzer rule list.
*/
object EliminateEventTimeWatermark extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case EventTimeWatermark(_, _, child) if !child.isStreaming => child
}
}
/**
* Maps a time column to multiple time windows using the Expand operator. Since it's non-trivial to
* figure out how many windows a time column can map to, we over-estimate the number of windows and
* filter out the rows where the time column is not inside the time window.
*/
object TimeWindowing extends Rule[LogicalPlan] {
import org.apache.spark.sql.catalyst.dsl.expressions._
private final val WINDOW_COL_NAME = "window"
private final val WINDOW_START = "start"
private final val WINDOW_END = "end"
/**
* Generates the logical plan for generating window ranges on a timestamp column. Without
* knowing what the timestamp value is, it's non-trivial to figure out deterministically how many
* window ranges a timestamp will map to given all possible combinations of a window duration,
* slide duration and start time (offset). Therefore, we over-estimate the number of windows
* there may be, and filter out the invalid ones. We use the last Project operator to group
* the window columns into a struct so they can be accessed as `window.start` and `window.end`.
*
* The windows are calculated as below:
* maxNumOverlapping <- ceil(windowDuration / slideDuration)
* for (i <- 0 until maxNumOverlapping)
* windowId <- ceil((timestamp - startTime) / slideDuration)
* windowStart <- windowId * slideDuration + (i - maxNumOverlapping) * slideDuration + startTime
* windowEnd <- windowStart + windowDuration
* return windowStart, windowEnd
*
* This behaves as follows for the given parameters for the time: 12:05. The valid windows are
* marked with a +, and invalid ones are marked with a x. The invalid ones are filtered using the
* Filter operator.
* window: 12m, slide: 5m, start: 0m :: window: 12m, slide: 5m, start: 2m
* 11:55 - 12:07 + 11:52 - 12:04 x
* 12:00 - 12:12 + 11:57 - 12:09 +
* 12:05 - 12:17 + 12:02 - 12:14 +
*
* @param plan The logical plan
* @return the logical plan that will generate the time windows using the Expand operator, with
* the Filter operator for correctness and Project for usability.
*/
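// Worked example for the docstring above, measuring time in minutes (window 12m, slide 5m,
// start 0m, timestamp 12:05 = 725): maxNumOverlapping = ceil(12 / 5) = 3 and
// windowId = ceil((725 - 0) / 5) + 1 = 146 (the + 1 comes from getWindow below, because the
// division here is exact), so the generated window starts are (146 + i - 3) * 5 for
// i = 0, 1, 2, i.e. 11:55, 12:00 and 12:05, matching the first column of the table above.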
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p: LogicalPlan if p.children.size == 1 =>
val child = p.children.head
val windowExpressions =
p.expressions.flatMap(_.collect { case t: TimeWindow => t }).toSet
val numWindowExpr = windowExpressions.size
// Only support a single window expression for now
if (numWindowExpr == 1 &&
windowExpressions.head.timeColumn.resolved &&
windowExpressions.head.checkInputDataTypes().isSuccess) {
val window = windowExpressions.head
val metadata = window.timeColumn match {
case a: Attribute => a.metadata
case _ => Metadata.empty
}
def getWindow(i: Int, overlappingWindows: Int): Expression = {
val division = (PreciseTimestampConversion(
window.timeColumn, TimestampType, LongType) - window.startTime) / window.slideDuration
val ceil = Ceil(division)
// if the division is equal to the ceiling, our record is the start of a window
val windowId = CaseWhen(Seq((ceil === division, ceil + 1)), Some(ceil))
val windowStart = (windowId + i - overlappingWindows) *
window.slideDuration + window.startTime
val windowEnd = windowStart + window.windowDuration
CreateNamedStruct(
Literal(WINDOW_START) ::
PreciseTimestampConversion(windowStart, LongType, TimestampType) ::
Literal(WINDOW_END) ::
PreciseTimestampConversion(windowEnd, LongType, TimestampType) ::
Nil)
}
val windowAttr = AttributeReference(
WINDOW_COL_NAME, window.dataType, metadata = metadata)()
if (window.windowDuration == window.slideDuration) {
val windowStruct = Alias(getWindow(0, 1), WINDOW_COL_NAME)(
exprId = windowAttr.exprId, explicitMetadata = Some(metadata))
val replacedPlan = p transformExpressions {
case t: TimeWindow => windowAttr
}
// For backwards compatibility we add a filter to filter out nulls
val filterExpr = IsNotNull(window.timeColumn)
replacedPlan.withNewChildren(
Filter(filterExpr,
Project(windowStruct +: child.output, child)) :: Nil)
} else {
val overlappingWindows =
math.ceil(window.windowDuration * 1.0 / window.slideDuration).toInt
val windows =
Seq.tabulate(overlappingWindows)(i => getWindow(i, overlappingWindows))
val projections = windows.map(_ +: child.output)
val filterExpr =
window.timeColumn >= windowAttr.getField(WINDOW_START) &&
window.timeColumn < windowAttr.getField(WINDOW_END)
val substitutedPlan = Filter(filterExpr,
Expand(projections, windowAttr +: child.output, child))
val renamedPlan = p transformExpressions {
case t: TimeWindow => windowAttr
}
renamedPlan.withNewChildren(substitutedPlan :: Nil)
}
} else if (numWindowExpr > 1) {
p.failAnalysis("Multiple time window expressions would result in a cartesian product " +
"of rows, therefore they are currently not supported.")
} else {
p // Return unchanged. Analyzer will throw exception later
}
}
}
/**
* Resolve a [[CreateNamedStruct]] if it contains [[NamePlaceholder]]s.
*/
object ResolveCreateNamedStruct extends Rule[LogicalPlan] {
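// For example, the `struct($"a")` function produces CreateNamedStruct(NamePlaceholder, 'a);
// once 'a resolves to an attribute named "a", the placeholder is replaced and the expression
// becomes CreateNamedStruct(Literal("a"), a).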
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveExpressions {
case e: CreateNamedStruct if !e.resolved =>
val children = e.children.grouped(2).flatMap {
case Seq(NamePlaceholder, e: NamedExpression) if e.resolved =>
Seq(Literal(e.name), e)
case kv =>
kv
}
CreateNamedStruct(children.toList)
}
}
/**
* Aggregate expressions from a subquery that reference the outer query block are pushed
* down to the outer query block for evaluation. The rule below updates such outer references
* to AttributeReferences referring to attributes from the parent/outer query block.
*
* For example (SQL):
* {{{
* SELECT l.a FROM l GROUP BY 1 HAVING EXISTS (SELECT 1 FROM r WHERE r.d < min(l.b))
* }}}
* Plan before the rule.
* Project [a#226]
* +- Filter exists#245 [min(b#227)#249]
* : +- Project [1 AS 1#247]
* : +- Filter (d#238 < min(outer(b#227))) <-----
* : +- SubqueryAlias r
* : +- Project [_1#234 AS c#237, _2#235 AS d#238]
* : +- LocalRelation [_1#234, _2#235]
* +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249]
* +- SubqueryAlias l
* +- Project [_1#223 AS a#226, _2#224 AS b#227]
* +- LocalRelation [_1#223, _2#224]
* Plan after the rule.
* Project [a#226]
* +- Filter exists#245 [min(b#227)#249]
* : +- Project [1 AS 1#247]
* : +- Filter (d#238 < outer(min(b#227)#249)) <-----
* : +- SubqueryAlias r
* : +- Project [_1#234 AS c#237, _2#235 AS d#238]
* : +- LocalRelation [_1#234, _2#235]
* +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249]
* +- SubqueryAlias l
* +- Project [_1#223 AS a#226, _2#224 AS b#227]
* +- LocalRelation [_1#223, _2#224]
*/
object UpdateOuterReferences extends Rule[LogicalPlan] {
private def stripAlias(expr: Expression): Expression = expr match { case a: Alias => a.child }
private def updateOuterReferenceInSubquery(
plan: LogicalPlan,
refExprs: Seq[Expression]): LogicalPlan = {
plan resolveExpressions { case e =>
val outerAlias =
refExprs.find(stripAlias(_).semanticEquals(stripOuterReference(e)))
outerAlias match {
case Some(a: Alias) => OuterReference(a.toAttribute)
case _ => e
}
}
}
def apply(plan: LogicalPlan): LogicalPlan = {
plan resolveOperators {
case f @ Filter(_, a: Aggregate) if f.resolved =>
f transformExpressions {
case s: SubqueryExpression if s.children.nonEmpty =>
// Collect the aliases from output of aggregate.
val outerAliases = a.aggregateExpressions collect { case a: Alias => a }
// Update the subquery plan to record the OuterReference to point to outer query plan.
s.withNewPlan(updateOuterReferenceInSubquery(s.plan, outerAliases))
}
}
}
}
| aosagie/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala | Scala | apache-2.0 | 128,855 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.assertion
import io.gatling.BaseSpec
import io.gatling.commons.stats.assertion._
import io.gatling.core.config.GatlingConfiguration
class AssertionDSLSpec extends BaseSpec with AssertionSupport {
"The Assertion DSL builders" should "produce the expected Assertions ASTs" in {
implicit val configuration: GatlingConfiguration = GatlingConfiguration.loadForTest()
global.responseTime.min.is(100) shouldBe Assertion(Global, TimeTarget(ResponseTime, Min), Is(100))
details("Foo" / "Bar").responseTime.max.lte(100) shouldBe Assertion(Details(List("Foo", "Bar")), TimeTarget(ResponseTime, Max), Lte(100))
forAll.responseTime.mean.gte(100) shouldBe Assertion(ForAll, TimeTarget(ResponseTime, Mean), Gte(100))
global.responseTime.stdDev.between(1, 3) shouldBe Assertion(Global, TimeTarget(ResponseTime, StandardDeviation), Between(1, 3, inclusive = true))
global.responseTime.percentile1.is(300) shouldBe Assertion(Global, TimeTarget(ResponseTime, Percentiles(50)), Is(300))
global.responseTime.percentile2.in(Set(1, 2, 3)) shouldBe Assertion(Global, TimeTarget(ResponseTime, Percentiles(75)), In(List(1, 2, 3)))
global.responseTime.percentile3.is(300) shouldBe Assertion(Global, TimeTarget(ResponseTime, Percentiles(95)), Is(300))
global.responseTime.percentile4.in(Set(1, 2, 3)) shouldBe Assertion(Global, TimeTarget(ResponseTime, Percentiles(99)), In(List(1, 2, 3)))
global.allRequests.count.is(20) shouldBe Assertion(Global, CountTarget(AllRequests), Is(20))
forAll.allRequests.percent.lt(5) shouldBe Assertion(ForAll, PercentTarget(AllRequests), Lt(5))
global.failedRequests.count.gt(10) shouldBe Assertion(Global, CountTarget(FailedRequests), Gt(10))
details("Foo" / "Bar").failedRequests.percent.between(1, 5, inclusive = false) shouldBe Assertion(
Details(List("Foo", "Bar")),
PercentTarget(FailedRequests),
Between(1, 5, inclusive = false)
)
global.successfulRequests.count.in(1, 2, 2, 4) shouldBe Assertion(Global, CountTarget(SuccessfulRequests), In(List(1, 2, 4)))
global.successfulRequests.percent.is(6) shouldBe Assertion(Global, PercentTarget(SuccessfulRequests), Is(6))
global.requestsPerSec.is(35) shouldBe Assertion(Global, MeanRequestsPerSecondTarget, Is(35))
global.requestsPerSec.around(35, 3) shouldBe Assertion(Global, MeanRequestsPerSecondTarget, Between(32, 38, inclusive = true))
}
}
| gatling/gatling | gatling-core/src/test/scala/io/gatling/core/assertion/AssertionDSLSpec.scala | Scala | apache-2.0 | 3,048 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.{Equality, Every, One, Many, Entry}
import org.scalactic.StringNormalizations._
import SharedHelpers._
import FailureMessages.decorateToStringValue
import Matchers._
import exceptions.TestFailedException
class EveryShouldContainOnlyLogicalOrSpec extends Spec {
val invertedStringEquality =
new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
val invertedListOfStringEquality =
new Equality[Every[String]] {
def areEqual(a: Every[String], b: Any): Boolean = a != b
}
private def upperCase(value: Any): Any =
value match {
case l: Every[_] => l.map(upperCase(_))
case s: String => s.toUpperCase
case c: Char => c.toString.toUpperCase.charAt(0)
case (s1: String, s2: String) => (s1.toUpperCase, s2.toUpperCase)
case e: java.util.Map.Entry[_, _] =>
(e.getKey, e.getValue) match {
case (k: String, v: String) => Entry(k.toUpperCase, v.toUpperCase)
case _ => value
}
case _ => value
}
val upperCaseStringEquality =
new Equality[String] {
def areEqual(a: String, b: Any): Boolean = upperCase(a) == upperCase(b)
}
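// With upperCaseStringEquality in scope (or passed explicitly via `decided by`), "fum" and
// "FUM" compare as equal, which is what the `contain only` assertions below rely on.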
//ADDITIONAL//
val fileName: String = "EveryShouldContainOnlyLogicalOrSpec.scala"
object `an Every` {
val fumList: Every[String] = Every("fum", "foe", "fie", "fee")
val toList: Every[String] = Every("you", "to", "birthday", "happy")
object `when used with (contain only (..) or contain only (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (contain only ("fee", "fie", "foe", "fum") or contain only ("fie", "fee", "fum", "foe"))
fumList should (contain only ("fee", "fie", "foe", "fam") or contain only ("fie", "fee", "fum", "foe"))
fumList should (contain only ("fee", "fie", "foe", "fum") or contain only ("fie", "fee", "fam", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (contain only ("fee", "fie", "foe", "fam") or contain only ("happy", "birthday", "to", "you"))
}
checkMessageStackDepth(e1, Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fam\"") + ", and " + Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (contain only ("FEE", "FIE", "FOE", "FUM") or contain only ("FIE", "FEE", "FUM", "FOE"))
fumList should (contain only ("FEE", "FIE", "FOE", "FAM") or contain only ("FIE", "FEE", "FUM", "FOE"))
fumList should (contain only ("FEE", "FIE", "FOE", "FUM") or contain only ("FIE", "FEE", "FAM", "FOE"))
val e1 = intercept[TestFailedException] {
fumList should (contain only ("FEE", "FIE", "FOE", "FAM") or (contain only ("FIE", "FEE", "FAM", "FOE")))
}
        checkMessageStackDepth(e1, Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\"") + ", and " + Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (contain only ("FEE", "FIE", "FOE", "FUM") or contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (contain only ("FEE", "FIE", "FOE", "FAM") or contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (contain only ("FEE", "FIE", "FOE", "FUM") or contain only ("FIE", "FEE", "FAM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain only ("FEE", "FIE", "FOE", "FAM") or contain only ("FIE", "FEE", "FAM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
        checkMessageStackDepth(e1, Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\"") + ", and " + Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
(fumList should (contain only (" FEE ", " FIE ", " FOE ", " FUM ") or contain only (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (contain only ("fee", "fie", "foe", "fie", "fum") or contain only ("fie", "fee", "fum", "foe"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.onlyDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
fumList should (contain only ("fie", "fee", "fum", "foe") or contain only ("fee", "fie", "foe", "fie", "fum"))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.onlyDuplicate))
}
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
fumList should (contain only Many("fee", "fie", "foe", "fam") or contain only Many("happy", "birthday", "to", "you"))
}
checkMessageStackDepth(e1, Resources.didNotContainOnlyElementsWithFriendlyReminder(decorateToStringValue(fumList), decorateToStringValue(Many("fee", "fie", "foe", "fam"))) + ", and " + Resources.didNotContainOnlyElementsWithFriendlyReminder(decorateToStringValue(fumList), decorateToStringValue(Many("happy", "birthday", "to", "you"))), fileName, thisLineNumber - 2)
}
}
    object `when used with (equal (..) or contain only (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (equal (fumList) or contain only ("fie", "fee", "fum", "foe"))
fumList should (equal (toList) or contain only ("fie", "fee", "fum", "foe"))
fumList should (equal (fumList) or contain only ("fie", "fee", "fam", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) or contain only ("happy", "birthday", "to", "you"))
}
        checkMessageStackDepth(e1, Resources.didNotEqual(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"happy\", \"birthday\", \"to\", \"you\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (equal (fumList) or contain only ("FIE", "FEE", "FUM", "FOE"))
fumList should (equal (toList) or contain only ("FIE", "FEE", "FUM", "FOE"))
fumList should (equal (fumList) or contain only ("FIE", "FEE", "FAM", "FOE"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) or (contain only ("FIE", "FEE", "FAM", "FOE")))
}
        checkMessageStackDepth(e1, Resources.didNotEqual(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (equal (toList) or contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (equal (fumList) or contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (equal (toList) or contain only ("FIE", "FEE", "FAM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (equal (fumList) or contain only ("FIE", "FEE", "FAM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
        checkMessageStackDepth(e1, Resources.didNotEqual(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
(fumList should (equal (toList) or contain only (" FEE ", " FIE ", " FOE ", " FUM "))) (decided by invertedListOfStringEquality, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (equal (fumList) or contain only ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.onlyDuplicate))
}
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) or contain only Many("happy", "birthday", "to", "you"))
}
checkMessageStackDepth(e1, Resources.didNotEqual(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainOnlyElementsWithFriendlyReminder(decorateToStringValue(fumList), decorateToStringValue(Many("happy", "birthday", "to", "you"))), fileName, thisLineNumber - 2)
}
}
    object `when used with (be (..) or contain only (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (be_== (fumList) or contain only ("fie", "fee", "fum", "foe"))
fumList should (be_== (toList) or contain only ("fie", "fee", "fum", "foe"))
fumList should (be_== (fumList) or contain only ("fie", "fee", "fam", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (be_== (toList) or contain only ("fie", "fee", "fam", "foe"))
}
        checkMessageStackDepth(e1, Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"fie\", \"fee\", \"fam\", \"foe\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (be_== (fumList) or contain only ("FIE", "FEE", "FUM", "FOE"))
fumList should (be_== (toList) or contain only ("FIE", "FEE", "FUM", "FOE"))
fumList should (be_== (fumList) or contain only ("FIE", "FEE", "FAM", "FOE"))
val e1 = intercept[TestFailedException] {
fumList should (be_== (toList) or (contain only ("FIE", "FEE", "FAM", "FOE")))
}
        checkMessageStackDepth(e1, Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (be_== (fumList) or contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality)
(fumList should (be_== (toList) or contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality)
(fumList should (be_== (fumList) or contain only ("FIE", "FEE", "FAM", "FOE"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (be_== (toList) or contain only ("FIE", "FEE", "FAM", "FOE"))) (decided by upperCaseStringEquality)
}
        checkMessageStackDepth(e1, Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FAM\", \"FOE\""), fileName, thisLineNumber - 2)
(fumList should (be_== (fumList) or contain only (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (be_== (fumList) or contain only ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.onlyDuplicate))
}
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
fumList should (be_== (toList) or contain only Many("fie", "fee", "fam", "foe"))
}
checkMessageStackDepth(e1, Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainOnlyElementsWithFriendlyReminder(decorateToStringValue(fumList), decorateToStringValue(Many("fie", "fee", "fam", "foe"))), fileName, thisLineNumber - 2)
}
}
    object `when used with (contain only (..) or be (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (contain only ("fie", "fee", "fum", "foe") or be_== (fumList))
fumList should (contain only ("fie", "fee", "fam", "foe") or be_== (fumList))
fumList should (contain only ("fie", "fee", "fum", "foe") or be_== (toList))
val e1 = intercept[TestFailedException] {
fumList should (contain only ("fee", "fie", "foe", "fam") or be_== (toList))
}
        checkMessageStackDepth(e1, Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fam\"") + ", and " + Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (contain only ("FIE", "FEE", "FUM", "FOE") or be_== (fumList))
fumList should (contain only ("FIE", "FEE", "FAM", "FOE") or be_== (fumList))
fumList should (contain only ("FIE", "FEE", "FUM", "FOE") or be_== (toList))
val e1 = intercept[TestFailedException] {
fumList should (contain only ("FEE", "FIE", "FOE", "FAM") or be_== (toList))
}
        checkMessageStackDepth(e1, Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\"") + ", and " + Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (contain only ("FIE", "FEE", "FUM", "FOE") or be_== (fumList))) (decided by upperCaseStringEquality)
(fumList should (contain only ("FIE", "FEE", "FAM", "FOE") or be_== (fumList))) (decided by upperCaseStringEquality)
(fumList should (contain only ("FIE", "FEE", "FUM", "FOE") or be_== (toList))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain only ("FEE", "FIE", "FOE", "FAM") or be_== (toList))) (decided by upperCaseStringEquality)
}
        checkMessageStackDepth(e1, Resources.didNotContainOnlyElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FAM\"") + ", and " + Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
(fumList should (contain only (" FEE ", " FIE ", " FOE ", " FUM ") or be_== (fumList))) (after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (contain only ("fee", "fie", "foe", "fie", "fum") or be_== (fumList))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.onlyDuplicate))
}
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
fumList should (contain only Many("fee", "fie", "foe", "fam") or be_== (toList))
}
checkMessageStackDepth(e1, Resources.didNotContainOnlyElementsWithFriendlyReminder(decorateToStringValue(fumList), decorateToStringValue(Many("fee", "fie", "foe", "fam"))) + ", and " + Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
}
}
    object `when used with (not contain only (..) or not contain only (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not contain only ("fee", "fie", "foe", "fuu") or not contain only ("fie", "fee", "fuu", "foe"))
fumList should (not contain only ("fee", "fie", "foe", "fum") or not contain only ("fie", "fee", "fuu", "foe"))
fumList should (not contain only ("fee", "fie", "foe", "fuu") or not contain only ("fie", "fee", "fum", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (not contain only ("fee", "fie", "foe", "fum") or not contain only ("fie", "fee", "fum", "foe"))
}
        checkMessageStackDepth(e1, Resources.containedOnlyElements(decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\"") + ", and " + Resources.containedOnlyElements(decorateToStringValue(fumList), "\"fie\", \"fee\", \"fum\", \"foe\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not contain only ("FEE", "FIE", "FOE", "FUU") or not contain only ("FIE", "FEE", "FUU", "FOE"))
fumList should (not contain only ("FEE", "FIE", "FOE", "FUM") or not contain only ("FIE", "FEE", "FUU", "FOE"))
fumList should (not contain only ("FEE", "FIE", "FOE", "FUU") or not contain only ("FIE", "FEE", "FUM", "FOE"))
val e1 = intercept[TestFailedException] {
fumList should (not contain only ("FEE", "FIE", "FOE", "FUM") or not contain only ("FIE", "FEE", "FUM", "FOE"))
}
        checkMessageStackDepth(e1, Resources.containedOnlyElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\"") + ", and " + Resources.containedOnlyElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not contain only ("FEE", "FIE", "FOE", "FUU") or not contain only ("FIE", "FEE", "FUU", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (not contain only ("FEE", "FIE", "FOE", "FUM") or not contain only ("FIE", "FEE", "FUU", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (not contain only ("FEE", "FIE", "FOE", "FUU") or not contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not contain only ("FEE", "FIE", "FOE", "FUM") or not contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
        checkMessageStackDepth(e1, Resources.containedOnlyElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\"") + ", and " + Resources.containedOnlyElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
(fumList should (contain only (" FEE ", " FIE ", " FOE ", " FUM ") or contain only (" FEE ", " FIE ", " FOE ", " FUM "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not contain only ("fee", "fie", "foe", "fie", "fum") or not contain only ("fie", "fee", "fuu", "foe"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.onlyDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
fumList should (not contain only ("fie", "fee", "fuu", "foe") or not contain only ("fee", "fie", "foe", "fie", "fum"))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.onlyDuplicate))
}
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
One(Many("fee", "fie", "foe", "fum")) should (not contain only (Many("fee", "fie", "foe", "fum")) or not contain only (Many("fee", "fie", "foe", "fum")))
}
checkMessageStackDepth(e1, Resources.containedOnlyElementsWithFriendlyReminder(decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(Many("fee", "fie", "foe", "fum"))) + ", and " + Resources.containedOnlyElementsWithFriendlyReminder(decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(Many("fee", "fie", "foe", "fum"))), fileName, thisLineNumber - 2)
}
}
    object `when used with (not equal (..) or not contain only (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not equal (toList) or not contain only ("fie", "fee", "fuu", "foe"))
fumList should (not equal (fumList) or not contain only ("fie", "fee", "fuu", "foe"))
fumList should (not equal (toList) or not contain only ("fie", "fee", "fum", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (not equal (fumList) or not contain only ("fee", "fie", "foe", "fum"))
}
        checkMessageStackDepth(e1, Resources.equaled(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedOnlyElements(decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not equal (toList) or not contain only ("FIE", "FEE", "FUU", "FOE"))
fumList should (not equal (fumList) or not contain only ("FIE", "FEE", "FUU", "FOE"))
fumList should (not equal (toList) or not contain only ("FIE", "FEE", "FUM", "FOE"))
val e2 = intercept[TestFailedException] {
fumList should (not equal (fumList) or (not contain only ("FEE", "FIE", "FOE", "FUM")))
}
        checkMessageStackDepth(e2, Resources.equaled(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedOnlyElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not equal (fumList) or not contain only ("FIE", "FEE", "FUU", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (not equal (toList) or not contain only ("FIE", "FEE", "FUU", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (not equal (fumList) or not contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not equal (toList) or not contain only ("FIE", "FEE", "FUM", "FOE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
        checkMessageStackDepth(e1, Resources.equaled(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.containedOnlyElements(decorateToStringValue(fumList), "\"FIE\", \"FEE\", \"FUM\", \"FOE\""), fileName, thisLineNumber - 2)
(fumList should (not contain only (" FEE ", " FIE ", " FOE ", " FUU ") or not contain only (" FEE ", " FIE ", " FOE ", " FUU "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not equal (toList) or not contain only ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.onlyDuplicate))
}
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
One(Many("fee", "fie", "foe", "fum")) should (not equal (One(Many("fee", "fie", "foe", "fum"))) or not contain only (Many("fee", "fie", "foe", "fum")))
}
checkMessageStackDepth(e1, Resources.equaled(decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(One(Many("fee", "fie", "foe", "fum")))) + ", and " + Resources.containedOnlyElementsWithFriendlyReminder(decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(Many("fee", "fie", "foe", "fum"))), fileName, thisLineNumber - 2)
}
}
    object `when used with (not be (..) or not contain only (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not be_== (toList) or not contain only ("fie", "fee", "fuu", "foe"))
fumList should (not be_== (fumList) or not contain only ("fie", "fee", "fuu", "foe"))
fumList should (not be_== (toList) or not contain only ("fee", "fie", "foe", "fum"))
val e1 = intercept[TestFailedException] {
fumList should (not be_== (fumList) or not contain only ("fee", "fie", "foe", "fum"))
}
        checkMessageStackDepth(e1, Resources.wasEqualTo(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedOnlyElements(decorateToStringValue(fumList), "\"fee\", \"fie\", \"foe\", \"fum\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not be_== (toList) or not contain only ("FIE", "FEE", "FUU", "FOE"))
fumList should (not be_== (fumList) or not contain only ("FIE", "FEE", "FUU", "FOE"))
fumList should (not be_== (toList) or not contain only ("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (not be_== (fumList) or (not contain only ("FEE", "FIE", "FOE", "FUM")))
}
        checkMessageStackDepth(e1, Resources.wasEqualTo(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedOnlyElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not be_== (toList) or not contain only ("FIE", "FEE", "FUU", "FOE"))) (decided by upperCaseStringEquality)
(fumList should (not be_== (fumList) or not contain only ("FIE", "FEE", "FUU", "FOE"))) (decided by upperCaseStringEquality)
(fumList should (not be_== (toList) or not contain only ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not be_== (fumList) or not contain only ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
}
        checkMessageStackDepth(e1, Resources.wasEqualTo(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedOnlyElements(decorateToStringValue(fumList), "\"FEE\", \"FIE\", \"FOE\", \"FUM\""), fileName, thisLineNumber - 2)
(fumList should (not contain only (" FEE ", " FIE ", " FOE ", " FUU ") or not contain only (" FEE ", " FIE ", " FOE ", " FUU "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not be_== (toList) or not contain only ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.onlyDuplicate))
}
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
One(Many("fee", "fie", "foe", "fum")) should (not be_== (One(Many("fee", "fie", "foe", "fum"))) or not contain only (Many("fee", "fie", "foe", "fum")))
}
checkMessageStackDepth(e1, Resources.wasEqualTo(decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(One(Many("fee", "fie", "foe", "fum")))) + ", and " + Resources.containedOnlyElementsWithFriendlyReminder(decorateToStringValue(One(Many("fee", "fie", "foe", "fum"))), decorateToStringValue(Many("fee", "fie", "foe", "fum"))), fileName, thisLineNumber - 2)
}
}
}
object `collection of Lists` {
val list1s: Every[Every[Int]] = Every(Every(3, 2, 1), Every(3, 2, 1), Every(3, 2, 1))
val lists: Every[Every[Int]] = Every(Every(3, 2, 1), Every(3, 2, 1), Every(4, 3, 2))
val hiLists: Every[Every[String]] = Every(Every("hi", "hello"), Every("hi", "hello"), Every("hi", "hello"))
val toLists: Every[Every[String]] = Every(Every("you", "to"), Every("you", "to"), Every("you", "to"))
def allErrMsg(index: Int, message: String, lineNumber: Int, left: Any): String =
"'all' inspection failed, because: \\n" +
" at index " + index + ", " + message + " (" + fileName + ":" + (lineNumber) + ") \\n" +
"in " + decorateToStringValue(left)
    object `when used with (contain only (..) or contain only (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (contain only (3, 2, 1) or contain only (1, 3, 2))
all (list1s) should (contain only (3, 2, 5) or contain only (1, 3, 2))
all (list1s) should (contain only (3, 2, 1) or contain only (2, 3, 4))
atLeast (2, lists) should (contain only (3, 1, 2) or contain only (1, 2, 3))
atLeast (2, lists) should (contain only (3, 6, 5) or contain only (1, 3, 2))
atLeast (2, lists) should (contain only (3, 1, 2) or contain only (8, 3, 4))
val e1 = intercept[TestFailedException] {
all (lists) should (contain only (3, 1, 2) or contain only (1, 3, 2))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(Many(4, 3, 2)) + " did not contain only " + "(3, 1, 2)" + ", and " + decorateToStringValue(Many(4, 3, 2)) + " did not contain only " + "(1, 3, 2)", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (contain only ("HELLO", "HI") or contain only ("hello", "hi"))
all (hiLists) should (contain only ("HELLO", "HO") or contain only ("hello", "hi"))
all (hiLists) should (contain only ("HELLO", "HI") or contain only ("hello", "ho"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (contain only ("HELLO", "HO") or contain only ("hello", "ho"))
}
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HELLO\", \"HO\")" + ", and " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"hello\", \"ho\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (contain only ("HELLO", "HI") or contain only ("hello", "hi"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (contain only ("HELLO", "HO") or contain only ("hello", "hi"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (contain only ("HELLO", "HI") or contain only ("hello", "ho"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (contain only ("HELLO", "HO") or contain only ("hello", "ho"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HELLO\", \"HO\")" + ", and " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"hello\", \"ho\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (contain only (3, 2, 2, 1) or contain only (1, 3, 2))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.onlyDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
all (list1s) should (contain only (1, 3, 2) or contain only (3, 2, 2, 1))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.onlyDuplicate))
}
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
all (One(One(Many("hi", "hello")))) should (contain only Many("HELLO", "HO") or contain only Many("hello", "ho"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One(Many("hi", "hello"))) + " did not contain only " + "(" + decorateToStringValue(Many("HELLO", "HO")) + "), did you forget to say : _*" + ", and " + decorateToStringValue(One(Many("hi", "hello"))) + " did not contain only " + "(" + decorateToStringValue(Many("hello", "ho")) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many("hi", "hello")))), fileName, thisLineNumber - 2)
}
}
    object `when used with (be (..) or contain only (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (be_== (Many(3, 2, 1)) or contain only (1, 2, 3))
all (list1s) should (be_== (Many(2, 3, 4)) or contain only (1, 2, 3))
all (list1s) should (be_== (Many(3, 2, 1)) or contain only (2, 3, 4))
val e1 = intercept[TestFailedException] {
all (list1s) should (be_== (Many(2, 3, 4)) or contain only (2, 3, 4))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many(3, 2, 1)) + " was not equal to " + decorateToStringValue(Many(2, 3, 4)) + ", and " + decorateToStringValue(Many(3, 2, 1)) + " did not contain only " + "(2, 3, 4)", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (be_== (Many("hi", "hello")) or contain only ("HELLO", "HI"))
all (hiLists) should (be_== (Many("ho", "hello")) or contain only ("HELLO", "HI"))
all (hiLists) should (be_== (Many("hi", "hello")) or contain only ("HELLO", "HO"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (be_== (Many("ho", "hello")) or contain only ("HELLO", "HO"))
}
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was not equal to " + decorateToStringValue(Many("ho", "hello")) + ", and " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HELLO\", \"HO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (be_== (Many("hi", "hello")) or contain only ("HELLO", "HI"))) (decided by upperCaseStringEquality)
(all (hiLists) should (be_== (Many("ho", "hello")) or contain only ("HELLO", "HI"))) (decided by upperCaseStringEquality)
(all (hiLists) should (be_== (Many("hi", "hello")) or contain only ("HELLO", "HO"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (be_== (Many("ho", "hello")) or contain only ("HELLO", "HO"))) (decided by upperCaseStringEquality)
}
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was not equal to " + decorateToStringValue(Many("ho", "hello")) + ", and " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\"HELLO\", \"HO\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (be_== (Many(3, 2, 1)) or contain only (1, 2, 2, 3))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.onlyDuplicate))
}
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
all (One(One(Many(3, 2, 1)))) should (be_== (Many(2, 3, 4)) or contain only Many(2, 3, 4))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One(Many(3, 2, 1))) + " was not equal to " + decorateToStringValue(Many(2, 3, 4)) + ", and " + decorateToStringValue(One(Many(3, 2, 1))) + " did not contain only (" + decorateToStringValue(Many(2, 3, 4)) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many(3, 2, 1)))), fileName, thisLineNumber - 2)
}
}
    object `when used with (not contain only (..) or not contain only (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (not contain only (3, 2, 8) or not contain only (8, 3, 4))
all (list1s) should (not contain only (1, 2, 3) or not contain only (8, 3, 4))
all (list1s) should (not contain only (3, 2, 8) or not contain only (2, 3, 1))
val e1 = intercept[TestFailedException] {
all (lists) should (not contain only (4, 2, 3) or not contain only (2, 3, 4))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(Many(4, 3, 2)) + " contained only " + "(4, 2, 3)" + ", and " + decorateToStringValue(Many(4, 3, 2)) + " contained only " + "(2, 3, 4)", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not contain only ("HELLO", "HO") or not contain only ("hello", "ho"))
all (hiLists) should (not contain only ("HELLO", "HI") or not contain only ("hello", "ho"))
all (hiLists) should (not contain only ("HELLO", "HO") or not contain only ("hello", "hi"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not contain only ("HELLO", "HI") or not contain only ("hello", "hi"))
}
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HELLO\", \"HI\")" + ", and " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"hello\", \"hi\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (not contain only ("HELLO", "HO") or not contain only ("hello", "ho"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (not contain only ("HELLO", "HI") or not contain only ("hello", "ho"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (not contain only ("HELLO", "HO") or not contain only ("hello", "hi"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not contain only ("HELLO", "HI") or not contain only ("hello", "hi"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HELLO\", \"HI\")" + ", and " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"hello\", \"hi\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not contain only (1, 2, 2, 3) or not contain only (8, 3, 4))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.onlyDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not contain only (8, 3, 4) or not contain only (1, 2, 2, 3))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.onlyDuplicate))
}
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
all (One(One(Many(3, 2, 1)))) should (not contain only (Many(3, 2, 1)) or not contain only (Many(3, 2, 1)))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One(Many(3, 2, 1))) + " contained only (" + decorateToStringValue(Many(3, 2, 1)) + "), did you forget to say : _*" + ", and " + decorateToStringValue(One(Many(3, 2, 1))) + " contained only (" + decorateToStringValue(Many(3, 2, 1)) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many(3, 2, 1)))), fileName, thisLineNumber - 2)
}
}
    object `when used with (not be (..) or not contain only (..))` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (not be_== (One(2)) or not contain only (8, 3, 4))
all (list1s) should (not be_== (Many(3, 2, 1)) or not contain only (8, 3, 4))
all (list1s) should (not be_== (One(2)) or not contain only (1, 2, 3))
val e1 = intercept[TestFailedException] {
all (list1s) should (not be_== (Many(3, 2, 1)) or not contain only (2, 3, 1))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many(3, 2, 1)) + " was equal to " + decorateToStringValue(Many(3, 2, 1)) + ", and " + decorateToStringValue(Many(3, 2, 1)) + " contained only " + "(2, 3, 1)", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not be_== (Many("hello", "ho")) or not contain only ("HELLO", "HO"))
all (hiLists) should (not be_== (Many("hello", "hi")) or not contain only ("HELLO", "HO"))
all (hiLists) should (not be_== (Many("hello", "ho")) or not contain only ("HELLO", "HI"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not be_== (Many("hi", "hello")) or not contain only ("HELLO", "HI"))
}
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was equal to " + decorateToStringValue(Many("hi", "hello")) + ", and " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HELLO\", \"HI\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (not be_== (Many("hello", "ho")) or not contain only ("HELLO", "HO"))) (decided by upperCaseStringEquality)
(all (hiLists) should (not be_== (Many("hello", "hi")) or not contain only ("HELLO", "HO"))) (decided by upperCaseStringEquality)
(all (hiLists) should (not be_== (Many("hello", "ho")) or not contain only ("HELLO", "HI"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not be_== (Many("hi", "hello")) or not contain only ("HELLO", "HI"))) (decided by upperCaseStringEquality)
}
        checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was equal to " + decorateToStringValue(Many("hi", "hello")) + ", and " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\"HELLO\", \"HI\")", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not be_== (One(2)) or not contain only (1, 2, 2, 3))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.onlyDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not contain only (8, 3, 4) or not contain only (1, 2, 2, 3))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.onlyDuplicate))
}
def `should throw TFE with friendly reminder when single GenTraversable argument is passed and failed` {
val e1 = intercept[TestFailedException] {
all (One(One(Many(3, 2, 1)))) should (not be_== (One(Many(3, 2, 1))) or not contain only (Many(3, 2, 1)))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(One(Many(3, 2, 1))) + " was equal to " + decorateToStringValue(One(Many(3, 2, 1))) + ", and " + decorateToStringValue(One(Many(3, 2, 1))) + " contained only (" + decorateToStringValue(Many(3, 2, 1)) + "), did you forget to say : _*", thisLineNumber - 2, One(One(Many(3, 2, 1)))), fileName, thisLineNumber - 2)
}
}
}
}
| cheeseng/scalatest | scalatest-test/src/test/scala/org/scalatest/EveryShouldContainOnlyLogicalOrSpec.scala | Scala | apache-2.0 | 49,372 |
package net.fwbrasil.zoot.core.filter
import scala.concurrent.Future
import net.fwbrasil.zoot.core.request.Request
import net.fwbrasil.zoot.core.response.Response
trait Filter extends ((Request, (Request => Future[Response[Array[Byte]]])) => Future[Response[Array[Byte]]]) {
self =>
protected type Service = Request => Future[Response[Array[Byte]]]
def apply(input: Request, next: Service): Future[Response[Array[Byte]]]
def andThen(filter: Filter): Filter =
new Filter {
override def apply(input: Request, next: Service) =
self.apply(input, filter(_, next))
}
def andThen(service: Service): Service =
(input: Request) => apply(input, service)
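    // Minimal usage sketch (the names below are illustrative, not part of zoot):
    //
    //   val logging = new Filter {
    //     def apply(input: Request, next: Service) = {
    //       println(s"handling $input")        // side effect before delegating
    //       next(input)                        // pass the request down the chain
    //     }
    //   }
    //   // `backend` stands for any Request => Future[Response[Array[Byte]]]
    //   val app = logging.andThen(backend)     // itself a Request => Future[Response[Array[Byte]]]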
} | fwbrasil/zoot | zoot-core/src/main/scala/net/fwbrasil/zoot/core/filter/Filter.scala | Scala | lgpl-2.1 | 724 |
package notebook.front.widgets
import scala.xml.{NodeSeq, UnprefixedAttribute, Null}
import play.api.libs.json._
import notebook._
import notebook.front._
import notebook.JsonCodec._
import notebook.front.widgets.magic._
trait Images extends Generic with Utils {
import java.awt.image.BufferedImage
import java.io.ByteArrayOutputStream
import javax.imageio.ImageIO
def imageCodec(tpe:String) = new Codec[JsValue, BufferedImage] {
def toBytes(bi:BufferedImage):String = {
val bos = new ByteArrayOutputStream()
ImageIO.write(bi, tpe, bos)
val imageBytes = bos.toByteArray()
val encodedImage = org.apache.commons.codec.binary.Base64.encodeBase64String(imageBytes)
val imageString = "data:image/"+tpe+";base64,"+encodedImage
bos.close()
imageString
}
def decode(a: BufferedImage):JsValue = JsString(toBytes(a))
    def encode(v: JsValue): BufferedImage = ??? // TODO: decoding a BufferedImage back from its JSON representation is not implemented
}
def img(tpe:String="png", width:String="150px", height:String="150px") = new SingleConnectedWidget[BufferedImage] {
implicit val codec:Codec[JsValue, BufferedImage] = imageCodec(tpe)
lazy val toHtml = <p>
<img width={width} height={height} data-bind="attr:{src: value}" />
{
scopedScript(
"""
|req(
|['observable', 'knockout'],
|function (O, ko) {
| ko.applyBindings({
| value: O.makeObservable(valueId)
| },
| this
| );
|});
""".stripMargin,
Json.obj("valueId" -> dataConnection.id)
)
}
</p>
def url(u:java.net.URL) = apply(ImageIO.read(u))
def file(f:java.io.File) = apply(ImageIO.read(f))
}
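  // Usage sketch (hypothetical URL/file, assuming an accessible PNG): the widget pushes the image to the
  // front-end as a base64 data URI bound to the <img> tag above.
  //
  //   val viewer = img(tpe = "png", width = "300px", height = "200px")
  //   viewer.url(new java.net.URL("http://example.com/picture.png"))
  //   viewer.file(new java.io.File("/tmp/picture.png"))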
} | andypetrella/spark-notebook | modules/common/src/main/scala/notebook/front/widgets/Images.scala | Scala | apache-2.0 | 1,794 |
/*
*
 * Optimus
*
* The mathematical programming library for Scala.
*
*/
package optimus.optimization
import optimus.optimization.enums.{ PreSolve, SolutionStatus, SolverLib }
import optimus.optimization.model.{ MPBinaryVar, MPConstraint, MPFloatVar, MPIntVar }
import org.scalatest.{ FunSpec, Matchers }
/**
* Specification for Gurobi solver.
*/
final class GurobiSpecTest extends FunSpec with Matchers {
// Constant objective function tests
describe("Constant Program (1)") {
implicit val cp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar("x", 100, 200)
val y = MPFloatVar("y", 80, 170)
maximize(-5)
add(x >:= 5)
add(y <:= 100)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x should be equal to 200") {
x.value shouldEqual Some(200)
}
it("y should be equal to 80") {
y.value shouldEqual Some(80)
}
it("objective value should be equal to -5") {
objectiveValue shouldEqual -5
}
it("constraints should be satisfied") {
checkConstraints() shouldBe true
}
release()
}
describe("Constant Program (2)") {
implicit val cp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar("x", 100, 200)
val y = MPFloatVar("y", 80, 170)
minimize(-5)
add(x >:= 150)
add(y <:= 100)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x should be equal to 200") {
x.value shouldEqual Some(200)
}
it("y should be equal to 80") {
y.value shouldEqual Some(80)
}
it("objective value should be equal to -5") {
objectiveValue shouldEqual -5
}
it("constraints should be satisfied") {
checkConstraints() shouldBe true
}
release()
}
// Linear objective function tests
describe("Linear Program (1)") {
implicit val lp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar("x", 100, 200)
val y = MPFloatVar("y", 80, 170)
maximize(-2 * x + 5 * y)
subjectTo (
y >:= -x + 200
)
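    // Hand check of the expected optimum: -2x + 5y grows as x shrinks and y grows, so the bounds give
    // x = 100, y = 170; the constraint y >= 200 - x holds there (170 >= 100) and the objective is
    // -2 * 100 + 5 * 170 = 650, matching the assertions below.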
start(PreSolve.CONSERVATIVE)
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x should be equal to 100") {
x.value shouldEqual Some(100)
}
it("y should be equal to 170") {
y.value shouldEqual Some(170)
}
it("objective value should be equal to 650") {
objectiveValue shouldEqual 650
}
it("constraints should be satisfied") {
checkConstraints() shouldBe true
}
release()
}
describe("Linear Program (2)") {
implicit val lp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar("x", 100, 200)
val y = MPFloatVar("y", 80, 170)
minimize(-2 * x + 5 * y)
subjectTo (
y >:= -x + 200
)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x should be equal to 200") {
x.value shouldEqual Some(200)
}
it("y should be equal to 80") {
y.value shouldEqual Some(80)
}
it("objective value should be equal to 0") {
objectiveValue shouldEqual 0
}
it("constraints should be satisfied") {
checkConstraints() shouldBe true
}
release()
}
describe("Linear Program (3)") {
implicit val lp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar("x")
val y = MPFloatVar("y", 80, 170)
minimize(-2 * x + 5 * y)
subjectTo (
y >:= -x + 200
)
start()
    // The problem is unbounded (the objective decreases without limit as x grows); some solvers
    // report this as infeasible rather than unbounded, so both statuses are accepted.
    it("solution should be unbounded or infeasible") {
status should (equal(SolutionStatus.UNBOUNDED) or equal(SolutionStatus.INFEASIBLE))
}
it("x should be None") {
x.value shouldBe None
}
it("y should be None") {
y.value shouldBe None
}
it("constraints should be unsatisfied") {
checkConstraints() shouldBe false
}
release()
}
describe("Linear Program (4)") {
implicit val lp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar("x", 100, 200)
val y = MPFloatVar("y", 80, 170)
minimize(-2 * x + 5 * y)
val z = MPFloatVar("z", 80, 170)
subjectTo (
z >:= 170,
y >:= -x + 200
)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x should be equal to 200") {
x.value shouldEqual Some(200)
}
it("y should be equal to 80") {
y.value shouldEqual Some(80)
}
it("z should be equal to 170") {
z.value shouldEqual Some(170)
}
it("objective value should be equal to 0") {
objectiveValue shouldEqual 0
}
it("constraints should be satisfied") {
checkConstraints() shouldBe true
}
release()
}
describe("Linear Program (5)") {
implicit val lp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar("x", 0, 10)
val y = MPFloatVar("y", 0, 10)
maximize(x + y)
subjectTo (
x + y >:= 5
)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x should be equal to 10") {
x.value shouldEqual Some(10)
}
it("y should be equal to 10") {
y.value shouldEqual Some(10)
}
it("objective value should be equal to 20") {
objectiveValue shouldEqual 20
}
it("constraints should be satisfied") {
checkConstraints() shouldBe true
}
release()
}
describe("Linear Program (6)") {
implicit val lp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar("x", 0, 10)
val y = MPFloatVar("y", 0, 10)
var cons = Vector.empty[MPConstraint]
maximize(x + y)
cons = cons :+ add(x + y >:= 5)
cons = cons :+ add(x + 2 * y <:= 25)
cons = cons :+ add(x + 2 * y <:= 30)
cons = cons :+ add(x + y >:= 17.5)
cons = cons :+ add(x := 10.0)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x should be equal to 10.0") {
x.value.get shouldEqual 10.0 +- 1e-2
}
it("y should be equal to 7.5") {
y.value.get shouldEqual 7.5 +- 1e-2
}
it("objective value should be equal to 17.5") {
objectiveValue shouldEqual 17.5 +- 1e-2
}
it("check constraints") {
cons(0).isTight() shouldBe false
cons(1).isTight() shouldBe true
cons(2).isTight() shouldBe false
cons(3).isTight() shouldBe true
cons(4).isTight() shouldBe true
cons(0).slack.get shouldBe 12.5 +- 1e-2
cons(1).slack.get shouldBe 0.0 +- 1e-2
cons(2).slack.get shouldBe 5.0 +- 1e-2
cons(3).slack.get shouldBe 0.0 +- 1e-2
cons(4).slack.get shouldBe 0.0 +- 1e-2
cons.foreach(_.check() shouldBe true)
}
release()
}
describe("Linear Program (7)") {
implicit val lp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar.positive("x")
val y = MPFloatVar.positive("y")
val z = MPFloatVar.positive("z")
var cons = Vector.empty[MPConstraint]
maximize(2 * x + 4 * y + 3 * z)
cons = cons :+ add(3 * x + 4 * y + 2 * z <:= 60)
cons = cons :+ add(2 * x + y + 2 * z <:= 40)
cons = cons :+ add(x + 3 * y + 2 * z <:= 80)
cons = cons :+ add(x >:= -80)
cons = cons :+ add(y >:= -50)
cons = cons :+ add(z >:= -0.005)
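    // Hand check: at the reported optimum x = 0, y = 20/3, z = 50/3 the first two constraints are tight
    // (4*(20/3) + 2*(50/3) = 60 and 20/3 + 2*(50/3) = 40) and the objective equals 230/3 ~ 76.67.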
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x should be equal to 0.0") {
x.value.get shouldEqual 0.0 +- 1e-2
}
it("y should be equal to 6.67") {
y.value.get shouldEqual 6.67 +- 1e-2
}
it("z should be equal to 16.67") {
z.value.get shouldEqual 16.67 +- 1e-2
}
it("objective value should be equal to 76.67") {
objectiveValue shouldEqual 76.67 +- 1e-2
}
it("check constraints") {
cons(0).isTight() shouldBe true
cons(1).isTight() shouldBe true
cons(2).isTight() shouldBe false
cons(3).isTight() shouldBe false
cons(4).isTight() shouldBe false
cons(5).isTight() shouldBe false
cons(0).slack.get shouldBe 0.0 +- 1e-2
cons(1).slack.get shouldBe 0.0 +- 1e-2
cons(2).slack.get shouldBe 26.67 +- 1e-2
cons(3).slack.get shouldBe 80.0 +- 1e-2
cons(4).slack.get shouldBe 56.67 +- 1e-2
cons(5).slack.get shouldBe 16.67 +- 1e-2
cons.foreach(_.check() shouldBe true)
}
release()
}
describe("Linear Program (8)") {
implicit val lp: MPModel = MPModel(SolverLib.Gurobi)
val w = MPFloatVar.positive("w")
val x = MPFloatVar.positive("x")
val y = MPFloatVar.positive("y")
val z = MPFloatVar.positive("z")
var cons: Vector[MPConstraint] = Vector()
maximize(3 * w - 8 * w + 10 * w + 0.001 * x - (-0.999 * x) - 0.3 * 10 * (-y) - 4 * 0.0006 * 0 * (w - x - z) + 2 * z - 2 * z + 4 * z)
cons = cons :+ add(w + x + y + z <:= 40)
cons = cons :+ add(2 * w + x - y - z >:= 10)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x, y and z should be equal to 0.0") {
x.value.get shouldEqual 0.0 +- 1e-2
y.value.get shouldEqual 0.0 +- 1e-2
z.value.get shouldEqual 0.0 +- 1e-2
}
it("w should be equal to 40.0") {
w.value.get shouldEqual 40.0 +- 1e-2
}
it("objective value should be equal to 200.0") {
objectiveValue shouldEqual 200.0 +- 1e-2
}
it("constraints should be satisfied") {
cons.head.isTight() shouldBe true
cons.last.isTight() shouldBe false
checkConstraints() shouldBe true
}
it("Add a couple of constraints and re-optimize") {
cons = cons :+ add(y >:= w)
cons = cons :+ add(x >:= 15)
start()
w.value.get should equal(12.5 +- 1e-2)
x.value.get should equal(15.0 +- 1e-2)
y.value.get should equal(12.5 +- 1e-2)
z.value.get should equal(0.0 +- 1e-2)
objectiveValue shouldBe (115.0 +- 1e-2)
cons(0).isTight() shouldBe true
cons(1).isTight() shouldBe false
cons(2).isTight() shouldBe true
cons(3).isTight() shouldBe true
status shouldEqual SolutionStatus.OPTIMAL
checkConstraints() shouldBe true
// Constraint: w - 2x + 4y + 3z >:= 40
cons :+= add(-(-w) - 2 * x + 4 * y + 3 * 0.5 * 2 * z >:= 40 - 3 + 2.7 + 0.3)
start()
w.value.get should equal(6.67 +- 1e-2)
x.value.get should equal(15.0 +- 1e-2)
y.value.get should equal(8.33 +- 1e-2)
z.value.get should equal(10.0 +- 1e-2)
objectiveValue shouldBe (113.33 +- 1e-2)
status should equal(SolutionStatus.OPTIMAL)
checkConstraints() shouldBe true
cons(0).isTight() shouldBe true
cons(1).isTight() shouldBe true
cons(2).isTight() shouldBe false
cons(3).isTight() shouldBe true
cons(4).isTight() shouldBe true
release()
}
}
describe("Linear Program (9)") {
implicit val lp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar("x", 0, 10)
maximize(x + 1)
subjectTo (
x <:= 1
)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x should be equal to 1.0") {
x.value shouldEqual Some(1.0)
}
it("objective value should be equal to 2.0") {
objectiveValue shouldEqual 2.0
}
release()
}
// Mixed-integer objective function tests
describe("Mixed-Integer Program (1)") {
implicit val mip: MPModel = MPModel(SolverLib.Gurobi)
val x0 = MPFloatVar("x0", 0, 40)
val x1 = MPIntVar("x1", 0 to 1000)
val x2 = MPIntVar("x2", 0 until 18)
val x3 = MPFloatVar("x3", 2, 3)
maximize(x0 + 2 * x1 + 3 * x2 + x3)
subjectTo(
-1 * x0 + x1 + x2 + 10 * x3 <:= 20,
x0 - 3.0 * x1 + x2 <:= 30,
x1 - 3.5 * x3 := 0
)
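    // Hand check: x1 = 3.5 * x3 with x1 integer and x3 in [2, 3] allows at most x1 = 10 (x3 = 20/7 ~ 2.857);
    // taking x0 = 40 and x2 = 17 keeps both inequalities feasible (-40 + 10 + 17 + 200/7 ~ 15.6 <= 20 and
    // 40 - 30 + 17 = 27 <= 30), so the objective is 40 + 2*10 + 3*17 + 20/7 ~ 113.86.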
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x0 should be equal to 39.9") {
x0.value.get shouldEqual 39.99 +- 1e-2
}
it("x1 should be equal to 10") {
x1.value.get shouldEqual 10
}
it("x2 should be equal to 17") {
x2.value.get shouldEqual 17
}
it("x3 should be equal to 2.85") {
x3.value.get shouldEqual 2.85 +- 1e-2
}
it("objective value should be equal to 113.85") {
objectiveValue shouldEqual 113.85 +- 1e-2
}
release()
}
describe("Mixed-Integer Program (2)") {
implicit val mip: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar("x", 0, 100)
val y = MPIntVar("y", 0 to 100)
maximize(8 * x + 12 * y)
add(10 * x + 20 * y <:= 140)
add(6 * x + 8 * y <:= 72)
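    // Hand check: with the integer y = 3 both constraints become tight at x = 8
    // (10*8 + 20*3 = 140 and 6*8 + 8*3 = 72), giving the expected objective 8*8 + 12*3 = 100.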
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("x should be equal to 8") {
x.value.get shouldEqual 8.0 +- 1e-2
}
it("y should be equal to 3") {
y.value.get shouldEqual 3
}
it("objective value should be equal to 100") {
objectiveValue shouldEqual 100.0 +- 1e-2
}
release()
}
describe("Mixed-Integer Program (3)") {
implicit val mip: MPModel = MPModel(SolverLib.Gurobi)
val x = Array.tabulate(6)(j => MPBinaryVar(s"x$j"))
val z = 3 * x(0) + 5 * x(1) + 6 * x(2) + 9 * x(3) + 10 * x(4) + 10 * x(5)
minimize(z)
add(-2 * x(0) + 6 * x(1) - 3 * x(2) + 4 * x(3) + x(4) - 2 * x(5) >:= 2)
add(-5 * x(0) - 3 * x(1) + x(2) + 3 * x(3) - 2 * x(4) + x(5) >:= -2)
add(5 * x(0) - x(1) + 4 * x(2) - 2 * x(3) + 2 * x(4) - x(5) >:= 3)
it ("all variables should be binary") {
x.foreach(_.isBinary shouldBe true)
}
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("objective value should be equal to 11") {
objectiveValue shouldEqual 11
}
release()
}
// Quadratic objective function tests
describe("Quadratic Program (1)") {
implicit val qp: MPModel = MPModel(SolverLib.Gurobi)
var cons = Vector.empty[MPConstraint]
val x = MPFloatVar.positive("x")
val y = MPFloatVar.positive("y")
minimize(-8 * x - 16 * y + x * x + 4 * y * y)
cons = cons :+ add(x + y <:= 5)
cons = cons :+ add(x <:= 3)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("objective value should be equal to -31") {
objectiveValue shouldEqual -31.0 +- 1e-2
}
it("x should be equal to 3") {
x.value.get shouldEqual 3.0 +- 1e-2
}
it("y should be equal to 2") {
y.value.get shouldEqual 2.0 +- 1.0e-2 // Here gurobi requires +- 1.0e-4
}
it("constraints should be satisfied") {
cons(0).isTight() shouldBe false
cons(1).isTight() shouldBe true
cons(0).slack.get shouldBe 0.0 +- 1.0e-2
cons(1).slack.get shouldBe 0.0 +- 1.0e-2
cons.foreach(_.check() shouldBe true)
}
release()
}
describe("Quadratic Program (2)") {
implicit val qp: MPModel = MPModel(SolverLib.Gurobi)
var cons = Vector.empty[MPConstraint]
val x = MPFloatVar.positive("x")
val y = MPFloatVar.positive("y")
minimize(2 * x * x + y * y + x * y + x + y)
cons = cons :+ add(x + y := 1)
cons = cons :+ add(x >:= -3)
cons = cons :+ add(y >:= -1e-4)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("objective value should be equal to 1.87") {
objectiveValue shouldEqual 1.87 +- 1.0e-2
}
it("x should be equal to 0.25") {
x.value.get shouldEqual 0.25 +- 1e-2
}
it("y should be equal to 0.75") {
y.value.get shouldEqual 0.75 +- 1e-2
}
it("constraints should be satisfied") {
cons(0).isTight() shouldBe true
cons(1).isTight() shouldBe false
cons(2).isTight() shouldBe false
cons(0).slack.get shouldBe 0.0 +- 1e-2
cons(1).slack.get shouldBe 3.25 +- 1e-2
cons(2).slack.get shouldBe 0.75 +- 1e-2
cons.foreach(_.check() shouldBe true)
}
release()
}
describe("Quadratic Program (3)") {
implicit val qp: MPModel = MPModel(SolverLib.Gurobi)
var cons = Vector.empty[MPConstraint]
val x = MPFloatVar.positive("x")
val y = MPFloatVar.positive("y")
minimize(x * x + x * x + y * y - y * y + y * y + 7 * x * y - 6 * y * x + x * x - x * x + x - 99.9e-9 * y + 1.0000000999 * y)
cons = cons :+ add(x + y := 1)
cons = cons :+ add(x >:= -3)
cons = cons :+ add(y >:= -1e-4)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("objective value should be equal to 1.87") {
objectiveValue shouldEqual 1.87 +- 1e-2
}
it("x should be equal to 0.25") {
x.value.get shouldEqual 0.25 +- 1e-2
}
it("y should be equal to 0.75") {
y.value.get shouldEqual 0.75 +- 1e-2
}
it("constraints should be satisfied") {
cons(0).isTight() shouldBe true
cons(1).isTight() shouldBe false
cons(2).isTight() shouldBe false
cons(0).slack.get shouldBe 0.0 +- 1e-2
cons(1).slack.get shouldBe 3.25 +- 1e-2
cons(2).slack.get shouldBe 0.75 +- 1e-2
cons.foreach(_.check() shouldBe true)
}
release()
}
describe("Quadratic Program (4)") {
implicit val qp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar.positive("x")
val y = MPFloatVar.positive("y")
minimize(-8 * x - 16 * y + x * x + 4 * y * y)
subjectTo (
x + y <:= 5,
x <:= 3,
x >:= 0,
y >:= 0
)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("objective value should be equal to -31") {
objectiveValue shouldEqual -31.0 +- 1e-2
}
it("x should be equal to 3") {
x.value.get shouldEqual 3.0 +- 1e-2
}
it("y should be equal to 2") {
y.value.get shouldEqual 2.0 +- 1e-2
}
it("constraints should be satisfied") {
checkConstraints() shouldBe true
}
release()
}
describe("Quadratic Program (5)") {
implicit val qp: MPModel = MPModel(SolverLib.Gurobi)
val w0 = MPFloatVar("w0")
val w1 = MPFloatVar("w1")
val w2 = MPFloatVar("w2")
val slack = MPFloatVar.positive("slack")
minimize(0.5 * (w0 * w0 + w1 * w1 + w2 * w2) + 1000 * slack)
add(-2.0 * w2 + 0.0 >:= -1.0 * slack + 16.0)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("objective value should be equal to 32") {
objectiveValue shouldEqual 32.0 +- 1e-2
}
it("w0 and w1 should be equal to 0") {
w0.value.get shouldEqual 0.0 +- 1e-2
w1.value.get shouldEqual 0.0 +- 1e-2
}
it("w2 should be equal to -8") {
w2.value.get shouldEqual -8.0 +- 1e-2
}
it("slack should be equal to 0") {
slack.value.get shouldEqual 0.0 +- 1e-2
}
it("constraints should be satisfied") {
checkConstraints() shouldBe true
}
it("Add a couple of constraints and re-optimize") {
add(-2.0 * w1 + -2.0 * w0 + 6.0 * w2 + 0.0 >:= -1.0 * slack + 6.0)
start()
status shouldBe SolutionStatus.OPTIMAL
objectiveValue shouldBe 214.25 +- 1e-2
w0.value.get shouldEqual -13.5 +- 1e-2
w1.value.get shouldEqual -13.5 +- 1e-2
w2.value.get shouldEqual -8.0 +- 1e-2
slack.value.get shouldEqual 0.0 +- 1e-2
checkConstraints() shouldBe true
}
}
// Quadratic objective function tests having quadratic constraints
describe("Quadratic Constraint Program (1)") {
implicit val qp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar("x")
val y = MPFloatVar("y", -0.5, 0.5)
maximize(x)
add(x * x + y * y <:= 1)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("objective value should be equal to 1") {
objectiveValue shouldEqual 1.0 +- 1e-2
}
it("x should be equal to 1") {
x.value.get shouldEqual 1.0
}
it("y should be equal to 0") {
y.value.get shouldEqual 0.0
}
release()
}
describe("Quadratic Constraint Program (2)") {
implicit val qp: MPModel = MPModel(SolverLib.Gurobi)
val x = MPFloatVar.positive("x")
val y = MPFloatVar.positive("y")
val z = MPFloatVar.positive("z")
minimize(x * x + 0.1 * y * y + z * z - x * z + y)
add(x + y + z - x * x - y * y - 0.1 * z * z + 0.2 * x * z >:= 1)
start()
it("solution should be optimal") {
status shouldBe SolutionStatus.OPTIMAL
}
it("objective value should be equal to 0.41") {
objectiveValue shouldEqual 0.41 +- 1e-2
}
it("x should be equal to 0.46") {
x.value.get shouldEqual 0.46 +- 1e-2
}
it("y should be equal to 0.01") {
y.value.get shouldEqual 0.01 +- 1e-2
}
it("z should be equal to 0.72") {
z.value.get shouldEqual 0.72 +- 1e-2
}
release()
}
}
| vagm/Optimus | solver-gurobi/src/test/scala/optimus/optimization/GurobiSpecTest.scala | Scala | lgpl-3.0 | 21,729 |
package com.arcusys.valamis.updaters.version320.schema3203
import com.arcusys.valamis.persistence.common.SlickProfile
import com.arcusys.valamis.persistence.common.DbNameUtils._
trait StatementObjectSchema {
self: SlickProfile =>
import driver.api._
type StatementObjectRow = (Long, String)
class StatementObjectsTable(tag: Tag) extends Table[StatementObjectRow](tag, "lrs_statementObjects") {
def key = column[Long]("key", O.PrimaryKey, O.AutoInc)
def objectType = column[String]("objectType", O.SqlType(varCharMax))
def * = (key, objectType)
}
lazy val statementObjects = TableQuery[StatementObjectsTable]
}
| arcusys/Valamis | valamis-updaters/src/main/scala/com/arcusys/valamis/updaters/version320/schema3203/StatementObjectSchema.scala | Scala | gpl-3.0 | 642 |
package org.jetbrains.plugins.scala.editor.documentationProvider
import com.intellij.psi.PsiFile
import org.jetbrains.plugins.scala.ScalaFileType
abstract class ScalaDocumentationProviderTestBase extends DocumentationProviderTestBase
with ScalaDocumentationsSectionsTesting {
override protected def documentationProvider = new ScalaDocumentationProvider
override protected def createFile(fileContent: String): PsiFile =
getFixture.configureByText(ScalaFileType.INSTANCE, fileContent)
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/editor/documentationProvider/ScalaDocumentationProviderTestBase.scala | Scala | apache-2.0 | 499 |
package scavlink.link.nav
import akka.actor.Status.Failure
import scavlink.link.Vehicle
import scavlink.link.channel.ChannelTellAPI._
import scavlink.link.fence.{FenceActor, FenceBreach}
import scavlink.link.nav.NavTellAPI._
import scavlink.link.operation._
import scavlink.link.telemetry.Telemetry
import scavlink.message.Mode
import scavlink.state.{ChannelState, LocationState}
import scala.concurrent.duration._
/**
* @param targetHeight climb to height in meters
* @param initThrottle initial throttle value
* @param targetThrottle throttle target value
* @param throttleRampRate ramp throttle %/sec
*/
case class RotorTakeoff(targetHeight: Double, initThrottle: Double, targetThrottle: Double, throttleRampRate: Double)
extends NavOp {
require(targetHeight > 0, "Target height must be > 0")
  require(initThrottle >= 0 && initThrottle <= 50, "Initial throttle must be between 0 and 50")
require(targetThrottle > 50 && targetThrottle <= 100, "Target throttle must be between 50 and 100")
require(throttleRampRate > 0, "Ramp rate must be > 0")
val actorType = classOf[RotorTakeoffActor]
}
object RotorTakeoff {
val Gentle = RotorTakeoff(2, 40, 56, 2)
val Hard = RotorTakeoff(2, 50, 70, 4)
}
case class RotorTakeoffResult(vehicle: Vehicle, op: RotorTakeoff, finalHeight: Double) extends OpResult
case class RotorTakeoffFailed(vehicle: Vehicle, op: RotorTakeoff, error: RotorTakeoffError.Value, message: String) extends OpException
object RotorTakeoffError extends Enumeration {
val BadVehicleType, ArmFailure, SetLoiterFailure, FenceBreach = Value
}
/**
* Executes a controlled takeoff of a rotor vehicle:
* (1) ramp up the throttle at a specified rate to the 50% level
* (2) wait 3 seconds for motors to spin up
* (3) ramp up the throttle to the specified target level
* (4) maintain the throttle at the target value until the vehicle reaches a target altitude, then
* (5) reduce the throttle to 50 to maintain altitude.
*
* With conservative parameters, this gives the motors time to spin up in unison
* before the vehicle leaves the ground, followed by a moderated ascent.
* See RotorTakeoff.Gentle.
*
* If vehicle is already at or above the target height, throttle is not adjusted
* and the operation returns success.
* @author Nick Rossi
*/
class RotorTakeoffActor(vehicle: Vehicle) extends VehicleOpActor[RotorTakeoff](vehicle) {
import context.dispatcher
case class ThrottleValue(throttle: Double) extends OpData
case class SetThrottle(setPoint: Double)
case object SetLoiter extends OpState
case object FirstLocation extends OpState
case object Arm extends OpState
case object StartThrottle extends OpState
case object RampThrottle extends OpState
// FSM states
when(Idle) {
case Event(op: RotorTakeoff, Uninitialized) =>
start(op, sender())
if (!vehicle.info.typeInfo.isRotor) {
stop using Finish(RotorTakeoffFailed(vehicle, op, RotorTakeoffError.BadVehicleType, "Only for rotor vehicles"))
}
vehicle.setModeToHeartbeat(Mode.Loiter)
goto(SetLoiter)
}
when(SetLoiter) {
case Event(_: ConversationSucceeded, _) =>
link.events.subscribe(self, Telemetry.subscribeTo(vehicle.id, Set(classOf[ChannelState], classOf[LocationState])))
goto(FirstLocation)
case Event(Failure(_: ConversationFailed), _) =>
stop using Finish(RotorTakeoffFailed(vehicle, op, RotorTakeoffError.SetLoiterFailure, "Unable to set Loiter mode"))
}
// if already at or above target height, end successfully before doing anything
when(FirstLocation) {
case Event(Telemetry(_, state: LocationState, _), _) =>
if (state.location.alt >= op.targetHeight) {
stop using Finish(RotorTakeoffResult(vehicle, op, state.location.alt))
} else {
vehicle.armMotors(true)
goto(Arm)
}
}
when(Arm) {
case Event(_: ConversationSucceeded, _) =>
goto(StartThrottle)
case Event(Failure(f: ConversationFailed), _) =>
stop using Finish(RotorTakeoffFailed(vehicle, op, RotorTakeoffError.ArmFailure, "Unable to arm"))
}
when(StartThrottle) {
case Event(Telemetry(_, state: ChannelState, _), _) =>
self ! SetThrottle(math.max(state.throttle, op.initThrottle))
link.events.subscribe(self, FenceActor.subscribeToBreach(vehicle))
goto(RampThrottle)
}
when(RampThrottle) {
case Event(SetThrottle(t), _) =>
log.debug(s"Setting throttle to $t")
vehicle.setThrottle(t)
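      // throttleRampRate is expressed in %/sec and the ramp ticks every 250 ms,
      // so each step advances by a quarter-second's worth of the configured rate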
val inc = op.throttleRampRate * .25
val nt = math.min(t + inc, op.targetThrottle)
// if we're crossing 50, hold at 50 for 3 seconds
val newThrottle = if (t < 50 && nt >= 50) 50 else nt
if (newThrottle != t) {
val delay = if (t == 50) 3.seconds else 250.milliseconds
context.system.scheduler.scheduleOnce(delay, self, SetThrottle(newThrottle))
}
stay using ThrottleValue(newThrottle)
case Event(Telemetry(_, state: LocationState, _), _) if state.location.alt >= op.targetHeight =>
vehicle.setThrottle(50)
stop using Finish(RotorTakeoffResult(vehicle, op, state.location.alt))
case Event(breach: FenceBreach, _) =>
stop using Finish(RotorTakeoffFailed(vehicle, op, RotorTakeoffError.FenceBreach,
s"Fence breach: ${ breach.fences.mkString(",") }"))
case _ => stay()
}
/**
   * If the actor stops unexpectedly while the throttle is still ramped above hover,
   * bring the throttle back down to 50% so the vehicle holds altitude.
*/
override def unexpectedStop(state: OpState, data: OpData): Unit = (state, data) match {
case (RampThrottle, ThrottleValue(t)) => if (t > 50) vehicle.setThrottle(50)
case _ => //
}
}
| nickolasrossi/scavlink | src/main/scala/scavlink/link/nav/RotorTakeoff.scala | Scala | mit | 5,662 |
/*
* Copyright (c) 2017-2021, Robby, Kansas State University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sireum.$internal
import scala.collection.mutable.Map
object Trie {
sealed trait Node[K, V]
final case class InNode[K, V](children: Map[K, Node[K, V]]) extends Node[K, V]
final case class Leaf[K, V](data: V) extends Node[K, V]
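  // Illustrative shape only: a trie mapping the key path "a" -> "b" to a leaf value, e.g.
  //   InNode[String, Int](Map("a" -> InNode[String, Int](Map("b" -> Leaf[String, Int](42)))))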
}
| sireum/v3-logika-runtime | macros/shared/src/main/scala/org/sireum/$internal/Trie.scala | Scala | bsd-2-clause | 1,644 |
/*
* Copyright (C) 2014 Szu-Hsien Lee ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.a30corner.twculture.server
import spray.json._
import java.io.{PrintWriter, File}
import scala.io.Source
case class Extra(data: List[Data], paging: Paging)
case class Data(ids: String,
iconImageUrl: Option[String],
imageUrl: String,
showInfoList:List[ShowInfoList]
)
case class ShowInfoList(cityId: Int,location:Option[String])
case class Paging(currentPage: Int, pageCount: Int, recordCount: Int)
object MyJsonProtocol extends DefaultJsonProtocol {
implicit val f0 = jsonFormat2(ShowInfoList)
implicit val f1 = jsonFormat4(Data)
implicit object PagingJsonFormat extends RootJsonFormat[Paging] {
def write(p: Paging) = JsObject(
"currentPage" -> JsString(p.currentPage.toString),
"pageCount" -> JsString(p.pageCount.toString),
"recordCount" -> JsString(p.recordCount.toString)
)
def read(value: JsValue) = {
value.asJsObject.getFields("currentPage", "pageCount", "recordCount") match {
case Seq(JsString(cur), JsString(pcount), JsString(rcount)) =>
new Paging(cur.toInt, pcount.toInt, rcount.toInt)
case _ => throw new DeserializationException("Paging expected")
}
}
}
  implicit val f3 = jsonFormat2(Extra) // Extra must be defined after the Data & Paging formats...
}
object ExtraData {
import spray.json._
import MyJsonProtocol._
import OpenData._
def log(s: String): String = {
println(s)
s
}
def url(page: Int, count: Int): String =
log(s"$jsonpath?method=doSearch2&siteId=101&iscancel=true&sortBy=dataImportDate¤tPage=$page&recordCount=$count")
private def extract(page: Int, count: Int): Extra =
fetchHtml(url(page, count)).asJson.convertTo[Extra]
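  // Lazily pages through the endpoint: #:: defers the recursive call, so each page is fetched
  // only when the stream is actually consumed, and recursion stops once page exceeds pageCount.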
def extradata(page: Int, pcount: Int = 100, rcount: Int = 0): Stream[List[Data]] = {
require(page > 0)
println(s"extradata=> $page / $pcount")
if (page > pcount) {
Stream.empty
} else {
val a = extract(page, rcount)
a.data #:: extradata(a.paging.currentPage + 1, a.paging.pageCount, a.paging.recordCount)
}
}
def write(f:File): File = {
val writer = new PrintWriter(f)
val c = for {
s <- ExtraData.extradata(1)
a <- s
} yield a.toJson
writer.write("[")
writer.write(c.mkString(","))
writer.write("]")
writer.close()
f
}
def read(f: File): List[Data] = {
val a = Source.fromFile(f).mkString.asJson.convertTo[List[Data]]
a.filter(b => b.iconImageUrl.getOrElse("") != "" || b.imageUrl != "")
}
def toMap(d: List[Data]): Map[String, Data] =
d.groupBy(_.ids).map {
case (k, v) => k -> v.head
}
}
| misgod/twculture | server/src/main/scala/com/a30corner/twculture/server/ExtraData.scala | Scala | apache-2.0 | 3,269 |
package japgolly.scalajs
import org.scalajs.dom, dom.html
import scala.scalajs.js
import js.{Dynamic, UndefOr, Object, Any => JAny, Function => JFn}
package object react extends ReactEventAliases {
type TopNode = dom.Element
/**
* These exist for type inference.
* If P,S,B,N types are needed and there's another object that has them, this is used to bridge for type inference.
*/
trait ReactComponentTypeAux[P, S, +B, +N <: TopNode]
trait ReactComponentTypeAuxJ[P, S, +B, +N <: TopNode] extends js.Object
implicit def reactComponentTypeAuxJ[P, S, B, N <: TopNode](a: ReactComponentTypeAuxJ[P,S,B,N]): ReactComponentTypeAux[P,S,B,N] =
a.asInstanceOf[ReactComponentTypeAux[P,S,B,N]]
// ===================================================================================================================
// TODO WrapObj was one of the first things I did when starting with ScalaJS. Reconsider.
/** Allows Scala classes to be used in place of `Object`. */
trait WrapObj[+A] extends Object { val v: A = js.native }
def WrapObj[A](v: A) =
Dynamic.literal("v" -> v.asInstanceOf[JAny]).asInstanceOf[WrapObj[A]]
@inline implicit final class ReactExt_ScalaColl[A](private val _as: TraversableOnce[A]) extends AnyVal {
@inline def toJsArray: js.Array[A] =
js.Array(_as.toSeq: _*)
@inline def toReactNodeArray(implicit ev: A => ReactNode): js.Array[ReactNode] = {
val r = new js.Array[ReactNode]()
_as.foreach(a => r.push(ev(a)))
r
}
}
@inline implicit final class ReactExt_JsArray[A](private val _as: js.Array[A]) extends AnyVal {
@inline def toReactNodeArray(implicit ev: A => ReactNode): js.Array[ReactNode] =
_as.map(ev: js.Function1[A, ReactNode])
}
@inline implicit def reactNodeInhabitableL (v: Long) : ReactNode = v.toString.asInstanceOf[ReactNode]
@inline implicit def reactNodeInhabitableI (v: Int) : ReactNode = v.asInstanceOf[ReactNode]
@inline implicit def reactNodeInhabitableSh (v: Short) : ReactNode = v.asInstanceOf[ReactNode]
@inline implicit def reactNodeInhabitableB (v: Byte) : ReactNode = v.asInstanceOf[ReactNode]
@inline implicit def reactNodeInhabitableD (v: Double) : ReactNode = v.asInstanceOf[ReactNode]
@inline implicit def reactNodeInhabitableF (v: Float) : ReactNode = v.asInstanceOf[ReactNode]
@inline implicit def reactNodeInhabitableS (v: String) : ReactNode = v.asInstanceOf[ReactNode]
@inline implicit def reactNodeInhabitableAn (v: js.Array[ReactNode]): ReactNode = v.asInstanceOf[ReactNode]
@inline implicit def reactNodeInhabitableAt[T <% ReactNode](v: js.Array[T]) : ReactNode = v.toReactNodeArray
@inline implicit def reactNodeInhabitableC [T <% ReactNode](v: TraversableOnce[T]) : ReactNode = v.toReactNodeArray
@inline implicit def reactNodeInhabitablePC (v: PropsChildren) : ReactNode = v.asInstanceOf[ReactNode]
// ===================================================================================================================
@inline final implicit def autoJsCtor[P,S,B,N <: TopNode](c: ReactComponentC[P,S,B,N]): ReactComponentC_ = c.jsCtor
// ===================================================================================================================
@inline implicit def autoUnWrapObj[A](a: WrapObj[A]): A = a.v
@inline implicit final class ReactExt_Any[A](private val _a: A) extends AnyVal {
@inline def wrap: WrapObj[A] = WrapObj(_a)
}
@inline implicit final class ReactExt_ReactObj(private val _r: React.type) extends AnyVal {
@inline def renderC[P, S, B, N <: TopNode](c: ReactComponentU[P,S,B,N], n: dom.Node)(callback: ComponentScopeM[P,S,B,N] => Unit) =
_r.render(c, n, callback)
}
@inline implicit final class ReactExt_ComponentScope_P[Props](private val _c: ComponentScope_P[Props]) extends AnyVal {
@inline def props = _c._props.v
@inline def propsChildren = _c._props.children
@inline def propsDynamic = _c._props.asInstanceOf[js.Dynamic]
}
@inline implicit final class ReactExt_ComponentScope_PS[Props, State](private val _c: ComponentScope_PS[Props, State]) extends AnyVal {
@inline def getInitialState(p: Props): State = _c._getInitialState(WrapObj(p)).v
}
@inline implicit final class ReactExt_ComponentScope_S[State](private val _c: ComponentScope_S[State]) extends AnyVal {
@inline def state = _c._state.v
}
val preventDefaultF = (_: SyntheticEvent[dom.Node]).preventDefault()
val stopPropagationF = (_: SyntheticEvent[dom.Node]).stopPropagation()
@inline implicit final class ReactExt_ReactComponentU[P,S,B,N <: TopNode](private val _c: ReactComponentU[P,S,B,N]) extends AnyVal {
def render(n: dom.Node) = React.render(_c, n)
}
@inline implicit final class ReactExt_ReactDOMElement(private val _v: ReactDOMElement) extends AnyVal {
@inline def typ = _v.`type`
}
@inline implicit final class ReactExt_ReactComponentU_(private val _v: ReactComponentU_) extends AnyVal {
@inline def dynamic = this.asInstanceOf[Dynamic]
}
@inline implicit final class ReactExt_UndefReactComponentM[N <: TopNode](private val _u: UndefOr[ReactComponentM_[N]]) extends AnyVal {
def tryFocus(): Unit = _u.foreach(_.getDOMNode() match {
case e: html.Element => e.focus()
case _ =>
})
}
@inline implicit final class ReactExt_ReactComponentM[N <: TopNode](private val _c: ReactComponentM_[N]) extends AnyVal {
@inline def domType[N2 <: TopNode] = _c.asInstanceOf[ReactComponentM_[N2]]
}
@inline implicit final class ReactExt_PropsChildren(private val _c: PropsChildren) extends AnyVal {
@inline def forEach[U](f: ReactNode => U): Unit =
React.Children.forEach(_c, (f:JFn).asInstanceOf[js.Function1[ReactNode, JAny]])
@inline def forEach[U](f: (ReactNode, Int) => U): Unit =
React.Children.forEach(_c, (f:JFn).asInstanceOf[js.Function2[ReactNode, Int, JAny]])
@inline def only: Option[ReactNode] =
try { Some(React.Children.only(_c))} catch { case t: Throwable => None}
}
// ===================================================================================================================
type OpCallback = UndefOr[() => Unit]
@inline implicit def toCompStateAccessOps[C, S](c: C)(implicit a: CompStateAccess[C, S]) =
new CompStateAccess.Ops[C, S](c)
@inline implicit def autoFocusEntireState[C, S](c: C)(implicit a: CompStateAccess[C, S]): CompStateFocus[S] =
c.lift
}
| beni55/scalajs-react | core/src/main/scala/japgolly/scalajs/react/package.scala | Scala | apache-2.0 | 6,673 |
package io.mem0r1es.trank.ranking
import java.net.URI
trait RankingAlgo {
def rank(entityTypes: Map[URI, HierInfo]): Seq[(URI, Double)]
} | MEM0R1ES/TRank | src/main/scala/io/mem0r1es/trank/ranking/RankingAlgo.scala | Scala | apache-2.0 | 142 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming.eventhubs
import scala.reflect.ClassTag
import org.apache.spark.eventhubscommon.utils.EventHubsTestUtilities
import org.apache.spark.sql.execution.streaming._
/** A trait for actions that can be performed while testing a streaming DataFrame. */
trait StreamAction
case class EventHubsAddDataMemory[A](source: MemoryStream[A], data: Seq[A])
extends EventHubsAddData {
override def toString: String = s"AddData to $source: ${data.mkString(",")}"
override def addData(query: Option[StreamExecution]): (Source, Offset) = {
(source, source.addData(data))
}
}
/**
* Adds the given data to the stream. Subsequent check answers will block
* until this data has been processed.
*/
object EventHubsAddData {
def apply[A](source: MemoryStream[A], data: A*): EventHubsAddDataMemory[A] =
EventHubsAddDataMemory(source, data)
}
/** A trait that can be extended when testing a source. */
trait EventHubsAddData extends StreamAction with Serializable {
/**
   * Called to add data to a source. It should find the source to add data to from
   * the active query, and then return the source object the data was added to, as well as
   * the offset of the added data.
*/
def addData(query: Option[StreamExecution]): (Source, Offset)
}
case class AddEventHubsData[T: ClassTag, U: ClassTag](
eventHubsParameters: Map[String, String],
highestBatchId: Long = 0,
eventPayloadsAndProperties: Seq[(T, Seq[U])] = Seq.empty[(T, Seq[U])])
extends EventHubsAddData {
override def addData(query: Option[StreamExecution]): (Source, Offset) = {
val sources = query.get.logicalPlan.collect {
case StreamingExecutionRelation(source, _) if source.isInstanceOf[EventHubsSource] =>
source.asInstanceOf[EventHubsSource]
}
if (sources.isEmpty) {
throw new Exception(
"Could not find EventHubs source in the StreamExecution logical plan to add data to")
} else if (sources.size > 1) {
throw new Exception(
"Could not select the EventHubs source in the StreamExecution logical plan as there" +
"are multiple EventHubs sources:\\n\\t" + sources.mkString("\\n\\t"))
}
val eventHubsSource = sources.head
val eventHubs = EventHubsTestUtilities.getOrSimulateEventHubs(eventHubsParameters)
EventHubsTestUtilities.addEventsToEventHubs(eventHubs, eventPayloadsAndProperties)
val highestOffsetPerPartition = EventHubsTestUtilities.getHighestOffsetPerPartition(eventHubs)
val targetOffsetPerPartition = highestOffsetPerPartition.map{
case (ehNameAndPartition, (offset, _, _)) => (ehNameAndPartition, offset)}
val eventHubsBatchRecord = EventHubsBatchRecord(highestBatchId, targetOffsetPerPartition)
(eventHubsSource, eventHubsBatchRecord)
}
}
| CodingCat/spark-eventhubs | core/src/test/scala/org/apache/spark/sql/streaming/eventhubs/EventHubsAddData.scala | Scala | apache-2.0 | 3,593 |
package controllers.authentication
import javax.inject.Inject
import com.mohiva.play.silhouette.api.{ Environment, LogoutEvent, Silhouette }
import com.mohiva.play.silhouette.impl.authenticators.{JWTAuthenticator, CookieAuthenticator}
import com.mohiva.play.silhouette.impl.providers.SocialProviderRegistry
import models.tenant.Crew
import play.api.i18n.{Messages, MessagesApi}
import play.api.libs.json.Json
import play.api.mvc.{Action, AnyContent}
import scala.concurrent.Future
class CookieCredentialsTestController @Inject()(
val messagesApi: MessagesApi,
val env: Environment[Crew, CookieAuthenticator],
socialProviderRegistry: SocialProviderRegistry
)
extends Silhouette[Crew, CookieAuthenticator]
{
def index: Action[AnyContent] = SecuredAction.async { implicit request =>
Future.successful(Ok(Json.obj("message" -> Messages("authentication.successful"))))
}
}
| SBP07/backend | app/controllers/authentication/CookieCredentialsTestController.scala | Scala | gpl-2.0 | 1,070 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.TensorModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
* Applies the Sigmoid function element-wise to the input Tensor,
* thus outputting a Tensor of the same dimension.
* Sigmoid is defined as: f(x) = 1 / (1 + exp(-x))
*/
@SerialVersionUID(6855417348268610044L)
class Sigmoid[@specialized(Float, Double) T: ClassTag](
implicit ev: TensorNumeric[T]) extends TensorModule[T] {
override def updateOutput(input: Tensor[T]): Tensor[T] = {
output.resizeAs(input)
output.map(input, (_, i) => ev.divide(ev.fromType[Int](1), ev.plus(ev.fromType[Int](1),
ev.exp(ev.negative(i)))))
output
}
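  // Backward pass: d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)), so the input gradient
  // is gradOutput * (1 - output) * output, reusing the forward output.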
override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
gradInput.resizeAs(input)
gradInput.copy(gradOutput)
gradInput.map(output, (g, z) => ev.times(ev.times(g, ev.minus(ev.fromType[Int](1), z)), z))
gradInput
}
}
object Sigmoid {
def apply[@specialized(Float, Double) T: ClassTag]()
(implicit ev: TensorNumeric[T]) : Sigmoid[T] = {
new Sigmoid[T]()
}
}
| psyyz10/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/Sigmoid.scala | Scala | apache-2.0 | 1,832 |
package nova.scala.wrapper
import java.util.{List => JList}
import com.google.common.base.CaseFormat
/**
* @author Calclavia
*/
object StringWrapper {
implicit class WrappedString(str: String) {
def toCamelCase: String = str.toPascalCase.decapitalizeFirst
def decapitalizeFirst: String = str.substring(0, 1).toLowerCase + str.substring(1)
def toPascalCase: String = {
val parts: Array[String] = str.split("_")
var camelCaseString: String = ""
for (part <- parts) {
camelCaseString = camelCaseString + (part.toProperCase)
}
return camelCaseString
}
def toProperCase: String = str.substring(0, 1).toUpperCase + str.substring(1).toLowerCase
def camelToLowerUnderscore: String = CaseFormat.LOWER_CAMEL.to(CaseFormat.LOWER_UNDERSCORE, str)
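    // Splits camel case on three zero-width boundaries: acronym end before a capitalised word
    // (e.g. "XMLParser" -> "XML Parser"), lower-to-upper transitions (e.g. "camelCase" -> "camel Case"),
    // and letter/non-letter transitions (e.g. "version2" -> "version 2"), then capitalises the first character.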
def camelToReadable: String = str.replaceAll(String.format("%s|%s|%s", "(?<=[A-Z])(?=[A-Z][a-z])", "(?<=[^A-Z])(?=[A-Z])", "(?<=[A-Za-z])(?=[^A-Za-z])"), " ").capitalizeFirst
def capitalizeFirst: String = str.substring(0, 1).toUpperCase + str.substring(1)
def underscoreToCamel: String = CaseFormat.LOWER_UNDERSCORE.to(CaseFormat.LOWER_CAMEL, str)
}
}
| SoniEx2/NOVA-Scala | src/main/scala/nova/scala/wrapper/StringWrapper.scala | Scala | lgpl-3.0 | 1,141 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.apigateway.util
import play.api.libs.json.Json
import play.api.mvc._
import play.api.test.FakeRequest
import uk.gov.hmrc.apigateway.util.HttpHeaders._
import uk.gov.hmrc.play.http.HeaderNames.xRequestId
import uk.gov.hmrc.play.test.UnitSpec
import scala.xml.XML
class PlayRequestUtilsSpec extends UnitSpec {
"bodyOf" should {
"return the body of a json request" in {
val json = """{"body":"test"}"""
val request = FakeRequest().withBody(AnyContentAsJson(Json.parse(json)))
val result = PlayRequestUtils.bodyOf(request)
result shouldBe Some(json)
}
"return the body of a text request" in {
val request = FakeRequest().withBody(AnyContentAsText("text"))
val result = PlayRequestUtils.bodyOf(request)
result shouldBe Some("text")
}
"return the body of a xml request" in {
val xml = """<xml>test</xml>"""
val request = FakeRequest().withBody(AnyContentAsXml(XML.loadString(xml)))
val result = PlayRequestUtils.bodyOf(request)
result shouldBe Some(xml)
}
"return None when no valid body" in {
val xml = """<xml>test</xml>"""
val request = FakeRequest().withBody(AnyContentAsEmpty)
val result = PlayRequestUtils.bodyOf(request)
result shouldBe None
}
}
"asMapOfSeq" should {
"convert a simple seq into a map" in {
val res = PlayRequestUtils.asMapOfSets(Seq("A" -> "aaa", "B" -> "bbb"))
res shouldBe Map("A" -> Set("aaa"), "B" -> Set("bbb"))
}
"convert a complex seq into a map" in {
val res = PlayRequestUtils.asMapOfSets(Seq(
"A" -> "aaa",
"B" -> "bbb",
"B" -> "yyy,qqq",
"A" -> "xxx, zzz"
))
res shouldBe Map("A" -> Set("aaa", "xxx, zzz"), "B" -> Set("bbb", "yyy,qqq"))
}
}
"replaceHeaders" should {
val headers = Headers(xRequestId -> "requestId")
"add new headers" in {
val res = PlayRequestUtils.replaceHeaders(headers)((X_CLIENT_ID, Some("clientId")))
res.headers shouldBe Seq((xRequestId, "requestId"), (X_CLIENT_ID, "clientId"))
}
"replace existing headers" in {
val res = PlayRequestUtils.replaceHeaders(headers)((xRequestId, Some("newRequestId")))
res.headers shouldBe Seq((xRequestId, "newRequestId"))
}
"remove headers" in {
val res = PlayRequestUtils.replaceHeaders(headers)((xRequestId, None))
res.headers shouldBe Seq()
}
"leave headers intact" in {
val res = PlayRequestUtils.replaceHeaders(headers)()
res.headers shouldBe Seq((xRequestId, "requestId"))
}
}
}
| hmrc/api-gateway | test/uk/gov/hmrc/apigateway/util/PlayRequestUtilsSpec.scala | Scala | apache-2.0 | 3,215 |
package texteditor.imperative
import scala.math.min
import scala.swing.BorderPanel.Position
import scala.swing.{BorderPanel, Button, Dimension, GridPanel, Label, MainFrame, ScrollPane, SimpleSwingApplication}
import scala.swing.event.ButtonClicked
object ApplicationSwingTextArea extends SimpleSwingApplication {
import scala.swing.TextArea
import scala.swing.event.{CaretUpdate, ValueChanged}
// imperative components
val textArea = new TextArea("Lorem ipsum dolor sit amet\\nconsectetur adipisicing elit\\nsed do eiusmod")
textArea.reactions += {
case e @ ValueChanged(_) => countLabel.text = "Ch " + textArea.text.length()
}
textArea.caret.reactions += {
case e @ CaretUpdate(_) => {
val pos = min(textArea.caret.position, textArea.text.length())
val line = textArea.peer.getLineOfOffset(pos);
val col = pos - textArea.peer.getLineStartOffset(line);
positionLabel.text = "Ln " + (line + 1) + " : " + textArea.lineCount + " Col " + (col + 1)
selectionLabel.text = "Sel " + (if (textArea.selected != null) textArea.selected.length else 0)
}
}
val positionLabel = new Label
val selectionLabel = new Label
val countLabel = new Label
val selectAllButton = new Button("Select All")
selectAllButton.reactions += {
case e @ ButtonClicked(_) => textArea.selectAll; textArea.requestFocus
}
val copyButton = new Button("Copy")
copyButton.reactions += {
case e @ ButtonClicked(_) => textArea.copy; textArea.requestFocus
}
val pasteButton = new Button("Paste")
pasteButton.reactions += {
case e @ ButtonClicked(_) => textArea.paste; textArea.requestFocus
}
// trigger initial events manually
textArea.reactions(new ValueChanged(textArea))
textArea.caret.reactions(new CaretUpdate(textArea))
// layout
def top = new MainFrame {
preferredSize = new Dimension(400, 400)
contents = new BorderPanel {
layout(new ScrollPane(textArea)) = Position.Center
layout(new GridPanel(1, 0) {
contents += selectAllButton
contents += copyButton
contents += pasteButton
}) = Position.North
layout(new GridPanel(1, 0) {
contents += positionLabel
contents += selectionLabel
contents += countLabel
}) = Position.South
}
}
}
| volkc/REScala | Examples/Editor/src/main/scala/texteditor/imperative/ApplicationSwingTextArea.scala | Scala | apache-2.0 | 2,301 |
package barneshut
import java.awt._
import java.awt.event._
import javax.swing._
import javax.swing.event._
import scala.collection.parallel.{TaskSupport, defaultTaskSupport}
class SimulationModel {
var screen = new Boundaries
var bodies: Seq[Body] = Nil
var quad: Quad = Empty(screen.centerX, screen.centerY, Float.MaxValue)
var shouldRenderQuad = false
var timeStats = new TimeStatistics
var taskSupport: TaskSupport = defaultTaskSupport
def initialize(parallelismLevel: Int, pattern: String, totalBodies: Int) {
taskSupport = new collection.parallel.ForkJoinTaskSupport(
new scala.concurrent.forkjoin.ForkJoinPool(parallelismLevel))
pattern match {
case "two-galaxies" => init2Galaxies(totalBodies)
case _ => sys.error(s"no such initial pattern: $pattern")
}
}
def init2Galaxies(totalBodies: Int) {
val bodyArray = new Array[Body](totalBodies)
val random = new scala.util.Random(213L)
def galaxy(from: Int, num: Int, maxradius: Float, cx: Float, cy: Float, sx: Float, sy: Float) {
val totalM = 1.5f * num
val blackHoleM = 1.0f * num
val cubmaxradius = maxradius * maxradius * maxradius
for (i <- from until (from + num)) {
val b = if (i == from) {
new Body(blackHoleM, cx, cy, sx, sy)
} else {
val angle = random.nextFloat * 2 * math.Pi
val radius = 25 + maxradius * random.nextFloat
val starx = cx + radius * math.sin(angle).toFloat
val stary = cy + radius * math.cos(angle).toFloat
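        // circular-orbit speed: gravity of the central black hole (G*M_bh/r) plus that of the
        // uniformly distributed galaxy mass enclosed within radius (G*totalM*r^2/maxradius^3)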
val speed = math.sqrt(gee * blackHoleM / radius + gee * totalM * radius * radius / cubmaxradius)
val starspeedx = sx + (speed * math.sin(angle + math.Pi / 2)).toFloat
val starspeedy = sy + (speed * math.cos(angle + math.Pi / 2)).toFloat
val starmass = 1.0f + 1.0f * random.nextFloat
new Body(starmass, starx, stary, starspeedx, starspeedy)
}
bodyArray(i) = b
}
}
galaxy(0, bodyArray.length / 10, 400.0f, -1200.0f, -800.0f, 0.0f, 0.0f)
galaxy(bodyArray.length / 10, bodyArray.length / 10 * 2, 500.0f, -600.0f, -800.0f, 0.0f, 0.0f)
galaxy(bodyArray.length / 10 * 3, bodyArray.length / 10 * 3, 600.0f, -1200.0f, -400.0f, 0.0f, 0.0f)
galaxy(bodyArray.length / 10 * 6, bodyArray.length / 10 * 4, 700.0f, -600.0f, -400.0f, 0.0f, 0.0f)
bodies = bodyArray.toSeq
// compute center and boundaries
screen = new Boundaries
screen.minX = -2200.0f
screen.minY = -1600.0f
screen.maxX = 350.0f
screen.maxY = 350.0f
}
}
| dnc1994/FuncScala | parprog1/barneshut/src/main/scala/barneshut/SimulationModel.scala | Scala | mit | 2,578 |
package de.johanneswachter.projects.db4o
import com.db4o._
import model._
object InitDB {
def main(args : Array[String]) : Unit = {
val db = Db4o.openFile("test-dsl-simpsons.db")
db store Person("Bart","Simpson", 10)
db store Person("Lisa","Simpson", 8)
db store Person("Marge","Simpson", 34)
db store Person("Maggie","Simpson", 2)
db store Person("Homer","Simpson", 36)
db store Person("Ned","Flanders", 30)
db store Person("Rod","Flanders", 6)
db store Person("Todd","Flanders", 8)
db store Person("Ralph","Wiggum", 8)
db store Person("Martin","Prince", 7)
db store Person("Milhouse","van Houten", 8)
db store Person("Jimbo","Jones", 12)
db store Person("Seymour","Skinner", 46)
}
}
| jwachter/scala-db4o-dsl | src/de/johanneswachter/projects/db4o/InitDB.scala | Scala | apache-2.0 | 767 |
package com.yammer.dropwizard.examples
import javax.ws.rs._
import core.Response.Status
import core.{Response, MediaType}
import com.yammer.metrics.annotation.Timed
@Path("/hello-world")
@Produces(Array(MediaType.APPLICATION_JSON))
class HelloWorldResource(saying: String) {
@GET
@Timed
def sayHello(@QueryParam("opt") opt: Option[String]) = Seq(saying)
@POST
@Timed
def intentionalError = Response.status(Status.BAD_REQUEST).build()
@PUT
@Timed
def unintentionalError = None.get
}
| ericholscher/dropwizard | dropwizard-scala_2.9.1/src/test/scala/com/yammer/dropwizard/examples/HelloWorldResource.scala | Scala | apache-2.0 | 504 |
package org.jetbrains.plugins.scala.lang.completion.postfix
class ScalaForEachPostfixTemplateTest extends PostfixTemplateTest {
override def testPath() = super.testPath() + "foreach/"
def testExample(): Unit = doTest()
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/completion/postfix/ScalaForEachPostfixTemplateTest.scala | Scala | apache-2.0 | 227 |
import scala.io.StdIn._ //for readLine
import java.io._ //for FileOutputStream and PrintWriter
import scala.io.Source //for Source
object FileIO {
def main(args: Array[String]) {
print("Input: " )
val riga = readLine
println("Hai digitato: " + riga)
//sovrascrive il file
new PrintWriter("test.txt") { write("--HEADER--\n"); close }
//altro modo per scrivere su file, questa volta in append
val f1 = new BufferedWriter(new OutputStreamWriter(new FileOutputStream("test.txt", true))) // true su append, di default false
f1.write("Input inserito: "+riga)
f1.write("\naltro testo inutile\nasdasdasd\nasdasd\nasd asd as da sda\nasd as d as d\n asdasda \n\n\n\nasdads\tasd\n")
f1.close()
Source.fromFile("test.txt").foreach {
print //sottointesto print(_), _ e la variabile che si cicla
}
//esempio con la sintassi precendete
println((new test(4) {print; printInfo; hello; hello2}).x) //chiama prima _.metodo, poi le funzioni, hello2 e inacessibile
}
class test(x:Int = 5, y:Int = 10) {
def printInfo {
println("printInfo")
print
}
def print {
println("print")
println(x+" "+y)
}
def hello2() {
println("CIAO DALLA CLASSE!")
}
}
def hello() {
println("CIAO!")
}
def hello2() {
println("CIAO!")
}
}
| M9k/Scala-learning | 22-fileIO.scala | Scala | bsd-3-clause | 1,441 |
object MyTest
| sbt/sbt | sbt-app/src/sbt-test/actions/remote-cache-semanticdb/src/test/scala/MyTest.scala | Scala | apache-2.0 | 14 |
package com.wavesplatform.state
import com.google.common.primitives.Longs
import com.wavesplatform.account.Address
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils._
import com.wavesplatform.lang.v1.estimator.ScriptEstimatorV1
import com.wavesplatform.state.StateHash.SectionId
import com.wavesplatform.test.FreeSpec
import com.wavesplatform.transaction.Asset.IssuedAsset
import com.wavesplatform.transaction.smart.script.ScriptCompiler
class StateHashSpec extends FreeSpec {
"state hash" - {
val stateHash = new StateHashBuilder
val address = Address.fromString("3My3KZgFQ3CrVHgz6vGRt8687sH4oAA1qp8").explicitGet()
val address1 = Address.fromString("3N5GRqzDBhjVXnCn44baHcz2GoZy5qLxtTh").explicitGet()
val assetId = IssuedAsset(ByteStr.decodeBase58("9ekQuYn92natMnMq8KqeGK3Nn7cpKd3BvPEGgD6fFyyz").get)
val testScript = ScriptCompiler
.compile(
"""
|{-# STDLIB_VERSION 2 #-}
|{-# CONTENT_TYPE EXPRESSION #-}
|{-# SCRIPT_TYPE ACCOUNT #-}
|true
|""".stripMargin,
ScriptEstimatorV1
)
.explicitGet()
._1
val dataEntry = StringDataEntry("test", "test")
stateHash.addLeaseBalance(address, 10000L, 10000L)
stateHash.addAccountScript(address, Some(testScript))
stateHash.addAssetScript(assetId, Some(testScript))
stateHash.addAlias(address, "test")
stateHash.addAlias(address, "test1")
stateHash.addAlias(address1, "test2")
stateHash.addDataEntry(address, dataEntry)
stateHash.addLeaseStatus(TransactionId @@ assetId.id, status = true)
stateHash.addSponsorship(assetId, 1000)
stateHash.addAssetBalance(address, assetId, 2000)
stateHash.addAssetBalance(address1, assetId, 2000)
stateHash.addWavesBalance(address, 1000)
val result = stateHash.result()
def hash(bs: Array[Byte]*): ByteStr = ByteStr(com.wavesplatform.crypto.fastHash(bs.reduce(_ ++ _)))
def sect(id: SectionId.Value): ByteStr = result.hashes(id)
import SectionId._
"sections" - {
"lease balance" in {
sect(LeaseBalance) shouldBe hash(
address.bytes,
Longs.toByteArray(10000L),
Longs.toByteArray(10000L)
)
}
"asset balance" in {
sect(AssetBalance) shouldBe hash(
address.bytes,
assetId.id.arr,
Longs.toByteArray(2000),
address1.bytes,
assetId.id.arr,
Longs.toByteArray(2000)
)
}
"waves balance" in {
sect(WavesBalance) shouldBe hash(
address.bytes,
Longs.toByteArray(1000)
)
}
"account script" in {
sect(AccountScript) shouldBe hash(
address.bytes,
testScript.bytes().arr
)
}
"asset script" in {
sect(AssetScript) shouldBe hash(
assetId.id.arr,
testScript.bytes().arr
)
}
"alias" in {
sect(Alias) shouldBe hash(
address.bytes,
"test".getBytes(),
address.bytes,
"test1".getBytes(),
address1.bytes,
"test2".getBytes()
)
}
"data entry" in {
sect(DataEntry) shouldBe hash(
address.bytes,
"test".getBytes(),
dataEntry.valueBytes
)
}
"lease status" in {
sect(LeaseStatus) shouldBe hash(
assetId.id.arr,
Array(1.toByte)
)
}
"sponsor" in {
sect(Sponsorship) shouldBe hash(
assetId.id.arr,
Longs.toByteArray(1000)
)
}
}
"total" in {
val allHashes = SectionId.values.toSeq.map(id => result.hashes(id))
allHashes shouldBe Seq(WavesBalance, AssetBalance, DataEntry, AccountScript, AssetScript, LeaseBalance, LeaseStatus, Sponsorship, Alias)
.map(sect)
val testPrevHash = sect(SectionId.Alias)
result.createStateHash(testPrevHash).totalHash shouldBe hash(testPrevHash.arr +: allHashes.map(_.arr): _*)
result.copy(hashes = result.hashes - SectionId.WavesBalance).createStateHash(ByteStr.empty).totalHash shouldBe hash(
StateHashBuilder.EmptySectionHash.arr +: allHashes.tail.map(_.arr): _*
)
}
}
}
| wavesplatform/Waves | node/src/test/scala/com/wavesplatform/state/StateHashSpec.scala | Scala | mit | 4,268 |
package com.mesosphere.cosmos.handler
import java.util.UUID
import cats.data.Xor
import com.mesosphere.cosmos.ErrorResponse
import com.mesosphere.cosmos.circe.Decoders._
import com.mesosphere.cosmos.http.MediaTypes
import com.mesosphere.cosmos.rpc.v1.circe.Decoders._
import com.mesosphere.cosmos.rpc.v1.model.UninstallResponse
import com.mesosphere.cosmos.test.CosmosIntegrationTestClient
import com.mesosphere.cosmos.thirdparty.marathon.model.AppId
import com.netaporter.uri.dsl._
import com.twitter.finagle.http.Status
import com.twitter.io.Buf
import com.twitter.util.Await
import io.circe.parse._
import org.scalatest.FreeSpec
final class UninstallHandlerSpec extends FreeSpec {
import CosmosIntegrationTestClient._
"The uninstall handler should" - {
"be able to uninstall a service" in {
val appId = AppId("cassandra" / "uninstall-test")
val installRequest = CosmosClient.requestBuilder("package/install")
.addHeader("Content-Type", MediaTypes.InstallRequest.show)
.addHeader("Accept", MediaTypes.V1InstallResponse.show)
.buildPost(Buf.Utf8(s"""{"packageName":"cassandra", "appId":"${appId.toString}"}"""))
val installResponse = CosmosClient(installRequest)
assertResult(Status.Ok)(installResponse.status)
val marathonApp = Await.result(adminRouter.getApp(appId))
assertResult(appId)(marathonApp.app.id)
//TODO: Assert framework starts up
val uninstallRequest = CosmosClient.requestBuilder("package/uninstall")
.setHeader("Accept", MediaTypes.UninstallResponse.show)
.setHeader("Content-Type", MediaTypes.UninstallRequest.show)
.buildPost(Buf.Utf8("""{"packageName":"cassandra"}"""))
val uninstallResponse = CosmosClient(uninstallRequest)
val uninstallResponseBody = uninstallResponse.contentString
assertResult(Status.Ok)(uninstallResponse.status)
assertResult(MediaTypes.UninstallResponse.show)(uninstallResponse.headerMap("Content-Type"))
val Xor.Right(body) = decode[UninstallResponse](uninstallResponseBody)
assert(body.results.flatMap(_.postUninstallNotes).nonEmpty)
}
"be able to uninstall multiple packages when 'all' is specified" in {
// install 'helloworld' twice
val installBody1 = s"""{"packageName":"helloworld", "appId":"${UUID.randomUUID()}"}"""
val installRequest1 = CosmosClient.requestBuilder("package/install")
.addHeader("Content-Type", MediaTypes.InstallRequest.show)
.addHeader("Accept", MediaTypes.V1InstallResponse.show)
.buildPost(Buf.Utf8(installBody1))
val installResponse1 = CosmosClient(installRequest1)
assertResult(Status.Ok, s"install failed: $installBody1")(installResponse1.status)
val installBody2 = s"""{"packageName":"helloworld", "appId":"${UUID.randomUUID()}"}"""
val installRequest2 = CosmosClient.requestBuilder("package/install")
.addHeader("Content-Type", MediaTypes.InstallRequest.show)
.addHeader("Accept", MediaTypes.V1InstallResponse.show)
.buildPost(Buf.Utf8(installBody2))
val installResponse2 = CosmosClient(installRequest2)
assertResult(Status.Ok, s"install failed: $installBody2")(installResponse2.status)
val uninstallRequest = CosmosClient.requestBuilder("package/uninstall")
.setHeader("Accept", MediaTypes.UninstallResponse.show)
.setHeader("Content-Type", MediaTypes.UninstallRequest.show)
.buildPost(Buf.Utf8("""{"packageName":"helloworld", "all":true}"""))
val uninstallResponse = CosmosClient(uninstallRequest)
assertResult(Status.Ok)(uninstallResponse.status)
assertResult(MediaTypes.UninstallResponse.show)(uninstallResponse.headerMap("Content-Type"))
}
"error when multiple packages are installed and no appId is specified and all isn't set" in {
// install 'helloworld' twice
val appId1 = UUID.randomUUID()
val installBody1 = s"""{"packageName":"helloworld", "appId":"$appId1"}"""
val installRequest1 = CosmosClient.requestBuilder("package/install")
.addHeader("Content-Type", MediaTypes.InstallRequest.show)
.addHeader("Accept", MediaTypes.V1InstallResponse.show)
.buildPost(Buf.Utf8(installBody1))
val installResponse1 = CosmosClient(installRequest1)
assertResult(Status.Ok, s"install failed: $installBody1")(installResponse1.status)
val appId2 = UUID.randomUUID()
val installBody2 = s"""{"packageName":"helloworld", "appId":"$appId2"}"""
val installRequest2 = CosmosClient.requestBuilder("package/install")
.addHeader("Content-Type", MediaTypes.InstallRequest.show)
.addHeader("Accept", MediaTypes.V1InstallResponse.show)
.buildPost(Buf.Utf8(installBody2))
val installResponse2 = CosmosClient(installRequest2)
assertResult(Status.Ok, s"install failed: $installBody2")(installResponse2.status)
val uninstallRequest = CosmosClient.requestBuilder("package/uninstall")
.setHeader("Accept", MediaTypes.UninstallResponse.show)
.setHeader("Content-Type", MediaTypes.UninstallRequest.show)
.buildPost(Buf.Utf8("""{"packageName":"helloworld"}"""))
val uninstallResponse = CosmosClient(uninstallRequest)
val uninstallResponseBody = uninstallResponse.contentString
assertResult(Status.BadRequest)(uninstallResponse.status)
assertResult(MediaTypes.ErrorResponse.show)(uninstallResponse.headerMap("Content-Type"))
val Xor.Right(err) = decode[ErrorResponse](uninstallResponseBody)
assertResult(s"Multiple apps named [helloworld] are installed: [/$appId1, /$appId2]")(err.message)
val cleanupRequest = CosmosClient.requestBuilder("package/uninstall")
.setHeader("Accept", MediaTypes.UninstallResponse.show)
.setHeader("Content-Type", MediaTypes.UninstallRequest.show)
.buildPost(Buf.Utf8("""{"packageName":"helloworld", "all":true}"""))
val cleanupResponse = CosmosClient(cleanupRequest)
assertResult(Status.Ok)(cleanupResponse.status)
}
}
}
| movicha/cosmos | cosmos-server/src/it/scala/com/mesosphere/cosmos/handler/UninstallHandlerSpec.scala | Scala | apache-2.0 | 6,051 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.json
import java.io.{File, StringWriter}
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import java.util.Locale
import com.fasterxml.jackson.core.JsonFactory
import org.apache.hadoop.fs.{Path, PathFilter}
import org.apache.hadoop.io.SequenceFile.CompressionType
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkException
import org.apache.spark.sql.{functions => F, _}
import org.apache.spark.sql.catalyst.json.{CreateJacksonParser, JacksonParser, JSONOptions}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.execution.datasources.json.JsonInferSchema.compatibleType
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
class TestFileFilter extends PathFilter {
override def accept(path: Path): Boolean = path.getParent.getName != "p=2"
}
class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
import testImplicits._
test("Type promotion") {
def checkTypePromotion(expected: Any, actual: Any) {
assert(expected.getClass == actual.getClass,
s"Failed to promote ${actual.getClass} to ${expected.getClass}.")
assert(expected == actual,
s"Promoted value ${actual}(${actual.getClass}) does not equal the expected value " +
s"${expected}(${expected.getClass}).")
}
val factory = new JsonFactory()
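    // Round-trips a value: write it out with a Jackson generator, then parse it back through
    // JacksonParser's converter for the requested Catalyst data type.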
def enforceCorrectType(value: Any, dataType: DataType): Any = {
val writer = new StringWriter()
Utils.tryWithResource(factory.createGenerator(writer)) { generator =>
generator.writeObject(value)
generator.flush()
}
val dummyOption = new JSONOptions(Map.empty[String, String], "GMT")
val dummySchema = StructType(Seq.empty)
val parser = new JacksonParser(dummySchema, dummyOption)
Utils.tryWithResource(factory.createParser(writer.toString)) { jsonParser =>
jsonParser.nextToken()
val converter = parser.makeConverter(dataType)
converter.apply(jsonParser)
}
}
val intNumber: Int = 2147483647
checkTypePromotion(intNumber, enforceCorrectType(intNumber, IntegerType))
checkTypePromotion(intNumber.toLong, enforceCorrectType(intNumber, LongType))
checkTypePromotion(intNumber.toDouble, enforceCorrectType(intNumber, DoubleType))
checkTypePromotion(
Decimal(intNumber), enforceCorrectType(intNumber, DecimalType.SYSTEM_DEFAULT))
val longNumber: Long = 9223372036854775807L
checkTypePromotion(longNumber, enforceCorrectType(longNumber, LongType))
checkTypePromotion(longNumber.toDouble, enforceCorrectType(longNumber, DoubleType))
checkTypePromotion(
Decimal(longNumber), enforceCorrectType(longNumber, DecimalType.SYSTEM_DEFAULT))
val doubleNumber: Double = 1.7976931348623157E308d
checkTypePromotion(doubleNumber.toDouble, enforceCorrectType(doubleNumber, DoubleType))
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber * 1000L)),
enforceCorrectType(intNumber, TimestampType))
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber.toLong * 1000L)),
enforceCorrectType(intNumber.toLong, TimestampType))
val strTime = "2014-09-30 12:34:56"
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf(strTime)),
enforceCorrectType(strTime, TimestampType))
val strDate = "2014-10-15"
checkTypePromotion(
DateTimeUtils.fromJavaDate(Date.valueOf(strDate)), enforceCorrectType(strDate, DateType))
val ISO8601Time1 = "1970-01-01T01:00:01.0Z"
val ISO8601Time2 = "1970-01-01T02:00:01-01:00"
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(3601000)),
enforceCorrectType(ISO8601Time1, TimestampType))
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(10801000)),
enforceCorrectType(ISO8601Time2, TimestampType))
val ISO8601Date = "1970-01-01"
checkTypePromotion(DateTimeUtils.millisToDays(32400000),
enforceCorrectType(ISO8601Date, DateType))
}
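// Illustrative sketch added alongside this suite (not an original test): the same JSON literal can
// be parsed under different user-supplied schema types, which is the behaviour the promotion
// checks above exercise directly through JacksonParser. Test name and inline data are hypothetical.
test("Illustration: one literal, several target types (sketch)") {
  val data = Seq("""{"v": 1}""").toDS()
  // The integer literal 1 is read as a long, or widened to a double, depending on the schema.
  checkAnswer(spark.read.schema(new StructType().add("v", LongType)).json(data), Row(1L))
  checkAnswer(spark.read.schema(new StructType().add("v", DoubleType)).json(data), Row(1.0))
}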
test("Get compatible type") {
def checkDataType(t1: DataType, t2: DataType, expected: DataType): Unit = {
var actual = compatibleType(t1, t2)
assert(actual == expected,
s"Expected $expected as the most general data type for $t1 and $t2, found $actual")
actual = compatibleType(t2, t1)
assert(actual == expected,
s"Expected $expected as the most general data type for $t1 and $t2, found $actual")
}
// NullType
checkDataType(NullType, BooleanType, BooleanType)
checkDataType(NullType, IntegerType, IntegerType)
checkDataType(NullType, LongType, LongType)
checkDataType(NullType, DoubleType, DoubleType)
checkDataType(NullType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(NullType, StringType, StringType)
checkDataType(NullType, ArrayType(IntegerType), ArrayType(IntegerType))
checkDataType(NullType, StructType(Nil), StructType(Nil))
checkDataType(NullType, NullType, NullType)
// BooleanType
checkDataType(BooleanType, BooleanType, BooleanType)
checkDataType(BooleanType, IntegerType, StringType)
checkDataType(BooleanType, LongType, StringType)
checkDataType(BooleanType, DoubleType, StringType)
checkDataType(BooleanType, DecimalType.SYSTEM_DEFAULT, StringType)
checkDataType(BooleanType, StringType, StringType)
checkDataType(BooleanType, ArrayType(IntegerType), StringType)
checkDataType(BooleanType, StructType(Nil), StringType)
// IntegerType
checkDataType(IntegerType, IntegerType, IntegerType)
checkDataType(IntegerType, LongType, LongType)
checkDataType(IntegerType, DoubleType, DoubleType)
checkDataType(IntegerType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(IntegerType, StringType, StringType)
checkDataType(IntegerType, ArrayType(IntegerType), StringType)
checkDataType(IntegerType, StructType(Nil), StringType)
// LongType
checkDataType(LongType, LongType, LongType)
checkDataType(LongType, DoubleType, DoubleType)
checkDataType(LongType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(LongType, StringType, StringType)
checkDataType(LongType, ArrayType(IntegerType), StringType)
checkDataType(LongType, StructType(Nil), StringType)
// DoubleType
checkDataType(DoubleType, DoubleType, DoubleType)
checkDataType(DoubleType, DecimalType.SYSTEM_DEFAULT, DoubleType)
checkDataType(DoubleType, StringType, StringType)
checkDataType(DoubleType, ArrayType(IntegerType), StringType)
checkDataType(DoubleType, StructType(Nil), StringType)
// DecimalType
checkDataType(DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT,
DecimalType.SYSTEM_DEFAULT)
checkDataType(DecimalType.SYSTEM_DEFAULT, StringType, StringType)
checkDataType(DecimalType.SYSTEM_DEFAULT, ArrayType(IntegerType), StringType)
checkDataType(DecimalType.SYSTEM_DEFAULT, StructType(Nil), StringType)
// StringType
checkDataType(StringType, StringType, StringType)
checkDataType(StringType, ArrayType(IntegerType), StringType)
checkDataType(StringType, StructType(Nil), StringType)
// ArrayType
checkDataType(ArrayType(IntegerType), ArrayType(IntegerType), ArrayType(IntegerType))
checkDataType(ArrayType(IntegerType), ArrayType(LongType), ArrayType(LongType))
checkDataType(ArrayType(IntegerType), ArrayType(StringType), ArrayType(StringType))
checkDataType(ArrayType(IntegerType), StructType(Nil), StringType)
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType, false), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType, true), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType, false), ArrayType(IntegerType, false))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType, true), ArrayType(IntegerType, true))
// StructType
checkDataType(StructType(Nil), StructType(Nil), StructType(Nil))
checkDataType(
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(StructField("f1", IntegerType, true) :: Nil))
checkDataType(
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(Nil),
StructType(StructField("f1", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil),
StructType(StructField("f1", LongType, true) :: Nil),
StructType(
StructField("f1", LongType, true) ::
StructField("f2", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) :: Nil),
StructType(
StructField("f2", IntegerType, true) :: Nil),
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) :: Nil),
DecimalType.SYSTEM_DEFAULT,
StringType)
}
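// Illustrative sketch (not an original test): schema inference applies the same compatibleType
// widening to real JSON input, e.g. a whole number and a floating-point value in one column are
// widened to DoubleType. The test name and inline records are hypothetical additions.
test("Illustration: compatibleType widening during inference (sketch)") {
  val df = spark.read.json(Seq("""{"v": 1}""", """{"v": 1.5}""").toDS())
  // LongType (from 1) and DoubleType (from 1.5) resolve to DoubleType, as checked above.
  assert(df.schema === StructType(StructField("v", DoubleType, true) :: Nil))
  checkAnswer(df, Row(1.0) :: Row(1.5) :: Nil)
}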
test("Complex field and type inferring with null in sampling") {
val jsonDF = spark.read.json(jsonNullStruct)
val expectedSchema = StructType(
StructField("headers", StructType(
StructField("Charset", StringType, true) ::
StructField("Host", StringType, true) :: Nil)
, true) ::
StructField("ip", StringType, true) ::
StructField("nullstr", StringType, true):: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select nullstr, headers.Host from jsonTable"),
Seq(Row("", "1.abc.com"), Row("", null), Row("", null), Row(null, null))
)
}
test("Primitive field and type inferring") {
val jsonDF = spark.read.json(primitiveFieldAndType)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Complex field and type inferring") {
val jsonDF = spark.read.json(complexFieldAndType1)
val expectedSchema = StructType(
StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfArray2", ArrayType(ArrayType(DoubleType, true), true), true) ::
StructField("arrayOfBigInteger", ArrayType(DecimalType(21, 0), true), true) ::
StructField("arrayOfBoolean", ArrayType(BooleanType, true), true) ::
StructField("arrayOfDouble", ArrayType(DoubleType, true), true) ::
StructField("arrayOfInteger", ArrayType(LongType, true), true) ::
StructField("arrayOfLong", ArrayType(LongType, true), true) ::
StructField("arrayOfNull", ArrayType(StringType, true), true) ::
StructField("arrayOfString", ArrayType(StringType, true), true) ::
StructField("arrayOfStruct", ArrayType(
StructType(
StructField("field1", BooleanType, true) ::
StructField("field2", StringType, true) ::
StructField("field3", StringType, true) :: Nil), true), true) ::
StructField("struct", StructType(
StructField("field1", BooleanType, true) ::
StructField("field2", DecimalType(20, 0), true) :: Nil), true) ::
StructField("structWithArrayFields", StructType(
StructField("field1", ArrayType(LongType, true), true) ::
StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"),
Row("str1", "str2", null)
)
// Access an array of null values.
checkAnswer(
sql("select arrayOfNull from jsonTable"),
Row(Seq(null, null, null, null))
)
// Access elements of a BigInteger array (we use DecimalType internally).
checkAnswer(
sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from jsonTable"),
Row(new java.math.BigDecimal("922337203685477580700"),
new java.math.BigDecimal("-922337203685477580800"), null)
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("str1", "str2"))
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"),
Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1))
)
// Access elements of an array inside a field of type ArrayType(ArrayType).
checkAnswer(
sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"),
Row("str2", 2.1)
)
// Access elements of an array of structs.
checkAnswer(
sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " +
"from jsonTable"),
Row(
Row(true, "str1", null),
Row(false, null, null),
Row(null, null, null),
null)
)
// Access a struct and fields inside of it.
checkAnswer(
sql("select struct, struct.field1, struct.field2 from jsonTable"),
Row(
Row(true, new java.math.BigDecimal("92233720368547758070")),
true,
new java.math.BigDecimal("92233720368547758070")) :: Nil
)
// Access an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"),
Row(Seq(4, 5, 6), Seq("str1", "str2"))
)
// Access elements of an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from jsonTable"),
Row(5, null)
)
}
test("GetField operation on complex data type") {
val jsonDF = spark.read.json(complexFieldAndType1)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
Row(true, "str1")
)
// Getting all values of a specific field from an array of structs.
checkAnswer(
sql("select arrayOfStruct.field1, arrayOfStruct.field2 from jsonTable"),
Row(Seq(true, false, null), Seq("str1", null, null))
)
}
test("Type conflict in primitive field values") {
val jsonDF = spark.read.json(primitiveFieldValueTypeConflict)
val expectedSchema = StructType(
StructField("num_bool", StringType, true) ::
StructField("num_num_1", LongType, true) ::
StructField("num_num_2", DoubleType, true) ::
StructField("num_num_3", DoubleType, true) ::
StructField("num_str", StringType, true) ::
StructField("str_bool", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row("true", 11L, null, 1.1, "13.1", "str1") ::
Row("12", null, 21474836470.9, null, null, "true") ::
Row("false", 21474836470L, 92233720368547758070d, 100, "str1", "false") ::
Row(null, 21474836570L, 1.1, 21474836470L, "92233720368547758070", null) :: Nil
)
// Number and Boolean conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_bool - 10 from jsonTable where num_bool > 11"),
Row(2)
)
// Widening to LongType
checkAnswer(
sql("select num_num_1 - 100 from jsonTable where num_num_1 > 11"),
Row(21474836370L) :: Row(21474836470L) :: Nil
)
checkAnswer(
sql("select num_num_1 - 100 from jsonTable where num_num_1 > 10"),
Row(-89) :: Row(21474836370L) :: Row(21474836470L) :: Nil
)
// Widening to DecimalType
checkAnswer(
sql("select num_num_2 + 1.3 from jsonTable where num_num_2 > 1.1"),
Row(21474836472.2) ::
Row(92233720368547758071.3) :: Nil
)
// Widening to Double
checkAnswer(
sql("select num_num_3 + 1.2 from jsonTable where num_num_3 > 1.1"),
Row(101.2) :: Row(21474836471.2) :: Nil
)
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_str + 1.2 from jsonTable where num_str > 14d"),
Row(92233720368547758071.2)
)
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_str + 1.2 from jsonTable where num_str >= 92233720368547758060"),
Row(new java.math.BigDecimal("92233720368547758071.2").doubleValue)
)
// String and Boolean conflict: resolve the type as string.
checkAnswer(
sql("select * from jsonTable where str_bool = 'str1'"),
Row("true", 11L, null, 1.1, "13.1", "str1")
)
}
ignore("Type conflict in primitive field values (Ignored)") {
val jsonDF = spark.read.json(primitiveFieldValueTypeConflict)
jsonDF.createOrReplaceTempView("jsonTable")
// Right now, the analyzer does not promote strings in a boolean expression.
// Number and Boolean conflict: resolve the type as boolean in this query.
checkAnswer(
sql("select num_bool from jsonTable where NOT num_bool"),
Row(false)
)
checkAnswer(
sql("select str_bool from jsonTable where NOT str_bool"),
Row(false)
)
// Right now, the analyzer does not know that num_bool should be treated as a boolean.
// Number and Boolean conflict: resolve the type as boolean in this query.
checkAnswer(
sql("select num_bool from jsonTable where num_bool"),
Row(true)
)
checkAnswer(
sql("select str_bool from jsonTable where str_bool"),
Row(false)
)
// The plan of the following DSL is
// Project [(CAST(num_str#65:4, DoubleType) + 1.2) AS num#78]
// Filter (CAST(CAST(num_str#65:4, DoubleType), DecimalType) > 92233720368547758060)
// ExistingRdd [num_bool#61,num_num_1#62L,num_num_2#63,num_num_3#64,num_str#65,str_bool#66]
// We should directly cast num_str to DecimalType and also need to do the right type promotion
// in the Project.
checkAnswer(
jsonDF.
where('num_str >= BigDecimal("92233720368547758060")).
select(('num_str + 1.2).as("num")),
Row(new java.math.BigDecimal("92233720368547758071.2").doubleValue())
)
// The following test will fail. The type of num_str is StringType.
// So, to evaluate num_str + 1.2, we first need to use Cast to convert the type.
// In our test data, one value of num_str is 13.1.
// The result of (CAST(num_str#65:4, DoubleType) + 1.2) for this value is 14.299999999999999,
// which is not 14.3.
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_str + 1.2 from jsonTable where num_str > 13"),
Row(BigDecimal("14.3")) :: Row(BigDecimal("92233720368547758071.2")) :: Nil
)
}
test("Type conflict in complex field values") {
val jsonDF = spark.read.json(complexFieldValueTypeConflict)
val expectedSchema = StructType(
StructField("array", ArrayType(LongType, true), true) ::
StructField("num_struct", StringType, true) ::
StructField("str_array", StringType, true) ::
StructField("struct", StructType(
StructField("field", StringType, true) :: Nil), true) ::
StructField("struct_array", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(Seq(), "11", "[1,2,3]", Row(null), "[]") ::
Row(null, """{"field":false}""", null, null, "{}") ::
Row(Seq(4, 5, 6), null, "str", Row(null), "[7,8,9]") ::
Row(Seq(7), "{}", """["str1","str2",33]""", Row("str"), """{"field":true}""") :: Nil
)
}
test("Type conflict in array elements") {
val jsonDF = spark.read.json(arrayElementTypeConflict)
val expectedSchema = StructType(
StructField("array1", ArrayType(StringType, true), true) ::
StructField("array2", ArrayType(StructType(
StructField("field", LongType, true) :: Nil), true), true) ::
StructField("array3", ArrayType(StringType, true), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(Seq("1", "1.1", "true", null, "[]", "{}", "[2,3,4]",
"""{"field":"str"}"""), Seq(Row(214748364700L), Row(1)), null) ::
Row(null, null, Seq("""{"field":"str"}""", """{"field":1}""")) ::
Row(null, null, Seq("1", "2", "3")) :: Nil
)
// Treat an element as a number.
checkAnswer(
sql("select array1[0] + 1 from jsonTable where array1 is not null"),
Row(2)
)
}
test("Handling missing fields") {
val jsonDF = spark.read.json(missingFields)
val expectedSchema = StructType(
StructField("a", BooleanType, true) ::
StructField("b", LongType, true) ::
StructField("c", ArrayType(LongType, true), true) ::
StructField("d", StructType(
StructField("field", BooleanType, true) :: Nil), true) ::
StructField("e", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
}
test("Loading a JSON dataset from a text file") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.json(path)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Loading a JSON dataset primitivesAsString returns schema with primitive types as strings") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.option("primitivesAsString", "true").json(path)
val expectedSchema = StructType(
StructField("bigInteger", StringType, true) ::
StructField("boolean", StringType, true) ::
StructField("double", StringType, true) ::
StructField("integer", StringType, true) ::
StructField("long", StringType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row("92233720368547758070",
"true",
"1.7976931348623157E308",
"10",
"21474836470",
null,
"this is a simple string.")
)
}
test("Loading a JSON dataset primitivesAsString returns complex fields as strings") {
val jsonDF = spark.read.option("primitivesAsString", "true").json(complexFieldAndType1)
val expectedSchema = StructType(
StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfArray2", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfBigInteger", ArrayType(StringType, true), true) ::
StructField("arrayOfBoolean", ArrayType(StringType, true), true) ::
StructField("arrayOfDouble", ArrayType(StringType, true), true) ::
StructField("arrayOfInteger", ArrayType(StringType, true), true) ::
StructField("arrayOfLong", ArrayType(StringType, true), true) ::
StructField("arrayOfNull", ArrayType(StringType, true), true) ::
StructField("arrayOfString", ArrayType(StringType, true), true) ::
StructField("arrayOfStruct", ArrayType(
StructType(
StructField("field1", StringType, true) ::
StructField("field2", StringType, true) ::
StructField("field3", StringType, true) :: Nil), true), true) ::
StructField("struct", StructType(
StructField("field1", StringType, true) ::
StructField("field2", StringType, true) :: Nil), true) ::
StructField("structWithArrayFields", StructType(
StructField("field1", ArrayType(StringType, true), true) ::
StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"),
Row("str1", "str2", null)
)
// Access an array of null values.
checkAnswer(
sql("select arrayOfNull from jsonTable"),
Row(Seq(null, null, null, null))
)
// Access elements of a BigInteger array (we use DecimalType internally).
checkAnswer(
sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from jsonTable"),
Row("922337203685477580700", "-922337203685477580800", null)
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("str1", "str2"))
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("1.1", "2.1", "3.1"))
)
// Access elements of an array inside a field of type ArrayType(ArrayType).
checkAnswer(
sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"),
Row("str2", "2.1")
)
// Access elements of an array of structs.
checkAnswer(
sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " +
"from jsonTable"),
Row(
Row("true", "str1", null),
Row("false", null, null),
Row(null, null, null),
null)
)
// Access a struct and fields inside of it.
checkAnswer(
sql("select struct, struct.field1, struct.field2 from jsonTable"),
Row(
Row("true", "92233720368547758070"),
"true",
"92233720368547758070") :: Nil
)
// Access an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"),
Row(Seq("4", "5", "6"), Seq("str1", "str2"))
)
// Access elements of an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from jsonTable"),
Row("5", null)
)
}
test("Loading a JSON dataset prefersDecimal returns schema with float types as BigDecimal") {
val jsonDF = spark.read.option("prefersDecimal", "true").json(primitiveFieldAndType)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DecimalType(17, -292), true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(BigDecimal("92233720368547758070"),
true,
BigDecimal("1.7976931348623157E308"),
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Find compatible types even if inferred DecimalType is not capable of other IntegralType") {
val mixedIntegerAndDoubleRecords = Seq(
"""{"a": 3, "b": 1.1}""",
s"""{"a": 3.1, "b": 0.${"0" * 38}1}""").toDS()
val jsonDF = spark.read
.option("prefersDecimal", "true")
.json(mixedIntegerAndDoubleRecords)
// The values in the `a` field will be decimals as they fit in decimal. For the `b` field,
// they will be doubles as `1.0E-39D` does not fit in decimal.
val expectedSchema = StructType(
StructField("a", DecimalType(21, 1), true) ::
StructField("b", DoubleType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(
jsonDF,
Row(BigDecimal("3"), 1.1D) ::
Row(BigDecimal("3.1"), 1.0E-39D) :: Nil
)
}
test("Infer big integers correctly even when it does not fit in decimal") {
val jsonDF = spark.read
.json(bigIntegerRecords)
// The value in the `a` field will be a double as it does not fit in decimal. For the `b` field,
// it will be a decimal because `92233720368547758070` fits in DecimalType(20, 0).
val expectedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(20, 0), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(jsonDF, Row(1.0E38D, BigDecimal("92233720368547758070")))
}
test("Infer floating-point values correctly even when it does not fit in decimal") {
val jsonDF = spark.read
.option("prefersDecimal", "true")
.json(floatingValueRecords)
// The value in the `a` field will be a double as it does not fit in decimal. For the `b` field,
// it will be a decimal because `0.01` can be represented with a precision equal to its scale.
val expectedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(2, 2), true):: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(jsonDF, Row(1.0E-39D, BigDecimal(0.01)))
val mergedJsonDF = spark.read
.option("prefersDecimal", "true")
.json(floatingValueRecords.union(bigIntegerRecords))
val expectedMergedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(22, 2), true):: Nil)
assert(expectedMergedSchema === mergedJsonDF.schema)
checkAnswer(
mergedJsonDF,
Row(1.0E-39D, BigDecimal(0.01)) ::
Row(1.0E38D, BigDecimal("92233720368547758070")) :: Nil
)
}
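// Illustrative sketch (not an original test): with prefersDecimal enabled, an exact decimal such
// as 1.5 is expected to be inferred as DecimalType(2, 1) rather than DoubleType, matching the
// precision/scale behaviour shown above. Test name and inline record are hypothetical.
test("Illustration: prefersDecimal infers DecimalType for exact decimals (sketch)") {
  val df = spark.read.option("prefersDecimal", "true").json(Seq("""{"v": 1.5}""").toDS())
  assert(df.schema === StructType(StructField("v", DecimalType(2, 1), true) :: Nil))
  checkAnswer(df, Row(BigDecimal("1.5")))
}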
test("Loading a JSON dataset from a text file with SQL") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.toURI.toString
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
sql(
s"""
|CREATE TEMPORARY VIEW jsonTableSQL
|USING org.apache.spark.sql.json
|OPTIONS (
| path '$path'
|)
""".stripMargin)
checkAnswer(
sql("select * from jsonTableSQL"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Applying schemas") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val schema = StructType(
StructField("bigInteger", DecimalType.SYSTEM_DEFAULT, true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", IntegerType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
val jsonDF1 = spark.read.schema(schema).json(path)
assert(schema === jsonDF1.schema)
jsonDF1.createOrReplaceTempView("jsonTable1")
checkAnswer(
sql("select * from jsonTable1"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
null,
"this is a simple string.")
)
val jsonDF2 = spark.read.schema(schema).json(primitiveFieldAndType)
assert(schema === jsonDF2.schema)
jsonDF2.createOrReplaceTempView("jsonTable2")
checkAnswer(
sql("select * from jsonTable2"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Applying schemas with MapType") {
val schemaWithSimpleMap = StructType(
StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
val jsonWithSimpleMap = spark.read.schema(schemaWithSimpleMap).json(mapType1)
jsonWithSimpleMap.createOrReplaceTempView("jsonWithSimpleMap")
checkAnswer(
sql("select `map` from jsonWithSimpleMap"),
Row(Map("a" -> 1)) ::
Row(Map("b" -> 2)) ::
Row(Map("c" -> 3)) ::
Row(Map("c" -> 1, "d" -> 4)) ::
Row(Map("e" -> null)) :: Nil
)
checkAnswer(
sql("select `map`['c'] from jsonWithSimpleMap"),
Row(null) ::
Row(null) ::
Row(3) ::
Row(1) ::
Row(null) :: Nil
)
val innerStruct = StructType(
StructField("field1", ArrayType(IntegerType, true), true) ::
StructField("field2", IntegerType, true) :: Nil)
val schemaWithComplexMap = StructType(
StructField("map", MapType(StringType, innerStruct, true), false) :: Nil)
val jsonWithComplexMap = spark.read.schema(schemaWithComplexMap).json(mapType2)
jsonWithComplexMap.createOrReplaceTempView("jsonWithComplexMap")
checkAnswer(
sql("select `map` from jsonWithComplexMap"),
Row(Map("a" -> Row(Seq(1, 2, 3, null), null))) ::
Row(Map("b" -> Row(null, 2))) ::
Row(Map("c" -> Row(Seq(), 4))) ::
Row(Map("c" -> Row(null, 3), "d" -> Row(Seq(null), null))) ::
Row(Map("e" -> null)) ::
Row(Map("f" -> Row(null, null))) :: Nil
)
checkAnswer(
sql("select `map`['a'].field1, `map`['c'].field2 from jsonWithComplexMap"),
Row(Seq(1, 2, 3, null), null) ::
Row(null, null) ::
Row(null, 4) ::
Row(null, 3) ::
Row(null, null) ::
Row(null, null) :: Nil
)
}
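// Illustrative sketch (not an original test): a user-supplied MapType schema turns a JSON object
// with arbitrary keys into a single map column instead of one column per key, as in the test
// above but with inline data. Test name and records are hypothetical.
test("Illustration: user-specified MapType schema (sketch)") {
  val schema = StructType(StructField("m", MapType(StringType, LongType, true), true) :: Nil)
  val df = spark.read.schema(schema).json(Seq("""{"m": {"x": 1, "y": 2}}""").toDS())
  checkAnswer(df, Row(Map("x" -> 1L, "y" -> 2L)))
}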
test("SPARK-2096 Correctly parse dot notations") {
val jsonDF = spark.read.json(complexFieldAndType2)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
Row(true, "str1")
)
checkAnswer(
sql(
"""
|select complexArrayOfStruct[0].field1[1].inner2[0], complexArrayOfStruct[1].field2[0][1]
|from jsonTable
""".stripMargin),
Row("str2", 6)
)
}
test("SPARK-3390 Complex arrays") {
val jsonDF = spark.read.json(complexFieldAndType2)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql(
"""
|select arrayOfArray1[0][0][0], arrayOfArray1[1][0][1], arrayOfArray1[1][1][0]
|from jsonTable
""".stripMargin),
Row(5, 7, 8)
)
checkAnswer(
sql(
"""
|select arrayOfArray2[0][0][0].inner1, arrayOfArray2[1][0],
|arrayOfArray2[1][1][1].inner2[0], arrayOfArray2[2][0][0].inner3[0][0].inner4
|from jsonTable
""".stripMargin),
Row("str1", Nil, "str4", 2)
)
}
test("SPARK-3308 Read top level JSON arrays") {
val jsonDF = spark.read.json(jsonArray)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql(
"""
|select a, b, c
|from jsonTable
""".stripMargin),
Row("str_a_1", null, null) ::
Row("str_a_2", null, null) ::
Row(null, "str_b_3", null) ::
Row("str_a_4", "str_b_4", "str_c_4") :: Nil
)
}
test("Corrupt records: FAILFAST mode") {
val schema = StructType(
StructField("a", StringType, true) :: Nil)
// `FAILFAST` mode should throw an exception for corrupt records.
val exceptionOne = intercept[SparkException] {
spark.read
.option("mode", "FAILFAST")
.json(corruptRecords)
}
assert(exceptionOne.getMessage.contains("JsonParseException"))
val exceptionTwo = intercept[SparkException] {
spark.read
.option("mode", "FAILFAST")
.schema(schema)
.json(corruptRecords)
.collect()
}
assert(exceptionTwo.getMessage.contains("JsonParseException"))
}
test("Corrupt records: DROPMALFORMED mode") {
val schemaOne = StructType(
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
val schemaTwo = StructType(
StructField("a", StringType, true) :: Nil)
// `DROPMALFORMED` mode should skip corrupt records
val jsonDFOne = spark.read
.option("mode", "DROPMALFORMED")
.json(corruptRecords)
checkAnswer(
jsonDFOne,
Row("str_a_4", "str_b_4", "str_c_4") :: Nil
)
assert(jsonDFOne.schema === schemaOne)
val jsonDFTwo = spark.read
.option("mode", "DROPMALFORMED")
.schema(schemaTwo)
.json(corruptRecords)
checkAnswer(
jsonDFTwo,
Row("str_a_4") :: Nil)
assert(jsonDFTwo.schema === schemaTwo)
}
test("SPARK-19641: Additional corrupt records: DROPMALFORMED mode") {
val schema = new StructType().add("dummy", StringType)
// `DROPMALFORMED` mode should skip corrupt records
val jsonDF = spark.read
.option("mode", "DROPMALFORMED")
.json(additionalCorruptRecords)
checkAnswer(
jsonDF,
Row("test"))
assert(jsonDF.schema === schema)
}
test("Corrupt records: PERMISSIVE mode, without designated column for malformed records") {
val schema = StructType(
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
val jsonDF = spark.read.schema(schema).json(corruptRecords)
checkAnswer(
jsonDF.select($"a", $"b", $"c"),
Seq(
// Corrupted records are replaced with null
Row(null, null, null),
Row(null, null, null),
Row(null, null, null),
Row("str_a_4", "str_b_4", "str_c_4"),
Row(null, null, null))
)
}
test("Corrupt records: PERMISSIVE mode, with designated column for malformed records") {
// Test if we can query corrupt records.
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
val jsonDF = spark.read.json(corruptRecords)
val schema = StructType(
StructField("_unparsed", StringType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
assert(schema === jsonDF.schema)
// In HiveContext, backticks should be used to access columns starting with an underscore.
checkAnswer(
jsonDF.select($"a", $"b", $"c", $"_unparsed"),
Row(null, null, null, "{") ::
Row(null, null, null, """{"a":1, b:2}""") ::
Row(null, null, null, """{"a":{, b:3}""") ::
Row("str_a_4", "str_b_4", "str_c_4", null) ::
Row(null, null, null, "]") :: Nil
)
checkAnswer(
jsonDF.filter($"_unparsed".isNull).select($"a", $"b", $"c"),
Row("str_a_4", "str_b_4", "str_c_4")
)
checkAnswer(
jsonDF.filter($"_unparsed".isNotNull).select($"_unparsed"),
Row("{") ::
Row("""{"a":1, b:2}""") ::
Row("""{"a":{, b:3}""") ::
Row("]") :: Nil
)
}
}
test("SPARK-13953 Rename the corrupt record field via option") {
val jsonDF = spark.read
.option("columnNameOfCorruptRecord", "_malformed")
.json(corruptRecords)
val schema = StructType(
StructField("_malformed", StringType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
assert(schema === jsonDF.schema)
checkAnswer(
jsonDF.selectExpr("a", "b", "c", "_malformed"),
Row(null, null, null, "{") ::
Row(null, null, null, """{"a":1, b:2}""") ::
Row(null, null, null, """{"a":{, b:3}""") ::
Row("str_a_4", "str_b_4", "str_c_4", null) ::
Row(null, null, null, "]") :: Nil
)
}
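// Illustrative sketch (not an original test): in PERMISSIVE mode a malformed line is kept verbatim
// in the designated corrupt-record column while the parsed columns stay null for that row, and
// well-formed rows leave the corrupt column null. Test name, column name and data are hypothetical.
test("Illustration: PERMISSIVE mode keeps malformed input (sketch)") {
  val df = spark.read
    .option("mode", "PERMISSIVE")
    .option("columnNameOfCorruptRecord", "_bad")
    .json(Seq("""{"a": 1}""", """{"a":""").toDS())
  checkAnswer(df.select($"a", $"_bad"), Row(1L, null) :: Row(null, """{"a":""") :: Nil)
}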
test("SPARK-4068: nulls in arrays") {
val jsonDF = spark.read.json(nullsInArrays)
jsonDF.createOrReplaceTempView("jsonTable")
val schema = StructType(
StructField("field1",
ArrayType(ArrayType(ArrayType(ArrayType(StringType, true), true), true), true), true) ::
StructField("field2",
ArrayType(ArrayType(
StructType(StructField("Test", LongType, true) :: Nil), true), true), true) ::
StructField("field3",
ArrayType(ArrayType(
StructType(StructField("Test", StringType, true) :: Nil), true), true), true) ::
StructField("field4",
ArrayType(ArrayType(ArrayType(LongType, true), true), true), true) :: Nil)
assert(schema === jsonDF.schema)
checkAnswer(
sql(
"""
|SELECT field1, field2, field3, field4
|FROM jsonTable
""".stripMargin),
Row(Seq(Seq(null), Seq(Seq(Seq("Test")))), null, null, null) ::
Row(null, Seq(null, Seq(Row(1))), null, null) ::
Row(null, null, Seq(Seq(null), Seq(Row("2"))), null) ::
Row(null, null, null, Seq(Seq(null, Seq(1, 2, 3)))) :: Nil
)
}
test("SPARK-4228 DataFrame to JSON") {
val schema1 = StructType(
StructField("f1", IntegerType, false) ::
StructField("f2", StringType, false) ::
StructField("f3", BooleanType, false) ::
StructField("f4", ArrayType(StringType), nullable = true) ::
StructField("f5", IntegerType, true) :: Nil)
val rowRDD1 = unparsedStrings.map { r =>
val values = r.split(",").map(_.trim)
val v5 = try values(3).toInt catch {
case _: NumberFormatException => null
}
Row(values(0).toInt, values(1), values(2).toBoolean, r.split(",").toList, v5)
}
val df1 = spark.createDataFrame(rowRDD1, schema1)
df1.createOrReplaceTempView("applySchema1")
val df2 = df1.toDF
val result = df2.toJSON.collect()
// scalastyle:off
assert(result(0) === "{\"f1\":1,\"f2\":\"A1\",\"f3\":true,\"f4\":[\"1\",\" A1\",\" true\",\" null\"]}")
assert(result(3) === "{\"f1\":4,\"f2\":\"D4\",\"f3\":true,\"f4\":[\"4\",\" D4\",\" true\",\" 2147483644\"],\"f5\":2147483644}")
// scalastyle:on
val schema2 = StructType(
StructField("f1", StructType(
StructField("f11", IntegerType, false) ::
StructField("f12", BooleanType, false) :: Nil), false) ::
StructField("f2", MapType(StringType, IntegerType, true), false) :: Nil)
val rowRDD2 = unparsedStrings.map { r =>
val values = r.split(",").map(_.trim)
val v4 = try values(3).toInt catch {
case _: NumberFormatException => null
}
Row(Row(values(0).toInt, values(2).toBoolean), Map(values(1) -> v4))
}
val df3 = spark.createDataFrame(rowRDD2, schema2)
df3.createOrReplaceTempView("applySchema2")
val df4 = df3.toDF
val result2 = df4.toJSON.collect()
assert(result2(1) === "{\"f1\":{\"f11\":2,\"f12\":false},\"f2\":{\"B2\":null}}")
assert(result2(3) === "{\"f1\":{\"f11\":4,\"f12\":true},\"f2\":{\"D4\":2147483644}}")
val jsonDF = spark.read.json(primitiveFieldAndType)
val primTable = spark.read.json(jsonDF.toJSON)
primTable.createOrReplaceTempView("primitiveTable")
checkAnswer(
sql("select * from primitiveTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
"this is a simple string.")
)
val complexJsonDF = spark.read.json(complexFieldAndType1)
val compTable = spark.read.json(complexJsonDF.toJSON)
compTable.createOrReplaceTempView("complexTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from complexTable"),
Row("str1", "str2", null)
)
// Access an array of null values.
checkAnswer(
sql("select arrayOfNull from complexTable"),
Row(Seq(null, null, null, null))
)
// Access elements of a BigInteger array (we use DecimalType internally).
checkAnswer(
sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] " +
" from complexTable"),
Row(new java.math.BigDecimal("922337203685477580700"),
new java.math.BigDecimal("-922337203685477580800"), null)
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray1[0], arrayOfArray1[1] from complexTable"),
Row(Seq("1", "2", "3"), Seq("str1", "str2"))
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray2[0], arrayOfArray2[1] from complexTable"),
Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1))
)
// Access elements of an array inside a field of type ArrayType(ArrayType).
checkAnswer(
sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from complexTable"),
Row("str2", 2.1)
)
// Access a struct and fields inside of it.
checkAnswer(
sql("select struct, struct.field1, struct.field2 from complexTable"),
Row(
Row(true, new java.math.BigDecimal("92233720368547758070")),
true,
new java.math.BigDecimal("92233720368547758070")) :: Nil
)
// Access an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1, structWithArrayFields.field2 from complexTable"),
Row(Seq(4, 5, 6), Seq("str1", "str2"))
)
// Access elements of an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] " +
"from complexTable"),
Row(5, null)
)
}
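// Illustrative sketch (not an original test): toJSON produces one JSON string per row, and reading
// those strings back recovers equivalent data (integral columns come back as longs). Test name and
// inline data are hypothetical.
test("Illustration: toJSON round trip (sketch)") {
  val df = Seq((1, "a"), (2, "b")).toDF("id", "name")
  val roundTripped = spark.read.json(df.toJSON)
  checkAnswer(roundTripped.select($"id", $"name"), Row(1L, "a") :: Row(2L, "b") :: Nil)
}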
test("JSONRelation equality test") {
withTempPath(dir => {
val path = dir.getCanonicalFile.toURI.toString
sparkContext.parallelize(1 to 100)
.map(i => s"""{"a": 1, "b": "str$i"}""").saveAsTextFile(path)
val d1 = DataSource(
spark,
userSpecifiedSchema = None,
partitionColumns = Array.empty[String],
bucketSpec = None,
className = classOf[JsonFileFormat].getCanonicalName,
options = Map("path" -> path)).resolveRelation()
val d2 = DataSource(
spark,
userSpecifiedSchema = None,
partitionColumns = Array.empty[String],
bucketSpec = None,
className = classOf[JsonFileFormat].getCanonicalName,
options = Map("path" -> path)).resolveRelation()
assert(d1 === d2)
})
}
test("SPARK-6245 JsonInferSchema.infer on empty RDD") {
// This is really a test that it doesn't throw an exception
val emptySchema = JsonInferSchema.infer(
empty.rdd,
new JSONOptions(Map.empty[String, String], "GMT"),
CreateJacksonParser.string)
assert(StructType(Seq()) === emptySchema)
}
test("SPARK-7565 MapType in JsonRDD") {
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
withTempDir { dir =>
val schemaWithSimpleMap = StructType(
StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
val df = spark.read.schema(schemaWithSimpleMap).json(mapType1)
val path = dir.getAbsolutePath
df.write.mode("overwrite").parquet(path)
// order of MapType is not defined
assert(spark.read.parquet(path).count() == 5)
val df2 = spark.read.json(corruptRecords)
df2.write.mode("overwrite").parquet(path)
checkAnswer(spark.read.parquet(path), df2.collect())
}
}
}
test("SPARK-8093 Erase empty structs") {
val emptySchema = JsonInferSchema.infer(
emptyRecords.rdd,
new JSONOptions(Map.empty[String, String], "GMT"),
CreateJacksonParser.string)
assert(StructType(Seq()) === emptySchema)
}
test("JSON with Partition") {
def makePartition(rdd: RDD[String], parent: File, partName: String, partValue: Any): File = {
val p = new File(parent, s"$partName=${partValue.toString}")
rdd.saveAsTextFile(p.getCanonicalPath)
p
}
withTempPath(root => {
val d1 = new File(root, "d1=1")
// root/d1=1/col1=abc
val p1_col1 = makePartition(
sparkContext.parallelize(2 to 5).map(i => s"""{"a": 1, "b": "str$i"}"""),
d1,
"col1",
"abc")
// root/d1=1/col1=abd
val p2 = makePartition(
sparkContext.parallelize(6 to 10).map(i => s"""{"a": 1, "b": "str$i"}"""),
d1,
"col1",
"abd")
spark.read.json(root.getAbsolutePath).createOrReplaceTempView("test_myjson_with_part")
checkAnswer(sql(
"SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abc'"), Row(4))
checkAnswer(sql(
"SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abd'"), Row(5))
checkAnswer(sql(
"SELECT count(a) FROM test_myjson_with_part where d1 = 1"), Row(9))
})
}
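// Illustrative sketch (not an original test): partition values are taken from the directory names
// (here p=A and p=B) and surface as an extra column when the parent path is read. Test name,
// directory names and data are hypothetical.
test("Illustration: partition column from directory name (sketch)") {
  withTempPath { root =>
    Seq("""{"a": 1}""").toDF("value").write.text(new File(root, "p=A").getCanonicalPath)
    Seq("""{"a": 2}""").toDF("value").write.text(new File(root, "p=B").getCanonicalPath)
    val df = spark.read.json(root.getCanonicalPath)
    checkAnswer(df.select($"a", $"p"), Row(1L, "A") :: Row(2L, "B") :: Nil)
  }
}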
test("backward compatibility") {
// This test makes sure our JSON support can read JSON data generated by previous versions
// of Spark through the toJSON method and the JSON data source.
// The data is generated by the following program.
// Here are a few notes:
// - Spark 1.5.0 cannot save timestamp data. So, we manually added timestamp field (col13)
// in the JSON object.
// - For Spark before 1.5.1, we do not generate UDTs. So, we manually added the UDT value to
// JSON objects generated by those Spark versions (col17).
// - If the type is NullType, we do not write data out.
// Create the schema.
val struct =
StructType(
StructField("f1", FloatType, true) ::
StructField("f2", ArrayType(BooleanType), true) :: Nil)
val dataTypes =
Seq(
StringType, BinaryType, NullType, BooleanType,
ByteType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType(25, 5), DecimalType(6, 5),
DateType, TimestampType,
ArrayType(IntegerType), MapType(StringType, LongType), struct,
new UDT.MyDenseVectorUDT())
val fields = dataTypes.zipWithIndex.map { case (dataType, index) =>
StructField(s"col$index", dataType, nullable = true)
}
val schema = StructType(fields)
val constantValues =
Seq(
"a string in binary".getBytes(StandardCharsets.UTF_8),
null,
true,
1.toByte,
2.toShort,
3,
Long.MaxValue,
0.25.toFloat,
0.75,
new java.math.BigDecimal(s"1234.23456"),
new java.math.BigDecimal(s"1.23456"),
java.sql.Date.valueOf("2015-01-01"),
java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123"),
Seq(2, 3, 4),
Map("a string" -> 2000L),
Row(4.75.toFloat, Seq(false, true)),
new UDT.MyDenseVector(Array(0.25, 2.25, 4.25)))
val data =
Row.fromSeq(Seq("Spark " + spark.sparkContext.version) ++ constantValues) :: Nil
// Data generated by previous versions.
// scalastyle:off
val existingJSONData =
"""{"col0":"Spark 1.2.2","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"16436","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" :: Nil
// scalastyle:on
// Generate data for the current version.
val df = spark.createDataFrame(spark.sparkContext.parallelize(data, 1), schema)
withTempPath { path =>
df.write.format("json").mode("overwrite").save(path.getCanonicalPath)
// df.toJSON will convert internal rows to external rows first and then generate
// JSON objects. While, df.write.format("json") will write internal rows directly.
val allJSON =
existingJSONData ++
df.toJSON.collect() ++
sparkContext.textFile(path.getCanonicalPath).collect()
Utils.deleteRecursively(path)
sparkContext.parallelize(allJSON, 1).saveAsTextFile(path.getCanonicalPath)
// Read data back with the schema specified.
val col0Values =
Seq(
"Spark 1.2.2",
"Spark 1.3.1",
"Spark 1.3.1",
"Spark 1.4.1",
"Spark 1.4.1",
"Spark 1.5.0",
"Spark 1.5.0",
"Spark " + spark.sparkContext.version,
"Spark " + spark.sparkContext.version)
val expectedResult = col0Values.map { v =>
Row.fromSeq(Seq(v) ++ constantValues)
}
checkAnswer(
spark.read.format("json").schema(schema).load(path.getCanonicalPath),
expectedResult
)
}
}
test("SPARK-11544 test pathfilter") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(2)
df.write.json(path + "/p=1")
df.write.json(path + "/p=2")
assert(spark.read.json(path).count() === 4)
val extraOptions = Map(
"mapred.input.pathFilter.class" -> classOf[TestFileFilter].getName,
"mapreduce.input.pathFilter.class" -> classOf[TestFileFilter].getName
)
assert(spark.read.options(extraOptions).json(path).count() === 2)
}
}
test("SPARK-12057 additional corrupt records do not throw exceptions") {
// Test if we can query corrupt records.
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
withTempView("jsonTable") {
val schema = StructType(
StructField("_unparsed", StringType, true) ::
StructField("dummy", StringType, true) :: Nil)
{
// We need to make sure we can infer the schema.
val jsonDF = spark.read.json(additionalCorruptRecords)
assert(jsonDF.schema === schema)
}
{
val jsonDF = spark.read.schema(schema).json(additionalCorruptRecords)
jsonDF.createOrReplaceTempView("jsonTable")
// In HiveContext, backticks should be used to access columns starting with an underscore.
checkAnswer(
sql(
"""
|SELECT dummy, _unparsed
|FROM jsonTable
""".stripMargin),
Row("test", null) ::
Row(null, """[1,2,3]""") ::
Row(null, """":"test", "a":1}""") ::
Row(null, """42""") ::
Row(null, """ ","ian":"test"}""") :: Nil
)
}
}
}
}
test("Parse JSON rows having an array type and a struct type in the same field.") {
withTempDir { dir =>
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
arrayAndStructRecords.map(record => record.replaceAll("\\n", " ")).write.text(path)
val schema =
StructType(
StructField("a", StructType(
StructField("b", StringType) :: Nil
)) :: Nil)
val jsonDF = spark.read.schema(schema).json(path)
assert(jsonDF.count() == 2)
}
}
test("SPARK-12872 Support to specify the option for compression codec") {
withTempDir { dir =>
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write
.format("json")
.option("compression", "gZiP")
.save(jsonDir)
val compressedFiles = new File(jsonDir).listFiles()
assert(compressedFiles.exists(_.getName.endsWith(".json.gz")))
val jsonCopy = spark.read
.format("json")
.load(jsonDir)
assert(jsonCopy.count == jsonDF.count)
val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean")
val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean")
checkAnswer(jsonCopySome, jsonDFSome)
}
}
test("SPARK-13543 Write the output as uncompressed via option()") {
val extraOptions = Map[String, String](
"mapreduce.output.fileoutputformat.compress" -> "true",
"mapreduce.output.fileoutputformat.compress.type" -> CompressionType.BLOCK.toString,
"mapreduce.output.fileoutputformat.compress.codec" -> classOf[GzipCodec].getName,
"mapreduce.map.output.compress" -> "true",
"mapreduce.map.output.compress.codec" -> classOf[GzipCodec].getName
)
withTempDir { dir =>
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write
.format("json")
.option("compression", "none")
.options(extraOptions)
.save(jsonDir)
val compressedFiles = new File(jsonDir).listFiles()
assert(compressedFiles.exists(!_.getName.endsWith(".json.gz")))
val jsonCopy = spark.read
.format("json")
.options(extraOptions)
.load(jsonDir)
assert(jsonCopy.count == jsonDF.count)
val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean")
val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean")
checkAnswer(jsonCopySome, jsonDFSome)
}
}
test("Casting long as timestamp") {
withTempView("jsonTable") {
val schema = (new StructType).add("ts", TimestampType)
val jsonDF = spark.read.schema(schema).json(timestampAsLong)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select ts from jsonTable"),
Row(java.sql.Timestamp.valueOf("2016-01-02 03:04:05"))
)
}
}
test("wide nested json table") {
val nested = (1 to 100).map { i =>
s"""
|"c$i": $i
""".stripMargin
}.mkString(", ")
val json = s"""
|{"a": [{$nested}], "b": [{$nested}]}
""".stripMargin
val df = spark.read.json(Seq(json).toDS())
assert(df.schema.size === 2)
df.collect()
}
test("Write dates correctly with dateFormat option") {
val customSchema = new StructType(Array(StructField("date", DateType, true)))
withTempDir { dir =>
// With dateFormat option.
val datesWithFormatPath = s"${dir.getCanonicalPath}/datesWithFormat.json"
val datesWithFormat = spark.read
.schema(customSchema)
.option("dateFormat", "dd/MM/yyyy HH:mm")
.json(datesRecords)
datesWithFormat.write
.format("json")
.option("dateFormat", "yyyy/MM/dd")
.save(datesWithFormatPath)
// This will load back the dates as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringDatesWithFormat = spark.read
.schema(stringSchema)
.json(datesWithFormatPath)
val expectedStringDatesWithFormat = Seq(
Row("2015/08/26"),
Row("2014/10/27"),
Row("2016/01/28"))
checkAnswer(stringDatesWithFormat, expectedStringDatesWithFormat)
}
}
test("Write timestamps correctly with timestampFormat option") {
val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
withTempDir { dir =>
// With timestampFormat option.
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
val timestampsWithFormat = spark.read
.schema(customSchema)
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.json(datesRecords)
timestampsWithFormat.write
.format("json")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.save(timestampsWithFormatPath)
// This will load back the timestamps as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringTimestampsWithFormat = spark.read
.schema(stringSchema)
.json(timestampsWithFormatPath)
val expectedStringDatesWithFormat = Seq(
Row("2015/08/26 18:00"),
Row("2014/10/27 18:30"),
Row("2016/01/28 20:00"))
checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)
}
}
test("Write timestamps correctly with timestampFormat option and timeZone option") {
val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
withTempDir { dir =>
// With timestampFormat option and timeZone option.
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
val timestampsWithFormat = spark.read
.schema(customSchema)
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.json(datesRecords)
timestampsWithFormat.write
.format("json")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
.save(timestampsWithFormatPath)
// This will load back the timestamps as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringTimestampsWithFormat = spark.read
.schema(stringSchema)
.json(timestampsWithFormatPath)
val expectedStringDatesWithFormat = Seq(
Row("2015/08/27 01:00"),
Row("2014/10/28 01:30"),
Row("2016/01/29 04:00"))
checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)
val readBack = spark.read
.schema(customSchema)
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
.json(timestampsWithFormatPath)
checkAnswer(readBack, timestampsWithFormat)
}
}
test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
val records = Seq("""{"a": 3, "b": 1.1}""", """{"a": 3.1, "b": 0.000001}""").toDS()
val schema = StructType(
StructField("a", DecimalType(21, 1), true) ::
StructField("b", DecimalType(7, 6), true) :: Nil)
val df1 = spark.read.option("prefersDecimal", "true").json(records)
assert(df1.schema == schema)
val df2 = spark.read.option("PREfersdecimaL", "true").json(records)
assert(df2.schema == schema)
}
test("SPARK-18352: Parse normal multi-line JSON files (compressed)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
primitiveFieldAndType
.toDF("value")
.write
.option("compression", "GzIp")
.text(path)
assert(new File(path).listFiles().exists(_.getName.endsWith(".gz")))
val jsonDF = spark.read.option("multiLine", true).json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write
.option("compression", "gZiP")
.json(jsonDir)
assert(new File(jsonDir).listFiles().exists(_.getName.endsWith(".json.gz")))
val originalData = spark.read.json(primitiveFieldAndType)
checkAnswer(jsonDF, originalData)
checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
}
}
test("SPARK-18352: Parse normal multi-line JSON files (uncompressed)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
primitiveFieldAndType
.toDF("value")
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write.json(jsonDir)
val compressedFiles = new File(jsonDir).listFiles()
assert(compressedFiles.exists(_.getName.endsWith(".json")))
val originalData = spark.read.json(primitiveFieldAndType)
checkAnswer(jsonDF, originalData)
checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
}
}
test("SPARK-18352: Expect one JSON document per file") {
// the json parser terminates as soon as it sees a matching END_OBJECT or END_ARRAY token.
// this might not be the optimal behavior but this test verifies that only the first value
// is parsed and the rest are discarded.
// alternatively the parser could continue parsing following objects, which may further reduce
// allocations by skipping the line reader entirely
withTempPath { dir =>
val path = dir.getCanonicalPath
spark
.createDataFrame(Seq(Tuple1("{}{invalid}")))
.coalesce(1)
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).json(path)
// no corrupt record column should be created
assert(jsonDF.schema === StructType(Seq()))
// only the first object should be read
assert(jsonDF.count() === 1)
}
}
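// Illustrative sketch (not an original test): with multiLine enabled a single JSON object may span
// several physical lines of one file and still parse as one row. Test name and data are
// hypothetical.
test("Illustration: multiLine parses a pretty-printed object (sketch)") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    Seq("{\n  \"a\": 1,\n  \"b\": \"str\"\n}").toDF("value").write.text(path)
    val df = spark.read.option("multiLine", true).json(path)
    checkAnswer(df, Row(1L, "str"))
  }
}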
test("SPARK-18352: Handle multi-line corrupt documents (PERMISSIVE)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val corruptRecordCount = additionalCorruptRecords.count().toInt
assert(corruptRecordCount === 5)
additionalCorruptRecords
.toDF("value")
// this is the minimum partition count that avoids hash collisions
.repartition(corruptRecordCount * 4, F.hash($"value"))
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).option("mode", "PERMISSIVE").json(path)
assert(jsonDF.count() === corruptRecordCount)
assert(jsonDF.schema === new StructType()
.add("_corrupt_record", StringType)
.add("dummy", StringType))
val counts = jsonDF
.join(
additionalCorruptRecords.toDF("value"),
F.regexp_replace($"_corrupt_record", "(^\\\\s+|\\\\s+$)", "") === F.trim($"value"),
"outer")
.agg(
F.count($"dummy").as("valid"),
F.count($"_corrupt_record").as("corrupt"),
F.count("*").as("count"))
checkAnswer(counts, Row(1, 4, 6))
}
}
test("SPARK-19641: Handle multi-line corrupt documents (DROPMALFORMED)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val corruptRecordCount = additionalCorruptRecords.count().toInt
assert(corruptRecordCount === 5)
additionalCorruptRecords
.toDF("value")
// this is the minimum partition count that avoids hash collisions
.repartition(corruptRecordCount * 4, F.hash($"value"))
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).option("mode", "DROPMALFORMED").json(path)
checkAnswer(jsonDF, Seq(Row("test")))
}
}
test("SPARK-18352: Handle multi-line corrupt documents (FAILFAST)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val corruptRecordCount = additionalCorruptRecords.count().toInt
assert(corruptRecordCount === 5)
additionalCorruptRecords
.toDF("value")
// this is the minimum partition count that avoids hash collisions
.repartition(corruptRecordCount * 4, F.hash($"value"))
.write
.text(path)
val schema = new StructType().add("dummy", StringType)
// `FAILFAST` mode should throw an exception for corrupt records.
val exceptionOne = intercept[SparkException] {
spark.read
.option("multiLine", true)
.option("mode", "FAILFAST")
.json(path)
}
assert(exceptionOne.getMessage.contains("Failed to infer a common schema"))
val exceptionTwo = intercept[SparkException] {
spark.read
.option("multiLine", true)
.option("mode", "FAILFAST")
.schema(schema)
.json(path)
.collect()
}
assert(exceptionTwo.getMessage.contains("Failed to parse a value"))
}
}
test("Throw an exception if a `columnNameOfCorruptRecord` field violates requirements") {
val columnNameOfCorruptRecord = "_unparsed"
val schema = StructType(
StructField(columnNameOfCorruptRecord, IntegerType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
val errMsg = intercept[AnalysisException] {
spark.read
.option("mode", "Permissive")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schema)
.json(corruptRecords)
}.getMessage
assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
    // We use `PERMISSIVE` mode by default if an invalid string is given.
withTempPath { dir =>
val path = dir.getCanonicalPath
corruptRecords.toDF("value").write.text(path)
val errMsg = intercept[AnalysisException] {
spark.read
.option("mode", "permm")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schema)
.json(path)
.collect
}.getMessage
assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
}
}
test("SPARK-18772: Parse special floats correctly") {
val jsons = Seq(
"""{"a": "NaN"}""",
"""{"a": "Infinity"}""",
"""{"a": "-Infinity"}""")
// positive cases
val checks: Seq[Double => Boolean] = Seq(
_.isNaN,
_.isPosInfinity,
_.isNegInfinity)
Seq(FloatType, DoubleType).foreach { dt =>
jsons.zip(checks).foreach { case (json, check) =>
val ds = spark.read
.schema(StructType(Seq(StructField("a", dt))))
.json(Seq(json).toDS())
.select($"a".cast(DoubleType)).as[Double]
assert(check(ds.first()))
}
}
// negative cases
Seq(FloatType, DoubleType).foreach { dt =>
val lowerCasedJsons = jsons.map(_.toLowerCase(Locale.ROOT))
// The special floats are case-sensitive so these cases below throw exceptions.
lowerCasedJsons.foreach { lowerCasedJson =>
val e = intercept[SparkException] {
spark.read
.option("mode", "FAILFAST")
.schema(StructType(Seq(StructField("a", dt))))
.json(Seq(lowerCasedJson).toDS())
.collect()
}
assert(e.getMessage.contains("Cannot parse"))
}
}
}
}
| wangyixiaohuihui/spark2-annotation | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala | Scala | apache-2.0 | 75,068 |
package org.showgregator.core.util
import java.nio.ByteBuffer
object ByteBuffers {
implicit class AsByteArray(buffer: ByteBuffer) {
def asBytes:Array[Byte] = {
val a = new Array[Byte](buffer.remaining())
buffer.get(a)
a
}
}
}
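// Illustrative usage sketch (not part of the original file): pulling the remaining
// bytes out of a buffer via the implicit AsByteArray wrapper defined above.
object ByteBuffersExample {
  import ByteBuffers._

  def main(args: Array[String]): Unit = {
    val buf = ByteBuffer.wrap(Array[Byte](1, 2, 3))
    val bytes = buf.asBytes // consumes the buffer's remaining bytes
    println(bytes.toList)   // prints List(1, 2, 3)
  }
}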
| csm/showgregator | showgregator-core/src/main/scala/org/showgregator/core/util/ByteBuffers.scala | Scala | agpl-3.0 | 258 |
object test {
def make[m[x], b]: m[b] = sys.error("foo")
val lst: List[Int] = make[List, Int]
}
| yusuke2255/dotty | tests/untried/pos/tcpoly_subst.scala | Scala | bsd-3-clause | 100 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import java.util
import java.util.Comparator
import joptsimple.OptionParser
import org.apache.kafka.common.{Metric, MetricName}
import scala.collection.immutable.ListMap
import scala.collection.mutable
object ToolsUtils {
def validatePortOrDie(parser: OptionParser, hostPort: String) = {
val hostPorts: Array[String] = if(hostPort.contains(','))
hostPort.split(",")
else
Array(hostPort)
val validHostPort = hostPorts.filter {
hostPortData =>
org.apache.kafka.common.utils.Utils.getPort(hostPortData) != null
}
val isValid = !validHostPort.isEmpty && validHostPort.size == hostPorts.length
if(!isValid)
      CommandLineUtils.printUsageAndDie(parser, "Please provide valid host:port like host1:9091,host2:9092\n ")
}
/**
* print out the metrics in alphabetical order
* @param metrics the metrics to be printed out
*/
def printMetrics(metrics: mutable.Map[MetricName, _ <: Metric]): Unit = {
var maxLengthOfDisplayName = 0
val sortedMap = metrics.toSeq.sortWith( (s,t) =>
Array(s._1.group(), s._1.name(), s._1.tags()).mkString(":")
.compareTo(Array(t._1.group(), t._1.name(), t._1.tags()).mkString(":")) < 0
).map {
case (key, value) =>
val mergedKeyName = Array(key.group(), key.name(), key.tags()).mkString(":")
if (maxLengthOfDisplayName < mergedKeyName.length) {
maxLengthOfDisplayName = mergedKeyName.length
}
(mergedKeyName, value.value())
}
    println(s"\n%-${maxLengthOfDisplayName}s %s".format("Metric Name", "Value"))
sortedMap.foreach {
case (metricName, value) =>
println(s"%-${maxLengthOfDisplayName}s : %.3f".format(metricName, value))
}
}
}
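// Illustrative usage sketch (not part of the original file): a typical call site in a
// command-line tool, validating the user-supplied broker list before doing any work.
private object ToolsUtilsExample {
  def validateBrokerList(parser: OptionParser, brokerList: String): Unit = {
    // Prints the parser's usage text and exits if any host:port entry is malformed.
    ToolsUtils.validatePortOrDie(parser, brokerList)
  }
}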
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/main/scala/kafka/utils/ToolsUtils.scala | Scala | apache-2.0 | 2,558 |
import munit.FunSuite
import org.scoverage.issue53.part.b.SubtractorScala
/** Created by Mikhail Kokho on 7/10/2015.
*/
class SubtractorTestSuite extends FunSuite {
test("Subtractor should subtract two numbers") {
assertEquals(SubtractorScala.minus(2, 1), 1)
}
}
| scoverage/sbt-scoverage | src/sbt-test/scoverage/aggregate-only/partB/src/test/scala/SubtractorTestSuite.scala | Scala | apache-2.0 | 274 |
package core.models
import play.api.Play
import play.api._
import play.api.mvc._
import play.api.db.slick.{DatabaseConfigProvider, HasDatabaseConfigProvider}
import scala.concurrent.Future
import slick.driver.JdbcProfile
import slick.driver.MySQLDriver.api._
import scala.concurrent.ExecutionContext.Implicits.global
import javax.inject.Singleton
import javax.inject._
import play.api.Play.current
import java.sql.Timestamp
import slick.profile.SqlProfile.ColumnOption.SqlType
case class Asset(id : Long , parent_id : Long, name : String, mimetype : String, collapsed : Boolean, path:String, server_path:String, filesize:Long, created_at:Timestamp )
case class AssetTree(id : Long, key: String, path:String, server_path:String, label : String, mimetype : String, collapsed : Boolean, children: List[AssetTree])
class AssetTableDef(tag: Tag) extends Table[Asset](tag, "assets") {
def id = column[Long]("id", O.PrimaryKey,O.AutoInc)
def parent_id = column[Long]("parent_id")
def name = column[String]("name")
def mimetype = column[String]("mimetype")
def path = column[String]("path")
def server_path = column[String]("server_path")
def collapsed = column[Boolean]("collapsed")
def filesize = column[Long]("filesize")
def created_at = column[Timestamp]("created_at", SqlType("timestamp not null default CURRENT_TIMESTAMP"))
override def * =
(id, parent_id, name, mimetype, collapsed, path, server_path, filesize, created_at) <>(Asset.tupled, Asset.unapply)
}
@Singleton
class Assets @Inject()(protected val dbConfigProvider: DatabaseConfigProvider, conf:Configuration) extends HasDatabaseConfigProvider[JdbcProfile] {
val assets = TableQuery[AssetTableDef]
val insertQuery = assets returning assets.map(_.id) into ((asset, id) => asset.copy(id = id))
def listAll():Future[Seq[Asset]] = {
dbConfig.db.run(assets.result)
}
def listJson():Future[List[AssetTree]] = {
def generateList(d:List[Asset],parentid:Long):List[AssetTree] = {
d.filter(x => x.parent_id == parentid).sortBy(a => a.mimetype)
.map(x => {
val childItems = generateList(d, x.id)
AssetTree(
id = x.id,
key = x.name,
path = x.path,
server_path = x.server_path,
label = x.name,
mimetype = x.mimetype,
collapsed = x.collapsed,
children = childItems.filter(_.mimetype == "folder") ++ childItems.filter(_.mimetype != "folder")
)
})
}
listAll map (x => generateList(x.toList, 0))
}
def getByName(name:String) = dbConfig.db.run {
assets.filter(_.name === name).result.headOption
}
def getByPath(path:String) = dbConfig.db.run {
assets.filter(_.path === path).result.headOption
}
def getById(id:Long) = dbConfig.db.run {
assets.filter(_.id === id).result.headOption
}
def create(asset:Asset):Future[Asset] = {
dbConfig.db.run( insertQuery += asset )
}
def delete(id:Long):Future[Int] = {
val subitems:Future[Seq[Asset]] = dbConfig.db.run(assets.filter(_.parent_id === id).result)
subitems.map(items => {
items.map(x => delete(x.id))
})
val assetdir = conf.getString("elestic.uploadroot").getOrElse("")
(dbConfig.db.run(assets.filter(_.id === id).result.headOption)).map(assetOpt => {
assetOpt.map(asset => {
val file = new java.io.File(assetdir + asset.server_path)
if(file.exists) {
file.delete();
}
})
})
val action = assets.filter(_.id === id).delete
dbConfig.db.run(action)
}
def update(asset:Asset):Future[Asset] = dbConfig.db.run {
assets.filter(_.id === asset.id).update(asset).map (x => asset)
}
def setCollapsed(id:Long, state:Boolean):Future[Int] = dbConfig.db.run {
assets.filter(_.id === id).map(_.collapsed).update(state)
}
def setName(id:Long, name:String):Future[Int] = {
(dbConfig.db.run {
assets.filter(_.id === id).map(_.name).update(name)
}) flatMap (x => {
getById(id) flatMap (assetOpt => assetOpt match {
case Some(asset) => {
getById(asset.parent_id) flatMap ( pAssetOpt => pAssetOpt match {
case Some(parentAsset) => updatePath(parentAsset)
case None => Future(0)
})
}
case None => Future(x)
})
})
}
def updateParent(id:Long, parent_id:Long):Future[Int] = {
(dbConfig.db.run {
assets.filter(_.id === id).map(_.parent_id).update(parent_id)
}) flatMap ( x => {
getById(parent_id) flatMap ( assetOpt => assetOpt match {
case Some(asset) => updatePath(asset)
case None => Future(0)
})
})
}
def getByParentId(parent_id:Long) = dbConfig.db.run {
assets.filter(_.parent_id === parent_id).result
}
def updatePath(parentObj:Asset):Future[Int] = {
getByParentId(parentObj.id) map (childs => {
val updatedChilds = {
if(parentObj.mimetype == "home") {
childs map (x => update(x.copy(path = "/" + x.name)))
} else {
childs map (x => update(x.copy(path = parentObj.path + "/" + x.name)))
}
}
updatedChilds foreach (x => x map (asset => {
updatePath(asset)
}))
updatedChilds.length
})
}
}
| WernerLDev/ScalaPlayCMS | app/core/models/asset.scala | Scala | gpl-3.0 | 5,786 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnetexamples.imclassification.datasets
import org.apache.mxnet.DType.DType
import org.apache.mxnet._
import scala.collection.immutable.ListMap
import scala.util.Random
class SyntheticDataIter(numClasses: Int, val batchSize: Int, datumShape: List[Int],
labelShape: List[Int], maxIter: Int, dType: DType = DType.Float32
) extends DataIter {
var curIter = 0
val random = new Random()
val shape = Shape(batchSize :: datumShape)
val batchLabelShape = Shape(batchSize :: labelShape)
val maxLabel = if (labelShape.isEmpty) numClasses.toFloat else 1f
var label: IndexedSeq[NDArray] = IndexedSeq(
NDArray.api.random_uniform(Some(0f), Some(maxLabel), shape = Some(batchLabelShape)))
var data: IndexedSeq[NDArray] = IndexedSeq(
NDArray.api.random_uniform(shape = Some(shape), dtype = Some(dType.toString)))
val provideDataDesc: IndexedSeq[DataDesc] = IndexedSeq(
new DataDesc("data", shape, data(0).dtype, Layout.UNDEFINED))
val provideLabelDesc: IndexedSeq[DataDesc] = IndexedSeq(
new DataDesc("softmax_label", batchLabelShape, label(0).dtype, Layout.UNDEFINED))
val getPad: Int = 0
override def getData(): IndexedSeq[NDArray] = data
override def getIndex: IndexedSeq[Long] = IndexedSeq(curIter)
override def getLabel: IndexedSeq[NDArray] = label
override def hasNext: Boolean = curIter < maxIter - 1
override def next(): DataBatch = {
if (hasNext) {
curIter += batchSize
new DataBatch(data, label, getIndex, getPad)
} else {
throw new NoSuchElementException
}
}
override def reset(): Unit = {
curIter = 0
}
override def provideData: ListMap[String, Shape] = ListMap("data" -> shape)
override def provideLabel: ListMap[String, Shape] = ListMap("softmax_label" -> batchLabelShape)
}
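// Illustrative usage sketch (not part of the original file): iterating over synthetic
// MNIST-shaped batches. The constructor allocates NDArrays eagerly, so the native MXNet
// backend must be loadable at runtime.
object SyntheticDataIterExample {
  def main(args: Array[String]): Unit = {
    val iter = new SyntheticDataIter(
      numClasses = 10,
      batchSize = 32,
      datumShape = List(1, 28, 28),
      labelShape = List(),
      maxIter = 10)
    while (iter.hasNext) {
      val batch = iter.next()
      // feed batch.data / batch.label into a training module here
    }
    iter.reset()
  }
}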
| zhreshold/mxnet | scala-package/examples/src/main/scala/org/apache/mxnetexamples/imclassification/datasets/SyntheticDataIter.scala | Scala | apache-2.0 | 2,644 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
import play.api.libs.json._
case class CurrentPensionsModel (currentPensionsAmt: Option[BigDecimal]) extends AmountModel {
override def getAmount = currentPensionsAmt
}
object CurrentPensionsModel {
implicit val format = Json.format[CurrentPensionsModel]
}
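// Illustrative usage sketch (not part of the original file): the implicit format above
// round-trips the model through Play JSON.
private object CurrentPensionsModelExample {
  def roundTrips(): Boolean = {
    val model = CurrentPensionsModel(Some(BigDecimal("1001.25")))
    Json.toJson(model).as[CurrentPensionsModel] == model
  }
}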
| hmrc/pensions-lifetime-allowance-frontend | app/models/CurrentPensionsModel.scala | Scala | apache-2.0 | 884 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.ctrl.checkers
import cmwell.ctrl.utils.ProcUtil
import com.typesafe.scalalogging.LazyLogging
import scala.concurrent.Future
import java.net._
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}
/**
* Created by michael on 1/5/15.
*/
case class DiskUsage(name : String, usage : Float)
object SystemChecker extends Checker with LazyLogging{
private def deviceUsage : Set[DiskUsage] = {
val resTry = ProcUtil.executeCommand("""df -h | awk '{print $5 " " $6}' | awk -F "% " '{print $1 " " $2}' | tail -n+2""")
resTry match {
case Success(res) =>
res.trim.split("\n").map{
t =>
val vals = t.split(" ")
DiskUsage(vals(1), vals(0).toFloat)
}.toSet
case Failure(err) =>
logger.error("Couldn't retrieve disk usage", err)
Set.empty[DiskUsage]
}
}
override val storedStates: Int = 10
override def check: Future[ComponentState] = {
// val usages = deviceUsage
// usages.foreach {
// usage =>
// val name = usage.name
// val usagePercent = usage.usage
// logger.info(s"DiskUsage: $name $usagePercent")
// }
val interfaces = (NetworkInterface.getNetworkInterfaces.asScala flatMap (_.getInetAddresses.asScala.toList) filter (addr => addr != null && addr.getHostAddress.matches("""\d+.\d+.\d+.\d+"""))).toVector
val name = InetAddress.getLocalHost().getHostName().split('.')(0)
Future.successful(SystemResponse(interfaces.map(inet => inet.getHostAddress), name))
}
}
| nruppin/CM-Well | server/cmwell-controller/src/main/scala/cmwell/ctrl/checkers/SystemChecker.scala | Scala | apache-2.0 | 2,251 |
package dk.gp.mtgpc
import breeze.linalg._
import java.io._
object getMtGpcTestData {
/**
* Returns (x,y)
*/
def apply(): (DenseMatrix[Double], DenseVector[Double]) = {
val x = csvread(new File("src/test/resources/gpml/classification_x.csv"))
val y = csvread(new File("src/test/resources/gpml/classification_y.csv")).toDenseVector
val x1Idx = (0 until x.rows - 1).filter(idx => idx % 2 == 0)
val x2Idx = (0 until x.rows - 1).filter(idx => idx % 2 == 1)
val x1 = DenseMatrix.horzcat(DenseMatrix.zeros[Double](x1Idx.size, 1) + 1.0, x(x1Idx, ::))
val x2 = DenseMatrix.horzcat(DenseMatrix.zeros[Double](x2Idx.size, 1) + 2.0, x(x2Idx, ::))
val x3 = DenseMatrix.horzcat(DenseMatrix.zeros[Double](1, 1) + 3.0, x(x.rows - 1 to x.rows - 1, ::))
val allX = DenseMatrix.vertcat(x1, x2, x3)
val allY = DenseVector.vertcat(y(x1Idx).toDenseVector, y(x2Idx).toDenseVector, y(19 to 19))
(allX, allY)
}
}
| danielkorzekwa/bayes-scala-gp | src/test/scala/dk/gp/mtgpc/getMtGpcTestData.scala | Scala | bsd-2-clause | 947 |
package io.github.binaryfoo.lagotto.doc
import java.io.{ByteArrayOutputStream, File, FileWriter, PrintWriter}
import java.nio.file.{Files, StandardCopyOption}
import io.github.binaryfoo.lagotto.shell.{IsATty, Main}
import org.asciidoctor.internal.JRubyAsciidoctor
import org.asciidoctor.{AsciiDocDirectoryWalker, OptionsBuilder, SafeMode}
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
object RunExamples {
val Example = " LAGO: (.*)".r
def main(args: Array[String]) {
val outputFile = new File("docs/examples.adoc")
val out = new PrintWriter(new FileWriter(outputFile))
Source.fromFile("src/docs/examples.adoc").getLines().foreach {
case Example(arguments) =>
        out.println(" lago " + arguments + "\n")
out.println(indent(lagoOutputFrom(cleanAndSplit(arguments))))
case line =>
out.println(line)
}
out.close()
move("rtts.csv")
move("rtts.gp")
move("rtts.svg")
move("gc-example-chart.svg")
val asciiDoctor = JRubyAsciidoctor.create()
asciiDoctor.renderDirectory(new AsciiDocDirectoryWalker("docs"), OptionsBuilder.options().safe(SafeMode.UNSAFE))
asciiDoctor.unregisterAllExtensions()
asciiDoctor.shutdown()
val siteDir = new File(if (args.isEmpty) "target/site" else args(0))
siteDir.mkdirs()
for (f <- new File("docs").listFiles()) {
println(s"Copying $f to $siteDir")
Files.copy(f.toPath, new File(siteDir, f.getName).toPath, StandardCopyOption.REPLACE_EXISTING)
}
}
def move(file: String): Unit = {
Files.move(new File(file).toPath, new File(s"docs/$file").toPath, StandardCopyOption.REPLACE_EXISTING)
}
def cleanAndSplit(arguments: String): Array[String] = {
var quoted = false
val current = new StringBuilder
val args = new ArrayBuffer[String]()
arguments.foreach {
      case '\'' => quoted = !quoted
case x if quoted => current.append(x)
case ' ' =>
if (current.nonEmpty) {
args += current.toString()
current.delete(0, current.length)
}
case x => current.append(x)
}
if (current.nonEmpty)
args += current.toString()
args.toArray
}
def lagoOutputFrom(args: Array[String]): String = {
println(s"Running ${args.mkString(" ")}")
try {
System.setProperty("single.thread", "true")
IsATty.enabled = false
val out = new ByteArrayOutputStream()
Console.withOut(out) {
Main.main(args.toArray)
}
out.toString
} finally {
System.clearProperty("single.thread")
}
}
def indent(s: String): String = {
    Source.fromString(s).getLines().map(" " + _).mkString("\n")
}
}
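// Illustrative check (not part of the original file): cleanAndSplit keeps single-quoted
// arguments together while splitting the rest on spaces.
private object CleanAndSplitExample {
  def demo(): Unit = {
    val parts = RunExamples.cleanAndSplit("--field 'mti nmic' file.log")
    assert(parts.sameElements(Array("--field", "mti nmic", "file.log")))
  }
}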
| binaryfoo/lagotto | src/test/scala/io/github/binaryfoo/lagotto/doc/RunExamples.scala | Scala | mit | 2,697 |
/*
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.runtime
package graph
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import org.apache.spark.rdd.RDD
import com.asakusafw.spark.runtime.rdd.BranchKey
abstract class MapPartitions[T, U: ClassTag](
parent: Source,
branchKey: BranchKey,
f: (Int, Iterator[T]) => Iterator[U],
preservesPartitioning: Boolean = false)(
implicit val jobContext: JobContext) extends Source with Source.Ops {
self: CacheStrategy[RoundContext, Map[BranchKey, Future[() => RDD[_]]]] =>
override val label: String = parent.label
override def doCompute(
rc: RoundContext)(implicit ec: ExecutionContext): Map[BranchKey, Future[() => RDD[_]]] = {
val prevs = parent.compute(rc)
prevs.updated(
branchKey,
prevs(branchKey).map { rddF =>
() => rddF().asInstanceOf[RDD[T]].mapPartitionsWithIndex(f, preservesPartitioning)
})
}
}
| ueshin/asakusafw-spark | runtime/src/main/scala/com/asakusafw/spark/runtime/graph/MapPartitions.scala | Scala | apache-2.0 | 1,533 |
package objsets
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TweetSetSuite extends FunSuite {
trait TestSets {
val set1 = new Empty
val set2 = set1.incl(new Tweet("a", "a body", 20))
val set3 = set2.incl(new Tweet("b", "b body", 20))
val c = new Tweet("c", "c body", 7)
val d = new Tweet("d", "d body", 9)
val set4c = set3.incl(c)
val set4d = set3.incl(d)
val set5 = set4c.incl(d)
val setWithMost = set2.incl(c)
}
def asSet(tweets: TweetSet): Set[Tweet] = {
var res = Set[Tweet]()
tweets.foreach(res += _)
res
}
def size(set: TweetSet): Int = asSet(set).size
test("filter: on empty set") {
new TestSets {
assert(size(set1.filter(tw => tw.user == "a")) === 0)
}
}
test("filter: a on set5") {
new TestSets {
assert(size(set5.filter(tw => tw.user == "a")) === 1)
}
}
test("filter: 20 on set5") {
new TestSets {
assert(size(set5.filter(tw => tw.retweets == 20)) === 2)
}
}
test("filter: not 7 on set5") {
new TestSets {
assert(size(set5.filter(tw => tw.retweets != 7)) === 3)
}
}
test("union: set4c and set4d") {
new TestSets {
assert(size(set4c.union(set4d)) === 4)
}
}
test("union: with empty set (1)") {
new TestSets {
assert(size(set5.union(set1)) === 4)
}
}
test("union: with empty set (2)") {
new TestSets {
assert(size(set1.union(set5)) === 4)
}
}
test("can find most retweeted") {
new TestSets {
assert(size(setWithMost) === 2)
assert(setWithMost.mostRetweeted.text === "a body")
}
}
test("descending: set5") {
new TestSets {
val trends = set5.descendingByRetweet
assert(!trends.isEmpty)
assert(trends.head.user == "a" || trends.head.user == "b")
}
}
}
| kharandziuk/scala-coursera | objsets/src/test/scala/objsets/TweetSetSuite.scala | Scala | mit | 1,903 |
package com.amarjanica.discourse.util
trait HasParameters extends Product {
val parameters: Map[String, Any] = {
getClass.getDeclaredFields.map( _.getName ) // all field names
.zip( productIterator.to ).toMap
}
}
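// Illustrative usage sketch (not part of the original file): a case class mixing in
// HasParameters exposes its constructor fields as a name -> value map, e.g.
// ExampleQuery("scala", 2).parameters is expected to be Map("term" -> "scala", "page" -> 2).
private case class ExampleQuery(term: String, page: Int) extends HasParameters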
| amarjanica/discourse-scala-client | src/main/scala/com/amarjanica/discourse/util/HasParameters.scala | Scala | mit | 240 |
package hello
import org.springframework.boot._
import org.springframework.boot.autoconfigure._
import org.springframework.stereotype._
import org.springframework.web.bind.annotation._
import org.springframework.context.annotation.Configuration
import org.springframework.context.annotation.ComponentScan
/**
 * This config class will trigger Spring @annotation scanning and auto-configure the Spring context.
*
* @author saung
* @since 1.0
*/
@Controller
@Configuration
@EnableAutoConfiguration
@ComponentScan
class HelloConfig {
@RequestMapping(Array("/"))
@ResponseBody
def home(): String = "Hello World!"
}
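// Illustrative launcher sketch (not part of the original file): boots the embedded server
// with this configuration class; assumes spring-boot-starter-web is on the classpath.
object HelloWebApplication {
  def main(args: Array[String]): Unit = {
    SpringApplication.run(classOf[HelloConfig], args: _*)
  }
}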
| swapniljoshi15/hello-world | src/main/scala/hello/HelloConfig.scala | Scala | mit | 643 |
package io.youi.font
import scala.concurrent.Future
case class OpenTypeFont(otf: opentype.Font) extends Font {
private var glyphs = Map.empty[Char, OpenTypeGlyph]
lazy val cached: CachedFont = new CachedFont(this)
override def glyph(char: Char): Glyph = glyphs.get(char) match {
case Some(g) => g
case None => {
val g = OpenTypeGlyph(this, char, otf.charToGlyph(char.toString), otf.unitsPerEm)
glyphs += char -> g
g
}
}
override def kerning(first: Glyph, second: Glyph, size: Double): Double = {
otf.getKerningValue(first.asInstanceOf[OpenTypeGlyph].otg, second.asInstanceOf[OpenTypeGlyph].otg) * (1.0 / otf.unitsPerEm * size)
}
override def lineHeight(size: Double): Double = ascender(size) - descender(size)
override def ascender(size: Double): Double = otf.ascender * (1.0 / otf.unitsPerEm * size)
override def descender(size: Double): Double = otf.descender * (1.0 / otf.unitsPerEm * size)
override protected def createBuilder(text: String, size: Double, maxWidth: Double, kerning: Boolean): TextBuilder = {
new OpenTypeTextBuilder(this, text, size, maxWidth, kerning)
}
}
object OpenTypeFont {
private var pathMap = Map.empty[String, OpenTypeFont]
def fromPath(path: String, cached: Boolean = true): Future[Font] = {
val openTypeFuture = pathMap.get(path) match {
case Some(font) => Future.successful(font)
case None => {
opentype.OpenType.load(path).map { otf =>
val font = new OpenTypeFont(otf)
pathMap += path -> font
font
}
}
}
if (cached) {
openTypeFuture.map(_.cached)
} else {
openTypeFuture
}
}
def fromURL(url: URL, cached: Boolean = true): Future[Font] = fromPath(url.toString)
}
| outr/youi | ui/js/src/main/scala/io/youi/font/OpenTypeFont.scala | Scala | mit | 1,770 |
package common.validation
import cats.data.NonEmptyList
import cats.syntax.either._
import common.Checked
object Checked {
implicit class ValidCheck[A](val obj: A) extends AnyVal {
def validNelCheck: Checked[A] = obj.asRight[NonEmptyList[String]]
}
implicit class InvalidCheck(val obj: String) extends AnyVal {
def invalidNelCheck[A]: Checked[A] = NonEmptyList.one(obj).asLeft[A]
}
}
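// Illustrative usage sketch (not part of the original file): the syntax the two implicit
// classes above provide, assuming common.Checked[A] aliases Either[NonEmptyList[String], A].
private object CheckedExample {
  import Checked._

  def parsePort(raw: String): Checked[Int] =
    if (raw.nonEmpty && raw.forall(_.isDigit)) raw.toInt.validNelCheck
    else s"'$raw' is not a valid port number".invalidNelCheck[Int]
}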
| ohsu-comp-bio/cromwell | common/src/main/scala/common/validation/Checked.scala | Scala | bsd-3-clause | 403 |
package org.jetbrains.plugins.scala.lang.psi
import com.intellij.openapi.util.text.StringUtil
import com.intellij.psi.{PsiAnnotationSupport, PsiElement, PsiLiteral}
import org.jetbrains.plugins.scala.lang.psi.api.base.ScLiteral
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createExpressionFromText
/**
* @author Alexander Podkhalyuzin
*/
class ScalaAnnotationSupport extends PsiAnnotationSupport {
def createLiteralValue(value: String, context: PsiElement): PsiLiteral =
    createExpressionFromText("\"" + StringUtil.escapeStringCharacters(value) + "\"")(context.getManager)
.asInstanceOf[ScLiteral]
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/ScalaAnnotationSupport.scala | Scala | apache-2.0 | 642 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.util.Properties
import scala.collection.parallel.CompositeThrowable
import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.sql.SQLContext
class SQLExecutionSuite extends SparkFunSuite {
test("concurrent query execution (SPARK-10548)") {
// Try to reproduce the issue with the old SparkContext
val conf = new SparkConf()
.setMaster("local[*]")
.setAppName("test")
val badSparkContext = new BadSparkContext(conf)
try {
testConcurrentQueryExecution(badSparkContext)
fail("unable to reproduce SPARK-10548")
} catch {
case e: IllegalArgumentException =>
assert(e.getMessage.contains(SQLExecution.EXECUTION_ID_KEY))
} finally {
badSparkContext.stop()
}
// Verify that the issue is fixed with the latest SparkContext
val goodSparkContext = new SparkContext(conf)
try {
testConcurrentQueryExecution(goodSparkContext)
} finally {
goodSparkContext.stop()
}
}
/**
* Trigger SPARK-10548 by mocking a parent and its child thread executing queries concurrently.
*/
private def testConcurrentQueryExecution(sc: SparkContext): Unit = {
val sqlContext = new SQLContext(sc)
import sqlContext.implicits._
// Initialize local properties. This is necessary for the test to pass.
sc.getLocalProperties
    // Set up a thread that executes a simple SQL query.
// Before starting the thread, mutate the execution ID in the parent.
// The child thread should not see the effect of this change.
var throwable: Option[Throwable] = None
val child = new Thread {
override def run(): Unit = {
try {
sc.parallelize(1 to 100).map { i => (i, i) }.toDF("a", "b").collect()
} catch {
case t: Throwable =>
throwable = Some(t)
}
}
}
sc.setLocalProperty(SQLExecution.EXECUTION_ID_KEY, "anything")
child.start()
child.join()
// The throwable is thrown from the child thread so it doesn't have a helpful stack trace
throwable.foreach { t =>
t.setStackTrace(t.getStackTrace ++ Thread.currentThread.getStackTrace)
throw t
}
}
}
/**
* A bad [[SparkContext]] that does not clone the inheritable thread local properties
* when passing them to children threads.
*/
private class BadSparkContext(conf: SparkConf) extends SparkContext(conf) {
protected[spark] override val localProperties = new InheritableThreadLocal[Properties] {
override protected def childValue(parent: Properties): Properties = new Properties(parent)
override protected def initialValue(): Properties = new Properties()
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/core/src/test/scala/org/apache/spark/sql/execution/SQLExecutionSuite.scala | Scala | apache-2.0 | 3,528 |
/**
* Copyright 2015 Lorand Szakacs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.lorandszakacs.util.html
import org.scalatest.{Matchers, FunSpec}
import com.lorandszakacs.util.html.data._
/**
* @author Lorand Szakacs, [email protected]
* @since 16 Mar 2015
*
*/
class HtmlProcessorTest extends FunSpec with Matchers {
describe("Tag filter") {
it("should return the only tag") {
val html = Html(SimplifiedData.FilterTag.SingleTag)
val tag = html filter Tag("a")
tag should have length 1
}
it("should return both flat tags") {
val html = Html(SimplifiedData.FilterTag.FlatTags)
val tag = html filter Tag("a")
tag should have length 2
}
it("should return all three nested tags") {
val html = Html(SimplifiedData.FilterTag.NestedTags)
val tag = html filter Tag("a")
tag should have length 3
}
it("should return the elements with the specified tag from within all the top level elements") {
val html = Html(SimplifiedData.FilterTag.NestedTagsWithinFlatTags)
val tag = html filter Tag("a")
tag should have length 6
tag(0) should startWith( """<a href="first-link""")
tag(5) should startWith( """<a href="second/third-link""")
}
it("should return `Nil` when looking for a tag that doesn't exists") {
val html = Html(SimplifiedData.FilterTag.NestedTags)
val tag = html filter Tag("no-such-tag")
tag shouldBe empty
}
it("should return all 45 elements with tag `li`") {
val html = Html(RealLifeData.PhotoSetOfTheDay)
val tags = html filter Tag("li")
tags should have length 45
tags.zipWithIndex.foreach { pair =>
val (tag, index) = pair
withClue(s"@ index $index ...") {
tag should startWith( """<li class="photo-container"""")
}
}
}
}
//==========================================================================
//==========================================================================
describe("Class filter") {
it("should return the only class") {
val html = Html(SimplifiedData.FilterClass.SingleClass)
val clazz = html filter Class("meta-data")
clazz should have length 1
}
it("should return both flat classes") {
val html = Html(SimplifiedData.FilterClass.FlatClasses)
val clazz = html filter Class("meta-data")
clazz should have length 2
}
it("should return both nested classes") {
val html = Html(SimplifiedData.FilterClass.NestedClasses)
val clazz = html filter Class("meta-data")
clazz should have length 2
}
    //FIXME: this fails because of a bug in Jsoup where classes with spaces are parsed as two separate classes.
ignore("should return elements with classes containing spaces") {
val html = Html(SimplifiedData.FilterClass.ClassWithSpaceInTheName)
val clazz = html filter Class("button login")
clazz should have length 1
}
it("should return `Nil` when looking for a class that doesn't exists") {
val html = Html(SimplifiedData.FilterClass.NestedClasses)
val clazz = html filter Class("no-such-class")
clazz shouldBe empty
}
it("should filter out the 4 existing `image-section` classes") {
val html = Html(RealLifeData.AlbumPageMemberReview)
val classes = html filter Class("image-section")
classes should have length 4
}
it("should filter out the 9 existing `image-section` classes") {
val html = Html(RealLifeData.AlbumPageSetOfTheDay)
val classes = html filter Class("image-section")
classes should have length 9
}
it("should filter out the 45 existing `photo-container` classes") {
val html = Html(RealLifeData.PhotoSetOfTheDay)
val classes = html filter Class("photo-container")
classes should have length 45
classes.zipWithIndex.foreach { pair =>
val (clazz, index) = pair
withClue(s"@ index $index ...") {
clazz should startWith( """<li class="photo-container"""")
}
}
}
}
//==========================================================================
//==========================================================================
describe("Attribute filter") {
it("should filter out the 4 existing elements with a `href` attribute") {
val html = Html(RealLifeData.AlbumPageMemberReview)
val hrefs = html filter Attribute("href")
hrefs should have length 4
hrefs(0).trim() should startWith( """<a href="href1""")
hrefs(3).trim() should startWith( """<a href="href4""")
}
it("should return `Nil` when looking for an attribute that doesn't exists") {
val html = Html(SimplifiedData.FilterTag.NestedTags)
val attribute = html filter Attribute("no-such-attr")
attribute shouldBe empty
}
}
//==========================================================================
//==========================================================================
describe("grabbing the Values of elements filtered by Attributes") {
it("should return `data-index`attribute contents") {
val html = Html(RealLifeData.PhotoSetOfTheDay)
val dataIndex = html filter Value(Attribute("data-index"))
dataIndex should have length 45
assertResult(expected = "0")(dataIndex(0))
assertResult(expected = "44")(dataIndex(44))
}
}
//==========================================================================
//==========================================================================
describe("HrefLink filter") {
it("should filter out the one link in the html") {
val html = Html(SimplifiedData.FilterLink.SingleLink)
val links = html filter HrefLink()
links should have length 1
assertResult(expected = "first-link/foo")(links.head)
}
it("should filter out the two nested links in the html") {
val html = Html(SimplifiedData.FilterLink.NestedLinks)
val links = html filter HrefLink()
links should have length 2
assertResult(expected = "first-link/foo")(links(0))
assertResult(expected = "second-link/foo")(links(1))
}
it("should filter out two flat links") {
val html = Html(SimplifiedData.FilterLink.FlatLinks)
val links = html filter HrefLink()
links should have length 2
assertResult(expected = "first-link/foo")(links(0))
assertResult(expected = "second-link/foo")(links(1))
}
}
//==========================================================================
//==========================================================================
describe("RetainFirst of HrefLink filter") {
it("should retain only the first link when composed with a RetainFirst filter, on rhs") {
val html = Html(SimplifiedData.FilterLink.NestedLinks)
val links = html filter RetainFirst(HrefLink())
links should have length 1
assertResult(expected = "first-link/foo")(links.head)
}
it("should retain only the first link when composed with a RetainFirst filter, on lhs") {
val html = Html(SimplifiedData.FilterLink.NestedLinks)
val links = html filter RetainFirst(HrefLink())
links should have length 1
assertResult(expected = "first-link/foo")(links.head)
}
}
//==========================================================================
//==========================================================================
describe("Content filter") {
it("should return only the date from the `icon-photography` class") {
val html = Html(SimplifiedData.FilterContent.ContentFromClass)
val content = html filter Content(Class("icon-photography"))
content should have length 1
assertResult(expected = "Nov 09, 2013")(content.head)
}
it("should return only the contents of the `div` tag") {
val html = Html(SimplifiedData.FilterContent.ContentFromTag)
val content = html filter Content(Tag("div"))
content should have length 1
assertResult(expected = "<a>whatever</a>")(content.head)
}
it("should return only the contents of the `id` attribute") {
val html = Html(SimplifiedData.FilterContent.ContentFromAttribute)
val content = html filter Content(Attribute("id"))
content should have length 1
assertResult(expected = "Load more")(content.head)
}
it("should return the contents of a Composite Filter") {
val html = Html(SimplifiedData.FilterContent.ContentFromComposite)
val content = html filter Content(Class("meta-data") && Class("photographer"))
content should have length 1
content.head.trim() should startWith("by")
}
}
//==========================================================================
//==========================================================================
describe("Combining filters") {
it("should return all the links contained within the `photo-container` classes contained within the first `image-section` class") {
val html = Html(ComplexData.Combination.TwoTopLevelImageSections)
val links = html filter RetainFirst(Class("image-section")) && Class("photo-container") && HrefLink()
links should have length 45
assertResult(expected = "link0")(actual = links(0))
assertResult(expected = "link44")(actual = links(44))
}
it("should return all the links contained within the `photo-container` classes contained within both `image-section` class") {
val html = Html(ComplexData.Combination.TwoTopLevelImageSections)
val links = html filter Class("image-section") && Class("photo-container") && HrefLink()
links should have length 46
assertResult(expected = "link0")(actual = links(0))
assertResult(expected = "link44")(actual = links(44))
assertResult(expected = "BOGUS LINK!!")(actual = links(45))
}
it("should return all the links contained within the `photo-container` classes contained within both `image-section` class" +
" even though the middle `image-section` class contains no photocontainers") {
val html = Html(ComplexData.Combination.ThreeTopLevelImageSectionsMiddleOneEmpty)
val links = html filter Class("image-section") && Class("photo-container") && HrefLink()
links should have length 3
assertResult(expected = "link0")(actual = links(0))
assertResult(expected = "link44")(actual = links(1))
assertResult(expected = "BOGUS LINK!!")(actual = links(2))
}
it("should return `Nil` if the first filter in the combination returns `Nil`") {
val html = Html(ComplexData.Combination.TwoTopLevelImageSections)
val links = html filter Class("non-existent-class") && Class("photo-container") && HrefLink()
links shouldBe empty
}
it("should return `Nil` if the filter in middle the combination returns `Nil`") {
val html = Html(ComplexData.Combination.TwoTopLevelImageSections)
val links = html filter Class("image-section") && Class("non-existent-class") && HrefLink()
links shouldBe empty
}
it("should return `Nil` if the last filter in the combination returns `Nil`") {
val html = Html(ComplexData.Combination.TwoTopLevelImageSections)
val links = html filter Class("image-section") && Class("photo-container") && Attribute("non-existent-attribute")
links shouldBe empty
}
}
//==========================================================================
//==========================================================================
}
| lorandszakacs/util-html | src/test/scala/com/lorandszakacs/util/html/HtmlProcessorTest.scala | Scala | apache-2.0 | 12,126 |
/**
* CPNetSolver
* Copyright (C) 2013 Francesco Burato, Simone Carriero
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see [http://www.gnu.org/licenses/].
*
* File: Domain.scala
* Package: constraintobjs
* Author: Simone Carriero
* Creation: 30/06/2013
*/
package constraintobjs
import scala.collection.mutable
import scala.collection.mutable.Buffer
import scala.collection.mutable.ListBuffer
/**
* Companion object.
*/
object Domain {
val domains: mutable.HashMap[String, Domain] = new mutable.HashMap[String, Domain]
def addDomain(domain: Domain) {
domains get domain.variable match {
case None => domains += (domain.variable -> domain)
case _ =>
}
}
def apply(variable: String): Option[Domain] = domains get variable
def reset() : Unit = domains.clear
}
/**
* Represents a domain.
* @author Simone Carriero
*/
class Domain(val variable: String, val accepted: Set[String]) {
/**
* The list of constraints involving the variable "variable".
*/
val constraints: Buffer[Constraint] = new ListBuffer[Constraint]
/**
* Adds a constraint to the list of constraints.
   * If the constraint to be added doesn't involve the variable "variable"
* an Exception is thrown.
*/
def addConstraint(c: Constraint): Unit = {
if ( !(c.vars contains variable) )
throw new Exception("Domain.addConstraint error: adding to the domain of " +
variable + " a constraint in which " + variable + " is not involved.")
constraints += c
}
/**
* Tests if some element is contained in this domain.
*/
def contains(elem: String): Boolean = accepted.contains(elem)
/**
* toString method for debugging
*/
override def toString : String = accepted.toString
}
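// Illustrative usage sketch (not part of the original file): registering a domain with
// the companion object's global map and querying it.
private object DomainExample {
  def demo(): Unit = {
    Domain.reset()
    Domain.addDomain(new Domain("colour", Set("red", "green", "blue")))
    val lookedUp = Domain("colour")
    assert(lookedUp.exists(_.contains("red")))
    assert(!lookedUp.exists(_.contains("yellow")))
  }
}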
| fburato/CPNetSolver | CPNetSolver/src/constraintobjs/Domain.scala | Scala | gpl-3.0 | 2,333 |
package org.senkbeil.debugger.akka.messages
import org.senkbeil.debugger.akka.messages.structures.JDIThread
case class ThreadStartMessage(
thread: Option[JDIThread] = None
) extends MessageLike
| chipsenkbeil/scala-debugger-akka | src/main/scala/org/senkbeil/debugger/akka/messages/ThreadStartMessage.scala | Scala | apache-2.0 | 198 |
package models.pcf.pcl
import scala.language.existentials
sealed trait PCLExpression extends PCLModel
case class Constant(val constant:PCLConstraintConstantAttribute[_]) extends PCLExpression
// Treat constraint attribute as variable
case class Var(val attribute:PCLConstraintComplexAttribute) extends PCLExpression
// Treat constraint operation as user-defined function
case class Func(val operation:PCLConstraintOperation) extends PCLExpression
case class UnaryExpression(operator:String, arg:PCLExpression) extends PCLExpression
case class BinaryExpression(operator:String, left:PCLExpression, right:PCLExpression) extends PCLExpression
case class IfExpression(condition:PCLExpression, default:PCLExpression, alternative: Option[PCLExpression]) extends PCLExpression
| shasha-amy-liu/process-constraint-framework | pcf/src/main/scala/models/pcf/pcl/PCLExpression.scala | Scala | mit | 772 |
package sms.core
import java.io.File
object Properties {
private object GetFile {
def unapply(path: String): Option[File] = {
val file = new File(path)
if (file.exists()) Some(file) else None
}
}
def parseFile[T](key: String)(action: File => T): Option[T] = {
sys.props.get(key).collect {
case GetFile(file) => action(file)
}
}
def parseFile[T](key: String, default: => T)(action: File => T): T = {
parseFile(key)(action).getOrElse(default)
}
def get(key: String): Option[String] = sys.props.get(key)
def execute(key: String)(action: String => Unit) {
get(key).map(action)
}
}
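// Illustrative usage sketch (hypothetical property keys, not part of the original file):
// resolving an optional file-backed system property with a fallback.
private object PropertiesExample {
  def demo(): Unit = {
    val loaded: Boolean =
      Properties.parseFile("sms.config.file", default = false) { file =>
        println(s"would load configuration from ${file.getAbsolutePath}")
        true
      }
    Properties.execute("sms.node.name")(name => println(s"node name: $name"))
    println(s"configuration file present: $loaded")
  }
}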
| kjanosz/stock-market-sherlock | core/src/main/scala/sms/core/Properties.scala | Scala | apache-2.0 | 643 |