code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M)
---|---|---|---|---|---|
package com.azavea.gtfs.io.csv
trait GtfsFile[T] {
val fileName: String
val isRequired: Boolean
def parse(path: String): Seq[T]
}
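// Illustrative sketch, not part of the upstream file: one way an implementation of
// GtfsFile might look. The Agency case class, the "agency.txt" file name and the naive
// comma split are assumptions for the example; the real library parses GTFS CSVs with a
// proper CSV reader.
case class Agency(id: String, name: String)

object AgencyFile extends GtfsFile[Agency] {
  val fileName: String = "agency.txt"
  val isRequired: Boolean = true

  // Skip the header row and split each remaining line on commas.
  def parse(path: String): Seq[Agency] = {
    val source = scala.io.Source.fromFile(path)
    try source.getLines().drop(1).toList.map { line =>
      val cols = line.split(",", -1)
      Agency(cols(0), cols(1))
    } finally source.close()
  }
}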
| flibbertigibbet/open-transit-indicators | scala/gtfs/src/main/scala/com/azavea/gtfs/io/csv/GtfsFile.scala | Scala | gpl-3.0 | 138 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import scala.concurrent.Future
import scala.concurrent.ExecutionContext
import scala.reflect.ClassTag
import org.scalactic.source
import org.scalatest.exceptions.StackDepthException._
import org.scalatest.compatible.Assertion
/**
* Offers two methods for transforming futures when exceptions are expected.
*
* <p>
* This trait offers two methods for testing for expected exceptions in the context of
* futures: <code>recoverToSucceededIf</code> and <code>recoverToExceptionIf</code>.
* Because this trait is mixed into trait <code>AsyncTestSuite</code>, both of its methods are
* available by default in any async-style suite.
* </p>
*
* <p>
* If you just want to ensure that a future fails with a particular exception type, and do
* not need to inspect the exception further, use <code>recoverToSucceededIf</code>:
* </p>
*
* <pre class="stHighlight">
* recoverToSucceededIf[IllegalStateException] { // Result type: Future[Assertion]
* emptyStackActor ? Peek
* }
* </pre>
*
* <p>
* The <code>recoverToSucceededIf</code> method performs a job similar to
* <a href="Assertions.html#expectedExceptions"><code>assertThrows</code></a>, except
* in the context of a future. It transforms a <code>Future</code> of any type into a
* <code>Future[Assertion]</code> that succeeds only if the original future fails with the specified
* exception. Here's an example in the REPL:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalatest.RecoverMethods._
* import org.scalatest.RecoverMethods._
*
* scala> import scala.concurrent.Future
* import scala.concurrent.Future
*
* scala> import scala.concurrent.ExecutionContext.Implicits.global
* import scala.concurrent.ExecutionContext.Implicits.global
*
* scala> recoverToSucceededIf[IllegalStateException] {
* | Future { throw new IllegalStateException }
* | }
* res0: scala.concurrent.Future[org.scalatest.Assertion] = ...
*
* scala> res0.value
* res1: Option[scala.util.Try[org.scalatest.Assertion]] = Some(Success(Succeeded))
* </pre>
*
* <p>
* Otherwise it fails with an error message similar to those given by <code>assertThrows</code>:
* </p>
*
* <pre class="stREPL">
* scala> recoverToSucceededIf[IllegalStateException] {
* | Future { throw new RuntimeException }
* | }
* res2: scala.concurrent.Future[org.scalatest.Assertion] = ...
*
* scala> res2.value
* res3: Option[scala.util.Try[org.scalatest.Assertion]] =
* Some(Failure(org.scalatest.exceptions.TestFailedException: Expected exception
* java.lang.IllegalStateException to be thrown, but java.lang.RuntimeException
* was thrown))
*
* scala> recoverToSucceededIf[IllegalStateException] {
* | Future { 42 }
* | }
* res4: scala.concurrent.Future[org.scalatest.Assertion] = ...
*
* scala> res4.value
* res5: Option[scala.util.Try[org.scalatest.Assertion]] =
* Some(Failure(org.scalatest.exceptions.TestFailedException: Expected exception
* java.lang.IllegalStateException to be thrown, but no exception was thrown))
* </pre>
*
* <p>
* The <code>recoverToExceptionIf</code> method differs from the <code>recoverToSucceededIf</code> in
* its behavior when the assertion succeeds: <code>recoverToSucceededIf</code> yields a <code>Future[Assertion]</code>,
* whereas <code>recoverToExceptionIf</code> yields a <code>Future[T]</code>, where <code>T</code> is the
* expected exception type.
* </p>
*
* <pre class="stHighlight">
* recoverToExceptionIf[IllegalStateException] { // Result type: Future[IllegalStateException]
* emptyStackActor ? Peek
* }
* </pre>
*
* <p>
* In other words, <code>recoverToExceptionIf</code> is to
* <a href="Assertions.html#expectedExceptions"><code>intercept</code></a> as
* <code>recoverToSucceededIf</code> is to <code>assertThrows</code>. The first one allows you to perform further
* assertions on the expected exception. The second one gives you a result type that will satisfy the type checker
* at the end of the test body. Here's an example showing <code>recoverToExceptionIf</code> in the REPL:
* </p>
*
* <pre class="stREPL">
* scala> val futureEx =
* | recoverToExceptionIf[IllegalStateException] {
* | Future { throw new IllegalStateException("hello") }
* | }
* futureEx: scala.concurrent.Future[IllegalStateException] = ...
*
* scala> futureEx.value
* res6: Option[scala.util.Try[IllegalStateException]] =
* Some(Success(java.lang.IllegalStateException: hello))
*
* scala> futureEx map { ex => assert(ex.getMessage == "world") }
* res7: scala.concurrent.Future[org.scalatest.Assertion] = ...
*
* scala> res7.value
* res8: Option[scala.util.Try[org.scalatest.Assertion]] =
* Some(Failure(org.scalatest.exceptions.TestFailedException: "[hello]" did not equal "[world]"))
* </pre>
*
* @author Bill Venners
*/
trait RecoverMethods {
/**
* Transforms a future of any type into a <code>Future[T]</code>, where <code>T</code> is a given
* expected exception type, which succeeds if the given future
* completes with a <code>Failure</code> containing the specified exception type.
*
* <p>
* See the main documentation for this trait for more detail and examples.
* </p>
*
* @param future A future of any type, which you expect to fail with an exception of the specified type T
* @return a Future[T] containing on success the expected exception, or containing on failure
* a <code>TestFailedException</code>
*/
def recoverToExceptionIf[T <: AnyRef](future: Future[Any])(implicit classTag: ClassTag[T], exCtx: ExecutionContext, pos: source.Position): Future[T] = {
val clazz = classTag.runtimeClass
future.failed.transform(
ex =>
if (!clazz.isAssignableFrom(ex.getClass)) {
val message = Resources.wrongException(clazz.getName, ex.getClass.getName)
throw newAssertionFailedExceptionForRecover(Some(message), Some(ex), pos)
}
else ex.asInstanceOf[T]
,
ex => {
val message = Resources.exceptionExpected(clazz.getName)
throw newAssertionFailedExceptionForRecover(Some(message), None, pos)
}
)
}
/**
* Transforms a future of any type into a <code>Future[Assertion]</code> that succeeds if the future
* completes with a <code>Failure</code> containing the specified exception type.
*
* <p>
* See the main documentation for this trait for more detail and examples.
* </p>
*
* @param future A future of any type, which you expect to fail with an exception of the specified type T
* @return a Future[Assertion] containing on success the <code>Succeeded</code> singleton, or containing on failure
* a <code>TestFailedException</code>
*/
def recoverToSucceededIf[T <: AnyRef](future: Future[Any])(implicit classTag: ClassTag[T], exCtx: ExecutionContext, pos: source.Position): Future[Assertion] = {
val clazz = classTag.runtimeClass
future.failed.transform(
rawEx => {
val ex =
rawEx match {
case execEx: java.util.concurrent.ExecutionException => execEx.getCause
case other => other
}
if (!clazz.isAssignableFrom(ex.getClass)) {
val message = Resources.wrongException(clazz.getName, ex.getClass.getName)
throw newAssertionFailedExceptionForRecover(Some(message), Some(ex), pos)
}
else Succeeded
},
ex => {
val message = Resources.exceptionExpected(clazz.getName)
throw newAssertionFailedExceptionForRecover(Some(message), None, pos)
}
)
}
private[scalatest] def newAssertionFailedExceptionForRecover(optionalMessage: Option[String], optionalCause: Option[Throwable], pos: source.Position): Throwable =
new org.scalatest.exceptions.TestFailedException(toExceptionFunction(optionalMessage), optionalCause, pos)
}
/**
* Companion object that facilitates the importing of <code>RecoverMethods</code>'s method as
* an alternative to mixing it in. One use case is to import <code>RecoverMethods</code>'s method so you can use
* it in the Scala interpreter.
*
* @author Bill Venners
*/
object RecoverMethods extends RecoverMethods
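// Illustrative sketch, not part of the upstream file: how the two methods read in an
// async-style suite. AsyncFlatSpec is used here because the trait is available by default
// in async suites (see the scaladoc above); the parse helper is an assumption for the example.
import scala.concurrent.Future

class ParserSpec extends AsyncFlatSpec {
  def parse(s: String): Future[Int] = Future(s.toInt)

  "parse" should "fail on non-numeric input" in {
    // Future[Assertion]: succeeds only if the future fails with a NumberFormatException.
    recoverToSucceededIf[NumberFormatException] {
      parse("not a number")
    }
  }

  it should "expose the recovered exception for further assertions" in {
    // Future[NumberFormatException]: map over it to inspect the exception itself.
    recoverToExceptionIf[NumberFormatException] {
      parse("oops")
    } map { ex =>
      assert(ex.getMessage contains "oops")
    }
  }
}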
| dotty-staging/scalatest | scalatest/src/main/scala/org/scalatest/RecoverMethods.scala | Scala | apache-2.0 | 8,911 |
/*
* Copyright 2014 Adam Rosenberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.nalloc.bitb.kcits.optional
import org.nalloc.bitb.kcits.macros._
object OptionalShort {
final def empty: OptionalShort = new OptionalShort(-32768)
final def apply(value: Short): OptionalShort = new OptionalShort(value)
}
final class OptionalShort(val value: Short) extends AnyVal {
def isEmpty = value == -32768
def get: Short = value
def isMinValue = value == -32767
def isMaxValue = value == 32767
def map[T](f: Short => T)(implicit x: OptionalResolver[T]): x.OptionalType = macro OptionalMacros.map_impl[Short, T]
def flatMap[T](f: Short => T)(implicit x: PrimitiveResolver[T]): T = macro OptionalMacros.flatMap_impl[Short, T]
def foreach(f: Short => Unit): Unit = macro OptionalMacros.foreach_impl[Short]
def exists(f: Short => Boolean): Boolean = macro OptionalMacros.exists_impl[Short]
def filter(f: Short => Boolean): OptionalShort = macro OptionalMacros.filter_impl[Short]
def orElse(f: => Short): Short = macro OptionalMacros.orElse_impl[Short]
def fold[T](ifEmpty: => T)(f: Short => T): T = macro OptionalMacros.fold_impl[Short, T]
override def toString = if (isEmpty) "-32768 (empty)" else s"$value"
}
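// Illustrative sketch, not part of the upstream file: "optional" is encoded in the
// sentinel -32768 (Short.MinValue), so no wrapper object is allocated for present or
// absent values.
object OptionalShortExample {
  val present = OptionalShort(5)    // present.isEmpty == false, present.get == 5
  val absent  = OptionalShort.empty // absent.isEmpty == true, backed by the sentinel

  // Consequence of the encoding: a genuine -32768 is indistinguishable from "empty",
  // which is why isMinValue above tests against -32767.
  val looksEmpty = OptionalShort(-32768) // looksEmpty.isEmpty == true
}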
| arosenberger/nalloc_2.10 | optional/src/main/scala/org/nalloc/bitb/kcits/optional/OptionalShort.scala | Scala | apache-2.0 | 1,756 |
package org.bfn.ninetynineprobs
import org.scalatest._
class P68Spec extends UnitSpec {
// TODO
}
| bfontaine/99Scala | src/test/scala/P68Spec.scala | Scala | mit | 105 |
package com.twitter.finagle.oauth2
/**
* Authorized information.
*
* @param user The authorized user, as registered on the system.
* @param clientId The id of the client registered on the system.
* @param scope The scope of the issued access token, reported back to the client.
* @param redirectUri The redirect URI; this value is used by the Authorization Code Grant.
*/
final case class AuthInfo[U](
user: U,
clientId: String,
scope: Option[String],
redirectUri: Option[String]
)
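// Illustrative sketch, not part of the upstream file: constructing an AuthInfo for a
// hypothetical user type after a successful authorization. AppUser and the values below
// are assumptions for the example.
object AuthInfoExample {
  final case class AppUser(id: Long, email: String)

  val info: AuthInfo[AppUser] = AuthInfo(
    user = AppUser(42L, "alice@example.com"),
    clientId = "my-registered-client",
    scope = Some("read write"),
    redirectUri = None // only used by the Authorization Code Grant
  )
}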
| finagle/finagle-oauth2 | src/main/scala/com/twitter/finagle/oauth2/AuthInfo.scala | Scala | apache-2.0 | 465 |
package spa.client.modules
import diode.data.Pot
import diode.react.ModelProxy
import japgolly.scalajs.react.extra.router.RouterCtl
import spa.client.SPAMain.{FeedbackLoc, Loc}
import diode.react.ReactPot._
import diode.react._
import diode.data.Pot
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import spa.client.components.Bootstrap._
import spa.client.components._
import spa.client.logger._
import spa.client.services._
import spa.shared._
/**
* Some information about the site, linking to a feedback form and the source code on GitHub
*/
object About {
case class Props(router: RouterCtl[Loc])
@inline private def bss = GlobalStyles.bootstrapStyles
class Backend($: BackendScope[Props, Unit]) {
def render(p: Props) = {//}, s: State) = {
<.div(^.className:="col-md-10 col-md-offset-1")(
Panel(Panel.Props("About internet-abridged.com"),
<.div(
<.p(
"""I embarked on this project to use Scala to create a "single page app" and a news aggregator
| for myself. As a side effect, I learned React.js in the process. All the JavaScript, HTML,
|and backend server code used for this site are written in Scala, leveraging a
|library for the HTML fragments, and the Scala.js compiler (with the help of a React library)
|to compile Scala to browser JS instead of server side JVM code. Server side, I make use of
|a small set of Akka Actors to concurrently retrieve updated feeds roughly every fifteen
|minutes.""".stripMargin),
<.div(
"Do you have questions about the site? Suggestions for feeds you think I should aggregate? ",
"Send me some ", p.router.link(FeedbackLoc)("feedback"), ". ",
"Is there another reason you want to talk to me? ",
p.router.link(FeedbackLoc)("You can reach me the same way.")
),<.br,
<.p("For those curious, you can view the source code for this site ",
<.a(^.href := "https://github.com/IceGiant/internet-abridged")("on GitHub"), ".")
)
)
)
}
}
val component = ReactComponentB[Props]("About")
.initialState({
log.debug("About init")
})
.renderBackend[Backend]
.build
/** Returns a function compatible with router location system while using our own props */
def apply(router: RouterCtl[Loc]) = component(Props(router))
}
| IceGiant/internet-abridged | client/src/main/scala/spa/client/modules/About.scala | Scala | apache-2.0 | 2,580 |
package com.arcusys.valamis.certificate.model.goal
object GoalStatistic {
def empty = GoalStatistic(0,0,0,0)
}
case class GoalStatistic(success: Int, inProgress: Int, failed: Int, total: Int, notStarted: Int = 0)
{
def addSuccess(count: Int = 1) =
this.copy(success = success + count, total = total + count)
def addFailed(count: Int = 1) =
this.copy(failed = failed + count, total = total + count)
def addInProgress(count: Int = 1) =
this.copy(inProgress = inProgress + count, total = total + count)
def add(status: GoalStatuses.Value) = {
status match {
case GoalStatuses.Failed => this.addFailed()
case GoalStatuses.Success => this.addSuccess()
case GoalStatuses.InProgress => this.addInProgress()
}
}
def +(that: GoalStatistic): GoalStatistic = {
val success = this.success + that.success
val inProgress = this.inProgress + that.inProgress
val failed = this.failed + that.failed
val notStarted = this.notStarted + that.notStarted
val total = this.total + that.total
GoalStatistic(success, inProgress, failed, total, notStarted)
}
}
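// Illustrative sketch, not part of the upstream file: folding individual goal statuses
// into one statistic and merging two statistics with `+`. GoalStatuses is assumed to be
// the enumeration referenced above, defined elsewhere in the same package.
object GoalStatisticExample {
  val statuses = Seq(GoalStatuses.Success, GoalStatuses.Success, GoalStatuses.Failed, GoalStatuses.InProgress)

  // add() bumps the matching counter and the total for each status.
  val first = statuses.foldLeft(GoalStatistic.empty)(_ add _) // GoalStatistic(2, 1, 1, 4)

  val second = GoalStatistic.empty.addSuccess(3) // GoalStatistic(3, 0, 0, 3)

  // `+` sums every counter field by field.
  val combined = first + second // GoalStatistic(5, 1, 1, 7)
}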
| igor-borisov/valamis | valamis-certificate/src/main/scala/com/arcusys/valamis/certificate/model/goal/GoalStatistic.scala | Scala | gpl-3.0 | 1,121 |
package org.libss.util.helpers
import java.lang.reflect.Field
/**
* date: 02.06.2016 22:44
* author: Kaa
*
* Reflection field value getter helper trait
*/
trait ReflectionFieldValueHandler {
protected def setFieldValue(instance: AnyRef, field: Field, value: Any) {
field.setAccessible(true)
field.set(instance, value)
}
protected def setFieldValue(instance: AnyRef, fieldName: String, value: Any) {
setFieldValue(instance, instance.getClass.getDeclaredField(fieldName), value)
}
protected def getFieldValue[T <: Any](field: Field, instance: AnyRef): Option[Any] = {
field.setAccessible(true)
Option(field.get(instance))
}
protected def getFieldValue[T <: Any](fieldName: String, instance: AnyRef): Option[T] = {
getFieldValue(instance.getClass.getDeclaredField(fieldName), instance).asInstanceOf[Option[T]]
}
}
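// Illustrative sketch, not part of the upstream file: the helpers are protected, so a
// caller mixes the trait in. The Counter class and its private field are assumptions.
object ReflectionFieldValueHandlerExample extends ReflectionFieldValueHandler {
  class Counter {
    private var count: Int = 0
  }

  def demo(): Unit = {
    val counter = new Counter
    setFieldValue(counter, "count", 41)
    // getFieldValue wraps the raw value in an Option; None would mean the field held null.
    val value: Option[Int] = getFieldValue[Int]("count", counter)
    println(value) // Some(41)
  }
}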
| kanischev/libss | libss-utils/src/main/scala/org/libss/util/helpers/ReflectionFieldValueHandler.scala | Scala | apache-2.0 | 866 |
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{universe => ru}
import scala.reflect.runtime.{currentMirror => cm}
import scala.tools.reflect.{ToolBox, mkConsoleFrontEnd}
object Test extends dotty.runtime.LegacyApp {
//val oldErr = Console.err;
val baos = new java.io.ByteArrayOutputStream()
val errs = new java.io.PrintStream(baos)
(Console withErr errs) {
val toolbox = cm.mkToolBox(frontEnd = mkConsoleFrontEnd(), options = "-deprecation")
toolbox.eval(reify{
object Utils {
@deprecated("test", "2.10.0")
def foo: Unit = { println("hello") }
}
Utils.foo
}.tree)
println("============compiler console=============")
errs.flush()
println(baos.toString);
println("=========================================")
println("============compiler messages============")
toolbox.frontEnd.infos.foreach(println(_))
println("=========================================")
}
}
| yusuke2255/dotty | tests/disabled/macro/run/toolbox_console_reporter.scala | Scala | bsd-3-clause | 969 |
package is.hail.annotations
import java.io.{ObjectInputStream, ObjectOutputStream}
import com.esotericsoftware.kryo.{Kryo, KryoSerializable}
import com.esotericsoftware.kryo.io.{Input, Output}
import is.hail.types.virtual._
import is.hail.types.physical._
import is.hail.utils._
import is.hail.variant.Locus
import org.apache.spark.sql.Row
import sun.reflect.generics.reflectiveObjects.NotImplementedException
trait UnKryoSerializable extends KryoSerializable {
def write(kryo: Kryo, output: Output): Unit = {
throw new NotImplementedException()
}
def read(kryo: Kryo, input: Input): Unit = {
throw new NotImplementedException()
}
}
class UnsafeIndexedSeq(
val t: PContainer,
val region: Region, val aoff: Long) extends IndexedSeq[Annotation] with UnKryoSerializable {
val length: Int = t.loadLength(aoff)
def apply(i: Int): Annotation = {
if (i < 0 || i >= length)
throw new IndexOutOfBoundsException(i.toString)
if (t.isElementDefined(aoff, i)) {
UnsafeRow.read(t.elementType, region, t.loadElement(aoff, length, i))
} else
null
}
override def toString: String = s"[${this.mkString(",")}]"
}
class UnsafeIndexedSeqRowMajorView(val wrapped: UnsafeIndexedSeq, shape: IndexedSeq[Long], strides: IndexedSeq[Long]) extends IndexedSeq[Annotation] {
val coordStorageArray = new Array[Long](shape.size)
val shapeProduct = shape.foldLeft(1L)(_ * _)
def apply(i: Int): Annotation = {
var workRemaining = i.toLong
var elementsInProcessedDimensions = shapeProduct
(0 until shape.size).foreach { dim =>
elementsInProcessedDimensions = elementsInProcessedDimensions / shape(dim)
coordStorageArray(dim) = workRemaining / elementsInProcessedDimensions
workRemaining = workRemaining % elementsInProcessedDimensions
}
val properIndex = (0 until shape.size).map(dim => coordStorageArray(dim) * strides(dim)).sum
if (properIndex > Int.MaxValue) {
throw new IllegalArgumentException("Index too large")
}
wrapped(properIndex.toInt)
}
override def length: Int = wrapped.length
}
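// Illustrative sketch, not part of the upstream file: the flat-index -> coordinate
// arithmetic used by UnsafeIndexedSeqRowMajorView, shown on plain numbers. For a
// row-major shape (2, 3), flat index 4 corresponds to coordinates (1, 1).
object RowMajorIndexExample {
  def coordinates(flatIndex: Long, shape: IndexedSeq[Long]): IndexedSeq[Long] = {
    var remaining = flatIndex
    var elementsInProcessedDimensions = shape.product
    shape.map { dimSize =>
      elementsInProcessedDimensions /= dimSize
      val coord = remaining / elementsInProcessedDimensions
      remaining %= elementsInProcessedDimensions
      coord
    }
  }
  // coordinates(4L, IndexedSeq(2L, 3L)) == Vector(1L, 1L)
}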
object UnsafeRow {
def readBinary(boff: Long, t: PBinary): Array[Byte] =
t.loadBytes(boff)
def readArray(t: PContainer, region: Region, aoff: Long): IndexedSeq[Any] =
new UnsafeIndexedSeq(t, region, aoff)
def readBaseStruct(t: PBaseStruct, region: Region, offset: Long): UnsafeRow =
new UnsafeRow(t, region, offset)
def readString(boff: Long, t: PString): String =
new String(readBinary(boff, t.fundamentalType))
def readLocus(offset: Long, t: PLocus): Locus = {
Locus(
t.contig(offset),
t.position(offset))
}
def readAnyRef(t: PType, region: Region, offset: Long): AnyRef = read(t, region, offset).asInstanceOf[AnyRef]
def read(t: PType, region: Region, offset: Long): Any = {
t match {
case _: PBoolean =>
Region.loadBoolean(offset)
case _: PInt32 | _: PCall => Region.loadInt(offset)
case _: PInt64 => Region.loadLong(offset)
case _: PFloat32 => Region.loadFloat(offset)
case _: PFloat64 => Region.loadDouble(offset)
case t: PArray =>
readArray(t, region, offset)
case t: PSet =>
readArray(t, region, offset).toSet
case t: PString => readString(offset, t)
case t: PBinary => readBinary(offset, t)
case td: PDict =>
val a = readArray(td, region, offset)
a.asInstanceOf[IndexedSeq[Row]].map(r => (r.get(0), r.get(1))).toMap
case t: PBaseStruct => readBaseStruct(t, region, offset)
case x: PLocus => readLocus(offset, x)
case x: PInterval =>
val start: Annotation =
if (x.startDefined(offset))
read(x.pointType, region, x.loadStart(offset))
else
null
val end =
if (x.endDefined(offset))
read(x.pointType, region, x.loadEnd(offset))
else
null
val includesStart = x.includesStart(offset)
val includesEnd = x.includesEnd(offset)
Interval(start, end, includesStart, includesEnd)
case nd: PNDArray => {
val nDims = nd.nDims
val elementSize = nd.elementType.byteSize
val urWithStrides = read(nd.representation, region, offset).asInstanceOf[UnsafeRow]
val shapeRow = urWithStrides.get(0).asInstanceOf[UnsafeRow]
val shape = shapeRow.toSeq.map(x => x.asInstanceOf[Long]).toIndexedSeq
val strides = urWithStrides.get(1).asInstanceOf[UnsafeRow].toSeq.map(x => x.asInstanceOf[Long]).toIndexedSeq
val data = urWithStrides.get(2).asInstanceOf[UnsafeIndexedSeq]
val elementWiseStrides = (0 until nDims).map(i => strides(i) / elementSize)
val row = Row(shapeRow, new UnsafeIndexedSeqRowMajorView(data, shape, elementWiseStrides))
row
}
}
}
}
class UnsafeRow(val t: PBaseStruct,
var region: Region, var offset: Long) extends Row with UnKryoSerializable {
override def toString: String = {
if (t.isInstanceOf[PStruct]) {
val sb = new StringBuilder()
var i = 0
sb += '{'
while (i < t.size) {
if (i != 0) {
sb ++= ", "
}
sb ++= t.fieldNames(i)
sb ++= ": "
val x = get(i)
sb ++= (if (x == null) "null" else x.toString())
i += 1
}
sb += '}'
sb.toString
} else if (t.isInstanceOf[PTuple]) {
val sb = new StringBuilder()
var i = 0
sb += '('
while (i < t.size) {
if (i != 0) {
sb ++= ", "
}
val x = get(i)
sb ++= (if (x == null) "null" else x.toString())
i += 1
}
sb += ')'
sb.toString
} else {
super.toString
}
}
def this(t: PBaseStruct, rv: RegionValue) = this(t, rv.region, rv.offset)
def this(t: PBaseStruct) = this(t, null, 0)
def this() = this(null, null, 0)
def set(newRegion: Region, newOffset: Long) {
region = newRegion
offset = newOffset
}
def set(rv: RegionValue): Unit = set(rv.region, rv.offset)
def length: Int = t.size
private def assertDefined(i: Int) {
if (isNullAt(i))
throw new NullPointerException(s"null value at index $i")
}
def get(i: Int): Any = {
if (isNullAt(i))
null
else
UnsafeRow.read(t.types(i), region, t.loadField(offset, i))
}
def copy(): Row = new UnsafeRow(t, region, offset)
def pretty(): String = Region.pretty(t, offset)
override def getInt(i: Int): Int = {
assertDefined(i)
Region.loadInt(t.loadField(offset, i))
}
override def getLong(i: Int): Long = {
assertDefined(i)
Region.loadLong(t.loadField(offset, i))
}
override def getFloat(i: Int): Float = {
assertDefined(i)
Region.loadFloat(t.loadField(offset, i))
}
override def getDouble(i: Int): Double = {
assertDefined(i)
Region.loadDouble(t.loadField(offset, i))
}
override def getBoolean(i: Int): Boolean = {
assertDefined(i)
Region.loadBoolean(t.loadField(offset, i))
}
override def getByte(i: Int): Byte = {
assertDefined(i)
Region.loadByte(t.loadField(offset, i))
}
override def isNullAt(i: Int): Boolean = {
if (i < 0 || i >= t.size)
throw new IndexOutOfBoundsException(i.toString)
!t.isFieldDefined(offset, i)
}
private def writeObject(s: ObjectOutputStream): Unit = {
throw new NotImplementedException()
}
private def readObject(s: ObjectInputStream): Unit = {
throw new NotImplementedException()
}
}
object SafeRow {
def apply(t: PBaseStruct, off: Long): Row = {
Annotation.copy(t.virtualType, new UnsafeRow(t, null, off)).asInstanceOf[Row]
}
def apply(t: PBaseStruct, rv: RegionValue): Row = SafeRow(t, rv.offset)
def selectFields(t: PBaseStruct, region: Region, off: Long)(selectIdx: Array[Int]): Row = {
val fullRow = new UnsafeRow(t, region, off)
Row.fromSeq(selectIdx.map(i => Annotation.copy(t.types(i).virtualType, fullRow.get(i))))
}
def selectFields(t: PBaseStruct, rv: RegionValue)(selectIdx: Array[Int]): Row =
SafeRow.selectFields(t, rv.region, rv.offset)(selectIdx)
def read(t: PType, off: Long): Annotation =
Annotation.copy(t.virtualType, UnsafeRow.read(t, null, off))
def read(t: PType, rv: RegionValue): Annotation =
read(t, rv.offset)
def isSafe(a: Any): Boolean = {
a match {
case _: UnsafeRow => false
case _: UnsafeIndexedSeq => false
case r: Row =>
r.toSeq.forall(isSafe)
case a: IndexedSeq[_] =>
a.forall(isSafe)
case i: Interval =>
isSafe(i.start) && isSafe(i.end)
case _ => true
}
}
}
object SafeIndexedSeq {
def apply(t: PArray, off: Long): IndexedSeq[Annotation] =
Annotation.copy(t.virtualType, new UnsafeIndexedSeq(t, null, off))
.asInstanceOf[IndexedSeq[Annotation]]
def apply(t: PArray, rv: RegionValue): IndexedSeq[Annotation] =
apply(t, rv.offset)
}
class SelectFieldsRow(
private[this] var old: Row,
private[this] val fieldMapping: Array[Int]
) extends Row {
def this(
old: Row,
oldPType: TStruct,
newPType: TStruct
) = this(old, newPType.fieldNames.map(name => oldPType.fieldIdx(name)))
def this(
old: Row,
oldPType: PStruct,
newPType: PStruct
) = {
this(old,
(require(
oldPType.fields.length <= old.length &&
newPType.fields.length <= old.length,
s"${oldPType}, ${newPType} ${old.length} $old")
->
newPType.fieldNames.map(name => oldPType.fieldIdx(name)))._2
)
}
require(fieldMapping.forall(x => x < old.length),
s"${fieldMapping.toSeq}, ${old.length} $old")
override def length = fieldMapping.length
override def get(i: Int) = old.get(fieldMapping(i))
override def isNullAt(i: Int) = old.isNullAt(fieldMapping(i))
override def copy(): Row = new SelectFieldsRow(old.copy(), fieldMapping)
def set(newRow: Row): SelectFieldsRow = {
old = newRow
this
}
}
| danking/hail | hail/src/main/scala/is/hail/annotations/UnsafeRow.scala | Scala | mit | 9,948 |
package com.github.tototoshi.play2.auth.social.providers.facebook
import com.github.tototoshi.play2.auth.social.core.OAuthProviderUserSupport
import play.api.Logger
import play.api.libs.ws.{ WS, WSResponse }
import play.api.Play.current
import scala.concurrent.{ ExecutionContext, Future }
trait FacebookProviderUserSupport extends OAuthProviderUserSupport {
self: FacebookController =>
type ProviderUser = FacebookUser
private def readProviderUser(accessToken: String, response: WSResponse): ProviderUser = {
val j = response.json
FacebookUser(
(j \ "id").as[String],
(j \ "name").as[String],
(j \ "email").as[String],
(j \ "picture" \ "data" \ "url").as[String],
accessToken
)
}
def retrieveProviderUser(accessToken: AccessToken)(implicit ctx: ExecutionContext): Future[ProviderUser] = {
for {
response <- WS.url("https://graph.facebook.com/me")
.withQueryString("access_token" -> accessToken, "fields" -> "name,first_name,last_name,picture.type(large),email")
.get()
} yield {
Logger(getClass).debug("Retrieving user info from provider API: " + response.body)
readProviderUser(accessToken, response)
}
}
}
| tototoshi/play2-auth | social/src/main/scala/com/github/tototoshi/play2/auth/social/providers/facebook/FacebookProviderUserSupport.scala | Scala | apache-2.0 | 1,215 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
import io.gatling.BaseSpec
import io.gatling.commons.util.HtmlHelper.HtmlRichString
class HtmlHelperSpec extends BaseSpec {
"htmlEscape" should "escape with entity chars" in {
"fooYéfoo".htmlEscape shouldBe "fooY&eacute;foo"
}
}
| wiacekm/gatling | gatling-commons/src/test/scala/io/gatling/commons/util/HtmlHelperSpec.scala | Scala | apache-2.0 | 891 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.web.ld.exceptions
/**
* Created with IntelliJ IDEA.
* User: gilad
* Date: 7/11/13
* Time: 10:32 AM
* To change this template use File | Settings | File Templates.
*/
class UnsupportedURIException(msg: String) extends RuntimeException(msg)
| dudi3001/CM-Well | server/cmwell-ws/app/ld/exceptions/UnsupportedURIException.scala | Scala | apache-2.0 | 892 |
/*
* Copyright (c) 2015. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
* Morbi non lorem porttitor neque feugiat blandit. Ut vitae ipsum eget quam lacinia accumsan.
* Etiam sed turpis ac ipsum condimentum fringilla. Maecenas magna.
* Proin dapibus sapien vel ante. Aliquam erat volutpat. Pellentesque sagittis ligula eget metus.
* Vestibulum commodo. Ut rhoncus gravida arcu.
*/
package com.github.marklister.collections.io
import scala.collection.mutable.ArrayBuilder
class CSVReader(val reader: java.io.Reader,
final val delimiter: Char = ',',
final val quoteChar: Char = '"',
val headerRows: Int = 0) extends Iterator[Array[String]] {
def this(s: String) = this(new java.io.StringReader(s))
final val b = new InputBuffer(reader)
private[this] final val maxChar: Char = Seq(quoteChar, delimiter, CSVReader.cr, CSVReader.eol, CSVReader.eof).max
val currentOutput: ArrayBuilder[String] = new ArrayBuilder.ofRef[String] {}
val currentField = new java.lang.StringBuilder(40)
var line = headerRows
(1 to headerRows).foreach(dropLine)
def processQuotedField: Unit = {
currentField.append(b.nextUntil(quoteChar))
b.nextChar
if (b.peek == '"') {
currentField.append(b.nextChar)
processQuotedField
} else {
//Quote closed
while (b.nextChar == ' ') {} //ignore whitespace
if (b.lastChar != delimiter && !b.eoLine && b.lastChar != CSVReader.cr
) throw new Exception("Line " + line + " Expected " + delimiter + " got " + b.lastChar.toInt)
if(b.lastChar==13 && b.peek==10) b.nextChar
}
}
def processUnQuotedField: Unit = {
do {
currentField.append(b.nextWhile(maxChar))
(b.nextChar) match {
case `quoteChar` =>
if (b.peek == quoteChar) currentField.append(b.nextChar)
else currentField.append(b.lastChar)
case `delimiter`=>
case CSVReader.eol =>
case CSVReader.cr =>
case CSVReader.eof =>
case _ => currentField.append(b.lastChar)
}
}while (b.lastChar != delimiter && !b.eoLine)
}
/* Sniff the buffer -- if it starts with whitespace and a quote, process as a quoted field.
* Otherwise process as an unquoted field.
*/
def processField: Unit = {
while (b.peek == ' ') currentField.append(b.nextChar)
if (b.peek == quoteChar) {
currentField.setLength(0)
b.nextChar
processQuotedField
}
else {
processUnQuotedField
}
}
override def next: Array[String] = {
currentOutput.clear()
currentField.setLength(0)
line += 1
do {
processField
currentOutput += currentField.toString
currentField.setLength(0)
} while (!b.eoLine)
currentOutput.result() //toArray(new Array[String](currentOutput.size()))
}
override def hasNext = !b.eoFile
private[this] def dropLine(i: Int): Unit = do b.nextChar while (!b.eoLine)
def close() = reader.close()
}
object CSVReader {
final val eol = '\n'
final val cr = '\r'
final val eof = 26.toChar
}
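// Illustrative sketch, not part of the upstream file: the reader is an Iterator[Array[String]],
// so rows can be consumed directly. headerRows skips leading lines; the sample data below is
// an assumption for the example.
object CSVReaderExample {
  def main(args: Array[String]): Unit = {
    val csv = "name,age\nalice,30\n\"bob, the builder\",42"
    val reader = new CSVReader(new java.io.StringReader(csv), headerRows = 1)
    reader.foreach(row => println(row.mkString(" | ")))
    // expected output:
    // alice | 30
    // bob, the builder | 42
    reader.close()
  }
}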
| CapeSepias/product-collections | shared/src/main/scala/collections/io/CSVReader.scala | Scala | bsd-2-clause | 3,101 |
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2002-2018, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala
package xml
package include
/**
* `XIncludeException` is the generic superclass for all checked exceptions
* that may be thrown as a result of a violation of XInclude's rules.
*
* Constructs an `XIncludeException` with the specified detail message.
* The error message string `message` can later be retrieved by the
* `{@link java.lang.Throwable#getMessage}`
* method of class `java.lang.Throwable`.
*
* @param message the detail message.
*/
class XIncludeException(message: String) extends Exception(message) {
/**
* uses `'''null'''` as its error detail message.
*/
def this() = this(null)
private var rootCause: Throwable = null
/**
* When an `IOException`, `MalformedURLException` or other generic
* exception is thrown while processing an XML document for XIncludes,
* it is customarily replaced by some form of `XIncludeException`.
* This method allows you to store the original exception.
*
* @param nestedException the underlying exception which
* caused the XIncludeException to be thrown
*/
def setRootCause(nestedException: Throwable): Unit = {
this.rootCause = nestedException
}
/**
* When an `IOException`, `MalformedURLException` or other generic
* exception is thrown while processing an XML document for XIncludes,
* it is customarily replaced by some form of `XIncludeException`.
* This method allows you to retrieve the original exception.
* It returns null if no such exception caused this `XIncludeException`.
*
* @return Throwable the underlying exception which caused the
* `XIncludeException` to be thrown
*/
def getRootCause(): Throwable = this.rootCause
}
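// Illustrative sketch, not part of the upstream file: wrapping a lower-level failure
// while preserving the original cause via setRootCause. The helper and its URL argument
// are assumptions for the example.
object XIncludeExceptionExample {
  def loadInclude(href: String): String =
    try scala.io.Source.fromURL(href).mkString
    catch {
      case ioe: java.io.IOException =>
        val wrapped = new XIncludeException("Unable to read included document: " + href)
        wrapped.setRootCause(ioe)
        throw wrapped
    }
}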
| ashawley/scala-xml | shared/src/main/scala/scala/xml/include/XIncludeException.scala | Scala | bsd-3-clause | 2,244 |
package uk.gov.gds.ier.transaction.forces.previousAddress
import play.api.data.Forms._
import play.api.data.validation.{Invalid, Valid, Constraint}
import uk.gov.gds.ier.validation._
import uk.gov.gds.ier.validation.constraints.CommonConstraints
import uk.gov.gds.ier.serialiser.WithSerialiser
import uk.gov.gds.ier.model._
import uk.gov.gds.ier.transaction.forces.InprogressForces
import uk.gov.gds.ier.service.AddressService
trait PreviousAddressForms
extends PreviousAddressConstraints
with CommonForms {
self: FormKeys
with ErrorMessages
with WithSerialiser =>
val addressService: AddressService
// address mapping for select address page - the address part
lazy val partialAddressMappingForPreviousAddress =
PartialAddress.mapping.verifying(
postcodeIsValidForPreviousAddress, uprnOrManualDefinedForPreviousAddressIfNotFromNI)
// address mapping for manual address - the address individual lines part
lazy val manualPartialAddressLinesMappingForPreviousAddress = PartialManualAddress.mapping
.verifying(lineOneIsRequredForPreviousAddress, cityIsRequiredForPreviousAddress)
lazy val partialPreviousAddressMappingForPreviousAddress = mapping(
keys.movedRecently.key -> optional(movedHouseMapping),
keys.previousAddress.key -> optional(partialAddressMappingForPreviousAddress)
) (
PartialPreviousAddress.apply
) (
PartialPreviousAddress.unapply
).verifying(previousAddressRequiredIfMoved)
// address mapping for manual address - the address parent wrapper part
val manualPartialPreviousAddressMappingForPreviousAddress = mapping(
keys.postcode.key -> nonEmptyText,
keys.manualAddress.key -> optional(manualPartialAddressLinesMappingForPreviousAddress)
) (
(postcode, manualAddress) => PartialAddress(
addressLine = None,
uprn = None,
postcode = postcode,
manualAddress = manualAddress
)
) (
partial => Some(
partial.postcode,
partial.manualAddress
)
).verifying(postcodeIsValidForPreviousAddress)
lazy val postcodeLookupMappingForPreviousAddress = mapping(
keys.postcode.key -> nonEmptyText
) (
postcode => PartialPreviousAddress(
movedRecently = Some(MovedHouseOption.Yes),
previousAddress = Some(PartialAddress(
addressLine = None,
uprn = None,
postcode = postcode,
manualAddress = None)
)
)
) (
partialPreviousAddress => partialPreviousAddress.previousAddress.map(_.postcode)
).verifying(postcodeIsValidForlookupForPreviousAddress)
lazy val possibleAddressesMappingForPreviousAddress = mapping(
keys.jsonList.key -> nonEmptyText,
keys.postcode.key -> nonEmptyText
) (
(json, postcode) => PossibleAddress(
serialiser.fromJson[Addresses](json),
postcode
)
) (
possibleAddress => Some(
serialiser.toJson(possibleAddress.jsonList),
possibleAddress.postcode
)
)
val postcodeAddressFormForPreviousAddress = ErrorTransformForm(
mapping (
keys.previousAddress.key -> optional(postcodeLookupMappingForPreviousAddress)
) (
(previousAddress) => InprogressForces(
previousAddress = previousAddress
)
) (
inprogress => Some(
inprogress.previousAddress
)
).verifying(
postcodeIsNotEmptyForPreviousAddress
)
)
val selectAddressFormForPreviousAddress = ErrorTransformForm(
mapping (
keys.previousAddress.key -> optional(partialAddressMappingForPreviousAddress),
keys.address.key -> optional(LastAddress.mapping),
keys.possibleAddresses.key -> optional(possibleAddressesMappingForPreviousAddress)
) (
(previousAddress, address, possibleAddr) => InprogressForces(
previousAddress = Some(PartialPreviousAddress(
movedRecently = Some(MovedHouseOption.Yes),
previousAddress = previousAddress
)),
address = address,
possibleAddresses = possibleAddr
)
) (
inprogress => Some(
inprogress.previousAddress.flatMap(_.previousAddress),
inprogress.address,
inprogress.possibleAddresses)
).verifying( selectedAddressIsRequiredForPreviousAddress,
selectedAddressIsDifferent )
)
lazy val selectedAddressIsDifferent = Constraint[InprogressForces](keys.previousAddress.key) {
inprogress =>
val currentAddress = inprogress.address
//If _BOTH_ current and previous addresses have UPRN values (ie. neither manual addresses)...
//...then validate that the UPRNs are different
if (
currentAddress.flatMap(_.address).flatMap(_.uprn).isDefined
&& inprogress.previousAddress.get.previousAddress.flatMap(_.uprn).isDefined
) {
inprogress.previousAddress match {
case Some(partialAddress) if partialAddress.previousAddress.flatMap(_.uprn) != currentAddress.flatMap(_.address).flatMap(_.uprn) => {
Valid
}
case _ => {
Invalid("Your previous address cannot be the same as your current address", keys.previousAddress.previousAddress.address)
}
}
}
else {
Valid
}
}
val manualAddressFormForPreviousAddress = ErrorTransformForm(
mapping(
keys.previousAddress.key -> optional(manualPartialPreviousAddressMappingForPreviousAddress)
) (
previousAddress => InprogressForces(
previousAddress = Some(PartialPreviousAddress(
movedRecently = Some(MovedHouseOption.Yes),
previousAddress = previousAddress
)))
) (
inprogress => inprogress.previousAddress.map(_.previousAddress)
).verifying( manualAddressIsRequiredForPreviousAddress )
)
}
trait PreviousAddressConstraints extends CommonConstraints {
self: FormKeys
with ErrorMessages =>
// passed from PreviousAddressForms
val addressService: AddressService
lazy val previousAddressRequiredIfMoved = Constraint[PartialPreviousAddress](keys.previousAddress.key) {
previousAddress =>
val postcode = previousAddress.previousAddress.map(_.postcode)
val uprn = previousAddress.previousAddress.flatMap(_.uprn)
val manualAddressCity = previousAddress.previousAddress.flatMap(_.manualAddress.flatMap(_.city))
previousAddress.movedRecently match {
case Some(MovedHouseOption.Yes) if postcode.exists(addressService.isNothernIreland(_)) => Valid
case Some(MovedHouseOption.Yes) if uprn.exists(_.nonEmpty) => Valid
case Some(MovedHouseOption.Yes) if manualAddressCity.exists(_.nonEmpty) => Valid
case Some(MovedHouseOption.NotMoved) => Valid
case _ => Invalid("Please complete this step", keys.previousAddress)
}
}
lazy val manualAddressIsRequiredForPreviousAddress = Constraint[InprogressForces](keys.previousAddress.key) {
inprogress =>
inprogress.previousAddress match {
case Some(partialAddress) if partialAddress.previousAddress
.flatMap(_.manualAddress).isDefined && partialAddress
.previousAddress.exists(_.postcode != "") => {
Valid
}
case _ => {
Invalid("Please answer this question", keys.previousAddress.manualAddress)
}
}
}
lazy val selectedAddressIsRequiredForPreviousAddress = Constraint[InprogressForces](keys.previousAddress.key) {
inprogress =>
inprogress.previousAddress match {
case Some(partialAddress)
if partialAddress.previousAddress.exists(_.postcode != "")
&& (partialAddress.previousAddress.flatMap(_.uprn).isDefined
|| partialAddress.previousAddress.flatMap(_.manualAddress).isDefined) => {
Valid
}
case _ => {
Invalid("Please answer this question", keys.previousAddress)
}
}
}
lazy val postcodeIsNotEmptyForPreviousAddress = Constraint[InprogressForces](keys.previousAddress.key) {
inprogress =>
inprogress.previousAddress match {
case Some(partialAddress) if partialAddress
.previousAddress.exists(_.postcode == "") => {
Invalid("Please enter the postcode of your previous address", keys.previousAddress.postcode)
}
case None => {
Invalid("Please enter the postcode of your previous address", keys.previousAddress.postcode)
}
case _ => {
Valid
}
}
}
lazy val uprnOrManualDefinedForPreviousAddressIfNotFromNI = Constraint[PartialAddress](keys.previousAddress.key) {
case partialAddress if addressService.isNothernIreland(partialAddress.postcode) => Valid
case partialAddress if partialAddress.uprn.exists(_.nonEmpty) => Valid
case partialAddress if partialAddress.manualAddress.exists(_.city.exists(_.nonEmpty)) => Valid
case _ => Invalid(
"Please select your address",
keys.previousAddress.uprn,
keys.previousAddress.manualAddress,
keys.previousAddress
)
}
lazy val postcodeIsValidForPreviousAddress = Constraint[PartialAddress](keys.previousAddress.key) {
case PartialAddress(_, _, postcode, _, _)
if PostcodeValidator.isValid(postcode) => {
Valid
}
case _ => {
Invalid("Your postcode is not valid", keys.previousAddress.postcode)
}
}
/**
* Special version of 'postcodeIsValid' just for Postcode Step.
* The input type here is different, it is PartialPreviousAddress, wrapping PartialAddress
* containing the postcode.
*/
lazy val postcodeIsValidForlookupForPreviousAddress = Constraint[PartialPreviousAddress](keys.previousAddress.key) {
case PartialPreviousAddress(Some(MovedHouseOption.Yes), Some(PartialAddress(_, _, postcode, _, _)))
if PostcodeValidator.isValid(postcode) => Valid
case _ => Invalid("Your postcode is not valid", keys.previousAddress.postcode)
}
lazy val lineOneIsRequredForPreviousAddress = Constraint[PartialManualAddress](
keys.previousAddress.manualAddress.key) {
case PartialManualAddress(Some(_), _, _, _) => Valid
case _ => Invalid(atLeastOneLineIsRequiredError, keys.previousAddress.manualAddress.lineOne)
}
lazy val cityIsRequiredForPreviousAddress = Constraint[PartialManualAddress](
keys.previousAddress.manualAddress.key) {
case PartialManualAddress(_, _, _, Some(_)) => Valid
case _ => Invalid(cityIsRequiredError, keys.previousAddress.manualAddress.city)
}
}
| alphagov/ier-frontend | app/uk/gov/gds/ier/transaction/forces/previousAddress/PreviousAddressForms.scala | Scala | mit | 10,400 |
package is.hail.utils
class UnionFind(initialCapacity: Int = 32) {
private var a: Array[Int] = new Array[Int](initialCapacity)
private var rank: Array[Int] = new Array[Int](initialCapacity)
private var count: Int = 0
def size: Int = count
private def ensure(i: Int) {
if (i >= a.length) {
var newLength = a.length << 1
while (i >= newLength) {
newLength = newLength << 1
}
val a2 = new Array[Int](newLength)
Array.copy(a, 0, a2, 0, a.length)
a = a2
val rank2 = new Array[Int](newLength)
Array.copy(rank, 0, rank2, 0, rank.length)
rank = rank2
}
}
def makeSet(i: Int) {
ensure(i)
a(i) = i
count += 1
}
def find(x: Int): Int = {
require(x < a.length)
var representative = x
while (representative != a(representative)) {
representative = a(representative)
}
var current = x
while (representative != current) {
val temp = a(current)
a(current) = representative
current = temp
}
current
}
def union(x: Int, y: Int) {
val xroot = find(x)
val yroot = find(y)
if (xroot != yroot) {
count -= 1
if (rank(xroot) < rank(yroot)) {
a(xroot) = yroot
} else if (rank(xroot) > rank(yroot)) {
a(yroot) = xroot
} else {
a(xroot) = yroot
rank(yroot) += 1
}
}
}
def sameSet(x: Int, y: Int): Boolean = {
require(x < a.length && y < a.length)
find(x) == find(y)
}
}
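// Illustrative sketch, not part of the upstream file: typical use of the structure.
// Elements must be registered with makeSet before find/union/sameSet are called on them.
object UnionFindExample {
  def main(args: Array[String]): Unit = {
    val uf = new UnionFind()
    (0 until 5).foreach(uf.makeSet) // five singleton sets, uf.size == 5

    uf.union(0, 1)
    uf.union(3, 4)

    println(uf.sameSet(0, 1)) // true
    println(uf.sameSet(1, 3)) // false
    println(uf.size)          // 3 -- the remaining disjoint sets are {0,1}, {2}, {3,4}
  }
}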
| hail-is/hail | hail/src/main/scala/is/hail/utils/UnionFind.scala | Scala | mit | 1,503 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.time
import slamdata.Predef._
import scalaz.{Equal, Show}
sealed abstract class TemporalPart extends Serializable
object TemporalPart {
final case object Century extends TemporalPart
final case object Day extends TemporalPart
final case object Decade extends TemporalPart
final case object Hour extends TemporalPart
final case object Microsecond extends TemporalPart
final case object Millennium extends TemporalPart
final case object Millisecond extends TemporalPart
final case object Minute extends TemporalPart
final case object Month extends TemporalPart
final case object Quarter extends TemporalPart
final case object Second extends TemporalPart
final case object Week extends TemporalPart
final case object Year extends TemporalPart
implicit val equal: Equal[TemporalPart] = Equal.equalRef
implicit val show: Show[TemporalPart] = Show.showFromToString
}
| jedesah/Quasar | foundation/src/main/scala/quasar/time/TemporalPart.scala | Scala | apache-2.0 | 1,575 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.client
import java.io.{File, PrintStream}
import java.lang.{Iterable => JIterable}
import java.nio.charset.StandardCharsets.UTF_8
import java.util.{Locale, Map => JMap}
import java.util.concurrent.TimeUnit._
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.common.StatsSetupConst
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hadoop.hive.metastore.{IMetaStoreClient, TableType => HiveTableType}
import org.apache.hadoop.hive.metastore.api.{Database => HiveDatabase, Table => MetaStoreApiTable}
import org.apache.hadoop.hive.metastore.api.{FieldSchema, Order, SerDeInfo, StorageDescriptor}
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.metadata.{Hive, HiveException, Partition => HivePartition, Table => HiveTable}
import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC
import org.apache.hadoop.hive.ql.processors._
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.hive.serde.serdeConstants
import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe
import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe
import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.metrics.source.HiveCatalogMetrics
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{NoSuchDatabaseException, NoSuchPartitionException}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParseException}
import org.apache.spark.sql.execution.QueryExecutionException
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.hive.HiveExternalCatalog.{DATASOURCE_SCHEMA, DATASOURCE_SCHEMA_NUMPARTS, DATASOURCE_SCHEMA_PART_PREFIX}
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.client.HiveClientImpl._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.{CircularBuffer, Utils}
/**
* A class that wraps the HiveClient and converts its responses to externally visible classes.
* Note that this class is typically loaded with an internal classloader for each instantiation,
* allowing it to interact directly with a specific isolated version of Hive. Loading this class
* with the isolated classloader however will result in it only being visible as a [[HiveClient]],
* not a [[HiveClientImpl]].
*
* This class needs to interact with multiple versions of Hive, but will always be compiled with
* the 'native', execution version of Hive. Therefore, any places where hive breaks compatibility
* must use reflection after matching on `version`.
*
* Every HiveClientImpl creates an internal HiveConf object. This object is using the given
* `hadoopConf` as the base. All options set in the `sparkConf` will be applied to the HiveConf
* object and overrides any exiting options. Then, options in extraConfig will be applied
* to the HiveConf object and overrides any existing options.
*
* @param version the version of hive used when pick function calls that are not compatible.
* @param sparkConf all configuration options set in SparkConf.
* @param hadoopConf the base Configuration object used by the HiveConf created inside
* this HiveClientImpl.
* @param extraConfig a collection of configuration options that will be added to the
* hive conf before opening the hive client.
* @param initClassLoader the classloader used when creating the `state` field of
* this [[HiveClientImpl]].
*/
private[hive] class HiveClientImpl(
override val version: HiveVersion,
warehouseDir: Option[String],
sparkConf: SparkConf,
hadoopConf: JIterable[JMap.Entry[String, String]],
extraConfig: Map[String, String],
initClassLoader: ClassLoader,
val clientLoader: IsolatedClientLoader)
extends HiveClient
with Logging {
// Circular buffer to hold what hive prints to STDOUT and ERR. Only printed when failures occur.
private val outputBuffer = new CircularBuffer()
private val shim = version match {
case hive.v12 => new Shim_v0_12()
case hive.v13 => new Shim_v0_13()
case hive.v14 => new Shim_v0_14()
case hive.v1_0 => new Shim_v1_0()
case hive.v1_1 => new Shim_v1_1()
case hive.v1_2 => new Shim_v1_2()
case hive.v2_0 => new Shim_v2_0()
case hive.v2_1 => new Shim_v2_1()
case hive.v2_2 => new Shim_v2_2()
case hive.v2_3 => new Shim_v2_3()
case hive.v3_0 => new Shim_v3_0()
case hive.v3_1 => new Shim_v3_1()
}
// Create an internal session state for this HiveClientImpl.
val state: SessionState = {
val original = Thread.currentThread().getContextClassLoader
if (clientLoader.isolationOn) {
// Switch to the initClassLoader.
Thread.currentThread().setContextClassLoader(initClassLoader)
try {
newState()
} finally {
Thread.currentThread().setContextClassLoader(original)
}
} else {
// Isolation off means we detect a CliSessionState instance in current thread.
// 1: Inside the spark project, we have already started a CliSessionState in
// `SparkSQLCLIDriver`, which contains configurations from command lines. Later, we call
// `SparkSQLEnv.init()` there, which would new a hive client again. so we should keep those
// configurations and reuse the existing instance of `CliSessionState`. In this case,
// SessionState.get will always return a CliSessionState.
// 2: In another case, a user app may start a CliSessionState outside the spark project with
// built-in hive jars, which will turn off isolation. If SessionState.detachSession is
// called to remove the current state after that, a hive client created later will initialize
// its own state by newState()
val ret = SessionState.get
if (ret != null) {
// hive.metastore.warehouse.dir is determined in SharedState after the CliSessionState
// instance constructed, we need to follow that change here.
warehouseDir.foreach { dir =>
ret.getConf.setVar(ConfVars.METASTOREWAREHOUSE, dir)
}
ret
} else {
newState()
}
}
}
// Log the default warehouse location.
logInfo(
s"Warehouse location for Hive client " +
s"(version ${version.fullVersion}) is ${conf.getVar(ConfVars.METASTOREWAREHOUSE)}")
private def newState(): SessionState = {
val hiveConf = new HiveConf(classOf[SessionState])
// HiveConf is a Hadoop Configuration, which has a field of classLoader and
// the initial value will be the current thread's context class loader
// (i.e. initClassLoader at here).
// We call hiveConf.setClassLoader(initClassLoader) at here to make
// this action explicit.
hiveConf.setClassLoader(initClassLoader)
// 1: Take all from the hadoopConf to this hiveConf.
// This hadoopConf contains user settings in Hadoop's core-site.xml file
// and Hive's hive-site.xml file. Note, we load hive-site.xml file manually in
// SharedState and put settings in this hadoopConf instead of relying on HiveConf
// to load user settings. Otherwise, HiveConf's initialize method will override
// settings in the hadoopConf. This issue only shows up when spark.sql.hive.metastore.jars
// is not set to builtin. When spark.sql.hive.metastore.jars is builtin, the classpath
// has hive-site.xml. So, HiveConf will use that to override its default values.
// 2: we set all spark confs to this hiveConf.
// 3: we set all entries in config to this hiveConf.
val confMap = (hadoopConf.iterator().asScala.map(kv => kv.getKey -> kv.getValue) ++
sparkConf.getAll.toMap ++ extraConfig).toMap
confMap.foreach { case (k, v) => hiveConf.set(k, v) }
SQLConf.get.redactOptions(confMap).foreach { case (k, v) =>
logDebug(
s"""
|Applying Hadoop/Hive/Spark and extra properties to Hive Conf:
|$k=$v
""".stripMargin)
}
// Disable CBO because we removed the Calcite dependency.
hiveConf.setBoolean("hive.cbo.enable", false)
val state = new SessionState(hiveConf)
if (clientLoader.cachedHive != null) {
Hive.set(clientLoader.cachedHive.asInstanceOf[Hive])
}
// Hive 2.3 will set UDFClassLoader to hiveConf when initializing SessionState
// since HIVE-11878, and ADDJarCommand will add jars to clientLoader.classLoader.
// For this reason we cannot load the jars added by ADDJarCommand, because the class loader
// has changed. We reset it to clientLoader.classLoader here.
if (HiveUtils.isHive23) {
state.getConf.setClassLoader(clientLoader.classLoader)
}
SessionState.start(state)
state.out = new PrintStream(outputBuffer, true, UTF_8.name())
state.err = new PrintStream(outputBuffer, true, UTF_8.name())
state
}
/** Returns the configuration for the current session. */
def conf: HiveConf = if (!HiveUtils.isHive23) {
state.getConf
} else {
val hiveConf = state.getConf
// Hive changed the default of datanucleus.schema.autoCreateAll from true to false
// and hive.metastore.schema.verification from false to true since Hive 2.0.
// For details, see the JIRA HIVE-6113, HIVE-12463 and HIVE-1841.
// isEmbeddedMetaStore should not be true in the production environment.
// We hard-code hive.metastore.schema.verification and datanucleus.schema.autoCreateAll to allow
// bin/spark-shell, bin/spark-sql and sbin/start-thriftserver.sh to automatically create the
// Derby Metastore when running Spark in the non-production environment.
val isEmbeddedMetaStore = {
val msUri = hiveConf.getVar(ConfVars.METASTOREURIS)
val msConnUrl = hiveConf.getVar(ConfVars.METASTORECONNECTURLKEY)
(msUri == null || msUri.trim().isEmpty) &&
(msConnUrl != null && msConnUrl.startsWith("jdbc:derby"))
}
if (isEmbeddedMetaStore) {
hiveConf.setBoolean("hive.metastore.schema.verification", false)
hiveConf.setBoolean("datanucleus.schema.autoCreateAll", true)
}
hiveConf
}
override val userName = UserGroupInformation.getCurrentUser.getShortUserName
override def getConf(key: String, defaultValue: String): String = {
conf.get(key, defaultValue)
}
// We use hive's conf for compatibility.
private val retryLimit = conf.getIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES)
private val retryDelayMillis = shim.getMetastoreClientConnectRetryDelayMillis(conf)
/**
* Runs `f` with multiple retries in case the hive metastore is temporarily unreachable.
*/
private def retryLocked[A](f: => A): A = clientLoader.synchronized {
// Hive sometimes retries internally, so set a deadline to avoid compounding delays.
val deadline = System.nanoTime + (retryLimit * retryDelayMillis * 1e6).toLong
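    // retryDelayMillis is in milliseconds, so multiplying by 1e6 converts the total retry budget
    // to nanoseconds to match System.nanoTime (e.g. retryLimit = 3 and retryDelayMillis = 1000
    // give a deadline roughly 3 seconds from now).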
var numTries = 0
var caughtException: Exception = null
do {
numTries += 1
try {
return f
} catch {
case e: Exception if causedByThrift(e) =>
caughtException = e
logWarning(
"HiveClient got thrift exception, destroying client and retrying " +
s"(${retryLimit - numTries} tries remaining)", e)
clientLoader.cachedHive = null
Thread.sleep(retryDelayMillis)
}
} while (numTries <= retryLimit && System.nanoTime < deadline)
if (System.nanoTime > deadline) {
logWarning("Deadline exceeded")
}
throw caughtException
}
private def causedByThrift(e: Throwable): Boolean = {
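    // Walk the cause chain and treat any exception whose message mentions a Thrift
    // (TApplication/TProtocol/TTransport) failure as a retryable metastore connectivity problem.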
var target = e
while (target != null) {
val msg = target.getMessage()
if (msg != null && msg.matches("(?s).*(TApplication|TProtocol|TTransport)Exception.*")) {
return true
}
target = target.getCause()
}
false
}
private def client: Hive = {
if (clientLoader.cachedHive != null) {
clientLoader.cachedHive.asInstanceOf[Hive]
} else {
val c = Hive.get(conf)
clientLoader.cachedHive = c
c
}
}
private def msClient: IMetaStoreClient = {
shim.getMSC(client)
}
/** Return the associated Hive [[SessionState]] of this [[HiveClientImpl]] */
override def getState: SessionState = withHiveState(state)
/**
* Runs `f` with ThreadLocal session state and classloaders configured for this version of hive.
*/
def withHiveState[A](f: => A): A = retryLocked {
val original = Thread.currentThread().getContextClassLoader
val originalConfLoader = state.getConf.getClassLoader
    // The classloader in clientLoader could have been changed after addJar, so always use the
    // latest classloader. We explicitly set the context class loader since "conf.setClassLoader"
    // does not do that, and the Hive client libraries may need to load classes defined by the
    // client's class loader.
Thread.currentThread().setContextClassLoader(clientLoader.classLoader)
state.getConf.setClassLoader(clientLoader.classLoader)
// Set the thread local metastore client to the client associated with this HiveClientImpl.
Hive.set(client)
// Replace conf in the thread local Hive with current conf
Hive.get(conf)
// setCurrentSessionState will use the classLoader associated
// with the HiveConf in `state` to override the context class loader of the current
// thread.
shim.setCurrentSessionState(state)
val ret = try f finally {
state.getConf.setClassLoader(originalConfLoader)
Thread.currentThread().setContextClassLoader(original)
HiveCatalogMetrics.incrementHiveClientCalls(1)
}
ret
}
def setOut(stream: PrintStream): Unit = withHiveState {
state.out = stream
}
def setInfo(stream: PrintStream): Unit = withHiveState {
state.info = stream
}
def setError(stream: PrintStream): Unit = withHiveState {
state.err = stream
}
private def setCurrentDatabaseRaw(db: String): Unit = {
if (state.getCurrentDatabase != db) {
if (databaseExists(db)) {
state.setCurrentDatabase(db)
} else {
throw new NoSuchDatabaseException(db)
}
}
}
override def setCurrentDatabase(databaseName: String): Unit = withHiveState {
setCurrentDatabaseRaw(databaseName)
}
override def createDatabase(
database: CatalogDatabase,
ignoreIfExists: Boolean): Unit = withHiveState {
client.createDatabase(
new HiveDatabase(
database.name,
database.description,
CatalogUtils.URIToString(database.locationUri),
Option(database.properties).map(_.asJava).orNull),
ignoreIfExists)
}
override def dropDatabase(
name: String,
ignoreIfNotExists: Boolean,
cascade: Boolean): Unit = withHiveState {
client.dropDatabase(name, true, ignoreIfNotExists, cascade)
}
override def alterDatabase(database: CatalogDatabase): Unit = withHiveState {
if (!getDatabase(database.name).locationUri.equals(database.locationUri)) {
// SPARK-29260: Enable supported versions once it support altering database location.
if (!(version.equals(hive.v3_0) || version.equals(hive.v3_1))) {
throw new AnalysisException(
s"Hive ${version.fullVersion} does not support altering database location")
}
}
client.alterDatabase(
database.name,
new HiveDatabase(
database.name,
database.description,
CatalogUtils.URIToString(database.locationUri),
Option(database.properties).map(_.asJava).orNull))
}
override def getDatabase(dbName: String): CatalogDatabase = withHiveState {
Option(client.getDatabase(dbName)).map { d =>
CatalogDatabase(
name = d.getName,
description = Option(d.getDescription).getOrElse(""),
locationUri = CatalogUtils.stringToURI(d.getLocationUri),
properties = Option(d.getParameters).map(_.asScala.toMap).orNull)
}.getOrElse(throw new NoSuchDatabaseException(dbName))
}
override def databaseExists(dbName: String): Boolean = withHiveState {
client.databaseExists(dbName)
}
override def listDatabases(pattern: String): Seq[String] = withHiveState {
client.getDatabasesByPattern(pattern).asScala
}
private def getRawTableOption(dbName: String, tableName: String): Option[HiveTable] = {
Option(client.getTable(dbName, tableName, false /* do not throw exception */))
}
private def getRawTablesByName(dbName: String, tableNames: Seq[String]): Seq[HiveTable] = {
try {
msClient.getTableObjectsByName(dbName, tableNames.asJava).asScala
.map(extraFixesForNonView).map(new HiveTable(_))
} catch {
case ex: Exception =>
        throw new HiveException(s"Unable to fetch tables of db $dbName", ex)
}
}
override def tableExists(dbName: String, tableName: String): Boolean = withHiveState {
getRawTableOption(dbName, tableName).nonEmpty
}
override def getTablesByName(
dbName: String,
tableNames: Seq[String]): Seq[CatalogTable] = withHiveState {
getRawTablesByName(dbName, tableNames).map(convertHiveTableToCatalogTable)
}
override def getTableOption(
dbName: String,
tableName: String): Option[CatalogTable] = withHiveState {
logDebug(s"Looking up $dbName.$tableName")
getRawTableOption(dbName, tableName).map(convertHiveTableToCatalogTable)
}
private def convertHiveTableToCatalogTable(h: HiveTable): CatalogTable = {
// Note: Hive separates partition columns and the schema, but for us the
// partition columns are part of the schema
val cols = h.getCols.asScala.map(fromHiveColumn)
val partCols = h.getPartCols.asScala.map(fromHiveColumn)
val schema = StructType(cols ++ partCols)
val bucketSpec = if (h.getNumBuckets > 0) {
val sortColumnOrders = h.getSortCols.asScala
      // Currently Spark only supports columns sorted in ascending order, whereas Hive
      // supports both ascending and descending order. Only if all the columns are sorted
      // in ascending order do we propagate the sortedness information to downstream
      // processing / optimizations in Spark.
      // TODO: In the future, Spark could support columns sorted in descending order.
val allAscendingSorted = sortColumnOrders.forall(_.getOrder == HIVE_COLUMN_ORDER_ASC)
val sortColumnNames = if (allAscendingSorted) {
sortColumnOrders.map(_.getCol)
} else {
Seq.empty
}
Option(BucketSpec(h.getNumBuckets, h.getBucketCols.asScala, sortColumnNames))
} else {
None
}
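    // Illustrative example: a Hive table bucketed by (user_id) into 8 buckets and sorted by
    // (ts ASC) maps to BucketSpec(8, Seq("user_id"), Seq("ts")); if any sort column were DESC,
    // sortColumnNames would be left empty.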
// Skew spec and storage handler can't be mapped to CatalogTable (yet)
val unsupportedFeatures = ArrayBuffer.empty[String]
if (!h.getSkewedColNames.isEmpty) {
unsupportedFeatures += "skewed columns"
}
if (h.getStorageHandler != null) {
unsupportedFeatures += "storage handler"
}
if (h.getTableType == HiveTableType.VIRTUAL_VIEW && partCols.nonEmpty) {
unsupportedFeatures += "partitioned view"
}
val properties = Option(h.getParameters).map(_.asScala.toMap).orNull
// Hive-generated Statistics are also recorded in ignoredProperties
val ignoredProperties = scala.collection.mutable.Map.empty[String, String]
for (key <- HiveStatisticsProperties; value <- properties.get(key)) {
ignoredProperties += key -> value
}
val excludedTableProperties = HiveStatisticsProperties ++ Set(
// The property value of "comment" is moved to the dedicated field "comment"
"comment",
      // For EXTERNAL_TABLE, the table properties have a particular field "EXTERNAL". This is added
// in the function toHiveTable.
"EXTERNAL"
)
val filteredProperties = properties.filterNot {
case (key, _) => excludedTableProperties.contains(key)
}
val comment = properties.get("comment")
CatalogTable(
identifier = TableIdentifier(h.getTableName, Option(h.getDbName)),
tableType = h.getTableType match {
case HiveTableType.EXTERNAL_TABLE => CatalogTableType.EXTERNAL
case HiveTableType.MANAGED_TABLE => CatalogTableType.MANAGED
case HiveTableType.VIRTUAL_VIEW => CatalogTableType.VIEW
case unsupportedType =>
val tableTypeStr = unsupportedType.toString.toLowerCase(Locale.ROOT).replace("_", " ")
throw new AnalysisException(s"Hive $tableTypeStr is not supported.")
},
schema = schema,
partitionColumnNames = partCols.map(_.name),
      // If the table is written by Spark, we will put bucketing information in table properties,
      // and will always overwrite the bucket spec in the Hive metastore with the bucketing
      // information in table properties. This means, if we have a bucket spec in both the Hive
      // metastore and table properties, we will trust the one in table properties.
bucketSpec = bucketSpec,
owner = Option(h.getOwner).getOrElse(""),
createTime = h.getTTable.getCreateTime.toLong * 1000,
lastAccessTime = h.getLastAccessTime.toLong * 1000,
storage = CatalogStorageFormat(
locationUri = shim.getDataLocation(h).map(CatalogUtils.stringToURI),
// To avoid ClassNotFound exception, we try our best to not get the format class, but get
// the class name directly. However, for non-native tables, there is no interface to get
// the format class name, so we may still throw ClassNotFound in this case.
inputFormat = Option(h.getTTable.getSd.getInputFormat).orElse {
Option(h.getStorageHandler).map(_.getInputFormatClass.getName)
},
outputFormat = Option(h.getTTable.getSd.getOutputFormat).orElse {
Option(h.getStorageHandler).map(_.getOutputFormatClass.getName)
},
serde = Option(h.getSerializationLib),
compressed = h.getTTable.getSd.isCompressed,
properties = Option(h.getTTable.getSd.getSerdeInfo.getParameters)
.map(_.asScala.toMap).orNull
),
      // For EXTERNAL_TABLE, the table properties have a particular field "EXTERNAL". This is added
// in the function toHiveTable.
properties = filteredProperties,
stats = readHiveStats(properties),
comment = comment,
      // In older versions of Spark (before 2.2.0), we expanded the view original text and
      // stored it in `viewExpandedText`, which should be used in view resolution.
      // We read `viewExpandedText` as viewText, and also read `viewOriginalText` in order to
      // display the original view text in the `DESC [EXTENDED|FORMATTED] table` command for views
      // created by older versions of Spark.
viewOriginalText = Option(h.getViewOriginalText),
viewText = Option(h.getViewExpandedText),
unsupportedFeatures = unsupportedFeatures,
ignoredProperties = ignoredProperties.toMap)
}
override def createTable(table: CatalogTable, ignoreIfExists: Boolean): Unit = withHiveState {
verifyColumnDataType(table.dataSchema)
client.createTable(toHiveTable(table, Some(userName)), ignoreIfExists)
}
override def dropTable(
dbName: String,
tableName: String,
ignoreIfNotExists: Boolean,
purge: Boolean): Unit = withHiveState {
shim.dropTable(client, dbName, tableName, true, ignoreIfNotExists, purge)
}
override def alterTable(
dbName: String,
tableName: String,
table: CatalogTable): Unit = withHiveState {
// getTableOption removes all the Hive-specific properties. Here, we fill them back to ensure
    // these properties are still available to other clients that share the same Hive metastore.
// If users explicitly alter these Hive-specific properties through ALTER TABLE DDL, we respect
// these user-specified values.
verifyColumnDataType(table.dataSchema)
val hiveTable = toHiveTable(
table.copy(properties = table.ignoredProperties ++ table.properties), Some(userName))
// Do not use `table.qualifiedName` here because this may be a rename
val qualifiedTableName = s"$dbName.$tableName"
shim.alterTable(client, qualifiedTableName, hiveTable)
}
override def alterTableDataSchema(
dbName: String,
tableName: String,
newDataSchema: StructType,
schemaProps: Map[String, String]): Unit = withHiveState {
val oldTable = client.getTable(dbName, tableName)
verifyColumnDataType(newDataSchema)
val hiveCols = newDataSchema.map(toHiveColumn)
oldTable.setFields(hiveCols.asJava)
// remove old schema table properties
val it = oldTable.getParameters.entrySet.iterator
while (it.hasNext) {
val entry = it.next()
val isSchemaProp = entry.getKey.startsWith(DATASOURCE_SCHEMA_PART_PREFIX) ||
entry.getKey == DATASOURCE_SCHEMA || entry.getKey == DATASOURCE_SCHEMA_NUMPARTS
if (isSchemaProp) {
it.remove()
}
}
// set new schema table properties
schemaProps.foreach { case (k, v) => oldTable.setProperty(k, v) }
val qualifiedTableName = s"$dbName.$tableName"
shim.alterTable(client, qualifiedTableName, oldTable)
}
override def createPartitions(
db: String,
table: String,
parts: Seq[CatalogTablePartition],
ignoreIfExists: Boolean): Unit = withHiveState {
shim.createPartitions(client, db, table, parts, ignoreIfExists)
}
override def dropPartitions(
db: String,
table: String,
specs: Seq[TablePartitionSpec],
ignoreIfNotExists: Boolean,
purge: Boolean,
retainData: Boolean): Unit = withHiveState {
// TODO: figure out how to drop multiple partitions in one call
val hiveTable = client.getTable(db, table, true /* throw exception */)
// do the check at first and collect all the matching partitions
val matchingParts =
specs.flatMap { s =>
assert(s.values.forall(_.nonEmpty), s"partition spec '$s' is invalid")
// The provided spec here can be a partial spec, i.e. it will match all partitions
// whose specs are supersets of this partial spec. E.g. If a table has partitions
// (b='1', c='1') and (b='1', c='2'), a partial spec of (b='1') will match both.
val parts = client.getPartitions(hiveTable, s.asJava).asScala
if (parts.isEmpty && !ignoreIfNotExists) {
throw new AnalysisException(
s"No partition is dropped. One partition spec '$s' does not exist in table '$table' " +
s"database '$db'")
}
parts.map(_.getValues)
}.distinct
var droppedParts = ArrayBuffer.empty[java.util.List[String]]
matchingParts.foreach { partition =>
try {
shim.dropPartition(client, db, table, partition, !retainData, purge)
} catch {
case e: Exception =>
val remainingParts = matchingParts.toBuffer -- droppedParts
logError(
s"""
|======================
|Attempt to drop the partition specs in table '$table' database '$db':
            |${specs.mkString("\n")}
            |In this attempt, the following partitions have been dropped successfully:
            |${droppedParts.mkString("\n")}
            |The remaining partitions have not been dropped:
            |${remainingParts.mkString("\n")}
|======================
""".stripMargin)
throw e
}
droppedParts += partition
}
}
override def renamePartitions(
db: String,
table: String,
specs: Seq[TablePartitionSpec],
newSpecs: Seq[TablePartitionSpec]): Unit = withHiveState {
require(specs.size == newSpecs.size, "number of old and new partition specs differ")
val catalogTable = getTable(db, table)
val hiveTable = toHiveTable(catalogTable, Some(userName))
specs.zip(newSpecs).foreach { case (oldSpec, newSpec) =>
val hivePart = getPartitionOption(catalogTable, oldSpec)
.map { p => toHivePartition(p.copy(spec = newSpec), hiveTable) }
.getOrElse { throw new NoSuchPartitionException(db, table, oldSpec) }
client.renamePartition(hiveTable, oldSpec.asJava, hivePart)
}
}
override def alterPartitions(
db: String,
table: String,
newParts: Seq[CatalogTablePartition]): Unit = withHiveState {
// Note: Before altering table partitions in Hive, you *must* set the current database
// to the one that contains the table of interest. Otherwise you will end up with the
// most helpful error message ever: "Unable to alter partition. alter is not possible."
// See HIVE-2742 for more detail.
val original = state.getCurrentDatabase
try {
setCurrentDatabaseRaw(db)
val hiveTable = toHiveTable(getTable(db, table), Some(userName))
shim.alterPartitions(client, table, newParts.map { toHivePartition(_, hiveTable) }.asJava)
} finally {
state.setCurrentDatabase(original)
}
}
/**
* Returns the partition names for the given table that match the supplied partition spec.
* If no partition spec is specified, all partitions are returned.
*
* The returned sequence is sorted as strings.
*/
override def getPartitionNames(
table: CatalogTable,
partialSpec: Option[TablePartitionSpec] = None): Seq[String] = withHiveState {
val hivePartitionNames =
partialSpec match {
case None =>
// -1 for result limit means "no limit/return all"
client.getPartitionNames(table.database, table.identifier.table, -1)
case Some(s) =>
assert(s.values.forall(_.nonEmpty), s"partition spec '$s' is invalid")
client.getPartitionNames(table.database, table.identifier.table, s.asJava, -1)
}
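    // The metastore returns names in key=value form, e.g. "ds=2024-01-01/hr=00" (illustrative),
    // so sorting them as plain strings below yields a deterministic, lexicographic order.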
hivePartitionNames.asScala.sorted
}
override def getPartitionOption(
table: CatalogTable,
spec: TablePartitionSpec): Option[CatalogTablePartition] = withHiveState {
val hiveTable = toHiveTable(table, Some(userName))
val hivePartition = client.getPartition(hiveTable, spec.asJava, false)
Option(hivePartition).map(fromHivePartition)
}
/**
* Returns the partitions for the given table that match the supplied partition spec.
* If no partition spec is specified, all partitions are returned.
*/
override def getPartitions(
table: CatalogTable,
spec: Option[TablePartitionSpec]): Seq[CatalogTablePartition] = withHiveState {
val hiveTable = toHiveTable(table, Some(userName))
val partSpec = spec match {
case None => CatalogTypes.emptyTablePartitionSpec
case Some(s) =>
assert(s.values.forall(_.nonEmpty), s"partition spec '$s' is invalid")
s
}
val parts = client.getPartitions(hiveTable, partSpec.asJava).asScala.map(fromHivePartition)
HiveCatalogMetrics.incrementFetchedPartitions(parts.length)
parts
}
override def getPartitionsByFilter(
table: CatalogTable,
predicates: Seq[Expression]): Seq[CatalogTablePartition] = withHiveState {
val hiveTable = toHiveTable(table, Some(userName))
val parts = shim.getPartitionsByFilter(client, hiveTable, predicates).map(fromHivePartition)
HiveCatalogMetrics.incrementFetchedPartitions(parts.length)
parts
}
override def listTables(dbName: String): Seq[String] = withHiveState {
client.getAllTables(dbName).asScala
}
override def listTables(dbName: String, pattern: String): Seq[String] = withHiveState {
client.getTablesByPattern(dbName, pattern).asScala
}
/**
* Runs the specified SQL query using Hive.
*/
override def runSqlHive(sql: String): Seq[String] = {
val maxResults = 100000
val results = runHive(sql, maxResults)
// It is very confusing when you only get back some of the results...
if (results.size == maxResults) sys.error("RESULTS POSSIBLY TRUNCATED")
results
}
/**
* Execute the command using Hive and return the results as a sequence. Each element
* in the sequence is one row.
* Since upgrading the built-in Hive to 2.3, hive-llap-client is needed when
* running MapReduce jobs with `runHive`.
   * Since HIVE-17626 (Hive 3.0.0), hive.query.reexecution.enabled needs to be set to false.
*/
protected def runHive(cmd: String, maxRows: Int = 1000): Seq[String] = withHiveState {
def closeDriver(driver: Driver): Unit = {
      // Since HIVE-18238 (Hive 3.0.0), the Driver.close function's return type changed
      // and the CommandProcessorFactory.clean function was removed.
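      // Invoking close() reflectively below avoids compiling against a particular return type,
      // so the same call works with both the old and the new signature.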
driver.getClass.getMethod("close").invoke(driver)
if (version != hive.v3_0 && version != hive.v3_1) {
CommandProcessorFactory.clean(conf)
}
}
logDebug(s"Running hiveql '$cmd'")
if (cmd.toLowerCase(Locale.ROOT).startsWith("set")) { logDebug(s"Changing config: $cmd") }
try {
val cmd_trimmed: String = cmd.trim()
      val tokens: Array[String] = cmd_trimmed.split("\\s+")
// The remainder of the command.
val cmd_1: String = cmd_trimmed.substring(tokens(0).length()).trim()
val proc = shim.getCommandProcessor(tokens(0), conf)
proc match {
case driver: Driver =>
val response: CommandProcessorResponse = driver.run(cmd)
// Throw an exception if there is an error in query processing.
if (response.getResponseCode != 0) {
closeDriver(driver)
throw new QueryExecutionException(response.getErrorMessage)
}
driver.setMaxRows(maxRows)
val results = shim.getDriverResults(driver)
closeDriver(driver)
results
case _ =>
if (state.out != null) {
// scalastyle:off println
state.out.println(tokens(0) + " " + cmd_1)
// scalastyle:on println
}
Seq(proc.run(cmd_1).getResponseCode.toString)
}
} catch {
case e: Exception =>
logError(
s"""
|======================
|HIVE FAILURE OUTPUT
|======================
|${outputBuffer.toString}
|======================
|END HIVE FAILURE OUTPUT
|======================
""".stripMargin)
throw e
}
}
def loadPartition(
loadPath: String,
dbName: String,
tableName: String,
partSpec: java.util.LinkedHashMap[String, String],
replace: Boolean,
inheritTableSpecs: Boolean,
isSrcLocal: Boolean): Unit = withHiveState {
val hiveTable = client.getTable(dbName, tableName, true /* throw exception */)
shim.loadPartition(
client,
new Path(loadPath), // TODO: Use URI
s"$dbName.$tableName",
partSpec,
replace,
inheritTableSpecs,
isSkewedStoreAsSubdir = hiveTable.isStoredAsSubDirectories,
isSrcLocal = isSrcLocal)
}
def loadTable(
loadPath: String, // TODO URI
tableName: String,
replace: Boolean,
isSrcLocal: Boolean): Unit = withHiveState {
shim.loadTable(
client,
new Path(loadPath),
tableName,
replace,
isSrcLocal)
}
def loadDynamicPartitions(
loadPath: String,
dbName: String,
tableName: String,
partSpec: java.util.LinkedHashMap[String, String],
replace: Boolean,
numDP: Int): Unit = withHiveState {
val hiveTable = client.getTable(dbName, tableName, true /* throw exception */)
shim.loadDynamicPartitions(
client,
new Path(loadPath),
s"$dbName.$tableName",
partSpec,
replace,
numDP,
listBucketingEnabled = hiveTable.isStoredAsSubDirectories)
}
override def createFunction(db: String, func: CatalogFunction): Unit = withHiveState {
shim.createFunction(client, db, func)
}
override def dropFunction(db: String, name: String): Unit = withHiveState {
shim.dropFunction(client, db, name)
}
override def renameFunction(db: String, oldName: String, newName: String): Unit = withHiveState {
shim.renameFunction(client, db, oldName, newName)
}
override def alterFunction(db: String, func: CatalogFunction): Unit = withHiveState {
shim.alterFunction(client, db, func)
}
override def getFunctionOption(
db: String, name: String): Option[CatalogFunction] = withHiveState {
shim.getFunctionOption(client, db, name)
}
override def listFunctions(db: String, pattern: String): Seq[String] = withHiveState {
shim.listFunctions(client, db, pattern)
}
def addJar(path: String): Unit = {
val uri = new Path(path).toUri
val jarURL = if (uri.getScheme == null) {
// `path` is a local file path without a URL scheme
new File(path).toURI.toURL
} else {
// `path` is a URL with a scheme
uri.toURL
}
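    // Illustrative examples: "/tmp/udfs.jar" (no scheme) becomes the URL file:/tmp/udfs.jar,
    // while a path that already carries a scheme, e.g. "hdfs://nn:8020/udfs.jar", is converted
    // to a URL directly.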
clientLoader.addJar(jarURL)
runSqlHive(s"ADD JAR $path")
}
def newSession(): HiveClientImpl = {
clientLoader.createClient().asInstanceOf[HiveClientImpl]
}
def reset(): Unit = withHiveState {
val allTables = client.getAllTables("default")
val (mvs, others) = allTables.asScala.map(t => client.getTable("default", t))
.partition(_.getTableType.toString.equals("MATERIALIZED_VIEW"))
    // Remove materialized views first, otherwise dropping the tables they depend on violates a
    // foreign key constraint in the metastore.
mvs.foreach { table =>
val t = table.getTableName
logDebug(s"Deleting materialized view $t")
client.dropTable("default", t)
}
others.foreach { table =>
val t = table.getTableName
logDebug(s"Deleting table $t")
try {
client.getIndexes("default", t, 255).asScala.foreach { index =>
shim.dropIndex(client, "default", t, index.getIndexName)
}
if (!table.isIndexTable) {
client.dropTable("default", t)
}
} catch {
case _: NoSuchMethodError =>
          // HIVE-18448: Hive 3.0 removed the index APIs.
client.dropTable("default", t)
}
}
client.getAllDatabases.asScala.filterNot(_ == "default").foreach { db =>
logDebug(s"Dropping Database: $db")
client.dropDatabase(db, true, false, true)
}
}
}
private[hive] object HiveClientImpl {
/** Converts the native StructField to Hive's FieldSchema. */
def toHiveColumn(c: StructField): FieldSchema = {
val typeString = if (c.metadata.contains(HIVE_TYPE_STRING)) {
c.metadata.getString(HIVE_TYPE_STRING)
} else {
c.dataType.catalogString
}
new FieldSchema(c.name, typeString, c.getComment().orNull)
}
/** Get the Spark SQL native DataType from Hive's FieldSchema. */
private def getSparkSQLDataType(hc: FieldSchema): DataType = {
try {
CatalystSqlParser.parseDataType(hc.getType)
} catch {
case e: ParseException =>
throw new SparkException("Cannot recognize hive type string: " + hc.getType, e)
}
}
/** Builds the native StructField from Hive's FieldSchema. */
def fromHiveColumn(hc: FieldSchema): StructField = {
val columnType = getSparkSQLDataType(hc)
val metadata = if (hc.getType != columnType.catalogString) {
new MetadataBuilder().putString(HIVE_TYPE_STRING, hc.getType).build()
} else {
Metadata.empty
}
val field = StructField(
name = hc.getName,
dataType = columnType,
nullable = true,
metadata = metadata)
Option(hc.getComment).map(field.withComment).getOrElse(field)
}
private def verifyColumnDataType(schema: StructType): Unit = {
schema.foreach(col => getSparkSQLDataType(toHiveColumn(col)))
}
private def toInputFormat(name: String) =
Utils.classForName[org.apache.hadoop.mapred.InputFormat[_, _]](name)
private def toOutputFormat(name: String) =
Utils.classForName[org.apache.hadoop.hive.ql.io.HiveOutputFormat[_, _]](name)
/**
   * Converts the native table metadata representation, CatalogTable, to Hive's Table.
*/
def toHiveTable(table: CatalogTable, userName: Option[String] = None): HiveTable = {
val hiveTable = new HiveTable(table.database, table.identifier.table)
// For EXTERNAL_TABLE, we also need to set EXTERNAL field in the table properties.
// Otherwise, Hive metastore will change the table to a MANAGED_TABLE.
// (metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java#L1095-L1105)
hiveTable.setTableType(table.tableType match {
case CatalogTableType.EXTERNAL =>
hiveTable.setProperty("EXTERNAL", "TRUE")
HiveTableType.EXTERNAL_TABLE
case CatalogTableType.MANAGED =>
HiveTableType.MANAGED_TABLE
case CatalogTableType.VIEW => HiveTableType.VIRTUAL_VIEW
case t =>
throw new IllegalArgumentException(
s"Unknown table type is found at toHiveTable: $t")
})
// Note: In Hive the schema and partition columns must be disjoint sets
val (partCols, schema) = table.schema.map(toHiveColumn).partition { c =>
table.partitionColumnNames.contains(c.getName)
}
hiveTable.setFields(schema.asJava)
hiveTable.setPartCols(partCols.asJava)
Option(table.owner).filter(_.nonEmpty).orElse(userName).foreach(hiveTable.setOwner)
hiveTable.setCreateTime(MILLISECONDS.toSeconds(table.createTime).toInt)
hiveTable.setLastAccessTime(MILLISECONDS.toSeconds(table.lastAccessTime).toInt)
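    // CatalogTable keeps createTime/lastAccessTime in milliseconds while Hive stores seconds,
    // hence the MILLISECONDS.toSeconds conversions above (and the reverse `* 1000` when the
    // table is read back in convertHiveTableToCatalogTable).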
table.storage.locationUri.map(CatalogUtils.URIToString).foreach { loc =>
hiveTable.getTTable.getSd.setLocation(loc)}
table.storage.inputFormat.map(toInputFormat).foreach(hiveTable.setInputFormatClass)
table.storage.outputFormat.map(toOutputFormat).foreach(hiveTable.setOutputFormatClass)
hiveTable.setSerializationLib(
table.storage.serde.getOrElse("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
table.storage.properties.foreach { case (k, v) => hiveTable.setSerdeParam(k, v) }
table.properties.foreach { case (k, v) => hiveTable.setProperty(k, v) }
table.comment.foreach { c => hiveTable.setProperty("comment", c) }
    // Hive will expand the view text, so it needs 2 fields: viewOriginalText and viewExpandedText.
    // Since we don't expand the view text, but only add table properties, we map `viewText` to
    // both fields in the Hive table.
table.viewText.foreach { t =>
hiveTable.setViewOriginalText(t)
hiveTable.setViewExpandedText(t)
}
table.bucketSpec match {
case Some(bucketSpec) if DDLUtils.isHiveTable(table) =>
hiveTable.setNumBuckets(bucketSpec.numBuckets)
hiveTable.setBucketCols(bucketSpec.bucketColumnNames.toList.asJava)
if (bucketSpec.sortColumnNames.nonEmpty) {
hiveTable.setSortCols(
bucketSpec.sortColumnNames
.map(col => new Order(col, HIVE_COLUMN_ORDER_ASC))
.toList
.asJava
)
}
case _ =>
}
hiveTable
}
/**
   * Converts the native partition metadata representation, CatalogTablePartition, to
* Hive's Partition.
*/
def toHivePartition(
p: CatalogTablePartition,
ht: HiveTable): HivePartition = {
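    // Unlike the partial specs accepted by the listing/dropping APIs, this conversion requires a
    // complete spec: every partition column of `ht` must have a value, otherwise the lookup below
    // throws an IllegalArgumentException.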
val tpart = new org.apache.hadoop.hive.metastore.api.Partition
val partValues = ht.getPartCols.asScala.map { hc =>
p.spec.getOrElse(hc.getName, throw new IllegalArgumentException(
s"Partition spec is missing a value for column '${hc.getName}': ${p.spec}"))
}
val storageDesc = new StorageDescriptor
val serdeInfo = new SerDeInfo
p.storage.locationUri.map(CatalogUtils.URIToString(_)).foreach(storageDesc.setLocation)
p.storage.inputFormat.foreach(storageDesc.setInputFormat)
p.storage.outputFormat.foreach(storageDesc.setOutputFormat)
p.storage.serde.foreach(serdeInfo.setSerializationLib)
serdeInfo.setParameters(p.storage.properties.asJava)
storageDesc.setSerdeInfo(serdeInfo)
tpart.setDbName(ht.getDbName)
tpart.setTableName(ht.getTableName)
tpart.setValues(partValues.asJava)
tpart.setSd(storageDesc)
tpart.setCreateTime(MILLISECONDS.toSeconds(p.createTime).toInt)
tpart.setLastAccessTime(MILLISECONDS.toSeconds(p.lastAccessTime).toInt)
tpart.setParameters(mutable.Map(p.parameters.toSeq: _*).asJava)
new HivePartition(ht, tpart)
}
/**
* Build the native partition metadata from Hive's Partition.
*/
def fromHivePartition(hp: HivePartition): CatalogTablePartition = {
val apiPartition = hp.getTPartition
val properties: Map[String, String] = if (hp.getParameters != null) {
hp.getParameters.asScala.toMap
} else {
Map.empty
}
CatalogTablePartition(
spec = Option(hp.getSpec).map(_.asScala.toMap).getOrElse(Map.empty),
storage = CatalogStorageFormat(
locationUri = Option(CatalogUtils.stringToURI(apiPartition.getSd.getLocation)),
inputFormat = Option(apiPartition.getSd.getInputFormat),
outputFormat = Option(apiPartition.getSd.getOutputFormat),
serde = Option(apiPartition.getSd.getSerdeInfo.getSerializationLib),
compressed = apiPartition.getSd.isCompressed,
properties = Option(apiPartition.getSd.getSerdeInfo.getParameters)
.map(_.asScala.toMap).orNull),
createTime = apiPartition.getCreateTime.toLong * 1000,
lastAccessTime = apiPartition.getLastAccessTime.toLong * 1000,
parameters = properties,
stats = readHiveStats(properties))
}
/**
   * This replicates the logic of the `getTable()` method
   * of [[org.apache.hadoop.hive.ql.metadata.Hive]] to apply some extra fixes for non-views.
   * Methods that extract multiple [[HiveTable]]s, such as `getRawTablesByName()`,
   * should invoke this before returning.
*/
def extraFixesForNonView(tTable: MetaStoreApiTable): MetaStoreApiTable = {
// For non-views, we need to do some extra fixes
if (!(HiveTableType.VIRTUAL_VIEW.toString == tTable.getTableType)) {
// Fix the non-printable chars
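      // e.g. a serialization.format stored as the single control character ^A (ASCII code 1) is
      // rewritten below as the string "1".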
val parameters = tTable.getSd.getParameters
if (parameters != null) {
val sf = parameters.get(serdeConstants.SERIALIZATION_FORMAT)
if (sf != null) {
val b: Array[Char] = sf.toCharArray
          if ((b.length == 1) && (b(0) < 10)) { // ^A, ^B, ^C, ^D, \t
parameters.put(serdeConstants.SERIALIZATION_FORMAT, Integer.toString(b(0)))
}
}
}
// Use LazySimpleSerDe for MetadataTypedColumnsetSerDe.
      // NOTE: LazySimpleSerDe does not support tables with a single column
      // of type "array<string>". This happens when the table was created using
      // an earlier version of Hive.
if (classOf[MetadataTypedColumnsetSerDe].getName ==
tTable.getSd.getSerdeInfo.getSerializationLib &&
tTable.getSd.getColsSize > 0 &&
tTable.getSd.getCols.get(0).getType.indexOf('<') == -1) {
tTable.getSd.getSerdeInfo.setSerializationLib(classOf[LazySimpleSerDe].getName)
}
}
tTable
}
/**
* Reads statistics from Hive.
   * Note that these statistics may be overridden by Spark's statistics if those are available.
*/
private def readHiveStats(properties: Map[String, String]): Option[CatalogStatistics] = {
val totalSize = properties.get(StatsSetupConst.TOTAL_SIZE).map(BigInt(_))
val rawDataSize = properties.get(StatsSetupConst.RAW_DATA_SIZE).map(BigInt(_))
val rowCount = properties.get(StatsSetupConst.ROW_COUNT).map(BigInt(_))
// NOTE: getting `totalSize` directly from params is kind of hacky, but this should be
// relatively cheap if parameters for the table are populated into the metastore.
// Currently, only totalSize, rawDataSize, and rowCount are used to build the field `stats`
    // TODO: stats should also include the other two fields (`numFiles` and `numPartitions`).
    // (see StatsSetupConst in Hive)
    // When the table is external, `totalSize` is always zero, which will influence the join strategy.
    // So when `totalSize` is zero, use `rawDataSize` instead. When `rawDataSize` is also zero,
    // return None.
    // In Hive, when statistics gathering is disabled, `rawDataSize` and `numRows` are always
    // zero after an INSERT command. So they are used here only if they are larger than zero.
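    // Illustrative example: properties of totalSize = "0", rawDataSize = "1024", numRows = "100"
    // yield CatalogStatistics(sizeInBytes = 1024, rowCount = Some(100)).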
if (totalSize.isDefined && totalSize.get > 0L) {
Some(CatalogStatistics(sizeInBytes = totalSize.get, rowCount = rowCount.filter(_ > 0)))
} else if (rawDataSize.isDefined && rawDataSize.get > 0) {
Some(CatalogStatistics(sizeInBytes = rawDataSize.get, rowCount = rowCount.filter(_ > 0)))
} else {
      // TODO: still fill the rowCount even if sizeInBytes is empty. Would that break anything?
None
}
}
  // Below are the keys of the table properties that store Hive-generated statistics.
private val HiveStatisticsProperties = Set(
StatsSetupConst.COLUMN_STATS_ACCURATE,
StatsSetupConst.NUM_FILES,
StatsSetupConst.NUM_PARTITIONS,
StatsSetupConst.ROW_COUNT,
StatsSetupConst.RAW_DATA_SIZE,
StatsSetupConst.TOTAL_SIZE
)
}
| caneGuy/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala | Scala | apache-2.0 | 50,118 |
/*
* Main.scala
*
* Licensed to the Communitivity, Inc under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.communitivity.echoxmpp
import scala.actors.Actor
import scala.actors.Actor._
import org.xmpp.packet.Packet
import org.xmpp.packet.Message
import org.communitivity.shellack.ComponentConfig
import org.communitivity.shellack.XMPPComponent
//case class PacketReceived(pkt : Packet, out : Actor)
//case class PacketResponse(pkt : Packet)
object Main {
/**
* @param args the command line arguments
*/
def main(args: Array[String]) :Unit = {
new XMPPComponent(
new ComponentConfig() {
def secret() : String = { "secret.goes.here" }
def server() : String = { "communitivity.com" }
def subdomain() : String = { "weather" }
def name() : String = { "US Weather" }
def description() : String = { "Weather component that also supported SPARQL/XMPP" }
},
actor {
loop {
react {
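                        // Each inbound stanza arrives as a (Packet, responder Actor) pair; Message
                        // stanzas are echoed back with the to/from addresses swapped; anything else
                        // is only logged.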
case (pkt:Packet, out : Actor) =>
                            Console.println("Received packet...\n" + pkt.toXML)
pkt match {
case message:Message =>
val reply = new Message()
reply.setTo(message.getFrom())
reply.setFrom(message.getTo())
reply.setType(message.getType())
reply.setThread(message.getThread())
reply.setBody("Received '"+message.getBody()+"', tyvm")
out ! reply
case _ =>
Console.println("Received something other than Message")
}
case _ =>
Console.println("Received something other than (Packet, actor)")
}
}
}
).start
}
}
| Java-Communitivity/MinimalScalaXMPPComponent | src/org/communitivity/echoxmpp/Main.scala | Scala | apache-2.0 | 2,647 |
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.funspec
import org.scalatest._
import org.scalatest.exceptions._
import org.scalactic.{source, Prettifier}
import java.util.ConcurrentModificationException
import java.util.concurrent.atomic.AtomicReference
import org.scalatest.Suite.anExceptionThatShouldCauseAnAbort
import org.scalatest.Suite.autoTagClassAnnotations
import verbs.BehaveWord
/**
* Implementation trait for class <code>FixtureAnyFunSpec</code>, which is
* a sister class to <code>org.scalatest.funspec.AnyFunSpec</code> that can pass a
* fixture object into its tests.
*
* <p>
* <a href="FixtureAnyFunSpec.html"><code>FixtureAnyFunSpec</code></a> is a class,
* not a trait, to minimize compile time given there is a slight compiler
* overhead to mixing in traits compared to extending classes. If you need
* to mix the behavior of <code>FixtureAnyFunSpec</code> into some other
* class, you can use this trait instead, because class
* <code>FixtureAnyFunSpec</code> does nothing more than extend this trait and add a nice <code>toString</code> implementation.
* </p>
*
* <p>
* See the documentation of the class for a <a href="FixtureAnyFunSpec.html">detailed
* overview of <code>FixtureAnyFunSpec</code></a>.
* </p>
*
* @author Bill Venners
*/
//SCALATESTJS-ONLY @scala.scalajs.reflect.annotation.EnableReflectiveInstantiation
//SCALATESTNATIVE-ONLY @scala.scalajs.reflect.annotation.EnableReflectiveInstantiation
@Finders(Array("org.scalatest.finders.FunSpecFinder"))
trait FixtureAnyFunSpecLike extends org.scalatest.FixtureTestSuite with org.scalatest.FixtureTestRegistration with Informing with Notifying with Alerting with Documenting { thisSuite =>
private final val engine = new FixtureEngine[FixtureParam](Resources.concurrentFixtureSpecMod, "FixtureFunSpec")
import engine._
private[scalatest] val sourceFileName = "FixtureAnyFunSpecLike.scala"
/**
* Returns an <code>Informer</code> that during test execution will forward strings passed to its
* <code>apply</code> method to the current reporter. If invoked in a constructor, it
* will register the passed string for forwarding later during test execution. If invoked from inside a scope,
* it will forward the information to the current reporter immediately. If invoked from inside a test function,
* it will record the information and forward it to the current reporter only after the test completed, as <code>recordedEvents</code>
* of the test completed event, such as <code>TestSucceeded</code>. If invoked at any other time, it will print to the standard output.
* This method can be called safely by any thread.
*/
protected def info: Informer = atomicInformer.get
/**
* Returns a <code>Notifier</code> that during test execution will forward strings (and other objects) passed to its
* <code>apply</code> method to the current reporter. If invoked in a constructor, it
* will register the passed string for forwarding later during test execution. If invoked while this
* <code>FixtureAnyFunSpec</code> is being executed, such as from inside a test function, it will forward the information to
* the current reporter immediately. If invoked at any other time, it will
* print to the standard output. This method can be called safely by any thread.
*/
protected def note: Notifier = atomicNotifier.get
/**
* Returns an <code>Alerter</code> that during test execution will forward strings (and other objects) passed to its
* <code>apply</code> method to the current reporter. If invoked in a constructor, it
* will register the passed string for forwarding later during test execution. If invoked while this
* <code>FixtureAnyFunSpec</code> is being executed, such as from inside a test function, it will forward the information to
* the current reporter immediately. If invoked at any other time, it will
* print to the standard output. This method can be called safely by any thread.
*/
protected def alert: Alerter = atomicAlerter.get
/**
* Returns a <code>Documenter</code> that during test execution will forward strings passed to its
* <code>apply</code> method to the current reporter. If invoked in a constructor, it
* will register the passed string for forwarding later during test execution. If invoked from inside a scope,
* it will forward the information to the current reporter immediately. If invoked from inside a test function,
* it will record the information and forward it to the current reporter only after the test completed, as <code>recordedEvents</code>
* of the test completed event, such as <code>TestSucceeded</code>. If invoked at any other time, it will print to the standard output.
* This method can be called safely by any thread.
*/
protected def markup: Documenter = atomicDocumenter.get
private final def registerTestImpl(testText: String, testTags: Tag*)(testFun: FixtureParam => Any /* Assertion */, pos: source.Position): Unit = {
// SKIP-SCALATESTJS,NATIVE-START
val stackDepthAdjustment = -2
// SKIP-SCALATESTJS,NATIVE-END
//SCALATESTJS,NATIVE-ONLY val stackDepthAdjustment = -5
engine.registerTest(testText, org.scalatest.fixture.Transformer(testFun), Resources.testCannotBeNestedInsideAnotherTest, sourceFileName, "registerTest", 5, stackDepthAdjustment, None, None, Some(pos), None, testTags: _*)
}
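  // Registers a test with the given text and optional tags; the test function receives the
  // FixtureParam supplied through withFixture. This is the programmatic counterpart of `it(...)`,
  // intended mainly for traits and tools that generate tests rather than declare them inline.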
// SKIP-DOTTY-START
final def registerTest(testText: String, testTags: Tag*)(testFun: FixtureParam => Any /* Assertion */)(implicit pos: source.Position): Unit = {
registerTestImpl(testText, testTags: _*)(testFun, pos)
}
// SKIP-DOTTY-END
//DOTTY-ONLY inline def registerTest(testText: String, testTags: Tag*)(testFun: FixtureParam => Any /* Assertion */)(implicit pos: source.Position): Unit = {
//DOTTY-ONLY ${ source.Position.withPosition[Unit]('{(pos: source.Position) => registerTestImpl(testText, testTags: _*)(testFun, pos) }) }
//DOTTY-ONLY }
private final def registerIgnoredTestImpl(testText: String, testTags: Tag*)(testFun: FixtureParam => Any /* Assertion */, pos: source.Position): Unit = {
// SKIP-SCALATESTJS,NATIVE-START
val stackDepthAdjustment = 0
// SKIP-SCALATESTJS,NATIVE-END
//SCALATESTJS,NATIVE-ONLY val stackDepthAdjustment = -2
engine.registerIgnoredTest(testText, org.scalatest.fixture.Transformer(testFun), Resources.testCannotBeNestedInsideAnotherTest, sourceFileName, "registerIgnoredTest", 1, stackDepthAdjustment, None, Some(pos), testTags: _*)
}
// SKIP-DOTTY-START
final def registerIgnoredTest(testText: String, testTags: Tag*)(testFun: FixtureParam => Any /* Assertion */)(implicit pos: source.Position): Unit = {
registerIgnoredTestImpl(testText, testTags: _*)(testFun, pos)
}
// SKIP-DOTTY-END
//DOTTY-ONLY inline def registerIgnoredTest(testText: String, testTags: Tag*)(testFun: FixtureParam => Any /* Assertion */)(implicit pos: source.Position): Unit = {
//DOTTY-ONLY ${ source.Position.withPosition[Unit]('{(pos: source.Position) => registerIgnoredTestImpl(testText, testTags: _*)(testFun, pos) }) }
//DOTTY-ONLY }
/**
* Class that, via an instance referenced from the <code>it</code> field,
* supports test (and shared test) registration in <code>FixtureAnyFunSpec</code>s.
*
* <p>
* This class supports syntax such as the following:
* </p>
*
* <pre class="stHighlight">
* it("should be empty")
* ^
* </pre>
*
* <pre class="stHighlight">
* it should behave like nonFullStack(stackWithOneItem)
* ^
* </pre>
*
* <p>
* For more information and examples, see the <a href="FixtureAnyFunSpec.html">main documentation for <code>FixtureAnyFunSpec</code></a>.
* </p>
*/
protected final class ItWord {
class ResultOfItWordApplication(specText: String, testTags: Tag*) {
private final def applyImpl(testFun: FixtureParam => Any /* Assertion */, pos: source.Position): Unit = {
// SKIP-SCALATESTJS,NATIVE-START
val stackDepth = 3
val stackDepthAdjustment = -2
// SKIP-SCALATESTJS,NATIVE-END
//SCALATESTJS,NATIVE-ONLY val stackDepth = 5
//SCALATESTJS,NATIVE-ONLY val stackDepthAdjustment = -5
engine.registerTest(specText, org.scalatest.fixture.Transformer(testFun), Resources.itCannotAppearInsideAnotherItOrThey, sourceFileName, "apply", stackDepth, stackDepthAdjustment, None, None, Some(pos), None, testTags: _*)
}
// SKIP-DOTTY-START
def apply(testFun: FixtureParam => Any /* Assertion */)(implicit pos: source.Position): Unit = {
applyImpl(testFun, pos)
}
// SKIP-DOTTY-END
//DOTTY-ONLY inline def apply(testFun: FixtureParam => Any /* Assertion */): Unit = {
//DOTTY-ONLY ${ source.Position.withPosition[Unit]('{(pos: source.Position) => applyImpl(testFun, pos) }) }
//DOTTY-ONLY }
private final def applyImpl(testFun: () => Any /* Assertion */, pos: source.Position): Unit = {
// SKIP-SCALATESTJS,NATIVE-START
val stackDepth = 3
val stackDepthAdjustment = -2
// SKIP-SCALATESTJS,NATIVE-END
//SCALATESTJS,NATIVE-ONLY val stackDepth = 5
//SCALATESTJS,NATIVE-ONLY val stackDepthAdjustment = -5
engine.registerTest(specText, org.scalatest.fixture.Transformer(new org.scalatest.fixture.NoArgTestWrapper(testFun)), Resources.itCannotAppearInsideAnotherItOrThey, sourceFileName, "apply", stackDepth, stackDepthAdjustment, None, None, Some(pos), None, testTags: _*)
}
// SKIP-DOTTY-START
def apply(testFun: () => Any /* Assertion */)(implicit pos: source.Position): Unit = {
applyImpl(testFun, pos)
}
// SKIP-DOTTY-END
//DOTTY-ONLY inline def apply(testFun: () => Any /* Assertion */): Unit = {
//DOTTY-ONLY ${ source.Position.withPosition[Unit]('{(pos: source.Position) => applyImpl(testFun, pos) }) }
//DOTTY-ONLY }
}
/**
* Register a test with the given spec text, optional tags, and test function value that takes no arguments.
* An invocation of this method is called an “example.”
*
* This method will register the test for later execution via an invocation of one of the <code>execute</code>
* methods. The name of the test will be a concatenation of the text of all surrounding describers,
     * from outside in, and the passed spec text, with one space placed between each item. (See the documentation
* for <code>testNames</code> for an example.) The resulting test name must not have been registered previously on
* this <code>FixtureAnyFunSpec</code> instance.
*
* @param specText the specification text, which will be combined with the descText of any surrounding describers
* to form the test name
* @param testTags the optional list of tags for this test
* @param testFun the test function
* @throws DuplicateTestNameException if a test with the same name has been registered previously
* @throws TestRegistrationClosedException if invoked after <code>run</code> has been invoked on this suite
* @throws NullArgumentException if <code>specText</code> or any passed test tag is <code>null</code>
*/
def apply(specText: String, testTags: Tag*): ResultOfItWordApplication =
new ResultOfItWordApplication(specText, testTags: _*)
/**
* Supports the registration of shared tests.
*
* <p>
* This method supports syntax such as the following:
* </p>
*
* <pre class="stHighlight">
* it should behave like nonFullStack(stackWithOneItem)
* ^
* </pre>
*
* <p>
* For examples of shared tests, see the <a href="../Spec.html#SharedTests">Shared tests section</a>
* in the main documentation for trait <code>FixtureAnyFunSpec</code>.
* </p>
*
* @param behaveWord the <code>BehaveWord</code>
*/
def should(behaveWord: BehaveWord) = behaveWord
/**
* Supports the registration of shared tests.
*
* <p>
* This method supports syntax such as the following:
* </p>
*
* <pre class="stHighlight">
* it must behave like nonFullStack(stackWithOneItem)
* ^
* </pre>
*
* <p>
* For examples of shared tests, see the <a href="../Spec.html#SharedTests">Shared tests section</a>
* in the main documentation for trait <code>FixtureAnyFunSpec</code>.
* </p>
*
* @param behaveWord the <code>BehaveWord</code>
*/
def must(behaveWord: BehaveWord) = behaveWord
}
/**
* Supports test (and shared test) registration in <code>FixtureAnyFunSpec</code>s.
*
* <p>
* This field supports syntax such as the following:
* </p>
*
* <pre class="stHighlight">
* it("should be empty")
* ^
* </pre>
*
* <pre class="stHighlight">
* it should behave like nonFullStack(stackWithOneItem)
* ^
* </pre>
*
* <p>
* For more information and examples of the use of the <code>it</code> field, see
* the <a href="FixtureAnyFunSpec.html">main documentation for <code>FixtureAnyFunSpec</code></a>.
* </p>
*/
protected val it = new ItWord
/**
* Class that, via an instance referenced from the <code>they</code> field,
* supports test (and shared test) registration in <code>FixtureAnyFunSpec</code>s.
*
* <p>
* This class supports syntax such as the following:
* </p>
*
* <pre class="stHighlight">
* they("should be empty")
* ^
* </pre>
*
* <pre class="stHighlight">
* they should behave like nonFullStack(stackWithOneItem)
* ^
* </pre>
*
* <p>
* For more information and examples, see the <a href="FixtureAnyFunSpec.html">main documentation for <code>FixtureAnyFunSpec</code></a>.
* </p>
*/
protected final class TheyWord {
class ResultOfTheyWordApplication(specText: String, testTags: Tag*)(implicit pos: source.Position) {
private final def applyImpl(testFun: FixtureParam => Any /* Assertion */, pos: source.Position): Unit = {
// SKIP-SCALATESTJS,NATIVE-START
val stackDepthAdjustment = -2
// SKIP-SCALATESTJS,NATIVE-END
//SCALATESTJS,NATIVE-ONLY val stackDepthAdjustment = -3
engine.registerTest(specText, org.scalatest.fixture.Transformer(testFun), Resources.theyCannotAppearInsideAnotherItOrThey, sourceFileName, "apply", 3, stackDepthAdjustment, None, None, Some(pos), None, testTags: _*)
}
// SKIP-DOTTY-START
def apply(testFun: FixtureParam => Any /* Assertion */)(implicit pos: source.Position): Unit = {
applyImpl(testFun, pos)
}
// SKIP-DOTTY-END
//DOTTY-ONLY inline def apply(testFun: FixtureParam => Any /* Assertion */): Unit = {
//DOTTY-ONLY ${ source.Position.withPosition[Unit]('{(pos: source.Position) => applyImpl(testFun, pos) }) }
//DOTTY-ONLY }
def applyImpl(testFun: () => Any /* Assertion */, pos: source.Position): Unit = {
// SKIP-SCALATESTJS,NATIVE-START
val stackDepthAdjustment = -2
// SKIP-SCALATESTJS,NATIVE-END
//SCALATESTJS,NATIVE-ONLY val stackDepthAdjustment = -3
engine.registerTest(specText, org.scalatest.fixture.Transformer(new org.scalatest.fixture.NoArgTestWrapper(testFun)), Resources.theyCannotAppearInsideAnotherItOrThey, sourceFileName, "apply", 3, stackDepthAdjustment, None, None, Some(pos), None, testTags: _*)
}
// SKIP-DOTTY-START
def apply(testFun: () => Any /* Assertion */)(implicit pos: source.Position): Unit = {
applyImpl(testFun, pos)
}
// SKIP-DOTTY-END
//DOTTY-ONLY inline def apply(testFun: () => Any /* Assertion */): Unit = {
//DOTTY-ONLY ${ source.Position.withPosition[Unit]('{(pos: source.Position) => applyImpl(testFun, pos) }) }
//DOTTY-ONLY }
}
/**
* Register a test with the given spec text, optional tags, and test function value that takes no arguments.
* An invocation of this method is called an “example.”
*
* This method will register the test for later execution via an invocation of one of the <code>execute</code>
* methods. The name of the test will be a concatenation of the text of all surrounding describers,
     * from outside in, and the passed spec text, with one space placed between each item. (See the documentation
* for <code>testNames</code> for an example.) The resulting test name must not have been registered previously on
* this <code>FixtureAnyFunSpec</code> instance.
*
* @param specText the specification text, which will be combined with the descText of any surrounding describers
* to form the test name
* @param testTags the optional list of tags for this test
* @param testFun the test function
* @throws DuplicateTestNameException if a test with the same name has been registered previously
* @throws TestRegistrationClosedException if invoked after <code>run</code> has been invoked on this suite
* @throws NullArgumentException if <code>specText</code> or any passed test tag is <code>null</code>
*/
def apply(specText: String, testTags: Tag*): ResultOfTheyWordApplication =
new ResultOfTheyWordApplication(specText, testTags: _*)
/**
* Supports the registration of shared tests.
*
* <p>
* This method supports syntax such as the following:
* </p>
*
* <pre class="stHighlight">
* they should behave like nonFullStack(stackWithOneItem)
* ^
* </pre>
*
* <p>
* For examples of shared tests, see the <a href="../Spec.html#SharedTests">Shared tests section</a>
* in the main documentation for trait <code>FixtureAnyFunSpec</code>.
* </p>
*
* @param behaveWord the <code>BehaveWord</code>
*/
def should(behaveWord: BehaveWord) = behaveWord
/**
* Supports the registration of shared tests.
*
* <p>
* This method supports syntax such as the following:
* </p>
*
* <pre class="stHighlight">
* they must behave like nonFullStack(stackWithOneItem)
* ^
* </pre>
*
* <p>
* For examples of shared tests, see the <a href="../Spec.html#SharedTests">Shared tests section</a>
* in the main documentation for trait <code>FixtureAnyFunSpec</code>.
* </p>
*
* @param behaveWord the <code>BehaveWord</code>
*/
def must(behaveWord: BehaveWord) = behaveWord
}
/**
* Supports test (and shared test) registration in <code>FixtureAnyFunSpec</code>s.
*
* <p>
* This field supports syntax such as the following:
* </p>
*
* <pre class="stHighlight">
* they("should be empty")
* ^
* </pre>
*
* <pre class="stHighlight">
* they should behave like nonFullStack(stackWithOneItem)
* ^
* </pre>
*
* <p>
   * For more information and examples of the use of the <code>they</code> field, see
   * the <a href="FixtureAnyFunSpec.html">main documentation for <code>FixtureAnyFunSpec</code></a>.
* </p>
*/
protected val they = new TheyWord
class ResultOfIgnoreInvocation(specText: String, testTags: Tag*) {
private final def applyImpl(testFun: FixtureParam => Any /* Assertion */, pos: source.Position): Unit = {
// SKIP-SCALATESTJS,NATIVE-START
val stackDepth = 3
val stackDepthAdjustment = -3
// SKIP-SCALATESTJS,NATIVE-END
//SCALATESTJS,NATIVE-ONLY val stackDepth = 5
//SCALATESTJS,NATIVE-ONLY val stackDepthAdjustment = -6
engine.registerIgnoredTest(specText, org.scalatest.fixture.Transformer(testFun), Resources.ignoreCannotAppearInsideAnItOrAThey, sourceFileName, "apply", stackDepth, stackDepthAdjustment, None, Some(pos), testTags: _*)
}
// SKIP-DOTTY-START
def apply(testFun: FixtureParam => Any /* Assertion */)(implicit pos: source.Position): Unit = {
applyImpl(testFun, pos)
}
// SKIP-DOTTY-END
//DOTTY-ONLY inline def apply(testFun: FixtureParam => Any /* Assertion */): Unit = {
//DOTTY-ONLY ${ source.Position.withPosition[Unit]('{(pos: source.Position) => applyImpl(testFun, pos) }) }
//DOTTY-ONLY }
private final def applyImpl(testFun: () => Any /* Assertion */, pos: source.Position): Unit = {
// SKIP-SCALATESTJS,NATIVE-START
val stackDepth = 3
val stackDepthAdjustment = -3
// SKIP-SCALATESTJS,NATIVE-END
//SCALATESTJS,NATIVE-ONLY val stackDepth = 5
//SCALATESTJS,NATIVE-ONLY val stackDepthAdjustment = -6
engine.registerIgnoredTest(specText, org.scalatest.fixture.Transformer(new org.scalatest.fixture.NoArgTestWrapper(testFun)), Resources.ignoreCannotAppearInsideAnItOrAThey, sourceFileName, "apply", stackDepth, stackDepthAdjustment, None, Some(pos), testTags: _*)
}
// SKIP-DOTTY-START
def apply(testFun: () => Any /* Assertion */)(implicit pos: source.Position): Unit = {
applyImpl(testFun, pos)
}
// SKIP-DOTTY-END
//DOTTY-ONLY inline def apply(testFun: () => Any /* Assertion */): Unit = {
//DOTTY-ONLY ${ source.Position.withPosition[Unit]('{(pos: source.Position) => applyImpl(testFun, pos) }) }
//DOTTY-ONLY }
}
/**
* Register a test to ignore, which has the given spec text, optional tags, and test function value that takes no arguments.
* This method will register the test for later ignoring via an invocation of one of the <code>execute</code>
* methods. This method exists to make it easy to ignore an existing test by changing the call to <code>it</code>
* to <code>ignore</code> without deleting or commenting out the actual test code. The test will not be executed, but a
* report will be sent that indicates the test was ignored. The name of the test will be a concatenation of the text of all surrounding describers,
   * from outside in, and the passed spec text, with one space placed between each item. (See the documentation
* for <code>testNames</code> for an example.) The resulting test name must not have been registered previously on
* this <code>FixtureAnyFunSpec</code> instance.
*
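   * <p>
   * For example, an existing test can be ignored by changing <code>it</code> to <code>ignore</code>
   * (an illustrative sketch):
   * </p>
   *
   * <pre class="stHighlight">
   * ignore("should be empty") { fixture =>
   *   // the original test body stays in place
   * }
   * </pre>
   *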
* @param specText the specification text, which will be combined with the descText of any surrounding describers
* to form the test name
* @param testTags the optional list of tags for this test
* @param testFun the test function
* @throws DuplicateTestNameException if a test with the same name has been registered previously
* @throws TestRegistrationClosedException if invoked after <code>run</code> has been invoked on this suite
* @throws NullArgumentException if <code>specText</code> or any passed test tag is <code>null</code>
*/
protected def ignore(specText: String, testTags: Tag*): ResultOfIgnoreInvocation =
new ResultOfIgnoreInvocation(specText, testTags: _*)
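  // For example (illustrative, not part of the original source): inside a surrounding
  // describe clause,
  //
  //   describe("A Stack") {
  //     ignore("should pop values in last-in-first-out order") { fixture => succeed }
  //   }
  //
  // the test is registered under the name "A Stack should pop values in last-in-first-out order"
  // and is reported as ignored instead of being run.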
/**
* Register a test to ignore, which has the given spec text and test function value that takes no arguments.
* This method will register the test for later ignoring via an invocation of one of the <code>execute</code>
* methods. This method exists to make it easy to ignore an existing test by changing the call to <code>it</code>
* to <code>ignore</code> without deleting or commenting out the actual test code. The test will not be executed, but a
* report will be sent that indicates the test was ignored. The name of the test will be a concatenation of the text of all surrounding describers,
   * from outside in, and the passed spec text, with one space placed between each item. (See the documentation
* for <code>testNames</code> for an example.) The resulting test name must not have been registered previously on
* this <code>FixtureAnyFunSpec</code> instance.
*
* @param specText the specification text, which will be combined with the descText of any surrounding describers
* to form the test name
* @param testFun the test function
* @throws DuplicateTestNameException if a test with the same name has been registered previously
* @throws TestRegistrationClosedException if invoked after <code>run</code> has been invoked on this suite
* @throws NullArgumentException if <code>specText</code> or any passed test tag is <code>null</code>
*/
private final def describeImpl(description: String)(fun: => Unit, pos: source.Position): Unit = {
// SKIP-SCALATESTJS,NATIVE-START
val stackDepth = 4
// SKIP-SCALATESTJS,NATIVE-END
//SCALATESTJS,NATIVE-ONLY val stackDepth = 6
try {
registerNestedBranch(description, None, fun, Resources.describeCannotAppearInsideAnIt, sourceFileName, "describe", stackDepth, -2, None, Some(pos))
}
catch {
case e: TestFailedException => throw new NotAllowedException(FailureMessages.assertionShouldBePutInsideItOrTheyClauseNotDescribeClause, Some(e), e.position.getOrElse(pos))
case e: TestCanceledException => throw new NotAllowedException(FailureMessages.assertionShouldBePutInsideItOrTheyClauseNotDescribeClause, Some(e), e.position.getOrElse(pos))
case e: DuplicateTestNameException => throw new NotAllowedException(FailureMessages.exceptionWasThrownInDescribeClause(Prettifier.default, UnquotedString(e.getClass.getName), description, e.getMessage), Some(e), e.position.getOrElse(pos))
case other: Throwable if (!Suite.anExceptionThatShouldCauseAnAbort(other)) => throw new NotAllowedException(FailureMessages.exceptionWasThrownInDescribeClause(Prettifier.default, UnquotedString(other.getClass.getName), description, other.getMessage), Some(other), pos)
case other: Throwable => throw other
}
}
/**
* Describe a “subject” being specified and tested by the passed function value. The
* passed function value may contain more describers (defined with <code>describe</code>) and/or tests
* (defined with <code>it</code>). This trait's implementation of this method will register the
* description string and immediately invoke the passed function.
*
* @param description the description text
* @param fun the function which makes up the body for the description
*/
// SKIP-DOTTY-START
protected def describe(description: String)(fun: => Unit)(implicit pos: source.Position): Unit = {
describeImpl(description)(fun, pos)
}
// SKIP-DOTTY-END
//DOTTY-ONLY inline def describe(description: String)(fun: => Unit): Unit = {
//DOTTY-ONLY ${ source.Position.withPosition[Unit]('{(pos: source.Position) => describeImpl(description)(fun, pos) }) }
//DOTTY-ONLY }
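  // For example (illustrative, not part of the original source): nested describe clauses
  // simply prepend their text to the names of the tests they contain,
  //
  //   describe("A Set") {
  //     describe("when empty") {
  //       it("should have size 0") { fixture => assert(Set.empty[Int].size == 0) }
  //     }
  //   }
  //
  // registers a single test named "A Set when empty should have size 0".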
/**
* A <code>Map</code> whose keys are <code>String</code> tag names to which tests in this <code>FixtureAnyFunSpec</code> belong, and values
* the <code>Set</code> of test names that belong to each tag. If this <code>FixtureAnyFunSpec</code> contains no tags, this method returns an empty <code>Map</code>.
*
* <p>
* This trait's implementation returns tags that were passed as strings contained in <code>Tag</code> objects passed to
* methods <code>test</code> and <code>ignore</code>.
* </p>
*
* <p>
* In addition, this trait's implementation will also auto-tag tests with class level annotations.
* For example, if you annotate @Ignore at the class level, all test methods in the class will be auto-annotated with @Ignore.
* </p>
*/
override def tags: Map[String, Set[String]] = autoTagClassAnnotations(atomic.get.tagsMap, this)
/**
* Run a test. This trait's implementation runs the test registered with the name specified by
* <code>testName</code>. Each test's name is a concatenation of the text of all describers surrounding a test,
   * from outside in, and the test's spec text, with one space placed between each item. (See the documentation
* for <code>testNames</code> for an example.)
*
* @param testName the name of one test to execute.
* @param args the <code>Args</code> for this run
   * @return a <code>Status</code> object that indicates when the test started by this method has completed, and whether or not it failed.
* @throws NullArgumentException if <code>testName</code> or <code>args</code> is <code>null</code>.
*/
protected override def runTest(testName: String, args: Args): Status = {
def invokeWithFixture(theTest: TestLeaf): Outcome = {
theTest.testFun match {
case transformer: org.scalatest.fixture.Transformer[_] =>
transformer.exceptionalTestFun match {
case wrapper: fixture.NoArgTestWrapper[_, _] =>
withFixture(new FixturelessTestFunAndConfigMap(testName, wrapper.test, args.configMap))
case fun => withFixture(new TestFunAndConfigMap(testName, fun, args.configMap))
}
case other =>
other match {
case wrapper: fixture.NoArgTestWrapper[_, _] =>
withFixture(new FixturelessTestFunAndConfigMap(testName, wrapper.test, args.configMap))
case fun => withFixture(new TestFunAndConfigMap(testName, fun, args.configMap))
}
}
}
runTestImpl(thisSuite, testName, args, true, invokeWithFixture)
}
/**
* <p>
* Run zero to many of this <code>FixtureAnyFunSpec</code>'s tests.
* </p>
*
* <p>
* This method takes a <code>testName</code> parameter that optionally specifies a test to invoke.
* If <code>testName</code> is <code>Some</code>, this trait's implementation of this method
* invokes <code>runTest</code> on this object with passed <code>args</code>.
* </p>
*
* <p>
* This method takes an <code>args</code> that contains a <code>Set</code> of tag names that should be included (<code>tagsToInclude</code>), and a <code>Set</code>
* that should be excluded (<code>tagsToExclude</code>), when deciding which of this <code>Suite</code>'s tests to execute.
* If <code>tagsToInclude</code> is empty, all tests will be executed
   * except those belonging to tags listed in the <code>tagsToExclude</code> <code>Set</code>. If <code>tagsToInclude</code> is non-empty, only tests
* belonging to tags mentioned in <code>tagsToInclude</code>, and not mentioned in <code>tagsToExclude</code>
* will be executed. However, if <code>testName</code> is <code>Some</code>, <code>tagsToInclude</code> and <code>tagsToExclude</code> are essentially ignored.
* Only if <code>testName</code> is <code>None</code> will <code>tagsToInclude</code> and <code>tagsToExclude</code> be consulted to
* determine which of the tests named in the <code>testNames</code> <code>Set</code> should be run. For more information on trait tags, see the main documentation for this trait.
* </p>
*
* <p>
* If <code>testName</code> is <code>None</code>, this trait's implementation of this method
* invokes <code>testNames</code> on this <code>Suite</code> to get a <code>Set</code> of names of tests to potentially execute.
   * (A <code>testName</code> value of <code>None</code> essentially acts as a wildcard that means all tests in
* this <code>Suite</code> that are selected by <code>tagsToInclude</code> and <code>tagsToExclude</code> should be executed.)
   * For each test in the <code>testNames</code> <code>Set</code>, in the order
* they appear in the iterator obtained by invoking the <code>elements</code> method on the <code>Set</code>, this trait's implementation
* of this method checks whether the test should be run based on the <code>tagsToInclude</code> and <code>tagsToExclude</code> <code>Set</code>s.
* If so, this implementation invokes <code>runTest</code> with passed <code>args</code>.
* </p>
*
* @param testName an optional name of one test to execute. If <code>None</code>, all relevant tests should be executed.
* I.e., <code>None</code> acts like a wildcard that means execute all relevant tests in this <code>FixtureAnyFunSpec</code>.
* @param args the <code>Args</code> to which results will be reported
* @return a <code>Status</code> object that indicates when all tests started by this method have completed, and whether or not a failure occurred.
* @throws NullArgumentException if any of <code>testName</code> or <code>args</code> is <code>null</code>.
*/
protected override def runTests(testName: Option[String], args: Args): Status = {
runTestsImpl(thisSuite, testName, args, info, true, runTest)
}
/**
* An immutable <code>Set</code> of test names. If this <code>FixtureAnyFunSpec</code> contains no tests, this method returns an
* empty <code>Set</code>.
*
* <p>
* This trait's implementation of this method will return a set that contains the names of all registered tests. The set's
* iterator will return those names in the order in which the tests were registered. Each test's name is composed
* of the concatenation of the text of each surrounding describer, in order from outside in, and the text of the
* example itself, with all components separated by a space.
* </p>
*
* @return the <code>Set</code> of test names
*/
override def testNames: Set[String] = {
InsertionOrderSet(atomic.get.testNamesList)
}
override def run(testName: Option[String], args: Args): Status = {
runImpl(thisSuite, testName, args, super.run)
}
/**
* Supports shared test registration in <code>FixtureAnyFunSpec</code>s.
*
* <p>
* This field supports syntax such as the following:
* </p>
*
* <pre class="stHighlight">
* it should behave like nonFullStack(stackWithOneItem)
* ^
* </pre>
*
* <p>
   * For more information and examples of the use of <code>behave</code>, see the <a href="FixtureAnyFunSpec.html#SharedTests">Shared tests section</a>
* in the main documentation for trait <code>FixtureAnyFunSpec</code>.
* </p>
*/
protected val behave = new BehaveWord
import scala.language.implicitConversions
/**
* Implicitly converts a function that takes no parameters and results in <code>PendingStatement</code> to
   * a function from <code>FixtureParam</code> to <code>Any</code>, to enable pending tests to be registered as by-name parameters
* by methods that require a test function that takes a <code>FixtureParam</code>.
*
* <p>
* This method makes it possible to write pending tests as simply <code>(pending)</code>, without needing
* to write <code>(fixture => pending)</code>.
* </p>
*
* @param f a function
* @return a function of <code>FixtureParam => Any</code>
*/
protected implicit def convertPendingToFixtureFunction(f: => PendingStatement): FixtureParam => Any /* Assertion */ = {
fixture => { f; Succeeded }
}
/**
* Implicitly converts a function that takes no parameters and results in <code>Any</code> to
   * a function from <code>FixtureParam</code> to <code>Any</code>, to enable no-arg tests to be registered
* by methods that require a test function that takes a <code>FixtureParam</code>.
*
* @param fun a function
* @return a function of <code>FixtureParam => Any</code>
*/
/*
protected implicit def convertNoArgToFixtureFunction(fun: () => Any /* Assertion */): (FixtureParam => Any /* Assertion */) =
new NoArgTestWrapper(fun)
*/
/**
* <strong>The <code>styleName</code> lifecycle method has been deprecated and will be removed in a future version of ScalaTest.</strong>
*
* <p>This method was used to support the chosen styles feature, which was deactivated in 3.1.0. The internal modularization of ScalaTest in 3.2.0
* will replace chosen styles as the tool to encourage consistency across a project. We do not plan a replacement for <code>styleName</code>.</p>
*/
@deprecated("The styleName lifecycle method has been deprecated and will be removed in a future version of ScalaTest with no replacement.", "3.1.0")
final override val styleName: String = "org.scalatest.fixture.FunSpec"
override def testDataFor(testName: String, theConfigMap: ConfigMap = ConfigMap.empty): TestData = createTestDataFor(testName, theConfigMap, this)
}
| scalatest/scalatest | jvm/funspec/src/main/scala/org/scalatest/funspec/FixtureAnyFunSpecLike.scala | Scala | apache-2.0 | 36,551 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.sexp
import org.ensime.util.EnsimeSpec
class SexpCompactPrinterSpec extends EnsimeSpec {
private val foo = SexpString("foo")
private val foosym = SexpSymbol("foo")
private val barsym = SexpSymbol("bar")
private def assertPrinter(sexp: Sexp, expect: String): Unit = {
SexpCompactPrinter(sexp) should ===(expect)
}
"CompactPrinter" should "handle nil or empty lists/data" in {
assertPrinter(SexpNil, "nil")
assertPrinter(SexpList(Nil), "nil")
}
it should "output lists of atoms" in {
assertPrinter(SexpList(foo, SexpNumber(13), foosym), """("foo" 13 foo)""")
}
it should "output lists of lists" in {
assertPrinter(SexpList(SexpList(foo), SexpList(foo)), """(("foo") ("foo"))""")
}
it should "output cons" in {
assertPrinter(SexpCons(foosym, barsym), "(foo . bar)")
}
it should "output escaped characters" in {
assertPrinter(SexpString("""C:\\my\\folder"""), """"C:\\\\my\\\\folder"""")
}
}
| d1egoaz/ensime-sbt | src/sbt-test/sbt-ensime/ensime-server/s-express/src/test/scala/org/ensime/sexp/SexpCompactPrinterSpec.scala | Scala | apache-2.0 | 1,097 |
package com.twitter.finatra.kafkastreams.transformer.lifecycle
import com.twitter.finatra.kafkastreams.transformer.watermarks.Watermark
trait OnWatermark {
def onWatermark(watermark: Watermark): Unit
}
| twitter/finatra | kafka-streams/kafka-streams/src/main/scala/com/twitter/finatra/kafkastreams/transformer/lifecycle/OnWatermark.scala | Scala | apache-2.0 | 206 |
package com.bio4j.dynamograph.parser.go
import com.bio4j.dynamograph.parser.SingleElement
trait AnyGoParser extends Traversable[SingleElement]
| bio4j/dynamograph | src/main/scala/com/bio4j/dynamograph/parser/go/AnyGoParser.scala | Scala | agpl-3.0 | 145 |
package com.twitter.finagle.exp.mysql.transport
import com.twitter.finagle.client.Transporter
import com.twitter.finagle.exp.mysql.{Request, Result}
import com.twitter.finagle.netty3.{ChannelSnooper, Netty3Transporter}
import com.twitter.finagle.Stack
import com.twitter.util.NonFatal
import java.util.logging.{Level, Logger}
import org.jboss.netty.buffer.ChannelBuffer
import org.jboss.netty.channel._
import org.jboss.netty.channel.{Channels, ChannelPipelineFactory}
import org.jboss.netty.handler.codec.frame.FrameDecoder
/**
* Decodes logical MySQL packets that could be fragmented across
* frames. MySQL packets are a length encoded set of bytes written
* in little endian byte order.
*/
class PacketFrameDecoder extends FrameDecoder {
override def decode(ctx: ChannelHandlerContext, channel: Channel, buffer: ChannelBuffer): Packet = {
if (buffer.readableBytes < Packet.HeaderSize)
return null
buffer.markReaderIndex()
val header = new Array[Byte](Packet.HeaderSize)
buffer.readBytes(header)
val br = BufferReader(header)
val length = br.readUnsignedInt24()
val seq = br.readUnsignedByte()
if (buffer.readableBytes < length) {
buffer.resetReaderIndex()
return null
}
val body = new Array[Byte](length)
buffer.readBytes(body)
Packet(seq, Buffer(body))
}
}
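// Illustrative note (editor addition): the 4-byte MySQL packet header read above is a
// 3-byte little-endian payload length followed by a 1-byte sequence number. For example,
// the header bytes 0x2c 0x00 0x00 0x01 announce a 44-byte payload with sequence number 1,
// so the decoder waits until 44 more bytes are readable before emitting Packet(1, body).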
class PacketEncoder extends SimpleChannelDownstreamHandler {
override def writeRequested(ctx: ChannelHandlerContext, evt: MessageEvent) =
evt.getMessage match {
case p: Packet =>
try {
val cb = p.toChannelBuffer
Channels.write(ctx, evt.getFuture, cb, evt.getRemoteAddress)
} catch {
case NonFatal(e) =>
evt.getFuture.setFailure(new ChannelException(e.getMessage))
}
case unknown =>
evt.getFuture.setFailure(new ChannelException(
"Unsupported request type %s".format(unknown.getClass.getName)))
}
}
/**
* A Netty3 pipeline that is responsible for framing network
* traffic in terms of mysql logical packets.
*/
object MysqlClientPipelineFactory extends ChannelPipelineFactory {
def getPipeline = {
val pipeline = Channels.pipeline()
pipeline.addLast("packetDecoder", new PacketFrameDecoder)
pipeline.addLast("packetEncoder", new PacketEncoder)
pipeline
}
}
/**
* Responsible for the transport layer plumbing required to produce
* a Transporter[Packet, Packet]. The current implementation uses
* Netty3.
*/
object MysqlTransporter {
def apply(params: Stack.Params): Transporter[Packet, Packet] =
Netty3Transporter(MysqlClientPipelineFactory, params)
}
| nkhuyu/finagle | finagle-mysql/src/main/scala/com/twitter/finagle/mysql/transport/Netty3.scala | Scala | apache-2.0 | 2,637 |
package org.mauritania.photosync.olympus.client
import java.net.URL
import java.time.{LocalDate, LocalDateTime, LocalTime}
case class FileInfo(
folder: String,
name: String,
size: Long,
date: Int = FileInfo.DefaultDate,
time: Int = FileInfo.DefaultTime,
thumbnailUrl: Option[URL] = None // if local, no thumbnail will be available
) {
import FileInfo._
def getFileId: String = {
folder + "/" + name
}
val humanDate: LocalDate = {
// ..yyyyyyyymmmmddddd
// 65432109876543210
val days = maskShift(date, 4, 0)
val months = maskShift(date, 8, 5)
val years = maskShift(date, 16, 9) + 1980
LocalDate.of(years, months, days)
}
val humanTime: LocalTime = {
// ...hhhhhhmmmmmmsssss
// 65432109876543210
val s = FileInfo.maskShift(time, 4, 0)
val m = FileInfo.maskShift(time, 10, 5)
val h = FileInfo.maskShift(time, 16, 11)
LocalTime.of(h, m, s)
}
val humanDateTime: LocalDateTime = humanDate.atTime(humanTime)
}
object FileInfo {
val MaskDays = Integer.parseInt("0000000000011111", 2)
val MaskMont = Integer.parseInt("0000000111100000", 2)
val MaskYear = Integer.parseInt("1111111000000000", 2)
val MaxMachineDayticks = 61343
val MinMachineDayticks = 10273
val DefaultDate = MinMachineDayticks
val DefaultTime = 0
val MaxDate = LocalDate.of(2099, 12, 31)
val MinDate = LocalDate.of(2000, 1, 1)
def maskShift(i: Int, upperMaskBitPos: Int, lowerMaskBitPos: Int): Int =
(i % (2 << upperMaskBitPos)) >> lowerMaskBitPos
}
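// --- Illustrative sketch (editor addition, not part of the original source) ---
// Shows how the packed date/time integers decode; the folder, file name and size
// below are made-up values.
object FileInfoDecodeExample extends App {
  val packedDate = (2016 - 1980) << 9 | 5 << 5 | 1 // 1 May 2016 -> 18593
  val packedTime = 14 << 11 | 30 << 5 | 20         // 14:30:20   -> 29652
  val info = FileInfo("100OLYMP", "P5010001.JPG", size = 1024L, date = packedDate, time = packedTime)
  println(info.humanDateTime) // prints 2016-05-01T14:30:20
}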
| mauriciojost/olympus-photosync | src/main/scala/org/mauritania/photosync/olympus/client/FileInfo.scala | Scala | apache-2.0 | 1,532 |
/*
*
* * Copyright 2015 Skymind,Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.dhira.core.nnet.layers.convolution
import org.dhira.core.containers.Pair
import org.dhira.core.nnet.api.Layer
import org.dhira.core.nnet.conf.NeuralNetConfiguration
import org.dhira.core.nnet.gradient.DefaultGradient
import org.dhira.core.nnet.gradient.Gradient
import org.dhira.core.nnet.layers.BaseLayer
import org.deeplearning4j.nn.params.ConvolutionParamInitializer
import org.deeplearning4j.util.Dropout
import org.nd4j.linalg.api.ndarray.INDArray
import org.nd4j.linalg.api.shape.Shape
import org.nd4j.linalg.convolution.Convolution
import org.nd4j.linalg.factory.Nd4j
import org.slf4j.Logger
import org.slf4j.LoggerFactory
/**
* Convolution layer
*
* @author Adam Gibson (original impl), Alex Black (current version)
*/
object ConvolutionLayer {
protected val log: Logger = LoggerFactory.getLogger(classOf[ConvolutionLayer])
}
class ConvolutionLayer extends BaseLayer[ConvolutionLayer] {
private[convolution] var helper: ConvolutionHelper = null
def this(conf: NeuralNetConfiguration) {
this()
`super`(conf)
initializeHelper
}
  def this(conf: NeuralNetConfiguration, input: INDArray) {
this()
`super`(conf, input)
initializeHelper
}
private[convolution] def initializeHelper {
try {
      helper = Class.forName("org.deeplearning4j.nn.layers.convolution.CudnnConvolutionHelper").asSubclass(classOf[ConvolutionHelper]).newInstance
ConvolutionLayer.log.debug("CudnnConvolutionHelper successfully loaded")
}
catch {
case t: Throwable => {
if (!(t.isInstanceOf[ClassNotFoundException])) {
ConvolutionLayer.log.warn("Could not load CudnnConvolutionHelper", t)
}
}
}
}
override def calcL2: Double = {
if (!conf.isUseRegularization || conf.getLayer.getL2 <= 0.0) return 0.0
val l2Norm: Double = getParam(ConvolutionParamInitializer.WEIGHT_KEY).norm2Number.doubleValue
return 0.5 * conf.getLayer.getL2 * l2Norm * l2Norm
}
override def calcL1: Double = {
if (!conf.isUseRegularization || conf.getLayer.getL1 <= 0.0) return 0.0
return conf.getLayer.getL1 * getParam(ConvolutionParamInitializer.WEIGHT_KEY).norm1Number.doubleValue
}
override def `type`: Nothing = {
return Type.CONVOLUTIONAL
}
  override def backpropGradient(epsilon: INDArray): Pair[Gradient, INDArray] = {
val weights: INDArray = getParam(ConvolutionParamInitializer.WEIGHT_KEY)
val miniBatch: Int = input.size(0)
val inH: Int = input.size(2)
val inW: Int = input.size(3)
val outDepth: Int = weights.size(0)
val inDepth: Int = weights.size(1)
val kH: Int = weights.size(2)
val kW: Int = weights.size(3)
val kernel: Array[Int] = layerConf.getKernelSize
val strides: Array[Int] = layerConf.getStride
val pad: Array[Int] = layerConf.getPadding
val outH: Int = Convolution.outSize(inH, kernel(0), strides(0), pad(0), false)
val outW: Int = Convolution.outSize(inW, kernel(1), strides(1), pad(1), false)
val biasGradView: INDArray = gradientViews.get(ConvolutionParamInitializer.BIAS_KEY)
val weightGradView: INDArray = gradientViews.get(ConvolutionParamInitializer.WEIGHT_KEY)
val weightGradView2df: INDArray = Shape.newShapeNoCopy(weightGradView, Array[Int](outDepth, inDepth * kH * kW), false).transpose
var delta: INDArray = null
val afn: String = conf.getLayer.getActivationFunction
if ("identity" == afn) {
delta = epsilon
}
else {
val sigmaPrimeZ: INDArray = preOutput(true)
Nd4j.getExecutioner.execAndReturn(Nd4j.getOpFactory.createTransform(afn, sigmaPrimeZ, conf.getExtraArgs).derivative)
delta = sigmaPrimeZ.muli(epsilon)
}
if (helper != null) {
      val ret: Pair[Gradient, INDArray] = helper.backpropGradient(input, weights, delta, kernel, strides, pad, biasGradView, weightGradView, afn)
if (ret != null) {
return ret
}
}
delta = delta.permute(1, 0, 2, 3)
val delta2d: INDArray = delta.reshape('c', Array[Int](outDepth, miniBatch * outH * outW))
val col: INDArray = Nd4j.createUninitialized(Array[Int](miniBatch, outH, outW, inDepth, kH, kW), 'c')
val col2: INDArray = col.permute(0, 3, 4, 5, 1, 2)
Convolution.im2col(input, kH, kW, strides(0), strides(1), pad(0), pad(1), false, col2)
val im2col2d: INDArray = col.reshape('c', miniBatch * outH * outW, inDepth * kH * kW)
Nd4j.gemm(im2col2d, delta2d, weightGradView2df, true, true, 1.0, 0.0)
val wPermuted: INDArray = weights.permute(3, 2, 1, 0)
val w2d: INDArray = wPermuted.reshape('f', inDepth * kH * kW, outDepth)
val epsNext2d: INDArray = w2d.mmul(delta2d)
var eps6d: INDArray = Shape.newShapeNoCopy(epsNext2d, Array[Int](kW, kH, inDepth, outW, outH, miniBatch), true)
eps6d = eps6d.permute(5, 2, 1, 0, 4, 3)
val epsNextOrig: INDArray = Nd4j.create(Array[Int](inDepth, miniBatch, inH, inW), 'c')
val epsNext: INDArray = epsNextOrig.permute(1, 0, 2, 3)
Convolution.col2im(eps6d, epsNext, strides(0), strides(1), pad(0), pad(1), inH, inW)
val retGradient: Gradient = new DefaultGradient
val biasGradTemp: INDArray = delta2d.sum(1)
biasGradView.assign(biasGradTemp)
retGradient.setGradientFor(ConvolutionParamInitializer.BIAS_KEY, biasGradView)
retGradient.setGradientFor(ConvolutionParamInitializer.WEIGHT_KEY, weightGradView, 'c')
    return new Pair(retGradient, epsNext)
}
override def preOutput(training: Boolean): INDArray = {
var weights: INDArray = getParam(ConvolutionParamInitializer.WEIGHT_KEY)
val bias: INDArray = getParam(ConvolutionParamInitializer.BIAS_KEY)
if (conf.isUseDropConnect && training && conf.getLayer.getDropOut > 0) {
weights = Dropout.applyDropConnect(this, ConvolutionParamInitializer.WEIGHT_KEY)
}
val miniBatch: Int = input.size(0)
val inH: Int = input.size(2)
val inW: Int = input.size(3)
val outDepth: Int = weights.size(0)
val inDepth: Int = weights.size(1)
val kH: Int = weights.size(2)
val kW: Int = weights.size(3)
val kernel: Array[Int] = layerConf.getKernelSize
val strides: Array[Int] = layerConf.getStride
val pad: Array[Int] = layerConf.getPadding
val outH: Int = Convolution.outSize(inH, kernel(0), strides(0), pad(0), false)
val outW: Int = Convolution.outSize(inW, kernel(1), strides(1), pad(1), false)
if (helper != null) {
val ret: INDArray = helper.preOutput(input, weights, bias, kernel, strides, pad)
if (ret != null) {
return ret
}
}
val col: INDArray = Nd4j.createUninitialized(Array[Int](miniBatch, outH, outW, inDepth, kH, kW), 'c')
val col2: INDArray = col.permute(0, 3, 4, 5, 1, 2)
Convolution.im2col(input, kH, kW, strides(0), strides(1), pad(0), pad(1), false, col2)
val reshapedCol: INDArray = Shape.newShapeNoCopy(col, Array[Int](miniBatch * outH * outW, inDepth * kH * kW), false)
val permutedW: INDArray = weights.permute(3, 2, 1, 0)
val reshapedW: INDArray = permutedW.reshape('f', kW * kH * inDepth, outDepth)
var z: INDArray = reshapedCol.mmul(reshapedW)
z.addiRowVector(bias)
z = Shape.newShapeNoCopy(z, Array[Int](outW, outH, miniBatch, outDepth), true)
return z.permute(2, 3, 1, 0)
}
override def activate(training: Boolean): INDArray = {
if (input == null) throw new IllegalArgumentException("No null input allowed")
applyDropOutIfNecessary(training)
val z: INDArray = preOutput(training)
val afn: String = conf.getLayer.getActivationFunction
if ("identity" == afn) {
return z
}
if (helper != null) {
val ret: INDArray = helper.activate(z, conf.getLayer.getActivationFunction)
if (ret != null) {
return ret
}
}
val activation: INDArray = Nd4j.getExecutioner.execAndReturn(Nd4j.getOpFactory.createTransform(afn, z))
return activation
}
override def transpose: Nothing = {
throw new UnsupportedOperationException("Not yet implemented")
}
override def calcGradient(layerError: Gradient, indArray: INDArray): Gradient = {
throw new UnsupportedOperationException("Not yet implemented")
}
override def fit(input: INDArray) {
}
override def merge(layer: Nothing, batchSize: Int) {
throw new UnsupportedOperationException
}
override def params: INDArray = {
return Nd4j.toFlattened('c', params.values)
}
override def setParams(params: INDArray) {
setParams(params, 'c')
}
}
| Mageswaran1989/aja | src/main/scala/org/aja/dhira/src/main/scala/org/dhira/core/nnet/layers/convolution/ConvolutionLayer.scala | Scala | apache-2.0 | 9,034 |
package notebook
import rx.lang.scala.{Observable => RxObservable, Observer => RxObserver}
/**
* Author: Ken
*/
trait Observer[T] extends RxObserver[T] {
def map[A](fxn: A => T): Observer[A] = new MappingObserver[T, A] {
def innerObserver = Observer.this;
def observerMapper = fxn
}
}
/**
* A no-op observer, useful for extending just the methods you want
* @tparam T
*/
trait ConcreteObserver[T] extends Observer[T] {
override def onCompleted() {}
def onError(e: Exception) {}
override def onNext(args: T) {}
}
class NoopObserver[T] extends ConcreteObserver[T]
trait MappingObserver[A, B] extends Observer[B] {
protected def innerObserver: Observer[A]
protected def observerMapper: B => A
override def onCompleted() {
innerObserver.onCompleted()
}
def onError(e: Exception) {
innerObserver.onError(e)
}
override def onNext(args: B) {
innerObserver.onNext(observerMapper(args))
}
}
object Observer {
def apply[A](f: A => Unit) = new ConcreteObserver[A] {
override def onNext(args: A) = f(args)
}
}
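// Illustrative sketch (editor addition): `map` adapts an observer contravariantly,
// applying the supplied function to each value before handing it to the wrapped observer.
object ObserverExample {
  val printLengths: Observer[Int] = Observer[Int](n => println(s"length: $n"))
  // An Observer[String] that forwards each string's length to printLengths.
  val stringObserver: Observer[String] = printLengths.map((s: String) => s.length)
  // stringObserver.onNext("hello") would print "length: 5"
}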
| erictu/mango-notebook | modules/observable/src/main/scala/notebook/Observer.scala | Scala | apache-2.0 | 1,072 |
/*
* Part of GDL book_api.
* Copyright (C) 2017 Global Digital Library
*
* See LICENSE
*/
package io.digitallibrary.bookapi.model.domain
import io.digitallibrary.bookapi.BookApiProperties
import io.digitallibrary.license.model.License
import scalikejdbc._
case class Book(id: Option[Long],
revision: Option[Int],
publisherId: Long,
publisher: Publisher,
license: License,
source: String)
object Book extends SQLSyntaxSupport[Book] {
implicit val formats = org.json4s.DefaultFormats
override val tableName = "book"
override val schemaName = Some(BookApiProperties.MetaSchema)
def apply(b: SyntaxProvider[Book], pub: SyntaxProvider[Publisher])(rs: WrappedResultSet): Book =
apply(b.resultName, pub.resultName)(rs)
def apply(b: ResultName[Book], pub: ResultName[Publisher])(rs: WrappedResultSet): Book = Book(
rs.longOpt(b.id),
rs.intOpt(b.revision),
rs.long(b.publisherId),
Publisher.apply(pub)(rs),
License(rs.string(b.license)),
rs.string(b.source))
}
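// --- Illustrative sketch (editor addition) ---
// Typical use of the two-ResultName `apply` with scalikejdbc's query DSL. The Publisher
// syntax support, its `id` column and the implicit DBSession are assumptions here, not
// taken from this file.
object BookQueries {
  def withId(bookId: Long)(implicit session: DBSession): Option[Book] = {
    val (b, pub) = (Book.syntax("b"), Publisher.syntax("pub"))
    withSQL {
      select
        .from(Book as b)
        .innerJoin(Publisher as pub).on(b.publisherId, pub.id)
        .where.eq(b.id, bookId)
    }.map(Book(b, pub)).single().apply()
  }
}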
| GlobalDigitalLibraryio/book-api | src/main/scala/io/digitallibrary/bookapi/model/domain/Book.scala | Scala | apache-2.0 | 1,096 |
package com.bigchange.basic
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkContext, SparkConf}
/**
* Created by CHAOJIANG on 2016/5/1 0001.
*/
object HiveOperationTest {
def main(args: Array[String]): Unit = {
if (args.length < 1) {
System.err.println("Usage: <inpath>")
System.exit(1)
}
val inputFile = args(0)
val conf = new SparkConf().setAppName("HiveOperationTest")
val sc = new SparkContext(conf)
val sqlContext = new HiveContext(sc)
// create table
sqlContext.sql("CREATE TABLE IF NOT EXISTS weather (date STRING, city STRING, minTem Int, maxTem Int) row format delimited fields terminated by '\\t'")
sqlContext.sql(s"LOAD DATA INPATH '$inputFile' INTO TABLE weather")
// Queries are expressed in HiveQL
sqlContext.sql("select city, avg(minTem) from weather group by city").collect().foreach(println)
    // register and use a UDF
sqlContext.udf.register("class", (s: Int) => if (s <= 20) "lower" else "high")
sqlContext.sql("select city, maxTem, class(maxTem) from weather").collect().foreach(println)
sc.stop()
}
}
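// Illustrative note (editor addition; the data layout is an assumption, not from the
// original repo): the input path should point at a tab-separated file matching the
// `weather` table, e.g.
//   2016-05-01<TAB>Beijing<TAB>15<TAB>27
//   2016-05-01<TAB>Shanghai<TAB>18<TAB>24
// so the `class` UDF labels a maxTem of 27 as "high" and values <= 20 as "lower".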
| bigchange/AI | src/main/scala/com/bigchange/basic/HiveOperationTest.scala | Scala | apache-2.0 | 1,126 |
package xyz.hyperreal.sprolog
class PrologDB extends Database
{
Prolog.compileProgram( Prolog.parseProgram("""
% X = X. % for some reason this doesn't work
If -> Then :- If, !, Then.
If -> Then ; _ :- If, !, Then.
_ -> _ ; Else :- !, Else. % the cut stops the rules for disjunction (;) from being tried
F ; _ :- F.
_ ; A :- A.
\+ Goal :- Goal, !, fail.
\+ _.
once( Goal ) :- Goal, !.
repeat.
repeat :- repeat.
sublist( [], L ).
sublist( [H|T], [H|U] ) :- sublist_( T, U ).
sublist( S, [H|U] ) :- sublist( S, U ).
sublist_( [], L ).
sublist_( [H|T], [H|U] ) :- sublist_( T, U ).
member( X, [X|_] ).
member( X, [_|T] ) :- member( X, T ).
append( [], L, L ).
append( [H|T], L, [H|LT] ) :- append( T, L, LT ).
reverse( List, Reversed ) :- reverse_( List, [], Reversed ).
reverse_( [], Reversed, Reversed ).
reverse_( [Head|Tail], SoFar, Reversed ) :- reverse_( Tail, [Head|SoFar], Reversed ).
length( Xs, L ) :- length_( Xs, 0, L ) .
length_( [] , L , L ) .
length_( [_|Xs] , T , L ) :-
T1 is T + 1,
length_( Xs, T1, L ).
sum_list( L, R ) :-
sum_list_( L, 0, R ).
sum_list_( [], F, F ).
sum_list_( [H|T], F, R ) :-
F2 is F + H,
sum_list_( T, F2 ,R ).
delete( X, [X|R], R ).
delete( X, [F|R], [F|S] ) :- delete( X, R, S ).
permutation( [], [] ).
permutation( [X|Y], Z ) :- permutation( Y, W ), delete( X, Z, W ).
halve( L, A, B ) :- halve_( L, L, A, B ).
halve_( [], R, [], R ). % for lists of even length
halve_( [_], R, [], R ). % for lists of odd length
halve_( [_, _|T], [X|L], [X|L1], R ) :- halve_( T, L, L1, R ).
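	% e.g. (illustrative) ?- halve( [1,2,3,4,5], A, B ). yields A = [1,2], B = [3,4,5].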
powerset(Set, PowerSet):-
powerset_(Set, [[]], PowerSet).
powerset_([], Yss, Yss).
powerset_([X|Xs], Yss0, Yss):-
powerset__(Yss0, X, Yss1),
powerset_(Xs, Yss1, Yss).
powerset__([], _, []).
powerset__([Zs|Zss], X, [Zs, [X|Zs]|Yss]):-
powerset__(Zss, X, Yss).
subset( [], _ ).
subset( [H|T], L ) :-
member( H, L ),
subset( T, L ).
union( [], X, X ) :- !.
union( [X|R], Y, Z ) :- member( X, Y ), union( R, Y, Z ), !.
union( [X|R], Y, [X|Z] ) :- union( R, Y, Z ).
intersection( [], _, [] ) :- !.
intersection( [X|R], Y, [X|T] ) :- member( X, Y ), intersection( R, Y, T ), !.
intersection( [X|R], Y, L ) :- intersection( R, Y, L ).
while_( C ) :- C.
while_( C ) :- C, while_( C ).
iterate_( L, V ) :-
iterator_( L, I ),
while_( hasNext_(I) ),
next_( I, V ).
"""), this )
}
| edadma/sprolog | src/main/scala/PrologDB.scala | Scala | mit | 2,552 |
package service
import java.lang.Long.parseLong
import scala.concurrent.Future
import scalaz.syntax.applicative.ToApplyOps
import play.api.libs.concurrent.Execution.Implicits._
import scalaz._
import Scalaz._
import org.mindrot.jbcrypt.BCrypt
import utils.{Logging, Utils}
import Utils._
import entities._
/**
* Global object, handles actual interaction with Redis.
* All methods are non-blocking and thread safe.
*/
object RedisService extends RedisConfig with Logging {
//small page size to demonstrate pagination
val page_size = 10
/**
* Load a post
* @param post_id the post id to load
* @return (Future of) Some message if the given post_id maps to an actual message else None
*/
def load_post(post_id: PostId): Future[Option[Msg]] = {
for {
map <- redis.hmGetAsMap[String](RedisSchema.post_info(post_id))("timestamp", "author", "body")
} yield {
for{
timestamp <- map.get("timestamp")
author <- map.get("author")
body <- map.get("body")
} yield Msg(post_id, parseLong(timestamp), UserId(author), body)
}
}
/**
* Load a series of posts
* @param post_ids sequence of post ids to load
* @return (Future of) messages
*/
def load_posts(post_ids: Seq[PostId]): Future[Seq[Msg]] = {
for {
msgs <- Future.sequence(post_ids.map(load_post))
} yield msgs.collect {case Some(msg) => msg} //filter out posts that have been deleted
}
/**
* Write a Message to Redis
* @param post_id post id of message being saved
* @param msg message to save
* @return (Future of) Unit
*/
private def save_post(post_id: PostId, msg: Msg): Future[Unit] =
redis.hmSetFromMap(RedisSchema.post_info(post_id), Map(
"timestamp" -> msg.timestamp,
"author" -> msg.uid.uid,
"body" -> msg.body
))
/**
* Add a post's id to the feed of its author and her followers
* @param from author of the post
* @param post_id id of the post
* @return (Future of) Unit
*/
private def distribute_post(from: UserId, post_id: PostId): Future[Unit] =
for {
recipients <- followed_by(from)
distribution = for (recipient <- recipients + from) yield {
for {
_ <- redis.lPush(RedisSchema.user_posts(recipient), post_id.pid)
_ <- redis.publish(s"${recipient.uid}:feed", post_id.pid)
} yield ()
}
_ <- Future.sequence(distribution)
} yield ()
/**
* Post a message by a given user with a given body. Reserves a global post id
* and distributes that post id to the feeds of all followers of this user.
* @param author author of the post
* @param body body of the post
* @return (Future of) the id of the post after it is created
*/
def post_message(author: UserId, body: String): Future[PostId] = {
val timestamp = System.currentTimeMillis
def trim_global = redis.lTrim(RedisSchema.global_timeline,0,1000)
def handle_post(post_id: PostId) = {
log.info(s"handling post $post_id for $author with body $body")
(redis.lPush(RedisSchema.global_timeline, post_id.pid)
|@| save_post(post_id, Msg(post_id, timestamp, author, body))
|@| distribute_post(author, post_id)
){ (a,b,c) => () }
}
for {
post_id <- redis.incr(RedisSchema.next_post_id).map(id => PostId(id.toString))
_ <- handle_post(post_id)
_ <- trim_global
} yield post_id
}
/**
   * Get the set of users that this user is following.
   * @param uid user doing the following
   * @return users followed by this user
*/
def is_following(uid: UserId): Future[Set[UserId]] =
for {
following <- redis.sMembers(RedisSchema.is_following(uid))
} yield following.map( id => UserId(id) )
/**
   * Get the set of users following this user.
   * @param uid user being followed
   * @return this user's followers
*/
def followed_by(uid: UserId): Future[Set[UserId]] =
for {
followers <- redis.sMembers(RedisSchema.followed_by(uid))
} yield followers.map( id => UserId(id) )
/**
* Start following a user
* @param uid user doing the following
* @param to_follow user being followed
* @return (Future of) Unit
*/
def follow_user(uid: UserId, to_follow: UserId): Future[Unit] = {
println(s"follow_user($uid: UserId, $to_follow: UserId)")
for {
_ <- predicate(uid =/= to_follow, s"user $uid just tried to follow himself! probably a client-side bug")
_ <- redis.sAdd(RedisSchema.is_following(uid), to_follow.uid)
_ <- redis.sAdd(RedisSchema.followed_by(to_follow), uid.uid)
} yield ()
}
/**
* Stop following a user
* @param uid user doing the unfollowing
* @param to_unfollow user being unfollowed
* @return (Future of) Unit
*/
def unfollow_user(uid: UserId, to_unfollow: UserId): Future[Unit] = {
println(s"follow_user($uid: UserId, $to_unfollow: UserId)")
for {
_ <- predicate(uid =/= to_unfollow, s"user $uid just tried to unfollow himself! probably a client-side bug")
_ <- (redis.sRem(RedisSchema.is_following(uid), to_unfollow.uid) |@|
redis.sRem(RedisSchema.followed_by(to_unfollow), uid.uid)){ (_,_) => () }
} yield ()
}
/**
* Attempt to log a user in with the provided credentials
* @param username username to login with
* @param password password to login with
* @return (Future of) the UID assigned existing user with the above credentials
*/
def login_user(username: String, password: String): Future[UserId] = {
for {
raw_uid_opt <- redis.get(RedisSchema.username_to_id(username))
uid <- match_or_else(raw_uid_opt, s"no uid for username $username"){case Some(u) => UserId(u)}
passwordHashOpt <- redis.get(RedisSchema.user_password(uid))
passwordHash <- match_or_else(passwordHashOpt, s"no hashed password for user $username with uid $uid"){case Some(pw) => pw}
_ <- predicate(BCrypt.checkpw(password, passwordHash), "password doesn't match hashed password")
} yield uid
}
/**
* Attempt to register a user with the provided credentials
* @param username username to register with
* @param password password to register with
* @return (Future of) a fresh UID assigned to a new user with the above credentials
*/
def register_user(username: String, password: String): Future[UserId] = {
val hashedPassword = BCrypt.hashpw(password, BCrypt.gensalt())
for {
raw_uid <- redis.incr(RedisSchema.next_user_id).map(_.toString)
uid = UserId(raw_uid)
_ <- set_username(uid, username)
_ <- redis.set(RedisSchema.user_password(uid), hashedPassword)
} yield uid
}
/**
* Generate and register a new auth token for a given user
* @param uid user to generate an auth token for
* @return (Future of) an auth token
*/
def gen_auth_token(uid: UserId): Future[AuthToken] = {
val auth = AuthToken( new scala.util.Random().nextString(15) )
for {
_ <- (redis.set(RedisSchema.user_auth(uid), auth.token) |@|
redis.set(RedisSchema.auth_user(auth), uid.uid)){ (_, _) => () }
} yield auth
}
/**
* Get the user associated with some auth token
* @param auth an auth token
* @return associated user
*/
def user_from_auth_token(auth: AuthToken): Future[UserId] = {
for {
raw_uid_opt <- redis.get(RedisSchema.auth_user(auth))
uid <- match_or_else(raw_uid_opt, s"uid not found for auth token: $auth"){ case Some(u) => UserId(u) }
user_auth_opt <- redis.get(RedisSchema.user_auth(uid))
user_auth <- match_or_else(user_auth_opt, s"auth string not found for uid: $uid"){ case Some(a) => AuthToken(a) }
_ <- predicate(user_auth === auth, s"user auth $user_auth doesn't match attempted $auth")
} yield uid
}
/**
* Given a user, fetch posts routed to their feed
* (represented as a linked list, random access to a node requires traversing all precursors of that node.)
* @param user_id a user
* @param page pages to offset the fetched slice of the feed by
* @return (Future of) posts in the requested page of the given user's feed
*/
def get_user_feed(user_id: UserId, page: Int): Future[Seq[PostId]] = {
val start = page_size * page
val end = start + page_size
redis.lRange(RedisSchema.user_posts(user_id), start, end).map(_.map(PostId(_)))
}
/**
* Fetch posts routed to the global feed
* (represented as a linked list, random access to a node requires traversing all precursors of that node.)
* @param page pages to offset the fetched slice of the feed by
* @return (Future of) posts in the requested page of the global feed
*/
def get_global_feed(page: Int): Future[Seq[PostId]] = {
val start = page_size * page
val end = start + page_size
redis.lRange(RedisSchema.global_timeline, start, end).map(_.map(PostId(_)))
}
/**
* Fetch a user's username
* @param uid some user
* @return (Future of) this user's username
*/
def get_user_name(uid: UserId): Future[String] =
for {
username_opt <- redis.get[String](RedisSchema.id_to_username(uid))
username <- match_or_else(username_opt, s"no username for uid $uid"){case Some(s) => s}
} yield username
/**
* Reserve a username for a user. Checks availability of that username
* @param uid some user
* @param username username to register for the given user
* @return (Future of) Unit
*/
private def set_username(uid: UserId, username: String): Future[Unit] =
for {
uid_for_username <- redis.get(RedisSchema.username_to_id(username))
_ <- match_or_else(uid_for_username , s"user $uid attempting to reserve taken username $username, already in use by user with uid $uid_for_username"){
case None =>
}
_ <- (redis.set(RedisSchema.id_to_username(uid), username) |@|
redis.set(RedisSchema.username_to_id(username), uid.uid)){
(_,_) => ()
}
} yield ()
}
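// --- Illustrative usage sketch (editor addition) ---
// Assumes a reachable Redis instance configured through RedisConfig; the user names and
// passwords below are made up.
object RedisServiceExample {
  import scala.concurrent.Future
  import play.api.libs.concurrent.Execution.Implicits._
  import entities._

  // Register two users, have bob follow alice, post as alice, then read bob's feed.
  def demo(): Future[Seq[Msg]] =
    for {
      alice <- RedisService.register_user("alice", "s3cret")
      bob   <- RedisService.register_user("bob", "hunter2")
      _     <- RedisService.follow_user(bob, to_follow = alice)
      _     <- RedisService.post_message(alice, body = "hello, world")
      ids   <- RedisService.get_user_feed(bob, page = 0)
      msgs  <- RedisService.load_posts(ids)
    } yield msgs
}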
| alanktwong/typesafe_activators | redis-twitter-clone/app/service/RedisService.scala | Scala | mit | 10,278 |
package test_expect_failure.scala_import
class RootScalaImportPassesLabelsDirectDeps
| bazelbuild/rules_scala | test_expect_failure/scala_import/RootScalaImportPassesLabelsDirectDeps.scala | Scala | apache-2.0 | 84 |
package com.danieltrinh.benchmarks
import org.example.SimpleScalaBenchmark
import com.google.caliper.Param
import scala.collection.par._
import scala.collection.par.Scheduler.Implicits.global
import scala.collection.parallel.ParSeq
class ParallelVsRegularCollections extends SimpleScalaBenchmark {
@Param(Array("10", "100", "1000"))
val length: Int = 0
var array: Array[Int] = _
var seq = Seq[Data]()
var parSeq = ParSeq[Data]()
override def setUp() {
array = new Array(length)
seq = Seq.tabulate(length)(i => Data(List(i)))
parSeq = seq.par
}
def timeSeqMapReduce(reps: Int): Seq[Int] = repeat(reps) {
var i = 0
var result = Seq[Int]()
while(i < array.length) {
result = seq.map(_.d).reduce { _ ++ _ }
i = i + 1
}
result
}
def timeParSeqMapReduce(reps: Int): Seq[Int] = repeat(reps) {
var i = 0
var result = Seq[Int]()
while(i < array.length) {
result = parSeq.map(_.d).reduce(_ ++ _)
i = i + 1
}
result
}
def timeParSeqViewMapReduce(reps: Int): Seq[Int] = repeat(reps) {
var i = 0
var result = Seq[Int]()
while(i < array.length) {
result = parSeq.view.map(_.d).reduce(_ ++ _)
i = i + 1
}
result
}
def timeSeqFoldLeft(reps: Int): Seq[Int] = repeat(reps) {
var i = 0
var result = Seq[Int]()
while(i < array.length) {
result = seq.foldLeft(Seq[Int]()) { _ ++ _.d }
i = i + 1
}
result
}
}
case class Data(d: List[Int])
| daniel-trinh/scala_microbenchmarks | src/main/scala/com/danieltrinh/benchmarks/ParallelVsRegularCollections.scala | Scala | mit | 1,499 |
/* Copyright 2012-2015 Micronautics Research Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License. */
package com.micronautics.aws
import org.scalatest.WordSpec
class ETTest extends WordSpec with TestBase {
"Blah" must {
"blah" in {
}
}
}
| mslinn/awslib_scala | src/test/scala/com/micronautics/aws/ETTest.scala | Scala | mit | 758 |
package com.twitter.finagle.http.netty
import org.jboss.netty.handler.codec.http.{DefaultHttpRequest, HttpMethod, HttpVersion}
import org.specs.SpecificationWithJUnit
class HttpMessageProxySpec extends SpecificationWithJUnit {
"HttpMessageProxy" should {
"basics" in {
val message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/")
val proxy = new HttpMessageProxy {
final val httpMessage = message
}
proxy.getProtocolVersion must_== HttpVersion.HTTP_1_1
}
}
}
| firebase/finagle | finagle-http/src/test/scala/com/twitter/finagle/http/netty/HttpMessageProxySpec.scala | Scala | apache-2.0 | 525 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.integrationtest
import java.io.File
import java.nio.file.{Path, Paths}
import java.util.UUID
import scala.collection.JavaConverters._
import com.google.common.base.Charsets
import com.google.common.io.Files
import io.fabric8.kubernetes.api.model.Pod
import io.fabric8.kubernetes.client.{Watcher, WatcherException}
import io.fabric8.kubernetes.client.Watcher.Action
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Tag}
import org.scalatest.concurrent.{Eventually, PatienceConfiguration}
import org.scalatest.concurrent.PatienceConfiguration.{Interval, Timeout}
import org.scalatest.matchers.must.Matchers
import org.scalatest.matchers.should.Matchers._
import org.scalatest.time.{Minutes, Seconds, Span}
import org.apache.spark.SparkFunSuite
import org.apache.spark.deploy.k8s.integrationtest.TestConstants._
import org.apache.spark.deploy.k8s.integrationtest.backend.{IntegrationTestBackend, IntegrationTestBackendFactory}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
class KubernetesSuite extends SparkFunSuite
with BeforeAndAfterAll with BeforeAndAfter with BasicTestsSuite with SparkConfPropagateSuite
with SecretsTestsSuite with PythonTestsSuite with ClientModeTestsSuite with PodTemplateSuite
with PVTestsSuite with DepsTestsSuite with DecommissionSuite with RTestsSuite with Logging
with Eventually with Matchers {
import KubernetesSuite._
protected var sparkHomeDir: Path = _
protected var pyImage: String = _
protected var rImage: String = _
protected var image: String = _
protected var testBackend: IntegrationTestBackend = _
protected var driverPodName: String = _
protected var kubernetesTestComponents: KubernetesTestComponents = _
protected var sparkAppConf: SparkAppConf = _
protected var containerLocalSparkDistroExamplesJar: String = _
protected var appLocator: String = _
// Default memory limit is 1024M + 384M (minimum overhead constant)
private val baseMemory = s"${1024 + 384}"
protected val memOverheadConstant = 0.8
private val standardNonJVMMemory = s"${(1024 + 0.4*1024).toInt}"
protected val additionalMemory = 200
// 209715200 is 200Mi
protected val additionalMemoryInBytes = 209715200
private val extraDriverTotalMemory = s"${(1024 + memOverheadConstant*1024).toInt}"
private val extraExecTotalMemory =
s"${(1024 + memOverheadConstant*1024 + additionalMemory).toInt}"
protected override def logForFailedTest(): Unit = {
logInfo("\\n\\n===== EXTRA LOGS FOR THE FAILED TEST\\n")
logInfo("BEGIN DESCRIBE PODS for application\\n" +
testBackend.describePods(s"spark-app-locator=$appLocator").mkString("\\n"))
logInfo("END DESCRIBE PODS for the application")
val driverPodOption = kubernetesTestComponents.kubernetesClient
.pods()
.withLabel("spark-app-locator", appLocator)
.withLabel("spark-role", "driver")
.list()
.getItems
.asScala
.headOption
driverPodOption.foreach { driverPod =>
logInfo("BEGIN driver POD log\\n" +
kubernetesTestComponents.kubernetesClient
.pods()
.withName(driverPod.getMetadata.getName)
.getLog)
logInfo("END driver POD log")
}
kubernetesTestComponents.kubernetesClient
.pods()
.withLabel("spark-app-locator", appLocator)
.withLabel("spark-role", "executor")
.list()
.getItems.asScala.foreach { execPod =>
val podLog = try {
kubernetesTestComponents.kubernetesClient
.pods()
.withName(execPod.getMetadata.getName)
.getLog
} catch {
case e: io.fabric8.kubernetes.client.KubernetesClientException =>
"Error fetching log (pod is likely not ready) ${e}"
}
logInfo(s"\\nBEGIN executor (${execPod.getMetadata.getName}) POD log:\\n" +
podLog)
logInfo(s"END executor (${execPod.getMetadata.getName}) POD log")
}
}
/**
* Build the image ref for the given image name, taking the repo and tag from the
* test configuration.
*/
private def testImageRef(name: String): String = {
val tag = sys.props.get(CONFIG_KEY_IMAGE_TAG_FILE)
.map { path =>
val tagFile = new File(path)
require(tagFile.isFile,
s"No file found for image tag at ${tagFile.getAbsolutePath}.")
Files.toString(tagFile, Charsets.UTF_8).trim
}
.orElse(sys.props.get(CONFIG_KEY_IMAGE_TAG))
.getOrElse {
throw new IllegalArgumentException(
s"One of $CONFIG_KEY_IMAGE_TAG_FILE or $CONFIG_KEY_IMAGE_TAG is required.")
}
val repo = sys.props.get(CONFIG_KEY_IMAGE_REPO)
.map { _ + "/" }
.getOrElse("")
s"$repo$name:$tag"
}
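  // For example (illustrative): if the configured image repo resolves to "docker.io/myrepo"
  // and the tag (or tag file) resolves to "dev", testImageRef("spark-py") yields
  // "docker.io/myrepo/spark-py:dev"; with no repo configured it is just "spark-py:dev".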
override def beforeAll(): Unit = {
super.beforeAll()
// The scalatest-maven-plugin gives system properties that are referenced but not set null
// values. We need to remove the null-value properties before initializing the test backend.
val nullValueProperties = System.getProperties.asScala
.filter(entry => entry._2.equals("null"))
.map(entry => entry._1.toString)
nullValueProperties.foreach { key =>
System.clearProperty(key)
}
val possible_spark_dirs = List(
// If someone specified the tgz for the tests look at the extraction dir
System.getProperty(CONFIG_KEY_UNPACK_DIR),
// Try the spark test home
sys.props("spark.test.home")
)
val sparkDirProp = possible_spark_dirs.filter(x =>
new File(Paths.get(x).toFile, "bin/spark-submit").exists).headOption.getOrElse(null)
require(sparkDirProp != null,
s"Spark home directory must be provided in system properties tested $possible_spark_dirs")
sparkHomeDir = Paths.get(sparkDirProp)
require(sparkHomeDir.toFile.isDirectory,
s"No directory found for spark home specified at $sparkHomeDir.")
image = testImageRef(sys.props.getOrElse(CONFIG_KEY_IMAGE_JVM, "spark"))
pyImage = testImageRef(sys.props.getOrElse(CONFIG_KEY_IMAGE_PYTHON, "spark-py"))
rImage = testImageRef(sys.props.getOrElse(CONFIG_KEY_IMAGE_R, "spark-r"))
containerLocalSparkDistroExamplesJar =
s"local:///opt/spark/examples/jars/${Utils.getExamplesJarName()}"
testBackend = IntegrationTestBackendFactory.getTestBackend
testBackend.initialize()
kubernetesTestComponents = new KubernetesTestComponents(testBackend.getKubernetesClient)
}
override def afterAll(): Unit = {
try {
testBackend.cleanUp()
} finally {
super.afterAll()
}
}
protected def setUpTest(): Unit = {
appLocator = UUID.randomUUID().toString.replaceAll("-", "")
driverPodName = "spark-test-app-" + UUID.randomUUID().toString.replaceAll("-", "")
sparkAppConf = kubernetesTestComponents.newSparkAppConf()
.set("spark.kubernetes.container.image", image)
.set("spark.kubernetes.driver.pod.name", driverPodName)
.set("spark.kubernetes.driver.label.spark-app-locator", appLocator)
.set("spark.kubernetes.executor.label.spark-app-locator", appLocator)
.set(NETWORK_AUTH_ENABLED.key, "true")
if (!kubernetesTestComponents.hasUserSpecifiedNamespace) {
kubernetesTestComponents.createNamespace()
}
}
before {
setUpTest()
}
after {
if (!kubernetesTestComponents.hasUserSpecifiedNamespace) {
kubernetesTestComponents.deleteNamespace()
}
deleteDriverPod()
deleteExecutorPod()
}
protected def runSparkPiAndVerifyCompletion(
appResource: String = containerLocalSparkDistroExamplesJar,
driverPodChecker: Pod => Unit = doBasicDriverPodCheck,
executorPodChecker: Pod => Unit = doBasicExecutorPodCheck,
appArgs: Array[String] = Array.empty[String],
isJVM: Boolean = true ): Unit = {
runSparkApplicationAndVerifyCompletion(
appResource,
SPARK_PI_MAIN_CLASS,
Seq("Pi is roughly 3"),
Seq(),
appArgs,
driverPodChecker,
executorPodChecker,
isJVM)
}
protected def runDFSReadWriteAndVerifyCompletion(
wordCount: Int,
appResource: String = containerLocalSparkDistroExamplesJar,
driverPodChecker: Pod => Unit = doBasicDriverPodCheck,
executorPodChecker: Pod => Unit = doBasicExecutorPodCheck,
appArgs: Array[String] = Array.empty[String],
isJVM: Boolean = true,
interval: Option[PatienceConfiguration.Interval] = None): Unit = {
runSparkApplicationAndVerifyCompletion(
appResource,
SPARK_DFS_READ_WRITE_TEST,
Seq(s"Success! Local Word Count $wordCount and " +
s"DFS Word Count $wordCount agree."),
Seq(),
appArgs,
driverPodChecker,
executorPodChecker,
isJVM,
None,
Option((interval, None)))
}
protected def runMiniReadWriteAndVerifyCompletion(
wordCount: Int,
appResource: String = containerLocalSparkDistroExamplesJar,
driverPodChecker: Pod => Unit = doBasicDriverPodCheck,
executorPodChecker: Pod => Unit = doBasicExecutorPodCheck,
appArgs: Array[String] = Array.empty[String],
isJVM: Boolean = true,
interval: Option[PatienceConfiguration.Interval] = None): Unit = {
runSparkApplicationAndVerifyCompletion(
appResource,
SPARK_MINI_READ_WRITE_TEST,
Seq(s"Success! Local Word Count $wordCount and " +
s"D Word Count $wordCount agree."),
Seq(),
appArgs,
driverPodChecker,
executorPodChecker,
isJVM,
None,
Option((interval, None)))
}
protected def runSparkRemoteCheckAndVerifyCompletion(
appResource: String = containerLocalSparkDistroExamplesJar,
driverPodChecker: Pod => Unit = doBasicDriverPodCheck,
executorPodChecker: Pod => Unit = doBasicExecutorPodCheck,
appArgs: Array[String],
timeout: Option[PatienceConfiguration.Timeout] = None): Unit = {
runSparkApplicationAndVerifyCompletion(
appResource,
SPARK_REMOTE_MAIN_CLASS,
Seq(s"Mounting of ${appArgs.head} was true"),
Seq(),
appArgs,
driverPodChecker,
executorPodChecker,
true,
executorPatience = Option((None, timeout)))
}
protected def runSparkJVMCheckAndVerifyCompletion(
appResource: String = containerLocalSparkDistroExamplesJar,
mainClass: String = SPARK_DRIVER_MAIN_CLASS,
driverPodChecker: Pod => Unit = doBasicDriverPodCheck,
appArgs: Array[String] = Array("5"),
expectedJVMValue: Seq[String]): Unit = {
val appArguments = SparkAppArguments(
mainAppResource = appResource,
mainClass = mainClass,
appArgs = appArgs)
SparkAppLauncher.launch(
appArguments,
sparkAppConf,
TIMEOUT.value.toSeconds.toInt,
sparkHomeDir,
true)
val driverPod = kubernetesTestComponents.kubernetesClient
.pods()
.withLabel("spark-app-locator", appLocator)
.withLabel("spark-role", "driver")
.list()
.getItems
.get(0)
doBasicDriverPodCheck(driverPod)
Eventually.eventually(TIMEOUT, INTERVAL) {
expectedJVMValue.foreach { e =>
assert(kubernetesTestComponents.kubernetesClient
.pods()
.withName(driverPod.getMetadata.getName)
.getLog
.contains(e), "The application did not complete.")
}
}
}
// scalastyle:off argcount
protected def runSparkApplicationAndVerifyCompletion(
appResource: String,
mainClass: String,
expectedDriverLogOnCompletion: Seq[String],
expectedExecutorLogOnCompletion: Seq[String] = Seq(),
appArgs: Array[String],
driverPodChecker: Pod => Unit,
executorPodChecker: Pod => Unit,
isJVM: Boolean,
pyFiles: Option[String] = None,
executorPatience: Option[(Option[Interval], Option[Timeout])] = None,
decommissioningTest: Boolean = false,
env: Map[String, String] = Map.empty[String, String]): Unit = {
// scalastyle:on argcount
val appArguments = SparkAppArguments(
mainAppResource = appResource,
mainClass = mainClass,
appArgs = appArgs)
val execPods = scala.collection.mutable.Map[String, Pod]()
val podsDeleted = scala.collection.mutable.HashSet[String]()
val (patienceInterval, patienceTimeout) = {
executorPatience match {
case Some(patience) => (patience._1.getOrElse(INTERVAL), patience._2.getOrElse(TIMEOUT))
case _ => (INTERVAL, TIMEOUT)
}
}
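    // Returns true once the named pod reports a "Ready" condition whose status is "True".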
def checkPodReady(namespace: String, name: String) = {
val execPod = kubernetesTestComponents.kubernetesClient
.pods()
.inNamespace(namespace)
.withName(name)
.get()
val resourceStatus = execPod.getStatus
val conditions = resourceStatus.getConditions().asScala
val conditionTypes = conditions.map(_.getType())
val readyConditions = conditions.filter{cond => cond.getType() == "Ready"}
val result = readyConditions
.map(cond => cond.getStatus() == "True")
.headOption.getOrElse(false)
result
}
val execWatcher = kubernetesTestComponents.kubernetesClient
.pods()
.withLabel("spark-app-locator", appLocator)
.withLabel("spark-role", "executor")
.watch(new Watcher[Pod] {
logDebug("Beginning watch of executors")
override def onClose(): Unit = logInfo("Ending watch of executors")
override def onClose(cause: WatcherException): Unit =
logInfo("Ending watch of executors")
override def eventReceived(action: Watcher.Action, resource: Pod): Unit = {
val name = resource.getMetadata.getName
val namespace = resource.getMetadata().getNamespace()
action match {
case Action.MODIFIED =>
execPods(name) = resource
case Action.ADDED =>
logDebug(s"Add event received for $name.")
execPods(name) = resource
// If testing decommissioning start a thread to simulate
// decommissioning on the first exec pod.
if (decommissioningTest && execPods.size == 1) {
// Wait for all the containers in the pod to be running
logDebug("Waiting for pod to become OK prior to deletion")
Eventually.eventually(patienceTimeout, patienceInterval) {
val result = checkPodReady(namespace, name)
result shouldBe (true)
}
// Look for the string that indicates we're good to trigger decom on the driver
logDebug("Waiting for first collect...")
Eventually.eventually(TIMEOUT, INTERVAL) {
assert(kubernetesTestComponents.kubernetesClient
.pods()
.withName(driverPodName)
.getLog
.contains("Waiting to give nodes time to finish migration, decom exec 1."),
"Decommission test did not complete first collect.")
}
// Delete the pod to simulate cluster scale down/migration.
// This will allow the pod to remain up for the grace period
// We set an intentionally long grace period to test that Spark
// exits once the blocks are done migrating and doesn't wait for the
// entire grace period if it does not need to.
kubernetesTestComponents.kubernetesClient.pods()
.withName(name).withGracePeriod(Int.MaxValue).delete()
logDebug(s"Triggered pod decom/delete: $name deleted")
// Make sure this pod is deleted
Eventually.eventually(TIMEOUT, INTERVAL) {
assert(podsDeleted.contains(name))
}
// Then make sure this pod is replaced
Eventually.eventually(TIMEOUT, INTERVAL) {
assert(execPods.size == 3)
}
}
case Action.DELETED | Action.ERROR =>
execPods.remove(name)
podsDeleted += name
case Action.BOOKMARK =>
assert(false)
}
}
})
logDebug("Starting Spark K8s job")
SparkAppLauncher.launch(
appArguments,
sparkAppConf,
TIMEOUT.value.toSeconds.toInt,
sparkHomeDir,
isJVM,
pyFiles,
env)
val driverPod = kubernetesTestComponents.kubernetesClient
.pods()
.withLabel("spark-app-locator", appLocator)
.withLabel("spark-role", "driver")
.list()
.getItems
.get(0)
driverPodChecker(driverPod)
    // If we're testing decommissioning we delete executors along the way, but we should
    // still have an executor at some point.
Eventually.eventually(TIMEOUT, patienceInterval) {
execPods.values.nonEmpty should be (true)
}
execPods.values.foreach(executorPodChecker(_))
val execPod: Option[Pod] = if (expectedExecutorLogOnCompletion.nonEmpty) {
Some(kubernetesTestComponents.kubernetesClient
.pods()
.withLabel("spark-app-locator", appLocator)
.withLabel("spark-role", "executor")
.list()
.getItems
.get(0))
} else {
None
}
Eventually.eventually(patienceTimeout, patienceInterval) {
expectedDriverLogOnCompletion.foreach { e =>
assert(kubernetesTestComponents.kubernetesClient
.pods()
.withName(driverPod.getMetadata.getName)
.getLog
.contains(e),
s"The application did not complete, driver log did not contain str ${e}")
}
expectedExecutorLogOnCompletion.foreach { e =>
assert(kubernetesTestComponents.kubernetesClient
.pods()
.withName(execPod.get.getMetadata.getName)
.getLog
.contains(e),
s"The application did not complete, executor log did not contain str ${e}")
}
}
execWatcher.close()
}
protected def doBasicDriverPodCheck(driverPod: Pod): Unit = {
assert(driverPod.getMetadata.getName === driverPodName)
assert(driverPod.getSpec.getContainers.get(0).getImage === image)
assert(driverPod.getSpec.getContainers.get(0).getName === "spark-kubernetes-driver")
assert(driverPod.getSpec.getContainers.get(0).getResources.getRequests.get("memory").getAmount
=== baseMemory)
}
protected def doExecutorServiceAccountCheck(executorPod: Pod, account: String): Unit = {
doBasicExecutorPodCheck(executorPod)
assert(executorPod.getSpec.getServiceAccount == kubernetesTestComponents.serviceAccountName)
}
protected def doBasicDriverPyPodCheck(driverPod: Pod): Unit = {
assert(driverPod.getMetadata.getName === driverPodName)
assert(driverPod.getSpec.getContainers.get(0).getImage === pyImage)
assert(driverPod.getSpec.getContainers.get(0).getName === "spark-kubernetes-driver")
assert(driverPod.getSpec.getContainers.get(0).getResources.getRequests.get("memory").getAmount
=== standardNonJVMMemory)
}
protected def doBasicDriverRPodCheck(driverPod: Pod): Unit = {
assert(driverPod.getMetadata.getName === driverPodName)
assert(driverPod.getSpec.getContainers.get(0).getImage === rImage)
assert(driverPod.getSpec.getContainers.get(0).getName === "spark-kubernetes-driver")
assert(driverPod.getSpec.getContainers.get(0).getResources.getRequests.get("memory").getAmount
=== standardNonJVMMemory)
}
protected def doBasicExecutorPodCheck(executorPod: Pod): Unit = {
assert(executorPod.getSpec.getContainers.get(0).getImage === image)
assert(executorPod.getSpec.getContainers.get(0).getName === "spark-kubernetes-executor")
assert(executorPod.getSpec.getContainers.get(0).getResources.getRequests.get("memory").getAmount
=== baseMemory)
}
protected def doBasicExecutorPyPodCheck(executorPod: Pod): Unit = {
assert(executorPod.getSpec.getContainers.get(0).getImage === pyImage)
assert(executorPod.getSpec.getContainers.get(0).getName === "spark-kubernetes-executor")
assert(executorPod.getSpec.getContainers.get(0).getResources.getRequests.get("memory").getAmount
=== standardNonJVMMemory)
}
protected def doBasicExecutorRPodCheck(executorPod: Pod): Unit = {
assert(executorPod.getSpec.getContainers.get(0).getImage === rImage)
assert(executorPod.getSpec.getContainers.get(0).getName === "spark-kubernetes-executor")
assert(executorPod.getSpec.getContainers.get(0).getResources.getRequests.get("memory").getAmount
=== standardNonJVMMemory)
}
protected def doDriverMemoryCheck(driverPod: Pod): Unit = {
assert(driverPod.getSpec.getContainers.get(0).getResources.getRequests.get("memory").getAmount
=== extraDriverTotalMemory)
}
protected def doExecutorMemoryCheck(executorPod: Pod): Unit = {
assert(executorPod.getSpec.getContainers.get(0).getResources.getRequests.get("memory").getAmount
=== extraExecTotalMemory)
}
protected def checkCustomSettings(pod: Pod): Unit = {
assert(pod.getMetadata.getLabels.get("label1") === "label1-value")
assert(pod.getMetadata.getLabels.get("label2") === "label2-value")
assert(pod.getMetadata.getAnnotations.get("annotation1") === "annotation1-value")
assert(pod.getMetadata.getAnnotations.get("annotation2") === "annotation2-value")
val container = pod.getSpec.getContainers.get(0)
val envVars = container
.getEnv
.asScala
.map { env =>
(env.getName, env.getValue)
}
.toMap
assert(envVars("ENV1") === "VALUE1")
assert(envVars("ENV2") === "VALUE2")
}
private def deleteDriverPod(): Unit = {
kubernetesTestComponents.kubernetesClient.pods().withName(driverPodName).delete()
Eventually.eventually(TIMEOUT, INTERVAL) {
assert(kubernetesTestComponents.kubernetesClient
.pods()
.withName(driverPodName)
.get() == null)
}
}
private def deleteExecutorPod(): Unit = {
kubernetesTestComponents
.kubernetesClient
.pods()
.withLabel("spark-app-locator", appLocator)
.withLabel("spark-role", "executor")
.delete()
Eventually.eventually(TIMEOUT, INTERVAL) {
assert(kubernetesTestComponents.kubernetesClient
.pods()
.withLabel("spark-app-locator", appLocator)
.withLabel("spark-role", "executor")
.list()
.getItems.isEmpty)
}
}
}
private[spark] object KubernetesSuite {
val k8sTestTag = Tag("k8s")
val localTestTag = Tag("local")
val rTestTag = Tag("r")
val MinikubeTag = Tag("minikube")
val SPARK_PI_MAIN_CLASS: String = "org.apache.spark.examples.SparkPi"
val SPARK_DFS_READ_WRITE_TEST = "org.apache.spark.examples.DFSReadWriteTest"
val SPARK_MINI_READ_WRITE_TEST = "org.apache.spark.examples.MiniReadWriteTest"
val SPARK_REMOTE_MAIN_CLASS: String = "org.apache.spark.examples.SparkRemoteFileTest"
val SPARK_DRIVER_MAIN_CLASS: String = "org.apache.spark.examples.DriverSubmissionTest"
val TIMEOUT = PatienceConfiguration.Timeout(Span(3, Minutes))
val INTERVAL = PatienceConfiguration.Interval(Span(1, Seconds))
}
| vinodkc/spark | resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/KubernetesSuite.scala | Scala | apache-2.0 | 23,920 |
package extruder.aws
trait AllAwsInstances extends AwsCredentialsInstances with AwsRegionInstances
| janstenpickle/extruder | aws/src/main/scala/extruder/aws/AllAwsInstances.scala | Scala | mit | 100 |
package sctags
import net.pmonk.optparse.Opt
import scala.collection.mutable.ListBuffer
import java.io.File
import java.io.PrintStream
import java.text.Collator
object SCTags extends Parsing with TagGeneration {
var outputFile: String = "tags"
var recurse = false
var etags = false
def main(args: Array[String]) {
val options = List(
Opt('f')
.withArgument[String]
.help("Write tags to specified file. Use \\"-\\" for stdout")
.alias('o')
.action { fname => outputFile = fname },
Opt("recurse")
.withArgument[Boolean]
.help("Recurse into directories supplied on command line")
.alias('R', true)
.action { r => recurse = r },
Opt("etags")
.withArgument[Boolean]
.help("Generate a TAGS file for emacsen")
.alias('E', true)
.action { e => etags = e }
)
val files = new ListBuffer[File]
args.foreach { fname =>
val file = new File(fname)
if (file.isDirectory) {
if (recurse) {
files ++= FileUtils.listFilesRecursive(file, {(f: File) => f.getName.endsWith(".scala")})
} else {
System.err.println("Skipping directory " + fname)
}
} else {
if (file.getName.endsWith(".scala")) {
println("adding file: " + file.getName)
files += file
} else {
System.err.println("Skipping file " + fname)
}
}
}
val tags: Seq[(String, Seq[Tag])] = files.map(f => (f.getPath, generateTags(parse(f))))
val output = outputFile match {
case "-" => Console.out
case "tags" if etags => new PrintStream("TAGS")
case x => new PrintStream(x)
}
if (etags) {
(new ETags)(tags, output)
} else {
(new CTags)(tags, output)
}
}
}
| stevej/sctags | src/main/scala/sctags/SCTags.scala | Scala | apache-2.0 | 1,791 |
package org.json4s
import scala.collection.immutable
import scala.annotation.implicitNotFound
@implicitNotFound(
"No JSON serializer found for type ${T}. Try to implement an implicit Writer or JsonFormat for this type."
)
trait Writer[-T] { self =>
def write(obj: T): JValue
def contramap[A](f: A => T): Writer[A] =
(obj: A) => self.write(f(obj))
}
object Writer extends WriterFunctions {
def apply[A](implicit a: Writer[A]): Writer[A] = a
}
trait DefaultWriters {
protected[this] class W[-T](fn: T => JValue) extends Writer[T] {
def write(obj: T): JValue = fn(obj)
}
implicit val IntWriter: Writer[Int] = new W[Int](JInt(_))
implicit val ByteWriter: Writer[Byte] = new W[Byte](x => JInt(x: Long))
implicit val ShortWriter: Writer[Short] = new W[Short](x => JInt(x: Long))
implicit val LongWriter: Writer[Long] = new W[Long](JInt(_))
implicit val BigIntWriter: Writer[BigInt] = new W[BigInt](JInt(_))
implicit val BooleanWriter: Writer[Boolean] = new W[Boolean](JBool(_))
implicit val StringWriter: Writer[String] = new W[String](JString(_))
implicit def arrayWriter[T](implicit valueWriter: Writer[T]): Writer[Array[T]] = (obj: Array[T]) =>
JArray(obj.map(valueWriter.write(_)).toList)
implicit def seqWriter[T: Writer]: Writer[collection.Seq[T]] = (a: collection.Seq[T]) =>
JArray(a.map(Writer[T].write(_)).toList)
implicit def mapWriter[K, V](implicit
keyWriter: JsonKeyWriter[K],
valueWriter: Writer[V]
): Writer[immutable.Map[K, V]] =
(obj: Map[K, V]) =>
JObject(
obj.map { case (k, v) => keyWriter.write(k) -> valueWriter.write(v) }.toList
)
implicit val JValueWriter: Writer[JValue] = new W[JValue](identity)
implicit def OptionWriter[T](implicit valueWriter: Writer[T]): Writer[Option[T]] = (obj: Option[T]) =>
obj match {
case Some(v) => valueWriter.write(v)
case _ => JNull
}
}
trait DoubleWriters extends DefaultWriters {
implicit val FloatWriter: Writer[Float] = new W[Float](x => JDouble(x: Double))
implicit val DoubleWriter: Writer[Double] = new W[Double](JDouble(_))
implicit val BigDecimalWriter: Writer[BigDecimal] = new W[BigDecimal](d => JDouble(d.doubleValue))
}
trait BigDecimalWriters extends DefaultWriters {
implicit val FloatWriter: Writer[Float] = new W[Float](x => JDecimal(x: Double))
implicit val DoubleWriter: Writer[Double] = new W[Double](JDecimal(_))
implicit val BigDecimalWriter: Writer[BigDecimal] = new W[BigDecimal](d => JDecimal(d))
}
object BigDecimalWriters extends BigDecimalWriters
object DoubleWriters extends DoubleWriters
object DefaultWriters extends DoubleWriters // alias for DoubleWriters
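/**
 * Editor-added usage sketch (not part of the json4s API surface): shows how a Writer for a
 * custom type can be derived from an existing one via `contramap`. `UserId` and the object
 * name below are hypothetical, introduced only for this illustration.
 */
private[json4s] object WriterContramapExample {
  import DefaultWriters._
  final case class UserId(value: Long)
  // Reuse the Long writer: UserId(42) is serialized as JInt(42).
  implicit val userIdWriter: Writer[UserId] = Writer[Long].contramap[UserId](_.value)
}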
| json4s/json4s | ast/shared/src/main/scala/org/json4s/Writer.scala | Scala | apache-2.0 | 2,668 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree.impurity
import org.apache.spark.annotation.{DeveloperApi, Since}
/**
* Class for calculating entropy during multiclass classification.
*/
@Since("1.0.0")
object Entropy extends Impurity {
private[tree] def log2(x: Double) = scala.math.log(x) / scala.math.log(2)
/**
* :: DeveloperApi ::
* information calculation for multiclass classification
* @param counts Array[Double] with counts for each label
* @param totalCount sum of counts for all labels
* @return information value, or 0 if totalCount = 0
*/
@Since("1.1.0")
@DeveloperApi
override def calculate(counts: Array[Double], totalCount: Double): Double = {
if (totalCount == 0) {
return 0
}
val numClasses = counts.length
var impurity = 0.0
var classIndex = 0
while (classIndex < numClasses) {
val classCount = counts(classIndex)
if (classCount != 0) {
val freq = classCount / totalCount
impurity -= freq * log2(freq)
}
classIndex += 1
}
impurity
}
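  /*
   * Worked example (editor-added illustration, not part of the original sources):
   * for counts = [5.0, 5.0] and totalCount = 10.0 each class has frequency 0.5, so
   * impurity = -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0 bit, the maximum for two
   * classes; a pure node such as counts = [10.0, 0.0] yields 0.0.
   */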
/**
* :: DeveloperApi ::
* variance calculation
* @param count number of instances
* @param sum sum of labels
* @param sumSquares summation of squares of the labels
* @return information value, or 0 if count = 0
*/
@Since("1.0.0")
@DeveloperApi
override def calculate(count: Double, sum: Double, sumSquares: Double): Double =
throw new UnsupportedOperationException("Entropy.calculate")
/**
* Get this impurity instance.
* This is useful for passing impurity parameters to a Strategy in Java.
*/
@Since("1.1.0")
def instance: this.type = this
}
/**
* Class for updating views of a vector of sufficient statistics,
* in order to compute impurity from a sample.
* Note: Instances of this class do not hold the data; they operate on views of the data.
* @param numClasses Number of classes for label.
*/
private[spark] class EntropyAggregator(numClasses: Int)
extends ImpurityAggregator(numClasses + 1) with Serializable {
/**
* Update stats for one (node, feature, bin) with the given label.
* @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous.
* @param offset Start index of stats for this (node, feature, bin).
*/
def update(
allStats: Array[Double],
offset: Int,
label: Double,
numSamples: Int,
sampleWeight: Double): Unit = {
if (label >= numClasses) {
throw new IllegalArgumentException(s"EntropyAggregator given label $label" +
s" but requires label < numClasses (= ${numClasses}).")
}
if (label < 0) {
throw new IllegalArgumentException(s"EntropyAggregator given label $label" +
s"but requires label is non-negative.")
}
allStats(offset + label.toInt) += numSamples * sampleWeight
allStats(offset + statsSize - 1) += numSamples
}
/**
* Get an [[ImpurityCalculator]] for a (node, feature, bin).
* @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous.
* @param offset Start index of stats for this (node, feature, bin).
*/
def getCalculator(allStats: Array[Double], offset: Int): EntropyCalculator = {
new EntropyCalculator(allStats.view(offset, offset + statsSize - 1).toArray,
allStats(offset + statsSize - 1).toLong)
}
}
/**
* Stores statistics for one (node, feature, bin) for calculating impurity.
* Unlike [[EntropyAggregator]], this class stores its own data and is for a specific
* (node, feature, bin).
* @param stats Array of sufficient statistics for a (node, feature, bin).
*/
private[spark] class EntropyCalculator(stats: Array[Double], var rawCount: Long)
extends ImpurityCalculator(stats) {
/**
* Make a deep copy of this [[ImpurityCalculator]].
*/
def copy: EntropyCalculator = new EntropyCalculator(stats.clone(), rawCount)
/**
* Calculate the impurity from the stored sufficient statistics.
*/
def calculate(): Double = Entropy.calculate(stats, stats.sum)
/**
* Weighted number of data points accounted for in the sufficient statistics.
*/
def count: Double = stats.sum
/**
* Prediction which should be made based on the sufficient statistics.
*/
def predict: Double = if (count == 0) {
0
} else {
indexOfLargestArrayElement(stats)
}
/**
* Probability of the label given by [[predict]].
*/
override def prob(label: Double): Double = {
val lbl = label.toInt
require(lbl < stats.length,
s"EntropyCalculator.prob given invalid label: $lbl (should be < ${stats.length}")
require(lbl >= 0, "Entropy does not support negative labels")
val cnt = count
if (cnt == 0) {
0
} else {
stats(lbl) / cnt
}
}
override def toString: String = s"EntropyCalculator(stats = [${stats.mkString(", ")}])"
}
| pgandhi999/spark | mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Entropy.scala | Scala | apache-2.0 | 5,656 |
package org.ucf.scala
import scala.io.Source
object Method {
/**
* The most common way to define a function is as a member of some object;
   * such a function is called a method. For example, the following code defines
   * two methods.
*/
private def processLine(filename:String, width:Int, line:String) = {
if (line.length > width) println(filename + ": " + line.trim)
}
def processFile(filename:String, width:Int) = {
val source = Source.fromFile(filename)
for (line <- source.getLines())
processLine(filename, width, line)
}
}
object LocalFunction{
/**
* Programs should be decomposed into many small functions that each do a well-defined
   * task. Each building block should be simple enough to be understood individually.
   * However, the extra helper-function names can pollute the program namespace. There
   * are two ways to avoid this:
   * 1. define a private method in an object
   * 2. define a local function inside the enclosing method; just like a local variable,
   *    it is visible only in its enclosing block.
*/
def processFile(filename:String, width:Int) = {
/**
* As a local function, processLine is in scope inside processFile,
* so it can access the parameters and local variables of its enclosing
     * function, but is inaccessible outside of it.
* @param line
*/
def processLine(line:String) = {
if (line.length > width) println(filename + ": " + line.trim)
}
val source = Source.fromFile(filename)
for (line <- source.getLines())
processLine(line)
}
}
object SpecialFunctionCall{
/**
* [[Repeated Parameters]]
* Place an asterisk after the type of the parameter.
*/
// echo: (args: String*)Unit
def echo(a:Int, args:String*) =
for (arg <- args) println(a + ":" + arg)
echo(0) // No parameter
echo(1, "one") // Only one
echo(2, "one", "two") // Only two
val arr = Array("What's", "up", "doc?")
  // echo(3, arr) // does not compile: arr is an Array[String], not a String
echo(3,arr:_*) // "_*" append the array argument. This notation
// tells the compiler to pass each element of arr
// as its own argument to echo, rather than all
// of it as a single argument.
/**
* [[Named Arguments]]
*
* In a normal function call, the arguments in the call are matched
* one by one in the order of the parameters of the called function.
*
* If you want to pass arguments to a function in a different order
* you can call it with named arguments, which does not change the
* meaning.
*/
def speed(distance:Float, time:Float):Float = distance / time
speed(100, 10)
speed(distance = 100, time = 10)
speed(time = 10, distance = 100)
/**
* [[Default Parameters Values]]
*
* Scala lets you specify default values for function parameters
*/
def printTime(out:java.io.PrintStream = Console.out, divisor:Int = 1) =
out.println("time = " + System.currentTimeMillis())
  printTime(divisor = 2) // out falls back to its default; divisor is overridden
printTime(Console.err) // specify Console.err
printTime(Console.err, 1000)
/**
* [[call-by-value]] VS [[call-by-name]]
*
* [[call-by-value]] functions compute the passed-in expression's value
* before calling the function, thus the same value is accessed every time.
*
* [[call-by-name]] functions recompute the passed-in expression's value every
* time it is accessed.
*/
// [[call-by-name]]
def callByName(x: => Int) = {
println("x1 = " + x)
println("x2 = " + x)
}
// [[call-by-value]]
def callByValue(x:Int) = {
println("x1 = " + x)
println("x2 = " + x)
}
def something() = {
println("calling something")
10
}
/**
*
* The side-effect of the passed-in function something
* call twice.
* scala> callByName(something)
* calling something
* x1 = 10
* calling something
* x2 = 10
*
*
* The side-effect of the passed-in function something
* only only happened once.
* scala> callByValue(something)
* calling something
* x1 = 10
* x2 = 10
*/
} | bingrao/Scala-Learning | Function/src/main/scala/org/ucf/scala/Function.scala | Scala | mit | 4,329 |
package fs2
package time
import org.scalacheck.Gen
import scala.concurrent.duration._
import Stream._
import fs2.util.Task
class TimeSpec extends Fs2Spec {
"time" - {
"awakeEvery" in {
time.awakeEvery[Task](100.millis).map(_.toMillis/100).take(5).runLog.unsafeRun() shouldBe Vector(1,2,3,4,5)
}
"awakeEvery liveness" in {
val s = time.awakeEvery[Task](1.milli).evalMap { i => Task.async[Unit](cb => S(cb(Right(())))) }.take(200)
runLog { concurrent.join(5)(Stream(s, s, s, s, s)) }
}
"duration" in {
val firstValueDiscrepancy = time.duration[Task].take(1).runLog.unsafeRun().last
val reasonableErrorInMillis = 200
val reasonableErrorInNanos = reasonableErrorInMillis * 1000000
def s = firstValueDiscrepancy.toNanos < reasonableErrorInNanos
withClue("first duration is near zero on first run") { assert(s) }
Thread.sleep(reasonableErrorInMillis)
withClue("first duration is near zero on second run") { assert(s) }
}
"every" in {
val smallDelay = Gen.choose(10, 300) map {_.millis}
forAll(smallDelay) { delay: FiniteDuration =>
type BD = (Boolean, FiniteDuration)
val durationSinceLastTrue: Pipe[Pure,BD,BD] = {
def go(lastTrue: FiniteDuration): Handle[Pure,BD] => Pull[Pure,BD,Handle[Pure,BD]] = h => {
h.receive1 {
case pair #: tl =>
pair match {
case (true , d) => Pull.output1((true , d - lastTrue)) >> go(d)(tl)
case (false, d) => Pull.output1((false, d - lastTrue)) >> go(lastTrue)(tl)
}
}
}
_ pull go(0.seconds)
}
val draws = (600.millis / delay) min 10 // don't take forever
val durationsSinceSpike = time.every[Task](delay).
zip(time.duration[Task]).
take(draws.toInt).
through(durationSinceLastTrue)
val result = durationsSinceSpike.runLog.unsafeRun().toList
val (head :: tail) = result
withClue("every always emits true first") { assert(head._1) }
withClue("true means the delay has passed") { assert(tail.filter(_._1).map(_._2).forall { _ >= delay }) }
withClue("false means the delay has not passed") { assert(tail.filterNot(_._1).map(_._2).forall { _ <= delay }) }
}
}
}
}
| japgolly/scalaz-stream | core/src/test/scala/fs2/time/TimeSpec.scala | Scala | mit | 2,359 |
package github.joestein.skeletor
import me.prettyprint.hector.api.{ Cluster, Keyspace => HKeyspace }
import me.prettyprint.hector.api.factory.HFactory
import me.prettyprint.hector.api.query.{ SuperSliceQuery, MultigetSliceQuery, MultigetSliceCounterQuery, CounterQuery, RangeSlicesQuery, MultigetSubSliceQuery}
import github.joestein.util.{ LogHelper }
import Conversions._
import me.prettyprint.hector.api.query.Query
import me.prettyprint.hector.api.beans.{ Rows => HectorRows, SuperSlice }
import me.prettyprint.cassandra.model.IndexedSlicesQuery
import me.prettyprint.hector.api.Serializer
object Cassandra extends LogHelper {
//https://github.com/rantav/hector/blob/master/core/src/main/java/me/prettyprint/hector/api/factory/HFactory.java
var cluster: Cluster = null
def *(name: String, servers: String) = {
cluster = HFactory.getOrCreateCluster(name, servers);
}
def connect(name: String, servers: String) = {
*(name, servers)
}
def shutdown() = {
cluster.getConnectionManager().shutdown()
}
import scala.collection.mutable.ListBuffer
import me.prettyprint.cassandra.serializers.LongSerializer
import me.prettyprint.cassandra.serializers.StringSerializer
import me.prettyprint.hector.api.ConsistencyLevelPolicy
//default write consistency
var defaultWriteConsistencyLevel: ConsistencyLevelPolicy = {
CL.ONE()
}
//default read consistency
var defaultReadConsistencyLevel: ConsistencyLevelPolicy = {
CL.ONE()
}
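  // Batch-increments counter columns: each ColumnNameValue supplies the row key, column
  // family, counter name and the amount to add; all increments run in a single mutation.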
def ++(rows: Seq[ColumnNameValue], cl: ConsistencyLevelPolicy = defaultWriteConsistencyLevel): Unit = {
val stringSerializer = StringSerializer.get()
val ksp = HFactory.createKeyspace(rows(0).ks, cluster);
ksp.setConsistencyLevelPolicy(cl) //this way you can set your own consistency level
val mutator = HFactory.createMutator(ksp, stringSerializer);
rows.foreach { cv =>
mutator.insertCounter(cv.row, cv.cf, HFactory.createCounterColumn(cv.name, cv.intValue))
}
mutator.execute()
}
def <<(rows: Seq[ColumnNameValue], cl: ConsistencyLevelPolicy = defaultWriteConsistencyLevel): Unit = {
    if (rows(0).isCounter) { //these are counter columns, so route them through ++
++(rows, cl) //this way you can set your own consistency level
} else {
val stringSerializer = StringSerializer.get()
val ksp = HFactory.createKeyspace(rows(0).ks, cluster);
ksp.setConsistencyLevelPolicy(cl) //this way you can set your own consistency level
val mutator = HFactory.createMutator(ksp, stringSerializer);
rows.foreach { cv =>
if (cv.isSuperColumn) mutator.addInsertion(cv.row, cv.cf, cv.hSuperColumn)
else mutator.addInsertion(cv.row, cv.cf, cv.hColumn)
}
mutator.execute()
}
}
def indexQuery[V](cf: ColumnFamily, settings: (IndexedSlicesQuery[String, String, V]) => Unit, proc: (String, String, V) => Unit, valueSerializer: Serializer[V], cl: ConsistencyLevelPolicy = defaultReadConsistencyLevel) = {
val stringSerializer = StringSerializer.get()
val ksp = HFactory.createKeyspace(cf.ks, cluster);
ksp.setConsistencyLevelPolicy(cl) //this way you can set your own consistency level
val query = HFactory.createIndexedSlicesQuery(ksp, stringSerializer, stringSerializer, valueSerializer)
query.setColumnFamily(cf);
settings(query); //let the caller define keys, range, count whatever they want on this CF
executeQuery(query, proc)
}
def rangeQuery(cf: ColumnFamily, settings: (MultigetSliceQuery[String, String, String]) => Unit, proc: (String, String, String) => Unit, cl: ConsistencyLevelPolicy = defaultReadConsistencyLevel) = {
val stringSerializer = StringSerializer.get()
val ksp = HFactory.createKeyspace(cf.ks, cluster);
ksp.setConsistencyLevelPolicy(cl) //this way you can set your own consistency level
val multigetSliceQuery = HFactory.createMultigetSliceQuery(ksp, stringSerializer, stringSerializer, stringSerializer)
multigetSliceQuery.setColumnFamily(cf)
settings(multigetSliceQuery) //let the caller define keys, range, count whatever they want on this CF
executeQuery(multigetSliceQuery, proc)
}
def >>(cf: ColumnFamily, settings: (MultigetSliceQuery[String, String, String]) => Unit, proc: (String, String, String) => Unit, cl: ConsistencyLevelPolicy = defaultReadConsistencyLevel) = {
rangeQuery(cf, settings, proc, cl)
}
def rangeSlicesQuery(cf: ColumnFamily, settings: (RangeSlicesQuery[String, String, String]) => Unit, proc: (String, String, String) => Unit, cl: ConsistencyLevelPolicy = defaultReadConsistencyLevel) = {
val stringSerializer = StringSerializer.get()
val ksp = HFactory.createKeyspace(cf.ks, cluster)
ksp.setConsistencyLevelPolicy(cl)
val rangeSlicesQuery = HFactory.createRangeSlicesQuery(ksp, stringSerializer, stringSerializer, stringSerializer)
rangeSlicesQuery.setColumnFamily(cf)
settings(rangeSlicesQuery)
executeQuery(rangeSlicesQuery, proc)
}
def superSliceQuery(cf: ColumnFamily, settings: (SuperSliceQuery[String, String, String, String]) => Unit, proc: (String, String, String) => Unit, cl: ConsistencyLevelPolicy = defaultReadConsistencyLevel) = {
val stringSerializer = StringSerializer.get()
val ksp = HFactory.createKeyspace(cf.ks, cluster)
ksp.setConsistencyLevelPolicy(cl)
val superSliceQuery = HFactory.createSuperSliceQuery(ksp, stringSerializer, stringSerializer, stringSerializer, stringSerializer)
superSliceQuery.setColumnFamily(cf)
settings(superSliceQuery)
executeSuperQuery(superSliceQuery, proc)
}
def multigetSubSliceQuery(cf: ColumnFamily, settings: (MultigetSubSliceQuery[String, String, String, String]) => Unit, proc: (String, String, String) => Unit, cl: ConsistencyLevelPolicy = defaultReadConsistencyLevel) = {
val stringSerializer = StringSerializer.get()
val ksp = HFactory.createKeyspace(cf.ks, cluster)
ksp.setConsistencyLevelPolicy(cl)
val multigetSubSliceQuery = HFactory.createMultigetSubSliceQuery(ksp, stringSerializer, stringSerializer, stringSerializer, stringSerializer)
multigetSubSliceQuery.setColumnFamily(cf)
settings(multigetSubSliceQuery)
executeQuery(multigetSubSliceQuery, proc)
}
def >>>(cf: ColumnFamily, settings: (RangeSlicesQuery[String, String, String]) => Unit, proc: (String, String, String) => Unit, cl: ConsistencyLevelPolicy = defaultReadConsistencyLevel) = {
rangeSlicesQuery(cf, settings, proc, cl)
}
private def executeQuery[V](query: Query[_ <: HectorRows[String, String, V]], proc: (String, String, V) => Unit) = {
val result = query.execute();
val orderedRows = result.get();
import scala.collection.JavaConversions._
for (o <- orderedRows) {
val c = o.getColumnSlice()
val d = c.getColumns()
for (l <- d) {
debug("query=" + o.getKey() + " for column=" + l.getName() + " & value=" + l.getValue())
proc(o.getKey(), l.getName(), l.getValue())
}
}
}
private def executeSuperQuery[V](query: Query[_ <: SuperSlice[String, String, V]], proc: (String, String, V) => Unit) = {
val result = query.execute()
val superSlice = result.get()
val superColumns = superSlice.getSuperColumns()
import scala.collection.JavaConversions._
for (sc <- superColumns) {
val cs = sc.getColumns()
for (c <- cs) {
proc(sc.getName(), c.getName(), c.getValue())
}
}
}
//delete a row
def delete(cnv: ColumnNameValue, cl: ConsistencyLevelPolicy = defaultWriteConsistencyLevel) = {
val stringSerializer = StringSerializer.get()
val ksp = HFactory.createKeyspace(cnv.ks, cluster);
ksp.setConsistencyLevelPolicy(cl) //this way you can set your own consistency level
val mutator = HFactory.createMutator(ksp, stringSerializer);
if (cnv.name == "")
mutator.delete(cnv.row, cnv.cf, null, stringSerializer); //setting null for column gets rid of entire row
else
      mutator.delete(cnv.row, cnv.cf, cnv.name, stringSerializer); //passing the column name deletes only that column
}
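  // Multiget counter slice query: the caller configures row keys and column ranges via
  // `sets`, then each (rowKey, counterName, count) triple is handed to `proc`.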
def >#(cf: ColumnFamily, sets: (MultigetSliceCounterQuery[String, String]) => Unit, proc: (String, String, Long) => Unit, cl: ConsistencyLevelPolicy = defaultReadConsistencyLevel) = {
val stringSerializer = StringSerializer.get()
val ksp = HFactory.createKeyspace(cf.ks, cluster);
ksp.setConsistencyLevelPolicy(cl) //this way you can set your own consistency level
val multigetCounterSliceQuery = HFactory.createMultigetSliceCounterQuery(ksp, stringSerializer, stringSerializer)
multigetCounterSliceQuery.setColumnFamily(cf);
sets(multigetCounterSliceQuery); //let the caller define keys, range, count whatever they want on this CF
val result = multigetCounterSliceQuery.execute();
val orderedRows = result.get();
debug("keyMultigetSliceCounterQuery order rows called")
import scala.collection.JavaConversions._
for (o <- orderedRows) {
val c = o.getColumnSlice()
val d = c.getColumns()
for (l <- d) {
debug("keyMultigetSliceCounterQuery=" + o.getKey() + " for column=" + l.getName() + " & value=" + l.getValue())
proc(o.getKey(), l.getName(), l.getValue())
}
}
}
def >%(cf: ColumnFamily, sets: (CounterQuery[String, String]) => Unit, proc: (Long) => Unit, cl: ConsistencyLevelPolicy = defaultReadConsistencyLevel) = {
val stringSerializer = StringSerializer.get()
val ksp = HFactory.createKeyspace(cf.ks, cluster);
ksp.setConsistencyLevelPolicy(cl) //this way you can set your own consistency level
val getCounterQuery = HFactory.createCounterColumnQuery(ksp, stringSerializer, stringSerializer)
getCounterQuery.setColumnFamily(cf)
sets(getCounterQuery); //let the caller define keys, range, count whatever they want on this CF
val result = getCounterQuery.execute();
val counter = result.get();
if (counter != null)
proc(counter.getValue())
}
}
trait Cassandra {
//https://github.com/rantav/hector/blob/master/core/src/main/java/me/prettyprint/cassandra/service/CassandraHostConfigurator.java
def ^ = {
Cassandra.cluster
}
} | joestein/skeletor | src/main/scala/skeletor/Cassandra.scala | Scala | mit | 10,835 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.console
import _root_.java.util.regex.Pattern
import _root_.javax.servlet.ServletContext
import _root_.org.fusesource.scalate.{DefaultRenderContext, RenderContext}
import _root_.org.fusesource.scalate.servlet.ServletRenderContext
import _root_.scala.Option
import java.io.File
import scala.io.Source
import collection.JavaConversions._
import collection.immutable.SortedMap
import collection.mutable.{ArrayBuffer, ListBuffer}
import util.parsing.input.{Position, OffsetPosition}
import xml.NodeSeq
import org.fusesource.scalate.util.{Log, SourceMapInstaller, SourceMap}
case class SourceLine(line: Int, source: String) {
def style(errorLine: Int): String = if (line == errorLine) "line error" else "line"
def nonBlank = source != null && source.length > 0
/**
* Return a tuple of the prefix, the error character and the postfix of this source line
* to highlight the error at the given column
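   * For example, SourceLine(1, "hello").splitOnCharacter(2) yields ("he", "l", "lo").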
*/
def splitOnCharacter(col: Int): Tuple3[String, String, String] = {
val length = source.length
if (col >= length) {
(source, "", "")
}
else {
val next = col + 1
val prefix = source.substring(0, col)
val ch = if (col < length) source.substring(col, next) else ""
val postfix = if (next < length) source.substring(next, length) else ""
(prefix, ch, postfix)
}
}
}
object ConsoleHelper extends Log
/**
* Helper snippets for creating the console
*
* @version $Revision : 1.1 $
*/
class ConsoleHelper(context: DefaultRenderContext) extends ConsoleSnippets {
import ConsoleHelper._
import context._
val consoleParameter = "_scalate"
def servletContext: ServletContext = context.asInstanceOf[ServletRenderContext].servletContext
def renderContext = context
// TODO figure out the viewName from the current template?
def viewName = "index"
/**
* Returns the class name of the current resource
*/
def resourceClassName: Option[String] = attributes.get("it") match {
case Some(it: AnyRef) => Some(it.getClass.getName)
case _ => None
}
def isDevelopmentMode = context.engine.isDevelopmentMode
/**
* Returns an attempt at finding the source file for the current resource.
*
* TODO use bytecode swizzling to find the accurate name from the debug info in
* the class file!
*/
def resourceSourceFile: Option[File] = resourceClassName match {
case Some(name: String) =>
val fileName = name.replace('.', '/')
val prefixes = List("src/main/scala/", "src/main/java/")
val postfixes = List(".scala", ".java")
val names = for (prefix <- prefixes; postfix <- postfixes) yield new File(prefix + fileName + postfix)
names.find(_.exists)
case _ => None
}
/**
* Returns all the available archetypes for the current view name
*/
def archetypes: Array[Archetype] = {
val dir = "/WEB-INF/archetypes/" + viewName
var files: Array[File] = Array()
val fileName = realPath(dir)
if (fileName != null) {
val file = new File(fileName)
if (file.exists && file.isDirectory) {
files = file.listFiles
}
}
files.map(f => new Archetype(new File(dir, f.getName)))
}
/**
* Creates the newly created template name if there can be one for the current resource
*/
def newTemplateName(): Option[String] = resourceClassName match {
case Some(resource) =>
val prefix = "/" + resource.replace('.', '/') + "."
if (templates.exists(_.startsWith(prefix)) == false) {
Some(prefix + viewName)
}
else {
None
}
case _ => None
}
/**
* Returns the current template names used in the current context
*/
def templates: List[String] = attributes.get("scalateTemplates") match {
case Some(list: List[String]) => list.distinct.sortWith(_ < _)
case _ => Nil
}
/**
* Returns the current layouts used in the current context
*/
def layouts: List[String] = attributes.get("scalateLayouts") match {
case Some(list: List[String]) => list.distinct.sortWith(_ < _)
case _ => Nil
}
/**
* Returns true if the option is enabled
*/
def optionEnabled(name: String): Boolean = context.asInstanceOf[ServletRenderContext].parameterValues(consoleParameter).contains(name)
/**
* Link to the current page with the option enabled
*/
def enableLink(name: String): String = context.asInstanceOf[ServletRenderContext].currentUriPlus(consoleParameter + "=" + name)
/**
* Link to the current page with the option disabled
*/
  def disableLink(name: String): String = context.asInstanceOf[ServletRenderContext].currentUriMinus(consoleParameter + "=" + name)
/**
* Retrieves a chunk of lines either side of the given error line
*/
def lines(template: String, errorLine: Int, chunk: Int): Seq[SourceLine] = {
val file = realPath(template)
if (file != null) {
val source = Source.fromFile(file)
      val lines = source.getLines().toIndexedSeq
      // Clamp the window to the file bounds, mirroring the OffsetPosition overload below.
      val start = (errorLine - chunk).max(1)
      val end = (errorLine + chunk).min(lines.length)
      val list = new ListBuffer[SourceLine]
      for (i <- start to end) {
        // getLines() is zero-based while source line numbers are one-based.
        list += SourceLine(i, lines(i - 1))
      }
list
}
else {
Nil
}
}
/**
* Retrieves a chunk of lines either side of the given error line
*/
def lines(template: String, pos: Position, chunk: Int = 5): Seq[SourceLine] = {
pos match {
case op: OffsetPosition =>
// OffsetPosition's already are holding onto the file contents
val index: Array[String] = {
val source = op.source
var rc = new ArrayBuffer[String]
var start = 0;
for (i <- 0 until source.length) {
          if (source.charAt(i) == '\n') {
rc += source.subSequence(start, i).toString.stripLineEnd
start = i + 1
}
}
rc.toArray
}
val start = (pos.line - chunk).max(1)
val end = (pos.line + chunk).min(index.length)
val list = new ListBuffer[SourceLine]
for (i <- start to end) {
list += SourceLine(i, index(i - 1))
}
list
case _ =>
// We need to manually load the file..
lines(template, pos.line, chunk)
}
}
def systemProperties: SortedMap[String,String] = {
// TODO is there a better way?
val m: Map[String,String] = System.getProperties.toMap
SortedMap(m.iterator.toSeq :_*)
}
// Error Handling helper methods
//-------------------------------------------------------------------------
def exception = attributes("javax.servlet.error.exception")
def errorMessage = attributeOrElse("javax.servlet.error.message", "")
def errorRequestUri = attributeOrElse("javax.servlet.error.request_uri", "")
def errorCode = attributeOrElse("javax.servlet.error.status_code", 500)
def renderStackTraceElement(stack:StackTraceElement): NodeSeq = {
var rc:NodeSeq = null
// Does it look like a scalate template class??
var className = stack.getClassName.split(Pattern.quote(".")).last
if( className.startsWith("$_scalate_$") ) {
// Then try to load it's smap info..
var file = RenderContext().engine.bytecodeDirectory
file = new File(file, stack.getClassName.replace('.', '/')+".class")
try {
val smap = SourceMap.parse(SourceMapInstaller.load(file))
// And then render a link to the original template file.
smap.mapToStratum(stack.getLineNumber) match {
case None =>
case Some((file, line)) =>
rc = editLink(file, Some(line), Some(1)) {
RenderContext() << <pre class="stacktrace">at ({file}:{line})</pre>
}
}
} catch {
// ignore errors trying to load the smap... we can fallback
// to rendering a plain stack line.
case e:Throwable=>
}
}
if( rc==null )
<pre class="stacktrace">at {stack.getClassName}.{stack.getMethodName}({stack.getFileName}:{stack.getLineNumber})</pre>
else
rc
}
}
| dnatic09/scalate | scalate-core/src/main/scala/org/fusesource/scalate/console/ConsoleHelper.scala | Scala | apache-2.0 | 8,872 |
package io.getquill.h2
import io.getquill.context.sql.ProductSpec
import monix.execution.Scheduler
class ProductJdbcSpec extends ProductSpec {
val context = testContext
import testContext._
implicit val scheduler = Scheduler.global
override def beforeAll = {
testContext.run(quote(query[Product].delete)).runSyncUnsafe()
()
}
"Product" - {
"Insert multiple products" in {
val (inserted, product) =
(for {
i <- testContext.run(liftQuery(productEntries).foreach(e => productInsert(e)))
ps <- testContext.run(productById(lift(i(2))))
} yield (i, ps.head)).runSyncUnsafe()
product.description mustEqual productEntries(2).description
product.id mustEqual inserted(2)
}
"Single insert product" in {
val (inserted, product) =
(for {
i <- testContext.run(productSingleInsert)
ps <- testContext.run(productById(lift(i)))
} yield (i, ps.head)).runSyncUnsafe()
product.description mustEqual "Window"
product.id mustEqual inserted
}
"Single insert with inlined free variable" in {
val prd = Product(0L, "test1", 1L)
val (inserted, returnedProduct) =
(for {
i <- testContext.run {
product.insert(_.sku -> lift(prd.sku), _.description -> lift(prd.description)).returningGenerated(_.id)
}
rps <- testContext.run(productById(lift(i)))
} yield (i, rps.head)).runSyncUnsafe()
returnedProduct.description mustEqual "test1"
returnedProduct.sku mustEqual 1L
returnedProduct.id mustEqual inserted
}
"Single insert with free variable and explicit quotation" in {
val prd = Product(0L, "test2", 2L)
val q1 = quote {
product.insert(_.sku -> lift(prd.sku), _.description -> lift(prd.description)).returningGenerated(_.id)
}
val (inserted, returnedProduct) =
(for {
i <- testContext.run(q1)
rps <- testContext.run(productById(lift(i)))
} yield (i, rps.head)).runSyncUnsafe()
returnedProduct.description mustEqual "test2"
returnedProduct.sku mustEqual 2L
returnedProduct.id mustEqual inserted
}
"Single product insert with a method quotation" in {
val prd = Product(0L, "test3", 3L)
val (inserted, returnedProduct) =
(for {
i <- testContext.run(productInsert(lift(prd)))
rps <- testContext.run(productById(lift(i)))
} yield (i, rps.head)).runSyncUnsafe()
returnedProduct.description mustEqual "test3"
returnedProduct.sku mustEqual 3L
returnedProduct.id mustEqual inserted
}
}
}
| getquill/quill | quill-jdbc-monix/src/test/scala/io/getquill/h2/ProductJdbcSpec.scala | Scala | apache-2.0 | 2,674 |
package io.flow.dependency.api.lib
import io.flow.dependency.v0.models.{Credentials, CredentialsUndefinedType, UsernamePassword}
import org.htmlcleaner.HtmlCleaner
import org.apache.commons.codec.binary.Base64
import org.apache.commons.lang3.StringUtils
import org.apache.commons.text.StringEscapeUtils
import java.net.URL
import scala.util.{Failure, Success, Try}
/**
* Accepts the URI of a resolver
* (e.g. https://oss.sonatype.org/content/repositories/snapshots) and
* parsers the contents in a list of files and
* directories. Intentionally NOT recursive.
*/
object RemoteDirectory {
case class Result(
directories: Seq[String] = Nil,
files: Seq[String] = Nil
)
def fetch(
url: String,
credentials: Option[Credentials] = None
) (
filter: String => Boolean = { !_.startsWith(".") }
): Result = {
val base = Result()
val cleaner = new HtmlCleaner()
val uc = (new URL(url)).openConnection()
credentials.map { cred =>
cred match {
case UsernamePassword(username, password) =>{
val userpass = username + ":" + password.getOrElse("")
val basicAuth = "Basic " + new String(new Base64().encode(userpass.getBytes()))
uc.setRequestProperty ("Authorization", basicAuth)
}
case CredentialsUndefinedType(_) => {
// No-op
}
}
}
Try(cleaner.clean(uc.getInputStream())) match {
case Failure(_) => {
base
}
case Success(rootNode) => {
rootNode.getElementsByName("a", true).foldLeft(base) { case (result, elem) =>
Option(elem.getAttributeByName("href")) match {
case None => {
result
}
case Some(_) => {
val text = StringEscapeUtils.unescapeHtml4(elem.getText.toString)
filter(StringUtils.stripEnd(text, "/")) match {
case false => {
result
}
case true => {
text.endsWith("/") match {
case true => result.copy(directories = result.directories ++ Seq(text))
case false => result.copy(files = result.files ++ Seq(text))
}
}
}
}
}
}
}
}
}
}
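/**
 * Editor-added usage sketch (not part of the original sources): lists the immediate
 * directories and files reported by a resolver's index page. The object name and the
 * resolver URL below are illustrative only.
 */
object RemoteDirectoryExample {
  def main(args: Array[String]): Unit = {
    val result = RemoteDirectory.fetch("https://oss.sonatype.org/content/repositories/snapshots/") { name =>
      !name.startsWith(".")
    }
    result.directories.foreach(d => println(s"dir:  $d"))
    result.files.foreach(f => println(s"file: $f"))
  }
}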
| flowcommerce/dependency | api/app/lib/RemoteDirectory.scala | Scala | mit | 2,317 |
package com.wlangiewicz.xbot.domain
case class Ticker(max: Long, min: Long, last: Long, bid: Long, ask: Long, vwap: Long, average: Long, volume: Long) | wlk/xbot | src/main/scala/com/wlangiewicz/xbot/domain/Ticker.scala | Scala | apache-2.0 | 151 |
package com.olvind.crud
package server
import slick.driver.JdbcDriver
trait integrationSlick {
val driver: JdbcDriver
}
| elacin/slick-crud | crud/jvm/src/main/scala/com/olvind/crud/server/integrationSlick.scala | Scala | apache-2.0 | 124 |
package io.buoyant.linkerd
package protocol
import com.twitter.conversions.time._
import com.twitter.finagle.{Http => FinagleHttp, Status => _, http => _, _}
import com.twitter.finagle.buoyant.linkerd.Headers
import com.twitter.finagle.http.{param => _, _}
import com.twitter.finagle.http.Method._
import com.twitter.finagle.stats.{InMemoryStatsReceiver, NullStatsReceiver}
import com.twitter.finagle.tracing.{Annotation, BufferingTracer, NullTracer}
import com.twitter.util._
import io.buoyant.router.{Http, RoutingFactory}
import io.buoyant.router.http.MethodAndHostIdentifier
import io.buoyant.test.Awaits
import java.net.InetSocketAddress
import org.scalatest.FunSuite
class HttpEndToEndTest extends FunSuite with Awaits {
case class Downstream(name: String, server: ListeningServer) {
val address = server.boundAddress.asInstanceOf[InetSocketAddress]
val port = address.getPort
val dentry = Dentry(
Path.read(s"/svs/$name"),
NameTree.read(s"/$$/inet/127.1/$port")
)
}
object Downstream {
def mk(name: String)(f: Request=>Response): Downstream = {
val service = Service.mk { req: Request => Future(f(req)) }
val stack = FinagleHttp.server.stack.remove(Headers.Ctx.serverModule.role)
val server = FinagleHttp.server.withStack(stack)
.configured(param.Label(name))
.configured(param.Tracer(NullTracer))
.serve(":*", service)
Downstream(name, server)
}
def const(name: String, value: String, status: Status = Status.Ok): Downstream =
mk(name) { _ =>
val rsp = Response()
rsp.status = status
rsp.contentString = value
rsp
}
}
def upstream(server: ListeningServer) = {
val address = Address(server.boundAddress.asInstanceOf[InetSocketAddress])
val name = Name.Bound(Var.value(Addr.Bound(address)), address)
val stack = FinagleHttp.client.stack.remove(Headers.Ctx.clientModule.role)
FinagleHttp.client.withStack(stack)
.configured(param.Stats(NullStatsReceiver))
.configured(param.Tracer(NullTracer))
.newClient(name, "upstream").toService
}
def basicConfig(dtab: Dtab) =
s"""|routers:
|- protocol: http
| dtab: ${dtab.show}
| servers:
| - port: 0
|""".stripMargin
def annotationKeys(annotations: Seq[Annotation]): Seq[String] =
annotations.collect {
case Annotation.ClientSend() => "cs"
case Annotation.ClientRecv() => "cr"
case Annotation.ServerSend() => "ss"
case Annotation.ServerRecv() => "sr"
case Annotation.WireSend => "ws"
case Annotation.WireRecv => "wr"
case Annotation.BinaryAnnotation(k, _) if k == "l5d.success" => k
case Annotation.Message(m) if Seq("l5d.retryable", "l5d.failure").contains(m) => m
}
test("linking") {
val stats = NullStatsReceiver
val tracer = new BufferingTracer
def withAnnotations(f: Seq[Annotation] => Unit): Unit = {
f(tracer.iterator.map(_.annotation).toSeq)
tracer.clear()
}
val cat = Downstream.const("cat", "meow")
val dog = Downstream.const("dog", "woof")
val dtab = Dtab.read(s"""
/p/cat => /$$/inet/127.1/${cat.port} ;
/p/dog => /$$/inet/127.1/${dog.port} ;
/svc/felix => /p/cat ;
/svc/clifford => /p/dog ;
""")
val linker = Linker.Initializers(Seq(HttpInitializer)).load(basicConfig(dtab))
.configured(param.Stats(stats))
.configured(param.Tracer(tracer))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
def get(host: String, path: String = "/")(f: Response => Unit): Unit = {
val req = Request()
req.host = host
req.uri = path
val rsp = await(client(req))
f(rsp)
}
try {
get("felix") { rsp =>
assert(rsp.status == Status.Ok)
assert(rsp.contentString == "meow")
val path = "/svc/felix"
val bound = s"/$$/inet/127.1/${cat.port}"
withAnnotations { anns =>
assert(annotationKeys(anns) == Seq("sr", "cs", "ws", "wr", "l5d.success", "cr", "ss"))
assert(anns.contains(Annotation.BinaryAnnotation("namer.path", path)))
assert(anns.contains(Annotation.BinaryAnnotation("dst.id", bound)))
assert(anns.contains(Annotation.BinaryAnnotation("dst.path", "/")))
}
}
get("ralph-machio") { rsp =>
assert(rsp.status == Status.BadGateway)
assert(rsp.headerMap.contains(Headers.Err.Key))
}
get("") { rsp =>
assert(rsp.status == Status.BadRequest)
assert(rsp.headerMap.contains(Headers.Err.Key))
}
// todo check stats
} finally {
await(client.close())
await(cat.server.close())
await(dog.server.close())
await(server.close())
await(router.close())
}
}
test("marks 5XX as failure by default") {
val stats = new InMemoryStatsReceiver
val tracer = NullTracer
val downstream = Downstream.mk("dog") {
case req if req.path == "/woof" =>
val rsp = Response()
rsp.status = Status.Ok
rsp.contentString = "woof"
rsp
case _ =>
val rsp = Response()
rsp.status = Status.InternalServerError
rsp
}
val label = s"$$/inet/127.1/${downstream.port}"
val dtab = Dtab.read(s"/svc/dog => /$label;")
val linker = Linker.Initializers(Seq(HttpInitializer)).load(basicConfig(dtab))
.configured(param.Stats(stats))
.configured(param.Tracer(tracer))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
try {
val okreq = Request()
okreq.host = "dog"
okreq.uri = "/woof"
val okrsp = await(client(okreq))
assert(okrsp.status == Status.Ok)
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "requests")) == Some(1))
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "success")) == Some(1))
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "failures")) == None)
assert(stats.counters.get(Seq("http", "dst", "id", label, "requests")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "id", label, "success")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "id", label, "failures")) == None)
val errreq = Request()
errreq.host = "dog"
val errrsp = await(client(errreq))
assert(errrsp.status == Status.InternalServerError)
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "requests")) == Some(2))
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "success")) == Some(1))
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "failures")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "id", label, "requests")) == Some(2))
assert(stats.counters.get(Seq("http", "dst", "id", label, "success")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "id", label, "failures")) == Some(1))
} finally {
await(client.close())
await(downstream.server.close())
await(server.close())
await(router.close())
}
}
val allMethods = Set[Method](Connect, Delete, Get, Head, Patch, Post, Put, Options, Trace)
val readMethods = Set[Method](Get, Head, Options, Trace)
val idempotentMethods = readMethods ++ Set[Method](Delete, Put)
def retryTest(kind: String, methods: Set[Method]): Unit = {
val stats = new InMemoryStatsReceiver
val tracer = new BufferingTracer
def withAnnotations(f: Seq[Annotation] => Unit): Unit = {
f(tracer.iterator.map(_.annotation).toSeq)
tracer.clear()
}
@volatile var failNext = false
val downstream = Downstream.mk("dog") { req =>
val rsp = Response()
rsp.status = if (failNext) Status.InternalServerError else Status.Ok
failNext = false
rsp
}
val label = s"$$/inet/127.1/${downstream.port}"
val dtab = Dtab.read(s"/svc/dog => /$label;")
val yaml =
s"""|routers:
|- protocol: http
| dtab: ${dtab.show}
| responseClassifier:
| kind: $kind
| servers:
| - port: 0
|""".stripMargin
val linker = Linker.load(yaml)
.configured(param.Stats(stats))
.configured(param.Tracer(tracer))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
try {
// retryable request, fails and is retried
for (method <- methods) {
val req = Request()
req.method = method
req.host = "dog"
failNext = true
stats.clear()
val rsp = await(client(req))
assert(rsp.status == Status.Ok)
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "requests")) == Some(1))
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "success")) == Some(1))
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "failures")) == None)
assert(stats.counters.get(Seq("http", "dst", "id", label, "requests")) == Some(2))
assert(stats.counters.get(Seq("http", "dst", "id", label, "success")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "id", label, "failures")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "id", label, "status", "200")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "id", label, "status", "500")) == Some(1))
val name = "svc/dog"
assert(stats.counters.get(Seq("http", "dst", "path", name, "requests")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "path", name, "success")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "path", name, "failures")) == None)
assert(stats.stats.get(Seq("http", "dst", "path", name, "retries", "per_request")) == Some(Seq(1.0)))
assert(stats.counters.get(Seq("http", "dst", "path", name, "retries", "total")) == Some(1))
withAnnotations { anns =>
assert(annotationKeys(anns) == Seq("sr", "cs", "ws", "wr", "l5d.retryable", "cr", "cs", "ws", "wr", "l5d.success", "cr", "ss"))
}
}
// non-retryable request, fails and is not retried
for (method <- allMethods -- methods) {
val req = Request()
req.method = method
req.host = "dog"
failNext = true
stats.clear()
val rsp = await(client(req))
assert(rsp.status == Status.InternalServerError)
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "requests")) == Some(1))
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "success")) == None)
assert(stats.counters.get(Seq("http", "srv", "127.0.0.1/0", "failures")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "id", label, "requests")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "id", label, "success")) == None)
assert(stats.counters.get(Seq("http", "dst", "id", label, "failures")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "id", label, "status", "200")) == None)
assert(stats.counters.get(Seq("http", "dst", "id", label, "status", "500")) == Some(1))
val name = s"svc/dog"
assert(stats.counters.get(Seq("http", "dst", "path", name, "requests")) == Some(1))
assert(stats.counters.get(Seq("http", "dst", "path", name, "success")) == None)
assert(stats.counters.get(Seq("http", "dst", "path", name, "failures")) == Some(1))
assert(stats.stats.get(Seq("http", "dst", "path", name, "retries", "per_request")) == Some(Seq(0.0)))
assert(!stats.counters.contains(Seq("http", "dst", "path", name, "retries", "total")))
withAnnotations { anns =>
assert(annotationKeys(anns) == Seq("sr", "cs", "ws", "wr", "l5d.failure", "cr", "ss"))
}
}
} finally {
await(client.close())
await(downstream.server.close())
await(server.close())
await(router.close())
}
}
test("retries retryableIdempotent5XX") {
retryTest("io.l5d.retryableIdempotent5XX", idempotentMethods)
}
test("retries retryablRead5XX") {
retryTest("io.l5d.retryableRead5XX", readMethods)
}
test("retries nonRetryable5XX") {
retryTest("io.l5d.nonRetryable5XX", Set.empty)
}
val dtabReadHeaders = Seq("l5d-dtab", "l5d-ctx-dtab")
val dtabWriteHeader = "l5d-ctx-dtab"
for (readHeader <- dtabReadHeaders) test(s"dtab read from $readHeader header") {
val stats = NullStatsReceiver
val tracer = new BufferingTracer
@volatile var headers: HeaderMap = null
val dog = Downstream.mk("dog") { req =>
headers = req.headerMap
req.response
}
val dtab = Dtab.read(s"""
/svc/* => /$$/inet/127.1/${dog.port} ;
""")
val linker = Linker.Initializers(Seq(HttpInitializer)).load(basicConfig(dtab))
.configured(param.Stats(stats))
.configured(param.Tracer(tracer))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
val req = Request()
req.host = "dog"
req.headerMap.set(readHeader, "/a=>/b")
await(client(req))
for (header <- dtabReadHeaders) {
if (header == dtabWriteHeader) assert(headers(header) == "/a=>/b")
else assert(!headers.contains(header))
}
assert(!headers.contains("dtab-local"))
}
test("dtab-local header is ignored") {
val stats = NullStatsReceiver
val tracer = new BufferingTracer
@volatile var headers: HeaderMap = null
val dog = Downstream.mk("dog") { req =>
headers = req.headerMap
req.response
}
val dtab = Dtab.read(s"""
/svc/* => /$$/inet/127.1/${dog.port} ;
""")
val linker = Linker.Initializers(Seq(HttpInitializer)).load(basicConfig(dtab))
.configured(param.Stats(stats))
.configured(param.Tracer(tracer))
val router = linker.routers.head.initialize()
val server = router.servers.head.serve()
val client = upstream(server)
val req = Request()
req.host = "dog"
req.headerMap.set("dtab-local", "/a=>/b")
await(client(req))
assert(headers("dtab-local") == "/a=>/b")
assert(!headers.contains(dtabWriteHeader))
}
test("with clearContext") {
val downstream = Downstream.mk("dog") { req =>
val rsp = Response()
rsp.contentString = req.headerMap.collect {
case (k, v) if k.startsWith("l5d-") => s"$k=$v"
}.mkString(",")
rsp
}
val localDtab = "/foo=>/bar"
val req = Request()
req.host = "test"
req.headerMap("l5d-dtab") = localDtab
req.headerMap("l5d-ctx-thing") = "yoooooo"
val yaml =
s"""|routers:
|- protocol: http
| dtab: /svc/* => /$$/inet/127.1/${downstream.port}
| servers:
| - port: 0
| clearContext: true
|""".stripMargin
val linker = Linker.load(yaml)
val router = linker.routers.head.initialize()
val s = router.servers.head.serve()
val body =
try {
val c = upstream(s)
try await(c(req)).contentString
finally await(c.close())
} finally await(s.close())
val headers =
body.split(",").map { kv =>
val Array(k, v) = kv.split("=", 2)
k -> v
}.toMap
assert(headers.keySet == Set(
"l5d-dst-logical",
"l5d-dst-concrete",
"l5d-reqid",
"l5d-ctx-trace"
))
}
test("without clearContext") {
val downstream = Downstream.mk("dog") { req =>
val rsp = Response()
rsp.contentString = req.headerMap.collect {
case (k, v) if k.startsWith("l5d-") => s"$k=$v"
}.mkString(",")
rsp
}
val localDtab = "/foo=>/bar"
val req = Request()
req.host = "test"
req.headerMap("l5d-dtab") = localDtab
req.headerMap("l5d-ctx-thing") = "yoooooo"
val yaml =
s"""|routers:
|- protocol: http
| dtab: /svc/* => /$$/inet/127.1/${downstream.port}
| servers:
| - port: 0
|""".stripMargin
val linker = Linker.load(yaml)
val router = linker.routers.head.initialize()
val s = router.servers.head.serve()
val body =
try {
val c = upstream(s)
try await(c(req)).contentString
finally await(c.close())
} finally await(s.close())
val headers =
body.split(",").map { kv =>
val Array(k, v) = kv.split("=", 2)
k -> v
}.toMap
assert(headers.keySet == Set(
"l5d-dst-logical",
"l5d-dst-concrete",
"l5d-reqid",
"l5d-ctx-dtab",
"l5d-ctx-trace",
"l5d-ctx-thing"
))
assert(headers.get("l5d-ctx-dtab") == Some(localDtab))
}
}
| hhtpcd/linkerd | linkerd/protocol/http/src/e2e/scala/io/buoyant/linkerd/protocol/HttpEndToEndTest.scala | Scala | apache-2.0 | 16,864 |
package cgta.oscala
package util
import cgta.otest.FunSuite
//////////////////////////////////////////////////////////////
// Copyright (c) 2013 Ben Jackman
// All Rights Reserved
// please contact [email protected]
// for licensing inquiries
// Created by bjackman @ 9/23/13 5:44 PM
//////////////////////////////////////////////////////////////
object TestBin extends FunSuite {
case class TestException(msg: String) extends Exception {
override def toString() = s"TestException($msg)"
}
test("Attempt") {
Assert.isEquals(BinSome(5), Bin.attempt(5))
Assert.isEquals(BinNone(BinError(TestException("X"))), Bin.attempt(throw TestException("X")))
}
test("Get") {
Assert.isAnyEquals(5, BinSome(5).get)
Assert.intercepts[NoSuchElementException](BinNone().get)
}
test("GetError") {
Assert.intercepts[NoSuchElementException](BinSome(5).getError)
Assert.intercepts[NoSuchElementException](BinNone().getError)
Assert.isEquals(BinError("X"), BinNone(BinError("X")).getError)
}
test("Error") {
Assert.isEquals(Some(BinError("X")), BinNone(BinError("X")).error)
Assert.isEquals(None, BinNone().error)
Assert.isEquals(None, BinSome(5).error)
}
test("WithMsg") {
Assert.isEquals(BinNone("Y"), BinNone(BinError("X")).setMsg("Y"))
Assert.isEquals(BinSome(5), BinSome(5).setMsg("Y"))
Assert.isEquals(BinNone("Y"), BinNone().setMsg("Y"))
}
test("GetOrElse") {
Assert.isEquals(5, BinSome(5).getOrElse(4))
Assert.isEquals(4, BinNone().getOrElse(4))
}
test("OrElse") {
Assert.isEquals(BinSome(5), BinSome(5).orElse(BinSome(6)))
Assert.isEquals(BinSome(6), BinNone().orElse(BinSome(6)))
}
test("Filter") {
Assert.isEquals(BinSome(5), BinSome(5).filter(5 == _).filter(5 == _))
Assert.isEquals(BinNone(), BinSome(5).filter(5 == _).filter(6 == _))
Assert.isEquals(BinNone(), BinSome(5).filter(6 == _).filter(5 == _))
Assert.isEquals(BinNone(), BinSome(5).filter(6 == _))
Assert.isEquals(BinNone(), BinNone().filter(_ == 5))
}
test("WithFilter") {
Assert.isEquals(BinSome(5), for {x <- BinSome(5) if x == 5} yield x)
Assert.isEquals(BinSome(5), for {x <- BinSome(5) if x == 5 if x == 5} yield x)
Assert.isEquals(BinNone(), for {x <- BinSome(5) if x == 6 if x == 5} yield x)
Assert.isEquals(BinNone(), for {x <- BinSome(5) if x == 5 if x == 6} yield x)
Assert.isEquals(BinNone(), for {x <- BinSome(5) if x == 6} yield x)
}
test("BinNoneApplys") {
Assert.isEquals(BinNone(None), BinNone())
Assert.isEquals(BinNone(Some(BinError("Y"))), BinNone("Y"))
Assert.isEquals(BinNone(Some(BinError(TestException("Y")))), BinNone(TestException("Y")))
}
test("BinMultiOk") {
val z = for {x <- Bin.attempt(5)
y <- Bin.attempt(6)} yield {
x -> y
}
Assert.isEquals(BinSome(5 -> 6), z)
}
test("BinFilterNone") {
val z = for {x <- Bin.attempt(5)
if x == 6
y <- Bin.attempt(6)} yield {
x -> y
}
Assert.isEquals(BinNone(), z)
}
test("BinMultiError1") {
val z = for {x <- Bin.attempt[Int](throw TestException("x"))
y <- Bin.attempt[Int](throw TestException("y"))} yield {
x -> y
}
Assert.isEquals(BinNone(TestException("x")), z)
}
test("BinMultiError2") {
val z = for {x <- Bin.attempt[Int](5)
y <- Bin.attempt[Int](throw TestException("y"))} yield {
x -> y
}
Assert.isEquals(BinNone(TestException("y")), z)
}
} | cgta/open | oscala/shared/src/test/scala/cgta/oscala/util/TestBin.scala | Scala | mit | 3,525 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger}
import uk.gov.hmrc.ct.computations.calculations.ProfitAndLossCalculator
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class CP14(value: Int) extends CtBoxIdentifier(name = "Gross profit or loss") with CtInteger
object CP14 extends Calculated[CP14, ComputationsBoxRetriever] with ProfitAndLossCalculator {
override def calculate(boxRetriever: ComputationsBoxRetriever): CP14 =
calculateProfitOrLoss(boxRetriever.cp7, boxRetriever.cp8)
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP14.scala | Scala | apache-2.0 | 1,183 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.util.{HashMap => JHashMap}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import org.apache.spark.Dependency
import org.apache.spark.OneToOneDependency
import org.apache.spark.Partition
import org.apache.spark.Partitioner
import org.apache.spark.ShuffleDependency
import org.apache.spark.SparkEnv
import org.apache.spark.TaskContext
import org.apache.spark.serializer.Serializer
/**
* An optimized version of cogroup for set difference/subtraction.
*
* It is possible to implement this operation with just `cogroup`, but
* that is less efficient because all of the entries from `rdd2`, for
* both matching and non-matching values in `rdd1`, are kept in the
* JHashMap until the end.
*
* With this implementation, only the entries from `rdd1` are kept in-memory,
* and the entries from `rdd2` are essentially streamed, as we only need to
* touch each once to decide if the value needs to be removed.
*
* This is particularly helpful when `rdd1` is much smaller than `rdd2`, as
* you can use `rdd1`'s partitioner/partition size and not worry about running
* out of memory because of the size of `rdd2`.
*/
private[spark] class SubtractedRDD[K: ClassTag, V: ClassTag, W: ClassTag](
@transient var rdd1: RDD[_ <: Product2[K, V]],
@transient var rdd2: RDD[_ <: Product2[K, W]],
part: Partitioner)
extends RDD[(K, V)](rdd1.context, Nil) {
private var serializer: Option[Serializer] = None
/** Set a serializer for this RDD's shuffle, or null to use the default (spark.serializer) */
def setSerializer(serializer: Serializer): SubtractedRDD[K, V, W] = {
this.serializer = Option(serializer)
this
}
override def getDependencies: Seq[Dependency[_]] = {
def rddDependency[T1: ClassTag, T2: ClassTag](rdd: RDD[_ <: Product2[T1, T2]])
: Dependency[_] = {
if (rdd.partitioner == Some(part)) {
logDebug("Adding one-to-one dependency with " + rdd)
new OneToOneDependency(rdd)
} else {
logDebug("Adding shuffle dependency with " + rdd)
new ShuffleDependency[T1, T2, Any](rdd, part, serializer)
}
}
Seq(rddDependency[K, V](rdd1), rddDependency[K, W](rdd2))
}
override def getPartitions: Array[Partition] = {
val array = new Array[Partition](part.numPartitions)
for (i <- 0 until array.length) {
// Each CoGroupPartition will depend on rdd1 and rdd2
array(i) = new CoGroupPartition(i, Seq(rdd1, rdd2).zipWithIndex.map { case (rdd, j) =>
dependencies(j) match {
case s: ShuffleDependency[_, _, _] =>
None
case _ =>
Some(new NarrowCoGroupSplitDep(rdd, i, rdd.partitions(i)))
}
}.toArray)
}
array
}
override val partitioner = Some(part)
override def compute(p: Partition, context: TaskContext): Iterator[(K, V)] = {
val partition = p.asInstanceOf[CoGroupPartition]
val map = new JHashMap[K, ArrayBuffer[V]]
def getSeq(k: K): ArrayBuffer[V] = {
val seq = map.get(k)
if (seq != null) {
seq
} else {
val seq = new ArrayBuffer[V]()
map.put(k, seq)
seq
}
}
def integrate(depNum: Int, op: Product2[K, V] => Unit): Unit = {
dependencies(depNum) match {
case oneToOneDependency: OneToOneDependency[_] =>
val dependencyPartition = partition.narrowDeps(depNum).get.split
oneToOneDependency.rdd.iterator(dependencyPartition, context)
.asInstanceOf[Iterator[Product2[K, V]]].foreach(op)
case shuffleDependency: ShuffleDependency[_, _, _] =>
val iter = SparkEnv.get.shuffleManager
.getReader(
shuffleDependency.shuffleHandle, partition.index, partition.index + 1, context)
.read()
iter.foreach(op)
}
}
// the first dep is rdd1; add all values to the map
integrate(0, t => getSeq(t._1) += t._2)
// the second dep is rdd2; remove all of its keys
integrate(1, t => map.remove(t._1))
map.asScala.iterator.map(t => t._2.iterator.map((t._1, _))).flatten
}
override def clearDependencies() {
super.clearDependencies()
rdd1 = null
rdd2 = null
}
}
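
// Illustrative sketch (not part of this file): SubtractedRDD backs `subtractByKey` on pair RDDs, e.g.
//
//   val rdd1 = sc.parallelize(Seq("a" -> 1, "b" -> 2, "c" -> 3))
//   val rdd2 = sc.parallelize(Seq("b" -> 99))
//   rdd1.subtractByKey(rdd2).collect()   // keeps only the keys absent from rdd2: ("a", 1), ("c", 3)
//
// Per partition, only rdd1's entries are buffered in the JHashMap built in `compute`; rdd2 is streamed
// through `integrate(1, ...)` solely to remove matching keys.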
| chenc10/Spark-PAF | core/src/main/scala/org/apache/spark/rdd/SubtractedRDD.scala | Scala | apache-2.0 | 5,085 |
/*
* Copyright (c) 2012 - 2020 Splice Machine, Inc.
*
* This file is part of Splice Machine.
* Splice Machine is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either
* version 3, or (at your option) any later version.
* Splice Machine is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License along with Splice Machine.
* If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.splicemachine.nsds.kafka
import java.io.Externalizable
import java.util
import java.util.{Collections, Properties, UUID}
import com.splicemachine.derby.impl.kryo.KryoSerialization
import com.splicemachine.derby.stream.spark.KafkaReadFunction.Message
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord, KafkaConsumer}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, IntegerDeserializer}
import scala.collection.JavaConverters._
object KafkaUtils {
private def getConsumer(bootstrapServers: String): KafkaConsumer[Integer, Array[Byte]] = {
val props = new Properties
val groupId = "spark-consumer-nsdsk-ku"
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers)
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId)
props.put(ConsumerConfig.CLIENT_ID_CONFIG, groupId + "-" + UUID.randomUUID)
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[IntegerDeserializer].getName)
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName)
new KafkaConsumer[Integer, Array[Byte]](props)
}
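  // The counts below are derived purely from offsets: for each partition the number of available
  // messages is (end offset - beginning offset), obtained by seeking to the end and then to the
  // beginning of the assigned partitions, without polling any records.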
def messageCount(bootstrapServers: String, topicName: String): Long = {
@transient lazy val consumer = getConsumer(bootstrapServers)
val partitionInfo = consumer.partitionsFor(topicName).asScala
val partitions = partitionInfo.map(pi => new TopicPartition(topicName, pi.partition()))
consumer.assign(partitions.asJava)
consumer.seekToEnd(Collections.emptySet())
val endPartitions: Map[TopicPartition, Long] = partitions.map(p => p -> consumer.position(p))(collection.breakOut)
consumer.seekToBeginning(Collections.emptySet())
val count = partitions.map(p => endPartitions(p) - consumer.position(p)).sum
consumer.close
count
}
def messageCount(bootstrapServers: String, topicName: String, partition: Int): Long = {
@transient lazy val consumer = getConsumer(bootstrapServers)
val topicPartition = new TopicPartition(topicName, partition)
val partitions = Seq(topicPartition)
consumer.assign(partitions.asJava)
consumer.seekToEnd(partitions.asJava)
val nextOffset = consumer.position(topicPartition)
consumer.seekToBeginning(partitions.asJava)
val firstOffset = consumer.position(topicPartition)
consumer.close
nextOffset - firstOffset
}
def lastMessageOf(bootstrapServers: String, topicName: String, partition: Int): Option[Externalizable] = {
@transient lazy val consumer = getConsumer(bootstrapServers)
val topicPartition = new TopicPartition(topicName, partition)
val partitions = Seq(topicPartition)
consumer.assign(partitions.asJava)
val end = consumer.endOffsets(partitions.asJava).get(topicPartition)
if( end == 0L ) {
None
} else {
consumer.seek(topicPartition, end-1)
consumer.poll(java.time.Duration.ofMillis(1000L)).asScala
.headOption.map( r => {
val kryo = new KryoSerialization()
kryo.init
val m = kryo.deserialize(r.value).asInstanceOf[Message]
kryo.close
m
})
}
}
def messagesFrom(bootstrapServers: String, topicName: String, partition: Int): Seq[Externalizable] = {
@transient lazy val consumer = getConsumer(bootstrapServers)
consumer.assign(util.Arrays.asList(new TopicPartition(topicName, partition)))
val expectedMsgCt = messageCount(bootstrapServers, topicName, partition)
val timeout = java.time.Duration.ofMillis(1000L)
var records = Iterable.empty[ConsumerRecord[Integer, Array[Byte]]]
var newRecords = consumer.poll(timeout).asScala // newRecords: Iterable[ConsumerRecord[Integer, Array[Byte]]]
records = records ++ newRecords
var retries = 0
val maxRetries = 10
while(
newRecords.nonEmpty ||
(records.size < expectedMsgCt && retries < maxRetries)
)
{
if( newRecords.isEmpty ) { retries += 1 }
newRecords = consumer.poll(timeout).asScala
records = records ++ newRecords
}
consumer.close
//println( s"KafkaUtils.msgs record count: ${records.size}" )
val seqBuilder = Seq.newBuilder[Externalizable]
@transient lazy val kryo = new KryoSerialization()
kryo.init
for (record <- records.iterator) {
seqBuilder += kryo.deserialize(record.value).asInstanceOf[Message]
}
kryo.close
seqBuilder.result
}
}
| splicemachine/spliceengine | splice_spark2/src/main/scala/com/splicemachine/nsds/kafka/KafkaUtils.scala | Scala | agpl-3.0 | 5,218 |
package fringe.fringeArria10.bigIP
// TODO: what to do for this guy?
import fringe.FringeGlobals
import fringe.bigIP.BigIP
import fringe.bigIP.BigIPSim
import chisel3._
import chisel3.util._
class BigIPArria10 extends BigIPSim | stanford-ppl/spatial-lang | spatial/core/resources/chiselgen/template-level/fringeArria10/bigIP/BigIPArria10.scala | Scala | mit | 227 |
package dx.compiler
import dx.core.languages.Language
import wdlTools.eval.WdlValues
import wdlTools.generators.code.WdlV1Generator
import wdlTools.syntax.{CommentMap, SourceLocation, WdlVersion}
import wdlTools.types.WdlTypes.T_Task
import wdlTools.types.{WdlTypes, TypedAbstractSyntax => TAT}
import wdlTools.util.{Logger, StringFileSource}
case class WdlCodeGen(logger: Logger,
typeAliases: Map[String, WdlTypes.T],
language: Language.Value) {
private val locPlaceholder: SourceLocation = SourceLocation.empty
// A self contained WDL workflow
val wdlVersion: WdlVersion = {
language match {
case Language.WDLvDraft2 =>
logger.warning("Upgrading draft-2 input to verion 1.0")
WdlVersion.V1
case Language.WDLv1_0 => WdlVersion.V1
case Language.WDLv2_0 => WdlVersion.V2
case other =>
throw new Exception(s"Unsupported language version ${other}")
}
}
private lazy val typeAliasDefinitions: Vector[TAT.StructDefinition] = {
val sortedTypeAliases = SortTypeAliases(logger).apply(typeAliases.toVector)
sortedTypeAliases.map {
case (name, wdlType: WdlTypes.T_Struct) =>
TAT.StructDefinition(name, wdlType, wdlType.members, locPlaceholder)
case other => throw new RuntimeException(s"Unexpected type alias ${other}")
}
}
// create a wdl-value of a specific type.
private[compiler] def genDefaultValueOfType(wdlType: WdlTypes.T): TAT.Expr = {
wdlType match {
case WdlTypes.T_Boolean => TAT.ValueBoolean(value = true, wdlType, locPlaceholder)
case WdlTypes.T_Int => TAT.ValueInt(0, wdlType, locPlaceholder)
case WdlTypes.T_Float => TAT.ValueFloat(0.0, wdlType, locPlaceholder)
case WdlTypes.T_String => TAT.ValueString("", wdlType, locPlaceholder)
case WdlTypes.T_File => TAT.ValueString("placeholder.txt", wdlType, locPlaceholder)
// We could convert an optional to a null value, but that causes
// problems for the pretty printer.
// WdlValues.V_OptionalValue(wdlType, None)
case WdlTypes.T_Optional(t) => genDefaultValueOfType(t)
// The WdlValues.V_Map type HAS to appear before the array types, because
// otherwise it is coerced into an array. The map has to
// contain at least one key-value pair, otherwise you get a type error.
case WdlTypes.T_Map(keyType, valueType) =>
val k = genDefaultValueOfType(keyType)
val v = genDefaultValueOfType(valueType)
TAT.ExprMap(Map(k -> v), wdlType, locPlaceholder)
// an empty array
case WdlTypes.T_Array(_, false) =>
TAT.ExprArray(Vector.empty, wdlType, locPlaceholder)
// Non empty array
case WdlTypes.T_Array(t, true) =>
TAT.ExprArray(Vector(genDefaultValueOfType(t)), wdlType, locPlaceholder)
case WdlTypes.T_Pair(lType, rType) =>
TAT.ExprPair(genDefaultValueOfType(lType),
genDefaultValueOfType(rType),
wdlType,
locPlaceholder)
case WdlTypes.T_Struct(_, typeMap) =>
val members = typeMap.map {
case (fieldName, t) =>
val key: TAT.Expr = TAT.ValueString(fieldName, WdlTypes.T_String, locPlaceholder)
key -> genDefaultValueOfType(t)
}
TAT.ExprObject(members, wdlType, locPlaceholder)
case _ => throw new Exception(s"Unhandled type ${wdlType}")
}
}
private[compiler] def wdlValueToExpr(value: WdlValues.V): TAT.Expr = {
def seqToType(vec: Iterable[TAT.Expr]): WdlTypes.T = {
vec.headOption.map(_.wdlType).getOrElse(WdlTypes.T_Any)
}
value match {
case WdlValues.V_Null => TAT.ValueNull(WdlTypes.T_Any, locPlaceholder)
case WdlValues.V_Boolean(value) =>
TAT.ValueBoolean(value, WdlTypes.T_Boolean, locPlaceholder)
case WdlValues.V_Int(value) => TAT.ValueInt(value, WdlTypes.T_Int, locPlaceholder)
case WdlValues.V_Float(value) => TAT.ValueFloat(value, WdlTypes.T_Float, locPlaceholder)
case WdlValues.V_String(value) =>
TAT.ValueString(value, WdlTypes.T_String, locPlaceholder)
case WdlValues.V_File(value) => TAT.ValueFile(value, WdlTypes.T_File, locPlaceholder)
case WdlValues.V_Directory(value) =>
TAT.ValueDirectory(value, WdlTypes.T_Directory, locPlaceholder)
// compound values
case WdlValues.V_Pair(l, r) =>
val lExpr = wdlValueToExpr(l)
val rExpr = wdlValueToExpr(r)
TAT.ExprPair(lExpr, rExpr, WdlTypes.T_Pair(lExpr.wdlType, rExpr.wdlType), locPlaceholder)
case WdlValues.V_Array(value) =>
val valueExprs = value.map(wdlValueToExpr)
TAT.ExprArray(valueExprs, seqToType(valueExprs), locPlaceholder)
case WdlValues.V_Map(value) =>
val keyExprs = value.keys.map(wdlValueToExpr)
val valueExprs = value.values.map(wdlValueToExpr)
TAT.ExprMap(keyExprs.zip(valueExprs).toMap,
WdlTypes.T_Map(seqToType(keyExprs), seqToType(valueExprs)),
locPlaceholder)
case WdlValues.V_Optional(value) => wdlValueToExpr(value)
case WdlValues.V_Struct(name, members) =>
val memberExprs: Map[TAT.Expr, TAT.Expr] = members.map {
case (name, value) =>
TAT.ValueString(name, WdlTypes.T_String, locPlaceholder) -> wdlValueToExpr(value)
case other => throw new RuntimeException(s"Unexpected member ${other}")
}
val memberTypes = memberExprs.map {
case (name: TAT.ValueString, value) => name.value -> value.wdlType
case other => throw new RuntimeException(s"Unexpected member ${other}")
}
TAT.ExprMap(memberExprs, WdlTypes.T_Struct(name, memberTypes), locPlaceholder)
case WdlValues.V_Object(members) =>
val memberExprs = members.map {
case (name, value) =>
val key: TAT.Expr = TAT.ValueString(name, WdlTypes.T_String, locPlaceholder)
key -> wdlValueToExpr(value)
}
TAT.ExprObject(memberExprs, WdlTypes.T_Object, locPlaceholder)
case other =>
throw new Exception(s"Unhandled value ${other}")
}
}
/*
Create a header for a task/workflow. This is an empty task
that includes the input and output definitions. It is used
to
(1) allow linking to native DNAx applets (and workflows in the future).
(2) make a WDL file stand-alone, without imports
For example, the stub for the Add task:
task Add {
input {
Int a
Int b
}
      command <<<
        python -c "print(${a} + ${b})"
      >>>
      output {
        Int result = read_int(stdout())
      }
    }
is:
task Add {
input {
Int a
Int b
}
command {}
output {
Int result
}
}
*/
private def genTaskHeader(callable: IR.Callable): TAT.Task = {
/*Utils.trace(verbose.on,
s"""|taskHeader callable=${callable.name}
| inputs= ${callable.inputVars.map(_.name)}
| outputs= ${callable.outputVars.map(_.name)}"""
.stripMargin)*/
// Sort the inputs by name, so the result will be deterministic.
val inputs: Vector[TAT.InputDefinition] =
callable.inputVars
.sortWith(_.name < _.name)
.map { cVar =>
cVar.default match {
case None =>
TAT.RequiredInputDefinition(cVar.name, cVar.wdlType, locPlaceholder)
case Some(wValue) =>
TAT.OverridableInputDefinitionWithDefault(cVar.name,
cVar.wdlType,
wdlValueToExpr(wValue),
locPlaceholder)
}
}
val outputs: Vector[TAT.OutputDefinition] =
callable.outputVars
.sortWith(_.name < _.name)
.map { cVar =>
val defaultVal = genDefaultValueOfType(cVar.wdlType)
TAT.OutputDefinition(cVar.name, cVar.wdlType, defaultVal, locPlaceholder)
}
language match {
case Language.WDLvDraft2 | Language.WDLv1_0 | Language.WDLv2_0 =>
TAT.Task(
callable.name,
WdlTypes.T_Task(
callable.name,
inputs.map {
case TAT.RequiredInputDefinition(name, wdlType, _) =>
name -> (wdlType, false)
case other: TAT.InputDefinition =>
other.name -> (other.wdlType, true)
}.toMap,
outputs.map(d => d.name -> d.wdlType).toMap
),
inputs,
outputs,
TAT.CommandSection(Vector.empty, locPlaceholder),
Vector.empty,
None,
None,
None,
None,
locPlaceholder
)
case other =>
throw new Exception(s"Unsupported language version ${other}")
}
}
/**
    * Generate a WDL stub for a DNAnexus applet.
* @param id the applet ID
* @param appletName the applet name
* @param inputSpec the applet inputs
* @param outputSpec the applet outputs
    * @return a TAT.Task
*/
def genDnanexusAppletStub(id: String,
appletName: String,
inputSpec: Map[String, WdlTypes.T],
outputSpec: Map[String, WdlTypes.T]): TAT.Task = {
val meta = TAT.MetaSection(
Map(
"type" -> TAT.MetaValueString("native", locPlaceholder),
"id" -> TAT.MetaValueString(id, locPlaceholder)
),
locPlaceholder
)
TAT.Task(
appletName,
T_Task(appletName, inputSpec.map {
case (name, wdlType) => name -> (wdlType, false)
}, outputSpec),
inputSpec.map {
case (name, wdlType) => TAT.RequiredInputDefinition(name, wdlType, locPlaceholder)
}.toVector,
outputSpec.map {
case (name, wdlType) =>
val expr = genDefaultValueOfType(wdlType)
TAT.OutputDefinition(name, wdlType, expr, locPlaceholder)
}.toVector,
TAT.CommandSection(Vector.empty, locPlaceholder),
Vector.empty,
Some(meta),
parameterMeta = None,
runtime = None,
hints = None,
loc = locPlaceholder
)
}
def standAloneTask(task: TAT.Task): TAT.Document = {
TAT.Document(
StringFileSource.empty,
TAT.Version(wdlVersion, locPlaceholder),
typeAliasDefinitions :+ task,
None,
locPlaceholder,
CommentMap.empty
)
}
// A workflow can import other libraries:
//
// import "library.wdl" as lib
// workflow foo {
// call lib.Multiply as mul { ... }
// call lib.Add { ... }
// call lib.Nice as nice { ... }
// call lib.Hello
// }
//
// rewrite the workflow, and remove the calls to external libraries.
//
// workflow foo {
// call Multiply as mul { ... }
// call Add { ... }
// call Nice as nice { ... }
// call Hello
// }
private def cleanCalls(body: Vector[TAT.WorkflowElement]): Vector[TAT.WorkflowElement] = {
body.map {
case call: TAT.Call =>
call.copy(fullyQualifiedName = call.unqualifiedName)
case scat: TAT.Scatter =>
scat.copy(body = cleanCalls(scat.body))
case cond: TAT.Conditional =>
cond.copy(body = cleanCalls(cond.body))
case other => other
}
}
// A workflow must have definitions for all the tasks it
// calls. However, a scatter calls tasks that are missing from
// the WDL file we generate. To ameliorate this, we add stubs for
// called tasks. The generated tasks are named by their
// unqualified names, not their fully-qualified names. This works
// because the WDL workflow must be "flattenable".
def standAloneWorkflow(wf: TAT.Workflow, allCalls: Vector[IR.Callable]): TAT.Document = {
val tasks: Vector[TAT.Task] =
allCalls
.foldLeft(Map.empty[String, TAT.Task]) {
case (accu, callable) =>
if (accu contains callable.name) {
// we have already created a stub for this call
accu
} else {
val stub: TAT.Task = callable match {
case IR.Applet(_, _, _, _, _, IR.AppletKindTask(_), doc, _, _) =>
// This is a task, include its source instead of a header.
val tasks = doc.elements.collect {
case t: TAT.Task => t
}
assert(tasks.size == 1)
tasks.head
case _ =>
// no existing stub, create it
genTaskHeader(callable)
}
accu + (callable.name -> stub)
}
}
// sort the task order by name, so the generated code will be deterministic
.toVector
.sortWith(_._1 < _._1)
.map { case (_, task) => task }
val wfWithoutImportCalls = wf.copy(body = cleanCalls(wf.body))
TAT.Document(
StringFileSource.empty,
TAT.Version(wdlVersion, locPlaceholder),
typeAliasDefinitions ++ tasks,
Some(wfWithoutImportCalls),
locPlaceholder,
CommentMap.empty
)
}
def generateDocument(doc: TAT.Document): String = {
val generator = WdlV1Generator()
    generator.generateDocument(doc).mkString("\n")
}
}
| dnanexus-rnd/dxWDL | src/main/scala/dx/compiler/WdlCodeGen.scala | Scala | apache-2.0 | 13,565 |
package dbtarzan.gui.info
import dbtarzan.db.QuerySql
import dbtarzan.gui.TControlBuilder
import dbtarzan.localization.Localization
import scalafx.scene.Parent
import scalafx.scene.control.TextArea
/** A read-only text box showing the query SQL, so that it can be seen and copied */
class QueryInfo(sql : QuerySql, localization : Localization) extends TControlBuilder {
val textBox = new TextArea {
text = sql.sql
editable = false
}
def control : Parent = textBox
}
| aferrandi/dbtarzan | src/main/scala/dbtarzan/gui/info/QueryInfo.scala | Scala | apache-2.0 | 482 |
import play.api.libs.json.{Json, OFormat}
package object models {
case class Order(id: Int = -1,
customerId: Int,
attendees: List[String])
object Order {
implicit val OrderFormat: OFormat[Order] =
Json.format[Order]
}
case class OrderPlaced(order: Order)
object OrderPlaced {
implicit val OrderPlacedFormat: OFormat[OrderPlaced] =
Json.format[OrderPlaced]
}
case class UserRecognized(name: String)
object UserRecognized {
implicit val userRecognizedFormat
: OFormat[UserRecognized] =
Json.format[UserRecognized]
}
}
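
// Illustrative sketch (hypothetical values): the implicit OFormat instances above let Play JSON
// round-trip these case classes, e.g.
//
//   import play.api.libs.json.Json
//   val json = Json.toJson(Order(id = 1, customerId = 42, attendees = List("Ada", "Grace")))
//   val back = json.as[Order]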
| leanovate/contoso-conference-manager | app/models/package.scala | Scala | apache-2.0 | 616 |
/**
* Copyright (c) 2007-2011 Eric Torreborre <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of
* the Software. Neither the name of specs nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
* TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package org.specs.util
import org.specs.Specification
import org.specs.runner._
import org.specs._
class dataTableHeaderUnit extends SpecificationWithJUnit with DataTables {
"a data table header" should {
val tableHeader = "a"|"b"|"c"|
"print out the column names separated by |" in {
tableHeader.toString must_== "|a|b|c|"
}
"have a toXhtml method" in {
tableHeader.toXhtml must_== <tr><th>a</th><th>b</th><th>c</th></tr>
}
}
}
| Muki-SkyWalker/specs | src/test/scala/org/specs/util/dataTableHeaderUnit.scala | Scala | mit | 1,816 |
package extractors.solutions
object Exercise2 {
object At {
def unapply(s: String): Option[(String, String)] = {
val parts = s split "@"
if (parts.length == 2) Some(parts(0), parts(1)) else None
}
}
}
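
// Usage sketch (illustrative values): the At extractor splits an address-like string on '@'.
//
//   "user@example.com" match {
//     case Exercise2.At(local, domain) => s"$local at $domain"   // "user at example.com"
//     case _                           => "not an address"
//   }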
| julienrf/scala-lessons | highlights/extractors/code/src/main/scala/extractors/solutions/Exercise2.scala | Scala | mit | 230 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.linalg.distributed
import breeze.linalg.{diag => brzDiag, DenseMatrix => BDM, DenseVector => BDV}
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.linalg.{Matrices, Vectors}
/**
 * An indexed row matrix (IndexedRowMatrix) is similar to a RowMatrix but carries row indices;
 * it is backed by an RDD of indexed rows, so each row is represented by a long index plus a local vector.
 * In an IndexedRowMatrix every row is a feature vector identified by its row index.
*/
class IndexedRowMatrixSuite extends SparkFunSuite with MLlibTestSparkContext {
val m = 4
val n = 3
  // create the indexed rows as a Seq[IndexedRow]
val data = Seq(
(0L, Vectors.dense(0.0, 1.0, 2.0)),
(1L, Vectors.dense(3.0, 4.0, 5.0)),
(3L, Vectors.dense(9.0, 0.0, 1.0))
).map(x => IndexedRow(x._1, x._2))
var indexedRows: RDD[IndexedRow] = _
override def beforeAll() {
super.beforeAll()
indexedRows = sc.parallelize(data, 2)
}
test("size") {
    // An IndexedRowMatrix is distributed by row and carries row indices; it is backed by an RDD of
    // indexed rows, so each row is represented by a long index plus a local vector.
    val mat1 = new IndexedRowMatrix(indexedRows)
    assert(mat1.numRows() === m) // 4 rows
    assert(mat1.numCols() === n) // 3 columns
    // build an indexed row matrix with numRows = 5 and numCols = 0 (column count inferred from the data)
val mat2 = new IndexedRowMatrix(indexedRows, 5, 0)
assert(mat2.numRows() === 5)
assert(mat2.numCols() === n)
}
test("empty rows") {//空行
val rows = sc.parallelize(Seq[IndexedRow](), 1)
//索引行矩阵(IndexedRowMatrix)按行分布式存储,有行索引,其底层支撑结构是索引的行组成的RDD,所以每行可以通过索引(long)和局部向量表示
val mat = new IndexedRowMatrix(rows)
intercept[RuntimeException] {
mat.numRows()
}
intercept[RuntimeException] {
mat.numCols()
}
}
test("toBreeze") {//
//索引行矩阵(IndexedRowMatrix)按行分布式存储,有行索引,其底层支撑结构是索引的行组成的RDD,所以每行可以通过索引(long)和局部向量表示
val mat = new IndexedRowMatrix(indexedRows)
val expected = BDM(
(0.0, 1.0, 2.0),
(3.0, 4.0, 5.0),
(0.0, 0.0, 0.0),
(9.0, 0.0, 1.0))
assert(mat.toBreeze() === expected)
}
test("toRowMatrix") {//行矩阵
//索引行矩阵(IndexedRowMatrix)按行分布式存储,有行索引,其底层支撑结构是索引的行组成的RDD,所以每行可以通过索引(long)和局部向量表示
val idxRowMat = new IndexedRowMatrix(indexedRows)
//转换行矩阵
val rowMat = idxRowMat.toRowMatrix()
assert(rowMat.numCols() === n)//3列
assert(rowMat.numRows() === 3, "should drop empty rows")
assert(rowMat.rows.collect().toSeq === data.map(_.vector).toSeq)
}
  // A CoordinateMatrix is typically used for highly sparse computations; a MatrixEntry is a tuple-like
  // element containing a row index, a column index and a value.
  test("toCoordinateMatrix") { // coordinate matrix
    // IndexedRowMatrix: distributed by row, backed by an RDD of indexed rows (long index + local vector per row)
    val idxRowMat = new IndexedRowMatrix(indexedRows)
val coordMat = idxRowMat.toCoordinateMatrix()
assert(coordMat.numRows() === m)
assert(coordMat.numCols() === n)
assert(coordMat.toBreeze() === idxRowMat.toBreeze())
}
test("toBlockMatrix") {//块矩阵
//索引行矩阵(IndexedRowMatrix)按行分布式存储,有行索引,其底层支撑结构是索引的行组成的RDD,所以每行可以通过索引(long)和局部向量表示
val idxRowMat = new IndexedRowMatrix(indexedRows)
//转换块矩阵
val blockMat = idxRowMat.toBlockMatrix(2, 2)
assert(blockMat.numRows() === m)//4行
assert(blockMat.numCols() === n)//3列
assert(blockMat.toBreeze() === idxRowMat.toBreeze())
intercept[IllegalArgumentException] {
idxRowMat.toBlockMatrix(-1, 2)
}
/**
     * A block matrix (BlockMatrix) is a distributed matrix backed by an RDD whose elements are MatrixBlocks;
     * a MatrixBlock is a ((Int, Int), Matrix) tuple where (Int, Int) is the block index and Matrix is the sub-matrix at that index.
*/
intercept[IllegalArgumentException] {
idxRowMat.toBlockMatrix(2, 0)
}
}
test("multiply a local matrix") {//乘一个局部矩阵
//索引行矩阵(IndexedRowMatrix)按行分布式存储,有行索引,其底层支撑结构是索引的行组成的RDD,所以每行可以通过索引(long)和局部向量表示
val A = new IndexedRowMatrix(indexedRows)
val B = Matrices.dense(3, 2, Array(0.0, 1.0, 2.0, 3.0, 4.0, 5.0))
val C = A.multiply(B)
val localA = A.toBreeze()
val localC = C.toBreeze()
val expected = localA * B.toBreeze.asInstanceOf[BDM[Double]]
assert(localC === expected)
}
test("gram") {//格拉姆矩阵
val A = new IndexedRowMatrix(indexedRows)
//格拉姆矩阵
val G = A.computeGramianMatrix()
val expected = BDM(
(90.0, 12.0, 24.0),
(12.0, 17.0, 22.0),
(24.0, 22.0, 30.0))
assert(G.toBreeze === expected)
}
test("svd") {//奇异值分解
val A = new IndexedRowMatrix(indexedRows)
//第一个参数3意味着取top 2个奇异值,第二个参数true意味着计算矩阵U
val svd = A.computeSVD(n, computeU = true)
assert(svd.U.isInstanceOf[IndexedRowMatrix])
val localA = A.toBreeze()
val U = svd.U.toBreeze()
val s = svd.s.toBreeze.asInstanceOf[BDV[Double]]
val V = svd.V.toBreeze.asInstanceOf[BDM[Double]]
assert(closeToZero(U.t * U - BDM.eye[Double](n)))
assert(closeToZero(V.t * V - BDM.eye[Double](n)))
assert(closeToZero(U * brzDiag(s) * V.t - localA))
}
test("validate matrix sizes of svd") {//验证SVD矩阵大小
val k = 2
val A = new IndexedRowMatrix(indexedRows)
val svd = A.computeSVD(k, computeU = true)
assert(svd.U.numRows() === m)
assert(svd.U.numCols() === k)
assert(svd.s.size === k)
assert(svd.V.numRows === n)
assert(svd.V.numCols === k)
}
test("validate k in svd") {//验证K 奇异值
val A = new IndexedRowMatrix(indexedRows)
intercept[IllegalArgumentException] {
A.computeSVD(-1)
}
}
def closeToZero(G: BDM[Double]): Boolean = {
    // math.abs returns the absolute value of a number
G.valuesIterator.map(math.abs).sum < 1e-6
}
}
| tophua/spark1.52 | mllib/src/test/scala/org/apache/spark/mllib/linalg/distributed/IndexedRowMatrixSuite.scala | Scala | apache-2.0 | 7,370 |
package de.tototec.sbuild.ant
import de.tototec.sbuild.Project
import java.io.File
import de.tototec.sbuild.Path
import de.tototec.sbuild.TargetRef
import de.tototec.sbuild.TargetRefs
object AntPath {
def apply(location: File = null,
locations: Seq[File] = null,
path: String = null,
paths: Seq[String] = null)(implicit project: Project) =
new AntPath(
location = location,
locations = locations,
path = path,
paths = paths
)
def apply(targetRefs: TargetRefs)(implicit _project: Project): AntPath =
new AntPath(locations = targetRefs.targetRefs.map { targetRef =>
_project.uniqueTargetFile(targetRef).file
})
def apply(targetRef: TargetRef)(implicit proj: Project): AntPath =
new AntPath(location = proj.uniqueTargetFile(targetRef).file)
}
class AntPath()(implicit _project: Project) extends org.apache.tools.ant.types.Path(AntProject()) {
def this(location: File = null,
locations: Seq[File] = null,
path: String = null,
paths: Seq[String] = null)(implicit project: Project) {
this
if (location != null) setLocation(location)
if (locations != null) locations.foreach { loc => setLocation(loc) }
if (path != null) setPath(path)
if (paths != null) paths.foreach { path => setPath(path) }
}
} | SBuild-org/sbuild | de.tototec.sbuild.ant/src/main/scala/de/tototec/sbuild/ant/AntPath.scala | Scala | apache-2.0 | 1,348 |
/*
* Copyright (c) 2018 Georgios Andreadakis
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.tap.framework.filesystem
import java.io.{File, FileOutputStream, InputStream}
import java.net.URI
import java.nio.file.{Files, Path, Paths}
import org.apache.commons.lang.StringUtils
import org.apache.tika.io.IOUtils
/**
 * File system helpers: resolving classpath resources and the system temp folder, plus basic stream and byte-array access.
*/
object FileHandling {
object FileReference {
def classpathResource(pathName: String): URI = {
val url = this.getClass().getClassLoader().getResource(pathName)
url.toURI()
}
def findTempFolder(): Path = {
val systemTemp = System.getProperty("java.io.tmpdir")
val folder = if (StringUtils.isNotEmpty(systemTemp)) systemTemp else "/tmp/tap"
val uri = new File(folder).toURI
val folderPath = Paths.get(uri)
      if (!folderPath.toFile.exists()) {
        // the temp location is a directory, so create it with mkdirs rather than createNewFile
        folderPath.toFile.mkdirs()
      }
folderPath
}
}
object FileAccess {
def newInputStream(pathName: String): InputStream = {
val path = Paths.get(FileReference.classpathResource(pathName))
val stream = newInputStream(path)
stream
}
def newInputStream(path: Path): InputStream = {
val stream = Files.newInputStream(path)
stream
}
def fileAsByteArray(path: Path): Array[Byte] = {
Files.readAllBytes(path)
}
def writeToFile(stream: InputStream, file: File): Unit = {
var out: FileOutputStream = null
try {
out = new FileOutputStream(file)
IOUtils.copy(stream, out)
} finally out.close()
}
}
}
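
// Usage sketch (hypothetical resource name): read a bundled document and stage it in the temp folder.
//
//   val stream = FileHandling.FileAccess.newInputStream("samples/example.docx")
//   val target = FileHandling.FileReference.findTempFolder().resolve("example.docx").toFile
//   FileHandling.FileAccess.writeToFile(stream, target)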
| GeorgiosAndreadakis/TextAnalyserPlatform | framework/src/main/scala/org/tap/framework/filesystem/FileHandling.scala | Scala | apache-2.0 | 2,099 |
trait A{
private var s = 1
def getS = s
}
object Test extends A {
def main(args: Array[String]): Unit = println(getS)
}
| yusuke2255/dotty | tests/run/i744.scala | Scala | bsd-3-clause | 126 |
package io.eels.component.csv
import java.nio.file.Paths
import com.sksamuel.exts.metrics.Timed
import io.eels.schema.StructType
import io.eels.{Frame, Row}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import scala.util.Random
/**
* v0.90 1m rows insertion: 1400 reading: 1324
 * v1.10 1m rows insertion: 1250 reading: 680
*/
object CsvSpeedTest extends App with Timed {
implicit val conf = new Configuration()
implicit val fs = FileSystem.getLocal(conf)
val schema = StructType("a", "b", "c", "d", "e")
val rows = List.fill(1000000)(Row(schema, Random.nextBoolean(), Random.nextFloat(), Random.nextGaussian(), Random.nextLong(), Random.nextString(10)))
val frame = Frame(schema, rows)
while(true) {
val path = Paths.get("csv_speed.csv")
path.toFile.delete()
timed("Insertion") {
frame.to(CsvSink(path))
}
timed("Reading") {
val in = CsvSource(path).toFrame().collect()
assert(in.size == rows.size, in.size)
}
path.toFile.delete()
}
}
| stheppi/eel | eel-components/src/test/scala/io/eels/component/csv/CsvSpeedTest.scala | Scala | apache-2.0 | 1,050 |
/*******************************************************************************
* Copyright 2010 Maxime Lévesque
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
***************************************************************************** */
package org.squeryl.dsl
class Group[K](k: K) {
def key = k
}
class Measures[M](m: M) {
def measures = m
}
class GroupWithMeasures[K,M](k: K, m: M) {
def key = k
def measures = m
override def toString = {
val sb = new java.lang.StringBuilder
sb.append("GroupWithMeasures[")
sb.append("key=")
sb.append(key)
sb.append(",measures=")
sb.append(measures)
sb.append("]")
sb.toString
}
}
object GroupWithMeasures {
def unapply[K, M](x: GroupWithMeasures[K, M]) = Some((x.key, x.measures))
}
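
// Illustrative sketch (assumes a Squeryl schema with a `songs` table that has an `artistId` column):
// group/compute queries yield GroupWithMeasures rows, e.g.
//
//   from(songs)(s => groupBy(s.artistId) compute(count))
//   // : Query[GroupWithMeasures[Long, Long]] -- key is the artistId, measures is the row count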
| squeryl/squeryl | src/main/scala/org/squeryl/dsl/Group.scala | Scala | apache-2.0 | 1,335 |
/*
* Copyright 2017 David Schmitz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.uport.recipe.swagger
import com.github.swagger.akka.SwaggerHttpService
import com.github.swagger.akka.model.Info
import io.uport.recipe.routes.RecipeServiceRoutes
object SwaggerDocService extends SwaggerHttpService with SwaggerUi {
import io.uport.recipe.config.Settings._
override val apiClasses: Set[Class[_]] = Set(classOf[RecipeServiceRoutes])
  override val host = s"${httpHost}:${httpPort}" //the url of your api, not swagger's json endpoint
  override val basePath = "/" //the basePath for the API you are exposing
override val apiDocsPath = "api-docs" //where you want the swagger-json endpoint exposed
override val info = Info(version = "1.0") //provides license and other description details
}
| dschmitz/recipe-service | src/main/scala/io/uport/recipe/swagger/SwaggerDocService.scala | Scala | apache-2.0 | 1,388 |
/* Copyright 2009-2021 EPFL, Lausanne */
import stainless.lang._
/** This benchmark tests some potential issues with the legacy "bestRealType" function, which was originally introduced to work around
 * Scala's type inference being too precise for Leon. */
object BestRealTypes {
sealed abstract class Num
case class Zero() extends Num
case class Succ(pred : Num) extends Num
case class Wrapper(num : Num)
def boolToNum(b : Boolean) : Num = if(b) {
Zero()
} else {
Succ(Zero())
}
// This requires computing the "bestRealTypes" of w1 and w2.
def zipWrap(w1 : Wrapper, w2 : Wrapper) : (Wrapper,Wrapper) = (w1, w2)
def somethingToProve(b : Boolean) : Boolean = {
val (z1,z2) = zipWrap(Wrapper(boolToNum(b)), Wrapper(boolToNum(!b)))
z1.num == Zero() || z2.num == Zero()
}.holds
}
| epfl-lara/stainless | frontends/benchmarks/verification/valid/BestRealTypes.scala | Scala | apache-2.0 | 819 |
/*
* Copyright 2010-2011 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalaz.camel.core
import org.scalatest.{WordSpec, BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.matchers.MustMatchers
import scalaz._
import scalaz.concurrent.Strategy
/**
* @author Martin Krasser
*/
trait CamelTest extends CamelTestContext with WordSpec with MustMatchers with BeforeAndAfterAll with BeforeAndAfterEach {
import Scalaz._
override def beforeAll = {
from("direct:predef-1") { appendToBody("-p1") }
from("direct:predef-2") { appendToBody("-p2") }
router.start
}
override def afterAll = router.stop
override def afterEach = mocks.values.foreach { m => m.reset }
def support = afterWord("support")
"scalaz.camel.core.Camel" should support {
"Kleisli composition of CPS message processors" in {
appendToBody("-1") >=> appendToBody("-2") process Message("a") must equal(Success(Message("a-1-2")))
}
"Kleisli composition of direct-style message processors" in {
ds_appendToBody("-1") >=> ds_appendToBody("-2") process Message("a") must equal(Success(Message("a-1-2")))
}
"Kleisli composition of asynchonous Camel message processors" in {
repeatBody >=> repeatBody process Message("a") must equal(Success(Message("aaaa")))
}
"Kleisli composition of synchonous Camel message processors" in {
repeatBody.sp >=> repeatBody.sp process Message("a") must equal(Success(Message("aaaa")))
}
"Kleisli composition of Camel endpoint producers" in {
to("direct:predef-1") >=> to("direct:predef-2") process Message("a") must equal(Success(Message("a-p1-p2")))
}
"Kleisli composition of different types of message processors" in {
repeatBody >=> repeatBody.sp >=> appendToBody("-1") >=> ds_appendToBody("-2") >=> to("direct:predef-1") process
Message("a") must equal(Success(Message("aaaa-1-2-p1")))
}
"Kleisli composition of CPS processors defined inline" in {
val route = appendToBody("-1") >=> { (m: Message, k: MessageValidation => Unit) => k(m.appendToBody("-x").success) }
route process Message("a") must equal(Success(Message("a-1-x")))
}
"Kleisli composition of direct-style processors defined inline" in {
val route = appendToBody("-1") >=> { m: Message => m.appendToBody("-y") }
route process Message("a") must equal(Success(Message("a-1-y")))
}
"failure reporting with CPS processors" in {
failWithMessage("1") >=> failWithMessage("2") process Message("a") match {
case Success(_) => fail("Failure result expected")
case Failure(m: Message) => m.exception match {
case Some(e: Exception) => e.getMessage must equal("1")
case None => fail("no exception set for message")
}
}
}
"failure reporting with direct-style processors (that throw exceptions)" in {
ds_failWithMessage("1") >=> ds_failWithMessage("2") process Message("a") match {
case Success(_) => fail("Failure result expected")
case Failure(m: Message) => m.exception match {
case Some(e: Exception) => e.getMessage must equal("1")
case None => fail("no exception set for message")
}
}
}
"application of routes using promises" in {
// With the 'Sequential' strategy, routing will be started in the current
// thread but processing may continue in another thread depending on the
// concurrency strategy used for dispatcher and processors.
implicit val strategy = Strategy.Sequential
val promise = appendToBody("-1") >=> appendToBody("-2") submit Message("a")
promise.get match {
case Success(m) => m.body must equal("a-1-2")
case Failure(m) => fail("unexpected failure")
}
}
"application of routes using response queues" in {
val queue = appendToBody("-1") >=> appendToBody("-2") submitN Message("a")
queue.take match {
case Success(m) => m.body must equal("a-1-2")
case Failure(m) => fail("unexpected failure")
}
}
"application of routes using continuation-passing style (CPS)" in {
val queue = new java.util.concurrent.LinkedBlockingQueue[MessageValidation](10)
appendToBody("-1") >=> appendToBody("-2") apply Message("a").success respond { mv => queue.put(mv) }
queue.take match {
case Success(m) => m.body must equal("a-1-2")
case Failure(m) => fail("unexpected failure")
}
}
"message comsumption from endpoints" in {
from("direct:test-1") { appendToBody("-1") >=> appendToBody("-2") }
template.requestBody("direct:test-1", "test") must equal ("test-1-2")
}
"content-based routing" in {
from("direct:test-10") {
appendToBody("-1") >=> choose {
case Message("a-1", _) => appendToBody("-2") >=> appendToBody("-3")
case Message("b-1", _) => appendToBody("-4") >=> appendToBody("-5")
} >=> appendToBody("-done")
}
template.requestBody("direct:test-10", "a") must equal ("a-1-2-3-done")
template.requestBody("direct:test-10", "b") must equal ("b-1-4-5-done")
template.requestBody("direct:test-10", "c") must equal ("c-1-done")
}
"scatter-gather" in {
val combine = (m1: Message, m2: Message) => m1.appendToBody(" + %s" format m2.body)
from("direct:test-11") {
appendToBody("-1") >=> scatter(
appendToBody("-2") >=> appendToBody("-3"),
appendToBody("-4") >=> appendToBody("-5"),
appendToBody("-6") >=> appendToBody("-7")
).gather(combine) >=> appendToBody(" done")
}
template.requestBody("direct:test-11", "a") must equal ("a-1-2-3 + a-1-4-5 + a-1-6-7 done")
}
"scatter-gather that fails if one of the recipients fail" in {
val combine = (m1: Message, m2: Message) => m1.appendToBody(" + %s" format m2.body)
from("direct:test-12") {
appendToBody("-1") >=> scatter(
appendToBody("-2") >=> failWithMessage("x"),
appendToBody("-4") >=> failWithMessage("y")
).gather(combine) >=> appendToBody(" done")
}
try {
template.requestBody("direct:test-12", "a")
fail("exception expected")
} catch {
case e: Exception => {
// test passed but reported exception message can be either 'x'
// or 'y' if message is distributed to destination concurrently.
// For sequential multicast (or a when using a single-threaded
// executor for multicast) then exception message 'x' will always
// be reported first.
if (multicastConcurrencyStrategy == Strategy.Sequential)
e.getCause.getMessage must equal ("x")
}
}
}
"usage of routes inside message processors" in {
// CPS message processor doing CPS application of route
val composite1: MessageProcessor = (m: Message, k: MessageValidation => Unit) =>
appendToBody("-n1") >=> appendToBody("-n2") apply m.success respond k
      // direct-style message processor (blocks until the contained route has generated a response)
val composite2: Message => Message = (m: Message) =>
appendToBody("-n3") >=> appendToBody("-n4") process m match {
case Success(m) => m
case Failure(m) => throw m.exception.get
}
from("direct:test-20") {
composite1 >=> composite2
}
template.requestBody("direct:test-20", "test") must equal("test-n1-n2-n3-n4")
}
"custom scatter-gather using for-comprehensions and promises" in {
// needed for creation of response promise (can be any
// other strategy as well such as Sequential or ...)
implicit val strategy = Strategy.Naive
// input message to destination routes
val input = Message("test")
// custom scatter-gather
val promise = for {
a <- appendToBody("-1") >=> appendToBody("-2") submit input
b <- appendToBody("-3") >=> appendToBody("-4") submit input
} yield a |@| b apply { (m1: Message, m2: Message) => m1.appendToBody(" + %s" format m2.body) }
promise.get must equal(Success(Message("test-1-2 + test-3-4")))
}
"multicast" in {
from("direct:test-30") {
appendToBody("-1") >=> multicast(
appendToBody("-2") >=> to("mock:mock1"),
appendToBody("-3") >=> to("mock:mock1")
) >=> appendToBody("-done") >=> to("mock:mock2")
}
mock("mock1").expectedBodiesReceivedInAnyOrder("a-1-2" , "a-1-3")
mock("mock2").expectedBodiesReceivedInAnyOrder("a-1-2-done", "a-1-3-done")
template.sendBody("direct:test-30", "a")
mock("mock1").assertIsSatisfied
mock("mock2").assertIsSatisfied
}
"multicast with a failing destination" in {
from("direct:test-31") {
attempt {
appendToBody("-1") >=> multicast(
appendToBody("-2"),
appendToBody("-3") >=> failWithMessage("-fail")
) >=> appendToBody("-done") >=> to("mock:mock")
} fallback {
case e: Exception => appendToBody(e.getMessage) >=> to("mock:error")
}
}
mock("mock").expectedBodiesReceived("a-1-2-done")
mock("error").expectedBodiesReceived("a-1-3-fail")
template.sendBody("direct:test-31", "a")
mock("mock").assertIsSatisfied
mock("error").assertIsSatisfied
}
"splitting of messages" in {
val splitLogic = (m: Message) => for (i <- 1 to 3) yield { m.appendToBody("-%s" format i) }
from("direct:test-35") { split(splitLogic) >=> appendToBody("-done") >=> to("mock:mock") }
mock("mock").expectedBodiesReceivedInAnyOrder("a-1-done", "a-2-done", "a-3-done")
template.sendBody("direct:test-35", "a")
mock("mock").assertIsSatisfied
}
"aggregation of messages" in {
// Waits for three messages with a 'keep' header.
// At arrival of the third message, a new Message
// with body 'aggregated' is returned.
def waitFor(count: Int) = {
val counter = new java.util.concurrent.atomic.AtomicInteger(0)
(m: Message) => {
m.header("keep") match {
case None => Some(m)
case Some(_) => if (counter.incrementAndGet == count) Some(Message("aggregated")) else None
}
}
}
from("direct:test-40") {
aggregate(waitFor(3)) >=> to("mock:mock")
}
mock("mock").expectedBodiesReceivedInAnyOrder("aggregated", "not aggregated")
    // only the third message will make the aggregator send a response
for (i <- 1 to 5) template.sendBodyAndHeader("direct:test-40", "a", "keep", true)
// ignored by aggregator and forwarded as-is
template.sendBody("direct:test-40", "not aggregated")
mock("mock").assertIsSatisfied
}
"filtering of messages" in {
from("direct:test-45") {
filter(_.body == "ok") >=> to("mock:mock")
}
mock("mock").expectedBodiesReceived("ok")
template.sendBody("direct:test-45", "filtered")
template.sendBody("direct:test-45", "ok")
mock("mock").assertIsSatisfied
}
"sharing of routes" in {
// nothing specific to scalaz-camel
// just demonstrates function reuse
val r = appendToBody("-1") >=> appendToBody("-2")
from ("direct:test-50a") { r }
from ("direct:test-50b") { r }
template.requestBody("direct:test-50a", "a") must equal ("a-1-2")
template.requestBody("direct:test-50b", "b") must equal ("b-1-2")
}
"preserving the message context even if a processor drops it" in {
    // Function that returns a *new* message that doesn't contain the context of
    // the input message (it contains a new default context). The context of
    // the input message will be set on the result message by the MessageValidationResponder.
val badguy1 = (m: Message) => new Message("bad")
    // Function that returns a *new* message on which setException is called as well.
    // Returning a new message *and* calling either setException or setOneway requires
    // explicitly copying the context from the input message as well (via setContextFrom).
val badguy2 = (m: Message) => new Message("bad").setContextFrom(m).setException(new Exception("x"))
val route1 = appendToBody("-1") >=> badguy1 >=> appendToBody("-2")
val route2 = appendToBody("-1") >=> badguy2 >=> appendToBody("-2")
route1 process Message("a").setOneway(true) match {
case Failure(m) => fail("unexpected failure")
case Success(m) => {
m.context.oneway must be (true)
m.body must equal ("bad-2")
}
}
route2 process Message("a").setOneway(true) match {
case Failure(m) => fail("unexpected failure")
case Success(m) => {
m.context.oneway must be (true)
m.body must equal ("bad-2")
}
}
}
"proper correlation of (concurrent) request and response messages" in {
def conditionalDelay(delay: Long, body: String): MessageProcessor = (m: Message, k: MessageValidation => Unit) => {
if (m.body == body)
processorConcurrencyStrategy.apply { Thread.sleep(delay); k(m.success) }
else
processorConcurrencyStrategy.apply { k(m.success) }
}
val r = conditionalDelay(1000, "a") >=> conditionalDelay(1000, "x") >=> appendToBody("-done")
from("direct:test-55") { r }
val a = Strategy.Naive.apply { template.requestBody("direct:test-55", "a") }
val b = Strategy.Naive.apply { template.requestBody("direct:test-55", "b") }
val x = Strategy.Naive.apply { r process Message("x") }
val y = Strategy.Naive.apply { r process Message("y") }
y() must equal (Success(Message("y-done")))
x() must equal (Success(Message("x-done")))
b() must equal ("b-done")
a() must equal ("a-done")
}
}
}
class CamelTestSequential extends CamelTest
class CamelTestConcurrent extends CamelTest with ExecutorMgnt {
import java.util.concurrent.Executors
dispatchConcurrencyStrategy = Strategy.Executor(register(Executors.newFixedThreadPool(3)))
multicastConcurrencyStrategy = Strategy.Executor(register(Executors.newFixedThreadPool(3)))
processorConcurrencyStrategy = Strategy.Executor(register(Executors.newFixedThreadPool(3)))
override def afterAll = {
shutdown
super.afterAll
}
}
| krasserm/scalaz-camel | scalaz-camel-core/src/test/scala/scalaz/camel/core/CamelTest.scala | Scala | apache-2.0 | 15,056 |
package debop4s.data.orm.hibernate.utils
import java.lang.{Iterable => JIterable}
import java.util
import java.util.{Collection => JCollection, Collections, Comparator, List => JList, Locale, Map => JMap}
import debop4s.core.ValueObject
import debop4s.core.json.{JacksonSerializer, JsonSerializer}
import debop4s.core.tools.MapperTool
import debop4s.core.utils.Graphs
import debop4s.data.orm.hibernate.HibernateParameter
import debop4s.data.orm.hibernate.repository.HibernateDao
import debop4s.data.orm.model._
import org.hibernate.Session
import org.hibernate.criterion.{DetachedCriteria, Projections, Restrictions}
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
/**
* EntityUtils
* @author debop created at 2014. 5. 22.
*/
object EntityUtils {
private lazy val log = LoggerFactory.getLogger(getClass)
private final val PROPERTY_ANCESTORS: String = "ancestors"
private final val PROPERTY_DESCENDENTS: String = "descendents"
private final val jsonSerializer: JsonSerializer = new JacksonSerializer
def entityToString(entity: ValueObject): String = {
if (entity == null) "" else entity.toString
}
def asJsonText(entity: ValueObject): String = {
jsonSerializer.serializeToText(entity)
}
def assertNotCirculaHierarchy[T <: HierarchyEntity[T]](child: T, parent: T) {
if (child eq parent)
throw new IllegalArgumentException("child and parent are same.")
if (child.getDescendents.contains(parent))
throw new IllegalArgumentException("child has parent as descendents")
if (parent.getAncestors.asScala.intersect(child.getDescendents.asScala).size > 0)
throw new IllegalArgumentException("ancestors of parent and descendents of child has same thing.")
}
def setHierarchy[T <: HierarchyEntity[T]](child: T, oldParent: T, newParent: T) {
assert(child != null)
    log.trace(s"Changing the parent of the current node and updating the hierarchy... " +
              s"child=$child, oldParent=$oldParent, newParent=$newParent")
if (oldParent != null) removeHierarchy(child, oldParent)
if (newParent != null) setHierarchy(child, newParent)
}
def setHierarchy[T <: HierarchyEntity[T]](child: T, parent: T) {
if (parent == null || child == null) return
    log.trace(s"Setting the node's parent and ancestors. child=$child, parent=$parent")
parent.getDescendents.add(child)
parent.getDescendents.addAll(child.getDescendents)
parent.getAncestors.asScala.foreach { ancestor =>
ancestor.getDescendents.add(child)
ancestor.getDescendents.addAll(child.getDescendents)
}
child.getAncestors.add(parent)
child.getAncestors.addAll(parent.getAncestors)
}
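  // Usage sketch (root/branch/leaf are hypothetical HierarchyEntity instances):
  //   setHierarchy(branch, root)  // branch gains ancestor root; root gains descendent branch
  //   setHierarchy(leaf, branch)  // leaf's ancestors become {branch, root}; both now list leaf as a descendent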
def removeHierarchy[T <: HierarchyEntity[T]](child: T, parent: T) {
if (parent == null || child == null) return
    log.trace(s"Removing the node's parent and ancestors. child=$child, parent=$parent")
child.getAncestors.remove(parent)
child.getAncestors.removeAll(parent.getAncestors)
parent.getAncestors.asScala.foreach { ancestor =>
ancestor.getDescendents.remove(child)
ancestor.getDescendents.removeAll(child.getDescendents)
}
child.getDescendents.asScala.foreach { des =>
des.getAncestors.remove(parent)
des.getAncestors.removeAll(parent.getAncestors)
}
}
def getAncestorsCriteria[T <: HierarchyEntity[T]](entity: T,
session: Session,
entityClass: Class[T]): DetachedCriteria = {
DetachedCriteria
.forClass(entityClass)
.createAlias(PROPERTY_DESCENDENTS, "des")
.add(Restrictions.eq("des.id", entity.getId))
}
def getDescendentsCriteria[T <: HierarchyEntity[T]](entity: T,
session: Session,
entityClass: Class[T]): DetachedCriteria = {
DetachedCriteria
.forClass(entityClass)
.createAlias(PROPERTY_ANCESTORS, "ans")
.add(Restrictions.eq("ans.id", entity.getId))
}
def getAncestorIds[T <: HierarchyEntity[T]](entity: T,
session: Session,
entityClass: Class[T]): DetachedCriteria = {
getAncestorsCriteria(entity, session, entityClass)
.setProjection(Projections.distinct(Projections.id))
}
def getDescendentIds[T <: HierarchyEntity[T]](entity: T,
session: Session,
entityClass: Class[T]): DetachedCriteria = {
getDescendentsCriteria(entity, session, entityClass)
.setProjection(Projections.distinct(Projections.id))
}
/**
   * HQL statement that retrieves entities having the given locale key.
*/
private final val GET_LIST_BY_LOCALE_KEY: String =
"select distinct loen from %s loen where :key in indices (loen.localeMap)"
/**
   * HQL statement that retrieves entities by the given locale property value.
*/
private final val GET_LIST_BY_LOCALE_PROPERTY: String =
"select distinct loen from %s loen join loen.localeMap locale where locale.%s = :%s"
def copyLocale[T <: LocaleEntity[TLocaleValue], TLocaleValue <: LocaleValue](src: T, dest: T) {
src.getLocales.asScala.foreach { locale =>
dest.addLocaleValue(locale, src.getLocaleValue(locale))
}
}
def containsLocale[T <: LocaleEntity[TLocaleValue], TLocaleValue <: LocaleValue](dao: HibernateDao,
entityClass: Class[T],
locale: Locale): JList[T] = {
val hql = GET_LIST_BY_LOCALE_KEY.format(entityClass.getName)
dao.findByHql(hql, new HibernateParameter("key", locale)).asInstanceOf[JList[T]]
}
final val GET_LIST_BY_META_KEY =
"select distinct me from %s me where :key in indices(me.metaMap)"
final val GET_LIST_BY_META_VALUE =
"select distinct me from %s me join me.metaMap meta where meta.value = :value"
def containsMetaKey[T <: MetaEntity](dao: HibernateDao, entityClass: Class[T], key: String): JList[T] = {
val hql = String.format(GET_LIST_BY_META_KEY, entityClass.getName)
dao.findByHql(hql, new HibernateParameter("key", key)).asInstanceOf[JList[T]]
}
def containsMetaValue[T <: MetaEntity](dao: HibernateDao, entityClass: Class[T], value: String): JList[T] = {
    val hql = String.format(GET_LIST_BY_META_VALUE, entityClass.getName)
dao.findByHql(hql, new HibernateParameter("value", value)).asInstanceOf[JList[T]]
}
def mapEntity[S, T](source: S, target: T): T = {
MapperTool.map(source, target)
target
}
def mapEntity[S, T](source: S, targetClass: Class[T]): T = {
MapperTool.createMap(source, targetClass)
}
def mapEntities[S, T](sources: JList[S], targets: JList[T]): JList[T] = {
val size: Int = sources.size min targets.size
var i = 0
while (i < size) {
MapperTool.map(sources.get(i), targets.get(i))
i += 1
}
// for (i <- 0 until size) {
// MapperTool.map(sources.get(i), targets.get(i))
// }
targets
}
def mapEntities[S, T](sources: JList[S], targetClass: Class[T]): JList[T] = {
val targets = new util.ArrayList[T]()
var i = 0
while (i < sources.size()) {
targets.add(mapEntity(sources.get(i), targetClass))
i += 1
}
targets
}
@Deprecated
def mapEntitiesAsParallel[S, T](sources: JList[S], targetClass: Class[T]): JList[T] = {
val targets = new util.ArrayList[T]()
var i = 0
while (i < sources.size()) {
targets.add(mapEntity(sources.get(i), targetClass))
i += 1
}
targets
}
def updateTreeNodePosition[T <: TreeEntity[T]](entity: T) {
assert(entity != null)
val np = entity.getNodePosition
if (entity.getParent != null) {
np.setLevel(entity.getParent.getNodePosition.getLevel + 1)
if (!entity.getParent.getChildren.contains(entity)) {
np.setOrder(entity.getParent.getChildren.size)
}
}
else {
np.setPosition(0, 0)
}
}
def getChildCount[T <: TreeEntity[T]](dao: HibernateDao, entity: T): Long = {
val dc = DetachedCriteria.forClass(entity.getClass)
dc.add(Restrictions.eq("parent", entity))
dao.count(dc)
}
def hasChildren[T <: TreeEntity[T]](dao: HibernateDao, entity: T): Boolean = {
val dc: DetachedCriteria = DetachedCriteria.forClass(entity.getClass)
dc.add(Restrictions.eq("parent", entity))
dao.exists(dc)
}
def setNodeOrder[T <: TreeEntity[T]](node: T, order: Int) {
require(node != null)
if (node.getParent != null) {
node.getParent.getChildren.asScala.foreach { child =>
if (child.getNodePosition.getOrder >= order) {
child.getNodePosition.setOrder(child.getNodePosition.getOrder + 1)
}
}
}
node.getNodePosition.setOrder(order)
}
def adjustChildOrders[T <: TreeEntity[T]](parent: T) {
require(parent != null)
val children = new util.ArrayList[T](parent.getChildren)
Collections.sort(children, new Comparator[T] {
def compare(o1: T, o2: T): Int = {
o1.getNodePosition.getOrder - o2.getNodePosition.getOrder
}
})
var order: Int = 0
children.asScala.foreach { node =>
node.getNodePosition.setOrder(order)
order += 1
}
}
def changeParent[T <: TreeEntity[T]](node: T, oldParent: T, newParent: T) {
require(node != null)
if (oldParent != null) {
oldParent.getChildren.remove(node)
}
if (newParent != null) {
newParent.getChildren.add(node)
}
node.setParent(newParent)
updateTreeNodePosition(node)
}
def setParent[T <: TreeEntity[T]](node: T, parent: T) {
require(node != null)
changeParent(node, node.getParent, parent)
}
def insertChildNode[T <: TreeEntity[T]](parent: T, child: T, order: Int) {
assert(parent != null)
assert(child != null)
val ord = math.max(0, math.min(order, parent.getChildren.size - 1))
parent.addChild(child)
setNodeOrder(child, ord)
}
def getAncestors[T <: TreeEntity[T]](current: T): JIterable[T] = {
val ancestors: JList[T] = new util.ArrayList[T]()
if (current != null) {
var parent: T = current
while (parent != null) {
ancestors.add(parent)
parent = parent.getParent
}
}
ancestors
}
def getDescendents[T <: TreeEntity[T]](current: T): JIterable[T] = {
Graphs.depthFirstScan(current, (x: T) => x.getChildren)
}
def getRoot[T <: TreeEntity[T]](current: T): T = {
if (current == null) return current
var root: T = current
var parent: T = current.getParent
while (parent != null) {
root = parent
parent = parent.getParent
}
root
}
}
| debop/debop4s | debop4s-data-orm/src/main/scala/debop4s/data/orm/hibernate/utils/EntityUtils.scala | Scala | apache-2.0 | 10,900 |
package nibbler
import nibbler.io.HistdataTimestampParser
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.mock.MockitoSugar
class HistdataTimestampParserTest extends FunSuite with MockitoSugar with ShouldMatchers {
test("should parse timestamp") {
// Given
val timestampAsString = "20000530 172736000"
// When
val parser = new HistdataTimestampParser
val timestamp = parser.parse(timestampAsString)
// Then
timestamp should equal(959704056000L)
}
}
| pkoperek/nibbler | src/test/scala/nibbler/HistdataTimestampParserTest.scala | Scala | gpl-3.0 | 536 |
package scalaxy.extensions
package test
import org.junit._
import Assert._
import scalaxy.debug._
import java.io._
import scala.collection.mutable
import scala.reflect.internal.util._
import scala.tools.nsc._
import scala.tools.nsc.plugins._
import scala.tools.nsc.reporters._
trait TestBase {
import MacroExtensionsCompiler.jarOf
lazy val jars =
jarOf(classOf[List[_]]).toSeq ++
jarOf(classOf[scala.reflect.macros.Context]) ++
jarOf(classOf[Global])
def transform(code: String, name: String): String
def assertSameTransform(original: String, equivalent: String) {
val expected = transform(equivalent, "equiv")
val actual = transform(original, "orig")
if (actual != expected) {
println(s"EXPECTED\\n\\t" + expected.replaceAll("\\n", "\\n\\t"))
println(s"ACTUAL\\n\\t" + actual.replaceAll("\\n", "\\n\\t"))
assertEquals(expected, actual)
}
}
def expectException(reason: String)(block: => Unit) {
var failed = true
try {
block
failed = false
} catch { case ex: Throwable =>
//ex.printStackTrace()
}
if (!failed)
fail(s"Code should not have compiled: $reason")
}
//def normalize(s: String) = s.trim.replaceAll("^\\\\s*|\\\\s*?$", "")
def transformCode(code: String, name: String, macroExtensions: Boolean, runtimeExtensions: Boolean, useUntypedReify: Boolean): (String, String, Seq[(AbstractReporter#Severity, String)]) = {
val settings0 = new Settings
val file = {
val file = File.createTempFile(name, ".scala")
file.deleteOnExit()
val out = new PrintWriter(file)
out.print(code)
out.close()
file
}
try {
val args = Array(file.toString)
val command =
new CompilerCommand(
List("-bootclasspath", jars.mkString(File.pathSeparator)) ++ args, settings0)
require(command.ok)
val report = mutable.ArrayBuffer[(AbstractReporter#Severity, String)]()
var transformed: (String, String) = null
val reporter = new AbstractReporter {
override val settings = settings0
override def displayPrompt() {}
override def display(pos: Position, msg: String, severity: Severity) {
//println(s"[$name]: $severity: $msg")
report += severity -> msg
}
}
val global = new Global(settings0, reporter) {
override protected def computeInternalPhases() {
super.computeInternalPhases
val comp = new MacroExtensionsComponent(this, macroExtensions = macroExtensions, runtimeExtensions = runtimeExtensions, useUntypedReify = useUntypedReify)
phasesSet += comp
// Get node string right after macro extensions component.
phasesSet += new TestComponent(this, comp, (s, n) => transformed = s -> n)
// Stop compilation after typer and refchecks, to see if there are errors.
if ((System.getenv("SCALAXY_TEST_COMPILE_FULLY") == "1" || System.getProperty("scalaxy.test.compile.fully") == "true")) {
println("COMPILING TESTS FULLY!")
} else {
phasesSet += new StopComponent(this)
}
}
}
new global.Run().compile(command.files)
//println(s"errs = ${report.toSeq}")
val errors = report.filter(_._1.toString == "ERROR")
if (!errors.isEmpty)
sys.error("Found " + errors.size + " errors: " + errors.mkString(", "))
(transformed._1, transformed._2, report.toSeq)
} finally {
file.delete()
}
}
class TestComponent(
val global: Global,
after: PluginComponent,
out: (String, String) => Unit) extends PluginComponent
{
import global._
override val phaseName = "after-" + after.phaseName
override val runsRightAfter = Some(after.phaseName)
override val runsAfter = runsRightAfter.toList
override val runsBefore = List("patmat")
def newPhase(prev: Phase): StdPhase = new StdPhase(prev) {
def apply(unit: CompilationUnit) {
out(unit.body.toString, nodeToString(unit.body))
unit.body = EmptyTree
}
}
}
class StopComponent(val global: Global) extends PluginComponent
{
import global._
override val phaseName = "stop"
override val runsRightAfter = Some("refchecks")
override val runsAfter = runsRightAfter.toList
override val runsBefore = Nil
def newPhase(prev: Phase): StdPhase = new StdPhase(prev) {
def apply(unit: CompilationUnit) {
unit.body = EmptyTree
}
}
}
}
| nativelibs4java/Scalaxy | Obsolete/MacroExtensions/src/test/scala/scalaxy/extensions/TestBase.scala | Scala | bsd-3-clause | 4,518 |
package com.sksamuel.elastic4s
/**
* Converts between scala types and types that Elasticsearch understands.
*/
object FieldsMapper {
import scala.collection.JavaConverters._
def mapper(m: Map[String, Any]): Map[String, AnyRef] = {
m map {
case null => null
case (name: String, nest: Map[_, _]) => name -> mapper(nest.asInstanceOf[Map[String, Any]]).asJava
case (name: String, iter: Iterable[_]) => name -> iter.map(mapper).toArray
case (name: String, a: AnyRef) => name -> a
case (name: String, a: Any) => name -> a.toString
}
}
def mapper(a: Any): AnyRef = {
a match {
case map: Map[_, _] => map.map { case (key, value) => key -> mapper(value) }.asJava
case iter: Iterable[_] => iter.map(mapper).toArray
case a: AnyRef => a
case a: Any => a.toString
case null => null
}
}
def mapFields(fields: Map[String, Any]): Seq[FieldValue] = {
fields map {
case (name: String, nest: Map[_, _]) =>
val nestedFields = mapFields(nest.asInstanceOf[Map[String, Any]])
NestedFieldValue(Some(name), nestedFields)
case (name: String, nest: Array[Map[_, _]]) =>
val nested = nest.map(n => new NestedFieldValue(None, mapFields(n.asInstanceOf[Map[String, Any]])))
ArrayFieldValue(name, nested)
case (name: String, arr: Array[Any]) =>
val values = arr.map(new SimpleFieldValue(None, _))
ArrayFieldValue(name, values)
case (name: String, a: FieldValue) =>
NestedFieldValue(name, Seq(a))
case (name: String, s: Iterable[_]) =>
s.headOption match {
case Some(m: Map[_, _]) =>
val nested = s.map(n => new NestedFieldValue(None, mapFields(n.asInstanceOf[Map[String, Any]])))
ArrayFieldValue(name, nested.toSeq)
case Some(a: Any) =>
val values = s.map(new SimpleFieldValue(None, _))
ArrayFieldValue(name, values.toSeq)
case _ =>
            // can't determine the element type, or the collection is empty - map to empty
ArrayFieldValue(name, Seq.empty)
}
case (name: String, a: Any) =>
SimpleFieldValue(Some(name), a)
case (name: String, _) =>
NullFieldValue(name)
}
}.toSeq
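  // Mapping sketch (input values are illustrative): nested maps become NestedFieldValue,
  // iterables of scalars become ArrayFieldValue, plain values become SimpleFieldValue.
  //   mapFields(Map("name" -> "sam", "address" -> Map("city" -> "x"), "tags" -> Seq("a", "b")))
  //   // => Seq(SimpleFieldValue(Some("name"), "sam"),
  //   //        NestedFieldValue(Some("address"), Seq(SimpleFieldValue(Some("city"), "x"))),
  //   //        ArrayFieldValue("tags", Seq(SimpleFieldValue(None, "a"), SimpleFieldValue(None, "b"))))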
}
| aroundus-inc/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/FieldsMapper.scala | Scala | apache-2.0 | 2,245 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import java.io.File
import org.apache.spark.sql.{SaveMode, AnalysisException, Row}
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.util.Utils
class InsertSuite extends DataSourceTest with SharedSQLContext {
protected override lazy val sql = caseInsensitiveContext.sql _
private lazy val sparkContext = caseInsensitiveContext.sparkContext
private var path: File = null
override def beforeAll(): Unit = {
super.beforeAll()
    // create the temporary directory where the JSON data will be saved
    path = Utils.createTempDir()
    val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""))
    // read.json takes an RDD[String] of JSON documents
caseInsensitiveContext.read.json(rdd).registerTempTable("jt")
sql(
      // USING accepts either org.apache.spark.sql.json.DefaultSource or simply json
s"""
|CREATE TEMPORARY TABLE jsonTable (a int, b string)
|USING json
|OPTIONS (
| path '${path.toString}'
|)
""".stripMargin)
}
override def afterAll(): Unit = {
try {
      // drop the temporary tables
      caseInsensitiveContext.dropTempTable("jsonTable")
      caseInsensitiveContext.dropTempTable("jt")
      // recursively delete the JSON data directory
Utils.deleteRecursively(path)
} finally {
super.afterAll()
}
}
  test("Simple INSERT OVERWRITE a JSONRelation") { // a simple INSERT OVERWRITE of a JSON relation
    // insert data with INSERT OVERWRITE TABLE
    // Overwrite mode first deletes the existing data, then writes the new data
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt
""".stripMargin)
checkAnswer(
sql("SELECT a, b FROM jsonTable"),
(1 to 10).map(i => Row(i, s"str$i"))
)
}
  test("PreInsert casting and renaming") { // pre-insert casting and column renaming
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a * 2, a * 4 FROM jt
""".stripMargin)
/**
*+---+---+
| a| b|
+---+---+
| 2| 4|
| 4| 8|
| 6| 12|
| 8| 16|
| 10| 20|
| 12| 24|
| 14| 28|
| 16| 32|
| 18| 36|
| 20| 40|
+---+---+*/
sql("SELECT a, b FROM jsonTable").show(1)
checkAnswer(
sql("SELECT a, b FROM jsonTable"),
(1 to 10).map(i => Row(i * 2, s"${i * 4}"))
)
    // rename the columns
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a * 4 AS A, a * 6 as c FROM jt
""".stripMargin)
checkAnswer(
sql("SELECT a, b FROM jsonTable"),
(1 to 10).map(i => Row(i * 4, s"${i * 6}"))
)
}
  // generating a different number of columns than the schema is not allowed
test("SELECT clause generating a different number of columns is not allowed.") {
val message = intercept[RuntimeException] {
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a FROM jt
""".stripMargin)
}.getMessage
assert(
message.contains("generates the same number of columns as its schema"),
"SELECT clause generating a different number of columns should not be not allowed."
)
}
test("INSERT OVERWRITE a JSONRelation multiple times") {
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt
""".stripMargin)
checkAnswer(
sql("SELECT a, b FROM jsonTable"),
(1 to 10).map(i => Row(i, s"str$i"))
)
    // Writing the table to fewer part files.
val rdd1 = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""), 5)
caseInsensitiveContext.read.json(rdd1).registerTempTable("jt1")
    // insert into the table from the RDD-backed temp table
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt1
""".stripMargin)
checkAnswer(
sql("SELECT a, b FROM jsonTable"),
(1 to 10).map(i => Row(i, s"str$i"))
)
    // Writing the table to more part files.
val rdd2 = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}"""), 10)
    // read the RDD[String] data as JSON
caseInsensitiveContext.read.json(rdd2).registerTempTable("jt2")
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt2
""".stripMargin)
checkAnswer(
sql("SELECT a, b FROM jsonTable"),
(1 to 10).map(i => Row(i, s"str$i"))
)
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a * 10, b FROM jt1
""".stripMargin)
checkAnswer(
sql("SELECT a, b FROM jsonTable"),
(1 to 10).map(i => Row(i * 10, s"str$i"))
)
caseInsensitiveContext.dropTempTable("jt1")
caseInsensitiveContext.dropTempTable("jt2")
}
test("INSERT INTO JSONRelation for now") {
    // overwrite the table data
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt
""".stripMargin)
checkAnswer(
sql("SELECT a, b FROM jsonTable"),
sql("SELECT a, b FROM jt").collect()
)
    // insert into the table; note there is no OVERWRITE here
    // (Overwrite mode would first delete the existing data, then write the new data)
sql(
s"""
|INSERT INTO TABLE jsonTable SELECT a, b FROM jt
""".stripMargin)
checkAnswer(
sql("SELECT a, b FROM jsonTable"),
sql("SELECT a, b FROM jt UNION ALL SELECT a, b FROM jt").collect()
)
}
  // writing to a table while querying it is not allowed
test("it is not allowed to write to a table while querying it.") {
val message = intercept[AnalysisException] {
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jsonTable
""".stripMargin)
}.getMessage
assert(
message.contains("Cannot insert overwrite into table that is also being read from."),
"INSERT OVERWRITE to a table while querying it should not be allowed.")
}
  test("Caching") {
    // write something to the jsonTable
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a, b FROM jt
""".stripMargin)
    // Cached query execution
caseInsensitiveContext.cacheTable("jsonTable")
    // check that queries against jsonTable use the cached data
assertCached(sql("SELECT * FROM jsonTable"))
checkAnswer(
sql("SELECT * FROM jsonTable"),
(1 to 10).map(i => Row(i, s"str$i")))
assertCached(sql("SELECT a FROM jsonTable"))
checkAnswer(
sql("SELECT a FROM jsonTable"),
(1 to 10).map(Row(_)).toSeq)
assertCached(sql("SELECT a FROM jsonTable WHERE a < 5"))
checkAnswer(
sql("SELECT a FROM jsonTable WHERE a < 5"),
(1 to 4).map(Row(_)).toSeq)
assertCached(sql("SELECT a * 2 FROM jsonTable"))
checkAnswer(
sql("SELECT a * 2 FROM jsonTable"),
(1 to 10).map(i => Row(i * 2)).toSeq)
assertCached(sql(
"SELECT x.a, y.a FROM jsonTable x JOIN jsonTable y ON x.a = y.a + 1"), 2)
checkAnswer(sql(
"SELECT x.a, y.a FROM jsonTable x JOIN jsonTable y ON x.a = y.a + 1"),
(2 to 10).map(i => Row(i, i - 1)).toSeq)
    // Insert overwrite and keep the same schema.
sql(
s"""
|INSERT OVERWRITE TABLE jsonTable SELECT a * 2, b FROM jt
""".stripMargin)
// jsonTable should be recached.
assertCached(sql("SELECT * FROM jsonTable"))
// TODO we need to invalidate the cached data in InsertIntoHadoopFsRelation
// // The cached data is the new data.
// checkAnswer(
// sql("SELECT a, b FROM jsonTable"),
// sql("SELECT a * 2, b FROM jt").collect())
//
// // Verify uncaching
// caseInsensitiveContext.uncacheTable("jsonTable")
// assertCached(sql("SELECT * FROM jsonTable"), 0)
}
  // inserting into a relation that is not an InsertableRelation is not allowed
test("it's not allowed to insert into a relation that is not an InsertableRelation") {
sql(
"""
|CREATE TEMPORARY TABLE oneToTen
|USING org.apache.spark.sql.sources.SimpleScanSource
|OPTIONS (
| From '1',
| To '10'
|)
""".stripMargin)
checkAnswer(
sql("SELECT * FROM oneToTen"),
(1 to 10).map(Row(_)).toSeq
)
val message = intercept[AnalysisException] {
sql(
s"""
|INSERT OVERWRITE TABLE oneToTen SELECT CAST(a AS INT) FROM jt
""".stripMargin)
}.getMessage
assert(
message.contains("does not allow insertion."),
"It is not allowed to insert into a table that is not an InsertableRelation."
)
caseInsensitiveContext.dropTempTable("oneToTen")
}
}
| tophua/spark1.52 | sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala | Scala | apache-2.0 | 9,356 |
package test
import edu.mit.csail.cap.query._
/** Examples relying on data collected from real applications */
class SwingTextAreaDemo extends SynthesisTest {
override def default = super.default.copy(
PrintStrings = false,
PrintPrimitives = false)
def metadata = "meta_swing"
test("TextAreaDemo moveCaretPosition") {
run("swing_components_TextAreaDemo", "moveCaretPosition")
}
test("TextAreaDemo insert") {
run("swing_components_TextAreaDemo", "Runnable.run--JTextArea.insert")
}
test("TextAreaDemo actionPerformed") {
run("swing_components_TextAreaDemo", "actionPerformed")
}
test("TextAreaDemo processKeyBinding -- actionPerformed") {
run("swing_components_TextAreaDemo", "JComponent.processKeyBinding--actionPerformed")
}
test("TextFieldDemo processKeyBinding -- actionPerformed") {
run("swing_components_TextFieldDemo", "JComponent.processKeyBinding--actionPerformed")
}
}
| kyessenov/semeru | src/test/scala/test/SwingTextAreaDemo.scala | Scala | gpl-3.0 | 941 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.debug
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.test.SQLTestData.TestData
class DebuggingSuite extends SparkFunSuite with SharedSQLContext {
test("DataFrame.debug()") {
testData.debug()
}
test("Dataset.debug()") {
import testImplicits._
testData.as[TestData].debug()
}
test("debugCodegen") {
val res = codegenString(spark.range(10).groupBy("id").count().queryExecution.executedPlan)
assert(res.contains("Subtree 1 / 2"))
assert(res.contains("Subtree 2 / 2"))
assert(res.contains("Object[]"))
}
test("debugCodegenStringSeq") {
val res = codegenStringSeq(spark.range(10).groupBy("id").count().queryExecution.executedPlan)
assert(res.length == 2)
assert(res.forall{ case (subtree, code) =>
subtree.contains("Range") && code.contains("Object[]")})
}
}
| minixalpha/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/debug/DebuggingSuite.scala | Scala | apache-2.0 | 1,731 |
package ml.sparkling.graph.loaders
import org.apache.spark.SparkContext
import org.apache.spark.graphx.GraphLoader
import org.scalatest._
/**
* Created by Roman Bartusiak ([email protected] http://riomus.github.io).
*/
abstract class LoaderTest(implicit sc:SparkContext) extends FlatSpec with BeforeAndAfterAll with GivenWhenThen with Matchers{
def loadGraph(file:String)={
GraphLoader.edgeListFile(sc,file.toString).cache()
}
} | sparkling-graph/sparkling-graph | loaders/src/test/scala/ml/sparkling/graph/loaders/LoaderTest.scala | Scala | bsd-2-clause | 451 |
package play.api.db.evolutions
import java.io._
import scalax.file._
import scalax.io.JavaConverters._
import play.core._
import play.api._
import play.api.db._
import play.api.libs._
import play.api.libs.Codecs._
import javax.sql.DataSource
import java.sql.{ Statement, Date, Connection, SQLException }
import scala.util.control.Exception._
import scala.util.control.NonFatal
/**
* An SQL evolution - database changes associated with a software version.
*
 * An evolution includes ‘up’ changes, to upgrade to the version, as well as ‘down’ changes, to downgrade the database
* to the previous version.
*
* @param revision revision number
* @param sql_up the SQL statements for UP application
* @param sql_down the SQL statements for DOWN application
*/
private[evolutions] case class Evolution(revision: Int, sql_up: String = "", sql_down: String = "") {
/**
* Revision hash, automatically computed from the SQL content.
*/
val hash = sha1(sql_down.trim + sql_up.trim)
}
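// Sketch of a single revision (table and SQL are illustrative only):
//   Evolution(1,
//     sql_up = "create table users (id bigint not null, name varchar(255));",
//     sql_down = "drop table users;")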
/**
* A Script to run on the database.
*/
private[evolutions] trait Script {
/**
* Original evolution.
*/
val evolution: Evolution
/**
* The complete SQL to be run.
*/
val sql: String
/**
* The sql string separated into constituent ";"-delimited statements.
*
   * Any ";;" sequences found in the sql are unescaped to ";".
*/
def statements: Seq[String] = {
// Regex matches on semicolons that neither precede nor follow other semicolons
sql.split("(?<!;);(?!;)").map(_.trim.replace(";;", ";")).filter(_ != "")
}
}
/**
* An UP Script to run on the database.
*
* @param evolution the original evolution
* @param sql the SQL to be run
*/
private[evolutions] case class UpScript(evolution: Evolution, sql: String) extends Script
/**
* A DOWN Script to run on the database.
*
* @param evolution the original evolution
* @param sql the SQL to be run
*/
private[evolutions] case class DownScript(evolution: Evolution, sql: String) extends Script
/**
* Defines Evolutions utilities functions.
*/
object Evolutions {
/**
* Apply pending evolutions for the given DB.
*/
def applyFor(dbName: String, path: java.io.File = new java.io.File(".")) {
Play.current.plugin[DBPlugin] map { db =>
val script = Evolutions.evolutionScript(db.api, path, db.getClass.getClassLoader, dbName)
Evolutions.applyScript(db.api, dbName, script)
}
}
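  // Usage sketch (assumes a running application with a configured "default" datasource):
  //   Evolutions.applyFor("default", new java.io.File("."))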
/**
* Updates a local (file-based) evolution script.
*/
def updateEvolutionScript(db: String = "default", revision: Int = 1, comment: String = "Generated", ups: String, downs: String)(implicit application: Application) {
import play.api.libs._
val evolutions = application.getFile("conf/evolutions/" + db + "/" + revision + ".sql");
Files.createDirectory(application.getFile("conf/evolutions/" + db));
Files.writeFileIfChanged(evolutions,
"""|# --- %s
|
|# --- !Ups
|%s
|
|# --- !Downs
|%s
|
|""".stripMargin.format(comment, ups, downs));
}
// --
private def executeQuery(sql: String)(implicit c: Connection) = {
c.createStatement.executeQuery(sql)
}
private def execute(sql: String)(implicit c: Connection) = {
c.createStatement.execute(sql)
}
private def prepare(sql: String)(implicit c: Connection) = {
c.prepareStatement(sql)
}
// --
/**
* Resolves evolution conflicts.
*
* @param api the `DBApi` to use
* @param db the database name
* @param revision the revision to mark as resolved
*/
def resolve(api: DBApi, db: String, revision: Int) {
implicit val connection = api.getConnection(db, autocommit = true)
try {
execute("update play_evolutions set state = 'applied' where state = 'applying_up' and id = " + revision);
execute("delete from play_evolutions where state = 'applying_down' and id = " + revision);
} finally {
connection.close()
}
}
/**
* Checks the evolutions state.
*
* @param api the `DBApi` to use
* @param db the database name
   * @throws InconsistentDatabase if the database is in an inconsistent state
*/
def checkEvolutionsState(api: DBApi, db: String) {
implicit val connection = api.getConnection(db, autocommit = true)
try {
val problem = executeQuery("select id, hash, apply_script, revert_script, state, last_problem from play_evolutions where state like 'applying_%'")
if (problem.next) {
val revision = problem.getInt("id")
val state = problem.getString("state")
val hash = problem.getString("hash").substring(0, 7)
val script = state match {
case "applying_up" => problem.getString("apply_script")
case _ => problem.getString("revert_script")
}
val error = problem.getString("last_problem")
Logger("play").error(error)
val humanScript = "# --- Rev:" + revision + "," + (if (state == "applying_up") "Ups" else "Downs") + " - " + hash + "\\n\\n" + script;
throw InconsistentDatabase(db, humanScript, error, revision)
}
} catch {
case e: InconsistentDatabase => throw e
case NonFatal(_) => try {
execute(
"""
create table play_evolutions (
id int not null primary key, hash varchar(255) not null,
applied_at timestamp not null,
apply_script text,
revert_script text,
state varchar(255),
last_problem text
)
""")
} catch { case NonFatal(ex) => Logger.warn("play_evolutions table already existed") }
} finally {
connection.close()
}
}
/**
* Applies a script to the database.
*
* @param api the `DBApi` to use
* @param db the database name
* @param script the script to run
*/
def applyScript(api: DBApi, db: String, script: Seq[Script]) {
implicit val connection = api.getConnection(db, autocommit = true)
checkEvolutionsState(api, db)
var applying = -1
try {
script.foreach { s =>
applying = s.evolution.revision
// Insert into log
s match {
case UpScript(e, _) => {
val ps = prepare("insert into play_evolutions values(?, ?, ?, ?, ?, ?, ?)")
ps.setInt(1, e.revision)
ps.setString(2, e.hash)
ps.setDate(3, new Date(System.currentTimeMillis()))
ps.setString(4, e.sql_up)
ps.setString(5, e.sql_down)
ps.setString(6, "applying_up")
ps.setString(7, "")
ps.execute()
}
case DownScript(e, _) => {
execute("update play_evolutions set state = 'applying_down' where id = " + e.revision)
}
}
// Execute script
s.statements.foreach(execute)
// Insert into logs
s match {
case UpScript(e, _) => {
execute("update play_evolutions set state = 'applied' where id = " + e.revision)
}
case DownScript(e, _) => {
execute("delete from play_evolutions where id = " + e.revision)
}
}
}
} catch {
case NonFatal(e) => {
val message = e match {
case ex: SQLException => ex.getMessage + " [ERROR:" + ex.getErrorCode + ", SQLSTATE:" + ex.getSQLState + "]"
case ex => ex.getMessage
}
val ps = prepare("update play_evolutions set last_problem = ? where id = ?")
ps.setString(1, message)
ps.setInt(2, applying)
ps.execute()
}
} finally {
connection.close()
}
checkEvolutionsState(api, db)
}
/**
* Translates an evolution script to something human-readable.
*
   * @param script the script to format
* @return a formatted script
*/
def toHumanReadableScript(script: Seq[Script]): String = {
val txt = script.map {
case UpScript(ev, sql) => "# --- Rev:" + ev.revision + ",Ups - " + ev.hash.take(7) + "\\n" + sql + "\\n"
case DownScript(ev, sql) => "# --- Rev:" + ev.revision + ",Downs - " + ev.hash.take(7) + "\\n" + sql + "\\n"
}.mkString("\\n")
script.find {
case DownScript(_, _) => true
case UpScript(_, _) => false
    }.map(_ => "# !!! WARNING! This script contains DOWNS evolutions that are likely destructive\\n\\n").getOrElse("") + txt
}
/**
* Computes the evolution script.
*
* @param api the `DBApi` to use
* @param applicationPath the application path
* @param db the database name
*/
def evolutionScript(api: DBApi, path: File, applicationClassloader: ClassLoader, db: String): Seq[Product with Serializable with Script] = {
val application = applicationEvolutions(path, applicationClassloader, db)
Option(application).filterNot(_.isEmpty).map {
case application =>
val database = databaseEvolutions(api, db)
val (nonConflictingDowns, dRest) = database.span(e => !application.headOption.exists(e.revision <= _.revision))
val (nonConflictingUps, uRest) = application.span(e => !database.headOption.exists(_.revision >= e.revision))
val (conflictingDowns, conflictingUps) = dRest.zip(uRest).takeWhile {
case (down, up) => down.hash != up.hash
}.unzip
val ups = (nonConflictingUps ++ conflictingUps).reverse.map(e => UpScript(e, e.sql_up))
val downs = (nonConflictingDowns ++ conflictingDowns).map(e => DownScript(e, e.sql_down))
downs ++ ups
}.getOrElse(Nil)
}
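  // Ordering sketch (revisions are illustrative): if the database holds revisions 1, 2 and 3 while
  // the application now provides revision 1 and a modified revision 2, the resulting script is
  //   DownScript(3), DownScript(2), UpScript(2 modified)
  // i.e. conflicting database revisions are reverted from the highest down before the new ups are applied.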
/**
* Reads evolutions from the database.
*
* @param api the `DBApi` to use
* @param db the database name
*/
def databaseEvolutions(api: DBApi, db: String): Seq[Evolution] = {
implicit val connection = api.getConnection(db, autocommit = true)
checkEvolutionsState(api, db)
try {
Collections.unfoldLeft(executeQuery(
"""
select id, hash, apply_script, revert_script from play_evolutions order by id
""")) { rs =>
rs.next match {
case false => None
case true => {
Some((rs, Evolution(
rs.getInt(1),
rs.getString(3),
rs.getString(4))))
}
}
}
} finally {
connection.close()
}
}
/**
* Reads the evolutions from the application.
*
* @param db the database name
*/
def applicationEvolutions(path: File, applicationClassloader: ClassLoader, db: String): Seq[Evolution] = {
val upsMarker = """^#.*!Ups.*$""".r
val downsMarker = """^#.*!Downs.*$""".r
val UPS = "UPS"
val DOWNS = "DOWNS"
val UNKNOWN = "UNKNOWN"
val mapUpsAndDowns: PartialFunction[String, String] = {
case upsMarker() => UPS
case downsMarker() => DOWNS
case _ => UNKNOWN
}
val isMarker: PartialFunction[String, Boolean] = {
case upsMarker() => true
case downsMarker() => true
case _ => false
}
Collections.unfoldLeft(1) { revision =>
Option(new File(path, "conf/evolutions/" + db + "/" + revision + ".sql")).filter(_.exists).map(new FileInputStream(_)).orElse {
Option(applicationClassloader.getResourceAsStream("evolutions/" + db + "/" + revision + ".sql"))
}.map { stream =>
(revision + 1, (revision, stream.asInput.string))
}
}.sortBy(_._1).map {
case (revision, script) => {
val parsed = Collections.unfoldLeft(("", script.split('\\n').toList.map(_.trim))) {
case (_, Nil) => None
case (context, lines) => {
val (some, next) = lines.span(l => !isMarker(l))
Some((next.headOption.map(c => (mapUpsAndDowns(c), next.tail)).getOrElse("" -> Nil),
context -> some.mkString("\\n")))
}
}.reverse.drop(1).groupBy(i => i._1).mapValues { _.map(_._2).mkString("\\n").trim }
Evolution(
revision,
parsed.get(UPS).getOrElse(""),
parsed.get(DOWNS).getOrElse(""))
}
}.reverse
}
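  // Evolution file sketch as parsed above (conf/evolutions/default/1.sql, contents illustrative):
  //   # --- !Ups
  //   create table users (id bigint not null, name varchar(255));
  //
  //   # --- !Downs
  //   drop table users;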
}
/**
* Play Evolutions plugin.
*/
class EvolutionsPlugin(app: Application) extends Plugin with HandleWebCommandSupport {
import Evolutions._
  lazy val dbApi = app.plugin[DBPlugin].map(_.api).getOrElse(throw new Exception("there should be a database plugin registered at this point but it looks like it's not available, so evolutions won't work. Please make sure you register a db plugin properly"))
/**
* Is this plugin enabled.
*
* {{{
* evolutionplugin = disabled
* }}}
*/
override lazy val enabled = app.configuration.getConfig("db").isDefined && {
!app.configuration.getString("evolutionplugin").filter(_ == "disabled").isDefined
}
/**
* Checks the evolutions state.
*/
override def onStart() {
dbApi.datasources.foreach {
case (ds, db) => {
withLock(ds) {
val script = evolutionScript(dbApi, app.path, app.classloader, db)
val hasDown = script.find(_.isInstanceOf[DownScript]).isDefined
if (!script.isEmpty) {
app.mode match {
case Mode.Test => Evolutions.applyScript(dbApi, db, script)
case Mode.Dev if app.configuration.getBoolean("applyEvolutions." + db).filter(_ == true).isDefined => Evolutions.applyScript(dbApi, db, script)
case Mode.Prod if !hasDown && app.configuration.getBoolean("applyEvolutions." + db).filter(_ == true).isDefined => Evolutions.applyScript(dbApi, db, script)
case Mode.Prod if hasDown &&
app.configuration.getBoolean("applyEvolutions." + db).filter(_ == true).isDefined &&
app.configuration.getBoolean("applyDownEvolutions." + db).filter(_ == true).isDefined => Evolutions.applyScript(dbApi, db, script)
case Mode.Prod if hasDown => {
Logger("play").warn("Your production database [" + db + "] needs evolutions! \\n\\n" + toHumanReadableScript(script))
Logger("play").warn("Run with -DapplyEvolutions." + db + "=true and -DapplyDownEvolutions." + db + "=true if you want to run them automatically (be careful)")
throw InvalidDatabaseRevision(db, toHumanReadableScript(script))
}
case Mode.Prod => {
Logger("play").warn("Your production database [" + db + "] needs evolutions! \\n\\n" + toHumanReadableScript(script))
Logger("play").warn("Run with -DapplyEvolutions." + db + "=true if you want to run them automatically (be careful)")
throw InvalidDatabaseRevision(db, toHumanReadableScript(script))
}
case _ => throw InvalidDatabaseRevision(db, toHumanReadableScript(script))
}
}
}
}
}
}
def withLock(ds: DataSource)(block: => Unit) {
if (app.configuration.getBoolean("evolutions.use.locks").filter(_ == true).isDefined) {
val c = ds.getConnection
c.setAutoCommit(false)
val s = c.createStatement()
createLockTableIfNecessary(c, s)
lock(c, s)
try {
block
} finally {
unlock(c, s)
}
} else {
block
}
}
def createLockTableIfNecessary(c: Connection, s: Statement) {
try {
val r = s.executeQuery("select lock from play_evolutions_lock")
r.close()
} catch {
case e: SQLException =>
c.rollback()
s.execute("""
create table play_evolutions_lock (
lock int not null primary key
)
""")
s.executeUpdate("insert into play_evolutions_lock (lock) values (1)")
}
}
def lock(c: Connection, s: Statement, attempts: Int = 5) {
try {
s.executeQuery("select lock from play_evolutions_lock where lock = 1 for update nowait")
} catch {
case e: SQLException =>
if (attempts == 0) throw e
else {
Logger("play").warn("Exception while attempting to lock evolutions (other node probably has lock), sleeping for 1 sec")
c.rollback()
Thread.sleep(1000)
lock(c, s, attempts - 1)
}
}
}
def unlock(c: Connection, s: Statement) {
ignoring(classOf[SQLException])(s.close())
ignoring(classOf[SQLException])(c.commit())
ignoring(classOf[SQLException])(c.close())
}
def handleWebCommand(request: play.api.mvc.RequestHeader, sbtLink: play.core.SBTLink, path: java.io.File): Option[play.api.mvc.Result] = {
val applyEvolutions = """/@evolutions/apply/([a-zA-Z0-9_]+)""".r
val resolveEvolutions = """/@evolutions/resolve/([a-zA-Z0-9_]+)/([0-9]+)""".r
request.path match {
case applyEvolutions(db) => {
Some {
val script = Evolutions.evolutionScript(dbApi, app.path, app.classloader, db)
Evolutions.applyScript(dbApi, db, script)
sbtLink.forceReload()
play.api.mvc.Results.Redirect(request.queryString.get("redirect").filterNot(_.isEmpty).map(_(0)).getOrElse("/"))
}
}
case resolveEvolutions(db, rev) => {
Some {
Evolutions.resolve(dbApi, db, rev.toInt)
sbtLink.forceReload()
play.api.mvc.Results.Redirect(request.queryString.get("redirect").filterNot(_.isEmpty).map(_(0)).getOrElse("/"))
}
}
case _ => None
}
}
}
/**
* Can be used to run off-line evolutions, i.e. outside a running application.
*/
object OfflineEvolutions {
/**
* Computes and applies an evolutions script.
*
* @param classloader the classloader used to load the driver
* @param dbName the database name
*/
def applyScript(appPath: File, classloader: ClassLoader, dbName: String) {
import play.api._
val c = Configuration.load(appPath).getConfig("db").get
val dbApi = new BoneCPApi(c, classloader)
val script = Evolutions.evolutionScript(dbApi, appPath, classloader, dbName)
if (!Play.maybeApplication.exists(_.mode == Mode.Test)) {
Logger("play").warn("Applying evolution script for database '" + dbName + "':\\n\\n" + Evolutions.toHumanReadableScript(script))
}
Evolutions.applyScript(dbApi, dbName, script)
}
/**
   * Resolves an inconsistent evolution.
*
* @param classloader the classloader used to load the driver
* @param dbName the database name
*/
def resolve(appPath: File, classloader: ClassLoader, dbName: String, revision: Int) {
import play.api._
val c = Configuration.load(appPath).getConfig("db").get
val dbApi = new BoneCPApi(c, classloader)
if (!Play.maybeApplication.exists(_.mode == Mode.Test)) {
Logger("play").warn("Resolving evolution [" + revision + "] for database '" + dbName + "'")
}
Evolutions.resolve(dbApi, dbName, revision)
}
}
/**
* Exception thrown when the database is not up to date.
*
* @param db the database name
* @param script the script to be run to resolve the conflict.
*/
case class InvalidDatabaseRevision(db: String, script: String) extends PlayException.RichDescription(
"Database '" + db + "' needs evolution!",
"An SQL script need to be run on your database.") {
def subTitle = "This SQL script must be run:"
def content = script
private val javascript = """
document.location = '/@evolutions/apply/%s?redirect=' + encodeURIComponent(location)
""".format(db).trim
def htmlDescription = {
<span>An SQL script will be run on your database -</span>
<input name="evolution-button" type="button" value="Apply this script now!" onclick={ javascript }/>
}.map(_.toString).mkString
}
/**
* Exception thrown when the database is in inconsistent state.
*
* @param db the database name
*/
case class InconsistentDatabase(db: String, script: String, error: String, rev: Int) extends PlayException.RichDescription(
"Database '" + db + "' is in inconsistent state!",
"An evolution has not been applied properly. Please check the problem and resolve it manually before marking it as resolved.") {
def subTitle = "We got the following error: " + error + ", while trying to run this SQL script:"
def content = script
private val javascript = """
document.location = '/@evolutions/resolve/%s/%s?redirect=' + encodeURIComponent(location)
""".format(db, rev).trim
def htmlDescription: String = {
<span>An evolution has not been applied properly. Please check the problem and resolve it manually before marking it as resolved -</span>
<input name="evolution-button" type="button" value="Mark it resolved" onclick={ javascript }/>
}.map(_.toString).mkString
}
| noel-yap/setter-for-catan | play-2.1.1/framework/src/play-jdbc/src/main/scala/play/api/db/evolutions/Evolutions.scala | Scala | apache-2.0 | 20,609 |
package scalan.primitives
import scalan.staged.BaseExp
import scalan.{ ScalanExp, Scalan, ScalanStd }
trait StringOps extends UnBinOps { self: Scalan =>
implicit class StringOpsCls(lhs: Rep[String]) {
def toInt = StringToInt(lhs)
def toDouble = StringToDouble(lhs)
def length = StringLength(lhs)
def apply(index: Rep[Int]) = string_apply(lhs, index)
def substring(start: Rep[Int], end: Rep[Int]) = string_substring(lhs, start, end)
def +(rhs: Rep[String]) = StringConcat(lhs, rhs)
def startsWith(rhs: Rep[String]) = StringStartsWith(lhs, rhs)
def endsWith(rhs: Rep[String]) = StringEndsWith(lhs, rhs)
def contains(rhs: Rep[String]) = StringContains(lhs, rhs)
def matches(rhs: Rep[String]) = StringMatches(lhs, rhs)
}
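  // Usage sketch inside a Scalan cake (method names are illustrative):
  //   def len(s: Rep[String]): Rep[Int] = s.length
  //   def parse(s: Rep[String]): Rep[Int] = s.toInt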
object StringObject {
lazy val empty = toRep("")
}
def string_substring(str: Rep[String], start: Rep[Int], end: Rep[Int]): Rep[String]
def string_apply(str: Rep[String], index: Rep[Int]): Rep[Char]
val StringToInt = new UnOp[String, Int]("toInt", _.toInt)
val StringToDouble = new UnOp[String, Double]("toDouble", _.toDouble)
val StringLength = new UnOp[String, Int]("length", _.length)
val StringConcat = new EndoBinOp[String]("+", _ + _)
val StringContains = new BinOp[String, Boolean]("contains", _.contains(_))
val StringStartsWith = new BinOp[String, Boolean]("startsWith", _.startsWith(_))
val StringEndsWith = new BinOp[String, Boolean]("endsWith", _.endsWith(_))
val StringMatches = new BinOp[String, Boolean]("matches", _.matches(_))
}
trait StringOpsStd extends StringOps { self: ScalanStd =>
def string_substring(str: Rep[String], start: Rep[Int], end: Rep[Int]): Rep[String] = str.substring(start, end)
def string_apply(str: Rep[String], index: Rep[Int]): Rep[Char] = str.charAt(index)
}
trait StringOpsExp extends StringOps with BaseExp { self: ScalanExp =>
case class StringSubstring(str: Rep[String], start: Rep[Int], end: Rep[Int]) extends BaseDef[String]
case class StringCharAt(str: Rep[String], index: Rep[Int]) extends BaseDef[Char]
def string_substring(str: Rep[String], start: Rep[Int], end: Rep[Int]): Rep[String] = StringSubstring(str, start, end)
def string_apply(str: Rep[String], index: Rep[Int]): Rep[Char] = StringCharAt(str, index)
override def rewriteDef[T](d: Def[T]) = d match {
case ApplyBinOp(op, x, Def(Const(""))) if op == StringConcat =>
x
case ApplyBinOp(op, Def(Const("")), x) if op == StringConcat =>
x
case ApplyBinOp(op, x, Def(Const(""))) if op == StringStartsWith || op == StringEndsWith =>
toRep(true)
case _ => super.rewriteDef(d)
}
}
| scalan/scalan | core/src/main/scala/scalan/primitives/StringOps.scala | Scala | apache-2.0 | 2,655 |
package nl.gn0s1s.julius
import scala.annotation.tailrec
import org.scalacheck._
import org.scalacheck.Prop.forAll
object JuliusSpec extends Properties("Julius") {
import Generators._
import RomanDigit._
import RomanNumeral._
property("RomanDigit.generator only generates roman digits") = forAll { r: RomanDigit =>
r match {
case I => true
case V => true
case X => true
case L => true
case C => true
case D => true
case M => true
}
}
property("RomanNumeral.generator only generates roman numerals") = forAll { n: RomanNumeral =>
n match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanNumeral should be Nulla or consist of one or more digits") = forAll { n: RomanNumeral =>
n match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(l) => l.nonEmpty
}
}
property("RomanNumeral addition is commutative") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
n.plus(m) == m.plus(n)
}
property("RomanNumeral addition is associative") = forAll { (n: RomanNumeral, m: RomanNumeral, o: RomanNumeral) =>
n.plus(m).plus(o) == n.plus(m.plus(o))
}
property("RomanNumeral addition has an identity element") = forAll(genNulla, genRomanDigits) {
(n: RomanNumeral, m: RomanNumeral) => n.plus(m) == m.plus(n) && m == n.plus(m)
}
@tailrec def checkList(l: List[RomanDigit]): Boolean = l match {
case a :: b :: rest => (a >= b) && checkList(b :: rest)
case _ => true
}
property("RomanNumeral always has its digits sorted from high to low") = forAll { n: RomanNumeral =>
n match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(l) => checkList(l)
}
}
property("RomanNumeral is optimized after addition") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
n.plus(m) match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(l) =>
!(l containsSlice List(I, I, I, I, I)) &&
!(l containsSlice List(V, V)) &&
!(l containsSlice List(X, X, X, X, X)) &&
!(l containsSlice List(L, L)) &&
!(l containsSlice List(C, C, C, C, C)) &&
!(l containsSlice List(D, D))
}
}
property("RomanNumeral is optimized after creation") = forAll { (n: RomanNumeral) =>
n == n.optimize
}
property("RomanDigit can be added to another") = forAll { (r: RomanDigit, s: RomanDigit) =>
r + s match {
case RomanNumeral.Nulla => false
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanDigit can be added to a RomanNumeral") = forAll { (n: RomanNumeral, r: RomanDigit) =>
n + r match {
case RomanNumeral.Nulla => false
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanNumeral can be added to a RomanNumeral") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
n + m match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanNumeral can be added to a RomanDigit") = forAll { (r: RomanDigit, n: RomanNumeral) =>
r + n match {
case RomanNumeral.Nulla => false
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanDigit can be multiplied by another") = forAll { (r: RomanDigit, s: RomanDigit) =>
r * s match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanNumeral can be multiplied by a RomanDigit") = forAll { (n: RomanNumeral, r: RomanDigit) =>
n * r match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanNumeral can be multiplied by another") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
n * m match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanDigit can be multiplied by a RomanNumeral") = forAll { (r: RomanDigit, n: RomanNumeral) =>
r * n match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanDigit can be subtracted from another") = forAll { (r: RomanDigit, s: RomanDigit) =>
r - s match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanDigit can be subtracted from a RomanNumeral") = forAll { (n: RomanNumeral, r: RomanDigit) =>
n - r match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanNumeral can be subtracted from a RomanNumeral") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
n - m match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanNumeral can be subtracted from a RomanDigit") = forAll { (r: RomanDigit, n: RomanNumeral) =>
r - n match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanDigit can be divided by another") = forAll { (r: RomanDigit, s: RomanDigit) =>
r / s match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanNumeral can be divided by a RomanDigit") = forAll { (n: RomanNumeral, r: RomanDigit) =>
n / r match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanNumeral can be divided by another") = forAll(genRomanNumeral, genRomanDigits) {
(n: RomanNumeral, m: RomanNumeral) =>
n / m match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanDigit can be divided by a RomanNumeral") = forAll(genRomanDigit, genRomanDigits) {
(r: RomanDigit, n: RomanNumeral) =>
r / n match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => true
}
}
property("RomanNumeral division by Nulla results in ArithmeticException") = forAll { (n: RomanNumeral) =>
Prop.throws(classOf[ArithmeticException]) { n.div(RomanNumeral.Nulla) }
}
property("RomanNumeral isOdd") = forAll { (n: RomanNumeral) =>
n.isOdd == (n.toInt % 2 != 0)
}
property("RomanNumeral doubling then halving") = forAll { (n: RomanNumeral) =>
n == n.double.halve
}
property("RomanNumeral halving then doubling") = forAll { (n: RomanNumeral) =>
{
if (n.isOdd) n == n.halve.double + I
else n == n.halve.double
}
}
property("RomanNumeral multiplication is commutative") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
n.times(m) == m.times(n)
}
property("RomanNumeral multiplication is associative") =
forAll(genLimitedRomanNumeral, genLimitedRomanNumeral, genLimitedRomanNumeral) {
(n: RomanNumeral, m: RomanNumeral, o: RomanNumeral) => n.times(m).times(o) == n.times(m.times(o))
}
property("RomanNumeral multiplication has an identity element") = forAll(genRomanNumeral) { n: RomanNumeral =>
n * I == I * n && n * I == n
}
property("RomanNumeral adding is left distributive") = forAll { (n: RomanNumeral, m: RomanNumeral, o: RomanNumeral) =>
n.times(m.plus(o)) == n.times(m).plus(n.times(o))
}
property("RomanNumeral adding is right distributive") = forAll {
(n: RomanNumeral, m: RomanNumeral, o: RomanNumeral) => m.plus(o).times(n) == m.times(n).plus(o.times(n))
}
property("RomanNumeral adding then subtracting") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
n == n.plus(m).minus(m)
}
property("RomanNumeral subtracting is left distributive") = forAll {
(n: RomanNumeral, m: RomanNumeral, o: RomanNumeral) => n.times(m.minus(o)) == n.times(m).minus(n.times(o))
}
property("RomanNumeral subtracting is right distributive") = forAll {
(n: RomanNumeral, m: RomanNumeral, o: RomanNumeral) => m.minus(o).times(n) == m.times(n).minus(o.times(n))
}
property("RomanNumeral multiplying then dividing") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
m match {
case RomanNumeral.Nulla => true
case RomanNumeral.RomanDigits(_) => n == n.times(m).div(m)
}
}
property("adding RomanNumerals and then converting to int is the same as converting to int and then adding") =
forAll { (n: RomanNumeral, m: RomanNumeral) =>
n.plus(m).toInt == n.toInt + m.toInt
}
property("RomanDigit comparison: <") = forAll { (r: RomanDigit, s: RomanDigit) =>
(r < s) == (r.toInt < s.toInt)
}
property("RomanDigit comparison: <=") = forAll { (r: RomanDigit, s: RomanDigit) =>
(r <= s) == (r.toInt <= s.toInt)
}
property("RomanDigit comparison: >") = forAll { (r: RomanDigit, s: RomanDigit) =>
(r > s) == (r.toInt > s.toInt)
}
property("RomanDigit comparison: >=") = forAll { (r: RomanDigit, s: RomanDigit) =>
(r >= s) == (r.toInt >= s.toInt)
}
property("RomanNumeral comparison: <") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
(n < m) == (n.toInt < m.toInt)
}
property("RomanNumeral comparison: <=") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
(n <= m) == (n.toInt <= m.toInt)
}
property("RomanNumeral comparison: >") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
(n > m) == (n.toInt > m.toInt)
}
property("RomanNumeral comparison: >=") = forAll { (n: RomanNumeral, m: RomanNumeral) =>
(n >= m) == (n.toInt >= m.toInt)
}
property("RomanDigit to Char and back") = forAll { (r: RomanDigit) =>
r == r.toChar.toRomanDigit.getOrElse(false)
}
property("RomanDigit from Char") = forAll { (c: Char) =>
c.toRomanDigit match {
case Some(_) => List('I', 'V', 'X', 'L', 'C', 'D', 'M').contains(c)
case None => !List('I', 'V', 'X', 'L', 'C', 'D', 'M').contains(c)
}
}
property("RomanNumeral to String and back") = forAll { (n: RomanNumeral) =>
n == n.toString.toRomanNumeral.getOrElse(false)
}
property("RomanNumeral to Int and back") = forAll { (n: RomanNumeral) =>
n == n.toInt.toRomanNumeral
}
property("RomanNumeral created from Roman Digits is validated") = forAll(genStringOfRomanDigits) { (s: String) =>
s.toRomanNumeral.isEmpty || s.toRomanNumeral.nonEmpty
}
property("RomanNumeral created from String is validated") = forAll { (s: String) =>
s.toRomanNumeral.isEmpty || s.toRomanNumeral.nonEmpty
}
}
| Philippus/julius | src/test/scala/nl/gn0s1s/julius/JuliusSpec.scala | Scala | mpl-2.0 | 10,371 |
package devnull.rest.helpers
import javax.servlet.http.HttpServletRequest
import devnull.rest.dto.FaultResponse
import devnull.rest.helpers.ResponseWrites.ResponseJson
import org.json4s.{JValue, StreamInput}
import org.json4s.native.JsonMethods
import unfiltered.directives.Directive
import unfiltered.directives.Directives._
import unfiltered.response.{BadRequest, ResponseFunction}
object EitherDirective {
type EitherDirective[T] = Directive[HttpServletRequest, ResponseFunction[Any], T]
implicit val formats = org.json4s.DefaultFormats
def withJson[T, P: Manifest](
t: P => T
): EitherDirective[Either[Throwable, Option[T]]] = {
inputStream.map(is => {
implicit val formats = org.json4s.DefaultFormats
val parse: JValue = JsonMethods.parse(new StreamInput(is))
Right(Some(t(parse.extract[P])))
})
}
def fromEither[T](either: Either[Throwable, T]): EitherDirective[T] = {
either.fold(
ex => failure(BadRequest ~> ResponseJson(FaultResponse(ex))),
a => success(a)
)
}
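  // Hypothetical usage sketch (not part of the original source): the two helpers
  // compose inside an unfiltered directive for-comprehension. `CommentRequest`,
  // `toComment` and the response below are illustrative names only.
  //
  //   for {
  //     parsed  <- withJson { req: CommentRequest => toComment(req) }
  //     comment <- fromEither(parsed)   // comment: Option[Comment]
  //   } yield Ok ~> ResponseJson(comment)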
}
| javaBin/devnull | src/main/scala/devnull/rest/helpers/EitherDirective.scala | Scala | apache-2.0 | 1,048 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.visor.commands.ping
import org.apache.ignite.cluster.ClusterNode
import java.util.concurrent._
import org.apache.ignite.visor.{VisorTag, visor}
import org.apache.ignite.visor.commands.{VisorConsoleCommand, VisorTextTable}
import visor._
import scala.collection.JavaConversions._
import scala.language.{implicitConversions, reflectiveCalls}
import scala.util.control.Breaks._
/**
* Ping result container.
*/
private class Result {
/** Total pings count. */
var total = 0
/** Successful pings count. */
var oks = 0
/** Failed pings count */
var fails = 0
/** Failed nodes. */
val failedNodes = collection.mutable.Set.empty[ClusterNode]
}
/**
* Thread that pings one node.
*/
private case class Pinger(n: ClusterNode, res: Result) extends Runnable {
assert(n != null)
assert(res != null)
override def run() {
val ok = ignite.cluster.pingNode(n.id())
res.synchronized {
res.total += 1
if (ok)
res.oks += 1
else {
res.fails += 1
res.failedNodes += n
}
}
}
}
/**
* ==Command==
* Visor 'ping' command implementation.
*
* ==Help==
* {{{
* +--------------------+
* | ping | Pings node. |
* +--------------------+
* }}}
*
* ====Specification====
* {{{
* ping {"id81 id82 ... id8k"}
* }}}
*
* ====Arguments====
* {{{
* id8k
* ID8 of the node to ping.
* }}}
*
* ====Examples====
* {{{
* ping "12345678"
* Pings node with '12345678' ID8.
* ping
* Pings all nodes in the topology.
* }}}
*/
class VisorPingCommand {
/**
* Prints error message and advise.
*
* @param errMsgs Error messages.
*/
private def scold(errMsgs: Any*) {
assert(errMsgs != null)
warn(errMsgs: _*)
warn("Type 'help ping' to see how to use this command.")
}
/**
* ===Command===
* Pings node(s) by its ID8.
*
* ===Examples===
* <ex>ping "12345678 56781234"</ex>
* Pings nodes with '12345678' and '56781234' ID8s.
*
* @param args List of node ID8s. If empty or null - pings all nodes in the topology.
*/
def ping(args: String) = breakable {
if (!isConnected)
adviseToConnect()
else {
val argLst = parseArgs(args)
val res = new Result()
var pings = List.empty[Pinger]
if (argLst.isEmpty)
pings ++= ignite.cluster.nodes().map(Pinger(_, res))
else {
for (id8 <- argLst) {
if (id8._1 != null || id8._2 == null)
scold("Invalid ID8: " + argName(id8))
else {
val ns = nodeById8(id8._2)
if (ns.size != 1)
scold("Unknown ID8: " + argName(id8))
else
pings +:= Pinger(ns.head, res)
}
}
}
if (pings.isEmpty)
scold("Topology is empty.")
else {
try
pings.map(pool.submit(_)).foreach(_.get)
catch {
case _: RejectedExecutionException => scold("Ping failed due to system error.").^^
}
val t = VisorTextTable()
// No synchronization on 'res' is needed since all threads
// are finished and joined.
t += ("Total pings", res.total)
t += ("Successful pings", res.oks + " (" + formatInt(100 * res.oks / res.total) + "%)")
t += ("Failed pings", res.fails + " (" + formatInt(100 * res.fails / res.total) + "%)")
if (res.failedNodes.nonEmpty)
t += ("Failed nodes", res.failedNodes.map(n => nodeId8Addr(n.id)))
t.render()
}
}
}
/**
* ===Command===
* Pings all nodes in the topology.
*
* ===Examples===
* <ex>ping</ex>
* Pings all nodes in the topology.
*/
def ping() {
ping("")
}
}
/**
* Companion object that does initialization of the command.
*/
object VisorPingCommand {
// Adds command's help to visor.
addHelp(
name = "ping",
shortInfo = "Pings node.",
spec = List("ping <id81> <id82> ... <id8k>"),
args = List(
("<id8k>",
"ID8 of the node to ping. Note you can also use '@n0' ... '@nn' variables as shortcut to <id8k>.")
),
examples = List(
"ping 12345678" ->
"Pings node with '12345678' ID8.",
"ping @n0" ->
"Pings node with 'specified node with ID8 taken from 'n0' memory variable.",
"ping" ->
"Pings all nodes in the topology."
),
ref = VisorConsoleCommand(cmd.ping, cmd.ping)
)
/** Singleton command. */
private val cmd = new VisorPingCommand
/**
* Singleton.
*/
def apply() = cmd
/**
* Implicit converter from visor to commands "pimp".
*
* @param vs Visor tagging trait.
*/
implicit def fromPing2Visor(vs: VisorTag): VisorPingCommand = cmd
}
| abhishek-ch/incubator-ignite | modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/ping/VisorPingCommand.scala | Scala | apache-2.0 | 6,151 |
import sbt._
import Keys._
// ===========================================================================
object BuildSettings {
val defaultSettings =
Defaults.defaultSettings ++
Resolvers.settings ++
Publishing.settings ++ Seq(
organization := "hr.element.proxykwai",
crossScalaVersions := Seq("2.9.1", "2.9.0-1", "2.9.0"),
scalaVersion <<= (crossScalaVersions) { versions => versions.head },
scalacOptions := Seq("-unchecked", "-deprecation", "-encoding", "UTF-8", "-optimise"), // , "-Yrepl-sync"
javacOptions := Seq("-deprecation", "-encoding", "UTF-8", "-source", "1.5", "-target", "1.5"),
unmanagedSourceDirectories in Compile <<= (scalaSource in Compile)( _ :: Nil),
unmanagedSourceDirectories in Test <<= (scalaSource in Test )( _ :: Nil)
)
val bsProxyKwai =
defaultSettings ++ Seq(
name := "ProxyKwai",
version := "0.0.1-SNAPSHOT",
unmanagedSourceDirectories in Compile <<= (javaSource in Compile)( _ :: Nil),
autoScalaLibrary := false,
crossPaths := false
)
}
// ---------------------------------------------------------------------------
object Dependencies {
val jasmin = "net.sf.jasmin" % "jasmin" % "2.4"
val scalaTest = "org.scalatest" %% "scalatest" % "1.6.1" % "test"
val jUnit = "junit" % "junit" % "4.10" % "test"
val depsProxyKwai =
libraryDependencies := Seq(
jasmin,
scalaTest,
jUnit
)
}
// ---------------------------------------------------------------------------
object ProxyKwaiBuild extends Build {
import Dependencies._
import BuildSettings._
lazy val proxyKwai = Project(
"ProxyKwai",
file("proxykwai"),
settings = bsProxyKwai :+ depsProxyKwai
)
}
// ===========================================================================
object Repositories {
val ElementNexus = "Element Nexus" at "http://maven.element.hr/nexus/content/groups/public/"
val ElementReleases = "Element Releases" at "http://maven.element.hr/nexus/content/repositories/releases/"
val ElementSnapshots = "Element Snapshots" at "http://maven.element.hr/nexus/content/repositories/snapshots/"
}
// ---------------------------------------------------------------------------
object Resolvers {
import Repositories._
val settings = Seq(
resolvers := Seq(ElementNexus, ElementReleases, ElementSnapshots),
externalResolvers <<= resolvers map { rs =>
Resolver.withDefaultResolvers(rs, mavenCentral = false, scalaTools = false)
}
)
}
// ---------------------------------------------------------------------------
object Publishing {
import Repositories._
val settings = Seq(
publishTo <<= (version) { version => Some(
if (version.endsWith("SNAPSHOT")) ElementSnapshots else ElementReleases
)},
credentials += Credentials(Path.userHome / ".publish" / "element.credentials"),
publishArtifact in (Compile, packageDoc) := false
)
}
// ===========================================================================
| melezov/proxykwai | project/ProxyKwaiBuild.scala | Scala | bsd-3-clause | 3,067 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.tstreams.agents.integration
import java.util.concurrent.{CountDownLatch, TimeUnit}
import com.bwsw.tstreams.agents.consumer.Offset.Newest
import com.bwsw.tstreams.agents.consumer.{ConsumerTransaction, TransactionOperator}
import com.bwsw.tstreams.agents.producer.NewProducerTransactionPolicy
import com.bwsw.tstreams.testutils.{TestStorageServer, TestUtils}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
/**
* Created by ivan on 21.05.17.
*/
class ProducerSubscriberPartitionCheckpointTest extends FlatSpec with Matchers with BeforeAndAfterAll with TestUtils {
lazy val srv = TestStorageServer.getNewClean()
override def beforeAll(): Unit = {
srv
createNewStream()
}
override def afterAll(): Unit = {
TestStorageServer.dispose(srv)
onAfterAll()
}
"Producer" should "do checkpoint(partition) correctly" in {
val TOTAL = 300
val latch = new CountDownLatch(TOTAL)
var wrongPartition = false
val producer = f.getProducer(
name = "test_producer",
partitions = Set(0, 1, 2))
val s = f.getSubscriber(name = "subscriber",
partitions = Set(0, 1, 2),
offset = Newest,
useLastOffset = false,
callback = (consumer: TransactionOperator, transaction: ConsumerTransaction) => {
latch.countDown()
if(transaction.getPartition > 0)
wrongPartition = true
})
s.start()
producer.checkpoint(0)
for (it <- 0 until TOTAL * 3) {
producer
.newTransaction(NewProducerTransactionPolicy.EnqueueIfOpened)
.send("test")
}
producer.checkpoint(0).stop()
latch.await(60, TimeUnit.SECONDS) shouldBe true
Thread.sleep(1000)
wrongPartition shouldBe false
s.stop()
}
}
| bwsw/t-streams | src/test/scala/com/bwsw/tstreams/agents/integration/ProducerSubscriberPartitionCheckpointTest.scala | Scala | apache-2.0 | 2,565 |
package charactor.core.messages
class MoveAnywhereMessage(val speed: Double)
{
} | PiotrTrzpil/charactor | src/charactor/core/messages/MoveAnywhereMessage.scala | Scala | apache-2.0 | 82 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.ems.core.user.service.impl
import org.beangle.commons.bean.Properties
import org.beangle.commons.collection.Collections
import org.beangle.commons.lang.Strings
import org.beangle.ems.core.user.model.Dimension
import org.beangle.ems.core.user.service.DataResolver
object CsvDataResolver extends DataResolver {
def marshal(field: Dimension, items: Seq[Any]): String = {
if (null == items || items.isEmpty) return ""
val properties = new collection.mutable.ListBuffer[String]
field.keyName foreach (properties += _)
field.properties foreach (x => properties ++= Strings.split(x, ","))
val sb = new StringBuilder()
if (properties.isEmpty) {
for (obj <- items) if (null != obj) sb.append(String.valueOf(obj)).append(',')
} else {
for (prop <- properties) sb.append(prop).append(';')
sb.deleteCharAt(sb.length() - 1).append(',')
for (obj <- items) {
for (prop <- properties) {
try {
val value: Any = Properties.get(obj, prop)
sb.append(String.valueOf(value)).append(';')
} catch {
case e: Exception => e.printStackTrace()
}
}
sb.deleteCharAt(sb.length() - 1)
sb.append(',')
}
}
if (sb.nonEmpty) sb.deleteCharAt(sb.length() - 1)
sb.toString()
}
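  // Illustrative example (an assumption, not from the original source): for a
  // dimension whose key/properties resolve to "id" and "name" and two items
  // (1, "a") and (2, "b"), marshal produces a header of property names followed
  // by one record per item:
  //
  //   "id;name,1;a,2;b"
  //
  // i.e. ';' separates values inside a record and ',' separates records.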
def unmarshal(field: Dimension, source: String): collection.Seq[Map[String, String]] = {
if (Strings.isEmpty(source)) return List.empty
val properties = new collection.mutable.ListBuffer[String]
field.keyName foreach (properties += _)
field.properties foreach (x => properties ++= Strings.split(x, ","))
val rs = new collection.mutable.ListBuffer[Map[String, String]]
if (properties.isEmpty) {
val datas = Strings.split(source, ",")
for (data <- datas) rs += Map(field.keyName.get -> data)
} else {
properties.clear()
var startIndex = 1
val datas = Strings.split(source, ",")
var names = Array(field.keyName.get)
names = Strings.split(datas(0), ",")
properties ++= names
(startIndex until datas.length) foreach { i =>
val obj = Collections.newMap[String, String]
val dataItems = Strings.split(datas(i), ";")
properties.indices foreach { j =>
obj.put(properties(j), dataItems(j))
}
rs += obj.toMap
}
}
rs
}
}
| beangle/ems | core/src/main/scala/org/beangle/ems/core/user/service/impl/CsvDataResolver.scala | Scala | lgpl-3.0 | 3,115 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet
object DType extends Enumeration {
type DType = Value
val Float32 = Value(0, "float32")
val Float64 = Value(1, "float64")
val Float16 = Value(2, "float16")
val UInt8 = Value(3, "uint8")
val Int32 = Value(4, "int32")
val Unknown = Value(-1, "unknown")
private[mxnet] def numOfBytes(dtype: DType): Int = {
dtype match {
case DType.UInt8 => 1
case DType.Int32 => 4
case DType.Float16 => 2
case DType.Float32 => 4
case DType.Float64 => 8
case DType.Unknown => 0
}
}
private[mxnet] def getType(dtypeStr: String): DType = {
dtypeStr match {
case "UInt8" => DType.UInt8
case "Int32" => DType.Int32
case "Float16" => DType.Float16
case "Float32" => DType.Float32
case "Float64" => DType.Float64
case _ => throw new IllegalArgumentException(
s"DType: $dtypeStr not found! please set it in DType.scala")
}
}
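  // Example (sketch): DType.numOfBytes(DType.Float32) == 4 and
  // DType.getType("Float32") == DType.Float32; an unknown name makes
  // getType throw IllegalArgumentException.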
}
| dmlc/mxnet | scala-package/core/src/main/scala/org/apache/mxnet/DType.scala | Scala | apache-2.0 | 1,746 |
package uk.co.odinconsultants.graph.impl
import java.lang.Math.pow
import scala.collection.immutable.Seq
import scala.collection.mutable.ArrayBuffer
object GraphGenerator {
type ComponentFn = Seq[VertexId] => Seq[Edge]
def and[T](t: T): T = identity(t)
def stronglyConnectedComponents: ComponentFn = { vertices =>
vertices.zip(vertices.drop(1)).map{ case(from, to) =>
val edge: Edge = (from, to)
edge
}
}
def toUniqueVertexIdIDs[T](edges: Seq[Edge]): Set[VertexId] = {
val vertices = new ArrayBuffer[VertexId]()
edges foreach { edge =>
vertices += edge._1
vertices += edge._2
}
vertices.toSet
}
def makeAGraphWith[T](n: Int, intraComponentFn: ComponentFn, interComponentFn: ComponentFn): (Seq[VertexId], Seq[Edge]) = {
val leaders = (2 to n + 1).map(pow(_, 2).toLong)
val edges = intraComponentFn(leaders) ++ interComponentFn(leaders)
(leaders, edges)
}
def eachComponentIsARing: ComponentFn = { leaders =>
val edges = new ArrayBuffer[Edge]()
var last = 0L
for (leader <- leaders) {
edges += ((leader, last))
(last + 1 to leader).foreach { VertexIdId =>
val edge = (last, VertexIdId)
edges += edge
last = VertexIdId
}
last = leader + 1
}
edges.to
}
def uniqueVertices(edges: Seq[Edge]): Set[VertexId] = edges.flatMap(edge => Seq(edge._1, edge._2)).toSet
}
| PhillHenry/palgorithms | src/test/scala/uk/co/odinconsultants/graph/impl/GraphGenerator.scala | Scala | apache-2.0 | 1,420 |
package scalariform.parser
import scalariform.lexer._
import scalariform.parser._
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
// format: +preserveSpaceBeforeArguments
class ParserTest extends FlatSpec with ShouldMatchers {
"Parser" should "throw a parse exception" in {
evaluating { parseExpression("for {x <- b if }") } should produce[ScalaParserException]
}
"Parser" should "throw a parse exception for empty match " in {
evaluating { parseExpression("a match { }") } should produce[ScalaParserException]
}
"Parser" should "produce a parse exception on a trailing close brace" in {
evaluating { parseCompilationUnit("class A{}}") } should produce[ScalaParserException]
}
"Parser" should "not throw an exception" in {
parseExpression("{ case List[String]() => 12 }")
}
// See issue #60
"Parser" should "not throw an exception on case block ending with decl" in {
parseExpression("""
args(0) match {
case "blah" =>
val x = args(0)
case _ =>
println("not blah")
}
""")
}
"Parser" should "throw a parse exception in bad package blocks" in {
evaluating { parseCompilationUnit("package a {} package b {}") } should produce[ScalaParserException]
}
// issue #44
"Parser" should "allow qualified type parameter in pattern matching" in {
parseExpression("""
{
case List[scala.Int]() => 1
case _: List[scala.Int] => 2
}
""")
}
private def parser(s: String) = new ScalaParser(ScalaLexer.tokenise(s).toArray)
private def parseExpression(s: String) = parser(s).expr
private def parseCompilationUnit(s: String) = parser(s).compilationUnit
}
| triggerNZ/scalariform | scalariform/src/test/scala/com/danieltrinh/scalariform/parser/ParserTest.scala | Scala | mit | 1,809 |
/**
* Copyright: Copyright (C) 2016, ATS Advanced Telematic Systems GmbH
* License: MPL-2.0
*/
package org.genivi.sota.core
import java.security.MessageDigest
import akka.stream.scaladsl.Sink
import akka.util.ByteString
import org.apache.commons.codec.binary.Hex
import scala.concurrent.{ExecutionContext, Future}
object DigestCalculator {
type DigestResult = String
def apply(algorithm: String = "SHA-1")(implicit ec: ExecutionContext): Sink[ByteString, Future[DigestResult]] = {
val digest = MessageDigest.getInstance(algorithm)
Sink.fold(digest) { (d, b: ByteString) =>
d.update(b.toArray)
d
} mapMaterializedValue(_.map(dd => Hex.encodeHexString(dd.digest())))
}
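  // Usage sketch (an assumption, not part of the original file): materialize the
  // sink against any ByteString source to obtain the hex digest, e.g.
  //
  //   Source.single(ByteString("payload"))
  //     .runWith(DigestCalculator("SHA-256"))   // Future[DigestResult]
  //
  // assuming an implicit Materializer and ExecutionContext are in scope.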
}
| PDXostc/rvi_sota_server | core/src/main/scala/org/genivi/sota/core/DigestCalculator.scala | Scala | mpl-2.0 | 708 |
/*
* Copyright (c) 2016 eBay Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ebay.rtran.core
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.LazyLogging
import scala.collection.JavaConversions._
import scala.util.{Failure, Success, Try}
object UpgraderMeta extends LazyLogging {
val META_PATH = "META-INF/rtran-meta.conf"
def configs = getClass.getClassLoader.getResources(META_PATH) map {url =>
Try(ConfigFactory.parseURL(url)) match {
case Success(c) => Some(c)
case Failure(e) => logger.error("Failed to load config from {}, {}", url, e)
None
}
} collect {
case Some(c) => c
}
}
| eBay/RTran | rtran-core/src/main/scala/com/ebay/rtran/core/UpgraderMeta.scala | Scala | apache-2.0 | 1,204 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package patterns
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.expressions.Literal
import org.jetbrains.plugins.scala.lang.parser.parsing.types.StableId
import org.jetbrains.plugins.scala.lang.parser.parsing.xml.pattern.XmlPattern
import org.jetbrains.plugins.scala.lang.parser.util.ParserUtils
/**
* @author Alexander Podkhalyuzin
* Date: 29.02.2008
*/
/*
* SimplePattern ::= '_'
* | varid
* | Literal
* | StableId
* | StableId '(' [Patterns [',']] ')'
* | StableId '(' [Patterns ','] [(varid | '_' ) '@'] '_' '*'')'
* |'(' [Patterns [',']] ')'
* | XmlPattern
*/
object SimplePattern {
def parse(builder: ScalaPsiBuilder): Boolean = {
val simplePatternMarker = builder.mark
builder.getTokenType match {
case ScalaTokenTypes.tUNDER =>
builder.advanceLexer() //Ate _
builder.getTokenText match {
case "*" =>
simplePatternMarker.rollbackTo()
return false
case _ =>
}
simplePatternMarker.done(ScalaElementTypes.WILDCARD_PATTERN)
return true
case ScalaTokenTypes.tLPARENTHESIS =>
builder.advanceLexer() //Ate (
builder.disableNewlines()
builder.getTokenType match {
case ScalaTokenTypes.tRPARENTHESIS =>
builder.advanceLexer() //Ate )
builder.restoreNewlinesState()
simplePatternMarker.done(ScalaElementTypes.TUPLE_PATTERN)
return true
case _ =>
}
if (Patterns.parse(builder)) {
builder.getTokenType match {
case ScalaTokenTypes.tRPARENTHESIS =>
builder.advanceLexer() //Ate )
builder.restoreNewlinesState()
simplePatternMarker.done(ScalaElementTypes.TUPLE_PATTERN)
return true
case _ =>
builder error ScalaBundle.message("rparenthesis.expected")
builder.restoreNewlinesState()
simplePatternMarker.done(ScalaElementTypes.TUPLE_PATTERN)
return true
}
}
if (Pattern parse builder) {
builder.getTokenType match {
case ScalaTokenTypes.tRPARENTHESIS =>
builder.advanceLexer() //Ate )
case _ =>
builder error ScalaBundle.message("rparenthesis.expected")
}
builder.restoreNewlinesState()
simplePatternMarker.done(ScalaElementTypes.PATTERN_IN_PARENTHESIS)
return true
}
case _ =>
}
if (InterpolationPattern parse builder) {
simplePatternMarker.done(ScalaElementTypes.INTERPOLATION_PATTERN)
return true
}
if (Literal parse builder) {
simplePatternMarker.done(ScalaElementTypes.LITERAL_PATTERN)
return true
}
if (XmlPattern.parse(builder)) {
simplePatternMarker.drop()
return true
}
if (builder.lookAhead(ScalaTokenTypes.tIDENTIFIER) &&
!builder.lookAhead(ScalaTokenTypes.tIDENTIFIER, ScalaTokenTypes.tDOT) &&
!builder.lookAhead(ScalaTokenTypes.tIDENTIFIER, ScalaTokenTypes.tLPARENTHESIS) &&
!ParserUtils.isCurrentVarId(builder)) {
val rpm = builder.mark
builder.getTokenText
builder.advanceLexer()
rpm.done(ScalaElementTypes.REFERENCE_PATTERN)
simplePatternMarker.drop()
return true
}
val rb1 = builder.mark
if (StableId parse (builder, ScalaElementTypes.REFERENCE_EXPRESSION)) {
builder.getTokenType match {
case ScalaTokenTypes.tLPARENTHESIS =>
rb1.rollbackTo()
StableId parse (builder, ScalaElementTypes.REFERENCE)
val args = builder.mark
builder.advanceLexer() //Ate (
builder.disableNewlines()
def parseSeqWildcard(withComma: Boolean): Boolean = {
if (if (withComma)
builder.lookAhead(ScalaTokenTypes.tCOMMA, ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER)
else builder.lookAhead(ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER)) {
val wild = builder.mark
if (withComma) builder.advanceLexer()
builder.getTokenType
builder.advanceLexer()
if (builder.getTokenType == ScalaTokenTypes.tIDENTIFIER && "*".equals(builder.getTokenText)) {
builder.advanceLexer()
wild.done(ScalaElementTypes.SEQ_WILDCARD)
true
} else {
wild.rollbackTo()
false
}
} else {
false
}
}
def parseSeqWildcardBinding(withComma: Boolean): Boolean = {
if (if (withComma) builder.lookAhead(ScalaTokenTypes.tCOMMA, ScalaTokenTypes.tIDENTIFIER, ScalaTokenTypes.tAT,
ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER) || builder.lookAhead(ScalaTokenTypes.tCOMMA, ScalaTokenTypes.tUNDER, ScalaTokenTypes.tAT,
ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER)
else builder.lookAhead(ScalaTokenTypes.tIDENTIFIER, ScalaTokenTypes.tAT,
ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER) || builder.lookAhead(ScalaTokenTypes.tUNDER, ScalaTokenTypes.tAT,
ScalaTokenTypes.tUNDER, ScalaTokenTypes.tIDENTIFIER)) {
val wild = builder.mark
if (withComma) builder.advanceLexer() // ,
ParserUtils.parseVarIdWithWildcardBinding(builder, wild)
} else false
}
if (!parseSeqWildcard(withComma = false) && !parseSeqWildcardBinding(withComma = false) && Pattern.parse(builder)) {
while (builder.getTokenType == ScalaTokenTypes.tCOMMA) {
builder.advanceLexer() // eat comma
if (!parseSeqWildcard(withComma = false) && !parseSeqWildcardBinding(withComma = false)) Pattern.parse(builder)
}
}
builder.getTokenType match {
case ScalaTokenTypes.tRPARENTHESIS =>
builder.advanceLexer() //Ate )
case _ =>
builder error ErrMsg("rparenthesis.expected")
}
builder.restoreNewlinesState()
args.done(ScalaElementTypes.PATTERN_ARGS)
simplePatternMarker.done(ScalaElementTypes.CONSTRUCTOR_PATTERN)
return true
case _ =>
rb1.drop()
simplePatternMarker.done(ScalaElementTypes.STABLE_REFERENCE_PATTERN)
return true
}
}
simplePatternMarker.rollbackTo()
false
}
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/parser/parsing/patterns/SimplePattern.scala | Scala | apache-2.0 | 6,837 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.dataset
import org.apache.calcite.rex.RexNode
import org.apache.flink.api.common.functions.MapFunction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.DataSet
import org.apache.flink.table.api.TableConfig
import org.apache.flink.table.codegen.{FunctionCodeGenerator, GeneratedFunction}
import org.apache.flink.table.plan.nodes.CommonScan
import org.apache.flink.table.plan.schema.RowSchema
import org.apache.flink.table.runtime.MapRunner
import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo
import org.apache.flink.types.Row
trait BatchScan extends CommonScan[Row] with DataSetRel {
protected def convertToInternalRow(
schema: RowSchema,
input: DataSet[Any],
fieldIdxs: Array[Int],
config: TableConfig,
rowtimeExpression: Option[RexNode]): DataSet[Row] = {
val inputType = input.getType
val internalType = schema.typeInfo
val hasTimeIndicator = fieldIdxs.exists(f =>
f == TimeIndicatorTypeInfo.ROWTIME_BATCH_MARKER ||
f == TimeIndicatorTypeInfo.PROCTIME_BATCH_MARKER)
// conversion
if (inputType != internalType || hasTimeIndicator) {
val function = generateConversionMapper(
config,
inputType,
internalType,
"DataSetSourceConversion",
schema.fieldNames,
fieldIdxs,
rowtimeExpression)
val runner = new MapRunner[Any, Row](
function.name,
function.code,
function.returnType)
val opName = s"from: (${schema.fieldNames.mkString(", ")})"
input.map(runner).name(opName)
}
// no conversion necessary, forward
else {
input.asInstanceOf[DataSet[Row]]
}
}
private def generateConversionMapper(
config: TableConfig,
inputType: TypeInformation[Any],
outputType: TypeInformation[Row],
conversionOperatorName: String,
fieldNames: Seq[String],
inputFieldMapping: Array[Int],
rowtimeExpression: Option[RexNode]): GeneratedFunction[MapFunction[Any, Row], Row] = {
val generator = new FunctionCodeGenerator(
config,
false,
inputType,
None,
Some(inputFieldMapping))
val conversion = generator.generateConverterResultExpression(
outputType,
fieldNames,
rowtimeExpression)
val body =
s"""
|${conversion.code}
|return ${conversion.resultTerm};
|""".stripMargin
generator.generateFunction(
"DataSetSourceConversion",
classOf[MapFunction[Any, Row]],
body,
outputType)
}
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/nodes/dataset/BatchScan.scala | Scala | apache-2.0 | 3,426 |
package gapt.expr.util
import gapt.expr.Apps
import gapt.expr.Const
import gapt.expr.Expr
import gapt.expr.ReductionRule
import gapt.expr.Var
import gapt.expr.formula.Formula
/**
* A conditional rewrite rule.
*
* An instance of this rule can be used to rewrite the left hand side
* into its right hand side only if the conditions all rewrite to ⊤.
*
* The free variables of the conditions together with those of the
* right hand side must form a subset of the free variables of the
* left hand side. The left hand side must not be a variable.
*
* @param conditions The conditions of this rewrite rule.
* @param lhs The left hand side of this rewrite rule.
* @param rhs The right hand side of this rewrite rule.
*/
case class ConditionalReductionRule( conditions: Seq[Formula], lhs: Expr, rhs: Expr ) {
require(
( conditions.flatMap { freeVariables( _ ) } ++
freeVariables( rhs ) ).toSet.subsetOf( freeVariables( lhs ) ),
"""free variables in conditions and right hand side do not form a
|subset of the free variables of the left hand side""".stripMargin )
require( !lhs.isInstanceOf[Var], "left hand side must not be a variable" )
val Apps( lhsHead @ Const( lhsHeadName, _, _ ), lhsArgs ) = lhs
val lhsArgsSize: Int = lhsArgs.size
}
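// Illustrative example (an assumption, not from the original source): a rule that
// rewrites `min x y` to `x` only when the condition `x <= y` rewrites to ⊤,
// written with gapt's expression interpolators roughly as
//
//   ConditionalReductionRule(
//     conditions = Seq( hof"x <= y" ),
//     lhs        = le"min x y",
//     rhs        = le"x" )
//
// The free variables of the condition and of the right hand side all occur in
// the left hand side, and the left hand side is not a variable, so the
// requirements above are satisfied.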
object ConditionalReductionRule {
def apply( rule: ReductionRule ): ConditionalReductionRule =
ConditionalReductionRule( List(), rule.lhs, rule.rhs )
}
| gapt/gapt | core/src/main/scala/gapt/expr/util/ConditionalReductionRule.scala | Scala | gpl-3.0 | 1,439 |
/*
Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package de.hpi.ingestion.deduplication.models
import java.util.UUID
/**
* Duplicate candidate
* @param id UUID of the candidate
* @param name name
* @param score similarity score
*/
case class Candidate(
id: UUID = UUID.randomUUID(),
name: Option[String] = None,
score: Double = 0.0
)
| bpn1/ingestion | src/main/scala/de/hpi/ingestion/deduplication/models/Candidate.scala | Scala | apache-2.0 | 912 |
package pureconfig.module.magnolia
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import _root_.magnolia._
import com.typesafe.config.{ConfigValue, ConfigValueFactory}
import pureconfig._
import pureconfig.generic.{CoproductHint, ProductHint}
/** An object containing Magnolia `combine` and `dispatch` methods to generate `ConfigWriter` instances.
*/
object MagnoliaConfigWriter {
def combine[A](ctx: CaseClass[ConfigWriter, A])(implicit hint: ProductHint[A]): ConfigWriter[A] =
if (ctx.typeName.full.startsWith("scala.Tuple")) combineTuple(ctx)
else if (ctx.isValueClass) combineValueClass(ctx)
else combineCaseClass(ctx)
private def combineCaseClass[A](ctx: CaseClass[ConfigWriter, A])(implicit hint: ProductHint[A]): ConfigWriter[A] =
new ConfigWriter[A] {
def to(a: A): ConfigValue = {
val fieldValues = ctx.parameters.map { param =>
val valueOpt = param.typeclass match {
case tc: WritesMissingKeys[param.PType @unchecked] =>
tc.toOpt(param.dereference(a))
case tc =>
Some(tc.to(param.dereference(a)))
}
hint.to(valueOpt, param.label)
}
ConfigValueFactory.fromMap(fieldValues.flatten.toMap.asJava)
}
}
private def combineTuple[A](ctx: CaseClass[ConfigWriter, A]): ConfigWriter[A] =
new ConfigWriter[A] {
override def to(a: A): ConfigValue =
ConfigValueFactory.fromIterable(ctx.parameters.map { param => param.typeclass.to(param.dereference(a)) }.asJava)
}
private def combineValueClass[A](ctx: CaseClass[ConfigWriter, A]): ConfigWriter[A] =
new ConfigWriter[A] {
override def to(a: A): ConfigValue =
ctx.parameters.map { param => param.typeclass.to(param.dereference(a)) }.head
}
def dispatch[A: ClassTag](ctx: SealedTrait[ConfigWriter, A])(implicit hint: CoproductHint[A]): ConfigWriter[A] =
new ConfigWriter[A] {
def to(a: A): ConfigValue =
ctx.dispatch(a) { subtype =>
hint.to(subtype.typeclass.to(subtype.cast(a)), subtype.typeName.short)
}
}
}
| pureconfig/pureconfig | modules/magnolia/src/main/scala/pureconfig/module/magnolia/MagnoliaConfigWriter.scala | Scala | mpl-2.0 | 2,121 |
class Foo {
val bar = new Bar {
type S = Int
type T = Int => Int
type U = [X] =>> Int
val x: Long = 2L
def y: Boolean = true
def z(): Char = 'f'
def z2()(): Char = 'g'
def w[T]: String = "a"
def w2[T](a: Null)(b: Null): Null = null
}
}
trait Bar {
type S
type T
type U <: [X] =>> Any
val x: Any
def y: Any
def z(): Any
def z2()(): Any
def w[T]: Any
def w2[T](a: Null)(b: Null): Any
}
| som-snytt/dotty | tests/pos/simpleRefinement.scala | Scala | apache-2.0 | 444 |
/* scala-stm - (c) 2009-2010, Stanford University, PPL */
package scala.concurrent.stm
/** Provides all of the operations of a `Sink[A]`, without the ability to get
* a `Sink.View`.
*
* @author Nathan Bronson
*/
trait SinkLike[-A, Context] {
/** Performs a transactional write. The new value will not be visible by
* any other threads until (and unless) `txn` successfully commits.
* Equivalent to `set(v)`.
*
* Example: {{{
* val x = Ref(0)
* atomic { implicit t =>
* ...
* x() = 10 // perform a write inside a transaction
* ...
* }
* }}}
* @param v a value to store in the `Ref`.
* @throws IllegalStateException if `txn` is not active. */
def update(v: A)(implicit txn: Context): Unit = set(v)
/** Performs a transactional write. The new value will not be visible by
* any other threads until (and unless) `txn` successfully commits.
* Equivalent to `update(v)`.
* @param v a value to store in the `Ref`.
* @throws IllegalStateException if `txn` is not active.
*/
def set(v: A)(implicit txn: Context): Unit
/** Performs a transactional write and returns true, or returns false. The
* STM implementation may choose to return false to reduce (not necessarily
* avoid) blocking. If no other threads are performing any transactional or
* atomic accesses then this method will succeed.
*/
def trySet(v: A)(implicit txn: Context): Boolean
}
| nbronson/scala-stm | src/main/scala/scala/concurrent/stm/SinkLike.scala | Scala | bsd-3-clause | 1,469 |
package se.lu.nateko.cp.sbtdeploy
import sbt._
import sbt.Keys._
import sbt.plugins.JvmPlugin
import sbtassembly.AssemblyPlugin
import sbtbuildinfo.BuildInfoPlugin
import scala.sys.process.Process
object IcosCpSbtDeployPlugin extends AutoPlugin {
override def trigger = noTrigger
override def requires = AssemblyPlugin && BuildInfoPlugin
override lazy val buildSettings = Seq()
override lazy val globalSettings = Seq()
object autoImport {
val cpDeploy = inputKey[Unit]("Deploys to production using Ansible (depends on 'infrastructure' project)")
val cpDeployTarget = settingKey[String]("Ansible target role for cpDeploy")
val cpDeployBuildInfoPackage = settingKey[String]("Java/Scala package to put BuildInfo object into")
val cpDeployPlaybook = settingKey[String]("The ansible playbook")
}
import autoImport._
import AssemblyPlugin.autoImport.assembly
import BuildInfoPlugin.autoImport._
lazy val gitChecksTask = Def.task {
val log = streams.value.log
log.info("Check git status")
val gitStatus = Process("git status -s").lineStream.mkString("").trim
if(!gitStatus.isEmpty) sys.error("Please clean your 'git status -s' before deploying!")
log.info("Check infrastructure version")
val infrastructureDir = new java.io.File("../infrastructure/").getCanonicalFile
Process("git -C " + infrastructureDir + " fetch")
if (Process("git -C " + infrastructureDir + " rev-list HEAD...origin/master --count").!!.trim.toInt > 0) {
sys.error("Your infrastructure repo is not in sync with origin/master.")
}
}
lazy val cpAnsible = Def.inputTask {
val log = streams.value.log
val args: Seq[String] = sbt.Def.spaceDelimited().parsed
val (check, test) = args.toList match{
case "to" :: "production" :: Nil =>
log.info("Performing a REAL deployment to production environment")
(false, false)
case "to" :: "test" :: Nil =>
log.info("Performing a REAL deployment to test environment")
(false, true)
case _ =>
log.warn("""Performing a TEST deployment to production environment, use\\
cpDeploy to production' for a real one""")
(true, false)
}
// The full path of the "fat" jarfile. The jarfile contains the
// entire application and this is the file we will deploy.
val jarPath = assembly.value.getCanonicalPath
// The ansible inventory file. This file contains a list of servers
// that we deploy to. Running "cpDeploy to production" will make
// ansible use our production environment and "cpDeploy to test"
// will make ansible use test servers (i.e virtual machines running
// on the developer host)
val inventory = if (test) "test.inventory" else "production.inventory"
// The name of the target, i.e the name of the current project
// ("cpauth", "data", "meta" etc).
val target = cpDeployTarget.value
val playbook = cpDeployPlaybook.value
val ansibleArgs = Seq(
// "--check" will make ansible simulate all its actions. It's
// only useful when running against the production inventory.
if (check && !test) "--check" else None,
// Add an ansible tag, e.g '-tcpdata_only'. Each ansible role that we use
// is required to have a 'project_only' tag that will only to
// (re)deployment of the jarfile, i.e it'll skip its dependencies (linux,
// nginx, docker etc)
"-t" + target + "_only",
// Add an extra ansible variable specifying which jarfile to deploy.
"-e", s"""${target}_jar_file="$jarPath"""",
// Specify which inventory to use
"-i", (if (test) "test.inventory" else "production.inventory"),
playbook
) collect { case s:String => s }
val ansibleCmd = "ansible-playbook" +: ansibleArgs
val ansibleDir = new java.io.File("../infrastructure/devops/").getCanonicalFile
val ansiblePath = ansibleDir.getAbsolutePath
if(!ansibleDir.exists || !ansibleDir.isDirectory) sys.error("Folder not found: " + ansiblePath)
log.info(ansibleCmd.mkString("RUNNING:\\n", " ", "\\nIN DIRECTORY " + ansiblePath))
Process(ansibleCmd, ansibleDir).run(true).exitValue()
}
override lazy val projectSettings = Seq(
cpDeployPlaybook := "icosprod.yml",
cpDeploy := cpAnsible.dependsOn(gitChecksTask).evaluated,
buildInfoKeys := Seq[BuildInfoKey](name, version),
buildInfoPackage := cpDeployBuildInfoPackage.value,
buildInfoKeys ++= Seq(
BuildInfoKey.action("buildTime") {java.time.Instant.now()},
BuildInfoKey.action("gitOriginRemote") {
Process("git config --get remote.origin.url").lineStream.mkString("")
},
BuildInfoKey.action("gitHash") {
Process("git rev-parse HEAD").lineStream.mkString("")
}
)
)
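  // Usage sketch (an assumption, not part of the original file): a project enables
  // the plugin and sets the two required keys, e.g. in build.sbt:
  //
  //   lazy val data = (project in file("."))
  //     .enablePlugins(IcosCpSbtDeployPlugin)
  //     .settings(
  //       cpDeployTarget := "cpdata",
  //       cpDeployBuildInfoPackage := "se.lu.nateko.cp.data"
  //     )
  //
  // after which `sbt "cpDeploy to test"` runs the Ansible playbook against the
  // test inventory, and `sbt "cpDeploy to production"` deploys for real.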
}
| ICOS-Carbon-Portal/infrastructure | sbt/icoscp-sbt-deploy/src/main/scala/se/lu/nateko/cp/sbtdeploy/IcosCpSbtDeployPlugin.scala | Scala | gpl-3.0 | 4,596 |
package edu.gemini.phase2.template.factory.impl.michelle
import edu.gemini.spModel.gemini.michelle.blueprint.SpMichelleBlueprintSpectroscopy
import edu.gemini.pot.sp.{ISPObservation, ISPGroup}
import edu.gemini.spModel.gemini.michelle.MichelleParams._
case class MichelleSpectroscopy(blueprint:SpMichelleBlueprintSpectroscopy) extends MichelleBase[SpMichelleBlueprintSpectroscopy] {
import blueprint._
// Local Imports
import Disperser.{LOW_RES_10 => LowN, LOW_RES_20 => LowQ}
// INCLUDE note 'README FOR SPECTROSCOPY' at top level of program (only needed once)
addNote("README FOR SPECTROSCOPY") in TopLevel
// INCLUDE {4} - {12} in a Target Group
// SET FPM FROM PI
// SET DISPERSER FROM PI
include(4 to 12:_*) in TargetGroup
forGroup(TargetGroup)(
setMask(fpu),
setDisperser(disperser))
// IF DISPERSER FROM PI == LowN OR LowQ,
// SET TOTAL ON-SOURCE TIME TO 600.0 in bp10
// ELSE
// SET TOTAL ON-SOURCE TIME TO 1800.0 in bp10
// INCLUDE note 'Using asteroids as standards' at top level of
// program (only needed once)
if (disperser == LowN || disperser == LowQ) {
forObs(10)(
setTimeOnSource(600.0))
} else {
forObs(10)(
setTimeOnSource(1800.0))
addNote("Using asteroids as standards") in TopLevel
}
}
| arturog8m/ocs | bundle/edu.gemini.phase2.skeleton.servlet/src/main/scala/edu/gemini/phase2/template/factory/impl/michelle/MichelleSpectroscopy.scala | Scala | bsd-3-clause | 1,292 |