code (string, 5–1M) | repo_name (string, 5–109) | path (string, 6–208) | language (1 class) | license (15 classes) | size (int64, 5–1M) |
---|---|---|---|---|---|
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert.jdbc
import java.io.InputStream
import java.nio.charset.Charset
import java.sql.{Connection, DriverManager, PreparedStatement, ResultSet}
import com.typesafe.config.Config
import org.apache.commons.io.{IOUtils, LineIterator}
import org.locationtech.geomesa.convert._
import org.locationtech.geomesa.convert.jdbc.JdbcConverter.{JdbcConfig, ResultSetIterator, StatementIterator}
import org.locationtech.geomesa.convert2.AbstractConverter.{BasicField, BasicOptions}
import org.locationtech.geomesa.convert2.transforms.Expression
import org.locationtech.geomesa.convert2.{AbstractConverter, ConverterConfig}
import org.locationtech.geomesa.utils.collection.CloseableIterator
import org.locationtech.geomesa.utils.io.{CloseWithLogging, IsCloseable}
import org.opengis.feature.simple.SimpleFeatureType
import scala.annotation.tailrec
import scala.util.Try
class JdbcConverter(sft: SimpleFeatureType, config: JdbcConfig, fields: Seq[BasicField], options: BasicOptions)
extends AbstractConverter[ResultSet, JdbcConfig, BasicField, BasicOptions](sft, config, fields, options) {
private val connection = DriverManager.getConnection(config.connection)
override protected def parse(is: InputStream, ec: EvaluationContext): CloseableIterator[ResultSet] =
new StatementIterator(connection, is, options.encoding)
override protected def values(parsed: CloseableIterator[ResultSet],
ec: EvaluationContext): CloseableIterator[Array[Any]] = {
new ResultSetIterator(parsed, ec)
}
override def close(): Unit = {
CloseWithLogging(connection)
super.close()
}
}
object JdbcConverter {
case class JdbcConfig(
`type`: String,
connection: String,
idField: Option[Expression],
caches: Map[String, Config],
userData: Map[String, Expression]
) extends ConverterConfig
implicit object LineIteratorIsCloseable extends IsCloseable[LineIterator] {
override def close(obj: LineIterator): Try[Unit] = Try(obj.close())
}
/**
* Converts the input to statements and executes them.
*
* Note: the ResultSets are not closed; this should be done by the caller
*
* @param connection connection
* @param is input
* @param encoding input encoding
*/
class StatementIterator private [JdbcConverter] (connection: Connection, is: InputStream, encoding: Charset)
extends CloseableIterator[ResultSet] {
private val statements = IOUtils.lineIterator(is, encoding) // TODO split on ; ?
private var statement: PreparedStatement = _
private var results: ResultSet = _
override final def hasNext: Boolean = results != null || {
CloseWithLogging(Option(statement))
if (!statements.hasNext) {
statement = null
results = null
false
} else {
val sql = statements.next.trim()
statement = connection.prepareCall(if (sql.endsWith(";")) { sql } else { s"$sql;" })
results = statement.executeQuery()
true
}
}
override def next(): ResultSet = {
if (!hasNext) { Iterator.empty.next() } else {
val res = results
results = null
res
}
}
override def close(): Unit = CloseWithLogging(Option(statement), statements)
}
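  // Usage sketch (illustrative only, assuming a java.sql.Connection named `connection` is
  // available): each line of the input stream is treated as one SQL statement, prepared
  // against the shared connection and executed lazily as the iterator is consumed. The
  // caller is responsible for closing the returned ResultSets.
  //
  //   val sql = "select * from features"
  //   val is  = new java.io.ByteArrayInputStream(sql.getBytes(java.nio.charset.StandardCharsets.UTF_8))
  //   val results = new StatementIterator(connection, is, java.nio.charset.StandardCharsets.UTF_8)
  //   results.foreach { rs => /* read columns; close rs when done */ }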
/**
* Converts result sets into values
*
* @param iter result sets
* @param ec evaluation context
*/
class ResultSetIterator private [JdbcConverter] (iter: CloseableIterator[ResultSet], ec: EvaluationContext)
extends CloseableIterator[Array[Any]] {
private var results: ResultSet = _
private var array: Array[Any] = _
private var hasNextResult = false
@tailrec
override final def hasNext: Boolean = hasNextResult || {
CloseWithLogging(Option(results))
if (!iter.hasNext) {
results = null
false
} else {
results = iter.next
array = Array.ofDim[Any](results.getMetaData.getColumnCount + 1)
hasNextResult = results.next()
hasNext
}
}
override def next(): Array[Any] = {
if (!hasNext) { Iterator.empty.next() } else {
ec.line += 1
// the first column will hold the entire row, but set it empty here to
// avoid the previous row being captured in mkString, below
array(0) = ""
var i = 1
while (i < array.length) {
array(i) = results.getObject(i) // note: results are 1-indexed
i += 1
}
array(0) = array.mkString // set the whole row value for reference
hasNextResult = results.next()
array
}
}
override def close(): Unit = CloseWithLogging(Option(results), iter)
}
}
| aheyne/geomesa | geomesa-convert/geomesa-convert-jdbc/src/main/scala/org/locationtech/geomesa/convert/jdbc/JdbcConverter.scala | Scala | apache-2.0 | 5,188 |
package com.twitter.finagle.loadbalancer
import com.twitter.finagle.{ClientConnection, Service, ServiceFactory}
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.util.DefaultTimer
import com.twitter.util.{Duration, Future, Time}
import java.util.concurrent.atomic.AtomicInteger
private trait Server extends ServiceFactory[Unit, Unit] {
/**
* The maximum amount of concurrent load observed.
*/
def maxLoad: Long
/**
* The total number of requests that this server received.
*/
def count: Long
}
/**
* Creates a ServiceFactory that applies a latency profile to Services
* it creates.
*/
private object ServerFactory {
/**
* Creates a [[Server]] with the given `id` and applies `nextLatency`
* latency for each request.
*/
def apply(
id: String,
nextLatency: () => Duration,
sr: StatsReceiver
) = new Server {
private val _load = new AtomicInteger(0)
private val _maxLoad = new AtomicInteger(0)
private val _numRequests = new AtomicInteger(0)
private val service = new Service[Unit, Unit] {
val numRequests = sr.counter("count")
val gauges = Seq(
sr.addGauge("load") { _load.get() },
sr.addGauge("maxload") { _maxLoad.get() }
)
def apply(req: Unit) = {
synchronized {
val l = _load.incrementAndGet()
if (l > _maxLoad.get()) _maxLoad.set(l)
}
numRequests.incr()
_numRequests.incrementAndGet()
Future.sleep(nextLatency())(DefaultTimer.twitter).ensure {
_load.decrementAndGet()
}
}
}
def maxLoad = _maxLoad.get().toLong
def count = _numRequests.get().toLong
def apply(conn: ClientConnection) = Future.value(service)
def close(deadline: Time) = Future.Done
override def toString = id
}
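  // Minimal usage sketch (names here are illustrative, not part of this file): build a server
  // that adds a constant 5 ms of latency per request and reports stats under "srv0", assuming
  // a StatsReceiver instance `sr` (e.g. a NullStatsReceiver) is available.
  //
  //   val srv = ServerFactory("srv0", () => Duration.fromMilliseconds(5), sr.scope("srv0"))
  //   val svc = srv(ClientConnection.nil)  // Future[Service[Unit, Unit]]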
}
| spockz/finagle | finagle-benchmark/src/main/scala/com/twitter/finagle/loadbalancer/ServerFactory.scala | Scala | apache-2.0 | 1,823 |
package com.datastax.spark.connector.types
import java.net.InetAddress
import java.nio.ByteBuffer
import java.util.{Calendar, GregorianCalendar, UUID, Date}
import scala.collection.JavaConversions._
import scala.collection.immutable.{TreeMap, TreeSet}
import scala.reflect.runtime.universe._
import org.joda.time.DateTime
import com.datastax.spark.connector.UDTValue.UDTValueConverter
import com.datastax.spark.connector.util.{ByteBufferUtil, Symbols}
import Symbols._
class TypeConversionException(val message: String, cause: Exception = null) extends Exception(message, cause)
/** Machinery for converting objects of any type received from Cassandra into objects of Scala types.
* Every converter knows how to convert an object to one type. See `TypeConverter`
* companion object for a list of available converters. */
trait TypeConverter[T] extends Serializable {
/** Compile time type of the converter target */
type targetType = T
/** TypeTag for the target type. */
def targetTypeTag: TypeTag[T]
/** String representation of the converter target type.*/
def targetTypeName: String = TypeTag.synchronized(
targetTypeTag.tpe.toString)
/** Returns a function converting an object into `T`. */
def convertPF: PartialFunction[Any, T]
/** Converts an object or throws a TypeConversionException if the object can't be converted. */
def convert(obj: Any): T = {
convertPF.applyOrElse(obj, (_: Any) =>
if (obj != null)
throw new TypeConversionException(s"Cannot convert object $obj of type ${obj.getClass} to $targetTypeName.")
else
throw new TypeConversionException(s"Cannot convert object $obj to $targetTypeName.")
)
}
}
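// A minimal sketch of a user-defined converter (illustrative only, not part of the connector):
// a converter just provides a TypeTag and a partial function over the accepted input types,
// and can be made visible to forType via TypeConverter.registerConverter (defined below).
//
//   object YesNoConverter extends TypeConverter[String] {
//     def targetTypeTag = TypeTag.synchronized { implicitly[TypeTag[String]] }
//     def convertPF = { case b: java.lang.Boolean => if (b) "yes" else "no" }
//   }
//   TypeConverter.registerConverter(YesNoConverter)
//   YesNoConverter.convert(java.lang.Boolean.TRUE)  // "yes"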
/** Handles nullable types and converts any null to null. */
trait NullableTypeConverter[T <: AnyRef] extends TypeConverter[T] {
override def convert(obj: Any): T =
if (obj != null)
super.convert(obj)
else
null.asInstanceOf[T]
}
/** Chains together several converters converting to the same type.
* This way you can extend functionality of any converter to support new input types. */
class ChainedTypeConverter[T](converters: TypeConverter[T]*) extends TypeConverter[T] {
def targetTypeTag = converters.head.targetTypeTag
def convertPF = converters.map(_.convertPF).reduceLeft(_ orElse _)
}
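// Sketch of extending a built-in converter (illustrative only): converters are tried left to
// right, so the stock IntConverter handles its usual inputs and the extra converter adds
// Boolean support.
//
//   val boolAwareInt = new ChainedTypeConverter(
//     TypeConverter.IntConverter,
//     new TypeConverter[Int] {
//       def targetTypeTag = TypeTag.synchronized { implicitly[TypeTag[Int]] }
//       def convertPF = { case b: java.lang.Boolean => if (b) 1 else 0 }
//     })
//   boolAwareInt.convert("42")                     // 42
//   boolAwareInt.convert(java.lang.Boolean.TRUE)   // 1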
/** Defines a set of converters and implicit functions used to look up an appropriate converter for
* a desired type. Thanks to implicit method lookup, it is possible to implement a generic
* method `CassandraRow#get`, which picks up the right converter based solely on its type argument. */
object TypeConverter {
private val AnyTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Any]]
}
implicit object AnyConverter extends TypeConverter[Any] {
def targetTypeTag = AnyTypeTag
def convertPF = {
case obj => obj
}
}
private val AnyRefTypeTag = TypeTag.synchronized {
implicitly[TypeTag[AnyRef]]
}
implicit object AnyRefConverter extends TypeConverter[AnyRef] {
def targetTypeTag = AnyRefTypeTag
def convertPF = {
case obj => obj.asInstanceOf[AnyRef]
}
}
private val BooleanTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Boolean]]
}
implicit object BooleanConverter extends TypeConverter[Boolean] {
def targetTypeTag = BooleanTypeTag
def convertPF = {
case x: java.lang.Boolean => x
case x: java.lang.Integer => x != 0
case x: java.lang.Long => x != 0L
case x: java.math.BigInteger => x != java.math.BigInteger.ZERO
case x: String => x.toBoolean
}
}
private val JavaBooleanTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.lang.Boolean]]
}
implicit object JavaBooleanConverter extends NullableTypeConverter[java.lang.Boolean] {
def targetTypeTag = JavaBooleanTypeTag
def convertPF = BooleanConverter.convertPF.andThen(_.asInstanceOf[java.lang.Boolean])
}
private val ByteTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Byte]]
}
implicit object ByteConverter extends TypeConverter[Byte] {
def targetTypeTag = ByteTypeTag
def convertPF = {
case x: Number => x.byteValue
case x: String => x.toByte
}
}
private val JavaByteTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.lang.Byte]]
}
implicit object JavaByteConverter extends NullableTypeConverter[java.lang.Byte] {
def targetTypeTag = JavaByteTypeTag
def convertPF = ByteConverter.convertPF.andThen(_.asInstanceOf[java.lang.Byte])
}
private val ShortTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Short]]
}
implicit object ShortConverter extends TypeConverter[Short] {
def targetTypeTag = ShortTypeTag
def convertPF = {
case x: Number => x.shortValue
case x: String => x.toShort
}
}
private val JavaShortTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.lang.Short]]
}
implicit object JavaShortConverter extends NullableTypeConverter[java.lang.Short] {
def targetTypeTag = JavaShortTypeTag
def convertPF = ShortConverter.convertPF.andThen(_.asInstanceOf[java.lang.Short])
}
private val IntTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Int]]
}
implicit object IntConverter extends TypeConverter[Int] {
def targetTypeTag = IntTypeTag
def convertPF = {
case x: Number => x.intValue
case x: String => x.toInt
}
}
private val JavaIntTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.lang.Integer]]
}
implicit object JavaIntConverter extends NullableTypeConverter[java.lang.Integer] {
def targetTypeTag = JavaIntTypeTag
def convertPF = IntConverter.convertPF.andThen(_.asInstanceOf[java.lang.Integer])
}
private val LongTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Long]]
}
implicit object LongConverter extends TypeConverter[Long] {
def targetTypeTag = LongTypeTag
def convertPF = {
case x: Number => x.longValue
case x: Date => x.getTime
case x: DateTime => x.toDate.getTime
case x: Calendar => x.getTimeInMillis
case x: String => x.toLong
}
}
private val JavaLongTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.lang.Long]]
}
implicit object JavaLongConverter extends NullableTypeConverter[java.lang.Long] {
def targetTypeTag = JavaLongTypeTag
def convertPF = LongConverter.convertPF.andThen(_.asInstanceOf[java.lang.Long])
}
private val FloatTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Float]]
}
implicit object FloatConverter extends TypeConverter[Float] {
def targetTypeTag = FloatTypeTag
def convertPF = {
case x: Number => x.floatValue
case x: String => x.toFloat
}
}
private val JavaFloatTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.lang.Float]]
}
implicit object JavaFloatConverter extends NullableTypeConverter[java.lang.Float] {
def targetTypeTag = JavaFloatTypeTag
def convertPF = FloatConverter.convertPF.andThen(_.asInstanceOf[java.lang.Float])
}
private val DoubleTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Double]]
}
implicit object DoubleConverter extends TypeConverter[Double] {
def targetTypeTag = DoubleTypeTag
def convertPF = {
case x: Number => x.doubleValue
case x: String => x.toDouble
}
}
private val JavaDoubleTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.lang.Double]]
}
implicit object JavaDoubleConverter extends NullableTypeConverter[java.lang.Double] {
def targetTypeTag = JavaDoubleTypeTag
def convertPF = DoubleConverter.convertPF.andThen(_.asInstanceOf[java.lang.Double])
}
private val StringTypeTag = TypeTag.synchronized {
implicitly[TypeTag[String]]
}
implicit object StringConverter extends NullableTypeConverter[String] {
def targetTypeTag = StringTypeTag
def convertPF = {
case x: Date => TimestampFormatter.format(x)
case x: Array[Byte] => "0x" + x.map("%02x" format _).mkString
case x: Map[_, _] => x.map(kv => convert(kv._1) + ": " + convert(kv._2)).mkString("{", ",", "}")
case x: Set[_] => x.map(convert).mkString("{", ",", "}")
case x: Seq[_] => x.map(convert).mkString("[", ",", "]")
case x: Any => x.toString
}
}
private val ByteBufferTypeTag = TypeTag.synchronized {
implicitly[TypeTag[ByteBuffer]]
}
implicit object ByteBufferConverter extends NullableTypeConverter[ByteBuffer] {
def targetTypeTag = ByteBufferTypeTag
def convertPF = {
case x: ByteBuffer => x
case x: Array[Byte] => ByteBuffer.wrap(x)
}
}
private val ByteArrayTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Array[Byte]]]
}
implicit object ByteArrayConverter extends NullableTypeConverter[Array[Byte]] {
def targetTypeTag = ByteArrayTypeTag
def convertPF = {
case x: Array[Byte] => x
case x: ByteBuffer => ByteBufferUtil.toArray(x)
}
}
private val DateTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Date]]
}
implicit object DateConverter extends NullableTypeConverter[Date] {
def targetTypeTag = DateTypeTag
def convertPF = {
case x: Date => x
case x: DateTime => x.toDate
case x: Calendar => x.getTime
case x: Long => new Date(x)
case x: UUID if x.version() == 1 => new Date(x.timestamp())
case x: String => TimestampParser.parse(x)
}
}
private val SqlDateTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.sql.Date]]
}
implicit object SqlDateConverter extends NullableTypeConverter[java.sql.Date] {
def targetTypeTag = SqlDateTypeTag
def convertPF = DateConverter.convertPF.andThen(d => new java.sql.Date(d.getTime))
}
private val JodaDateTypeTag = TypeTag.synchronized {
implicitly[TypeTag[DateTime]]
}
implicit object JodaDateConverter extends NullableTypeConverter[DateTime] {
def targetTypeTag = JodaDateTypeTag
def convertPF = DateConverter.convertPF.andThen(new DateTime(_))
}
private val GregorianCalendarTypeTag = TypeTag.synchronized {
implicitly[TypeTag[GregorianCalendar]]
}
implicit object GregorianCalendarConverter extends NullableTypeConverter[GregorianCalendar] {
private[this] def calendar(date: Date): GregorianCalendar = {
val c = new GregorianCalendar()
c.setTime(date)
c
}
def targetTypeTag = GregorianCalendarTypeTag
def convertPF = DateConverter.convertPF.andThen(calendar)
}
private val BigIntTypeTag = TypeTag.synchronized {
implicitly[TypeTag[BigInt]]
}
implicit object BigIntConverter extends NullableTypeConverter[BigInt] {
def targetTypeTag = BigIntTypeTag
def convertPF = {
case x: BigInt => x
case x: java.math.BigInteger => x
case x: java.lang.Integer => BigInt(x)
case x: java.lang.Long => BigInt(x)
case x: String => BigInt(x)
}
}
private val JavaBigIntegerTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.math.BigInteger]]
}
implicit object JavaBigIntegerConverter extends NullableTypeConverter[java.math.BigInteger] {
def targetTypeTag = JavaBigIntegerTypeTag
def convertPF = {
case x: BigInt => x.bigInteger
case x: java.math.BigInteger => x
case x: java.lang.Integer => new java.math.BigInteger(x.toString)
case x: java.lang.Long => new java.math.BigInteger(x.toString)
case x: String => new java.math.BigInteger(x)
}
}
private val BigDecimalTypeTag = TypeTag.synchronized {
implicitly[TypeTag[BigDecimal]]
}
implicit object BigDecimalConverter extends NullableTypeConverter[BigDecimal] {
def targetTypeTag = BigDecimalTypeTag
def convertPF = {
case x: Number => BigDecimal(x.toString)
case x: String => BigDecimal(x)
}
}
private val JavaBigDecimalTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.math.BigDecimal]]
}
implicit object JavaBigDecimalConverter extends NullableTypeConverter[java.math.BigDecimal] {
def targetTypeTag = JavaBigDecimalTypeTag
def convertPF = {
case x: Number => new java.math.BigDecimal(x.toString)
case x: String => new java.math.BigDecimal(x)
}
}
private val UUIDTypeTag = TypeTag.synchronized {
implicitly[TypeTag[UUID]]
}
implicit object UUIDConverter extends NullableTypeConverter[UUID] {
def targetTypeTag = UUIDTypeTag
def convertPF = {
case x: UUID => x
case x: String => UUID.fromString(x)
}
}
private val InetAddressTypeTag = TypeTag.synchronized {
implicitly[TypeTag[InetAddress]]
}
implicit object InetAddressConverter extends NullableTypeConverter[InetAddress] {
def targetTypeTag = InetAddressTypeTag
def convertPF = {
case x: InetAddress => x
case x: String => InetAddress.getByName(x)
}
}
class TupleConverter[K, V](implicit kc: TypeConverter[K], vc: TypeConverter[V])
extends TypeConverter[(K, V)] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicit val kTag = kc.targetTypeTag
implicit val vTag = vc.targetTypeTag
implicitly[TypeTag[(K, V)]]
}
def convertPF = {
case (k, v) => (kc.convert(k), vc.convert(v))
}
}
class OptionConverter[T](implicit c: TypeConverter[T]) extends TypeConverter[Option[T]] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicit val itemTypeTag = c.targetTypeTag
implicitly[TypeTag[Option[T]]]
}
def convertPF = {
case null => None
case other => Some(c.convert(other))
}
}
abstract class CollectionConverter[CC, T](implicit c: TypeConverter[T], bf: CanBuildFrom[T, CC])
extends TypeConverter[CC] {
protected implicit def itemTypeTag: TypeTag[T] = c.targetTypeTag
private def newCollection(items: Iterable[Any]) = {
val builder = bf()
for (item <- items)
builder += c.convert(item)
builder.result()
}
def convertPF = {
case null => bf.apply().result()
case x: java.util.List[_] => newCollection(x)
case x: java.util.Set[_] => newCollection(x)
case x: java.util.Map[_, _] => newCollection(x)
case x: Iterable[_] => newCollection(x)
}
}
abstract class AbstractMapConverter[CC, K, V](implicit kc: TypeConverter[K], vc: TypeConverter[V], bf: CanBuildFrom[(K, V), CC])
extends CollectionConverter[CC, (K, V)] {
protected implicit def keyTypeTag: TypeTag[K] = kc.targetTypeTag
protected implicit def valueTypeTag: TypeTag[V] = vc.targetTypeTag
}
class ListConverter[T : TypeConverter] extends CollectionConverter[List[T], T] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[List[T]]]
}
}
class VectorConverter[T : TypeConverter] extends CollectionConverter[Vector[T], T] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Vector[T]]]
}
}
class SetConverter[T : TypeConverter] extends CollectionConverter[Set[T], T] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Set[T]]]
}
}
class TreeSetConverter[T : TypeConverter : Ordering] extends CollectionConverter[TreeSet[T], T] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[TreeSet[T]]]
}
}
class SeqConverter[T : TypeConverter] extends CollectionConverter[Seq[T], T] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Seq[T]]]
}
}
class IndexedSeqConverter[T : TypeConverter] extends CollectionConverter[IndexedSeq[T], T] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[IndexedSeq[T]]]
}
}
class IterableConverter[T : TypeConverter] extends CollectionConverter[Iterable[T], T] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Iterable[T]]]
}
}
class JavaListConverter[T : TypeConverter] extends CollectionConverter[java.util.List[T], T] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.util.List[T]]]
}
}
class JavaArrayListConverter[T : TypeConverter] extends CollectionConverter[java.util.ArrayList[T], T] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.util.ArrayList[T]]]
}
}
class JavaSetConverter[T : TypeConverter] extends CollectionConverter[java.util.Set[T], T] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.util.Set[T]]]
}
}
class JavaHashSetConverter[T : TypeConverter] extends CollectionConverter[java.util.HashSet[T], T] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.util.HashSet[T]]]
}
}
class MapConverter[K : TypeConverter, V : TypeConverter] extends AbstractMapConverter[Map[K, V], K, V] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[Map[K, V]]]
}
}
class TreeMapConverter[K : TypeConverter : Ordering, V : TypeConverter] extends AbstractMapConverter[TreeMap[K, V], K, V] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[TreeMap[K, V]]]
}
}
class JavaMapConverter[K : TypeConverter, V : TypeConverter] extends AbstractMapConverter[java.util.Map[K, V], K, V] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.util.Map[K, V]]]
}
}
class JavaHashMapConverter[K : TypeConverter, V : TypeConverter] extends AbstractMapConverter[java.util.HashMap[K, V], K, V] {
@transient
lazy val targetTypeTag = TypeTag.synchronized {
implicitly[TypeTag[java.util.HashMap[K, V]]]
}
}
implicit def optionConverter[T : TypeConverter]: OptionConverter[T] =
new OptionConverter[T]
implicit def tupleConverter[K : TypeConverter, V : TypeConverter]: TupleConverter[K, V] =
new TupleConverter[K, V]
implicit def listConverter[T : TypeConverter]: ListConverter[T] =
new ListConverter[T]
implicit def vectorConverter[T : TypeConverter]: VectorConverter[T] =
new VectorConverter[T]
implicit def setConverter[T : TypeConverter]: SetConverter[T] =
new SetConverter[T]
implicit def treeSetConverter[T : TypeConverter : Ordering]: TreeSetConverter[T] =
new TreeSetConverter[T]
implicit def seqConverter[T : TypeConverter]: SeqConverter[T] =
new SeqConverter[T]
implicit def indexedSeqConverter[T : TypeConverter]: IndexedSeqConverter[T] =
new IndexedSeqConverter[T]
implicit def iterableConverter[T : TypeConverter]: IterableConverter[T] =
new IterableConverter[T]
implicit def mapConverter[K : TypeConverter, V : TypeConverter]: MapConverter[K, V] =
new MapConverter[K, V]
implicit def treeMapConverter[K: TypeConverter : Ordering, V : TypeConverter]: TreeMapConverter[K, V] =
new TreeMapConverter[K, V]
// Support for Java collections:
implicit def javaListConverter[T : TypeConverter]: JavaListConverter[T] =
new JavaListConverter[T]
implicit def javaArrayListConverter[T : TypeConverter]: JavaArrayListConverter[T] =
new JavaArrayListConverter[T]
implicit def javaSetConverter[T : TypeConverter]: JavaSetConverter[T] =
new JavaSetConverter[T]
implicit def javaHashSetConverter[T : TypeConverter]: JavaHashSetConverter[T] =
new JavaHashSetConverter[T]
implicit def javaMapConverter[K : TypeConverter, V : TypeConverter]: JavaMapConverter[K, V] =
new JavaMapConverter[K, V]
implicit def javaHashMapConverter[K : TypeConverter, V : TypeConverter]: JavaHashMapConverter[K, V] =
new JavaHashMapConverter[K, V]
/** Converts Scala Options to Java nullable references. Used when saving data to Cassandra. */
class OptionToNullConverter(nestedConverter: TypeConverter[_]) extends NullableTypeConverter[AnyRef] {
def targetTypeTag = implicitly[TypeTag[AnyRef]]
def convertPF = {
case Some(x) => nestedConverter.convert(x).asInstanceOf[AnyRef]
case None => null
case x => nestedConverter.convert(x).asInstanceOf[AnyRef]
}
}
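// Behaviour sketch: Some is unwrapped and converted, None becomes null, and bare values are
// passed through the nested converter (boxed to AnyRef).
//
//   val c = new OptionToNullConverter(IntConverter)
//   c.convert(Some("123"))   // java.lang.Integer(123)
//   c.convert(None)          // null
//   c.convert(7)             // java.lang.Integer(7)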
private def orderingFor(tpe: Type): Option[Ordering[_]] = {
if (tpe =:= typeOf[Boolean]) Some(implicitly[Ordering[Boolean]])
else if (tpe =:= typeOf[Byte]) Some(implicitly[Ordering[Byte]])
else if (tpe =:= typeOf[Short]) Some(implicitly[Ordering[Short]])
else if (tpe =:= typeOf[Int]) Some(implicitly[Ordering[Int]])
else if (tpe =:= typeOf[Long]) Some(implicitly[Ordering[Long]])
else if (tpe =:= typeOf[Float]) Some(implicitly[Ordering[Float]])
else if (tpe =:= typeOf[Double]) Some(implicitly[Ordering[Double]])
else if (tpe =:= typeOf[String]) Some(implicitly[Ordering[String]])
else if (tpe =:= typeOf[BigInt]) Some(implicitly[Ordering[BigInt]])
else if (tpe =:= typeOf[BigDecimal]) Some(implicitly[Ordering[BigDecimal]])
else if (tpe =:= typeOf[java.math.BigInteger]) Some(implicitly[Ordering[java.math.BigInteger]])
else if (tpe =:= typeOf[java.math.BigDecimal]) Some(implicitly[Ordering[java.math.BigDecimal]])
else if (tpe =:= typeOf[java.util.Date]) Some(implicitly[Ordering[java.util.Date]])
else if (tpe =:= typeOf[java.sql.Date]) Some(Ordering.by((x: java.sql.Date) => x.getTime))
else if (tpe =:= typeOf[org.joda.time.DateTime]) Some(Ordering.by((x: org.joda.time.DateTime) => x.toDate.getTime))
else if (tpe =:= typeOf[UUID]) Some(implicitly[Ordering[UUID]])
else None
}
private var converters = Seq[TypeConverter[_]](
AnyConverter,
AnyRefConverter,
BooleanConverter,
JavaBooleanConverter,
ByteConverter,
JavaByteConverter,
ShortConverter,
JavaShortConverter,
IntConverter,
JavaIntConverter,
LongConverter,
JavaLongConverter,
FloatConverter,
JavaFloatConverter,
DoubleConverter,
JavaDoubleConverter,
StringConverter,
BigIntConverter,
BigDecimalConverter,
JavaBigIntegerConverter,
JavaBigDecimalConverter,
DateConverter,
SqlDateConverter,
JodaDateConverter,
GregorianCalendarConverter,
InetAddressConverter,
UUIDConverter,
ByteBufferConverter,
ByteArrayConverter,
UDTValueConverter
)
private def forCollectionType(tpe: Type, moreConverters: Seq[TypeConverter[_]]): TypeConverter[_] = TypeTag.synchronized {
tpe match {
case TypeRef(_, symbol, List(arg)) =>
val untypedItemConverter = forType(arg, moreConverters)
type T = untypedItemConverter.targetType
implicit val itemConverter = untypedItemConverter.asInstanceOf[TypeConverter[T]]
implicit val ordering = orderingFor(arg).map(_.asInstanceOf[Ordering[T]]).orNull
symbol match {
case OptionSymbol => optionConverter[T]
case ListSymbol => listConverter[T]
case VectorSymbol => vectorConverter[T]
case SetSymbol => setConverter[T]
case TreeSetSymbol if ordering != null => treeSetConverter[T]
case SeqSymbol => seqConverter[T]
case IndexedSeqSymbol => indexedSeqConverter[T]
case IterableSymbol => iterableConverter[T]
case JavaListSymbol => javaListConverter[T]
case JavaArrayListSymbol => javaArrayListConverter[T]
case JavaSetSymbol => javaSetConverter[T]
case JavaHashSetSymbol => javaHashSetConverter[T]
case _ => throw new IllegalArgumentException(s"Unsupported type: $tpe")
}
case TypeRef(_, symbol, List(k, v)) =>
val untypedKeyConverter = forType(k, moreConverters)
val untypedValueConverter = forType(v, moreConverters)
type K = untypedKeyConverter.targetType
type V = untypedValueConverter.targetType
implicit val keyConverter = untypedKeyConverter.asInstanceOf[TypeConverter[K]]
implicit val valueConverter = untypedValueConverter.asInstanceOf[TypeConverter[V]]
implicit val ordering = orderingFor(k).map(_.asInstanceOf[Ordering[K]]).orNull
symbol match {
case MapSymbol => mapConverter[K, V]
case TreeMapSymbol if ordering != null => treeMapConverter[K, V]
case JavaMapSymbol => javaMapConverter[K, V]
case JavaHashMapSymbol => javaHashMapConverter[K, V]
case _ => throw new IllegalArgumentException(s"Unsupported type: $tpe")
}
case _ => throw new IllegalArgumentException(s"Unsupported type: $tpe")
}
}
/** Useful for getting a converter based on a type received from Scala reflection.
* Synchronized to work around Scala 2.10 reflection thread-safety problems. */
def forType(tpe: Type, moreConverters: Seq[TypeConverter[_]] = Seq.empty): TypeConverter[_] = {
TypeTag.synchronized {
type T = TypeConverter[_]
val selectedConverters =
(converters ++ moreConverters).collect { case c: T if c.targetTypeTag.tpe =:= tpe => c }
selectedConverters match {
case Seq() => forCollectionType(tpe, moreConverters)
case Seq(c) => c
case Seq(cs @ _*) => new ChainedTypeConverter(cs: _*)
}
}
}
/** Useful when implicit converters are not in scope, but a TypeTag is.
* Synchronized to work around Scala 2.10 reflection thread-safety problems. */
def forType[T : TypeTag](moreConverters: Seq[TypeConverter[_]]): TypeConverter[T] = {
TypeTag.synchronized {
forType(implicitly[TypeTag[T]].tpe, moreConverters).asInstanceOf[TypeConverter[T]]
}
}
/** Useful when implicit converters are not in scope, but a TypeTag is.
* Synchronized to work around Scala 2.10 reflection thread-safety problems. */
def forType[T : TypeTag]: TypeConverter[T] = {
TypeTag.synchronized {
forType(implicitly[TypeTag[T]].tpe).asInstanceOf[TypeConverter[T]]
}
}
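// Usage sketch: forType resolves scalar converters by exact type match and builds collection
// converters (and their item converters) on demand, so Java collections coming from the
// driver can be converted to Scala collections in one call.
//
//   forType[Vector[Int]].convert(java.util.Arrays.asList(1, 2, 3))   // Vector(1, 2, 3)
//   forType(typeOf[Option[String]]).convert(null)                    // None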
/** Registers a custom converter */
def registerConverter(c: TypeConverter[_]) {
synchronized {
converters = c +: converters
}
}
}
| boneill42/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/types/TypeConverter.scala | Scala | apache-2.0 | 26,075 |
package l3
/**
* Literal values for the CL₃ language.
*
* @author Michel Schinz <[email protected]>
*/
sealed trait CL3Literal {
override def toString: String = this match {
case IntLit(i) => i.toString
case BooleanLit(v) => if (v) "#t" else "#f"
case UnitLit => "#u"
}
}
case class IntLit(value: Int) extends CL3Literal
case class BooleanLit(value: Boolean) extends CL3Literal
case object UnitLit extends CL3Literal
| sana/WorkAtEPFL | l3-compiler/compiler-optimizations/compiler/src/l3/CL3Literal.scala | Scala | gpl-3.0 | 446 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.memory.cqengine.utils
import java.util.Date
import com.googlecode.cqengine.index.AttributeIndex
import com.googlecode.cqengine.index.hash.HashIndex
import com.googlecode.cqengine.index.navigable.NavigableIndex
import com.googlecode.cqengine.index.radix.RadixTreeIndex
import com.googlecode.cqengine.index.unique.UniqueIndex
import org.locationtech.jts.geom.Geometry
import org.junit.runner.RunWith
import org.locationtech.geomesa.memory.cqengine.GeoCQEngine
import org.locationtech.geomesa.memory.cqengine.index.AbstractGeoIndex
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeature
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class CQIndexingOptionsTest extends Specification {
val spec = "Who:String:cq-index=default," +
"What:Integer:cq-index=unique," +
"When:Date:cq-index=navigable," +
"*Where:Point:srid=4326:cq-index=geometry," +
"Why:String:cq-index=hash"
"CQ Indexing options" should {
"be configurable via SFT spec" >> {
val sft = SimpleFeatureTypes.createType("test", spec)
val types = CQIndexType.getDefinedAttributes(sft)
types must contain(("Who", CQIndexType.DEFAULT))
types must contain(("What", CQIndexType.UNIQUE))
types must contain(("When", CQIndexType.NAVIGABLE))
types must contain(("Where", CQIndexType.GEOMETRY))
types must contain(("Why", CQIndexType.HASH))
}
"be configurable via setCQIndexType" >> {
val sft = SimpleFeatureTypes.createType("test", spec)
CQIndexType.getDefinedAttributes(sft) must contain(("Who", CQIndexType.DEFAULT))
sft.getDescriptor("Who").getUserData.put("cq-index", CQIndexType.HASH.toString)
CQIndexType.getDefinedAttributes(sft) must contain(("Who", CQIndexType.HASH))
}
"fail for invalid index types" in {
val sft = SimpleFeatureTypes.createType("test", spec + ",BadIndex:String:cq-index=foo")
CQIndexType.getDefinedAttributes(sft) must throwAn[Exception]
}
"build IndexedCollections with indices" >> {
val sft = SimpleFeatureTypes.createType("test", spec)
val nameToIndex = scala.collection.mutable.Map.empty[String, AttributeIndex[_, SimpleFeature]]
new GeoCQEngine(sft, CQIndexType.getDefinedAttributes(sft)) {
cqcache.getIndexes.foreach {
case a: AttributeIndex[_, SimpleFeature] => nameToIndex.put(a.getAttribute.getAttributeName, a)
case _ => // no-op
}
}
nameToIndex.get("Where") must beSome[AttributeIndex[_, SimpleFeature]](beAnInstanceOf[AbstractGeoIndex[Geometry, SimpleFeature]])
// Who is a string field and the 'default' hint is used. This should be a Radix index
nameToIndex.get("Who") must beSome[AttributeIndex[_, SimpleFeature]](beAnInstanceOf[RadixTreeIndex[String, SimpleFeature]])
nameToIndex.get("What") must beSome[AttributeIndex[_, SimpleFeature]](beAnInstanceOf[UniqueIndex[Integer, SimpleFeature]])
nameToIndex.get("When") must beSome[AttributeIndex[_, SimpleFeature]](beAnInstanceOf[NavigableIndex[Date, SimpleFeature]])
nameToIndex.get("Why") must beSome[AttributeIndex[_, SimpleFeature]](beAnInstanceOf[HashIndex[String, SimpleFeature]])
}
}
}
| locationtech/geomesa | geomesa-memory/geomesa-cqengine/src/test/scala/org/locationtech/geomesa/memory/cqengine/utils/CQIndexingOptionsTest.scala | Scala | apache-2.0 | 3,872 |
package edu.cmu.lti.nlp.amr.StanfordDecoder
import edu.cmu.lti.nlp.amr.Graph
import nlp.experiments.SequenceSystem
import scala.collection.JavaConversions._
/**
* Created by keenon on 2/16/15.
*
* Use option --stanford-chunk-gen on the AMRParser to use this chunk generator instead of the standard one.
*/
object Decoder {
var sequenceSystem : SequenceSystem = null
def decode(line : String) : Graph = {
var graph = Graph.Null
graph.getNodeById.clear
graph.getNodeByName.clear
val sentence = line.split(" ")
if (sequenceSystem == null) sequenceSystem = new SequenceSystem()
val spans : java.util.Set[edu.stanford.nlp.util.Triple[Integer,Integer,String]] = sequenceSystem.getSpans(line)
for (span : edu.stanford.nlp.util.Triple[Integer,Integer,String] <- spans) {
graph.addSpan(sentence, span.first, span.second + 1, span.third)
}
if (graph.getNodeById.size == 0) { // no invoked concepts
graph = Graph.AMREmpty
}
graph
}
}
| keenon/jamr | src/StanfordDecoder/Decoder.scala | Scala | bsd-2-clause | 999 |
package org.lanyard.dist.disc
import org.lanyard.test.ChiSquareTest
import org.scalatest.FunSpec
import org.scalatest.Matchers
import org.scalatest.prop.GeneratorDrivenPropertyChecks
class BinomialTest extends FunSpec with Matchers with GeneratorDrivenPropertyChecks {
import BinomialTest._
import org.lanyard.random.KISS
import org.lanyard.random.KISSTest._
describe("The binomial distribution") {
it("samples should be positive and smaller/equal than n.") {
forAll((kiss, "RNG"), (binomials, "Binomial")) { (rng: KISS, binomial: Binomial) =>
binomial.randoms(rng).take(10000).foreach { _ should (be >= 0 and be <= binomial.n) }
}
}
it("passes a chi square test") {
forAll((kiss, "RNG"), (binomials, "Binomial")) { (rng: KISS, binomial: Binomial) =>
val histMap = binomial.randoms(rng).take(1000000).groupBy(identity).mapValues(_.length)
val sample = Array.tabulate(binomial.n + 1)(histMap.getOrElse(_, 0).toDouble)
val expected = Array.tabulate(binomial.n + 1)(i => binomial(i) * 1000000)
val test = ChiSquareTest.one(sample, expected)
println(test)
}
}
}
}
object BinomialTest {
import org.lanyard.dist.cont.Beta
import org.scalacheck.Gen
val binomials = for {
n <- Gen.choose(0, 1E6.toInt)
p <- Gen.chooseNum(0.0, 1.0, 0.0, 1.0)
} yield Binomial(n, p)
}
| perian/Lanyard | src/test/scala/org/lanyard/dist/disc/BinomialTest.scala | Scala | gpl-2.0 | 1,387 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import org.scalatest.Assertions._
import org.apache.spark.ml.attribute.NominalAttribute
import org.apache.spark.ml.classification.LogisticRegressionSuite._
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.feature.StringIndexer
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.param.{ParamMap, ParamsSuite}
import org.apache.spark.ml.util.{DefaultReadWriteTest, MetadataUtils, MLTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.classification.LogisticRegressionWithLBFGS
import org.apache.spark.mllib.evaluation.MulticlassMetrics
import org.apache.spark.mllib.linalg.{Vectors => OldVectors}
import org.apache.spark.mllib.regression.{LabeledPoint => OldLabeledPoint}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.Metadata
class OneVsRestSuite extends MLTest with DefaultReadWriteTest {
import testImplicits._
@transient var dataset: Dataset[_] = _
@transient var rdd: RDD[LabeledPoint] = _
override def beforeAll(): Unit = {
super.beforeAll()
val nPoints = 1000
// The following coefficients and xMean/xVariance are computed from the iris dataset with lambda=0.2.
// As a result, we are drawing samples from the probability distribution of an actual model.
val coefficients = Array(
-0.57997, 0.912083, -0.371077, -0.819866, 2.688191,
-0.16624, -0.84355, -0.048509, -0.301789, 4.170682)
val xMean = Array(5.843, 3.057, 3.758, 1.199)
val xVariance = Array(0.6856, 0.1899, 3.116, 0.581)
rdd = sc.parallelize(generateMultinomialLogisticInput(
coefficients, xMean, xVariance, true, nPoints, 42), 2)
dataset = rdd.toDF()
}
test("params") {
ParamsSuite.checkParams(new OneVsRest)
val lrModel = new LogisticRegressionModel("lr", Vectors.dense(0.0), 0.0)
val model = new OneVsRestModel("ovr", Metadata.empty, Array(lrModel))
ParamsSuite.checkParams(model)
}
test("one-vs-rest: default params") {
val numClasses = 3
val ova = new OneVsRest()
.setClassifier(new LogisticRegression)
assert(ova.getLabelCol === "label")
assert(ova.getPredictionCol === "prediction")
assert(ova.getRawPredictionCol === "rawPrediction")
val ovaModel = ova.fit(dataset)
MLTestingUtils.checkCopyAndUids(ova, ovaModel)
assert(ovaModel.numClasses === numClasses)
val transformedDataset = ovaModel.transform(dataset)
checkNominalOnDF(transformedDataset, "prediction", ovaModel.numClasses)
checkVectorSizeOnDF(transformedDataset, "rawPrediction", ovaModel.numClasses)
// check for label metadata in prediction col
val predictionColSchema = transformedDataset.schema(ovaModel.getPredictionCol)
assert(MetadataUtils.getNumClasses(predictionColSchema) === Some(3))
val lr = new LogisticRegressionWithLBFGS().setIntercept(true).setNumClasses(numClasses)
lr.optimizer.setRegParam(0.1).setNumIterations(100)
val model = lr.run(rdd.map(OldLabeledPoint.fromML))
val results = model.predict(rdd.map(p => OldVectors.fromML(p.features))).zip(rdd.map(_.label))
// determine the confusion matrix for each class.
// bound how much error we allow compared to multinomial logistic regression.
val expectedMetrics = new MulticlassMetrics(results)
testTransformerByGlobalCheckFunc[(Double, Vector)](dataset.toDF(), ovaModel,
"prediction", "label") { rows =>
val ovaResults = rows.map { row => (row.getDouble(0), row.getDouble(1)) }
val ovaMetrics = new MulticlassMetrics(sc.makeRDD(ovaResults))
assert(expectedMetrics.confusionMatrix.asML ~== ovaMetrics.confusionMatrix.asML absTol 400)
}
}
test("one-vs-rest: tuning parallelism does not change output") {
val ovaPar1 = new OneVsRest()
.setClassifier(new LogisticRegression)
val ovaModelPar1 = ovaPar1.fit(dataset)
val transformedDatasetPar1 = ovaModelPar1.transform(dataset)
val ovaResultsPar1 = transformedDatasetPar1.select("prediction", "label").rdd.map {
row => (row.getDouble(0), row.getDouble(1))
}
val ovaPar2 = new OneVsRest()
.setClassifier(new LogisticRegression)
.setParallelism(2)
val ovaModelPar2 = ovaPar2.fit(dataset)
val transformedDatasetPar2 = ovaModelPar2.transform(dataset)
val ovaResultsPar2 = transformedDatasetPar2.select("prediction", "label").rdd.map {
row => (row.getDouble(0), row.getDouble(1))
}
val metricsPar1 = new MulticlassMetrics(ovaResultsPar1)
val metricsPar2 = new MulticlassMetrics(ovaResultsPar2)
assert(metricsPar1.confusionMatrix == metricsPar2.confusionMatrix)
ovaModelPar1.models.zip(ovaModelPar2.models).foreach {
case (lrModel1: LogisticRegressionModel, lrModel2: LogisticRegressionModel) =>
assert(lrModel1.coefficients ~== lrModel2.coefficients relTol 1E-3)
assert(lrModel1.intercept ~== lrModel2.intercept relTol 1E-3)
case other =>
fail("Loaded OneVsRestModel expected model of type LogisticRegressionModel " +
s"but found ${other.getClass.getName}")
}
}
test("one-vs-rest: pass label metadata correctly during train") {
val numClasses = 3
val ova = new OneVsRest()
ova.setClassifier(new MockLogisticRegression)
val labelMetadata = NominalAttribute.defaultAttr.withName("label").withNumValues(numClasses)
val labelWithMetadata = dataset("label").as("label", labelMetadata.toMetadata())
val features = dataset("features").as("features")
val datasetWithLabelMetadata = dataset.select(labelWithMetadata, features)
ova.fit(datasetWithLabelMetadata)
}
test("SPARK-8092: ensure label features and prediction cols are configurable") {
val labelIndexer = new StringIndexer()
.setInputCol("label")
.setOutputCol("indexed")
val indexedDataset = labelIndexer
.fit(dataset)
.transform(dataset)
.drop("label")
.withColumnRenamed("features", "f")
val ova = new OneVsRest()
ova.setClassifier(new LogisticRegression())
.setLabelCol(labelIndexer.getOutputCol)
.setFeaturesCol("f")
.setPredictionCol("p")
val ovaModel = ova.fit(indexedDataset)
val transformedDataset = ovaModel.transform(indexedDataset)
val outputFields = transformedDataset.schema.fieldNames.toSet
assert(outputFields.contains("p"))
}
test("SPARK-18625 : OneVsRestModel should support setFeaturesCol and setPredictionCol") {
val ova = new OneVsRest().setClassifier(new LogisticRegression)
val ovaModel = ova.fit(dataset)
val dataset2 = dataset.select(col("label").as("y"), col("features").as("fea"))
ovaModel.setFeaturesCol("fea")
ovaModel.setPredictionCol("pred")
ovaModel.setRawPredictionCol("")
val transformedDataset = ovaModel.transform(dataset2)
val outputFields = transformedDataset.schema.fieldNames.toSet
assert(outputFields === Set("y", "fea", "pred"))
}
test("SPARK-8049: OneVsRest shouldn't output temp columns") {
val logReg = new LogisticRegression()
.setMaxIter(1)
val ovr = new OneVsRest()
.setClassifier(logReg)
val output = ovr.fit(dataset).transform(dataset)
assert(output.schema.fieldNames.toSet
=== Set("label", "features", "prediction", "rawPrediction"))
}
test("SPARK-21306: OneVsRest should support setWeightCol") {
val dataset2 = dataset.withColumn("weight", lit(1))
// classifier inherits hasWeightCol
val ova = new OneVsRest().setWeightCol("weight").setClassifier(new LogisticRegression())
assert(ova.fit(dataset2) !== null)
// classifier doesn't inherit hasWeightCol
val ova2 = new OneVsRest().setWeightCol("weight").setClassifier(new FMClassifier())
assert(ova2.fit(dataset2) !== null)
}
test("SPARK-34045: OneVsRestModel.transform should not call setter of submodels") {
val logReg = new LogisticRegression().setMaxIter(1)
val ovr = new OneVsRest().setClassifier(logReg)
val ovrm = ovr.fit(dataset)
val dataset2 = dataset.withColumnRenamed("features", "features2")
ovrm.setFeaturesCol("features2")
val oldCols = ovrm.models.map(_.getFeaturesCol)
ovrm.transform(dataset2)
val newCols = ovrm.models.map(_.getFeaturesCol)
assert(oldCols === newCols)
}
test("SPARK-34356: OneVsRestModel.transform should avoid potential column conflict") {
val logReg = new LogisticRegression().setMaxIter(1)
val ovr = new OneVsRest().setClassifier(logReg)
val ovrm = ovr.fit(dataset)
assert(ovrm.transform(dataset.withColumn("probability", lit(0.0))).count() === dataset.count())
}
test("OneVsRest.copy and OneVsRestModel.copy") {
val lr = new LogisticRegression()
.setMaxIter(1)
val ovr = new OneVsRest()
withClue("copy with classifier unset should work") {
ovr.copy(ParamMap(lr.maxIter -> 10))
}
ovr.setClassifier(lr)
val ovr1 = ovr.copy(ParamMap(lr.maxIter -> 10))
require(ovr.getClassifier.getOrDefault(lr.maxIter) === 1, "copy should have no side-effects")
require(ovr1.getClassifier.getOrDefault(lr.maxIter) === 10,
"copy should handle extra classifier params")
val ovrModel = ovr1.fit(dataset).copy(ParamMap(lr.thresholds -> Array(0.9, 0.1)))
ovrModel.models.foreach { case m: LogisticRegressionModel =>
require(m.getThreshold === 0.1, "copy should handle extra model params")
}
}
test("read/write: OneVsRest") {
val lr = new LogisticRegression().setMaxIter(10).setRegParam(0.01)
val ova = new OneVsRest()
.setClassifier(lr)
.setLabelCol("myLabel")
.setFeaturesCol("myFeature")
.setPredictionCol("myPrediction")
val ova2 = testDefaultReadWrite(ova, testParams = false)
assert(ova.uid === ova2.uid)
assert(ova.getFeaturesCol === ova2.getFeaturesCol)
assert(ova.getLabelCol === ova2.getLabelCol)
assert(ova.getPredictionCol === ova2.getPredictionCol)
ova2.getClassifier match {
case lr2: LogisticRegression =>
assert(lr.uid === lr2.uid)
assert(lr.getMaxIter === lr2.getMaxIter)
assert(lr.getRegParam === lr2.getRegParam)
case other =>
fail("Loaded OneVsRest expected classifier of type LogisticRegression" +
s" but found ${other.getClass.getName}")
}
}
test("read/write: OneVsRestModel") {
def checkModelData(model: OneVsRestModel, model2: OneVsRestModel): Unit = {
assert(model.uid === model2.uid)
assert(model.getFeaturesCol === model2.getFeaturesCol)
assert(model.getLabelCol === model2.getLabelCol)
assert(model.getPredictionCol === model2.getPredictionCol)
val classifier = model.getClassifier.asInstanceOf[LogisticRegression]
model2.getClassifier match {
case lr2: LogisticRegression =>
assert(classifier.uid === lr2.uid)
assert(classifier.getMaxIter === lr2.getMaxIter)
assert(classifier.getRegParam === lr2.getRegParam)
case other =>
fail("Loaded OneVsRestModel expected classifier of type LogisticRegression" +
s" but found ${other.getClass.getName}")
}
assert(model.labelMetadata === model2.labelMetadata)
model.models.zip(model2.models).foreach {
case (lrModel1: LogisticRegressionModel, lrModel2: LogisticRegressionModel) =>
assert(lrModel1.uid === lrModel2.uid)
assert(lrModel1.coefficients === lrModel2.coefficients)
assert(lrModel1.intercept === lrModel2.intercept)
case other =>
fail(s"Loaded OneVsRestModel expected model of type LogisticRegressionModel" +
s" but found ${other.getClass.getName}")
}
}
val lr = new LogisticRegression().setMaxIter(10).setRegParam(0.01)
val ova = new OneVsRest().setClassifier(lr)
val ovaModel = ova.fit(dataset)
val newOvaModel = testDefaultReadWrite(ovaModel, testParams = false)
checkModelData(ovaModel, newOvaModel)
}
test("should ignore empty output cols") {
val lr = new LogisticRegression().setMaxIter(1)
val ovr = new OneVsRest().setClassifier(lr)
val ovrModel = ovr.fit(dataset)
val output1 = ovrModel.setPredictionCol("").setRawPredictionCol("")
.transform(dataset)
assert(output1.schema.fieldNames.toSet ===
Set("label", "features"))
val output2 = ovrModel.setPredictionCol("prediction").setRawPredictionCol("")
.transform(dataset)
assert(output2.schema.fieldNames.toSet ===
Set("label", "features", "prediction"))
val output3 = ovrModel.setPredictionCol("").setRawPredictionCol("rawPrediction")
.transform(dataset)
assert(output3.schema.fieldNames.toSet ===
Set("label", "features", "rawPrediction"))
val output4 = ovrModel.setPredictionCol("prediction").setRawPredictionCol("rawPrediction")
.transform(dataset)
assert(output4.schema.fieldNames.toSet ===
Set("label", "features", "prediction", "rawPrediction"))
}
test("should support all NumericType labels and not support other types") {
val ovr = new OneVsRest().setClassifier(new LogisticRegression().setMaxIter(1))
MLTestingUtils.checkNumericTypes[OneVsRestModel, OneVsRest](
ovr, spark) { (expected, actual) =>
val expectedModels = expected.models.map(m => m.asInstanceOf[LogisticRegressionModel])
val actualModels = actual.models.map(m => m.asInstanceOf[LogisticRegressionModel])
assert(expectedModels.length === actualModels.length)
expectedModels.zip(actualModels).foreach { case (e, a) =>
assert(e.intercept === a.intercept)
assert(e.coefficients.toArray === a.coefficients.toArray)
}
}
}
}
private class MockLogisticRegression(uid: String) extends LogisticRegression(uid) {
def this() = this("mockLogReg")
setMaxIter(1)
override protected[spark] def train(dataset: Dataset[_]): LogisticRegressionModel = {
val labelSchema = dataset.schema($(labelCol))
// check for label attribute propagation.
assert(MetadataUtils.getNumClasses(labelSchema).forall(_ == 2))
super.train(dataset)
}
}
| mahak/spark | mllib/src/test/scala/org/apache/spark/ml/classification/OneVsRestSuite.scala | Scala | apache-2.0 | 14,980 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.bijection
import java.lang.{Iterable => JIterable}
import java.util.{
Collection => JCollection,
Dictionary => JDictionary,
Enumeration => JEnumeration,
Iterator => JIterator,
List => JList,
Map => JMap,
Set => JSet
}
import scala.collection.JavaConverters._
import scala.collection.mutable
import collection.generic.CanBuildFrom
import scala.reflect.ClassTag
trait CollectionBijections extends BinaryBijections {
/**
* Bijections between collection types defined in scala.collection.JavaConverters.
*/
implicit def iterable2java[T]: Bijection[Iterable[T], JIterable[T]] =
new AbstractBijection[Iterable[T], JIterable[T]] {
override def apply(t: Iterable[T]) = t.asJava
override def invert(u: JIterable[T]) = u.asScala
}
implicit def iterator2java[T]: Bijection[Iterator[T], JIterator[T]] =
new AbstractBijection[Iterator[T], JIterator[T]] {
override def apply(t: Iterator[T]) = t.asJava
override def invert(u: JIterator[T]) = u.asScala
}
implicit def buffer2java[T]: Bijection[mutable.Buffer[T], JList[T]] =
new AbstractBijection[mutable.Buffer[T], JList[T]] {
override def apply(t: mutable.Buffer[T]) = t.asJava
override def invert(u: JList[T]) = u.asScala
}
implicit def mset2java[T]: Bijection[mutable.Set[T], JSet[T]] =
new AbstractBijection[mutable.Set[T], JSet[T]] {
override def apply(t: mutable.Set[T]) = t.asJava
override def invert(u: JSet[T]) = u.asScala
}
implicit def mmap2java[K, V]: Bijection[mutable.Map[K, V], JMap[K, V]] =
new AbstractBijection[mutable.Map[K, V], JMap[K, V]] {
override def apply(t: mutable.Map[K, V]) = t.asJava
override def invert(t: JMap[K, V]) = t.asScala
}
implicit def iterable2jcollection[T]: Bijection[Iterable[T], JCollection[T]] =
new AbstractBijection[Iterable[T], JCollection[T]] {
override def apply(t: Iterable[T]) = t.asJavaCollection
override def invert(u: JCollection[T]) = u.asScala
}
implicit def iterator2jenumeration[T]: Bijection[Iterator[T], JEnumeration[T]] =
new AbstractBijection[Iterator[T], JEnumeration[T]] {
override def apply(t: Iterator[T]) = t.asJavaEnumeration
override def invert(u: JEnumeration[T]) = u.asScala
}
implicit def mmap2jdictionary[K, V]: Bijection[mutable.Map[K, V], JDictionary[K, V]] =
new AbstractBijection[mutable.Map[K, V], JDictionary[K, V]] {
override def apply(t: mutable.Map[K, V]) = t.asJavaDictionary
override def invert(t: JDictionary[K, V]) = t.asScala
}
// Immutable objects (they copy from java to scala):
implicit def seq2Java[T]: Bijection[Seq[T], JList[T]] =
new AbstractBijection[Seq[T], JList[T]] {
def apply(s: Seq[T]) = s.asJava
override def invert(l: JList[T]) = l.asScala.toSeq
}
implicit def set2Java[T]: Bijection[Set[T], JSet[T]] =
new AbstractBijection[Set[T], JSet[T]] {
def apply(s: Set[T]) = s.asJava
override def invert(l: JSet[T]) = l.asScala.toSet
}
implicit def map2Java[K, V]: Bijection[Map[K, V], JMap[K, V]] =
new AbstractBijection[Map[K, V], JMap[K, V]] {
def apply(s: Map[K, V]) = s.asJava
override def invert(l: JMap[K, V]) = l.asScala.toMap
}
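  // Round-trip sketch for the copying bijections above: the forward direction wraps the Scala
  // collection for Java callers, and invert copies back into an immutable Scala collection.
  //
  //   val bij = set2Java[Int]
  //   val jset: JSet[Int] = bij(Set(1, 2, 3))
  //   bij.invert(jset)   // Set(1, 2, 3)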
/*
* For transformations that may not require a copy, we don't biject on types
* which would require a copy. To change inner type also, use connect[Seq[T], List[T], List[U]]
*/
implicit def seq2List[A]: Bijection[Seq[A], List[A]] =
new AbstractBijection[Seq[A], List[A]] {
def apply(s: Seq[A]) = s.toList
override def invert(l: List[A]) = l
}
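  // Sketch: these bijections avoid a copy when the runtime type already matches, e.g. a List
  // viewed as a Seq converts forward and back without rebuilding the collection.
  //
  //   val bij = seq2List[Int]
  //   bij(Seq(1, 2, 3))        // List(1, 2, 3)
  //   bij.invert(List(1, 2))   // the same List, typed as Seq[Int]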
implicit def seq2IndexedSeq[A]: Bijection[Seq[A], IndexedSeq[A]] =
new AbstractBijection[Seq[A], IndexedSeq[A]] {
def apply(s: Seq[A]) = s.toIndexedSeq
override def invert(l: IndexedSeq[A]) = l
}
// This doesn't require a copy from Map -> Seq
implicit def seq2Map[K, V]: Bijection[Seq[(K, V)], Map[K, V]] =
new AbstractBijection[Seq[(K, V)], Map[K, V]] {
def apply(s: Seq[(K, V)]) = s.toMap
override def invert(l: Map[K, V]) = l.toSeq
}
// This doesn't require a copy from Set -> Seq
implicit def seq2Set[T]: Bijection[Seq[T], Set[T]] =
new AbstractBijection[Seq[T], Set[T]] {
def apply(s: Seq[T]) = s.toSet
override def invert(l: Set[T]) = l.toSeq
}
protected def trav2Vector[T, C >: Vector[T] <: Traversable[T]]: Bijection[C, Vector[T]] =
new SubclassBijection[C, Vector[T]](classOf[Vector[T]]) {
def applyfn(s: C) = {
// Just build one:
val bldr = new scala.collection.immutable.VectorBuilder[T]
bldr ++= s
bldr.result
}
}
implicit def seq2Vector[T]: Bijection[Seq[T], Vector[T]] = trav2Vector[T, Seq[T]]
implicit def indexedSeq2Vector[T]: Bijection[IndexedSeq[T], Vector[T]] =
trav2Vector[T, IndexedSeq[T]]
/**
* Accepts a Bijection[A, B] and returns a bijection that can transform traversable containers of
* A into traversable containers of B.
*
* Do not go from ordered to unordered containers; Bijection[Iterable[A], Set[B]] is inaccurate,
* and really makes no sense.
*/
def toContainer[A, B, C <: TraversableOnce[A], D <: TraversableOnce[B]](implicit
bij: ImplicitBijection[A, B],
cd: CanBuildFrom[Nothing, B, D],
dc: CanBuildFrom[Nothing, A, C]
): Bijection[C, D] =
new AbstractBijection[C, D] {
def apply(c: C) = {
val builder = cd()
c foreach { builder += bij(_) }
builder.result()
}
override def invert(d: D) = {
val builder = dc()
d foreach { builder += bij.invert(_) }
builder.result()
}
}
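  // Sketch: toContainer lifts an element-level bijection to whole containers. Assuming an
  // implicit Bijection[Int, java.lang.Integer] is in scope (bijection-core provides boxing
  // bijections elsewhere), the betweenLists derivation defined below yields, e.g.:
  //
  //   val listBij: Bijection[List[Int], List[java.lang.Integer]] =
  //     betweenLists[Int, java.lang.Integer]
  //   listBij(List(1, 2, 3))   // List of boxed Integers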
implicit def betweenMaps[K1, V1, K2, V2](implicit
kBijection: ImplicitBijection[K1, K2],
vBijection: ImplicitBijection[V1, V2]
) =
toContainer[(K1, V1), (K2, V2), Map[K1, V1], Map[K2, V2]]
implicit def betweenVectors[T, U](implicit bij: ImplicitBijection[T, U]) =
toContainer[T, U, Vector[T], Vector[U]]
implicit def betweenIndexedSeqs[T, U](implicit bij: ImplicitBijection[T, U]) =
toContainer[T, U, IndexedSeq[T], IndexedSeq[U]]
implicit def betweenSets[T, U](implicit bij: ImplicitBijection[T, U]) =
toContainer[T, U, Set[T], Set[U]]
implicit def betweenSeqs[T, U](implicit bij: ImplicitBijection[T, U]) =
toContainer[T, U, Seq[T], Seq[U]]
implicit def betweenLists[T, U](implicit bij: ImplicitBijection[T, U]) =
toContainer[T, U, List[T], List[U]]
implicit def option[T, U](implicit
bij: ImplicitBijection[T, U]
): Bijection[Option[T], Option[U]] =
new AbstractBijection[Option[T], Option[U]] {
override def apply(optt: Option[T]) = optt.map(bij.bijection)
override def invert(optu: Option[U]) = optu.map(bij.bijection.inverse)
}
// Always requires a copy
implicit def vector2List[A, B](implicit
bij: ImplicitBijection[A, B]
): Bijection[Vector[A], List[B]] =
toContainer[A, B, Vector[A], List[B]]
implicit def indexedSeq2List[A, B](implicit
bij: ImplicitBijection[A, B]
): Bijection[IndexedSeq[A], List[B]] =
toContainer[A, B, IndexedSeq[A], List[B]]
/**
* This doesn't actually copy the Array, only wraps/unwraps with WrappedArray
*/
implicit def array2Traversable[T: ClassTag]: Bijection[Array[T], Traversable[T]] =
new AbstractBijection[Array[T], Traversable[T]] {
override def apply(a: Array[T]) = a.toTraversable
override def invert(t: Traversable[T]) = t.toArray
}
/**
* This doesn't actually copy the Array, only wraps/unwraps with WrappedArray
*/
implicit def array2Seq[T: ClassTag]: Bijection[Array[T], Seq[T]] =
new AbstractBijection[Array[T], Seq[T]] {
override def apply(a: Array[T]) = a.toSeq
override def invert(t: Seq[T]) = t.toArray
}
}
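/*
 * Illustrative usage sketch (not part of the original file): round-tripping a Scala Seq through
 * a java.util.List with the seq2Java bijection defined above. It assumes these implicits are
 * mixed into the Bijection companion object, as is the convention in this library, so that
 * implicit search can find them.
 */
object CollectionBijectionsUsageExample {
  import java.util.{ List => JavaList }

  def demo(): Unit = {
    val bij = implicitly[Bijection[Seq[Int], JavaList[Int]]]
    val javaList: JavaList[Int] = bij(Seq(1, 2, 3)) // copies into a java.util.List
    val roundTrip: Seq[Int] = bij.invert(javaList)  // copies back into an immutable Seq
    assert(roundTrip == Seq(1, 2, 3))
  }
}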
| twitter/bijection | bijection-core/src/main/scala-2.12-/com/twitter/bijection/CollectionBijections.scala | Scala | apache-2.0 | 8,407 |
package com.rydgel.yo
import scala.concurrent.duration._
import scala.concurrent.Await
import org.scalatest._
import scala.io.Source
class YoClientSuite extends FunSuite {
val key = new ApiToken(
Source.fromURL(getClass.getResource("/token.txt")).getLines().mkString
)
test("YoAll request with a good API key should be ok") {
val request = YoClient.yoAll(key)
val result = Await.result(request, 5 seconds)
assert(result.isInstanceOf[String])
}
test("Yo request with a good API to a user should be ok") {
val request = YoClient.yo("fitzlord")(key)
val result = Await.result(request, 5 seconds)
assert(result.isInstanceOf[String])
}
}
| Rydgel/Scala-Yo | src/test/scala/com/rydgel/yo/YoClientSuite.scala | Scala | mit | 677 |
/*
* Copyright 2016 Nikolay Donets
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.nikdon.telepooz.model.methods
import com.github.nikdon.telepooz.model.Response
/**
* Use this method to delete a message. A message can only be deleted if it was sent less than 48 hours ago.
* Any such recently sent outgoing message may be deleted. Additionally, if the bot is an administrator in a
* group chat, it can delete any message. If the bot is an administrator in a supergroup, it can delete messages
* from any other user and service messages about people joining or leaving the group (other types of service
* messages may only be removed by the group creator). In channels, bots can only remove their own messages.
* Returns True on success.
*
* @param chat_id Unique identifier for the target chat or username of the target channel
* (in the format @channelusername)
* @param message_id Identifier of the message to delete
*/
case class DeleteMessage(chat_id: String, message_id: Long) extends Method[Response[Boolean]]
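/** Illustrative sketch (not part of telepooz): constructing a DeleteMessage request value.
  * The channel username and message id below are made up; executing the request against the
  * Telegram API is handled elsewhere in the library.
  */
object DeleteMessageExample {
  val deleteFromChannel: DeleteMessage = DeleteMessage(chat_id = "@channelusername", message_id = 42L)
}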
| nikdon/telepooz | src/main/scala/com/github/nikdon/telepooz/model/methods/DeleteMessage.scala | Scala | apache-2.0 | 1,597 |
/**
* Copyright 2015 Gianluca Amato <[email protected]>
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty ofa
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.fixpoint
/**
* A solver for finite equation systems based on iterative strategies.
* @param eqs the equation system to solve
*/
abstract class IterativeStrategySolver[EQS <: FiniteEquationSystem](val eqs: EQS) extends FixpointSolver[EQS] {
/**
* The iterative strategy to use for solving the equation system.
*/
val strategy: IterativeStrategy[eqs.Unknown]
def apply(start: eqs.Assignment, boxes: eqs.Unknown => eqs.Box): eqs.Assignment = {
import IterativeStrategy._
val current: collection.mutable.HashMap[eqs.Unknown, eqs.Value] = (for (x <- eqs.unknowns) yield (x -> start(x)))(collection.breakOut)
val stack = collection.mutable.Stack.empty[Int]
val stackdirty = collection.mutable.Stack.empty[Boolean]
var dirty = false
var i = 0
while (i < strategy.length) {
strategy(i) match {
case Left =>
stack.push(i + 1)
stackdirty.push(dirty)
dirty = false
i += 1
case El(x) =>
val newval = boxes(x)(current(x), eqs(current)(x))
if (newval != current(x)) {
current(x) = newval
dirty = true
}
i += 1
case Right =>
if (dirty) {
i = stack.top
dirty = false
} else {
stack.pop
dirty = stackdirty.pop()
i += 1
}
}
}
current
}
val name = "Strategy based"
}
object IterativeStrategySolver {
/**
* Returns a solver for an equation system with a given strategy.
* @param eqs the equation system to solve.
* @param a_strategy the iterative strategy to use.
*/
def apply(eqs: FiniteEquationSystem)(a_strategy: IterativeStrategy[eqs.Unknown]) = new IterativeStrategySolver[eqs.type](eqs) { val strategy = a_strategy }
}
| francescaScozzari/Jandom | core/src/main/scala/it/unich/jandom/fixpoint/IterativeStrategySolver.scala | Scala | lgpl-3.0 | 2,605 |
package com.sfxcode.nosql.mongo
import java.util.Date
import org.bson.conversions.Bson
import org.mongodb.scala.bson.BsonDocument
import org.mongodb.scala.bson.collection.immutable.Document
import org.mongodb.scala.model.Filters._
object Filter extends Filter
trait Filter {
val DefaultBson: Bson = BsonDocument(Document())
def valueFilter(key: String, value: Any): Bson =
value match {
case list: List[_] =>
in(key, list: _*)
case set: Set[_] =>
in(key, set.toSeq: _*)
case _: Any =>
equal(key, value)
case _ => DefaultBson
}
def fieldComparisonFilter(firstFieldName: String, secondFieldName: String, operator: String): Bson =
where("this.%s %s this.%s".format(firstFieldName, operator, secondFieldName))
def nullFilter(fieldName: String): Bson = equal(fieldName, value = null)
def notNullFilter(fieldName: String): Bson = not(nullFilter(fieldName))
def dateInRangeFilter(dateFieldKey: String, dateFrom: Date = null, dateUntil: Date = null): Bson =
if (dateFrom != null && dateUntil != null)
and(gte(dateFieldKey, dateFrom), lte(dateFieldKey, dateUntil))
else if (dateUntil != null)
lte(dateFieldKey, dateUntil)
else if (dateFrom != null)
gte(dateFieldKey, dateFrom)
else
Map()
}
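/** Illustrative sketch (not part of the library): composing the helpers above into a single
  * query filter. The field names ("name", "created") are made up for the example.
  */
object FilterUsageExample extends Filter {
  import java.util.Date
  import org.bson.conversions.Bson
  import org.mongodb.scala.model.Filters.and

  val byName: Bson = valueFilter("name", List("alice", "bob"))                        // becomes an $in filter
  val createdSinceEpoch: Bson = dateInRangeFilter("created", dateFrom = new Date(0))  // becomes a $gte filter
  val combined: Bson = and(byName, createdSinceEpoch)
}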
| sfxcode/simple-mongo | src/main/scala/com/sfxcode/nosql/mongo/Filter.scala | Scala | apache-2.0 | 1,303 |
import java.util.Locale
object AnyValCodec {
private[this] val types = "Boolean Byte Short Int Long Float Double".split(' ').toList
  private[this] val defdef = types.map { tpe => s"  implicit def ${tpe.toLowerCase(Locale.ENGLISH)}Codec: MsgpackCodec[$tpe]" }.mkString("\n")
private[this] val impl = types.map { tpe =>
s"""
override final def ${tpe.toLowerCase(Locale.ENGLISH)}Codec: MsgpackCodec[$tpe] =
MsgpackCodec.tryConst(_ pack$tpe _, _.unpack$tpe())"""
  }.mkString("\n")
def generate(pack: String): String = {
s"""package $pack
// GENERATED CODE: DO NOT EDIT.
trait AnyValCodec {
$defdef
}
private[$pack] trait AnyValCodecImpl extends AnyValCodec {
$impl
}
"""
}
}
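/** Illustrative sketch (not part of the build definition): invoking the generator, e.g. from an
  * sbt source-generator task. The target package name used here is made up.
  */
object AnyValCodecExample {
  def main(args: Array[String]): Unit = {
    val source: String = AnyValCodec.generate("msgpack4z")
    println(source) // prints the generated AnyValCodec trait and its private implementation
  }
}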
| msgpack4z/msgpack4z-core | project/AnyValCodec.scala | Scala | mit | 702 |
package shredzzz.kirkwood.cumath.tensor.modules
import jcuda.{Pointer, Sizeof}
import shredzzz.kirkwood.cumath.tensor.CuTensor
import scala.reflect.ClassTag
import shredzzz.kirkwood.driver.CuContext
import jcuda.runtime.{cudaMemcpyKind, JCuda}
import jcuda.jcublas.JCublas2
import shredzzz.kirkwood.cumath.CuValue
trait DataModule[V]
{
def sizeof: Int
def pointerToVal(value: V): Pointer
def pointerTo(arr: Array[V]): Pointer
def load[TT[X] <: CuTensor[X, _, TT]](x: TT[V])(implicit tag: ClassTag[V], ctx: CuContext): Array[V] = {
val arr = new Array[V](x.size)
JCuda.cudaMemcpy(pointerTo(arr), x.ptr(), x.size * sizeof, cudaMemcpyKind.cudaMemcpyDeviceToHost)
arr
}
def store[TT[X] <: CuTensor[X, _, TT]](x: TT[V], arr: Array[V])(implicit ctx: CuContext) {
JCuda.cudaMemcpy(x.ptr(), pointerTo(arr), x.size * sizeof, cudaMemcpyKind.cudaMemcpyHostToDevice)
}
def copy[TT[X] <: CuTensor[X, _, TT]](x: TT[V], dst: TT[V])(implicit ctx: CuContext) {
JCuda.cudaMemcpy(dst.ptr(), x.ptr(), x.size * sizeof, cudaMemcpyKind.cudaMemcpyDeviceToDevice)
}
def load(x: CuValue[V])(implicit tag: ClassTag[V], ctx: CuContext): V = {
val arr = new Array[V](1)
JCuda.cudaMemcpy(pointerTo(arr), x.ptr(), sizeof, cudaMemcpyKind.cudaMemcpyDeviceToHost)
arr(0)
}
def store(x: CuValue[V], arr: V)(implicit ctx: CuContext) {
JCuda.cudaMemcpy(x.ptr(), pointerToVal(arr), sizeof, cudaMemcpyKind.cudaMemcpyHostToDevice)
}
def copy(x: CuValue[V], dst: CuValue[V])(implicit ctx: CuContext) {
JCuda.cudaMemcpy(dst.ptr(), x.ptr(), sizeof, cudaMemcpyKind.cudaMemcpyDeviceToDevice)
}
}
object BooleanDataModule extends DataModule[Boolean]
{
val sizeof = Sizeof.INT
private def toInt(value: Boolean) = if (value) 1 else 0
private def fromInt(value: Int) = if (value == 0) false else true
def pointerToVal(value: Boolean) = Pointer.to(Array[Int](toInt(value)))
def pointerTo(arr: Array[Boolean]) = Pointer.to(arr.map(toInt(_)))
override def load[TT[X] <: CuTensor[X, _, TT]](x: TT[Boolean])(implicit tag: ClassTag[Boolean], ctx: CuContext): Array[Boolean] = {
val arr = new Array[Int](x.size)
JCuda.cudaMemcpy(Pointer.to(arr), x.ptr(), x.size * sizeof, cudaMemcpyKind.cudaMemcpyDeviceToHost)
arr.map(fromInt(_))
}
override def store[TT[X] <: CuTensor[X, _, TT]](x: TT[Boolean], arr: Array[Boolean])(implicit ctx: CuContext) {
JCublas2.cublasSetVector(x.size, sizeof, pointerTo(arr), 1, x.ptr(), x.stride)
}
override def load(x: CuValue[Boolean])(implicit tag: ClassTag[Boolean], ctx: CuContext): Boolean = {
val arr = new Array[Int](1)
JCuda.cudaMemcpy(Pointer.to(arr), x.ptr(), sizeof, cudaMemcpyKind.cudaMemcpyDeviceToHost)
fromInt(arr(0))
}
override def store(x: CuValue[Boolean], arr: Boolean)(implicit ctx: CuContext) {
JCuda.cudaMemcpy(x.ptr(), pointerToVal(arr), sizeof, cudaMemcpyKind.cudaMemcpyHostToDevice)
}
}
object IntDataModule extends DataModule[Int]
{
val sizeof = Sizeof.INT
def pointerToVal(value: Int) = Pointer.to(Array[Int](value))
def pointerTo(arr: Array[Int]) = Pointer.to(arr)
}
object FloatDataModule extends DataModule[Float]
{
val sizeof = Sizeof.FLOAT
def pointerToVal(value: Float) = Pointer.to(Array[Float](value))
def pointerTo(arr: Array[Float]) = Pointer.to(arr)
}
object DoubleDataModule extends DataModule[Double]
{
val sizeof = Sizeof.DOUBLE
def pointerToVal(value: Double) = Pointer.to(Array[Double](value))
def pointerTo(arr: Array[Double]) = Pointer.to(arr)
}
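/** Illustrative sketch (not part of the library): the pointer helpers can be exercised on the
  * host without a GPU, whereas load/store/copy additionally require a CuContext and allocated
  * device memory (assumed to be set up elsewhere).
  */
object DataModuleExample {
  import jcuda.Pointer

  val hostData: Array[Float] = Array(1.0f, 2.0f, 3.0f)
  val hostPtr: Pointer = FloatDataModule.pointerTo(hostData) // host pointer usable with cudaMemcpy
  val elementBytes: Int = FloatDataModule.sizeof             // Sizeof.FLOAT
}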
| shredzzz/kirkwood | src/main/scala/shredzzz/kirkwood/cumath/tensor/modules/DataModule.scala | Scala | apache-2.0 | 3,527 |
package com.peterpotts.snake.predicate
import com.peterpotts.snake.coercion.Compare
case class NotEqualTo[T](extractor: Extractor[T], value: Any) extends Predicate[T] {
def apply(argument: T) = Compare(extractor(argument), value) != 0
override def toString() = s"$extractor != $value"
}
| peterpotts/snake | src/main/scala/com/peterpotts/snake/predicate/NotEqualTo.scala | Scala | mit | 294 |
package com.sfxcode.nosql.mongo.database
import java.util.Date
import org.mongodb.scala.bson.Document
import com.sfxcode.nosql.mongo._
case class CollectionStatus(
ns: String,
collectionType: String,
scaleFactor: Int,
size: Double,
count: Int,
storageSize: Double,
avgObjSize: Int,
nindexes: Int,
indexSizes: Map[String, Int],
totalIndexSize: Int,
ok: Int,
fetched: Date,
map: Map[String, Any]
)
object CollectionStatus {
def apply(document: Document): CollectionStatus = {
val map = document.asPlainMap
CollectionStatus(
map.getOrElse("ns", "-").toString,
map.getOrElse("type", "Standard").toString,
map.getOrElse("scaleFactor", 0).asInstanceOf[Int],
doubleValue(map, "size"),
intValue(map, "count"),
doubleValue(map, "storageSize"),
intValue(map, "avgObjSize"),
intValue(map, "nindexes"),
map.getOrElse("indexSizes", Map()).asInstanceOf[Map[String, Int]],
intValue(map, "totalIndexSize"),
intValue(map, "ok"),
new Date(),
map
)
}
def intValue(map: Map[String, Any], key: String): Int =
map.getOrElse(key, 0) match {
case i: Int => i
case l: Long => l.intValue()
case d: Double => d.intValue()
case _ => 0
}
def doubleValue(map: Map[String, Any], key: String): Double =
map.getOrElse(key, 0) match {
case i: Int => i.doubleValue()
case l: Long => l.doubleValue()
case d: Double => d
case _ => 0
}
}
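/** Illustrative sketch (not part of the library): converting a raw collStats result document into
  * a typed CollectionStatus. Fetching the Document from a running MongoDB is assumed to happen
  * elsewhere.
  */
object CollectionStatusExample {
  def fromRawStats(raw: Document): CollectionStatus = {
    val status = CollectionStatus(raw)
    // status.count, status.storageSize and status.indexSizes are now available as typed fields
    status
  }
}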
| sfxcode/simple-mongo | src/main/scala/com/sfxcode/nosql/mongo/database/CollectionStatus.scala | Scala | apache-2.0 | 1,546 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.{Date, UUID}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileAlreadyExistsException, Path}
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.io.{FileCommitProtocol, SparkHadoopWriterUtils}
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.BindReferences.bindReferences
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning
import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils}
import org.apache.spark.sql.execution.{ProjectExec, SortExec, SparkPlan, SQLExecution}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.StringType
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.{SerializableConfiguration, Utils}
/** A helper object for writing FileFormat data out to a location. */
object FileFormatWriter extends Logging {
/** Describes how output files should be placed in the filesystem. */
case class OutputSpec(
outputPath: String,
customPartitionLocations: Map[TablePartitionSpec, String],
outputColumns: Seq[Attribute])
/** A function that converts the empty string to null for partition values. */
case class Empty2Null(child: Expression) extends UnaryExpression with String2StringExpression {
override def convert(v: UTF8String): UTF8String = if (v.numBytes() == 0) null else v
override def nullable: Boolean = true
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => {
s"""if ($c.numBytes() == 0) {
| ${ev.isNull} = true;
| ${ev.value} = null;
|} else {
| ${ev.value} = $c;
|}""".stripMargin
})
}
}
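  /* Illustrative sketch (not part of Spark): interpreted evaluation of Empty2Null, showing that
   * empty partition-column strings collapse to null (and are therefore written like null
   * partition values), while non-empty strings pass through unchanged. */
  private object Empty2NullExample {
    def demo(): Unit = {
      assert(Empty2Null(Literal("")).eval(null) == null)
      assert(Empty2Null(Literal("a")).eval(null) != null)
    }
  }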
/**
* Basic work flow of this command is:
* 1. Driver side setup, including output committer initialization and data source specific
* preparation work for the write job to be issued.
   * 2. Issues a write job consisting of one or more executor side tasks, each of which writes all
* rows within an RDD partition.
* 3. If no exception is thrown in a task, commits that task, otherwise aborts that task; If any
* exception is thrown during task commitment, also aborts that task.
* 4. If all tasks are committed, commit the job, otherwise aborts the job; If any exception is
* thrown during job commitment, also aborts the job.
* 5. If the job is successfully committed, perform post-commit operations such as
* processing statistics.
* @return The set of all partition paths that were updated during this write job.
*/
def write(
sparkSession: SparkSession,
plan: SparkPlan,
fileFormat: FileFormat,
committer: FileCommitProtocol,
outputSpec: OutputSpec,
hadoopConf: Configuration,
partitionColumns: Seq[Attribute],
bucketSpec: Option[BucketSpec],
statsTrackers: Seq[WriteJobStatsTracker],
options: Map[String, String])
: Set[String] = {
val job = Job.getInstance(hadoopConf)
job.setOutputKeyClass(classOf[Void])
job.setOutputValueClass(classOf[InternalRow])
FileOutputFormat.setOutputPath(job, new Path(outputSpec.outputPath))
val partitionSet = AttributeSet(partitionColumns)
val dataColumns = outputSpec.outputColumns.filterNot(partitionSet.contains)
var needConvert = false
val projectList: Seq[NamedExpression] = plan.output.map {
case p if partitionSet.contains(p) && p.dataType == StringType && p.nullable =>
needConvert = true
Alias(Empty2Null(p), p.name)()
case attr => attr
}
val empty2NullPlan = if (needConvert) ProjectExec(projectList, plan) else plan
val bucketIdExpression = bucketSpec.map { spec =>
val bucketColumns = spec.bucketColumnNames.map(c => dataColumns.find(_.name == c).get)
// Use `HashPartitioning.partitionIdExpression` as our bucket id expression, so that we can
      // guarantee the data distribution is the same between shuffle and bucketed data source, which
      // enables us to only shuffle one side when joining a bucketed table with a normal one.
HashPartitioning(bucketColumns, spec.numBuckets).partitionIdExpression
}
val sortColumns = bucketSpec.toSeq.flatMap {
spec => spec.sortColumnNames.map(c => dataColumns.find(_.name == c).get)
}
val caseInsensitiveOptions = CaseInsensitiveMap(options)
val dataSchema = dataColumns.toStructType
DataSourceUtils.verifySchema(fileFormat, dataSchema)
// Note: prepareWrite has side effect. It sets "job".
val outputWriterFactory =
fileFormat.prepareWrite(sparkSession, job, caseInsensitiveOptions, dataSchema)
val description = new WriteJobDescription(
uuid = UUID.randomUUID.toString,
serializableHadoopConf = new SerializableConfiguration(job.getConfiguration),
outputWriterFactory = outputWriterFactory,
allColumns = outputSpec.outputColumns,
dataColumns = dataColumns,
partitionColumns = partitionColumns,
bucketIdExpression = bucketIdExpression,
path = outputSpec.outputPath,
customPartitionLocations = outputSpec.customPartitionLocations,
maxRecordsPerFile = caseInsensitiveOptions.get("maxRecordsPerFile").map(_.toLong)
.getOrElse(sparkSession.sessionState.conf.maxRecordsPerFile),
timeZoneId = caseInsensitiveOptions.get(DateTimeUtils.TIMEZONE_OPTION)
.getOrElse(sparkSession.sessionState.conf.sessionLocalTimeZone),
statsTrackers = statsTrackers
)
// We should first sort by partition columns, then bucket id, and finally sorting columns.
val requiredOrdering = partitionColumns ++ bucketIdExpression ++ sortColumns
// the sort order doesn't matter
val actualOrdering = empty2NullPlan.outputOrdering.map(_.child)
val orderingMatched = if (requiredOrdering.length > actualOrdering.length) {
false
} else {
requiredOrdering.zip(actualOrdering).forall {
case (requiredOrder, childOutputOrder) =>
requiredOrder.semanticEquals(childOutputOrder)
}
}
SQLExecution.checkSQLExecutionId(sparkSession)
// propagate the description UUID into the jobs, so that committers
// get an ID guaranteed to be unique.
job.getConfiguration.set("spark.sql.sources.writeJobUUID", description.uuid)
// This call shouldn't be put into the `try` block below because it only initializes and
// prepares the job, any exception thrown from here shouldn't cause abortJob() to be called.
committer.setupJob(job)
try {
val rdd = if (orderingMatched) {
empty2NullPlan.execute()
} else {
// SPARK-21165: the `requiredOrdering` is based on the attributes from analyzed plan, and
// the physical plan may have different attribute ids due to optimizer removing some
// aliases. Here we bind the expression ahead to avoid potential attribute ids mismatch.
val orderingExpr = bindReferences(
requiredOrdering.map(SortOrder(_, Ascending)), outputSpec.outputColumns)
SortExec(
orderingExpr,
global = false,
child = empty2NullPlan).execute()
}
// SPARK-23271 If we are attempting to write a zero partition rdd, create a dummy single
// partition rdd to make sure we at least set up one write task to write the metadata.
val rddWithNonEmptyPartitions = if (rdd.partitions.length == 0) {
sparkSession.sparkContext.parallelize(Array.empty[InternalRow], 1)
} else {
rdd
}
val jobIdInstant = new Date().getTime
val ret = new Array[WriteTaskResult](rddWithNonEmptyPartitions.partitions.length)
sparkSession.sparkContext.runJob(
rddWithNonEmptyPartitions,
(taskContext: TaskContext, iter: Iterator[InternalRow]) => {
executeTask(
description = description,
jobIdInstant = jobIdInstant,
sparkStageId = taskContext.stageId(),
sparkPartitionId = taskContext.partitionId(),
sparkAttemptNumber = taskContext.taskAttemptId().toInt & Integer.MAX_VALUE,
committer,
iterator = iter)
},
rddWithNonEmptyPartitions.partitions.indices,
(index, res: WriteTaskResult) => {
committer.onTaskCommit(res.commitMsg)
ret(index) = res
})
val commitMsgs = ret.map(_.commitMsg)
committer.commitJob(job, commitMsgs)
logInfo(s"Write Job ${description.uuid} committed.")
processStats(description.statsTrackers, ret.map(_.summary.stats))
logInfo(s"Finished processing stats for write job ${description.uuid}.")
// return a set of all the partition paths that were updated during this job
ret.map(_.summary.updatedPartitions).reduceOption(_ ++ _).getOrElse(Set.empty)
} catch { case cause: Throwable =>
logError(s"Aborting job ${description.uuid}.", cause)
committer.abortJob(job)
throw new SparkException("Job aborted.", cause)
}
}
/** Writes data out in a single Spark task. */
private def executeTask(
description: WriteJobDescription,
jobIdInstant: Long,
sparkStageId: Int,
sparkPartitionId: Int,
sparkAttemptNumber: Int,
committer: FileCommitProtocol,
iterator: Iterator[InternalRow]): WriteTaskResult = {
val jobId = SparkHadoopWriterUtils.createJobID(new Date(jobIdInstant), sparkStageId)
val taskId = new TaskID(jobId, TaskType.MAP, sparkPartitionId)
val taskAttemptId = new TaskAttemptID(taskId, sparkAttemptNumber)
// Set up the attempt context required to use in the output committer.
val taskAttemptContext: TaskAttemptContext = {
// Set up the configuration object
val hadoopConf = description.serializableHadoopConf.value
hadoopConf.set("mapreduce.job.id", jobId.toString)
hadoopConf.set("mapreduce.task.id", taskAttemptId.getTaskID.toString)
hadoopConf.set("mapreduce.task.attempt.id", taskAttemptId.toString)
hadoopConf.setBoolean("mapreduce.task.ismap", true)
hadoopConf.setInt("mapreduce.task.partition", 0)
new TaskAttemptContextImpl(hadoopConf, taskAttemptId)
}
committer.setupTask(taskAttemptContext)
val dataWriter =
if (sparkPartitionId != 0 && !iterator.hasNext) {
        // In the case of an empty job, keep the first partition so that file formats like Parquet can still write metadata.
new EmptyDirectoryDataWriter(description, taskAttemptContext, committer)
} else if (description.partitionColumns.isEmpty && description.bucketIdExpression.isEmpty) {
new SingleDirectoryDataWriter(description, taskAttemptContext, committer)
} else {
new DynamicPartitionDataWriter(description, taskAttemptContext, committer)
}
try {
Utils.tryWithSafeFinallyAndFailureCallbacks(block = {
// Execute the task to write rows out and commit the task.
while (iterator.hasNext) {
dataWriter.write(iterator.next())
}
dataWriter.commit()
})(catchBlock = {
// If there is an error, abort the task
dataWriter.abort()
logError(s"Job $jobId aborted.")
}, finallyBlock = {
dataWriter.close()
})
} catch {
case e: FetchFailedException =>
throw e
case f: FileAlreadyExistsException if SQLConf.get.fastFailFileFormatOutput =>
// If any output file to write already exists, it does not make sense to re-run this task.
// We throw the exception and let Executor throw ExceptionFailure to abort the job.
throw new TaskOutputFileAlreadyExistException(f)
case t: Throwable =>
throw new SparkException("Task failed while writing rows.", t)
}
}
/**
* For every registered [[WriteJobStatsTracker]], call `processStats()` on it, passing it
* the corresponding [[WriteTaskStats]] from all executors.
*/
private[datasources] def processStats(
statsTrackers: Seq[WriteJobStatsTracker],
statsPerTask: Seq[Seq[WriteTaskStats]])
: Unit = {
val numStatsTrackers = statsTrackers.length
assert(statsPerTask.forall(_.length == numStatsTrackers),
s"""Every WriteTask should have produced one `WriteTaskStats` object for every tracker.
|There are $numStatsTrackers statsTrackers, but some task returned
|${statsPerTask.find(_.length != numStatsTrackers).get.length} results instead.
""".stripMargin)
val statsPerTracker = if (statsPerTask.nonEmpty) {
statsPerTask.transpose
} else {
statsTrackers.map(_ => Seq.empty)
}
statsTrackers.zip(statsPerTracker).foreach {
case (statsTracker, stats) => statsTracker.processStats(stats)
}
}
}
| witgo/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormatWriter.scala | Scala | apache-2.0 | 14,215 |
//
// Token.scala -- Scala class Token
// Project OrcScala
//
// Created by dkitchin on Aug 12, 2011.
//
// Copyright (c) 2019 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.run.core
import scala.util.control.NonFatal
import orc.{ CaughtEvent, ErrorAccessor, FutureState }
import orc.{ OrcEvent, OrcRuntime, Schedulable, StoppedFuture }
import orc.FutureState.Bound
import orc.ast.oil.nameless.{ Argument, Call, Constant, DeclareCallables, DeclareType, Def, Expression, FieldAccess, Graft, HasType, Hole, New, Otherwise, Parallel, Sequence, Site, Stop, Trim, UnboundVariable, Variable, VtimeZone }
import orc.error.OrcException
import orc.error.runtime.{ ArgumentTypeMismatchException, ArityMismatchException, DoesNotHaveMembersException, JavaStackLimitReachedError, NoSuchMemberException, StackLimitReachedError, TokenException }
import orc.lib.time.{ Vawait, Vtime }
import orc.run.Logger
import orc.run.distrib.NoLocationAvailable
import orc.run.distrib.token.{ DOrcExecution, RemoteFutureRef }
import orc.values.{ Field, OrcObject, Signal }
/** Token represents a "process" executing in the Orc program.
*
 * For lack of a better place to put it, here is a little
* documentation of how publish and other methods on Token
* and other classes (notably Group) handle their Option[AnyRef]
* argument. None represents stop and Some(v) represents the
* value v. So the expression x.get represents the assumption
* (runtime checked) that x is not a stop value.
*
 * This convention is not applied consistently, since the Java sites
 * cannot easily work with the Option type, so the Site API is
 * unchanged. This confines the changes to the core runtime;
 * however, it does mean that there are still parts of the code
* where there is no way to represent stop.
*
* Be careful when blocking because you may be unblocked
* immediately in a different thread.
*
* @see Blockable
*
* @author dkitchin
*/
class Token protected (
protected var node: Expression,
protected var stack: Frame,
protected var env: List[Binding],
protected var group: Group,
protected var clock: Option[VirtualClock],
protected var state: TokenState,
val debugId: Long)
extends GroupMember with Schedulable with Blockable with Resolver {
import TokenState._
/** Convenience constructor with defaults */
protected def this(
node: Expression,
stack: Frame = EmptyFrame,
env: List[Binding] = Nil,
group: Group,
clock: Option[VirtualClock] = None,
state: TokenState = TokenState.Live) = {
this(node, stack, env, group, clock, state, Token.getNextTokenDebugId(group.runtime))
}
var functionFramesPushed: Int = 0
// These fields may be useful for debugging multiple scheduling or multiple run bugs.
// Uses of them are marked with "MULTI_SCHED_DEBUG"
/*
val isScheduled = new AtomicBoolean(false)
val isRunning = new AtomicBoolean(false)
val schedulingThread = new AtomicReference[Thread](null)
*/
def runtime: OrcRuntime = group.runtime
def execution = group.execution
def sourcePosition = node.sourceTextRange
def options = group.options
/** Execution of a token cannot indefinitely block the executing thread. */
override val nonblocking = true
/** Public constructor */
def this(start: Expression, g: Group) = {
this(node = start, group = g, stack = GroupFrame(EmptyFrame))
Tracer.traceTokenCreation(this, this.state)
}
/** Copy constructor with defaults */
private def copy(
node: Expression = node,
stack: Frame = stack,
env: List[Binding] = env,
group: Group = group,
clock: Option[VirtualClock] = clock,
state: TokenState = state): Token = {
//assert(stack.forall(!_.isInstanceOf[FutureFrame]), "Stack being used in copy contains FutureFrames: " + stack)
val newToken = new Token(node, stack, env, group, clock, state)
Tracer.traceTokenCreation(newToken, state)
newToken
}
/*
* On creation: Add a token to its group if it is not halted or killed.
*
* All initialization that must occur before run() executes must happen
* before this point, because once the token is added to a group it may
* run at any time.
*
*/
state match {
case Publishing(_) | Live | Blocked(_) | Suspending(_) | Suspended(_) => group.add(this)
case Halted | Killed => {}
}
private val toStringRecusionGuard = new ThreadLocal[Object]()
override def toString = {
try {
val recursing = toStringRecusionGuard.get
toStringRecusionGuard.set(java.lang.Boolean.TRUE)
getClass.getName + (if (recursing eq null) f"(debugId=$debugId%#x,state=$state, stackTop=$stack, node=$node, node.sourceTextRange=${node.sourceTextRange}, group=$group, clock=$clock)" else "")
} finally {
toStringRecusionGuard.remove()
}
}
/** Change this token's state.
*
* Return true if the token's state was successfully set
* to the requested state.
*
* Return false if the token's state was already Killed.
*/
protected def setState(newState: TokenState): Boolean = synchronized {
/*
* Make sure that the token has not been killed.
* If it has been killed, return false immediately.
*/
if (state != Killed) {
// Logger.finer(s"Changing state: $this to $newState")
Tracer.traceTokenStateTransition(this, state, newState)
state = newState
true
} else {
false
}
}
/** An expensive walk-to-root check for alive state */
def checkAlive(): Boolean = state.isLive && group.checkAlive()
override def setQuiescent() { clock foreach { _.setQuiescent() } }
override def unsetQuiescent() { clock foreach { _.unsetQuiescent() } }
/* When a token is scheduled, notify its clock accordingly */
override def onSchedule() {
Tracer.traceTokenExecStateTransition(this, TokenExecState.Scheduled)
// MULTI_SCHED_DEBUG
/*
val old = isScheduled.getAndSet(true)
if (!(old == false || group.isKilled())) {
assert(false, s"""${System.nanoTime().toHexString}: Failed to set scheduled: ${this.debugId.toHexString}""")
orc.util.Tracer.dumpOnlyLocation(debugId)
}
val curr = Thread.currentThread()
val prev = schedulingThread.getAndSet(curr)
if(!(curr == prev || prev == null)) {
val trace = StackTrace.getStackTrace(3, 1)
println(s"${System.nanoTime().toHexString}: Scheduling from a new thread: Was ${prev.getId.toHexString}, now ${curr.getId.toHexString}. ${this.debugId.toHexString} ${trace.mkString("; ")}")
}*/
unsetQuiescent()
}
/* When a token is finished running, notify its clock accordingly */
override def onComplete() {
setQuiescent()
// MULTI_SCHED_DEBUG
//assert(isRunning.compareAndSet(true, false) || state == Killed || Token.isRunningAlreadyCleared.get, s"""${System.nanoTime().toHexString}: Failed to clear running: $this""")
//Token.isRunningAlreadyCleared.set(false)
Tracer.traceTokenExecStateTransition(this, TokenExecState.DoneRunning)
}
/** Pass an event to this token's enclosing group.
*
* This method is asynchronous:
* it may be called from a thread other than
* the thread currently running this token.
*/
def notifyOrc(event: OrcEvent) { group.notifyOrc(event) }
/** Kill this token.
*
* This method is asynchronous:
* it may be called from a thread other than
* the thread currently running this token.
*/
def kill() {
def findController(victimState: TokenState): Option[CallController] = {
victimState match {
case Suspending(s) => findController(s)
case Suspended(s) => findController(s)
case Blocked(escc: ExternalSiteCallController) => Some(escc)
case Live | Publishing(_) | Blocked(_) | Halted | Killed => None
}
}
Tracer.traceTokenExecStateTransition(this, TokenExecState.Killed)
synchronized {
val controller = findController(state)
if (setState(Killed)) {
/* group.remove(this) conceptually runs here, but as an optimization,
* this is unnecessary. Note that the current Group.remove implementation
* relies on this optimization for correctness of the tokenCount. */
}
controller foreach { _.kill }
}
}
/** Make this token block on some resource.
*
* This method is synchronous:
* it must only be called from a thread that is currently
* executing the run() method of this token.
*/
def blockOn(blocker: Blocker) {
state match {
case Live => setState(Blocked(blocker))
case Killed => {}
case _ => throw new AssertionError("Only live tokens may be blocked: state=" + state + " " + this)
}
}
/** Unblock a token that is currently blocked on some resource.
* Schedule the token to run.
*
* This method is synchronous:
* it must only be called from a thread that is currently
* executing the run() method of this token.
*/
def unblock() {
state match {
case Blocked(_: OtherwiseGroup) => {
if (setState(Live)) { runtime.stage(this) }
}
case Suspending(Blocked(_: OtherwiseGroup)) => {
if (setState(Suspending(Live))) { runtime.stage(this) }
}
case Suspended(Blocked(_: OtherwiseGroup)) => {
setState(Suspended(Live))
}
case Killed => {}
case _ => { throw new AssertionError("unblock on a Token that is not Blocked(OtherwiseGroup)/Killed: state=" + state) }
}
}
/** Suspend the token in preparation for a program rewrite.
*
* This method is asynchronous:
* it may be called from a thread other than
* the thread currently running this token.
*/
def suspend() {
state match {
case Live | Blocked(_) | Publishing(_) => setState(Suspending(state))
case Suspending(_) | Suspended(_) | Halted | Killed => {}
}
}
/** Resume the token from suspension after a program rewrite.
*
* This method is asynchronous:
* it may be called from a thread other than
* the thread currently running this token.
*/
def resume() {
state match {
case Suspending(prevState) => setState(prevState)
case Suspended(prevState) => {
if (setState(prevState)) { runtime.stage(this) }
}
case Publishing(_) | Live | Blocked(_) | Halted | Killed => {}
}
}
protected def fork() = synchronized { (this, copy()) }
def move(e: Expression) = { node = e; this }
def jump(context: List[Binding]) = { env = context; this }
protected def push(newStack: Frame) = {
if (newStack.isInstanceOf[FunctionFrame]) {
functionFramesPushed = functionFramesPushed + 1
if (options.stackSize > 0 && functionFramesPushed > options.stackSize) {
this !! new StackLimitReachedError(options.stackSize)
}
}
stack = newStack
this
}
protected def pushContinuation(k: (Option[AnyRef] => Unit)) = push(FutureFrame(k, stack))
/** Remove the top frame of this token's stack.
*
* This method is synchronous:
* it must only be called from a thread that is currently
* executing the run() method of this token.
*/
def pop() = {
if (stack.isInstanceOf[FunctionFrame]) {
functionFramesPushed = functionFramesPushed - 1
}
stack = stack.asInstanceOf[CompositeFrame].previous
}
def getGroup(): Group = { group }
def getNode(): Expression = { node }
def getEnv(): List[Binding] = { env }
def getStack(): Frame = { stack }
def getClock(): Option[VirtualClock] = { clock }
//def getState(): TokenState = { state }
def migrate(newGroup: Group) = {
require(newGroup != group)
val oldGroup = group
newGroup.add(this)
val removeSucceeded = oldGroup.remove(this)
// If the remove failed we kill instead of switching groups.
// We also remove ourselves from the new group.
if (removeSucceeded) {
group = newGroup
} else {
newGroup.remove(this)
kill()
}
this
}
protected def join(newGroup: Group) = {
push(GroupFrame(stack))
migrate(newGroup)
this
}
def bind(b: Binding) = {
env = b :: env
stack match {
case BindingFrame(n, previous) => { stack = BindingFrame(n + 1, previous) }
/* Tail call optimization (part 1 of 2) */
case _: FunctionFrame if (!options.disableTailCallOpt) => { /* Do not push a binding frame over a tail call.*/ }
case _ => { push(BindingFrame(1, stack)) }
}
this
}
def unbind(n: Int) = { env = env.drop(n); this }
protected def lookup(a: Argument): Binding = {
a match {
case Constant(v) => BoundValue(v)
case vr @ Variable(n) => {
val v = env(n)
v
}
case UnboundVariable(x) =>
Logger.severe(s"Stopping token due to unbound variable $a at ${a.sourceTextRange}")
BoundStop
}
}
protected def functionCall(d: Def, context: List[Binding], params: List[Binding]) {
Logger.fine(s"Calling $d with $params")
if (params.size != d.arity) {
this !! new ArityMismatchException(d.arity, params.size) /* Arity mismatch. */
} else {
/* 1) If this is not a tail call, push a function frame referring to the current environment.
* 2) Change the current environment to the closure's saved environment.
* 3) Add bindings for the arguments to the new current environment.
*
* Caution: The ordering of these operations is very important;
* do not permute them.
*/
/* Tail call optimization (part 2 of 2) */
/*
* Push a new FunctionFrame
* only if the call is not a tail call.
*/
if (!stack.isInstanceOf[FunctionFrame] || options.disableTailCallOpt) {
push(FunctionFrame(node, env, stack))
}
/* Jump into the function context */
jump(context)
/* Bind the args */
for (p <- params) { bind(p) }
/* Move into the function body */
move(d.body)
runtime.stage(this)
}
}
protected def orcSiteCall(s: OrcSite, params: List[AnyRef]) {
if (params.size != s.code.arity) {
this !! new ArityMismatchException(s.code.arity, params.size) /* Arity mismatch. */
} else {
val sh = new OrcSiteCallController(this)
blockOn(sh)
// TODO: Implement TCO for OrcSite calls. By reusing the OrcSiteCallController? When is it safe?
// Just build the stack instead of pushing after we create it.
// The parameters go on in reverse order. First parameter on the "bottom" of the arguments.
val env = (params map BoundValue).reverse ::: s.context
// Build a token that is in a group nested inside the declaration context.
val t = new Token(
s.code.body,
env = env,
group = new OrcSiteCallGroup(s.group, sh),
stack = GroupFrame(EmptyFrame),
clock = s.clock)
runtime.stage(t)
}
}
protected def clockCall(vc: VirtualClockOperation, actuals: List[AnyRef]) {
(vc, actuals) match {
case (`Vawait`, List(t)) => {
clock match {
case Some(cl) => cl.await(this, t)
case None => halt()
}
}
case (`Vtime`, Nil) => {
clock flatMap { _.now() } match {
case Some(t) => publish(Some(t))
case None => halt()
}
}
case _ => this !! new ArityMismatchException(vc.arity, actuals.size)
}
}
protected def siteCall(target: AnyRef, actuals: List[AnyRef]) {
Logger.fine(s"Calling $target with $actuals")
assert(!target.isInstanceOf[Binding])
//FIXME:Refactor: Place in correct classes, not all here
/* Maybe there's an extension mechanism we need to add to Orc here.
* 'Twould be nice to also move the Vclock hook below to this mechanism. */
group.execution match {
case dOrcExecution: DOrcExecution => {
orc.run.distrib.Logger.Invoke.entering(getClass.getName, "siteCall", Seq(target.getClass.getName, target, actuals))
val arguments = actuals.toArray
/* Case 1: If there's a call location override, use it */
val clo = dOrcExecution.callLocationOverride(target, arguments)
val candidateDestinations = if (clo.nonEmpty) {
orc.run.distrib.Logger.Invoke.finest(s"siteCall: $target(${arguments.mkString(",")}): callLocationOverride=$clo")
clo
} else {
/* Look up current locations, and find their intersection */
val intersectLocs = arguments.map(dOrcExecution.currentLocations(_)).fold(dOrcExecution.currentLocations(target))({ _ & _ })
orc.run.distrib.Logger.Invoke.finest(s"siteCall: $target(${arguments.mkString(",")}): intersection of current locations=$intersectLocs")
if (intersectLocs.nonEmpty) {
/* Case 2: If the intersection of current locations is non-empty, use it */
intersectLocs
} else {
/* Look up permitted locations, and find their intersection */
val intersectPermittedLocs = (arguments map dOrcExecution.permittedLocations).fold(dOrcExecution.permittedLocations(target)) { _ & _ }
if (intersectPermittedLocs.nonEmpty) {
/* Case 3: If the intersection of permitted locations is non-empty, use it */
intersectPermittedLocs
} else {
/* Case 4: No permitted location, fail */
val nla = new NoLocationAvailable((target +: arguments.toSeq).map(v => (v, dOrcExecution.currentLocations(v).map(_.runtimeId.longValue))))
orc.run.distrib.Logger.Invoke.throwing(getClass.getName, "invokeIntercepted", nla)
throw nla
}
}
}
orc.run.distrib.Logger.Invoke.finest(s"siteCall: $target(${arguments.mkString(",")}): candidateDestinations=$candidateDestinations")
if (!(candidateDestinations contains dOrcExecution.runtime.here)) {
/* Send remote call */
val destination = dOrcExecution.selectLocationForCall(candidateDestinations)
orc.run.distrib.Logger.Invoke.fine(s"siteCall: $target(${arguments.mkString(",")}): selected location for call: $destination")
dOrcExecution.sendToken(this, destination)
return
} else {
/* Call can be local after all, run here */
orc.run.distrib.Logger.Invoke.finest(s"siteCall: $target(${arguments.mkString(",")}): invoking locally")
/* Fall through */
}
}
case _ => /* Not a distributed execution */
}
//End of code needing refactoring
target match {
case vc: VirtualClockOperation => {
clockCall(vc, actuals)
}
case s: OrcSite => {
orcSiteCall(s, actuals)
}
case _ => {
val scc = new VirtualExternalSiteCallController(this, target, actuals)
blockOn(scc.materialized)
runtime.stage(scc.materialized)
}
}
}
/** Make a call.
* The call target is resolved, but the parameters are not yet resolved.
*/
protected def makeCall(target: AnyRef, params: List[Binding]) {
//FIXME:Refactor: Place in correct classes, not all here
def derefAnyBoundLocalFuture(value: AnyRef): AnyRef = {
value match {
case _: RemoteFutureRef => value
case f: Future => f.get match {
case Bound(bv) => bv
case _ => value
}
case _ => value
}
}
def safeToString(v: AnyRef): String = if (v == null) "null" else try v.toString catch { case NonFatal(_) | _: LinkageError => s"${orc.util.GetScalaTypeName(v)}@${System.identityHashCode(v)}" }
group.execution match {
case dOrcExecution: DOrcExecution => {
target match {
/*FIXME:HACK*/
case c: Closure if c.definition.optionalVariableName.exists(_.startsWith("ᑅSubAstValueSetDef")) => {
/* Attempt to prospectively migrate to Sub-AST value set */
/* Look up current locations, and find their intersection */
val intersectLocs = params.map(derefAnyBoundLocalFuture(_)).map(dOrcExecution.currentLocations(_)).fold(dOrcExecution.currentLocations(target))({ _ & _ })
orc.run.distrib.Logger.Invoke.finest(s"prospective migrate: ${safeToString(target)}(${params.map(safeToString(_)).mkString(",")}): intersection of current locations=$intersectLocs")
if (intersectLocs.nonEmpty && !(intersectLocs contains dOrcExecution.runtime.here)) {
/* Prospective migrate found a location to move to */
val destination = dOrcExecution.selectLocationForCall(intersectLocs)
orc.run.distrib.Logger.Invoke.fine(s"prospective migrate: ${safeToString(target)}(${params.map(safeToString(_)).mkString(",")}): selected location for call: $destination")
dOrcExecution.sendToken(this, destination)
return
} else {
/* Call can be local after all, run here */
orc.run.distrib.Logger.Invoke.finest(s"prospective migrate: ${safeToString(target)}(${params.map(safeToString(_)).mkString(",")}): continuing locally")
/* Fall through */
}
}
case _ => /* Not a ᑅSubAstValueSetDef call */
}
}
case _ => /* Not a distributed execution */
}
//End of code needing refactoring
lazy val applyValue = try {
runtime.getAccessor(target, Field("apply")) match {
case _: ErrorAccessor =>
None
case a =>
Some(a.get(target))
}
} catch {
case _: DoesNotHaveMembersException | _: NoSuchMemberException =>
Logger.fine(s"Call target $target provided an accessor which immediately failed. This is a major performance problem due to .apply checks. Fix the Accessor computation code for this value.")
None
}
target match {
case c: Closure => {
functionCall(c.code, c.context, params)
}
case o if applyValue.isDefined => {
resolvePossibleFuture(applyValue.get) {
makeCall(_, params)
}
}
case s => {
params match {
/* Zero parameters. No need to block. */
case Nil => {
siteCall(s, Nil)
}
/* One parameter. May need to block. No need to join. */
case List(param) => {
resolve(param) { arg: AnyRef =>
if (arg.isInstanceOf[Field]) {
Logger.warning(s"Field call to site is no longer supported: $s($arg)")
}
siteCall(s, List(arg))
}
}
/* Multiple parameters. May need to join. */
case _ => {
/* Prepare to receive a list of arguments from the join once all parameters are resolved. */
pushContinuation({
case Some(args: List[_]) => siteCall(s, args.asInstanceOf[List[AnyRef]])
case Some(_) => throw new AssertionError("Join resulted in a non-list")
case None => halt()
})
/* Create a join over the parameters. */
val j = new Join(params, this, runtime)
/* Perform the join. */
j.join()
}
}
}
}
}
def designateClock(newClock: Option[VirtualClock]) {
newClock foreach { _.unsetQuiescent() }
clock foreach { _.setQuiescent() }
clock = newClock
}
def newVclock(orderingArg: AnyRef, body: Expression) = {
orderingArg match {
case orderingSite: orc.values.sites.TotalSite => {
def ordering(x: AnyRef, y: AnyRef) = {
// TODO: Add error handling, either here or in the scheduler.
// A comparator error should kill the engine.
val i = orderingSite.getInvoker(execution.runtime, Array(x, y)).invokeDirect(orderingSite, Array(x, y)).asInstanceOf[Int]
assert(i == -1 || i == 0 || i == 1, "Vclock time comparator " + orderingSite.name + " did not return -1/0/1")
i
}
join(new VirtualClockGroup(clock, group))
designateClock(Some(new VirtualClock(ordering, runtime)))
move(body)
runtime.stage(this)
}
case _ => {
this !! (new ArgumentTypeMismatchException(0, "TotalSite", orderingArg.toString()))
}
}
}
def newObject(bindings: Map[Field, Expression]) {
def binding2MaybeFuture(b: Binding): AnyRef = {
b match {
case BoundValue(v) => v
case BoundReadable(f: orc.Future) => f
case BoundStop => StoppedFuture
case BoundReadable(v) =>
throw new AssertionError(s"binding2MaybeFuture: bound to a ReadableBlocker that is not a Future: $v")
}
}
val self = new OrcObject()
Logger.fine(s"Instantiating class: ${bindings}")
val objenv = BoundValue(self) :: env
val fields = for ((name, expr) <- bindings) yield {
expr match {
// NOTE: The first two cases are optimizations to avoid creating a group and a token for simple fields.
case Constant(v) => {
(name, v)
}
case Variable(n) => {
(name, binding2MaybeFuture(objenv(n)))
}
case _ => {
// We use a GraftGroup since it is exactly what we need.
// The difference between this and graft is where the future goes.
val pg = new GraftGroup(group)
// A binding frame is not needed since publishing will trigger the token to halt.
val t = new Token(
expr,
env = objenv,
group = pg,
stack = GroupFrame(EmptyFrame),
clock = clock)
runtime.stage(t)
(name, binding2MaybeFuture(pg.binding))
}
}
}
Logger.fine(s"Setup binding for fields: $fields")
self.setFields(fields.toMap)
publish(Some(self))
}
//def stackOK(testStack: Array[java.lang.StackTraceElement], offset: Int): Boolean =
// testStack.length == 4 + offset && testStack(1 + offset).getMethodName() == "runTask" ||
// testStack(1 + offset).getMethodName() == "eval" && testStack(2 + offset).getMethodName() == "run" && stackOK(testStack, offset + 2)
def run() {
val beginProfInterval = orc.util.Profiler.beginInterval(debugId, 'Token_run)
Tracer.traceTokenExecStateTransition(this, TokenExecState.Running)
//val ourStack = new Throwable("Entering Token.run").getStackTrace()
//assert(stackOK(ourStack, 0), "Token run not in ThreadPoolExecutor.Worker! sl="+ourStack.length+", m1="+ourStack(1).getMethodName()+", state="+state)
// MULTI_SCHED_DEBUG
    // Add this yield to increase the odds of thread interleaving, such as a kill happening while the token is running.
//Thread.`yield`()
// MULTI_SCHED_DEBUG
/*
val old = isScheduled.getAndSet(false)
if (!(old == true || state == Killed)) {
assert(false, s"""${System.nanoTime().toHexString}: Failed to clear scheduled: ${this.debugId.toHexString}""")
orc.util.Tracer.selectLocation(debugId)
}
*/
try {
if (group.isKilled()) { kill() }
// MULTI_SCHED_DEBUG
//assert(isRunning.compareAndSet(false, true) || state == Killed, s"${System.nanoTime().toHexString}: Failed to set running: $this")
state match {
case Live => eval(node)
case Suspending(prevState) => setState(Suspended(prevState))
case Blocked(b) => orc.util.Profiler.measureInterval(debugId, 'Token_Blocked) { b.check(this) }
case Publishing(v) => if (setState(Live)) orc.util.Profiler.measureInterval(debugId, 'Token_Publishing) { stack(this, v) }
case Killed => orc.util.Profiler.measureInterval(debugId, 'Token_Killed) {} // This token was killed while it was on the schedule queue; ignore it
case Suspended(_) => throw new AssertionError(s"suspended token scheduled: $this")
case Halted => throw new AssertionError(s"halted token scheduled: $this")
}
} catch {
case e: OrcException => {
this !! e
}
case e: InterruptedException => {
halt()
Thread.currentThread().interrupt()
} //Thread interrupt causes halt without notify
case e: StackOverflowError => {
this !! new JavaStackLimitReachedError(stack.count(_.isInstanceOf[FunctionFrame]), e)
}
case e: Throwable => {
notifyOrc(CaughtEvent(e))
halt()
}
} finally {
orc.util.Profiler.endInterval(debugId, 'Token_run, beginProfInterval)
}
}
override def resolveOptional(b: Binding)(k: Option[AnyRef] => Unit) = {
// MULTI_SCHED_DEBUG
//Token.isRunningAlreadyCleared.set(true)
//assert(isRunning.compareAndSet(true, false) || state == Killed || Token.isRunningAlreadyCleared.get, s"${System.nanoTime().toHexString}: Failed to clear running: $this")
super.resolveOptional(b)(k)
}
protected def resolvePossibleFuture(v: AnyRef)(k: AnyRef => Unit) {
v match {
case f: orc.Future =>
resolve(f)(k)
case b: Binding =>
throw new AssertionError("This kind of resolve cannot ever handle Bindings")
case v =>
k(v)
}
}
protected def resolve(f: orc.Future)(k: AnyRef => Unit) {
resolveOptional(f) {
case Some(v) => k(v)
case None => halt()
}
}
protected def resolveOptional(f: orc.Future)(k: Option[AnyRef] => Unit) {
f.get match {
case FutureState.Bound(v) =>
k(Some(v))
case FutureState.Stopped =>
k(None)
case FutureState.Unbound => {
pushContinuation(k)
val h = new TokenFutureReader(this)
blockOn(h)
f.read(h)
}
}
}
protected def eval(node: orc.ast.oil.nameless.Expression): Unit = orc.util.Profiler.measureInterval(debugId, 'Token_eval) {
//Logger.finest(s"Evaluating: $node")
node match {
case Stop() => halt()
case Hole(_, _) => halt()
case (a: Argument) => {
resolve(lookup(a)) { v => publish(Some(v)) }
}
case Call(target, args, _) => {
val params = args map lookup
resolve(lookup(target)) { makeCall(_, params) }
}
case Parallel(left, right) => {
val (l, r) = fork()
l.move(left)
r.move(right)
runtime.stage(l, r)
}
case Sequence(left, right) => {
push(SequenceFrame(right, stack))
move(left)
runtime.stage(this)
}
case Graft(value, body) => {
val (v, b) = fork()
val pg = new GraftGroup(group)
b.bind(pg.binding)
v.join(pg)
v.move(value)
b.move(body)
runtime.stage(v, b)
}
case Trim(expr) => {
val g = new TrimGroup(group)
join(g)
move(expr)
runtime.stage(this)
}
case Otherwise(left, right) => {
val (l, r) = fork
r.move(right)
val region = new OtherwiseGroup(group, r)
l.join(region)
l.move(left)
runtime.stage(l)
}
case New(_, bindings, _) => {
newObject(bindings)
}
case FieldAccess(o, f) => {
resolve(lookup(o)) {
_ match {
/*FIXME: Factor out this d-Orc case */
/*TODO: Do we need to do a full token migrate, or can we do a remote read? */
case rr: orc.run.distrib.common.RemoteObjectRef =>
val dOrcExecution = execution.asInstanceOf[DOrcExecution]
dOrcExecution.sendToken(this, dOrcExecution.currentLocations(rr).head)
case s: AnyRef =>
val v = runtime.getAccessor(s, f).get(s)
v match {
case f: orc.Future =>
val h = new TokenFuturePublisher(this)
blockOn(h)
f.read(h)
case _ =>
publish(Some(v))
}
case null =>
throw new DoesNotHaveMembersException(null)
}
}
}
case VtimeZone(timeOrdering, body) => {
resolve(lookup(timeOrdering)) { newVclock(_, body) }
}
case decldefs @ DeclareCallables(openvars, decls, body) => {
/* Closure compaction: Bind only the free variables
* of the defs in this lexical context.
*/
val lexicalContext = openvars map { i: Int => env(i) }
decls.head match {
case _: Def => {
val closureGroup = new ClosureGroup(decls.collect({ case d: Def => d }), lexicalContext, runtime, clock)
runtime.stage(closureGroup)
for (c <- closureGroup.members) {
bind(BoundReadable(c))
}
}
case _: Site => {
val sites = for (s <- decls) yield new OrcSite(s.asInstanceOf[Site], group, clock)
val context = (sites map BoundValue) ::: lexicalContext
for (s <- sites) {
s.context = context
bind(BoundValue(s))
}
}
}
move(body)
runtime.stage(this)
}
case HasType(expr, _) => {
move(expr)
eval(this.node)
}
case DeclareType(_, expr) => {
move(expr)
eval(this.node)
}
}
}
def publish(v: Option[AnyRef]) {
Logger.finest(s"Publishing $v in $this")
v foreach { vv =>
assert(!vv.isInstanceOf[Binding], s"Interpreter bug. Triggered at $this, with $vv")
assert(!vv.isInstanceOf[java.math.BigInteger], s"Type coercion error at $this, with $vv")
assert(!vv.isInstanceOf[java.math.BigDecimal], s"Type coercion error at $this, with $vv")
}
state match {
case Blocked(_: OtherwiseGroup) => throw new AssertionError("publish on a pending Token")
case Live => {
// If we are live then publish normally.
setState(Publishing(v))
runtime.stage(this)
}
case Blocked(_) => {
if (v.isDefined) {
// If we are blocking then publish in a copy of this token.
// This is needed to allow blockers to publish more than once.
val nt = copy(state = Publishing(v))
runtime.stage(nt)
} else {
// However "publishing" stop is handled without duplication.
setState(Publishing(v))
runtime.stage(this)
}
}
case Suspending(_) => {
throw new AssertionError("Suspension is not supported anymore.")
setState(Suspending(Publishing(v)))
runtime.stage(this)
}
case Suspended(_) => {
setState(Suspended(Publishing(v)))
}
case Publishing(_) => throw new AssertionError("Already publishing!")
case Halted | Killed => {}
}
}
@deprecated("Call publish(Some(Signal))", "3.0")
def publish() { publish(Some(Signal)) }
override def halt() {
Logger.finer(s"Token halted: $this")
state match {
case Publishing(_) | Live | Blocked(_) | Suspending(_) => {
setState(Halted)
group.halt(this)
}
case Suspended(_) => throw new AssertionError("halt on a suspended Token")
case Halted | Killed => {}
}
}
def discorporate() {
state match {
case Publishing(_) | Live | Blocked(_) | Suspending(_) => {
group.discorporate(this)
}
case Suspended(_) => throw new AssertionError("discorporate on a suspended Token")
case Halted | Killed => {}
}
}
def !!(e: OrcException) {
e.setPosition(node.sourceTextRange.orNull)
e match {
case te: TokenException if (te.getBacktrace() == null || te.getBacktrace().length == 0) => {
val callPoints = stack collect { case f: FunctionFrame => f.callpoint.sourceTextRange.orNull }
te.setBacktrace(callPoints.take(2048).toArray)
}
case _ => {} // Not a TokenException; no need to collect backtrace
}
notifyOrc(CaughtEvent(e))
halt()
}
override def awakeTerminalValue(v: AnyRef) = {
setState(Live)
publish(Some(v))
}
def awakeNonterminalValue(v: AnyRef) = {
publish(Some(v))
}
def awakeStop() = publish(None)
override def awakeException(e: OrcException) = this !! e
override def awake() { unblock() }
// DEBUG CODE:
def envToString() = {
env.zipWithIndex.map({
case (b, i) => s"$i: " + (b match {
case BoundValue(v) => v.toString
case BoundReadable(c) => c.toString
case BoundStop => "stop"
})
    }).mkString("\n")
}
}
private class LongCounter(private var value: Long) {
def incrementAndGet() = {
value += 1L
value
}
}
object Token {
// MULTI_SCHED_DEBUG
/*
private val isRunningAlreadyCleared = new ThreadLocal[Boolean]() {
override def initialValue() = false
}
*/
private val currentTokenDebugId = new ThreadLocal[LongCounter]() {
override def initialValue() = new LongCounter(0L)
}
def getNextTokenDebugId(runtime: OrcRuntime): Long =
    /* FIXME: This adverse coupling to runtime should be removed. Why not add a runtimeThreadId to the Orc trait? */
currentTokenDebugId.get.incrementAndGet() | (runtime.asInstanceOf[orc.run.Orc].runtimeDebugThreadId.toLong << 32)
}
/** Supertype of TokenStates */
abstract sealed class TokenState() {
val isLive: Boolean
}
object TokenState {
/** Token is ready to make progress */
case object Live extends TokenState() {
val isLive = true
}
/** Token is propagating a published value */
case class Publishing(v: Option[AnyRef]) extends TokenState() {
val isLive = true
}
/** Token is waiting on another task */
case class Blocked(blocker: Blocker) extends TokenState() {
val isLive = true
override def toString() = s"$productPrefix($blocker : ${blocker.getClass.getSimpleName})"
}
/** Token has been told to suspend, but it's still in the scheduler queue */
case class Suspending(prevState: TokenState) extends TokenState() {
val isLive = prevState.isLive
}
/** Suspended Tokens must be re-scheduled upon resume */
case class Suspended(prevState: TokenState) extends TokenState() {
val isLive = prevState.isLive
}
/** Token halted itself */
case object Halted extends TokenState() {
val isLive = false
}
/** Token killed by engine */
case object Killed extends TokenState() {
val isLive = false
}
}
/** Supertype of TokenExecStates.
*
* These are not actually used or stored other than for tracing.
*/
abstract sealed class TokenExecState()
object TokenExecState {
/** Token is executing on some thread */
case object Running extends TokenExecState()
/** Token is scheduled to execute */
case object Scheduled extends TokenExecState()
/** Token is waiting or blocked on some event.
*
* That event will trigger the token to be scheduled.
*/
case object DoneRunning extends TokenExecState()
/** Token has been killed */
case object Killed extends TokenExecState()
}
| orc-lang/orc | OrcScala/src/orc/run/core/Token.scala | Scala | bsd-3-clause | 39,124 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras2.layers
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.keras.layers.{Keras2Test, KerasBaseSpec}
import com.intel.analytics.zoo.pipeline.api.keras.models.Sequential
import com.intel.analytics.zoo.pipeline.api.keras.serializer.ModuleSerializationTest
class GlobalAveragePooling1DSpec extends KerasBaseSpec {
"GlobalAveragePooling1D" should "be the same as Keras" taggedAs(Keras2Test) in {
val kerasCode =
"""
|input_tensor = Input(shape=[3, 24])
|input = np.random.random([2, 3, 24])
|output_tensor = GlobalAveragePooling1D()(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = Sequential[Float]()
val layer = GlobalAveragePooling1D[Float](inputShape = Shape(3, 24))
seq.add(layer)
seq.getOutputShape().toSingle().toArray should be (Array(-1, 24))
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode)
}
}
class GlobalAveragePooling1DSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val layer = GlobalAveragePooling1D[Float](inputShape = Shape(3, 24))
layer.build(Shape(2, 3, 24))
val input = Tensor[Float](2, 3, 24).rand()
runSerializationTest(layer, input)
}
}
| intel-analytics/analytics-zoo | zoo/src/test/scala/com/intel/analytics/zoo/pipeline/api/keras2/layers/GlobalAveragePooling1DSpec.scala | Scala | apache-2.0 | 2,092 |
package com.rasterfoundry.database
import java.util.UUID
import cats.implicits._
import com.rasterfoundry.database.Implicits._
import com.rasterfoundry.database.filter.Filterables
import com.rasterfoundry.database.util._
import com.rasterfoundry.datamodel._
import com.rasterfoundry.datamodel.{PageRequest, Order}
import doobie.{LogHandler => _, _}
import doobie.implicits._
import doobie.postgres.implicits._
import doobie.util.log._
import doobie.util.{Read, Write}
import com.typesafe.scalalogging.LazyLogging
import scala.concurrent.duration.FiniteDuration
/**
  * This is an abstraction over listing arbitrary types from the DB with filters/pagination
*/
abstract class Dao[Model: Read: Write] extends Filterables {
val tableName: String
/** The fragment which holds the associated table's name */
def tableF = Fragment.const(tableName)
/** An abstract select statement to be used for constructing queries */
def selectF: Fragment
/** Begin construction of a complex, filtered query */
def query: Dao.QueryBuilder[Model] =
Dao.QueryBuilder[Model](selectF, tableF, List.empty)
}
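/** Companion holding the doobie statement `LogHandler` and the generic [[QueryBuilder]].
  *
  * A minimal usage sketch (illustrative only: `Widget`, its table, and the SQL are assumptions,
  * not part of this codebase; it presumes doobie can derive `Read`/`Write` for the model and
  * that the `Filterable` instances from `Implicits._` are in scope):
  * {{{
  * final case class Widget(id: UUID, name: String)
  *
  * object WidgetDao extends Dao[Widget] {
  *   val tableName = "widgets"
  *   def selectF = fr"SELECT id, name FROM" ++ tableF
  * }
  *
  * // filters accumulate into the WHERE clause; `page` wraps results in a PaginatedResponse
  * def listWidgets(pr: PageRequest): ConnectionIO[PaginatedResponse[Widget]] =
  *   WidgetDao.query.filter(fr"name = 'gizmo'").page(pr)
  * }}}
  */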
object Dao extends LazyLogging {
implicit val logHandler: LogHandler = LogHandler {
case Success(s: String,
a: List[Any],
e1: FiniteDuration,
e2: FiniteDuration) =>
val queryString =
        s.lines.dropWhile(_.trim.isEmpty).toArray.mkString("\n ")
      val logString = queryString
        .split("\\?", -1)
.zip(a.map(s => "'" + s + "'"))
.flatMap({ case (t1, t2) => List(t1, t2) })
.mkString("")
logger.debug(s"""Successful Statement Execution:
|
| ${logString}
|
| arguments = [${a.mkString(", ")}]
| elapsed = ${e1.toMillis} ms exec + ${e2.toMillis} ms processing (${(e1 + e2).toMillis} ms total)
""".stripMargin)
case ProcessingFailure(s, a, e1, e2, t) =>
val queryString =
        s.lines.dropWhile(_.trim.isEmpty).toArray.mkString("\n ")
      val logString = queryString
        .split("\\?", -1)
.zip(a.map(s => "'" + s + "'"))
.flatMap({ case (t1, t2) => List(t1, t2) })
.mkString("")
logger.error(s"""Failed Resultset Processing:
|
| ${logString}
|
| arguments = [${a.mkString(", ")}]
| elapsed = ${e1.toMillis} ms exec + ${e2.toMillis} ms processing (failed) (${(e1 + e2).toMillis} ms total)
| failure = ${t.getMessage}
""".stripMargin)
case ExecFailure(s, a, e1, t) =>
val queryString =
        s.lines.dropWhile(_.trim.isEmpty).toArray.mkString("\n ")
      val logString = queryString
        .split("\\?", -1)
.zip(a.map(s => "'" + s + "'"))
.flatMap({ case (t1, t2) => List(t1, t2) })
.toList
.mkString("")
logger.error(s"""Failed Statement Execution:
|
| ${logString}
|
| arguments = [${a.mkString(", ")}]
| elapsed = ${e1.toMillis} ms exec (failed)
| failure = ${t.getMessage}
""".stripMargin)
}
final case class QueryBuilder[Model: Read: Write](
selectF: Fragment,
tableF: Fragment,
filters: List[Option[Fragment]],
countFragment: Option[Fragment] = None) {
val countF: Fragment =
countFragment.getOrElse(fr"SELECT count(id) FROM" ++ tableF)
val deleteF: Fragment = fr"DELETE FROM" ++ tableF
val existF: Fragment = fr"SELECT 1 FROM" ++ tableF
/** Add another filter to the query being constructed */
def filter[M >: Model, T](thing: T)(
implicit filterable: Filterable[M, T]): QueryBuilder[Model] =
this.copy(filters = filters ++ filterable.toFilters(thing))
def filter[M >: Model](thing: Fragment)(
implicit filterable: Filterable[M, Fragment]): QueryBuilder[Model] =
thing match {
case Fragment.empty => this
case _ => this.copy(filters = filters ++ filterable.toFilters(thing))
}
def filter[M >: Model](id: UUID)(
implicit filterable: Filterable[M, Option[Fragment]])
: QueryBuilder[Model] = {
this.copy(filters = filters ++ filterable.toFilters(Some(fr"id = ${id}")))
}
def filter[M >: Model](
fragments: List[Option[Fragment]]): QueryBuilder[Model] = {
this.copy(filters = filters ::: fragments)
}
// This method exists temporarily to stand in for second-tier object authorization
def ownedBy[M >: Model](user: User, objectId: UUID): QueryBuilder[Model] =
this.filter(objectId).filter(user)
def ownedByOrSuperUser[M >: Model](user: User,
objectId: UUID): QueryBuilder[Model] = {
if (user.isSuperuser) {
this.filter(objectId)
} else {
this.filter(objectId).filter(user)
}
}
def pageOffset[T: Read](pageRequest: PageRequest): ConnectionIO[List[T]] =
(selectF ++ Fragments.whereAndOpt(filters: _*) ++ Page(pageRequest))
.query[T]
.to[List]
def hasNext(pageRequest: PageRequest): ConnectionIO[Boolean] = {
(existF ++ Fragments.whereAndOpt(filters: _*) ++ Page(
pageRequest.copy(offset = pageRequest.offset + 1)))
.query[Boolean]
.to[List]
.map(_.nonEmpty)
}
/** Provide a list of responses within the PaginatedResponse wrapper */
def page[T: Read](pageRequest: PageRequest,
selectF: Fragment,
countF: Fragment,
orderClause: Map[String, Order],
doCount: Boolean): ConnectionIO[PaginatedResponse[T]] = {
for {
page <- (selectF ++ Fragments.whereAndOpt(filters: _*) ++ Page(
pageRequest.copy(sort = orderClause ++ pageRequest.sort)))
.query[T]
.to[List]
(count: Int, hasNext: Boolean) <- doCount match {
case true => {
(countF ++ Fragments.whereAndOpt(filters: _*))
.query[Int]
.unique map { count =>
(count, (pageRequest.offset * pageRequest.limit) + 1 < count)
}
}
case false => {
hasNext(pageRequest) map {
(-1, _)
}
}
}
} yield {
val hasPrevious = pageRequest.offset > 0
PaginatedResponse[T](count,
hasPrevious,
hasNext,
pageRequest.offset,
pageRequest.limit,
page)
}
}
/** Provide a list of responses within the PaginatedResponse wrapper */
def page(pageRequest: PageRequest, orderClause: Map[String, Order])
: ConnectionIO[PaginatedResponse[Model]] =
page(pageRequest, selectF, countF, orderClause, true)
def page(pageRequest: PageRequest,
orderClause: Map[String, Order],
doCount: Boolean): ConnectionIO[PaginatedResponse[Model]] =
page(pageRequest, selectF, countF, orderClause, doCount)
def page(pageRequest: PageRequest): ConnectionIO[PaginatedResponse[Model]] =
page(pageRequest, selectF, countF, Map.empty[String, Order], true)
def listQ(pageRequest: PageRequest): Query0[Model] =
(selectF ++ Fragments.whereAndOpt(filters: _*) ++ Page(Some(pageRequest)))
.query[Model]
/** Provide a list of responses */
def list(pageRequest: PageRequest): ConnectionIO[List[Model]] = {
listQ(pageRequest).to[List]
}
/** Short circuit for quickly getting an approximate count for large queries (e.g. scenes) **/
def sceneCountIO(exactCountOption: Option[Boolean]): ConnectionIO[Int] = {
val countQuery = countF ++ Fragments.whereAndOpt(filters: _*)
val over100IO: ConnectionIO[Boolean] =
(fr"SELECT EXISTS(" ++ (selectF ++ Fragments.whereAndOpt(filters: _*) ++ fr"offset 100") ++ fr")")
.query[Boolean]
.unique
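      // Illustrative shape of the probe above (identifiers elided): it issues roughly
      //   SELECT EXISTS( <select> WHERE <filters> OFFSET 100 )
      // so the match below only runs the exact COUNT when the caller asked for one or the
      // result set is small, and otherwise reports 100 as a "100+" sentinel.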
over100IO.flatMap(over100 => {
(exactCountOption, over100) match {
case (Some(true), _) | (_, false) =>
countQuery.query[Int].unique
case _ =>
100.pure[ConnectionIO]
}
})
}
def listQ(limit: Int): Query0[Model] =
(selectF ++ Fragments.whereAndOpt(filters: _*) ++ fr"LIMIT $limit")
.query[Model]
/** Provide a list of responses */
def list(limit: Int): ConnectionIO[List[Model]] = {
listQ(limit).to[List]
}
def listQ(offset: Int, limit: Int): Query0[Model] =
(selectF ++ Fragments.whereAndOpt(filters: _*) ++ fr"OFFSET $offset" ++ fr"LIMIT $limit")
.query[Model]
def listQ(offset: Int, limit: Int, orderClause: Fragment): Query0[Model] =
(selectF ++ Fragments.whereAndOpt(filters: _*) ++ orderClause ++ fr"OFFSET $offset" ++ fr"LIMIT $limit")
.query[Model]
/** Provide a list of responses */
def list: ConnectionIO[List[Model]] = {
(selectF ++ Fragments.whereAndOpt(filters: _*))
.query[Model]
.to[List]
}
/** Provide a list of responses */
def list(offset: Int, limit: Int): ConnectionIO[List[Model]] = {
listQ(offset, limit).to[List]
}
def list(offset: Int,
limit: Int,
orderClause: Fragment): ConnectionIO[List[Model]] = {
listQ(offset, limit, orderClause).to[List]
}
def selectQ: Query0[Model] =
(selectF ++ Fragments.whereAndOpt(filters: _*)).query[Model]
/** Select a single value - returning an Optional value */
def selectOption: ConnectionIO[Option[Model]] =
selectQ.option
/** Select a single value - throw on failure */
def select: ConnectionIO[Model] = {
selectQ.unique
}
def deleteQOption: Option[Update0] = {
if (filters.isEmpty) {
None
} else {
Some((deleteF ++ Fragments.whereAndOpt(filters: _*)).update)
}
}
def delete: ConnectionIO[Int] = {
deleteQOption
.getOrElse(
throw new Exception("Unsafe delete - delete requires filters"))
.run
}
def exists: ConnectionIO[Boolean] = {
(existF ++ Fragments.whereAndOpt(filters: _*) ++ fr"LIMIT 1")
.query[Int]
.to[List]
.map(_.nonEmpty)
}
}
}
| aaronxsu/raster-foundry | app-backend/db/src/main/scala/Dao.scala | Scala | apache-2.0 | 10,294 |
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.ops.training.optimizers.schedules
import org.platanios.tensorflow.api.ops.{Basic, Math, Op, Output}
import org.platanios.tensorflow.api.ops.control_flow.ControlFlow
import org.platanios.tensorflow.api.ops.variables.Variable
import org.platanios.tensorflow.api.types.FLOAT32
/** Cycle-linear 10x decay method.
*
* This method applies a cycle-linear decay function to a provided initial learning rate (i.e., `value`). It requires a
  * step value to be provided in its application function, in order to compute the decayed learning rate. You may
* simply pass a TensorFlow variable that you increment at each training step.
*
* The decayed value is computed as follows:
* {{{
* cyclePosition = 1 - abs(((step % (2 * cycleSteps)) - cycleSteps) / cycleSteps)
* decayed = value * (0.1 + cyclePosition) * 3
* }}}
*
* @param cycleSteps Cycle linear decay cycle in terms of number of steps.
* @param startStep Step after which to start decaying the learning rate.
*
* @author Emmanouil Antonios Platanios
*/
class CycleLinear10xDecay protected (
val cycleSteps: Int,
val startStep: Long = 0L,
val name: String = "CycleLinear10xDecay"
) extends Schedule {
/** Applies the decay method to `value`, the current iteration in the optimization loop is `step` and returns the
* result.
*
* @param value Value to decay.
* @param step Option containing current iteration in the optimization loop, if one has been provided.
* @return Decayed value.
* @throws IllegalArgumentException If the decay method requires a value for `step` but the provided option is empty.
*/
@throws[IllegalArgumentException]
override def apply(value: Output, step: Option[Variable]): Output = {
if (step.isEmpty)
throw new IllegalArgumentException("A step needs to be provided for cycle-linear 10x decay.")
Op.createWithNameScope(name, Set(value.op, step.get.op)) {
val stepValue = Math.cast(step.get.value, value.dataType)
val cycleStepsValue = Basic.constant(cycleSteps, value.dataType)
if (startStep == 0L) {
decay(value, stepValue, cycleStepsValue)
} else {
val startStepValue = Basic.constant(startStep, value.dataType)
ControlFlow.cond(
stepValue < startStepValue,
() => value,
() => decay(value, stepValue - startStepValue, cycleStepsValue))
}
}
}
private[this] def decay(initialValue: Output, step: Output, cycleSteps: Output): Output = {
// Cycle the rate linearly by 10x every `cycleSteps`, up and down.
val cyclePosition = 1.0f - Math.abs(((step % (2 * cycleSteps)) - cycleSteps).cast(FLOAT32) / cycleSteps)
(0.1f + cyclePosition) * 3.0f // 10x difference in each cycle (0.3 - 3).
}
}
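/** Factory for [[CycleLinear10xDecay]] schedules.
  *
  * A usage sketch (illustrative only; `baseRate` and `step` are assumed to already exist in the
  * caller's graph: an `Output` holding the base learning rate and a non-trainable `Variable` that
  * the training loop increments once per iteration):
  * {{{
  * val schedule = CycleLinear10xDecay(cycleSteps = 1000, startStep = 100L)
  * val decayed: Output = schedule(baseRate, Some(step))
  * }}}
  */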
object CycleLinear10xDecay {
def apply(cycleSteps: Int, startStep: Long = 0L, name: String = "CycleLinear10xDecay"): CycleLinear10xDecay = {
new CycleLinear10xDecay(cycleSteps, startStep, name)
}
}
| eaplatanios/tensorflow | tensorflow/scala/api/src/main/scala/org/platanios/tensorflow/api/ops/training/optimizers/schedules/CycleLinear10xDecay.scala | Scala | apache-2.0 | 3,648 |
package simulations
import com.typesafe.config.ConfigFactory
import io.gatling.core.Predef._
import io.gatling.http.Predef._
class ReportSimulation extends Simulation {
val conf = ConfigFactory.load()
val httpConf = http.baseURL(conf.getString("baseUrl"))
setUp(
scenario("Report Simulation")
.exec(http("Render").get("/api/report"))
.inject(atOnceUsers(conf.getInt("report.load")))
).protocols(httpConf)
}
| mauriciocc/tcc | simulation/src/test/scala/simulations/ReportSimulation.scala | Scala | mit | 436 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package dev.ligature.rocksdb
private class RocksDBReadTx {
}
| almibe/stinkpot-burrow | src/main/scala/dev/ligature/rocksdb/RocksDBReadTx.scala | Scala | mpl-2.0 | 269 |
package org.ensime.sexp.formats
import org.ensime.sexp._
class ProductFormatsSpec extends FormatSpec
with BasicFormats with StandardFormats with ProductFormats {
case class Foo(i: Int, s: String)
case class Bar(foo: Foo)
case class Baz()
case class Wibble(thing: String, thong: Int, bling: Option[String])
describe("ProductFormats case classes") {
val foo = Foo(13, "foo")
val fooexpect = SexpData(
SexpSymbol(":i") -> SexpNumber(13),
SexpSymbol(":s") -> SexpString("foo")
)
it("should support primitive types") {
// will create the marshaller every time assertFormat is called
assertFormat(foo, fooexpect)
assertFormat(foo, fooexpect)
assertFormat(foo, fooexpect)
}
it("should support 'fast' case classes") {
      // can't really test - it's a side-effect optimisation
implicit val FastFooFormat = SexpFormat[Foo]
assertFormat(foo, fooexpect)
assertFormat(foo, fooexpect)
assertFormat(foo, fooexpect)
}
it("should support nested case classes") {
val bar = Bar(foo)
val expect = SexpData(
SexpSymbol(":foo") -> fooexpect
)
// (this is actually a really big deal, thank you shapeless!)
assertFormat(bar, expect)
}
it("should support zero content case classes") {
assertFormat(Baz(), SexpNil)
}
it("should support missing fields as SexpNil / None") {
val wibble = Wibble("wibble", 13, Some("fork"))
assertFormat(wibble, SexpData(
SexpSymbol(":thing") -> SexpString("wibble"),
SexpSymbol(":thong") -> SexpNumber(13),
SexpSymbol(":bling") -> SexpList(SexpString("fork"))
))
val wobble = Wibble("wibble", 13, None)
// write out None as SexpNil
assertFormat(wobble, SexpData(
SexpSymbol(":thing") -> SexpString("wibble"),
SexpSymbol(":thong") -> SexpNumber(13),
SexpSymbol(":bling") -> SexpNil
))
// but tolerate missing entries
assert(SexpData(
SexpSymbol(":thing") -> SexpString("wibble"),
SexpSymbol(":thong") -> SexpNumber(13)
).convertTo[Wibble] === wobble)
}
}
describe("ProductFormat tuples") {
val foo = (13, "foo")
val fooexpect = SexpList(SexpNumber(13), SexpString("foo"))
it("should support primitive types") {
assertFormat(foo, fooexpect)
}
it("should support 'fast' tuples") {
      // can't really test - it's a side-effect optimisation
implicit val FastFooFormat = SexpFormat[(Int, String)]
assertFormat(foo, fooexpect)
assertFormat(foo, fooexpect)
assertFormat(foo, fooexpect)
}
}
}
class CustomisedProductFormatsSpec extends FormatSpec
with BasicFormats with StandardFormats with ProductFormats
with CamelCaseToDashes {
case class Foo(AThingyMaBob: Int, HTML: String)
describe("ProductFormats with overloaded toWireName") {
it("should support custom field names") {
assertFormat(Foo(13, "foo"), SexpData(
SexpSymbol(":a-thingy-ma-bob") -> SexpNumber(13),
SexpSymbol(":h-t-m-l") -> SexpString("foo")
))
}
}
}
| jacobono/ensime-server | sexpress/src/test/scala/org/ensime/sexp/formats/ProductFormatsSpec.scala | Scala | gpl-3.0 | 3,144 |
package water.sparkling.demo
import water.sparkling.DummyFrameExtractor
import water.fvec.Frame
import water.util.Log
object ProstateDemo extends Demo {
  override def run(conf: DemoConf): Unit = prostateDemo(frameExtractor = conf.extractor, sparkMaster = if (conf.local) null else conf.sparkMaster)
def prostateDemo(frameExtractor:RDDFrameExtractor, sparkMaster:String):Unit = {
// Specifies how data are extracted from RDD into Frame
val fextract = frameExtractor
// Dataset to parse
val dataset = "data/prostate.csv"
// Row parser
val rowParser = ProstateParse
val tableName = "prostate_table"
val query = "SELECT * FROM prostate_table WHERE capsule=1"
// Connect to shark cluster and make a query over prostate, transfer data into H2O
    val frame: Frame = executeSpark[Prostate](dataset, rowParser, fextract, tableName, query, sparkMaster = sparkMaster)
    Log.info("Extracted frame from Spark: ")
    Log.info(if (frame != null) frame.toString + "\nRows: " + frame.numRows() else "<nothing>")
}
override def name: String = "prostate"
}
| h2oai/h2o-sparkling | src/main/scala/water/sparkling/demo/ProstateDemo.scala | Scala | apache-2.0 | 1,089 |
object TransactionStatus extends Enumeration {
val SUCCESS, PENDING, FAILED = Value
}
class Transaction(val from: String,
val to: String,
val amount: Double,
var status: TransactionStatus.Value = TransactionStatus.PENDING,
val id: String = java.util.UUID.randomUUID.toString,
var receiptReceived: Boolean = false) {
def isCompleted: Boolean = {
this.status != TransactionStatus.PENDING
}
def isSuccessful: Boolean = {
isCompleted && this.status == TransactionStatus.SUCCESS
}
}
| DagF/tdt4165_progspraak_project_h15 | part3-exercise/src/main/scala/Transaction.scala | Scala | mit | 592 |
package naming
import eu.inn.binders.naming.{CamelCaseBuilder, SnakeUpperCaseParser}
import org.scalatest.{FlatSpec, Matchers}
class TestSnakeUpperCaseParser extends FlatSpec with Matchers {
"SnakeUpperCaseParser " should " parse STRING_LIKE_THIS " in {
val parser = new SnakeUpperCaseParser()
val builder = new CamelCaseBuilder()
parser.parse("STRING_LIKE_THIS", builder)
val result = builder.toString
assert(result == "stringLikeThis")
}
} | InnovaCo/binders | src/test/scala/naming/TestSnakeUpperCaseParser.scala | Scala | bsd-3-clause | 470 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.ui
import javax.servlet.http.HttpServletRequest
import scala.xml._
import org.apache.commons.lang3.StringEscapeUtils
import org.apache.spark.status.api.v1.{JobData, StageData}
import org.apache.spark.streaming.Time
import org.apache.spark.streaming.ui.StreamingJobProgressListener.SparkJobId
import org.apache.spark.ui.{UIUtils => SparkUIUtils, WebUIPage}
private[ui] case class SparkJobIdWithUIData(sparkJobId: SparkJobId, jobData: Option[JobData])
private[ui] class BatchPage(parent: StreamingTab) extends WebUIPage("batch") {
private val streamingListener = parent.listener
private val store = parent.parent.store
private def columns: Seq[Node] = {
<th>Output Op Id</th>
<th>Description</th>
<th>Output Op Duration</th>
<th>Status</th>
<th>Job Id</th>
<th>Job Duration</th>
<th class="sorttable_nosort">Stages: Succeeded/Total</th>
<th class="sorttable_nosort">Tasks (for all stages): Succeeded/Total</th>
<th>Error</th>
}
private def generateJobRow(
request: HttpServletRequest,
outputOpData: OutputOperationUIData,
outputOpDescription: Seq[Node],
formattedOutputOpDuration: String,
numSparkJobRowsInOutputOp: Int,
isFirstRow: Boolean,
jobIdWithData: SparkJobIdWithUIData): Seq[Node] = {
if (jobIdWithData.jobData.isDefined) {
generateNormalJobRow(request, outputOpData, outputOpDescription, formattedOutputOpDuration,
numSparkJobRowsInOutputOp, isFirstRow, jobIdWithData.jobData.get)
} else {
generateDroppedJobRow(outputOpData, outputOpDescription, formattedOutputOpDuration,
numSparkJobRowsInOutputOp, isFirstRow, jobIdWithData.sparkJobId)
}
}
private def generateOutputOpRowWithoutSparkJobs(
outputOpData: OutputOperationUIData,
outputOpDescription: Seq[Node],
formattedOutputOpDuration: String): Seq[Node] = {
<tr>
<td class="output-op-id-cell" >{outputOpData.id.toString}</td>
<td>{outputOpDescription}</td>
<td>{formattedOutputOpDuration}</td>
{outputOpStatusCell(outputOpData, rowspan = 1)}
<!-- Job Id -->
<td>-</td>
<!-- Duration -->
<td>-</td>
<!-- Stages: Succeeded/Total -->
<td>-</td>
<!-- Tasks (for all stages): Succeeded/Total -->
<td>-</td>
<!-- Error -->
<td>-</td>
</tr>
}
/**
   * Generate a row for a Spark Job. Because duplicated output op infos need to be collapsed into
* one cell, we use "rowspan" for the first row of an output op.
*/
private def generateNormalJobRow(
request: HttpServletRequest,
outputOpData: OutputOperationUIData,
outputOpDescription: Seq[Node],
formattedOutputOpDuration: String,
numSparkJobRowsInOutputOp: Int,
isFirstRow: Boolean,
sparkJob: JobData): Seq[Node] = {
val duration: Option[Long] = {
sparkJob.submissionTime.map { start =>
val end = sparkJob.completionTime.map(_.getTime()).getOrElse(System.currentTimeMillis())
end - start.getTime()
}
}
val lastFailureReason =
sparkJob.stageIds.sorted.reverse.flatMap(getStageData).
dropWhile(_.failureReason == None).take(1). // get the first info that contains failure
flatMap(info => info.failureReason).headOption.getOrElse("")
val formattedDuration = duration.map(d => SparkUIUtils.formatDuration(d)).getOrElse("-")
val detailUrl = s"${SparkUIUtils.prependBaseUri(
request, parent.basePath)}/jobs/job/?id=${sparkJob.jobId}"
// In the first row, output op id and its information needs to be shown. In other rows, these
// cells will be taken up due to "rowspan".
// scalastyle:off
val prefixCells =
if (isFirstRow) {
<td class="output-op-id-cell" rowspan={numSparkJobRowsInOutputOp.toString}>{outputOpData.id.toString}</td>
<td rowspan={numSparkJobRowsInOutputOp.toString}>
{outputOpDescription}
</td>
<td rowspan={numSparkJobRowsInOutputOp.toString}>{formattedOutputOpDuration}</td> ++
{outputOpStatusCell(outputOpData, numSparkJobRowsInOutputOp)}
} else {
Nil
}
// scalastyle:on
<tr>
{prefixCells}
<td sorttable_customkey={sparkJob.jobId.toString}>
<a href={detailUrl}>
{sparkJob.jobId}{sparkJob.jobGroup.map(id => s"($id)").getOrElse("")}
</a>
</td>
<td sorttable_customkey={duration.getOrElse(Long.MaxValue).toString}>
{formattedDuration}
</td>
<td class="stage-progress-cell">
{sparkJob.numCompletedStages}/{sparkJob.stageIds.size - sparkJob.numSkippedStages}
{if (sparkJob.numFailedStages > 0) s"(${sparkJob.numFailedStages} failed)"}
{if (sparkJob.numSkippedStages > 0) s"(${sparkJob.numSkippedStages} skipped)"}
</td>
<td class="progress-cell">
{
SparkUIUtils.makeProgressBar(
started = sparkJob.numActiveTasks,
completed = sparkJob.numCompletedTasks,
failed = sparkJob.numFailedTasks,
skipped = sparkJob.numSkippedTasks,
reasonToNumKilled = sparkJob.killedTasksSummary,
total = sparkJob.numTasks - sparkJob.numSkippedTasks)
}
</td>
{UIUtils.failureReasonCell(lastFailureReason)}
</tr>
}
/**
   * If a job was dropped by the Spark listener because it exceeded the retained-job limit, we only show the job id
* with "-" cells.
*/
private def generateDroppedJobRow(
outputOpData: OutputOperationUIData,
outputOpDescription: Seq[Node],
formattedOutputOpDuration: String,
numSparkJobRowsInOutputOp: Int,
isFirstRow: Boolean,
jobId: Int): Seq[Node] = {
// In the first row, output op id and its information needs to be shown. In other rows, these
// cells will be taken up due to "rowspan".
// scalastyle:off
val prefixCells =
if (isFirstRow) {
<td class="output-op-id-cell" rowspan={numSparkJobRowsInOutputOp.toString}>{outputOpData.id.toString}</td>
<td rowspan={numSparkJobRowsInOutputOp.toString}>{outputOpDescription}</td>
<td rowspan={numSparkJobRowsInOutputOp.toString}>{formattedOutputOpDuration}</td> ++
{outputOpStatusCell(outputOpData, numSparkJobRowsInOutputOp)}
} else {
Nil
}
// scalastyle:on
<tr>
{prefixCells}
<td sorttable_customkey={jobId.toString}>
{if (jobId >= 0) jobId.toString else "-"}
</td>
<!-- Duration -->
<td>-</td>
<!-- Stages: Succeeded/Total -->
<td>-</td>
<!-- Tasks (for all stages): Succeeded/Total -->
<td>-</td>
<!-- Error -->
<td>-</td>
</tr>
}
private def generateOutputOpIdRow(
request: HttpServletRequest,
outputOpData: OutputOperationUIData,
sparkJobs: Seq[SparkJobIdWithUIData]): Seq[Node] = {
val formattedOutputOpDuration =
if (outputOpData.duration.isEmpty) {
"-"
} else {
SparkUIUtils.formatDuration(outputOpData.duration.get)
}
val description = generateOutputOpDescription(outputOpData)
if (sparkJobs.isEmpty) {
generateOutputOpRowWithoutSparkJobs(outputOpData, description, formattedOutputOpDuration)
} else {
val firstRow =
generateJobRow(
request,
outputOpData,
description,
formattedOutputOpDuration,
sparkJobs.size,
true,
sparkJobs.head)
val tailRows =
sparkJobs.tail.map { sparkJob =>
generateJobRow(
request,
outputOpData,
description,
formattedOutputOpDuration,
sparkJobs.size,
false,
sparkJob)
}
(firstRow ++ tailRows).flatten
}
}
private def generateOutputOpDescription(outputOp: OutputOperationUIData): Seq[Node] = {
<div>
{outputOp.name}
<span
onclick="this.parentNode.querySelector('.stage-details').classList.toggle('collapsed')"
class="expand-details">
+details
</span>
<div class="stage-details collapsed">
<pre>{outputOp.description}</pre>
</div>
</div>
}
private def getJobData(sparkJobId: SparkJobId): Option[JobData] = {
try {
Some(store.job(sparkJobId))
} catch {
case _: NoSuchElementException => None
}
}
private def getStageData(stageId: Int): Option[StageData] = {
try {
Some(store.lastStageAttempt(stageId))
} catch {
case _: NoSuchElementException => None
}
}
private def generateOutputOperationStatusForUI(failure: String): String = {
if (failure.startsWith("org.apache.spark.SparkException")) {
"Failed due to Spark job error\\n" + failure
} else {
var nextLineIndex = failure.indexOf("\\n")
if (nextLineIndex < 0) {
nextLineIndex = failure.length
}
val firstLine = failure.substring(0, nextLineIndex)
s"Failed due to error: $firstLine\\n$failure"
}
}
/**
* Generate the job table for the batch.
*/
private def generateJobTable(
request: HttpServletRequest,
batchUIData: BatchUIData): Seq[Node] = {
val outputOpIdToSparkJobIds = batchUIData.outputOpIdSparkJobIdPairs.groupBy(_.outputOpId).
map { case (outputOpId, outputOpIdAndSparkJobIds) =>
// sort SparkJobIds for each OutputOpId
(outputOpId, outputOpIdAndSparkJobIds.map(_.sparkJobId).toSeq.sorted)
}
val outputOps: Seq[(OutputOperationUIData, Seq[SparkJobId])] =
batchUIData.outputOperations.map { case (outputOpId, outputOperation) =>
val sparkJobIds = outputOpIdToSparkJobIds.getOrElse(outputOpId, Seq.empty)
(outputOperation, sparkJobIds)
}.toSeq.sortBy(_._1.id)
val outputOpWithJobs = outputOps.map { case (outputOpData, sparkJobIds) =>
(outputOpData, sparkJobIds.map { jobId => SparkJobIdWithUIData(jobId, getJobData(jobId)) })
}
<table id="batch-job-table" class="table table-bordered table-striped table-condensed">
<thead>
{columns}
</thead>
<tbody>
{
outputOpWithJobs.map { case (outputOpData, sparkJobs) =>
generateOutputOpIdRow(request, outputOpData, sparkJobs)
}
}
</tbody>
</table>
}
def render(request: HttpServletRequest): Seq[Node] = streamingListener.synchronized {
val batchTime = Option(request.getParameter("id")).map(id => Time(id.toLong))
.getOrElse {
throw new IllegalArgumentException(s"Missing id parameter")
}
val formattedBatchTime =
UIUtils.formatBatchTime(batchTime.milliseconds, streamingListener.batchDuration)
val batchUIData = streamingListener.getBatchUIData(batchTime).getOrElse {
throw new IllegalArgumentException(s"Batch $formattedBatchTime does not exist")
}
val formattedSchedulingDelay =
batchUIData.schedulingDelay.map(SparkUIUtils.formatDuration).getOrElse("-")
val formattedProcessingTime =
batchUIData.processingDelay.map(SparkUIUtils.formatDuration).getOrElse("-")
val formattedTotalDelay = batchUIData.totalDelay.map(SparkUIUtils.formatDuration).getOrElse("-")
val inputMetadatas = batchUIData.streamIdToInputInfo.values.flatMap { inputInfo =>
inputInfo.metadataDescription.map(desc => inputInfo.inputStreamId -> desc)
}.toSeq
val summary: NodeSeq =
<div>
<ul class="unstyled">
<li>
<strong>Batch Duration: </strong>
{SparkUIUtils.formatDuration(streamingListener.batchDuration)}
</li>
<li>
<strong>Input data size: </strong>
{batchUIData.numRecords} records
</li>
<li>
<strong>Scheduling delay: </strong>
{formattedSchedulingDelay}
</li>
<li>
<strong>Processing time: </strong>
{formattedProcessingTime}
</li>
<li>
<strong>Total delay: </strong>
{formattedTotalDelay}
</li>
{
if (inputMetadatas.nonEmpty) {
<li>
<strong>Input Metadata:</strong>{generateInputMetadataTable(inputMetadatas)}
</li>
}
}
</ul>
</div>
val content = summary ++ generateJobTable(request, batchUIData)
SparkUIUtils.headerSparkPage(
request, s"Details of batch at $formattedBatchTime", content, parent)
}
def generateInputMetadataTable(inputMetadatas: Seq[(Int, String)]): Seq[Node] = {
<table class={SparkUIUtils.TABLE_CLASS_STRIPED_SORTABLE}>
<thead>
<tr>
<th>Input</th>
<th>Metadata</th>
</tr>
</thead>
<tbody>
{inputMetadatas.flatMap(generateInputMetadataRow)}
</tbody>
</table>
}
def generateInputMetadataRow(inputMetadata: (Int, String)): Seq[Node] = {
val streamId = inputMetadata._1
<tr>
<td>{streamingListener.streamName(streamId).getOrElse(s"Stream-$streamId")}</td>
<td>{metadataDescriptionToHTML(inputMetadata._2)}</td>
</tr>
}
private def metadataDescriptionToHTML(metadataDescription: String): Seq[Node] = {
    // tab to 4 spaces and "\n" to "<br/>"
    Unparsed(StringEscapeUtils.escapeHtml4(metadataDescription).
      replaceAllLiterally("\t", "    ").replaceAllLiterally("\n", "<br/>"))
}
private def outputOpStatusCell(outputOp: OutputOperationUIData, rowspan: Int): Seq[Node] = {
outputOp.failureReason match {
case Some(failureReason) =>
val failureReasonForUI = UIUtils.createOutputOperationFailureForUI(failureReason)
UIUtils.failureReasonCell(
failureReasonForUI, rowspan, includeFirstLineInExpandDetails = false)
case None =>
if (outputOp.endTime.isEmpty) {
<td rowspan={rowspan.toString}>-</td>
} else {
<td rowspan={rowspan.toString}>Succeeded</td>
}
}
}
}
| WindCanDie/spark | streaming/src/main/scala/org/apache/spark/streaming/ui/BatchPage.scala | Scala | apache-2.0 | 14,859 |
package com.github.dannywe.csv.base
import java.io.{File, Reader}
import au.com.bytecode.opencsv.CSVReader
import com.github.dannywe.csv.base.reader.ReaderLike
import com.github.dannywe.csv.core.TypeAliases._
import scala.collection.JavaConversions._
class CsvReaderAdaptor(csvReader: CSVReader) extends ReaderLike {
override def close(): Unit = csvReader.close()
override def readLine(): Next[StringArray] = {
val next = csvReader.readNext()
next match {
case x: StringArray => Cont(next)
case _ => Stop[StringArray]()
}
}
}
object CsvReaderAdaptor {
def apply(file: File) = new CsvReaderAdaptor(CsvReaderCreator.getCsvReader(file))
def apply(reader: Reader) = new CsvReaderAdaptor(CsvReaderCreator.getCsvReader(reader))
}
| DannyWE/CsvStreamUtils | src/main/scala/com/github/dannywe/csv/base/CsvReaderAdaptor.scala | Scala | apache-2.0 | 769 |
package techex
import techex.TestServer._
import org.specs2.mutable._
import dispatch._, Defaults._
import techex.domain.Nick
import scalaz._, Scalaz._
import _root_.argonaut._, Argonaut._
class LoadPersonalQuestsSpec extends Specification {
try {
val runningserver =
server.run
"The webserwer" should {
"yield a list of quests" in {
val quests =
for {
playerId <- putPlayer(Nick("balle"))
r <- Http(h / "quests" / "player" / playerId.value)
} yield r
val response =
quests()
val json =
          Parse.parse(response.getResponseBody).map(_.spaces4).fold(x => x, y => y)
json ! (response.getResponseBody must contain("desc"))
}
}
runningserver.shutdown
} catch {
case t: Throwable => t.printStackTrace()
}
}
| kantega/tech-ex-2015 | backend/src/test/scala/techex/LoadPersonalQuestsSpec.scala | Scala | mit | 844 |
package com.twitter.finagle.postgres.connection
import com.twitter.finagle.postgres.Spec
import com.twitter.finagle.postgres.messages.{Query, ParameterStatus, NotificationResponse, NoticeResponse}
class ConnectionAsyncSpec extends Spec {
"A postgres connection" should {
"ignore async messages for new connection" in {
val connection = new Connection()
val response = connection.receive(NotificationResponse(-1, "", ""))
response must equal(None)
val response2 = connection.receive(NoticeResponse(Some("blahblah")))
response2 must equal(None)
val response3 = connection.receive(ParameterStatus("foo", "bar"))
response3 must equal(None)
}
"ignore async messages for connected client" in {
val connection = new Connection(Connected)
val response = connection.receive(NotificationResponse(-1, "", ""))
response must equal(None)
val response2 = connection.receive(NoticeResponse(Some("blahblah")))
response2 must equal(None)
val response3 = connection.receive(ParameterStatus("foo", "bar"))
response3 must equal(None)
}
"ignore async messages when in query" in {
val connection = new Connection(Connected)
connection.send(Query("select * from Test"))
val response = connection.receive(NotificationResponse(-1, "", ""))
response must equal(None)
val response2 = connection.receive(NoticeResponse(Some("blahblah")))
response2 must equal(None)
val response3 = connection.receive(ParameterStatus("foo", "bar"))
response3 must equal(None)
}
}
}
| evnm/finagle-postgres | src/test/scala/com/twitter/finagle/postgres/connection/ConnectionAsyncSpec.scala | Scala | apache-2.0 | 1,610 |
package japgolly.scalajs.react.internal.monocle
import japgolly.scalajs.react._
import japgolly.scalajs.react.util.DefaultEffects._
import japgolly.scalajs.react.util.Effect.Dispatch
trait MonocleExtComponentLowPriorityImplicits {
implicit final def MonocleReactExt_StateWritableCB[I, F[_], A[_], S](i: I)(implicit sa: StateAccessor.Write[I, F, A, S]): MonocleExtComponent.StateWritableCB[I, F, A, S] =
new MonocleExtComponent.StateWritableCB(i)(sa)
}
trait MonocleExtComponent extends MonocleExtComponentLowPriorityImplicits {
implicit final def MonocleReactExt_StateAccess[F[_], A[_], S](m: StateAccess[F, A, S]): MonocleExtComponent.StateAcc[F, A, S, m.type] =
new MonocleExtComponent.StateAcc[F, A, S, m.type](m)
}
object MonocleExtComponent {
// Keep this import here so that Lens etc take priority over .internal
import monocle._
final class OptionalDispatchDsl1[A, L, B](private val f: (L, A, Sync[Unit]) => B) extends AnyVal {
def apply(a: A)(implicit l: L): B =
f(l, a, Sync.empty)
def apply[G[_]](a: A, callback: => G[Unit])(implicit l: L, G: Dispatch[G]): B =
f(l, a, Sync.transDispatch(callback))
}
final class StateAcc[F[_], FA[_], S, M <: StateAccess[F, FA, S]](val self: M) extends AnyVal {
def zoomStateL[T](l: Lens[S, T]): self.WithMappedState[T] =
self.zoomState(l.get)(l.set)
def modStateL[L[_, _, _, _], A, B](l: L[S, S, A, B]): OptionalDispatchDsl1[A => B, MonocleModifier[L], F[Unit]] =
new OptionalDispatchDsl1((L, f, cb) => self.modState(L.modify(l)(f), cb))
def modStateOptionL[L[_, _, _, _], A, B](l: L[S, S, A, B]): OptionalDispatchDsl1[A => Option[B], MonocleOptionalModifier[L], F[Unit]] =
new OptionalDispatchDsl1((L, f, cb) => self.modStateOption(L.modifyOption(l)(f), cb))
def setStateL[L[_, _, _, _], A, B](l: L[S, S, A, B]): OptionalDispatchDsl1[B, MonocleSetter[L], F[Unit]] =
new OptionalDispatchDsl1((L, b, cb) => self.modState(L.set(l)(b), cb))
def setStateOptionL[L[_, _, _, _], A, B](l: L[S, S, A, B]): OptionalDispatchDsl1[Option[B], MonocleSetter[L], F[Unit]] =
new OptionalDispatchDsl1((L, o, cb) =>
o match {
case Some(b) => setStateL(l)(b, cb)(L, Sync)
case None => self.setStateOption(None, cb)
}
)
def modStateAsyncL[L[_, _, _, _], A, B](l: L[S, S, A, B])(f: A => B)(implicit L: MonocleModifier[L]): FA[Unit] =
self.modStateAsync(L.modify(l)(f))
def modStateOptionAsyncL[L[_, _, _, _], A, B](l: L[S, S, A, B])(f: A => Option[B])(implicit L: MonocleOptionalModifier[L]): FA[Unit] =
self.modStateOptionAsync(L.modifyOption(l)(f))
def setStateAsyncL[L[_, _, _, _], A, B](l: L[S, S, A, B])(b: B)(implicit L: MonocleSetter[L]): FA[Unit] =
self.modStateAsync(L.set(l)(b))
def setStateOptionAsyncL[L[_, _, _, _], A, B](l: L[S, S, A, B])(o: Option[B])(implicit L: MonocleSetter[L]): FA[Unit] =
o match {
case Some(b) => setStateAsyncL(l)(b)
case None => self.setStateOptionAsync(None)
}
}
final class StateWritableCB[I, F[_], FA[_], S](private val i: I)(implicit sa: StateAccessor.Write[I, F, FA, S]) {
def modStateL[L[_, _, _, _], A, B](l: L[S, S, A, B]): OptionalDispatchDsl1[A => B, MonocleModifier[L], F[Unit]] =
new OptionalDispatchDsl1((L, f, cb) => sa(i).modState(L.modify(l)(f), cb))
def modStateOptionL[L[_, _, _, _], A, B](l: L[S, S, A, B]): OptionalDispatchDsl1[A => Option[B], MonocleOptionalModifier[L], F[Unit]] =
new OptionalDispatchDsl1((L, f, cb) => sa(i).modStateOption(L.modifyOption(l)(f), cb))
def setStateL[L[_, _, _, _], A, B](l: L[S, S, A, B]): OptionalDispatchDsl1[B, MonocleSetter[L], F[Unit]] =
new OptionalDispatchDsl1((L, b, cb) => sa(i).modState(L.set(l)(b), cb))
def setStateOptionL[L[_, _, _, _], A, B](l: L[S, S, A, B]): OptionalDispatchDsl1[Option[B], MonocleSetter[L], F[Unit]] =
new OptionalDispatchDsl1((L, o, cb) =>
o match {
case Some(b) => setStateL(l)(b, cb)(L, Sync)
case None => sa(i).setStateOption(None, cb)
}
)
def modStateAsyncL[L[_, _, _, _], A, B](l: L[S, S, A, B])(f: A => B)(implicit L: MonocleModifier[L]): FA[Unit] =
sa(i).modStateAsync(L.modify(l)(f))
def modStateOptionAsyncL[L[_, _, _, _], A, B](l: L[S, S, A, B])(f: A => Option[B])(implicit L: MonocleOptionalModifier[L]): FA[Unit] =
sa(i).modStateOptionAsync(L.modifyOption(l)(f))
def setStateAsyncL[L[_, _, _, _], A, B](l: L[S, S, A, B])(b: B)(implicit L: MonocleSetter[L]): FA[Unit] =
sa(i).modStateAsync(L.set(l)(b))
def setStateOptionAsyncL[L[_, _, _, _], A, B](l: L[S, S, A, B])(o: Option[B])(implicit L: MonocleSetter[L]): FA[Unit] =
o match {
case Some(b) => setStateAsyncL(l)(b)
case None => sa(i).setStateOptionAsync(None)
}
}
}
| japgolly/scalajs-react | extraExtMonocle2/src/main/scala/japgolly/scalajs/react/internal/monocle/MonocleExtComponent.scala | Scala | apache-2.0 | 4,866 |
package ildl.internal
class nobridge extends annotation.Annotation | miniboxing/ildl-plugin | components/runtime/src/ildl/internal/nobridges.scala | Scala | bsd-3-clause | 67 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka
import java.io.File
import java.util.Arrays
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicLong
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.language.postfixOps
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import org.scalatest.concurrent.Eventually
import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Milliseconds, StreamingContext, Time}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka.KafkaCluster.LeaderOffset
import org.apache.spark.streaming.scheduler._
import org.apache.spark.streaming.scheduler.rate.RateEstimator
import org.apache.spark.util.Utils
class DirectKafkaStreamSuite
extends SparkFunSuite
with BeforeAndAfter
with BeforeAndAfterAll
with Eventually
with Logging {
val sparkConf = new SparkConf()
.setMaster("local[4]")
.setAppName(this.getClass.getSimpleName)
private var ssc: StreamingContext = _
private var testDir: File = _
private var kafkaTestUtils: KafkaTestUtils = _
override def beforeAll {
kafkaTestUtils = new KafkaTestUtils
kafkaTestUtils.setup()
}
override def afterAll {
if (kafkaTestUtils != null) {
kafkaTestUtils.teardown()
kafkaTestUtils = null
}
}
after {
if (ssc != null) {
ssc.stop(stopSparkContext = true)
}
if (testDir != null) {
Utils.deleteRecursively(testDir)
}
}
test("basic stream receiving with multiple topics and smallest starting offset") {
val topics = Set("basic1", "basic2", "basic3")
val data = Map("a" -> 7, "b" -> 9)
topics.foreach { t =>
kafkaTestUtils.createTopic(t)
kafkaTestUtils.sendMessages(t, data)
}
val totalSent = data.values.sum * topics.size
val kafkaParams = Map(
"metadata.broker.list" -> kafkaTestUtils.brokerAddress,
"auto.offset.reset" -> "smallest"
)
ssc = new StreamingContext(sparkConf, Milliseconds(200))
val stream = withClue("Error creating direct stream") {
KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
ssc, kafkaParams, topics)
}
val allReceived = new ConcurrentLinkedQueue[(String, String)]()
// hold a reference to the current offset ranges, so it can be used downstream
var offsetRanges = Array[OffsetRange]()
stream.transform { rdd =>
// Get the offset ranges in the RDD
offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
rdd
}.foreachRDD { rdd =>
for (o <- offsetRanges) {
logInfo(s"${o.topic} ${o.partition} ${o.fromOffset} ${o.untilOffset}")
}
val collected = rdd.mapPartitionsWithIndex { (i, iter) =>
// For each partition, get size of the range in the partition,
// and the number of items in the partition
val off = offsetRanges(i)
val all = iter.toSeq
val partSize = all.size
val rangeSize = off.untilOffset - off.fromOffset
Iterator((partSize, rangeSize))
}.collect
// Verify whether number of elements in each partition
// matches with the corresponding offset range
collected.foreach { case (partSize, rangeSize) =>
assert(partSize === rangeSize, "offset ranges are wrong")
}
}
stream.foreachRDD { rdd => allReceived.addAll(Arrays.asList(rdd.collect(): _*)) }
ssc.start()
eventually(timeout(20000.milliseconds), interval(200.milliseconds)) {
assert(allReceived.size === totalSent,
"didn't get expected number of messages, messages:\\n" +
allReceived.asScala.mkString("\\n"))
}
ssc.stop()
}
test("receiving from largest starting offset") {
val topic = "largest"
val topicPartition = TopicAndPartition(topic, 0)
val data = Map("a" -> 10)
kafkaTestUtils.createTopic(topic)
val kafkaParams = Map(
"metadata.broker.list" -> kafkaTestUtils.brokerAddress,
"auto.offset.reset" -> "largest"
)
val kc = new KafkaCluster(kafkaParams)
def getLatestOffset(): Long = {
kc.getLatestLeaderOffsets(Set(topicPartition)).right.get(topicPartition).offset
}
// Send some initial messages before starting context
kafkaTestUtils.sendMessages(topic, data)
eventually(timeout(10 seconds), interval(20 milliseconds)) {
assert(getLatestOffset() > 3)
}
val offsetBeforeStart = getLatestOffset()
// Setup context and kafka stream with largest offset
ssc = new StreamingContext(sparkConf, Milliseconds(200))
val stream = withClue("Error creating direct stream") {
KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
ssc, kafkaParams, Set(topic))
}
assert(
stream.asInstanceOf[DirectKafkaInputDStream[_, _, _, _, _]]
.fromOffsets(topicPartition) >= offsetBeforeStart,
"Start offset not from latest"
)
val collectedData = new ConcurrentLinkedQueue[String]()
stream.map { _._2 }.foreachRDD { rdd => collectedData.addAll(Arrays.asList(rdd.collect(): _*)) }
ssc.start()
val newData = Map("b" -> 10)
kafkaTestUtils.sendMessages(topic, newData)
eventually(timeout(10 seconds), interval(50 milliseconds)) {
collectedData.contains("b")
}
assert(!collectedData.contains("a"))
ssc.stop()
}
test("creating stream by offset") {
val topic = "offset"
val topicPartition = TopicAndPartition(topic, 0)
val data = Map("a" -> 10)
kafkaTestUtils.createTopic(topic)
val kafkaParams = Map(
"metadata.broker.list" -> kafkaTestUtils.brokerAddress,
"auto.offset.reset" -> "largest"
)
val kc = new KafkaCluster(kafkaParams)
def getLatestOffset(): Long = {
kc.getLatestLeaderOffsets(Set(topicPartition)).right.get(topicPartition).offset
}
// Send some initial messages before starting context
kafkaTestUtils.sendMessages(topic, data)
eventually(timeout(10 seconds), interval(20 milliseconds)) {
assert(getLatestOffset() >= 10)
}
val offsetBeforeStart = getLatestOffset()
// Setup context and kafka stream with largest offset
ssc = new StreamingContext(sparkConf, Milliseconds(200))
val stream = withClue("Error creating direct stream") {
KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, String](
ssc, kafkaParams, Map(topicPartition -> 11L),
(m: MessageAndMetadata[String, String]) => m.message())
}
assert(
stream.asInstanceOf[DirectKafkaInputDStream[_, _, _, _, _]]
.fromOffsets(topicPartition) >= offsetBeforeStart,
"Start offset not from latest"
)
val collectedData = new ConcurrentLinkedQueue[String]()
stream.foreachRDD { rdd => collectedData.addAll(Arrays.asList(rdd.collect(): _*)) }
ssc.start()
val newData = Map("b" -> 10)
kafkaTestUtils.sendMessages(topic, newData)
eventually(timeout(10 seconds), interval(50 milliseconds)) {
collectedData.contains("b")
}
assert(!collectedData.contains("a"))
ssc.stop()
}
// Test to verify the offset ranges can be recovered from the checkpoints
test("offset recovery") {
val topic = "recovery"
kafkaTestUtils.createTopic(topic)
testDir = Utils.createTempDir()
val kafkaParams = Map(
"metadata.broker.list" -> kafkaTestUtils.brokerAddress,
"auto.offset.reset" -> "smallest"
)
// Send data to Kafka and wait for it to be received
def sendData(data: Seq[Int]) {
      val strings = data.map { _.toString }
      kafkaTestUtils.sendMessages(topic, strings.map { _ -> 1 }.toMap)
}
// Setup the streaming context
ssc = new StreamingContext(sparkConf, Milliseconds(100))
val kafkaStream = withClue("Error creating direct stream") {
KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
ssc, kafkaParams, Set(topic))
}
val keyedStream = kafkaStream.map { v => "key" -> v._2.toInt }
val stateStream = keyedStream.updateStateByKey { (values: Seq[Int], state: Option[Int]) =>
Some(values.sum + state.getOrElse(0))
}
ssc.checkpoint(testDir.getAbsolutePath)
    // This is to ensure all the data is eventually received only once
stateStream.foreachRDD { (rdd: RDD[(String, Int)]) =>
rdd.collect().headOption.foreach { x =>
DirectKafkaStreamSuite.total.set(x._2)
}
}
ssc.start()
// Send some data
for (i <- (1 to 10).grouped(4)) {
sendData(i)
}
eventually(timeout(20 seconds), interval(50 milliseconds)) {
assert(DirectKafkaStreamSuite.total.get === (1 to 10).sum)
}
ssc.stop()
// Verify that offset ranges were generated
// Since "offsetRangesAfterStop" will be used to compare with "recoveredOffsetRanges", we should
// collect offset ranges after stopping. Otherwise, because new RDDs keep being generated before
// stopping, we may not be able to get the latest RDDs, then "recoveredOffsetRanges" will
// contain something not in "offsetRangesAfterStop".
val offsetRangesAfterStop = getOffsetRanges(kafkaStream)
assert(offsetRangesAfterStop.size >= 1, "No offset ranges generated")
assert(
offsetRangesAfterStop.head._2.forall { _.fromOffset === 0 },
"starting offset not zero"
)
logInfo("====== RESTARTING ========")
// Recover context from checkpoints
ssc = new StreamingContext(testDir.getAbsolutePath)
val recoveredStream = ssc.graph.getInputStreams().head.asInstanceOf[DStream[(String, String)]]
// Verify offset ranges have been recovered
val recoveredOffsetRanges = getOffsetRanges(recoveredStream).map { x => (x._1, x._2.toSet) }
assert(recoveredOffsetRanges.size > 0, "No offset ranges recovered")
val earlierOffsetRanges = offsetRangesAfterStop.map { x => (x._1, x._2.toSet) }
assert(
recoveredOffsetRanges.forall { or =>
earlierOffsetRanges.contains((or._1, or._2))
},
"Recovered ranges are not the same as the ones generated\\n" +
s"recoveredOffsetRanges: $recoveredOffsetRanges\\n" +
s"earlierOffsetRanges: $earlierOffsetRanges"
)
// Restart context, give more data and verify the total at the end
    // If the total is right, that means each record has been received only once
ssc.start()
for (i <- (11 to 20).grouped(4)) {
sendData(i)
}
eventually(timeout(20 seconds), interval(50 milliseconds)) {
assert(DirectKafkaStreamSuite.total.get === (1 to 20).sum)
}
ssc.stop()
}
test("Direct Kafka stream report input information") {
val topic = "report-test"
val data = Map("a" -> 7, "b" -> 9)
kafkaTestUtils.createTopic(topic)
kafkaTestUtils.sendMessages(topic, data)
val totalSent = data.values.sum
val kafkaParams = Map(
"metadata.broker.list" -> kafkaTestUtils.brokerAddress,
"auto.offset.reset" -> "smallest"
)
import DirectKafkaStreamSuite._
ssc = new StreamingContext(sparkConf, Milliseconds(200))
val collector = new InputInfoCollector
ssc.addStreamingListener(collector)
val stream = withClue("Error creating direct stream") {
KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
ssc, kafkaParams, Set(topic))
}
val allReceived = new ConcurrentLinkedQueue[(String, String)]
stream.foreachRDD { rdd => allReceived.addAll(Arrays.asList(rdd.collect(): _*)) }
ssc.start()
eventually(timeout(20000.milliseconds), interval(200.milliseconds)) {
assert(allReceived.size === totalSent,
"didn't get expected number of messages, messages:\\n" +
allReceived.asScala.mkString("\\n"))
      // Verify the record counts collected by the StreamingListener.
assert(collector.numRecordsSubmitted.get() === totalSent)
assert(collector.numRecordsStarted.get() === totalSent)
assert(collector.numRecordsCompleted.get() === totalSent)
}
ssc.stop()
}
test("maxMessagesPerPartition with backpressure disabled") {
val topic = "maxMessagesPerPartition"
val kafkaStream = getDirectKafkaStream(topic, None)
val input = Map(TopicAndPartition(topic, 0) -> 50L, TopicAndPartition(topic, 1) -> 50L)
assert(kafkaStream.maxMessagesPerPartition(input).get ==
Map(TopicAndPartition(topic, 0) -> 10L, TopicAndPartition(topic, 1) -> 10L))
}
test("maxMessagesPerPartition with no lag") {
val topic = "maxMessagesPerPartition"
val rateController = Some(new ConstantRateController(0, new ConstantEstimator(100), 100))
val kafkaStream = getDirectKafkaStream(topic, rateController)
val input = Map(TopicAndPartition(topic, 0) -> 0L, TopicAndPartition(topic, 1) -> 0L)
assert(kafkaStream.maxMessagesPerPartition(input).isEmpty)
}
test("maxMessagesPerPartition respects max rate") {
val topic = "maxMessagesPerPartition"
val rateController = Some(new ConstantRateController(0, new ConstantEstimator(100), 1000))
val kafkaStream = getDirectKafkaStream(topic, rateController)
val input = Map(TopicAndPartition(topic, 0) -> 1000L, TopicAndPartition(topic, 1) -> 1000L)
assert(kafkaStream.maxMessagesPerPartition(input).get ==
Map(TopicAndPartition(topic, 0) -> 10L, TopicAndPartition(topic, 1) -> 10L))
}
test("using rate controller") {
val topic = "backpressure"
val topicPartitions = Set(TopicAndPartition(topic, 0), TopicAndPartition(topic, 1))
kafkaTestUtils.createTopic(topic, 2)
val kafkaParams = Map(
"metadata.broker.list" -> kafkaTestUtils.brokerAddress,
"auto.offset.reset" -> "smallest"
)
val batchIntervalMilliseconds = 100
val estimator = new ConstantEstimator(100)
val messages = Map("foo" -> 200)
kafkaTestUtils.sendMessages(topic, messages)
val sparkConf = new SparkConf()
// Safe, even with streaming, because we're using the direct API.
// Using 1 core is useful to make the test more predictable.
.setMaster("local[1]")
.setAppName(this.getClass.getSimpleName)
.set("spark.streaming.kafka.maxRatePerPartition", "100")
// Setup the streaming context
ssc = new StreamingContext(sparkConf, Milliseconds(batchIntervalMilliseconds))
val kafkaStream = withClue("Error creating direct stream") {
val kc = new KafkaCluster(kafkaParams)
val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message)
val m = kc.getEarliestLeaderOffsets(topicPartitions)
.fold(e => Map.empty[TopicAndPartition, Long], m => m.mapValues(lo => lo.offset))
new DirectKafkaInputDStream[String, String, StringDecoder, StringDecoder, (String, String)](
ssc, kafkaParams, m, messageHandler) {
override protected[streaming] val rateController =
Some(new DirectKafkaRateController(id, estimator))
}
}
val collectedData = new ConcurrentLinkedQueue[Array[String]]()
// Used for assertion failure messages.
def dataToString: String =
collectedData.asScala.map(_.mkString("[", ",", "]")).mkString("{", ", ", "}")
// This is to collect the raw data received from Kafka
kafkaStream.foreachRDD { (rdd: RDD[(String, String)], time: Time) =>
val data = rdd.map { _._2 }.collect()
collectedData.add(data)
}
ssc.start()
// Try different rate limits.
// Wait for arrays of data to appear matching the rate.
Seq(100, 50, 20).foreach { rate =>
collectedData.clear() // Empty this buffer on each pass.
estimator.updateRate(rate) // Set a new rate.
// Expect blocks of data equal to "rate", scaled by the interval length in secs.
val expectedSize = Math.round(rate * batchIntervalMilliseconds * 0.001)
eventually(timeout(5.seconds), interval(batchIntervalMilliseconds.milliseconds)) {
// Assert that rate estimator values are used to determine maxMessagesPerPartition.
// Funky "-" in message makes the complete assertion message read better.
assert(collectedData.asScala.exists(_.size == expectedSize),
s" - No arrays of size $expectedSize for rate $rate found in $dataToString")
}
}
ssc.stop()
}
test("maxMessagesPerPartition with zero offset and rate equal to one") {
val topic = "backpressure"
val kafkaParams = Map(
"metadata.broker.list" -> kafkaTestUtils.brokerAddress,
"auto.offset.reset" -> "smallest"
)
val batchIntervalMilliseconds = 60000
val sparkConf = new SparkConf()
// Safe, even with streaming, because we're using the direct API.
// Using 1 core is useful to make the test more predictable.
.setMaster("local[1]")
.setAppName(this.getClass.getSimpleName)
.set("spark.streaming.kafka.maxRatePerPartition", "100")
// Setup the streaming context
ssc = new StreamingContext(sparkConf, Milliseconds(batchIntervalMilliseconds))
val estimatedRate = 1L
val kafkaStream = withClue("Error creating direct stream") {
val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message)
val fromOffsets = Map(
TopicAndPartition(topic, 0) -> 0L,
TopicAndPartition(topic, 1) -> 0L,
TopicAndPartition(topic, 2) -> 0L,
TopicAndPartition(topic, 3) -> 0L
)
new DirectKafkaInputDStream[String, String, StringDecoder, StringDecoder, (String, String)](
ssc, kafkaParams, fromOffsets, messageHandler) {
override protected[streaming] val rateController =
Some(new DirectKafkaRateController(id, null) {
override def getLatestRate() = estimatedRate
})
}
}
val offsets = Map(
TopicAndPartition(topic, 0) -> 0L,
TopicAndPartition(topic, 1) -> 100L,
TopicAndPartition(topic, 2) -> 200L,
TopicAndPartition(topic, 3) -> 300L
)
val result = kafkaStream.maxMessagesPerPartition(offsets)
val expected = Map(
TopicAndPartition(topic, 0) -> 1L,
TopicAndPartition(topic, 1) -> 10L,
TopicAndPartition(topic, 2) -> 20L,
TopicAndPartition(topic, 3) -> 30L
)
assert(result.contains(expected), s"Number of messages per partition must be at least 1")
}
/** Get the generated offset ranges from the DirectKafkaStream */
private def getOffsetRanges[K, V](
kafkaStream: DStream[(K, V)]): Seq[(Time, Array[OffsetRange])] = {
kafkaStream.generatedRDDs.mapValues { rdd =>
rdd.asInstanceOf[KafkaRDD[K, V, _, _, (K, V)]].offsetRanges
}.toSeq.sortBy { _._1 }
}
private def getDirectKafkaStream(topic: String, mockRateController: Option[RateController]) = {
val batchIntervalMilliseconds = 100
val sparkConf = new SparkConf()
.setMaster("local[1]")
.setAppName(this.getClass.getSimpleName)
.set("spark.streaming.kafka.maxRatePerPartition", "100")
// Setup the streaming context
ssc = new StreamingContext(sparkConf, Milliseconds(batchIntervalMilliseconds))
val earliestOffsets = Map(TopicAndPartition(topic, 0) -> 0L, TopicAndPartition(topic, 1) -> 0L)
val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message)
new DirectKafkaInputDStream[String, String, StringDecoder, StringDecoder, (String, String)](
ssc, Map[String, String](), earliestOffsets, messageHandler) {
override protected[streaming] val rateController = mockRateController
}
}
}
object DirectKafkaStreamSuite {
val total = new AtomicLong(-1L)
class InputInfoCollector extends StreamingListener {
val numRecordsSubmitted = new AtomicLong(0L)
val numRecordsStarted = new AtomicLong(0L)
val numRecordsCompleted = new AtomicLong(0L)
override def onBatchSubmitted(batchSubmitted: StreamingListenerBatchSubmitted): Unit = {
numRecordsSubmitted.addAndGet(batchSubmitted.batchInfo.numRecords)
}
override def onBatchStarted(batchStarted: StreamingListenerBatchStarted): Unit = {
numRecordsStarted.addAndGet(batchStarted.batchInfo.numRecords)
}
override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted): Unit = {
numRecordsCompleted.addAndGet(batchCompleted.batchInfo.numRecords)
}
}
}
private[streaming] class ConstantEstimator(@volatile private var rate: Long)
extends RateEstimator {
def updateRate(newRate: Long): Unit = {
rate = newRate
}
def compute(
time: Long,
elements: Long,
processingDelay: Long,
schedulingDelay: Long): Option[Double] = Some(rate)
}
private[streaming] class ConstantRateController(id: Int, estimator: RateEstimator, rate: Long)
extends RateController(id, estimator) {
override def publish(rate: Long): Unit = ()
override def getLatestRate(): Long = rate
}
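
// Hedged sketch (not Spark's implementation): the proportional-by-lag split that the
// "zero offset and rate equal to one" test above expects. A budget of rate * batchSeconds
// messages is divided across partitions in proportion to their lag, floored at one message
// per partition so no partition starves. Object and parameter names are illustrative only.
private[streaming] object BackpressureMathSketch {
  def split(
      lags: Map[TopicAndPartition, Long],
      estimatedRate: Long,
      batchIntervalMs: Long): Map[TopicAndPartition, Long] = {
    val totalLag = lags.values.sum.toDouble
    val budget = estimatedRate * batchIntervalMs / 1000.0
    lags.map { case (tp, lag) =>
      val share = if (totalLag == 0) 0.0 else lag / totalLag
      tp -> math.max(1L, math.round(budget * share))
    }
  }
  // split(Map(tp0 -> 0L, tp1 -> 100L, tp2 -> 200L, tp3 -> 300L), 1L, 60000L)
  //   == Map(tp0 -> 1L, tp1 -> 10L, tp2 -> 20L, tp3 -> 30L), matching the expected map above.
}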
| ioana-delaney/spark | external/kafka-0-8/src/test/scala/org/apache/spark/streaming/kafka/DirectKafkaStreamSuite.scala | Scala | apache-2.0 | 21,854 |
package play.sbtplugin
import sbt._
import Keys._
import play.Play
import play.twirl.sbt.Import.TwirlKeys
import com.typesafe.sbt.web.SbtWeb.autoImport._
object PlayLayoutPlugin extends AutoPlugin {
override def requires = Play
override def trigger = AllRequirements
override def projectSettings = Seq(
target := baseDirectory.value / "target",
sourceDirectory in Compile := baseDirectory.value / "app",
sourceDirectory in Test := baseDirectory.value / "test",
resourceDirectory in Compile := baseDirectory.value / "conf",
scalaSource in Compile := baseDirectory.value / "app",
scalaSource in Test := baseDirectory.value / "test",
javaSource in Compile := baseDirectory.value / "app",
javaSource in Test := baseDirectory.value / "test",
sourceDirectories in (Compile, TwirlKeys.compileTemplates) := Seq((sourceDirectory in Compile).value),
sourceDirectories in (Test, TwirlKeys.compileTemplates) := Seq((sourceDirectory in Test).value),
// sbt-web
sourceDirectory in Assets := (sourceDirectory in Compile).value / "assets",
sourceDirectory in TestAssets := (sourceDirectory in Test).value / "assets",
resourceDirectory in Assets := baseDirectory.value / "public"
)
}
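
// Hedged sketch (not part of the plugin, assumes sbt 0.13.5+): because PlayLayoutPlugin
// triggers on allRequirements whenever Play is enabled, a build that prefers the standard
// sbt layout can opt out per project. The project id and base directory are illustrative only.
private object StandardLayoutExample {
  lazy val root: Project = Project("root", file(".")).disablePlugins(PlayLayoutPlugin)
}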
| jyotikamboj/container | pf-framework/src/sbt-plugin/src/main/scala/play/sbtplugin/PlayLayoutPlugin.scala | Scala | mit | 1,243 |
package sml
import scala.collection.mutable.{HashSet, Queue}
/**
A library for traversing graph based knowledge bases over words and phrases
*/
object knowledge
{
/**
Defines a traverser over the knowledge base
*/
abstract class Traverser
{
/**
Returns true if the phrase is defined in the knowledge base
*/
def hasPhrase(phrase:String):Boolean
/**
Returns all the phrases related to the given string
*/
def relatedPhrases(start:String):Set[String]
}
/**
Defines the implementation details needed for a knowledge base traversal
*/
abstract class TraverserImpl[T <: Node] extends Traverser
{
/**
Wraps a phrase in a node
*/
def init( phrase:String ):Iterable[T]
/**
Returns a nodes successors
*/
def successors(node:T):Iterable[T]
/**
* Returns the nodes reachable from the given start phrase
*/
def neighbors(start:String):Iterable[T] =
{
Traversal[T](this, start)
}
/**
Returns all the phrases related to the given string
*/
def relatedPhrases(start:String):Set[String] =
{
neighbors(start).map(_.phrase).toSet
}
}
/**
Traverses over a group of traversers and returns the union of their
results
*/
class UnionTraverser(val traversers:Iterable[Traverser]) extends Traverser
{
/**
Returns true if the phrase is defined in the knowledge base
*/
def hasPhrase(phrase:String):Boolean =
{
traversers.exists(_.hasPhrase(phrase))
}
/**
Returns all the phrases related to the given string
*/
def relatedPhrases(start:String):Set[String] =
{
traversers.flatMap(_.relatedPhrases(start)).toSet
}
/**
Add in another traverser
*/
def +(trav:Traverser):UnionTraverser =
{
val extra = trav match
{
case u:UnionTraverser => u.traversers
case t:Traverser => Seq(t)
}
new UnionTraverser(traversers ++ extra)
}
}
/**
A node in a knowledge graph
*/
abstract class Node(val phrase:String, val score:Double)
{
override def equals(other:Any): Boolean = other match
{
case that:Node=> that.phrase == phrase
case _ => false
}
override def hashCode:Int = phrase.hashCode
}
/**
* A traversal of knowledge from a starting point
*/
case class Traversal[T <: Node](val traverser:TraverserImpl[T], val start:String) extends Iterable[T]
{
/**
Returns an iterator over the nodes visited by the traversal
*/
def iterator = new Iterator[T]
{
val visited = new HashSet[T]()
val fringe = new Queue[T]() ++ traverser.init(start)
override def hasNext:Boolean = !fringe.isEmpty
override def next:T =
{
//get the current
val current = fringe.dequeue
//mark it as visited
visited += current
//add its children to the fringe
fringe ++= (traverser.successors(current).filter(n => !visited(n)))
//return the current value
current
}
}
}
}
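
/**
 Hedged usage sketch: a tiny in-memory TraverserImpl over an adjacency map, showing how
 init and successors drive the breadth-first Traversal above. MapNode, MapTraverser and the
 example graph are illustrative, not part of the library.
*/
object mapKnowledge
{
  import knowledge._

  case class MapNode(override val phrase:String) extends Node(phrase, 1.0)

  class MapTraverser(graph:Map[String, Set[String]]) extends TraverserImpl[MapNode]
  {
    def hasPhrase(phrase:String):Boolean = graph.contains(phrase)

    def init(phrase:String):Iterable[MapNode] = Seq(MapNode(phrase))

    def successors(node:MapNode):Iterable[MapNode] =
      graph.getOrElse(node.phrase, Set.empty[String]).map(MapNode(_))
  }

  // new MapTraverser(Map("cat" -> Set("feline"))).relatedPhrases("cat") == Set("cat", "feline")
}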
| jworr/sml | src/main/scala/sml/knowledge.scala | Scala | gpl-2.0 | 2,826 |
package com.karasiq.shadowcloud.api.json
import akka.util.ByteString
import play.api.libs.json._
import com.karasiq.shadowcloud.api.SCApiServer
trait SCJsonApiServer extends SCApiServer[ByteString, Reads, Writes] with SCJsonApi {
def read[Result: Reads](p: ByteString): Result = {
Json.fromJson(Json.parse(p.toArray)).get
}
def write[Result: Writes](r: Result): ByteString = {
ByteString.fromArrayUnsafe(Json.toBytes(Json.toJson(r)))
}
def decodePayload(payload: ByteString): Map[String, ByteString] = {
Json.parse(payload.toArray).as[JsObject].fields.toMap.mapValues(v ⇒ ByteString(v.as[JsString].value))
}
}
| Karasiq/shadowcloud | server/autowire-api/src/main/scala/com/karasiq/shadowcloud/api/json/SCJsonApiServer.scala | Scala | apache-2.0 | 641 |
package net.fluxo.blue.downloader
/**
* Created with IntelliJ IDEA.
* User: Ronald Kurniawan (viper)
* Date: 27/12/13
* Time: 2:29 PM
*/
class TDownload {
private var _id: Long = 0
def Id: Long = _id
def Id_= (value: Long) = {
_id = value
}
private var _name: Option[String] = None
def Name: Option[String] = _name
def Name_= (value: String) = {
_name = Some(value)
}
private var _type: Option[String] = None // HTTP,FTP,BT
def Type: Option[String] = _type
def Type_= (value: String) = {
_type = Some(value)
}
private var _started: Long = 0 // TIMESTAMP
def Started: Long = _started
def Started_= (value: Long) = {
_started = value
}
private var _totalSize: Long = 0 // BYTES
def TotalSize: Long = _totalSize
def TotalSize_= (value: Long) = {
_totalSize = value
}
private var _downloaded: Long = 0 // BYTES
def Downloaded: Long = _downloaded
def Downloaded_= (value: Long) = {
_downloaded = value
}
private var _url: Option[String] = None
def Url: Option[String] = _url
def Url_= (value: String) = {
_url = Some(value)
}
private var _activeSince: Long = 0 // TIMESTAMP
def ActiveSince: Long = _activeSince
def ActiveSince_= (value: Long) = {
_activeSince = value
}
private var _priority: Boolean = false // 1/0
def Priority: Boolean = _priority
def Priority_= (value: Boolean) = {
_priority = value
}
}
| fluxodesign/FluxoBlue | src/main/scala/net/fluxo/blue/downloader/TDownload.scala | Scala | lgpl-3.0 | 1,412 |
package rpm4s.repo.utils
import java.util.zip.{CRC32, Deflater}
import fs2.{Chunk, Pipe, Pull, RaiseThrowable, Stream}
package object compress {
private val header: Chunk[Byte] = Chunk.bytes(
Array(0x1f.toByte,
0x8b.toByte,
Deflater.DEFLATED.toByte,
0.toByte,
0.toByte,
0.toByte,
0.toByte,
0.toByte,
0.toByte,
0.toByte))
def gzip[F[_]](
level: Int = Deflater.DEFAULT_COMPRESSION,
bufferSize: Int = 1024 * 32,
strategy: Int = Deflater.DEFAULT_STRATEGY
): Pipe[F, Byte, Byte] =
in =>
Stream.suspend {
val crc = new CRC32()
var inputSize = 0
Stream.chunk[F, Byte](header) ++
in.through(_.chunks.flatMap { chunk =>
val bytes = chunk.toBytes
inputSize = inputSize + bytes.size
crc.update(bytes.values)
Stream.chunk(chunk)
})
.through(
fs2.compress.deflate(
nowrap = true,
level = level,
bufferSize = bufferSize,
strategy = strategy
)
) ++
Stream.chunk[F, Byte](Chunk.bytes {
val c = crc.getValue
val size = inputSize % 4294967296L //2^32
Array(
(c & 0xFF).toByte,
((c >> 8) & 0xFF).toByte,
((c >> 16) & 0xFF).toByte,
((c >> 24) & 0xFF).toByte,
(size & 0xFF).toByte,
((size >> 8) & 0xFF).toByte,
((size >> 16) & 0xFF).toByte,
((size >> 24) & 0xFF).toByte
)
})
}
private def awaitShort[F[_]](h: Stream[F, Byte]): Pull[F, Nothing, Option[(Int, Stream[F, Byte])]] = {
h.pull.uncons1.flatMap {
case Some((b1, h)) =>
h.pull.uncons1.flatMap {
case Some((b2, h)) =>
Pull.pure(Some((((b1 & 0xFF) << 8) | (b2 & 0xFF), h)))
case None => Pull.pure(None)
}
case None => Pull.pure(None)
}
}
private def skipFlags[F[_]: RaiseThrowable](flags: Byte, s: Stream[F, Byte]): Pull[F, Byte, Unit] = {
val FTEXT = 1
val FHCRC = 2
val FEXTRA = 4
val FNAME = 8
val FCOMMENT = 16
val a = for {
optRest <- if ((flags & FEXTRA) == FEXTRA) {
awaitShort(s).flatMap {
case Some((size, h)) =>
h.pull.drop(size)
case None => Pull.raiseError(new RuntimeException("premature end of stream"))
}
} else Pull.pure(Some(s))
s <- optRest match {
case Some(s) =>
if ((flags & FNAME) == FNAME) {
Pull.pure(s.dropWhile(_ != 0).drop(1))
} else Pull.pure(s)
case None => Pull.raiseError(new RuntimeException("premature end of stream"))
}
s <- if ((flags & FCOMMENT) == FCOMMENT) {
Pull.pure(s.dropWhile(_ != 0).drop(1))
} else Pull.pure(s)
s <- if ((flags & FHCRC) == FHCRC) Pull.pure(s.drop(2))
else Pull.pure(s)
_ <- s.pull.echo
} yield ()
a
}
//TODO: refactor and upstream
//http://www.zlib.org/rfc-gzip.html#header-trailer
def gunzip[F[_]: RaiseThrowable](bufferSize: Int = 1024 * 32): Pipe[F, Byte, Byte] = h => {
val a = for {
idOpt <- awaitShort(h)
cmOpt <- idOpt match {
case None => Pull.raiseError(new RuntimeException("premature end of stream"))
case Some((id, h)) =>
if (id == 0x1f8b)
h.pull.uncons1
else Pull.raiseError(
new RuntimeException(
s"invalid gzip header ${Integer.toHexString(id)} =! 0x1f8b"))
}
flagsOpt <- cmOpt match {
case None => Pull.raiseError(new RuntimeException("premature end of stream"))
case Some((cm, h)) =>
if (cm == 8)
h.pull.uncons1.flatMap {
case Some((flags, h)) =>
skipFlags(flags, h.drop(6))
case None => Pull.raiseError(new RuntimeException("premature end of stream"))
}
else Pull.raiseError(new RuntimeException(s"unsupported compression method $cm"))
}
} yield ()
a
}.stream.through(fs2.compress.inflate(nowrap = true, bufferSize))
}
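
// Hedged usage sketch (assumes fs2 1.x with cats-effect on the classpath, as the pipes above
// already require): round-tripping bytes through gzip and then gunzip should return the
// original input. The object and method names are illustrative only.
object CompressRoundTripSketch {
  import cats.effect.IO
  import rpm4s.repo.utils.compress.{gunzip, gzip}

  def roundTrip(bytes: Array[Byte]): IO[Vector[Byte]] =
    Stream.emits(bytes)
      .covary[IO]
      .through(gzip())
      .through(gunzip())
      .compile
      .toVector
}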
| lucidd/rpm4s | repo-utils/jvm/src/main/scala/rpm4s/repo/utils/compress/package.scala | Scala | mit | 4,382 |
/* sbt -- Simple Build Tool
* Copyright 2009,2010 Mark Harrah
*/
package sbt
package impl
import StringUtilities.nonEmpty
trait DependencyBuilders {
final implicit def toGroupID(groupID: String): GroupID =
{
nonEmpty(groupID, "Group ID")
new GroupID(groupID)
}
final implicit def toRepositoryName(name: String): RepositoryName =
{
nonEmpty(name, "Repository name")
new RepositoryName(name)
}
final implicit def moduleIDConfigurable(m: ModuleID): ModuleIDConfigurable =
{
require(m.configurations.isEmpty, "Configurations already specified for module " + m)
new ModuleIDConfigurable(m)
}
}
final class GroupID private[sbt] (private[sbt] val groupID: String) {
def %(artifactID: String) = groupArtifact(artifactID, CrossVersion.Disabled)
def %%(artifactID: String): GroupArtifactID = groupArtifact(artifactID, CrossVersion.binary)
@deprecated(deprecationMessage, "0.12.0")
def %%(artifactID: String, crossVersion: String => String) = groupArtifact(artifactID, CrossVersion.binaryMapped(crossVersion))
@deprecated(deprecationMessage, "0.12.0")
def %%(artifactID: String, alternatives: (String, String)*) = groupArtifact(artifactID, CrossVersion.binaryMapped(Map(alternatives: _*) orElse { case s => s }))
private def groupArtifact(artifactID: String, cross: CrossVersion) =
{
nonEmpty(artifactID, "Artifact ID")
new GroupArtifactID(groupID, artifactID, cross)
}
private[this] def deprecationMessage = """Use the cross method on the constructed ModuleID. For example: ("a" % "b" % "1").cross(...)"""
}
final class GroupArtifactID private[sbt] (
private[sbt] val groupID: String,
private[sbt] val artifactID: String,
private[sbt] val crossVersion: CrossVersion) {
def %(revision: String): ModuleID =
{
nonEmpty(revision, "Revision")
ModuleID(groupID, artifactID, revision).cross(crossVersion)
}
}
final class ModuleIDConfigurable private[sbt] (moduleID: ModuleID) {
def %(configuration: Configuration): ModuleID = %(configuration.name)
def %(configurations: String): ModuleID =
{
nonEmpty(configurations, "Configurations")
val c = configurations
moduleID.copy(configurations = Some(c))
}
}
final class RepositoryName private[sbt] (name: String) {
def at(location: String) =
{
nonEmpty(location, "Repository location")
new MavenRepository(name, location)
}
}
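
// Hedged sketch of the DSL that the implicit builders above enable; the coordinates and the
// repository URL are illustrative only and this object is not part of sbt itself.
private object DependencyBuildersExample extends DependencyBuilders {
  val plain: ModuleID = "org.example" % "library" % "1.0"
  val crossBuilt: ModuleID = "org.example" %% "library" % "1.0"
  val inTestConfig: ModuleID = crossBuilt % "test"
  val resolver: MavenRepository = "example releases" at "https://repo.example.org/releases"
}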
| corespring/sbt | ivy/src/main/scala/sbt/impl/DependencyBuilders.scala | Scala | bsd-3-clause | 2,451 |
package com.twitter.concurrent
import scala.util.Random
import com.twitter.util.{Await, Duration, Future, Promise, Time, Timer}
/**
* An offer to communicate with another process. The offer is
* parameterized on the type of the value communicated. An offer that
* sends a value typically has type {{Unit}}. An offer is activated by
* synchronizing it, which is done with `sync()`.
*
* Note that Offers are persistent values -- they may be synchronized
* multiple times. They represent a standing offer of communication, not
* a one-shot event.
*
* =The synchronization protocol=
*
* Synchronization is performed via a two-phase commit process.
* `prepare()` commences the transaction, and when the other party is
* ready, it returns with a transaction object, `Tx[T]`. This must then
* be ackd or nackd. If both parties acknowledge, `Tx.ack()` returns
* with a commit object, containing the value. This finalizes the
* transaction. Please see the `Tx` documentation for more details on
* that phase of the protocol.
*
* Note that a user should never perform this protocol themselves --
* synchronization should always be done with `sync()`.
*
* Future interrupts are propagated, and failure is passed through. It
* is up to the implementer of the Offer to decide on failure semantics,
* but they are always passed through in all of the combinators.
*
* Note: There is a Java-friendly API for this trait: [[com.twitter.concurrent.AbstractOffer]].
*/
trait Offer[+T] { self =>
import Offer.LostSynchronization
/**
* Prepare a new transaction. This is the first stage of the 2 phase
* commit. This is typically only called by the offer implementation
* directly or by combinators.
*/
def prepare(): Future[Tx[T]]
/**
* Synchronizes this offer, returning a future representing the result
* of the synchronization.
*/
def sync(): Future[T] =
prepare() flatMap { tx =>
tx.ack() flatMap {
case Tx.Commit(v) => Future.value(v)
case Tx.Abort => sync()
}
}
/**
* Synonym for `sync()`
*/
@deprecated("use sync() instead", "5.x")
def apply(): Future[T] = sync()
/* Combinators */
/**
* Map this offer of type {{T}} into one of type {{U}}. The
* translation (performed by {{f}}) is done after the {{Offer[T]}} has
* successfully synchronized.
*/
def map[U](f: T => U): Offer[U] = new Offer[U] {
def prepare() = self.prepare() map { tx =>
new Tx[U] {
import Tx.{Commit, Abort}
def ack() = tx.ack() map {
case Commit(t) => Commit(f(t))
case Abort => Abort
}
def nack() { tx.nack() }
}
}
}
/**
* Synonym for `map()`. Useful in combination with `Offer.choose()`
* and `Offer.select()`
*/
def apply[U](f: T => U): Offer[U] = map(f)
/**
* Like {{map}}, but to a constant (call-by-name).
*/
def const[U](f: => U): Offer[U] = map { _ => f }
/**
* Java-friendly analog of `const()`.
*/
def mapConstFunction[U](f: => U): Offer[U] = const(f)
/**
* Java-friendly analog of `const()`.
*/
def mapConst[U](c: U): Offer[U] = const(c)
/**
* An offer that, when synchronized, attempts to synchronize {{this}}
* immediately, and if it fails, synchronizes on {{other}} instead. This is useful
* for providing default values. Eg.:
*
* {{{
* offer orElse Offer.const { computeDefaultValue() }
* }}}
*/
def orElse[U >: T](other: Offer[U]): Offer[U] = new Offer[U] {
def prepare() = {
val ourTx = self.prepare()
if (ourTx.isDefined) ourTx else {
ourTx foreach { tx => tx.nack() }
ourTx.raise(LostSynchronization)
other.prepare()
}
}
}
def or[U](other: Offer[U]): Offer[Either[T, U]] =
Offer.choose(this map { Left(_) }, other map { Right(_) })
/**
* Synchronize on this offer indefinitely, invoking the given {{f}}
* with each successfully synchronized value. A receiver can use
* this to enumerate over all received values.
*/
def foreach(f: T => Unit) {
sync() foreach { v =>
f(v)
foreach(f)
}
}
/**
* Synchronize (discarding the value), and then invoke the given
* closure. Convenient for loops.
*/
def andThen(f: => Unit) {
sync() onSuccess { _ => f }
}
/**
* Synchronize this offer, blocking for the result. See {{sync()}}
* and {{com.twitter.util.Future.apply()}}
*/
def syncWait(): T = Await.result(sync())
/* Scala actor-style syntax */
/**
* Alias for synchronize.
*/
def ? = sync()
/**
* Synchronize, blocking for the result.
*/
def ?? = syncWait()
}
/**
* Abstract `Offer` class for Java compatibility.
*/
abstract class AbstractOffer[T] extends Offer[T]
/**
* Note: There is a Java-friendly API for this object: [[com.twitter.concurrent.Offers]].
*/
object Offer {
/**
* A constant offer: synchronizes the given value always. This is
* call-by-name and a new value is produced for each `prepare()`.
*
* Note: Updates here must also be done at [[com.twitter.concurrent.Offers.newConstOffer()]].
*/
def const[T](x: => T): Offer[T] = new Offer[T] {
def prepare() = Future.value(Tx.const(x))
}
/**
* An offer that never synchronizes.
*/
val never: Offer[Nothing] = new Offer[Nothing] {
def prepare() = Future.never
}
private[this] val rng = Some(new Random(Time.now.inNanoseconds))
/**
* The offer that chooses exactly one of the given offers. If there are any
* Offers that are synchronizable immediately, one is chosen at random.
*/
def choose[T](evs: Offer[T]*): Offer[T] = choose(rng, evs)
/**
* The offer that chooses exactly one of the given offers. If there are any
* Offers that are synchronizable immediately, the first available in the sequence is selected.
*/
def prioritize[T](evs: Offer[T]*): Offer[T] = choose(None, evs)
/**
* The offer that chooses exactly one of the given offers.
*
* If there are any Offers that are synchronizable immediately, one is chosen
* - at random if {{random}} is defined.
* - in order if {{random}} is None.
*
* Package-exposed for testing.
*/
private[concurrent] def choose[T](random: Option[Random], evs: Seq[Offer[T]]): Offer[T] = {
if (evs.isEmpty) Offer.never else new Offer[T] {
def prepare(): Future[Tx[T]] = {
// to avoid unnecessary allocations we do a bunch of manual looping and shuffling
val inputSize = evs.size
val prepd = new Array[Future[Tx[T]]](inputSize)
val iter = evs.iterator
var i = 0
while (i < inputSize) {
prepd(i) = iter.next().prepare()
i += 1
}
// We use match instead of foreach to reduce allocations.
random match {
case None =>
// Shuffle only if random is defined
case Some(r) =>
while (i > 1) { // i starts at evs.size
val nextPos = r.nextInt(i)
val tmp = prepd(i - 1)
prepd(i - 1) = prepd(nextPos)
prepd(nextPos) = tmp
i -= 1
}
}
i = 0
var foundPos = -1
while (foundPos < 0 && i < prepd.length) {
val winner = prepd(i)
if (winner.isDefined) foundPos = i
i += 1
}
def updateLosers(winPos: Int, prepd: Array[Future[Tx[T]]]): Future[Tx[T]] = {
val winner = prepd(winPos)
var j = 0
while (j < prepd.length) {
val loser = prepd(j)
if (loser ne winner) {
loser onSuccess { tx => tx.nack() }
loser.raise(LostSynchronization)
}
j += 1
}
winner
}
if (foundPos >= 0) {
updateLosers(foundPos, prepd)
} else {
Future.selectIndex(prepd) flatMap { winPos =>
updateLosers(winPos, prepd)
}
}
}
}
}
/**
* `Offer.choose()` and synchronize it.
*/
def select[T](ofs: Offer[T]*): Future[T] = choose(ofs:_*).sync()
private[this] val FutureTxUnit = Future.value(Tx.Unit)
/**
* An offer that is available after the given time out.
*/
def timeout(timeout: Duration)(implicit timer: Timer): Offer[Unit] = new Offer[Unit] {
private[this] val deadline = timeout.fromNow
def prepare() = {
if (deadline <= Time.now) FutureTxUnit else {
val p = new Promise[Tx[Unit]]
val task = timer.schedule(deadline) { p.setValue(Tx.Unit) }
p.setInterruptHandler { case _cause => task.cancel() }
p
}
}
}
object LostSynchronization extends Exception {
override def fillInStackTrace = this
}
}
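
// Hedged usage sketch: composing standing offers with Offer.select and Offer.timeout. The
// object, method and parameter names are illustrative; only the Offer/Timer APIs above are assumed.
private object OfferUsageSketch {
  import com.twitter.util.{Duration, Future, Timer}

  // Synchronize on whichever of the two offers becomes available first, falling back to
  // `default` once `limit` elapses. Losing offers are nacked by the choose protocol.
  def firstWithTimeout[T](a: Offer[T], b: Offer[T], limit: Duration, default: T)
    (implicit timer: Timer): Future[T] =
    Offer.select(a, b, Offer.timeout(limit).const(default))
}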
| folone/util | util-core/src/main/scala/com/twitter/concurrent/Offer.scala | Scala | apache-2.0 | 8,762 |
package dpla.ingestion3.harvesters.resourceSync
import dpla.ingestion3.confs.i3Conf
import dpla.ingestion3.harvesters.Harvester
import org.apache.log4j.Logger
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions.lit
class RsHarvester(spark: SparkSession,
shortName: String,
conf: i3Conf,
harvestLogger: Logger)
extends Harvester(spark, shortName, conf, harvestLogger) {
// TODO Do all RS endpoints support JSON?
override def mimeType: String = "application_json"
override def localHarvest: DataFrame = {
// Set options.
val readerOptions: Map[String, String] = Map(
"endpoint" -> conf.harvest.endpoint
).collect{ case (key, Some(value)) => key -> value } // remove None values
// Run harvest.
val harvestedData: DataFrame = spark.read
.format("dpla.ingestion3.harvesters.resourceSync")
.options(readerOptions)
.load()
// Log errors.
harvestedData.select("error.message", "error.errorSource.url")
.where("error is not null")
.collect
.foreach(row => harvestLogger.warn("ResourceSync harvest error: " + row))
val startTime = System.currentTimeMillis()
val unixEpoch = startTime / 1000L
// Return DataFrame
harvestedData
.select("record.id", "record.document")
.where("document is not null")
.withColumn("ingestDate", lit(unixEpoch))
.withColumn("provider", lit(shortName))
.withColumn("mimetype", lit(mimeType))
}
}
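
// Hedged sketch of the Option-flattening idiom used for readerOptions above: entries whose
// value is None are dropped, so only configured keys reach the DataFrameReader. The object
// name and the example values are illustrative only.
object ReaderOptionsSketch {
  def flatten(raw: Map[String, Option[String]]): Map[String, String] =
    raw.collect { case (key, Some(value)) => key -> value }

  // flatten(Map("endpoint" -> Some("http://example.org/rs"), "apiKey" -> None))
  //   == Map("endpoint" -> "http://example.org/rs")
}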
| dpla/ingestion3 | src/main/scala/dpla/ingestion3/harvesters/resourceSync/RsHarvester.scala | Scala | mit | 1,542 |
package uk.ac.ncl.openlab.intake24.services.systemdb.shortUrls
import uk.ac.ncl.openlab.intake24.errors.{CreateError, LookupError, UnexpectedDatabaseError}
/**
* Created by Tim Osadchiy on 22/02/2018.
*/
trait ShortUrlDataService {
def saveShortUrls(urls: Seq[(String, String)]): Either[UnexpectedDatabaseError, Seq[String]]
def getShortUrls(longUrls: Seq[String]): Either[UnexpectedDatabaseError, Map[String, String]]
def getLongUrl(shortUrl: String): Either[UnexpectedDatabaseError, Option[String]]
}
| digitalinteraction/intake24 | SystemDataServices/src/main/scala/uk/ac/ncl/openlab/intake24/services/systemdb/shortUrls/ShortUrlDataService.scala | Scala | apache-2.0 | 519 |
val x = 1
+ 2
+3 // error: Expected a toplevel definition
val b1 = {
22
* 22 // ok
*/*one more*/22 // error: end of statement expected
} // error: ';' expected, but '}' found
val b2: Boolean = {
println(x)
! "hello".isEmpty // error: value ! is not a member of Unit
}
| som-snytt/dotty | tests/neg/multiLineOps.scala | Scala | apache-2.0 | 303 |
package com.datascience.education.tutorial.lecture1
// Task (5a): decide where the implicit conversion should go; implement it
object ImplicitConversions {
type ComplexNumber = (Double, Double)
trait ComplexVector {
def complexVector: List[ComplexNumber]
override def toString = s"Vector contains $complexVector"
}
object ComplexVectors {
def dense(firstValue: ComplexNumber, otherValues: ComplexNumber*): ComplexVector =
new ComplexVector {
val complexVector = firstValue :: otherValues.toList
}
}
// val denseInts = ComplexVectors.dense(4, 2, 6, 9)
}
object ImplicitConversionsExample extends App {
import ImplicitConversions._
// println("Dense Ints")
// println(denseInts)
}
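
// Hedged illustration of the mechanism Task (5a) asks about; where the conversion should live
// is deliberately left to the task. With an implicit Int => ComplexNumber in scope, a call such
// as ComplexVectors.dense(4, 2, 6, 9) would type-check because each Int is widened to a pair.
object ImplicitConversionSketch {
  import scala.language.implicitConversions
  import ImplicitConversions.ComplexNumber

  implicit def intToComplexNumber(i: Int): ComplexNumber = (i.toDouble, 0.0)
}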
| DS12/scala-class | tutorial/src/main/scala/com/datascience/education/tutorial/lecture1/ImplicitConversions.scala | Scala | cc0-1.0 | 746 |
package com.scalableminds.webknossos.datastore.controllers
import com.scalableminds.util.tools.Fox
import com.scalableminds.webknossos.datastore.storage.DataStoreRedisStore
import javax.inject.Inject
import play.api.mvc.{Action, AnyContent}
import scala.concurrent.ExecutionContext
class Application @Inject()(redisClient: DataStoreRedisStore)(implicit ec: ExecutionContext) extends Controller {
override def allowRemoteOrigin: Boolean = true
def health: Action[AnyContent] = Action.async { implicit request =>
log() {
for {
before <- Fox.successful(System.currentTimeMillis())
_ <- redisClient.checkHealth
afterRedis = System.currentTimeMillis()
_ = logger.info(s"Answering ok for Datastore health check, took ${afterRedis - before} ms")
} yield Ok("Ok")
}
}
}
| scalableminds/webknossos | webknossos-datastore/app/com/scalableminds/webknossos/datastore/controllers/Application.scala | Scala | agpl-3.0 | 826 |
package hayago
package object game {
val firstTurnColour = Colour.Black // Black is always first
} | sungiant/hayago | src/main/scala/game/package.scala | Scala | mit | 101 |
package newts.internal
import cats.Order
import cats.kernel.instances.all._
trait MaxBounded[A] extends Order[A]{
def maxValue: A
}
object MaxBounded{
def apply[A](implicit ev: MaxBounded[A]): MaxBounded[A] = ev
implicit val shortMaxBounded: MaxBounded[Short] = fromOrder(Short.MaxValue)(catsKernelStdOrderForShort)
implicit val intMaxBounded : MaxBounded[Int] = fromOrder( Int.MaxValue)(catsKernelStdOrderForInt)
implicit val longMaxBounded : MaxBounded[Long] = fromOrder( Long.MaxValue)(catsKernelStdOrderForLong)
def fromOrder[A](value: A)(A: Order[A]): MaxBounded[A] = new MaxBounded[A] {
def maxValue: A = value
def compare(x: A, y: A): Int = A.compare(x, y)
}
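
  // Hedged usage sketch (illustrative, not part of the library): folding from maxValue gives a
  // total `minimum` even for empty input, which is the point of pairing Order with an upper bound.
  private object UsageSketch {
    def minimum[A](as: List[A])(implicit A: MaxBounded[A]): A =
      as.foldLeft(A.maxValue)(A.min)
  }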
} | julien-truffaut/newts | core/shared/src/main/scala/newts/internal/MaxBounded.scala | Scala | apache-2.0 | 699 |
package BIDMat
import edu.berkeley.bid.CBLAS._
import edu.berkeley.bid.LAPACK._
import edu.berkeley.bid.SPBLAS._
import scala.util.hashing.MurmurHash3
import java.util.Arrays
case class DMat(nr:Int, nc:Int, data0:Array[Double]) extends DenseMat[Double](nr, nc, data0) {
def size() = length;
def getdata() = data
override def set(v:Float):DMat = {
Arrays.fill(data,0,length,v)
this
}
override def t:DMat = tt(null)
def t(omat:Mat):DMat = tt(omat)
def tt(omat:Mat):DMat = {
val out = DMat.newOrCheckDMat(ncols, nrows, omat, GUID, "t".##)
if (!Mat.useMKL) {
gt(out)
} else {
domatcopy("C", "T", nrows, ncols, 1.0, data, nrows, out.data, ncols)
}
out
}
override def dv:Double =
if (nrows > 1 || ncols > 1) {
throw new RuntimeException("Matrix should be 1x1 to extract value")
} else {
data(0)
}
override def mytype = "DMat";
override def view(nr:Int, nc:Int):DMat = {
if (1L * nr * nc > data.length) {
throw new RuntimeException("view dimensions too large")
}
if (nr == nrows && nc == ncols) {
this
} else {
val out = new DMat(nr, nc, data);
out.setGUID(MurmurHash3.mix(MurmurHash3.mix(nr, nc), (GUID*3145341).toInt));
out
}
}
def horzcat(b: DMat) = DMat(ghorzcat(b))
def vertcat(b: DMat) = DMat(gvertcat(b))
override def nnz:Int = {
var count:Int = 0
var i = 0
while (i < length) {
if (data(i) != 0) {
count += 1
}
i += 1
}
count
}
override def findInds(out:IMat, off:Int):IMat = {
var count = 0
var i = off
while (i < length+off) {
if (data(i) != 0) {
out.data(count) = i
count += 1
}
i += 1
}
out
}
def find3:(IMat, IMat, DMat) = { val (ii, jj, vv) = gfind3 ; (ii, jj, DMat(vv)) }
override def apply(a:IMat):DMat = DMat(gapply(a))
override def apply(a:IMat, b:IMat):DMat = DMat(gapply(a, b))
override def apply(a:IMat, b:Int):DMat = DMat(gapply(a, b))
override def apply(a:Int, b:IMat):DMat = DMat(gapply(a, b))
override def apply(a:Mat):DMat = DMat(gapply(a.asInstanceOf[IMat]))
override def apply(a:Mat, b:Mat):DMat = DMat(gapply(a.asInstanceOf[IMat], b.asInstanceOf[IMat]))
override def apply(a:Mat, b:Int):DMat = DMat(gapply(a.asInstanceOf[IMat], b))
override def apply(a:Int, b:Mat):DMat = DMat(gapply(a, b.asInstanceOf[IMat]))
override def colslice(a:Int, b:Int, out:Mat) = DMat(gcolslice(a, b, out, Mat.oneBased))
override def colslice(a:Int, b:Int, out:Mat, c:Int) = DMat(gcolslice(a, b, out, c))
override def rowslice(a:Int, b:Int, out:Mat) = DMat(growslice(a, b, out, Mat.oneBased))
override def rowslice(a:Int, b:Int, out:Mat, c:Int) = DMat(growslice(a, b, out, c))
override def update(i:Int, b:Double):DMat = {_update(i, b); this}
override def update(i:Int, j:Int, b:Double):DMat = {_update(i, j, b); this}
override def update(i:Int, b:Float):DMat = {_update(i, b.toDouble); this}
override def update(i:Int, j:Int, b:Float):DMat = {_update(i, j, b.toDouble); this}
override def update(i:Int, b:Int):DMat = {_update(i, b.toDouble); this}
override def update(i:Int, j:Int, b:Int):DMat = {_update(i, j, b.toDouble); this}
override def update(iv:IMat, b:Double):DMat = DMat(_update(iv, b))
override def update(iv:IMat, jv:IMat, b:Double):DMat = DMat(_update(iv, jv, b))
override def update(i:Int, jv:IMat, b:Double):DMat = DMat(_update(IMat.ielem(i), jv, b))
override def update(iv:IMat, j:Int, b:Double):DMat = DMat(_update(iv, IMat.ielem(j), b))
override def update(iv:Mat, b:Double):DMat = DMat(_update(iv.asInstanceOf[IMat], b))
override def update(iv:Mat, jv:Mat, b:Double):DMat = DMat(_update(iv.asInstanceOf[IMat], jv.asInstanceOf[IMat], b))
override def update(i:Int, jv:Mat, b:Double):DMat = DMat(_update(IMat.ielem(i), jv.asInstanceOf[IMat], b))
override def update(iv:Mat, j:Int, b:Double):DMat = DMat(_update(iv.asInstanceOf[IMat], IMat.ielem(j), b))
override def update(iv:IMat, b:Float):DMat = DMat(_update(iv, b.toDouble))
override def update(iv:IMat, jv:IMat, b:Float):DMat = DMat(_update(iv, jv, b.toDouble))
override def update(i:Int, jv:IMat, b:Float):DMat = DMat(_update(IMat.ielem(i), jv, b.toDouble))
override def update(iv:IMat, j:Int, b:Float):DMat = DMat(_update(iv, IMat.ielem(j), b.toDouble))
override def update(iv:Mat, b:Float):DMat = DMat(_update(iv.asInstanceOf[IMat], b.toDouble))
override def update(iv:Mat, jv:Mat, b:Float):DMat = DMat(_update(iv.asInstanceOf[IMat], jv.asInstanceOf[IMat], b.toDouble))
override def update(i:Int, jv:Mat, b:Float):DMat = DMat(_update(IMat.ielem(i), jv.asInstanceOf[IMat], b.toDouble))
override def update(iv:Mat, j:Int, b:Float):DMat = DMat(_update(iv.asInstanceOf[IMat], IMat.ielem(j), b.toDouble))
override def update(iv:IMat, b:Int):DMat = DMat(_update(iv, b.toDouble))
override def update(iv:IMat, jv:IMat, b:Int):DMat = DMat(_update(iv, jv, b.toDouble))
override def update(i:Int, jv:IMat, b:Int):DMat = DMat(_update(IMat.ielem(i), jv, b.toDouble))
override def update(iv:IMat, j:Int, b:Int):DMat = DMat(_update(iv, IMat.ielem(j), b.toDouble))
override def update(iv:Mat, b:Int):DMat = DMat(_update(iv.asInstanceOf[IMat], b.toDouble))
override def update(iv:Mat, jv:Mat, b:Int):DMat = DMat(_update(iv.asInstanceOf[IMat], jv.asInstanceOf[IMat], b.toDouble))
override def update(i:Int, jv:Mat, b:Int):DMat = DMat(_update(IMat.ielem(i), jv.asInstanceOf[IMat], b.toDouble))
override def update(iv:Mat, j:Int, b:Int):DMat = DMat(_update(iv.asInstanceOf[IMat], IMat.ielem(j), b.toDouble))
def update(iv:IMat, b:DMat):DMat = DMat(_update(iv, b))
def update(iv:IMat, jv:IMat, b:DMat):DMat = DMat(_update(iv, jv, b))
def update(iv:IMat, j:Int, b:DMat):DMat = DMat(_update(iv, IMat.ielem(j), b))
def update(i:Int, jv:IMat, b:DMat):DMat = DMat(_update(IMat.ielem(i), jv, b))
override def update(iv:IMat, b:Mat):DMat = DMat(_update(iv, b.asInstanceOf[DMat]))
override def update(iv:IMat, jv:IMat, b:Mat):DMat = DMat(_update(iv, jv, b.asInstanceOf[DMat]))
override def update(iv:IMat, j:Int, b:Mat):DMat = DMat(_update(iv, IMat.ielem(j), b.asInstanceOf[DMat]))
override def update(i:Int, jv:IMat, b:Mat):DMat = DMat(_update(IMat.ielem(i), jv, b.asInstanceOf[DMat]))
override def update(iv:Mat, b:Mat):DMat = DMat(_update(iv.asInstanceOf[IMat], b.asInstanceOf[DMat]))
override def update(iv:Mat, jv:Mat, b:Mat):DMat = DMat(_update(iv.asInstanceOf[IMat], jv.asInstanceOf[IMat], b.asInstanceOf[DMat]))
override def update(iv:Mat, j:Int, b:Mat):DMat = DMat(_update(iv.asInstanceOf[IMat], IMat.ielem(j), b.asInstanceOf[DMat]))
override def update(i:Int, jv:Mat, b:Mat):DMat = DMat(_update(IMat.ielem(i), jv.asInstanceOf[IMat], b.asInstanceOf[DMat]))
def quickdists(b:DMat) = {
val out = DMat(ncols, b.ncols)
val bd = b.data
var i = 0
while (i < ncols) {
var j = 0
while (j < b.ncols) {
var k = 0
var sum = 0.0
while (k < nrows) {
val indx1 = k + i*nrows
val indx2 = k + j*nrows
sum += (data(indx1) - bd(indx2))*(data(indx1) - bd(indx2))
k += 1
}
out.data(i+j*ncols) = sum
j += 1
}
i += 1
}
Mat.nflops += 3L * nrows * ncols * b.ncols
out
}
def ddMatOp(b: Mat, f:(Double, Double) => Double, out:Mat) =
b match {
case bb:DMat => DMat(ggMatOp(bb, f, out))
case _ => throw new RuntimeException("unsupported operation "+f+" on "+this+" and "+b)
}
def ddMatOpv(b: Mat, f:(Array[Double],Int,Int,Array[Double],Int,Int,Array[Double],Int,Int,Int) => Double, out:Mat) =
b match {
case bb:DMat => DMat(ggMatOpv(bb, f, out))
case _ => throw new RuntimeException("unsupported operation "+f+" on "+this+" and "+b)
}
def ddMatOpScalar(b: Double, f:(Double, Double) => Double, out:Mat) = DMat(ggMatOpScalar(b, f, out))
def ddMatOpScalarv(b: Double, f:(Array[Double],Int,Int,Array[Double],Int,Int,Array[Double],Int,Int,Int) => Double, out:Mat) =
DMat(ggMatOpScalarv(b, f, out))
def ddReduceOp(n:Int, f1:(Double) => Double, f2:(Double, Double) => Double, out:Mat) = DMat(ggReduceOp(n, f1, f2, out))
def ddReduceOpv(n:Int, f1:(Double) => Double, f2:(Array[Double],Int,Int,Array[Double],Int,Int,Array[Double],Int,Int,Int) => Double, out:Mat) =
DMat(ggReduceOpv(n, f1, f2, out))
def ddReduceAll(n:Int, f1:(Double) => Double, f2:(Double, Double) => Double, out:Mat) =
DMat(ggReduceAll(n, f1, f2, out))
def ddReduceAllv(n:Int, f:(Array[Double],Int,Int,Array[Double],Int,Int,Array[Double],Int,Int,Int) => Double, out:Mat) =
DMat(ggReduceAllv(n, f, out))
override def printOne(i:Int):String = {
val v = data(i)
if (v % 1 == 0 && math.abs(v) < 1e12) {
"%d" format v.longValue
} else {
"%.5g" format v
}
}
override def copyTo(a:Mat) = {
if (nrows != a.nrows || ncols != a.ncols) {
throw new RuntimeException("DMat copyTo dimensions mismatch")
}
a match {
case out:DMat => System.arraycopy(data, 0, out.data, 0, length)
case out:FMat => {Mat.copyToFloatArray(data, 0, out.data, 0, length)}
case out:IMat => {Mat.copyToIntArray(data, 0, out.data, 0, length)}
}
a
}
override def copy = {
val out = DMat.newOrCheckDMat(nrows, ncols, null, GUID, "copy".##)
System.arraycopy(data, 0, out.data, 0, length)
out
}
override def newcopy = {
val out = DMat(nrows, ncols)
System.arraycopy(data, 0, out.data, 0, length)
out
}
override def zeros(nr:Int, nc:Int) = {
val out = DMat(nr, nc)
out
}
override def ones(nr:Int, nc:Int) = {
val out = DMat(nr, nc)
Arrays.fill(out.data, 1)
out
}
override def izeros(m:Int, n:Int) = {
IMat.izeros(m,n)
}
override def iones(m:Int, n:Int) = {
IMat.iones(m,n)
}
override def clearUpper(off:Int) = setUpper(0, off)
override def clearUpper = setUpper(0, 0)
override def clearLower(off:Int) = setLower(0, off)
override def clearLower = setLower(0, 0)
def fDMult(aa:DMat, outmat:Mat):DMat = {
if (ncols == 1 && nrows == 1) {
val out = DMat.newOrCheckDMat(aa.nrows, aa.ncols, outmat, GUID, aa.GUID, "dMult".##)
Mat.nflops += aa.length
var i = 0
val dvar = data(0)
while (i < aa.length) {
out.data(i) = dvar * aa.data(i)
i += 1
}
out
} else if (aa.ncols == 1 && aa.nrows == 1) {
val out = DMat.newOrCheckDMat(nrows, ncols, outmat, GUID, aa.GUID, "dMult".##)
Mat.nflops += length
var i = 0
val dvar = aa.data(0)
while (i < length) {
out.data(i) = dvar * data(i)
i += 1
}
out
} else if (ncols == aa.nrows) {
val out = DMat.newOrCheckDMat(nrows, aa.ncols, outmat, GUID, aa.GUID, "dMult".##)
Mat.nflops += 2 * length.toLong * aa.ncols.toLong
if (!Mat.useMKL) {
out.clear
var i = 0
while (i < aa.ncols) {
var j = 0
while (j < aa.nrows) {
var k = 0
val dval = aa.data(j + i*ncols)
while (k < nrows) {
out.data(k+i*nrows) += data(k+j*nrows)*dval
k += 1
}
j += 1
}
i += 1
}
} else {
if (nrows == 1) {
dgemv(ORDER.ColMajor, TRANSPOSE.Trans, aa.nrows, aa.ncols, 1.0, aa.data, aa.nrows, data, 1, 0, out.data, 1)
} else if (aa.ncols == 1) {
dgemv(ORDER.ColMajor, TRANSPOSE.NoTrans, nrows, ncols, 1.0, data, nrows, aa.data, 1, 0, out.data, 1)
} else {
dgemm(ORDER.ColMajor, TRANSPOSE.NoTrans, TRANSPOSE.NoTrans,
nrows, aa.ncols, ncols, 1.0, data, nrows, aa.data, aa.nrows, 0, out.data, nrows)
}
}
out
} else throw new RuntimeException("dimensions mismatch")
}
def fSMult(ss:SDMat, outmat:Mat):DMat = {
if (ncols != ss.nrows) {
throw new RuntimeException("dimensions mismatch")
} else {
val out = DMat.newOrCheckDMat(nrows, ss.ncols, outmat, GUID, ss.GUID, "fSMult".##)
Mat.nflops += 2 * nrows.toLong * ss.nnz
val ioff = Mat.ioneBased;
val nr = ss.nrows
val nc = ss.ncols
val kk = ncols
var jc0:Array[Int] = null
var ir0:Array[Int] = null
if (ioff == 0) {
jc0 = SparseMat.incInds(ss.jc)
ir0 = SparseMat.incInds(ss.ir)
} else {
jc0 = ss.jc
ir0 = ss.ir
}
if (nrows == 1 && Mat.useMKL) {
dcscmv("T", nr, nc, 1.0, "GLNF", ss.data, ir0, jc0, data, 0.0, out.data)
out
} else {
out.clear
if (nrows < 20 || !Mat.useMKL) {
var i = 0
while (i < ss.ncols) {
var j = ss.jc(i) - ioff
while (j < ss.jc(i+1)-ioff) {
val dval = ss.data(j)
val ival = ss.ir(j) - ioff
var k = 0
while (k < nrows) {
out.data(k+i*nrows) += data(k+ival*nrows)*dval
k += 1
}
j += 1
}
i += 1
}
} else {
dmcscm(nrows, ss.ncols, data, nrows, ss.data, ss.ir, ss.jc, out.data, nrows)
// dcsrmm("N", ss.ncols, nrows, ncols, 1.0, "GLNF", ss.data, ss.ir, ss.jc, data, ncols, 0, out.data, out.ncols)
}
}
out
}
}
def multT(a:SDMat, outmat:Mat):DMat = {
import edu.berkeley.bid.CBLAS._
if (ncols == a.nrows) {
val out = DMat.newOrCheckDMat(nrows, a.ncols, outmat, GUID, a.GUID, "multT".##)
if (outmat.asInstanceOf[AnyRef] != null) out.clear
dmcsrm(nrows, a.ncols, data, nrows, a.data, a.ir, a.jc, out.data, nrows)
Mat.nflops += 2L * a.nnz * nrows
out
} else {
throw new RuntimeException("xT dimensions mismatch")
}
}
def multT(a:DMat, outmat:Mat):DMat = {
if (ncols == a.ncols) {
val out = DMat.newOrCheckDMat(nrows, a.nrows, outmat, GUID, a.GUID, "multT".##)
dgemm(ORDER.ColMajor, TRANSPOSE.NoTrans, TRANSPOSE.Trans,
nrows, a.nrows, ncols, 1.0f, data, nrows, a.data, a.nrows, 0, out.data, out.nrows)
Mat.nflops += 2L * length * a.nrows
out
} else {
throw new RuntimeException("xT dimensions mismatch")
}
}
def Tmult(a:DMat, outmat:Mat):DMat = {
if (nrows == a.nrows) {
val out = DMat.newOrCheckDMat(ncols, a.ncols, outmat, GUID, a.GUID, "Tmult".##)
dgemm(ORDER.ColMajor, TRANSPOSE.Trans, TRANSPOSE.NoTrans,
ncols, a.ncols, nrows, 1.0f, data, nrows, a.data, a.nrows, 0, out.data, out.nrows)
Mat.nflops += 2L * length * a.nrows
out
} else {
throw new RuntimeException("Tx dimensions mismatch")
}
}
/*
* Very slow, row-and-column multiply
*/
def sDMult(a:Mat):DMat =
a match {
case aa:DMat => {
if (ncols == a.nrows) {
val out = DMat.newOrCheckDMat(nrows, a.ncols, null, GUID, a.GUID, "dMult".##)
var i = 0
while (i < a.ncols) {
var j = 0
while (j < nrows) {
var k = 0
var sum = 0.0
while (k < ncols) {
sum += data(j+k*nrows) * aa.data(k+i*a.nrows)
k += 1
}
out.data(j + i*out.nrows) = sum
j += 1
}
i += 1
}
out
} else throw new RuntimeException("dimensions mismatch")
}
case _ => throw new RuntimeException("argument must be dense")
}
/*
* Weka multiply
*/
def wDMult(a:Mat, omat:Mat):DMat =
a match {
case aa:DMat => {
if (ncols == a.nrows) {
val out = DMat.newOrCheckDMat(nrows, a.ncols, null, GUID, a.GUID, "dMult".##)
val tmp = new Array[Double](ncols)
var i = 0
while (i < nrows) {
var j = 0
while (j < ncols) {
tmp(j) = data(i+j*nrows)
j += 1
}
j = 0
while (j < a.ncols) {
var k = 0
var sum = 0.0
while (k < ncols) {
sum += tmp(k) * aa.data(k+i*a.nrows)
k += 1
}
out.data(j + i*out.nrows) = sum
j += 1
}
i += 1
}
out
} else throw new RuntimeException("dimensions mismatch")
}
case _ => throw new RuntimeException("argument must be dense")
}
def ddot(a : DMat):Double =
if (nrows != a.nrows || ncols != a.ncols) {
throw new RuntimeException("ddot dims not compatible")
} else {
Mat.nflops += 2 * length
var v = 0.0
var i = 0
while (i < length){
v += data(i) * a.data(i)
i += 1
}
v
}
override def ddot(a:Mat):Double = ddot(a.asInstanceOf[DMat])
def dot(a:DMat, omat:Mat):DMat = {
if (nrows != a.nrows || ncols != a.ncols) {
throw new RuntimeException("dot dims not compatible")
} else {
val out = DMat.newOrCheckDMat(1, ncols, null, GUID, a.GUID, "dot".##)
if (!Mat.useMKL || length < 512) {
gdot(a, out)
} else {
Mat.nflops += 2L * length
ddotm(nrows, ncols, data, nrows, a.data, nrows, out.data)
}
out
}
}
def dot(a:DMat):DMat = dot(a, null)
def dotr(a:DMat, omat:Mat):DMat = {
if (nrows != a.nrows || ncols != a.ncols) {
throw new RuntimeException("dotr dims not compatible")
} else {
val out = DMat.newOrCheckDMat(nrows, 1, omat, GUID, a.GUID, "dotr".##)
out.clear
if (!Mat.useMKL || length < 512) {
gdotr(a, out)
} else {
Mat.nflops += 2L * length
ddotr(nrows, ncols, data, nrows, a.data, nrows, out.data)
}
out
}
}
def dotr(a:DMat):DMat = dotr(a, null)
def kron(b: DMat, oldmat:Mat):DMat = {
val out = DMat.newOrCheckDMat(nrows*b.nrows, ncols*b.ncols, oldmat, GUID, b.GUID, "kron".##)
var i = 0
while (i < ncols){
var j = 0
while (j < b.ncols) {
var k = 0
while (k < nrows) {
var m = 0
while (m < b.nrows) {
out.data(m + b.nrows*(k + nrows*(j + b.ncols*i))) = data(k + i*nrows) * b.data(m + j*b.nrows)
m += 1
}
k += 1
}
j += 1
}
i += 1
}
Mat.nflops += 1L * nrows * ncols * b.nrows * b.ncols
out
}
def kron(a:DMat):DMat = kron(a, null)
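  /* Hedged example of the Kronecker product above (comment only; values illustrative):
   *   val a = DMat(1, 2, Array(1.0, 2.0))     // row [1 2]
   *   val b = DMat(1, 2, Array(10.0, 20.0))   // row [10 20]
   *   a ** b                                  // row [10 20 20 40]: each a(i,j) scales a copy of b
   */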
def solvel(a0:Mat):DMat =
a0 match {
case a:DMat => {
Mat.nflops += 2L*a.nrows*a.nrows*a.nrows/3 + 2L*nrows*a.nrows*a.nrows
if (a.nrows != a.ncols || ncols != a.nrows) {
throw new RuntimeException("solvel needs a square matrix")
} else {
val out = DMat.newOrCheckDMat(nrows, ncols, null, GUID, a.GUID, "solvel".##)
val tmp = DMat.newOrCheckDMat(a.nrows, a.ncols, null, GUID, a.GUID, "solvel1".##)
System.arraycopy(a.data, 0, tmp.data, 0, a.length)
System.arraycopy(data, 0, out.data, 0, length)
val ipiv = IMat.newOrCheckIMat(1, ncols, null, GUID, a.GUID, "solvel2".##).data
dgetrf(ORDER.RowMajor, ncols, ncols, tmp.data, ncols, ipiv)
dgetrs(ORDER.RowMajor, "N", ncols, nrows, tmp.data, ncols, ipiv, out.data, nrows)
out
}
}
case _ => throw new RuntimeException("unsupported arg to solvel "+a0)
}
def solver(a0:Mat):DMat =
a0 match {
case a:DMat => {
Mat.nflops += 2L*nrows*nrows*nrows/3 + 2L*nrows*nrows*a.ncols
if (nrows != ncols || ncols != a.nrows) {
throw new RuntimeException("solver needs a square matrix")
} else {
val out = DMat.newOrCheckDMat(a.nrows, a.ncols, null, GUID, a.GUID, "solver".##)
val tmp = DMat.newOrCheckDMat(nrows, ncols, null, GUID, a.GUID, "solver1".##)
System.arraycopy(data, 0, tmp.data, 0, length)
System.arraycopy(a.data, 0, out.data, 0, a.length)
val ipiv = IMat.newOrCheckIMat(1, ncols, null, GUID, a.GUID, "solver2".##).data
dgetrf(ORDER.ColMajor, ncols, ncols, tmp.data, ncols, ipiv)
dgetrs(ORDER.ColMajor, "N", ncols, a.ncols, tmp.data, nrows, ipiv, out.data, nrows)
out
}
}
case _ => throw new RuntimeException("unsupported arg to solver "+a0)
}
def inv:DMat = {
import edu.berkeley.bid.LAPACK._
if (nrows != ncols) {
throw new RuntimeException("inv method needs a square matrix")
} else {
val out = DMat.newOrCheckDMat(nrows, ncols, null, GUID, "inv".##)
System.arraycopy(data, 0, out.data, 0, length)
val ipiv = IMat.newOrCheckIMat(1, ncols, null, GUID, "inv2".##).data
dgetrf(ORDER.ColMajor, nrows, ncols, out.data, nrows, ipiv)
dgetri(ORDER.ColMajor, nrows, out.data, nrows, ipiv)
out
}
}
override def clear = {
Arrays.fill(this.data,0,length,0)
this
}
def cumsumKeyLinear(keys:DMat, out:DMat, istart:Int, iend:Int) = {
var i = istart;
var sum = 0.0;
while (i < iend) {
sum += data(i);
out.data(i) = sum;
if (i + 1 < iend && keys(i) != keys(i+1)) sum = 0;
i += 1;
}
}
def cumsumByKey(keys:DMat, omat:Mat):DMat = {
if (nrows != keys.nrows || ncols != keys.ncols)
throw new RuntimeException("cumsumKey dimensions mismatch");
val out = DMat.newOrCheckDMat(nrows, ncols, omat, GUID, keys.GUID, "cumsumKey".##);
if (nrows == 1) {
cumsumKeyLinear(keys, out, 0, length);
} else {
var i = 0;
while (i < ncols) {
cumsumKeyLinear(keys, out, i*nrows, (i+1)*nrows);
i += 1;
}
}
out
}
def cumsumByKey(keys:DMat):DMat = cumsumByKey(keys, null);
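  /* Hedged example of cumsumByKey above (comment only): the running sum resets whenever the key
   * changes, column-wise for matrices and element-wise along a row vector, e.g.
   *   values 1 2 3 4 with keys 0 0 1 1  =>  cumulative sums 1 3 3 7
   */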
def cummaxKeyLinear(keys:DMat, out:DMat, istart:Int, iend:Int) = {
var i = istart;
var sum = Double.MinValue;
while (i < iend) {
sum = math.max(sum, data(i));
out.data(i) = sum;
if (i + 1 < iend && keys(i) != keys(i+1)) sum = Double.MinValue;
i += 1;
}
}
def cummaxByKey(keys:DMat, omat:Mat):DMat = {
if (nrows != keys.nrows || ncols != keys.ncols)
throw new RuntimeException("cummaxKey dimensions mismatch");
val out = DMat.newOrCheckDMat(nrows, ncols, omat, GUID, keys.GUID, "cummaxKey".##);
if (nrows == 1) {
cummaxKeyLinear(keys, out, 0, length);
} else {
var i = 0;
while (i < ncols) {
cummaxKeyLinear(keys, out, i*nrows, (i+1)*nrows);
i += 1;
}
}
out
}
def cummaxByKey(keys:DMat):DMat = cummaxByKey(keys, null);
def cumminKeyLinear(keys:DMat, out:DMat, istart:Int, iend:Int) = {
var i = istart;
var sum = Double.MaxValue;
while (i < iend) {
sum = math.min(sum, data(i));
out.data(i) = sum;
if (i + 1 < iend && keys(i) != keys(i+1)) sum = Double.MaxValue;
i += 1;
}
}
def cumminByKey(keys:DMat, omat:Mat):DMat = {
if (nrows != keys.nrows || ncols != keys.ncols)
throw new RuntimeException("cumminKey dimensions mismatch");
val out = DMat.newOrCheckDMat(nrows, ncols, omat, GUID, keys.GUID, "cumminKey".##);
if (nrows == 1) {
cumminKeyLinear(keys, out, 0, length);
} else {
var i = 0;
while (i < ncols) {
cumminKeyLinear(keys, out, i*nrows, (i+1)*nrows);
i += 1;
}
}
out
}
def cumminByKey(keys:DMat):DMat = cumminByKey(keys, null);
def reverseLinear(out:DMat, istart:Int, iend:Int) = {
var i = istart;
var sum = 0f;
while (i < iend) {
out.data(istart + iend - i - 1) = data(i)
i += 1;
}
}
def _reverse(omat:Mat):DMat = {
val out = DMat.newOrCheckDMat(nrows, ncols, omat, GUID, "reverse".##);
if (nrows == 1) {
reverseLinear(out, 0, length);
} else {
var i = 0;
while (i < ncols) {
reverseLinear(out, i*nrows, (i+1)*nrows);
i += 1;
}
}
out
}
def reverse:DMat = _reverse(null);
def reverse(omat:Mat):DMat = _reverse(omat);
override def recycle(nr:Int, nc:Int, nnz:Int):DMat = {
if (nrows == nr && nc == ncols) {
this
} else if (data.size >= nr*nc) {
new DMat(nr, nc, data)
} else {
DMat(nr, nc, new Array[Double]((nr*nc*Mat.recycleGrow).toInt))
}
}
/*
* Routines to operate on two DMats. These are the compute routines.
*/
override def unary_- () = ddMatOpScalarv(-1, DMat.vecMulFun, null)
def * (b : DMat) = fDMult(b, null)
def * (b : SDMat) = fSMult(b, null)
def *^ (b : SDMat) = multT(b, null)
def xT (b : SDMat) = multT(b, null)
def *^ (b : DMat) = multT(b, null)
def xT (b : DMat) = multT(b, null)
def Tx (b : DMat) = Tmult(b, null)
def ^* (b : DMat) = Tmult(b, null)
def /< (b : DMat) = solvel(b)
def \\ (b : DMat) = solver(b)
def ^ (b : DMat) = ddMatOp(b, DMat.powFun, null)
def + (b : DMat) = ddMatOpv(b, DMat.vecAddFun, null)
def - (b : DMat) = ddMatOpv(b, DMat.vecSubFun, null)
def *@ (b : DMat) = ddMatOpv(b, DMat.vecMulFun, null)
def / (b : DMat) = ddMatOpv(b, DMat.vecDivFun, null)
def ∘ (b : DMat) = ddMatOpv(b, DMat.vecMulFun, null)
def ∙ (b : DMat):DMat = dot(b)
def ∙→ (b : DMat):DMat = dotr(b)
def ∙∙ (b : DMat):Double = ddot(b)
def ** (b : DMat) = kron(b, null)
def ⊗ (b : DMat) = kron(b, null)
def > (b : DMat) = ddMatOp(b, DMat.gtFun, null)
def < (b : DMat) = ddMatOp(b, DMat.ltFun, null)
def == (b : DMat) = ddMatOp(b, DMat.eqFun, null)
def === (b : DMat) = ddMatOp(b, DMat.eqFun, null)
def >= (b : DMat) = ddMatOp(b, DMat.geFun, null)
def <= (b : DMat) = ddMatOp(b, DMat.leFun, null)
def != (b : DMat) = ddMatOp(b, DMat.neFun, null)
override def * (b : Double) = fDMult(DMat.delem(b), null)
override def + (b : Double) = ddMatOpScalarv(b, DMat.vecAddFun, null)
override def - (b : Double) = ddMatOpScalarv(b, DMat.vecSubFun, null)
override def *@ (b : Double) = ddMatOpScalarv(b, DMat.vecMulFun, null)
override def ∘ (b : Double) = ddMatOpScalarv(b, DMat.vecMulFun, null)
override def / (b : Double) = ddMatOpScalarv(b, DMat.vecDivFun, null)
override def ^ (b : Double) = ddMatOpScalar(b, DMat.powFun, null)
override def > (b : Double) = ddMatOpScalar(b, DMat.gtFun, null)
override def < (b : Double) = ddMatOpScalar(b, DMat.ltFun, null)
override def == (b : Double) = ddMatOpScalar(b, DMat.eqFun, null)
override def >= (b : Double) = ddMatOpScalar(b, DMat.geFun, null)
override def <= (b : Double) = ddMatOpScalar(b, DMat.leFun, null)
override def != (b : Double) = ddMatOpScalar(b, DMat.neFun, null)
override def * (b : Float) = fDMult(DMat.delem(b), null)
override def + (b : Float) = ddMatOpScalarv(b, DMat.vecAddFun, null)
override def - (b : Float) = ddMatOpScalarv(b, DMat.vecSubFun, null)
override def *@ (b : Float) = ddMatOpScalarv(b, DMat.vecMulFun, null)
override def ∘ (b : Float) = ddMatOpScalarv(b, DMat.vecMulFun, null)
override def / (b : Float) = ddMatOpScalarv(b, DMat.vecDivFun, null)
override def ^ (b : Float) = ddMatOpScalar(b, DMat.powFun, null)
override def > (b : Float) = ddMatOpScalar(b, DMat.gtFun, null)
override def < (b : Float) = ddMatOpScalar(b, DMat.ltFun, null)
override def == (b : Float) = ddMatOpScalar(b, DMat.eqFun, null)
override def >= (b : Float) = ddMatOpScalar(b, DMat.geFun, null)
override def <= (b : Float) = ddMatOpScalar(b, DMat.leFun, null)
override def != (b : Float) = ddMatOpScalar(b, DMat.neFun, null)
def \ (b: DMat) = DMat(ghorzcat(b))
def \ (b:Double) = DMat(ghorzcat(DMat.delem(b)))
def on (b: DMat) = DMat(gvertcat(b))
def on (b: Double) = vertcat(DMat.delem(b))
def ~ (b : DMat):DPair = new DPair(this, b)
def ~ (b : SDMat):SDPair = new SDPair(this, b)
override def ~ (b: Mat):Pair = b match {
case db:DMat => new DPair(this, db)
case sb:SDMat => new SDPair(this, sb)
case _ => throw new RuntimeException("wrong types for operator ~ ")
}
/*
* Specialize to IMats to help the type system.
*/
def * (b : IMat) = Mop_Times.op(this, b, null)
def *^ (b : IMat) = Mop_TimesT.op(this, b, null)
def xT (b : IMat) = Mop_TimesT.op(this, b, null)
def Tx (b : IMat) = Mop_TTimes.op(this, b, null)
def ^* (b : IMat) = Mop_TTimes.op(this, b, null)
def + (b : IMat) = Mop_Plus.op(this, b, null)
def - (b : IMat) = Mop_Minus.op(this, b, null)
def *@ (b : IMat) = Mop_ETimes.op(this, b, null)
def ∘ (b : IMat) = Mop_ETimes.op(this, b, null)
def /< (b : IMat) = Mop_Div.op(this, b, null)
def \\ (b : IMat) = Mop_RSolve.op(this, b, null)
def ◁ (b : IMat) = Mop_Div.op(this, b, null)
def ▷ (b : IMat) = Mop_RSolve.op(this, b, null)
def / (b : IMat) = Mop_EDiv.op(this, b, null)
def ^ (b : IMat) = Mop_Pow.op(this, b, null)
def ∙ (b : IMat) = Mop_Dot.op(this, b, null)
def ∙→ (b : IMat) = Mop_Dotr.op(this, b, null)
def dot (b : IMat) = Mop_Dot.op(this, b, null)
def dotr(b : IMat) = Mop_Dotr.op(this, b, null)
def ** (b : IMat) = Mop_Kron.op(this, b, null)
def ⊗ (b : IMat) = Mop_Kron.op(this, b, null)
def \ (b : IMat) = Mop_HCat.op(this, b, null)
def on (b : IMat) = Mop_VCat.op(this, b, null)
def > (b : IMat) = Mop_GT.op(this, b, null)
def < (b : IMat) = Mop_LT.op(this, b, null)
def == (b : IMat) = Mop_EQ.op(this, b, null)
def === (b : IMat) = Mop_EQ.op(this, b, null)
def >= (b : IMat) = Mop_GE.op(this, b, null)
def <= (b : IMat) = Mop_LE.op(this, b, null)
def != (b : IMat) = Mop_NE.op(this, b, null)
/*
* Specialize to FMats to help the type system.
*/
def * (b : FMat) = Mop_Times.op(this, b, null)
def *^ (b : FMat) = Mop_TimesT.op(this, b, null)
def xT (b : FMat) = Mop_TimesT.op(this, b, null)
def Tx (b : FMat) = Mop_TTimes.op(this, b, null)
def ^* (b : FMat) = Mop_TTimes.op(this, b, null)
def + (b : FMat) = Mop_Plus.op(this, b, null)
def - (b : FMat) = Mop_Minus.op(this, b, null)
def *@ (b : FMat) = Mop_ETimes.op(this, b, null)
def ∘ (b : FMat) = Mop_ETimes.op(this, b, null)
def /< (b : FMat) = Mop_Div.op(this, b, null)
def \\\\ (b : FMat) = Mop_RSolve.op(this, b, null)
def ◁ (b : FMat) = Mop_Div.op(this, b, null)
def ▷ (b : FMat) = Mop_RSolve.op(this, b, null)
def / (b : FMat) = Mop_EDiv.op(this, b, null)
def ^ (b : FMat) = Mop_Pow.op(this, b, null)
def ∙ (b : FMat) = Mop_Dot.op(this, b, null)
def ∙→ (b : FMat) = Mop_Dotr.op(this, b, null)
def dot (b : FMat) = Mop_Dot.op(this, b, null)
def dotr(b : FMat) = Mop_Dotr.op(this, b, null)
def ** (b : FMat) = Mop_Kron.op(this, b, null)
def ⊗ (b : FMat) = Mop_Kron.op(this, b, null)
def \\ (b : FMat) = Mop_HCat.op(this, b, null)
def on (b : FMat) = Mop_VCat.op(this, b, null)
def > (b : FMat) = Mop_GT.op(this, b, null)
def < (b : FMat) = Mop_LT.op(this, b, null)
def == (b : FMat) = Mop_EQ.op(this, b, null)
def === (b : FMat) = Mop_EQ.op(this, b, null)
def >= (b : FMat) = Mop_GE.op(this, b, null)
def <= (b : FMat) = Mop_LE.op(this, b, null)
def != (b : FMat) = Mop_NE.op(this, b, null)
/*
* Specialize to CMats to help the type system.
*/
def * (b : CMat) = Mop_Times.op(this, b, null)
def *^ (b : CMat) = Mop_TimesT.op(this, b, null)
def xT (b : CMat) = Mop_TimesT.op(this, b, null)
def Tx (b : CMat) = Mop_TTimes.op(this, b, null)
def ^* (b : CMat) = Mop_TTimes.op(this, b, null)
def + (b : CMat) = Mop_Plus.op(this, b, null)
def - (b : CMat) = Mop_Minus.op(this, b, null)
def *@ (b : CMat) = Mop_ETimes.op(this, b, null)
def ∘ (b : CMat) = Mop_ETimes.op(this, b, null)
def /< (b : CMat) = Mop_Div.op(this, b, null)
def \\\\ (b : CMat) = Mop_RSolve.op(this, b, null)
def ◁ (b : CMat) = Mop_Div.op(this, b, null)
def ▷ (b : CMat) = Mop_RSolve.op(this, b, null)
def / (b : CMat) = Mop_EDiv.op(this, b, null)
def ^ (b : CMat) = Mop_Pow.op(this, b, null)
def ∙ (b : CMat) = Mop_Dot.op(this, b, null)
def ∙→ (b : CMat) = Mop_Dotr.op(this, b, null)
def dot (b : CMat) = Mop_Dot.op(this, b, null)
def dotr(b : CMat) = Mop_Dotr.op(this, b, null)
def ** (b : CMat) = Mop_Kron.op(this, b, null)
def ⊗ (b : CMat) = Mop_Kron.op(this, b, null)
def \\ (b : CMat) = Mop_HCat.op(this, b, null)
def on (b : CMat) = Mop_VCat.op(this, b, null)
def > (b : CMat) = Mop_GT.op(this, b, null)
def < (b : CMat) = Mop_LT.op(this, b, null)
def == (b : CMat) = Mop_EQ.op(this, b, null)
def === (b : CMat) = Mop_EQ.op(this, b, null)
def >= (b : CMat) = Mop_GE.op(this, b, null)
def <= (b : CMat) = Mop_LE.op(this, b, null)
def != (b : CMat) = Mop_NE.op(this, b, null)
/*
* Specialize to GMats to help the type system.
*/
def * (b : GMat) = Mop_Times.op(this, b, null)
def *^ (b : GMat) = Mop_TimesT.op(this, b, null)
def xT (b : GMat) = Mop_TimesT.op(this, b, null)
def Tx (b : GMat) = Mop_TTimes.op(this, b, null)
def ^* (b : GMat) = Mop_TTimes.op(this, b, null)
def + (b : GMat) = Mop_Plus.op(this, b, null)
def - (b : GMat) = Mop_Minus.op(this, b, null)
def *@ (b : GMat) = Mop_ETimes.op(this, b, null)
def ∘ (b : GMat) = Mop_ETimes.op(this, b, null)
def / (b : GMat) = Mop_EDiv.op(this, b, null)
def /< (b : GMat) = Mop_Div.op(this, b, null)
def \\\\ (b : GMat) = Mop_RSolve.op(this, b, null)
def ◁ (b : GMat) = Mop_Div.op(this, b, null)
def ▷ (b : GMat) = Mop_RSolve.op(this, b, null)
def ^ (b : GMat) = Mop_Pow.op(this, b, null)
def ∙ (b : GMat) = Mop_Dot.op(this, b, null)
def ∙→ (b : GMat) = Mop_Dotr.op(this, b, null)
def dot (b : GMat) = Mop_Dot.op(this, b, null)
def dotr(b : GMat) = Mop_Dotr.op(this, b, null)
def ** (b : GMat) = Mop_Kron.op(this, b, null)
def ⊗ (b : GMat) = Mop_Kron.op(this, b, null)
def \\ (b : GMat) = Mop_HCat.op(this, b, null)
def on (b : GMat) = Mop_VCat.op(this, b, null)
def > (b : GMat) = Mop_GT.op(this, b, null)
def < (b : GMat) = Mop_LT.op(this, b, null)
def == (b : GMat) = Mop_EQ.op(this, b, null)
def === (b : GMat) = Mop_EQ.op(this, b, null)
def >= (b : GMat) = Mop_GE.op(this, b, null)
def <= (b : GMat) = Mop_LE.op(this, b, null)
def != (b : GMat) = Mop_NE.op(this, b, null)
/*
* Operators whose second arg is generic.
*/
override def * (b : Mat) = Mop_Times.op(this, b, null)
override def *^ (b : Mat) = Mop_TimesT.op(this, b, null)
override def xT (b : Mat) = Mop_TimesT.op(this, b, null)
override def Tx (b : Mat) = Mop_TTimes.op(this, b, null)
override def ^* (b : Mat) = Mop_TTimes.op(this, b, null)
override def + (b : Mat) = Mop_Plus.op(this, b, null)
override def - (b : Mat) = Mop_Minus.op(this, b, null)
override def *@ (b : Mat) = Mop_ETimes.op(this, b, null)
override def ∘ (b : Mat) = Mop_ETimes.op(this, b, null)
override def / (b : Mat) = Mop_EDiv.op(this, b, null)
override def /< (b : Mat) = Mop_Div.op(this, b, null)
override def \\\\ (b : Mat) = Mop_RSolve.op(this, b, null)
override def ◁ (b : Mat) = Mop_Div.op(this, b, null)
override def ▷ (b : Mat) = Mop_RSolve.op(this, b, null)
override def ^ (b : Mat) = Mop_Pow.op(this, b, null)
override def ∙ (b : Mat) = Mop_Dot.op(this, b, null)
override def ∙→ (b : Mat) = Mop_Dotr.op(this, b, null)
override def dot (b : Mat) = Mop_Dot.op(this, b, null)
override def dotr (b : Mat) = Mop_Dotr.op(this, b, null)
override def ** (b : Mat) = Mop_Kron.op(this, b, null)
override def ⊗ (b : Mat) = Mop_Kron.op(this, b, null)
override def \\ (b : Mat) = Mop_HCat.op(this, b, null)
override def on (b : Mat) = Mop_VCat.op(this, b, null)
override def > (b : Mat) = Mop_GT.op(this, b, null)
override def < (b : Mat) = Mop_LT.op(this, b, null)
override def >= (b : Mat) = Mop_GE.op(this, b, null)
override def <= (b : Mat) = Mop_LE.op(this, b, null)
override def == (b : Mat) = Mop_EQ.op(this, b, null)
override def === (b : Mat) = Mop_EQ.op(this, b, null)
override def != (b : Mat) = Mop_NE.op(this, b, null)
}
class DPair (val omat:Mat, val mat:DMat) extends Pair{
override def t:DMat = mat.tt(omat)
/*
* Compute routines
*/
def * (b : DMat) = mat.fDMult(b, omat)
def * (b : SDMat) = mat.fSMult(b, omat)
def *^ (b : SDMat) = mat.multT(b, omat)
def xT (b : SDMat) = mat.multT(b, omat)
def *^ (b : DMat) = mat.multT(b, omat)
def xT (b : DMat) = mat.multT(b, omat)
def ^* (b : DMat) = mat.Tmult(b, omat)
def Tx (b : DMat) = mat.Tmult(b, omat)
def + (b : DMat) = mat.ddMatOpv(b, DMat.vecAddFun, omat)
def - (b : DMat) = mat.ddMatOpv(b, DMat.vecSubFun, omat)
def *@ (b : DMat) = mat.ddMatOpv(b, DMat.vecMulFun, omat)
def ∘ (b : DMat) = mat.ddMatOpv(b, DMat.vecMulFun, omat)
def / (b : DMat) = mat.ddMatOpv(b, DMat.vecDivFun, omat)
  def ^ (b : DMat) = mat.ddMatOp(b, DMat.powFun, omat)
def dot (b :DMat) = mat.dot(b, omat)
def dotr (b :DMat) = mat.dotr(b, omat)
def ∙ (b :DMat) = mat.dot(b, omat)
def ∙→ (b :DMat) = mat.dotr(b, omat)
def ** (b : DMat) = mat.kron(b, omat)
def ⊗ (b : DMat) = mat.kron(b, omat)
def > (b : DMat) = mat.ddMatOp(b, DMat.gtFun, omat)
def < (b : DMat) = mat.ddMatOp(b, DMat.ltFun, omat)
def == (b : DMat) = mat.ddMatOp(b, DMat.eqFun, omat)
def === (b : DMat) = mat.ddMatOp(b, DMat.eqFun, omat)
def >= (b : DMat) = mat.ddMatOp(b, DMat.geFun, omat)
def <= (b : DMat) = mat.ddMatOp(b, DMat.leFun, omat)
def != (b : DMat) = mat.ddMatOp(b, DMat.neFun, omat)
override def * (b : Float) = mat.fDMult(DMat.delem(b), omat)
override def + (b : Float) = mat.ddMatOpScalarv(b, DMat.vecAddFun, omat)
override def - (b : Float) = mat.ddMatOpScalarv(b, DMat.vecSubFun, omat)
override def *@ (b : Float) = mat.ddMatOpScalarv(b, DMat.vecMulFun, omat)
override def ∘ (b : Float) = mat.ddMatOpScalarv(b, DMat.vecMulFun, omat)
override def / (b : Float) = mat.ddMatOpScalarv(b, DMat.vecDivFun, omat)
override def ^ (b : Float) = mat.ddMatOpScalar(b, DMat.powFun, omat)
override def > (b : Float) = mat.ddMatOpScalar(b, DMat.gtFun, omat)
override def < (b : Float) = mat.ddMatOpScalar(b, DMat.ltFun, omat)
override def == (b : Float) = mat.ddMatOpScalar(b, DMat.eqFun, omat)
override def === (b : Float) = mat.ddMatOpScalar(b, DMat.eqFun, omat)
override def >= (b : Float) = mat.ddMatOpScalar(b, DMat.geFun, omat)
override def <= (b : Float) = mat.ddMatOpScalar(b, DMat.leFun, omat)
override def != (b : Float) = mat.ddMatOpScalar(b, DMat.neFun, omat)
override def * (b : Double) = mat.fDMult(DMat.delem(b), omat)
override def + (b : Double) = mat.ddMatOpScalarv(b, DMat.vecAddFun, omat)
override def - (b : Double) = mat.ddMatOpScalarv(b, DMat.vecSubFun, omat)
override def *@ (b : Double) = mat.ddMatOpScalarv(b, DMat.vecMulFun, omat)
override def ∘ (b : Double) = mat.ddMatOpScalarv(b, DMat.vecMulFun, omat)
override def / (b : Double) = mat.ddMatOpScalarv(b, DMat.vecDivFun, omat)
override def ^ (b : Double) = mat.ddMatOpScalar(b, DMat.powFun, omat)
override def > (b : Double) = mat.ddMatOpScalar(b, DMat.gtFun, omat)
override def < (b : Double) = mat.ddMatOpScalar(b, DMat.ltFun, omat)
override def == (b : Double) = mat.ddMatOpScalar(b, DMat.eqFun, omat)
override def === (b : Double) = mat.ddMatOpScalar(b, DMat.eqFun, omat)
override def >= (b : Double) = mat.ddMatOpScalar(b, DMat.geFun, omat)
override def <= (b : Double) = mat.ddMatOpScalar(b, DMat.leFun, omat)
override def != (b : Double) = mat.ddMatOpScalar(b, DMat.neFun, omat)
override def * (b : Int) = mat.fDMult(DMat.delem(b), omat)
override def + (b : Int) = mat.ddMatOpScalarv(b, DMat.vecAddFun, omat)
override def - (b : Int) = mat.ddMatOpScalarv(b, DMat.vecSubFun, omat)
override def *@ (b : Int) = mat.ddMatOpScalarv(b, DMat.vecMulFun, omat)
override def ∘ (b : Int) = mat.ddMatOpScalarv(b, DMat.vecMulFun, omat)
override def / (b : Int) = mat.ddMatOpScalarv(b, DMat.vecDivFun, omat)
override def ^ (b : Int) = mat.ddMatOpScalar(b, DMat.powFun, omat)
override def > (b : Int) = mat.ddMatOpScalar(b, DMat.gtFun, omat)
override def < (b : Int) = mat.ddMatOpScalar(b, DMat.ltFun, omat)
override def == (b : Int) = mat.ddMatOpScalar(b, DMat.eqFun, omat)
override def === (b : Int) = mat.ddMatOpScalar(b, DMat.eqFun, omat)
override def >= (b : Int) = mat.ddMatOpScalar(b, DMat.geFun, omat)
override def <= (b : Int) = mat.ddMatOpScalar(b, DMat.leFun, omat)
override def != (b : Int) = mat.ddMatOpScalar(b, DMat.neFun, omat)
/*
* Specialize to IMat
*/
def * (b : IMat) = Mop_Times.op(mat, b, omat)
def *^ (b : IMat) = Mop_TimesT.op(mat, b, omat)
def xT (b : IMat) = Mop_TimesT.op(mat, b, omat)
def Tx (b : IMat) = Mop_TTimes.op(mat, b, omat)
def ^* (b : IMat) = Mop_TTimes.op(mat, b, omat)
def + (b : IMat) = Mop_Plus.op(mat, b, omat)
def - (b : IMat) = Mop_Minus.op(mat, b, omat)
def *@ (b : IMat) = Mop_ETimes.op(mat, b, omat)
def ∘ (b : IMat) = Mop_ETimes.op(mat, b, omat)
def / (b : IMat) = Mop_EDiv.op(mat, b, omat)
def ^ (b : IMat) = Mop_Pow.op(mat, b, omat)
def ∙ (b : IMat) = Mop_Dot.op(mat, b, omat)
def ∙→ (b : IMat) = Mop_Dotr.op(mat, b, omat)
def dot (b : IMat) = Mop_Dot.op(mat, b, omat)
def dotr(b : IMat) = Mop_Dotr.op(mat, b, omat)
def ⊗ (b : IMat) = Mop_Kron.op(mat, b, omat)
def ** (b : IMat) = Mop_Kron.op(mat, b, omat)
def \\ (b : IMat) = Mop_HCat.op(mat, b, omat)
def on (b : IMat) = Mop_VCat.op(mat, b, omat)
def > (b : IMat) = Mop_GT.op(mat, b, omat)
def < (b : IMat) = Mop_LT.op(mat, b, omat)
def == (b : IMat) = Mop_EQ.op(mat, b, omat)
def === (b : IMat) = Mop_EQ.op(mat, b, omat)
def >= (b : IMat) = Mop_GE.op(mat, b, omat)
def <= (b : IMat) = Mop_LE.op(mat, b, omat)
def != (b : IMat) = Mop_NE.op(mat, b, omat)
/*
* Specialize to FMat
*/
def * (b : FMat) = Mop_Times.op(mat, b, omat)
def *^ (b : FMat) = Mop_TimesT.op(mat, b, omat)
def xT (b : FMat) = Mop_TimesT.op(mat, b, omat)
def Tx (b : FMat) = Mop_TTimes.op(mat, b, omat)
def ^* (b : FMat) = Mop_TTimes.op(mat, b, omat)
def + (b : FMat) = Mop_Plus.op(mat, b, omat)
def - (b : FMat) = Mop_Minus.op(mat, b, omat)
def *@ (b : FMat) = Mop_ETimes.op(mat, b, omat)
def ∘ (b : FMat) = Mop_ETimes.op(mat, b, omat)
def / (b : FMat) = Mop_EDiv.op(mat, b, omat)
def ^ (b : FMat) = Mop_Pow.op(mat, b, omat)
def ∙ (b : FMat) = Mop_Dot.op(mat, b, omat)
def ∙→ (b : FMat) = Mop_Dotr.op(mat, b, omat)
def dot (b : FMat) = Mop_Dot.op(mat, b, omat)
def dotr(b : FMat) = Mop_Dotr.op(mat, b, omat)
def ⊗ (b : FMat) = Mop_Kron.op(mat, b, omat)
def ** (b : FMat) = Mop_Kron.op(mat, b, omat)
def \\ (b : FMat) = Mop_HCat.op(mat, b, omat)
def on (b : FMat) = Mop_VCat.op(mat, b, omat)
def > (b : FMat) = Mop_GT.op(mat, b, omat)
def < (b : FMat) = Mop_LT.op(mat, b, omat)
def == (b : FMat) = Mop_EQ.op(mat, b, omat)
def === (b : FMat) = Mop_EQ.op(mat, b, omat)
def >= (b : FMat) = Mop_GE.op(mat, b, omat)
def <= (b : FMat) = Mop_LE.op(mat, b, omat)
def != (b : FMat) = Mop_NE.op(mat, b, omat)
/*
* Specialize to GMat
*/
def * (b : GMat) = Mop_Times.op(mat, b, omat)
def *^ (b : GMat) = Mop_TimesT.op(mat, b, omat)
def xT (b : GMat) = Mop_TimesT.op(mat, b, omat)
def Tx (b : GMat) = Mop_TTimes.op(mat, b, omat)
def ^* (b : GMat) = Mop_TTimes.op(mat, b, omat)
def + (b : GMat) = Mop_Plus.op(mat, b, omat)
def - (b : GMat) = Mop_Minus.op(mat, b, omat)
def *@ (b : GMat) = Mop_ETimes.op(mat, b, omat)
def ∘ (b : GMat) = Mop_ETimes.op(mat, b, omat)
def / (b : GMat) = Mop_EDiv.op(mat, b, omat)
def ^ (b : GMat) = Mop_Pow.op(mat, b, omat)
def ∙ (b : GMat) = Mop_Dot.op(mat, b, omat)
def ∙→ (b : GMat) = Mop_Dotr.op(mat, b, omat)
def dot (b : GMat) = Mop_Dot.op(mat, b, omat)
def dotr(b : GMat) = Mop_Dotr.op(mat, b, omat)
def ⊗ (b : GMat) = Mop_Kron.op(mat, b, omat)
def ** (b : GMat) = Mop_Kron.op(mat, b, omat)
def \\ (b : GMat) = Mop_HCat.op(mat, b, omat)
def on (b : GMat) = Mop_VCat.op(mat, b, omat)
def > (b : GMat) = Mop_GT.op(mat, b, omat)
def < (b : GMat) = Mop_LT.op(mat, b, omat)
def == (b : GMat) = Mop_EQ.op(mat, b, omat)
def === (b : GMat) = Mop_EQ.op(mat, b, omat)
def >= (b : GMat) = Mop_GE.op(mat, b, omat)
def <= (b : GMat) = Mop_LE.op(mat, b, omat)
def != (b : GMat) = Mop_NE.op(mat, b, omat)
/*
* Generics
*/
override def * (b : Mat):Mat = Mop_Times.op(mat, b, omat)
override def xT (b : Mat):Mat = Mop_TimesT.op(mat, b, omat)
override def *^ (b : Mat):Mat = Mop_TimesT.op(mat, b, omat)
override def Tx (b : Mat):Mat = Mop_TTimes.op(mat, b, omat)
override def ^* (b : Mat):Mat = Mop_TTimes.op(mat, b, omat)
override def + (b : Mat):Mat = Mop_Plus.op(mat, b, omat)
override def - (b : Mat):Mat = Mop_Minus.op(mat, b, omat)
override def *@ (b : Mat):Mat = Mop_ETimes.op(mat, b, omat)
override def ∘ (b : Mat):Mat = Mop_ETimes.op(mat, b, omat)
override def / (b : Mat):Mat = Mop_EDiv.op(mat, b, omat)
override def ^ (b : Mat):Mat = Mop_Pow.op(mat, b, omat)
override def /< (b : Mat):Mat = Mop_Div.op(mat, b, omat)
override def \\\\ (b : Mat):Mat = Mop_RSolve.op(mat, b, omat)
override def ◁ (b : Mat):Mat = Mop_Div.op(mat, b, omat)
override def ▷ (b : Mat):Mat = Mop_RSolve.op(mat, b, omat)
override def ∙ (b : Mat) = Mop_Dot.op(mat, b, omat)
override def ∙→ (b : Mat) = Mop_Dotr.op(mat, b, omat)
override def dot (b : Mat) = Mop_Dot.op(mat, b, omat)
override def dotr(b : Mat) = Mop_Dotr.op(mat, b, omat)
override def ⊗ (b : Mat) = Mop_Kron.op(mat, b, omat)
override def ** (b : Mat) = Mop_Kron.op(mat, b, omat)
override def \\ (b : Mat):Mat = Mop_HCat.op(mat, b, omat)
override def on (b : Mat):Mat = Mop_VCat.op(mat, b, omat)
override def > (b : Mat):Mat = Mop_GT.op(mat, b, omat)
override def < (b : Mat):Mat = Mop_LT.op(mat, b, omat)
override def >= (b : Mat):Mat = Mop_GE.op(mat, b, omat)
override def <= (b : Mat):Mat = Mop_LE.op(mat, b, omat)
override def == (b : Mat):Mat = Mop_EQ.op(mat, b, omat)
override def === (b : Mat):Mat = Mop_EQ.op(mat, b, omat)
override def != (b : Mat):Mat = Mop_NE.op(mat, b, omat)
}
object DMat {
def apply(nr:Int, nc:Int) = new DMat(nr, nc, new Array[Double](nr*nc))
def apply(a:DenseMat[Double]):DMat = {
val out = new DMat(a.nrows, a.ncols, a.data)
out.setGUID(a.GUID)
out
}
def apply(a:Float) = delem(a)
def apply(a:Int) = delem(a)
def apply(a:Double) = delem(a)
def apply(x:Mat):DMat = {
val out = DMat.newOrCheckDMat(x.nrows, x.ncols, null, x.GUID, "DMat".##)
x match {
case dd:DMat => {System.arraycopy(dd.data, 0, out.data, 0, dd.length)}
case ff:FMat => {Mat.copyToDoubleArray(ff.data, 0, out.data, 0, ff.length)}
case ii:IMat => {Mat.copyToDoubleArray(ii.data, 0, out.data, 0, ii.length)}
case ii:LMat => {Mat.copyToDoubleArray(ii.data, 0, out.data, 0, ii.length)}
case ss:SDMat => ss.full(out)
case gg:GMat => {val ff = gg.toFMat(null); Mat.copyToDoubleArray(ff.data, 0, out.data, 0, ff.length)}
case gg:GDMat => gg.toDMat(out)
case _ => throw new RuntimeException("Unsupported source type")
}
out
}
def vecDiv(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var cend = c0 + n * cinc;
while (ci < cend) {
c(ci) = a(ai) / b(bi); ai += ainc; bi += binc; ci += cinc
}
0
}
def vecAdd(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var i = 0
while (i < n) {
c(ci) = a(ai) + b(bi); ai += ainc; bi += binc; ci += cinc; i += 1
}
0
}
def vecSub(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var cend = c0 + n * cinc;
while (ci < cend) {
c(ci) = a(ai) - b(bi); ai += ainc; bi += binc; ci += cinc
}
0
}
def vecMul(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var cend = c0 + n * cinc;
while (ci < cend) {
c(ci) = a(ai) * b(bi); ai += ainc; bi += binc; ci += cinc
}
0
}
def vecMax(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var i = 0
while (i < n) {
c(ci) = math.max(a(ai), b(bi)); ai += ainc; bi += binc; ci += cinc; i += 1
}
0
}
def vecMin(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var i = 0;
while (i < n) {
c(ci) = math.min(a(ai), b(bi)); ai += ainc; bi += binc; ci += cinc; i += 1
}
0
}
def vecEQ(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var cend = c0 + n * cinc;
while (ci < cend) {
c(ci) = if (a(ai) == b(bi)) 1f else 0f; ai += ainc; bi += binc; ci += cinc
}
0
}
def vecNE(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var cend = c0 + n * cinc;
while (ci < cend) {
c(ci) = if (a(ai) != b(bi)) 1f else 0f; ai += ainc; bi += binc; ci += cinc
}
0
}
def vecGT(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var cend = c0 + n * cinc;
while (ci < cend) {
c(ci) = if (a(ai) > b(bi)) 1f else 0f; ai += ainc; bi += binc; ci += cinc
}
0
}
def vecLT(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var cend = c0 + n * cinc;
while (ci < cend) {
c(ci) = if (a(ai) < b(bi)) 1f else 0f; ai += ainc; bi += binc; ci += cinc
}
0
}
def vecGE(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var cend = c0 + n * cinc;
while (ci < cend) {
c(ci) = if (a(ai) >= b(bi)) 1f else 0f; ai += ainc; bi += binc; ci += cinc
}
0
}
def vecLE(a:Array[Double], a0:Int, ainc:Int, b:Array[Double], b0:Int, binc:Int, c:Array[Double], c0:Int, cinc:Int, n:Int):Double = {
var ai = a0; var bi = b0; var ci = c0; var cend = c0 + n * cinc;
while (ci < cend) {
c(ci) = if (a(ai) <= b(bi)) 1f else 0f; ai += ainc; bi += binc; ci += cinc
}
0
}
def vecSum(a:Array[Double], a0:Int, ainc:Int, c:Array[Double], c0:Int, n:Int):Double = {
var ai = a0; var aend = a0 + n * ainc; var sum = 0.0;
while (ai < aend) {
sum += a(ai); ai += ainc;
}
c(c0) = sum;
0
}
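  /*
   * Illustrative note (not part of the original source): the vec* routines above
   * share a strided-vector convention -- (array, start offset, increment) per
   * operand plus the element count n.  A hypothetical dense call adding two
   * length-n arrays a and b into c would be
   *
   *   DMat.vecAdd(a, 0, 1, b, 0, 1, c, 0, 1, n)
   *
   * and passing an increment of 0 for one operand broadcasts a single element
   * across the whole operation.
   */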
val vecAddFun = (vecAdd _)
val vecSubFun = (vecSub _)
val vecMulFun = (vecMul _)
val vecDivFun = (vecDiv _)
val vecMaxFun = (vecMax _)
val vecMinFun = (vecMin _)
val vecEQFun = (vecEQ _)
val vecNEFun = (vecNE _)
val vecGTFun = (vecGT _)
val vecLTFun = (vecLT _)
val vecGEFun = (vecGE _)
val vecLEFun = (vecLE _)
val vecSumFun = (vecSum _)
def lexcomp(a:DMat, out:IMat):(Int, Int) => Int = {
val aa = a.data
val nr = a.nrows
val ii = out.data
(i:Int, j:Int) => {
if (i == j) {
0
} else {
val ip = ii(i)
val jp = ii(j)
var k = 0
while (k < a.ncols && aa(ip+k*nr) == aa(jp+k*nr)) {
k += 1
}
if (k == a.ncols) {
ip compare jp
} else {
if (aa(ip+k*nr) < aa(jp+k*nr)) {
-1
} else {
1
}
}
}
}
}
def isortlex(a:DMat, asc:Boolean):IMat = {
val out = IMat.newOrCheckIMat(a.nrows, 1, null, a.GUID, "sortlex".hashCode)
val compp = lexcomp(a, out)
DenseMat._isortlex(a, asc, out, compp)
}
  val gtFun = (x:Double, y:Double) => if (x > y) 1.0 else 0.0
  val geFun = (x:Double, y:Double) => if (x >= y) 1.0 else 0.0
  val ltFun = (x:Double, y:Double) => if (x < y) 1.0 else 0.0
  val leFun = (x:Double, y:Double) => if (x <= y) 1.0 else 0.0
  val eqFun = (x:Double, y:Double) => if (x == y) 1.0 else 0.0
  val neFun = (x:Double, y:Double) => if (x != y) 1.0 else 0.0
val powFun = (x:Double, y:Double) => math.pow(x,y)
val maxFun = (x:Double, y:Double) => math.max(x, y)
val minFun = (x:Double, y:Double) => math.min(x, y)
val sumFun = (x:Double, y:Double) => x + y
val idFun = (x:Double) => x
val gtPred = (x:Double, y:Double) => (x > y)
val ltPred = (x:Double, y:Double) => (x < y)
def delem(x:Double) = {
val out = DMat.newOrCheckDMat(1,1,null,x.##,"delem".##)
out.data(0) = x
out
}
def newOrCheckDMat(nr:Int, nc:Int, omat:Mat):DMat = {
if (omat.asInstanceOf[AnyRef] == null || (omat.nrows == 0 && omat.ncols == 0)) {
DMat(nr, nc)
} else {
omat match {
case outmat:DMat =>
if (outmat.nrows != nr || outmat.ncols != nc) {
outmat.recycle(nr, nc, 0)
} else {
outmat
}
case _ => throw new RuntimeException("wrong type for out matrix "+omat)
}
}
}
def newOrCheckDMat(nr:Int, nc:Int, outmat:Mat, matGuid:Long, opHash:Int):DMat = {
if (outmat.asInstanceOf[AnyRef] != null || !Mat.useCache) {
newOrCheckDMat(nr, nc, outmat)
} else {
val key = (matGuid, opHash)
val res = Mat.cache2(key)
if (res != null) {
newOrCheckDMat(nr, nc, res)
} else {
val omat = newOrCheckDMat(nr, nc, null)
Mat.cache2put(key, omat)
omat
}
}
}
def newOrCheckDMat(nr:Int, nc:Int, outmat:Mat, guid1:Long, guid2:Long, opHash:Int):DMat = {
if (outmat.asInstanceOf[AnyRef] != null || !Mat.useCache) {
newOrCheckDMat(nr, nc, outmat)
} else {
val key = (guid1, guid2, opHash)
val res = Mat.cache3(key)
if (res != null) {
newOrCheckDMat(nr, nc, res)
} else {
val omat = newOrCheckDMat(nr, nc, null)
Mat.cache3put(key, omat)
omat
}
}
}
def newOrCheckDMat(nr:Int, nc:Int, outmat:Mat, guid1:Long, guid2:Long, guid3:Long, opHash:Int):DMat = {
if (outmat.asInstanceOf[AnyRef] != null || !Mat.useCache) {
newOrCheckDMat(nr, nc, outmat)
} else {
val key = (guid1, guid2, guid3, opHash)
val res = Mat.cache4(key)
if (res != null) {
newOrCheckDMat(nr, nc, res)
} else {
val omat = newOrCheckDMat(nr, nc, null)
Mat.cache4put(key, omat)
omat
}
}
}
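  /*
   * Illustrative note (not part of the original source): when Mat.useCache is on
   * and no output matrix is supplied, these overloads key the allocation on the
   * input GUIDs plus a hash of the operation name, mirroring calls made elsewhere
   * in this file, e.g. (names hypothetical)
   *
   *   val out = DMat.newOrCheckDMat(a.nrows, a.ncols, null, a.GUID, "myOp".##)
   *
   * so repeated invocations of the same operation on the same input reuse one
   * cached result matrix.
   */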
}
| codeaudit/BIDMat | src/main/scala/BIDMat/DMat.scala | Scala | bsd-3-clause | 56,567 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package java.util
class EventObject(protected var source: AnyRef) {
def getSource(): AnyRef = source
override def toString(): String =
s"${getClass.getSimpleName}[source=$source]"
}
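/*
 * Illustrative usage sketch (not part of the original source): a hypothetical
 * event type simply forwards its source object to this constructor, e.g.
 *
 *   class ButtonClickEvent(button: AnyRef) extends EventObject(button)
 *   // new ButtonClickEvent(myButton).getSource() eq myButton
 */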
| scala-js/scala-js | javalib/src/main/scala/java/util/EventObject.scala | Scala | apache-2.0 | 466 |
package auth.controllers
import javax.inject.Inject
import com.mohiva.play.silhouette.api.exceptions.{ConfigurationException, ProviderException}
import com.mohiva.play.silhouette.api.services.AuthInfoService
import com.mohiva.play.silhouette.api.util.Credentials
import com.mohiva.play.silhouette.api.{Environment, LoginEvent, Silhouette}
import com.mohiva.play.silhouette.impl
import com.mohiva.play.silhouette.impl.authenticators.JWTAuthenticator
import com.mohiva.play.silhouette.impl.exceptions.IdentityNotFoundException
import com.mohiva.play.silhouette.impl.providers.CredentialsProvider
import auth.models.User
import auth.models.services.UserService
import play.api.i18n.Messages
import play.api.libs.json._
import play.api.libs.functional.syntax._
import play.api.mvc.Action
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
* The credentials auth controller.
*
* @param env The Silhouette environment.
*/
class CredentialsAuthController @Inject() (
implicit val env: Environment[User, JWTAuthenticator],
val userService: UserService,
val authInfoService: AuthInfoService)
extends Silhouette[User, JWTAuthenticator] {
  /**
   * Converts the JSON request body into a [[Credentials]] object.
   */
implicit val credentialsReads = (
(__ \\ 'email).read[String] and
(__ \\ 'password).read[String]
)(Credentials.apply _)
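  /*
   * Illustrative note (not part of the original source): given the reads above,
   * the expected request body has the shape (values hypothetical)
   *
   *   { "email": "user@example.com", "password": "secret" }
   *
   * Anything else fails request.body.validate[Credentials] below and is answered
   * with the "invalid.credentials" message.
   */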
/**
* Authenticates a user against the credentials provider.
*
* @return The result to display.
*/
def authenticate = Action.async(parse.json) { implicit request =>
request.body.validate[Credentials].map { credentials =>
(env.providers.get(CredentialsProvider.ID) match {
case Some(p: CredentialsProvider) => p.authenticate(credentials)
case _ => Future.failed(new ConfigurationException(s"Cannot find credentials provider"))
}).flatMap { loginInfo =>
userService.retrieve(loginInfo).flatMap {
case Some(user) => env.authenticatorService.create(user.loginInfo).flatMap { authenticator =>
env.eventBus.publish(LoginEvent(user, request, request2lang))
env.authenticatorService.init(authenticator).map { token =>
Ok(Json.obj("token" -> token))
}
}
case None => Future.failed(new IdentityNotFoundException("Couldn't find user"))
}
}.recover {
case e: ProviderException => Unauthorized(Json.obj("message" -> Messages("invalid.credentials")))
}
}.recoverTotal {
case error =>
Future.successful(Unauthorized(Json.obj("message" -> Messages("invalid.credentials"))))
}
}
}
| readren/coc-war-organizer | app/auth/controllers/CredentialsAuthController.scala | Scala | apache-2.0 | 2,650 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.Equality
import org.scalactic.Explicitly
import org.scalactic.StringNormalizations._
import org.scalactic.Uniformity
import org.scalactic.Prettifier
import collection.GenTraversable
import SharedHelpers._
import org.scalactic.ArrayHelper.deep
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers._
class OnlyContainMatcherDeciderSpec extends AnyFunSpec with Explicitly {
private val prettifier = Prettifier.default
val mapTrimmed: Uniformity[(Int, String)] =
new Uniformity[(Int, String)] {
def normalized(s: (Int, String)): (Int, String) = (s._1, s._2.trim)
def normalizedCanHandle(b: Any) =
b match {
case (_: Int, _: String) => true
case _ => false
}
def normalizedOrSame(b: Any) =
b match {
case (k: Int, v: String) => normalized((k, v))
case _ => b
}
}
// SKIP-SCALATESTJS,NATIVE-START
val javaMapTrimmed: Uniformity[java.util.Map.Entry[Int, String]] =
new Uniformity[java.util.Map.Entry[Int, String]] {
def normalized(s: java.util.Map.Entry[Int, String]): java.util.Map.Entry[Int, String] = Entry(s.getKey, s.getValue.trim)
def normalizedCanHandle(b: Any) =
b match {
case entry: java.util.Map.Entry[_, _] =>
(entry.getKey, entry.getValue) match {
case (_: Int, _: String) => true
case _ => false
}
case _ => false
}
def normalizedOrSame(b: Any) =
b match {
case entry: java.util.Map.Entry[_, _] =>
(entry.getKey, entry.getValue) match {
case (k: Int, v: String) => normalized(Entry(k, v))
case _ => b
}
case _ => b
}
}
// SKIP-SCALATESTJS,NATIVE-END
val incremented: Uniformity[Int] =
new Uniformity[Int] {
var count = 0
def normalized(s: Int): Int = {
count += 1
s + count
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[Int]
def normalizedOrSame(b: Any) =
b match {
case i: Int => normalized(i)
case _ => b
}
}
val mapIncremented: Uniformity[(Int, String)] =
new Uniformity[(Int, String)] {
var count = 0
def normalized(s: (Int, String)): (Int, String) = {
count += 1
(s._1 + count, s._2)
}
def normalizedCanHandle(b: Any) =
b match {
case (_: Int, _: String) => true
case _ => false
}
def normalizedOrSame(b: Any) =
b match {
case (k: Int, v: String) => normalized((k, v))
case _ => b
}
}
val appended: Uniformity[String] =
new Uniformity[String] {
var count = 0
def normalized(s: String): String = {
count += 1
s + count
}
def normalizedCanHandle(b: Any): Boolean = b.isInstanceOf[String]
def normalizedOrSame(b: Any) =
b match {
case s: String => normalized(s)
case _ => b
}
}
val mapAppended: Uniformity[(Int, String)] =
new Uniformity[(Int, String)] {
var count = 0
def normalized(s: (Int, String)): (Int, String) = {
count += 1
(s._1, s._2 + count)
}
def normalizedCanHandle(b: Any) =
b match {
case (_: Int, _: String) => true
case _ => false
}
def normalizedOrSame(b: Any) =
b match {
case (k: Int, v: String) => normalized((k, v))
case _ => b
}
}
// SKIP-SCALATESTJS,NATIVE-START
val javaMapAppended: Uniformity[java.util.Map.Entry[Int, String]] =
new Uniformity[java.util.Map.Entry[Int, String]] {
var count = 0
def normalized(s: java.util.Map.Entry[Int, String]): java.util.Map.Entry[Int, String] = {
count += 1
Entry(s.getKey, s.getValue + count)
}
def normalizedCanHandle(b: Any) =
b match {
case entry: java.util.Map.Entry[_, _] =>
(entry.getKey, entry.getValue) match {
case (_: Int, _: String) => true
case _ => false
}
case _ => false
}
def normalizedOrSame(b: Any) =
b match {
case entry: java.util.Map.Entry[_, _] =>
(entry.getKey, entry.getValue) match {
case (k: Int, v: String) => normalized(Entry(k, v))
case _ => b
}
case _ => b
}
}
// SKIP-SCALATESTJS,NATIVE-END
val lowerCaseEquality =
new Equality[String] {
def areEqual(left: String, right: Any) =
left.toLowerCase == (right match {
case s: String => s.toLowerCase
case other => other
})
}
val mapLowerCaseEquality =
new Equality[(Int, String)] {
def areEqual(left: (Int, String), right: Any) =
right match {
case t2: Tuple2[_, _] =>
left._1 == t2._1 &&
left._2.toLowerCase == (t2._2 match {
case s: String => s.toLowerCase
case other => other
})
case right => left == right
}
}
// SKIP-SCALATESTJS,NATIVE-START
val javaMapLowerCaseEquality =
new Equality[java.util.Map.Entry[Int, String]] {
def areEqual(left: java.util.Map.Entry[Int, String], right: Any) =
right match {
case entry: java.util.Map.Entry[_, _] =>
left.getKey == entry.getKey &&
left.getValue.toLowerCase == (entry.getValue match {
case s: String => s.toLowerCase
case other => other
})
case right => left == right
}
}
// SKIP-SCALATESTJS,NATIVE-END
val reverseEquality =
new Equality[String] {
def areEqual(left: String, right: Any) =
left.reverse == (right match {
case s: String => s.toLowerCase
case other => other
})
}
val mapReverseEquality =
new Equality[(Int, String)] {
def areEqual(left: (Int, String), right: Any) =
right match {
case t2: Tuple2[_, _] =>
left._1 == t2._1 &&
left._2.reverse == (t2._2 match {
case s: String => s.toLowerCase
case other => other
})
case right => left == right
}
}
// SKIP-SCALATESTJS,NATIVE-START
val javaMapReverseEquality =
new Equality[java.util.Map.Entry[Int, String]] {
def areEqual(left: java.util.Map.Entry[Int, String], right: Any) =
right match {
case entry: java.util.Map.Entry[_, _] =>
left.getKey == entry.getKey &&
left.getValue.reverse == (entry.getValue match {
case s: String => s.toLowerCase
case other => other
})
case right => left == right
}
}
// SKIP-SCALATESTJS,NATIVE-END
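  /*
   * Illustrative note (not part of the original source): the Equality and
   * Uniformity instances above are combined through Explicitly's DSL in the
   * assertions below, e.g.
   *
   *   (List("ONE ") should contain only (" one")) (decided by lowerCaseEquality afterBeing trimmed)
   *
   * i.e. both sides are normalized by the Uniformity first, then compared with
   * the custom Equality instead of the default one.
   */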
describe("only ") {
def checkShouldContainStackDepth(e: exceptions.StackDepthException, left: Any, right: GenTraversable[Any], lineNumber: Int): Unit = {
val leftText = FailureMessages.decorateToStringValue(prettifier, left)
e.message should be (Some(leftText + " did not contain only (" + right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ") + ")"))
e.failedCodeFileName should be (Some("OnlyContainMatcherDeciderSpec.scala"))
e.failedCodeLineNumber should be (Some(lineNumber))
}
def checkShouldNotContainStackDepth(e: exceptions.StackDepthException, left: Any, right: GenTraversable[Any], lineNumber: Int): Unit = {
val leftText = FailureMessages.decorateToStringValue(prettifier, left)
e.message should be (Some(leftText + " contained only (" + right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ") + ")"))
e.failedCodeFileName should be (Some("OnlyContainMatcherDeciderSpec.scala"))
e.failedCodeLineNumber should be (Some(lineNumber))
}
it("should take specified normalization when 'should contain' is used") {
(List("1", " 2", "3") should contain only (" 1", "2 ", " 3")) (after being trimmed)
(Set("1", " 2", "3") should contain only (" 1", "2 ", " 3")) (after being trimmed)
(Array("1", " 2", "3") should contain only (" 1", "2 ", " 3")) (after being trimmed)
(Map(1 -> "one", 2 -> " two", 3 -> "three") should contain only (1 -> " one", 2 -> "two ", 3 -> " three")) (after being mapTrimmed)
// SKIP-SCALATESTJS,NATIVE-START
(javaList("1", " 2", "3") should contain only (" 1", "2 ", " 3")) (after being trimmed)
(javaSet("1", " 2", "3") should contain only (" 1", "2 ", " 3")) (after being trimmed)
(javaMap(Entry(1, "one"), Entry(2, " two"), Entry(3, "three")) should contain only (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (after being javaMapTrimmed)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should take specified normalization when 'should not contain' is used") {
(List("1", "2", "3") should not contain only ("1", "2", "3")) (after being appended)
(Set("1", "2", "3") should not contain only ("1", "2", "3")) (after being appended)
(Array("1", "2", "3") should not contain only ("1", "2", "3")) (after being appended)
(Map(1 -> "one", 2 -> "two", 3 -> "three") should not contain only (1 -> "one", 2 -> "two", 3 -> "three")) (after being mapAppended)
// SKIP-SCALATESTJS,NATIVE-START
(javaList("1", "2", "3") should not contain only ("1", "2", "3")) (after being appended)
(javaSet("1", "2", "3") should not contain only ("1", "2", "3")) (after being appended)
(javaMap(Entry(1, "one"), Entry(2, "two"), Entry(3, "three")) should not contain only (Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))) (after being javaMapAppended)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should throw TestFailedException with correct stack depth and message when 'should contain custom matcher' failed with specified normalization") {
val left1 = List("1", "2", "3")
val e1 = intercept[exceptions.TestFailedException] {
(left1 should contain only ("1", "2", "3")) (after being appended)
}
checkShouldContainStackDepth(e1, left1, deep(Array("1", "2", "3")), thisLineNumber - 2)
val left2 = Set("1", "2", "3")
val e2 = intercept[exceptions.TestFailedException] {
(left2 should contain only ("1", "2", "3")) (after being appended)
}
checkShouldContainStackDepth(e2, left2, deep(Array("1", "2", "3")), thisLineNumber - 2)
val left3 = Array("1", "2", "3")
val e3 = intercept[exceptions.TestFailedException] {
(left3 should contain only ("1", "2", "3")) (after being appended)
}
checkShouldContainStackDepth(e3, left3, deep(Array("1", "2", "3")), thisLineNumber - 2)
val left4 = Map(1 -> "one", 2 -> "two", 3 -> "three")
val e4 = intercept[exceptions.TestFailedException] {
(left4 should contain only (1 -> "one", 2 -> "two", 3 -> "three")) (after being mapAppended)
}
checkShouldContainStackDepth(e4, left4, deep(Array(1 -> "one", 2 -> "two", 3 -> "three")), thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-START
val left5 = javaList("1", "2", "3")
val e5 = intercept[exceptions.TestFailedException] {
(left5 should contain only ("1", "2", "3")) (after being appended)
}
checkShouldContainStackDepth(e5, left5, deep(Array("1", "2", "3")), thisLineNumber - 2)
val left6 = javaMap(Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))
val e6 = intercept[exceptions.TestFailedException] {
(left6 should contain only (Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))) (after being javaMapAppended)
}
checkShouldContainStackDepth(e6, left6, deep(Array(Entry(1, "one"), Entry(2, "two"), Entry(3, "three"))), thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should throw TestFailedException with correct stack depth and message when 'should not contain custom matcher' failed with specified normalization") {
val left1 = List("1", " 2", "3")
val e1 = intercept[exceptions.TestFailedException] {
(left1 should not contain only (" 1", "2 ", " 3")) (after being trimmed)
}
checkShouldNotContainStackDepth(e1, left1, deep(Array(" 1", "2 ", " 3")), thisLineNumber - 2)
val left2 = Set("1", " 2", "3")
val e2 = intercept[exceptions.TestFailedException] {
(left2 should not contain only (" 1", "2 ", " 3")) (after being trimmed)
}
checkShouldNotContainStackDepth(e2, left2, deep(Array(" 1", "2 ", " 3")), thisLineNumber - 2)
val left3 = Array("1", " 2", "3")
val e3 = intercept[exceptions.TestFailedException] {
(left3 should not contain only (" 1", "2 ", " 3")) (after being trimmed)
}
checkShouldNotContainStackDepth(e3, left3, deep(Array(" 1", "2 ", " 3")), thisLineNumber - 2)
val left4 = Map(1 -> "one", 2 -> " two", 3 -> "three")
val e4 = intercept[exceptions.TestFailedException] {
(left4 should not contain only (1 -> " one", 2 -> "two ", 3 -> " three")) (after being mapTrimmed)
}
checkShouldNotContainStackDepth(e4, left4, deep(Array(1 -> " one", 2 -> "two ", 3 -> " three")), thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-START
val left5 = javaList("1", " 2", "3")
val e5 = intercept[exceptions.TestFailedException] {
(left5 should not contain only (" 1", "2 ", " 3")) (after being trimmed)
}
checkShouldNotContainStackDepth(e5, left5, deep(Array(" 1", "2 ", " 3")), thisLineNumber - 2)
val left6 = javaMap(Entry(1, "one"), Entry(2, " two"), Entry(3, "three"))
val e6 = intercept[exceptions.TestFailedException] {
(left6 should not contain only (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (after being javaMapTrimmed)
}
checkShouldNotContainStackDepth(e6, left6, deep(Array(Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))), thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should take specified equality and normalization when 'should contain' is used") {
(List("ONE ", " TWO", "THREE ") should contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed)
(Set("ONE ", " TWO", "THREE ") should contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed)
(Array("ONE ", " TWO", "THREE ") should contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed)
(Map(1 -> "ONE ", 2 -> " TWO", 3 -> "THREE ") should contain only (1 -> " one", 2 -> "two ", 3 -> " three")) (decided by mapLowerCaseEquality afterBeing mapTrimmed)
// SKIP-SCALATESTJS,NATIVE-START
(javaList("ONE ", " TWO", "THREE ") should contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed)
(javaMap(Entry(1, "ONE "), Entry(2, " TWO"), Entry(3, "THREE ")) should contain only (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (decided by javaMapLowerCaseEquality afterBeing javaMapTrimmed)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should take specified equality and normalization when 'should not contain' is used") {
(List("one ", " two", "three ") should not contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
(Set("one ", " two", "three ") should not contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
(Array("one ", " two", "three ") should not contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
(Map(1 -> "one ", 2 -> " two", 3 -> "three ") should not contain only (1 -> " one", 2 -> "two ", 3 -> " three")) (decided by mapReverseEquality afterBeing mapTrimmed)
// SKIP-SCALATESTJS,NATIVE-START
(javaList("one ", " two", "three ") should not contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
(javaMap(Entry(1, "one "), Entry(2, " two"), Entry(3, "three ")) should not contain only (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (decided by javaMapReverseEquality afterBeing javaMapTrimmed)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should throw TestFailedException with correct stack depth and message when 'should contain custom matcher' failed with specified equality and normalization") {
val left1 = List("one ", " two", "three ")
val e1 = intercept[exceptions.TestFailedException] {
(left1 should contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
}
checkShouldContainStackDepth(e1, left1, deep(Array(" one", "two ", " three")), thisLineNumber - 2)
val left2 = Set("one ", " two", "three ")
val e2 = intercept[exceptions.TestFailedException] {
(left2 should contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
}
checkShouldContainStackDepth(e2, left2, deep(Array(" one", "two ", " three")), thisLineNumber - 2)
val left3 = Array("one ", " two", "three ")
val e3 = intercept[exceptions.TestFailedException] {
(left3 should contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
}
checkShouldContainStackDepth(e3, left3, deep(Array(" one", "two ", " three")), thisLineNumber - 2)
val left4 = Map(1 -> "one ", 2 -> " two", 3 -> "three ")
val e4 = intercept[exceptions.TestFailedException] {
(left4 should contain only (1 -> " one", 2 -> "two ", 3 -> " three")) (decided by mapReverseEquality afterBeing mapTrimmed)
}
checkShouldContainStackDepth(e4, left4, deep(Array(1 -> " one", 2 -> "two ", 3 -> " three")), thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-START
val left5 = javaList("one ", " two", "three ")
val e5 = intercept[exceptions.TestFailedException] {
(left5 should contain only (" one", "two ", " three")) (decided by reverseEquality afterBeing trimmed)
}
checkShouldContainStackDepth(e5, left5, deep(Array(" one", "two ", " three")), thisLineNumber - 2)
val left6 = javaMap(Entry(1, "one "), Entry(2, " two"), Entry(3, "three "))
val e6 = intercept[exceptions.TestFailedException] {
(left6 should contain only (Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))) (decided by javaMapReverseEquality afterBeing javaMapTrimmed)
}
checkShouldContainStackDepth(e6, left6, deep(Array(Entry(1, " one"), Entry(2, "two "), Entry(3, " three"))), thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-END
}
it("should throw TestFailedException with correct stack depth and message when 'should not contain custom matcher' failed with specified equality and normalization") {
val left1 = List("ONE ", " TWO", "THREE ")
val e1 = intercept[exceptions.TestFailedException] {
(left1 should not contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed)
}
checkShouldNotContainStackDepth(e1, left1, deep(Array(" one", "two ", " three")), thisLineNumber - 2)
val left2 = Set("ONE ", " TWO", "THREE ")
val e2 = intercept[exceptions.TestFailedException] {
(left2 should not contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed)
}
checkShouldNotContainStackDepth(e2, left2, deep(Array(" one", "two ", " three")), thisLineNumber - 2)
val left3 = Array("ONE ", " TWO", "THREE ")
val e3 = intercept[exceptions.TestFailedException] {
(left3 should not contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed)
}
checkShouldNotContainStackDepth(e3, left3, deep(Array(" one", "two ", " three")), thisLineNumber - 2)
val left4 = Map(1 -> "ONE ", 2 -> " TWO", 3 -> "THREE ")
val e4 = intercept[exceptions.TestFailedException] {
(left4 should not contain only (1 -> " one ", 2 -> "two ", 3 -> " three")) (decided by mapLowerCaseEquality afterBeing mapTrimmed)
}
checkShouldNotContainStackDepth(e4, left4, deep(Array(1 -> " one ", 2 -> "two ", 3 -> " three")), thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-START
val left5 = javaList("ONE ", " TWO", "THREE ")
val e5 = intercept[exceptions.TestFailedException] {
(left5 should not contain only (" one", "two ", " three")) (decided by lowerCaseEquality afterBeing trimmed)
}
checkShouldNotContainStackDepth(e5, left5, deep(Array(" one", "two ", " three")), thisLineNumber - 2)
val left6 = javaMap(Entry(1, "ONE "), Entry(2, " TWO"), Entry(3, "THREE "))
val e6 = intercept[exceptions.TestFailedException] {
(left6 should not contain only (Entry(1, " one "), Entry(2, "two "), Entry(3, " three"))) (decided by javaMapLowerCaseEquality afterBeing javaMapTrimmed)
}
checkShouldNotContainStackDepth(e6, left6, deep(Array(Entry(1, " one "), Entry(2, "two "), Entry(3, " three"))), thisLineNumber - 2)
// SKIP-SCALATESTJS,NATIVE-END
}
}
}
| scalatest/scalatest | jvm/scalatest-test/src/test/scala/org/scalatest/OnlyContainMatcherDeciderSpec.scala | Scala | apache-2.0 | 22,123 |
package org.ucombinator.jaam.patterns
import org.ucombinator.jaam.patterns.stmt._
import org.ucombinator.jaam.util.Stmt
case class StmtPatternToRegEx(pattern: LabeledStmtPattern) extends ((State, Stmt) => (List[State], List[(RegExp, State)])) {
override def apply(state: State, stmt: Stmt): (List[State], List[(RegExp, State)]) = {
(List(), pattern(state, stmt).map((Cat(List()), _)))
}
}
| Ucombinator/jaam | src/main/scala/org/ucombinator/jaam/patterns/StmtPatternToRegEx.scala | Scala | bsd-2-clause | 399 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.services.policies
import com.normation.cfclerk.domain.TechniqueId
import com.normation.cfclerk.services.TechniquesLibraryUpdateNotification
import com.normation.rudder.batch.AsyncDeploymentAgent
import com.normation.rudder.batch.AutomaticStartDeployment
import com.normation.eventlog.EventActor
import com.normation.rudder.domain.eventlog.ReloadTechniqueLibrary
import com.normation.eventlog.EventLogDetails
import net.liftweb.common._
import com.normation.eventlog.ModificationId
import com.normation.cfclerk.services.TechniquesLibraryUpdateType
import com.normation.cfclerk.domain.TechniqueName
import com.normation.rudder.repository.EventLogRepository
class DeployOnTechniqueCallback(
override val name : String
, override val order : Int
, asyncDeploymentAgent: AsyncDeploymentAgent
) extends TechniquesLibraryUpdateNotification with Loggable {
override def updatedTechniques(techniqueIds:Map[TechniqueName, TechniquesLibraryUpdateType], modId:ModificationId, actor:EventActor, reason: Option[String]) : Box[Unit] = {
reason.foreach( msg => logger.info(msg) )
if(techniqueIds.nonEmpty) {
logger.debug("Ask for a policy update since technique library was reloaded")
asyncDeploymentAgent ! AutomaticStartDeployment(modId, actor)
}
Full({})
}
}
class LogEventOnTechniqueReloadCallback(
override val name : String
, override val order: Int
, eventLogRepos : EventLogRepository
) extends TechniquesLibraryUpdateNotification with Loggable {
override def updatedTechniques(techniqueMods: Map[TechniqueName, TechniquesLibraryUpdateType], modId:ModificationId, actor:EventActor, reason: Option[String]) : Box[Unit] = {
eventLogRepos.saveEventLog(modId, ReloadTechniqueLibrary(EventLogDetails(
modificationId = None
, principal = actor
, details = ReloadTechniqueLibrary.buildDetails(techniqueMods)
, reason = reason
))) match {
case eb:EmptyBox =>
eb ?~! "Error when saving event log for techniques library reload"
case Full(x) => Full({})
}
}
}
| armeniaca/rudder | rudder-core/src/main/scala/com/normation/rudder/services/policies/TechniqueReloadingCallbacks.scala | Scala | gpl-3.0 | 3,797 |
package src.main.scala.geodecoding
import scala.util.{Failure, Success, Try}
import scala.io.Source
import src.main.scala.logging.Logging._
import src.main.scala.types.PostalCode
import src.main.scala.cache.KeyValueCache
abstract class GeoDecodingProvider extends Object {
  // This abstract class can expose many geodecoding functions; only the first one is
  // defined so far: resolving the postal code associated with a geographic coordinate.
protected [this] val urlGeoDecodeFmt: String
protected [this] val cacheGeoDecode: KeyValueCache[(Double, Double), PostalCode]
def convertLatLongToPostalCode(latitude: Double, longitude: Double): Try[PostalCode] = {
// check whether this (latitude/longitude) is already in the cache for this Geodecoder
val cacheKey = (latitude, longitude)
val cachedPostalCode: Option[PostalCode] = cacheGeoDecode.get(cacheKey)
cachedPostalCode match {
case Some(postalCode) => {
logMsg(DEBUG, "Geodecoding for latitude,longitude=(%f, %f) already cached".
format(latitude, longitude))
return Success(postalCode)
}
case None => {
logMsg(DEBUG, "Geodecoding for latitude,longitude=(%f, %f) has not been cached before".
format(latitude, longitude))
}
}
// this (latitude/longitude) is not in the cache for this Geodecoder
val urlGeoDecode = urlGeoDecodeFmt.format(latitude, longitude)
try {
val src = Source.fromURL(urlGeoDecode)
val geoDecodeResult = src.mkString
val result = parsePostalCodeInAnswer(geoDecodeResult)
if (result.isSuccess) {
// The (latitude/longitude) were finally geodecoded in parsePostalCodeInAnswer
logMsg(DEBUG, "Caching the geodecoding for latitude,longitude=(%f, %f)".
format(latitude, longitude))
cacheGeoDecode.add(cacheKey, result.get) // .get returns a PostalCode
}
result
} catch {
case e: java.io.IOException => {
logMsg(ERROR, "I/O error occurred while GeoDecoding: %s".
format(e.getMessage))
Failure(e)
}
} // end of catch
} // end of "def convertLatLongToPostalCode"
protected [this] def parsePostalCodeInAnswer(answerJson: String): Try[PostalCode]
def apply(latitude: Double, longitude: Double): Try[PostalCode] =
convertLatLongToPostalCode(latitude, longitude)
}
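/*
 * Illustrative sketch (not part of the original source): a concrete provider has to
 * supply the three abstract members -- the URL format string (with two %f slots for
 * latitude and longitude), a KeyValueCache instance, and the JSON parsing step.
 * The object name and URL below are hypothetical, and the cache construction depends
 * on the project's KeyValueCache API:
 *
 *   object SomeGeoDecoder extends GeoDecodingProvider {
 *     protected [this] val urlGeoDecodeFmt = "https://example.invalid/reverse?lat=%f&lon=%f"
 *     protected [this] val cacheGeoDecode = ??? // some KeyValueCache[(Double, Double), PostalCode]
 *     protected [this] def parsePostalCodeInAnswer(answerJson: String): Try[PostalCode] = ???
 *   }
 */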
| je-nunez/urban_planning_on_gtfs_traffic_congestion | src/main/scala/geodecoding/GeoDecodingProvider.scala | Scala | gpl-2.0 | 2,562 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.booleantype
import java.io.{File, PrintWriter}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
class BooleanDataTypesBigFileTest extends QueryTest with BeforeAndAfterEach with BeforeAndAfterAll {
val rootPath = new File(this.getClass.getResource("/").getPath
+ "../../../..").getCanonicalPath
override def beforeEach(): Unit = {
sql("drop table if exists boolean_table")
sql("drop table if exists boolean_table2")
sql("drop table if exists carbon_table")
sql("drop table if exists hive_table")
}
override def afterAll(): Unit = {
sql("drop table if exists boolean_table")
sql("drop table if exists boolean_table2")
sql("drop table if exists carbon_table")
sql("drop table if exists hive_table")
assert(BooleanFile.deleteFile(pathOfManyDataType))
assert(BooleanFile.deleteFile(pathOfOnlyBoolean))
}
val pathOfManyDataType = s"$rootPath/integration/spark2/src/test/resources/bool/supportBooleanBigFile.csv"
val pathOfOnlyBoolean = s"$rootPath/integration/spark2/src/test/resources/bool/supportBooleanBigFileOnlyBoolean.csv"
val trueNum = 10000
override def beforeAll(): Unit = {
assert(BooleanFile.createBooleanFileWithOtherDataType(pathOfManyDataType, trueNum))
assert(BooleanFile.createOnlyBooleanFile(pathOfOnlyBoolean, trueNum))
}
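  /*
   * Illustrative note (not part of the original source): judging from the FILEHEADER
   * options and the assertions below, each generated CSV line is expected to look
   * roughly like (values hypothetical)
   *
   *   5,true,num5,5.0,false
   *
   * with trueNum rows whose booleanField is true and trueNum/10 rows whose
   * booleanField is false.
   */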
test("Loading table: support boolean and other data type, big file") {
sql(
s"""
| CREATE TABLE boolean_table(
| intField INT,
| booleanField BOOLEAN,
| stringField STRING,
| doubleField DOUBLE,
| booleanField2 BOOLEAN
| )
| STORED BY 'carbondata'
""".stripMargin)
sql(
s"""
| LOAD DATA LOCAL INPATH '${pathOfManyDataType}'
| INTO TABLE boolean_table
| options('FILEHEADER'='intField,booleanField,stringField,doubleField,booleanField2')
""".stripMargin)
checkAnswer(
sql("select count(*) from boolean_table"),
Row(trueNum + trueNum / 10))
}
test("Inserting table: support boolean and other data type, big file") {
sql(
s"""
| CREATE TABLE boolean_table(
| intField INT,
| booleanField BOOLEAN,
| stringField STRING,
| doubleField DOUBLE,
| booleanField2 BOOLEAN
| )
| STORED BY 'carbondata'
""".stripMargin)
sql(
s"""
| CREATE TABLE boolean_table2(
| intField INT,
| booleanField BOOLEAN,
| stringField STRING,
| doubleField DOUBLE,
| booleanField2 BOOLEAN
| )
| STORED BY 'carbondata'
""".stripMargin)
sql(
s"""
| LOAD DATA LOCAL INPATH '${pathOfManyDataType}'
| INTO TABLE boolean_table
| options('FILEHEADER'='intField,booleanField,stringField,doubleField,booleanField2')
""".stripMargin)
sql("insert into boolean_table2 select * from boolean_table")
checkAnswer(
sql("select count(*) from boolean_table2"),
Row(trueNum + trueNum / 10))
}
test("Filtering table: support boolean data type, only boolean, big file") {
sql(
s"""
| CREATE TABLE boolean_table(
| booleanField BOOLEAN
| )
| STORED BY 'carbondata'
""".stripMargin)
sql(
s"""
| LOAD DATA LOCAL INPATH '${pathOfOnlyBoolean}'
| INTO TABLE boolean_table
| options('FILEHEADER'='booleanField')
""".stripMargin)
checkAnswer(
sql("select count(*) from boolean_table"),
Row(trueNum + trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField is not null"),
Row(trueNum + trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField is null"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField = true"),
Row(trueNum))
checkAnswer(
sql("select count(*) from boolean_table where booleanField >= true"),
Row(trueNum))
checkAnswer(
sql("select count(*) from boolean_table where booleanField > true"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField < true"),
Row(trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField = false"),
Row(trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField <= false"),
Row(trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField > false"),
Row(trueNum))
checkAnswer(
sql("select count(*) from boolean_table where booleanField < false"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField in (false)"),
Row(trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField not in (false)"),
Row(trueNum))
checkAnswer(
sql("select count(*) from boolean_table where booleanField in (true,false)"),
Row(trueNum + trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField not in (true,false)"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField like 'f%'"),
Row(trueNum / 10))
}
test("Filtering table: support boolean and other data type, big file") {
sql(
s"""
| CREATE TABLE boolean_table(
| intField INT,
| booleanField BOOLEAN,
| stringField STRING,
| doubleField DOUBLE,
| booleanField2 BOOLEAN
| )
| STORED BY 'carbondata'
""".stripMargin)
sql(
s"""
| LOAD DATA LOCAL INPATH '${pathOfManyDataType}'
| INTO TABLE boolean_table
| options('FILEHEADER'='intField,booleanField,stringField,doubleField,booleanField2')
""".stripMargin)
checkAnswer(
sql("select booleanField from boolean_table where intField >=1 and intField <11"),
Seq(Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true))
)
checkAnswer(
sql(s"select booleanField from boolean_table where intField >='${trueNum - 5}' and intField <=${trueNum + 1}"),
Seq(Row(true), Row(true), Row(true), Row(true), Row(true), Row(false), Row(false))
)
checkAnswer(
sql(s"select count(*) from boolean_table where intField >='${trueNum - 5}' and doubleField <=${trueNum + 1} and booleanField=false"),
Seq(Row(2))
)
checkAnswer(
sql(s"select * from boolean_table where intField >4 and doubleField < 6.0"),
Seq(Row(5, true, "num5", 5.0, false))
)
checkAnswer(
sql("select count(*) from boolean_table"),
Row(trueNum + trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField is not null"),
Row(trueNum + trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField is null"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField = true"),
Row(trueNum))
checkAnswer(
sql("select count(*) from boolean_table where booleanField >= true"),
Row(trueNum))
checkAnswer(
sql("select count(*) from boolean_table where booleanField > true"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField < true"),
Row(trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField = false"),
Row(trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField <= false"),
Row(trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField > false"),
Row(trueNum))
checkAnswer(
sql("select count(*) from boolean_table where booleanField < false"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField in (false)"),
Row(trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField not in (false)"),
Row(trueNum))
checkAnswer(
sql("select count(*) from boolean_table where booleanField in (true,false)"),
Row(trueNum + trueNum / 10))
checkAnswer(
sql("select count(*) from boolean_table where booleanField not in (true,false)"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField like 'f%'"),
Row(trueNum / 10))
}
test("Filtering table: support boolean and other data type, big file, load twice") {
sql(
s"""
| CREATE TABLE boolean_table(
| intField INT,
| booleanField BOOLEAN,
| stringField STRING,
| doubleField DOUBLE,
| booleanField2 BOOLEAN
| )
| STORED BY 'carbondata'
""".stripMargin)
val repeat: Int = 2
for (i <- 0 until repeat) {
sql(
s"""
| LOAD DATA LOCAL INPATH '${pathOfManyDataType}'
| INTO TABLE boolean_table
| options('FILEHEADER'='intField,booleanField,stringField,doubleField,booleanField2')
""".stripMargin
)
}
checkAnswer(
sql("select booleanField from boolean_table where intField >=1 and intField <11"),
Seq(Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true),
Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true))
)
checkAnswer(
sql(s"select booleanField from boolean_table where intField >='${trueNum - 5}' and intField <=${trueNum + 1}"),
Seq(Row(true), Row(true), Row(true), Row(true), Row(true), Row(false), Row(false),
Row(true), Row(true), Row(true), Row(true), Row(true), Row(false), Row(false))
)
checkAnswer(
sql(s"select count(*) from boolean_table where intField >='${trueNum - 5}' and doubleField <=${trueNum + 1} and booleanField=false"),
Seq(Row(4))
)
checkAnswer(
sql(s"select * from boolean_table where intField >4 and doubleField < 6.0"),
Seq(Row(5, true, "num5", 5.0, false), Row(5, true, "num5", 5.0, false))
)
checkAnswer(
sql("select count(*) from boolean_table"),
Row(repeat * (trueNum + trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField is not null"),
Row(repeat * (trueNum + trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField is null"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField = true"),
Row(repeat * (trueNum)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField >= true"),
Row(repeat * (trueNum)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField > true"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField < true"),
Row(repeat * (trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField = false"),
Row(repeat * (trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField <= false"),
Row(repeat * (trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField > false"),
Row(repeat * (trueNum)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField < false"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField in (false)"),
Row(repeat * (trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField not in (false)"),
Row(repeat * (trueNum)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField in (true,false)"),
Row(repeat * (trueNum + trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField not in (true,false)"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField like 'f%'"),
Row(repeat * (trueNum / 10)))
}
test("Sort_columns: support boolean and other data type, big file") {
sql(
s"""
| CREATE TABLE boolean_table(
| intField INT,
| booleanField BOOLEAN,
| stringField STRING,
| doubleField DOUBLE,
| booleanField2 BOOLEAN
| )
| STORED BY 'carbondata'
| TBLPROPERTIES('sort_columns'='booleanField')
""".stripMargin)
sql(
s"""
| LOAD DATA LOCAL INPATH '${pathOfManyDataType}'
| INTO TABLE boolean_table
| options('FILEHEADER'='intField,booleanField,stringField,doubleField,booleanField2')
""".stripMargin)
checkAnswer(
sql(s"select booleanField from boolean_table where intField >='${trueNum - 5}' and intField <=${trueNum + 1}"),
Seq(Row(true), Row(true), Row(true), Row(true), Row(true), Row(false), Row(false))
)
}
test("Inserting into Hive table from carbon table: support boolean data type and other format, big file") {
sql(
s"""
| CREATE TABLE carbon_table(
| intField INT,
| booleanField BOOLEAN,
| stringField STRING,
| doubleField DOUBLE,
| booleanField2 BOOLEAN
| )
| STORED BY 'carbondata'
""".stripMargin)
sql(
s"""
| CREATE TABLE hive_table(
| intField INT,
| booleanField BOOLEAN,
| stringField STRING,
| doubleField DOUBLE,
| booleanField2 BOOLEAN
| )
| ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
""".stripMargin)
sql(
s"""
| LOAD DATA LOCAL INPATH '${pathOfManyDataType}'
| INTO TABLE carbon_table
| options('FILEHEADER'='intField,booleanField,stringField,doubleField,booleanField2')
""".stripMargin)
sql("insert into hive_table select * from carbon_table")
checkAnswer(
sql(s"select booleanField from hive_table where intField >='${trueNum - 5}' and intField <=${trueNum + 1}"),
Seq(Row(true), Row(true), Row(true), Row(true), Row(true), Row(false), Row(false))
)
checkAnswer(
sql(s"select * from hive_table where intField >4 and doubleField < 6.0"),
Seq(Row(5, true, "num5", 5.0, false))
)
checkAnswer(
sql("select count(*) from hive_table"),
Row(trueNum + trueNum / 10))
checkAnswer(
sql("select count(*) from hive_table where booleanField = true"),
Row(trueNum))
checkAnswer(
sql("select count(*) from hive_table where booleanField = false"),
Row(trueNum / 10))
}
test("Inserting into carbon table from Hive table: support boolean data type and other format, big file") {
sql(
s"""
| CREATE TABLE hive_table(
| intField INT,
| booleanField BOOLEAN,
| stringField STRING,
| doubleField DOUBLE,
| booleanField2 BOOLEAN
| )
| ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
""".stripMargin)
sql(
s"""
| CREATE TABLE carbon_table(
| intField INT,
| booleanField BOOLEAN,
| stringField STRING,
| doubleField DOUBLE,
| booleanField2 BOOLEAN
| )
| STORED BY 'carbondata'
""".stripMargin)
sql(
s"""
| LOAD DATA LOCAL INPATH '${pathOfManyDataType}'
| INTO TABLE hive_table
""".stripMargin)
sql("insert into carbon_table select * from hive_table")
checkAnswer(
sql(s"select booleanField from carbon_table where intField >='${trueNum - 5}' and intField <=${trueNum + 1}"),
Seq(Row(true), Row(true), Row(true), Row(true), Row(true), Row(false), Row(false))
)
checkAnswer(
sql(s"select * from carbon_table where intField >4 and doubleField < 6.0"),
Seq(Row(5, true, "num5", 5.0, false))
)
checkAnswer(
sql("select count(*) from carbon_table"),
Row(trueNum + trueNum / 10))
checkAnswer(
sql("select count(*) from carbon_table where booleanField = true"),
Row(trueNum))
checkAnswer(
sql("select count(*) from carbon_table where booleanField = false"),
Row(trueNum / 10))
}
test("Filtering table: unsafe, support boolean and other data type, big file, load twice") {
initConf()
sql(
s"""
| CREATE TABLE boolean_table(
| intField INT,
| booleanField BOOLEAN,
| stringField STRING,
| doubleField DOUBLE,
| booleanField2 BOOLEAN
| )
| STORED BY 'carbondata'
""".stripMargin)
val repeat: Int = 2
for (i <- 0 until repeat) {
sql(
s"""
| LOAD DATA LOCAL INPATH '${pathOfManyDataType}'
| INTO TABLE boolean_table
| options('FILEHEADER'='intField,booleanField,stringField,doubleField,booleanField2')
""".stripMargin
)
}
checkAnswer(
sql("select booleanField from boolean_table where intField >=1 and intField <11"),
Seq(Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true),
Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true), Row(true))
)
checkAnswer(
sql(s"select booleanField from boolean_table where intField >='${trueNum - 5}' and intField <=${trueNum + 1}"),
Seq(Row(true), Row(true), Row(true), Row(true), Row(true), Row(false), Row(false),
Row(true), Row(true), Row(true), Row(true), Row(true), Row(false), Row(false))
)
checkAnswer(
sql(s"select count(*) from boolean_table where intField >='${trueNum - 5}' and doubleField <=${trueNum + 1} and booleanField=false"),
Seq(Row(4))
)
checkAnswer(
sql(s"select * from boolean_table where intField >4 and doubleField < 6.0"),
Seq(Row(5, true, "num5", 5.0, false), Row(5, true, "num5", 5.0, false))
)
checkAnswer(
sql("select count(*) from boolean_table"),
Row(repeat * (trueNum + trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField is not null"),
Row(repeat * (trueNum + trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField is null"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField = true"),
Row(repeat * (trueNum)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField >= true"),
Row(repeat * (trueNum)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField > true"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField < true"),
Row(repeat * (trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField = false"),
Row(repeat * (trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField <= false"),
Row(repeat * (trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField > false"),
Row(repeat * (trueNum)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField < false"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField in (false)"),
Row(repeat * (trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField not in (false)"),
Row(repeat * (trueNum)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField in (true,false)"),
Row(repeat * (trueNum + trueNum / 10)))
checkAnswer(
sql("select count(*) from boolean_table where booleanField not in (true,false)"),
Row(0))
checkAnswer(
sql("select count(*) from boolean_table where booleanField like 'f%'"),
Row(repeat * (trueNum / 10)))
defaultConf()
}
def initConf(): Unit = {
CarbonProperties.getInstance().
addProperty(CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE,
"true")
}
def defaultConf(): Unit = {
CarbonProperties.getInstance().
addProperty(CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE,
CarbonCommonConstants.ENABLE_DATA_LOADING_STATISTICS_DEFAULT)
}
}
object BooleanFile {
def createBooleanFileWithOtherDataType(path: String, trueLines: Int): Boolean = {
try {
val write = new PrintWriter(path)
var d: Double = 0.0
for (i <- 0 until trueLines) {
write.println(i + "," + true + ",num" + i + "," + d + "," + false)
d = d + 1
}
for (i <- 0 until trueLines / 10) {
write.println((trueLines + i) + "," + false + ",num" + (trueLines + i) + "," + d + "," + true)
d = d + 1
}
write.close()
} catch {
case _: Exception => assert(false)
}
return true
}
def deleteFile(path: String): Boolean = {
try {
val file = new File(path)
file.delete()
} catch {
case _: Exception => assert(false)
}
return true
}
def createOnlyBooleanFile(path: String, num: Int): Boolean = {
try {
val write = new PrintWriter(path)
for (i <- 0 until num) {
write.println(true)
}
for (i <- 0 until num / 10) {
write.println(false)
}
write.close()
} catch {
case _: Exception => assert(false)
}
return true
}
}
| sgururajshetty/carbondata | integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesBigFileTest.scala | Scala | apache-2.0 | 22,936 |
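// Editor's note: hypothetical, self-contained sketch (not part of the CarbonData test sources above).
// It mirrors BooleanFile.createBooleanFileWithOtherDataType to make the CSV layout behind
// FILEHEADER 'intField,booleanField,stringField,doubleField,booleanField2' explicit, and to show why
// every load adds trueNum + trueNum / 10 rows (trueNum rows with booleanField=true, then a tenth as
// many with booleanField=false). The object name and the tiny row count are made up for illustration.
object BooleanCsvLayoutSketch extends App {
  val trueLines = 10 // stand-in for trueNum = 10000 in the test above
  var d: Double = 0.0
  for (i <- 0 until trueLines) {      // rows with booleanField = true, booleanField2 = false
    println(s"$i,true,num$i,$d,false")
    d += 1
  }
  for (i <- 0 until trueLines / 10) { // rows with booleanField = false, booleanField2 = true
    println(s"${trueLines + i},false,num${trueLines + i},$d,true")
    d += 1
  }
}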
package dao.sitedata
import scala.concurrent.Future
import javax.inject.Inject
import play.api.db.slick.DatabaseConfigProvider
import play.api.db.slick.HasDatabaseConfigProvider
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import slick.driver.JdbcProfile
import slick.jdbc.GetResult
import models.sitedata.SiteInfo
import models.sitedata.SiteInfoDetail
import play.db.NamedDatabase
import play.api.Logger
import org.joda.time.DateTime
import java.sql.Timestamp
import com.github.tototoshi.slick.PostgresJodaSupport._
trait ISiteInfoDao extends BaseDao2[SiteInfo]{
def findAll(): Future[Seq[SiteInfo]]
def findById(id: String): Future[Option[SiteInfo]]
def findByModalityId(id: Long): Future[Option[Seq[SiteInfo]]]
def findByLineTypeId(id: Int): Future[Option[Seq[SiteInfo]]]
def remove(id: String): Future[Int]
def insert(p: SiteInfo): Future[Unit]
def update(p2: SiteInfo): Future[Unit]
}
class SiteInfoDao @Inject()(@NamedDatabase("SiteData") protected val dbConfigProvider: DatabaseConfigProvider)
extends HasDatabaseConfigProvider[JdbcProfile] with ISiteInfoDao {
// import driver.api._
import com.typesafe.slick.driver.ms.SQLServerDriver.api._
class SiteInfoTable(tag: Tag)
extends Table[SiteInfo](tag, models.sitedata.SiteInfoDef.toTable) {
def siteid = column[String]("SiteID", O.PrimaryKey)
def sitename = column[String]("SiteName")
def sitenamekana = column[Option[String]]("SiteNameKana")
def subzoneid = column[Long]("SubZoneID")
// def mailaddress = column[Option[String]]("MaileAddress")
def linetypeid = column[Int]("LineTypeID")
def equipmentmodelid = column[Long]("EquipmentModelID")
def supplement = column[Option[String]]("Supplement")
def spipaddress = column[Option[String]]("SPIPaddress")
def equipmentipaddress = column[Option[String]]("EquipmentIPaddress")
def routeripaddress = column[Option[String]]("RouterIPaddress")
// def phonenumber = column[Option[String]]("PhoneNumber")
// def phoneentry = column[Option[String]]("PhoneEntry")
// def phoneoutnumber = column[Option[String]]("PhoneOutNumber")
// def subaddress = column[Option[String]]("SubAddress")
// def subaddressseparator = column[Option[String]]("SubAddressSeparator")
// def rasloginname = column[Option[String]]("RASLoginName")
// def rasloginpassword = column[Option[String]]("RASLoginPassword")
// def custom = column[Boolean]("Custom")
def note = column[Option[String]]("Note")
// def defaultsite = column[Boolean]("DefaultSite")
// def pcanywhereremotefile = column[Option[java.sql.Types.BLOB]]("PcAnywhereRemoteFile")
// def lastmodifiedtime = column[DateTime]("LastModifiedTime")
// def lastmodifier = column[String]("LastModifier")
// def modifiedtimestamp = column[java.sql.Types.TIMESTAMP]("ModifiedTImeStamp")
// def noautocruisesite = column[Boolean]("NoAutoCruiseSite")
// def securitysite = column[Boolean]("SecuritySite")
// def securitysitemessage = column[String]("SecuritySiteMessage")
def axedasite = column[Boolean]("AxedaSite")
def * = (
siteid,
sitename,
// sitenamekana,
subzoneid,
// mailaddress,
equipmentmodelid,
supplement,
spipaddress,
equipmentipaddress,
routeripaddress,
// phonenumber,
// phoneentry,
// phoneoutnumber,
// subaddress,
// subaddressseparator,
// rasloginname,
// rasloginpassword,
// custom,
note,
// defaultsite,
// pcanywhereremotefile,
// lastmodifiedtime,
// lastmodifier,
// modifiedtimestamp,
// noautocruisesite,
// securitysite,
// securitysitemessage,
axedasite
) <> (SiteInfo.tupled, SiteInfo.unapply _)
}
lazy val sourcefilename = new Exception().getStackTrace.head.getFileName
override def toTable = {
Logger.info(sourcefilename + " toTable called.")
TableQuery[SiteInfoTable]
}
private val Sites = toTable()
override def findAll(): Future[Seq[SiteInfo]] = {
Logger.info(sourcefilename + " findAll called.")
// db.run(Sites.take(1000).result)
db.run(Sites.result)
}
override def findById(id: String): Future[Option[SiteInfo]] = {
Logger.info("(" + sourcefilename + ")" + " findById(" + id + ") called.")
db.run(Sites.filter( _.siteid === id).result.headOption)
}
override def findByModalityId(modalityid: Long): Future[Option[Seq[SiteInfo]]] = {
Logger.info("(" + sourcefilename + ")" + " findByModalitryId() called.")
val query = sql"{call findByModalityId($modalityid)}".as[SiteInfo]
db.run(query).map(x => Option(x))
}
override def findByLineTypeId(id: Int): Future[Option[Seq[SiteInfo]]] = {
db.run(Sites.filter( _.linetypeid === id).result).map(x => Option(x))
}
override def remove(id: String): Future[Int] = {
Logger.info(sourcefilename + " remove(" + id + ") called.")
// db.run(Sites.filter( _.siteid === id).delete)
Future(1)
}
override def insert(p: SiteInfo): Future[Unit] = {
Logger.info(sourcefilename + " insert(" + p.siteid + ") called.")
db.run(Sites += p).map { _ => () }
}
override def update(p2: SiteInfo) = Future[Unit] {
Logger.info(sourcefilename + " update(" + p2.siteid + ") called.")
db.run(
Sites.filter(_.siteid === p2.siteid)
// .map(p => (p.name,p.details, p.price))
.map(p => (p.sitename))
// .update((p2.name,p2.details,p2.price))
.update((p2.sitename))
)
}
implicit val getSiteInfoResult = GetResult(
r => SiteInfo(
r.<<, // siteid: String,
r.<<, // sitename: String,
r.<<, // subzoneid: Long,
r.<<, // equipmentmodelid: Long,
r.<<, // supplement: Option[String],
r.<<, // spipaddress: Option[String],
r.<<, // equipmentipaddress: Option[String],
r.<<, // routeripaddress: Option[String],
r.<<, // note: Option[String],
r.<< // axedasite: Boolean
)
)
}
| tnddn/iv-web | portal/rest-portal/app/dao/sitedata/SiteInfoDao.scala | Scala | apache-2.0 | 5,971 |
/*
* Copyright (C) 2007-2008 Artima, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Example code from:
*
* Programming in Scala (First Edition, Version 6)
* by Martin Odersky, Lex Spoon, Bill Venners
*
* http://booksites.artima.com/programming_in_scala
*/
object InsertionSort2 {
def isort(xs: List[Int]): List[Int] = xs match {
case List() => List()
case x :: xs1 => insert(x, isort(xs1))
}
def insert(x: Int, xs: List[Int]): List[Int] = xs match {
case List() => List(x)
case y :: ys => if (x <= y) x :: xs
else y :: insert(x, ys)
}
def main(args: Array[String]) {
println("isort(List(5, 3, 12)) [" + isort(List(5, 3, 12)) + "]")
}
}
| peachyy/scalastu | lists/InsertionSort2.scala | Scala | apache-2.0 | 1,250 |
/*
* Copyright (c) 2016. Fengguo (Hugo) Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.cit.intellij.jawa.lang.refactoring.util
import com.intellij.openapi.util.text.StringUtil
import com.intellij.psi._
import org.argus.cit.intellij.jawa.lang.lexer.{JawaLexerAdapter, JawaTokenTypes}
import org.argus.cit.intellij.jawa.lang.psi.{JawaPsiUtil, JawaElementTypes}
import org.argus.cit.intellij.jawa.lang.psi.api.toplevel.JawaNamedElement
/**
* @author <a href="mailto:[email protected]">Fengguo Wei</a>
*/
object JawaNamesUtil {
val keywordNames = JawaTokenTypes.KEYWORDS.getTypes.map(_.toString).toSet
private val lexerCache = new ThreadLocal[JawaLexerAdapter] {
override def initialValue(): JawaLexerAdapter = new JawaLexerAdapter()
}
private def checkGeneric(text: String, predicate: JawaLexerAdapter => Boolean): Boolean = {
if (text == null || text == "") return false
val lexer = lexerCache.get()
lexer.start(text, 0, text.length(), 0)
if (!predicate(lexer)) return false
lexer.advance()
lexer.getTokenType == null
}
def isOpCharacter(c : Char) : Boolean = {
c match {
case '~' | '!' | '@' | '#' | '%' | '^' | '*' | '+' | '-' | '<' | '>' | '?' | ':' | '=' | '&' | '|' | '/' | '\\' =>
true
case ch =>
Character.getType(ch) == Character.MATH_SYMBOL.toInt || Character.getType(ch) == Character.OTHER_SYMBOL.toInt
}
}
def isIdentifier(text: String): Boolean = {
checkGeneric(text, lexer => lexer.getTokenType == JawaElementTypes.ID)
}
def isQualifiedName(text: String): Boolean = {
if (StringUtil.isEmpty(text)) return false
text.split('.').forall(isIdentifier)
}
def isKeyword(text: String): Boolean = keywordNames.contains(text)
def isOperatorName(text: String): Boolean = isIdentifier(text) && isOpCharacter(text(0))
// def jawaName(element: PsiElement) = element match {
// case jawaNamed: JawaNamedElement => jawaNamed.name
// case psiNamed: PsiNamedElement => psiNamed.getName
// }
//
// def qualifiedName(named: PsiNamedElement): Option[String] = {
// JawaPsiUtil.nameContext(named) match {
// case clazz: PsiClass => Some(clazz.qualifiedName)
// case memb: PsiMember =>
// val containingClass = memb.containingClass
// if (containingClass != null && containingClass.qualifiedName != null && memb.hasModifierProperty(PsiModifier.STATIC)) {
// Some(Seq(containingClass.qualifiedName, named.name).filter(_ != "").mkString("."))
// } else None
// case _ => None
// }
// }
// object isBackticked {
// def unapply(named: JawaNamedElement): Option[String] = {
// val name = named.name
// isBacktickedName.unapply(name)
// }
// }
object isBacktickedName {
def unapply(name: String): Option[String] = {
if (name == null || name.isEmpty) None
else if (name != "`" && name.startsWith("`") && name.endsWith("`")) Some(name.substring(1, name.length - 1))
else None
}
}
def splitName(name: String): Seq[String] = {
if (name == null || name.isEmpty) Seq.empty
else if (name.contains(".")) name.split("\\\\.")
else Seq(name)
}
def toJavaName(name: String) = {
name match {
case isBacktickedName(s) => s
case _ => name
}
}
def clean(name: String): String = {
name match {
case isBacktickedName(s) => s
case _ => name
}
}
def cleanFqn(fqn: String): String =
splitName(fqn).map(clean).mkString(".")
def equivalentFqn(l: String, r: String): Boolean =
l == r || cleanFqn(l) == cleanFqn(r)
def equivalent(l: String, r: String): Boolean =
l == r || clean(l) == clean(r)
def escapeKeywordsFqn(fqn: String): String =
splitName(fqn).map(escapeKeyword).mkString(".")
def escapeKeyword(s: String): String =
if (isKeyword(s)) s"`$s`" else s
}
| arguslab/argus-cit-intellij | src/main/scala/org/argus/cit/intellij/jawa/lang/refactoring/util/JawaNamesUtil.scala | Scala | epl-1.0 | 4,148 |
/*
Deduction Tactics
Copyright (C) 2012-2015 Raymond Dodge
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.rayrobdod.boardGame.javafxView
import java.util.concurrent.CountDownLatch
import org.scalatest.Tag
object FxTest extends Tag("com.rayrobdod.boardGame.javafxView.FxTests")
object InitializeFx {
private[this] var _isSetup:Boolean = false
def setup():Unit = {
val latch = new CountDownLatch(1);
javax.swing.SwingUtilities.invokeLater(
new Runnable() {
override def run() {
new javafx.embed.swing.JFXPanel()
latch.countDown()
}
}
)
latch.await()
_isSetup = true
}
def isSetup:Boolean = _isSetup
} | rayrobdod/boardGame | ViewJavaFx/src/test/scala/InitializeFx.scala | Scala | gpl-3.0 | 1,239 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.laws._
import cats.laws.discipline._
import monix.eval.{Coeval, Task}
import monix.execution.exceptions.DummyException
import monix.tail.batches.BatchCursor
import scala.util.{Failure, Success}
object IterantFlatMapSuite extends BaseTestSuite {
test("Iterant[Task].flatMap equivalence with List.flatMap") { implicit s =>
check2 { (stream: Iterant[Task, Int], f: Int => List[Long]) =>
val result = stream.flatMap(x => Iterant[Task].fromList(f(x))).toListL
val expected = stream.toListL.map((list: List[Int]) => list.flatMap(f))
result <-> expected
}
}
test("Iterant[Task].flatMap can handle errors") { implicit s =>
val dummy = DummyException("dummy")
val stream = Iterant[Task].raiseError[Int](dummy)
assertEquals(stream, stream.flatMap(x => Iterant[Task].of(x)))
}
test("Iterant[Task].next.flatMap guards against direct user code errors") { implicit s =>
val dummy = DummyException("dummy")
var isCanceled = false
val stream = Iterant[Task]
.nextS(1, Task.evalAsync(Iterant[Task].empty[Int]))
.guarantee(Task.evalAsync { isCanceled = true })
val result = stream.flatMap[Int](_ => throw dummy).toListL.runToFuture
s.tick()
assertEquals(result.value, Some(Failure(dummy)))
assert(isCanceled, "isCanceled should be true")
}
test("Iterant[Task].nextCursor.flatMap guards against direct user code errors") { implicit s =>
val dummy = DummyException("dummy")
var isCanceled = false
val stream = Iterant[Task]
.nextCursorS(BatchCursor(1, 2, 3), Task.evalAsync(Iterant[Task].empty[Int]))
.guarantee(Task.evalAsync { isCanceled = true })
val result = stream.flatMap[Int](_ => throw dummy).toListL.runToFuture
s.tick()
assertEquals(result.value, Some(Failure(dummy)))
assert(isCanceled, "isCanceled should be true")
}
test("Iterant[Task].next.flatMap chains stop") { implicit s =>
var effects = Vector.empty[Int]
val stop1T = Task.eval { effects = effects :+ 1 }
val stream1: Iterant[Task, Int] =
Iterant[Task].nextS(1, Task.now(Iterant[Task].haltS[Int](None))).guarantee(stop1T)
val stop2T = Task.eval { effects = effects :+ 2 }
val stream2: Iterant[Task, Int] =
Iterant[Task].nextS(2, Task.now(Iterant[Task].haltS[Int](None))).guarantee(stop2T)
val stop3T = Task.eval { effects = effects :+ 3 }
val stream3: Iterant[Task, Int] =
Iterant[Task].nextS(3, Task.now(Iterant[Task].haltS[Int](None))).guarantee(stop3T)
val composed =
for (x <- stream1; y <- stream2; z <- stream3)
yield x + y + z
val result = composed.headOptionL.runToFuture; s.tick()
assertEquals(result.value, Some(Success(Some(6))))
assertEquals(effects, Vector(3, 2, 1))
}
test("Iterant[Task].nextCursor.flatMap works for large lists") { implicit s =>
val count = 100000
val list = (0 until count).toList
val sumTask = Iterant[Task]
.fromList(list)
.flatMap(x => Iterant[Task].fromList(List(x, x, x)))
.foldLeftL(0L)(_ + _)
val f = sumTask.runToFuture; s.tick()
assertEquals(f.value, Some(Success(3 * (count.toLong * (count - 1) / 2))))
}
test("Iterant[Task].flatMap should protect against indirect user errors") { implicit s =>
check2 { (l: List[Int], idx: Int) =>
val dummy = DummyException("dummy")
val list = if (l.isEmpty) List(1) else l
val source = arbitraryListToIterant[Task, Int](list, idx)
val received = source.flatMap(_ => Iterant[Task].raiseError[Int](dummy))
received <-> Iterant[Task].haltS[Int](Some(dummy))
}
}
test("Iterant[Task].flatMap should protect against direct exceptions") { implicit s =>
check2 { (l: List[Int], idx: Int) =>
val dummy = DummyException("dummy")
val list = if (l.isEmpty) List(1) else l
val source = arbitraryListToIterant[Task, Int](list, idx)
val received = source.flatMap[Int](_ => throw dummy)
received <-> Iterant[Task].haltS[Int](Some(dummy))
}
}
test("Iterant[Task].flatMap should protect against broken batches") { implicit s =>
check1 { (prefix: Iterant[Task, Int]) =>
val dummy = DummyException("dummy")
val cursor = new ThrowExceptionCursor(dummy)
val error = Iterant[Task].nextCursorS(cursor, Task.now(Iterant[Task].empty[Int]))
val stream = (prefix.onErrorIgnore ++ error).flatMap(x => Iterant[Task].now(x))
stream <-> prefix.onErrorIgnore ++ Iterant[Task].haltS[Int](Some(dummy))
}
}
test("Iterant[Task].flatMap should protect against broken generators") { implicit s =>
check1 { (prefix: Iterant[Task, Int]) =>
val dummy = DummyException("dummy")
val generator = new ThrowExceptionBatch(dummy)
val error = Iterant[Task].nextBatchS(generator, Task.now(Iterant[Task].empty[Int]))
val stream = (prefix.onErrorIgnore ++ error).flatMap(x => Iterant[Task].now(x))
stream <-> prefix.onErrorIgnore ++ Iterant[Task].haltS[Int](Some(dummy))
}
}
test("Iterant[Coeval].flatMap equivalence with List.flatMap") { implicit s =>
check2 { (stream: Iterant[Coeval, Int], f: Int => List[Long]) =>
val result = stream.flatMap(x => Iterant[Coeval].fromList(f(x))).toListL
val expected = stream.toListL.map((list: List[Int]) => list.flatMap(f))
result <-> expected
}
}
test("Iterant[Coeval].flatMap can handle errors") { implicit s =>
val dummy = DummyException("dummy")
val stream = Iterant[Coeval].raiseError[Int](dummy)
assertEquals(stream, stream.flatMap(x => Iterant[Coeval].pure(x)))
}
test("Iterant[Coeval].next.flatMap guards against direct user code errors") { _ =>
val dummy = DummyException("dummy")
var isCanceled = false
val stream = Iterant[Coeval].nextS(1, Coeval(Iterant[Coeval].empty[Int])).guarantee(Coeval { isCanceled = true })
val result = stream.flatMap[Int](_ => throw dummy).toListL.runTry()
assertEquals(result, Failure(dummy))
assert(isCanceled, "isCanceled should be true")
}
test("Iterant[Coeval].nextCursor.flatMap guards against direct user code errors") { _ =>
val dummy = DummyException("dummy")
var isCanceled = false
val stream = Iterant[Coeval]
.nextCursorS(BatchCursor(1, 2, 3), Coeval(Iterant[Coeval].empty[Int]))
.guarantee(Coeval { isCanceled = true })
val result = stream.flatMap[Int](_ => throw dummy).toListL.runTry()
assertEquals(result, Failure(dummy))
assert(isCanceled, "isCanceled should be true")
}
test("Iterant[Coeval].next.flatMap chains stop") { implicit s =>
var effects = Vector.empty[Int]
val stop1T = Coeval.eval { effects = effects :+ 1 }
val stream1: Iterant[Coeval, Int] =
Iterant[Coeval].nextS(1, Coeval.now(Iterant[Coeval].haltS[Int](None))).guarantee(stop1T)
val stop2T = Coeval.eval { effects = effects :+ 2 }
val stream2: Iterant[Coeval, Int] =
Iterant[Coeval].nextS(2, Coeval.now(Iterant[Coeval].haltS[Int](None))).guarantee(stop2T)
val stop3T = Coeval.eval { effects = effects :+ 3 }
val stream3: Iterant[Coeval, Int] =
Iterant[Coeval].nextS(3, Coeval.now(Iterant[Coeval].haltS[Int](None))).guarantee(stop3T)
val composed =
for (x <- stream1; y <- stream2; z <- stream3)
yield x + y + z
assertEquals(composed.headOptionL.value(), Some(6))
assertEquals(effects, Vector(3, 2, 1))
}
test("Iterant[Coeval].flatMap should protect against indirect user errors") { implicit s =>
check2 { (l: List[Int], idx: Int) =>
val dummy = DummyException("dummy")
val list = if (l.isEmpty) List(1) else l
val source = arbitraryListToIterant[Coeval, Int](list, idx, allowErrors = false)
val received = source.flatMap(_ => Iterant[Coeval].raiseError[Int](dummy))
received <-> Iterant[Coeval].haltS[Int](Some(dummy))
}
}
test("Iterant[Coeval].flatMap should protect against direct exceptions") { implicit s =>
check2 { (l: List[Int], idx: Int) =>
val dummy = DummyException("dummy")
val list = if (l.isEmpty) List(1) else l
val source = arbitraryListToIterant[Coeval, Int](list, idx).onErrorIgnore
val received = source.flatMap[Int](_ => throw dummy)
received <-> Iterant[Coeval].haltS[Int](Some(dummy))
}
}
test("Iterant[Coeval].flatMap should protect against broken batches") { implicit s =>
check1 { (prefix: Iterant[Coeval, Int]) =>
val dummy = DummyException("dummy")
val cursor = new ThrowExceptionCursor(dummy)
val error = Iterant[Coeval].nextCursorS(cursor, Coeval.now(Iterant[Coeval].empty[Int]))
val stream = (prefix.onErrorIgnore ++ error).flatMap(x => Iterant[Coeval].now(x))
stream <-> prefix.onErrorIgnore ++ Iterant[Coeval].haltS[Int](Some(dummy))
}
}
test("Iterant[Coeval].flatMap should protect against broken generators") { implicit s =>
check1 { (prefix: Iterant[Coeval, Int]) =>
val dummy = DummyException("dummy")
val cursor = new ThrowExceptionBatch(dummy)
val error = Iterant[Coeval].nextBatchS(cursor, Coeval.now(Iterant[Coeval].empty[Int]))
val stream = (prefix ++ error).flatMap(x => Iterant[Coeval].now(x))
stream <-> prefix ++ Iterant[Coeval].haltS[Int](Some(dummy))
}
}
test("Iterant.unsafeFlatMap <-> flatMap for pure iterants") { implicit s =>
check2 { (iter: Iterant[Coeval, Int], f: Int => Iterant[Coeval, Int]) =>
iter.unsafeFlatMap(f) <-> iter.flatMap(f)
}
}
test("Iterant.concatMap is alias of flatMap") { implicit s =>
check2 { (iter: Iterant[Coeval, Int], f: Int => Iterant[Coeval, Int]) =>
iter.flatMap(f) <-> iter.concatMap(f)
}
}
test("Iterant.concat is alias of flatten") { implicit s =>
check2 { (iter: Iterant[Coeval, Int], f: Int => Iterant[Coeval, Int]) =>
iter.map(f).flatten <-> iter.map(f).concat
}
}
test("fa.map(f).flatten <-> fa.flatMap(f)") { implicit s =>
check2 { (iter: Iterant[Coeval, Int], f: Int => Iterant[Coeval, Int]) =>
iter.map(f).flatten <-> iter.flatMap(f)
}
}
}
| monifu/monifu | monix-tail/shared/src/test/scala/monix/tail/IterantFlatMapSuite.scala | Scala | apache-2.0 | 10,769 |
/*
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*/
package org.locationtech.geomesa.accumulo.stats
import java.io.Serializable
import java.util.{Map => jMap}
import org.apache.accumulo.core.client.Connector
import org.locationtech.geomesa.accumulo.data.AccumuloDataStoreFactory
import org.locationtech.geomesa.security.AuditProvider
import scala.collection.JavaConversions._
class ParamsAuditProvider extends AuditProvider {
private var id = "unknown"
override def getCurrentUserId: String = id
override val getCurrentUserDetails: jMap[AnyRef, AnyRef] = Map.empty[AnyRef, AnyRef]
override def configure(params: jMap[String, Serializable]): Unit = {
import AccumuloDataStoreFactory.params._
val user = if (params.containsKey(connParam.key)) {
connParam.lookUp(params).asInstanceOf[Connector].whoami()
} else if (params.containsKey(userParam.key)) {
userParam.lookUp(params).asInstanceOf[String]
} else {
null
}
if (user != null) {
id = s"accumulo[$user]"
}
}
}
| vpipkt/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/stats/ParamsAuditProvider.scala | Scala | apache-2.0 | 1,301 |
package org.scaladebugger.docs.layouts.partials.common.vendor
import scalatags.Text.all._
/**
* Represents a <script ... > containing clipboard.js.
*/
object ClipboardJS {
def apply(): Modifier = {
script(src := "/scripts/vendor/clipboard/clipboard.min.js")
}
}
| chipsenkbeil/scala-debugger | scala-debugger-docs/src/main/scala/org/scaladebugger/docs/layouts/partials/common/vendor/ClipboardJS.scala | Scala | apache-2.0 | 274 |
package scalax.chart
/** Mixin for charts which may display labels.
*
* @tparam G type of label generator
*/
private[chart] trait Labels[G] {
/** Optionally returns this charts label generator. */
def labelGenerator: Option[G]
/** Sets this charts label generator. */
def labelGenerator_=(generator: Option[G]): Unit
/** Sets this charts label generator. */
final def labelGenerator_=(generator: G): Unit = {
labelGenerator = Option(generator)
}
}
| wookietreiber/scala-chart | src/main/scala/scalax/chart/Labels.scala | Scala | lgpl-3.0 | 477 |
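// Editor's note: hypothetical sketch, not part of scala-chart. Because Labels is private[chart], an
// implementation has to live inside the scalax.chart package; the class name and the Int => String
// generator type below are invented purely to show how the Option-based getter/setter pair is meant
// to be mixed in (the non-Option setter overload defined in Labels then comes for free).
package scalax.chart

class LabeledDummy extends Labels[Int => String] {
  private var gen: Option[Int => String] = None
  def labelGenerator: Option[Int => String] = gen
  def labelGenerator_=(generator: Option[Int => String]): Unit = gen = generator
}

// usage (assignment syntax goes through the final G overload):
//   val d = new LabeledDummy
//   d.labelGenerator = (i: Int) => s"label $i"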
// Creating class called ChecksumAccumulator
// Classes in Scala cannot contain static methods.
class ChecksumAccumulator{
// Variables inside classes are called fields
private var sum = 0
// Defining two methods: add and checksum
def add(b: Byte): Unit = sum += b
def checksum(): Int = ~(sum & 0xFF) + 1
}
object main extends App{
var ca = new ChecksumAccumulator
ca.add("1".toByte)
println(ca.checksum())
} | arcyfelix/Courses | 18-10-18-Programming-in-Scala-by-Martin-Odersky-Lex-Spoon-and-Bill-Venners/04-ClassesFieldsAndMethods/src/main.scala | Scala | apache-2.0 | 424 |
package charactor.core.model.objects.charactor.parts
import charactor.core.model.objects.charactor.Charactor
import charactor.core.messages.AttackMessage
import actors.OutputChannel
object FightStrategy
{
}
class FightStrategy
{
def fight(owner: Charactor, target: OutputChannel[Any])
{
target ! new AttackMessage(owner.energy.Value);
}
}
| PiotrTrzpil/charactor | src/charactor/core/model/objects/charactor/parts/FightStrategy.scala | Scala | apache-2.0 | 348 |
package com.harrys.hyppo.worker.actor.queue
import com.harrys.hyppo.source.api.PersistingSemantics
import com.harrys.hyppo.worker.api.proto.{PersistProcessedDataRequest, WorkerInput}
import com.rabbitmq.client.Channel
import scala.util.Try
/**
* Created by jpetty on 11/6/15.
*/
final case class WorkQueueExecution
(
channel: Channel,
headers: QueueItemHeaders,
input: WorkerInput,
leases: AcquiredResourceLeases
) {
val idempotent: Boolean = input match {
case p: PersistProcessedDataRequest if p.integration.details.persistingSemantics == PersistingSemantics.Unsafe =>
false
case _ => true
}
def tryWithChannel[T](action: (Channel) => T) : Try[T] = {
Try(withChannel(action))
}
def withChannel[T](action: (Channel) => T) : T = this.synchronized {
action(channel)
}
}
| harrystech/hyppo-worker | worker/src/main/scala/com/harrys/hyppo/worker/actor/queue/WorkQueueExecution.scala | Scala | mit | 828 |
import sbt._
import Keys._
import bintray.Plugin._
object FirkinBuild extends Build {
val VERSION = "0.3.0"
lazy val common = project settings(commonSettings: _*)
lazy val server = project settings(serverSettings : _*) dependsOn(common, client)
lazy val client = project settings(clientSettings: _*)
lazy val root = (project in file(".")).aggregate(common, client)
lazy val serverRun = taskKey[Unit]("Run a Firkin server.")
def baseSettings = Defaults.defaultSettings ++ Seq(
organization := "com.freevariable",
version := VERSION,
resolvers ++= Seq(
"Akka Repo" at "http://repo.akka.io/repository",
"spray" at "http://repo.spray.io/",
"Will's bintray" at "https://dl.bintray.com/willb/maven/"
),
crossScalaVersions := Seq(SCALA_210_VERSION, SCALA_211_VERSION),
licenses += ("Apache-2.0", url("http://opensource.org/licenses/Apache-2.0")),
scalacOptions ++= Seq("-feature", "-Yrepl-sync", "-target:jvm-1.7", "-Xlint")
) ++ bintraySettings
def jsonSettings = Seq(
libraryDependencies ++= Seq(
"org.json4s" %% "json4s-jackson" % JSON4S_VERSION,
"org.json4s" %% "json4s-ext" % JSON4S_VERSION
)
)
def colossusSettings = Seq(
libraryDependencies ++= Seq(
"com.tumblr" %% "colossus" % "0.6.5",
"com.typesafe.akka" %% "akka-actor" % AKKA_VERSION,
"com.typesafe.akka" %% "akka-agent" % AKKA_VERSION,
"com.typesafe.akka" %% "akka-testkit" % AKKA_VERSION
)
)
def commonSettings = baseSettings ++ colossusSettings ++ jsonSettings ++ Seq(
name := "firkin",
crossScalaVersions := Seq(SCALA_210_VERSION, SCALA_211_VERSION)
)
def clientSettings = baseSettings ++ jsonSettings ++ Seq(
name := "firkin-client",
crossScalaVersions := Seq(SCALA_210_VERSION, SCALA_211_VERSION),
libraryDependencies ++= Seq(
"net.databinder.dispatch" %% "dispatch-core" % "0.11.1"
)
)
def serverSettings = commonSettings ++ Seq(
name := "firkin-server",
initialCommands in console := """
import com.freevariable.firkin.Firkin
val cache = Firkin.basicStart
"""
)
val SCALA_210_VERSION = "2.10.4"
val SCALA_211_VERSION = "2.11.5"
val JSON4S_VERSION = "3.2.10"
val AKKA_VERSION = "2.3.9"
}
| willb/firkin | project/Build.scala | Scala | apache-2.0 | 2,295 |
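// Editor's note: hypothetical downstream usage of what this build publishes (organization
// "com.freevariable", version 0.3.0, cross-built for Scala 2.10/2.11; the "firkin-client" module name
// comes from clientSettings above). A consuming project's build.sbt might contain:
resolvers += "Will's bintray" at "https://dl.bintray.com/willb/maven/"
libraryDependencies += "com.freevariable" %% "firkin-client" % "0.3.0"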
package com.dekayd.pyramidcode.tests
import com.dekayd.pyramidcode.utils.CoordinateUtils._
import org.junit.Test
import com.dekayd.pyramidcode.coordinates.DegreesMinutesSeconds
import com.dekayd.pyramidcode.coordinates.Degrees
import com.typesafe.scalalogging.slf4j.LazyLogging
/**
* @author kelly
*/
class CoordinateUtilsTest extends LazyLogging{
@Test
def testConvertToGiza= {
val stonehengeGreenwich:Degrees = DegreesMinutesSeconds(-1, 49, 28)
val stonehengeGiza:Degrees = toDegreesGiza(stonehengeGreenwich)
logger.info(s"StonehengeGiza: $stonehengeGiza")
}
} | bluestix/pyramidcode | pyramid-code/pyramid-code-services/src/test/scala/com/dekayd/pyramidcode/tests/CoordinateUtilsTest.scala | Scala | gpl-2.0 | 588 |
import scala.annotation.tailrec
/**
* Random [[java.lang.String]] things.
*/
object StringStuff {
def occurrencesOf(target: Set[Char])(text: String): IndexedSeq[Int] =
for ((character, index) <- text.zipWithIndex if target.contains(character)) yield index
def groupFirstLast[T](list: Seq[T]): Map[T, T] = {
@tailrec
def group(list: Seq[T], accumulator: Map[T, T]): Map[T, T] = list match {
case Seq() => accumulator
case Seq(a) => accumulator ++ Map(a -> a)
case _ => group(list.tail.init, accumulator + (list.head -> list.last) + (list.last -> list.head)) // We need this to go both ways
}
group(list, Map())
}
def bothCases(characters: Seq[Char]): Seq[Char] =
(for (character <- characters) yield Seq(character.toLower, character.toUpper)).flatten
def reverseTarget(target: Set[Char], text: String): String = {
val matchedIndices = groupFirstLast(occurrencesOf(target)(text))
(for ((character, index) <- text.zipWithIndex) yield {
if (target.contains(character)) text(matchedIndices(index)) else character
}).mkString("")
}
def sortByCloseness(key: String, inputs: Seq[String]): Seq[String] =
inputs.sortBy(editDistance(key, _))
def editDistance(s1: String, s2: String): Int = damerauLevenshteinDistance(s1, s2)
def damerauLevenshteinDistance(s1: String, s2: String): Int = {
if (s1 == s2) return 0
// INFinite distance is the max possible distance
val inf = s1.length + s2.length
// Create and initialize the character array indices
val da = collection.mutable.HashMap[Char, Int]()
da ++= s1.union(s2).map(_ -> 0)
// Create the distance matrix H
val h = Array.ofDim[Int](s1.length + 2, s2.length + 2)
// initialize the left and top edges of H
for (i <- 0 to s1.length) {
h(i + 1)(0) = inf
h(i + 1)(1) = i
}
for (j <- 0 to s2.length) {
h(0)(j + 1) = inf
h(1)(j + 1) = j
}
// fill in the distance matrix H
// look at each character in s1
for (i <- 1 to s1.length) {
var db = 0
// look at each character in b
for (j <- 1 to s2.length) {
val (i1, j1) = (da(s2(j - 1)), db)
val cost =
if (s1(i - 1) == s2(j - 1)) {
db = j
0
} else 1
h(i + 1)(j + 1) = Seq(h(i)(j) + cost, /*substitution*/
h(i + 1)(j) + 1, /*insertion*/
h(i)(j + 1) + 1, /*deletion*/
h(i1)(j1) + (i - i1) + (j - j1) - 1 /*transposition*/).min
}
da.put(s1.charAt(i - 1), i)
}
h(s1.length + 1)(s2.length + 1)
}
}
| tamchow/ScalaStuff | src/StringStuff.scala | Scala | bsd-3-clause | 2,655 |
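// Editor's note: hypothetical usage sketch for the StringStuff utilities above; the expected values
// in the comments were worked out by hand from the definitions and are illustrative only.
object StringStuffSketch extends App {
  println(StringStuff.occurrencesOf(Set('a'))("banana"))      // indices of 'a': 1, 3, 5
  println(StringStuff.reverseTarget(Set('a', 'e', 'i', 'o', 'u'), "hello world")) // "hollo werld"
  println(StringStuff.editDistance("kitten", "sitting"))      // 3 (classic Levenshtein example)
  println(StringStuff.sortByCloseness("cat", Seq("cart", "dog", "cast", "cat"))) // List(cat, cart, cast, dog)
}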
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.utils
import java.{lang => jl, util => ju}
import org.scalajs.testsuite.utils.AssertThrows.assertThrows
import org.scalajs.testsuite.javalib.util.TrivialImmutableCollection
import org.scalajs.testsuite.javalib.util.TrivialImmutableMap
trait CollectionsTestBase {
val range: Range = 0 to 30
def rangeOfElems[A](toElem: Int => A): TrivialImmutableCollection[A] =
TrivialImmutableCollection(range.map(toElem): _*)
class A extends jl.Comparable[A] {
def compareTo(o: A): Int = this.##.compareTo(o.##)
}
class B extends A
class C extends B
class CustomComparable(val v: Int) extends jl.Comparable[CustomComparable] {
override def compareTo(o: CustomComparable): Int =
(v % 8).compareTo(o.v % 8)
override def toString(): String =
s"CustomComparable($v)"
}
def testCollectionUnmodifiability[E](coll: ju.Collection[E], elem: E): Unit = {
val empty = TrivialImmutableCollection[E]()
assertThrows(classOf[UnsupportedOperationException], coll.add(elem))
assertThrows(classOf[UnsupportedOperationException], coll.addAll(empty))
assertThrows(classOf[UnsupportedOperationException], coll.clear())
assertThrows(classOf[UnsupportedOperationException], coll.remove(elem))
assertThrows(classOf[UnsupportedOperationException], coll.removeAll(empty))
assertThrows(classOf[UnsupportedOperationException], coll.retainAll(empty))
testIteratorsUnmodifiability(() => coll.iterator())
}
def testSetUnmodifiability[E](set: ju.Set[E], elem: E): Unit =
testCollectionUnmodifiability(set, elem)
def testSortedSetUnmodifiability[E](set: ju.SortedSet[E], elem: E,
recursive: Boolean = false): Unit = {
testSetUnmodifiability(set, elem)
def testSubsets(ss: ju.SortedSet[E]) = {
if (recursive) testSetUnmodifiability(ss, elem)
else testSortedSetUnmodifiability(ss, elem, true)
}
testSubsets(set.headSet(elem))
testSubsets(set.tailSet(elem))
testSubsets(set.subSet(elem, elem))
}
def testListUnmodifiability[E](list: ju.List[E], elem: E,
recursive: Boolean = false): Unit = {
testCollectionUnmodifiability(list, elem)
assertThrows(classOf[UnsupportedOperationException], list.add(0, elem))
assertThrows(classOf[UnsupportedOperationException],
list.addAll(0, TrivialImmutableCollection[E]()))
assertThrows(classOf[UnsupportedOperationException], list.remove(0))
assertThrows(classOf[UnsupportedOperationException], list.set(0, elem))
def testSublist(sl: ju.List[E]): Unit = {
if (recursive) testCollectionUnmodifiability(sl, elem)
else testListUnmodifiability(sl, elem, true)
}
testSublist(list.subList(0, list.size / 2))
testListIteratorsUnmodifiability(() => list.listIterator(), elem)
testListIteratorsUnmodifiability(() => list.listIterator(0), elem)
}
def testOnFirstPositionOfIterator[Iter <: ju.Iterator[_]](
newIter: () => Iter, action: Iter => Unit,
expectedException: Option[Class[_ <: Throwable]]): Unit = {
val it = newIter()
if (it.hasNext) {
it.next()
expectedException match {
case Some(exClass) => assertThrows(exClass, action(it))
case None => action(it)
}
}
}
def testMapUnmodifiability[K, V](map: ju.Map[K, V], key: K, value: V): Unit = {
assertThrows(classOf[UnsupportedOperationException], map.clear())
assertThrows(classOf[UnsupportedOperationException], map.put(key, value))
assertThrows(classOf[UnsupportedOperationException],
map.putAll(TrivialImmutableMap[K, V]()))
testSetUnmodifiability(map.entrySet(),
new ju.AbstractMap.SimpleImmutableEntry(key, value))
testSetUnmodifiability(map.keySet(), key)
testCollectionUnmodifiability(map.values(), value)
}
def testSortedMapUnmodifiability[K, V](map: ju.SortedMap[K, V], key: K, value: V,
recursive: Boolean = false): Unit = {
testMapUnmodifiability(map, key, value)
def testSubmap(sm: ju.SortedMap[K, V]) = {
if (recursive) testMapUnmodifiability(sm, key, value)
else testSortedMapUnmodifiability(sm, key, value, true)
}
testSubmap(map.headMap(key))
testSubmap(map.tailMap(key))
testSubmap(map.subMap(key, key))
}
def testIteratorsUnmodifiability[E](newIter: () => ju.Iterator[E]): Unit = {
testOnFirstPositionOfIterator[ju.Iterator[E]](newIter, _.remove(),
Some(classOf[UnsupportedOperationException]))
}
def testListIteratorsUnmodifiability[E](newIter: () => ju.ListIterator[E],
elem: E): Unit = {
testIteratorsUnmodifiability(newIter)
testOnFirstPositionOfIterator[ju.ListIterator[E]](newIter, _.add(elem),
Some(classOf[UnsupportedOperationException]))
testOnFirstPositionOfIterator[ju.ListIterator[E]](newIter, _.set(elem),
Some(classOf[UnsupportedOperationException]))
}
}
| scala-js/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/utils/CollectionsTestBase.scala | Scala | apache-2.0 | 5,147 |
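// Editor's note: hypothetical sketch of how the helpers above are meant to be used; it assumes the
// Scala.js test-suite utilities (this trait plus its TrivialImmutableCollection/Map companions) are on
// the classpath. The class and method names are invented; the JDK unmodifiable wrapper is exactly the
// kind of collection testListUnmodifiability is written to verify.
import java.{util => ju}
import org.scalajs.testsuite.utils.CollectionsTestBase

class UnmodifiableListSketch extends CollectionsTestBase {
  def checkJdkWrapper(): Unit = {
    val backing = new ju.ArrayList[Int]()
    range.foreach(backing.add(_))
    // every mutator (add, remove, clear, sublists, iterators, ...) must throw UnsupportedOperationException
    testListUnmodifiability(ju.Collections.unmodifiableList(backing), 42)
  }
}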
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.laws._
import cats.laws.discipline._
import monix.eval.{Coeval, Task}
object IterantRetryIfEmptySuite extends BaseTestSuite {
test("Iterant.pure(1).retryIfEmpty mirrors source") { _ =>
val r = Iterant[Coeval].pure(1).retryIfEmpty(None).toListL.value()
assertEquals(r, List(1))
}
test("Iterant.suspend(Iterant.pure(1)).retryIfEmpty mirrors source") { _ =>
val r = Iterant.suspend(Iterant[Coeval].pure(1)).retryIfEmpty(None).toListL.value()
assertEquals(r, List(1))
}
test("(batch ++ batch ++ batch ++ batch).retryIfEmpty mirrors source") { _ =>
def batch(start: Int) = Iterant[Coeval].fromArray(Array(start, start + 1, start + 2))
val r = (batch(1) ++ batch(4) ++ batch(7) ++ batch(10)).retryIfEmpty(None).toListL.value()
assertEquals(r, (1 to 12).toList)
}
test("iterant.retryIfEmpty <-> iterant (for pure streams)") { _ =>
check1 { (stream: Iterant[Coeval, Int]) =>
stream.retryIfEmpty(Some(1)) <-> stream
}
}
test("iterant.retryIfEmpty handles Scopes properly") { _ =>
var cycles = 0
var acquired = 0
val empty = Iterant[Coeval].suspend(Coeval {
cycles += 1
Iterant[Coeval].empty[Int]
})
val resource = Iterant[Coeval]
.resource(Coeval(acquired += 1))(_ =>
Coeval {
assertEquals(acquired, 1)
acquired -= 1
})
.flatMap(_ => Iterant[Coeval].empty[Int])
val r = (empty ++ resource).retryIfEmpty(Some(2)).toListL.value()
assertEquals(r, Nil)
assertEquals(cycles, 3)
assertEquals(acquired, 0)
}
test("iterant.retryIfEmpty actually retries until source emits something") { _ =>
var cycles = 10
val iterant = Iterant[Coeval].suspend(Coeval {
cycles -= 1
if (cycles == 0)
Iterant[Coeval].pure(1)
else
Iterant[Coeval].empty[Int]
})
val r = iterant.retryIfEmpty(Some(9)).toListL.value()
assertEquals(r, List(1))
}
test("iterant.retryIfEmpty gives up after maxRetries") { _ =>
var cycles = 10
val iterant = Iterant[Coeval].suspend(Coeval {
cycles -= 1
if (cycles == 0)
Iterant[Coeval].pure(1)
else
Iterant[Coeval].empty[Int]
})
val r = iterant.retryIfEmpty(Some(8)).toListL.value()
assertEquals(r, Nil)
}
test("iterant.retryIfEmpty(None) repeats until it succeeds") { _ =>
import scala.util.Random
val stream = Iterant[Coeval].suspend(Coeval {
val nr = Random.nextInt()
if (nr % 10 != 0)
Iterant[Coeval].empty[Int]
else
Iterant[Coeval].of(1, 2, 3)
})
val r = stream.retryIfEmpty(None).toListL.value()
assertEquals(r, List(1, 2, 3))
}
test("iterant.retryIfEmpty(None) repeats until the end of time") { implicit sc =>
val f = Iterant[Task]
.suspend(Iterant.empty[Task, Int])
.retryIfEmpty(None)
.toListL
.runToFuture
var count = 1000
while (count > 0) {
count -= 1
assert(sc.tickOne(), "sc.tickOne()")
assert(!f.isCompleted, "!f.isCompleted")
}
f.cancel()
sc.tick()
}
}
| monixio/monix | monix-tail/shared/src/test/scala/monix/tail/IterantRetryIfEmptySuite.scala | Scala | apache-2.0 | 3,784 |
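// Editor's note: hypothetical sketch (not part of the suite above) of the semantics these tests pin
// down: retryIfEmpty re-evaluates an effectful-but-empty source until it emits, giving up after the
// given number of retries. Only API already exercised in the suite is used; the names are made up.
import monix.eval.Coeval
import monix.tail.Iterant

object RetryIfEmptySketch extends App {
  var cycles = 3
  val flaky = Iterant[Coeval].suspend(Coeval {
    cycles -= 1
    if (cycles == 0) Iterant[Coeval].of(1, 2, 3) else Iterant[Coeval].empty[Int]
  })
  // first two evaluations are empty, the third emits, so two retries are enough
  println(flaky.retryIfEmpty(Some(2)).toListL.value()) // List(1, 2, 3)
}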
package com.cerner.beadledom.lifecycle.legacy
import com.google.inject.spi.ProvisionListener.ProvisionInvocation
import com.google.inject.{Binding, Key}
import javax.annotation.{PostConstruct, PreDestroy}
import org.hamcrest.Matchers.contains
import org.mockito.Mockito
import org.mockito.hamcrest.MockitoHamcrest
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{FunSpec, MustMatchers}
import scala.reflect.Manifest
/**
* Unit tests for [[LifecycleProvisionListener]].
*
* @author John Leacox
*/
class LifecycleProvisionListenerSpec extends FunSpec with MustMatchers with MockitoSugar {
describe("LifecycleProvisionListener") {
describe("#onProvision") {
it("executes PostConstruct methods on the injectee") {
val shutdownManager = mock[LifecycleShutdownManager]
val provisionListener = new LifecycleProvisionListener()
LifecycleProvisionListener.init(shutdownManager, provisionListener)
val injectee = new TestPostConstruct
val provision = createMockProvision(injectee, classOf[TestPostConstruct])
provisionListener.onProvision(provision)
injectee.hasExecutedStartup mustBe true
}
it("executes PostConstruct methods on the parent class of injectee") {
val shutdownManager = mock[LifecycleShutdownManager]
val provisionListener = new LifecycleProvisionListener()
LifecycleProvisionListener.init(shutdownManager, provisionListener)
val injectee = new TestPostConstructWithParent
val provision = createMockProvision(injectee, classOf[TestPostConstructWithParent])
provisionListener.onProvision(provision)
injectee.hasExecutedStartup mustBe true
}
it("adds PreDestroy methods to the shutdown manager") {
val shutdownManager = mock[LifecycleShutdownManager]
val provisionListener = new LifecycleProvisionListener()
LifecycleProvisionListener.init(shutdownManager, provisionListener)
val injectee = new TestPreDestroy
val provision = createMockProvision(injectee, classOf[TestPreDestroy])
provisionListener.onProvision(provision)
val invokableShutdownMethod = new InvokableLifecycleMethodImpl(injectee,
classOf[TestPreDestroy].getDeclaredMethod("shutdown"), classOf[PreDestroy])
Mockito.verify(shutdownManager).addPreDestroyMethods(
MockitoHamcrest.argThat(contains(invokableShutdownMethod))
.asInstanceOf[java.util.List[InvokableLifecycleMethod]])
}
}
}
def createMockProvision[T](injectee: T, clazz: Class[T])
(implicit manifest: Manifest[T]): ProvisionInvocation[T] = {
val binding = mock[Binding[T]]
val key = Key.get(clazz)
Mockito.when(binding.getKey).thenReturn(key)
val invocation = mock[ProvisionInvocation[T]]
Mockito.when(invocation.getBinding).thenReturn(binding)
Mockito.when(invocation.provision).thenReturn(injectee)
invocation
}
class TestPostConstruct {
var hasExecutedStartup: Boolean = false
@PostConstruct
def startup(): Unit = {
hasExecutedStartup = true
}
}
class TestPostConstructWithParent extends TestPostConstruct {
}
class TestPreDestroy {
@PreDestroy
def shutdown() = ???
}
}
| bbaugher/beadledom | lifecycle/src/test/scala/com/cerner/beadledom/lifecycle/legacy/LifecycleProvisionListenerSpec.scala | Scala | apache-2.0 | 3,276 |
import org.scalatest._
class NucleotideCountSpecs extends FlatSpec with Matchers {
"empty dna string" should "have no adenosine" in {
new DNA("").count('A') should be (0)
}
it should "have no nucleotides" in {
pending
val expected = Map('A' -> 0, 'T' -> 0, 'C' -> 0, 'G' -> 0)
new DNA("").nucleotideCounts should be (expected)
}
"a repetitive sequence" should "count cytidine" in {
pending
new DNA("CCCCC").count('C') should be (5)
}
it should "have only guanosine" in {
pending
val expected = Map('A' -> 0, 'T' -> 0, 'C' -> 0, 'G' -> 8)
new DNA("GGGGGGGG").nucleotideCounts should be (expected)
}
"a mixed dna string" should "count only thymidine" in {
pending
new DNA("GGGGGTAACCCGG").count('T') should be (1)
}
it should "count a nucleotide only once" in {
pending
val dna = new DNA("CGATTGGG")
dna.count('T')
dna.count('T') should be (2)
}
it should "not change counts after counting adenosine" in {
pending
val dna = new DNA("GATTACA")
dna.count('A')
val expected = Map('A' -> 3, 'T' -> 2, 'C' -> 1, 'G' -> 1)
dna.nucleotideCounts should be (expected)
}
it should "validate nucleotides" in {
pending
evaluating {
new DNA("GACT").count('X')
} should produce [IllegalArgumentException]
}
it should "validate dna not rna" in {
pending
evaluating {
new DNA("ACGU")
} should produce [IllegalArgumentException]
}
it should "validate dna" in {
pending
evaluating {
new DNA("John")
} should produce [IllegalArgumentException]
}
it should "count all nucleotides" in {
pending
val s = "AGCTTTTCATTCTGACTGCAACGGGCAATATGTCTCTGTGTGGATTAAAAAAAGAGTGTCTGATAGCAGC"
val dna = new DNA(s)
val expected = Map('A' -> 20, 'T' -> 21, 'G' -> 17, 'C' -> 12)
dna.nucleotideCounts should be (expected)
}
}
| tomave/exercism-io-solutions | scala/nucleotide-count/src/test/scala/nucleotide_count_test.scala | Scala | agpl-3.0 | 1,892 |
package cz.vse.easyminer.util
import spray.http.Uri
object RestUtils {
implicit class PathExtension(path: Uri.Path) {
private def findClosestParent(path: Uri.Path) : Uri.Path = path match {
case Uri.Path.Empty => path
case Uri.Path.Slash(tail) => findClosestParent(tail)
case Uri.Path.Segment(_, Uri.Path.Slash(tail)) => tail
case Uri.Path.Segment(_, path @ Uri.Path.Empty) => path
}
def parent = findClosestParent(path.reverse).reverse
}
} | KIZI/EasyMiner-Apriori-R | src/main/scala/cz/vse/easyminer/util/RestUtils.scala | Scala | bsd-3-clause | 489 |
package com.github.andyglow.config
import scala.jdk.CollectionConverters._
private[config] object ScalaVersionSpecific {
implicit class CollectionsToScala[T](private val coll: java.lang.Iterable[T]) extends AnyVal {
def scala: Iterable[T] = coll.asScala
}
implicit class MapsToScala[K, V](private val coll: java.util.Map[K, V]) extends AnyVal {
def scala: Map[K, V] = coll.asScala.toMap
}
}
| andyglow/typesafe-config-scala | src/main/scala-3/com/github/andyglow/config/ScalaVersionSpecific.scala | Scala | gpl-3.0 | 413 |
package calc
/**
* Brute force solver for the problem at https://brilliant.org/practice/arithmetic-puzzles-level-2-challenges/?p=2
*
* In a nutshell, given 1 _ 2 _ 3 _ 4, and the operators +, -, *, /, how many ways can you construct
* a simple expression such that it evaluates to 10? (And for extra credit, 1 _ 2 _ 3 _ 4 _ 5 => 15).
* Of course you can solve this by being smart, but this shows how to write simple program to solve it in
* a brute-force way.
*
* Output:
* For 1 _ 2 _ 3 _ 4, there are 2 solutions:
* 1 + 2 + 3 + 4
* 1 * 2 * 3 + 4
* For 1 _ 2 _ 3 _ 4 _ 5, there are 3 solutions:
* 1 + 2 + 3 + 4 + 5
* 1 - 2 * 3 + 4 * 5
* 1 * 2 * 3 + 4 + 5
*/
object Calc extends App with Ops {
val ops = Seq("+", "-", "*", "/")
val soln4 = for {
op1 <- ops
op2 <- ops
op3 <- ops
input = s"1 $op1 2 $op2 3 $op3 4" if parseAll(expr, input).get == 10
} yield input
println(s"For 1 _ 2 _ 3 _ 4, there are ${soln4.size} solutions:\\n${soln4.mkString("\\n")}")
val soln5 = for {
op1 <- ops
op2 <- ops
op3 <- ops
op4 <- ops
input = s"1 $op1 2 $op2 3 $op3 4 $op4 5" if parseAll(expr, input).get == 15
} yield input
println(s"For 1 _ 2 _ 3 _ 4 _ 5, there are ${soln5.size} solutions:\\n${soln5.mkString("\\n")}")
} | ebowman/calc | src/main/scala/calc/Calc.scala | Scala | unlicense | 1,289 |
package akka.rtcweb.protocol.sdp.grouping
import akka.rtcweb.protocol.sdp.renderer.Renderer._
import akka.rtcweb.protocol.sdp.renderer.{ Renderer, Rendering }
trait GroupingExtensionAttributeRenderer {
import akka.rtcweb.protocol.sdp.renderer.Rendering.SP
private implicit val semanticsRenderer: Renderer[Semantics] = stringRenderer[Semantics] {
case Semantics.FID => "FID"
case Semantics.LS => "LS"
case Semantics.UnknownSemanticsExtension(name) => name
case e: SemanticsExtension => ???
}
def renderGroupingExtensionAttributes[R <: Rendering](r: R, v: GroupingExtensionAttribute): r.type = v match {
case MediaStreamIdentifier(tag) => r ~ "mid:" ~ tag
case Group(semantics, streams) => r ~ "group:" ~ semantics; streams.foreach(r ~ SP ~ _.tag); r
}
}
| danielwegener/akka-rtcweb | src/main/scala/akka/rtcweb/protocol/sdp/grouping/GroupingExtensionAttributeRenderer.scala | Scala | apache-2.0 | 793 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.planner.plan.optimize.program.FlinkStreamProgram.LOGICAL_REWRITE
import org.apache.flink.table.planner.plan.optimize.program._
import org.apache.flink.table.planner.utils.{StreamTableTestUtil, TableTestBase}
import org.apache.calcite.plan.hep.HepMatchOrder
import org.apache.calcite.tools.RuleSets
import org.junit.{Before, Test}
/**
* Test for [[TemporalJoinRewriteWithUniqueKeyRule]].
*/
class TemporalJoinRewriteWithUniqueKeyRuleTest extends TableTestBase {
protected val util: StreamTableTestUtil = streamTestUtil()
@Before
def setup(): Unit = {
util.buildStreamProgram(LOGICAL_REWRITE)
val chainedProgram = util.getStreamProgram()
// add test rule
chainedProgram.addLast(
"test_rules",
FlinkHepRuleSetProgramBuilder.newBuilder
.setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_SEQUENCE)
.setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
.add(RuleSets.ofList(
FlinkLogicalRankRule.INSTANCE,
CalcRankTransposeRule.INSTANCE,
RankNumberColumnRemoveRule.INSTANCE,
CalcSnapshotTransposeRule.INSTANCE,
TemporalJoinRewriteWithUniqueKeyRule.INSTANCE))
.build())
util.replaceStreamProgram(chainedProgram)
util.addTable(
"""
|CREATE TABLE T1 (
| id STRING,
| mount INT,
| proctime as PROCTIME(),
| rowtime TIMESTAMP(3),
| WATERMARK FOR rowtime AS rowtime
|) WITH (
| 'connector' = 'COLLECTION',
| 'is-bounded' = 'false'
|)
""".stripMargin)
//lookup table, CollectionTableSource implements LookupableTableSource interface
util.addTable(
"""
|CREATE TABLE T2 (
| id STRING,
| rate INT,
| rowtime TIMESTAMP(3),
| WATERMARK FOR rowtime AS rowtime,
| PRIMARY KEY(id) NOT ENFORCED
|) WITH (
| 'connector' = 'COLLECTION',
| 'is-bounded' = 'false'
|)
""".stripMargin)
util.addTable(
"""
|CREATE TABLE T3 (
| id STRING,
| rate INT,
| rowtime TIMESTAMP(3),
| WATERMARK FOR rowtime AS rowtime
|) WITH (
| 'connector' = 'COLLECTION',
| 'is-bounded' = 'false'
|)
""".stripMargin)
util.addTable(
" CREATE VIEW DeduplicatedView as SELECT id, rate, rowtime FROM " +
" (SELECT *, " +
" ROW_NUMBER() OVER (PARTITION BY id ORDER BY rowtime DESC) AS rowNum " +
" FROM T3 " +
" ) T " +
" WHERE rowNum = 1")
}
@Test
def testPrimaryKeyInTemporalJoin(): Unit = {
util.verifyPlan("SELECT * FROM T1 JOIN T2 FOR SYSTEM_TIME AS OF T1.rowtime AS T " +
"ON T1.id = T.id")
}
@Test
def testInferredPrimaryKeyInTemporalJoin(): Unit = {
util.verifyPlan("SELECT * FROM T1 JOIN DeduplicatedView FOR SYSTEM_TIME AS OF " +
"T1.rowtime AS T ON T1.id = T.id")
}
@Test
def testPrimaryKeyInTemporalJoinOnTrue(): Unit = {
expectedException.expect(classOf[ValidationException])
expectedException.expectMessage("Currently the join key in " +
"Temporal Table Join can not be empty.")
util.verifyPlan("SELECT * FROM T1 JOIN T2 FOR SYSTEM_TIME AS OF T1.rowtime AS T " +
"ON TRUE")
}
@Test
def testInvalidPrimaryKeyInTemporalJoin(): Unit = {
util.addTable(
"""
|CREATE TABLE noPkTable (
| id STRING,
| rate INT,
| rowtime TIMESTAMP(3),
| WATERMARK FOR rowtime AS rowtime
|) WITH (
| 'connector' = 'COLLECTION',
| 'is-bounded' = 'false'
|)
""".stripMargin)
expectedException.expect(classOf[ValidationException])
expectedException.expectMessage("Temporal Table Join requires primary key in versioned table," +
" but no primary key can be found. The physical plan is:\\nFlinkLogicalJoin(" +
"condition=[AND(=($0, $4), __INITIAL_TEMPORAL_JOIN_CONDITION($3, $6," +
" __TEMPORAL_JOIN_LEFT_KEY($0), __TEMPORAL_JOIN_RIGHT_KEY($4)))], joinType=[left])")
util.verifyPlan("SELECT * FROM T1 LEFT JOIN noPkTable FOR SYSTEM_TIME AS OF " +
"T1.rowtime AS T ON T1.id = T.id")
}
@Test
def testInvalidInferredPrimaryKeyInTemporalJoin(): Unit = {
util.addTable(
" CREATE VIEW noPkView as SELECT id, rate, rowtime FROM " +
" (SELECT *, " +
" ROW_NUMBER() OVER (PARTITION BY id ORDER BY rowtime DESC) AS rowNum " +
" FROM T3 " +
" ) T " +
" WHERE rowNum = 2")
expectedException.expect(classOf[ValidationException])
expectedException.expectMessage("Temporal Table Join requires primary key in versioned table," +
" but no primary key can be found. The physical plan is:\\n" +
"FlinkLogicalJoin(condition=[AND(=($0, $4), __INITIAL_TEMPORAL_JOIN_CONDITION(" +
"$3, $6, __TEMPORAL_JOIN_LEFT_KEY($0), __TEMPORAL_JOIN_RIGHT_KEY($4)))], joinType=[inner])")
util.verifyPlan("SELECT * FROM T1 JOIN noPkView FOR SYSTEM_TIME AS OF " +
"T1.rowtime AS T ON T1.id = T.id")
}
@Test
def testInferredPrimaryKeyInTemporalJoinOnTrue(): Unit = {
expectedException.expect(classOf[ValidationException])
expectedException.expectMessage("Currently the join key in " +
"Temporal Table Join can not be empty.")
util.verifyPlan("SELECT * FROM T1 JOIN DeduplicatedView FOR SYSTEM_TIME AS OF " +
"T1.rowtime AS T ON TRUE")
}
}
| greghogan/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/rules/logical/TemporalJoinRewriteWithUniqueKeyRuleTest.scala | Scala | apache-2.0 | 6,385 |
package gui
/**
* Advanced components that are needed for this interface.
*/
package object component
| AlexGonRo/Instance-Selection-Algorithms-Spark | Algoritmos_de_selección_de_instancias/src/main/scala/gui/component/package.scala | Scala | gpl-3.0 | 105 |
object Main extends App {
def f(a:Int=>Int):Int = a(4)
def g:Int = f((x,y)=>x)
}
| yusuke2255/dotty | tests/untried/neg/t556.scala | Scala | bsd-3-clause | 85 |
package eventstore.util
import com.typesafe.config.{ Config, ConfigFactory }
import org.specs2.mutable.Specification
import org.specs2.specification.{ Scope, Step, Fragments }
import akka.actor.ActorSystem
import akka.testkit.{ ImplicitSender, TestKit }
import scala.concurrent.{ Awaitable, Await }
import scala.concurrent.duration._
abstract class ActorSpec extends Specification with NoConversions {
implicit lazy val system = ActorSystem("test", config)
def config: Config = ConfigFactory.load
override def map(fs: => Fragments) = super.map(fs) ^ Step(TestKit.shutdownActorSystem(system))
protected abstract class ActorScope extends TestKit(system) with ImplicitSender with Scope
def await_[T](awaitable: Awaitable[T], atMost: Duration = 3.seconds): T = awaitable.await_(atMost)
implicit class RichAwaitable[T](val awaitable: Awaitable[T]) {
def await_(implicit atMost: Duration = 3.seconds) = Await.result(awaitable, atMost)
}
}
| pawelkaczor/EventStore.JVM | src/test/scala/eventstore/util/ActorSpec.scala | Scala | bsd-3-clause | 958 |
/*
* Artificial Intelligence for Humans
* Volume 1: Fundamental Algorithms
* Scala Version
* http://www.aifh.org
* http://www.jeffheaton.com
*
* Code repository:
* https://github.com/jeffheaton/aifh
* Copyright 2013 by Jeff Heaton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information on Heaton Research copyrights, licenses
* and trademarks visit:
* http://www.heatonresearch.com/copyright
*/
package com.heatonresearch.aifh.learning
import com.heatonresearch.aifh.general.fns.FnRBF
import com.heatonresearch.aifh.general.fns.GaussianFunction
import com.heatonresearch.aifh.randomize.GenerateRandom
import com.heatonresearch.aifh.general.data.RichData
import RichData._
import scala.collection.mutable.ArrayBuffer
/**
 * An RBF network is an advanced machine learning algorithm that uses a series of RBF functions to perform
* regression. It can also perform classification by means of one-of-n encoding.
* <p/>
 * The long term memory of an RBF network is made up of the widths and centers of the RBF functions, as well as
* input and output weighting.
* <p/>
* http://en.wikipedia.org/wiki/RBF_network
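 * <p/>
 * A minimal usage sketch (the sizes and inputs below are illustrative, and rnd stands for any
 * available [[com.heatonresearch.aifh.randomize.GenerateRandom]] implementation):
 * {{{
 * val network = new RBFNetwork(inputCount = 2, rbfCount = 5, outputCount = 1)
 * network.reset(rnd)                                      // randomize the long term memory
 * val output = network.computeRegression(Vector(0.5, 0.25))
 * }}}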
*
* Construct the RBF network.
*
* @param inputCount The input count.
* @param rbfCount The number of RBF functions.
* @param outputCount The output count.
*/
class RBFNetwork(val inputCount: Int, rbfCount: Int, val outputCount: Int) extends RegressionAlgorithm with ClassificationAlgorithm {
/**
* An index to the input weights in the long term memory.
*/
private val indexInputWeights = 0
val inputWeightCount = inputCount * rbfCount
val outputWeightCount = (rbfCount + 1) * outputCount
val rbfParams = (inputCount + 1) * rbfCount
/**
* An index to the output weights in the long term memory.
*/
private val indexOutputWeights: Int = inputWeightCount + rbfParams
/**
* The weights & RBF parameters. See constructor for layout.
*/
override val longTermMemory = ArrayBuffer.fill(inputWeightCount + outputWeightCount + rbfParams)(0.0)
/**
* The RBF functions.
*/
private val rbf: Vector[FnRBF] = {
val arr = Array.ofDim[FnRBF](rbfCount)
for(i <- 0 until rbfCount) {
val rbfIndex: Int = inputWeightCount + ((inputCount + 1) * i)
arr(i) = new GaussianFunction(inputCount, longTermMemory, rbfIndex)
}
arr.toVector
}
override def computeRegression(input: Vector[Double]): Vector[Double] = {
val rbfOutput = ArrayBuffer.fill(rbf.length + 1)(0.0)
rbfOutput(rbfOutput.length - 1) = 1.0
for(rbfIndex <- 0 until rbf.length) {
val weightedInput = for(inputIndex <- 0 until input.length) yield {
val memoryIndex = indexInputWeights + (rbfIndex * inputCount) + inputIndex
input(inputIndex) * longTermMemory(memoryIndex)
}
rbfOutput(rbfIndex) = rbf(rbfIndex).evaluate(weightedInput.toVector)
}
val result = ArrayBuffer.fill(outputCount)(0.0)
for(outputIndex <- 0 until result.length) {
var sum = 0.0
for(rbfIndex <- 0 until rbfOutput.length) {
val memoryIndex = indexOutputWeights + (outputIndex * (rbf.length + 1)) + rbfIndex
sum += rbfOutput(rbfIndex) * longTermMemory(memoryIndex)
}
result(outputIndex) = sum
}
result.toVector
}
/**
* Randomize the long term memory, with the specified random number generator.
*
* @param rnd A random number generator.
*/
def reset(rnd: GenerateRandom) {
for(i <- 0 until longTermMemory.length)
longTermMemory(i) = rnd.nextDouble(-1, 1)
}
override def computeClassification(input: Vector[Double]): Int = {
val output = computeRegression(input)
output.maxIndex
}
override def toString: String = {
s"[RBFNetwork:inputCount=$inputCount,outputCount=$outputCount,RBFs=${rbf.mkString("[",",","]")}]"
}
}
| PeterLauris/aifh | vol1/scala-examples/src/main/scala/com/heatonresearch/aifh/learning/RBFNetwork.scala | Scala | apache-2.0 | 4,308 |
package com.bolour.boardgame.scala.server.domain.json
import spray.json.{DefaultJsonProtocol, JsString, JsValue, RootJsonFormat, deserializationError}
object JsonUtil {
def removeQuotes(s: String): String = {
s.replaceAll("\\"", "")
}
/**
* There must be a simpler way to convert a json string that has been
   * quoted by spray, to a simple string value, or to prevent spray from adding
* the quotes in the first place, but I have missed it so far!
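   *
   * A small illustration (the value is made up):
   * {{{
   * unwrapQuotedJsonString(JsString("\"game-1\"")) // yields the string game-1, quotes stripped
   * }}}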
*
* @param json A JsString presented as a JsValue.
   * @return The enclosed string without the surrounding quotes.
*/
def unwrapQuotedJsonString(json: JsValue): String = {
val value = json.asInstanceOf[JsString]
removeQuotes(value.value)
}
}
| azadbolour/boardgame | scala-server/app/com/bolour/boardgame/scala/server/domain/json/JsonUtil.scala | Scala | agpl-3.0 | 719 |
package lore.compiler.semantics.functions
import lore.compiler.feedback.{Feedback, Reporter}
import lore.compiler.semantics.NamePath
import lore.compiler.semantics.scopes.Binding
import lore.compiler.types.TupleType
class MultiFunctionDefinition(val name: NamePath, val functions: Vector[FunctionDefinition]) extends Binding {
val hierarchy: DispatchHierarchy = DispatchHierarchyBuilder.build(this)
/**
* Resolves a multiple dispatch application of the multi-function for the given type. The empty fit and ambiguous
* call errors must be customized.
*/
def dispatch(
tpe: TupleType,
emptyFit: => Feedback.Error,
ambiguousCall: Vector[FunctionDefinition] => Feedback.Error,
)(implicit reporter: Reporter): Option[FunctionInstance] = {
Dispatch.resolve(hierarchy, tpe, emptyFit, ambiguousCall)
}
/**
* Calculates the multi-function's fit set for the given type.
*/
def fit(tpe: TupleType): Vector[FunctionDefinition] = Dispatch.fit(hierarchy, tpe)
/**
* Calculates the multi-function's min set for the given type.
*/
def min(tpe: TupleType): Vector[FunctionDefinition] = Dispatch.min(hierarchy, tpe)
override def toString: String = name.toString
}
| marcopennekamp/lore | compiler/src/lore/compiler/semantics/functions/MultiFunctionDefinition.scala | Scala | mit | 1,218 |
/*
* Copyright (C) 2012 The Regents of The University California.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.serde2.binarysortable
// Putting it in this package so it can access the package level visible function
// static void BinarySortableSerDe.serialize(OutputByteBuffer, Object, ObjectInspector, boolean)
import java.util.{List => JList}
import scala.collection.JavaConversions._
import org.apache.hadoop.hive.serde2.objectinspector.{StructField, StructObjectInspector}
/**
* Used to serialize a row of data. It needs to be initialized with an object inspector
* for the row.
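 *
 * A minimal usage sketch (rowOI and row are placeholders for a StructObjectInspector and a row
 * object obtained elsewhere from Hive; they are not defined in this file):
 * {{{
 * val serializer = new HiveStructSerializer(rowOI)
 * val bytes: Array[Byte] = serializer.serialize(row)
 * }}}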
*/
class HiveStructSerializer(val rowObjectInspector: StructObjectInspector) {
def serialize(obj: Object): Array[Byte] = {
outputByteBuffer.reset()
var i = 0
while (i < fields.size) {
BinarySortableSerDe.serialize(
outputByteBuffer,
rowObjectInspector.getStructFieldData(obj, fields.get(i)),
fields.get(i).getFieldObjectInspector(),
false)
i += 1
}
val bytes = new Array[Byte](outputByteBuffer.length)
System.arraycopy(outputByteBuffer.getData(), 0, bytes, 0, outputByteBuffer.length)
println("bytes: " + bytes.toSeq)
bytes
}
private val outputByteBuffer = new OutputByteBuffer
private val fields: JList[_ <: StructField] = rowObjectInspector.getAllStructFieldRefs
}
| sameeragarwal/blinkdb_dev | src/main/scala/shark/execution/serialization/HiveStructSerializer.scala | Scala | apache-2.0 | 1,920 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.util.{ArrayList => JArrayList, Arrays, List => JList}
import scala.collection.JavaConverters._
import org.apache.commons.lang3.exception.ExceptionUtils
import org.apache.hadoop.hive.metastore.api.{FieldSchema, Schema}
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{AnalysisException, SQLContext}
import org.apache.spark.sql.execution.QueryExecution
private[hive] class SparkSQLDriver(val context: SQLContext = SparkSQLEnv.sqlContext)
extends Driver
with Logging {
private[hive] var tableSchema: Schema = _
private[hive] var hiveResponse: Seq[String] = _
override def init(): Unit = {
}
private def getResultSetSchema(query: QueryExecution): Schema = {
val analyzed = query.analyzed
logDebug(s"Result Schema: ${analyzed.output}")
if (analyzed.output.isEmpty) {
new Schema(Arrays.asList(new FieldSchema("Response code", "string", "")), null)
} else {
val fieldSchemas = analyzed.output.map { attr =>
new FieldSchema(attr.name, attr.dataType.catalogString, "")
}
new Schema(fieldSchemas.asJava, null)
}
}
override def run(command: String): CommandProcessorResponse = {
// TODO unify the error code
try {
context.sparkContext.setJobDescription(command)
val execution = context.sessionState.executePlan(context.sql(command).logicalPlan)
hiveResponse = execution.hiveResultString()
tableSchema = getResultSetSchema(execution)
new CommandProcessorResponse(0)
} catch {
case ae: AnalysisException =>
logDebug(s"Failed in [$command]", ae)
new CommandProcessorResponse(1, ExceptionUtils.getStackTrace(ae), null, ae)
case cause: Throwable =>
logError(s"Failed in [$command]", cause)
new CommandProcessorResponse(1, ExceptionUtils.getStackTrace(cause), null, cause)
}
}
override def close(): Int = {
hiveResponse = null
tableSchema = null
0
}
override def getResults(res: JList[_]): Boolean = {
if (hiveResponse == null) {
false
} else {
res.asInstanceOf[JArrayList[String]].addAll(hiveResponse.asJava)
hiveResponse = null
true
}
}
override def getSchema: Schema = tableSchema
override def destroy() {
super.destroy()
hiveResponse = null
tableSchema = null
}
}
| u2009cf/spark-radar | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala | Scala | apache-2.0 | 3,305 |
/*
* Copyright (C) 2014 AyaIB Developers (http://github.com/fauu/AyaIB)
*
* This software is licensed under the GNU General Public License
* (version 3 or later). See the COPYING file in this distribution.
*
* You should have received a copy of the GNU Library General Public License
* along with this software. If not, see <http://www.gnu.org/licenses/>.
*
* Authored by: Piotr Grabowski <[email protected]>
*/
package controllers
import play.api.mvc.{Action, Controller}
import scala.concurrent.ExecutionContext.Implicits.global
import auth.{Janitor, AuthConfigImpl}
import jp.t2v.lab.play2.auth.AuthElement
import context.AyaIBContext
import scala.concurrent.Future
import models.forms.BoardForm
import scala.util.{Success, Failure}
import utils.exceptions.IncorrectInputException
import play.api.Logger
object StaffPanelController extends Controller with AuthElement with AuthConfigImpl {
val boardService = AyaIBContext.boardService
// def index = StackAction(AuthorityKey -> Janitor) { implicit request =>
def index = Action { implicit request =>
// val user = loggedIn
Ok(views.html.staff.index())
}
def manageBoards = Action.async { implicit request =>
boardService.findAllBoards map (boards => Ok(views.html.staff.manageBoards(boards)))
}
def newBoardForm = Action { implicit request =>
Ok(views.html.staff.addNewBoard(BoardForm.get fill BoardForm()))
}
def addNewBoard = Action.async { implicit request =>
val boardForm = BoardForm.get.bindFromRequest
val boardData = boardForm fold (formWithErrors => None, boardData => Some(boardData))
boardData map { boardData =>
boardService.addBoard(boardData) map {
case Success(()) =>
Redirect(routes.StaffPanelController.newBoardForm)
case Failure(ex: IncorrectInputException) =>
Redirect(routes.StaffPanelController.newBoardForm) flashing ("error" -> ex.getMessage)
case Failure(ex) =>
Logger error s"Cannot add new board: $ex"
Redirect(routes.StaffPanelController.newBoardForm) flashing ("failure" -> "")
}
} getOrElse Future.successful(Redirect(routes.StaffPanelController.newBoardForm))
// val postData = PostForm.get.bindFromRequest fold (formWithErrors => None, postData => Some(postData))
//
// postData map { postData =>
// val fileWrapperOption: Option[FileWrapper] = request.body.file("file") match {
// case Some(file: FilePart[TemporaryFile]) =>
// Some(new FileWrapper(file.ref.file, file.filename, file.contentType))
// case _ => None
// }
//
// val futureNewPostNoOption = boardService.addPost(boardName, Some(threadNo), postData, fileWrapperOption)
//
// futureNewPostNoOption map {
// case Success(newPostNo) =>
// Redirect(routes.BoardController.showThread(boardName, threadNo) + "#post-" + newPostNo)
//
// case Failure(ex: IncorrectInputException) =>
// Redirect(routes.BoardController.show(boardName)).flashing("error" -> ex.getMessage)
//
// case Failure(ex) =>
// Logger.error(s"Cannot add new post: $ex")
// Redirect(routes.BoardController.show(boardName)).flashing("failure" -> "")
// }
// } getOrElse Future.successful {
// Redirect(routes.BoardController.show(boardName)).flashing("error" -> "Please fill all required fields")
// }
}
}
| fauu/AyaIB | app/controllers/StaffPanelController.scala | Scala | gpl-3.0 | 3,383 |
package bijection
object BufferableGenerator {
val pkg = "package com.twitter.bijection"
/* Example of the code generated:
implicit def tuple2[A,B](implicit ba: Bufferable[A], bb: Bufferable[B]): Bufferable[(A,B)] =
new AbstractBufferable[(A,B)] {
def put(bytebuf: ByteBuffer, tup: (A,B)) = {
var nextBb = bytebuf
nextBb = reallocatingPut(nextBb) { ba.put(_, tup._1) }
nextBb = reallocatingPut(nextBb) { bb.put(_, tup._2) }
nextBb
}
// this should perform better than for comprehension
def get(bytebuf: ByteBuffer) = attempt(bytebuf) { bytebuf =>
val (bufa, a) = ba.unsafeGet(bytebuf)
val (bufb, b) = bb.unsafeGet(bufa)
(bufb, (a,b))
}
}
*/
val lowerLetters = ('a' to 'z').toIndexedSeq
val upperLetters = ('A' to 'Z').toIndexedSeq
def bufferableParam(idx: Int) =
"b" + lowerLetters(idx) + ": Bufferable[" + upperLetters(idx) + "]"
def typeList(cnt: Int) =
upperLetters.slice(0, cnt) map { _.toString } mkString (",")
def tupleTypeList(cnt: Int) = "Tuple" + cnt + "[" + typeList(cnt) + "]"
def reallocatingPut(idx: Int) =
"nextBb = reallocatingPut(nextBb) { b" + lowerLetters(idx) + ".put(_, tup._" + (idx + 1) + ") }"
def bufferGet(idx: Int) = {
val getFrom = if (idx == 0) "bytebuf" else ("buf" + lowerLetters(idx - 1))
val lowlet = lowerLetters(idx)
"val (buf%s, %s) = b%s.unsafeGet(%s)".format(lowlet, lowlet, lowlet, getFrom)
}
def bufferableType(idx: Int) = "Bufferable[" + upperLetters(idx) + "]"
// Here we put it all together:
def implicitTuple(cnt: Int): String =
" implicit def tuple" + cnt + "[" + typeList(cnt) + "](implicit " +
      ((0 until cnt) map { bufferableParam(_) } mkString (", ")) + "):\n" +
" Bufferable[" + tupleTypeList(cnt) + "] = new AbstractBufferable[" + tupleTypeList(
cnt
) + "] {\\n" +
" def put(bytebuf: ByteBuffer, tup: " + tupleTypeList(cnt) + ") = {\\n" +
" var nextBb = bytebuf\\n" +
" " + ((0 until cnt) map { reallocatingPut(_) }).mkString("", "\\n ", "\\n") +
" nextBb\\n" +
" }\\n" +
" def get(bytebuf: ByteBuffer) = attempt(bytebuf) { bytebuf =>\\n" +
" " + ((0 until cnt) map { bufferGet(_) }).mkString("", "\\n ", "\\n") +
" val res = Tuple" + cnt + (0 until cnt)
.map { lowerLetters(_) }
.mkString("(", ", ", ")") + "\\n" +
" (buf" + lowerLetters(cnt - 1) + ", res)\\n" +
" }\\n" +
" }"
def generate = {
val b = new StringBuffer
b.append("// Autogenerated code DO NOT EDIT BY HAND\\n")
b.append(pkg).append("\\n")
b.append("import Bufferable.reallocatingPut\\n")
b.append("import java.nio.ByteBuffer\\n")
b.append("import com.twitter.bijection.Inversion.attempt\\n")
b.append("\\ntrait GeneratedTupleBufferable {\\n")
(1 to 22).foreach { cnt => b.append(implicitTuple(cnt)).append("\\n") }
b.append("}")
b.toString
}
}
| twitter/bijection | project/BufferableGenerator.scala | Scala | apache-2.0 | 3,072 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx.lib
import scala.reflect.ClassTag
import breeze.linalg.{Vector => BV}
import org.apache.spark.graphx._
import org.apache.spark.internal.Logging
import org.apache.spark.ml.linalg.{Vector, Vectors}
/**
* PageRank algorithm implementation. There are two implementations of PageRank implemented.
*
* The first implementation uses the standalone `Graph` interface and runs PageRank
* for a fixed number of iterations:
* {{{
* var PR = Array.fill(n)( 1.0 )
* val oldPR = Array.fill(n)( 1.0 )
* for( iter <- 0 until numIter ) {
* swap(oldPR, PR)
* for( i <- 0 until n ) {
* PR[i] = alpha + (1 - alpha) * inNbrs[i].map(j => oldPR[j] / outDeg[j]).sum
* }
* }
* }}}
*
* The second implementation uses the `Pregel` interface and runs PageRank until
* convergence:
*
* {{{
* var PR = Array.fill(n)( 1.0 )
* val oldPR = Array.fill(n)( 0.0 )
* while( max(abs(PR - oldPr)) > tol ) {
* swap(oldPR, PR)
* for( i <- 0 until n if abs(PR[i] - oldPR[i]) > tol ) {
 *     PR[i] = alpha + (1 - alpha) * inNbrs[i].map(j => oldPR[j] / outDeg[j]).sum
* }
* }
* }}}
*
* `alpha` is the random reset probability (typically 0.15), `inNbrs[i]` is the set of
* neighbors which link to `i` and `outDeg[j]` is the out degree of vertex `j`.
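 *
 * A brief usage sketch (assuming an existing graph: Graph[VD, ED]; the value names are
 * illustrative only):
 * {{{
 * // fixed number of iterations
 * val ranks = PageRank.run(graph, numIter = 20).vertices
 * // run until the per-vertex change drops below the given tolerance
 * val converged = PageRank.runUntilConvergence(graph, tol = 0.0001).vertices
 * }}}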
*
* @note This is not the "normalized" PageRank and as a consequence pages that have no
* inlinks will have a PageRank of alpha.
*/
object PageRank extends Logging {
/**
* Run PageRank for a fixed number of iterations returning a graph
* with vertex attributes containing the PageRank and edge
* attributes the normalized edge weight.
*
* @tparam VD the original vertex attribute (not used)
* @tparam ED the original edge attribute (not used)
*
* @param graph the graph on which to compute PageRank
* @param numIter the number of iterations of PageRank to run
* @param resetProb the random reset probability (alpha)
*
   * @return the graph with each vertex containing the PageRank and each edge
* containing the normalized weight.
*/
def run[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED], numIter: Int,
resetProb: Double = 0.15): Graph[Double, Double] =
{
runWithOptions(graph, numIter, resetProb)
}
/**
* Run PageRank for a fixed number of iterations returning a graph
* with vertex attributes containing the PageRank and edge
* attributes the normalized edge weight.
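   *
   * A brief sketch of the personalized variant (the source vertex id is illustrative only):
   * {{{
   * val personalized = PageRank.runWithOptions(graph, numIter = 20, resetProb = 0.15, srcId = Some(42L))
   * }}}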
*
* @tparam VD the original vertex attribute (not used)
* @tparam ED the original edge attribute (not used)
*
* @param graph the graph on which to compute PageRank
* @param numIter the number of iterations of PageRank to run
* @param resetProb the random reset probability (alpha)
* @param srcId the source vertex for a Personalized Page Rank (optional)
*
   * @return the graph with each vertex containing the PageRank and each edge
* containing the normalized weight.
*
*/
def runWithOptions[VD: ClassTag, ED: ClassTag](
graph: Graph[VD, ED], numIter: Int, resetProb: Double = 0.15,
srcId: Option[VertexId] = None): Graph[Double, Double] =
{
require(numIter > 0, s"Number of iterations must be greater than 0," +
s" but got ${numIter}")
require(resetProb >= 0 && resetProb <= 1, s"Random reset probability must belong" +
s" to [0, 1], but got ${resetProb}")
val personalized = srcId.isDefined
val src: VertexId = srcId.getOrElse(-1L)
// Initialize the PageRank graph with each edge attribute having
// weight 1/outDegree and each vertex with attribute resetProb.
// When running personalized pagerank, only the source vertex
// has an attribute resetProb. All others are set to 0.
var rankGraph: Graph[Double, Double] = graph
// Associate the degree with each vertex
.outerJoinVertices(graph.outDegrees) { (vid, vdata, deg) => deg.getOrElse(0) }
// Set the weight on the edges based on the degree
.mapTriplets( e => 1.0 / e.srcAttr, TripletFields.Src )
// Set the vertex attributes to the initial pagerank values
.mapVertices { (id, attr) =>
if (!(id != src && personalized)) resetProb else 0.0
}
def delta(u: VertexId, v: VertexId): Double = { if (u == v) 1.0 else 0.0 }
var iteration = 0
var prevRankGraph: Graph[Double, Double] = null
while (iteration < numIter) {
rankGraph.cache()
// Compute the outgoing rank contributions of each vertex, perform local preaggregation, and
// do the final aggregation at the receiving vertices. Requires a shuffle for aggregation.
val rankUpdates = rankGraph.aggregateMessages[Double](
ctx => ctx.sendToDst(ctx.srcAttr * ctx.attr), _ + _, TripletFields.Src)
// Apply the final rank updates to get the new ranks, using join to preserve ranks of vertices
// that didn't receive a message. Requires a shuffle for broadcasting updated ranks to the
// edge partitions.
prevRankGraph = rankGraph
val rPrb = if (personalized) {
(src: VertexId, id: VertexId) => resetProb * delta(src, id)
} else {
(src: VertexId, id: VertexId) => resetProb
}
rankGraph = rankGraph.joinVertices(rankUpdates) {
(id, oldRank, msgSum) => rPrb(src, id) + (1.0 - resetProb) * msgSum
}.cache()
rankGraph.edges.foreachPartition(x => {}) // also materializes rankGraph.vertices
logInfo(s"PageRank finished iteration $iteration.")
prevRankGraph.vertices.unpersist(false)
prevRankGraph.edges.unpersist(false)
iteration += 1
}
rankGraph
}
/**
* Run Personalized PageRank for a fixed number of iterations, for a
* set of starting nodes in parallel. Returns a graph with vertex attributes
* containing the pagerank relative to all starting nodes (as a sparse vector) and
* edge attributes the normalized edge weight
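   *
   * A brief usage sketch (the source vertex ids are illustrative only):
   * {{{
   * val ranks = PageRank.runParallelPersonalizedPageRank(graph, numIter = 10, sources = Array(1L, 2L))
   * }}}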
*
* @tparam VD The original vertex attribute (not used)
* @tparam ED The original edge attribute (not used)
*
* @param graph The graph on which to compute personalized pagerank
* @param numIter The number of iterations to run
* @param resetProb The random reset probability
* @param sources The list of sources to compute personalized pagerank from
* @return the graph with vertex attributes
* containing the pagerank relative to all starting nodes (as a sparse vector) and
* edge attributes the normalized edge weight
*/
def runParallelPersonalizedPageRank[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED],
numIter: Int, resetProb: Double = 0.15,
sources: Array[VertexId]): Graph[Vector, Double] = {
require(numIter > 0, s"Number of iterations must be greater than 0," +
s" but got ${numIter}")
require(resetProb >= 0 && resetProb <= 1, s"Random reset probability must belong" +
s" to [0, 1], but got ${resetProb}")
require(sources.nonEmpty, s"The list of sources must be non-empty," +
s" but got ${sources.mkString("[", ",", "]")}")
// TODO if one sources vertex id is outside of the int range
// we won't be able to store its activations in a sparse vector
val zero = Vectors.sparse(sources.size, List()).asBreeze
val sourcesInitMap = sources.zipWithIndex.map { case (vid, i) =>
val v = Vectors.sparse(sources.size, Array(i), Array(resetProb)).asBreeze
(vid, v)
}.toMap
val sc = graph.vertices.sparkContext
val sourcesInitMapBC = sc.broadcast(sourcesInitMap)
// Initialize the PageRank graph with each edge attribute having
// weight 1/outDegree and each source vertex with attribute 1.0.
var rankGraph = graph
// Associate the degree with each vertex
.outerJoinVertices(graph.outDegrees) { (vid, vdata, deg) => deg.getOrElse(0) }
// Set the weight on the edges based on the degree
.mapTriplets(e => 1.0 / e.srcAttr, TripletFields.Src)
.mapVertices { (vid, attr) =>
if (sourcesInitMapBC.value contains vid) {
sourcesInitMapBC.value(vid)
} else {
zero
}
}
var i = 0
while (i < numIter) {
val prevRankGraph = rankGraph
// Propagates the message along outbound edges
// and adding start nodes back in with activation resetProb
val rankUpdates = rankGraph.aggregateMessages[BV[Double]](
ctx => ctx.sendToDst(ctx.srcAttr :* ctx.attr),
(a : BV[Double], b : BV[Double]) => a :+ b, TripletFields.Src)
rankGraph = rankGraph.joinVertices(rankUpdates) {
(vid, oldRank, msgSum) =>
val popActivations: BV[Double] = msgSum :* (1.0 - resetProb)
val resetActivations = if (sourcesInitMapBC.value contains vid) {
sourcesInitMapBC.value(vid)
} else {
zero
}
popActivations :+ resetActivations
}.cache()
rankGraph.edges.foreachPartition(x => {}) // also materializes rankGraph.vertices
prevRankGraph.vertices.unpersist(false)
prevRankGraph.edges.unpersist(false)
logInfo(s"Parallel Personalized PageRank finished iteration $i.")
i += 1
}
rankGraph.mapVertices { (vid, attr) =>
Vectors.fromBreeze(attr)
}
}
/**
* Run a dynamic version of PageRank returning a graph with vertex attributes containing the
* PageRank and edge attributes containing the normalized edge weight.
*
* @tparam VD the original vertex attribute (not used)
* @tparam ED the original edge attribute (not used)
*
* @param graph the graph on which to compute PageRank
* @param tol the tolerance allowed at convergence (smaller => more accurate).
* @param resetProb the random reset probability (alpha)
*
   * @return the graph with each vertex containing the PageRank and each edge
* containing the normalized weight.
*/
def runUntilConvergence[VD: ClassTag, ED: ClassTag](
graph: Graph[VD, ED], tol: Double, resetProb: Double = 0.15): Graph[Double, Double] =
{
runUntilConvergenceWithOptions(graph, tol, resetProb)
}
/**
* Run a dynamic version of PageRank returning a graph with vertex attributes containing the
* PageRank and edge attributes containing the normalized edge weight.
*
* @tparam VD the original vertex attribute (not used)
* @tparam ED the original edge attribute (not used)
*
* @param graph the graph on which to compute PageRank
* @param tol the tolerance allowed at convergence (smaller => more accurate).
* @param resetProb the random reset probability (alpha)
* @param srcId the source vertex for a Personalized Page Rank (optional)
*
   * @return the graph with each vertex containing the PageRank and each edge
* containing the normalized weight.
*/
def runUntilConvergenceWithOptions[VD: ClassTag, ED: ClassTag](
graph: Graph[VD, ED], tol: Double, resetProb: Double = 0.15,
srcId: Option[VertexId] = None): Graph[Double, Double] =
{
require(tol >= 0, s"Tolerance must be no less than 0, but got ${tol}")
require(resetProb >= 0 && resetProb <= 1, s"Random reset probability must belong" +
s" to [0, 1], but got ${resetProb}")
val personalized = srcId.isDefined
val src: VertexId = srcId.getOrElse(-1L)
// Initialize the pagerankGraph with each edge attribute
// having weight 1/outDegree and each vertex with attribute 1.0.
val pagerankGraph: Graph[(Double, Double), Double] = graph
// Associate the degree with each vertex
.outerJoinVertices(graph.outDegrees) {
(vid, vdata, deg) => deg.getOrElse(0)
}
// Set the weight on the edges based on the degree
.mapTriplets( e => 1.0 / e.srcAttr )
// Set the vertex attributes to (initialPR, delta = 0)
.mapVertices { (id, attr) =>
if (id == src) (resetProb, Double.NegativeInfinity) else (0.0, 0.0)
}
.cache()
// Define the three functions needed to implement PageRank in the GraphX
// version of Pregel
def vertexProgram(id: VertexId, attr: (Double, Double), msgSum: Double): (Double, Double) = {
val (oldPR, lastDelta) = attr
val newPR = oldPR + (1.0 - resetProb) * msgSum
(newPR, newPR - oldPR)
}
def personalizedVertexProgram(id: VertexId, attr: (Double, Double),
msgSum: Double): (Double, Double) = {
val (oldPR, lastDelta) = attr
var teleport = oldPR
val delta = if (src==id) 1.0 else 0.0
teleport = oldPR*delta
val newPR = teleport + (1.0 - resetProb) * msgSum
val newDelta = if (lastDelta == Double.NegativeInfinity) newPR else newPR - oldPR
(newPR, newDelta)
}
def sendMessage(edge: EdgeTriplet[(Double, Double), Double]) = {
if (edge.srcAttr._2 > tol) {
Iterator((edge.dstId, edge.srcAttr._2 * edge.attr))
} else {
Iterator.empty
}
}
def messageCombiner(a: Double, b: Double): Double = a + b
// The initial message received by all vertices in PageRank
val initialMessage = if (personalized) 0.0 else resetProb / (1.0 - resetProb)
// Execute a dynamic version of Pregel.
val vp = if (personalized) {
(id: VertexId, attr: (Double, Double), msgSum: Double) =>
personalizedVertexProgram(id, attr, msgSum)
} else {
(id: VertexId, attr: (Double, Double), msgSum: Double) =>
vertexProgram(id, attr, msgSum)
}
Pregel(pagerankGraph, initialMessage, activeDirection = EdgeDirection.Out)(
vp, sendMessage, messageCombiner)
.mapVertices((vid, attr) => attr._1)
} // end of deltaPageRank
}
| Panos-Bletsos/spark-cost-model-optimizer | graphx/src/main/scala/org/apache/spark/graphx/lib/PageRank.scala | Scala | apache-2.0 | 14,439 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala
import com.mongodb.{MongoNamespace => JMongoNamespace}
/**
* A companion object for MongoNamespace
*
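 * A small usage sketch (the database and collection names are illustrative):
 * {{{
 * val ns1 = MongoNamespace("test.users")    // single "databaseName.collectionName" string
 * val ns2 = MongoNamespace("test", "users") // explicit database and collection names
 * }}}
 *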
* @since 1.0
*/
object MongoNamespace {
def apply(namespace: String): JMongoNamespace = new JMongoNamespace(namespace)
def apply(databaseName: String, collectionName: String): JMongoNamespace = new JMongoNamespace(databaseName, collectionName)
}
| rozza/mongo-scala-driver | driver/src/main/scala/org/mongodb/scala/MongoNamespace.scala | Scala | apache-2.0 | 987 |
package org.jetbrains.plugins.scala
package codeInsight.delegate
import com.intellij.codeInsight.generation._
import com.intellij.codeInsight.{CodeInsightBundle, CodeInsightUtilBase}
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.editor.{Editor, ScrollType}
import com.intellij.openapi.fileEditor.FileDocumentManager
import com.intellij.openapi.project.Project
import com.intellij.openapi.ui.DialogWrapper
import com.intellij.psi._
import com.intellij.psi.search.LocalSearchScope
import com.intellij.psi.search.searches.ReferencesSearch
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.util.IncorrectOperationException
import org.jetbrains.annotations.{NotNull, Nullable}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameterClause, ScTypeParam}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.types.PhysicalSignature
import org.jetbrains.plugins.scala.lang.psi.{TypeAdjuster, ScalaPsiUtil, types}
import org.jetbrains.plugins.scala.lang.resolve.processor.CompletionProcessor
import org.jetbrains.plugins.scala.lang.resolve.{ResolveUtils, ScalaResolveResult, StdKinds}
import org.jetbrains.plugins.scala.overrideImplement._
import org.jetbrains.plugins.scala.settings.ScalaApplicationSettings
import scala.collection.JavaConversions._
/**
* Nikolay.Tropin
* 2014-03-21
*/
class ScalaGenerateDelegateHandler extends GenerateDelegateHandler {
type ClassMember = overrideImplement.ClassMember
override def isValidFor(editor: Editor, file: PsiFile): Boolean = hasTargetElements(file, editor)
override def invoke(@NotNull project: Project, @NotNull editor: Editor, @NotNull file: PsiFile) {
if (!CodeInsightUtilBase.prepareEditorForWrite(editor)) return
if (!FileDocumentManager.getInstance.requestWriting(editor.getDocument, project)) return
PsiDocumentManager.getInstance(project).commitAllDocuments()
val target = chooseTarget(file, editor, project)
if (target == null) return
val candidates = chooseMethods(target, file, editor, project)
if (candidates == null || candidates.length == 0) return
val elementAtOffset = file.findElementAt(editor.getCaretModel.getOffset)
val specifyType = ScalaApplicationSettings.getInstance().SPECIFY_RETURN_TYPE_EXPLICITLY
inWriteCommandAction(project) {
try {
val aClass = classAtOffset(editor.getCaretModel.getOffset, file)
val generatedMethods = for (member <- candidates) yield {
val prototype: ScFunctionDefinition =
ScalaPsiElementFactory.createMethodFromSignature(member.sign, aClass.getManager, specifyType, body = "???")
.asInstanceOf[ScFunctionDefinition]
prototype.setModifierProperty("override", value = member.isOverride)
val body = methodBody(target, prototype)
prototype.body.foreach(_.replace(body))
val genInfo = new ScalaGenerationInfo(member)
val added = aClass.addMember(prototype, Option(genInfo.findInsertionAnchor(aClass, elementAtOffset)))
.asInstanceOf[ScFunctionDefinition]
if (added.superMethod.nonEmpty) added.setModifierProperty("override", value = true)
added
}
if (!generatedMethods.isEmpty) {
val firstMethod = generatedMethods(0)
val body = firstMethod.body.get
editor.getCaretModel.moveToOffset(body.getTextRange.getStartOffset)
editor.getScrollingModel.scrollToCaret(ScrollType.RELATIVE)
editor.getSelectionModel.removeSelection()
}
TypeAdjuster.adjustFor(generatedMethods)
}
catch {
case e: IncorrectOperationException => throw new IncorrectOperationException(s"Could not delegate methods to ${target.getText}")
}
}
}
private def methodBody(delegate: ClassMember, prototype: ScFunction): ScExpression = {
def typeParameterUsedIn(parameter: ScTypeParam, elements: Seq[PsiElement]) = {
elements.exists(elem => ReferencesSearch.search(parameter, new LocalSearchScope(elem)).findAll().nonEmpty)
}
val typeParamsForCall: String = {
val typeParams = prototype.typeParameters
val parametersAndRetType = prototype.parameters ++ prototype.returnTypeElement
if (typeParams.exists(!typeParameterUsedIn(_, parametersAndRetType))) {
typeParams.map(_.nameId.getText).mkString("[", ", ", "]")
}
else ""
}
val dText: String = delegateText(delegate)
val methodName = prototype.name
def paramClauseApplicationText(paramClause: ScParameterClause) = {
paramClause.parameters.map(_.name).mkString("(", ", ", ")")
}
val params = prototype.effectiveParameterClauses.map(paramClauseApplicationText).mkString
ScalaPsiElementFactory.createExpressionFromText(s"$dText.$methodName$typeParamsForCall$params", prototype.getManager)
}
private def delegateText(delegate: ClassMember): String = {
val delegateText = delegate match {
case field@(_: ScValueMember | _: ScVariableMember | _: JavaFieldMember) => field.asInstanceOf[ScalaNamedMember].name
case methMember: ScMethodMember =>
methMember.sign.method match {
case m: PsiMethod if m.isAccessor => m.getName
case f: ScFunction if f.isEmptyParen => f.name + "()"
case f: ScFunction if f.isParameterless => f.name
}
}
delegateText
}
@Nullable
private def chooseMethods(delegate: ClassMember, file: PsiFile, editor: Editor, project: Project): Array[ScMethodMember] = {
val delegateType = delegate.asInstanceOf[ScalaTypedMember].scType
val aClass = classAtOffset(editor.getCaretModel.getOffset, file)
val tBody = aClass.extendsBlock.templateBody.get
val place = ScalaPsiElementFactory.createExpressionWithContextFromText(delegateText(delegate), tBody, tBody.getFirstChild)
if (aClass == null) return null
val processor = new CompletionProcessor(StdKinds.methodRef, place, false)
processor.processType(delegateType, place)
val candidates = processor.candidatesS
val members = toMethodMembers(candidates, place)
if (!ApplicationManager.getApplication.isUnitTestMode) {
val chooser = new ScalaMemberChooser[ScMethodMember](members.toArray, false, true, false, true, aClass)
chooser.setTitle(CodeInsightBundle.message("generate.delegate.method.chooser.title"))
chooser.show()
if (chooser.getExitCode != DialogWrapper.OK_EXIT_CODE) return null
chooser.getSelectedElements.toBuffer.toArray
}
else if (members.nonEmpty) Array(members.head) else Array()
}
private def toMethodMembers(candidates: Iterable[ScalaResolveResult], place: PsiElement): Seq[ScMethodMember] = {
object isSuitable {
def unapply(srr: ScalaResolveResult): Option[PhysicalSignature] = {
if (srr.implicitConversionClass.nonEmpty || srr.implicitFunction.nonEmpty) return None
srr.getElement match {
case meth: PsiMethod if meth.isConstructor || meth.getContainingClass == null => None
case meth: PsiMethod if meth.getContainingClass.getQualifiedName == CommonClassNames.JAVA_LANG_OBJECT => None
case meth: PsiMethod if !ResolveUtils.isAccessible(meth, place, forCompletion = true) => None
case meth: PsiMethod => Some(new PhysicalSignature(meth, srr.substitutor))
case _ => None
}
}
}
candidates.toSeq.collect {
case isSuitable(sign) => new ScMethodMember(sign, isOverride = false)
}
}
@Nullable
private def chooseTarget(file: PsiFile, editor: Editor, project: Project): ClassMember = {
val elements: Array[ClassMember] = targetElements(file, editor)
if (elements == null || elements.length == 0) return null
if (!ApplicationManager.getApplication.isUnitTestMode) {
val chooser = new ScalaMemberChooser(elements, false, false, false, false, classAtOffset(editor.getCaretModel.getOffset, file))
chooser.setTitle(CodeInsightBundle.message("generate.delegate.target.chooser.title"))
chooser.show()
if (chooser.getExitCode != DialogWrapper.OK_EXIT_CODE) return null
val selectedElements = chooser.getSelectedElements
if (selectedElements != null && selectedElements.size > 0) return selectedElements.get(0)
}
else {
return elements(0)
}
null
}
private def targetElements(file: PsiFile, editor: Editor): Array[ClassMember] = {
parentClasses(file, editor).flatMap(targetsIn).toArray
}
private def hasTargetElements(file: PsiFile, editor: Editor): Boolean = {
parentClasses(file, editor).exists(hasTargetsIn)
}
private def targetsIn(clazz: ScTemplateDefinition): Seq[ClassMember] = {
//todo add ScObjectMember for targets
val allMembers = ScalaOIUtil.allMembers(clazz, withSelfType = true)
.flatMap(ScalaOIUtil.toClassMember(_, isImplement = false))
allMembers.toSeq.filter(canBeTargetInClass(_, clazz))
}
private def hasTargetsIn(clazz: ScTemplateDefinition): Boolean = {
for {
m <- ScalaOIUtil.allMembers(clazz, withSelfType = true)
cm <- ScalaOIUtil.toClassMember(m, isImplement = false)
} {
if (canBeTargetInClass(cm, clazz)) return true
}
false
}
private def canBeTargetInClass(member: ClassMember, clazz: ScTemplateDefinition): Boolean = member match {
case ta: ScAliasMember => false
case typed: ScalaTypedMember if typed.scType == types.Unit => false
case method: ScMethodMember =>
method.getElement match {
case m: PsiMethod if {val cl = m.getContainingClass; cl != null && cl.getQualifiedName == CommonClassNames.JAVA_LANG_OBJECT} => false
case f: ScFunction => (f.isParameterless || f.isEmptyParen) && ResolveUtils.isAccessible(f, clazz, forCompletion = false)
case m: PsiMethod => m.isAccessor && ResolveUtils.isAccessible(m, clazz, forCompletion = false)
case _ => false
}
case v @ (_: ScValueMember | _: ScVariableMember | _: JavaFieldMember)
if ResolveUtils.isAccessible(v.getElement, clazz, forCompletion = false) => true
case _ => false
}
private def classAtOffset(offset: Int, file: PsiFile) = {
val td = PsiTreeUtil.getContextOfType(file.findElementAt(offset), classOf[ScTemplateDefinition])
if (td == null || td.extendsBlock.templateBody.isEmpty) null
else td
}
private def parentClasses(file: PsiFile, editor: Editor): Seq[ScTemplateDefinition] = {
val closestClass = classAtOffset(editor.getCaretModel.getOffset, file)
if (closestClass == null) return Seq.empty
closestClass +: closestClass.parentsInFile.toSeq.collect {case td: ScTemplateDefinition => td}
}
}
| igrocki/intellij-scala | src/org/jetbrains/plugins/scala/codeInsight/delegate/ScalaGenerateDelegateHandler.scala | Scala | apache-2.0 | 10,996 |
package swarmize.aws.swf
import com.amazonaws.AmazonWebServiceRequest
import com.amazonaws.handlers.AsyncHandler
import com.amazonaws.services.simpleworkflow.AmazonSimpleWorkflowAsync
import com.amazonaws.services.simpleworkflow.model._
import scala.concurrent.{Future, Promise}
object SwfAsyncHelpers {
class AsyncHandlerToFuture[REQUEST <: AmazonWebServiceRequest, RESULT] extends AsyncHandler[REQUEST, RESULT] {
val promise = Promise[RESULT]()
def future = promise.future
override def onError(exception: Exception): Unit = promise.failure(exception)
override def onSuccess(request: REQUEST, result: RESULT): Unit = promise.success(result)
}
implicit class SwfAsync(swf: AmazonSimpleWorkflowAsync) {
private def invoke[REQUEST <: AmazonWebServiceRequest, RESULT]
(
method: (REQUEST, AsyncHandler[REQUEST, RESULT]) => java.util.concurrent.Future[RESULT],
req: REQUEST
): Future[RESULT] = {
val handler = new AsyncHandlerToFuture[REQUEST, RESULT]
method(req, handler)
handler.future
}
// ignore the red in IntelliJ here, the scala compiler understands this :)
def pollForDecisionTaskFuture(req: PollForDecisionTaskRequest): Future[DecisionTask] =
invoke(swf.pollForDecisionTaskAsync, req)
def respondDecisionTaskCompletedFuture(req: RespondDecisionTaskCompletedRequest): Future[Void] =
invoke(swf.respondDecisionTaskCompletedAsync, req)
def pollForActivityTaskFuture(req: PollForActivityTaskRequest): Future[ActivityTask] =
invoke(swf.pollForActivityTaskAsync, req)
}
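  // Minimal usage sketch, assuming an AmazonSimpleWorkflowAsync client named
  // `client`, a domain "example-domain" and a task list "decisions" (all
  // illustrative names, not taken from this file):
  //
  //   import swarmize.aws.swf.SwfAsyncHelpers._
  //
  //   val task: Future[DecisionTask] =
  //     client.pollForDecisionTaskFuture(
  //       new PollForDecisionTaskRequest()
  //         .withDomain("example-domain")
  //         .withTaskList(new TaskList().withName("decisions")))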
} | FreeSchoolHackers/swarmize | shared-lib/src/main/scala/swarmize/aws/swf/SwfAsyncHelpers.scala | Scala | apache-2.0 | 1,587 |
import org.scalacheck._
import Prop._
class SimpleTest extends Properties("Simple") {
property("increment scala") = forAll( (i: Int) => (new a.b.ScalaA).increment(i) == i+1)
property("increment java") = forAll( (i: Int) => (new JavaA).inc(i) == i+1)
// property("decrement scala") = forAll( (i: Int) => (new b.ScalaB).decrement(i) == i+1)
// property("decrement java") = forAll( (i: Int) => (new a.JavaB).dec(i) == i+1)
}
object MainTest {
def main(args: Array[String]): Unit = ()
}
| Duhemm/sbt | sbt/src/sbt-test/project/flatten/test-src/SimpleTest.scala | Scala | bsd-3-clause | 499 |
package ch.uzh.ifi.pdeboer.pplib.hcomp.ballot.snippet
import com.typesafe.scalalogging.LazyLogging
import scala.xml._
/**
* Created by mattia on 02.09.15.
*/
case class SnippetHTMLValidator(baseURL: String) extends LazyLogging {
def fixFormAttributes(ns: NodeSeq): NodeSeq = {
val htmlToDisplayOnBallotPage: NodeSeq = ns(0).seq.map(updateForm(_))
htmlToDisplayOnBallotPage
}
def updateForm(node: Node): Node = node match {
case elem@Elem(_, "form", _, _, child@_*) => {
elem.asInstanceOf[Elem] % Attribute(None, "action", Text(baseURL + "/storeAnswer"), Null) %
Attribute(None, "method", Text("get"), Null) copy (child = child map updateForm)
}
case elem@Elem(_, _, _, _, child@_*) => {
elem.asInstanceOf[Elem].copy(child = child map updateForm)
}
case other => other
}
def hasInvalidInputElements(form: NodeSeq): Boolean = {
val supportedFields = List[(String, Map[String, List[String]])](
"input" -> Map("type" -> List[String]("submit", "radio", "hidden")),
"textarea" -> Map("name" -> List.empty[String]),
"button" -> Map("type" -> List[String]("submit")),
"select" -> Map("name" -> List.empty[String]))
val definedSupportedFields = supportedFields.map(supportedField => {
if ((form \\ supportedField._1).nonEmpty) {
(form \\ supportedField._1) -> supportedField._2
}
}).collect { case fieldAttributes: (NodeSeq, Map[String, List[String]]) => fieldAttributes }
if (definedSupportedFields.isEmpty) {
logger.error("The form doesn't contain any input, select, textarea or button element.")
true
} else {
definedSupportedFields.forall(htmlElement => !hasInvalidAttributes(htmlElement._1, htmlElement._2))
}
}
private def hasInvalidAttributes(inputElement: NodeSeq, possibleValidAttributes: Map[String, List[String]]): Boolean = {
possibleValidAttributes.exists(attribute => {
inputElement.exists(element =>
element.attribute(attribute._1).exists(attributeValue => {
if (attribute._2.isEmpty) {
true
} else {
attribute._2.contains(attributeValue.text) || attribute._2.isEmpty
}
})
)
})
}
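  // Minimal usage sketch with illustrative values:
  //
  //   val validator = SnippetHTMLValidator("http://localhost:9000")
  //   val fixed = validator.fixFormAttributes(XML.loadString("<form><input type='submit'/></form>"))
  //   // every <form> element in `fixed` now targets http://localhost:9000/storeAnswer with method="get"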
}
| manuelroesch/PaperValidator | app/helper/questiongenerator/snippet/SnippetHTMLValidator.scala | Scala | mit | 2,120 |
/*
* Copyright (c) 2016. Fengguo (Hugo) Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.cit.intellij.jawa.lang.structureview
import com.intellij.ide.structureView.{StructureViewModel, StructureViewTreeElement, TextEditorBasedStructureViewModel}
import com.intellij.ide.util.treeView.smartTree.Sorter
import org.argus.cit.intellij.jawa.lang.psi.api.JawaFile
import org.argus.cit.intellij.jawa.lang.psi.impl.JawaFileImpl
import org.argus.cit.intellij.jawa.lang.structureview.elements.impl.JawaFileStructureViewElement
/**
* @author <a href="mailto:[email protected]">Fengguo Wei</a>
*/
class JawaStructureViewModel(psiFile: JawaFile) extends TextEditorBasedStructureViewModel(psiFile) with StructureViewModel.ElementInfoProvider {
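  // Alphabetical sorting only; the Jawa file itself is the root element, and the
  // two ElementInfoProvider overrides below control when tree nodes are treated
  // as leaves or shown with an expansion handle.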
override def getSorters: Array[Sorter] = List(Sorter.ALPHA_SORTER).toArray
override def isAlwaysLeaf(element: StructureViewTreeElement): Boolean = element.isInstanceOf[JawaFileImpl]
override def isAlwaysShowsPlus(structureViewTreeElement: StructureViewTreeElement): Boolean = false
override def getRoot: StructureViewTreeElement = new JawaFileStructureViewElement(psiFile)
}
| arguslab/argus-cit-intellij | src/main/scala/org/argus/cit/intellij/jawa/lang/structureview/JawaStructureViewModel.scala | Scala | epl-1.0 | 1,422 |
package ai.dragonfly.versionedjson.examples.test
object TestVersionedJson extends App {
Tests.testVersionedJson()
} | dragonfly-ai/VersionedJsonForScalaJs | versionedjson/jvm/src/main/scala/ai/dragonfly/versionedjson/examples/test/TestVersionedJson.scala | Scala | apache-2.0 | 120 |
/*-
* #%L
* FWAPP Framework
* %%
* Copyright (C) 2016 - 2017 Open Design Flow
* %%
* This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package org.odfi.wsb.fwapp.lib.security.provider.kerberos
import org.odfi.wsb.fwapp.lib.security.Authenticator
class KerberosAuthenticator extends Authenticator {
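  // Reads the principal and password from the request's URL parameters and, if
  // both are present, delegates to the registered KerberosAuthProvider. An
  // illustrative request (only the parameter names are taken from the code below):
  //
  //   GET /protected?kerberos.principal=alice&kerberos.password=secret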
this.onDownMessage {
req =>
      (req.getURLParameter("kerberos.principal"), req.getURLParameter("kerberos.password")) match {
case (Some(login),Some(pw)) =>
//-- Look for Provider
withProvider[KerberosAuthProvider] {
provider =>
provider.authenticate(login, pw)
}
        case _ =>
}
}
}
| opendesignflow/fwapp | src/main/scala/org/odfi/wsb/fwapp/lib/security/provider/kerberos/KerberosAuthenticator.scala | Scala | agpl-3.0 | 1,322 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jawa.core.compiler.parser
import java.io.{LineNumberReader, StringReader}
import org.argus.jawa.core.util.ISet
/**
* @author <a href="mailto:[email protected]">Fengguo Wei</a>
*/
object LightWeightJawaParser {
val TITLE = "LightWeightJawaParser"
val DEBUG = false
def splitCode(code: String): ISet[String] = {
code.replaceAll("(record `)", "DELIMITER$1").split("DELIMITER").tail.toSet
}
def getCode(recordCode: String, contentSig: String): Option[String] = {
val lnr = new LineNumberReader(new StringReader(recordCode))
var lineNo = 0
var chunkLineNo = 0
val sb = new StringBuilder
var lineText = lnr.readLine
val keywords = Set("record", "global", "procedure")
var found = false
import scala.util.control.Breaks._
breakable{
while (lineText != null) {
val word = getFirstWord(lineText)
if (keywords.contains(word) && found) break
if (keywords.contains(word)) {
if(lineText.contains(contentSig))
found = true
chunkLineNo = lineNo
}
if(found){
sb.append(lineText)
        sb.append('\n')
}
lineNo += 1
lineText = lnr.readLine
}
}
if(found) Some(sb.toString.intern())
else None
}
def getFirstWord(line: String): String = {
val size = line.length
var i = 0
while (i < size && line.charAt(i).isWhitespace) {
i += 1
}
var j = i
while (j < size && !line.charAt(j).isWhitespace) {
j += 1
}
if (i < size && j <= size) line.substring(i, j)
else ""
}
def getClassName(line: String): String = {
val size = line.length
var i = if(line.contains("record")) line.indexOf("record") + 7 else size
while (i < size && line.charAt(i).isWhitespace) {
i += 1
}
var j = i
while (j < size && !line.charAt(j).isWhitespace && !line.charAt(j).equals('@')) {
j += 1
}
if (i < size && j <= size) line.substring(i + 1, j - 1)
    else throw new RuntimeException("Doing " + TITLE + ". Cannot find name from record code: \n" + line)
}
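  // Minimal usage sketch (the record text below is illustrative, not from this file):
  //
  //   val code = "record `com.example.Foo` @kind class { } record `com.example.Bar` @kind class { }"
  //   val chunks = splitCode(code)              // one chunk per `record` declaration
  //   chunks.map(getClassName)                  // Set("com.example.Foo", "com.example.Bar")
  //   getCode(chunks.head, "com.example.Foo")   // Some(...) when the signature occurs in that chunk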
}
| arguslab/Argus-SAF | jawa/src/main/scala/org/argus/jawa/core/compiler/parser/LightWeightJawaParser.scala | Scala | apache-2.0 | 2,481 |