code: string (5-1M) | repo_name: string (5-109) | path: string (6-208) | language: 1 class | license: 15 classes | size: int64 (5-1M)
---|---|---|---|---|---
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.dstream
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Duration, Time}
import org.apache.spark.streaming.scheduler.Job
import scala.reflect.ClassTag
/**
* An internal DStream used to represent output operations like DStream.foreachRDD.
* @param parent Parent DStream
* @param foreachFunc Function to apply on each RDD generated by the parent DStream
* @param displayInnerRDDOps Whether the detailed callsites and scopes of the RDDs generated
 *                           by `foreachFunc` will be displayed in the UI. If `false`, only the
 *                           scope and callsite of `DStream.foreachRDD` will be displayed.
*/
private[streaming]
class ForEachDStream[T: ClassTag] (
parent: DStream[T],
foreachFunc: (RDD[T], Time) => Unit,
displayInnerRDDOps: Boolean
) extends DStream[Unit](parent.ssc) {
override def dependencies: List[DStream[_]] = List(parent)
override def slideDuration: Duration = parent.slideDuration
override def compute(validTime: Time): Option[RDD[Unit]] = None
override def generateJob(time: Time): Option[Job] = {
parent.getOrCompute(time) match {
case Some(rdd) =>
val jobFunc = () => createRDDWithLocalProperties(time, displayInnerRDDOps) {
foreachFunc(rdd, time)
}
Some(new Job(time, jobFunc))
case None => None
}
}
}
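// Illustrative sketch (NOT part of the original Spark source): how an output operation such as
// DStream.foreachRDD is assumed to wire a ForEachDStream into the streaming graph. The call to
// `register()` mirrors what foreachRDD does upstream, but treat its availability as an assumption.
private[streaming] object ForEachDStreamUsageSketch {
  def printCounts[T: ClassTag](parent: DStream[T]): Unit = {
    val output = new ForEachDStream[T](
      parent,
      (rdd: RDD[T], time: Time) => println(s"$time: ${rdd.count()} records"),
      displayInnerRDDOps = false)
    output.register() // assumed: DStream.register() marks this stream as an output of the graph
  }
}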
| chenc10/Spark-PAF | streaming/src/main/scala/org/apache/spark/streaming/dstream/ForEachDStream.scala | Scala | apache-2.0 | 2,197 |
package db.migration
import io.circe.Json
import org.scalatest.{FlatSpec, Matchers}
import pl.touk.nussknacker.engine.api.CirceUtil
class V1_031__FragmentSpecificDataSpec extends FlatSpec with Matchers {
private lazy val expectedScenario = {
val rawJsonString =
"""{
| "metaData": {
| "id": "empty-2",
| "typeSpecificData": {
| "parallelism": 2,
| "spillStateToDisk": true,
| "useAsyncInterpretation": null,
| "checkpointIntervalInSeconds": null,
| "type": "StreamMetaData"
| }
| }
|}
|""".stripMargin
Some(CirceUtil.decodeJsonUnsafe[Json](rawJsonString, "Invalid json string."))
}
private lazy val expectedFragment = {
val rawJsonString =
"""{
| "metaData": {
| "id": "empty-2",
| "typeSpecificData": {
| "docsUrl": null,
| "type": "FragmentSpecificData"
| }
| }
|}
|""".stripMargin
Some(CirceUtil.decodeJsonUnsafe[Json](rawJsonString, "Invalid json string."))
}
it should "convert scenario metadata" in {
val rawJsonString =
"""
|{
| "metaData": {
| "id": "empty-2",
| "isSubprocess": false,
| "typeSpecificData": {
| "parallelism": 2,
| "spillStateToDisk": true,
| "useAsyncInterpretation": null,
| "checkpointIntervalInSeconds": null,
| "type": "StreamMetaData"
| }
| }
|}
|""".stripMargin
val oldJson = CirceUtil.decodeJsonUnsafe[Json](rawJsonString, "Invalid json string.")
val converted = V1_031__FragmentSpecificData.migrateMetadata(oldJson)
converted shouldBe expectedScenario
}
it should "convert fragment metadata" in {
val rawJsonString =
"""
|{
| "metaData": {
| "id": "empty-2",
| "isSubprocess": true,
| "typeSpecificData": {
| "parallelism": 1,
| "spillStateToDisk": true,
| "useAsyncInterpretation": null,
| "checkpointIntervalInSeconds": null,
| "type": "StreamMetaData"
| }
| }
|}
|""".stripMargin
val oldJson = CirceUtil.decodeJsonUnsafe[Json](rawJsonString, "Invalid json string.")
val converted = V1_031__FragmentSpecificData.migrateMetadata(oldJson)
converted shouldBe expectedFragment
}
it should "do nothing if json already without 'isSubprocess' field" in {
val rawJsonString =
"""
|{
| "metaData": {
| "id": "empty-2",
| "typeSpecificData": {
| "parallelism": 1,
| "spillStateToDisk": true,
| "useAsyncInterpretation": null,
| "checkpointIntervalInSeconds": null,
| "type": "StreamMetaData"
| }
| }
|}
|""".stripMargin
val oldJson = CirceUtil.decodeJsonUnsafe[Json](rawJsonString, "Invalid json string.")
val converted = V1_031__FragmentSpecificData.migrateMetadata(oldJson)
converted shouldBe Some(oldJson)
}
}
| TouK/nussknacker | ui/server/src/test/scala/db/migration/V1_031__FragmentSpecificDataSpec.scala | Scala | apache-2.0 | 3,235 |
/**
* Generated by Scrooge
* version: 3.13.0
* rev: 0921444211eb6b3d2ac9fd31a1bf189f94c6ae85
* built at: 20140325-114520
*/
package com.twitter.zipkin.gen
import com.twitter.scrooge.{
TFieldBlob, ThriftException, ThriftStruct, ThriftStructCodec3, ThriftStructFieldInfo, ThriftUtil}
import org.apache.thrift.protocol._
import org.apache.thrift.transport.{TMemoryBuffer, TTransport}
import java.nio.ByteBuffer
import java.util.Arrays
import scala.collection.immutable.{Map => immutable$Map}
import scala.collection.mutable.Builder
import scala.collection.mutable.{
ArrayBuffer => mutable$ArrayBuffer, Buffer => mutable$Buffer,
HashMap => mutable$HashMap, HashSet => mutable$HashSet}
import scala.collection.{Map, Set}
object QueryResponse extends ThriftStructCodec3[QueryResponse] {
private val NoPassthroughFields = immutable$Map.empty[Short, TFieldBlob]
val Struct = new TStruct("QueryResponse")
val TraceIdsField = new TField("trace_ids", TType.LIST, 1)
val TraceIdsFieldManifest = implicitly[Manifest[Seq[Long]]]
val StartTsField = new TField("start_ts", TType.I64, 2)
val StartTsFieldManifest = implicitly[Manifest[Long]]
val EndTsField = new TField("end_ts", TType.I64, 3)
val EndTsFieldManifest = implicitly[Manifest[Long]]
/**
* Field information in declaration order.
*/
lazy val fieldInfos: scala.List[ThriftStructFieldInfo] = scala.List[ThriftStructFieldInfo](
new ThriftStructFieldInfo(
TraceIdsField,
false,
TraceIdsFieldManifest,
None,
Some(implicitly[Manifest[Long]]),
immutable$Map(
),
immutable$Map(
)
),
new ThriftStructFieldInfo(
StartTsField,
false,
StartTsFieldManifest,
None,
None,
immutable$Map(
),
immutable$Map(
)
),
new ThriftStructFieldInfo(
EndTsField,
false,
EndTsFieldManifest,
None,
None,
immutable$Map(
),
immutable$Map(
)
)
)
lazy val structAnnotations: immutable$Map[String, String] =
immutable$Map[String, String](
)
/**
* Checks that all required fields are non-null.
*/
def validate(_item: QueryResponse) {
}
override def encode(_item: QueryResponse, _oproto: TProtocol) {
_item.write(_oproto)
}
override def decode(_iprot: TProtocol): QueryResponse = {
var traceIds: Seq[Long] = Seq[Long]()
var startTs: Long = 0L
var endTs: Long = 0L
var _passthroughFields: Builder[(Short, TFieldBlob), immutable$Map[Short, TFieldBlob]] = null
var _done = false
_iprot.readStructBegin()
while (!_done) {
val _field = _iprot.readFieldBegin()
if (_field.`type` == TType.STOP) {
_done = true
} else {
_field.id match {
case 1 =>
_field.`type` match {
case TType.LIST => {
traceIds = readTraceIdsValue(_iprot)
}
case _actualType =>
val _expectedType = TType.LIST
throw new TProtocolException(
"Received wrong type for field 'traceIds' (expected=%s, actual=%s).".format(
ttypeToHuman(_expectedType),
ttypeToHuman(_actualType)
)
)
}
case 2 =>
_field.`type` match {
case TType.I64 => {
startTs = readStartTsValue(_iprot)
}
case _actualType =>
val _expectedType = TType.I64
throw new TProtocolException(
"Received wrong type for field 'startTs' (expected=%s, actual=%s).".format(
ttypeToHuman(_expectedType),
ttypeToHuman(_actualType)
)
)
}
case 3 =>
_field.`type` match {
case TType.I64 => {
endTs = readEndTsValue(_iprot)
}
case _actualType =>
val _expectedType = TType.I64
throw new TProtocolException(
"Received wrong type for field 'endTs' (expected=%s, actual=%s).".format(
ttypeToHuman(_expectedType),
ttypeToHuman(_actualType)
)
)
}
case _ =>
if (_passthroughFields == null)
_passthroughFields = immutable$Map.newBuilder[Short, TFieldBlob]
_passthroughFields += (_field.id -> TFieldBlob.read(_field, _iprot))
}
_iprot.readFieldEnd()
}
}
_iprot.readStructEnd()
new Immutable(
traceIds,
startTs,
endTs,
if (_passthroughFields == null)
NoPassthroughFields
else
_passthroughFields.result()
)
}
def apply(
traceIds: Seq[Long] = Seq[Long](),
startTs: Long,
endTs: Long
): QueryResponse =
new Immutable(
traceIds,
startTs,
endTs
)
def unapply(_item: QueryResponse): Option[scala.Product3[Seq[Long], Long, Long]] = Some(_item)
private def readTraceIdsValue(_iprot: TProtocol): Seq[Long] = {
val _list = _iprot.readListBegin()
if (_list.size == 0) {
_iprot.readListEnd()
Nil
} else {
val _rv = new mutable$ArrayBuffer[Long](_list.size)
var _i = 0
while (_i < _list.size) {
_rv += {
_iprot.readI64()
}
_i += 1
}
_iprot.readListEnd()
_rv
}
}
private def writeTraceIdsField(traceIds_item: Seq[Long], _oprot: TProtocol) {
_oprot.writeFieldBegin(TraceIdsField)
writeTraceIdsValue(traceIds_item, _oprot)
_oprot.writeFieldEnd()
}
private def writeTraceIdsValue(traceIds_item: Seq[Long], _oprot: TProtocol) {
_oprot.writeListBegin(new TList(TType.I64, traceIds_item.size))
traceIds_item.foreach { traceIds_item_element =>
_oprot.writeI64(traceIds_item_element)
}
_oprot.writeListEnd()
}
private def readStartTsValue(_iprot: TProtocol): Long = {
_iprot.readI64()
}
private def writeStartTsField(startTs_item: Long, _oprot: TProtocol) {
_oprot.writeFieldBegin(StartTsField)
writeStartTsValue(startTs_item, _oprot)
_oprot.writeFieldEnd()
}
private def writeStartTsValue(startTs_item: Long, _oprot: TProtocol) {
_oprot.writeI64(startTs_item)
}
private def readEndTsValue(_iprot: TProtocol): Long = {
_iprot.readI64()
}
private def writeEndTsField(endTs_item: Long, _oprot: TProtocol) {
_oprot.writeFieldBegin(EndTsField)
writeEndTsValue(endTs_item, _oprot)
_oprot.writeFieldEnd()
}
private def writeEndTsValue(endTs_item: Long, _oprot: TProtocol) {
_oprot.writeI64(endTs_item)
}
private def ttypeToHuman(byte: Byte) = {
// from https://github.com/apache/thrift/blob/master/lib/java/src/org/apache/thrift/protocol/TType.java
byte match {
case TType.STOP => "STOP"
case TType.VOID => "VOID"
case TType.BOOL => "BOOL"
case TType.BYTE => "BYTE"
case TType.DOUBLE => "DOUBLE"
case TType.I16 => "I16"
case TType.I32 => "I32"
case TType.I64 => "I64"
case TType.STRING => "STRING"
case TType.STRUCT => "STRUCT"
case TType.MAP => "MAP"
case TType.SET => "SET"
case TType.LIST => "LIST"
case TType.ENUM => "ENUM"
case _ => "UNKNOWN"
}
}
object Immutable extends ThriftStructCodec3[QueryResponse] {
override def encode(_item: QueryResponse, _oproto: TProtocol) { _item.write(_oproto) }
override def decode(_iprot: TProtocol): QueryResponse = QueryResponse.decode(_iprot)
}
/**
* The default read-only implementation of QueryResponse. You typically should not need to
* directly reference this class; instead, use the QueryResponse.apply method to construct
* new instances.
*/
class Immutable(
val traceIds: Seq[Long],
val startTs: Long,
val endTs: Long,
override val _passthroughFields: immutable$Map[Short, TFieldBlob]
) extends QueryResponse {
def this(
traceIds: Seq[Long] = Seq[Long](),
startTs: Long,
endTs: Long
) = this(
traceIds,
startTs,
endTs,
Map.empty
)
}
/**
* This Proxy trait allows you to extend the QueryResponse trait with additional state or
* behavior and implement the read-only methods from QueryResponse using an underlying
* instance.
*/
trait Proxy extends QueryResponse {
protected def _underlying_QueryResponse: QueryResponse
override def traceIds: Seq[Long] = _underlying_QueryResponse.traceIds
override def startTs: Long = _underlying_QueryResponse.startTs
override def endTs: Long = _underlying_QueryResponse.endTs
override def _passthroughFields = _underlying_QueryResponse._passthroughFields
}
}
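// Illustrative sketch (NOT part of the generated file): constructing a QueryResponse through the
// companion apply and adjusting it with copy. The field values are arbitrary placeholders.
object QueryResponseUsageSketch {
  val response: QueryResponse =
    QueryResponse(traceIds = Seq(1L, 2L, 3L), startTs = 0L, endTs = 1000L)
  // copy returns a new immutable instance with the end timestamp widened
  val extended: QueryResponse = response.copy(endTs = 2000L)
}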
trait QueryResponse
extends ThriftStruct
with scala.Product3[Seq[Long], Long, Long]
with java.io.Serializable
{
import QueryResponse._
def traceIds: Seq[Long]
def startTs: Long
def endTs: Long
def _passthroughFields: immutable$Map[Short, TFieldBlob] = immutable$Map.empty
def _1 = traceIds
def _2 = startTs
def _3 = endTs
/**
* Gets a field value encoded as a binary blob using TCompactProtocol. If the specified field
   * is present in the passthrough map, that value is returned. Otherwise, if the specified field
   * is known and currently has a value, the field is serialized and returned.
*/
def getFieldBlob(_fieldId: Short): Option[TFieldBlob] = {
lazy val _buff = new TMemoryBuffer(32)
lazy val _oprot = new TCompactProtocol(_buff)
_passthroughFields.get(_fieldId) orElse {
val _fieldOpt: Option[TField] =
_fieldId match {
case 1 =>
if (traceIds ne null) {
writeTraceIdsValue(traceIds, _oprot)
Some(QueryResponse.TraceIdsField)
} else {
None
}
case 2 =>
if (true) {
writeStartTsValue(startTs, _oprot)
Some(QueryResponse.StartTsField)
} else {
None
}
case 3 =>
if (true) {
writeEndTsValue(endTs, _oprot)
Some(QueryResponse.EndTsField)
} else {
None
}
case _ => None
}
_fieldOpt match {
case Some(_field) =>
val _data = Arrays.copyOfRange(_buff.getArray, 0, _buff.length)
Some(TFieldBlob(_field, _data))
case None =>
None
}
}
}
/**
* Collects TCompactProtocol-encoded field values according to `getFieldBlob` into a map.
*/
def getFieldBlobs(ids: TraversableOnce[Short]): immutable$Map[Short, TFieldBlob] =
(ids flatMap { id => getFieldBlob(id) map { id -> _ } }).toMap
/**
* Sets a field using a TCompactProtocol-encoded binary blob. If the field is a known
* field, the blob is decoded and the field is set to the decoded value. If the field
* is unknown and passthrough fields are enabled, then the blob will be stored in
* _passthroughFields.
*/
def setField(_blob: TFieldBlob): QueryResponse = {
var traceIds: Seq[Long] = this.traceIds
var startTs: Long = this.startTs
var endTs: Long = this.endTs
var _passthroughFields = this._passthroughFields
_blob.id match {
case 1 =>
traceIds = readTraceIdsValue(_blob.read)
case 2 =>
startTs = readStartTsValue(_blob.read)
case 3 =>
endTs = readEndTsValue(_blob.read)
case _ => _passthroughFields += (_blob.id -> _blob)
}
new Immutable(
traceIds,
startTs,
endTs,
_passthroughFields
)
}
/**
* If the specified field is optional, it is set to None. Otherwise, if the field is
   * known, it is reverted to its default value; if the field is unknown, it is subtracted
* from the passthroughFields map, if present.
*/
def unsetField(_fieldId: Short): QueryResponse = {
var traceIds: Seq[Long] = this.traceIds
var startTs: Long = this.startTs
var endTs: Long = this.endTs
_fieldId match {
case 1 =>
traceIds = Seq[Long]()
case 2 =>
startTs = 0L
case 3 =>
endTs = 0L
case _ =>
}
new Immutable(
traceIds,
startTs,
endTs,
_passthroughFields - _fieldId
)
}
/**
* If the specified field is optional, it is set to None. Otherwise, if the field is
   * known, it is reverted to its default value; if the field is unknown, it is subtracted
* from the passthroughFields map, if present.
*/
def unsetTraceIds: QueryResponse = unsetField(1)
def unsetStartTs: QueryResponse = unsetField(2)
def unsetEndTs: QueryResponse = unsetField(3)
override def write(_oprot: TProtocol) {
QueryResponse.validate(this)
_oprot.writeStructBegin(Struct)
if (traceIds ne null) writeTraceIdsField(traceIds, _oprot)
writeStartTsField(startTs, _oprot)
writeEndTsField(endTs, _oprot)
_passthroughFields.values foreach { _.write(_oprot) }
_oprot.writeFieldStop()
_oprot.writeStructEnd()
}
def copy(
traceIds: Seq[Long] = this.traceIds,
startTs: Long = this.startTs,
endTs: Long = this.endTs,
_passthroughFields: immutable$Map[Short, TFieldBlob] = this._passthroughFields
): QueryResponse =
new Immutable(
traceIds,
startTs,
endTs,
_passthroughFields
)
override def canEqual(other: Any): Boolean = other.isInstanceOf[QueryResponse]
override def equals(other: Any): Boolean =
_root_.scala.runtime.ScalaRunTime._equals(this, other) &&
_passthroughFields == other.asInstanceOf[QueryResponse]._passthroughFields
override def hashCode: Int = _root_.scala.runtime.ScalaRunTime._hashCode(this)
override def toString: String = _root_.scala.runtime.ScalaRunTime._toString(this)
override def productArity: Int = 3
override def productElement(n: Int): Any = n match {
case 0 => this.traceIds
case 1 => this.startTs
case 2 => this.endTs
case _ => throw new IndexOutOfBoundsException(n.toString)
}
override def productPrefix: String = "QueryResponse"
}
| pkoryzna/zipkin | zipkin-scrooge/target/src_managed/main/com/twitter/zipkin/gen/QueryResponse.scala | Scala | apache-2.0 | 14,332 |
package net.lshift.diffa.agent.client
/**
* Copyright (C) 2010-2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import javax.ws.rs.core.MediaType
import com.sun.jersey.api.client.ClientResponse
import net.lshift.diffa.kernel.differencing.PairScanState
import scala.collection.JavaConversions._
import com.sun.jersey.core.util.MultivaluedMapImpl
import net.lshift.diffa.client.RestClientParams
/**
* A RESTful client to manage participant scanning.
*/
class ScanningRestClient(serverRootUrl:String, domain:String, params: RestClientParams = RestClientParams.default)
extends DomainAwareRestClient(serverRootUrl, domain, "domains/{domain}/scanning/", params) {
def startScan(pairKey: String, view:Option[String] = None) = {
val p = resource.path("pairs").path(pairKey).path("scan")
val postData = new MultivaluedMapImpl()
view match {
case None =>
case Some(v) => postData.add("view", v)
}
val response = p.accept(MediaType.APPLICATION_JSON_TYPE).
`type`("application/x-www-form-urlencoded").post(classOf[ClientResponse], postData)
val status = response.getClientResponseStatus
status.getStatusCode match {
case 202 => // Successfully submitted (202 is "Accepted")
case x:Int => throw new RuntimeException("HTTP " + x + " : " + status.getReasonPhrase)
}
true
}
def getScanStatus = {
val path = resource.path("states")
val media = path.accept(MediaType.APPLICATION_JSON_TYPE)
val response = media.get(classOf[ClientResponse])
val status = response.getClientResponseStatus
status.getStatusCode match {
case 200 => {
val responseData = response.getEntity(classOf[java.util.Map[String, String]])
responseData.map {case (k, v) => k -> PairScanState.valueOf(v) }.toMap
}
case x:Int => handleHTTPError(x, path, status)
}
}
def cancelScanning(pairKey: String) = {
delete("/pairs/" + pairKey + "/scan")
true
}
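  // Illustrative sketch (NOT part of the original file): the calls above composed into one helper.
  // It kicks off a scan for a pair and then takes a single snapshot of the per-pair scan states.
  def startScanAndSnapshot(pairKey: String) = {
    startScan(pairKey)
    getScanStatus
  }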
}
| aprescott/diffa | agent/src/test/scala/net/lshift/diffa/agent/client/ScanningRestClient.scala | Scala | apache-2.0 | 2,506 |
package com.nrinaudo.fetch
import java.net.URI
object Url {
// - URI-based construction ------------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------
def fromUri(uri: URI): Option[Url] = for {
protocolStr <- Option(uri.getScheme)
protocol <- Protocol.parse(protocolStr)
host <- Option(uri.getHost)
} yield Url(protocol, host, if(uri.getPort == -1) protocol.defaultPort else uri.getPort,
splitPath(uri.getRawPath), QueryString(uri.getRawQuery), Option(uri.getFragment))
private def splitPath(path: String) =
if(path == null) Nil
else path.split("/").toList.filter(!_.isEmpty).map(UrlEncoder.decode)
// - String-based construction ---------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------
def parse(url: String): Option[Url] = fromUri(new URI(url))
}
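// Illustrative usage sketch (NOT part of the original file): parsing an absolute URL and refining
// it with the combinators defined on the case class below. Only `/` and `fragment` are used, to
// avoid assuming which ValueWriter instances are in implicit scope.
object UrlUsageSketch {
  val endpoint: Option[Url] =
    Url.parse("http://example.com")
      .map(_ / "api" / "v1" / "search")
      .map(_.fragment(Some("results")))
}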
case class Url(protocol: Protocol, host: String, port: Int, path: List[String] = List(),
query: QueryString = QueryString(), fragment: Option[String] = None) {
// - Url building ----------------------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------
def protocol(value: Protocol): Url = copy(protocol = value)
def host(value: String): Url = copy(host = value)
def port(value: Int): Url = copy(port = value)
def path(value: String*): Url = path(value.toList)
def path(value: List[String]): Url = copy(path = value)
def fragment(value: Option[String]): Url = copy(fragment = value)
// TODO: maybe a list, with its slow appends, is not the best solution for storing paths?
def addSegment(value: String): Url = path(path :+ value :_*)
def query(value: QueryString): Url = copy(query = value)
def param[T: ValueWriter](name: String, values: T*): Url = query(query.set(name, values: _*))
def /(segment: String): Url = addSegment(segment)
def ?(value: QueryString): Url = query(value)
// TODO: this is currently extremely cumbersome if T happens to be a List[T].
// Implicit from ValueFormat[T] to ValueFormat[List[T]]?
def &[T: ValueWriter](value: (String, T)): Url = param(value._1, value._2)
// - Object methods --------------------------------------------------------------------------------------------------
// -------------------------------------------------------------------------------------------------------------------
// Note: I wanted to implement this properly with the correct URI constructor and all, but it turns out this creates
// a string and then parses it...
def toURI: URI = new URI(toString)
override lazy val toString = {
val builder = new StringBuilder
// Protocol and host.
builder.append(protocol.name).append("://").append(host)
// Appends the port if different from the default one.
if(protocol.defaultPort != port) builder.append(':').append(port)
// Path
builder.append(path.filter(!_.isEmpty).map(UrlEncoder.encode).mkString("/", "/", ""))
// Query String.
if(query.values.nonEmpty) query.writeTo(builder.append('?'))
fragment.foreach {r => builder.append("#").append(UrlEncoder.encode(r))}
builder.toString()
}
}
| nrinaudo/fetch | core/src/main/scala/com/nrinaudo/fetch/Url.scala | Scala | mit | 3,528 |
package Pacman
import Chisel._
class CircularPeekBuffer(
numberOfBlocks: Int,
wordPerBlock: Int,
wordWidth: Int
) extends Module {
val io = new Bundle {
val wordIn = Decoupled(Bits(width = wordWidth)).flip
val wordOut = Bits(width = wordWidth).asOutput
val startOut = Bool().asOutput
val pipeReady = Bool().asInput
}
val queue = Module(
new CircularPeekQueue(wordPerBlock, numberOfBlocks, wordWidth))
val wordCounter = Module(new Counter(0, wordPerBlock))
val readyBlocks = Module(new UpDownCounter(0, numberOfBlocks))
val isReady = readyBlocks.io.value =/= UInt(numberOfBlocks - 1)
val signalReadingInput = io.wordIn.valid && isReady
val signalLastInputWord = signalReadingInput && wordCounter.io.value === UInt(
wordPerBlock - 1)
val signalNewPeekBlock = io.pipeReady && readyBlocks.io.value =/= UInt(0)
wordCounter.io.enable := signalReadingInput
wordCounter.io.rst := wordCounter.io.value === UInt(wordPerBlock - 1)
readyBlocks.io.up := signalLastInputWord
readyBlocks.io.down := signalNewPeekBlock
queue.io.input := io.wordIn.bits
queue.io.writeEnable := signalReadingInput
queue.io.nextBlock := signalNewPeekBlock
io.wordOut := queue.io.output
io.startOut := Reg(init = Bool(false), next = signalNewPeekBlock)
io.wordIn.ready := isReady
}
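// Illustrative sketch (NOT part of the original file): instantiating the buffer inside a parent
// module, assuming the same Chisel 2-style API used above. Widths and block counts are arbitrary.
class CircularPeekBufferWrapper extends Module {
  val io = new Bundle {
    val wordIn = Decoupled(Bits(width = 16)).flip
    val wordOut = Bits(width = 16).asOutput
  }
  val buffer = Module(new CircularPeekBuffer(numberOfBlocks = 4, wordPerBlock = 8, wordWidth = 16))
  buffer.io.wordIn <> io.wordIn
  buffer.io.pipeReady := Bool(true) // assume the downstream pipe is always ready in this sketch
  io.wordOut := buffer.io.wordOut
}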
| martinhath/bnn | src/main/scala/CircularPeekBuffer.scala | Scala | mit | 1,332 |
package observatory
import java.time.LocalDate
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.functions._
/**
* 1st milestone: data extraction
*/
object Extraction {
import Spark.session.implicits._
import observatory.Implicits._
def readStations(stationsFile: String): Dataset[Station] = {
Spark.session.read
.option("mode", "FAILFAST")
.schema(Station.struct)
.csv(getClass.getResource(stationsFile).toExternalForm).as[Station]
.filter((station: Station) => station.lat.isDefined && station.lon.isDefined)
}
def readTemperatures(temperaturesFile: String): Dataset[Record] = {
Spark.session.read
.option("mode", "FAILFAST")
.schema(Record.struct)
.csv(getClass.getResource(temperaturesFile).toExternalForm).as[Record]
}
/**
* @param year Year number
* @param stationsFile Path of the stations resource file to use (e.g. "/stations.csv")
* @param temperaturesFile Path of the temperatures resource file to use (e.g. "/1975.csv")
* @return A sequence containing triplets (date, location, temperature)
*/
def locateTemperatures(year: Int, stationsFile: String, temperaturesFile: String): Iterable[(LocalDate, Location, Temperature)] = {
val stations = readStations(stationsFile)
val temperatures = readTemperatures(temperaturesFile)
stations
.join(temperatures, stations("stn") <=> temperatures("stn") && stations("wban") <=> temperatures("wban"))
.map(row => (
LocalDate.of(year, row.getAs[Byte]("month"), row.getAs[Byte]("day")),
Location(row.getAs[Double]("lat"), row.getAs[Double]("lon")),
row.getAs[Double]("temp").toCelsius
))
.collect()
}
/**
* @param records A sequence containing triplets (date, location, temperature)
* @return A sequence containing, for each location, the average temperature over the year.
*/
def locationYearlyAverageRecords(records: Iterable[(LocalDate, Location, Double)]): Iterable[(Location, Double)] =
Spark.session.sparkContext
.parallelize(records.toSeq)
.map { case (date, location, temp) => (date.getYear, location, temp) }
.toDF("year", "location", "temp")
.groupBy('year, 'location)
.agg('year, 'location, avg('temp).as("temp"))
.select('location.as[Location], 'temp.as[Double])
.collect()
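  /**
    * Illustrative sketch (NOT part of the original file): the two milestone steps chained together
    * for the 1975 data, using the resource names from the scaladoc above and assuming
    * `Temperature` aliases `Double` as in the course scaffold.
    */
  def yearlyAverages1975Sketch(): Iterable[(Location, Temperature)] =
    locationYearlyAverageRecords(locateTemperatures(1975, "/stations.csv", "/1975.csv"))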
}
| yurii-khomenko/fpScalaSpec | c5w1observatory/src/main/scala/observatory/Extraction.scala | Scala | gpl-3.0 | 2,387 |
package org.jetbrains.plugins.scala.lang.typeInference.generated
import org.jetbrains.plugins.scala.DependencyManagerBase._
import org.jetbrains.plugins.scala.SlowTests
import org.jetbrains.plugins.scala.base.libraryLoaders.{IvyManagedLoader, LibraryLoader}
import org.jetbrains.plugins.scala.lang.typeInference.TypeInferenceTestBase
import org.junit.experimental.categories.Category
/**
* @author Alefas
* @since 11/12/15
*/
@Category(Array(classOf[SlowTests]))
class TypeInferenceSlickTest extends TypeInferenceTestBase {
//This class was generated by build script, please don't change this
override def folderPath: String = super.folderPath + "slick/"
override protected def additionalLibraries(): Seq[LibraryLoader] =
IvyManagedLoader("com.typesafe.slick" % "slick_2.11" % "3.2.1") :: Nil
def testSCL9261(): Unit = doTest()
def testSCL8829(): Unit = doTest()
def testImplicitMacroTest(): Unit = doTest()
}
| jastice/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/typeInference/generated/TypeInferenceSlickTest.scala | Scala | apache-2.0 | 939 |
package com.arcusys.learn.controllers.api
import com.arcusys.learn.exceptions.BadRequestException
import com.arcusys.learn.facades.{ PackageFacadeContract, TagFacadeContract }
import com.arcusys.learn.ioc.Configuration
import com.arcusys.learn.liferay.permission.{ PortletName, ModifyPermission, ViewPermission, PermissionUtil }
import com.arcusys.learn.liferay.services.PermissionHelper
import com.arcusys.learn.models.request.PackageRequest
import com.arcusys.learn.models.valamispackage.{ PackageSerializer, PackageUploadModel }
import com.arcusys.learn.web.ServletBase
import com.arcusys.valamis.lesson.service.ValamisPackageService
import com.arcusys.valamis.lrs.serializer.DateTimeSerializer
import com.arcusys.valamis.lrs.service.LrsClientManager
import com.escalatesoft.subcut.inject.BindingModule
import org.json4s.{ DefaultFormats, Formats }
import PermissionUtil._
class PackageApiController(configuration: BindingModule) extends BaseApiController(configuration) with ServletBase {
private val packageFacade = inject[PackageFacadeContract]
private val lrsReader = inject[LrsClientManager]
private val packageService = inject[ValamisPackageService]
before() {
scentry.authenticate(LIFERAY_STRATEGY_NAME)
}
options() {
response.setHeader("Access-Control-Allow-Methods", "HEAD,GET,POST,PUT,DELETE")
response.setHeader("Access-Control-Allow-Headers", "Content-Type,Content-Length,Authorization,If-Match,If-None-Match,X-Experience-API-Version,X-Experience-API-Consistent-Through")
response.setHeader("Access-Control-Expose-Headers", "ETag,Last-Modified,Cache-Control,Content-Type,Content-Length,WWW-Authenticate,X-Experience-API-Version,X-Experience-API-Consistent-Through")
}
private val tagFacade = inject[TagFacadeContract]
def this() = this(Configuration)
get("/packages(/)") {
val packageRequest = PackageRequest(this)
packageRequest.action match {
case "VISIBLE" => jsonAction {
PermissionUtil.requirePermissionApi(ViewPermission, PortletName.LessonViewer)
val companyId = packageRequest.companyId
val courseId = packageRequest.courseId
val pageId = packageRequest.pageIdRequired
val playerId = packageRequest.playerIdRequired
val user = getLiferayUser
val tagId = packageRequest.tagId
val filter = packageRequest.filter
lrsReader.statementApi(
packageFacade.getForPlayer(_, companyId, courseId, pageId, filter, tagId, playerId, user,
packageRequest.isSortDirectionAsc, packageRequest.sortBy, packageRequest.page, packageRequest.count),
packageRequest.lrsAuth)
}
case "ALL" => jsonAction {
PermissionUtil.requirePermissionApi(ViewPermission, PortletName.LessonViewer, PortletName.LessonManager)
val courseId = packageRequest.courseId
val companyId = packageRequest.companyId
val user = getLiferayUser
val scope = packageRequest.scope
val filter = packageRequest.filter
val tagId = packageRequest.tagId
val isSortDirectionAsc = packageRequest.isSortDirectionAsc
val skip = packageRequest.skip
val count = packageRequest.count
val packageType = packageRequest.packageType
val page = packageRequest.page
packageFacade.getAllPackages(packageType, Some(courseId), scope, filter, tagId, isSortDirectionAsc, skip, count, page, companyId, user)
}
case _ => {
throw new BadRequestException
}
}
}
get("/packages/getPersonalForPlayer") {
PermissionUtil.requirePermissionApi(ViewPermission, PortletName.LessonViewer)
val packageRequest = PackageRequest(this)
jsonAction {
val playerId = packageRequest.playerIdRequired
val companyId = packageRequest.companyId
val groupId = getLiferayUser.getGroupId
val user = getLiferayUser
packageFacade.getForPlayerConfig(playerId, companyId, groupId, user)
}
}
get("/packages/getByScope") {
PermissionUtil.requirePermissionApi(ViewPermission, PortletName.LessonManager, PortletName.LessonViewer)
val packageRequest = PackageRequest(this)
jsonAction {
val courseId = packageRequest.courseId
val pageId = packageRequest.pageId
val playerId = packageRequest.playerId
val companyId = packageRequest.companyId
val user = getLiferayUser
val scope = packageRequest.scope
val courseIds = List(getLiferayUser.getGroupId.toInt)
packageFacade.getByScopeType(courseId, scope, pageId, playerId, companyId, courseIds, user)
}
}
get("/packages/getLastOpen") {
PermissionUtil.requirePermissionApi(ViewPermission, PortletName.RecentLessons)
val packageRequest = PackageRequest(this)
implicit val formats: Formats = DefaultFormats + DateTimeSerializer
jsonAction {
lrsReader.statementApi(statementApi => {
packageService.getLastPaskages( getUserId, statementApi, packageRequest.countPackage, getCompanyId)
}, packageRequest.lrsAuth)
}
}
post("/packages(/)")(jsonAction {
PermissionUtil.requirePermissionApi(ModifyPermission, PortletName.LessonManager)
val packageRequest = PackageRequest(this)
packageRequest.action match {
case "UPDATE" => {
val packageId = packageRequest.packageId
val courseId = packageRequest.courseId
val companyId = packageRequest.companyId
val pageId = packageRequest.pageId
val playerId = packageRequest.playerId
val user = getLiferayUser
val scope = packageRequest.scope
val visibility = packageRequest.visibility
val isDefault = packageRequest.isDefault
val title = packageRequest.title.get
val description = packageRequest.description.getOrElse("")
val packageType = packageRequest.packageTypeRequired
val passingLimit = packageRequest.passingLimit
val rerunInterval = packageRequest.rerunInterval
val rerunIntervalType = packageRequest.rerunIntervalType
val tags = packageRequest.tags
val beginDate = packageRequest.beginDate
val endDate = packageRequest.endDate
packageFacade.updatePackage(packageId, tags, passingLimit, rerunInterval, rerunIntervalType, beginDate, endDate, scope, visibility, isDefault, companyId, courseId, title, description, packageType, pageId, playerId, user)
}
case "UPDATELOGO" => {
val packageId = packageRequest.packageId
val packageLogo = packageRequest.packageLogo
val packageType = packageRequest.packageTypeRequired
packageFacade.updatePackageLogo(packageId, packageType, packageLogo)
}
case "UPDATEPACKAGES" => {
implicit val fs: Formats = DefaultFormats + new PackageSerializer
val packages = parseJson[Seq[PackageUploadModel]](packageRequest.packages).get
val scope = packageRequest.scope
val courseId = packageRequest.courseId
val pageId = packageRequest.pageId
val playerId = packageRequest.playerId
packageFacade.uploadPackages(packages, scope, courseId, pageId, playerId)
}
case "DELETE" => {
val packageId = packageRequest.packageId
val packageType = packageRequest.packageTypeRequired
packageFacade.removePackage(packageId, packageType)
}
case "REMOVEPACKAGES" => {
val packageIds = packageRequest.packageIds
packageFacade.removePackages(packageIds)
}
case _ => {
throw new BadRequestException
}
}
})
post("/packages/updatePackageScopeVisibility/:id") {
PermissionUtil.requirePermissionApi(ViewPermission, PortletName.LessonViewer)
val packageRequest = PackageRequest(this)
val courseId = packageRequest.courseId
val pageId = packageRequest.pageId
val playerId = packageRequest.playerId
val user = getLiferayUser
val scope = packageRequest.scope
val id = packageRequest.packageId
val visibility = packageRequest.visibility
val isDefault = packageRequest.isDefault
packageFacade.updatePackageScopeVisibility(id, scope, courseId, visibility, isDefault, pageId, playerId, user)
}
post("/packages/addPackageToPlayer/:playerID") {
PermissionUtil.requirePermissionApi(ViewPermission, PortletName.LessonViewer)
val packageRequest = PackageRequest(this)
val playerId = packageRequest.playerIdRequired
val packageId = packageRequest.packageId
packageFacade.addPackageToPlayer(playerId, packageId)
}
post("/packages/updatePlayerScope") {
PermissionUtil.requirePermissionApi(ViewPermission, PortletName.LessonViewer)
val packageRequest = PackageRequest(this)
val scope = packageRequest.scope
val playerId = packageRequest.playerIdRequired
packageFacade.updatePlayerScope(scope, playerId)
}
}
| icacic/Valamis | learn-portlet/src/main/scala/com/arcusys/learn/controllers/api/PackageApiController.scala | Scala | gpl-3.0 | 8,860 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.java
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.TypeExtractor
import org.apache.flink.api.java.{DataSet, ExecutionEnvironment}
import org.apache.flink.table.api._
import org.apache.flink.table.expressions.ExpressionParser
import org.apache.flink.table.functions.{AggregateFunction, TableFunction}
import _root_.scala.collection.JavaConverters._
/**
* The [[TableEnvironment]] for a Java batch [[ExecutionEnvironment]] that works
* with [[DataSet]]s.
*
* A TableEnvironment can be used to:
* - convert a [[DataSet]] to a [[Table]]
* - register a [[DataSet]] in the [[TableEnvironment]]'s catalog
* - register a [[Table]] in the [[TableEnvironment]]'s catalog
* - scan a registered table to obtain a [[Table]]
* - specify a SQL query on registered tables to obtain a [[Table]]
* - convert a [[Table]] into a [[DataSet]]
* - explain the AST and execution plan of a [[Table]]
*
* @param execEnv The Java batch [[ExecutionEnvironment]] of the TableEnvironment.
* @param config The configuration of the TableEnvironment.
*
* @deprecated This constructor will be removed. Use BatchTableEnvironment.create() instead.
*/
class BatchTableEnvironment @Deprecated() (
execEnv: ExecutionEnvironment,
config: TableConfig)
extends org.apache.flink.table.api.BatchTableEnvironment(execEnv, config) {
/**
* Converts the given [[DataSet]] into a [[Table]].
*
* The field names of the [[Table]] are automatically derived from the type of the [[DataSet]].
*
* @param dataSet The [[DataSet]] to be converted.
* @tparam T The type of the [[DataSet]].
* @return The converted [[Table]].
*/
def fromDataSet[T](dataSet: DataSet[T]): Table = {
val name = createUniqueTableName()
registerDataSetInternal(name, dataSet)
scan(name)
}
/**
* Converts the given [[DataSet]] into a [[Table]] with specified field names.
*
* Example:
*
* {{{
* DataSet<Tuple2<String, Long>> set = ...
* Table tab = tableEnv.fromDataSet(set, "a, b")
* }}}
*
* @param dataSet The [[DataSet]] to be converted.
* @param fields The field names of the resulting [[Table]].
* @tparam T The type of the [[DataSet]].
* @return The converted [[Table]].
*/
def fromDataSet[T](dataSet: DataSet[T], fields: String): Table = {
val exprs = ExpressionParser
.parseExpressionList(fields).asScala
.toArray
val name = createUniqueTableName()
registerDataSetInternal(name, dataSet, exprs)
scan(name)
}
/**
* Registers the given [[DataSet]] as table in the
* [[TableEnvironment]]'s catalog.
* Registered tables can be referenced in SQL queries.
*
* The field names of the [[Table]] are automatically derived from the type of the [[DataSet]].
*
* @param name The name under which the [[DataSet]] is registered in the catalog.
* @param dataSet The [[DataSet]] to register.
* @tparam T The type of the [[DataSet]] to register.
*/
def registerDataSet[T](name: String, dataSet: DataSet[T]): Unit = {
checkValidTableName(name)
registerDataSetInternal(name, dataSet)
}
/**
* Registers the given [[DataSet]] as table with specified field names in the
* [[TableEnvironment]]'s catalog.
* Registered tables can be referenced in SQL queries.
*
* Example:
*
* {{{
* DataSet<Tuple2<String, Long>> set = ...
* tableEnv.registerDataSet("myTable", set, "a, b")
* }}}
*
* @param name The name under which the [[DataSet]] is registered in the catalog.
* @param dataSet The [[DataSet]] to register.
* @param fields The field names of the registered table.
* @tparam T The type of the [[DataSet]] to register.
*/
def registerDataSet[T](name: String, dataSet: DataSet[T], fields: String): Unit = {
val exprs = ExpressionParser
.parseExpressionList(fields).asScala
.toArray
checkValidTableName(name)
registerDataSetInternal(name, dataSet, exprs)
}
/**
* Converts the given [[Table]] into a [[DataSet]] of a specified type.
*
* The fields of the [[Table]] are mapped to [[DataSet]] fields as follows:
* - [[org.apache.flink.types.Row]] and [[org.apache.flink.api.java.tuple.Tuple]]
* types: Fields are mapped by position, field types must match.
* - POJO [[DataSet]] types: Fields are mapped by field name, field types must match.
*
* @param table The [[Table]] to convert.
* @param clazz The class of the type of the resulting [[DataSet]].
* @tparam T The type of the resulting [[DataSet]].
* @return The converted [[DataSet]].
*/
def toDataSet[T](table: Table, clazz: Class[T]): DataSet[T] = {
// Use the default query config.
translate[T](table, queryConfig)(TypeExtractor.createTypeInfo(clazz))
}
/**
* Converts the given [[Table]] into a [[DataSet]] of a specified type.
*
* The fields of the [[Table]] are mapped to [[DataSet]] fields as follows:
* - [[org.apache.flink.types.Row]] and [[org.apache.flink.api.java.tuple.Tuple]]
* types: Fields are mapped by position, field types must match.
* - POJO [[DataSet]] types: Fields are mapped by field name, field types must match.
*
* @param table The [[Table]] to convert.
* @param typeInfo The [[TypeInformation]] that specifies the type of the resulting [[DataSet]].
* @tparam T The type of the resulting [[DataSet]].
* @return The converted [[DataSet]].
*/
def toDataSet[T](table: Table, typeInfo: TypeInformation[T]): DataSet[T] = {
// Use the default batch query config.
translate[T](table, queryConfig)(typeInfo)
}
/**
* Converts the given [[Table]] into a [[DataSet]] of a specified type.
*
* The fields of the [[Table]] are mapped to [[DataSet]] fields as follows:
* - [[org.apache.flink.types.Row]] and [[org.apache.flink.api.java.tuple.Tuple]]
* types: Fields are mapped by position, field types must match.
* - POJO [[DataSet]] types: Fields are mapped by field name, field types must match.
*
* @param table The [[Table]] to convert.
* @param clazz The class of the type of the resulting [[DataSet]].
* @param queryConfig The configuration for the query to generate.
* @tparam T The type of the resulting [[DataSet]].
* @return The converted [[DataSet]].
*/
def toDataSet[T](
table: Table,
clazz: Class[T],
queryConfig: BatchQueryConfig): DataSet[T] = {
translate[T](table, queryConfig)(TypeExtractor.createTypeInfo(clazz))
}
/**
* Converts the given [[Table]] into a [[DataSet]] of a specified type.
*
* The fields of the [[Table]] are mapped to [[DataSet]] fields as follows:
* - [[org.apache.flink.types.Row]] and [[org.apache.flink.api.java.tuple.Tuple]]
* types: Fields are mapped by position, field types must match.
* - POJO [[DataSet]] types: Fields are mapped by field name, field types must match.
*
* @param table The [[Table]] to convert.
* @param typeInfo The [[TypeInformation]] that specifies the type of the resulting [[DataSet]].
* @param queryConfig The configuration for the query to generate.
* @tparam T The type of the resulting [[DataSet]].
* @return The converted [[DataSet]].
*/
def toDataSet[T](
table: Table,
typeInfo: TypeInformation[T],
queryConfig: BatchQueryConfig): DataSet[T] = {
translate[T](table, queryConfig)(typeInfo)
}
/**
* Registers a [[TableFunction]] under a unique name in the TableEnvironment's catalog.
* Registered functions can be referenced in Table API and SQL queries.
*
* @param name The name under which the function is registered.
* @param tf The TableFunction to register.
* @tparam T The type of the output row.
*/
def registerFunction[T](name: String, tf: TableFunction[T]): Unit = {
implicit val typeInfo: TypeInformation[T] = TypeExtractor
.createTypeInfo(tf, classOf[TableFunction[_]], tf.getClass, 0)
.asInstanceOf[TypeInformation[T]]
registerTableFunctionInternal[T](name, tf)
}
/**
* Registers an [[AggregateFunction]] under a unique name in the TableEnvironment's catalog.
* Registered functions can be referenced in Table API and SQL queries.
*
* @param name The name under which the function is registered.
* @param f The AggregateFunction to register.
* @tparam T The type of the output value.
* @tparam ACC The type of aggregate accumulator.
*/
def registerFunction[T, ACC](
name: String,
f: AggregateFunction[T, ACC])
: Unit = {
implicit val typeInfo: TypeInformation[T] = TypeExtractor
.createTypeInfo(f, classOf[AggregateFunction[T, ACC]], f.getClass, 0)
.asInstanceOf[TypeInformation[T]]
implicit val accTypeInfo: TypeInformation[ACC] = TypeExtractor
.createTypeInfo(f, classOf[AggregateFunction[T, ACC]], f.getClass, 1)
.asInstanceOf[TypeInformation[ACC]]
registerAggregateFunctionInternal[T, ACC](name, f)
}
}
object BatchTableEnvironment {
/**
* Returns a [[TableEnvironment]] for a Java batch [[ExecutionEnvironment]] that works
* with [[DataSet]]s.
*
* A TableEnvironment can be used to:
* - convert a [[DataSet]] to a [[Table]]
* - register a [[DataSet]] in the [[TableEnvironment]]'s catalog
* - register a [[Table]] in the [[TableEnvironment]]'s catalog
* - scan a registered table to obtain a [[Table]]
* - specify a SQL query on registered tables to obtain a [[Table]]
* - convert a [[Table]] into a [[DataSet]]
* - explain the AST and execution plan of a [[Table]]
*
* @param executionEnvironment The Java batch [[ExecutionEnvironment]] of the TableEnvironment.
*/
def create(executionEnvironment: ExecutionEnvironment): BatchTableEnvironment = {
new BatchTableEnvironment(executionEnvironment, new TableConfig())
}
/**
* Returns a [[TableEnvironment]] for a Java batch [[ExecutionEnvironment]] that works
* with [[DataSet]]s.
*
* A TableEnvironment can be used to:
* - convert a [[DataSet]] to a [[Table]]
* - register a [[DataSet]] in the [[TableEnvironment]]'s catalog
* - register a [[Table]] in the [[TableEnvironment]]'s catalog
* - scan a registered table to obtain a [[Table]]
* - specify a SQL query on registered tables to obtain a [[Table]]
* - convert a [[Table]] into a [[DataSet]]
* - explain the AST and execution plan of a [[Table]]
*
* @param executionEnvironment The Java batch [[ExecutionEnvironment]] of the TableEnvironment.
* @param tableConfig The configuration of the TableEnvironment.
*/
def create(
executionEnvironment: ExecutionEnvironment,
tableConfig: TableConfig): BatchTableEnvironment = {
new BatchTableEnvironment(executionEnvironment, tableConfig)
}
}
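// Illustrative sketch (NOT part of the original file): a minimal round trip through the API above.
// `sqlQuery` is assumed to be available on the base TableEnvironment of this Flink version; the
// data and query are placeholders.
object BatchTableEnvironmentUsageSketch {
  def wordLengths(env: ExecutionEnvironment): DataSet[org.apache.flink.types.Row] = {
    val tableEnv = BatchTableEnvironment.create(env)
    // Build a small DataSet, register it as a table with a single column, query it, convert back.
    val words: DataSet[String] = env.fromElements("flink", "table", "api")
    tableEnv.registerDataSet("words", words, "word")
    val result = tableEnv.sqlQuery("SELECT word, CHAR_LENGTH(word) AS len FROM words")
    tableEnv.toDataSet(result, classOf[org.apache.flink.types.Row])
  }
}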
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/api/java/BatchTableEnvironment.scala | Scala | apache-2.0 | 11,775 |
package mu.node.echod.grpc
import java.security.PublicKey
import com.google.common.collect.Iterables
import io.grpc._
import mu.node.echod.models.UserContext
/*
* Obtain the user context by reading from the JSON Web Token that is sent as an OAuth bearer
 * token in the HTTP Authorization request header.
*/
class UserContextServerInterceptor(jwtVerificationKey: PublicKey) extends ServerInterceptor {
override def interceptCall[ReqT, RespT](
call: ServerCall[ReqT, RespT],
headers: Metadata,
next: ServerCallHandler[ReqT, RespT]): ServerCall.Listener[ReqT] = {
readBearerToken(headers) flatMap { token =>
UserContext.fromJwt(token, jwtVerificationKey)
} map { userContext =>
val withUserContext = Context
.current()
.withValue[UserContext](UserContextServerInterceptor.userContextKey, userContext)
Contexts.interceptCall(withUserContext, call, headers, next)
} getOrElse {
next.startCall(call, headers)
}
}
private def readBearerToken(headers: Metadata): Option[String] = {
val authorizationHeaderKey = Metadata.Key.of("Authorization", Metadata.ASCII_STRING_MARSHALLER)
try {
Iterables
.toArray(headers.getAll(authorizationHeaderKey), classOf[String])
.find(header => header.startsWith("Bearer "))
.map(header => header.replaceFirst("Bearer ", ""))
} catch {
case _: Exception => Option.empty
}
}
}
object UserContextServerInterceptor {
val userContextKey: Context.Key[UserContext] = Context.key("user_context")
}
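// Illustrative usage sketch (NOT part of the original file): a service handler running on the
// intercepted call can read the propagated user back through the Context key installed above.
object UserContextReaderSketch {
  def currentUser(): Option[UserContext] =
    Option(UserContextServerInterceptor.userContextKey.get())
}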
| vyshane/grpc-scala-microservice-kit | app/src/main/scala/grpc/UserContextServerInterceptor.scala | Scala | apache-2.0 | 1,547 |
package za.jwatson.glycanoweb.react
import japgolly.scalajs.react._
import japgolly.scalajs.react.extra._
import japgolly.scalajs.react.MonocleReact._
import japgolly.scalajs.react.vdom.prefix_<^._
import za.jwatson.glycanoweb.convention.Convention.Palette
import monocle.Monocle._
import za.jwatson.glycanoweb.react.GlycanoApp.{AppState, Mode}
import za.jwatson.glycanoweb.react.bootstrap.RadioGroupMap
import za.jwatson.glycanoweb.render.{SubstituentShape, DisplayConv}
import za.jwatson.glycanoweb.structure._
import org.scalajs.dom
import scalaz.effect.IO
object ResiduePanel {
case class Props(rvMode: ReusableVar[Mode], rvPlaceAnomer: ReusableVar[Anomer], rvPlaceAbsolute: ReusableVar[Absolute],
displayConv: DisplayConv, scaleSubstituents: Double, conventions: Map[String, DisplayConv])
implicit val reuseDouble: Reusability[Double] = Reusability.by_==
implicit val reuseConventions: Reusability[Map[String, DisplayConv]] = Reusability.by_==
implicit val reuseState: Reusability[Map[DisplayConv, Palette]] = Reusability.by_==
implicit val reuseProps: Reusability[Props] = Reusability.caseClass[Props]
class Backend($: BackendScope[Props, Map[DisplayConv, Palette]]) {
def clickResidue(rt: ResidueType, subs: Map[Int, Vector[SubstituentType]]) = for {
p <- $.props
_ <- p.rvMode.mod {
case Mode.PlaceResidue(r) if r.rt == rt && r.subs == subs => Mode.Select
case _ => Mode.PlaceResidue(Residue(
p.rvPlaceAnomer.value,
p.rvPlaceAbsolute.value,
rt, subs
))
}
} yield ()
val setAnoFn: Option[Anomer] ~=> Callback = ReusableFn(a => for {
v <- CallbackOption.liftOption(a)
p <- $.props
_ <- p.rvPlaceAnomer.set(v)
} yield ())
val setAbsFn: Option[Absolute] ~=> Callback = ReusableFn(a => for {
v <- CallbackOption.liftOption(a)
p <- $.props
_ <- p.rvPlaceAbsolute.set(v)
} yield ())
val getNameAnoFn: Anomer ~=> String = ReusableFn(_.symbol)
val getNameAbsFn: Absolute ~=> String = ReusableFn(_.symbol)
def render(p: Props, state: Map[DisplayConv, Palette]) = {
val residueTabs = <.ul(c"nav nav-tabs", ^.role := "tablist", ^.marginBottom := 5.px)(
for (pal <- p.displayConv.conv.palettes :+ Palette.Repeat) yield {
val f = index[Map[DisplayConv, Palette], DisplayConv, Palette](p.displayConv).set(pal)
<.li(
<.a(
^.href := "#",
^.onClick --> $.modState(f),
^.role := "tab",
"data-toggle".reactAttr := "tab",
^.padding := "4px 7px"
)(pal.name),
state.get(p.displayConv).contains(pal) ?= c"active"
)
}
)
val rvAno = setAnoFn.asVar(Some(p.rvPlaceAnomer.value))
val rvAbs = setAbsFn.asVar(Some(p.rvPlaceAbsolute.value))
val residueConfig = div"btn-toolbar"(^.role := "toolbar", ^.display.`inline-block`)(
RadioAnomer(RadioGroupMap.Props[Anomer](rvAno, Anomer.Anomers, getNameAnoFn, toggle = false)),
RadioAbsolute(RadioGroupMap.Props[Absolute](rvAbs, Absolute.Absolutes, getNameAbsFn, toggle = false))
)
val residuePages = div"btn-group"("data-toggle".reactAttr := "buttons")(
div"tab-content"(
div"tab-pane active"(^.role := "tabpanel")(
for ((rt, subs) <- state(p.displayConv).residues) yield {
val res = Residue(p.rvPlaceAnomer.value, p.rvPlaceAbsolute.value, rt, subs)
val ((x, y), w, h) = p.displayConv.bounds(res)
val scale = 0.4
val (residue, handle) = p.displayConv.shapes(res)
val residueLinks = p.displayConv.links(res)
val substituents = for ((i, sts) <- subs.toSeq) yield {
val (x1, y1) = residueLinks(i - 1)
<.svg.g(^.svg.transform := s"translate($x1, $y1) scale(${p.scaleSubstituents})")(
SVGSubstituentStack.C.withKey(i)(sts)
)
}
val selected = p.rvMode.value match {
case Mode.PlaceResidue(r) if r.rt == rt && r.subs == subs => true
case _ => false
}
<.span(
<.button(^.cls := s"btn btn-default", selected ?= c"active", ^.title := res.desc, ^.padding := 2.px, ^.onClick --> clickResidue(rt, subs))(
<.svg.svg(
^.svg.width := (w + 20) * scale,
^.svg.height := (h + 20) * scale
)(
<.svg.g(^.svg.transform := s"scale($scale) translate(${10 - x} ${10 - y})")(
<.svg.g(residue, handle, p.displayConv.name == "UCT" ?= substituents)
)
)
)
)
}
)
)
)
div"panel panel-default"(
div"panel-heading"("Residues"),
div"panel-body text-center"(
div"row"(div"col-xs-12"(residueTabs)),
div"row"(div"col-xs-12"(residueConfig)),
div"row"(div"col-xs-12"(residuePages))
)
)
}
}
val RadioAnomer = RadioGroupMap[Anomer]
val RadioAbsolute = RadioGroupMap[Absolute]
val reuseAppState = Reusability.by((s: AppState) => (s.placeAnomer, s.placeAbsolute, s.mode, s.displayConv, s.scaleSubstituents))
val C = ReactComponentB[Props]("ResiduePanel")
.initialState_P[Map[DisplayConv, Palette]] { props =>
props.conventions.map {
case (_, dc) => dc -> dc.conv.palettes.head
}
}
.renderBackend[Backend]
.domType[dom.html.Div]
.configure(Reusability.shouldComponentUpdate)
.build
}
| james-za/glycano | core/src/main/scala/za/jwatson/glycanoweb/react/ResiduePanel.scala | Scala | mit | 5,689 |
package controllers
import models.dao.{ User, UserDAO }
import play.api.mvc._
trait SecuredController extends BaseController {
val userDAO: UserDAO
private def username(request: RequestHeader): Option[String] =
request.session.get("email")
private def onUnauthorized(request: RequestHeader): Result =
Results.Redirect(routes.Application.login())
def IsAuthenticated(f: => String => Request[AnyContent] => Result): EssentialAction =
Security.Authenticated(username, onUnauthorized) { user =>
Action(request => f(user)(request))
}
def IsAuthenticatedMultipart(
f: => String => Request[play.api.mvc.MultipartFormData[play.api.libs.Files.TemporaryFile]] => Result
): EssentialAction =
Security.Authenticated(username, onUnauthorized) { user =>
Action(parse.multipartFormData)(request => f(user)(request))
}
@SuppressWarnings(Array("org.wartremover.warts.ImplicitConversion"))
implicit def emailToUser(email: String): Option[User] =
userDAO.findByEmail(email)
}
| jcranky/lojinha | app/controllers/SecuredController.scala | Scala | gpl-3.0 | 1,024 |
package org.apache.mesos.chronos.scheduler.jobs
import org.apache.mesos.chronos.scheduler.config.SchedulerConfiguration
import org.apache.mesos.chronos.scheduler.graph.JobGraph
import org.apache.mesos.chronos.scheduler.mesos.MesosOfferReviver
import org.apache.mesos.chronos.scheduler.state.PersistenceStore
import com.codahale.metrics.MetricRegistry
import com.google.common.util.concurrent.ListeningScheduledExecutorService
import org.joda.time._
import org.rogach.scallop.ScallopConf
import org.specs2.mock._
import org.specs2.mutable._
class TaskManagerSpec extends SpecificationWithJUnit with Mockito {
private[this] def makeConfig(args: String*): SchedulerConfiguration = {
val opts = new ScallopConf(args) with SchedulerConfiguration {
// scallop will trigger sys exit
override protected def onError(e: Throwable): Unit = throw e
}
opts.afterInit()
opts
}
"TaskManager" should {
"Calculate the correct time delay between scheduling and dispatching the job" in {
val taskManager = new TaskManager(mock[ListeningScheduledExecutorService], mock[PersistenceStore],
mock[JobGraph], null, MockJobUtils.mockFullObserver, mock[MetricRegistry], makeConfig(),
mock[MesosOfferReviver])
val millis = taskManager.getMillisUntilExecution(new DateTime(DateTimeZone.UTC).plus(Hours.ONE))
val expectedSeconds = scala.math.round(Period.hours(1).toStandardDuration.getMillis / 1000d)
//Due to startup time / JVM overhead, millis wouldn't be totally accurate.
val actualSeconds = scala.math.round(millis / 1000d)
actualSeconds must_== expectedSeconds
}
"Handle None job option in getTask" in {
val mockJobGraph = mock[JobGraph]
val mockPersistencStore: PersistenceStore = mock[PersistenceStore]
val taskManager = new TaskManager(mock[ListeningScheduledExecutorService], mockPersistencStore,
mockJobGraph, null, MockJobUtils.mockFullObserver, mock[MetricRegistry], makeConfig(),
mock[MesosOfferReviver])
val job = new ScheduleBasedJob("R/2012-01-01T00:00:01.000Z/PT1M", "test", "sample-command")
mockJobGraph.lookupVertex("test").returns(Some(job)) // so we can enqueue a job.
taskManager.enqueue("ct:1420843781398:0:test:", highPriority = true)
mockJobGraph.getJobForName("test").returns(None)
taskManager.getTask must_== None
there was one(mockPersistencStore).removeTask("ct:1420843781398:0:test:")
}
"Revive offers when adding a new task and --revive_offers_for_new_jobs is set" in {
val mockJobGraph = mock[JobGraph]
val mockPersistencStore: PersistenceStore = mock[PersistenceStore]
val mockMesosOfferReviver = mock[MesosOfferReviver]
val config = makeConfig("--revive_offers_for_new_jobs")
val taskManager = new TaskManager(mock[ListeningScheduledExecutorService], mockPersistencStore,
mockJobGraph, null, MockJobUtils.mockFullObserver, mock[MetricRegistry], config, mockMesosOfferReviver)
val job = new ScheduleBasedJob("R/2012-01-01T00:00:01.000Z/PT1M", "test", "sample-command")
mockJobGraph.lookupVertex("test").returns(Some(job)) // so we can enqueue a job.
taskManager.enqueue("ct:1420843781398:0:test:", highPriority = true)
there was one(mockMesosOfferReviver).reviveOffers
}
"Don't revive offers when adding a new task and --revive_offers_for_new_jobs is not set" in {
val mockJobGraph = mock[JobGraph]
      val mockPersistenceStore: PersistenceStore = mock[PersistenceStore]
val mockMesosOfferReviver = mock[MesosOfferReviver]
val config = makeConfig()
      val taskManager = new TaskManager(mock[ListeningScheduledExecutorService], mockPersistenceStore,
mockJobGraph, null, MockJobUtils.mockFullObserver, mock[MetricRegistry], config, mockMesosOfferReviver)
val job = new ScheduleBasedJob("R/2012-01-01T00:00:01.000Z/PT1M", "test", "sample-command")
mockJobGraph.lookupVertex("test").returns(Some(job)) // so we can enqueue a job.
taskManager.enqueue("ct:1420843781398:0:test:", highPriority = true)
there were noCallsTo(mockMesosOfferReviver)
}
}
}
| kapil-malik/chronos | src/test/scala/org/apache/mesos/chronos/scheduler/jobs/TaskManagerSpec.scala | Scala | apache-2.0 | 4,177 |
package org.nlogo.extensions.webview
import org.nlogo.nvm._
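/**
 * A ConcurrentJob wrapper that reports completion through callbacks.
 *
 * Each `step()` is delegated to the underlying job. Once `finish()` has run (which flips
 * `hasFinished`), the step that completed the job invokes `onError` if an exception was
 * captured, or `onFinished` with the job's result otherwise.
 */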
class NotifyingConcurrentJob(
delegate: Job,
val onFinished: (Option[AnyRef]) => Unit,
val onError: Exception => Unit)
extends ConcurrentJob(delegate.owner, delegate.agentset, delegate.topLevelProcedure, 0, delegate.parentContext, delegate.random) {
var hasFinished = false
var error = Option.empty[Exception]
override def step() = {
try {
super.step()
} catch {
case e: Exception =>
error = Some(e)
throw e
} finally {
if (hasFinished && error.nonEmpty)
error.foreach(onError)
else if (hasFinished)
onFinished(Option(result))
}
}
override def finish() = {
super.finish()
hasFinished = true
}
}
| NetLogo/webview | src/main/scala/NotifyingConcurrentJob.scala | Scala | cc0-1.0 | 762 |
package dotty.tools.dotc
package sbt
import core._
import Annotations._
import Contexts._
import Decorators._
import Denotations._
import Flags._
import Phases._
import Types._
import Symbols._
import NameOps._
import xsbti.api
import xsbti.api.SafeLazy.strict
/** Utilities to deal with xsbti.api.
*
* Mostly comes from https://github.com/sbt/zinc/blob/c46643f3e68d7d4f270bf318e3f150f5a59c0aab/internal/zinc-apiinfo/src/main/scala/xsbt/api/APIUtil.scala
*/
object APIUtils {
private object Constants {
val PublicAccess = api.Public.create()
val EmptyModifiers = new api.Modifiers(false, false, false, false, false, false, false, false)
val EmptyStructure = api.Structure.of(strict(Array.empty), strict(Array.empty), strict(Array.empty))
val EmptyType = api.EmptyType.of()
}
import Constants._
/** Registers a dummy class for sbt's incremental compilation.
*
* If a compiler phase creates a new named (module) class/trait after the phase
* `ExtractAPI`, it must register that class for sbt's incremental compilation
* on its own, lest crashes happen. In theory, the full API of the class needs
* to be constructed, but if the class is never accessed by Scala source code,
* a dummy empty class can be registered instead, using this method.
*/
def registerDummyClass(classSym: ClassSymbol)(implicit ctx: Context): Unit = {
if (ctx.sbtCallback != null) {
val classLike = emptyClassLike(classSym)
ctx.sbtCallback.api(ctx.compilationUnit.source.file.file, classLike)
}
}
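  // Illustrative use (a sketch; `moduleCls` is a hypothetical ClassSymbol synthesized by a
  // post-ExtractAPI phase, with an implicit Context in scope):
  //
  //   APIUtils.registerDummyClass(moduleCls)
  //
  // The dummy API is sufficient as long as the synthesized class is never referenced
  // from Scala sources, as the doc comment above explains.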
// See APIUtils.emptyClassLike
private def emptyClassLike(classSym: ClassSymbol)(implicit ctx: Context): api.ClassLike = {
val name = classSym.fullName.stripModuleClassSuffix.toString
val definitionType =
if (classSym.is(Trait)) api.DefinitionType.Trait
else if (classSym.is(Module)) api.DefinitionType.Module
else api.DefinitionType.ClassDef
val topLevel = classSym.isTopLevelClass
api.ClassLike.of(name, PublicAccess, EmptyModifiers, Array.empty, definitionType,
strict(EmptyType), strict(EmptyStructure), Array.empty, Array.empty, topLevel, Array.empty)
}
}
| som-snytt/dotty | compiler/src/dotty/tools/dotc/sbt/APIUtils.scala | Scala | apache-2.0 | 2,158 |
package org.oxygen.redio.gui
import io.netty.buffer.Unpooled
import net.minecraft.client.gui.{GuiButton, GuiTextField}
import net.minecraft.client.resources.I18n
import net.minecraft.network.PacketBuffer
import net.minecraft.network.play.client.C17PacketCustomPayload
import net.minecraft.tileentity.TileEntity
import net.minecraftforge.fml.relauncher.{Side, SideOnly}
import org.lwjgl.input.Keyboard
import org.oxygen.redio.common.Constants
import org.oxygen.redio.gui.containers.ContainerSetName
@SideOnly(Side.CLIENT)
class GuiSetName(val tileEntity: TileEntity) extends GuiBase(new ContainerSetName(tileEntity))
{
private var buttonOK: GuiButton = null
private var textName: GuiTextField = null
override def initGui() =
{
super.initGui()
Keyboard.enableRepeatEvents(true)
textName = new GuiTextField(Constants.Gui.SetName.TEXT_NAME, fontRendererObj, guiLeft, guiTop + 30, xSize, 20)
buttonOK = new GuiButton(Constants.Gui.SetName.BTN_OK, (width - 50) / 2, guiTop + 70, 50, 20, I18n.format("gui.done"))
addButton(buttonOK)
buttonOK.enabled = false
textName.setFocused(true)
textName.setMaxStringLength(16)
}
override def keyTyped(typedChar: Char, keyCode: Int) =
{
textName.textboxKeyTyped(typedChar, keyCode)
buttonOK.enabled = !textName.getText.trim.isEmpty
if (keyCode == Keyboard.KEY_RETURN ||
keyCode == Keyboard.KEY_NUMPADENTER)
actionPerformed(buttonOK)
}
override def onGuiClosed() =
{
val buffer = new PacketBuffer(Unpooled.buffer())
super.onGuiClosed()
Keyboard.enableRepeatEvents(false)
buffer.writeString(textName.getText.trim)
buffer.writeBlockPos(tileEntity.getPos)
mc.getNetHandler.addToSendQueue(new C17PacketCustomPayload(Constants.Gui.SetName.NAME, buffer))
}
override def mouseClicked(mouseX: Int, mouseY: Int, button: Int) =
{
super.mouseClicked(mouseX, mouseY, button)
textName.mouseClicked(mouseX, mouseY, button)
}
override def drawComponents(mouseX: Int, mouseY: Int, ticks: Float) =
{
textName.drawTextBox()
super.drawComponents(mouseX, mouseY, ticks)
drawCenteredString(fontRendererObj, I18n.format("gui.setname"), width / 2, guiTop + 10, 0xffffffff)
}
override def actionPerformed(button: GuiButton) = if (button.enabled) button.id match
{
case Constants.Gui.SetName.BTN_OK => mc.thePlayer.closeScreen()
case _ =>
}
}
| chenzhuoyu/RedIO | src/main/scala/org/oxygen/redio/gui/GuiSetName.scala | Scala | lgpl-2.1 | 2,334 |
/*
* (c) Copyright 2019 EntIT Software LLC, a Micro Focus company, L.P.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0 which accompany this distribution.
*
* The Apache License is available at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.cloudslang.content.google.actions.compute.compute_engine.networks
import java.util
import com.google.api.services.compute.model.Network
import com.hp.oo.sdk.content.annotations.{Action, Output, Param, Response}
import com.hp.oo.sdk.content.plugin.ActionMetadata.{MatchType, ResponseType}
import io.cloudslang.content.constants.BooleanValues.TRUE
import io.cloudslang.content.constants.OutputNames.{EXCEPTION, RETURN_CODE, RETURN_RESULT}
import io.cloudslang.content.constants.{ResponseNames, ReturnCodes}
import io.cloudslang.content.google.services.compute.compute_engine.networks.{NetworkController, NetworkService}
import io.cloudslang.content.google.utils.Constants.{NEW_LINE, TIMEOUT_EXCEPTION}
import io.cloudslang.content.google.utils.action.DefaultValues._
import io.cloudslang.content.google.utils.action.GoogleOutputNames.{NETWORK_ID, NETWORK_NAME, STATUS}
import io.cloudslang.content.google.utils.action.InputNames._
import io.cloudslang.content.google.utils.action.InputUtils.{convertSecondsToMilli, verifyEmpty}
import io.cloudslang.content.google.utils.action.InputValidator.{validateBoolean, validateNonNegativeDouble, validateNonNegativeLong, validateProxyPort}
import io.cloudslang.content.google.utils.action.OutputUtils.toPretty
import io.cloudslang.content.google.utils.service.{GoogleAuth, HttpTransportUtils, JsonFactoryUtils}
import io.cloudslang.content.google.utils.{ErrorOperation, OperationStatus, SuccessOperation}
import io.cloudslang.content.utils.BooleanUtilities.toBoolean
import io.cloudslang.content.utils.NumberUtilities.{toDouble, toInteger, toLong}
import io.cloudslang.content.utils.OutputUtilities.{getFailureResultsMap, getSuccessResultsMap}
import org.apache.commons.lang3.StringUtils.{EMPTY, defaultIfEmpty}
import scala.collection.JavaConversions._
import scala.concurrent.TimeoutException
/**
* Created by victor on 26.04.2017.
*/
class NetworksInsert {
/**
   * Creates a network resource in the specified project using the data included as inputs.
*
* @param projectId Name of the Google Cloud project.
* @param accessToken The access token from GetAccessToken.
* @param networkName Name of the Network. Provided by the client when the Network is created. The name must be
* 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters
* long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first
* character must be a lowercase letter, and all following characters must be a dash, lowercase
* letter, or digit, except the last character, which cannot be a dash.
* @param networkDescriptionInp Optional - The description of the new Network
* @param ipV4RangeInp Optional - The range of internal addresses that are legal on this network. This range is a CIDR
* specification, for example: 192.168.0.0/16. Provided by the client when the network is created.
* @param autoCreateSubnetworksInp Optional - When set to true, the network is created in "auto subnet mode". When set to false, the network
* is in "custom subnet mode".
* In "auto subnet mode", a newly created network is assigned the default CIDR of 10.128.0.0/9 and
* it automatically creates one subnetwork per region.
* Note: If <ipV4RangeInp> is set, then this input is ignored
* @param asyncInp Optional - Boolean specifying whether the operation to run sync or async.
* Valid values: "true", "false"
* Default: "true"
* @param timeoutInp Optional - The time, in seconds, to wait for a response if the async input is set to "false".
* If the value is 0, the operation will wait until zone operation progress is 100.
* Valid values: Any positive number including 0.
* Default: "30"
* @param pollingIntervalInp Optional - The time, in seconds, to wait before a new request that verifies if the operation finished
* is executed, if the async input is set to "false".
* Valid values: Any positive number including 0.
* Default: "1"
* @param proxyHost Optional - Proxy server used to connect to Google Cloud API. If empty no proxy will
* be used.
* @param proxyPortInp Optional - Proxy server port.
* Default: "8080"
* @param proxyUsername Optional - Proxy server user name.
* @param proxyPasswordInp Optional - Proxy server password associated with the proxyUsername input value.
* @param prettyPrintInp Optional - Whether to format (pretty print) the resulting json.
* Valid values: "true", "false"
* Default: "true"
* @return A map containing a GlobalOperation resource as returnResult, it's name as globalOperationName and the
* status of the operation. If <asyncInp> is set to false the map will also contain the name of the network
* and the network id.
* In case an exception occurs the failure message is provided.
*/
@Action(name = "Insert Network",
outputs = Array(
new Output(RETURN_CODE),
new Output(RETURN_RESULT),
new Output(EXCEPTION),
new Output(GLOBAL_OPERATION_NAME),
new Output(NETWORK_NAME),
new Output(NETWORK_ID),
new Output(STATUS)
),
responses = Array(
new Response(text = ResponseNames.SUCCESS, field = RETURN_CODE, value = ReturnCodes.SUCCESS, matchType = MatchType.COMPARE_EQUAL, responseType = ResponseType.RESOLVED),
new Response(text = ResponseNames.FAILURE, field = RETURN_CODE, value = ReturnCodes.FAILURE, matchType = MatchType.COMPARE_EQUAL, responseType = ResponseType.ERROR, isOnFail = true)
)
)
def execute(@Param(value = PROJECT_ID, required = true) projectId: String,
@Param(value = ACCESS_TOKEN, required = true, encrypted = true) accessToken: String,
@Param(value = NETWORK_NAME, required = true) networkName: String,
@Param(value = NETWORK_DESCRIPTION) networkDescriptionInp: String,
@Param(value = AUTO_CREATE_SUBNETWORKS) autoCreateSubnetworksInp: String,
@Param(value = IPV4_RANGE) ipV4RangeInp: String,
@Param(value = ASYNC) asyncInp: String,
@Param(value = TIMEOUT) timeoutInp: String,
@Param(value = POLLING_INTERVAL) pollingIntervalInp: String,
@Param(value = PROXY_HOST) proxyHost: String,
@Param(value = PROXY_PORT) proxyPortInp: String,
@Param(value = PROXY_USERNAME) proxyUsername: String,
@Param(value = PROXY_PASSWORD, encrypted = true) proxyPasswordInp: String,
@Param(value = PRETTY_PRINT) prettyPrintInp: String): util.Map[String, String] = {
val proxyHostOpt = verifyEmpty(proxyHost)
val proxyUsernameOpt = verifyEmpty(proxyUsername)
val networkDescription = defaultIfEmpty(networkDescriptionInp, EMPTY)
val autoCreateSubnetworksStr = defaultIfEmpty(autoCreateSubnetworksInp, DEFAULT_AUTO_CREATE_SUBNETWORKS)
val ipV4Range = verifyEmpty(ipV4RangeInp)
val proxyPortStr = defaultIfEmpty(proxyPortInp, DEFAULT_PROXY_PORT)
val proxyPassword = defaultIfEmpty(proxyPasswordInp, EMPTY)
val prettyPrintStr = defaultIfEmpty(prettyPrintInp, DEFAULT_PRETTY_PRINT)
val asyncStr = defaultIfEmpty(asyncInp, TRUE)
val timeoutStr = defaultIfEmpty(timeoutInp, DEFAULT_SYNC_TIMEOUT)
val pollingIntervalStr = defaultIfEmpty(pollingIntervalInp, DEFAULT_POLLING_INTERVAL)
val validationStream = validateProxyPort(proxyPortStr) ++
validateBoolean(prettyPrintStr, PRETTY_PRINT) ++
validateBoolean(autoCreateSubnetworksStr, AUTO_CREATE_SUBNETWORKS) ++
validateBoolean(asyncStr, ASYNC) ++
validateNonNegativeLong(timeoutStr, TIMEOUT) ++
validateNonNegativeDouble(pollingIntervalStr, POLLING_INTERVAL)
if (validationStream.nonEmpty) {
return getFailureResultsMap(validationStream.mkString(NEW_LINE))
}
val proxyPort = toInteger(proxyPortStr)
val prettyPrint = toBoolean(prettyPrintStr)
val autoCreateSubnetworks = toBoolean(autoCreateSubnetworksStr)
val async = toBoolean(asyncStr)
val timeout = toLong(timeoutStr)
val pollingIntervalMilli = convertSecondsToMilli(toDouble(pollingIntervalStr))
try {
val httpTransport = HttpTransportUtils.getNetHttpTransport(proxyHostOpt, proxyPort, proxyUsernameOpt, proxyPassword)
val jsonFactory = JsonFactoryUtils.getDefaultJacksonFactory
val credential = GoogleAuth.fromAccessToken(accessToken)
val computeNetwork: Network = NetworkController.createNetwork(
networkName = networkName,
networkDescription = networkDescription,
autoCreateSubnetworks = autoCreateSubnetworks,
ipV4Range = ipV4Range)
OperationStatus(NetworkService.insert(httpTransport, jsonFactory, credential, projectId, computeNetwork, async, timeout,
pollingIntervalMilli)) match {
case SuccessOperation(operation) =>
val status = defaultIfEmpty(operation.getStatus, EMPTY)
val resultMap = getSuccessResultsMap(toPretty(prettyPrint, operation)) +
(GLOBAL_OPERATION_NAME -> operation.getName) +
(STATUS -> status)
if (async) {
resultMap
} else {
val network = NetworkService.get(httpTransport, jsonFactory, credential, projectId, networkName)
val name = defaultIfEmpty(network.getName, EMPTY)
val networkId = Option(network.getId).getOrElse(BigInt(0)).toString
resultMap +
(NETWORK_NAME -> name) +
(NETWORK_ID -> networkId)
}
case ErrorOperation(error) => getFailureResultsMap(error)
}
} catch {
case t: TimeoutException => getFailureResultsMap(TIMEOUT_EXCEPTION, t)
case e: Throwable => getFailureResultsMap(e)
}
}
}
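// Illustrative call (a sketch only; the project id, token and network values below are
// placeholders, and the optional inputs left empty fall back to their defaults):
//
//   val result = new NetworksInsert().execute(
//     "my-project", "<access-token>", "my-network",
//     "demo network", "true", "", "false", "60", "1",
//     "", "", "", "", "true")
//   result.get(RETURN_CODE) // ReturnCodes.SUCCESS when the network was created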
| CloudSlang/cs-actions | cs-google/src/main/scala/io/cloudslang/content/google/actions/compute/compute_engine/networks/NetworksInsert.scala | Scala | apache-2.0 | 11,273 |
package com.seanshubin.learn.datomic.domain
class AddTaskServiceImpl(todoDatabaseService: TaskDatabaseService) extends AddTaskService {
override def addTodo(name: String): Long = ???
override def clearDone(): Unit = ???
override def setDone(id: Long, done: Boolean): Unit = ???
override def list(): Seq[Todo] = ???
}
| SeanShubin/learn-datomic | domain/src/main/scala/com/seanshubin/learn/datomic/domain/AddTaskServiceImpl.scala | Scala | unlicense | 329 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io._
import java.lang.management.{LockInfo, ManagementFactory, MonitorInfo, ThreadInfo}
import java.lang.reflect.InvocationTargetException
import java.math.{MathContext, RoundingMode}
import java.net._
import java.nio.ByteBuffer
import java.nio.channels.{Channels, FileChannel}
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths}
import java.util.{Locale, Properties, Random, UUID}
import java.util.concurrent._
import java.util.concurrent.atomic.AtomicBoolean
import java.util.zip.GZIPInputStream
import javax.net.ssl.HttpsURLConnection
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}
import scala.util.control.{ControlThrowable, NonFatal}
import scala.util.matching.Regex
import _root_.io.netty.channel.unix.Errors.NativeIoException
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.google.common.io.{ByteStreams, Files => GFiles}
import com.google.common.net.InetAddresses
import org.apache.commons.lang3.SystemUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FileUtil, Path}
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.log4j.PropertyConfigurator
import org.eclipse.jetty.util.MultiException
import org.json4s._
import org.slf4j.Logger
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.serializer.{DeserializationStream, SerializationStream, SerializerInstance}
/** CallSite represents a place in user code. It can have a short and a long form. */
private[spark] case class CallSite(shortForm: String, longForm: String)
private[spark] object CallSite {
val SHORT_FORM = "callSite.short"
val LONG_FORM = "callSite.long"
val empty = CallSite("", "")
}
/**
* Various utility methods used by Spark.
*/
private[spark] object Utils extends Logging {
val random = new Random()
private val sparkUncaughtExceptionHandler = new SparkUncaughtExceptionHandler
/**
* Define a default value for driver memory here since this value is referenced across the code
* base and nearly all files already use Utils.scala
*/
val DEFAULT_DRIVER_MEM_MB = JavaUtils.DEFAULT_DRIVER_MEM_MB.toInt
private val MAX_DIR_CREATION_ATTEMPTS: Int = 10
@volatile private var localRootDirs: Array[String] = null
/**
* The performance overhead of creating and logging strings for wide schemas can be large. To
* limit the impact, we bound the number of fields to include by default. This can be overridden
* by setting the 'spark.debug.maxToStringFields' conf in SparkEnv.
*/
val DEFAULT_MAX_TO_STRING_FIELDS = 25
private def maxNumToStringFields = {
if (SparkEnv.get != null) {
SparkEnv.get.conf.getInt("spark.debug.maxToStringFields", DEFAULT_MAX_TO_STRING_FIELDS)
} else {
DEFAULT_MAX_TO_STRING_FIELDS
}
}
/** Whether we have warned about plan string truncation yet. */
private val truncationWarningPrinted = new AtomicBoolean(false)
/**
* Format a sequence with semantics similar to calling .mkString(). Any elements beyond
* maxNumToStringFields will be dropped and replaced by a "... N more fields" placeholder.
*
* @return the trimmed and formatted string.
*/
def truncatedString[T](
seq: Seq[T],
start: String,
sep: String,
end: String,
maxNumFields: Int = maxNumToStringFields): String = {
if (seq.length > maxNumFields) {
if (truncationWarningPrinted.compareAndSet(false, true)) {
logWarning(
"Truncated the string representation of a plan since it was too large. This " +
"behavior can be adjusted by setting 'spark.debug.maxToStringFields' in SparkEnv.conf.")
}
val numFields = math.max(0, maxNumFields - 1)
seq.take(numFields).mkString(
start, sep, sep + "... " + (seq.length - numFields) + " more fields" + end)
} else {
seq.mkString(start, sep, end)
}
}
/** Shorthand for calling truncatedString() without start or end strings. */
def truncatedString[T](seq: Seq[T], sep: String): String = truncatedString(seq, "", sep, "")
/** Serialize an object using Java serialization */
def serialize[T](o: T): Array[Byte] = {
val bos = new ByteArrayOutputStream()
val oos = new ObjectOutputStream(bos)
oos.writeObject(o)
oos.close()
bos.toByteArray
}
/** Deserialize an object using Java serialization */
def deserialize[T](bytes: Array[Byte]): T = {
val bis = new ByteArrayInputStream(bytes)
val ois = new ObjectInputStream(bis)
ois.readObject.asInstanceOf[T]
}
/** Deserialize an object using Java serialization and the given ClassLoader */
def deserialize[T](bytes: Array[Byte], loader: ClassLoader): T = {
val bis = new ByteArrayInputStream(bytes)
val ois = new ObjectInputStream(bis) {
override def resolveClass(desc: ObjectStreamClass): Class[_] = {
// scalastyle:off classforname
Class.forName(desc.getName, false, loader)
// scalastyle:on classforname
}
}
ois.readObject.asInstanceOf[T]
}
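  // Round-trip example (illustrative):
  //   val bytes = serialize(Map("a" -> 1))
  //   deserialize[Map[String, Int]](bytes)                                // Map(a -> 1)
  //   deserialize[Map[String, Int]](bytes, getContextOrSparkClassLoader)  // same, explicit loader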
/** Deserialize a Long value (used for [[org.apache.spark.api.python.PythonPartitioner]]) */
def deserializeLongValue(bytes: Array[Byte]) : Long = {
// Note: we assume that we are given a Long value encoded in network (big-endian) byte order
var result = bytes(7) & 0xFFL
result = result + ((bytes(6) & 0xFFL) << 8)
result = result + ((bytes(5) & 0xFFL) << 16)
result = result + ((bytes(4) & 0xFFL) << 24)
result = result + ((bytes(3) & 0xFFL) << 32)
result = result + ((bytes(2) & 0xFFL) << 40)
result = result + ((bytes(1) & 0xFFL) << 48)
result + ((bytes(0) & 0xFFL) << 56)
}
/** Serialize via nested stream using specific serializer */
def serializeViaNestedStream(os: OutputStream, ser: SerializerInstance)(
f: SerializationStream => Unit): Unit = {
val osWrapper = ser.serializeStream(new OutputStream {
override def write(b: Int): Unit = os.write(b)
override def write(b: Array[Byte], off: Int, len: Int): Unit = os.write(b, off, len)
})
try {
f(osWrapper)
} finally {
osWrapper.close()
}
}
/** Deserialize via nested stream using specific serializer */
def deserializeViaNestedStream(is: InputStream, ser: SerializerInstance)(
f: DeserializationStream => Unit): Unit = {
val isWrapper = ser.deserializeStream(new InputStream {
override def read(): Int = is.read()
override def read(b: Array[Byte], off: Int, len: Int): Int = is.read(b, off, len)
})
try {
f(isWrapper)
} finally {
isWrapper.close()
}
}
/**
* Get the ClassLoader which loaded Spark.
*/
def getSparkClassLoader: ClassLoader = getClass.getClassLoader
/**
* Get the Context ClassLoader on this thread or, if not present, the ClassLoader that
* loaded Spark.
*
* This should be used whenever passing a ClassLoader to Class.ForName or finding the currently
* active loader when setting up ClassLoader delegation chains.
*/
def getContextOrSparkClassLoader: ClassLoader =
Option(Thread.currentThread().getContextClassLoader).getOrElse(getSparkClassLoader)
/** Determines whether the provided class is loadable in the current thread. */
def classIsLoadable(clazz: String): Boolean = {
// scalastyle:off classforname
Try { Class.forName(clazz, false, getContextOrSparkClassLoader) }.isSuccess
// scalastyle:on classforname
}
// scalastyle:off classforname
/** Preferred alternative to Class.forName(className) */
def classForName(className: String): Class[_] = {
Class.forName(className, true, getContextOrSparkClassLoader)
// scalastyle:on classforname
}
/**
* Primitive often used when writing [[java.nio.ByteBuffer]] to [[java.io.DataOutput]]
*/
def writeByteBuffer(bb: ByteBuffer, out: DataOutput): Unit = {
if (bb.hasArray) {
out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining())
} else {
val originalPosition = bb.position()
val bbval = new Array[Byte](bb.remaining())
bb.get(bbval)
out.write(bbval)
bb.position(originalPosition)
}
}
/**
* Primitive often used when writing [[java.nio.ByteBuffer]] to [[java.io.OutputStream]]
*/
def writeByteBuffer(bb: ByteBuffer, out: OutputStream): Unit = {
if (bb.hasArray) {
out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining())
} else {
val originalPosition = bb.position()
val bbval = new Array[Byte](bb.remaining())
bb.get(bbval)
out.write(bbval)
bb.position(originalPosition)
}
}
/**
* JDK equivalent of `chmod 700 file`.
*
* @param file the file whose permissions will be modified
* @return true if the permissions were successfully changed, false otherwise.
*/
def chmod700(file: File): Boolean = {
file.setReadable(false, false) &&
file.setReadable(true, true) &&
file.setWritable(false, false) &&
file.setWritable(true, true) &&
file.setExecutable(false, false) &&
file.setExecutable(true, true)
}
/**
* Create a directory inside the given parent directory. The directory is guaranteed to be
* newly created, and is not marked for automatic deletion.
*/
def createDirectory(root: String, namePrefix: String = "spark"): File = {
var attempts = 0
val maxAttempts = MAX_DIR_CREATION_ATTEMPTS
var dir: File = null
while (dir == null) {
attempts += 1
if (attempts > maxAttempts) {
throw new IOException("Failed to create a temp directory (under " + root + ") after " +
maxAttempts + " attempts!")
}
try {
dir = new File(root, namePrefix + "-" + UUID.randomUUID.toString)
if (dir.exists() || !dir.mkdirs()) {
dir = null
}
} catch { case e: SecurityException => dir = null; }
}
dir.getCanonicalFile
}
/**
* Create a temporary directory inside the given parent directory. The directory will be
* automatically deleted when the VM shuts down.
*/
def createTempDir(
root: String = System.getProperty("java.io.tmpdir"),
namePrefix: String = "spark"): File = {
val dir = createDirectory(root, namePrefix)
ShutdownHookManager.registerShutdownDeleteDir(dir)
dir
}
/**
   * Copy all data from an InputStream to an OutputStream. NIO-based copying between file streams
   * is disabled by default unless transferToEnabled is explicitly set to true; the
   * transferToEnabled parameter should be configured via spark.file.transferTo = [true|false].
*/
def copyStream(
in: InputStream,
out: OutputStream,
closeStreams: Boolean = false,
transferToEnabled: Boolean = false): Long = {
tryWithSafeFinally {
if (in.isInstanceOf[FileInputStream] && out.isInstanceOf[FileOutputStream]
&& transferToEnabled) {
// When both streams are File stream, use transferTo to improve copy performance.
val inChannel = in.asInstanceOf[FileInputStream].getChannel()
val outChannel = out.asInstanceOf[FileOutputStream].getChannel()
val size = inChannel.size()
copyFileStreamNIO(inChannel, outChannel, 0, size)
size
} else {
var count = 0L
val buf = new Array[Byte](8192)
var n = 0
while (n != -1) {
n = in.read(buf)
if (n != -1) {
out.write(buf, 0, n)
count += n
}
}
count
}
} {
if (closeStreams) {
try {
in.close()
} finally {
out.close()
}
}
}
}
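  // Illustrative use, copying one local file into another (the paths are placeholders):
  //   val in = new FileInputStream("/tmp/source.bin")
  //   val out = new FileOutputStream("/tmp/dest.bin")
  //   val copied = copyStream(in, out, closeStreams = true, transferToEnabled = true)
  //   // Both streams are file streams and transferToEnabled is true, so the NIO
  //   // transferTo path is taken; `copied` is the number of bytes copied.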
def copyFileStreamNIO(
input: FileChannel,
output: FileChannel,
startPosition: Long,
bytesToCopy: Long): Unit = {
val initialPos = output.position()
var count = 0L
// In case transferTo method transferred less data than we have required.
while (count < bytesToCopy) {
count += input.transferTo(count + startPosition, bytesToCopy - count, output)
}
assert(count == bytesToCopy,
s"request to copy $bytesToCopy bytes, but actually copied $count bytes.")
// Check the position after transferTo loop to see if it is in the right position and
// give user information if not.
// Position will not be increased to the expected length after calling transferTo in
// kernel version 2.6.32, this issue can be seen in
// https://bugs.openjdk.java.net/browse/JDK-7052359
// This will lead to stream corruption issue when using sort-based shuffle (SPARK-3948).
val finalPos = output.position()
val expectedPos = initialPos + bytesToCopy
assert(finalPos == expectedPos,
s"""
         |Current position $finalPos does not equal the expected position $expectedPos
|after transferTo, please check your kernel version to see if it is 2.6.32,
|this is a kernel bug which will lead to unexpected behavior when using transferTo.
|You can set spark.file.transferTo = false to disable this NIO feature.
""".stripMargin)
}
/**
   * Construct a URI containing information used for authentication.
   * This also sets the default authenticator to properly negotiate the
* user/password based on the URI.
*
* Note this relies on the Authenticator.setDefault being set properly to decode
* the user name and password. This is currently set in the SecurityManager.
*/
def constructURIForAuthentication(uri: URI, securityMgr: SecurityManager): URI = {
val userCred = securityMgr.getSecretKey()
if (userCred == null) throw new Exception("Secret key is null with authentication on")
val userInfo = securityMgr.getHttpUser() + ":" + userCred
new URI(uri.getScheme(), userInfo, uri.getHost(), uri.getPort(), uri.getPath(),
uri.getQuery(), uri.getFragment())
}
/**
* A file name may contain some invalid URI characters, such as " ". This method will convert the
* file name to a raw path accepted by `java.net.URI(String)`.
*
* Note: the file name must not contain "/" or "\\"
*/
def encodeFileNameToURIRawPath(fileName: String): String = {
    require(!fileName.contains("/") && !fileName.contains("\\"))
// `file` and `localhost` are not used. Just to prevent URI from parsing `fileName` as
// scheme or host. The prefix "/" is required because URI doesn't accept a relative path.
// We should remove it after we get the raw path.
new URI("file", null, "localhost", -1, "/" + fileName, null, null).getRawPath.substring(1)
}
/**
* Get the file name from uri's raw path and decode it. If the raw path of uri ends with "/",
* return the name before the last "/".
*/
def decodeFileNameInURI(uri: URI): String = {
val rawPath = uri.getRawPath
val rawFileName = rawPath.split("/").last
new URI("file:///" + rawFileName).getPath.substring(1)
}
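  // Round trip (illustrative):
  //   val raw = encodeFileNameToURIRawPath("my app.jar")    // "my%20app.jar"
  //   decodeFileNameInURI(new URI("spark://host/" + raw))   // "my app.jar"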
/**
* Download a file or directory to target directory. Supports fetching the file in a variety of
* ways, including HTTP, Hadoop-compatible filesystems, and files on a standard filesystem, based
* on the URL parameter. Fetching directories is only supported from Hadoop-compatible
* filesystems.
*
* If `useCache` is true, first attempts to fetch the file to a local cache that's shared
* across executors running the same application. `useCache` is used mainly for
* the executors, and not in local mode.
*
* Throws SparkException if the target file already exists and has different contents than
* the requested file.
*/
def fetchFile(
url: String,
targetDir: File,
conf: SparkConf,
securityMgr: SecurityManager,
hadoopConf: Configuration,
timestamp: Long,
useCache: Boolean): File = {
val fileName = decodeFileNameInURI(new URI(url))
val targetFile = new File(targetDir, fileName)
val fetchCacheEnabled = conf.getBoolean("spark.files.useFetchCache", defaultValue = true)
if (useCache && fetchCacheEnabled) {
val cachedFileName = s"${url.hashCode}${timestamp}_cache"
val lockFileName = s"${url.hashCode}${timestamp}_lock"
val localDir = new File(getLocalDir(conf))
val lockFile = new File(localDir, lockFileName)
val lockFileChannel = new RandomAccessFile(lockFile, "rw").getChannel()
      // Only one executor should populate the cache entry at a time.
      // The FileLock is only used to synchronize executors downloading the file;
      // it's always safe regardless of lock type (mandatory or advisory).
val lock = lockFileChannel.lock()
val cachedFile = new File(localDir, cachedFileName)
try {
if (!cachedFile.exists()) {
doFetchFile(url, localDir, cachedFileName, conf, securityMgr, hadoopConf)
}
} finally {
lock.release()
lockFileChannel.close()
}
copyFile(
url,
cachedFile,
targetFile,
conf.getBoolean("spark.files.overwrite", false)
)
} else {
doFetchFile(url, targetDir, fileName, conf, securityMgr, hadoopConf)
}
// Decompress the file if it's a .tar or .tar.gz
if (fileName.endsWith(".tar.gz") || fileName.endsWith(".tgz")) {
logInfo("Untarring " + fileName)
executeAndGetOutput(Seq("tar", "-xzf", fileName), targetDir)
} else if (fileName.endsWith(".tar")) {
logInfo("Untarring " + fileName)
executeAndGetOutput(Seq("tar", "-xf", fileName), targetDir)
}
// Make the file executable - That's necessary for scripts
FileUtil.chmod(targetFile.getAbsolutePath, "a+x")
// Windows does not grant read permission by default to non-admin users
// Add read permission to owner explicitly
if (isWindows) {
FileUtil.chmod(targetFile.getAbsolutePath, "u+r")
}
targetFile
}
/**
* Download `in` to `tempFile`, then move it to `destFile`.
*
* If `destFile` already exists:
* - no-op if its contents equal those of `sourceFile`,
* - throw an exception if `fileOverwrite` is false,
* - attempt to overwrite it otherwise.
*
* @param url URL that `sourceFile` originated from, for logging purposes.
* @param in InputStream to download.
* @param destFile File path to move `tempFile` to.
* @param fileOverwrite Whether to delete/overwrite an existing `destFile` that does not match
* `sourceFile`
*/
private def downloadFile(
url: String,
in: InputStream,
destFile: File,
fileOverwrite: Boolean): Unit = {
val tempFile = File.createTempFile("fetchFileTemp", null,
new File(destFile.getParentFile.getAbsolutePath))
logInfo(s"Fetching $url to $tempFile")
try {
val out = new FileOutputStream(tempFile)
Utils.copyStream(in, out, closeStreams = true)
copyFile(url, tempFile, destFile, fileOverwrite, removeSourceFile = true)
} finally {
// Catch-all for the couple of cases where for some reason we didn't move `tempFile` to
// `destFile`.
if (tempFile.exists()) {
tempFile.delete()
}
}
}
/**
* Copy `sourceFile` to `destFile`.
*
* If `destFile` already exists:
* - no-op if its contents equal those of `sourceFile`,
* - throw an exception if `fileOverwrite` is false,
* - attempt to overwrite it otherwise.
*
* @param url URL that `sourceFile` originated from, for logging purposes.
* @param sourceFile File path to copy/move from.
* @param destFile File path to copy/move to.
* @param fileOverwrite Whether to delete/overwrite an existing `destFile` that does not match
* `sourceFile`
* @param removeSourceFile Whether to remove `sourceFile` after / as part of moving/copying it to
* `destFile`.
*/
private def copyFile(
url: String,
sourceFile: File,
destFile: File,
fileOverwrite: Boolean,
removeSourceFile: Boolean = false): Unit = {
if (destFile.exists) {
if (!filesEqualRecursive(sourceFile, destFile)) {
if (fileOverwrite) {
logInfo(
s"File $destFile exists and does not match contents of $url, replacing it with $url"
)
if (!destFile.delete()) {
throw new SparkException(
"Failed to delete %s while attempting to overwrite it with %s".format(
destFile.getAbsolutePath,
sourceFile.getAbsolutePath
)
)
}
} else {
throw new SparkException(
s"File $destFile exists and does not match contents of $url")
}
} else {
// Do nothing if the file contents are the same, i.e. this file has been copied
// previously.
logInfo(
"%s has been previously copied to %s".format(
sourceFile.getAbsolutePath,
destFile.getAbsolutePath
)
)
return
}
}
// The file does not exist in the target directory. Copy or move it there.
if (removeSourceFile) {
Files.move(sourceFile.toPath, destFile.toPath)
} else {
logInfo(s"Copying ${sourceFile.getAbsolutePath} to ${destFile.getAbsolutePath}")
copyRecursive(sourceFile, destFile)
}
}
private def filesEqualRecursive(file1: File, file2: File): Boolean = {
if (file1.isDirectory && file2.isDirectory) {
val subfiles1 = file1.listFiles()
val subfiles2 = file2.listFiles()
if (subfiles1.size != subfiles2.size) {
return false
}
subfiles1.sortBy(_.getName).zip(subfiles2.sortBy(_.getName)).forall {
case (f1, f2) => filesEqualRecursive(f1, f2)
}
} else if (file1.isFile && file2.isFile) {
GFiles.equal(file1, file2)
} else {
false
}
}
private def copyRecursive(source: File, dest: File): Unit = {
if (source.isDirectory) {
if (!dest.mkdir()) {
throw new IOException(s"Failed to create directory ${dest.getPath}")
}
val subfiles = source.listFiles()
subfiles.foreach(f => copyRecursive(f, new File(dest, f.getName)))
} else {
Files.copy(source.toPath, dest.toPath)
}
}
/**
* Download a file or directory to target directory. Supports fetching the file in a variety of
* ways, including HTTP, Hadoop-compatible filesystems, and files on a standard filesystem, based
* on the URL parameter. Fetching directories is only supported from Hadoop-compatible
* filesystems.
*
* Throws SparkException if the target file already exists and has different contents than
* the requested file.
*/
def doFetchFile(
url: String,
targetDir: File,
filename: String,
conf: SparkConf,
securityMgr: SecurityManager,
hadoopConf: Configuration): File = {
val targetFile = new File(targetDir, filename)
val uri = new URI(url)
val fileOverwrite = conf.getBoolean("spark.files.overwrite", defaultValue = false)
Option(uri.getScheme).getOrElse("file") match {
case "spark" =>
if (SparkEnv.get == null) {
throw new IllegalStateException(
"Cannot retrieve files with 'spark' scheme without an active SparkEnv.")
}
val source = SparkEnv.get.rpcEnv.openChannel(url)
val is = Channels.newInputStream(source)
downloadFile(url, is, targetFile, fileOverwrite)
case "http" | "https" | "ftp" =>
var uc: URLConnection = null
if (securityMgr.isAuthenticationEnabled()) {
logDebug("fetchFile with security enabled")
val newuri = constructURIForAuthentication(uri, securityMgr)
uc = newuri.toURL().openConnection()
uc.setAllowUserInteraction(false)
} else {
logDebug("fetchFile not using security")
uc = new URL(url).openConnection()
}
Utils.setupSecureURLConnection(uc, securityMgr)
val timeoutMs =
conf.getTimeAsSeconds("spark.files.fetchTimeout", "60s").toInt * 1000
uc.setConnectTimeout(timeoutMs)
uc.setReadTimeout(timeoutMs)
uc.connect()
val in = uc.getInputStream()
downloadFile(url, in, targetFile, fileOverwrite)
case "file" =>
// In the case of a local file, copy the local file to the target directory.
// Note the difference between uri vs url.
val sourceFile = if (uri.isAbsolute) new File(uri) else new File(url)
copyFile(url, sourceFile, targetFile, fileOverwrite)
case _ =>
val fs = getHadoopFileSystem(uri, hadoopConf)
val path = new Path(uri)
fetchHcfsFile(path, targetDir, fs, conf, hadoopConf, fileOverwrite,
filename = Some(filename))
}
targetFile
}
/**
* Fetch a file or directory from a Hadoop-compatible filesystem.
*
* Visible for testing
*/
private[spark] def fetchHcfsFile(
path: Path,
targetDir: File,
fs: FileSystem,
conf: SparkConf,
hadoopConf: Configuration,
fileOverwrite: Boolean,
filename: Option[String] = None): Unit = {
if (!targetDir.exists() && !targetDir.mkdir()) {
throw new IOException(s"Failed to create directory ${targetDir.getPath}")
}
val dest = new File(targetDir, filename.getOrElse(path.getName))
if (fs.isFile(path)) {
val in = fs.open(path)
try {
downloadFile(path.toString, in, dest, fileOverwrite)
} finally {
in.close()
}
} else {
fs.listStatus(path).foreach { fileStatus =>
fetchHcfsFile(fileStatus.getPath(), dest, fs, conf, hadoopConf, fileOverwrite)
}
}
}
/**
* Validate that a given URI is actually a valid URL as well.
* @param uri The URI to validate
*/
@throws[MalformedURLException]("when the URI is an invalid URL")
def validateURL(uri: URI): Unit = {
Option(uri.getScheme).getOrElse("file") match {
case "http" | "https" | "ftp" =>
try {
uri.toURL
} catch {
case e: MalformedURLException =>
val ex = new MalformedURLException(s"URI (${uri.toString}) is not a valid URL.")
ex.initCause(e)
throw ex
}
case _ => // will not be turned into a URL anyway
}
}
/**
* Get the path of a temporary directory. Spark's local directories can be configured through
* multiple settings, which are used with the following precedence:
*
* - If called from inside of a YARN container, this will return a directory chosen by YARN.
* - If the SPARK_LOCAL_DIRS environment variable is set, this will return a directory from it.
* - Otherwise, if the spark.local.dir is set, this will return a directory from it.
* - Otherwise, this will return java.io.tmpdir.
*
* Some of these configuration options might be lists of multiple paths, but this method will
* always return a single directory.
*/
def getLocalDir(conf: SparkConf): String = {
getOrCreateLocalRootDirs(conf).headOption.getOrElse {
val configuredLocalDirs = getConfiguredLocalDirs(conf)
throw new IOException(
s"Failed to get a temp directory under [${configuredLocalDirs.mkString(",")}].")
}
}
private[spark] def isRunningInYarnContainer(conf: SparkConf): Boolean = {
// These environment variables are set by YARN.
conf.getenv("CONTAINER_ID") != null
}
/**
* Gets or creates the directories listed in spark.local.dir or SPARK_LOCAL_DIRS,
* and returns only the directories that exist / could be created.
*
* If no directories could be created, this will return an empty list.
*
* This method will cache the local directories for the application when it's first invoked.
* So calling it multiple times with a different configuration will always return the same
* set of directories.
*/
private[spark] def getOrCreateLocalRootDirs(conf: SparkConf): Array[String] = {
if (localRootDirs == null) {
this.synchronized {
if (localRootDirs == null) {
localRootDirs = getOrCreateLocalRootDirsImpl(conf)
}
}
}
localRootDirs
}
/**
* Return the configured local directories where Spark can write files. This
* method does not create any directories on its own, it only encapsulates the
* logic of locating the local directories according to deployment mode.
*/
def getConfiguredLocalDirs(conf: SparkConf): Array[String] = {
val shuffleServiceEnabled = conf.getBoolean("spark.shuffle.service.enabled", false)
if (isRunningInYarnContainer(conf)) {
// If we are in yarn mode, systems can have different disk layouts so we must set it
// to what Yarn on this system said was available. Note this assumes that Yarn has
// created the directories already, and that they are secured so that only the
// user has access to them.
getYarnLocalDirs(conf).split(",")
} else if (conf.getenv("SPARK_EXECUTOR_DIRS") != null) {
conf.getenv("SPARK_EXECUTOR_DIRS").split(File.pathSeparator)
} else if (conf.getenv("SPARK_LOCAL_DIRS") != null) {
conf.getenv("SPARK_LOCAL_DIRS").split(",")
} else if (conf.getenv("MESOS_DIRECTORY") != null && !shuffleServiceEnabled) {
// Mesos already creates a directory per Mesos task. Spark should use that directory
// instead so all temporary files are automatically cleaned up when the Mesos task ends.
// Note that we don't want this if the shuffle service is enabled because we want to
// continue to serve shuffle files after the executors that wrote them have already exited.
Array(conf.getenv("MESOS_DIRECTORY"))
} else {
if (conf.getenv("MESOS_DIRECTORY") != null && shuffleServiceEnabled) {
logInfo("MESOS_DIRECTORY available but not using provided Mesos sandbox because " +
"spark.shuffle.service.enabled is enabled.")
}
// In non-Yarn mode (or for the driver in yarn-client mode), we cannot trust the user
// configuration to point to a secure directory. So create a subdirectory with restricted
// permissions under each listed directory.
conf.get("spark.local.dir", System.getProperty("java.io.tmpdir")).split(",")
}
}
private def getOrCreateLocalRootDirsImpl(conf: SparkConf): Array[String] = {
val configuredLocalDirs = getConfiguredLocalDirs(conf)
val uris = configuredLocalDirs.filter { root =>
// Here, we guess if the given value is a URI at its best - check if scheme is set.
Try(new URI(root).getScheme != null).getOrElse(false)
}
if (uris.nonEmpty) {
logWarning(
"The configured local directories are not expected to be URIs; however, got suspicious " +
s"values [${uris.mkString(", ")}]. Please check your configured local directories.")
}
configuredLocalDirs.flatMap { root =>
try {
val rootDir = new File(root)
if (rootDir.exists || rootDir.mkdirs()) {
val dir = createTempDir(root)
chmod700(dir)
Some(dir.getAbsolutePath)
} else {
logError(s"Failed to create dir in $root. Ignoring this directory.")
None
}
} catch {
case e: IOException =>
logError(s"Failed to create local root dir in $root. Ignoring this directory.")
None
}
}
}
/** Get the Yarn approved local directories. */
private def getYarnLocalDirs(conf: SparkConf): String = {
val localDirs = Option(conf.getenv("LOCAL_DIRS")).getOrElse("")
if (localDirs.isEmpty) {
throw new Exception("Yarn Local dirs can't be empty")
}
localDirs
}
/** Used by unit tests. Do not call from other places. */
private[spark] def clearLocalRootDirs(): Unit = {
localRootDirs = null
}
/**
* Shuffle the elements of a collection into a random order, returning the
* result in a new collection. Unlike scala.util.Random.shuffle, this method
* uses a local random number generator, avoiding inter-thread contention.
*/
def randomize[T: ClassTag](seq: TraversableOnce[T]): Seq[T] = {
randomizeInPlace(seq.toArray)
}
/**
* Shuffle the elements of an array into a random order, modifying the
* original array. Returns the original array.
*/
def randomizeInPlace[T](arr: Array[T], rand: Random = new Random): Array[T] = {
for (i <- (arr.length - 1) to 1 by -1) {
val j = rand.nextInt(i + 1)
val tmp = arr(j)
arr(j) = arr(i)
arr(i) = tmp
}
arr
}
/**
* Get the local host's IP address in dotted-quad format (e.g. 1.2.3.4).
* Note, this is typically not used from within core spark.
*/
private lazy val localIpAddress: InetAddress = findLocalInetAddress()
private def findLocalInetAddress(): InetAddress = {
val defaultIpOverride = System.getenv("SPARK_LOCAL_IP")
if (defaultIpOverride != null) {
InetAddress.getByName(defaultIpOverride)
} else {
val address = InetAddress.getLocalHost
if (address.isLoopbackAddress) {
// Address resolves to something like 127.0.1.1, which happens on Debian; try to find
// a better address using the local network interfaces
        // getNetworkInterfaces returns interfaces in reverse order compared to ifconfig output
        // order on unix-like systems. On Windows, it returns them in index order.
        // It is more correct to pick the IP address following the system output order.
val activeNetworkIFs = NetworkInterface.getNetworkInterfaces.asScala.toSeq
val reOrderedNetworkIFs = if (isWindows) activeNetworkIFs else activeNetworkIFs.reverse
for (ni <- reOrderedNetworkIFs) {
val addresses = ni.getInetAddresses.asScala
.filterNot(addr => addr.isLinkLocalAddress || addr.isLoopbackAddress).toSeq
if (addresses.nonEmpty) {
val addr = addresses.find(_.isInstanceOf[Inet4Address]).getOrElse(addresses.head)
            // because Inet6Address.toHostName may append the interface name at the end if it knows about it
val strippedAddress = InetAddress.getByAddress(addr.getAddress)
// We've found an address that looks reasonable!
logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
" a loopback address: " + address.getHostAddress + "; using " +
strippedAddress.getHostAddress + " instead (on interface " + ni.getName + ")")
logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
return strippedAddress
}
}
logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
" a loopback address: " + address.getHostAddress + ", but we couldn't find any" +
" external IP address!")
logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
}
address
}
}
private var customHostname: Option[String] = sys.env.get("SPARK_LOCAL_HOSTNAME")
/**
* Allow setting a custom host name because when we run on Mesos we need to use the same
* hostname it reports to the master.
*/
def setCustomHostname(hostname: String) {
// DEBUG code
Utils.checkHost(hostname)
customHostname = Some(hostname)
}
/**
* Get the local machine's FQDN.
*/
def localCanonicalHostName(): String = {
customHostname.getOrElse(localIpAddress.getCanonicalHostName)
}
/**
* Get the local machine's hostname.
*/
def localHostName(): String = {
customHostname.getOrElse(localIpAddress.getHostAddress)
}
/**
* Get the local machine's URI.
*/
def localHostNameForURI(): String = {
customHostname.getOrElse(InetAddresses.toUriString(localIpAddress))
}
def checkHost(host: String) {
assert(host != null && host.indexOf(':') == -1, s"Expected hostname (not IP) but got $host")
}
def checkHostPort(hostPort: String) {
assert(hostPort != null && hostPort.indexOf(':') != -1,
s"Expected host and port but got $hostPort")
}
// Typically, this will be of order of number of nodes in cluster
// If not, we should change it to LRUCache or something.
private val hostPortParseResults = new ConcurrentHashMap[String, (String, Int)]()
def parseHostPort(hostPort: String): (String, Int) = {
// Check cache first.
val cached = hostPortParseResults.get(hostPort)
if (cached != null) {
return cached
}
val indx: Int = hostPort.lastIndexOf(':')
// This is potentially broken - when dealing with ipv6 addresses for example, sigh ...
// but then hadoop does not support ipv6 right now.
    // For now, we assume that if a port exists, then it is valid - we do not check that it is an int > 0
if (-1 == indx) {
val retval = (hostPort, 0)
hostPortParseResults.put(hostPort, retval)
return retval
}
val retval = (hostPort.substring(0, indx).trim(), hostPort.substring(indx + 1).trim().toInt)
hostPortParseResults.putIfAbsent(hostPort, retval)
hostPortParseResults.get(hostPort)
}
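  // For example (illustrative):
  //   parseHostPort("spark-worker-1:7077")  // ("spark-worker-1", 7077)
  //   parseHostPort("spark-worker-1")       // ("spark-worker-1", 0) when no port is given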
/**
* Return the string to tell how long has passed in milliseconds.
*/
def getUsedTimeMs(startTimeMs: Long): String = {
" " + (System.currentTimeMillis - startTimeMs) + " ms"
}
private def listFilesSafely(file: File): Seq[File] = {
if (file.exists()) {
val files = file.listFiles()
if (files == null) {
throw new IOException("Failed to list files for dir: " + file)
}
files
} else {
List()
}
}
/**
* Lists files recursively.
*/
def recursiveList(f: File): Array[File] = {
require(f.isDirectory)
val current = f.listFiles
current ++ current.filter(_.isDirectory).flatMap(recursiveList)
}
/**
* Delete a file or directory and its contents recursively.
* Don't follow directories if they are symlinks.
* Throws an exception if deletion is unsuccessful.
*/
def deleteRecursively(file: File) {
if (file != null) {
try {
if (file.isDirectory && !isSymlink(file)) {
var savedIOException: IOException = null
for (child <- listFilesSafely(file)) {
try {
deleteRecursively(child)
} catch {
              // In case of multiple exceptions, only the last one will be thrown
case ioe: IOException => savedIOException = ioe
}
}
if (savedIOException != null) {
throw savedIOException
}
ShutdownHookManager.removeShutdownDeleteDir(file)
}
} finally {
if (file.delete()) {
logTrace(s"${file.getAbsolutePath} has been deleted")
} else {
// Delete can also fail if the file simply did not exist
if (file.exists()) {
throw new IOException("Failed to delete: " + file.getAbsolutePath)
}
}
}
}
}
/**
* Check to see if file is a symbolic link.
*/
def isSymlink(file: File): Boolean = {
return Files.isSymbolicLink(Paths.get(file.toURI))
}
/**
* Determines if a directory contains any files newer than cutoff seconds.
*
* @param dir must be the path to a directory, or IllegalArgumentException is thrown
* @param cutoff measured in seconds. Returns true if there are any files or directories in the
* given directory whose last modified time is later than this many seconds ago
*/
def doesDirectoryContainAnyNewFiles(dir: File, cutoff: Long): Boolean = {
if (!dir.isDirectory) {
throw new IllegalArgumentException(s"$dir is not a directory!")
}
val filesAndDirs = dir.listFiles()
val cutoffTimeInMillis = System.currentTimeMillis - (cutoff * 1000)
filesAndDirs.exists(_.lastModified() > cutoffTimeInMillis) ||
filesAndDirs.filter(_.isDirectory).exists(
subdir => doesDirectoryContainAnyNewFiles(subdir, cutoff)
)
}
/**
   * Convert a time parameter such as (50s, 100ms, or 250us) to milliseconds for internal use. If
* no suffix is provided, the passed number is assumed to be in ms.
*/
def timeStringAsMs(str: String): Long = {
JavaUtils.timeStringAsMs(str)
}
/**
* Convert a time parameter such as (50s, 100ms, or 250us) to seconds for internal use. If
* no suffix is provided, the passed number is assumed to be in seconds.
*/
def timeStringAsSeconds(str: String): Long = {
JavaUtils.timeStringAsSec(str)
}
/**
* Convert a passed byte string (e.g. 50b, 100k, or 250m) to bytes for internal use.
*
* If no suffix is provided, the passed number is assumed to be in bytes.
*/
def byteStringAsBytes(str: String): Long = {
JavaUtils.byteStringAsBytes(str)
}
/**
* Convert a passed byte string (e.g. 50b, 100k, or 250m) to kibibytes for internal use.
*
* If no suffix is provided, the passed number is assumed to be in kibibytes.
*/
def byteStringAsKb(str: String): Long = {
JavaUtils.byteStringAsKb(str)
}
/**
* Convert a passed byte string (e.g. 50b, 100k, or 250m) to mebibytes for internal use.
*
* If no suffix is provided, the passed number is assumed to be in mebibytes.
*/
def byteStringAsMb(str: String): Long = {
JavaUtils.byteStringAsMb(str)
}
/**
* Convert a passed byte string (e.g. 50b, 100k, or 250m, 500g) to gibibytes for internal use.
*
* If no suffix is provided, the passed number is assumed to be in gibibytes.
*/
def byteStringAsGb(str: String): Long = {
JavaUtils.byteStringAsGb(str)
}
/**
* Convert a Java memory parameter passed to -Xmx (such as 300m or 1g) to a number of mebibytes.
*/
def memoryStringToMb(str: String): Int = {
// Convert to bytes, rather than directly to MB, because when no units are specified the unit
// is assumed to be bytes
(JavaUtils.byteStringAsBytes(str) / 1024 / 1024).toInt
}
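  // A few illustrative conversions of the suffixed strings described above:
  //   timeStringAsMs("50s")      // 50000
  //   timeStringAsSeconds("2m")  // 120
  //   byteStringAsMb("1g")       // 1024
  //   memoryStringToMb("300m")   // 300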
/**
* Convert a quantity in bytes to a human-readable string such as "4.0 MB".
*/
def bytesToString(size: Long): String = bytesToString(BigInt(size))
def bytesToString(size: BigInt): String = {
val EB = 1L << 60
val PB = 1L << 50
val TB = 1L << 40
val GB = 1L << 30
val MB = 1L << 20
val KB = 1L << 10
if (size >= BigInt(1L << 11) * EB) {
// The number is too large, show it in scientific notation.
BigDecimal(size, new MathContext(3, RoundingMode.HALF_UP)).toString() + " B"
} else {
val (value, unit) = {
if (size >= 2 * EB) {
(BigDecimal(size) / EB, "EB")
} else if (size >= 2 * PB) {
(BigDecimal(size) / PB, "PB")
} else if (size >= 2 * TB) {
(BigDecimal(size) / TB, "TB")
} else if (size >= 2 * GB) {
(BigDecimal(size) / GB, "GB")
} else if (size >= 2 * MB) {
(BigDecimal(size) / MB, "MB")
} else if (size >= 2 * KB) {
(BigDecimal(size) / KB, "KB")
} else {
(BigDecimal(size), "B")
}
}
"%.1f %s".formatLocal(Locale.US, value, unit)
}
}
/**
* Returns a human-readable string representing a duration such as "35ms"
*/
def msDurationToString(ms: Long): String = {
val second = 1000
val minute = 60 * second
val hour = 60 * minute
val locale = Locale.US
ms match {
case t if t < second =>
"%d ms".formatLocal(locale, t)
case t if t < minute =>
"%.1f s".formatLocal(locale, t.toFloat / second)
case t if t < hour =>
"%.1f m".formatLocal(locale, t.toFloat / minute)
case t =>
"%.2f h".formatLocal(locale, t.toFloat / hour)
}
}
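  // For instance (illustrative):
  //   bytesToString(4L * 1024 * 1024)  // "4.0 MB"
  //   msDurationToString(35)           // "35 ms"
  //   msDurationToString(90000)        // "1.5 m"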
/**
* Convert a quantity in megabytes to a human-readable string such as "4.0 MB".
*/
def megabytesToString(megabytes: Long): String = {
bytesToString(megabytes * 1024L * 1024L)
}
/**
* Execute a command and return the process running the command.
*/
def executeCommand(
command: Seq[String],
workingDir: File = new File("."),
extraEnvironment: Map[String, String] = Map.empty,
redirectStderr: Boolean = true): Process = {
val builder = new ProcessBuilder(command: _*).directory(workingDir)
val environment = builder.environment()
for ((key, value) <- extraEnvironment) {
environment.put(key, value)
}
val process = builder.start()
if (redirectStderr) {
val threadName = "redirect stderr for command " + command(0)
def log(s: String): Unit = logInfo(s)
processStreamByLine(threadName, process.getErrorStream, log)
}
process
}
/**
* Execute a command and get its output, throwing an exception if it yields a code other than 0.
*/
def executeAndGetOutput(
command: Seq[String],
workingDir: File = new File("."),
extraEnvironment: Map[String, String] = Map.empty,
redirectStderr: Boolean = true): String = {
val process = executeCommand(command, workingDir, extraEnvironment, redirectStderr)
val output = new StringBuilder
val threadName = "read stdout for " + command(0)
    def appendToOutput(s: String): Unit = output.append(s).append("\n")
val stdoutThread = processStreamByLine(threadName, process.getInputStream, appendToOutput)
val exitCode = process.waitFor()
stdoutThread.join() // Wait for it to finish reading output
if (exitCode != 0) {
logError(s"Process $command exited with code $exitCode: $output")
throw new SparkException(s"Process $command exited with code $exitCode")
}
output.toString
}
/**
* Return and start a daemon thread that processes the content of the input stream line by line.
*/
def processStreamByLine(
threadName: String,
inputStream: InputStream,
processLine: String => Unit): Thread = {
val t = new Thread(threadName) {
override def run() {
for (line <- Source.fromInputStream(inputStream).getLines()) {
processLine(line)
}
}
}
t.setDaemon(true)
t.start()
t
}
/**
* Execute a block of code that evaluates to Unit, forwarding any uncaught exceptions to the
* default UncaughtExceptionHandler
*
* NOTE: This method is to be called by the spark-started JVM process.
*/
def tryOrExit(block: => Unit) {
try {
block
} catch {
case e: ControlThrowable => throw e
case t: Throwable => sparkUncaughtExceptionHandler.uncaughtException(t)
}
}
/**
* Execute a block of code that evaluates to Unit, stop SparkContext if there is any uncaught
* exception
*
* NOTE: This method is to be called by the driver-side components to avoid stopping the
* user-started JVM process completely; in contrast, tryOrExit is to be called in the
   * spark-started JVM process.
*/
def tryOrStopSparkContext(sc: SparkContext)(block: => Unit) {
try {
block
} catch {
case e: ControlThrowable => throw e
case t: Throwable =>
val currentThreadName = Thread.currentThread().getName
if (sc != null) {
logError(s"uncaught error in thread $currentThreadName, stopping SparkContext", t)
sc.stopInNewThread()
}
if (!NonFatal(t)) {
logError(s"throw uncaught fatal error in thread $currentThreadName", t)
throw t
}
}
}
/**
* Execute a block of code that returns a value, re-throwing any non-fatal uncaught
* exceptions as IOException. This is used when implementing Externalizable and Serializable's
* read and write methods, since Java's serializer will not report non-IOExceptions properly;
* see SPARK-4080 for more context.
*/
def tryOrIOException[T](block: => T): T = {
try {
block
} catch {
case e: IOException =>
logError("Exception encountered", e)
throw e
case NonFatal(e) =>
logError("Exception encountered", e)
throw new IOException(e)
}
}
/** Executes the given block. Log non-fatal errors if any, and only throw fatal errors */
def tryLogNonFatalError(block: => Unit) {
try {
block
} catch {
case NonFatal(t) =>
logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
}
}
/**
* Execute a block of code, then a finally block, but if exceptions happen in
* the finally block, do not suppress the original exception.
*
* This is primarily an issue with `finally { out.close() }` blocks, where
* close needs to be called to clean up `out`, but if an exception happened
* in `out.write`, it's likely `out` may be corrupted and `out.close` will
* fail as well. This would then suppress the original/likely more meaningful
* exception from the original `out.write` call.
*/
def tryWithSafeFinally[T](block: => T)(finallyBlock: => Unit): T = {
var originalThrowable: Throwable = null
try {
block
} catch {
case t: Throwable =>
// Purposefully not using NonFatal, because even fatal exceptions
// we don't want to have our finallyBlock suppress
originalThrowable = t
throw originalThrowable
} finally {
try {
finallyBlock
} catch {
case t: Throwable if (originalThrowable != null && originalThrowable != t) =>
originalThrowable.addSuppressed(t)
logWarning(s"Suppressing exception in finally: ${t.getMessage}", t)
throw originalThrowable
}
}
}
/**
* Execute a block of code and call the failure callbacks in the catch block. If exceptions occur
* in either the catch or the finally block, they are appended to the list of suppressed
* exceptions in original exception which is then rethrown.
*
* This is primarily an issue with `catch { abort() }` or `finally { out.close() }` blocks,
* where the abort/close needs to be called to clean up `out`, but if an exception happened
* in `out.write`, it's likely `out` may be corrupted and `abort` or `out.close` will
* fail as well. This would then suppress the original/likely more meaningful
* exception from the original `out.write` call.
*/
def tryWithSafeFinallyAndFailureCallbacks[T](block: => T)
(catchBlock: => Unit = (), finallyBlock: => Unit = ()): T = {
var originalThrowable: Throwable = null
try {
block
} catch {
case cause: Throwable =>
// Purposefully not using NonFatal, because even fatal exceptions
// we don't want to have our finallyBlock suppress
originalThrowable = cause
try {
logError("Aborting task", originalThrowable)
TaskContext.get().asInstanceOf[TaskContextImpl].markTaskFailed(originalThrowable)
catchBlock
} catch {
case t: Throwable =>
if (originalThrowable != t) {
originalThrowable.addSuppressed(t)
logWarning(s"Suppressing exception in catch: ${t.getMessage}", t)
}
}
throw originalThrowable
} finally {
try {
finallyBlock
} catch {
case t: Throwable if (originalThrowable != null && originalThrowable != t) =>
originalThrowable.addSuppressed(t)
logWarning(s"Suppressing exception in finally: ${t.getMessage}", t)
throw originalThrowable
}
}
}
/** Default filtering function for finding call sites using `getCallSite`. */
private def sparkInternalExclusionFunction(className: String): Boolean = {
    // A regular expression to match classes of the internal Spark APIs
// that we want to skip when finding the call site of a method.
val SPARK_CORE_CLASS_REGEX =
"""^org\\.apache\\.spark(\\.api\\.java)?(\\.util)?(\\.rdd)?(\\.broadcast)?\\.[A-Z]""".r
val SPARK_SQL_CLASS_REGEX = """^org\\.apache\\.spark\\.sql.*""".r
val SCALA_CORE_CLASS_PREFIX = "scala"
val isSparkClass = SPARK_CORE_CLASS_REGEX.findFirstIn(className).isDefined ||
SPARK_SQL_CLASS_REGEX.findFirstIn(className).isDefined
val isScalaClass = className.startsWith(SCALA_CORE_CLASS_PREFIX)
// If the class is a Spark internal class or a Scala class, then exclude.
isSparkClass || isScalaClass
}
/**
* When called inside a class in the spark package, returns the name of the user code class
* (outside the spark package) that called into Spark, as well as which Spark method they called.
* This is used, for example, to tell users where in their code each RDD got created.
*
* @param skipClass Function that is used to exclude non-user-code classes.
*/
def getCallSite(skipClass: String => Boolean = sparkInternalExclusionFunction): CallSite = {
// Keep crawling up the stack trace until we find the first function not inside of the spark
// package. We track the last (shallowest) contiguous Spark method. This might be an RDD
// transformation, a SparkContext function (such as parallelize), or anything else that leads
// to instantiation of an RDD. We also track the first (deepest) user method, file, and line.
var lastSparkMethod = "<unknown>"
var firstUserFile = "<unknown>"
var firstUserLine = 0
var insideSpark = true
val callStack = new ArrayBuffer[String]() :+ "<unknown>"
Thread.currentThread.getStackTrace().foreach { ste: StackTraceElement =>
// When running under some profilers, the current stack trace might contain some bogus
// frames. This is intended to ensure that we don't crash in these situations by
// ignoring any frames that we can't examine.
if (ste != null && ste.getMethodName != null
&& !ste.getMethodName.contains("getStackTrace")) {
if (insideSpark) {
if (skipClass(ste.getClassName)) {
lastSparkMethod = if (ste.getMethodName == "<init>") {
// Spark method is a constructor; get its class name
ste.getClassName.substring(ste.getClassName.lastIndexOf('.') + 1)
} else {
ste.getMethodName
}
callStack(0) = ste.toString // Put last Spark method on top of the stack trace.
} else {
if (ste.getFileName != null) {
firstUserFile = ste.getFileName
if (ste.getLineNumber >= 0) {
firstUserLine = ste.getLineNumber
}
}
callStack += ste.toString
insideSpark = false
}
} else {
callStack += ste.toString
}
}
}
val callStackDepth = System.getProperty("spark.callstack.depth", "20").toInt
val shortForm =
if (firstUserFile == "HiveSessionImpl.java") {
// To be more user friendly, show a nicer string for queries submitted from the JDBC
// server.
"Spark JDBC Server Query"
} else {
s"$lastSparkMethod at $firstUserFile:$firstUserLine"
}
    val longForm = callStack.take(callStackDepth).mkString("\n")
CallSite(shortForm, longForm)
}
private val UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE_CONF =
"spark.worker.ui.compressedLogFileLengthCacheSize"
private val DEFAULT_UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE = 100
private var compressedLogFileLengthCache: LoadingCache[String, java.lang.Long] = null
private def getCompressedLogFileLengthCache(
sparkConf: SparkConf): LoadingCache[String, java.lang.Long] = this.synchronized {
if (compressedLogFileLengthCache == null) {
val compressedLogFileLengthCacheSize = sparkConf.getInt(
UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE_CONF,
DEFAULT_UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE)
compressedLogFileLengthCache = CacheBuilder.newBuilder()
.maximumSize(compressedLogFileLengthCacheSize)
.build[String, java.lang.Long](new CacheLoader[String, java.lang.Long]() {
override def load(path: String): java.lang.Long = {
Utils.getCompressedFileLength(new File(path))
}
})
}
compressedLogFileLengthCache
}
/**
   * Return the file length; if the file is compressed, return the uncompressed file length.
   * It also caches the uncompressed file size to avoid repeated decompression. The cache size is
   * read from `workConf`.
*/
def getFileLength(file: File, workConf: SparkConf): Long = {
if (file.getName.endsWith(".gz")) {
getCompressedLogFileLengthCache(workConf).get(file.getAbsolutePath)
} else {
file.length
}
}
/** Return uncompressed file length of a compressed file. */
private def getCompressedFileLength(file: File): Long = {
var gzInputStream: GZIPInputStream = null
try {
// Uncompress .gz file to determine file size.
var fileSize = 0L
gzInputStream = new GZIPInputStream(new FileInputStream(file))
val bufSize = 1024
val buf = new Array[Byte](bufSize)
var numBytes = ByteStreams.read(gzInputStream, buf, 0, bufSize)
while (numBytes > 0) {
fileSize += numBytes
numBytes = ByteStreams.read(gzInputStream, buf, 0, bufSize)
}
fileSize
} catch {
case e: Throwable =>
logError(s"Cannot get file length of ${file}", e)
throw e
} finally {
if (gzInputStream != null) {
gzInputStream.close()
}
}
}
/** Return a string containing part of a file from byte 'start' to 'end'. */
def offsetBytes(path: String, length: Long, start: Long, end: Long): String = {
val file = new File(path)
val effectiveEnd = math.min(length, end)
val effectiveStart = math.max(0, start)
val buff = new Array[Byte]((effectiveEnd-effectiveStart).toInt)
val stream = if (path.endsWith(".gz")) {
new GZIPInputStream(new FileInputStream(file))
} else {
new FileInputStream(file)
}
try {
ByteStreams.skipFully(stream, effectiveStart)
ByteStreams.readFully(stream, buff)
} finally {
stream.close()
}
Source.fromBytes(buff).mkString
}
/**
   * Return a string containing data across a set of files. The `startIndex`
   * and `endIndex` are based on the cumulative size of all the files taken in
   * the given order. See the figure below for more details.
*/
def offsetBytes(files: Seq[File], fileLengths: Seq[Long], start: Long, end: Long): String = {
assert(files.length == fileLengths.length)
val startIndex = math.max(start, 0)
val endIndex = math.min(end, fileLengths.sum)
val fileToLength = files.zip(fileLengths).toMap
logDebug("Log files: \\n" + fileToLength.mkString("\\n"))
val stringBuffer = new StringBuffer((endIndex - startIndex).toInt)
var sum = 0L
files.zip(fileLengths).foreach { case (file, fileLength) =>
val startIndexOfFile = sum
val endIndexOfFile = sum + fileToLength(file)
logDebug(s"Processing file $file, " +
s"with start index = $startIndexOfFile, end index = $endIndex")
/*
____________
range 1: | |
| case A |
files: |==== file 1 ====|====== file 2 ======|===== file 3 =====|
| case B . case C . case D |
range 2: |___________.____________________.______________|
*/
if (startIndex <= startIndexOfFile && endIndex >= endIndexOfFile) {
// Case C: read the whole file
stringBuffer.append(offsetBytes(file.getAbsolutePath, fileLength, 0, fileToLength(file)))
} else if (startIndex > startIndexOfFile && startIndex < endIndexOfFile) {
// Case A and B: read from [start of required range] to [end of file / end of range]
val effectiveStartIndex = startIndex - startIndexOfFile
val effectiveEndIndex = math.min(endIndex - startIndexOfFile, fileToLength(file))
stringBuffer.append(Utils.offsetBytes(
file.getAbsolutePath, fileLength, effectiveStartIndex, effectiveEndIndex))
} else if (endIndex > startIndexOfFile && endIndex < endIndexOfFile) {
        // Case D: read from [start of file] to [end of required range]
val effectiveStartIndex = math.max(startIndex - startIndexOfFile, 0)
val effectiveEndIndex = endIndex - startIndexOfFile
stringBuffer.append(Utils.offsetBytes(
file.getAbsolutePath, fileLength, effectiveStartIndex, effectiveEndIndex))
}
sum += fileToLength(file)
logDebug(s"After processing file $file, string built is ${stringBuffer.toString}")
}
stringBuffer.toString
}
/**
* Clone an object using a Spark serializer.
*/
def clone[T: ClassTag](value: T, serializer: SerializerInstance): T = {
serializer.deserialize[T](serializer.serialize(value))
}
private def isSpace(c: Char): Boolean = {
" \\t\\r\\n".indexOf(c) != -1
}
/**
* Split a string of potentially quoted arguments from the command line the way that a shell
* would do it to determine arguments to a command. For example, if the string is 'a "b c" d',
* then it would be parsed as three arguments: 'a', 'b c' and 'd'.
*/
def splitCommandString(s: String): Seq[String] = {
val buf = new ArrayBuffer[String]
var inWord = false
var inSingleQuote = false
var inDoubleQuote = false
val curWord = new StringBuilder
def endWord() {
buf += curWord.toString
curWord.clear()
}
var i = 0
while (i < s.length) {
val nextChar = s.charAt(i)
if (inDoubleQuote) {
if (nextChar == '"') {
inDoubleQuote = false
        } else if (nextChar == '\\') {
if (i < s.length - 1) {
            // Append the next character directly, because only " and \ may be escaped in
// double quotes after the shell's own expansion
curWord.append(s.charAt(i + 1))
i += 1
}
} else {
curWord.append(nextChar)
}
} else if (inSingleQuote) {
        if (nextChar == '\'') {
inSingleQuote = false
} else {
curWord.append(nextChar)
}
// Backslashes are not treated specially in single quotes
} else if (nextChar == '"') {
inWord = true
inDoubleQuote = true
      } else if (nextChar == '\'') {
inWord = true
inSingleQuote = true
} else if (!isSpace(nextChar)) {
curWord.append(nextChar)
inWord = true
} else if (inWord && isSpace(nextChar)) {
endWord()
inWord = false
}
i += 1
}
if (inWord || inDoubleQuote || inSingleQuote) {
endWord()
}
buf
}
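  // Illustrative usage sketch (added for clarity, not part of the original source): quoted
  // segments are kept as single arguments, mirroring shell word splitting.
  //   Utils.splitCommandString("a \"b c\" d")  // Seq("a", "b c", "d")
  //   Utils.splitCommandString("x 'y z'")      // Seq("x", "y z")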
  /* Calculates 'x' modulo 'mod', taking the sign of x into consideration,
   * i.e. if 'x' is negative, then 'x' % 'mod' is negative too,
   * so this function returns (x % mod) + mod in that case.
   */
def nonNegativeMod(x: Int, mod: Int): Int = {
val rawMod = x % mod
rawMod + (if (rawMod < 0) mod else 0)
}
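  // Illustrative usage sketch (added for clarity, not part of the original source):
  //   Utils.nonNegativeMod(7, 5)   // 2
  //   Utils.nonNegativeMod(-7, 5)  // 3 (plain -7 % 5 would give -2)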
// Handles idiosyncrasies with hash (add more as required)
// This method should be kept in sync with
// org.apache.spark.network.util.JavaUtils#nonNegativeHash().
def nonNegativeHash(obj: AnyRef): Int = {
// Required ?
if (obj eq null) return 0
val hash = obj.hashCode
// math.abs fails for Int.MinValue
val hashAbs = if (Int.MinValue != hash) math.abs(hash) else 0
// Nothing else to guard against ?
hashAbs
}
/**
* NaN-safe version of `java.lang.Double.compare()` which allows NaN values to be compared
* according to semantics where NaN == NaN and NaN is greater than any non-NaN double.
*/
def nanSafeCompareDoubles(x: Double, y: Double): Int = {
val xIsNan: Boolean = java.lang.Double.isNaN(x)
val yIsNan: Boolean = java.lang.Double.isNaN(y)
if ((xIsNan && yIsNan) || (x == y)) 0
else if (xIsNan) 1
else if (yIsNan) -1
else if (x > y) 1
else -1
}
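  // Illustrative usage sketch (added for clarity, not part of the original source): NaN compares
  // equal to NaN and greater than any other double.
  //   Utils.nanSafeCompareDoubles(Double.NaN, Double.NaN)  // 0
  //   Utils.nanSafeCompareDoubles(Double.NaN, 1.0)         // 1
  //   Utils.nanSafeCompareDoubles(1.0, Double.NaN)         // -1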
/**
* NaN-safe version of `java.lang.Float.compare()` which allows NaN values to be compared
* according to semantics where NaN == NaN and NaN is greater than any non-NaN float.
*/
def nanSafeCompareFloats(x: Float, y: Float): Int = {
val xIsNan: Boolean = java.lang.Float.isNaN(x)
val yIsNan: Boolean = java.lang.Float.isNaN(y)
if ((xIsNan && yIsNan) || (x == y)) 0
else if (xIsNan) 1
else if (yIsNan) -1
else if (x > y) 1
else -1
}
/**
   * Returns the system properties map that is thread-safe to iterate over. It gets the
* properties which have been set explicitly, as well as those for which only a default value
* has been defined.
*/
def getSystemProperties: Map[String, String] = {
System.getProperties.stringPropertyNames().asScala
.map(key => (key, System.getProperty(key))).toMap
}
/**
* Method executed for repeating a task for side effects.
* Unlike a for comprehension, it permits JVM JIT optimization
*/
def times(numIters: Int)(f: => Unit): Unit = {
var i = 0
while (i < numIters) {
f
i += 1
}
}
/**
* Timing method based on iterations that permit JVM JIT optimization.
*
* @param numIters number of iterations
* @param f function to be executed. If prepare is not None, the running time of each call to f
* must be an order of magnitude longer than one millisecond for accurate timing.
* @param prepare function to be executed before each call to f. Its running time doesn't count.
* @return the total time across all iterations (not counting preparation time)
*/
def timeIt(numIters: Int)(f: => Unit, prepare: Option[() => Unit] = None): Long = {
if (prepare.isEmpty) {
val start = System.currentTimeMillis
times(numIters)(f)
System.currentTimeMillis - start
} else {
var i = 0
var sum = 0L
while (i < numIters) {
prepare.get.apply()
val start = System.currentTimeMillis
f
sum += System.currentTimeMillis - start
i += 1
}
sum
}
}
/**
* Counts the number of elements of an iterator using a while loop rather than calling
* [[scala.collection.Iterator#size]] because it uses a for loop, which is slightly slower
* in the current version of Scala.
*/
def getIteratorSize[T](iterator: Iterator[T]): Long = {
var count = 0L
while (iterator.hasNext) {
count += 1L
iterator.next()
}
count
}
/**
   * Generate a zipWithIndex iterator, avoiding the index overflow problem of
   * Scala's zipWithIndex, which uses an Int index.
*/
def getIteratorZipWithIndex[T](iterator: Iterator[T], startIndex: Long): Iterator[(T, Long)] = {
new Iterator[(T, Long)] {
require(startIndex >= 0, "startIndex should be >= 0.")
var index: Long = startIndex - 1L
def hasNext: Boolean = iterator.hasNext
def next(): (T, Long) = {
index += 1L
(iterator.next(), index)
}
}
}
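  // Illustrative usage sketch (added for clarity, not part of the original source): unlike
  // Iterator.zipWithIndex, the index is a Long and can start past Int.MaxValue.
  //   Utils.getIteratorZipWithIndex(Iterator("a", "b"), 5L).toList
  //   // List(("a", 5L), ("b", 6L))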
/**
* Creates a symlink.
*
* @param src absolute path to the source
* @param dst relative path for the destination
*/
def symlink(src: File, dst: File): Unit = {
if (!src.isAbsolute()) {
throw new IOException("Source must be absolute")
}
if (dst.isAbsolute()) {
throw new IOException("Destination must be relative")
}
Files.createSymbolicLink(dst.toPath, src.toPath)
}
/** Return the class name of the given object, removing all dollar signs */
def getFormattedClassName(obj: AnyRef): String = {
obj.getClass.getSimpleName.replace("$", "")
}
/** Return an option that translates JNothing to None */
def jsonOption(json: JValue): Option[JValue] = {
json match {
case JNothing => None
case value: JValue => Some(value)
}
}
/** Return an empty JSON object */
def emptyJson: JsonAST.JObject = JObject(List[JField]())
/**
* Return a Hadoop FileSystem with the scheme encoded in the given path.
*/
def getHadoopFileSystem(path: URI, conf: Configuration): FileSystem = {
FileSystem.get(path, conf)
}
/**
* Return a Hadoop FileSystem with the scheme encoded in the given path.
*/
def getHadoopFileSystem(path: String, conf: Configuration): FileSystem = {
getHadoopFileSystem(new URI(path), conf)
}
/**
* Return the absolute path of a file in the given directory.
*/
def getFilePath(dir: File, fileName: String): Path = {
assert(dir.isDirectory)
val path = new File(dir, fileName).getAbsolutePath
new Path(path)
}
/**
* Whether the underlying operating system is Windows.
*/
val isWindows = SystemUtils.IS_OS_WINDOWS
/**
* Whether the underlying operating system is Mac OS X.
*/
val isMac = SystemUtils.IS_OS_MAC_OSX
/**
* Pattern for matching a Windows drive, which contains only a single alphabet character.
*/
val windowsDrive = "([a-zA-Z])".r
/**
* Indicates whether Spark is currently running unit tests.
*/
def isTesting: Boolean = {
sys.env.contains("SPARK_TESTING") || sys.props.contains("spark.testing")
}
/**
* Strip the directory from a path name
*/
def stripDirectory(path: String): String = {
new File(path).getName
}
/**
* Terminates a process waiting for at most the specified duration.
*
* @return the process exit value if it was successfully terminated, else None
*/
def terminateProcess(process: Process, timeoutMs: Long): Option[Int] = {
// Politely destroy first
process.destroy()
if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
// Successful exit
Option(process.exitValue())
} else {
try {
process.destroyForcibly()
} catch {
case NonFatal(e) => logWarning("Exception when attempting to kill process", e)
}
// Wait, again, although this really should return almost immediately
if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
Option(process.exitValue())
} else {
logWarning("Timed out waiting to forcibly kill process")
None
}
}
}
/**
* Return the stderr of a process after waiting for the process to terminate.
* If the process does not terminate within the specified timeout, return None.
*/
def getStderr(process: Process, timeoutMs: Long): Option[String] = {
val terminated = process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)
if (terminated) {
      Some(Source.fromInputStream(process.getErrorStream).getLines().mkString("\n"))
} else {
None
}
}
/**
* Execute the given block, logging and re-throwing any uncaught exception.
* This is particularly useful for wrapping code that runs in a thread, to ensure
* that exceptions are printed, and to avoid having to catch Throwable.
*/
def logUncaughtExceptions[T](f: => T): T = {
try {
f
} catch {
case ct: ControlThrowable =>
throw ct
case t: Throwable =>
logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
throw t
}
}
/** Executes the given block in a Try, logging any uncaught exceptions. */
def tryLog[T](f: => T): Try[T] = {
try {
val res = f
scala.util.Success(res)
} catch {
case ct: ControlThrowable =>
throw ct
case t: Throwable =>
logError(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
scala.util.Failure(t)
}
}
/** Returns true if the given exception was fatal. See docs for scala.util.control.NonFatal. */
def isFatalError(e: Throwable): Boolean = {
e match {
case NonFatal(_) |
_: InterruptedException |
_: NotImplementedError |
_: ControlThrowable |
_: LinkageError =>
false
case _ =>
true
}
}
/**
* Return a well-formed URI for the file described by a user input string.
*
* If the supplied path does not contain a scheme, or is a relative path, it will be
* converted into an absolute path with a file:// scheme.
*/
def resolveURI(path: String): URI = {
try {
val uri = new URI(path)
if (uri.getScheme() != null) {
return uri
}
// make sure to handle if the path has a fragment (applies to yarn
// distributed cache)
if (uri.getFragment() != null) {
val absoluteURI = new File(uri.getPath()).getAbsoluteFile().toURI()
return new URI(absoluteURI.getScheme(), absoluteURI.getHost(), absoluteURI.getPath(),
uri.getFragment())
}
} catch {
case e: URISyntaxException =>
}
new File(path).getAbsoluteFile().toURI()
}
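  // Illustrative usage sketch (added for clarity, not part of the original source); the second
  // result depends on the current working directory.
  //   Utils.resolveURI("hdfs://nn:8020/jars/app.jar")  // kept as-is, scheme already present
  //   Utils.resolveURI("app.jar")                      // file:/<current-working-dir>/app.jar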
/** Resolve a comma-separated list of paths. */
def resolveURIs(paths: String): String = {
if (paths == null || paths.trim.isEmpty) {
""
} else {
paths.split(",").filter(_.trim.nonEmpty).map { p => Utils.resolveURI(p) }.mkString(",")
}
}
/** Return all non-local paths from a comma-separated list of paths. */
def nonLocalPaths(paths: String, testWindows: Boolean = false): Array[String] = {
val windows = isWindows || testWindows
if (paths == null || paths.trim.isEmpty) {
Array.empty
} else {
paths.split(",").filter { p =>
val uri = resolveURI(p)
Option(uri.getScheme).getOrElse("file") match {
case windowsDrive(d) if windows => false
case "local" | "file" => false
case _ => true
}
}
}
}
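  // Illustrative usage sketch (added for clarity, not part of the original source): "file" and
  // "local" URIs are filtered out, everything else is kept.
  //   Utils.nonLocalPaths("file:/tmp/a.txt,hdfs://nn:8020/b.txt,local:/opt/c.txt")
  //   // Array("hdfs://nn:8020/b.txt")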
/**
* Load default Spark properties from the given file. If no file is provided,
* use the common defaults file. This mutates state in the given SparkConf and
* in this JVM's system properties if the config specified in the file is not
* already set. Return the path of the properties file used.
*/
def loadDefaultSparkProperties(conf: SparkConf, filePath: String = null): String = {
val path = Option(filePath).getOrElse(getDefaultPropertiesFile())
Option(path).foreach { confFile =>
getPropertiesFromFile(confFile).filter { case (k, v) =>
k.startsWith("spark.")
}.foreach { case (k, v) =>
conf.setIfMissing(k, v)
sys.props.getOrElseUpdate(k, v)
}
}
path
}
/**
* Updates Spark config with properties from a set of Properties.
* Provided properties have the highest priority.
*/
def updateSparkConfigFromProperties(
conf: SparkConf,
properties: Map[String, String]) : Unit = {
properties.filter { case (k, v) =>
k.startsWith("spark.")
}.foreach { case (k, v) =>
conf.set(k, v)
}
}
/** Load properties present in the given file. */
def getPropertiesFromFile(filename: String): Map[String, String] = {
val file = new File(filename)
require(file.exists(), s"Properties file $file does not exist")
require(file.isFile(), s"Properties file $file is not a normal file")
val inReader = new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8)
try {
val properties = new Properties()
properties.load(inReader)
properties.stringPropertyNames().asScala.map(
k => (k, properties.getProperty(k).trim)).toMap
} catch {
case e: IOException =>
throw new SparkException(s"Failed when loading Spark properties from $filename", e)
} finally {
inReader.close()
}
}
/** Return the path of the default Spark properties file. */
def getDefaultPropertiesFile(env: Map[String, String] = sys.env): String = {
env.get("SPARK_CONF_DIR")
.orElse(env.get("SPARK_HOME").map { t => s"$t${File.separator}conf" })
.map { t => new File(s"$t${File.separator}spark-defaults.conf")}
.filter(_.isFile)
.map(_.getAbsolutePath)
.orNull
}
/**
* Return a nice string representation of the exception. It will call "printStackTrace" to
* recursively generate the stack trace including the exception and its causes.
*/
def exceptionString(e: Throwable): String = {
if (e == null) {
""
} else {
// Use e.printStackTrace here because e.getStackTrace doesn't include the cause
val stringWriter = new StringWriter()
e.printStackTrace(new PrintWriter(stringWriter))
stringWriter.toString
}
}
private implicit class Lock(lock: LockInfo) {
def lockString: String = {
lock match {
case monitor: MonitorInfo =>
s"Monitor(${lock.getClassName}@${lock.getIdentityHashCode}})"
case _ =>
s"Lock(${lock.getClassName}@${lock.getIdentityHashCode}})"
}
}
}
/** Return a thread dump of all threads' stacktraces. Used to capture dumps for the web UI */
def getThreadDump(): Array[ThreadStackTrace] = {
// We need to filter out null values here because dumpAllThreads() may return null array
// elements for threads that are dead / don't exist.
val threadInfos = ManagementFactory.getThreadMXBean.dumpAllThreads(true, true).filter(_ != null)
threadInfos.sortBy(_.getThreadId).map(threadInfoToThreadStackTrace)
}
def getThreadDumpForThread(threadId: Long): Option[ThreadStackTrace] = {
if (threadId <= 0) {
None
} else {
// The Int.MaxValue here requests the entire untruncated stack trace of the thread:
val threadInfo =
Option(ManagementFactory.getThreadMXBean.getThreadInfo(threadId, Int.MaxValue))
threadInfo.map(threadInfoToThreadStackTrace)
}
}
private def threadInfoToThreadStackTrace(threadInfo: ThreadInfo): ThreadStackTrace = {
val monitors = threadInfo.getLockedMonitors.map(m => m.getLockedStackFrame -> m).toMap
val stackTrace = threadInfo.getStackTrace.map { frame =>
monitors.get(frame) match {
case Some(monitor) =>
monitor.getLockedStackFrame.toString + s" => holding ${monitor.lockString}"
case None =>
frame.toString
}
    }.mkString("\n")
// use a set to dedup re-entrant locks that are held at multiple places
val heldLocks =
(threadInfo.getLockedSynchronizers ++ threadInfo.getLockedMonitors).map(_.lockString).toSet
ThreadStackTrace(
threadId = threadInfo.getThreadId,
threadName = threadInfo.getThreadName,
threadState = threadInfo.getThreadState,
stackTrace = stackTrace,
blockedByThreadId =
if (threadInfo.getLockOwnerId < 0) None else Some(threadInfo.getLockOwnerId),
blockedByLock = Option(threadInfo.getLockInfo).map(_.lockString).getOrElse(""),
holdingLocks = heldLocks.toSeq)
}
/**
* Convert all spark properties set in the given SparkConf to a sequence of java options.
*/
def sparkJavaOpts(conf: SparkConf, filterKey: (String => Boolean) = _ => true): Seq[String] = {
conf.getAll
.filter { case (k, _) => filterKey(k) }
.map { case (k, v) => s"-D$k=$v" }
}
/**
* Maximum number of retries when binding to a port before giving up.
*/
def portMaxRetries(conf: SparkConf): Int = {
val maxRetries = conf.getOption("spark.port.maxRetries").map(_.toInt)
if (conf.contains("spark.testing")) {
// Set a higher number of retries for tests...
maxRetries.getOrElse(100)
} else {
maxRetries.getOrElse(16)
}
}
/**
* Returns the user port to try when trying to bind a service. Handles wrapping and skipping
* privileged ports.
*/
def userPort(base: Int, offset: Int): Int = {
(base + offset - 1024) % (65536 - 1024) + 1024
}
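  // Illustrative usage sketch (added for clarity, not part of the original source): ports wrap
  // around past 65535 and never fall back into the privileged range below 1024.
  //   Utils.userPort(8080, 3)   // 8083
  //   Utils.userPort(65535, 1)  // 1024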
/**
* Attempt to start a service on the given port, or fail after a number of attempts.
* Each subsequent attempt uses 1 + the port used in the previous attempt (unless the port is 0).
*
* @param startPort The initial port to start the service on.
* @param startService Function to start service on a given port.
* This is expected to throw java.net.BindException on port collision.
* @param conf A SparkConf used to get the maximum number of retries when binding to a port.
* @param serviceName Name of the service.
* @return (service: T, port: Int)
*/
def startServiceOnPort[T](
startPort: Int,
startService: Int => (T, Int),
conf: SparkConf,
serviceName: String = ""): (T, Int) = {
require(startPort == 0 || (1024 <= startPort && startPort < 65536),
"startPort should be between 1024 and 65535 (inclusive), or 0 for a random free port.")
val serviceString = if (serviceName.isEmpty) "" else s" '$serviceName'"
val maxRetries = portMaxRetries(conf)
for (offset <- 0 to maxRetries) {
// Do not increment port if startPort is 0, which is treated as a special port
val tryPort = if (startPort == 0) {
startPort
} else {
userPort(startPort, offset)
}
try {
val (service, port) = startService(tryPort)
logInfo(s"Successfully started service$serviceString on port $port.")
return (service, port)
} catch {
case e: Exception if isBindCollision(e) =>
if (offset >= maxRetries) {
val exceptionMessage = if (startPort == 0) {
s"${e.getMessage}: Service$serviceString failed after " +
s"$maxRetries retries (on a random free port)! " +
s"Consider explicitly setting the appropriate binding address for " +
s"the service$serviceString (for example spark.driver.bindAddress " +
s"for SparkDriver) to the correct binding address."
} else {
s"${e.getMessage}: Service$serviceString failed after " +
s"$maxRetries retries (starting from $startPort)! Consider explicitly setting " +
s"the appropriate port for the service$serviceString (for example spark.ui.port " +
s"for SparkUI) to an available port or increasing spark.port.maxRetries."
}
val exception = new BindException(exceptionMessage)
// restore original stack trace
exception.setStackTrace(e.getStackTrace)
throw exception
}
if (startPort == 0) {
            // As startPort 0 requests a random free port, the most likely cause is that the
            // binding address is not correct.
            logWarning(s"Service$serviceString could not bind on a random free port. " +
              "You may want to check whether an appropriate binding address is configured.")
} else {
logWarning(s"Service$serviceString could not bind on port $tryPort. " +
s"Attempting port ${tryPort + 1}.")
}
}
}
// Should never happen
throw new SparkException(s"Failed to start service$serviceString on port $startPort")
}
/**
* Return whether the exception is caused by an address-port collision when binding.
*/
def isBindCollision(exception: Throwable): Boolean = {
exception match {
case e: BindException =>
if (e.getMessage != null) {
return true
}
isBindCollision(e.getCause)
case e: MultiException =>
e.getThrowables.asScala.exists(isBindCollision)
case e: NativeIoException =>
(e.getMessage != null && e.getMessage.startsWith("bind() failed: ")) ||
isBindCollision(e.getCause)
case e: Exception => isBindCollision(e.getCause)
case _ => false
}
}
/**
   * Configure a new log4j level.
*/
def setLogLevel(l: org.apache.log4j.Level) {
org.apache.log4j.Logger.getRootLogger().setLevel(l)
}
/**
   * Configure log4j properties used for the test suite.
*/
def configTestLog4j(level: String): Unit = {
val pro = new Properties()
pro.put("log4j.rootLogger", s"$level, console")
pro.put("log4j.appender.console", "org.apache.log4j.ConsoleAppender")
pro.put("log4j.appender.console.target", "System.err")
pro.put("log4j.appender.console.layout", "org.apache.log4j.PatternLayout")
pro.put("log4j.appender.console.layout.ConversionPattern",
"%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n")
PropertyConfigurator.configure(pro)
}
/**
* If the given URL connection is HttpsURLConnection, it sets the SSL socket factory and
* the host verifier from the given security manager.
*/
def setupSecureURLConnection(urlConnection: URLConnection, sm: SecurityManager): URLConnection = {
urlConnection match {
case https: HttpsURLConnection =>
sm.sslSocketFactory.foreach(https.setSSLSocketFactory)
sm.hostnameVerifier.foreach(https.setHostnameVerifier)
https
case connection => connection
}
}
def invoke(
clazz: Class[_],
obj: AnyRef,
methodName: String,
args: (Class[_], AnyRef)*): AnyRef = {
val (types, values) = args.unzip
val method = clazz.getDeclaredMethod(methodName, types: _*)
method.setAccessible(true)
method.invoke(obj, values.toSeq: _*)
}
// Limit of bytes for total size of results (default is 1GB)
def getMaxResultSize(conf: SparkConf): Long = {
memoryStringToMb(conf.get("spark.driver.maxResultSize", "1g")).toLong << 20
}
/**
* Return the current system LD_LIBRARY_PATH name
*/
def libraryPathEnvName: String = {
if (isWindows) {
"PATH"
} else if (isMac) {
"DYLD_LIBRARY_PATH"
} else {
"LD_LIBRARY_PATH"
}
}
/**
* Return the prefix of a command that appends the given library paths to the
* system-specific library path environment variable. On Unix, for instance,
* this returns the string LD_LIBRARY_PATH="path1:path2:$LD_LIBRARY_PATH".
*/
def libraryPathEnvPrefix(libraryPaths: Seq[String]): String = {
val libraryPathScriptVar = if (isWindows) {
s"%${libraryPathEnvName}%"
} else {
"$" + libraryPathEnvName
}
    val libraryPath = (libraryPaths :+ libraryPathScriptVar).mkString("\"",
      File.pathSeparator, "\"")
val ampersand = if (Utils.isWindows) {
" &"
} else {
""
}
s"$libraryPathEnvName=$libraryPath$ampersand"
}
/**
* Return the value of a config either through the SparkConf or the Hadoop configuration
* if this is Yarn mode. In the latter case, this defaults to the value set through SparkConf
* if the key is not set in the Hadoop configuration.
*/
def getSparkOrYarnConfig(conf: SparkConf, key: String, default: String): String = {
val sparkValue = conf.get(key, default)
if (conf.get(SparkLauncher.SPARK_MASTER, null) == "yarn") {
new YarnConfiguration(SparkHadoopUtil.get.newConfiguration(conf)).get(key, sparkValue)
} else {
sparkValue
}
}
/**
* Return a pair of host and port extracted from the `sparkUrl`.
*
   * A spark url (`spark://host:port`) is a special URI whose scheme is `spark` and which
   * contains only a host and a port.
*
* @throws org.apache.spark.SparkException if sparkUrl is invalid.
*/
@throws(classOf[SparkException])
def extractHostPortFromSparkUrl(sparkUrl: String): (String, Int) = {
try {
val uri = new java.net.URI(sparkUrl)
val host = uri.getHost
val port = uri.getPort
if (uri.getScheme != "spark" ||
host == null ||
port < 0 ||
(uri.getPath != null && !uri.getPath.isEmpty) || // uri.getPath returns "" instead of null
uri.getFragment != null ||
uri.getQuery != null ||
uri.getUserInfo != null) {
throw new SparkException("Invalid master URL: " + sparkUrl)
}
(host, port)
} catch {
case e: java.net.URISyntaxException =>
throw new SparkException("Invalid master URL: " + sparkUrl, e)
}
}
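  // Illustrative usage sketch (added for clarity, not part of the original source):
  //   Utils.extractHostPortFromSparkUrl("spark://master:7077")  // ("master", 7077)
  //   Utils.extractHostPortFromSparkUrl("spark://master")       // throws SparkException (no port)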
/**
* Returns the current user name. This is the currently logged in user, unless that's been
* overridden by the `SPARK_USER` environment variable.
*/
def getCurrentUserName(): String = {
Option(System.getenv("SPARK_USER"))
.getOrElse(UserGroupInformation.getCurrentUser().getShortUserName())
}
val EMPTY_USER_GROUPS = Set.empty[String]
// Returns the groups to which the current user belongs.
def getCurrentUserGroups(sparkConf: SparkConf, username: String): Set[String] = {
val groupProviderClassName = sparkConf.get("spark.user.groups.mapping",
"org.apache.spark.security.ShellBasedGroupsMappingProvider")
if (groupProviderClassName != "") {
try {
val groupMappingServiceProvider = classForName(groupProviderClassName).newInstance.
asInstanceOf[org.apache.spark.security.GroupMappingServiceProvider]
val currentUserGroups = groupMappingServiceProvider.getGroups(username)
return currentUserGroups
} catch {
case e: Exception => logError(s"Error getting groups for user=$username", e)
}
}
EMPTY_USER_GROUPS
}
/**
* Split the comma delimited string of master URLs into a list.
* For instance, "spark://abc,def" becomes [spark://abc, spark://def].
*/
def parseStandaloneMasterUrls(masterUrls: String): Array[String] = {
masterUrls.stripPrefix("spark://").split(",").map("spark://" + _)
}
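  // Illustrative usage sketch (added for clarity, not part of the original source):
  //   Utils.parseStandaloneMasterUrls("spark://host1:7077,host2:7077")
  //   // Array("spark://host1:7077", "spark://host2:7077")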
/** An identifier that backup masters use in their responses. */
val BACKUP_STANDALONE_MASTER_PREFIX = "Current state is not alive"
/** Return true if the response message is sent from a backup Master on standby. */
def responseFromBackup(msg: String): Boolean = {
msg.startsWith(BACKUP_STANDALONE_MASTER_PREFIX)
}
/**
* To avoid calling `Utils.getCallSite` for every single RDD we create in the body,
* set a dummy call site that RDDs use instead. This is for performance optimization.
*/
def withDummyCallSite[T](sc: SparkContext)(body: => T): T = {
val oldShortCallSite = sc.getLocalProperty(CallSite.SHORT_FORM)
val oldLongCallSite = sc.getLocalProperty(CallSite.LONG_FORM)
try {
sc.setLocalProperty(CallSite.SHORT_FORM, "")
sc.setLocalProperty(CallSite.LONG_FORM, "")
body
} finally {
// Restore the old ones here
sc.setLocalProperty(CallSite.SHORT_FORM, oldShortCallSite)
sc.setLocalProperty(CallSite.LONG_FORM, oldLongCallSite)
}
}
/**
* Return whether the specified file is a parent directory of the child file.
*/
@tailrec
def isInDirectory(parent: File, child: File): Boolean = {
if (child == null || parent == null) {
return false
}
if (!child.exists() || !parent.exists() || !parent.isDirectory()) {
return false
}
if (parent.equals(child)) {
return true
}
isInDirectory(parent, child.getParentFile)
}
/**
*
* @return whether it is local mode
*/
def isLocalMaster(conf: SparkConf): Boolean = {
val master = conf.get("spark.master", "")
master == "local" || master.startsWith("local[")
}
/**
* Return whether dynamic allocation is enabled in the given conf.
*/
def isDynamicAllocationEnabled(conf: SparkConf): Boolean = {
val dynamicAllocationEnabled = conf.getBoolean("spark.dynamicAllocation.enabled", false)
dynamicAllocationEnabled &&
(!isLocalMaster(conf) || conf.getBoolean("spark.dynamicAllocation.testing", false))
}
/**
* Return the initial number of executors for dynamic allocation.
*/
def getDynamicAllocationInitialExecutors(conf: SparkConf): Int = {
if (conf.get(DYN_ALLOCATION_INITIAL_EXECUTORS) < conf.get(DYN_ALLOCATION_MIN_EXECUTORS)) {
logWarning(s"${DYN_ALLOCATION_INITIAL_EXECUTORS.key} less than " +
s"${DYN_ALLOCATION_MIN_EXECUTORS.key} is invalid, ignoring its setting, " +
"please update your configs.")
}
if (conf.get(EXECUTOR_INSTANCES).getOrElse(0) < conf.get(DYN_ALLOCATION_MIN_EXECUTORS)) {
logWarning(s"${EXECUTOR_INSTANCES.key} less than " +
s"${DYN_ALLOCATION_MIN_EXECUTORS.key} is invalid, ignoring its setting, " +
"please update your configs.")
}
val initialExecutors = Seq(
conf.get(DYN_ALLOCATION_MIN_EXECUTORS),
conf.get(DYN_ALLOCATION_INITIAL_EXECUTORS),
conf.get(EXECUTOR_INSTANCES).getOrElse(0)).max
logInfo(s"Using initial executors = $initialExecutors, max of " +
s"${DYN_ALLOCATION_INITIAL_EXECUTORS.key}, ${DYN_ALLOCATION_MIN_EXECUTORS.key} and " +
s"${EXECUTOR_INSTANCES.key}")
initialExecutors
}
def tryWithResource[R <: Closeable, T](createResource: => R)(f: R => T): T = {
val resource = createResource
try f.apply(resource) finally resource.close()
}
/**
   * Returns the path of a temporary file in the same directory as `path`.
*/
def tempFileWith(path: File): File = {
new File(path.getAbsolutePath + "." + UUID.randomUUID())
}
/**
   * Returns the name of this JVM process. This is OS dependent, but on typical systems
   * (OS X, Linux, Windows) it is formatted as PID@hostname.
*/
def getProcessName(): String = {
ManagementFactory.getRuntimeMXBean().getName()
}
/**
* Utility function that should be called early in `main()` for daemons to set up some common
* diagnostic state.
*/
def initDaemon(log: Logger): Unit = {
log.info(s"Started daemon with process name: ${Utils.getProcessName()}")
SignalUtils.registerLogger(log)
}
/**
* Unions two comma-separated lists of files and filters out empty strings.
*/
def unionFileLists(leftList: Option[String], rightList: Option[String]): Set[String] = {
var allFiles = Set.empty[String]
leftList.foreach { value => allFiles ++= value.split(",") }
rightList.foreach { value => allFiles ++= value.split(",") }
allFiles.filter { _.nonEmpty }
}
/**
   * Return the jar files pointed to by the "spark.jars" property. Spark internally distributes
   * these jars through the file server. In YARN mode, this returns an empty list, since YARN
   * has its own mechanism to distribute jars.
*/
def getUserJars(conf: SparkConf): Seq[String] = {
val sparkJars = conf.getOption("spark.jars")
sparkJars.map(_.split(",")).map(_.filter(_.nonEmpty)).toSeq.flatten
}
/**
   * Return the local jar files which will be added to the REPL's classpath. These jar files are
   * specified by --jars (spark.jars) or --packages; remote jars are first downloaded locally by
   * SparkSubmit.
*/
def getLocalUserJarsForShell(conf: SparkConf): Seq[String] = {
val localJars = conf.getOption("spark.repl.local.jars")
localJars.map(_.split(",")).map(_.filter(_.nonEmpty)).toSeq.flatten
}
private[spark] val REDACTION_REPLACEMENT_TEXT = "*********(redacted)"
/**
* Redact the sensitive values in the given map. If a map key matches the redaction pattern then
* its value is replaced with a dummy text.
*/
def redact(conf: SparkConf, kvs: Seq[(String, String)]): Seq[(String, String)] = {
val redactionPattern = conf.get(SECRET_REDACTION_PATTERN)
redact(redactionPattern, kvs)
}
/**
* Redact the sensitive values in the given map. If a map key matches the redaction pattern then
* its value is replaced with a dummy text.
*/
def redact(regex: Option[Regex], kvs: Seq[(String, String)]): Seq[(String, String)] = {
regex match {
case None => kvs
case Some(r) => redact(r, kvs)
}
}
/**
* Redact the sensitive information in the given string.
*/
def redact(regex: Option[Regex], text: String): String = {
regex match {
case None => text
case Some(r) =>
if (text == null || text.isEmpty) {
text
} else {
r.replaceAllIn(text, REDACTION_REPLACEMENT_TEXT)
}
}
}
private def redact(redactionPattern: Regex, kvs: Seq[(String, String)]): Seq[(String, String)] = {
// If the sensitive information regex matches with either the key or the value, redact the value
// While the original intent was to only redact the value if the key matched with the regex,
// we've found that especially in verbose mode, the value of the property may contain sensitive
// information like so:
// "sun.java.command":"org.apache.spark.deploy.SparkSubmit ... \\
// --conf spark.executorEnv.HADOOP_CREDSTORE_PASSWORD=secret_password ...
//
// And, in such cases, simply searching for the sensitive information regex in the key name is
// not sufficient. The values themselves have to be searched as well and redacted if matched.
    // This does mean we may admit more false positives - for example, if the value of an
    // arbitrary property contained the term 'password', we may redact the value from the UI and
    // logs. To work around this, the user would have to make the spark.redaction.regex property
// more specific.
kvs.map { case (key, value) =>
redactionPattern.findFirstIn(key)
.orElse(redactionPattern.findFirstIn(value))
.map { _ => (key, REDACTION_REPLACEMENT_TEXT) }
.getOrElse((key, value))
}
}
/**
* Looks up the redaction regex from within the key value pairs and uses it to redact the rest
* of the key value pairs. No care is taken to make sure the redaction property itself is not
* redacted. So theoretically, the property itself could be configured to redact its own value
* when printing.
*/
def redact(kvs: Map[String, String]): Seq[(String, String)] = {
val redactionPattern = kvs.getOrElse(
SECRET_REDACTION_PATTERN.key,
SECRET_REDACTION_PATTERN.defaultValueString
).r
redact(redactionPattern, kvs.toArray)
}
def stringToSeq(str: String): Seq[String] = {
str.split(",").map(_.trim()).filter(_.nonEmpty)
}
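  // Illustrative usage sketch (added for clarity, not part of the original source): entries are
  // trimmed and empty segments dropped.
  //   Utils.stringToSeq(" a, b ,,c ")  // Seq("a", "b", "c")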
/**
* Create instances of extension classes.
*
* The classes in the given list must:
* - Be sub-classes of the given base class.
* - Provide either a no-arg constructor, or a 1-arg constructor that takes a SparkConf.
*
* The constructors are allowed to throw "UnsupportedOperationException" if the extension does not
* want to be registered; this allows the implementations to check the Spark configuration (or
* other state) and decide they do not need to be added. A log message is printed in that case.
* Other exceptions are bubbled up.
*/
def loadExtensions[T](extClass: Class[T], classes: Seq[String], conf: SparkConf): Seq[T] = {
classes.flatMap { name =>
try {
val klass = classForName(name)
require(extClass.isAssignableFrom(klass),
s"$name is not a subclass of ${extClass.getName()}.")
val ext = Try(klass.getConstructor(classOf[SparkConf])) match {
case Success(ctor) =>
ctor.newInstance(conf)
case Failure(_) =>
klass.getConstructor().newInstance()
}
Some(ext.asInstanceOf[T])
} catch {
case _: NoSuchMethodException =>
throw new SparkException(
s"$name did not have a zero-argument constructor or a" +
" single-argument constructor that accepts SparkConf. Note: if the class is" +
" defined inside of another Scala class, then its constructors may accept an" +
" implicit parameter that references the enclosing class; in this case, you must" +
" define the class as a top-level class in order to prevent this extra" +
" parameter from breaking Spark's ability to find a valid constructor.")
case e: InvocationTargetException =>
e.getCause() match {
case uoe: UnsupportedOperationException =>
logDebug(s"Extension $name not being initialized.", uoe)
logInfo(s"Extension $name not being initialized.")
None
case null => throw e
case cause => throw cause
}
}
}
}
/**
   * Check the validity of the given Kubernetes master URL and return the resolved URL. The prefix
   * "k8s://" is prepended to the resolved URL, as the prefix is used by KubernetesClusterManager
* in canCreate to determine if the KubernetesClusterManager should be used.
*/
def checkAndGetK8sMasterUrl(rawMasterURL: String): String = {
require(rawMasterURL.startsWith("k8s://"),
"Kubernetes master URL must start with k8s://.")
val masterWithoutK8sPrefix = rawMasterURL.substring("k8s://".length)
// To handle master URLs, e.g., k8s://host:port.
if (!masterWithoutK8sPrefix.contains("://")) {
val resolvedURL = s"https://$masterWithoutK8sPrefix"
logInfo("No scheme specified for kubernetes master URL, so defaulting to https. Resolved " +
s"URL is $resolvedURL.")
return s"k8s://$resolvedURL"
}
val masterScheme = new URI(masterWithoutK8sPrefix).getScheme
val resolvedURL = masterScheme.toLowerCase match {
case "https" =>
masterWithoutK8sPrefix
case "http" =>
logWarning("Kubernetes master URL uses HTTP instead of HTTPS.")
masterWithoutK8sPrefix
case null =>
val resolvedURL = s"https://$masterWithoutK8sPrefix"
logInfo("No scheme specified for kubernetes master URL, so defaulting to https. Resolved " +
s"URL is $resolvedURL.")
resolvedURL
case _ =>
throw new IllegalArgumentException("Invalid Kubernetes master scheme: " + masterScheme)
}
s"k8s://$resolvedURL"
}
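  // Illustrative usage sketch (added for clarity, not part of the original source): a missing
  // scheme defaults to https, an explicit scheme is preserved.
  //   Utils.checkAndGetK8sMasterUrl("k8s://apiserver:6443")         // "k8s://https://apiserver:6443"
  //   Utils.checkAndGetK8sMasterUrl("k8s://http://apiserver:8080")  // "k8s://http://apiserver:8080"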
}
private[util] object CallerContext extends Logging {
val callerContextSupported: Boolean = {
SparkHadoopUtil.get.conf.getBoolean("hadoop.caller.context.enabled", false) && {
try {
Utils.classForName("org.apache.hadoop.ipc.CallerContext")
Utils.classForName("org.apache.hadoop.ipc.CallerContext$Builder")
true
} catch {
case _: ClassNotFoundException =>
false
case NonFatal(e) =>
logWarning("Fail to load the CallerContext class", e)
false
}
}
}
}
/**
 * A utility class used to set up Spark caller contexts for HDFS and Yarn. The `context` is
 * constructed from the parameters passed in.
 * When Spark applications run on Yarn and HDFS, their caller contexts are written into the Yarn RM
 * audit log and hdfs-audit.log. This can help users better diagnose and understand how specific
 * applications are impacting parts of the Hadoop system and what potential problems they may be
 * creating (e.g. overloading the NameNode). As mentioned in HDFS-9184, for a given HDFS operation
 * it is very helpful to track which upper-level job issued it.
*
* @param from who sets up the caller context (TASK, CLIENT, APPMASTER)
*
* The parameters below are optional:
* @param upstreamCallerContext caller context the upstream application passes in
* @param appId id of the app this task belongs to
* @param appAttemptId attempt id of the app this task belongs to
* @param jobId id of the job this task belongs to
* @param stageId id of the stage this task belongs to
* @param stageAttemptId attempt id of the stage this task belongs to
* @param taskId task id
* @param taskAttemptNumber task attempt id
*/
private[spark] class CallerContext(
from: String,
upstreamCallerContext: Option[String] = None,
appId: Option[String] = None,
appAttemptId: Option[String] = None,
jobId: Option[Int] = None,
stageId: Option[Int] = None,
stageAttemptId: Option[Int] = None,
taskId: Option[Long] = None,
taskAttemptNumber: Option[Int] = None) extends Logging {
private val context = prepareContext("SPARK_" +
from +
appId.map("_" + _).getOrElse("") +
appAttemptId.map("_" + _).getOrElse("") +
jobId.map("_JId_" + _).getOrElse("") +
stageId.map("_SId_" + _).getOrElse("") +
stageAttemptId.map("_" + _).getOrElse("") +
taskId.map("_TId_" + _).getOrElse("") +
taskAttemptNumber.map("_" + _).getOrElse("") +
upstreamCallerContext.map("_" + _).getOrElse(""))
private def prepareContext(context: String): String = {
// The default max size of Hadoop caller context is 128
lazy val len = SparkHadoopUtil.get.conf.getInt("hadoop.caller.context.max.size", 128)
if (context == null || context.length <= len) {
context
} else {
val finalContext = context.substring(0, len)
logWarning(s"Truncated Spark caller context from $context to $finalContext")
finalContext
}
}
/**
* Set up the caller context [[context]] by invoking Hadoop CallerContext API of
* [[org.apache.hadoop.ipc.CallerContext]], which was added in hadoop 2.8.
*/
def setCurrentContext(): Unit = {
if (CallerContext.callerContextSupported) {
try {
val callerContext = Utils.classForName("org.apache.hadoop.ipc.CallerContext")
val builder = Utils.classForName("org.apache.hadoop.ipc.CallerContext$Builder")
val builderInst = builder.getConstructor(classOf[String]).newInstance(context)
val hdfsContext = builder.getMethod("build").invoke(builderInst)
callerContext.getMethod("setCurrent", callerContext).invoke(null, hdfsContext)
} catch {
case NonFatal(e) =>
logWarning("Fail to set Spark caller context", e)
}
}
}
}
/**
* A utility class to redirect the child process's stdout or stderr.
*/
private[spark] class RedirectThread(
in: InputStream,
out: OutputStream,
name: String,
propagateEof: Boolean = false)
extends Thread(name) {
setDaemon(true)
override def run() {
scala.util.control.Exception.ignoring(classOf[IOException]) {
// FIXME: We copy the stream on the level of bytes to avoid encoding problems.
Utils.tryWithSafeFinally {
val buf = new Array[Byte](1024)
var len = in.read(buf)
while (len != -1) {
out.write(buf, 0, len)
out.flush()
len = in.read(buf)
}
} {
if (propagateEof) {
out.close()
}
}
}
}
}
/**
* An [[OutputStream]] that will store the last 10 kilobytes (by default) written to it
* in a circular buffer. The current contents of the buffer can be accessed using
* the toString method.
*/
private[spark] class CircularBuffer(sizeInBytes: Int = 10240) extends java.io.OutputStream {
private var pos: Int = 0
private var isBufferFull = false
private val buffer = new Array[Byte](sizeInBytes)
def write(input: Int): Unit = {
buffer(pos) = input.toByte
pos = (pos + 1) % buffer.length
isBufferFull = isBufferFull || (pos == 0)
}
override def toString: String = {
if (!isBufferFull) {
return new String(buffer, 0, pos, StandardCharsets.UTF_8)
}
val nonCircularBuffer = new Array[Byte](sizeInBytes)
System.arraycopy(buffer, pos, nonCircularBuffer, 0, buffer.length - pos)
System.arraycopy(buffer, 0, nonCircularBuffer, buffer.length - pos, pos)
new String(nonCircularBuffer, StandardCharsets.UTF_8)
}
}
| saltstar/spark | core/src/main/scala/org/apache/spark/util/Utils.scala | Scala | apache-2.0 | 107,740 |
package by.verkpavel.grafolnet.personal
import java.awt.Image
trait Personal {
def parse(item: Image)
} | VerkhovtsovPavel/BSUIR_Labs | Diploma/diplom/src/main/scala/by/verkpavel/grafolnet/personal/Personal.scala | Scala | mit | 107 |
package pureconfig
trait NamingConvention {
def toTokens(s: String): Seq[String]
def fromTokens(l: Seq[String]): String
}
trait CapitalizedWordsNamingConvention extends NamingConvention {
def toTokens(s: String): Seq[String] = {
CapitalizedWordsNamingConvention.wordBreakPattern.split(s).map(_.toLowerCase)
}
}
object CapitalizedWordsNamingConvention {
private val wordBreakPattern = String.format(
"%s|%s|%s",
"(?<=[A-Z])(?=[A-Z][a-z])",
"(?<=[^A-Z])(?=[A-Z])",
"(?<=[A-Za-z])(?=[^A-Za-z])").r
}
/**
* CamelCase identifiers look like `camelCase` and `useMorePureconfig`
* @see https://en.wikipedia.org/wiki/Camel_case
*/
object CamelCase extends CapitalizedWordsNamingConvention {
def fromTokens(l: Seq[String]): String = {
l match {
case Seq() => ""
case h +: Seq() => h.toLowerCase
case h +: t => h.toLowerCase + t.map(_.capitalize).mkString
}
}
}
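// Illustrative usage sketch (added for clarity, not part of the original source): conventions are
// converted by going through the common token representation.
//   CamelCase.toTokens("useMorePureconfig")                        // Seq("use", "more", "pureconfig")
//   KebabCase.fromTokens(Seq("use", "more", "pureconfig"))         // "use-more-pureconfig"
//   SnakeCase.fromTokens(CamelCase.toTokens("useMorePureconfig"))  // "use_more_pureconfig"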
/**
 * PascalCase identifiers look like e.g. `PascalCase` and `UseMorePureconfig`
* @see https://en.wikipedia.org/wiki/PascalCase
*/
object PascalCase extends CapitalizedWordsNamingConvention {
def fromTokens(l: Seq[String]): String = l.map(_.capitalize).mkString
}
class StringDelimitedNamingConvention(d: String) extends NamingConvention {
def toTokens(s: String): Seq[String] =
s.split(d).map(_.toLowerCase)
def fromTokens(l: Seq[String]): String =
l.map(_.toLowerCase).mkString(d)
}
/**
* KebabCase identifiers look like `kebab-case` and `use-more-pureconfig`
* @see http://wiki.c2.com/?KebabCase
*/
object KebabCase extends StringDelimitedNamingConvention("-")
/**
* SnakeCase identifiers look like `snake_case` and `use_more_pureconfig`
* @see https://en.wikipedia.org/wiki/Snake_case
*/
object SnakeCase extends StringDelimitedNamingConvention("_")
| derekmorr/pureconfig | core/src/main/scala/pureconfig/NamingConvention.scala | Scala | mpl-2.0 | 1,800 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.e2.engine
import org.apache.predictionio.e2.fixture.{NaiveBayesFixture, SharedSparkContext}
import org.scalatest.{Matchers, FlatSpec}
import scala.language.reflectiveCalls
class CategoricalNaiveBayesTest extends FlatSpec with Matchers
with SharedSparkContext with NaiveBayesFixture {
val Tolerance = .0001
val labeledPoints = fruit.labeledPoints
"Model" should "have log priors and log likelihoods" in {
val labeledPointsRdd = sc.parallelize(labeledPoints)
val model = CategoricalNaiveBayes.train(labeledPointsRdd)
model.priors(fruit.Banana) should be(-.7885 +- Tolerance)
model.priors(fruit.Orange) should be(-1.7047 +- Tolerance)
model.priors(fruit.OtherFruit) should be(-1.0116 +- Tolerance)
model.likelihoods(fruit.Banana)(0)(fruit.Long) should
be(-.2231 +- Tolerance)
model.likelihoods(fruit.Banana)(0)(fruit.NotLong) should
be(-1.6094 +- Tolerance)
model.likelihoods(fruit.Banana)(1)(fruit.Sweet) should
be(-.2231 +- Tolerance)
model.likelihoods(fruit.Banana)(1)(fruit.NotSweet) should
be(-1.6094 +- Tolerance)
model.likelihoods(fruit.Banana)(2)(fruit.Yellow) should
be(-.2231 +- Tolerance)
model.likelihoods(fruit.Banana)(2)(fruit.NotYellow) should
be(-1.6094 +- Tolerance)
model.likelihoods(fruit.Orange)(0) should not contain key(fruit.Long)
model.likelihoods(fruit.Orange)(0)(fruit.NotLong) should be(0.0)
model.likelihoods(fruit.Orange)(1)(fruit.Sweet) should
be(-.6931 +- Tolerance)
model.likelihoods(fruit.Orange)(1)(fruit.NotSweet) should
be(-.6931 +- Tolerance)
model.likelihoods(fruit.Orange)(2)(fruit.NotYellow) should be(0.0)
model.likelihoods(fruit.Orange)(2) should not contain key(fruit.Yellow)
model.likelihoods(fruit.OtherFruit)(0)(fruit.Long) should
be(-.6931 +- Tolerance)
model.likelihoods(fruit.OtherFruit)(0)(fruit.NotLong) should
be(-.6931 +- Tolerance)
model.likelihoods(fruit.OtherFruit)(1)(fruit.Sweet) should
be(-.2877 +- Tolerance)
model.likelihoods(fruit.OtherFruit)(1)(fruit.NotSweet) should
be(-1.3863 +- Tolerance)
model.likelihoods(fruit.OtherFruit)(2)(fruit.Yellow) should
be(-1.3863 +- Tolerance)
model.likelihoods(fruit.OtherFruit)(2)(fruit.NotYellow) should
be(-.2877 +- Tolerance)
}
"Model's log score" should "be the log score of the given point" in {
val labeledPointsRdd = sc.parallelize(labeledPoints)
val model = CategoricalNaiveBayes.train(labeledPointsRdd)
val score = model.logScore(LabeledPoint(
fruit.Banana,
Array(fruit.Long, fruit.NotSweet, fruit.NotYellow))
)
score should not be None
score.get should be(-4.2304 +- Tolerance)
}
it should "be negative infinity for a point with a non-existing feature" in {
val labeledPointsRdd = sc.parallelize(labeledPoints)
val model = CategoricalNaiveBayes.train(labeledPointsRdd)
val score = model.logScore(LabeledPoint(
fruit.Banana,
Array(fruit.Long, fruit.NotSweet, "Not Exist"))
)
score should not be None
score.get should be(Double.NegativeInfinity)
}
it should "be none for a point with a non-existing label" in {
val labeledPointsRdd = sc.parallelize(labeledPoints)
val model = CategoricalNaiveBayes.train(labeledPointsRdd)
val score = model.logScore(LabeledPoint(
"Not Exist",
Array(fruit.Long, fruit.NotSweet, fruit.Yellow))
)
score should be(None)
}
it should "use the provided default likelihood function" in {
val labeledPointsRdd = sc.parallelize(labeledPoints)
val model = CategoricalNaiveBayes.train(labeledPointsRdd)
val score = model.logScore(
LabeledPoint(
fruit.Banana,
Array(fruit.Long, fruit.NotSweet, "Not Exist")
),
ls => ls.min - math.log(2)
)
score should not be None
score.get should be(-4.9236 +- Tolerance)
}
"Model predict" should "return the correct label" in {
val labeledPointsRdd = sc.parallelize(labeledPoints)
val model = CategoricalNaiveBayes.train(labeledPointsRdd)
val label = model.predict(Array(fruit.Long, fruit.Sweet, fruit.Yellow))
label should be(fruit.Banana)
}
}
| pferrel/PredictionIO | e2/src/test/scala/org/apache/predictionio/e2/engine/CategoricalNaiveBayesTest.scala | Scala | apache-2.0 | 5,042 |
package de.frosner.broccoli.models
import enumeratum.{Enum, EnumEntry, PlayJsonEnum}
import scala.collection.immutable
sealed trait ParameterType extends EnumEntry with EnumEntry.Lowercase
object ParameterType extends Enum[ParameterType] with PlayJsonEnum[ParameterType] {
val values: immutable.IndexedSeq[ParameterType] = findValues
case object Raw extends ParameterType
case object String extends ParameterType
case object Integer extends ParameterType
case object Decimal extends ParameterType
}
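
// Editorial addition, not part of the original file: an assumed usage sketch. With the
// EnumEntry.Lowercase mixin, each entry's name is the lowercased object name, so lookups
// go through those lowercase strings; the expected outputs assume enumeratum's usual behaviour.
object ParameterTypeExample {
  def main(args: Array[String]): Unit = {
    println(ParameterType.withName("decimal"))       // Decimal
    println(ParameterType.withNameOption("unknown")) // None
    println(ParameterType.values.map(_.entryName))   // Vector(raw, string, integer, decimal)
  }
}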
| FRosner/cluster-broccoli | server/src/main/scala/de/frosner/broccoli/models/ParameterType.scala | Scala | apache-2.0 | 513 |
/*
* Copyright 2012 杨博 (Yang Bo)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dongxiguo.commons.continuations
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
import scala.util.continuations._
import scala.collection.TraversableLike
import com.dongxiguo.fastring.Fastring.Implicits._
@deprecated("应改用com.dongxiguo.commons.continuations.FunctionQueue", "0.1.2")
protected object SequentialRunner {
implicit private val (logger, formatter, appender) = ZeroLoggerFactory.newLogger(this)
private[SequentialRunner] sealed abstract class State[Task, TaskQueue <: TraversableLike[Task, TaskQueue]]
private final case class Idle[Task, TaskQueue <: TraversableLike[Task, TaskQueue]](
tasks: TaskQueue) extends State[Task, TaskQueue]
private final case class Running[Task, TaskQueue <: TraversableLike[Task, TaskQueue]](
tasks: TaskQueue) extends State[Task, TaskQueue]
private final case class ShuttedDown[Task, TaskQueue <: TraversableLike[Task, TaskQueue]](
tasks: TaskQueue) extends State[Task, TaskQueue]
}
// FIXME: SequentialRunner's performance is poor: enqueue plus flush takes 3000 nanoseconds, more than the cost of a single thread switch!
@deprecated("应改用com.dongxiguo.commons.continuations.FunctionQueue", "0.1.2")
abstract class SequentialRunner[Task, TaskQueue <: TraversableLike[Task, TaskQueue]]
extends AtomicReference[SequentialRunner.State[Task, TaskQueue]] {
import SequentialRunner._
protected def consumeSome(tasks: TaskQueue): TaskQueue @suspendable
implicit protected def taskQueueCanBuildFrom: collection.generic.CanBuildFrom[TaskQueue, Task, TaskQueue]
private def emptyTaskQueue: TaskQueue = taskQueueCanBuildFrom().result
set(Idle(emptyTaskQueue))
@tailrec
private def takeMore(remainingTasks: TaskQueue): TaskQueue = {
logger.finer {
fast"${remainingTasks.size} remaining tasks now, takeMore."
}
super.get match {
case oldState: ShuttedDown[Task, TaskQueue] =>
logger.finer(fast"Found ${oldState.tasks.size} more tasks.")
if (super.compareAndSet(oldState, ShuttedDown(emptyTaskQueue))) {
val result = remainingTasks ++ oldState.tasks
logger.finest(fast"After takeMore, there is ${result.size} tasks.")
result
} else {
// retry
takeMore(remainingTasks)
}
case oldState: Running[Task, TaskQueue] =>
logger.finer(
fast"remainingTasks.size: ${
remainingTasks.size.toString
}\\noldState.tasks.size: ${
oldState.tasks.size
}\\n(remainingTasks ++ oldState.tasks).size: ${
(remainingTasks ++ oldState.tasks).size
}")
val result = remainingTasks ++ oldState.tasks
val newState: State[Task, TaskQueue] =
if (result.isEmpty) {
Idle(emptyTaskQueue)
} else {
Running(emptyTaskQueue)
}
if (super.compareAndSet(oldState, newState)) {
logger.finest(fast"After takeMore, there is ${result.size} tasks.")
result
} else {
// retry
takeMore(remainingTasks)
}
case Idle(_) =>
throw new IllegalStateException
}
}
private def run(tasks: TaskQueue): Unit @suspendable = {
var varTasks = tasks
while (!varTasks.isEmpty) {
val remainingTasks = consumeSome(varTasks)
varTasks = takeMore(remainingTasks)
}
}
@tailrec
final def enqueue(tasks: Task*) {
val oldState = super.get
val newState: State[Task, TaskQueue] =
oldState match {
case oldState: Idle[Task, TaskQueue] =>
new Idle[Task, TaskQueue](oldState.tasks ++ tasks)
case oldState: Running[Task, TaskQueue] =>
Running(oldState.tasks ++ tasks)
case _: ShuttedDown[Task, TaskQueue] =>
throw new ShuttedDownException("SequentialRunner is shutted down!")
}
if (!super.compareAndSet(oldState, newState)) {
// retry
enqueue(tasks: _*)
}
}
final def flush() {
super.get match {
case oldState: Idle[Task, TaskQueue] =>
val newState = new Running[Task, TaskQueue](emptyTaskQueue)
if (super.compareAndSet(oldState, newState)) {
reset {
run(oldState.tasks)
}
} else {
// retry
flush()
}
case _: Running[Task, TaskQueue] | _: ShuttedDown[Task, TaskQueue] =>
}
}
/**
   * Marks this runner as shut down; no further tasks may be added to the queue.
   * @param lastTasks the final batch of tasks this queue will execute
*/
@tailrec
final def shutDown(lastTasks: Task*) {
super.get match {
case oldState: Idle[Task, TaskQueue] =>
val newState = new ShuttedDown[Task, TaskQueue](emptyTaskQueue)
if (super.compareAndSet(oldState, newState)) {
reset {
run(oldState.tasks ++ lastTasks)
}
} else {
// retry
shutDown(lastTasks: _*)
}
case oldState: Running[Task, TaskQueue] =>
val newState =
new ShuttedDown[Task, TaskQueue](oldState.tasks ++ lastTasks)
if (!super.compareAndSet(oldState, newState)) {
// retry
shutDown(lastTasks: _*)
}
case _: ShuttedDown[Task, TaskQueue] =>
}
}
/**
   * Marks this runner as shut down; no further tasks may be added to the queue.
*/
@tailrec
final def shutDown() {
super.get match {
case oldState: Idle[Task, TaskQueue] =>
val newState = new ShuttedDown[Task, TaskQueue](emptyTaskQueue)
if (super.compareAndSet(oldState, newState)) {
reset {
run(oldState.tasks)
}
} else {
// retry
shutDown()
}
case oldState: Running[Task, TaskQueue] =>
val newState = new ShuttedDown[Task, TaskQueue](oldState.tasks)
if (!super.compareAndSet(oldState, newState)) {
// retry
shutDown()
}
case _: ShuttedDown[Task, TaskQueue] =>
}
}
}
// vim: expandtab softtabstop=2 shiftwidth=2
| Atry/commons-continuations | src/main/scala/com/dongxiguo/commons/continuations/SequentialRunner.scala | Scala | apache-2.0 | 6,621 |
package com.cds.learnscala.test.seqTest
object GivenNames {
def unapplySeq(name: String): Option[Seq[String]] = {
val names = name.trim.split(" ")
if (names.forall(_.isEmpty)) None
else Some(names)
}
}
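
// Editorial addition, not in the original file: a minimal sketch of how the unapplySeq
// extractor above is used in a pattern match on whitespace-separated given names.
object GivenNamesDemo {
  def main(args: Array[String]): Unit = {
    "John Paul Jones" match {
      case GivenNames(first, _*) => println(s"First given name: $first") // First given name: John
      case _                     => println("No given names found")
    }
  }
}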
| anancds/scala-project | learn-scala/src/main/scala/com/cds/learnscala/test/seqTest/GivenNames.scala | Scala | mit | 219 |
package io.mth.route
import scalaz._, Scalaz._
sealed trait Method {
def route[A](r: Route[A]): Route[A] =
Route.route(req =>
if (this == req.method)
r(req)
else
notfound
)
def apply[A](a: A) = constant(a)
def constant[A](a: A): Route[A] =
route(a.point[Route])
}
object Method extends Methods
trait Methods {
def parseMethod(m: String) = m match {
case "GET" => Get
case "PUT" => Put
case "POST" => Post
case "HEAD" => Head
case "DELETE" => Delete
case "OPTIONS" => Options
case "TRACE" => Trace
}
}
case object Get extends Method
case object Put extends Method
case object Post extends Method
case object Head extends Method
case object Delete extends Method
case object Options extends Method
case object Trace extends Method
| markhibberd/route | src/scala/io/mth/route/Method.scala | Scala | bsd-3-clause | 814 |
package com.daxin
//
//private[daxin] class Dog restricts the Dog class to the daxin package and its sub-packages
//the decompiled class file is still public
private[daxin] class Dog {
var name:String=_
var age:Int=0
  //for the test, see the DogTest.scala file under com.test
}
//the decompiled class file is still public
//the private after Cat makes its constructor private, so a Cat can only be created with new inside its companion object
private[daxin] class Cat private{
var name:String= _
  //Scala has no public keyword; members are public by default
def getMiao():String={
"Miao Miao ..."
}
}
object Cat{
def main(args: Array[String]): Unit = {
val c=new Cat
c.name="Tomcat"
println(c.name)
}
} | Dax1n/Scala | ObjectOrientedDemo/src/com/daxin/Dog.scala | Scala | apache-2.0 | 731 |
package org.scalafmt.cli
import java.io.File
import java.util.Date
import org.scalafmt.Versions
import org.scalafmt.config.Config
import org.scalafmt.util.AbsoluteFile
import org.scalafmt.util.BuildTime
import org.scalafmt.util.FileOps
import org.scalafmt.util.GitCommit
import scopt.OptionParser
object CliArgParser {
@GitCommit val gitCommit: String = ???
@BuildTime val buildTimeMs: Long = ???
val usageExamples: String =
"""|scalafmt # Format all files in the current project, configuration is determined in this order:
| # 1. .scalafmt.conf file in current directory
| # 2. .scalafmt.conf inside root directory of current git repo
| # 3. no configuration, default style
|scalafmt --test # throw exception on mis-formatted files, won't write to files.
|scalafmt --diff # Format all files that were edited in git diff against master branch.
|scalafmt --diff-branch 2.x # same as --diff, except against branch 2.x
|scalafmt --stdin # read from stdin and print to stdout
|scalafmt --stdin --assume-filename foo.sbt # required to format .sbt files
|scalafmt -f Code.scala # print formatted contents to stdout.
|scalafmt -i -f Code1.scala,A.scala # write formatted contents to file.
|scalafmt -i -f . --exclude target # format all files in directory excluding target
|scalafmt --config .scalafmt.conf # read custom style from file
|scalafmt --config-str "style=IntelliJ" # define custom style as a flag, must be quoted.""".stripMargin
val scoptParser: OptionParser[CliOptions] =
new scopt.OptionParser[CliOptions]("scalafmt") {
override def showUsageOnError = false
def printAndExit(inludeUsage: Boolean)(ignore: Unit,
c: CliOptions): CliOptions = {
if (inludeUsage) showUsage
else showHeader
sys.exit
c
}
def readConfigFromFile(file: String, c: CliOptions): CliOptions = {
readConfig(
FileOps.readFile(
AbsoluteFile.fromFile(new File(file), c.common.workingDirectory)),
c
)
}
def readConfig(contents: String, c: CliOptions): CliOptions = {
Config.fromHocon(contents) match {
case Right(style) => c.copy(config = style)
case Left(e) => throw e
}
}
head("scalafmt", Versions.nightly)
opt[Unit]('h', "help")
.action(printAndExit(inludeUsage = true))
.text("prints this usage text")
opt[Unit]('v', "version")
.action(printAndExit(inludeUsage = false))
.text("print version ")
opt[Seq[File]]('f', "files")
.action { (files, c) =>
c.copy(
customFiles =
AbsoluteFile.fromFiles(files, c.common.workingDirectory))
}
.text(
"file or directory, in which case all *.scala files are formatted.")
opt[Seq[String]]("exclude")
.action((excludes, c) => c.copy(customExcludes = excludes))
.text(
"file or directory, in which case all *.scala files are formatted.")
opt[String]('c', "config")
.action(readConfigFromFile)
.text("a file path to .scalafmt.conf.")
opt[String]("config-str")
.action(readConfig)
.text("configuration defined as a string")
opt[Unit]("stdin")
.action((_, c) => c.copy(stdIn = true))
.text("read from stdin and print to stdout")
opt[String]("assume-filename")
.action((filename, c) => c.copy(assumeFilename = filename))
.text("required to format .sbt files with --stdin flag.")
opt[Unit]('i', "in-place")
.action((_, c) => c.copy(inPlace = true))
.text("write output to file, does nothing if file is not specified")
opt[Unit]("test")
.action((_, c) => c.copy(testing = true))
.text("test for mis-formatted code, exits with status 1 on failure.")
opt[File]("migrate2hocon")
.action((file, c) =>
c.copy(migrate =
Some(AbsoluteFile.fromFile(file, c.common.workingDirectory))))
.text("""migrate .scalafmt CLI style configuration to hocon style configuration in .scalafmt.conf""")
opt[Unit]("diff")
.action((_, c) => c.copy(diff = Some("master")))
.text("If set, only format edited files in git diff against master.")
opt[String]("diff-branch")
.action((branch, c) => c.copy(diff = Some(branch)))
.text("If set, only format edited files in git diff against provided branch.")
opt[Unit]("build-info")
.action({
case (_, c) =>
println(buildInfo)
sys.exit
})
.text("prints build information")
opt[Unit]("quiet")
.action((_, c) => c.copy(quiet = true))
.text("don't print out stuff to console.")
opt[Unit]("debug")
.action((_, c) => c.copy(debug = true))
.text("print out diagnostics to console.")
opt[Unit]("non-interactive")
.action((_, c) => c.copy(nonInteractive = true))
.text("disable fancy progress bar, useful in ci or sbt plugin.")
opt[(Int, Int)]("range")
.hidden()
.action({
case ((from, to), c) =>
val offset = if (from == to) 0 else -1
c.copy(range = c.range + Range(from - 1, to + offset))
})
.text("(experimental) only format line range from=to")
note(s"""|Examples:
|$usageExamples
|Please file bugs to https://github.com/scalameta/scalafmt/issues
""".stripMargin)
}
def buildInfo =
s"""build commit: $gitCommit
|build time: ${new Date(buildTimeMs)}""".stripMargin
}
| Daxten/scalafmt | cli/src/main/scala/org/scalafmt/cli/CliArgParser.scala | Scala | apache-2.0 | 5,777 |
package latis.writer
import latis.dm._
/**
* Writes the .proto file that describes the dataset but contains no data or metadata.
*/
class ProtoWriter extends TextWriter {
override def makeHeader(dataset: Dataset): String = "message " + toCamelCase(dataset.getName) + " {" + newLine
override def makeFooter(dataset: Dataset): String = "}"
override def writeVariable(variable: Variable): Unit = printWriter.print(varToString(variable))
override def varToString(variable: Variable): String = {
makeMessage(variable) + makeOpLabel(variable)
}
/**
* Provided for repeated variables in functions.
*/
def varToRepString(variable: Variable): String = makeMessage(variable) + makeRepLabel(variable)
/**
* Matches each scalar type to an appropriate protobuf type:
* Real -> double
* Integer -> int64
   * Text -> string
   * Binary -> bytes
   * (Indexes are dropped)
*/
override def makeScalar(scalar: Scalar): String = {
val name = scalar.getName
scalar match{
case _: Index => ""
case _: Real => indent(count) + "optional double " + name + " = " + tag + ";" + newLine
case _: Integer => indent(count) + "optional int64 " + name + " = " + tag + ";" + newLine
case _: Text => indent(count) + "optional string " + name + " = " + tag + ";" + newLine
case _: Binary => indent(count) + "optional bytes " + name + " = " + tag + ";" + newLine
}
}
/**
* Makes the label for a repeated scalar.
*/
def makeScalarRep(scalar: Scalar): String = {
val name = scalar.getName
scalar match{
case _: Index => ""
case _: Real => indent(count) + "repeated float " + name + " = " + tag + ";" + newLine
case _: Integer => indent(count) + "repeated int64 " + name + " = " + tag + ";" + newLine
case _: Text => indent(count) + "repeated string " + name + " = " + tag + ";" + newLine
case _: Binary => indent(count) + "repeated bytes " + name + " = " + tag + ";" + newLine
}
}
/**
* Ignores the sample and looks at the inner variables
*/
override def makeSample(sample: Sample): String = {
val temp = tag
tag = 0
val s = sample match {
case Sample(d, r: Tuple) => varToRepString(d) + r.getVariables.map(varToRepString(_)).mkString("")
case _ => sample.getVariables.map(varToRepString(_)).mkString("")
}
tag = temp
s
}
/**
* Makes a message of the tuple.
*/
override def makeTuple(tuple: Tuple): String = {
val temp = tag
tag = 0
count += indentSize
val vars = tuple.getVariables
val s = indent(count-indentSize) + "message " + toCamelCase(tuple.getName) + " {" + newLine +
vars.map(varToString(_)).mkString("") +
indent(count-indentSize) + "}" + newLine
tag = temp
count -= indentSize
s
}
/**
* Make a message from a function.
*/
override def makeFunction(function: Function): String = {
val temp = tag
tag = 0
count += indentSize
val s = varToRepString(Sample(function.getDomain, function.getRange))
count -= indentSize
tag = temp
indent(count) + "message " + toCamelCase(function.getName) + " {" + newLine + s + indent(count) + "}" + newLine
}
/**
* Makes an optional label for a variable.
*/
def makeOpLabel(variable: Variable): String = {
tag += 1
variable match {
case s: Scalar => makeScalar(s)
case _: Sample => ""
case t: Tuple => indent(count) + "optional " + toCamelCase(t.getName) + " " + t.getName + " = " + tag + ";" + newLine
case f: Function => indent(count) + "optional " + toCamelCase(f.getName) + " " + f.getName + " = " + tag + ";" + newLine
}
}
/**
* Makes a repeated label for a variable.
*/
def makeRepLabel(variable: Variable): String = {
tag += 1
variable match {
case s: Scalar => makeScalarRep(s)
case _: Sample => ""
case t: Tuple => indent(count) + "repeated " + toCamelCase(t.getName) + " " + t.getName + " = " + tag + ";" + newLine
case f: Function => indent(count) + "repeated " + toCamelCase(f.getName) + " " + f.getName + " = " + tag + ";" + newLine
}
}
/**
* For non-scalar Variables, creates a nested message.
*/
def makeMessage(variable: Variable): String = variable match{
case scalar: Scalar => ""
case sample: Sample => makeSample(sample)
case tuple: Tuple => makeTuple(tuple)
case function: Function => makeFunction(function)
}
/**
* Uses a counter to indent each line by the proper amount.
*/
def indent(num: Int): String = {
" " * num
}
val indentSize = 4
var tag = 0
var count = indentSize
def toCamelCase(string: String): String = {
string.split('_').map(_.capitalize).mkString("")
}
override def mimeType: String = "text/proto"
}
| dlindhol/LaTiS | src/main/scala/latis/writer/ProtoWriter.scala | Scala | epl-1.0 | 4,890 |
package com.datastax.spark.connector.cql.sai
import com.datastax.oss.driver.api.core.CqlSession
import com.datastax.spark.connector.SparkCassandraITSpecBase
import com.datastax.spark.connector.cql.CassandraConnector
import com.datastax.spark.connector.datasource.{CassandraScan, CassandraScanBuilder}
import com.datastax.spark.connector.rdd.CqlWhereClause
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.cassandra._
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.execution.{FilterExec, ProjectExec, SparkPlan}
import org.apache.spark.sql.sources.{EqualTo, Filter, GreaterThan, GreaterThanOrEqual, In, LessThan, LessThanOrEqual}
import org.scalatest.Matchers
trait SaiBaseSpec extends Matchers with SparkCassandraITSpecBase {
override lazy val conn = CassandraConnector(defaultConf)
def df(table: String): DataFrame = spark.read.cassandraFormat(table, ks).load()
def createTableWithIndexes(session: CqlSession, tableName: String, columns: Seq[String]): Unit = {
val typesDeclaration = columns.map { t => s"${t}_col ${t}" }.mkString("", ",", ",")
session.execute(
s"""CREATE TABLE IF NOT EXISTS $ks.$tableName (
| pk int,
| $typesDeclaration
| PRIMARY KEY (pk));""".stripMargin)
columns.foreach { t =>
session.execute(
s"CREATE CUSTOM INDEX ${t}_sai_idx ON $ks.$tableName (${t}_col) USING 'StorageAttachedIndex';")
}
}
def findFilterOption(plan: SparkPlan): Option[FilterExec] = {
plan match {
case filter: FilterExec => Option(filter)
case project: ProjectExec => findFilterOption(project.child)
case _ => None
}
}
def findFilter(plan: SparkPlan): FilterExec = {
findFilterOption(plan).getOrElse(throw new NoSuchElementException("Filter was not found in the given plan"))
}
def findCassandraScan(plan: SparkPlan): CassandraScan = {
plan match {
case BatchScanExec(_, scan: CassandraScan) => scan
case filter: FilterExec => findCassandraScan(filter.child)
case project: ProjectExec => findCassandraScan(project.child)
case _ => throw new NoSuchElementException("RowDataSourceScanExec was not found in the given plan")
}
}
def debug(dataFrame: DataFrame)(f: => Unit): DataFrame = {
dataFrame.explain(true)
f
dataFrame.show()
dataFrame
}
def assertPushDown(dataFrame: DataFrame): DataFrame = debug(dataFrame) {
val plan = dataFrame.queryExecution.sparkPlan
withClue("The given plan should not contain Filter element, some of the predicates were not pushed down.") {
findFilterOption(plan) should not be defined
}
}
def assertNoPushDown(dataFrame: DataFrame): DataFrame = debug(dataFrame) {
val plan = dataFrame.queryExecution.sparkPlan
findFilter(plan)
val scan = findCassandraScan(plan)
withClue("The given plan should not contain pushed down predicates") {
scan.cqlQueryParts.whereClause.predicates shouldBe empty
}
}
def assertNonPushedColumns(dataFrame: DataFrame, nonPushedColumns: String*): DataFrame = debug(dataFrame) {
val plan = dataFrame.queryExecution.sparkPlan
val filter = findFilter(plan)
val nonPushedFromPlan = filter.condition.children.collect {
case e: AttributeReference => e.name
}
nonPushedFromPlan.toSet should be(nonPushedColumns.toSet)
}
def assertPushedPredicate(dataFrame: DataFrame, pushedPredicate: Filter*): DataFrame = debug(dataFrame) {
val plan = dataFrame.queryExecution.sparkPlan
val scan = findCassandraScan(plan)
val handled = scan.cqlQueryParts.whereClause
val expected = CassandraScanBuilder.filterToCqlWhereClause(scan.tableDef, pushedPredicate.toArray)
def comparablePredicates(where: CqlWhereClause): Set[String] = {
where.toString.drop(2).dropRight(2).split("],\\\\[").toSet
}
withClue("The given df contains unexpected set of push down filters") {
comparablePredicates(handled) shouldBe comparablePredicates(expected)
}
}
}
| datastax/spark-cassandra-connector | connector/src/it/scala/com/datastax/spark/connector/cql/sai/SaiBaseSpec.scala | Scala | apache-2.0 | 4,111 |
/*
* Copyright (C) 2009 Lalit Pant <[email protected]>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo
package turtle
import java.awt.Color
import core.Style
trait Pen {
def init(): Unit
def clear(): Unit
def updatePosition(): Unit
def undoUpdatePosition(): Unit
def startMove(x: Double, y: Double): Unit
def move(x: Double, y: Double): Unit
def endMove(x: Double, y: Double): Unit
def setColor(color: Color): Unit
def setThickness(t: Double): Unit
def setFillColor(color: Color): Unit
def setStyle(style: Style): Unit
def undoStyle(oldStyle: Style): Unit
def undoMove(): Unit
def getColor: Color
def getFillColor: Color
def getThickness: Double
def setFontSize(n: Int)
def getFontSize: Int
def write(text: String)
}
| dotta/kojo | KojoEnv/src/net/kogics/kojo/turtle/Pen.scala | Scala | gpl-3.0 | 1,241 |
/*
Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package de.hpi.ingestion.textmining.preprocessing
import com.datastax.spark.connector._
import de.hpi.ingestion.framework.SparkJob
import de.hpi.ingestion.implicits.TupleImplicits._
import de.hpi.ingestion.textmining.models._
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
/**
* Resolves all redirects for each `ParsedWikipediaEntry` by replacing them with the pages they point to and writing
* the redirects to a cassandra table.
*/
class RedirectResolver extends SparkJob {
import RedirectResolver._
appName = "Redirect Resolver"
configFile = "textmining.xml"
var parsedWikipedia: RDD[ParsedWikipediaEntry] = _
var resolvedParsedWikipedia: RDD[ParsedWikipediaEntry] = _
var wikipediaRedirects: RDD[Redirect] = _
var savedWikipediaRedirects: RDD[Redirect] = _
// $COVERAGE-OFF$
/**
* Loads Parsed Wikipedia entries from the Cassandra.
* @param sc Spark Context used to load the RDDs
*/
override def load(sc: SparkContext): Unit = {
parsedWikipedia = sc.cassandraTable[ParsedWikipediaEntry](settings("keyspace"), settings("parsedWikiTable"))
wikipediaRedirects = sc.cassandraTable[Redirect](settings("keyspace"), settings("redirectTable"))
}
/**
* Saves Parsed Wikipedia entries with resolved redirects
* and the redirects themselves to the Cassandra.
* @param sc Spark Context used to connect to the Cassandra or the HDFS
*/
override def save(sc: SparkContext): Unit = {
resolvedParsedWikipedia.saveToCassandra(settings("keyspace"), settings("parsedWikiTable"))
if(!savedWikipediaRedirects.isEmpty()) {
savedWikipediaRedirects.saveToCassandra(settings("keyspace"), settings("redirectTable"))
}
}
// $COVERAGE-ON$
/**
* Resolves redirects for every ParsedWikipediaEntry. It checks if redirects where already found,
* if not it finds all redirects, resolves transitive redirects
* and then replaces links to redirect pages with links to the page the redirect directs to.
* @param sc Spark Context used to e.g. broadcast variables
*/
override def run(sc: SparkContext): Unit = {
var redirects = wikipediaRedirects
.map(Redirect.unapply(_).get)
.collect
.toMap
val saveRedirectsToCassandra = redirects.isEmpty
if(saveRedirectsToCassandra) {
val redirectMap = buildRedirectDict(parsedWikipedia)
redirects = resolveTransitiveRedirects(redirectMap)
}
val dictBroadcast = sc.broadcast(redirects)
resolvedParsedWikipedia = parsedWikipedia
.mapPartitions({ entryPartition =>
val localDict = dictBroadcast.value
entryPartition.map(entry => resolveRedirects(entry, localDict))
}, true)
val redirectsList = if(saveRedirectsToCassandra) redirects.map(Redirect.tupled).toList else Nil
savedWikipediaRedirects = sc.parallelize(redirectsList)
}
}
object RedirectResolver {
/**
* Resolves all redirects in the links of an articles. It does so by replacing the target page with the page the
* redirect points to.
*
* @param entry Parsed Wikipedia Entry containing the links that will be cleaned
* @param dict Map containing all redirects
* @return Parsed Wikipedia Entry with no redirect pages as target of a link
*/
def resolveRedirects(entry: ParsedWikipediaEntry, dict: Map[String, String]): ParsedWikipediaEntry = {
entry.allLinks().foreach(link => link.page = dict.getOrElse(link.page, link.page))
entry
}
/**
* Builds Map of redirects from redirect articles.
*
* @param articles RDD of Parsed Wikipedia Articles
* @return Map containing the redirects in the form redirect page -> target page
*/
def buildRedirectDict(articles: RDD[ParsedWikipediaEntry]): Map[String, String] = {
articles
.filter(TextParser.isRedirectPage)
.map(entry => (entry.title, entry.textlinks.headOption.map(_.page)))
.filter(t => t._2.isDefined && t._1 != t._2.get)
.map(_.map(identity, _.get))
.collect()
.toMap
}
/**
* Resolves transitive redirects by replacing the target page with the transitive target page. Also removes
* all reflexive entries.
*
* @param redirectMap Map containing the redirects that will be cleaned
* @return Map containing the cleaned redirects
*/
def resolveTransitiveRedirects(redirectMap: Map[String, String]): Map[String, String] = {
var resolvedRedirects = redirectMap
var resolvableEntries = Map[String, String]()
var visited = Set[String]()
do {
resolvedRedirects ++= resolvableEntries.map(_.map(identity, resolvedRedirects))
resolvedRedirects = resolvedRedirects.filter(t => t._1 != t._2)
visited ++= resolvableEntries.values
resolvableEntries = resolvedRedirects
.filter(t => resolvedRedirects.contains(t._2))
.filter(t => !visited.contains(t._2))
} while(resolvableEntries.nonEmpty)
resolvedRedirects
}
}
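
// Editorial addition, not part of the original file: a minimal, assumed illustration of
// the transitive resolution step in isolation; redirect chains collapse onto their
// final target and reflexive entries are dropped.
object RedirectResolverExample {
  def main(args: Array[String]): Unit = {
    val resolved = RedirectResolver.resolveTransitiveRedirects(
      Map("A" -> "B", "B" -> "C", "D" -> "D"))
    println(resolved) // Map(A -> C, B -> C)
  }
}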
| bpn1/ingestion | src/main/scala/de/hpi/ingestion/textmining/preprocessing/RedirectResolver.scala | Scala | apache-2.0 | 5,890 |
// scalac: -Xfatal-warnings
//
class A
case object B extends A
object Test {
val x1 = (B: A)
println(x1 == B) // no warning
println(B == x1) // no warning
val x2 = (B: A with Product)
println(x2 == B) // no warning
println(B == x2) // spurious warning: "always returns false"
}
| scala/scala | test/files/pos/t5932.scala | Scala | apache-2.0 | 295 |
package json.entity
import play.api.libs.json._
import plm.universe.Entity
import plm.universe.bugglequest.AbstractBuggle
import play.api.libs.json.Json.toJsFieldJsValueWrapper
object EntityToJson {
def entitiesWrite(entities: Array[Entity]): JsValue = {
var json: JsValue = Json.obj()
entities.foreach { entity =>
json = json.as[JsObject] ++ entityWrite(entity).as[JsObject]
}
return json
}
def entityWrite(entity: Entity): JsValue = {
var json: JsValue = null
entity match {
case abstractBuggle: AbstractBuggle =>
json = AbstractBuggleToJson.abstractBuggleWrite(abstractBuggle)
}
return Json.obj(
entity.getName -> json
)
}
} | BaptisteMounier/webPLM | app/json/entity/EntityToJson.scala | Scala | agpl-3.0 | 715 |
package com.routably.beessolver.vrp.data
import com.routably.beessolver.vrp.Location
import com.routably.beessolver.vrp.Job
// Best
//
// 2960 976 -1168.37760100 ---------------------------
// COMMENT : (Christophides and Eilon)
object P09D151K14 extends Problem {
def maxVehicles = 15
def maxCapacity = 200
def maxRouteTime = 200
val serviceTime = 10
def depot = new Location(35, 35)
def jobs = ImportUtil.toJobsWithoutId(List(
(41, 49, 10),
(35, 17, 7),
(55, 45, 13),
(55, 20, 19),
(15, 30, 26),
(25, 30, 3),
(20, 50, 5),
(10, 43, 9),
(55, 60, 16),
(30, 60, 16),
(20, 65, 12),
(50, 35, 19),
(30, 25, 23),
(15, 10, 20),
(30, 5, 8),
(10, 20, 19),
(5, 30, 2),
(20, 40, 12),
(15, 60, 17),
(45, 65, 9),
(45, 20, 11),
(45, 10, 18),
(55, 5, 29),
(65, 35, 3),
(65, 20, 6),
(45, 30, 17),
(35, 40, 16),
(41, 37, 16),
(64, 42, 9),
(40, 60, 21),
(31, 52, 27),
(35, 69, 23),
(53, 52, 11),
(65, 55, 14),
(63, 65, 8),
(2, 60, 5),
(20, 20, 8),
(5, 5, 16),
(60, 12, 31),
(40, 25, 9),
(42, 7, 5),
(24, 12, 5),
(23, 3, 7),
(11, 14, 18),
(6, 38, 16),
(2, 48, 1),
(8, 56, 27),
(13, 52, 36),
(6, 68, 30),
(47, 47, 13),
(49, 58, 10),
(27, 43, 9),
(37, 31, 14),
(57, 29, 18),
(63, 23, 2),
(53, 12, 6),
(32, 12, 7),
(36, 26, 18),
(21, 24, 28),
(17, 34, 3),
(12, 24, 13),
(24, 58, 19),
(27, 69, 10),
(15, 77, 9),
(62, 77, 20),
(49, 73, 25),
(67, 5, 25),
(56, 39, 36),
(37, 47, 6),
(37, 56, 5),
(57, 68, 15),
(47, 16, 25),
(44, 17, 9),
(46, 13, 8),
(49, 11, 18),
(49, 42, 13),
(53, 43, 14),
(61, 52, 3),
(57, 48, 23),
(56, 37, 6),
(55, 54, 26),
(15, 47, 16),
(14, 37, 11),
(11, 31, 7),
(16, 22, 41),
(4, 18, 35),
(28, 18, 26),
(26, 52, 9),
(26, 35, 15),
(31, 67, 3),
(15, 19, 1),
(22, 22, 2),
(18, 24, 22),
(26, 27, 27),
(25, 24, 20),
(22, 27, 11),
(25, 21, 12),
(19, 21, 10),
(20, 26, 9),
(18, 18, 17),
(37, 52, 7),
(49, 49, 30),
(52, 64, 16),
(20, 26, 9),
(40, 30, 21),
(21, 47, 15),
(17, 63, 19),
(31, 62, 23),
(52, 33, 11),
(51, 21, 5),
(42, 41, 19),
(31, 32, 29),
(5, 25, 23),
(12, 42, 21),
(36, 16, 10),
(52, 41, 15),
(27, 23, 3),
(17, 33, 41),
(13, 13, 9),
(57, 58, 28),
(62, 42, 8),
(42, 57, 8),
(16, 57, 16),
(8, 52, 10),
(7, 38, 28),
(27, 68, 7),
(30, 48, 15),
(43, 67, 14),
(58, 48, 6),
(58, 27, 19),
(37, 69, 11),
(38, 46, 12),
(46, 10, 23),
(61, 33, 26),
(62, 63, 17),
(63, 69, 6),
(32, 22, 9),
(45, 35, 15),
(59, 15, 14),
(5, 6, 7),
(10, 17, 27),
(21, 10, 13),
(5, 64, 11),
(30, 15, 16),
(39, 10, 10),
(32, 39, 5),
(25, 32, 25),
(25, 55, 17),
(48, 28, 18),
(56, 37, 10)
), serviceTime)
def bestKnownAnswer = 1162.55
}
| aishfenton/bees_solver | src/main/scala/com/routably/beessolver/vrp/data/P09D151K14.scala | Scala | mit | 3,136 |
/*
* Copyright 2011, Patrick Boe
* ===========================
* This program is distributed under the terms of the GNU General Public License.
*
* This file is part of Thimblus.
*
* Thimblus is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Thimblus is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Thimblus. If not, see <http://www.gnu.org/licenses/>.
*/
package org.thimblus.ssh
case class SSHConnector(hostname: String, username: String, keypath: String, pass: String);
object SSH{
def keycheck(filecheck: String=>Boolean) = {
((filecheck("~/.ssh/id_rsa") &&
filecheck("~/.ssh/id_rsa.pub")) ||
(filecheck("~/.ssh/id_dsa") &&
filecheck("~/.ssh/id_dsa.pub"))) &&
filecheck("~/.ssh/known_hosts")
}
}
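
// Editorial addition, not in the original file: an assumed sketch showing that keycheck
// only needs a String => Boolean predicate, so a stubbed file lookup is enough to use it.
object SSHExample {
  def main(args: Array[String]): Unit = {
    val fakeFileSystem = Set("~/.ssh/id_rsa", "~/.ssh/id_rsa.pub", "~/.ssh/known_hosts")
    println(SSH.keycheck(fakeFileSystem.contains)) // true
  }
}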
// vim: sw=2:softtabstop=2:et:
| patrickboe/thimblus | src/main/scala/org/thimblus/ssh/ssh.scala | Scala | gpl-3.0 | 1,231 |
package mesosphere.mesos
import mesosphere.marathon.MarathonTestHelper
import mesosphere.marathon.tasks.{ PortsMatcher, PortsMatch }
import org.scalatest.{ Matchers, GivenWhenThen, FunSuite }
import scala.collection.immutable.Seq
class ResourceMatchTest
extends FunSuite with GivenWhenThen with Matchers {
test("resources include all matched reservations") {
Given("a resource match with reservations")
val memReservation = MarathonTestHelper.reservation(principal = "memPrincipal", labels = Map("resource" -> "mem"))
val portReservation = MarathonTestHelper.reservation(principal = "portPrincipal", labels = Map("resource" -> "ports"))
val resourceMatch = ResourceMatcher.ResourceMatch(
scalarMatches = Iterable(
ScalarMatch(
"mem", 128.0,
consumed = Iterable(ScalarMatch.Consumption(128.0, "role1", reservation = Some(memReservation))),
scope = ScalarMatchResult.Scope.NoneDisk
)
),
portsMatch = PortsMatch(Seq(Some(PortsMatcher.PortWithRole("role2", 80, reservation = Some(portReservation)))))
)
When("converting it to resources")
val resources = resourceMatch.resources
Then("the resources should refer to the reservations")
resources should equal(
Iterable(
MarathonTestHelper.scalarResource("mem", 128, "role1", reservation = Some(memReservation)),
MarathonTestHelper.portsResource(80, 80, "role2", reservation = Some(portReservation))
)
)
}
}
| yp-engineering/marathon | src/test/scala/mesosphere/mesos/ResourceMatchTest.scala | Scala | apache-2.0 | 1,495 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator.group
import java.lang.management.ManagementFactory
import java.nio.ByteBuffer
import java.util.concurrent.locks.ReentrantLock
import java.util.{Collections, Optional}
import com.yammer.metrics.core.Gauge
import javax.management.ObjectName
import kafka.api._
import kafka.cluster.Partition
import kafka.common.OffsetAndMetadata
import kafka.log.{AppendOrigin, LogAppendInfo, UnifiedLog}
import kafka.metrics.KafkaYammerMetrics
import kafka.server.{FetchDataInfo, FetchLogEnd, HostedPartition, KafkaConfig, LogOffsetMetadata, ReplicaManager, RequestLocal}
import kafka.utils.{KafkaScheduler, MockTime, TestUtils}
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.internals.Topic
import org.apache.kafka.common.metrics.{JmxReporter, KafkaMetricsContext, Metrics => kMetrics}
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.OffsetFetchResponse
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.utils.Utils
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
import org.mockito.{ArgumentCaptor, ArgumentMatchers}
import org.mockito.ArgumentMatchers.{any, anyInt, anyLong, anyShort}
import org.mockito.Mockito.{mock, reset, times, verify, when}
import scala.jdk.CollectionConverters._
import scala.collection._
class GroupMetadataManagerTest {
var time: MockTime = null
var replicaManager: ReplicaManager = null
var groupMetadataManager: GroupMetadataManager = null
var scheduler: KafkaScheduler = null
var partition: Partition = null
var defaultOffsetRetentionMs = Long.MaxValue
var metrics: kMetrics = null
val groupId = "foo"
val groupInstanceId = "bar"
val groupPartitionId = 0
val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
val protocolType = "protocolType"
val rebalanceTimeout = 60000
val sessionTimeout = 10000
val defaultRequireStable = false
val numOffsetsPartitions = 2
private val offsetConfig = {
val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(nodeId = 0, zkConnect = ""))
OffsetConfig(maxMetadataSize = config.offsetMetadataMaxSize,
loadBufferSize = config.offsetsLoadBufferSize,
offsetsRetentionMs = config.offsetsRetentionMinutes * 60 * 1000L,
offsetsRetentionCheckIntervalMs = config.offsetsRetentionCheckIntervalMs,
offsetsTopicNumPartitions = config.offsetsTopicPartitions,
offsetsTopicSegmentBytes = config.offsetsTopicSegmentBytes,
offsetsTopicReplicationFactor = config.offsetsTopicReplicationFactor,
offsetsTopicCompressionCodec = config.offsetsTopicCompressionCodec,
offsetCommitTimeoutMs = config.offsetCommitTimeoutMs,
offsetCommitRequiredAcks = config.offsetCommitRequiredAcks)
}
@BeforeEach
def setUp(): Unit = {
defaultOffsetRetentionMs = offsetConfig.offsetsRetentionMs
metrics = new kMetrics()
time = new MockTime
replicaManager = mock(classOf[ReplicaManager])
groupMetadataManager = new GroupMetadataManager(0, ApiVersion.latestVersion, offsetConfig, replicaManager,
time, metrics)
groupMetadataManager.startup(() => numOffsetsPartitions, false)
partition = mock(classOf[Partition])
}
@AfterEach
def tearDown(): Unit = {
groupMetadataManager.shutdown()
}
@Test
def testLogInfoFromCleanupGroupMetadata(): Unit = {
var expiredOffsets: Int = 0
var infoCount = 0
val gmm = new GroupMetadataManager(0, ApiVersion.latestVersion, offsetConfig, replicaManager, time, metrics) {
override def cleanupGroupMetadata(groups: Iterable[GroupMetadata], requestLocal: RequestLocal,
selector: GroupMetadata => Map[TopicPartition, OffsetAndMetadata]): Int = expiredOffsets
override def info(msg: => String): Unit = infoCount += 1
}
gmm.startup(() => numOffsetsPartitions, false)
try {
// if there are no offsets to expire, we skip to log
gmm.cleanupGroupMetadata()
assertEquals(0, infoCount)
// if there are offsets to expire, we should log info
expiredOffsets = 100
gmm.cleanupGroupMetadata()
assertEquals(1, infoCount)
} finally {
gmm.shutdown()
}
}
@Test
def testLoadOffsetsWithoutGroup(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val startOffset = 15L
val groupEpoch = 2
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets)
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE, offsetCommitRecords.toArray: _*)
expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
assertEquals(committedOffsets.size, group.allOffsets.size)
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
}
}
@Test
def testLoadEmptyGroupWithOffsets(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val generation = 15
val protocolType = "consumer"
val startOffset = 15L
val groupEpoch = 2
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets)
val groupMetadataRecord = buildEmptyGroupRecord(generation, protocolType)
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE,
(offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*)
expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
assertEquals(generation, group.generationId)
assertEquals(Some(protocolType), group.protocolType)
assertNull(group.leaderOrNull)
assertNull(group.protocolName.orNull)
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
}
}
@Test
def testLoadTransactionalOffsetsWithoutGroup(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val producerId = 1000L
val producerEpoch: Short = 2
val groupEpoch = 2
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val buffer = ByteBuffer.allocate(1024)
var nextOffset = 0
nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, committedOffsets)
nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = true)
buffer.flip()
val records = MemoryRecords.readableRecords(buffer)
expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
assertEquals(committedOffsets.size, group.allOffsets.size)
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
}
}
@Test
def testDoNotLoadAbortedTransactionalOffsetCommits(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val producerId = 1000L
val producerEpoch: Short = 2
val groupEpoch = 2
val abortedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val buffer = ByteBuffer.allocate(1024)
var nextOffset = 0
nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, abortedOffsets)
nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = false)
buffer.flip()
val records = MemoryRecords.readableRecords(buffer)
expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
// Since there are no committed offsets for the group, and there is no other group metadata, we don't expect the
// group to be loaded.
assertEquals(None, groupMetadataManager.getGroup(groupId))
}
@Test
def testGroupLoadedWithPendingCommits(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val producerId = 1000L
val producerEpoch: Short = 2
val groupEpoch = 2
val foo0 = new TopicPartition("foo", 0)
val foo1 = new TopicPartition("foo", 1)
val bar0 = new TopicPartition("bar", 0)
val pendingOffsets = Map(
foo0 -> 23L,
foo1 -> 455L,
bar0 -> 8992L
)
val buffer = ByteBuffer.allocate(1024)
var nextOffset = 0
nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, pendingOffsets)
buffer.flip()
val records = MemoryRecords.readableRecords(buffer)
expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
// The group should be loaded with pending offsets.
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
// Ensure that no offsets are materialized, but that we have offsets pending.
assertEquals(0, group.allOffsets.size)
assertTrue(group.hasOffsets)
assertTrue(group.hasPendingOffsetCommitsFromProducer(producerId))
assertTrue(group.hasPendingOffsetCommitsForTopicPartition(foo0))
assertTrue(group.hasPendingOffsetCommitsForTopicPartition(foo1))
assertTrue(group.hasPendingOffsetCommitsForTopicPartition(bar0))
}
@Test
def testLoadWithCommittedAndAbortedTransactionalOffsetCommits(): Unit = {
// A test which loads a log with a mix of committed and aborted transactional offset committed messages.
val groupMetadataTopicPartition = groupTopicPartition
val producerId = 1000L
val producerEpoch: Short = 2
val groupEpoch = 2
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val abortedOffsets = Map(
new TopicPartition("foo", 2) -> 231L,
new TopicPartition("foo", 3) -> 4551L,
new TopicPartition("bar", 1) -> 89921L
)
val buffer = ByteBuffer.allocate(1024)
var nextOffset = 0
nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, abortedOffsets)
nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = false)
nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, committedOffsets)
nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = true)
buffer.flip()
val records = MemoryRecords.readableRecords(buffer)
expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
// Ensure that only the committed offsets are materialized, and that there are no pending commits for the producer.
// This allows us to be certain that the aborted offset commits are truly discarded.
assertEquals(committedOffsets.size, group.allOffsets.size)
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
}
assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId))
}
@Test
def testLoadWithCommittedAndAbortedAndPendingTransactionalOffsetCommits(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val producerId = 1000L
val producerEpoch: Short = 2
val groupEpoch = 2
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val foo3 = new TopicPartition("foo", 3)
val abortedOffsets = Map(
new TopicPartition("foo", 2) -> 231L,
foo3 -> 4551L,
new TopicPartition("bar", 1) -> 89921L
)
val pendingOffsets = Map(
foo3 -> 2312L,
new TopicPartition("foo", 4) -> 45512L,
new TopicPartition("bar", 2) -> 899212L
)
val buffer = ByteBuffer.allocate(1024)
var nextOffset = 0
val commitOffsetsLogPosition = nextOffset
nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, committedOffsets)
nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = true)
nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, abortedOffsets)
nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = false)
nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, pendingOffsets)
buffer.flip()
val records = MemoryRecords.readableRecords(buffer)
expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
// Ensure that only the committed offsets are materialized, and that there are no pending commits for the producer.
// This allows us to be certain that the aborted offset commits are truly discarded.
assertEquals(committedOffsets.size, group.allOffsets.size)
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
assertEquals(Some(commitOffsetsLogPosition), group.offsetWithRecordMetadata(topicPartition).head.appendedBatchOffset)
}
// We should have pending commits.
assertTrue(group.hasPendingOffsetCommitsFromProducer(producerId))
assertTrue(group.hasPendingOffsetCommitsForTopicPartition(foo3))
// The loaded pending commits should materialize after a commit marker comes in.
groupMetadataManager.handleTxnCompletion(producerId, List(groupMetadataTopicPartition.partition).toSet, isCommit = true)
assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId))
pendingOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
}
}
@Test
def testLoadTransactionalOffsetCommitsFromMultipleProducers(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val firstProducerId = 1000L
val firstProducerEpoch: Short = 2
val secondProducerId = 1001L
val secondProducerEpoch: Short = 3
val groupEpoch = 2
val committedOffsetsFirstProducer = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val committedOffsetsSecondProducer = Map(
new TopicPartition("foo", 2) -> 231L,
new TopicPartition("foo", 3) -> 4551L,
new TopicPartition("bar", 1) -> 89921L
)
val buffer = ByteBuffer.allocate(1024)
var nextOffset = 0L
val firstProduceRecordOffset = nextOffset
nextOffset += appendTransactionalOffsetCommits(buffer, firstProducerId, firstProducerEpoch, nextOffset, committedOffsetsFirstProducer)
nextOffset += completeTransactionalOffsetCommit(buffer, firstProducerId, firstProducerEpoch, nextOffset, isCommit = true)
val secondProducerRecordOffset = nextOffset
nextOffset += appendTransactionalOffsetCommits(buffer, secondProducerId, secondProducerEpoch, nextOffset, committedOffsetsSecondProducer)
nextOffset += completeTransactionalOffsetCommit(buffer, secondProducerId, secondProducerEpoch, nextOffset, isCommit = true)
buffer.flip()
val records = MemoryRecords.readableRecords(buffer)
expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
// Ensure that only the committed offsets are materialized, and that there are no pending commits for the producer.
// This allows us to be certain that the aborted offset commits are truly discarded.
assertEquals(committedOffsetsFirstProducer.size + committedOffsetsSecondProducer.size, group.allOffsets.size)
committedOffsetsFirstProducer.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
assertEquals(Some(firstProduceRecordOffset), group.offsetWithRecordMetadata(topicPartition).head.appendedBatchOffset)
}
committedOffsetsSecondProducer.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
assertEquals(Some(secondProducerRecordOffset), group.offsetWithRecordMetadata(topicPartition).head.appendedBatchOffset)
}
}
@Test
def testGroupLoadWithConsumerAndTransactionalOffsetCommitsConsumerWins(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val producerId = 1000L
val producerEpoch: Short = 2
val groupEpoch = 2
val transactionalOffsetCommits = Map(
new TopicPartition("foo", 0) -> 23L
)
val consumerOffsetCommits = Map(
new TopicPartition("foo", 0) -> 24L
)
val buffer = ByteBuffer.allocate(1024)
var nextOffset = 0
nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, transactionalOffsetCommits)
val consumerRecordOffset = nextOffset
nextOffset += appendConsumerOffsetCommit(buffer, nextOffset, consumerOffsetCommits)
nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = true)
buffer.flip()
val records = MemoryRecords.readableRecords(buffer)
expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
// The consumer commit was appended after the transactional commit, so once the transaction
// completes the consumer's offset should be the one materialized.
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
assertEquals(1, group.allOffsets.size)
assertTrue(group.hasOffsets)
assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId))
assertEquals(consumerOffsetCommits.size, group.allOffsets.size)
consumerOffsetCommits.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
assertEquals(Some(consumerRecordOffset), group.offsetWithRecordMetadata(topicPartition).head.appendedBatchOffset)
}
}
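// Same scenario as above, but with the transactional commit appended after the consumer commit,
// so the transactional offset should win once the transaction completes.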
@Test
def testGroupLoadWithConsumerAndTransactionalOffsetCommitsTransactionWins(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val producerId = 1000L
val producerEpoch: Short = 2
val groupEpoch = 2
val transactionalOffsetCommits = Map(
new TopicPartition("foo", 0) -> 23L
)
val consumerOffsetCommits = Map(
new TopicPartition("foo", 0) -> 24L
)
val buffer = ByteBuffer.allocate(1024)
var nextOffset = 0
nextOffset += appendConsumerOffsetCommit(buffer, nextOffset, consumerOffsetCommits)
nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, transactionalOffsetCommits)
nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = true)
buffer.flip()
val records = MemoryRecords.readableRecords(buffer)
expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
// The transactional commit was appended after the consumer commit, so once the transaction
// completes the transactional offset should be the one materialized.
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
assertEquals(1, group.allOffsets.size)
assertTrue(group.hasOffsets)
assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId))
assertEquals(consumerOffsetCommits.size, group.allOffsets.size)
transactionalOffsetCommits.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
}
}
@Test
def testGroupNotExists(): Unit = {
// group is not owned
assertFalse(groupMetadataManager.groupNotExists(groupId))
groupMetadataManager.addPartitionOwnership(groupPartitionId)
// group is owned but does not exist yet
assertTrue(groupMetadataManager.groupNotExists(groupId))
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
// group is owned but not Dead
assertFalse(groupMetadataManager.groupNotExists(groupId))
group.transitionTo(Dead)
// group is owned and Dead
assertTrue(groupMetadataManager.groupNotExists(groupId))
}
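// Appends plain (non-transactional) offset commit records for the given offsets to the buffer
// and returns the number of records written.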
private def appendConsumerOffsetCommit(buffer: ByteBuffer, baseOffset: Long, offsets: Map[TopicPartition, Long]) = {
val builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.LOG_APPEND_TIME, baseOffset)
val commitRecords = createCommittedOffsetRecords(offsets)
commitRecords.foreach(builder.append)
builder.build()
offsets.size
}
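// Appends transactional offset commit records for the given producer to the buffer and returns
// the number of records written. These commits remain pending until a control marker is appended.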
private def appendTransactionalOffsetCommits(buffer: ByteBuffer, producerId: Long, producerEpoch: Short,
baseOffset: Long, offsets: Map[TopicPartition, Long]): Int = {
val builder = MemoryRecords.builder(buffer, CompressionType.NONE, baseOffset, producerId, producerEpoch, 0, true)
val commitRecords = createCommittedOffsetRecords(offsets)
commitRecords.foreach(builder.append)
builder.build()
offsets.size
}
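// Appends a COMMIT or ABORT transaction marker for the given producer and returns the number of
// records written (always a single control record).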
private def completeTransactionalOffsetCommit(buffer: ByteBuffer, producerId: Long, producerEpoch: Short, baseOffset: Long,
isCommit: Boolean): Int = {
val builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
TimestampType.LOG_APPEND_TIME, baseOffset, time.milliseconds(), producerId, producerEpoch, 0, true, true,
RecordBatch.NO_PARTITION_LEADER_EPOCH)
val controlRecordType = if (isCommit) ControlRecordType.COMMIT else ControlRecordType.ABORT
builder.appendEndTxnMarker(time.milliseconds(), new EndTransactionMarker(controlRecordType, 0))
builder.build()
1
}
@Test
def testLoadOffsetsWithTombstones(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val startOffset = 15L
val groupEpoch = 2
val tombstonePartition = new TopicPartition("foo", 1)
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
tombstonePartition -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets)
val tombstone = new SimpleRecord(GroupMetadataManager.offsetCommitKey(groupId, tombstonePartition), null)
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE,
(offsetCommitRecords ++ Seq(tombstone)).toArray: _*)
expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
assertEquals(committedOffsets.size - 1, group.allOffsets.size)
committedOffsets.foreach { case (topicPartition, offset) =>
if (topicPartition == tombstonePartition)
assertEquals(None, group.offset(topicPartition))
else
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
}
}
@Test
def testLoadOffsetsAndGroup(): Unit = {
loadOffsetsAndGroup(groupTopicPartition, 2)
}
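// Loads a set of committed offsets plus a stable group metadata record into the manager for the
// given partition and epoch, verifies the loaded state, and returns the cached group.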
def loadOffsetsAndGroup(groupMetadataTopicPartition: TopicPartition, groupEpoch: Int): GroupMetadata = {
val generation = 935
val protocolType = "consumer"
val protocol = "range"
val startOffset = 15L
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets)
val memberId = "98098230493"
val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId)
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE,
(offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*)
expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Stable, group.currentState)
assertEquals(memberId, group.leaderOrNull)
assertEquals(generation, group.generationId)
assertEquals(Some(protocolType), group.protocolType)
assertEquals(protocol, group.protocolName.orNull)
assertEquals(Set(memberId), group.allMembers)
assertEquals(committedOffsets.size, group.allOffsets.size)
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
assertTrue(group.offset(topicPartition).map(_.expireTimestamp).contains(None))
}
group
}
@Test
def testLoadOffsetsAndGroupIgnored(): Unit = {
val groupEpoch = 2
loadOffsetsAndGroup(groupTopicPartition, groupEpoch)
assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition()))
groupMetadataManager.removeGroupsAndOffsets(groupTopicPartition, Some(groupEpoch), _ => ())
assertTrue(groupMetadataManager.getGroup(groupId).isEmpty,
"Removed group remained in cache")
assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition()))
groupMetadataManager.loadGroupsAndOffsets(groupTopicPartition, groupEpoch - 1, _ => (), 0L)
assertTrue(groupMetadataManager.getGroup(groupId).isEmpty,
"Removed group remained in cache")
assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition()))
}
@Test
def testUnloadOffsetsAndGroup(): Unit = {
val groupEpoch = 2
loadOffsetsAndGroup(groupTopicPartition, groupEpoch)
groupMetadataManager.removeGroupsAndOffsets(groupTopicPartition, Some(groupEpoch), _ => ())
assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition()))
assertTrue(groupMetadataManager.getGroup(groupId).isEmpty,
"Removed group remained in cache")
}
@Test
def testUnloadOffsetsAndGroupIgnored(): Unit = {
val groupEpoch = 2
val initiallyLoaded = loadOffsetsAndGroup(groupTopicPartition, groupEpoch)
groupMetadataManager.removeGroupsAndOffsets(groupTopicPartition, Some(groupEpoch - 1), _ => ())
assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition()))
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(initiallyLoaded.groupId, group.groupId)
assertEquals(initiallyLoaded.currentState, group.currentState)
assertEquals(initiallyLoaded.leaderOrNull, group.leaderOrNull)
assertEquals(initiallyLoaded.generationId, group.generationId)
assertEquals(initiallyLoaded.protocolType, group.protocolType)
assertEquals(initiallyLoaded.protocolName.orNull, group.protocolName.orNull)
assertEquals(initiallyLoaded.allMembers, group.allMembers)
assertEquals(initiallyLoaded.allOffsets.size, group.allOffsets.size)
initiallyLoaded.allOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition))
assertTrue(group.offset(topicPartition).map(_.expireTimestamp).contains(None))
}
}
@Test
def testUnloadOffsetsAndGroupIgnoredAfterStopReplica(): Unit = {
val groupEpoch = 2
val initiallyLoaded = loadOffsetsAndGroup(groupTopicPartition, groupEpoch)
groupMetadataManager.removeGroupsAndOffsets(groupTopicPartition, None, _ => ())
assertTrue(groupMetadataManager.getGroup(groupId).isEmpty,
"Removed group remained in cache")
assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition()),
"Replica which was stopped still in epochForPartitionId")
loadOffsetsAndGroup(groupTopicPartition, groupEpoch + 1)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(initiallyLoaded.groupId, group.groupId)
assertEquals(initiallyLoaded.currentState, group.currentState)
assertEquals(initiallyLoaded.leaderOrNull, group.leaderOrNull)
assertEquals(initiallyLoaded.generationId, group.generationId)
assertEquals(initiallyLoaded.protocolType, group.protocolType)
assertEquals(initiallyLoaded.protocolName.orNull, group.protocolName.orNull)
assertEquals(initiallyLoaded.allMembers, group.allMembers)
assertEquals(initiallyLoaded.allOffsets.size, group.allOffsets.size)
initiallyLoaded.allOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition))
assertTrue(group.offset(topicPartition).map(_.expireTimestamp).contains(None))
}
}
@Test
def testLoadGroupWithTombstone(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val startOffset = 15L
val groupEpoch = 2
val memberId = "98098230493"
val groupMetadataRecord = buildStableGroupRecordWithMember(generation = 15,
protocolType = "consumer", protocol = "range", memberId)
val groupMetadataTombstone = new SimpleRecord(GroupMetadataManager.groupMetadataKey(groupId), null)
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE,
Seq(groupMetadataRecord, groupMetadataTombstone).toArray: _*)
expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
assertEquals(None, groupMetadataManager.getGroup(groupId))
}
@Test
def testLoadGroupWithLargeGroupMetadataRecord(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val startOffset = 15L
val groupEpoch = 2
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
// create a GroupMetadata record larger than offsets.load.buffer.size (here at least 16 bytes larger)
val assignmentSize = OffsetConfig.DefaultLoadBufferSize + 16
val memberId = "98098230493"
val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets)
val groupMetadataRecord = buildStableGroupRecordWithMember(generation = 15,
protocolType = "consumer", protocol = "range", memberId, new Array[Byte](assignmentSize))
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE,
(offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*)
expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
}
}
@Test
def testLoadGroupAndOffsetsWithCorruptedLog(): Unit = {
// Simulate a case where startOffset < endOffset but log is empty. This could theoretically happen
// when all the records are expired and the active segment is truncated or when the partition
// is accidentally corrupted.
val startOffset = 0L
val endOffset = 10L
val groupEpoch = 2
val logMock: UnifiedLog = mock(classOf[UnifiedLog])
when(replicaManager.getLog(groupTopicPartition)).thenReturn(Some(logMock))
expectGroupMetadataLoad(logMock, startOffset, MemoryRecords.EMPTY)
when(replicaManager.getLogEndOffset(groupTopicPartition)).thenReturn(Some(endOffset))
groupMetadataManager.loadGroupsAndOffsets(groupTopicPartition, groupEpoch, _ => (), 0L)
verify(logMock).logStartOffset
verify(logMock).read(ArgumentMatchers.eq(startOffset),
maxLength = anyInt(),
isolation = ArgumentMatchers.eq(FetchLogEnd),
minOneMessage = ArgumentMatchers.eq(true))
verify(replicaManager).getLog(groupTopicPartition)
verify(replicaManager, times(2)).getLogEndOffset(groupTopicPartition)
assertFalse(groupMetadataManager.isPartitionLoading(groupTopicPartition.partition()))
}
@Test
def testOffsetWriteAfterGroupRemoved(): Unit = {
// this test case checks the following scenario:
// 1. the group exists at some point in time, but is later removed (because all members left)
// 2. a "simple" consumer (i.e. not a consumer group) then uses the same groupId to commit some offsets
val groupMetadataTopicPartition = groupTopicPartition
val generation = 293
val protocolType = "consumer"
val protocol = "range"
val startOffset = 15L
val groupEpoch = 2
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets)
val memberId = "98098230493"
val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId)
val groupMetadataTombstone = new SimpleRecord(GroupMetadataManager.groupMetadataKey(groupId), null)
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE,
(Seq(groupMetadataRecord, groupMetadataTombstone) ++ offsetCommitRecords).toArray: _*)
expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
assertEquals(committedOffsets.size, group.allOffsets.size)
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
}
}
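// Group metadata and offsets spread across two log segments should be merged on load, with the
// records from the later segment taking precedence.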
@Test
def testLoadGroupAndOffsetsFromDifferentSegments(): Unit = {
val generation = 293
val protocolType = "consumer"
val protocol = "range"
val startOffset = 15L
val groupEpoch = 2
val tp0 = new TopicPartition("foo", 0)
val tp1 = new TopicPartition("foo", 1)
val tp2 = new TopicPartition("bar", 0)
val tp3 = new TopicPartition("xxx", 0)
val fileRecordsMock: FileRecords = mock(classOf[FileRecords])
val logMock: UnifiedLog = mock(classOf[UnifiedLog])
when(replicaManager.getLog(groupTopicPartition)).thenReturn(Some(logMock))
val segment1MemberId = "a"
val segment1Offsets = Map(tp0 -> 23L, tp1 -> 455L, tp3 -> 42L)
val segment1Records = MemoryRecords.withRecords(startOffset, CompressionType.NONE,
(createCommittedOffsetRecords(segment1Offsets) ++ Seq(buildStableGroupRecordWithMember(
generation, protocolType, protocol, segment1MemberId))).toArray: _*)
val segment1End = startOffset + segment1Records.records.asScala.size
val segment2MemberId = "b"
val segment2Offsets = Map(tp0 -> 33L, tp2 -> 8992L, tp3 -> 10L)
val segment2Records = MemoryRecords.withRecords(segment1End, CompressionType.NONE,
(createCommittedOffsetRecords(segment2Offsets) ++ Seq(buildStableGroupRecordWithMember(
generation, protocolType, protocol, segment2MemberId))).toArray: _*)
val segment2End = segment1End + segment2Records.records.asScala.size
when(logMock.logStartOffset)
.thenReturn(segment1End)
.thenReturn(segment2End)
when(logMock.read(ArgumentMatchers.eq(segment1End),
maxLength = anyInt(),
isolation = ArgumentMatchers.eq(FetchLogEnd),
minOneMessage = ArgumentMatchers.eq(true)))
.thenReturn(FetchDataInfo(LogOffsetMetadata(segment1End), fileRecordsMock))
when(logMock.read(ArgumentMatchers.eq(segment2End),
maxLength = anyInt(),
isolation = ArgumentMatchers.eq(FetchLogEnd),
minOneMessage = ArgumentMatchers.eq(true)))
.thenReturn(FetchDataInfo(LogOffsetMetadata(segment2End), fileRecordsMock))
when(fileRecordsMock.sizeInBytes())
.thenReturn(segment1Records.sizeInBytes)
.thenReturn(segment2Records.sizeInBytes)
val bufferCapture: ArgumentCaptor[ByteBuffer] = ArgumentCaptor.forClass(classOf[ByteBuffer])
when(fileRecordsMock.readInto(bufferCapture.capture(), anyInt()))
.thenAnswer(_ => {
val buffer = bufferCapture.getValue
buffer.put(segment1Records.buffer.duplicate)
buffer.flip()
}).thenAnswer(_ => {
val buffer = bufferCapture.getValue
buffer.put(segment2Records.buffer.duplicate)
buffer.flip()
})
when(replicaManager.getLogEndOffset(groupTopicPartition)).thenReturn(Some(segment2End))
groupMetadataManager.loadGroupsAndOffsets(groupTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Stable, group.currentState)
assertEquals(segment2MemberId, group.leaderOrNull, "segment2 group record member should be elected")
assertEquals(Set(segment2MemberId), group.allMembers, "segment2 group record member should be only member")
// offsets of segment1 should be overridden by segment2 offsets of the same topic partitions
val committedOffsets = segment1Offsets ++ segment2Offsets
assertEquals(committedOffsets.size, group.allOffsets.size)
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
}
}
@Test
def testAddGroup(): Unit = {
val group = new GroupMetadata("foo", Empty, time)
assertEquals(group, groupMetadataManager.addGroup(group))
assertEquals(group, groupMetadataManager.addGroup(new GroupMetadata("foo", Empty, time)))
}
@Test
def testLoadGroupWithStaticMember(): Unit = {
val generation = 27
val protocolType = "consumer"
val staticMemberId = "staticMemberId"
val dynamicMemberId = "dynamicMemberId"
val staticMember = new MemberMetadata(staticMemberId, Some(groupInstanceId), "", "", rebalanceTimeout, sessionTimeout,
protocolType, List(("protocol", Array[Byte]())))
val dynamicMember = new MemberMetadata(dynamicMemberId, None, "", "", rebalanceTimeout, sessionTimeout,
protocolType, List(("protocol", Array[Byte]())))
val members = Seq(staticMember, dynamicMember)
val group = GroupMetadata.loadGroup(groupId, Empty, generation, protocolType, null, null, None, members, time)
assertTrue(group.is(Empty))
assertEquals(generation, group.generationId)
assertEquals(Some(protocolType), group.protocolType)
assertTrue(group.has(staticMemberId))
assertTrue(group.has(dynamicMemberId))
assertTrue(group.hasStaticMember(groupInstanceId))
assertEquals(Some(staticMemberId), group.currentStaticMemberId(groupInstanceId))
}
@Test
def testLoadConsumerGroup(): Unit = {
val generation = 27
val protocolType = "consumer"
val protocol = "protocol"
val memberId = "member1"
val topic = "foo"
val subscriptions = List(
("protocol", ConsumerProtocol.serializeSubscription(new Subscription(List(topic).asJava)).array())
)
val member = new MemberMetadata(memberId, Some(groupInstanceId), "", "", rebalanceTimeout,
sessionTimeout, protocolType, subscriptions)
val members = Seq(member)
val group = GroupMetadata.loadGroup(groupId, Stable, generation, protocolType, protocol, null, None,
members, time)
assertTrue(group.is(Stable))
assertEquals(generation, group.generationId)
assertEquals(Some(protocolType), group.protocolType)
assertEquals(protocol, group.protocolName.orNull)
assertEquals(Some(Set(topic)), group.getSubscribedTopics)
assertTrue(group.has(memberId))
}
@Test
def testLoadEmptyConsumerGroup(): Unit = {
val generation = 27
val protocolType = "consumer"
val group = GroupMetadata.loadGroup(groupId, Empty, generation, protocolType, null, null, None,
Seq(), time)
assertTrue(group.is(Empty))
assertEquals(generation, group.generationId)
assertEquals(Some(protocolType), group.protocolType)
assertNull(group.protocolName.orNull)
assertEquals(Some(Set.empty), group.getSubscribedTopics)
}
@Test
def testLoadConsumerGroupWithFaultyConsumerProtocol(): Unit = {
val generation = 27
val protocolType = "consumer"
val protocol = "protocol"
val memberId = "member1"
val subscriptions = List(("protocol", Array[Byte]()))
val member = new MemberMetadata(memberId, Some(groupInstanceId), "", "", rebalanceTimeout,
sessionTimeout, protocolType, subscriptions)
val members = Seq(member)
val group = GroupMetadata.loadGroup(groupId, Stable, generation, protocolType, protocol, null, None,
members, time)
assertTrue(group.is(Stable))
assertEquals(generation, group.generationId)
assertEquals(Some(protocolType), group.protocolType)
assertEquals(protocol, group.protocolName.orNull)
assertEquals(None, group.getSubscribedTopics)
assertTrue(group.has(memberId))
}
@Test
def testShouldThrowExceptionForUnsupportedGroupMetadataVersion(): Unit = {
val generation = 1
val protocol = "range"
val memberId = "memberId"
val unsupportedVersion = Short.MinValue
// put the unsupported version as the version value
val groupMetadataRecordValue = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId)
.value().putShort(unsupportedVersion)
// reset the position to 0 so that the value can be read from the beginning
groupMetadataRecordValue.position(0)
val e = assertThrows(classOf[IllegalStateException],
() => GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecordValue, time))
assertEquals(s"Unknown group metadata message version: $unsupportedVersion", e.getMessage)
}
@Test
def testCurrentStateTimestampForAllGroupMetadataVersions(): Unit = {
val generation = 1
val protocol = "range"
val memberId = "memberId"
for (apiVersion <- ApiVersion.allVersions) {
val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, apiVersion = apiVersion)
val deserializedGroupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecord.value(), time)
// GROUP_METADATA_VALUE_SCHEMA_V2 or higher should correctly set the currentStateTimestamp
if (apiVersion >= KAFKA_2_1_IV0)
assertEquals(Some(time.milliseconds()), deserializedGroupMetadata.currentStateTimestamp,
s"the apiVersion $apiVersion doesn't set the currentStateTimestamp correctly.")
else
assertTrue(deserializedGroupMetadata.currentStateTimestamp.isEmpty,
s"the apiVersion $apiVersion should not set the currentStateTimestamp.")
}
}
@Test
def testReadFromOldGroupMetadata(): Unit = {
val generation = 1
val protocol = "range"
val memberId = "memberId"
val oldApiVersions = Array(KAFKA_0_9_0, KAFKA_0_10_1_IV0, KAFKA_2_1_IV0)
for (apiVersion <- oldApiVersions) {
val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, apiVersion = apiVersion)
val deserializedGroupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecord.value(), time)
assertEquals(groupId, deserializedGroupMetadata.groupId)
assertEquals(generation, deserializedGroupMetadata.generationId)
assertEquals(protocolType, deserializedGroupMetadata.protocolType.get)
assertEquals(protocol, deserializedGroupMetadata.protocolName.orNull)
assertEquals(1, deserializedGroupMetadata.allMembers.size)
assertEquals(deserializedGroupMetadata.allMembers, deserializedGroupMetadata.allDynamicMembers)
assertTrue(deserializedGroupMetadata.allMembers.contains(memberId))
assertTrue(deserializedGroupMetadata.allStaticMembers.isEmpty)
}
}
@Test
def testStoreEmptyGroup(): Unit = {
val generation = 27
val protocolType = "consumer"
val group = GroupMetadata.loadGroup(groupId, Empty, generation, protocolType, null, null, None, Seq.empty, time)
groupMetadataManager.addGroup(group)
val capturedRecords = expectAppendMessage(Errors.NONE)
var maybeError: Option[Errors] = None
def callback(error: Errors): Unit = {
maybeError = Some(error)
}
groupMetadataManager.storeGroup(group, Map.empty, callback)
assertEquals(Some(Errors.NONE), maybeError)
val records = capturedRecords.getValue()(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId))
.records.asScala.toList
assertEquals(1, records.size)
val record = records.head
val groupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, record.value, time)
assertTrue(groupMetadata.is(Empty))
assertEquals(generation, groupMetadata.generationId)
assertEquals(Some(protocolType), groupMetadata.protocolType)
}
@Test
def testStoreEmptySimpleGroup(): Unit = {
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
val capturedRecords = expectAppendMessage(Errors.NONE)
var maybeError: Option[Errors] = None
def callback(error: Errors): Unit = {
maybeError = Some(error)
}
groupMetadataManager.storeGroup(group, Map.empty, callback)
assertEquals(Some(Errors.NONE), maybeError)
val records = capturedRecords.getValue()(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId))
.records.asScala.toList
assertEquals(1, records.size)
val record = records.head
val groupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, record.value, time)
assertTrue(groupMetadata.is(Empty))
assertEquals(0, groupMetadata.generationId)
assertEquals(None, groupMetadata.protocolType)
}
@Test
def testStoreGroupErrorMapping(): Unit = {
assertStoreGroupErrorMapping(Errors.NONE, Errors.NONE)
assertStoreGroupErrorMapping(Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.COORDINATOR_NOT_AVAILABLE)
assertStoreGroupErrorMapping(Errors.NOT_ENOUGH_REPLICAS, Errors.COORDINATOR_NOT_AVAILABLE)
assertStoreGroupErrorMapping(Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND, Errors.COORDINATOR_NOT_AVAILABLE)
assertStoreGroupErrorMapping(Errors.NOT_LEADER_OR_FOLLOWER, Errors.NOT_COORDINATOR)
assertStoreGroupErrorMapping(Errors.MESSAGE_TOO_LARGE, Errors.UNKNOWN_SERVER_ERROR)
assertStoreGroupErrorMapping(Errors.RECORD_LIST_TOO_LARGE, Errors.UNKNOWN_SERVER_ERROR)
assertStoreGroupErrorMapping(Errors.INVALID_FETCH_SIZE, Errors.UNKNOWN_SERVER_ERROR)
assertStoreGroupErrorMapping(Errors.CORRUPT_MESSAGE, Errors.CORRUPT_MESSAGE)
}
private def assertStoreGroupErrorMapping(appendError: Errors, expectedError: Errors): Unit = {
reset(replicaManager)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
expectAppendMessage(appendError)
var maybeError: Option[Errors] = None
def callback(error: Errors): Unit = {
maybeError = Some(error)
}
groupMetadataManager.storeGroup(group, Map.empty, callback)
assertEquals(Some(expectedError), maybeError)
verify(replicaManager).appendRecords(anyLong(),
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any(),
any(),
any[Option[ReentrantLock]],
any(),
any())
verify(replicaManager).getMagic(any())
}
@Test
def testStoreNonEmptyGroup(): Unit = {
val memberId = "memberId"
val clientId = "clientId"
val clientHost = "localhost"
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
val member = new MemberMetadata(memberId, Some(groupInstanceId), clientId, clientHost, rebalanceTimeout, sessionTimeout,
protocolType, List(("protocol", Array[Byte]())))
group.add(member, _ => ())
group.transitionTo(PreparingRebalance)
group.initNextGeneration()
expectAppendMessage(Errors.NONE)
var maybeError: Option[Errors] = None
def callback(error: Errors): Unit = {
maybeError = Some(error)
}
groupMetadataManager.storeGroup(group, Map(memberId -> Array[Byte]()), callback)
assertEquals(Some(Errors.NONE), maybeError)
verify(replicaManager).appendRecords(anyLong(),
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any(),
any(),
any[Option[ReentrantLock]],
any(),
any())
verify(replicaManager).getMagic(any())
}
@Test
def testStoreNonEmptyGroupWhenCoordinatorHasMoved(): Unit = {
when(replicaManager.getMagic(any())).thenReturn(None)
val memberId = "memberId"
val clientId = "clientId"
val clientHost = "localhost"
val group = new GroupMetadata(groupId, Empty, time)
val member = new MemberMetadata(memberId, Some(groupInstanceId), clientId, clientHost, rebalanceTimeout, sessionTimeout,
protocolType, List(("protocol", Array[Byte]())))
group.add(member, _ => ())
group.transitionTo(PreparingRebalance)
group.initNextGeneration()
var maybeError: Option[Errors] = None
def callback(error: Errors): Unit = {
maybeError = Some(error)
}
groupMetadataManager.storeGroup(group, Map(memberId -> Array[Byte]()), callback)
assertEquals(Some(Errors.NOT_COORDINATOR), maybeError)
verify(replicaManager).getMagic(any())
}
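// A successful offset commit should be appended to the log, reflected in the offset cache and
// counted by the offset-commit-count sensor.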
@Test
def testCommitOffset(): Unit = {
val memberId = ""
val topicPartition = new TopicPartition("foo", 0)
val offset = 37
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
val offsets = immutable.Map(topicPartition -> OffsetAndMetadata(offset, "", time.milliseconds()))
expectAppendMessage(Errors.NONE)
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count"))
groupMetadataManager.storeOffsets(group, memberId, offsets, callback)
assertTrue(group.hasOffsets)
assertFalse(commitErrors.isEmpty)
val maybeError = commitErrors.get.get(topicPartition)
assertEquals(Some(Errors.NONE), maybeError)
assertTrue(group.hasOffsets)
val cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition)))
val maybePartitionResponse = cachedOffsets.get(topicPartition)
assertFalse(maybePartitionResponse.isEmpty)
val partitionResponse = maybePartitionResponse.get
assertEquals(Errors.NONE, partitionResponse.error)
assertEquals(offset, partitionResponse.offset)
verify(replicaManager).appendRecords(anyLong(),
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any(),
any(),
any[Option[ReentrantLock]],
any(),
any())
// Will update sensor after commit
assertEquals(1, TestUtils.totalMetricValue(metrics, "offset-commit-count"))
}
@Test
def testTransactionalCommitOffsetCommitted(): Unit = {
val memberId = ""
val topicPartition = new TopicPartition("foo", 0)
val offset = 37
val producerId = 232L
val producerEpoch = 0.toShort
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
val offsetAndMetadata = OffsetAndMetadata(offset, "", time.milliseconds())
val offsets = immutable.Map(topicPartition -> offsetAndMetadata)
val capturedResponseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit])
when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
groupMetadataManager.storeOffsets(group, memberId, offsets, callback, producerId, producerEpoch)
assertTrue(group.hasOffsets)
assertTrue(group.allOffsets.isEmpty)
verify(replicaManager).appendRecords(anyLong(),
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any[Map[TopicPartition, MemoryRecords]],
capturedResponseCallback.capture(),
any[Option[ReentrantLock]],
any(),
any())
verify(replicaManager).getMagic(any())
capturedResponseCallback.getValue.apply(Map(groupTopicPartition ->
new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L)))
assertTrue(group.hasOffsets)
assertTrue(group.allOffsets.isEmpty)
group.completePendingTxnOffsetCommit(producerId, isCommit = true)
assertTrue(group.hasOffsets)
assertFalse(group.allOffsets.isEmpty)
assertEquals(Some(offsetAndMetadata), group.offset(topicPartition))
}
@Test
def testTransactionalCommitOffsetAppendFailure(): Unit = {
val memberId = ""
val topicPartition = new TopicPartition("foo", 0)
val offset = 37
val producerId = 232L
val producerEpoch = 0.toShort
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
val offsets = immutable.Map(topicPartition -> OffsetAndMetadata(offset, "", time.milliseconds()))
when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
groupMetadataManager.storeOffsets(group, memberId, offsets, callback, producerId, producerEpoch)
assertTrue(group.hasOffsets)
assertTrue(group.allOffsets.isEmpty)
val capturedResponseCallback = verifyAppendAndCaptureCallback()
capturedResponseCallback.getValue.apply(Map(groupTopicPartition ->
new PartitionResponse(Errors.NOT_ENOUGH_REPLICAS, 0L, RecordBatch.NO_TIMESTAMP, 0L)))
assertFalse(group.hasOffsets)
assertTrue(group.allOffsets.isEmpty)
group.completePendingTxnOffsetCommit(producerId, isCommit = false)
assertFalse(group.hasOffsets)
assertTrue(group.allOffsets.isEmpty)
verify(replicaManager).appendRecords(anyLong(),
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any[Map[TopicPartition, MemoryRecords]],
any(),
any[Option[ReentrantLock]],
any(),
any())
verify(replicaManager).getMagic(any())
}
@Test
def testTransactionalCommitOffsetAborted(): Unit = {
val memberId = ""
val topicPartition = new TopicPartition("foo", 0)
val offset = 37
val producerId = 232L
val producerEpoch = 0.toShort
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
val offsets = immutable.Map(topicPartition -> OffsetAndMetadata(offset, "", time.milliseconds()))
when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
groupMetadataManager.storeOffsets(group, memberId, offsets, callback, producerId, producerEpoch)
assertTrue(group.hasOffsets)
assertTrue(group.allOffsets.isEmpty)
val capturedResponseCallback = verifyAppendAndCaptureCallback()
capturedResponseCallback.getValue.apply(Map(groupTopicPartition ->
new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L)))
assertTrue(group.hasOffsets)
assertTrue(group.allOffsets.isEmpty)
group.completePendingTxnOffsetCommit(producerId, isCommit = false)
assertFalse(group.hasOffsets)
assertTrue(group.allOffsets.isEmpty)
verify(replicaManager).appendRecords(anyLong(),
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any[Map[TopicPartition, MemoryRecords]],
any(),
any[Option[ReentrantLock]],
any(),
any())
verify(replicaManager).getMagic(any())
}
@Test
def testCommitOffsetWhenCoordinatorHasMoved(): Unit = {
when(replicaManager.getMagic(any())).thenReturn(None)
val memberId = ""
val topicPartition = new TopicPartition("foo", 0)
val offset = 37
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
val offsets = immutable.Map(topicPartition -> OffsetAndMetadata(offset, "", time.milliseconds()))
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
groupMetadataManager.storeOffsets(group, memberId, offsets, callback)
assertFalse(commitErrors.isEmpty)
val maybeError = commitErrors.get.get(topicPartition)
assertEquals(Some(Errors.NOT_COORDINATOR), maybeError)
verify(replicaManager).getMagic(any())
}
@Test
def testCommitOffsetFailure(): Unit = {
assertCommitOffsetErrorMapping(Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.COORDINATOR_NOT_AVAILABLE)
assertCommitOffsetErrorMapping(Errors.NOT_ENOUGH_REPLICAS, Errors.COORDINATOR_NOT_AVAILABLE)
assertCommitOffsetErrorMapping(Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND, Errors.COORDINATOR_NOT_AVAILABLE)
assertCommitOffsetErrorMapping(Errors.NOT_LEADER_OR_FOLLOWER, Errors.NOT_COORDINATOR)
assertCommitOffsetErrorMapping(Errors.MESSAGE_TOO_LARGE, Errors.INVALID_COMMIT_OFFSET_SIZE)
assertCommitOffsetErrorMapping(Errors.RECORD_LIST_TOO_LARGE, Errors.INVALID_COMMIT_OFFSET_SIZE)
assertCommitOffsetErrorMapping(Errors.INVALID_FETCH_SIZE, Errors.INVALID_COMMIT_OFFSET_SIZE)
assertCommitOffsetErrorMapping(Errors.CORRUPT_MESSAGE, Errors.CORRUPT_MESSAGE)
}
private def assertCommitOffsetErrorMapping(appendError: Errors, expectedError: Errors): Unit = {
reset(replicaManager)
val memberId = ""
val topicPartition = new TopicPartition("foo", 0)
val offset = 37
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
val offsets = immutable.Map(topicPartition -> OffsetAndMetadata(offset, "", time.milliseconds()))
when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count"))
groupMetadataManager.storeOffsets(group, memberId, offsets, callback)
assertTrue(group.hasOffsets)
val capturedResponseCallback = verifyAppendAndCaptureCallback()
capturedResponseCallback.getValue.apply(Map(groupTopicPartition ->
new PartitionResponse(appendError, 0L, RecordBatch.NO_TIMESTAMP, 0L)))
assertFalse(commitErrors.isEmpty)
val maybeError = commitErrors.get.get(topicPartition)
assertEquals(Some(expectedError), maybeError)
assertFalse(group.hasOffsets)
val cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition).map(_.offset))
verify(replicaManager).getMagic(any())
// The sensor should not be updated when the commit fails
assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count"))
}
@Test
def testCommitOffsetPartialFailure(): Unit = {
val memberId = ""
val topicPartition = new TopicPartition("foo", 0)
val topicPartitionFailed = new TopicPartition("foo", 1)
val offset = 37
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
val offsets = immutable.Map(
topicPartition -> OffsetAndMetadata(offset, "", time.milliseconds()),
// This commit will fail because its metadata exceeds maxMetadataSize
topicPartitionFailed -> OffsetAndMetadata(offset, "s" * (offsetConfig.maxMetadataSize + 1) , time.milliseconds())
)
when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count"))
groupMetadataManager.storeOffsets(group, memberId, offsets, callback)
assertTrue(group.hasOffsets)
val capturedResponseCallback = verifyAppendAndCaptureCallback()
capturedResponseCallback.getValue.apply(Map(groupTopicPartition ->
new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L)))
assertFalse(commitErrors.isEmpty)
assertEquals(Some(Errors.NONE), commitErrors.get.get(topicPartition))
assertEquals(Some(Errors.OFFSET_METADATA_TOO_LARGE), commitErrors.get.get(topicPartitionFailed))
assertTrue(group.hasOffsets)
val cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition, topicPartitionFailed)))
assertEquals(Some(offset), cachedOffsets.get(topicPartition).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartitionFailed).map(_.offset))
verify(replicaManager).appendRecords(anyLong(),
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any[Map[TopicPartition, MemoryRecords]],
any(),
any[Option[ReentrantLock]],
any(),
any())
verify(replicaManager).getMagic(any())
assertEquals(1, TestUtils.totalMetricValue(metrics, "offset-commit-count"))
}
@Test
def testOffsetMetadataTooLarge(): Unit = {
val memberId = ""
val topicPartition = new TopicPartition("foo", 0)
val offset = 37
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
val offsets = immutable.Map(
topicPartition -> OffsetAndMetadata(offset, "s" * (offsetConfig.maxMetadataSize + 1) , time.milliseconds())
)
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count"))
groupMetadataManager.storeOffsets(group, memberId, offsets, callback)
assertFalse(group.hasOffsets)
assertFalse(commitErrors.isEmpty)
val maybeError = commitErrors.get.get(topicPartition)
assertEquals(Some(Errors.OFFSET_METADATA_TOO_LARGE), maybeError)
assertFalse(group.hasOffsets)
val cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition).map(_.offset))
assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count"))
}
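// Offsets with an explicit expiry timestamp should be removed by cleanupGroupMetadata once their
// expiry time has passed, while unexpired offsets remain cached.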
@Test
def testExpireOffset(): Unit = {
val memberId = ""
val topicPartition1 = new TopicPartition("foo", 0)
val topicPartition2 = new TopicPartition("foo", 1)
val offset = 37
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
// expire the offsets after 1 and 3 milliseconds respectively
val startMs = time.milliseconds
val offsets = immutable.Map(
topicPartition1 -> OffsetAndMetadata(offset, "", startMs, startMs + 1),
topicPartition2 -> OffsetAndMetadata(offset, "", startMs, startMs + 3))
mockGetPartition()
expectAppendMessage(Errors.NONE)
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
groupMetadataManager.storeOffsets(group, memberId, offsets, callback)
assertTrue(group.hasOffsets)
assertFalse(commitErrors.isEmpty)
assertEquals(Some(Errors.NONE), commitErrors.get.get(topicPartition1))
// expire only one of the offsets
time.sleep(2)
when(partition.appendRecordsToLeader(any[MemoryRecords],
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator), requiredAcks = anyInt(),
any())).thenReturn(LogAppendInfo.UnknownLogAppendInfo)
groupMetadataManager.cleanupGroupMetadata()
assertEquals(Some(group), groupMetadataManager.getGroup(groupId))
assertEquals(None, group.offset(topicPartition1))
assertEquals(Some(offset), group.offset(topicPartition2).map(_.offset))
val cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1, topicPartition2)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition1).map(_.offset))
assertEquals(Some(offset), cachedOffsets.get(topicPartition2).map(_.offset))
verify(replicaManager).appendRecords(anyLong(),
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any(),
any(),
any[Option[ReentrantLock]],
any(),
any())
verify(replicaManager, times(2)).getMagic(any())
}
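// Cleaning up an empty group with a non-zero generation should write a group metadata tombstone
// and remove the group from the cache.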
@Test
def testGroupMetadataRemoval(): Unit = {
val topicPartition1 = new TopicPartition("foo", 0)
val topicPartition2 = new TopicPartition("foo", 1)
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
group.generationId = 5
// expect the group metadata tombstone
val recordsCapture: ArgumentCaptor[MemoryRecords] = ArgumentCaptor.forClass(classOf[MemoryRecords])
when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
mockGetPartition()
when(partition.appendRecordsToLeader(recordsCapture.capture(),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator), requiredAcks = anyInt(),
any())).thenReturn(LogAppendInfo.UnknownLogAppendInfo)
groupMetadataManager.cleanupGroupMetadata()
val records = recordsCapture.getValue.records.asScala.toList
recordsCapture.getValue.batches.forEach { batch =>
assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, batch.magic)
assertEquals(TimestampType.CREATE_TIME, batch.timestampType)
}
assertEquals(1, records.size)
val metadataTombstone = records.head
assertTrue(metadataTombstone.hasKey)
assertFalse(metadataTombstone.hasValue)
assertTrue(metadataTombstone.timestamp > 0)
val groupKey = GroupMetadataManager.readMessageKey(metadataTombstone.key).asInstanceOf[GroupMetadataKey]
assertEquals(groupId, groupKey.key)
// the full group should be gone since all offsets were removed
assertEquals(None, groupMetadataManager.getGroup(groupId))
val cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1, topicPartition2)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition1).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition2).map(_.offset))
}
@Test
def testGroupMetadataRemovalWithLogAppendTime(): Unit = {
val topicPartition1 = new TopicPartition("foo", 0)
val topicPartition2 = new TopicPartition("foo", 1)
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
group.generationId = 5
// expect the group metadata tombstone
val recordsCapture: ArgumentCaptor[MemoryRecords] = ArgumentCaptor.forClass(classOf[MemoryRecords])
when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
mockGetPartition()
when(partition.appendRecordsToLeader(recordsCapture.capture(),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator), requiredAcks = anyInt(),
any())).thenReturn(LogAppendInfo.UnknownLogAppendInfo)
groupMetadataManager.cleanupGroupMetadata()
val records = recordsCapture.getValue.records.asScala.toList
recordsCapture.getValue.batches.forEach { batch =>
assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, batch.magic)
// Use CREATE_TIME, like the producer. The conversion to LOG_APPEND_TIME (if necessary) happens automatically.
assertEquals(TimestampType.CREATE_TIME, batch.timestampType)
}
assertEquals(1, records.size)
val metadataTombstone = records.head
assertTrue(metadataTombstone.hasKey)
assertFalse(metadataTombstone.hasValue)
assertTrue(metadataTombstone.timestamp > 0)
val groupKey = GroupMetadataManager.readMessageKey(metadataTombstone.key).asInstanceOf[GroupMetadataKey]
assertEquals(groupId, groupKey.key)
// the full group should be gone since all offsets were removed
assertEquals(None, groupMetadataManager.getGroup(groupId))
val cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1, topicPartition2)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition1).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition2).map(_.offset))
}
@Test
def testExpireGroupWithOffsetsOnly(): Unit = {
// verify that the group is removed properly, but no tombstone is written if
// this is a group which is only using kafka for offset storage
val memberId = ""
val topicPartition1 = new TopicPartition("foo", 0)
val topicPartition2 = new TopicPartition("foo", 1)
val offset = 37
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
// expire the offsets after 1 and 3 milliseconds respectively
val startMs = time.milliseconds
val offsets = immutable.Map(
topicPartition1 -> OffsetAndMetadata(offset, Optional.empty(), "", startMs, Some(startMs + 1)),
topicPartition2 -> OffsetAndMetadata(offset, "", startMs, startMs + 3))
mockGetPartition()
expectAppendMessage(Errors.NONE)
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
groupMetadataManager.storeOffsets(group, memberId, offsets, callback)
assertTrue(group.hasOffsets)
assertFalse(commitErrors.isEmpty)
assertEquals(Some(Errors.NONE), commitErrors.get.get(topicPartition1))
// expire all of the offsets
time.sleep(4)
// expect the offset tombstone
val recordsCapture: ArgumentCaptor[MemoryRecords] = ArgumentCaptor.forClass(classOf[MemoryRecords])
when(partition.appendRecordsToLeader(recordsCapture.capture(),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator), requiredAcks = anyInt(),
any())).thenReturn(LogAppendInfo.UnknownLogAppendInfo)
groupMetadataManager.cleanupGroupMetadata()
// verify the tombstones are correct and only for the expired offsets
val records = recordsCapture.getValue.records.asScala.toList
assertEquals(2, records.size)
records.foreach { message =>
assertTrue(message.hasKey)
assertFalse(message.hasValue)
val offsetKey = GroupMetadataManager.readMessageKey(message.key).asInstanceOf[OffsetKey]
assertEquals(groupId, offsetKey.key.group)
assertEquals("foo", offsetKey.key.topicPartition.topic)
}
// the full group should be gone since all offsets were removed
assertEquals(None, groupMetadataManager.getGroup(groupId))
val cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1, topicPartition2)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition1).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition2).map(_.offset))
verify(replicaManager).onlinePartition(groupTopicPartition)
}
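// Exercises the offset expiration rules: offsets are not expired while the group is active,
// old-client offsets expire at their explicit expiry timestamp, and new-client offsets expire
// only after the default retention period once the group becomes empty.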
@Test
def testOffsetExpirationSemantics(): Unit = {
val memberId = "memberId"
val clientId = "clientId"
val clientHost = "localhost"
val topic = "foo"
val topicPartition1 = new TopicPartition(topic, 0)
val topicPartition2 = new TopicPartition(topic, 1)
val topicPartition3 = new TopicPartition(topic, 2)
val offset = 37
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
val subscription = new Subscription(List(topic).asJava)
val member = new MemberMetadata(memberId, Some(groupInstanceId), clientId, clientHost, rebalanceTimeout, sessionTimeout,
protocolType, List(("protocol", ConsumerProtocol.serializeSubscription(subscription).array())))
group.add(member, _ => ())
group.transitionTo(PreparingRebalance)
group.initNextGeneration()
val startMs = time.milliseconds
// old clients, expiry timestamp is explicitly set
val tp1OffsetAndMetadata = OffsetAndMetadata(offset, "", startMs, startMs + 1)
val tp2OffsetAndMetadata = OffsetAndMetadata(offset, "", startMs, startMs + 3)
// new clients, no per-partition expiry timestamp, offsets of group expire together
val tp3OffsetAndMetadata = OffsetAndMetadata(offset, "", startMs)
val offsets = immutable.Map(
topicPartition1 -> tp1OffsetAndMetadata,
topicPartition2 -> tp2OffsetAndMetadata,
topicPartition3 -> tp3OffsetAndMetadata)
mockGetPartition()
expectAppendMessage(Errors.NONE)
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
groupMetadataManager.storeOffsets(group, memberId, offsets, callback)
assertTrue(group.hasOffsets)
assertFalse(commitErrors.isEmpty)
assertEquals(Some(Errors.NONE), commitErrors.get.get(topicPartition1))
// do not expire any offset even though expiration timestamp is reached for one (due to group still being active)
time.sleep(2)
groupMetadataManager.cleanupGroupMetadata()
// group and offsets should still be there
assertEquals(Some(group), groupMetadataManager.getGroup(groupId))
assertEquals(Some(tp1OffsetAndMetadata), group.offset(topicPartition1))
assertEquals(Some(tp2OffsetAndMetadata), group.offset(topicPartition2))
assertEquals(Some(tp3OffsetAndMetadata), group.offset(topicPartition3))
var cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1, topicPartition2, topicPartition3)))
assertEquals(Some(offset), cachedOffsets.get(topicPartition1).map(_.offset))
assertEquals(Some(offset), cachedOffsets.get(topicPartition2).map(_.offset))
assertEquals(Some(offset), cachedOffsets.get(topicPartition3).map(_.offset))
verify(replicaManager).onlinePartition(groupTopicPartition)
group.transitionTo(PreparingRebalance)
group.transitionTo(Empty)
// expect the offset tombstone
when(partition.appendRecordsToLeader(any[MemoryRecords],
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator), requiredAcks = anyInt(),
any())).thenReturn(LogAppendInfo.UnknownLogAppendInfo)
groupMetadataManager.cleanupGroupMetadata()
// group is empty now, only one offset should expire
assertEquals(Some(group), groupMetadataManager.getGroup(groupId))
assertEquals(None, group.offset(topicPartition1))
assertEquals(Some(tp2OffsetAndMetadata), group.offset(topicPartition2))
assertEquals(Some(tp3OffsetAndMetadata), group.offset(topicPartition3))
cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1, topicPartition2, topicPartition3)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition1).map(_.offset))
assertEquals(Some(offset), cachedOffsets.get(topicPartition2).map(_.offset))
assertEquals(Some(offset), cachedOffsets.get(topicPartition3).map(_.offset))
verify(replicaManager, times(2)).onlinePartition(groupTopicPartition)
time.sleep(2)
// expect the offset tombstone
when(partition.appendRecordsToLeader(any[MemoryRecords],
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator), requiredAcks = anyInt(),
any())).thenReturn(LogAppendInfo.UnknownLogAppendInfo)
groupMetadataManager.cleanupGroupMetadata()
// one more offset should expire
assertEquals(Some(group), groupMetadataManager.getGroup(groupId))
assertEquals(None, group.offset(topicPartition1))
assertEquals(None, group.offset(topicPartition2))
assertEquals(Some(tp3OffsetAndMetadata), group.offset(topicPartition3))
cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1, topicPartition2, topicPartition3)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition1).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition2).map(_.offset))
assertEquals(Some(offset), cachedOffsets.get(topicPartition3).map(_.offset))
verify(replicaManager, times(3)).onlinePartition(groupTopicPartition)
    // advance time to just before the last partition's offset is due to expire; no offsets should expire yet
time.sleep(group.currentStateTimestamp.get + defaultOffsetRetentionMs - time.milliseconds() - 1)
groupMetadataManager.cleanupGroupMetadata()
    // no additional offsets should have expired
assertEquals(Some(group), groupMetadataManager.getGroup(groupId))
assertEquals(None, group.offset(topicPartition1))
assertEquals(None, group.offset(topicPartition2))
assertEquals(Some(tp3OffsetAndMetadata), group.offset(topicPartition3))
cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1, topicPartition2, topicPartition3)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition1).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition2).map(_.offset))
assertEquals(Some(offset), cachedOffsets.get(topicPartition3).map(_.offset))
verify(replicaManager, times(4)).onlinePartition(groupTopicPartition)
// advance time enough for that last offset to expire
time.sleep(2)
// expect the offset tombstone
when(partition.appendRecordsToLeader(any[MemoryRecords],
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator), requiredAcks = anyInt(),
any())).thenReturn(LogAppendInfo.UnknownLogAppendInfo)
groupMetadataManager.cleanupGroupMetadata()
// group and all its offsets should be gone now
assertEquals(None, groupMetadataManager.getGroup(groupId))
assertEquals(None, group.offset(topicPartition1))
assertEquals(None, group.offset(topicPartition2))
assertEquals(None, group.offset(topicPartition3))
cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1, topicPartition2, topicPartition3)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition1).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition2).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition3).map(_.offset))
verify(replicaManager, times(5)).onlinePartition(groupTopicPartition)
assert(group.is(Dead))
}
@Test
def testOffsetExpirationOfSimpleConsumer(): Unit = {
val memberId = "memberId"
val topic = "foo"
val topicPartition1 = new TopicPartition(topic, 0)
val offset = 37
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
    // commit a single offset with no explicit expiry timestamp; it should expire once the
    // default retention period has elapsed since the commit timestamp
    val startMs = time.milliseconds
    val tp1OffsetAndMetadata = OffsetAndMetadata(offset, "", startMs)
val offsets = immutable.Map(
topicPartition1 -> tp1OffsetAndMetadata)
mockGetPartition()
expectAppendMessage(Errors.NONE)
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
groupMetadataManager.storeOffsets(group, memberId, offsets, callback)
assertTrue(group.hasOffsets)
assertFalse(commitErrors.isEmpty)
assertEquals(Some(Errors.NONE), commitErrors.get.get(topicPartition1))
    // do not expire the offset while still within the retention period from the commit timestamp
val expiryTimestamp = offsets(topicPartition1).commitTimestamp + defaultOffsetRetentionMs
time.sleep(expiryTimestamp - time.milliseconds() - 1)
groupMetadataManager.cleanupGroupMetadata()
// group and offsets should still be there
assertEquals(Some(group), groupMetadataManager.getGroup(groupId))
assertEquals(Some(tp1OffsetAndMetadata), group.offset(topicPartition1))
var cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1)))
assertEquals(Some(offset), cachedOffsets.get(topicPartition1).map(_.offset))
verify(replicaManager).onlinePartition(groupTopicPartition)
    // advance time enough for the offset to expire
time.sleep(2)
// expect the offset tombstone
when(partition.appendRecordsToLeader(any[MemoryRecords],
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator), requiredAcks = anyInt(),
any())).thenReturn(LogAppendInfo.UnknownLogAppendInfo)
groupMetadataManager.cleanupGroupMetadata()
// group and all its offsets should be gone now
assertEquals(None, groupMetadataManager.getGroup(groupId))
assertEquals(None, group.offset(topicPartition1))
cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition1).map(_.offset))
verify(replicaManager, times(2)).onlinePartition(groupTopicPartition)
assert(group.is(Dead))
}
@Test
def testOffsetExpirationOfActiveGroupSemantics(): Unit = {
val memberId = "memberId"
val clientId = "clientId"
val clientHost = "localhost"
val topic1 = "foo"
val topic1Partition0 = new TopicPartition(topic1, 0)
val topic1Partition1 = new TopicPartition(topic1, 1)
val topic2 = "bar"
val topic2Partition0 = new TopicPartition(topic2, 0)
val topic2Partition1 = new TopicPartition(topic2, 1)
val offset = 37
groupMetadataManager.addPartitionOwnership(groupPartitionId)
val group = new GroupMetadata(groupId, Empty, time)
groupMetadataManager.addGroup(group)
// Subscribe to topic1 and topic2
val subscriptionTopic1AndTopic2 = new Subscription(List(topic1, topic2).asJava)
val member = new MemberMetadata(
memberId,
Some(groupInstanceId),
clientId,
clientHost,
rebalanceTimeout,
sessionTimeout,
ConsumerProtocol.PROTOCOL_TYPE,
List(("protocol", ConsumerProtocol.serializeSubscription(subscriptionTopic1AndTopic2).array()))
)
group.add(member, _ => ())
group.transitionTo(PreparingRebalance)
group.initNextGeneration()
group.transitionTo(Stable)
val startMs = time.milliseconds
val t1p0OffsetAndMetadata = OffsetAndMetadata(offset, "", startMs)
val t1p1OffsetAndMetadata = OffsetAndMetadata(offset, "", startMs)
val t2p0OffsetAndMetadata = OffsetAndMetadata(offset, "", startMs)
val t2p1OffsetAndMetadata = OffsetAndMetadata(offset, "", startMs)
val offsets = immutable.Map(
topic1Partition0 -> t1p0OffsetAndMetadata,
topic1Partition1 -> t1p1OffsetAndMetadata,
topic2Partition0 -> t2p0OffsetAndMetadata,
topic2Partition1 -> t2p1OffsetAndMetadata)
mockGetPartition()
expectAppendMessage(Errors.NONE)
var commitErrors: Option[immutable.Map[TopicPartition, Errors]] = None
def callback(errors: immutable.Map[TopicPartition, Errors]): Unit = {
commitErrors = Some(errors)
}
groupMetadataManager.storeOffsets(group, memberId, offsets, callback)
assertTrue(group.hasOffsets)
assertFalse(commitErrors.isEmpty)
assertEquals(Some(Errors.NONE), commitErrors.get.get(topic1Partition0))
    // advance time to just past the point where the offsets would normally have expired
time.sleep(defaultOffsetRetentionMs + 2)
// no offset should expire because all topics are actively consumed
groupMetadataManager.cleanupGroupMetadata()
assertEquals(Some(group), groupMetadataManager.getGroup(groupId))
assert(group.is(Stable))
assertEquals(Some(t1p0OffsetAndMetadata), group.offset(topic1Partition0))
assertEquals(Some(t1p1OffsetAndMetadata), group.offset(topic1Partition1))
assertEquals(Some(t2p0OffsetAndMetadata), group.offset(topic2Partition0))
assertEquals(Some(t2p1OffsetAndMetadata), group.offset(topic2Partition1))
var cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topic1Partition0, topic1Partition1, topic2Partition0, topic2Partition1)))
assertEquals(Some(offset), cachedOffsets.get(topic1Partition0).map(_.offset))
assertEquals(Some(offset), cachedOffsets.get(topic1Partition1).map(_.offset))
assertEquals(Some(offset), cachedOffsets.get(topic2Partition0).map(_.offset))
assertEquals(Some(offset), cachedOffsets.get(topic2Partition1).map(_.offset))
verify(replicaManager).onlinePartition(groupTopicPartition)
group.transitionTo(PreparingRebalance)
// Subscribe to topic1, offsets of topic2 should be removed
val subscriptionTopic1 = new Subscription(List(topic1).asJava)
group.updateMember(
member,
List(("protocol", ConsumerProtocol.serializeSubscription(subscriptionTopic1).array())),
null
)
group.initNextGeneration()
group.transitionTo(Stable)
// expect the offset tombstone
when(partition.appendRecordsToLeader(any[MemoryRecords],
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator), requiredAcks = anyInt(),
any())).thenReturn(LogAppendInfo.UnknownLogAppendInfo)
groupMetadataManager.cleanupGroupMetadata()
verify(partition).appendRecordsToLeader(any[MemoryRecords],
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator), requiredAcks = anyInt(),
any())
verify(replicaManager, times(2)).onlinePartition(groupTopicPartition)
assertEquals(Some(group), groupMetadataManager.getGroup(groupId))
assert(group.is(Stable))
assertEquals(Some(t1p0OffsetAndMetadata), group.offset(topic1Partition0))
assertEquals(Some(t1p1OffsetAndMetadata), group.offset(topic1Partition1))
assertEquals(None, group.offset(topic2Partition0))
assertEquals(None, group.offset(topic2Partition1))
cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topic1Partition0, topic1Partition1, topic2Partition0, topic2Partition1)))
assertEquals(Some(offset), cachedOffsets.get(topic1Partition0).map(_.offset))
assertEquals(Some(offset), cachedOffsets.get(topic1Partition1).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topic2Partition0).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topic2Partition1).map(_.offset))
}
@Test
def testLoadOffsetFromOldCommit(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val generation = 935
val protocolType = "consumer"
val protocol = "range"
val startOffset = 15L
val groupEpoch = 2
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val apiVersion = KAFKA_1_1_IV0
val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets, apiVersion = apiVersion, retentionTimeOpt = Some(100))
val memberId = "98098230493"
val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, apiVersion = apiVersion)
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE,
(offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*)
expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Stable, group.currentState)
assertEquals(memberId, group.leaderOrNull)
assertEquals(generation, group.generationId)
assertEquals(Some(protocolType), group.protocolType)
assertEquals(protocol, group.protocolName.orNull)
assertEquals(Set(memberId), group.allMembers)
assertEquals(committedOffsets.size, group.allOffsets.size)
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
assertTrue(group.offset(topicPartition).map(_.expireTimestamp).get.nonEmpty)
}
}
@Test
def testLoadOffsetWithExplicitRetention(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val generation = 935
val protocolType = "consumer"
val protocol = "range"
val startOffset = 15L
val groupEpoch = 2
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets, retentionTimeOpt = Some(100))
val memberId = "98098230493"
val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId)
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE,
(offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*)
expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records)
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Stable, group.currentState)
assertEquals(memberId, group.leaderOrNull)
assertEquals(generation, group.generationId)
assertEquals(Some(protocolType), group.protocolType)
assertEquals(protocol, group.protocolName.orNull)
assertEquals(Set(memberId), group.allMembers)
assertEquals(committedOffsets.size, group.allOffsets.size)
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
assertTrue(group.offset(topicPartition).map(_.expireTimestamp).get.nonEmpty)
}
}
@Test
def testSerdeOffsetCommitValue(): Unit = {
val offsetAndMetadata = OffsetAndMetadata(
offset = 537L,
leaderEpoch = Optional.of(15),
metadata = "metadata",
commitTimestamp = time.milliseconds(),
expireTimestamp = None)
def verifySerde(apiVersion: ApiVersion, expectedOffsetCommitValueVersion: Int): Unit = {
val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, apiVersion)
val buffer = ByteBuffer.wrap(bytes)
assertEquals(expectedOffsetCommitValueVersion, buffer.getShort(0).toInt)
val deserializedOffsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer)
assertEquals(offsetAndMetadata.offset, deserializedOffsetAndMetadata.offset)
assertEquals(offsetAndMetadata.metadata, deserializedOffsetAndMetadata.metadata)
assertEquals(offsetAndMetadata.commitTimestamp, deserializedOffsetAndMetadata.commitTimestamp)
// Serialization drops the leader epoch silently if an older inter-broker protocol is in use
val expectedLeaderEpoch = if (expectedOffsetCommitValueVersion >= 3)
offsetAndMetadata.leaderEpoch
else
Optional.empty()
assertEquals(expectedLeaderEpoch, deserializedOffsetAndMetadata.leaderEpoch)
}
for (version <- ApiVersion.allVersions) {
val expectedSchemaVersion = version match {
case v if v < KAFKA_2_1_IV0 => 1
case v if v < KAFKA_2_1_IV1 => 2
case _ => 3
}
verifySerde(version, expectedSchemaVersion)
}
}
@Test
def testSerdeOffsetCommitValueWithExpireTimestamp(): Unit = {
// If expire timestamp is set, we should always use version 1 of the offset commit
// value schema since later versions do not support it
val offsetAndMetadata = OffsetAndMetadata(
offset = 537L,
leaderEpoch = Optional.empty(),
metadata = "metadata",
commitTimestamp = time.milliseconds(),
expireTimestamp = Some(time.milliseconds() + 1000))
def verifySerde(apiVersion: ApiVersion): Unit = {
val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, apiVersion)
val buffer = ByteBuffer.wrap(bytes)
assertEquals(1, buffer.getShort(0).toInt)
val deserializedOffsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer)
assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata)
}
for (version <- ApiVersion.allVersions)
verifySerde(version)
}
@Test
def testSerdeOffsetCommitValueWithNoneExpireTimestamp(): Unit = {
val offsetAndMetadata = OffsetAndMetadata(
offset = 537L,
leaderEpoch = Optional.empty(),
metadata = "metadata",
commitTimestamp = time.milliseconds(),
expireTimestamp = None)
def verifySerde(apiVersion: ApiVersion): Unit = {
val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, apiVersion)
val buffer = ByteBuffer.wrap(bytes)
val version = buffer.getShort(0).toInt
if (apiVersion < KAFKA_2_1_IV0)
assertEquals(1, version)
else if (apiVersion < KAFKA_2_1_IV1)
assertEquals(2, version)
else
assertEquals(3, version)
val deserializedOffsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer)
assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata)
}
for (version <- ApiVersion.allVersions)
verifySerde(version)
}
@Test
def testLoadOffsetsWithEmptyControlBatch(): Unit = {
val groupMetadataTopicPartition = groupTopicPartition
val startOffset = 15L
val generation = 15
val groupEpoch = 2
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets)
val groupMetadataRecord = buildEmptyGroupRecord(generation, protocolType)
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE,
(offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*)
// Prepend empty control batch to valid records
val mockBatch: MutableRecordBatch = mock(classOf[MutableRecordBatch])
when(mockBatch.iterator).thenReturn(Collections.emptyIterator[Record])
when(mockBatch.isControlBatch).thenReturn(true)
when(mockBatch.isTransactional).thenReturn(true)
when(mockBatch.nextOffset).thenReturn(16L)
val mockRecords: MemoryRecords = mock(classOf[MemoryRecords])
when(mockRecords.batches).thenReturn((Iterable[MutableRecordBatch](mockBatch) ++ records.batches.asScala).asJava)
when(mockRecords.records).thenReturn(records.records())
when(mockRecords.sizeInBytes()).thenReturn(DefaultRecordBatch.RECORD_BATCH_OVERHEAD + records.sizeInBytes())
val logMock: UnifiedLog = mock(classOf[UnifiedLog])
when(logMock.logStartOffset).thenReturn(startOffset)
when(logMock.read(ArgumentMatchers.eq(startOffset),
maxLength = anyInt(),
isolation = ArgumentMatchers.eq(FetchLogEnd),
minOneMessage = ArgumentMatchers.eq(true)))
.thenReturn(FetchDataInfo(LogOffsetMetadata(startOffset), mockRecords))
when(replicaManager.getLog(groupMetadataTopicPartition)).thenReturn(Some(logMock))
when(replicaManager.getLogEndOffset(groupMetadataTopicPartition)).thenReturn(Some[Long](18))
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L)
// Empty control batch should not have caused the load to fail
val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache"))
assertEquals(groupId, group.groupId)
assertEquals(Empty, group.currentState)
assertEquals(generation, group.generationId)
assertEquals(Some(protocolType), group.protocolType)
assertNull(group.leaderOrNull)
assertNull(group.protocolName.orNull)
committedOffsets.foreach { case (topicPartition, offset) =>
assertEquals(Some(offset), group.offset(topicPartition).map(_.offset))
}
}
@Test
def testCommittedOffsetParsing(): Unit = {
val groupId = "group"
val topicPartition = new TopicPartition("topic", 0)
val offsetCommitRecord = TestUtils.records(Seq(
new SimpleRecord(
GroupMetadataManager.offsetCommitKey(groupId, topicPartition),
GroupMetadataManager.offsetCommitValue(OffsetAndMetadata(35L, "", time.milliseconds()), ApiVersion.latestVersion)
)
)).records.asScala.head
val (keyStringOpt, valueStringOpt) = GroupMetadataManager.formatRecordKeyAndValue(offsetCommitRecord)
assertEquals(Some(s"offset_commit::group=$groupId,partition=$topicPartition"), keyStringOpt)
assertEquals(Some("offset=35"), valueStringOpt)
}
@Test
def testCommittedOffsetTombstoneParsing(): Unit = {
val groupId = "group"
val topicPartition = new TopicPartition("topic", 0)
val offsetCommitRecord = TestUtils.records(Seq(
new SimpleRecord(GroupMetadataManager.offsetCommitKey(groupId, topicPartition), null)
)).records.asScala.head
val (keyStringOpt, valueStringOpt) = GroupMetadataManager.formatRecordKeyAndValue(offsetCommitRecord)
assertEquals(Some(s"offset_commit::group=$groupId,partition=$topicPartition"), keyStringOpt)
assertEquals(Some("<DELETE>"), valueStringOpt)
}
@Test
def testGroupMetadataParsingWithNullUserData(): Unit = {
val generation = 935
val protocolType = "consumer"
val protocol = "range"
val memberId = "98098230493"
val assignmentBytes = Utils.toArray(ConsumerProtocol.serializeAssignment(
new ConsumerPartitionAssignor.Assignment(List(new TopicPartition("topic", 0)).asJava, null)
))
val groupMetadataRecord = TestUtils.records(Seq(
buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, assignmentBytes)
)).records.asScala.head
val (keyStringOpt, valueStringOpt) = GroupMetadataManager.formatRecordKeyAndValue(groupMetadataRecord)
assertEquals(Some(s"group_metadata::group=$groupId"), keyStringOpt)
    assertEquals(Some("{\"protocolType\":\"consumer\",\"protocol\":\"range\"," +
      "\"generationId\":935,\"assignment\":\"{98098230493=[topic-0]}\"}"), valueStringOpt)
}
@Test
def testGroupMetadataTombstoneParsing(): Unit = {
val groupId = "group"
val groupMetadataRecord = TestUtils.records(Seq(
new SimpleRecord(GroupMetadataManager.groupMetadataKey(groupId), null)
)).records.asScala.head
val (keyStringOpt, valueStringOpt) = GroupMetadataManager.formatRecordKeyAndValue(groupMetadataRecord)
assertEquals(Some(s"group_metadata::group=$groupId"), keyStringOpt)
assertEquals(Some("<DELETE>"), valueStringOpt)
}
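  // Verifies that ReplicaManager.appendRecords was invoked for the offsets topic and returns a
  // captor holding the append-completion callback that the group metadata manager passed in.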
private def verifyAppendAndCaptureCallback(): ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = {
val capturedArgument: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit])
verify(replicaManager).appendRecords(anyLong(),
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any[Map[TopicPartition, MemoryRecords]],
capturedArgument.capture(),
any[Option[ReentrantLock]],
any(),
any())
capturedArgument
}
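  // Stubs ReplicaManager.appendRecords to complete immediately by invoking the append callback
  // with the given error for the group's offsets topic partition, and returns a captor for the
  // records that were appended.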
private def expectAppendMessage(error: Errors): ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = {
val capturedCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit])
val capturedRecords: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]])
when(replicaManager.appendRecords(anyLong(),
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
capturedRecords.capture(),
capturedCallback.capture(),
any[Option[ReentrantLock]],
any(),
any())
).thenAnswer(_ => {
capturedCallback.getValue.apply(
Map(groupTopicPartition ->
new PartitionResponse(error, 0L, RecordBatch.NO_TIMESTAMP, 0L)
)
)})
when(replicaManager.getMagic(any())).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
capturedRecords
}
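  // Builds an offsets-topic record carrying group metadata for a Stable group with a single
  // member, using the given generation, protocol and assignment bytes.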
private def buildStableGroupRecordWithMember(generation: Int,
protocolType: String,
protocol: String,
memberId: String,
assignmentBytes: Array[Byte] = Array.emptyByteArray,
apiVersion: ApiVersion = ApiVersion.latestVersion): SimpleRecord = {
val memberProtocols = List((protocol, Array.emptyByteArray))
val member = new MemberMetadata(memberId, Some(groupInstanceId), "clientId", "clientHost", 30000, 10000, protocolType, memberProtocols)
val group = GroupMetadata.loadGroup(groupId, Stable, generation, protocolType, protocol, memberId,
if (apiVersion >= KAFKA_2_1_IV0) Some(time.milliseconds()) else None, Seq(member), time)
val groupMetadataKey = GroupMetadataManager.groupMetadataKey(groupId)
val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map(memberId -> assignmentBytes), apiVersion)
new SimpleRecord(groupMetadataKey, groupMetadataValue)
}
private def buildEmptyGroupRecord(generation: Int, protocolType: String): SimpleRecord = {
val group = GroupMetadata.loadGroup(groupId, Empty, generation, protocolType, null, null, None, Seq.empty, time)
val groupMetadataKey = GroupMetadataManager.groupMetadataKey(groupId)
val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map.empty, ApiVersion.latestVersion)
new SimpleRecord(groupMetadataKey, groupMetadataValue)
}
private def expectGroupMetadataLoad(groupMetadataTopicPartition: TopicPartition,
startOffset: Long,
records: MemoryRecords): Unit = {
val logMock: UnifiedLog = mock(classOf[UnifiedLog])
when(replicaManager.getLog(groupMetadataTopicPartition)).thenReturn(Some(logMock))
val endOffset = expectGroupMetadataLoad(logMock, startOffset, records)
when(replicaManager.getLogEndOffset(groupMetadataTopicPartition)).thenReturn(Some(endOffset))
}
  /**
   * Mocks the given records into a mocked log.
   *
   * @return the calculated end offset to be mocked into [[ReplicaManager.getLogEndOffset]]
   */
private def expectGroupMetadataLoad(logMock: UnifiedLog,
startOffset: Long,
records: MemoryRecords): Long = {
val endOffset = startOffset + records.records.asScala.size
val fileRecordsMock: FileRecords = mock(classOf[FileRecords])
when(logMock.logStartOffset).thenReturn(startOffset)
when(logMock.read(ArgumentMatchers.eq(startOffset),
maxLength = anyInt(),
isolation = ArgumentMatchers.eq(FetchLogEnd),
minOneMessage = ArgumentMatchers.eq(true)))
.thenReturn(FetchDataInfo(LogOffsetMetadata(startOffset), fileRecordsMock))
when(fileRecordsMock.sizeInBytes()).thenReturn(records.sizeInBytes)
val bufferCapture: ArgumentCaptor[ByteBuffer] = ArgumentCaptor.forClass(classOf[ByteBuffer])
when(fileRecordsMock.readInto(bufferCapture.capture(), anyInt())).thenAnswer(_ => {
val buffer = bufferCapture.getValue
buffer.put(records.buffer.duplicate)
buffer.flip()
})
endOffset
}
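  // Creates one offsets-topic commit record per given partition/offset; if a retention time is
  // supplied, the offsets carry an explicit expiry timestamp relative to the commit timestamp.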
private def createCommittedOffsetRecords(committedOffsets: Map[TopicPartition, Long],
groupId: String = groupId,
apiVersion: ApiVersion = ApiVersion.latestVersion,
retentionTimeOpt: Option[Long] = None): Seq[SimpleRecord] = {
committedOffsets.map { case (topicPartition, offset) =>
val commitTimestamp = time.milliseconds()
val offsetAndMetadata = retentionTimeOpt match {
case Some(retentionTimeMs) =>
val expirationTime = commitTimestamp + retentionTimeMs
OffsetAndMetadata(offset, "", commitTimestamp, expirationTime)
case None =>
OffsetAndMetadata(offset, "", commitTimestamp)
}
val offsetCommitKey = GroupMetadataManager.offsetCommitKey(groupId, topicPartition)
val offsetCommitValue = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, apiVersion)
new SimpleRecord(offsetCommitKey, offsetCommitValue)
}.toSeq
}
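  // Stubs ReplicaManager so lookups of the group's offsets topic partition return the mocked partition.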
private def mockGetPartition(): Unit = {
when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition))
when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition))
}
private def getGauge(manager: GroupMetadataManager, name: String): Gauge[Int] = {
KafkaYammerMetrics.defaultRegistry().allMetrics().get(manager.metricName(name, Map.empty)).asInstanceOf[Gauge[Int]]
}
private def expectMetrics(manager: GroupMetadataManager,
expectedNumGroups: Int,
expectedNumGroupsPreparingRebalance: Int,
expectedNumGroupsCompletingRebalance: Int): Unit = {
assertEquals(expectedNumGroups, getGauge(manager, "NumGroups").value)
assertEquals(expectedNumGroupsPreparingRebalance, getGauge(manager, "NumGroupsPreparingRebalance").value)
assertEquals(expectedNumGroupsCompletingRebalance, getGauge(manager, "NumGroupsCompletingRebalance").value)
}
@Test
def testMetrics(): Unit = {
groupMetadataManager.cleanupGroupMetadata()
expectMetrics(groupMetadataManager, 0, 0, 0)
val group = new GroupMetadata("foo2", Stable, time)
groupMetadataManager.addGroup(group)
expectMetrics(groupMetadataManager, 1, 0, 0)
group.transitionTo(PreparingRebalance)
expectMetrics(groupMetadataManager, 1, 1, 0)
group.transitionTo(CompletingRebalance)
expectMetrics(groupMetadataManager, 1, 0, 1)
}
@Test
def testPartitionLoadMetric(): Unit = {
val server = ManagementFactory.getPlatformMBeanServer
val mBeanName = "kafka.server:type=group-coordinator-metrics"
val reporter = new JmxReporter
val metricsContext = new KafkaMetricsContext("kafka.server")
reporter.contextChange(metricsContext)
metrics.addReporter(reporter)
def partitionLoadTime(attribute: String): Double = {
server.getAttribute(new ObjectName(mBeanName), attribute).asInstanceOf[Double]
}
assertTrue(server.isRegistered(new ObjectName(mBeanName)))
    assertEquals(Double.NaN, partitionLoadTime("partition-load-time-max"), 0)
assertEquals(Double.NaN, partitionLoadTime("partition-load-time-avg"), 0)
assertTrue(reporter.containsMbean(mBeanName))
val groupMetadataTopicPartition = groupTopicPartition
val startOffset = 15L
val memberId = "98098230493"
val committedOffsets = Map(
new TopicPartition("foo", 0) -> 23L,
new TopicPartition("foo", 1) -> 455L,
new TopicPartition("bar", 0) -> 8992L
)
val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets)
val groupMetadataRecord = buildStableGroupRecordWithMember(generation = 15,
protocolType = "consumer", protocol = "range", memberId)
val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE,
(offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*)
expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records)
    // When the load is given a start time in the past, the measured load times should be at least the elapsed time since then.
val now = time.milliseconds()
val diff = 1000
val groupEpoch = 2
groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), now - diff)
assertTrue(partitionLoadTime("partition-load-time-max") >= diff)
assertTrue(partitionLoadTime("partition-load-time-avg") >= diff)
}
}
| TiVo/kafka | core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala | Scala | apache-2.0 | 115,351 |
package com.github.diegopacheco.sandbox.scala.camel.boot
import org.springframework.context.support.ClassPathXmlApplicationContext
import org.apache.camel.impl.DefaultCamelContext
import com.github.diegopacheco.sandbox.scala.camel.beans.Stopper
object MainApp extends App {
  // load the Spring application context that wires up the Camel context and route beans
  val ctx = new ClassPathXmlApplicationContext("classpath:spring-camel-beans.xml")
  val camel: DefaultCamelContext = ctx.getBean("camel").asInstanceOf[DefaultCamelContext]
  // hand the Camel context to the Stopper bean so it can shut the context down later
  ctx.getBean("stop").asInstanceOf[Stopper].setCamelCtx(camel)
  camel.start
  println("did stop it")
} | diegopacheco/scala-playground | camel/camel-sandbox/src/main/scala/com/github/diegopacheco/sandbox/scala/camel/boot/MainApp.scala | Scala | unlicense | 578 |
import annotation.nowarn
trait Pattern {
trait NumericOps[T] extends Serializable {
def zero: T
def add(a: T, b: T): T
def add(a: T, b: T, c: T): T = add(a, add(b, c))
def sum(terms: Iterable[T]) = terms.foldLeft(zero)(add)
def sum(terms: Iterator[T]) = terms.foldLeft(zero)(add)
}
trait Expr[T] {
/** Returns arguments of this operator */
def args: Iterable[Expr[_]]
def + (other: Expr[T])(implicit n: NumericOps[T]) = Add(List(this, other))
def specialize(implicit num: NumericOps[T]): Expr[T] =
this match {
case Add(Seq(a, b)) => Add2(a, b)
case Add(Seq(a, b, c)) => Add3(a, b, c)
case x => x
}
}
trait TwoArg[T] extends Expr[T] {
val left: Expr[T]
val right: Expr[T]
val args = List(left, right)
}
trait ManyArg[T] extends Expr[T]
case class Add[T](args: Iterable[Expr[T]])(implicit @nowarn num: NumericOps[T]) extends ManyArg[T] {
override def toString = "(" + args.mkString(" + ") + ")"
}
case class Add2[T](left: Expr[T], right: Expr[T])(implicit @nowarn num: NumericOps[T]) extends TwoArg[T] {
override def toString = "(" + left + " + " + right + ")"
}
case class Add3[T](a1: Expr[T], a2: Expr[T], a3: Expr[T])(implicit @nowarn num: NumericOps[T]) extends ManyArg[T] {
val args = List(a1, a2, a3)
override def toString = "(" + a1 + " + " + a2 + " + " + a3 + ")"
}
}
| lrytz/scala | test/files/pos/patmat-exprs-b.scala | Scala | apache-2.0 | 1,436 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.planner.logical.greedy
import org.neo4j.cypher.internal.frontend.v2_3.SemanticDirection
import org.neo4j.cypher.internal.frontend.v2_3.ast._
import org.neo4j.cypher.internal.compiler.v2_3.planner.logical.plans._
import org.neo4j.cypher.internal.compiler.v2_3.planner.{LogicalPlanningTestSupport, QueryGraph}
import org.neo4j.cypher.internal.frontend.v2_3.test_helpers.CypherFunSuite
class ProjectEndpointsTest
extends CypherFunSuite
with LogicalPlanningTestSupport {
private implicit val subQueryLookupTable = Map.empty[PatternExpression, QueryGraph]
val aName = IdName("a")
val bName = IdName("b")
val rName = IdName("r")
test("project single simple outgoing relationship") {
implicit val context = newMockedLogicalPlanningContext(planContext = newMockedPlanContext)
val inputPlan = Argument(Set(rName))(solved)()
val planTable = greedyPlanTableWith(inputPlan)
val patternRel = PatternRelationship(rName, (aName, bName), SemanticDirection.OUTGOING, Seq.empty, SimplePatternLength)
val qg = QueryGraph.empty.addPatternRelationship(patternRel)
projectEndpoints(planTable, qg) should equal(Seq(
ProjectEndpoints(inputPlan, rName, aName, startInScope = false, bName, endInScope = false, None, directed = true, SimplePatternLength)(solved)
))
}
test("project single simple outgoing relationship and verifies it's type") {
implicit val context = newMockedLogicalPlanningContext(planContext = newMockedPlanContext)
val inputPlan = Argument(Set(rName))(solved)()
val planTable = greedyPlanTableWith(inputPlan)
val patternRel = PatternRelationship(rName, (aName, bName), SemanticDirection.OUTGOING, Seq(RelTypeName("X") _), SimplePatternLength)
val qg = QueryGraph.empty.addPatternRelationship(patternRel)
projectEndpoints(planTable, qg) should equal(Seq(
ProjectEndpoints(inputPlan, rName, aName, startInScope = false, bName, endInScope = false, Some(Seq(RelTypeName("X") _)), directed = true, SimplePatternLength)(solved)
))
}
test("project single simple incoming relationship") {
implicit val context = newMockedLogicalPlanningContext(planContext = newMockedPlanContext)
val inputPlan = Argument(Set(rName))(solved)()
val planTable = greedyPlanTableWith(inputPlan)
val patternRel = PatternRelationship(rName, (aName, bName), SemanticDirection.INCOMING, Seq.empty, SimplePatternLength)
val qg = QueryGraph.empty.addPatternRelationship(patternRel)
projectEndpoints(planTable, qg) should equal(Seq(
ProjectEndpoints(inputPlan, rName, bName, startInScope = false, aName, endInScope = false, None, directed = true, SimplePatternLength)(solved)
))
}
test("project single simple outgoing relationship where start node is bound") {
implicit val context = newMockedLogicalPlanningContext(planContext = newMockedPlanContext)
val inputPlan = Argument(Set(aName, rName))(solved)()
val planTable = greedyPlanTableWith(inputPlan)
val patternRel = PatternRelationship(rName, (aName, bName), SemanticDirection.OUTGOING, Seq.empty, SimplePatternLength)
val qg = QueryGraph.empty.addPatternRelationship(patternRel)
projectEndpoints(planTable, qg) should equal(Seq(
ProjectEndpoints(inputPlan, rName, aName, startInScope = true, bName, endInScope = false, None, directed = true, SimplePatternLength)(solved)
))
}
test("project single simple outgoing relationship where end node is bound") {
implicit val context = newMockedLogicalPlanningContext(planContext = newMockedPlanContext)
val inputPlan = Argument(Set(bName, rName))(solved)()
val planTable = greedyPlanTableWith(inputPlan)
val patternRel = PatternRelationship(rName, (aName, bName), SemanticDirection.OUTGOING, Seq.empty, SimplePatternLength)
val qg = QueryGraph.empty.addPatternRelationship(patternRel)
projectEndpoints(planTable, qg) should equal(Seq(
ProjectEndpoints(inputPlan, rName, aName, startInScope = false, bName, endInScope = true, None, directed = true, SimplePatternLength)(solved)
))
}
test("project single simple outgoing relationship where both nodes are bound") {
implicit val context = newMockedLogicalPlanningContext(planContext = newMockedPlanContext)
val inputPlan = Argument(Set(aName, bName, rName))(solved)()
val planTable = greedyPlanTableWith(inputPlan)
val patternRel = PatternRelationship(rName, (aName, bName), SemanticDirection.OUTGOING, Seq.empty, SimplePatternLength)
val qg = QueryGraph.empty.addPatternRelationship(patternRel)
projectEndpoints(planTable, qg) should equal(Seq(
ProjectEndpoints(inputPlan, rName, aName, startInScope = true, bName, endInScope = true, None, directed = true, SimplePatternLength)(solved)
))
}
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/test/scala/org/neo4j/cypher/internal/compiler/v2_3/planner/logical/greedy/ProjectEndpointsTest.scala | Scala | apache-2.0 | 5,603 |
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.common
import org.scalatest.FlatSpec
import org.scalatest.matchers.MustMatchers
class WrappedTest extends FlatSpec with MustMatchers {
"Wrapped" must "extract the cause of an exception" in {
val innerEx = new NoSuchElementException("missing foo")
val outerEx = new RuntimeException("something went wrong", innerEx)
Wrapped.unapply(outerEx) must be (Some(innerEx))
}
it must "extract nothing from exceptions without cause" in {
val simpleEx = new RuntimeException("simply failed")
Wrapped.unapply(simpleEx) must not be 'defined
}
it must "extract nothing from null" in {
Wrapped.unapply(null) must not be 'defined
}
}
| telefonicaid/fiware-cosmos-platform | common/src/test/scala/es/tid/cosmos/common/WrappedTest.scala | Scala | apache-2.0 | 1,319 |
package org.crudible.lift.binding.model
import scala.xml.Text
import scala.xml.NodeSeq
import org.crudible.lift.util.WebHelpers
class LiftMarkup extends WebHelpers {
implicit def optStrToOptText(opt: Option[String]) = { opt.map(m => Text(m)) }
implicit def optIntToOptText(opt: Option[Int]) = { opt.map(m => Text(m.toString())) }
implicit def optNodeSeqToNodeSeq(opt: Option[NodeSeq]) = { opt.getOrElse(Text("")) }
} | rehei/crudible | crudible-lift/src/main/scala/org/crudible/lift/binding/model/LiftMarkup.scala | Scala | apache-2.0 | 426 |
package tests.emptyparens
class C {
def f1()(implicit i: Int) = i
def f2()(using i: Int) = i
def f3(s: String)(implicit i: Int) = i
def f4(s: String)(using i: Int) = i
def f5()()(using i: Int) = i
def f6() = 1
def f7()() = 2
def f8(i: Int)() = 1
}
class C1()(implicit i: Int)
class C2()(using i: Int)
class C3()()
class C4()(i: Int) | dotty-staging/dotty | scaladoc-testcases/src/tests/emptyparens.scala | Scala | apache-2.0 | 360 |
/*
* # Trove
*
* This file is part of Trove - A FREE desktop budgeting application that
* helps you track your finances, FREES you from complex budgeting, and
* enables you to build your TROVE of savings!
*
* Copyright © 2016-2019 Eric John Fredericks.
*
* Trove is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Trove is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Trove. If not, see <http://www.gnu.org/licenses/>.
*/
package trove.ui.fxext
import scalafx.geometry.Insets
import scalafx.scene.control.Label
object TextField {
def apply(metadata: FieldMetadata): TextField = new TextField(metadata)
}
class TextField(metadata: FieldMetadata) extends scalafx.scene.control.TextField {
val maxChars: Int = metadata.maxChars.getOrElse(Int.MaxValue)
require(maxChars > 0)
val label: Label = Label(metadata.name)
label.setPadding(Insets(5))
metadata.controlWidth.foreach { width =>
minWidth = width
maxWidth = width
prefWidth = width
}
text.onChange {
(_,oldValue,newValue) => {
if(newValue.length > oldValue.length && newValue.length > maxChars) {
text = newValue.substring(0, maxChars)
}
}
}
}
| emanchgo/budgetfree | src/main/scala/trove/ui/fxext/TextField.scala | Scala | gpl-3.0 | 1,645 |
/*
* DifferenceSpec.scala
*
* Copyright 2017 wayfarerx <[email protected]> (@thewayfarerx)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.wayfarerx.dreamsleeve.data
import org.scalatest._
/**
* Test case for the difference implementations.
*/
class DifferenceSpec extends FlatSpec with Matchers {
import Difference._
val generator = Hash.Generator()
"A create" should "act as a hashable creation of a document" in {
val da = Document("e", Table())
val db = Document("f", Table())
val a = Create(da)
val b = Create(db)
a == a shouldBe true
a == b shouldBe false
a == ("Hello": Any) shouldBe false
a.toString shouldBe s"Create($da)"
b.toString shouldBe s"Create($db)"
a.hash shouldBe generator.hash(Create.Header, da.hash)
Create.unapply(a) shouldBe Some(da)
Difference.unapply(a) shouldBe true
}
"A revise" should "verify the hash of a document and apply a change" in {
val ta = Table(Value.String("a") -> Value.Number(1))
val tb = Table(Value.String("a") -> Value.Number(2))
val da = Document("e", ta)
val db = Document("g", tb)
val a = Revise(da, db.title, Update(ta, tb))
val b = Revise(db, da.title, Update(tb, ta))
a == a shouldBe true
a == b shouldBe false
a.toString shouldBe s"Revise(${da.hash},${db.title},${Update(ta, tb)})"
b.toString shouldBe s"Revise(${db.hash},${da.title},${Update(tb, ta)})"
a.hash shouldBe generator.hash(Revise.Header, da.hash, db.title, a.update.hash)
Revise.unapply(a) shouldBe Some((da.hash, db.title, a.update))
Difference.unapply(a) shouldBe true
}
"A delete" should "verify the hash of a document" in {
val da = Document("e", Table())
val db = Document("f", Table())
val a = Delete(da)
val b = Delete(db.hash)
a == a shouldBe true
a == b shouldBe false
a.toString shouldBe s"Delete(${da.hash})"
b.toString shouldBe s"Delete(${db.hash})"
a.hash shouldBe generator.hash(Delete.Header, da.hash)
Delete.unapply(a) shouldBe Some(da.hash)
Difference.unapply(a) shouldBe true
}
}
| wayfarerx/dreamsleeve | shared/data/src/test/scala/net/wayfarerx/dreamsleeve/data/DifferenceSpec.scala | Scala | apache-2.0 | 2,611 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.recorder.scenario
import java.nio.charset.Charset
import scala.collection.breakOut
import scala.collection.JavaConversions.asScalaBuffer
import scala.concurrent.duration.FiniteDuration
import scala.io.Codec.UTF8
import io.gatling.http.HeaderNames._
import io.gatling.http.HeaderValues._
import io.gatling.http.fetch.{ EmbeddedResource, HtmlParser }
import io.gatling.http.util.HttpHelper.parseFormBody
import io.gatling.recorder.config.RecorderConfiguration
import io.gatling.recorder.http.model.{SafeHttpRequest, SafeHttpResponse}
import org.asynchttpclient.util.Base64
import org.asynchttpclient.uri.Uri
private[recorder] case class TimedScenarioElement[+T <: ScenarioElement](sendTime: Long, arrivalTime: Long, element: T)
private[recorder] sealed trait RequestBody
private[recorder] case class RequestBodyParams(params: List[(String, String)]) extends RequestBody
private[recorder] case class RequestBodyBytes(bytes: Array[Byte]) extends RequestBody
private[recorder] sealed trait ResponseBody
private[recorder] case class ResponseBodyBytes(bytes: Array[Byte]) extends ResponseBody
private[recorder] sealed trait ScenarioElement
private[recorder] case class PauseElement(duration: FiniteDuration) extends ScenarioElement
private[recorder] case class TagElement(text: String) extends ScenarioElement
private[recorder] object RequestElement {
  val HtmlContentType = """(?i)text/html\s*(;\s+charset=(.+))?""".r
val CacheHeaders = Set(CacheControl, IfMatch, IfModifiedSince, IfNoneMatch, IfRange, IfUnmodifiedSince)
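  // For reference, HtmlContentType matches values such as "text/html" or "text/html; charset=UTF-8"
  // (capturing "UTF-8" in the charset group), while non-HTML content types like "application/json" do not match.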
def apply(request: SafeHttpRequest, response: SafeHttpResponse)(implicit configuration: RecorderConfiguration): RequestElement = {
val requestHeaders: Map[String, String] = request.headers.entries.map { entry => (entry.getKey, entry.getValue) }(breakOut)
val requestContentType = requestHeaders.get(ContentType)
val requestUserAgent = requestHeaders.get(UserAgent)
val responseContentType = Option(response.headers.get(ContentType))
val containsFormParams = requestContentType.exists(_.contains(ApplicationFormUrlEncoded))
val requestBody =
if (request.body.nonEmpty) {
if (containsFormParams)
// The payload consists of a Unicode string using only characters in the range U+0000 to U+007F
// cf: http://www.w3.org/TR/html5/forms.html#application/x-www-form-urlencoded-decoding-algorithm
Some(RequestBodyParams(parseFormBody(new String(request.body, UTF8.name))))
else
Some(RequestBodyBytes(request.body))
} else {
None
}
val responseBody =
if (response.body.nonEmpty) {
Some(ResponseBodyBytes(response.body))
} else {
None
}
val embeddedResources = responseContentType.collect {
case HtmlContentType(_, headerCharset) =>
val charsetName = Option(headerCharset).filter(Charset.isSupported).getOrElse(UTF8.name)
val charset = Charset.forName(charsetName)
if (response.body.nonEmpty) {
val htmlBuff = new String(response.body, charset)
val userAgent = requestUserAgent.flatMap(io.gatling.http.fetch.UserAgent.parseFromHeader)
Some(new HtmlParser().getEmbeddedResources(Uri.create(request.uri), htmlBuff, userAgent))
} else {
None
}
}.flatten.getOrElse(Nil)
val filteredRequestHeaders =
if (configuration.http.removeCacheHeaders)
requestHeaders.filterKeys(name => !CacheHeaders.contains(name))
else
requestHeaders
RequestElement(new String(request.uri), request.method.toString, filteredRequestHeaders, requestBody, responseBody, response.status.code, embeddedResources)
}
}
private[recorder] case class RequestElement(
uri: String,
method: String,
headers: Map[String, String],
body: Option[RequestBody],
responseBody: Option[ResponseBody],
statusCode: Int,
embeddedResources: List[EmbeddedResource],
nonEmbeddedResources: List[RequestElement] = Nil
) extends ScenarioElement {
val (baseUrl, pathQuery) = {
val uriComponents = Uri.create(uri)
val base = new StringBuilder().append(uriComponents.getScheme).append("://").append(uriComponents.getHost)
val port = uriComponents.getScheme match {
case "http" if !Set(-1, 80).contains(uriComponents.getPort) => ":" + uriComponents.getPort
case "https" if !Set(-1, 443).contains(uriComponents.getPort) => ":" + uriComponents.getPort
case _ => ""
}
base.append(port)
(base.toString, uriComponents.toRelativeUrl)
}
var printedUrl = uri
// TODO NICO mutable external fields are a very bad idea
var filteredHeadersId: Option[Int] = None
var id: Int = 0
def setId(id: Int) = {
this.id = id
this
}
def makeRelativeTo(baseUrl: String): RequestElement = {
if (baseUrl == this.baseUrl)
printedUrl = pathQuery
this
}
val basicAuthCredentials: Option[(String, String)] = {
def parseCredentials(header: String) =
new String(Base64.decode(header.split(" ")(1))).split(":") match {
case Array(username, password) =>
val credentials = (username, password)
Some(credentials)
case _ => None
}
headers.get(Authorization).filter(_.startsWith("Basic ")).flatMap(parseCredentials)
}
}
| GabrielPlassard/gatling | gatling-recorder/src/main/scala/io/gatling/recorder/scenario/ScenarioElement.scala | Scala | apache-2.0 | 6,042 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.expressions.utils
import java.sql.{Date, Time, Timestamp}
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.table.api.Types
import org.apache.flink.table.functions.{ScalarFunction, FunctionContext}
import org.junit.Assert
import scala.annotation.varargs
import scala.collection.mutable
import scala.io.Source
case class SimplePojo(name: String, age: Int)
object Func0 extends ScalarFunction {
def eval(index: Int): Int = {
index
}
}
object Func1 extends ScalarFunction {
def eval(index: Integer): Integer = {
index + 1
}
}
object Func2 extends ScalarFunction {
def eval(index: Integer, str: String, pojo: SimplePojo): String = {
s"$index and $str and $pojo"
}
}
object Func3 extends ScalarFunction {
def eval(index: Integer, str: String): String = {
s"$index and $str"
}
}
object Func4 extends ScalarFunction {
def eval(): Integer = {
null
}
}
object Func5 extends ScalarFunction {
def eval(): Int = {
-1
}
}
object Func6 extends ScalarFunction {
def eval(date: Date, time: Time, timestamp: Timestamp): (Date, Time, Timestamp) = {
(date, time, timestamp)
}
}
object Func7 extends ScalarFunction {
def eval(a: Integer, b: Integer): Integer = {
a + b
}
}
object Func8 extends ScalarFunction {
def eval(a: Int): String = {
"a"
}
def eval(a: Int, b: Int): String = {
"b"
}
def eval(a: String, b: String): String = {
"c"
}
}
object Func9 extends ScalarFunction {
def eval(a: Int, b: Int, c: Long): String = {
s"$a and $b and $c"
}
}
object Func10 extends ScalarFunction {
def eval(c: Long): Long = {
c
}
override def getResultType(signature: Array[Class[_]]): TypeInformation[_] = {
Types.SQL_TIMESTAMP
}
}
object Func11 extends ScalarFunction {
def eval(a: Int, b: Long): String = {
s"$a and $b"
}
}
object Func12 extends ScalarFunction {
def eval(a: Long): Long = {
a
}
override def getResultType(signature: Array[Class[_]]): TypeInformation[_] = {
Types.INTERVAL_MILLIS
}
}
object ShouldNotExecuteFunc extends ScalarFunction {
def eval(s: String): Boolean = {
throw new Exception("This func should never be executed")
}
}
class RichFunc0 extends ScalarFunction {
var openCalled = false
var closeCalled = false
override def open(context: FunctionContext): Unit = {
super.open(context)
if (openCalled) {
Assert.fail("Open called more than once.")
} else {
openCalled = true
}
if (closeCalled) {
Assert.fail("Close called before open.")
}
}
def eval(index: Int): Int = {
if (!openCalled) {
Assert.fail("Open was not called before eval.")
}
if (closeCalled) {
Assert.fail("Close called before eval.")
}
index + 1
}
override def close(): Unit = {
super.close()
if (closeCalled) {
Assert.fail("Close called more than once.")
} else {
closeCalled = true
}
if (!openCalled) {
Assert.fail("Open was not called before close.")
}
}
}
class RichFunc1 extends ScalarFunction {
var added = Int.MaxValue
override def open(context: FunctionContext): Unit = {
added = context.getJobParameter("int.value", "0").toInt
}
def eval(index: Int): Int = {
index + added
}
override def close(): Unit = {
added = Int.MaxValue
}
}
class RichFunc2 extends ScalarFunction {
var prefix = "ERROR_VALUE"
override def open(context: FunctionContext): Unit = {
prefix = context.getJobParameter("string.value", "")
}
def eval(value: String): String = {
prefix + "#" + value
}
override def close(): Unit = {
prefix = "ERROR_VALUE"
}
}
class RichFunc3 extends ScalarFunction {
private val words = mutable.HashSet[String]()
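  // Note: eval relies on a distributed-cache file registered under the name "words" (for example
  // via env.registerCachedFile(path, "words") in the test setup), containing one word per line;
  // open loads it into this set so eval can check membership.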
override def open(context: FunctionContext): Unit = {
val file = context.getCachedFile("words")
for (line <- Source.fromFile(file.getCanonicalPath).getLines) {
words.add(line.trim)
}
}
def eval(value: String): Boolean = {
words.contains(value)
}
override def close(): Unit = {
words.clear()
}
}
class Func13(prefix: String) extends ScalarFunction {
def eval(a: String): String = {
s"$prefix-$a"
}
}
object Func14 extends ScalarFunction {
@varargs
def eval(a: Int*): Int = {
a.sum
}
}
object Func15 extends ScalarFunction {
@varargs
def eval(a: String, b: Int*): String = {
a + b.length
}
def eval(a: String): String = {
a
}
}
object Func16 extends ScalarFunction {
def eval(a: Seq[String]): String = {
a.mkString(", ")
}
}
object Func17 extends ScalarFunction {
// Without @varargs, we will throw an exception
def eval(a: String*): String = {
a.mkString(", ")
}
}
object Func18 extends ScalarFunction {
def eval(str: String, prefix: String): Boolean = {
str.startsWith(prefix)
}
}
| zohar-mizrahi/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/utils/userDefinedScalarFunctions.scala | Scala | apache-2.0 | 5,715 |
package es.upm.oeg.epnoi.matching.metrics.corpus
import es.upm.oeg.epnoi.matching.metrics.domain.entity.{Metadata, RegularResource}
import es.upm.oeg.epnoi.matching.metrics.feature.LuceneTokenizer
import es.upm.oeg.epnoi.matching.metrics.topics._
import es.upm.oeg.epnoi.matching.metrics.utils.SparkWrapper
/**
* Corpus defined by 4 topics: baseball, motor, religion and space
*/
case object Articles {
val metadatas: Map[String,Metadata] = Map(
("baseball01.txt",Metadata("baseball01","2011",List(Authors.a1,Authors.a2,Authors.a3))),
    ("baseball02.txt",Metadata("baseball02","2012",List(Authors.a1,Authors.a2,Authors.a3))),
    ("baseball03.txt",Metadata("baseball03","2013",List(Authors.a1,Authors.a2,Authors.a3))),
    ("baseball04.txt",Metadata("baseball04","2014",List(Authors.a1,Authors.a2,Authors.a3))),
    ("baseball05.txt",Metadata("baseball05","2015",List(Authors.a1,Authors.a2,Authors.a3))),
("motor01.txt",Metadata("motor01","2011",List(Authors.a4,Authors.a5))),
("motor02.txt",Metadata("motor02","2012",List(Authors.a4,Authors.a5))),
("motor03.txt",Metadata("motor03","2013",List(Authors.a4,Authors.a5))),
("motor04.txt",Metadata("motor04","2014",List(Authors.a4,Authors.a5))),
("motor05.txt",Metadata("motor05","2015",List(Authors.a4,Authors.a5))),
("religion01.txt",Metadata("religion01","2011",List(Authors.a6,Authors.a7,Authors.a8))),
("religion02.txt",Metadata("religion02","2012",List(Authors.a6,Authors.a7,Authors.a8))),
("religion03.txt",Metadata("religion03","2013",List(Authors.a6,Authors.a7,Authors.a8))),
("religion04.txt",Metadata("religion04","2014",List(Authors.a6,Authors.a7,Authors.a8))),
("religion05.txt",Metadata("religion05","2015",List(Authors.a6,Authors.a7,Authors.a8))),
("space01.txt",Metadata("space01","2011",List(Authors.a8,Authors.a9))),
("space02.txt",Metadata("space02","2012",List(Authors.a8,Authors.a9))),
("space03.txt",Metadata("space03","2013",List(Authors.a8,Authors.a9))),
("space04.txt",Metadata("space04","2014",List(Authors.a8,Authors.a9))),
("space05.txt",Metadata("space05","2015",List(Authors.a8,Authors.a9)))
).withDefaultValue(Metadata("hockey","2015",List(Authors.a10)))
val corpus = SparkWrapper.readCorpus("src/test/corpus/articles/*").map{x=>
val name = x._1.substring(x._1.lastIndexOf("/")+1)
RegularResource(
uri = s"ro.oeg.es/resource/$name",
url = x._1,
metadata = metadatas(name),
bagOfWords = LuceneTokenizer(x._2),
resources = Seq.empty)
}
}
| cbadenes/epnoi-matching-metrics | src/test/scala/es/upm/oeg/epnoi/matching/metrics/corpus/Articles.scala | Scala | apache-2.0 | 2,557 |
package org.http4s.dsl
/** A conjunction extractor. Generally used as an infix operator.
*
* {{{
* scala> import org.http4s.dsl.&
* scala> object Even { def unapply(i: Int) = (i % 2) == 0 }
* scala> object Positive { def unapply(i: Int) = i > 0 }
* scala> def describe(i: Int) = i match {
* | case Even() & Positive() => "even and positive"
* | case Even() => "even but not positive"
* | case Positive() => "positive but not even"
* | case _ => "neither even nor positive"
* | }
* scala> describe(-1)
* res0: String = neither even nor positive
* scala> describe(0)
* res1: String = even but not positive
* scala> describe(1)
* res2: String = positive but not even
* scala> describe(2)
* res3: String = even and positive
* }}}
*/
object & {
def unapply[A](a: A): Some[(A, A)] = Some((a, a))
}
| ChristopherDavenport/http4s | dsl/src/main/scala/org/http4s/dsl/and.scala | Scala | apache-2.0 | 871 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.util
import java.util.{Collections, Properties}
import java.util.Arrays.asList
import java.util.concurrent.{ExecutionException, TimeUnit}
import java.io.File
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
import org.apache.kafka.clients.admin.KafkaAdminClientTest
import org.apache.kafka.common.utils.{Time, Utils}
import kafka.log.LogConfig
import kafka.server.{Defaults, KafkaConfig, KafkaServer}
import org.apache.kafka.clients.admin._
import kafka.utils.{Logging, TestUtils}
import kafka.utils.Implicits._
import org.apache.kafka.clients.admin.NewTopic
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.{ConsumerGroupState, KafkaFuture, TopicPartition, TopicPartitionReplica}
import org.apache.kafka.common.acl._
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.errors._
import org.junit.{After, Before, Rule, Test}
import org.apache.kafka.common.requests.{DeleteRecordsRequest, MetadataResponse}
import org.apache.kafka.common.resource.{PatternType, ResourcePattern, ResourceType}
import org.junit.rules.Timeout
import org.junit.Assert._
import scala.util.Random
import scala.collection.JavaConverters._
import java.lang.{Long => JLong}
import kafka.zk.KafkaZkClient
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
/**
* An integration test of the KafkaAdminClient.
*
* Also see {@link org.apache.kafka.clients.admin.KafkaAdminClientTest} for a unit test of the admin client.
*/
class AdminClientIntegrationTest extends IntegrationTestHarness with Logging {
import AdminClientIntegrationTest._
@Rule
def globalTimeout = Timeout.millis(120000)
var client: AdminClient = null
val topic = "topic"
val partition = 0
val topicPartition = new TopicPartition(topic, partition)
@Before
override def setUp(): Unit = {
super.setUp
TestUtils.waitUntilBrokerMetadataIsPropagated(servers)
}
@After
override def tearDown(): Unit = {
if (client != null)
Utils.closeQuietly(client, "AdminClient")
super.tearDown()
}
val serverCount = 3
val consumerCount = 1
val producerCount = 1
override def generateConfigs = {
val cfgs = TestUtils.createBrokerConfigs(serverCount, zkConnect, interBrokerSecurityProtocol = Some(securityProtocol),
trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties, logDirCount = 2)
cfgs.foreach { config =>
config.setProperty(KafkaConfig.ListenersProp, s"${listenerName.value}://localhost:${TestUtils.RandomPort}")
config.remove(KafkaConfig.InterBrokerSecurityProtocolProp)
config.setProperty(KafkaConfig.InterBrokerListenerNameProp, listenerName.value)
config.setProperty(KafkaConfig.ListenerSecurityProtocolMapProp, s"${listenerName.value}:${securityProtocol.name}")
config.setProperty(KafkaConfig.DeleteTopicEnableProp, "true")
config.setProperty(KafkaConfig.GroupInitialRebalanceDelayMsProp, "0")
// We set this in order to test that we don't expose sensitive data via describe configs. This will already be
// set for subclasses with security enabled and we don't want to overwrite it.
if (!config.containsKey(KafkaConfig.SslTruststorePasswordProp))
config.setProperty(KafkaConfig.SslTruststorePasswordProp, "some.invalid.pass")
}
cfgs.foreach(_ ++= serverConfig)
cfgs.map(KafkaConfig.fromProps)
}
def createConfig(): util.Map[String, Object] = {
val config = new util.HashMap[String, Object]
config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
config.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "20000")
val securityProps: util.Map[Object, Object] =
TestUtils.adminClientSecurityConfigs(securityProtocol, trustStoreFile, clientSaslProperties)
securityProps.asScala.foreach { case (key, value) => config.put(key.asInstanceOf[String], value) }
config
}
def waitForTopics(client: AdminClient, expectedPresent: Seq[String], expectedMissing: Seq[String]): Unit = {
TestUtils.waitUntilTrue(() => {
val topics = client.listTopics.names.get()
expectedPresent.forall(topicName => topics.contains(topicName)) &&
expectedMissing.forall(topicName => !topics.contains(topicName))
}, "timed out waiting for topics")
}
def assertFutureExceptionTypeEquals(future: KafkaFuture[_], clazz: Class[_ <: Throwable]): Unit = {
try {
future.get()
fail("Expected CompletableFuture.get to return an exception")
} catch {
case e: ExecutionException =>
val cause = e.getCause()
assertTrue("Expected an exception of type " + clazz.getName + "; got type " +
cause.getClass().getName, clazz.isInstance(cause))
}
}
@Test
def testClose(): Unit = {
val client = AdminClient.create(createConfig())
client.close()
client.close() // double close has no effect
}
@Test
def testListNodes(): Unit = {
client = AdminClient.create(createConfig())
val brokerStrs = brokerList.split(",").toList.sorted
var nodeStrs: List[String] = null
do {
val nodes = client.describeCluster().nodes().get().asScala
nodeStrs = nodes.map ( node => s"${node.host}:${node.port}" ).toList.sorted
} while (nodeStrs.size < brokerStrs.size)
assertEquals(brokerStrs.mkString(","), nodeStrs.mkString(","))
}
@Test
def testCreateDeleteTopics(): Unit = {
client = AdminClient.create(createConfig())
val topics = Seq("mytopic", "mytopic2")
val newTopics = Seq(
new NewTopic("mytopic", Map((0: Integer) -> Seq[Integer](1, 2).asJava, (1: Integer) -> Seq[Integer](2, 0).asJava).asJava),
new NewTopic("mytopic2", 3, 3)
)
client.createTopics(newTopics.asJava, new CreateTopicsOptions().validateOnly(true)).all.get()
waitForTopics(client, List(), topics)
client.createTopics(newTopics.asJava).all.get()
waitForTopics(client, topics, List())
val results = client.createTopics(newTopics.asJava).values()
assertTrue(results.containsKey("mytopic"))
assertFutureExceptionTypeEquals(results.get("mytopic"), classOf[TopicExistsException])
assertTrue(results.containsKey("mytopic2"))
assertFutureExceptionTypeEquals(results.get("mytopic2"), classOf[TopicExistsException])
val topicToDescription = client.describeTopics(topics.asJava).all.get()
assertEquals(topics.toSet, topicToDescription.keySet.asScala)
val topic0 = topicToDescription.get("mytopic")
assertEquals(false, topic0.isInternal)
assertEquals("mytopic", topic0.name)
assertEquals(2, topic0.partitions.size)
val topic0Partition0 = topic0.partitions.get(0)
assertEquals(1, topic0Partition0.leader.id)
assertEquals(0, topic0Partition0.partition)
assertEquals(Seq(1, 2), topic0Partition0.isr.asScala.map(_.id))
assertEquals(Seq(1, 2), topic0Partition0.replicas.asScala.map(_.id))
val topic0Partition1 = topic0.partitions.get(1)
assertEquals(2, topic0Partition1.leader.id)
assertEquals(1, topic0Partition1.partition)
assertEquals(Seq(2, 0), topic0Partition1.isr.asScala.map(_.id))
assertEquals(Seq(2, 0), topic0Partition1.replicas.asScala.map(_.id))
val topic1 = topicToDescription.get("mytopic2")
assertEquals(false, topic1.isInternal)
assertEquals("mytopic2", topic1.name)
assertEquals(3, topic1.partitions.size)
for (partitionId <- 0 until 3) {
val partition = topic1.partitions.get(partitionId)
assertEquals(partitionId, partition.partition)
assertEquals(3, partition.replicas.size)
partition.replicas.asScala.foreach { replica =>
assertTrue(replica.id >= 0)
assertTrue(replica.id < serverCount)
}
assertEquals("No duplicate replica ids", partition.replicas.size, partition.replicas.asScala.map(_.id).distinct.size)
assertEquals(3, partition.isr.size)
assertEquals(partition.replicas, partition.isr)
assertTrue(partition.replicas.contains(partition.leader))
}
client.deleteTopics(topics.asJava).all.get()
waitForTopics(client, List(), topics)
}
@Test
def testMetadataRefresh(): Unit = {
client = AdminClient.create(createConfig())
val topics = Seq("mytopic")
val newTopics = Seq(new NewTopic("mytopic", 3, 3))
client.createTopics(newTopics.asJava).all.get()
waitForTopics(client, expectedPresent = topics, expectedMissing = List())
val controller = servers.find(_.config.brokerId == TestUtils.waitUntilControllerElected(zkClient)).get
controller.shutdown()
controller.awaitShutdown()
val topicDesc = client.describeTopics(topics.asJava).all.get()
assertEquals(topics.toSet, topicDesc.keySet.asScala)
}
/**
* describe should not auto create topics
*/
@Test
def testDescribeNonExistingTopic(): Unit = {
client = AdminClient.create(createConfig())
val existingTopic = "existing-topic"
client.createTopics(Seq(existingTopic).map(new NewTopic(_, 1, 1)).asJava).all.get()
waitForTopics(client, Seq(existingTopic), List())
val nonExistingTopic = "non-existing"
val results = client.describeTopics(Seq(nonExistingTopic, existingTopic).asJava).values
assertEquals(existingTopic, results.get(existingTopic).get.name)
    assertTrue(intercept[ExecutionException](results.get(nonExistingTopic).get).getCause.isInstanceOf[UnknownTopicOrPartitionException])
assertEquals(None, zkClient.getTopicPartitionCount(nonExistingTopic))
}
@Test
def testDescribeCluster(): Unit = {
client = AdminClient.create(createConfig())
val nodes = client.describeCluster.nodes.get()
val clusterId = client.describeCluster().clusterId().get()
assertEquals(servers.head.apis.clusterId, clusterId)
val controller = client.describeCluster().controller().get()
assertEquals(servers.head.apis.metadataCache.getControllerId.
getOrElse(MetadataResponse.NO_CONTROLLER_ID), controller.id())
val brokers = brokerList.split(",")
assertEquals(brokers.size, nodes.size)
for (node <- nodes.asScala) {
val hostStr = s"${node.host}:${node.port}"
assertTrue(s"Unknown host:port pair $hostStr in brokerVersionInfos", brokers.contains(hostStr))
}
}
@Test
def testDescribeLogDirs(): Unit = {
client = AdminClient.create(createConfig())
val topic = "topic"
val leaderByPartition = createTopic(topic, numPartitions = 10, replicationFactor = 1)
val partitionsByBroker = leaderByPartition.groupBy { case (partitionId, leaderId) => leaderId }.mapValues(_.keys.toSeq)
val brokers = (0 until serverCount).map(Integer.valueOf)
val logDirInfosByBroker = client.describeLogDirs(brokers.asJava).all.get
(0 until serverCount).foreach { brokerId =>
val server = servers.find(_.config.brokerId == brokerId).get
val expectedPartitions = partitionsByBroker(brokerId)
val logDirInfos = logDirInfosByBroker.get(brokerId)
val replicaInfos = logDirInfos.asScala.flatMap { case (logDir, logDirInfo) => logDirInfo.replicaInfos.asScala }.filterKeys(_.topic == topic)
assertEquals(expectedPartitions.toSet, replicaInfos.keys.map(_.partition).toSet)
logDirInfos.asScala.foreach { case (logDir, logDirInfo) =>
logDirInfo.replicaInfos.asScala.keys.foreach(tp =>
assertEquals(server.logManager.getLog(tp).get.dir.getParent, logDir)
)
}
}
}
@Test
def testDescribeReplicaLogDirs(): Unit = {
client = AdminClient.create(createConfig())
val topic = "topic"
val leaderByPartition = createTopic(topic, numPartitions = 10, replicationFactor = 1)
val replicas = leaderByPartition.map { case (partition, brokerId) =>
new TopicPartitionReplica(topic, partition, brokerId)
}.toSeq
val replicaDirInfos = client.describeReplicaLogDirs(replicas.asJavaCollection).all.get
replicaDirInfos.asScala.foreach { case (topicPartitionReplica, replicaDirInfo) =>
val server = servers.find(_.config.brokerId == topicPartitionReplica.brokerId()).get
val tp = new TopicPartition(topicPartitionReplica.topic(), topicPartitionReplica.partition())
assertEquals(server.logManager.getLog(tp).get.dir.getParent, replicaDirInfo.getCurrentReplicaLogDir)
}
}
@Test
def testAlterReplicaLogDirs(): Unit = {
client = AdminClient.create(createConfig())
val topic = "topic"
val tp = new TopicPartition(topic, 0)
val randomNums = servers.map(server => server -> Random.nextInt(2)).toMap
    // Generate two mutually exclusive replica assignments
val firstReplicaAssignment = servers.map { server =>
val logDir = new File(server.config.logDirs(randomNums(server))).getAbsolutePath
new TopicPartitionReplica(topic, 0, server.config.brokerId) -> logDir
}.toMap
val secondReplicaAssignment = servers.map { server =>
val logDir = new File(server.config.logDirs(1 - randomNums(server))).getAbsolutePath
new TopicPartitionReplica(topic, 0, server.config.brokerId) -> logDir
}.toMap
// Verify that replica can be created in the specified log directory
val futures = client.alterReplicaLogDirs(firstReplicaAssignment.asJava,
new AlterReplicaLogDirsOptions).values.asScala.values
futures.foreach { future =>
val exception = intercept[ExecutionException](future.get)
assertTrue(exception.getCause.isInstanceOf[ReplicaNotAvailableException])
}
createTopic(topic, numPartitions = 1, replicationFactor = serverCount)
servers.foreach { server =>
val logDir = server.logManager.getLog(tp).get.dir.getParent
assertEquals(firstReplicaAssignment(new TopicPartitionReplica(topic, 0, server.config.brokerId)), logDir)
}
// Verify that replica can be moved to the specified log directory after the topic has been created
client.alterReplicaLogDirs(secondReplicaAssignment.asJava, new AlterReplicaLogDirsOptions).all.get
servers.foreach { server =>
TestUtils.waitUntilTrue(() => {
val logDir = server.logManager.getLog(tp).get.dir.getParent
secondReplicaAssignment(new TopicPartitionReplica(topic, 0, server.config.brokerId)) == logDir
}, "timed out waiting for replica movement")
}
// Verify that replica can be moved to the specified log directory while the producer is sending messages
val running = new AtomicBoolean(true)
val numMessages = new AtomicInteger
import scala.concurrent.ExecutionContext.Implicits._
val producerFuture = Future {
val producer = TestUtils.createNewProducer(
TestUtils.getBrokerListStrFromServers(servers, protocol = securityProtocol),
securityProtocol = securityProtocol,
trustStoreFile = trustStoreFile,
retries = 0, // Producer should not have to retry when broker is moving replica between log directories.
requestTimeoutMs = 10000,
acks = -1
)
try {
while (running.get) {
val future = producer.send(new ProducerRecord(topic, s"xxxxxxxxxxxxxxxxxxxx-$numMessages".getBytes))
numMessages.incrementAndGet()
future.get(10, TimeUnit.SECONDS)
}
numMessages.get
} finally producer.close()
}
try {
TestUtils.waitUntilTrue(() => numMessages.get > 10, s"only $numMessages messages are produced before timeout. Producer future ${producerFuture.value}")
client.alterReplicaLogDirs(firstReplicaAssignment.asJava, new AlterReplicaLogDirsOptions).all.get
servers.foreach { server =>
TestUtils.waitUntilTrue(() => {
val logDir = server.logManager.getLog(tp).get.dir.getParent
firstReplicaAssignment(new TopicPartitionReplica(topic, 0, server.config.brokerId)) == logDir
}, s"timed out waiting for replica movement. Producer future ${producerFuture.value}")
}
val currentMessagesNum = numMessages.get
TestUtils.waitUntilTrue(() => numMessages.get - currentMessagesNum > 10,
s"only ${numMessages.get - currentMessagesNum} messages are produced within timeout after replica movement. Producer future ${producerFuture.value}")
} finally running.set(false)
val finalNumMessages = Await.result(producerFuture, Duration(20, TimeUnit.SECONDS))
// Verify that all messages that are produced can be consumed
val consumerRecords = TestUtils.consumeTopicRecords(servers, topic, finalNumMessages,
securityProtocol = securityProtocol, trustStoreFile = trustStoreFile)
consumerRecords.zipWithIndex.foreach { case (consumerRecord, index) =>
assertEquals(s"xxxxxxxxxxxxxxxxxxxx-$index", new String(consumerRecord.value))
}
}
@Test
def testDescribeAndAlterConfigs(): Unit = {
client = AdminClient.create(createConfig)
// Create topics
val topic1 = "describe-alter-configs-topic-1"
val topicResource1 = new ConfigResource(ConfigResource.Type.TOPIC, topic1)
val topicConfig1 = new Properties
topicConfig1.setProperty(LogConfig.MaxMessageBytesProp, "500000")
topicConfig1.setProperty(LogConfig.RetentionMsProp, "60000000")
createTopic(topic1, numPartitions = 1, replicationFactor = 1, topicConfig1)
val topic2 = "describe-alter-configs-topic-2"
val topicResource2 = new ConfigResource(ConfigResource.Type.TOPIC, topic2)
createTopic(topic2, numPartitions = 1, replicationFactor = 1)
// Describe topics and broker
val brokerResource1 = new ConfigResource(ConfigResource.Type.BROKER, servers(1).config.brokerId.toString)
val brokerResource2 = new ConfigResource(ConfigResource.Type.BROKER, servers(2).config.brokerId.toString)
val configResources = Seq(topicResource1, topicResource2, brokerResource1, brokerResource2)
val describeResult = client.describeConfigs(configResources.asJava)
val configs = describeResult.all.get
assertEquals(4, configs.size)
val maxMessageBytes1 = configs.get(topicResource1).get(LogConfig.MaxMessageBytesProp)
assertEquals(LogConfig.MaxMessageBytesProp, maxMessageBytes1.name)
assertEquals(topicConfig1.get(LogConfig.MaxMessageBytesProp), maxMessageBytes1.value)
assertFalse(maxMessageBytes1.isDefault)
assertFalse(maxMessageBytes1.isSensitive)
assertFalse(maxMessageBytes1.isReadOnly)
assertEquals(topicConfig1.get(LogConfig.RetentionMsProp),
configs.get(topicResource1).get(LogConfig.RetentionMsProp).value)
val maxMessageBytes2 = configs.get(topicResource2).get(LogConfig.MaxMessageBytesProp)
assertEquals(Defaults.MessageMaxBytes.toString, maxMessageBytes2.value)
assertEquals(LogConfig.MaxMessageBytesProp, maxMessageBytes2.name)
assertTrue(maxMessageBytes2.isDefault)
assertFalse(maxMessageBytes2.isSensitive)
assertFalse(maxMessageBytes2.isReadOnly)
assertEquals(servers(1).config.values.size, configs.get(brokerResource1).entries.size)
assertEquals(servers(1).config.brokerId.toString, configs.get(brokerResource1).get(KafkaConfig.BrokerIdProp).value)
val listenerSecurityProtocolMap = configs.get(brokerResource1).get(KafkaConfig.ListenerSecurityProtocolMapProp)
assertEquals(servers(1).config.getString(KafkaConfig.ListenerSecurityProtocolMapProp), listenerSecurityProtocolMap.value)
assertEquals(KafkaConfig.ListenerSecurityProtocolMapProp, listenerSecurityProtocolMap.name)
assertFalse(listenerSecurityProtocolMap.isDefault)
assertFalse(listenerSecurityProtocolMap.isSensitive)
assertFalse(listenerSecurityProtocolMap.isReadOnly)
val truststorePassword = configs.get(brokerResource1).get(KafkaConfig.SslTruststorePasswordProp)
assertEquals(KafkaConfig.SslTruststorePasswordProp, truststorePassword.name)
assertNull(truststorePassword.value)
assertFalse(truststorePassword.isDefault)
assertTrue(truststorePassword.isSensitive)
assertFalse(truststorePassword.isReadOnly)
val compressionType = configs.get(brokerResource1).get(KafkaConfig.CompressionTypeProp)
assertEquals(servers(1).config.compressionType.toString, compressionType.value)
assertEquals(KafkaConfig.CompressionTypeProp, compressionType.name)
assertTrue(compressionType.isDefault)
assertFalse(compressionType.isSensitive)
assertFalse(compressionType.isReadOnly)
assertEquals(servers(2).config.values.size, configs.get(brokerResource2).entries.size)
assertEquals(servers(2).config.brokerId.toString, configs.get(brokerResource2).get(KafkaConfig.BrokerIdProp).value)
assertEquals(servers(2).config.logCleanerThreads.toString,
configs.get(brokerResource2).get(KafkaConfig.LogCleanerThreadsProp).value)
checkValidAlterConfigs(client, topicResource1, topicResource2)
}
@Test
def testCreatePartitions(): Unit = {
client = AdminClient.create(createConfig)
// Create topics
val topic1 = "create-partitions-topic-1"
createTopic(topic1, numPartitions = 1, replicationFactor = 1)
val topic2 = "create-partitions-topic-2"
createTopic(topic2, numPartitions = 1, replicationFactor = 2)
// assert that both the topics have 1 partition
assertEquals(1, client.describeTopics(Set(topic1).asJava).values.get(topic1).get.partitions.size)
assertEquals(1, client.describeTopics(Set(topic2).asJava).values.get(topic2).get.partitions.size)
val validateOnly = new CreatePartitionsOptions().validateOnly(true)
val actuallyDoIt = new CreatePartitionsOptions().validateOnly(false)
def partitions(topic: String) =
client.describeTopics(Set(topic).asJava).values.get(topic).get.partitions
def numPartitions(topic: String) =
partitions(topic).size
// validateOnly: try creating a new partition (no assignments), to bring the total to 3 partitions
var alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(3)).asJava, validateOnly)
var altered = alterResult.values.get(topic1).get
assertEquals(1, numPartitions(topic1))
// try creating a new partition (no assignments), to bring the total to 3 partitions
alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(3)).asJava, actuallyDoIt)
altered = alterResult.values.get(topic1).get
assertEquals(3, numPartitions(topic1))
// validateOnly: now try creating a new partition (with assignments), to bring the total to 3 partitions
val newPartition2Assignments = asList[util.List[Integer]](asList(0, 1), asList(1, 2))
alterResult = client.createPartitions(Map(topic2 ->
NewPartitions.increaseTo(3, newPartition2Assignments)).asJava, validateOnly)
altered = alterResult.values.get(topic2).get
assertEquals(1, numPartitions(topic2))
// now try creating a new partition (with assignments), to bring the total to 3 partitions
alterResult = client.createPartitions(Map(topic2 ->
NewPartitions.increaseTo(3, newPartition2Assignments)).asJava, actuallyDoIt)
altered = alterResult.values.get(topic2).get
val actualPartitions2 = partitions(topic2)
assertEquals(3, actualPartitions2.size)
assertEquals(Seq(0, 1), actualPartitions2.get(1).replicas.asScala.map(_.id).toList)
assertEquals(Seq(1, 2), actualPartitions2.get(2).replicas.asScala.map(_.id).toList)
// loop over error cases calling with+without validate-only
for (option <- Seq(validateOnly, actuallyDoIt)) {
val desc = if (option.validateOnly()) "validateOnly" else "validateOnly=false"
// try a newCount which would be a decrease
alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(1)).asJava, option)
try {
alterResult.values.get(topic1).get
fail(s"$desc: Expect InvalidPartitionsException when newCount is a decrease")
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidPartitionsException])
assertEquals(desc, "Topic currently has 3 partitions, which is higher than the requested 1.", e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic1))
}
// try a newCount which would be a noop (without assignment)
alterResult = client.createPartitions(Map(topic2 ->
NewPartitions.increaseTo(3)).asJava, option)
try {
alterResult.values.get(topic2).get
fail(s"$desc: Expect InvalidPartitionsException when requesting a noop")
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidPartitionsException])
assertEquals(desc, "Topic already has 3 partitions.", e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic2))
}
// try a newCount which would be a noop (where the assignment matches current state)
alterResult = client.createPartitions(Map(topic2 ->
NewPartitions.increaseTo(3, newPartition2Assignments)).asJava, option)
try {
alterResult.values.get(topic2).get
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidPartitionsException])
assertEquals(desc, "Topic already has 3 partitions.", e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic2))
}
// try a newCount which would be a noop (where the assignment doesn't match current state)
alterResult = client.createPartitions(Map(topic2 ->
NewPartitions.increaseTo(3, newPartition2Assignments.asScala.reverse.toList.asJava)).asJava, option)
try {
alterResult.values.get(topic2).get
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidPartitionsException])
assertEquals(desc, "Topic already has 3 partitions.", e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic2))
}
// try a bad topic name
val unknownTopic = "an-unknown-topic"
alterResult = client.createPartitions(Map(unknownTopic ->
NewPartitions.increaseTo(2)).asJava, option)
try {
alterResult.values.get(unknownTopic).get
fail(s"$desc: Expect InvalidTopicException when using an unknown topic")
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[UnknownTopicOrPartitionException])
assertEquals(desc, "The topic 'an-unknown-topic' does not exist.", e.getCause.getMessage)
}
// try an invalid newCount
alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(-22)).asJava, option)
try {
altered = alterResult.values.get(topic1).get
fail(s"$desc: Expect InvalidPartitionsException when newCount is invalid")
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidPartitionsException])
assertEquals(desc, "Topic currently has 3 partitions, which is higher than the requested -22.",
e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic1))
}
// try assignments where the number of brokers != replication factor
alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(4, asList(asList(1, 2)))).asJava, option)
try {
altered = alterResult.values.get(topic1).get
fail(s"$desc: Expect InvalidPartitionsException when #brokers != replication factor")
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidReplicaAssignmentException])
assertEquals(desc, "Inconsistent replication factor between partitions, partition 0 has 1 " +
"while partitions [3] have replication factors [2], respectively.",
e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic1))
}
      // try providing fewer assignments than the partition increase requires
alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(6, asList(asList(1)))).asJava, option)
try {
altered = alterResult.values.get(topic1).get
fail(s"$desc: Expect InvalidReplicaAssignmentException when #assignments != newCount - oldCount")
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidReplicaAssignmentException])
assertEquals(desc, "Increasing the number of partitions by 3 but 1 assignments provided.", e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic1))
}
      // try providing more assignments than the partition increase requires
alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(4, asList(asList(1), asList(2)))).asJava, option)
try {
altered = alterResult.values.get(topic1).get
fail(s"$desc: Expect InvalidReplicaAssignmentException when #assignments != newCount - oldCount")
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidReplicaAssignmentException])
assertEquals(desc, "Increasing the number of partitions by 1 but 2 assignments provided.", e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic1))
}
// try with duplicate brokers in assignments
alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(4, asList(asList(1, 1)))).asJava, option)
try {
altered = alterResult.values.get(topic1).get
fail(s"$desc: Expect InvalidReplicaAssignmentException when assignments has duplicate brokers")
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidReplicaAssignmentException])
assertEquals(desc, "Duplicate brokers not allowed in replica assignment: 1, 1 for partition id 3.",
e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic1))
}
// try assignments with differently sized inner lists
alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(5, asList(asList(1), asList(1, 0)))).asJava, option)
try {
altered = alterResult.values.get(topic1).get
fail(s"$desc: Expect InvalidReplicaAssignmentException when assignments have differently sized inner lists")
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidReplicaAssignmentException])
assertEquals(desc, "Inconsistent replication factor between partitions, partition 0 has 1 " +
"while partitions [4] have replication factors [2], respectively.", e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic1))
}
// try assignments with unknown brokers
alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(4, asList(asList(12)))).asJava, option)
try {
altered = alterResult.values.get(topic1).get
fail(s"$desc: Expect InvalidReplicaAssignmentException when assignments contains an unknown broker")
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidReplicaAssignmentException])
assertEquals(desc, "Unknown broker(s) in replica assignment: 12.", e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic1))
}
// try with empty assignments
alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(4, Collections.emptyList())).asJava, option)
try {
altered = alterResult.values.get(topic1).get
fail(s"$desc: Expect InvalidReplicaAssignmentException when assignments is empty")
} catch {
case e: ExecutionException =>
assertTrue(desc, e.getCause.isInstanceOf[InvalidReplicaAssignmentException])
assertEquals(desc, "Increasing the number of partitions by 1 but 0 assignments provided.", e.getCause.getMessage)
assertEquals(desc, 3, numPartitions(topic1))
}
}
// a mixed success, failure response
alterResult = client.createPartitions(Map(
topic1 -> NewPartitions.increaseTo(4),
topic2 -> NewPartitions.increaseTo(2)).asJava, actuallyDoIt)
// assert that the topic1 now has 4 partitions
altered = alterResult.values.get(topic1).get
assertEquals(4, numPartitions(topic1))
try {
altered = alterResult.values.get(topic2).get
} catch {
      case e: ExecutionException =>
assertTrue(e.getCause.isInstanceOf[InvalidPartitionsException])
assertEquals("Topic currently has 3 partitions, which is higher than the requested 2.", e.getCause.getMessage)
// assert that the topic2 still has 3 partitions
assertEquals(3, numPartitions(topic2))
}
// finally, try to add partitions to a topic queued for deletion
val deleteResult = client.deleteTopics(asList(topic1))
deleteResult.values.get(topic1).get
alterResult = client.createPartitions(Map(topic1 ->
NewPartitions.increaseTo(4)).asJava, validateOnly)
try {
altered = alterResult.values.get(topic1).get
fail("Expect InvalidTopicException when the topic is queued for deletion")
} catch {
case e: ExecutionException =>
assertTrue(e.getCause.isInstanceOf[InvalidTopicException])
assertEquals("The topic is queued for deletion.", e.getCause.getMessage)
}
}
@Test
def testSeekAfterDeleteRecords(): Unit = {
createTopic(topic, numPartitions = 2, replicationFactor = serverCount)
client = AdminClient.create(createConfig)
val consumer = consumers.head
subscribeAndWaitForAssignment(topic, consumer)
sendRecords(producers.head, 10, topicPartition)
consumer.seekToBeginning(Collections.singleton(topicPartition))
assertEquals(0L, consumer.position(topicPartition))
val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava)
val lowWatermark = result.lowWatermarks().get(topicPartition).get.lowWatermark
assertEquals(5L, lowWatermark)
consumer.seekToBeginning(Collections.singletonList(topicPartition))
assertEquals(5L, consumer.position(topicPartition))
consumer.seek(topicPartition, 7L)
assertEquals(7L, consumer.position(topicPartition))
client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(DeleteRecordsRequest.HIGH_WATERMARK)).asJava).all.get
consumer.seekToBeginning(Collections.singletonList(topicPartition))
assertEquals(10L, consumer.position(topicPartition))
}
@Test
def testLogStartOffsetCheckpoint(): Unit = {
createTopic(topic, numPartitions = 2, replicationFactor = serverCount)
client = AdminClient.create(createConfig)
subscribeAndWaitForAssignment(topic, consumers.head)
sendRecords(producers.head, 10, topicPartition)
var result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava)
var lowWatermark: Option[Long] = Some(result.lowWatermarks.get(topicPartition).get.lowWatermark)
assertEquals(Some(5), lowWatermark)
for (i <- 0 until serverCount) {
killBroker(i)
}
restartDeadBrokers()
client.close()
brokerList = TestUtils.bootstrapServers(servers, listenerName)
client = AdminClient.create(createConfig)
TestUtils.waitUntilTrue(() => {
// Need to retry if leader is not available for the partition
result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(0L)).asJava)
lowWatermark = None
val future = result.lowWatermarks().get(topicPartition)
try {
lowWatermark = Some(future.get.lowWatermark)
lowWatermark.contains(5L)
} catch {
case e: ExecutionException if e.getCause.isInstanceOf[LeaderNotAvailableException] ||
e.getCause.isInstanceOf[NotLeaderForPartitionException] => false
}
}, s"Expected low watermark of the partition to be 5 but got ${lowWatermark.getOrElse("no response within the timeout")}")
}
@Test
def testLogStartOffsetAfterDeleteRecords(): Unit = {
createTopic(topic, numPartitions = 2, replicationFactor = serverCount)
client = AdminClient.create(createConfig)
subscribeAndWaitForAssignment(topic, consumers.head)
sendRecords(producers.head, 10, topicPartition)
val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava)
val lowWatermark = result.lowWatermarks.get(topicPartition).get.lowWatermark
assertEquals(3L, lowWatermark)
for (i <- 0 until serverCount)
assertEquals(3, servers(i).replicaManager.getReplica(topicPartition).get.logStartOffset)
}
@Test
def testReplicaCanFetchFromLogStartOffsetAfterDeleteRecords(): Unit = {
val leaders = createTopic(topic, numPartitions = 1, replicationFactor = serverCount)
val followerIndex = if (leaders(0) != servers(0).config.brokerId) 0 else 1
def waitForFollowerLog(expectedStartOffset: Long, expectedEndOffset: Long): Unit = {
TestUtils.waitUntilTrue(() => servers(followerIndex).replicaManager.getReplica(topicPartition) != None,
"Expected follower to create replica for partition")
// wait until the follower discovers that log start offset moved beyond its HW
TestUtils.waitUntilTrue(() => {
servers(followerIndex).replicaManager.getReplica(topicPartition).get.logStartOffset == expectedStartOffset
}, s"Expected follower to discover new log start offset $expectedStartOffset")
TestUtils.waitUntilTrue(() => {
servers(followerIndex).replicaManager.getReplica(topicPartition).get.logEndOffset.messageOffset == expectedEndOffset
}, s"Expected follower to catch up to log end offset $expectedEndOffset")
}
// we will produce to topic and delete records while one follower is down
killBroker(followerIndex)
client = AdminClient.create(createConfig)
sendRecords(producers.head, 100, topicPartition)
val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava)
result.all().get()
// start the stopped broker to verify that it will be able to fetch from new log start offset
restartDeadBrokers()
waitForFollowerLog(expectedStartOffset=3L, expectedEndOffset=100L)
// after the new replica caught up, all replicas should have same log start offset
for (i <- 0 until serverCount)
assertEquals(3, servers(i).replicaManager.getReplica(topicPartition).get.logStartOffset)
// kill the same follower again, produce more records, and delete records beyond follower's LOE
killBroker(followerIndex)
sendRecords(producers.head, 100, topicPartition)
val result1 = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(117L)).asJava)
result1.all().get()
restartDeadBrokers()
waitForFollowerLog(expectedStartOffset=117L, expectedEndOffset=200L)
}
@Test
def testAlterLogDirsAfterDeleteRecords(): Unit = {
client = AdminClient.create(createConfig)
createTopic(topic, numPartitions = 1, replicationFactor = serverCount)
val expectedLEO = 100
sendRecords(producers.head, expectedLEO, topicPartition)
// delete records to move log start offset
val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava)
result.all().get()
// make sure we are in the expected state after delete records
for (i <- 0 until serverCount) {
assertEquals(3, servers(i).replicaManager.getReplica(topicPartition).get.logStartOffset)
assertEquals(expectedLEO, servers(i).replicaManager.getReplica(topicPartition).get.logEndOffset.messageOffset)
}
// we will create another dir just for one server
val futureLogDir = servers(0).config.logDirs(1)
val futureReplica = new TopicPartitionReplica(topic, 0, servers(0).config.brokerId)
// Verify that replica can be moved to the specified log directory
client.alterReplicaLogDirs(Map(futureReplica -> futureLogDir).asJava).all.get
TestUtils.waitUntilTrue(() => {
futureLogDir == servers(0).logManager.getLog(topicPartition).get.dir.getParent
}, "timed out waiting for replica movement")
// once replica moved, its LSO and LEO should match other replicas
assertEquals(3, servers(0).replicaManager.getReplica(topicPartition).get.logStartOffset)
assertEquals(expectedLEO, servers(0).replicaManager.getReplica(topicPartition).get.logEndOffset.messageOffset)
}
@Test
def testOffsetsForTimesAfterDeleteRecords(): Unit = {
createTopic(topic, numPartitions = 2, replicationFactor = serverCount)
client = AdminClient.create(createConfig)
val consumer = consumers.head
subscribeAndWaitForAssignment(topic, consumer)
sendRecords(producers.head, 10, topicPartition)
assertEquals(0L, consumer.offsetsForTimes(Map(topicPartition -> JLong.valueOf(0L)).asJava).get(topicPartition).offset())
var result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava)
result.all.get
assertEquals(5L, consumer.offsetsForTimes(Map(topicPartition -> JLong.valueOf(0L)).asJava).get(topicPartition).offset())
result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(DeleteRecordsRequest.HIGH_WATERMARK)).asJava)
result.all.get
assertNull(consumer.offsetsForTimes(Map(topicPartition -> JLong.valueOf(0L)).asJava).get(topicPartition))
}
@Test
def testConsumeAfterDeleteRecords(): Unit = {
val consumer = consumers.head
subscribeAndWaitForAssignment(topic, consumer)
client = AdminClient.create(createConfig)
sendRecords(producers.head, 10, topicPartition)
var messageCount = 0
TestUtils.waitUntilTrue(() => {
messageCount += consumer.poll(0).count
messageCount == 10
}, "Expected 10 messages", 3000L)
client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava).all.get
consumer.seek(topicPartition, 1)
messageCount = 0
TestUtils.waitUntilTrue(() => {
messageCount += consumer.poll(0).count
messageCount == 7
}, "Expected 7 messages", 3000L)
client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(8L)).asJava).all.get
consumer.seek(topicPartition, 1)
messageCount = 0
TestUtils.waitUntilTrue(() => {
messageCount += consumer.poll(0).count
messageCount == 2
}, "Expected 2 messages", 3000L)
}
@Test
def testDeleteRecordsWithException(): Unit = {
subscribeAndWaitForAssignment(topic, consumers.head)
client = AdminClient.create(createConfig)
sendRecords(producers.head, 10, topicPartition)
assertEquals(5L, client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava)
.lowWatermarks.get(topicPartition).get.lowWatermark)
// OffsetOutOfRangeException if offset > high_watermark
var cause = intercept[ExecutionException] {
client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(20L)).asJava).lowWatermarks.get(topicPartition).get
}.getCause
assertEquals(classOf[OffsetOutOfRangeException], cause.getClass)
val nonExistPartition = new TopicPartition(topic, 3)
// LeaderNotAvailableException if non existent partition
cause = intercept[ExecutionException] {
client.deleteRecords(Map(nonExistPartition -> RecordsToDelete.beforeOffset(20L)).asJava).lowWatermarks.get(nonExistPartition).get
}.getCause
assertEquals(classOf[LeaderNotAvailableException], cause.getClass)
}
@Test
def testDescribeConfigsForTopic(): Unit = {
createTopic(topic, numPartitions = 2, replicationFactor = serverCount)
client = AdminClient.create(createConfig)
val existingTopic = new ConfigResource(ConfigResource.Type.TOPIC, topic)
client.describeConfigs(Collections.singletonList(existingTopic)).values.get(existingTopic).get()
val nonExistentTopic = new ConfigResource(ConfigResource.Type.TOPIC, "unknown")
val describeResult1 = client.describeConfigs(Collections.singletonList(nonExistentTopic))
assertTrue(intercept[ExecutionException](describeResult1.values.get(nonExistentTopic).get).getCause.isInstanceOf[UnknownTopicOrPartitionException])
val invalidTopic = new ConfigResource(ConfigResource.Type.TOPIC, "(invalid topic)")
val describeResult2 = client.describeConfigs(Collections.singletonList(invalidTopic))
assertTrue(intercept[ExecutionException](describeResult2.values.get(invalidTopic).get).getCause.isInstanceOf[InvalidTopicException])
}
private def subscribeAndWaitForAssignment(topic: String, consumer: KafkaConsumer[Array[Byte], Array[Byte]]): Unit = {
consumer.subscribe(Collections.singletonList(topic))
TestUtils.waitUntilTrue(() => {
consumer.poll(0)
!consumer.assignment.isEmpty
}, "Expected non-empty assignment")
}
private def sendRecords(producer: KafkaProducer[Array[Byte], Array[Byte]],
numRecords: Int,
topicPartition: TopicPartition): Unit = {
val futures = (0 until numRecords).map( i => {
val record = new ProducerRecord(topicPartition.topic, topicPartition.partition, s"$i".getBytes, s"$i".getBytes)
debug(s"Sending this record: $record")
producer.send(record)
})
futures.foreach(_.get)
}
@Test
def testInvalidAlterConfigs(): Unit = {
client = AdminClient.create(createConfig)
checkInvalidAlterConfigs(zkClient, servers, client)
}
val ACL1 = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "mytopic3", PatternType.LITERAL),
new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW))
/**
* Test that ACL operations are not possible when the authorizer is disabled.
* Also see {@link kafka.api.SaslSslAdminClientIntegrationTest} for tests of ACL operations
* when the authorizer is enabled.
*/
@Test
def testAclOperations(): Unit = {
client = AdminClient.create(createConfig())
assertFutureExceptionTypeEquals(client.describeAcls(AclBindingFilter.ANY).values(), classOf[SecurityDisabledException])
assertFutureExceptionTypeEquals(client.createAcls(Collections.singleton(ACL1)).all(),
classOf[SecurityDisabledException])
assertFutureExceptionTypeEquals(client.deleteAcls(Collections.singleton(ACL1.toFilter())).all(),
classOf[SecurityDisabledException])
}
/**
* Test closing the AdminClient with a generous timeout. Calls in progress should be completed,
* since they can be done within the timeout. New calls should receive timeouts.
*/
@Test
def testDelayedClose(): Unit = {
client = AdminClient.create(createConfig())
val topics = Seq("mytopic", "mytopic2")
val newTopics = topics.map(new NewTopic(_, 1, 1))
val future = client.createTopics(newTopics.asJava, new CreateTopicsOptions().validateOnly(true)).all()
client.close(2, TimeUnit.HOURS)
val future2 = client.createTopics(newTopics.asJava, new CreateTopicsOptions().validateOnly(true)).all()
assertFutureExceptionTypeEquals(future2, classOf[TimeoutException])
future.get
client.close(30, TimeUnit.MINUTES) // multiple close-with-timeout should have no effect
}
/**
* Test closing the AdminClient with a timeout of 0, when there are calls with extremely long
* timeouts in progress. The calls should be aborted after the hard shutdown timeout elapses.
*/
@Test
def testForceClose(): Unit = {
val config = createConfig()
config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:22")
client = AdminClient.create(config)
// Because the bootstrap servers are set up incorrectly, this call will not complete, but must be
// cancelled by the close operation.
val future = client.createTopics(Seq("mytopic", "mytopic2").map(new NewTopic(_, 1, 1)).asJava,
new CreateTopicsOptions().timeoutMs(900000)).all()
client.close(0, TimeUnit.MILLISECONDS)
assertFutureExceptionTypeEquals(future, classOf[TimeoutException])
}
/**
* Check that a call with a timeout does not complete before the minimum timeout has elapsed,
* even when the default request timeout is shorter.
*/
@Test
def testMinimumRequestTimeouts(): Unit = {
val config = createConfig()
config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:22")
config.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "0")
client = AdminClient.create(config)
val startTimeMs = Time.SYSTEM.milliseconds()
val future = client.createTopics(Seq("mytopic", "mytopic2").map(new NewTopic(_, 1, 1)).asJava,
new CreateTopicsOptions().timeoutMs(2)).all()
assertFutureExceptionTypeEquals(future, classOf[TimeoutException])
val endTimeMs = Time.SYSTEM.milliseconds()
assertTrue("Expected the timeout to take at least one millisecond.", endTimeMs > startTimeMs);
}
/**
* Test injecting timeouts for calls that are in flight.
*/
@Test
def testCallInFlightTimeouts(): Unit = {
val config = createConfig()
config.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "100000000")
val factory = new KafkaAdminClientTest.FailureInjectingTimeoutProcessorFactory()
val client = KafkaAdminClientTest.createInternal(new AdminClientConfig(config), factory)
val future = client.createTopics(Seq("mytopic", "mytopic2").map(new NewTopic(_, 1, 1)).asJava,
new CreateTopicsOptions().validateOnly(true)).all()
assertFutureExceptionTypeEquals(future, classOf[TimeoutException])
val future2 = client.createTopics(Seq("mytopic3", "mytopic4").map(new NewTopic(_, 1, 1)).asJava,
new CreateTopicsOptions().validateOnly(true)).all()
future2.get
assertEquals(1, factory.failuresInjected)
}
/**
* Test the consumer group APIs.
*/
@Test
def testConsumerGroups(): Unit = {
val config = createConfig()
val client = AdminClient.create(config)
try {
// Verify that initially there are no consumer groups to list.
val list1 = client.listConsumerGroups()
assertTrue(0 == list1.all().get().size())
assertTrue(0 == list1.errors().get().size())
assertTrue(0 == list1.valid().get().size())
val testTopicName = "test_topic"
val testNumPartitions = 2
client.createTopics(Collections.singleton(
new NewTopic(testTopicName, testNumPartitions, 1))).all().get()
waitForTopics(client, List(testTopicName), List())
val producer = createNewProducer
try {
producer.send(new ProducerRecord(testTopicName, 0, null, null)).get()
} finally {
Utils.closeQuietly(producer, "producer")
}
val testGroupId = "test_group_id"
val testClientId = "test_client_id"
val fakeGroupId = "fake_group_id"
val newConsumerConfig = new Properties(consumerConfig)
newConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId)
newConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId)
val consumer = TestUtils.createNewConsumer(brokerList,
securityProtocol = this.securityProtocol,
trustStoreFile = this.trustStoreFile,
saslProperties = this.clientSaslProperties,
props = Some(newConsumerConfig))
try {
// Start a consumer in a thread that will subscribe to a new group.
val consumerThread = new Thread {
override def run {
consumer.subscribe(Collections.singleton(testTopicName))
while (true) {
consumer.poll(5000)
consumer.commitSync()
}
}
}
try {
consumerThread.start
// Test that we can list the new group.
TestUtils.waitUntilTrue(() => {
val matching = client.listConsumerGroups().all().get().asScala.
filter(listing => listing.groupId().equals(testGroupId))
!matching.isEmpty
}, s"Expected to be able to list $testGroupId")
val result = client.describeConsumerGroups(Seq(testGroupId, fakeGroupId).asJava)
assertEquals(2, result.describedGroups().size())
// Test that we can get information about the test consumer group.
assertTrue(result.describedGroups().containsKey(testGroupId))
val testGroupDescription = result.describedGroups().get(testGroupId).get()
assertEquals(testGroupId, testGroupDescription.groupId())
assertFalse(testGroupDescription.isSimpleConsumerGroup())
assertEquals(1, testGroupDescription.members().size())
val member = testGroupDescription.members().iterator().next()
assertEquals(testClientId, member.clientId())
val topicPartitions = member.assignment().topicPartitions()
assertEquals(testNumPartitions, topicPartitions.size())
assertEquals(testNumPartitions, topicPartitions.asScala.
count(tp => tp.topic().equals(testTopicName)))
// Test that the fake group is listed as dead.
assertTrue(result.describedGroups().containsKey(fakeGroupId))
val fakeGroupDescription = result.describedGroups().get(fakeGroupId).get()
assertEquals(fakeGroupId, fakeGroupDescription.groupId())
assertEquals(0, fakeGroupDescription.members().size())
assertEquals("", fakeGroupDescription.partitionAssignor())
assertEquals(ConsumerGroupState.DEAD, fakeGroupDescription.state())
// Test that all() returns 2 results
assertEquals(2, result.all().get().size())
// Test listConsumerGroupOffsets
TestUtils.waitUntilTrue(() => {
val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get()
val part = new TopicPartition(testTopicName, 0)
parts.containsKey(part) && (parts.get(part).offset() == 1)
}, s"Expected the offset for partition 0 to eventually become 1.")
// Test consumer group deletion
val deleteResult = client.deleteConsumerGroups(Seq(testGroupId, fakeGroupId).asJava)
assertEquals(2, deleteResult.deletedGroups().size())
// Deleting the fake group ID should get GroupIdNotFoundException.
assertTrue(deleteResult.deletedGroups().containsKey(fakeGroupId))
assertFutureExceptionTypeEquals(deleteResult.deletedGroups().get(fakeGroupId),
classOf[GroupIdNotFoundException])
// Deleting the real group ID should get GroupNotEmptyException
assertTrue(deleteResult.deletedGroups().containsKey(testGroupId))
assertFutureExceptionTypeEquals(deleteResult.deletedGroups().get(testGroupId),
classOf[GroupNotEmptyException])
} finally {
consumerThread.interrupt()
consumerThread.join()
}
} finally {
Utils.closeQuietly(consumer, "consumer")
}
} finally {
Utils.closeQuietly(client, "adminClient")
}
}
}
object AdminClientIntegrationTest {
import org.scalatest.Assertions._
def checkValidAlterConfigs(client: AdminClient, topicResource1: ConfigResource, topicResource2: ConfigResource): Unit = {
// Alter topics
var topicConfigEntries1 = Seq(
new ConfigEntry(LogConfig.FlushMsProp, "1000")
).asJava
var topicConfigEntries2 = Seq(
new ConfigEntry(LogConfig.MinCleanableDirtyRatioProp, "0.9"),
new ConfigEntry(LogConfig.CompressionTypeProp, "lz4")
).asJava
var alterResult = client.alterConfigs(Map(
topicResource1 -> new Config(topicConfigEntries1),
topicResource2 -> new Config(topicConfigEntries2)
).asJava)
assertEquals(Set(topicResource1, topicResource2).asJava, alterResult.values.keySet)
alterResult.all.get
// Verify that topics were updated correctly
var describeResult = client.describeConfigs(Seq(topicResource1, topicResource2).asJava)
var configs = describeResult.all.get
assertEquals(2, configs.size)
assertEquals("1000", configs.get(topicResource1).get(LogConfig.FlushMsProp).value)
assertEquals(Defaults.MessageMaxBytes.toString,
configs.get(topicResource1).get(LogConfig.MaxMessageBytesProp).value)
assertEquals((Defaults.LogRetentionHours * 60 * 60 * 1000).toString,
configs.get(topicResource1).get(LogConfig.RetentionMsProp).value)
assertEquals("0.9", configs.get(topicResource2).get(LogConfig.MinCleanableDirtyRatioProp).value)
assertEquals("lz4", configs.get(topicResource2).get(LogConfig.CompressionTypeProp).value)
// Alter topics with validateOnly=true
topicConfigEntries1 = Seq(
new ConfigEntry(LogConfig.MaxMessageBytesProp, "10")
).asJava
topicConfigEntries2 = Seq(
new ConfigEntry(LogConfig.MinCleanableDirtyRatioProp, "0.3")
).asJava
alterResult = client.alterConfigs(Map(
topicResource1 -> new Config(topicConfigEntries1),
topicResource2 -> new Config(topicConfigEntries2)
).asJava, new AlterConfigsOptions().validateOnly(true))
assertEquals(Set(topicResource1, topicResource2).asJava, alterResult.values.keySet)
alterResult.all.get
// Verify that topics were not updated due to validateOnly = true
describeResult = client.describeConfigs(Seq(topicResource1, topicResource2).asJava)
configs = describeResult.all.get
assertEquals(2, configs.size)
assertEquals(Defaults.MessageMaxBytes.toString,
configs.get(topicResource1).get(LogConfig.MaxMessageBytesProp).value)
assertEquals("0.9", configs.get(topicResource2).get(LogConfig.MinCleanableDirtyRatioProp).value)
}
def checkInvalidAlterConfigs(zkClient: KafkaZkClient, servers: Seq[KafkaServer], client: AdminClient): Unit = {
// Create topics
val topic1 = "invalid-alter-configs-topic-1"
val topicResource1 = new ConfigResource(ConfigResource.Type.TOPIC, topic1)
TestUtils.createTopic(zkClient, topic1, 1, 1, servers)
val topic2 = "invalid-alter-configs-topic-2"
val topicResource2 = new ConfigResource(ConfigResource.Type.TOPIC, topic2)
TestUtils.createTopic(zkClient, topic2, 1, 1, servers)
val topicConfigEntries1 = Seq(
new ConfigEntry(LogConfig.MinCleanableDirtyRatioProp, "1.1"), // this value is invalid as it's above 1.0
new ConfigEntry(LogConfig.CompressionTypeProp, "lz4")
).asJava
var topicConfigEntries2 = Seq(new ConfigEntry(LogConfig.CompressionTypeProp, "snappy")).asJava
val brokerResource = new ConfigResource(ConfigResource.Type.BROKER, servers.head.config.brokerId.toString)
val brokerConfigEntries = Seq(new ConfigEntry(KafkaConfig.ZkConnectProp, "localhost:2181")).asJava
// Alter configs: first and third are invalid, second is valid
var alterResult = client.alterConfigs(Map(
topicResource1 -> new Config(topicConfigEntries1),
topicResource2 -> new Config(topicConfigEntries2),
brokerResource -> new Config(brokerConfigEntries)
).asJava)
assertEquals(Set(topicResource1, topicResource2, brokerResource).asJava, alterResult.values.keySet)
assertTrue(intercept[ExecutionException](alterResult.values.get(topicResource1).get).getCause.isInstanceOf[InvalidRequestException])
alterResult.values.get(topicResource2).get
assertTrue(intercept[ExecutionException](alterResult.values.get(brokerResource).get).getCause.isInstanceOf[InvalidRequestException])
// Verify that first and third resources were not updated and second was updated
var describeResult = client.describeConfigs(Seq(topicResource1, topicResource2, brokerResource).asJava)
var configs = describeResult.all.get
assertEquals(3, configs.size)
assertEquals(Defaults.LogCleanerMinCleanRatio.toString,
configs.get(topicResource1).get(LogConfig.MinCleanableDirtyRatioProp).value)
assertEquals(Defaults.CompressionType.toString,
configs.get(topicResource1).get(LogConfig.CompressionTypeProp).value)
assertEquals("snappy", configs.get(topicResource2).get(LogConfig.CompressionTypeProp).value)
assertEquals(Defaults.CompressionType.toString, configs.get(brokerResource).get(KafkaConfig.CompressionTypeProp).value)
// Alter configs with validateOnly = true: first and third are invalid, second is valid
topicConfigEntries2 = Seq(new ConfigEntry(LogConfig.CompressionTypeProp, "gzip")).asJava
alterResult = client.alterConfigs(Map(
topicResource1 -> new Config(topicConfigEntries1),
topicResource2 -> new Config(topicConfigEntries2),
brokerResource -> new Config(brokerConfigEntries)
).asJava, new AlterConfigsOptions().validateOnly(true))
assertEquals(Set(topicResource1, topicResource2, brokerResource).asJava, alterResult.values.keySet)
assertTrue(intercept[ExecutionException](alterResult.values.get(topicResource1).get).getCause.isInstanceOf[InvalidRequestException])
alterResult.values.get(topicResource2).get
assertTrue(intercept[ExecutionException](alterResult.values.get(brokerResource).get).getCause.isInstanceOf[InvalidRequestException])
// Verify that no resources are updated since validate_only = true
describeResult = client.describeConfigs(Seq(topicResource1, topicResource2, brokerResource).asJava)
configs = describeResult.all.get
assertEquals(3, configs.size)
assertEquals(Defaults.LogCleanerMinCleanRatio.toString,
configs.get(topicResource1).get(LogConfig.MinCleanableDirtyRatioProp).value)
assertEquals(Defaults.CompressionType.toString,
configs.get(topicResource1).get(LogConfig.CompressionTypeProp).value)
assertEquals("snappy", configs.get(topicResource2).get(LogConfig.CompressionTypeProp).value)
assertEquals(Defaults.CompressionType.toString, configs.get(brokerResource).get(KafkaConfig.CompressionTypeProp).value)
}
}
| Ishiihara/kafka | core/src/test/scala/integration/kafka/api/AdminClientIntegrationTest.scala | Scala | apache-2.0 | 62,709 |
package com.sksamuel.elastic4s
import com.sksamuel.elastic4s.DefinitionAttributes.{DefinitionAttributePreference, DefinitionAttributeRefresh}
import org.elasticsearch.action.get.MultiGetRequest.Item
import org.elasticsearch.action.get.{MultiGetRequest, MultiGetRequestBuilder, MultiGetResponse}
import org.elasticsearch.client.Client
import scala.concurrent.Future
/** @author Stephen Samuel */
trait MultiGetDsl extends GetDsl {
implicit object MultiGetDefinitionExecutable
extends Executable[MultiGetDefinition, MultiGetResponse, MultiGetResponse] {
override def apply(c: Client, t: MultiGetDefinition): Future[MultiGetResponse] = {
injectFuture(c.multiGet(t.build, _))
}
}
}
class MultiGetDefinition(gets: Iterable[GetDefinition])
extends DefinitionAttributePreference
with DefinitionAttributeRefresh {
val _builder = new MultiGetRequestBuilder(ProxyClients.client)
gets foreach { get =>
val item = new Item(get.indexesTypes.index, get.indexesTypes.typ.orNull, get.id)
item.routing(get.build.routing())
item.fields(get.build.fields():_*)
item.version(get.build.version())
_builder.add(item)
}
def build: MultiGetRequest = _builder.request()
def realtime(realtime: Boolean): this.type = {
_builder.setRealtime(realtime)
this
}
}
| ExNexu/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/MultiGetDsl.scala | Scala | apache-2.0 | 1,306 |
/*
* Copyright 2016 Groupon, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.groupon.sparklint.events
import java.io.{File, FileNotFoundException}
import scala.collection.mutable
import scala.util.{Failure, Success, Try}
/**
* @throws FileNotFoundException if the folder provided doesn't exist or is a file
* @param folder the folder of the log files
* @author rxue
* @since 1.0.5
*/
@throws[FileNotFoundException]
class FolderEventSourceGroupManager(folder: File) extends GenericEventSourceGroupManager(folder.getName, true) {
if (!folder.exists() || folder.isFile) {
throw new FileNotFoundException(folder.getAbsolutePath)
}
private val ignoredFiles: mutable.Set[String] = mutable.Set.empty
def pull(): Unit = {
for (file <- folder.listFiles().filter(_.isFile).filter(f => !ignoredFiles.contains(f.getAbsolutePath))) {
tryPullEventSource(file) match {
case Success(_) | Failure(_: UnrecognizedLogFileException) =>
ignoredFiles.add(file.getAbsolutePath)
case _ =>
// allow retry for failures other than UnrecognizedLogFileException
}
}
}
private def tryPullEventSource(file: File): Try[EventSource] = Try({
val es = EventSource.fromFile(file)
registerEventSource(es)
es
})
}
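// A hypothetical usage sketch (not part of the original source): point the manager at a folder
// of Spark event logs and poll it periodically. The path below is a placeholder.
//
// val manager = new FolderEventSourceGroupManager(new java.io.File("/var/log/spark-events"))
// manager.pull() // registers any newly recognizable log files as event sources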
| groupon/sparklint | src/main/scala/com/groupon/sparklint/events/FolderEventSourceGroupManager.scala | Scala | apache-2.0 | 1,809 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package connectors
import config.ApplicationConfig
import exceptions.{DuplicateEnrolmentException, InvalidEnrolmentCredentialsException}
import generators.auth.UserDetailsGenerator
import generators.{AmlsReferenceNumberGenerator, BaseGenerator}
import models.enrolment.{AmlsEnrolmentKey, ErrorResponse, TaxEnrolment}
import org.mockito.Matchers.{any, eq => eqTo}
import org.mockito.Mockito.{verify, when}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import play.api.libs.json.Json
import play.api.test.Helpers._
import uk.gov.hmrc.http.{HeaderCarrier, HttpClient, HttpResponse, UpstreamErrorResponse}
import uk.gov.hmrc.play.audit.http.connector.AuditConnector
import utils.AmlsSpec
import scala.concurrent.Future
class TaxEnrolmentsConnectorSpec extends AmlsSpec
with ScalaFutures
with AmlsReferenceNumberGenerator
with UserDetailsGenerator
with BaseGenerator {
implicit override val patienceConfig = PatienceConfig(timeout = Span(2, Seconds), interval = Span(20, Millis))
trait Fixture {
val http = mock[HttpClient]
val appConfig = mock[ApplicationConfig]
val auditConnector = mock[AuditConnector]
val groupIdentfier = stringOfLengthGen(10).sample.get
implicit val headerCarrier: HeaderCarrier = HeaderCarrier()
val connector = new TaxEnrolmentsConnector(http, appConfig, auditConnector)
val baseUrl = "http://localhost:3001"
val serviceStub = "tax-enrolments"
val enrolKey = AmlsEnrolmentKey(amlsRegistrationNumber)
when {
appConfig.enrolmentStoreUrl
} thenReturn baseUrl
when {
appConfig.enrolmentStubsUrl
} thenReturn serviceStub
val enrolment = TaxEnrolment("123456789", postcodeGen.sample.get)
def jsonError(code: String, message: String): String = Json.toJson(ErrorResponse(code, message)).toString
}
"configuration" when {
"stubbed" must {
"return stubs base url" in new Fixture {
when {
appConfig.enrolmentStubsEnabled
} thenReturn true
connector.baseUrl mustBe s"${appConfig.enrolmentStubsUrl}/tax-enrolments"
}
}
"not stubbed" must {
"return tax enrolments base url" in new Fixture {
connector.baseUrl mustBe s"${appConfig.enrolmentStoreUrl}/tax-enrolments"
}
}
}
"enrol" when {
"called" must {
"call the ES8 enrolment store endpoint to enrol the user" in new Fixture {
val endpointUrl = s"$baseUrl/${serviceStub}/groups/$groupIdentfier/enrolments/${enrolKey.key}"
when {
http.POST[TaxEnrolment, HttpResponse](any(), any(), any())(any(), any(), any(), any())
} thenReturn Future.successful(HttpResponse(OK, ""))
whenReady(connector.enrol(enrolKey, enrolment, Some(groupIdentfier))) { _ =>
verify(http).POST[TaxEnrolment, HttpResponse](eqTo(endpointUrl), eqTo(enrolment), any())(any(), any(), any(), any())
verify(auditConnector).sendEvent(any())(any(), any())
}
}
"throw an exception when no group identifier is available" in new Fixture {
intercept[Exception] {
await(connector.enrol(enrolKey, enrolment, None))
}
}
"throws a DuplicateEnrolmentException when the enrolment has already been created" in new Fixture {
when {
http.POST[TaxEnrolment, HttpResponse](any(), any(), any())(any(), any(), any(), any())
} thenReturn Future.failed(UpstreamErrorResponse(jsonError("ERROR_INVALID_IDENTIFIERS", "The enrolment identifiers provided were invalid"), BAD_REQUEST, BAD_REQUEST))
intercept[DuplicateEnrolmentException] {
await(connector.enrol(enrolKey, enrolment, Some(groupIdentfier)))
}
}
"throws a InvalidEnrolmentCredentialsException when the enrolment has the wrong type of role" in new Fixture {
when {
http.POST[TaxEnrolment, HttpResponse](any(), any(), any())(any(), any(), any(), any())
} thenReturn Future.failed(UpstreamErrorResponse(jsonError("INVALID_CREDENTIAL_ID", "Invalid credential ID"), FORBIDDEN, FORBIDDEN))
intercept[InvalidEnrolmentCredentialsException] {
await(connector.enrol(enrolKey, enrolment, Some(groupIdentfier)))
}
}
}
}
"deEnrol" when {
"called" must {
"call the ES9 API endpoint" in new Fixture {
val endpointUrl = s"$baseUrl/${serviceStub}/groups/$groupIdentfier/enrolments/${enrolKey.key}"
when {
http.DELETE[HttpResponse](any(), any())(any(), any(), any())
} thenReturn Future.successful(HttpResponse(NO_CONTENT, ""))
whenReady(connector.deEnrol(amlsRegistrationNumber, Some(groupIdentfier))) { _ =>
verify(http).DELETE[HttpResponse](eqTo(endpointUrl), any())(any(), any(), any())
verify(auditConnector).sendEvent(any())(any(), any())
}
}
"throw an exception when there is no group identifier" in new Fixture {
intercept[Exception] {
await(connector.deEnrol(amlsRegistrationNumber, None))
} match {
case ex => ex.getMessage mustBe "Group identifier is unavailable"
}
}
}
}
"removeKnownFacts" when {
"called" must {
"call the ES7 API endpoint" in new Fixture {
val endpointUrl = s"$baseUrl/${serviceStub}/enrolments/${enrolKey.key}"
when {
http.DELETE[HttpResponse](any(), any())(any(), any(), any())
} thenReturn Future.successful(HttpResponse(NO_CONTENT, ""))
whenReady(connector.removeKnownFacts(amlsRegistrationNumber)) { _ =>
verify(http).DELETE[HttpResponse](eqTo(endpointUrl), any())(any(), any(), any())
verify(auditConnector).sendEvent(any())(any(), any())
}
}
}
}
}
| hmrc/amls-frontend | test/connectors/TaxEnrolmentsConnectorSpec.scala | Scala | apache-2.0 | 6,385 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.raster.data
import java.io.{Closeable, Serializable}
import java.util.Map.Entry
import java.util.concurrent.{Callable, TimeUnit}
import java.util.{Map => JMap}
import com.google.common.cache.CacheBuilder
import com.google.common.collect.{ImmutableMap, ImmutableSetMultimap}
import com.typesafe.scalalogging.LazyLogging
import org.apache.accumulo.core.client.{BatchWriterConfig, Connector, TableExistsException}
import org.apache.accumulo.core.data.{Key, Mutation, Range, Value}
import org.apache.accumulo.core.security.TablePermission
import org.geotools.coverage.grid.GridEnvelope2D
import org.joda.time.DateTime
import org.locationtech.geomesa.accumulo.data.stats.usage._
import org.locationtech.geomesa.accumulo.index.Strategy._
import org.locationtech.geomesa.accumulo.iterators.BBOXCombiner._
import org.locationtech.geomesa.accumulo.util.SelfClosingIterator
import org.locationtech.geomesa.raster._
import org.locationtech.geomesa.raster.index.RasterIndexSchema
import org.locationtech.geomesa.raster.util.RasterUtils
import org.locationtech.geomesa.security.AuthorizationsProvider
import org.locationtech.geomesa.utils.geohash.BoundingBox
import org.locationtech.geomesa.utils.stats.{MethodProfiling, NoOpTimings, Timings, TimingsImpl}
import scala.collection.JavaConversions._
class AccumuloRasterStore(val connector: Connector,
val tableName: String,
val authorizationsProvider: AuthorizationsProvider,
val writeVisibilities: String,
writeMemoryConfig: Option[String] = None,
writeThreadsConfig: Option[Int] = None,
queryThreadsConfig: Option[Int] = None,
collectStats: Boolean = false) extends Closeable with MethodProfiling with LazyLogging {
val writeMemory = writeMemoryConfig.getOrElse("10000").toLong
val writeThreads = writeThreadsConfig.getOrElse(10)
val bwConfig: BatchWriterConfig =
new BatchWriterConfig().setMaxMemory(writeMemory).setMaxWriteThreads(writeThreads)
val numQThreads = queryThreadsConfig.getOrElse(20)
private val tableOps = connector.tableOperations()
private val securityOps = connector.securityOperations
private val profileTable = s"${tableName}_queries"
private def getBoundsRowID = tableName + "_bounds"
private val usageStats = if (collectStats) new GeoMesaUsageStatsImpl(connector, profileTable, true) else null
def getAuths = authorizationsProvider.getAuthorizations
/**
   * Given a query, return a single buffered image that is a mosaic of the matching tiles.
* This is primarily used to satisfy WCS/WMS queries.
*
   * @param query the raster query describing which tiles to fetch
   * @param params the coverage query parameters (width, height and envelope of the output)
   * @return a mosaicked image covering the requested envelope
*/
def getMosaicedRaster(query: RasterQuery, params: GeoMesaCoverageQueryParams) = {
implicit val timings = if (collectStats) new TimingsImpl else NoOpTimings
val rasters = getRasters(query)
val (image, numRasters) = profile(
RasterUtils.mosaicChunks(rasters, params.width.toInt, params.height.toInt, params.envelope),
"mosaic")
if (collectStats) {
val stat = RasterQueryStat(tableName,
System.currentTimeMillis(),
query.toString,
timings.time("planning"),
timings.time("scanning") - timings.time("planning"),
timings.time("mosaic"),
numRasters)
usageStats.writeUsageStat(stat)
}
image
}
def getRasters(rasterQuery: RasterQuery)(implicit timings: Timings): Iterator[Raster] = {
profile({
val batchScanner = connector.createBatchScanner(tableName, authorizationsProvider.getAuthorizations, numQThreads)
val plan = AccumuloRasterQueryPlanner.getQueryPlan(rasterQuery, getResToGeoHashLenMap, getResToBoundsMap)
plan match {
case Some(qp) =>
configureBatchScanner(batchScanner, qp)
adaptIteratorToChunks(SelfClosingIterator(batchScanner))
case _ => Iterator.empty
}
}, "scanning")
}
def getQueryRecords(numRecords: Int): Iterator[String] = {
val scanner = connector.createScanner(profileTable, authorizationsProvider.getAuthorizations)
scanner.iterator.take(numRecords).map(RasterQueryStatTransform.decodeStat)
}
def getBounds: BoundingBox = {
ensureBoundsTableExists()
val scanner = connector.createScanner(GEOMESA_RASTER_BOUNDS_TABLE, authorizationsProvider.getAuthorizations)
scanner.setRange(new Range(getBoundsRowID))
val resultingBounds = SelfClosingIterator(scanner)
if (resultingBounds.isEmpty) {
BoundingBox(-180, 180, -90, 90)
} else {
//TODO: GEOMESA-646 anti-meridian questions
reduceValuesToBoundingBox(resultingBounds.map(_.getValue))
}
}
def getAvailableBoundingBoxes: Seq[BoundingBox] = getResToBoundsMap.values().toSeq
def getAvailableResolutions: Seq[Double] = getResToGeoHashLenMap.keySet.toSeq.sorted
def getAvailableGeoHashLengths: Set[Int] = getResToGeoHashLenMap.values().toSet
def getResToGeoHashLenMap: ImmutableSetMultimap[Double, Int] =
AccumuloRasterStore.geoHashLenCache.get(tableName, resToGeoHashLenMapCallable)
def resToGeoHashLenMapCallable = new Callable[ImmutableSetMultimap[Double, Int]] {
override def call(): ImmutableSetMultimap[Double, Int] = {
val m = new ImmutableSetMultimap.Builder[Double, Int]()
for {
k <- metaScanner().map(_.getKey)
} {
val resolution = lexiDecodeStringToDouble(k.getColumnQualifier.toString)
val geohashlen = lexiDecodeStringToInt(k.getColumnFamily.toString)
m.put(resolution, geohashlen)
}
m.build()
}
}
def getResToBoundsMap: ImmutableMap[Double, BoundingBox] =
AccumuloRasterStore.extentCache.get(tableName, resToBoundsCallable)
def resToBoundsCallable = new Callable[ImmutableMap[Double, BoundingBox]] {
override def call(): ImmutableMap[Double, BoundingBox] = {
val m = new ImmutableMap.Builder[Double, BoundingBox]()
for {
kv <- metaScanner()
} {
val resolution = lexiDecodeStringToDouble(kv.getKey.getColumnQualifier.toString)
val bounds = valueToBbox(kv.getValue)
m.put(resolution, bounds)
}
m.build()
}
}
def metaScanner = () => {
ensureBoundsTableExists()
val scanner = connector.createScanner(GEOMESA_RASTER_BOUNDS_TABLE, getAuths)
scanner.setRange(new Range(getBoundsRowID))
SelfClosingIterator(scanner)
}
def getGridRange: GridEnvelope2D = {
val bounds = getBounds
val resolutions = getAvailableResolutions
// If no resolutions are available, then we have an empty table so assume default value for now
// TODO: determine what to do about the resolution, arguably should be resolutions.max: https://geomesa.atlassian.net/browse/GEOMESA-868
val resolution = if (resolutions.isEmpty) defaultResolution else resolutions.min
val width = Math.abs(bounds.getWidth / resolution).toInt
val height = Math.abs(bounds.getHeight / resolution).toInt
new GridEnvelope2D(0, 0, width, height)
}
def adaptIteratorToChunks(iter: java.util.Iterator[Entry[Key, Value]]): Iterator[Raster] = {
iter.map(entry => RasterIndexSchema.decode((entry.getKey, entry.getValue)))
}
private def dateToAccTimestamp(dt: DateTime): Long = dt.getMillis / 1000
private def createBoundsMutation(raster: Raster): Mutation = {
// write the bounds mutation
val mutation = new Mutation(getBoundsRowID)
val value = bboxToValue(BoundingBox(raster.metadata.geom.getEnvelopeInternal))
val resolution = lexiEncodeDoubleToString(raster.resolution)
val geohashlen = lexiEncodeIntToString(raster.minimumBoundingGeoHash.map( _.hash.length ).getOrElse(0))
mutation.put(geohashlen, resolution, value)
mutation
}
private def createMutation(raster: Raster): Mutation = {
val (key, value) = RasterIndexSchema.encode(raster, writeVisibilities)
val mutation = new Mutation(key.getRow)
val colFam = key.getColumnFamily
val colQual = key.getColumnQualifier
val colVis = key.getColumnVisibilityParsed
val timestamp: Long = dateToAccTimestamp(raster.time)
mutation.put(colFam, colQual, colVis, timestamp, value)
mutation
}
def putRasters(rasters: Seq[Raster]) = rasters.foreach(putRaster)
def putRaster(raster: Raster) {
writeMutations(tableName, createMutation(raster))
writeMutations(GEOMESA_RASTER_BOUNDS_TABLE, createBoundsMutation(raster))
}
private def writeMutations(tableName: String, mutations: Mutation*) {
val writer = connector.createBatchWriter(tableName, bwConfig)
mutations.foreach { m => writer.addMutation(m) }
writer.flush()
writer.close()
}
def createTableStructure() = {
ensureTableExists(tableName)
ensureBoundsTableExists()
}
def ensureBoundsTableExists() = {
createTable(GEOMESA_RASTER_BOUNDS_TABLE)
if (!tableOps.listIterators(GEOMESA_RASTER_BOUNDS_TABLE).containsKey("GEOMESA_BBOX_COMBINER")) {
val bboxcombinercfg = AccumuloRasterBoundsPlanner.getBoundsScannerCfg(tableName)
tableOps.attachIterator(GEOMESA_RASTER_BOUNDS_TABLE, bboxcombinercfg)
}
}
private def ensureTableExists(tableName: String) {
// TODO: WCS: ensure that this does not duplicate what is done in AccumuloDataStore
// Perhaps consolidate with different default configurations
// GEOMESA-564
val user = connector.whoami
val defaultVisibilities = authorizationsProvider.getAuthorizations.toString.replaceAll(",", "&")
if (!tableOps.exists(tableName)) {
createTables(user, defaultVisibilities, Array(tableName, profileTable):_*)
}
}
private def createTables(user: String, defaultVisibilities: String, tableNames: String*) = {
tableNames.foreach(tableName => {
createTable(tableName)
AccumuloRasterTableConfig.settings(defaultVisibilities).foreach { case (key, value) =>
tableOps.setProperty(tableName, key, value)
}
AccumuloRasterTableConfig.permissions.split(",").foreach { p =>
securityOps.grantTablePermission(user, tableName, TablePermission.valueOf(p))
}
})
}
private def createTable(tableName: String) = {
if(!tableOps.exists(tableName)) {
try {
tableOps.create(tableName)
} catch {
case e: TableExistsException => // this can happen with multiple threads but shouldn't cause any issues
}
}
}
def deleteRasterTable(): Unit = {
deleteMetaData()
deleteTable(profileTable)
deleteTable(tableName)
}
private def deleteTable(table: String): Unit = {
try {
if (tableOps.exists(table)) {
tableOps.delete(table)
}
} catch {
case e: Exception => logger.warn(s"Error occurred when attempting to delete table: $table", e)
}
}
private def deleteMetaData(): Unit = {
try {
if (tableOps.exists(GEOMESA_RASTER_BOUNDS_TABLE)) {
val deleter = connector.createBatchDeleter(GEOMESA_RASTER_BOUNDS_TABLE, getAuths, 3, bwConfig)
val deleteRange = new Range(getBoundsRowID)
deleter.setRanges(Seq(deleteRange))
deleter.delete()
deleter.close()
AccumuloRasterStore.geoHashLenCache.invalidate(tableName)
}
} catch {
case e: Exception => logger.warn(s"Error occurred when attempting to delete Metadata for table: $tableName")
}
}
override def close(): Unit = if (usageStats != null) { usageStats.close() }
}
object AccumuloRasterStore {
import org.locationtech.geomesa.accumulo.data.AccumuloDataStoreFactory._
import org.locationtech.geomesa.accumulo.data.AccumuloDataStoreParams._
def apply(username: String,
password: String,
instanceId: String,
zookeepers: String,
tableName: String,
auths: String,
writeVisibilities: String,
useMock: Boolean = false,
writeMemoryConfig: Option[String] = None,
writeThreadsConfig: Option[Int] = None,
queryThreadsConfig: Option[Int] = None,
collectStats: Boolean = false): AccumuloRasterStore = {
val conn = AccumuloStoreHelper.buildAccumuloConnector(username, password, instanceId, zookeepers, useMock)
val authorizationsProvider = AccumuloStoreHelper.getAuthorizationsProvider(auths.split(","), conn)
val rasterStore = new AccumuloRasterStore(conn, tableName, authorizationsProvider, writeVisibilities,
writeMemoryConfig, writeThreadsConfig, queryThreadsConfig, collectStats)
// this will actually create the Accumulo Table
rasterStore.createTableStructure()
rasterStore
}
def apply(config: JMap[String, Serializable]): AccumuloRasterStore = {
val username: String = userParam.lookUp(config).asInstanceOf[String]
val password: String = passwordParam.lookUp(config).asInstanceOf[String]
val instance: String = instanceIdParam.lookUp(config).asInstanceOf[String]
val zookeepers: String = zookeepersParam.lookUp(config).asInstanceOf[String]
val auths: String = authsParam.lookupOpt[String](config).getOrElse("")
val vis: String = visibilityParam.lookupOpt[String](config).getOrElse("")
val tablename: String = tableNameParam.lookUp(config).asInstanceOf[String]
val useMock: Boolean = mockParam.lookUp(config).asInstanceOf[String].toBoolean
val wMem: Option[String] = RasterParams.writeMemoryParam.lookupOpt[String](config)
val wThread: Option[Int] = writeThreadsParam.lookupOpt[Int](config)
val qThread: Option[Int] = queryThreadsParam.lookupOpt[Int](config)
val cStats: Boolean = java.lang.Boolean.valueOf(collectQueryStatsParam.lookupOpt[Boolean](config).getOrElse(false))
AccumuloRasterStore(username, password, instance, zookeepers, tablename,
auths, vis, useMock, wMem, wThread, qThread, cStats)
}
val geoHashLenCache =
CacheBuilder.newBuilder()
.expireAfterAccess(10, TimeUnit.MINUTES)
.expireAfterWrite(10, TimeUnit.MINUTES)
.build[String, ImmutableSetMultimap[Double, Int]]
val extentCache =
CacheBuilder.newBuilder()
.expireAfterAccess(10, TimeUnit.MINUTES)
.expireAfterWrite(10, TimeUnit.MINUTES)
.build[String, ImmutableMap[Double, BoundingBox]]
}
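// A hypothetical usage sketch (not part of the original source): build a store through the
// companion apply and read/write raster chunks. The connection values, `someRaster` and
// `someRasterQuery` below are placeholders.
//
// val store = AccumuloRasterStore("user", "password", "instance", "zoo1:2181",
//   tableName = "geomesa_raster", auths = "", writeVisibilities = "", useMock = true)
// store.putRaster(someRaster) // writes the chunk and updates the bounds table
// implicit val timings: Timings = NoOpTimings // getRasters requires an implicit Timings
// val chunks = store.getRasters(someRasterQuery) // iterator of matching Raster chunks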
object AccumuloRasterTableConfig {
/**
* documentation for raster table settings:
*
* table.security.scan.visibility.default
* - The default visibility for the table
*
* table.iterator.majc.vers.opt.maxVersions
* - Versioning iterator setting
* - max versions, major compaction
*
* table.iterator.minc.vers.opt.maxVersions
* - Versioning iterator setting
* - max versions, minor compaction
*
* table.iterator.scan.vers.opt.maxVersions
* - Versioning iterator setting
* - max versions, scan time
*
* table.split.threshold
* - The split threshold for the table, when reached
* - Accumulo splits the table into tablets of this size.
*
   * @param visibilities the default scan visibility expression to set on the table
   * @return a map of Accumulo table properties keyed by property name
*/
def settings(visibilities: String): Map[String, String] = Map (
"table.security.scan.visibility.default" -> visibilities,
"table.iterator.majc.vers.opt.maxVersions" -> rasterMajcMaxVers,
"table.iterator.minc.vers.opt.maxVersions" -> rasterMincMaxVers,
"table.iterator.scan.vers.opt.maxVersions" -> rasterScanMaxVers,
"table.split.threshold" -> rasterSplitThresh
)
val permissions = "BULK_IMPORT,READ,WRITE,ALTER_TABLE"
}
| mdzimmerman/geomesa | geomesa-raster/src/main/scala/org/locationtech/geomesa/raster/data/AccumuloRasterStore.scala | Scala | apache-2.0 | 16,090 |
package chandu0101.scalajs.react.components
package semanticui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.VdomNode
import scala.scalajs.js
import scala.scalajs.js.`|`
import scala.scalajs.js.annotation.JSName
/**
* This file is generated - submit issues instead of PR against it
*/
case class SuiGridRow(
textAlign: js.UndefOr[SemanticTEXTALIGNMENTS] = js.undefined,
columns: js.UndefOr[
SemanticGridRowCOLUMNS | SemanticWIDTHSNUMBER | SemanticWIDTHSSTRING | Double] = js.undefined,
ref: js.UndefOr[String] = js.undefined,
reversed: js.UndefOr[GridPropReversed] = js.undefined,
centered: js.UndefOr[Boolean] = js.undefined,
divided: js.UndefOr[Boolean] = js.undefined,
key: js.UndefOr[String] = js.undefined,
className: js.UndefOr[String] = js.undefined,
only: js.UndefOr[GridPropOnly] = js.undefined,
verticalAlign: js.UndefOr[SemanticVERTICALALIGNMENTS] = js.undefined,
color: js.UndefOr[SemanticCOLORS] = js.undefined,
as: js.UndefOr[String | js.Function] = js.undefined,
stretched: js.UndefOr[Boolean] = js.undefined
) {
def apply(children: VdomNode*) = {
val props = JSMacro[SuiGridRow](this)
val component = JsComponent[js.Object, Children.Varargs, Null](Sui.GridRow)
component(props)(children: _*)
}
}
| rleibman/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/semanticui/SuiGridRow.scala | Scala | apache-2.0 | 1,355 |
//package com.burness.demo
//import org.json4s._
//import org.json4s.jackson.JsonMethods._
///**
// * Created by burness on 16/5/9.
// */
//class ReadConfig (private val configFile : String){
//
//  def loadConfig(configFile: String): JValue = {
//    // parse the JSON content of the config file; the original parse() call was left incomplete
//    parse(scala.io.Source.fromFile(configFile).mkString)
//  }
//
//}
| spark-mler/algorithmEngine | src/main/scala/com.burness/algorithm/demo/ReadConfig.scala | Scala | apache-2.0 | 292 |
package sample.stream.data_transfer.utils
import akka.util.ByteString
trait CommonMarshallers extends SourceMarshallers {
implicit val stringMarshaller = marshaller[String](x => ByteString(x))
implicit val stringUnmarshaller = unmarshaller[String](x => x.utf8String)
implicit val bytesMarshaller = marshaller[ByteString](identity)
implicit val bytesUnmarshaller = unmarshaller[ByteString](identity)
}
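// A hypothetical sketch (not part of the original source) of how an additional marshaller pair
// could be declared with the same helpers, e.g. for Int payloads:
//
// implicit val intMarshaller = marshaller[Int](x => ByteString(x.toString))
// implicit val intUnmarshaller = unmarshaller[Int](x => x.utf8String.toInt)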
| pallavig/akka-examples | src/main/scala/sample/stream/data_transfer/utils/CommonMarshallers.scala | Scala | cc0-1.0 | 412 |
/*
* Copyright (c) 2015-2022 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.weather
package providers.darksky
import scala.concurrent.duration._
import cats.{Id, Monad}
import cats.data.EitherT
import cats.effect.Sync
import cats.syntax.either._
import com.snowplowanalytics.lrumap.CreateLruMap
import Cache.CacheKey
import errors.{InvalidConfigurationError, WeatherError}
import responses.DarkSkyResponse
sealed trait CreateDarkSky[F[_]] {
/**
* Create a `DarkSkyClient`
* @param apiHost URL to the Dark Sky API endpoints
* @param apiKey API key from Dark Sky
* @return a DarkSkyClient
*/
def create(apiHost: String, apiKey: String, timeout: FiniteDuration): DarkSkyClient[F]
/**
* Create a `DarkSkyCacheClient` capable of caching results
* @param apiHost URL to the Dark Sky API endpoints
* @param apiKey API key from Dark Sky
* @param timeout time after which active request will be considered failed
* @param cacheSize amount of responses stored in the cache
   * @param geoPrecision nth part of 1 to which latitude and longitude will be rounded before
   * being stored in the cache, e.g. coordinate 45.678 will be rounded to 46.0, 45.5, 45.7 or 45.68
   * for geoPrecision 1, 2, 10 or 100 respectively. geoPrecision 1 gives ~60km accuracy in the
   * worst case; 2 gives ~30km, etc.
* @return either an InvalidConfigurationError or a DarkSkyCacheClient
*/
def create(
apiHost: String,
apiKey: String,
timeout: FiniteDuration,
cacheSize: Int,
geoPrecision: Int
): F[Either[InvalidConfigurationError, DarkSkyCacheClient[F]]]
}
object CreateDarkSky {
def apply[F[_]](implicit ev: CreateDarkSky[F]): CreateDarkSky[F] = ev
implicit def syncCreateDarkSky[F[_]: Sync: Transport](implicit
CLM: CreateLruMap[F, CacheKey, Either[WeatherError, DarkSkyResponse]]
): CreateDarkSky[F] =
new CreateDarkSky[F] {
override def create(
apiHost: String,
apiKey: String,
timeout: FiniteDuration
): DarkSkyClient[F] = new DarkSkyClient[F](apiHost, apiKey, timeout, ssl = true)
override def create(
apiHost: String,
apiKey: String,
timeout: FiniteDuration,
cacheSize: Int,
geoPrecision: Int
): F[Either[InvalidConfigurationError, DarkSkyCacheClient[F]]] =
cacheClient[F](apiHost, apiKey, timeout, cacheSize, geoPrecision, ssl = true)
}
implicit def idCreateDarkSky(implicit T: Transport[Id]): CreateDarkSky[Id] =
new CreateDarkSky[Id] {
override def create(
apiHost: String,
apiKey: String,
timeout: FiniteDuration
): DarkSkyClient[Id] = new DarkSkyClient[Id](apiHost, apiKey, timeout, ssl = true)
override def create(
apiHost: String,
apiKey: String,
timeout: FiniteDuration,
cacheSize: Int,
geoPrecision: Int
): Id[Either[InvalidConfigurationError, DarkSkyCacheClient[Id]]] =
cacheClient[Id](apiHost, apiKey, timeout, cacheSize, geoPrecision, ssl = true)
}
private[darksky] def cacheClient[F[_]: Monad](
apiHost: String,
apiKey: String,
timeout: FiniteDuration,
cacheSize: Int,
geoPrecision: Int,
ssl: Boolean
)(implicit
CLM: CreateLruMap[F, CacheKey, Either[WeatherError, DarkSkyResponse]],
T: Transport[F]
): F[Either[InvalidConfigurationError, DarkSkyCacheClient[F]]] =
(for {
_ <- EitherT.fromEither[F] {
().asRight
.filterOrElse(
_ => geoPrecision > 0,
InvalidConfigurationError("geoPrecision must be greater than 0")
)
.filterOrElse(
_ => cacheSize > 0,
InvalidConfigurationError("cacheSize must be greater than 0")
)
}
cache <- EitherT.right[InvalidConfigurationError](Cache.init(cacheSize, geoPrecision))
client = new DarkSkyCacheClient[F](cache, apiHost, apiKey, timeout, ssl)
} yield client).value
}
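// A hypothetical usage sketch (not part of the original source): summon the typeclass for an
// effect type with the required instances in scope and build a client. The host and key below
// are placeholders.
//
// import cats.effect.IO
// import scala.concurrent.duration._
// val client = CreateDarkSky[IO].create("api.example.com", "my-api-key", 5.seconds)
// val cachedF = CreateDarkSky[IO].create("api.example.com", "my-api-key", 5.seconds,
//   cacheSize = 100, geoPrecision = 2) // IO[Either[InvalidConfigurationError, DarkSkyCacheClient[IO]]]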
| snowplow/scala-weather | src/main/scala/com.snowplowanalytics/weather/providers/darksky/CreateDarkSky.scala | Scala | apache-2.0 | 4,610 |
package filodb.core.metadata
import scala.collection.JavaConverters._
import com.typesafe.config.{Config, ConfigFactory, ConfigRenderOptions}
import net.ceedubs.ficus.Ficus._
import org.scalactic._
import filodb.core._
import filodb.core.binaryrecord2._
import filodb.core.downsample.{ChunkDownsampler, DownsamplePeriodMarker}
import filodb.memory.{BinaryRegion, MemFactory}
import filodb.memory.format.{ZeroCopyUTF8String => ZCUTF8}
/**
* A dataset describes the schema (column name & type) and distribution for a stream/set of data.
* The schema consists of an ordered list of data and partition columns.
* To create, use Dataset.apply/make so there is validation.
*
* A dataset is partitioned, partition columns controls how data is distributed.
* For time-series data, the partition columns would describe each entity or metric or host or app instance.
*
* A typical schema for time series (such as Prometheus):
* partition columns: metricName:string, tags:map
* data columns: timestamp:long, value:double
*
* NOTE: this data structure will be deprecated slowly in favor of PartitionSchema/DataSchema.
* NOTE2: name is used for ingestion stream name, which is separate from the name of the schema.
*
* The Column IDs (used for querying) for data columns are numbered starting with 0, and for partition
* columns are numbered starting with PartColStartIndex. This means position is the same or easily derived
*
* The rowKeyIDs are the dataColumns IDs/positions for the "row key", typically a timestamp column but
* something which makes a value unique within a partition and describes a range of data in a chunk.
*/
final case class Dataset(name: String, schema: Schema) {
val options = schema.partition.options
val dataColumns = schema.data.columns
val partitionColumns = schema.partition.columns
val ref = DatasetRef(name, None)
val rowKeyColumns = schema.data.columns take 1
val ingestionSchema = schema.ingestionSchema
val comparator = schema.comparator
val partKeySchema = schema.partKeySchema
// Used for ChunkSetReader.binarySearchKeyChunks
val rowKeyOrdering = CompositeReaderOrdering(rowKeyColumns.map(_.columnType.keyType))
val timestampColumn = rowKeyColumns.head
val timestampColID = timestampColumn.id
private val partKeyBuilder = new RecordBuilder(MemFactory.onHeapFactory, Dataset.DefaultContainerSize)
/**
* Creates a PartitionKey (BinaryRecord v2) from individual parts. Horribly slow, use for testing only.
*/
def partKey(parts: Any*): Array[Byte] = {
val offset = partKeyBuilder.partKeyFromObjects(schema, parts: _*)
val bytes = partKeySchema.asByteArray(partKeyBuilder.allContainers.head.base, offset)
partKeyBuilder.reset()
bytes
}
}
/**
* Config options for a table define operational details for the column store and memtable.
* Every option must have a default!
*/
case class DatasetOptions(shardKeyColumns: Seq[String],
metricColumn: String,
hasDownsampledData: Boolean = false,
// TODO: deprecate these options once we move all input to Telegraf/Influx
// They are needed only to differentiate raw Prometheus-sourced data
ignoreShardKeyColumnSuffixes: Map[String, Seq[String]] = Map.empty,
ignoreTagsOnPartitionKeyHash: Seq[String] = Nil,
// For each key, copy the tag to the value if the value is absent
copyTags: Seq[(String, String)] = Seq.empty) {
override def toString: String = {
toConfig.root.render(ConfigRenderOptions.concise)
}
def toConfig: Config = {
val map: scala.collection.mutable.Map[String, Any] = scala.collection.mutable.Map(
"shardKeyColumns" -> shardKeyColumns.asJava,
"metricColumn" -> metricColumn,
"hasDownsampledData" -> hasDownsampledData,
"ignoreShardKeyColumnSuffixes" ->
ignoreShardKeyColumnSuffixes.mapValues(_.asJava).asJava,
"ignoreTagsOnPartitionKeyHash" -> ignoreTagsOnPartitionKeyHash.asJava,
"copyTags" -> copyTags.groupBy(_._2).map { case (k, v) => (k, v.map(_._1).asJava)}.asJava)
ConfigFactory.parseMap(map.asJava)
}
val nonMetricShardColumns = shardKeyColumns.filterNot(_ == metricColumn).sorted
val nonMetricShardKeyBytes = nonMetricShardColumns.map(_.getBytes).toArray
val nonMetricShardKeyUTF8 = nonMetricShardColumns.map(ZCUTF8.apply).toArray
val nonMetricShardKeyHash = nonMetricShardKeyBytes.map(BinaryRegion.hash32)
val ignorePartKeyHashTags = ignoreTagsOnPartitionKeyHash.toSet
val metricBytes = metricColumn.getBytes
val metricUTF8 = ZCUTF8(metricBytes)
val metricHash = BinaryRegion.hash32(metricBytes)
}
object DatasetOptions {
val DefaultOptions = DatasetOptions(shardKeyColumns = Nil,
metricColumn = "__name__",
// defaults that work well for Prometheus
ignoreShardKeyColumnSuffixes =
Map("__name__" -> Seq("_bucket", "_count", "_sum")),
ignoreTagsOnPartitionKeyHash = Seq("le"))
val DefaultOptionsConfig = ConfigFactory.parseString(DefaultOptions.toString)
def fromString(s: String): DatasetOptions =
fromConfig(ConfigFactory.parseString(s).withFallback(DefaultOptionsConfig))
def fromConfig(config: Config): DatasetOptions = {
val copyTagsValue = config.as[Map[String, Seq[String]]]("copyTags")
.toSeq
.flatMap { case (key, value) => value.map (_ -> key) }
DatasetOptions(shardKeyColumns = config.as[Seq[String]]("shardKeyColumns"),
metricColumn = config.getString("metricColumn"),
hasDownsampledData = config.as[Option[Boolean]]("hasDownsampledData").getOrElse(false),
ignoreShardKeyColumnSuffixes =
config.as[Map[String, Seq[String]]]("ignoreShardKeyColumnSuffixes"),
ignoreTagsOnPartitionKeyHash = config.as[Seq[String]]("ignoreTagsOnPartitionKeyHash"),
copyTags = copyTagsValue)
}
}
/**
* Contains many helper functions especially pertaining to Dataset creation and validation.
*/
object Dataset {
val rowKeyIDs = Seq(0) // First or timestamp column is always the row keys
val DefaultContainerSize = 10240
/**
* Creates a new Dataset with various options
*
* @param name The name of the dataset
* @param partitionColumns list of partition columns in name:type form
* @param dataColumns list of data columns in name:type form
* @return a Dataset, or throws an exception if a dataset cannot be created
*/
def apply(name: String,
partitionColumns: Seq[String],
dataColumns: Seq[String],
keyColumns: Seq[String]): Dataset =
apply(name, partitionColumns, dataColumns, Nil, None, DatasetOptions.DefaultOptions)
def apply(name: String,
partitionColumns: Seq[String],
dataColumns: Seq[String],
downsamplers: Seq[String],
downsamplerPeriodMarker: Option[String],
options: DatasetOptions): Dataset =
make(name, partitionColumns, dataColumns, downsamplers, downsamplerPeriodMarker, options)
.badMap(BadSchemaError).toTry.get
def apply(name: String,
partitionColumns: Seq[String],
dataColumns: Seq[String],
options: DatasetOptions): Dataset =
apply(name, partitionColumns, dataColumns, Nil, None, options)
def apply(name: String,
partitionColumns: Seq[String],
dataColumns: Seq[String]): Dataset =
apply(name, partitionColumns, dataColumns, DatasetOptions.DefaultOptions)
sealed trait BadSchema
case class BadDownsampler(msg: String) extends BadSchema
case class BadDownsamplerPeriodMarker(msg: String) extends BadSchema
case class BadColumnType(colType: String) extends BadSchema
case class BadColumnName(colName: String, reason: String) extends BadSchema
case class NotNameColonType(nameTypeString: String) extends BadSchema
case class BadColumnParams(msg: String) extends BadSchema
case class ColumnErrors(errs: Seq[BadSchema]) extends BadSchema
case class UnknownRowKeyColumn(keyColumn: String) extends BadSchema
case class IllegalMapColumn(reason: String) extends BadSchema
case class NoTimestampRowKey(colName: String, colType: String) extends BadSchema
case class HashConflict(detail: String) extends BadSchema
case class BadSchemaError(badSchema: BadSchema) extends Exception(badSchema.toString)
import OptionSugar._
import Column.ColumnType._
def validateMapColumn(partColumns: Seq[Column], dataColumns: Seq[Column]): Unit Or BadSchema = {
// There cannot be a map column in the data columns
val dataOr = dataColumns.find(_.columnType == MapColumn)
.toOr("no map columns in dataColumns").swap
.badMap(x => IllegalMapColumn("Cannot have map column in data columns"))
// A map column must be in the last position only in the partition columns
def validatePartMapCol(): Unit Or BadSchema = {
val mapCols = partColumns.filter(_.columnType == MapColumn)
if (mapCols.length > 1) {
Bad(IllegalMapColumn("Cannot have more than 1 map column"))
} else if (mapCols.length == 0) {
Good(())
} else {
val partIndex = partColumns.indexWhere(_.name == mapCols.head.name)
if (partIndex < 0) {
Bad(IllegalMapColumn("Map column not in partition columns"))
} else if (partIndex != partColumns.length - 1) {
Bad(IllegalMapColumn(s"Map column found in partition key pos $partIndex, but needs to be last"))
} else {
Good(())
}
}
}
for { nothing1 <- dataOr
nothing2 <- validatePartMapCol() } yield ()
}
def validateTimeSeries(dataColumns: Seq[Column], rowKeyIDs: Seq[Int]): Unit Or BadSchema =
dataColumns(rowKeyIDs.head).columnType match {
case Column.ColumnType.LongColumn => Good(())
case Column.ColumnType.TimestampColumn => Good(())
case other: Column.ColumnType => Bad(NoTimestampRowKey(dataColumns(rowKeyIDs.head).name, other.toString))
}
def validateDownsamplers(downsamplers: Seq[String],
downsampleSchema: Option[String]): Seq[ChunkDownsampler] Or BadSchema = {
try {
if (downsamplers.nonEmpty && downsampleSchema.isEmpty) Bad(BadDownsampler("downsample-schema not defined!"))
else Good(ChunkDownsampler.downsamplers(downsamplers))
} catch {
case e: IllegalArgumentException => Bad(BadDownsampler(e.getMessage))
}
}
def validatedDownsamplerPeriodMarker(marker: Option[String]): DownsamplePeriodMarker Or BadSchema = {
try {
val v = marker.map(m => DownsamplePeriodMarker.downsamplePeriodMarker(m))
.getOrElse(DownsamplePeriodMarker.timeDownsamplePeriodMarker)
Good(v)
} catch {
case e: IllegalArgumentException =>
e.printStackTrace()
Bad(BadDownsamplerPeriodMarker(e.getMessage))
}
}
// Partition columns have a column ID starting with this number. This implies there cannot be
// any more data columns than this number.
val PartColStartIndex = 0x010000
final def isPartitionID(columnID: Int): Boolean = columnID >= PartColStartIndex
/**
* Creates and validates a new Dataset
* @param name The name of the dataset
* @param partitionColNameTypes list of partition columns in name:type[:params] form
* @param dataColNameTypes list of data columns in name:type[:params] form
* @param downsamplerNames a list of downsamplers to use on this schema
* @param options DatasetOptions
* @param valueColumn the default value column to pick if a column is not supplied
* @param dsSchema Option, name of downsample schema, required if downsamplerNames is not empty
* @return Good(Dataset) or Bad(BadSchema)
*/
def make(name: String,
partitionColNameTypes: Seq[String],
dataColNameTypes: Seq[String],
downsamplerNames: Seq[String] = Seq.empty,
downsamplePeriodMarker: Option[String] = None,
options: DatasetOptions = DatasetOptions.DefaultOptions,
valueColumn: Option[String] = None,
dsSchema: Option[String] = None): Dataset Or BadSchema = {
// Default value column is the last data column name
val valueCol = valueColumn.getOrElse(dataColNameTypes.last.split(":").head)
for { partSchema <- PartitionSchema.make(partitionColNameTypes, options)
dataSchema <- DataSchema.make(name, dataColNameTypes, downsamplerNames,
downsamplePeriodMarker, valueCol, dsSchema) }
yield { Dataset(name, Schema(partSchema, dataSchema, None)) }
}
}
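// A hypothetical usage sketch (not part of the original source), using the Prometheus-style
// schema from the Dataset scaladoc above; column definitions use the name:type form expected here:
//
// val dataset = Dataset("prometheus",
//   partitionColumns = Seq("metricName:string", "tags:map"),
//   dataColumns = Seq("timestamp:long", "value:double"))
// dataset.partKey(...) can then be used to build a test-only partition key, per its scaladoc.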
| tuplejump/FiloDB | core/src/main/scala/filodb.core/metadata/Dataset.scala | Scala | apache-2.0 | 13,015 |
package concrete
import org.scalatest.FlatSpec
import org.scalatest.Matchers
import concrete.constraint.semantic.ClauseConstraint
/**
* @author vion
*/
class ProblemStateTest extends FlatSpec with Matchers {
"entailment manager" should "compute entailed constraint" in {
val v1 = new Variable("v1", BooleanDomain())
val v2 = new Variable("v2", BooleanDomain())
val c = new ClauseConstraint(Array(v1, v2), Array())
val problem = Problem(v1, v2)
problem.addConstraint(c)
val state = problem.initState.toState
assert(!state.entailed.hasInactiveVar(c))
state.activeConstraints(v1) should contain theSameElementsAs Seq(0)
state.activeConstraints(v2) should contain theSameElementsAs Seq(0)
val ent = state.entail(c)
assert(ent.entailed.hasInactiveVar(c))
assert(ent.activeConstraints(v1).isEmpty)
assert(ent.activeConstraints(v2).isEmpty)
}
} | concrete-cp/concrete | src/test/scala/concrete/ProblemStateTest.scala | Scala | lgpl-2.1 | 932 |
package bad.robot.temperature.server
import bad.robot.temperature._
import bad.robot.temperature.rrd.Host
import org.specs2.mutable.Specification
class ConnectionTest extends Specification {
"Encode Json" >> {
encode(Connection(Host("box.local", Some("Z"), Some("Europe/London")), IpAddress("127.0.0.1"))).spaces2ps must_==
"""{
| "host" : {
| "name" : "box.local",
| "utcOffset" : "Z",
| "timezone" : "Europe/London"
| },
| "ip" : {
| "value" : "127.0.0.1"
| }
|}""".stripMargin
}
}
| tobyweston/temperature-machine | src/test/scala/bad/robot/temperature/server/ConnectionTest.scala | Scala | apache-2.0 | 590 |
package sri.sangria.web.components
import sri.sangria.web.styles.Colors
import sri.universal.components.{View, _}
import sri.web.all._
import sri.web.styles.WebStyleSheet
import scala.scalajs.js.{UndefOr => U, undefined}
object Footer {
val Component = () => View(style = styles.footer)(
Text()("Built using Sri-Web")
)
object styles extends WebStyleSheet {
val footer = style(alignItems.center,
justifyContent.center,
borderTopWidth := "1px",
height := 70,
borderTopColor := Colors.lightGrey)
}
def apply() = createStatelessFunctionElementNoProps(Component)
}
| chandu0101/sri-sangria-example | web/src/main/scala/sri/sangria/web/components/Footer.scala | Scala | apache-2.0 | 610 |
// Solution-2.scala
// Solution to Exercise 2 in "Vectors"
import com.atomicscala.AtomicTest._
val v4 = Vector(Vector(0, 1, 2), Vector(3, 4, 5))
println(v4)
/* REPL Session:
scala> val v4 = Vector(Vector(0, 1, 2), Vector(3, 4, 5))
v4: scala.collection.immutable.Vector[scala.collection.immutable.Vector[Int]] = Vector(Vector(0, 1, 2), Vector(3, 4, 5))
*/
/* OUTPUT_SHOULD_BE
Vector(Vector(0, 1, 2), Vector(3, 4, 5))
*/
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/18_Vectors/Solution-2.scala | Scala | apache-2.0 | 422 |
package chapter3
import FilteringActorProtocol._
import akka.actor.{ActorRef, Props}
class FilteringActorTest extends tool.combinTestKitAndSpec{
"filter out message " in {
val props = Props(new FilteringActor(testActor, 5))
val filter = system.actorOf(props, "filter-1")
filter ! Event(1)
filter ! Event(2)
filter ! Event(3)
filter ! Event(4)
filter ! Event(5)
filter ! Event(4)
filter ! Event(3)
filter ! Event(5)
filter ! Event(6)
val eventId = receiveWhile() {
case Event(id) if id <= 5 => id
}
eventId must be(List(1, 2, 3, 4, 5))
expectMsg(Event(6))
}
}
| wjingyao2008/firsttry | myarchtype/src/test/scala/chapter3/FilteringActorTest.scala | Scala | apache-2.0 | 671 |
package uk.gov.gds.ier.transaction.crown.previousAddress
import uk.gov.gds.ier.transaction.crown.CrownControllers
import com.google.inject.{Inject, Singleton}
import uk.gov.gds.ier.config.Config
import uk.gov.gds.ier.security.EncryptionService
import uk.gov.gds.ier.serialiser.JsonSerialiser
import uk.gov.gds.ier.service.AddressService
import uk.gov.gds.ier.step.{CrownStep, Routes}
import uk.gov.gds.ier.transaction.crown.InprogressCrown
import uk.gov.gds.ier.assets.RemoteAssets
@Singleton
class PreviousAddressPostcodeStep @Inject() (
val serialiser: JsonSerialiser,
val config: Config,
val encryptionService: EncryptionService,
val remoteAssets: RemoteAssets,
val addressService: AddressService,
val crown: CrownControllers
) extends CrownStep
with PreviousAddressPostcodeMustache
with PreviousAddressForms {
val validation = postcodeStepForm
val routing = Routes(
get = routes.PreviousAddressPostcodeStep.get,
post = routes.PreviousAddressPostcodeStep.post,
editGet = routes.PreviousAddressPostcodeStep.editGet,
editPost = routes.PreviousAddressPostcodeStep.editPost
)
def nextStep(currentState: InprogressCrown) = {
val isPreviousAddressNI = currentState.previousAddress.exists(
_.previousAddress.exists(prevAddr => addressService.isNothernIreland(prevAddr.postcode)))
if (isPreviousAddressNI) {
crown.NationalityStep
} else {
crown.PreviousAddressSelectStep
}
}
override val onSuccess = TransformApplication { currentState =>
val prevAddressCleaned = currentState.previousAddress.map { prev =>
prev.copy(
previousAddress = prev.previousAddress.map(_.copy(
addressLine = None,
uprn = None,
manualAddress = None,
gssCode = None
))
)
}
currentState.copy(
previousAddress = prevAddressCleaned,
possibleAddresses = None
)
} andThen GoToNextIncompleteStep()
}
| michaeldfallen/ier-frontend | app/uk/gov/gds/ier/transaction/crown/previousAddress/PreviousAddressPostcodeStep.scala | Scala | mit | 1,960 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
import sbt._
object Dependencies {
val resolutionRepos = Seq(
"ScalaTools snapshots at Sonatype" at "https://oss.sonatype.org/content/repositories/snapshots/",
"Concurrent Maven Repo" at "http://conjars.org/repo", // For Scalding, Cascading etc
// For Snowplow libs
"Snowplow Analytics Maven repo" at "http://maven.snplow.com/releases/",
"Snowplow Analytics Maven snapshot repo" at "http://maven.snplow.com/snapshots/",
// For user-agent-utils
"user-agent-utils repo" at "https://raw.github.com/HaraldWalker/user-agent-utils/mvn-repo/"
)
object V {
// Java
val hadoop = "1.1.2"
// val commonsLang = "3.1"
val jacksonDatabind = "2.2.3"
val jsonValidator = "2.2.3"
val yodaTime = "2.1"
val yodaConvert = "1.2"
// Scala
val json4sJackson = "3.2.11"
val commonEnrich = "0.13.0"
val scalding = "0.10.0"
val scalaz7 = "7.0.0"
val igluClient = "0.2.0"
// Scala (test only)
val specs2 = "1.14" // Downgrade to prevent issues in job tests. WAS: "2.3.11"
val scalazSpecs2 = "0.1.2"
}
object Libraries {
// Java
val hadoopCore = "org.apache.hadoop" % "hadoop-core" % V.hadoop % "provided"
// val commonsLang = "org.apache.commons" % "commons-lang3" % V.commonsLang
val jacksonDatabind = "com.fasterxml.jackson.core" % "jackson-databind" % V.jacksonDatabind
val jsonValidator = "com.github.fge" % "json-schema-validator" % V.jsonValidator
val yodaTime = "joda-time" % "joda-time" % V.yodaTime
val yodaConvert = "org.joda" % "joda-convert" % V.yodaConvert
// Scala
val json4sJackson = "org.json4s" %% "json4s-jackson" % V.json4sJackson
val commonEnrich = "com.snowplowanalytics" % "snowplow-common-enrich" % V.commonEnrich
val scaldingCore = "com.twitter" %% "scalding-core" % V.scalding
val scaldingArgs = "com.twitter" %% "scalding-args" % V.scalding
// val scaldingJson = "com.twitter" %% "scalding-json" % V.scalding
val scalaz7 = "org.scalaz" %% "scalaz-core" % V.scalaz7
val igluClient = "com.snowplowanalytics" % "iglu-scala-client" % V.igluClient
// Scala (test only)
val specs2 = "org.specs2" %% "specs2" % V.specs2 % "test"
val scalazSpecs2 = "org.typelevel" %% "scalaz-specs2" % V.scalazSpecs2 % "test"
}
}
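// A hypothetical sketch (not part of the original file) of how these groups are typically
// referenced from the sbt build definition:
//
// resolvers ++= Dependencies.resolutionRepos
// libraryDependencies ++= Seq(
//   Dependencies.Libraries.hadoopCore, Dependencies.Libraries.commonEnrich,
//   Dependencies.Libraries.scaldingCore, Dependencies.Libraries.scaldingArgs,
//   Dependencies.Libraries.scalaz7, Dependencies.Libraries.igluClient,
//   Dependencies.Libraries.specs2, Dependencies.Libraries.scalazSpecs2)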
| wesley1001/snowplow | 3-enrich/scala-hadoop-shred/project/Dependencies.scala | Scala | apache-2.0 | 3,474 |
/*
* Copyright (C) 2012 Pavel Fatin <http://pavelfatin.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.pavelfatin.fs
package internal
/** Various implementations of the `Data` interface.
*
* @see [[com.pavelfatin.fs.Data]]
*/
package object data {}
| pavelfatin/toyfs | src/main/scala/com/pavelfatin/fs/internal/data/package.scala | Scala | gpl-3.0 | 880 |
package com.twitter.finagle.zookeeper
import com.twitter.finagle.common.zookeeper.{ZooKeeperClient, ZooKeeperUtils}
import com.twitter.concurrent.{Offer, Broker, AsyncMutex}
import com.twitter.finagle.addr.StabilizingAddr.State._
import com.twitter.finagle.util.InetSocketAddressUtil
import com.twitter.util.Duration
import java.net.InetSocketAddress
import org.apache.zookeeper.Watcher.Event.KeeperState
import org.apache.zookeeper.{Watcher, WatchedEvent}
import scala.collection._
import scala.jdk.CollectionConverters._
private[finagle] class ZooKeeperHealthHandler extends Watcher {
private[this] val mu = new AsyncMutex
val pulse = new Broker[Health]()
def process(evt: WatchedEvent) =
for {
permit <- mu.acquire()
() <- evt.getState match {
case KeeperState.SyncConnected => pulse ! Healthy
case _ => pulse ! Unhealthy
}
} permit.release()
}
private[finagle] object DefaultZkClientFactory
extends ZkClientFactory(ZooKeeperUtils.DEFAULT_ZK_SESSION_TIMEOUT)
private[finagle] class ZkClientFactory(val sessionTimeout: Duration) {
private[this] val zkClients: mutable.Map[Set[InetSocketAddress], ZooKeeperClient] = mutable.Map()
def hostSet(hosts: String) = InetSocketAddressUtil.parseHosts(hosts).toSet
def get(zkHosts: Set[InetSocketAddress]): (ZooKeeperClient, Offer[Health]) = synchronized {
val client =
zkClients.getOrElseUpdate(zkHosts, new ZooKeeperClient(sessionTimeout, zkHosts.asJava))
// TODO: Watchers are tied to the life of the client,
// which, in turn, is tied to the life of ZkClientFactory.
// Maybe we should expose a way to unregister watchers.
val healthHandler = new ZooKeeperHealthHandler
client.register(healthHandler)
(client, healthHandler.pulse.recv)
}
private[zookeeper] def clear() = synchronized { zkClients.clear() }
}
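// A hypothetical usage sketch (not part of the original source): obtain a shared client for a
// host set and observe its health pulse. Host names are placeholders.
//
// val hosts = DefaultZkClientFactory.hostSet("zk1.example.com:2181,zk2.example.com:2181")
// val (zkClient, health) = DefaultZkClientFactory.get(hosts)
// health.sync() foreach {
//   case Healthy => // ZooKeeper session (re)connected
//   case Unhealthy => // session lost; treat served data as potentially stale
// }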
| twitter/finagle | finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZkClientFactory.scala | Scala | apache-2.0 | 1,858 |
package com.twitter.finagle
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.benchmark.StdBenchAnnotations
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.util.{Await, Var}
import org.openjdk.jmh.annotations._
/**
 * This is primarily a test for allocations, not performance,
 * so it is recommended to run it with `-prof gc`.
*/
@State(Scope.Benchmark)
class InetResolverBenchmark extends StdBenchAnnotations {
@Param(Array("8.8.8.8"))
var ip = ""
@Param(Array("api.twitter.com"))
var hostname = ""
private[this] val inetResolver =
InetResolver(new InMemoryStatsReceiver())
private[this] val timeout = 1.second
private[this] def notPending(addrs: Var[Addr]): Addr =
Await.result(addrs.changes.filter(_ != Addr.Pending).toFuture(), timeout)
@Benchmark
def bindIp(): Addr =
notPending(inetResolver.bind(ip))
@Benchmark
def bindHostname(): Addr =
notPending(inetResolver.bind(hostname))
}
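// Hypothetical invocation sketch (exact project and task names may differ in this build):
// sbt 'finagle-benchmark/jmh:run .*InetResolverBenchmark.* -prof gc'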
| twitter/finagle | finagle-benchmark/src/main/scala/com/twitter/finagle/InetResolverBenchmark.scala | Scala | apache-2.0 | 980 |
package circumflex
package web
import java.lang.reflect.InvocationTargetException
import javax.servlet._
import javax.servlet.http.{HttpServletResponse, HttpServletRequest}
import java.io._
import util.control.ControlThrowable
import core._
/*!# Circumflex Filter
`CircumflexFilter` is the entry point of your web application. It handles
the context lifecycle (initializing the context before the request is processed and
finalizing it after the response is sent), serves static files
and executes the main request router.
To setup your web application place following snippet into your `WEB-INF/web.xml`:
``` {.xml}
<filter>
<filter-name>Circumflex Filter</filter-name>
<filter-class>circumflex.web.CircumflexFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>Circumflex Filter</filter-name>
<url-pattern>*</url-pattern>
</filter-mapping>
```
You can also include `<dispatcher>REQUEST</dispatcher>`, `<dispatcher>FORWARD</dispatcher>`,
`<dispatcher>INCLUDE</dispatcher>` and `<dispatcher>ERROR</dispatcher>` under `filter-mapping`
if your application requires it (for example, include the `ERROR` dispatcher if you wish to
serve error pages with Circumflex; beware of infinite loops, however).
The filter configuration is saved into the `cx.filterConfig` configuration parameter and
is available throughout your configuration via the `filterConfig` method of the
`circumflex.web` package.
*/
class CircumflexFilter extends Filter {
def init(filterConfig: FilterConfig) {
WEB_LOG.info("Circumflex 3.0")
cx("cx.filterConfig") = filterConfig
}
def destroy() {}
/*!## Serving static
Static files are images, stylesheets, javascripts and all other application assets
which do not require special processing and can be served to clients "as is".
By default static files are served from `/public` location of your webapp root,
but you can specify different location by setting the `cx.public` configuration
parameter.
*/
def serveStatic(req: HttpServletRequest,
res: HttpServletResponse,
chain: FilterChain): Boolean = {
if (req.getMethod.equalsIgnoreCase("get") || req.getMethod.equalsIgnoreCase("head")) {
val uri = req.getRequestURI
val publicUri = cx.getOrElse("cx.public", "/public").toString
val contextPath = servletContext.getContextPath
if (uri.startsWith(contextPath + publicUri) && !uri.endsWith("/")) {
chain.doFilter(req, res)
return true
}
val relativeUri = uri.substring(contextPath.length)
val decodedUri = decodeURI(publicUri + relativeUri)
val path = filterConfig.getServletContext.getRealPath(decodedUri)
if (path == null)
return false
val resource = new File(path)
if (resource.isFile) {
req.getRequestDispatcher(decodedUri).forward(req, res)
return true
}
}
false
}
/*!## Main Lifecycle {#lifecycle}
The lifecycle of `CircumflexFilter` involves the following actions:
1. try to serve static content and immediately exit on success;
2. initialize `Context` and fill it with the following variables:
* `cx.request` will hold current `HttpRequest`;
* `cx.response` will hold current `HttpResponse`;
* `cx.filterChain` will hold current `FilterChain`;
* `cx.locale` will hold current request locale as obtained from
raw HTTP request;
* other variables from `prepareContext`;
3. the main router is instantiated (its class should be specified via the
`cx.router` configuration parameter);
4. depending on the result of router's execution, either the response or
the error is flushed to the client;
5. the `Context` is destroyed.
*/
def doFilter(req: ServletRequest,
res: ServletResponse,
chain: FilterChain) {
(req, res) match {
case (req: HttpServletRequest, res: HttpServletResponse) =>
// try to serve static first
if (serveStatic(req, res, chain))
return
// initialize context
Context.executeInNew { ctx =>
ctx("cx.request") = new HttpRequest(req)
ctx("cx.response") = new HttpResponse(res)
ctx("cx.filterChain") = chain
ctx("cx.locale") = req.getLocale
prepareContext(ctx)
try {
WEB_LOG.trace(req)
// execute main router
try {
cx.instantiate("cx.router") // ResponseSentMarker must be thrown
onNoMatch()
} catch {
case e: InvocationTargetException => e.getCause match {
case ex: ResponseSentMarker => throw ex
case ex: FileNotFoundException => onNotFound(ex)
case ex: Exception => onRouterError(ex)
}
}
} catch {
case e: ResponseSentMarker => WEB_LOG.trace(res)
}
}
case _ =>
}
}
/*! The `prepareContext` method populates the current context with various useful
shortcuts (from `web` package):
* `param` -- the `param` object;
* `request` -- the `request` object;
* `session` -- the `session` object;
* `cookies` -- the `cookies` object;
* `headers` -- the `headers` object;
* `cfg` -- the `cx` object;
* `msg` -- the `msg` object.
If you use a custom filter implementation, you can override this method
to populate the current context with global variables of your application.
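If you subclass the filter, a sketch of such an override might look like this (`findUser`
and the `'currentUser` key are hypothetical application code, not part of Circumflex):
``` {.scala}
import circumflex.core._
import circumflex.web._
class MyFilter extends CircumflexFilter {
  override def prepareContext(ctx: Context) {
    super.prepareContext(ctx)
    // Hypothetical application-wide shortcut available to routes and templates.
    'currentUser := findUser(request)
  }
}
```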
*/
def prepareContext(ctx: Context) {
'param := param
'paramList := paramList
'request := request
'session := session
'cfg := cx
'msg := msg
}
/*!## Callbacks
`CircumflexFilter` allows you to override the following callbacks:
* `onNoMatch` is executed if no routes match current request;
* `onNotFound` is executed if a `FileNotFoundException` is thrown from a router;
* `onRouterError` is executed if a general exception is thrown from a router;
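For instance, a subclass might hook error reporting in before falling back to the default
behavior (a sketch; `notifyOps` is a hypothetical application hook, not part of Circumflex):
``` {.scala}
class MyFilter extends CircumflexFilter {
  override def onRouterError(e: Throwable) {
    notifyOps(e)
    super.onRouterError(e)
  }
}
```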
*/
def onNoMatch() {
WEB_LOG.debug("No routes matched: " + request)
sendError(404)
}
def onRouterError(e: Throwable) {
WEB_LOG.error("Router threw an exception, see stack trace for details.", e)
sendError(500)
}
def onNotFound(e: Throwable) {
WEB_LOG.debug("Resource not found, see stack trace for details.", e)
sendError(404)
}
}
/*!## Response Sent Marker
A `ResponseSentMarker` is thrown by response helpers and routes upon successful matching
and is caught by `CircumflexFilter`. It indicates that the response has been processed
successfully (and possibly already flushed to the client) and that no additional actions
need to be taken.
In general `ResponseSentMarker` should not be caught by try-catch blocks. It inherits
from Scala's `ControlThrowable` to save some performance.
*/
class ResponseSentMarker extends ControlThrowable | inca/circumflex | web/src/main/scala/filter.scala | Scala | bsd-2-clause | 6,808 |
/*
* Copyright 2016 Nikolay Donets
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.nikdon.telepooz.model
import com.github.nikdon.telepooz._
sealed trait ParseMode extends Product with Serializable {
def name = this.productPrefix
}
object ParseMode {
case object HTML extends ParseMode {override val name = super.name}
case object Markdown extends ParseMode {override val name = super.name}
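  /** Resolves a `ParseMode` from its exact name ("HTML" or "Markdown"); any other input is passed to `unexpected`, hence "unsafe". */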
def unsafe(str: String): ParseMode = str match {
case HTML.name ⇒ HTML
case Markdown.name ⇒ Markdown
case _ ⇒ unexpected(str)
}
}
| nikdon/telepooz | src/main/scala/com/github/nikdon/telepooz/model/ParseMode.scala | Scala | apache-2.0 | 1,106 |
package com.twitter.finatra.http.internal.marshalling
import com.twitter.finagle.httpx.{Method, Request}
import com.twitter.finatra.http.fileupload.MultipartItem
import org.apache.commons.fileupload.{FileItemFactory, FileItemHeaders, FileItemIterator, FileUploadBase}
import org.apache.commons.io.IOUtils
import scala.collection.mutable
class FinatraFileUpload extends FileUploadBase {
def parseMultipartItems(request: Request): Map[String, MultipartItem] = {
val multipartMap = mutable.Map[String, MultipartItem]()
fileItemIterator(request) map { itr =>
while (itr.hasNext) {
val multipartItemStream = itr.next()
val multipartItemInMemory = MultipartItem(
data = IOUtils.toByteArray(multipartItemStream.openStream()),
fieldName = multipartItemStream.getFieldName,
isFormField = multipartItemStream.isFormField,
contentType = Option(multipartItemStream.getContentType),
filename = Option(multipartItemStream.getName),
headers = multipartItemStream.getHeaders)
multipartMap += multipartItemInMemory.fieldName -> multipartItemInMemory
}
}
multipartMap.toMap
}
def fileItemIterator(request: Request): Option[FileItemIterator] = {
if(isPostOrPut(request) && isMultipart(request))
Some(
getItemIterator(
new FinatraRequestContext(request)))
else
None
}
override def setFileItemFactory(factory: FileItemFactory) {
throw new UnsupportedOperationException("FileItemFactory is not supported.")
}
override def getFileItemFactory: FileItemFactory = {
throw new UnsupportedOperationException("FileItemFactory is not supported.")
}
private def isMultipart(request: Request): Boolean = {
request.contentType match {
case Some(contentType) =>
contentType.startsWith("multipart/")
case _ =>
false
}
}
private def isPostOrPut(request: Request): Boolean = {
Method.Post == request.method ||
Method.Put == request.method
}
}
| deanh/finatra | http/src/main/scala/com/twitter/finatra/http/internal/marshalling/FinatraFileUpload.scala | Scala | apache-2.0 | 2,044 |
/*
* Copyright (c) 2012-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package adapters
package registry
// Joda-Time
import org.joda.time.DateTime
// Scalaz
import scalaz._
import Scalaz._
// Snowplow
import loaders.{
CollectorApi,
CollectorSource,
CollectorContext,
CollectorPayload
}
import utils.ConversionUtils
import SpecHelpers._
// Specs2
import org.specs2.{Specification, ScalaCheck}
import org.specs2.matcher.DataTables
import org.specs2.scalaz.ValidationMatchers
class CallrailAdapterSpec extends Specification with DataTables with ValidationMatchers with ScalaCheck { def is =
"This is a specification to test the CallrailAdapter functionality" ^
p^
"toRawEvents should return a NEL containing one RawEvent if the querystring is correctly populated" ! e1^
"toRawEvents should return a Validation Failure if there are no parameters on the querystring" ! e2^
end
implicit val resolver = SpecHelpers.IgluResolver
object Shared {
val api = CollectorApi("com.callrail", "v1")
val source = CollectorSource("clj-tomcat", "UTF-8", None)
val context = CollectorContext(DateTime.parse("2013-08-29T00:18:48.000+00:00").some, "37.157.33.123".some, None, None, Nil, None)
}
object Expected {
val staticNoPlatform = Map(
"tv" -> "com.callrail-v1",
"e" -> "ue",
"cv" -> "clj-0.6.1-tom-0.0.4"
)
val static = staticNoPlatform ++ Map(
"p" -> "srv"
)
}
def e1 = {
val params = toNameValuePairs(
"answered" -> "true",
"callercity" -> "BAKERSFIELD",
"callercountry" -> "US",
"callername" -> "SKYPE CALLER",
"callernum" -> "+12612230240",
"callerstate" -> "CA",
"callerzip" -> "92307",
"callsource" -> "keyword",
"datetime" -> "2014-10-09 16:23:45",
"destinationnum" -> "2012032051",
"duration" -> "247",
"first_call" -> "true",
"ga" -> "",
"gclid" -> "",
"id" -> "201235151",
"ip" -> "86.178.163.7",
"keywords" -> "",
"kissmetrics_id" -> "",
"landingpage" -> "http://acme.com/",
"recording" -> "http://app.callrail.com/calls/201235151/recording/9f59ad59ba1cfa264312",
"referrer" -> "direct",
"referrermedium" -> "Direct",
"trackingnum" -> "+12012311668",
"transcription" -> "",
"utm_campaign" -> "",
"utm_content" -> "",
"utm_medium" -> "",
"utm_source" -> "",
"utm_term" -> "",
"utma" -> "",
"utmb" -> "",
"utmc" -> "",
"utmv" -> "",
"utmx" -> "",
"utmz" -> "",
"cv" -> "clj-0.6.1-tom-0.0.4",
"nuid" -> "-"
)
val payload = CollectorPayload(Shared.api, params, None, None, Shared.source, Shared.context)
val actual = CallrailAdapter.toRawEvents(payload)
val expectedJson =
"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.callrail/call_complete/jsonschema/1-0-0",
|"data":{
|"duration":247,
|"utm_source":null,
|"utmv":null,
|"ip":"86.178.163.7",
|"utmx":null,
|"ga":null,
|"destinationnum":"2012032051",
|"datetime":"2014-10-09T16:23:45.000Z",
|"kissmetrics_id":null,
|"landingpage":"http://acme.com/",
|"callerzip":"92307",
|"gclid":null,
|"callername":"SKYPE CALLER",
|"utmb":null,
|"id":"201235151",
|"callernum":"+12612230240",
|"utm_content":null,
|"trackingnum":"+12012311668",
|"referrermedium":"Direct",
|"utm_campaign":null,
|"keywords":null,
|"transcription":null,
|"utmz":null,
|"utma":null,
|"referrer":"direct",
|"callerstate":"CA",
|"recording":"http://app.callrail.com/calls/201235151/recording/9f59ad59ba1cfa264312",
|"first_call":true,
|"utmc":null,
|"callercountry":"US",
|"utm_medium":null,
|"callercity":"BAKERSFIELD",
|"utm_term":null,
|"answered":true,
|"callsource":"keyword"
|}
|}
|}""".stripMargin.replaceAll("[\\n\\r]","")
actual must beSuccessful(NonEmptyList(RawEvent(Shared.api, Expected.static ++ Map("ue_pr" -> expectedJson, "nuid" -> "-"), None, Shared.source, Shared.context)))
}
def e2 = {
val params = toNameValuePairs()
val payload = CollectorPayload(Shared.api, params, None, None, Shared.source, Shared.context)
val actual = CallrailAdapter.toRawEvents(payload)
actual must beFailing(NonEmptyList("Querystring is empty: no CallRail event to process"))
}
}
| jramos/snowplow | 3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/adapters/registry/CallrailAdapterSpec.scala | Scala | apache-2.0 | 6,177 |
package ch.bsisa.hyperbird.patman.simulations.actors
import akka.actor.{ Actor, ActorRef, ActorLogging }
import ch.bsisa.hyperbird.patman.simulations.messages._
import play.api.libs.concurrent.Akka
import scala.concurrent.duration._
import play.api.Play.current
import play.api.libs.concurrent.Execution.Implicits._
import ch.bsisa.hyperbird.patman.simulations.model.HospitalSimulationSummary
import ch.bsisa.hyperbird.patman.simulations.model.HospitalHelper
class ShutdownCoordinatorActor(simulationId:String) extends Actor with ActorLogging {
var shutdownSignals : List[String] = List()
var hospitalSummaryEntries : List[HospitalSimulationSummary] = List()
def receive = {
case ShutdownSignal(message, terminationSize, hssOpt) =>
log.info(s"Received ShutdownSignal(${message})")
// Store hss if present
hssOpt match {
case Some(hss) =>
hospitalSummaryEntries = hss :: hospitalSummaryEntries
log.info("Added hospital summary entry.")
case None =>
log.info("No hospital summary entry.")
}
// Store signal
shutdownSignals = message :: shutdownSignals
if (shutdownSignals.size >= terminationSize) {
log.info("Updating SIMULATION with hospital summary entries")
HospitalHelper.updateSimulationDatabaseEntry(simulationId = simulationId, hssList = hospitalSummaryEntries)
log.info(s"Obtained ${shutdownSignals.size} signals, requesting simulation to stop in 15 seconds...")
        Akka.system.scheduler.scheduleOnce(15 seconds, sender, StopSimulationRequest(s"ShutdownCoordinator reached terminationSize ${terminationSize}"))
        //sender ! StopSimulationRequest(s"ShutdownCoordinator reached terminationSize ${terminationSize}")
} else {
log.info(s"Waiting for ${terminationSize-shutdownSignals.size} signal${if ((terminationSize-shutdownSignals.size) > 1) "s" else ""}")
}
}
}
| bsisa/hb-api | app/ch/bsisa/hyperbird/patman/simulations/actors/ShutdownCoordinatorActor.scala | Scala | gpl-2.0 | 2,001 |
package com.plasmaconduit.framework
import scala.collection.immutable.HashMap
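/** Immutable session data holder: every mutator returns a new `HttpSession` rather than modifying this one in place. */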
final case class HttpSession(data: Map[String, String] = HashMap()) {
def withFreshData(newData: Map[String, String]): HttpSession = {
copy(data = newData)
}
def withData(moreData: Map[String, String]): HttpSession = {
withFreshData(data ++ moreData)
}
def set(key: String, value: String): HttpSession = {
withFreshData(data + (key -> value))
}
def get(key: String): Option[String] = {
data.get(key)
}
def clear(): HttpSession = {
withFreshData(HashMap())
}
}
| plasmaconduit/plasmaconduit-framework | src/main/scala/com/plasmaconduit/framework/HttpSession.scala | Scala | mit | 587 |
/*
* Copyright 2014 Christos KK Loverdos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ckkloverdos.topsort
import com.ckkloverdos.topsort.event.{ExitCause, PrintStreamListener, TopSortListener}
import com.ckkloverdos.topsort.util.{LSet, LTopSortPerNode, MLTopSortPerNode, SymbolGraph}
import org.junit.{Assert, Test}
import scala.annotation.tailrec
import scala.collection.mutable
/**
*
*/
class SymbolGraphTest {
@tailrec
final def sameIterators[N](a: Iterator[N], b: Iterator[N]): Unit = {
if(a.hasNext) {
Assert.assertTrue(b.hasNext)
val a0 = a.next()
val b0 = b.next()
Assert.assertEquals(a0, b0)
sameIterators(a, b)
}
else
Assert.assertFalse(b.hasNext)
}
@Test def parseNoCycle1(): Unit = {
val graph = SymbolGraph("a -> b; a -> c; a -> d")
val sorted = graph.topSortEx()
sameIterators(Iterator('b, 'c, 'd, 'a), sorted.toIterator)
}
@Test def parseNoCycle2(): Unit = {
val graph = SymbolGraph("a -> b; b -> c;;;; c -> d")
val sorted = graph.topSortEx()
sameIterators(Iterator('d, 'c, 'b, 'a), sorted.toIterator)
}
@Test def parseCycle1(): Unit = {
val graph = SymbolGraph("a -> a")
try {
val _ = graph.topSortEx()
Assert.fail(""""a -> a" is a cycle""")
}
catch {
case _: TopSortCycleException[_] ⇒
}
}
@Test def checkDependents(): Unit = {
val graphStr =
"""
| A -> A_1, A_2, A_3
| B -> B_1, B_2
""".stripMargin
val graph = SymbolGraph(graphStr)
assert(graph.dependenciesOf('A).size == 3, "graph.getOrEmpty('A).size == 3")
assert(graph.dependenciesOf('B).size == 2, "graph.getOrEmpty('B).size == 2")
// A map of the direct dependencies (not top-sorted)
val map = new mutable.LinkedHashMap[Symbol, mutable.LinkedHashSet[Symbol]]
def add(from: Symbol, to: Symbol): Unit = {
map.get(from) match {
case Some(toSet) ⇒ toSet += to
case None ⇒
val toSet = new mutable.LinkedHashSet[Symbol]
toSet += to
map(from) = toSet
}
}
def addOne(node: Symbol): Unit = {
map.get(node) match {
case None ⇒ map(node) = new mutable.LinkedHashSet[Symbol]
case _ ⇒
}
}
val listener = new TopSortListener[Symbol] {
override def onEnter(dependents: List[Symbol], node: Symbol, level: Int): Unit = {
for(dependent ← dependents) {
add(dependent, node)
}
addOne(node)
}
override def onExit(
dependents: List[Symbol],
node: Symbol,
exitCause: ExitCause,
searchPath: Traversable[Symbol],
level: Int
): Unit = {
exitCause match {
case ExitCause.AlreadySorted ⇒
for(dependent ← dependents) {
add(dependent, node)
}
case _ ⇒
}
}
}
val path = graph.topSortEx(PrintStreamListener.StdOut.andThen(listener))
val sortedNodes = path.to[mutable.LinkedHashSet]
val allNodes = graph.allNodes
for {
node ← allNodes
} {
assert(sortedNodes.contains(node), s"sortedNodes.contains($node)")
}
Assert.assertEquals(Set('A, 'A_1, 'A_2, 'A_3, 'B, 'B_1, 'B_2), map.keySet.toSet)
Assert.assertEquals(Set('A_1, 'A_2, 'A_3), map('A))
Assert.assertEquals(Set('B_1, 'B_2), map('B))
}
@Test def checkYabOrders(): Unit = {
val graphStr =
"""
| /::fatjar → /a::jar, /c::jar, /b::jar
| /a::jar → /a::compile
| /b::jar → /b::compile
| /c::jar → /c::compile
| /c::compile → /b::compile, /a::compile
| /c::fatjar → /c::jar
""".stripMargin
val graph = SymbolGraph(graphStr)
val lTopSortedListener = new LTopSortPerNode[Symbol]
val mlTopSortedListener = new MLTopSortPerNode[Symbol]
val path = graph.topSortEx(
PrintStreamListener.StdOut[Symbol].
andThen(lTopSortedListener).
andThen(mlTopSortedListener)
)
val lTopSortedMap = lTopSortedListener.topSortedMap
val mlTopSortedMap = mlTopSortedListener.topSortedMap
val pathReport = path.mkString("[", ", ", "]")
println("====================")
println(pathReport)
println("====================")
for { node ← lTopSortedMap.keySet } {
val deps = lTopSortedMap(node)
val depsReport = deps.mkString(", ")
println(s"$node → $depsReport")
}
Assert.assertEquals(path.size, lTopSortedMap.allNodes.size)
Assert.assertEquals(mlTopSortedMap.size, lTopSortedMap.size)
// Every topsorted path must start with a node that depends on no other
for { node ← lTopSortedMap.keySet } {
val topSortedDeps = lTopSortedMap(node)
topSortedDeps.toSeq.headOption match {
case None ⇒
case Some(first) ⇒
Assert.assertEquals(true, lTopSortedMap.contains(node))
val firstDeps = lTopSortedMap(first)
Assert.assertEquals(
s"Every topsorted path must start with a node that depends on no other. Offending node: $first",
LSet[Symbol](),
firstDeps
)
}
}
}
}
| loverdos/topsort | src/main/test/scala/com/ckkloverdos/topsort/SymbolGraphTest.scala | Scala | apache-2.0 | 5,734 |
package edu.uwm.cs.pir.domain.features.concrete
/**
* The example will consume a list of images and desired feature names as input to allow PIR to perform indexing
 * based on the provided API name.
*
*/
object DeclarativeTransformation extends App {
trait SpecEngine {
def parse(in: String): Spec
}
class Key(val name: String, val rawType: String, val featureName: String, val api: String)
@SerialVersionUID(1L)
class PIRSpec(val map: Map[String, Any], val innerPIRSpec: PIRSpec) extends Serializable {}
@SerialVersionUID(1L)
class Spec(val name: String, val rawType: String, val featureName: String, val api: String, val value: String) {
}
trait Transformer {
def from(spec: Spec): PIRSpec
def to(pirSpec: PIRSpec): Spec
}
case class SimpleTransformer() extends Transformer {
override def from(spec: Spec): PIRSpec = {
null
}
override def to(pirSpec: PIRSpec): Spec = {
null
}
}
import com.codahale.jerkson.Json._
// extract 'net.semanticmetadata.lire.imageanalysis.CEDD' feature from '/' with type Image
// where id = '1' and type = 'jpg' and api = 'LIRE'
// val jsonString1 = parse[Map[String, Any]](""" {
// "spec" : {
// "extract": {
// "from": {"path" : "/", "type" : "Image"},
// "to": {"type" : "net.semanticmetadata.lire.imageanalysis.CEDD"},
// "where" : {"id" : "1", "type" : "jpg", "api" : "LIRE"}
// }
// }
// }""")
// transform CEDD feature to org.apache.lucene.document.Document
// where id = 'cedd1' and type = 'jpg' and api = 'LIRE'
// val jsonString2 = parse[Map[String, Any]](""" {
// "spec" : {
// "transform": {
// "from": {"fqn" : "net.semanticmetadata.lire.imageanalysis", "type" : "CEDD"},
// "to": {"type" : "org.apache.lucene.document.Document"},
// "where" : {"id" : "cedd1", "api" : "LIRE"}
// }
// }
// }""")
//
// println(jsonString1)
// println(jsonString2)
// //Testing code
// val transformer = new SimpleTransformer
//
// println(transformer.from(new Spec("", "", "", "", "")))
}
| pir-dsl/pir-embedding | core/src/main/scala/edu/uwm/cs/pir/domain/features/concrete/DeclarativeTransformation.scala | Scala | gpl-2.0 | 2,160 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.exchange
import java.util.Random
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.Serializer
import org.apache.spark.shuffle.sort.SortShuffleManager
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors._
import org.apache.spark.sql.catalyst.expressions.{Attribute, UnsafeProjection}
import org.apache.spark.sql.catalyst.expressions.codegen.LazilyGeneratedOrdering
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.util.MutablePair
/**
* Performs a shuffle that will result in the desired `newPartitioning`.
*/
case class ShuffleExchange(
var newPartitioning: Partitioning,
child: SparkPlan,
@transient coordinator: Option[ExchangeCoordinator]) extends Exchange {
override lazy val metrics = Map(
"dataSize" -> SQLMetrics.createSizeMetric(sparkContext, "data size"))
override def nodeName: String = {
val extraInfo = coordinator match {
case Some(exchangeCoordinator) =>
s"(coordinator id: ${System.identityHashCode(coordinator)})"
case None => ""
}
val simpleNodeName = "Exchange"
s"$simpleNodeName$extraInfo"
}
override def outputPartitioning: Partitioning = newPartitioning
private val serializer: Serializer =
new UnsafeRowSerializer(child.output.size, longMetric("dataSize"))
override protected def doPrepare(): Unit = {
// If an ExchangeCoordinator is needed, we register this Exchange operator
// to the coordinator when we do prepare. It is important to make sure
// we register this operator right before the execution instead of register it
// in the constructor because it is possible that we create new instances of
// Exchange operators when we transform the physical plan
// (then the ExchangeCoordinator will hold references of unneeded Exchanges).
// So, we should only call registerExchange just before we start to execute
// the plan.
coordinator match {
case Some(exchangeCoordinator) => exchangeCoordinator.registerExchange(this)
case None =>
}
}
/**
* Returns a [[ShuffleDependency]] that will partition rows of its child based on
* the partitioning scheme defined in `newPartitioning`. Those partitions of
* the returned ShuffleDependency will be the input of shuffle.
*/
private[exchange] def prepareShuffleDependency()
: ShuffleDependency[Int, InternalRow, InternalRow] = {
ShuffleExchange.prepareShuffleDependency(
child.execute(), child.output, newPartitioning, serializer)
}
/**
* Returns a [[ShuffledRowRDD]] that represents the post-shuffle dataset.
* This [[ShuffledRowRDD]] is created based on a given [[ShuffleDependency]] and an optional
* partition start indices array. If this optional array is defined, the returned
* [[ShuffledRowRDD]] will fetch pre-shuffle partitions based on indices of this array.
*/
private[exchange] def preparePostShuffleRDD(
shuffleDependency: ShuffleDependency[Int, InternalRow, InternalRow],
specifiedPartitionStartIndices: Option[Array[Int]] = None): ShuffledRowRDD = {
// If an array of partition start indices is provided, we need to use this array
// to create the ShuffledRowRDD. Also, we need to update newPartitioning to
// update the number of post-shuffle partitions.
specifiedPartitionStartIndices.foreach { indices =>
assert(newPartitioning.isInstanceOf[HashPartitioning])
newPartitioning = UnknownPartitioning(indices.length)
}
new ShuffledRowRDD(shuffleDependency, specifiedPartitionStartIndices)
}
/**
* Caches the created ShuffleRowRDD so we can reuse that.
*/
private var cachedShuffleRDD: ShuffledRowRDD = null
protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
// Returns the same ShuffleRowRDD if this plan is used by multiple plans.
if (cachedShuffleRDD == null) {
cachedShuffleRDD = coordinator match {
case Some(exchangeCoordinator) =>
val shuffleRDD = exchangeCoordinator.postShuffleRDD(this)
assert(shuffleRDD.partitions.length == newPartitioning.numPartitions)
shuffleRDD
case None =>
val shuffleDependency = prepareShuffleDependency()
preparePostShuffleRDD(shuffleDependency)
}
}
cachedShuffleRDD
}
}
object ShuffleExchange {
def apply(newPartitioning: Partitioning, child: SparkPlan): ShuffleExchange = {
ShuffleExchange(newPartitioning, child, coordinator = Option.empty[ExchangeCoordinator])
}
/**
* Determines whether records must be defensively copied before being sent to the shuffle.
* Several of Spark's shuffle components will buffer deserialized Java objects in memory. The
* shuffle code assumes that objects are immutable and hence does not perform its own defensive
* copying. In Spark SQL, however, operators' iterators return the same mutable `Row` object. In
* order to properly shuffle the output of these operators, we need to perform our own copying
* prior to sending records to the shuffle. This copying is expensive, so we try to avoid it
* whenever possible. This method encapsulates the logic for choosing when to copy.
*
* In the long run, we might want to push this logic into core's shuffle APIs so that we don't
* have to rely on knowledge of core internals here in SQL.
*
* See SPARK-2967, SPARK-4479, and SPARK-7375 for more discussion of this issue.
*
* @param partitioner the partitioner for the shuffle
* @param serializer the serializer that will be used to write rows
* @return true if rows should be copied before being shuffled, false otherwise
*/
private def needToCopyObjectsBeforeShuffle(
partitioner: Partitioner,
serializer: Serializer): Boolean = {
// Note: even though we only use the partitioner's `numPartitions` field, we require it to be
// passed instead of directly passing the number of partitions in order to guard against
// corner-cases where a partitioner constructed with `numPartitions` partitions may output
// fewer partitions (like RangePartitioner, for example).
val conf = SparkEnv.get.conf
val shuffleManager = SparkEnv.get.shuffleManager
val sortBasedShuffleOn = shuffleManager.isInstanceOf[SortShuffleManager]
val bypassMergeThreshold = conf.getInt("spark.shuffle.sort.bypassMergeThreshold", 200)
if (sortBasedShuffleOn) {
val bypassIsSupported = SparkEnv.get.shuffleManager.isInstanceOf[SortShuffleManager]
if (bypassIsSupported && partitioner.numPartitions <= bypassMergeThreshold) {
// If we're using the original SortShuffleManager and the number of output partitions is
// sufficiently small, then Spark will fall back to the hash-based shuffle write path, which
// doesn't buffer deserialized records.
// Note that we'll have to remove this case if we fix SPARK-6026 and remove this bypass.
false
} else if (serializer.supportsRelocationOfSerializedObjects) {
// SPARK-4550 and SPARK-7081 extended sort-based shuffle to serialize individual records
// prior to sorting them. This optimization is only applied in cases where shuffle
// dependency does not specify an aggregator or ordering and the record serializer has
// certain properties. If this optimization is enabled, we can safely avoid the copy.
//
// Exchange never configures its ShuffledRDDs with aggregators or key orderings, so we only
// need to check whether the optimization is enabled and supported by our serializer.
false
} else {
// Spark's SortShuffleManager uses `ExternalSorter` to buffer records in memory, so we must
// copy.
true
}
} else {
// Catch-all case to safely handle any future ShuffleManager implementations.
true
}
}
/**
* Returns a [[ShuffleDependency]] that will partition rows of its child based on
* the partitioning scheme defined in `newPartitioning`. Those partitions of
* the returned ShuffleDependency will be the input of shuffle.
*/
def prepareShuffleDependency(
rdd: RDD[InternalRow],
outputAttributes: Seq[Attribute],
newPartitioning: Partitioning,
serializer: Serializer): ShuffleDependency[Int, InternalRow, InternalRow] = {
val part: Partitioner = newPartitioning match {
case RoundRobinPartitioning(numPartitions) => new HashPartitioner(numPartitions)
case p@HashPartitioning(_, n) => new Partitioner {
override def numPartitions: Int = n
// For HashPartitioning, the partitioning key is already a valid partition ID, as we use
// `HashPartitioning.partitionIdExpression` to produce partitioning key.
override def getPartition(key: Any): Int = key.asInstanceOf[Int]
}
case RangePartitioning(sortingExpressions, numPartitions) =>
// Internally, RangePartitioner runs a job on the RDD that samples keys to compute
// partition bounds. To get accurate samples, we need to copy the mutable keys.
val rddForSampling = rdd.mapPartitionsInternal { iter =>
val mutablePair = new MutablePair[InternalRow, Null]()
iter.map(row => mutablePair.update(row.copy(), null))
}
implicit val ordering = new LazilyGeneratedOrdering(sortingExpressions, outputAttributes)
new RangePartitioner(numPartitions, rddForSampling, ascending = true)
case SinglePartition =>
new Partitioner {
override def numPartitions: Int = 1
override def getPartition(key: Any): Int = 0
}
case _ => sys.error(s"Exchange not implemented for $newPartitioning")
// TODO: Handle BroadcastPartitioning.
}
def getPartitionKeyExtractor(): InternalRow => Any = newPartitioning match {
case RoundRobinPartitioning(numPartitions) =>
// Distributes elements evenly across output partitions, starting from a random partition.
var position = new Random(TaskContext.get().partitionId()).nextInt(numPartitions)
(row: InternalRow) => {
// The HashPartitioner will handle the `mod` by the number of partitions
position += 1
position
}
case h: HashPartitioning =>
val projection = UnsafeProjection.create(h.partitionIdExpression :: Nil, outputAttributes)
row => projection(row).getInt(0)
case RangePartitioning(_, _) | SinglePartition => identity
case _ => sys.error(s"Exchange not implemented for $newPartitioning")
}
val rddWithPartitionIds: RDD[Product2[Int, InternalRow]] = {
if (needToCopyObjectsBeforeShuffle(part, serializer)) {
rdd.mapPartitionsInternal { iter =>
val getPartitionKey = getPartitionKeyExtractor()
iter.map { row => (part.getPartition(getPartitionKey(row)), row.copy()) }
}
} else {
rdd.mapPartitionsInternal { iter =>
val getPartitionKey = getPartitionKeyExtractor()
val mutablePair = new MutablePair[Int, InternalRow]()
iter.map { row => mutablePair.update(part.getPartition(getPartitionKey(row)), row) }
}
}
}
// Now, we manually create a ShuffleDependency. Because pairs in rddWithPartitionIds
// are in the form of (partitionId, row) and every partitionId is in the expected range
// [0, part.numPartitions - 1]. The partitioner of this is a PartitionIdPassthrough.
val dependency =
new ShuffleDependency[Int, InternalRow, InternalRow](
rddWithPartitionIds,
new PartitionIdPassthrough(part.numPartitions),
serializer)
dependency
}
}
| SnappyDataInc/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchange.scala | Scala | apache-2.0 | 12,686 |
package org.jetbrains.plugins.scala.codeInspection
package unused
import com.intellij.codeInspection.LocalInspectionTool
import com.intellij.testFramework.EditorTestUtil
import org.jetbrains.plugins.scala.codeInspection.varCouldBeValInspection.VarCouldBeValInspection
/**
* Created by Svyatoslav Ilinskiy on 11.07.16.
*/
class VarCouldBeValInspectionTest extends ScalaQuickFixTestBase {
import EditorTestUtil.{SELECTION_END_TAG => END, SELECTION_START_TAG => START}
override protected val classOfInspection: Class[_ <: LocalInspectionTool] =
classOf[VarCouldBeValInspection]
import VarCouldBeValInspection._
override protected val description: String = DESCRIPTION
def testPrivateField(): Unit = testQuickFix(
text =
s"""
|class Foo {
| private ${START}var$END s = 0
| println(s)
|}
""".stripMargin,
expected =
"""
|class Foo {
| private val s = 0
| println(s)
|}
""".stripMargin
)
def testLocalVar(): Unit = testQuickFix(
text =
s"""
|object Foo {
| def foo(): Unit = {
| ${START}var$END s = 0
| val z = s
| }
|}
""".stripMargin,
expected =
"""
|object Foo {
| def foo(): Unit = {
| val s = 0
| val z = s
| }
|}
""".stripMargin
)
def testNonPrivateField(): Unit = checkTextHasNoErrors(
text =
"""
|class Foo {
| var s: String = ""
| protected var z: Int = 2
| println(s)
| println(z)
|}
""".stripMargin
)
def testMultiDeclaration(): Unit = testQuickFix(
text =
s"""
|class Foo {
| private ${START}var$END (a, b): String = ???
| println(b)
| println(a)
|}
""".stripMargin,
expected =
"""
|class Foo {
| private val (a, b): String = ???
| println(b)
| println(a)
|}
""".stripMargin
)
def testSuppressed(): Unit = checkTextHasNoErrors(
text =
"""
|class Bar {
| //noinspection VarCouldBeVal
| private var f = 2
| println(f)
|
| def aa(): Unit = {
| //noinspection VarCouldBeVal
| var d = 2
| val s = d
| }
|}
""".stripMargin
)
def testAssignmentDetectedNoError(): Unit = checkTextHasNoErrors(
text =
"""
|object Moo {
| def method(): Unit = {
| var b = 1
| b.+=(2)
|
| var c = 1
| c += 2
| }
|}
""".stripMargin
)
def testAdd(): Unit = testQuickFix(
text =
s"""
|object Koo {
| def foo(): Unit = {
| ${START}var$END d = 1
| d + 1
| }
|}
""".stripMargin,
expected =
"""
|object Koo {
| def foo(): Unit = {
| val d = 1
| d + 1
| }
|}
""".stripMargin
)
private def testQuickFix(text: String, expected: String): Unit =
testQuickFix(text, expected, VarToValFix.HINT)
}
| jastice/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/codeInspection/unused/VarCouldBeValInspectionTest.scala | Scala | apache-2.0 | 3,236 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.keras.layers
import com.intel.analytics.bigdl.dllib.nn.Graph.ModuleNode
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.dllib.nn.internal.KerasLayer
import com.intel.analytics.bigdl.dllib.nn.{CAddTable, CAveTable, CDivTable, CMaxTable, CMinTable, CMulTable, CSubTable, CosineDistance, DotProduct, JoinTable, ParallelTable, Sequential => TSequential}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.{MultiShape, Shape}
import com.intel.analytics.bigdl.dllib.keras.Net
import com.intel.analytics.bigdl.dllib.keras.layers.utils.{KerasLayerRef, KerasUtils}
import scala.reflect.ClassTag
/**
* Used to merge a list of inputs into a single output, following some merge mode.
* To merge layers, it must take at least two input layers.
*
* When using this layer as the first layer in a model, you need to provide the argument
 * inputShape for input layers (each as a Single Shape, which does not include the batch dimension).
*
* @param layers A list of layer instances. Must be more than one layer.
* @param mode Merge mode. String, must be one of: 'sum', 'mul', 'concat', 'ave', 'cos',
* 'dot', 'max', 'sub', 'div', 'min'. Default is 'sum'.
* @param concatAxis Integer, axis to use when concatenating layers. Only specify this when merge
* mode is 'concat'. Default is -1, meaning the last axis of the input.
* @param inputShape A Multi Shape, which contains the inputShape of the input layers,
* each does not include the batch dimension.
* @tparam T The numeric type of parameter(e.g. weight, bias). Only support float/double now.
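 *
 * For illustration only, a hypothetical sketch of merging two branches by element-wise sum
 * (`InputLayer`, the shapes and the variable names are assumptions, standing in for any two
 * Keras-style layers with identical output shapes):
 * {{{
 *   val branch1 = InputLayer[Float](inputShape = Shape(10))
 *   val branch2 = InputLayer[Float](inputShape = Shape(10))
 *   val merged = Merge[Float](layers = List(branch1, branch2), mode = "sum")
 * }}}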
*/
class Merge[T: ClassTag](
val layers: Array[AbstractModule[Activity, Activity, T]] = null,
val mode: String = "sum",
val concatAxis: Int = -1,
// MultiShape isn't directly supported for serialization. Use Shape instead.
val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
extends KerasLayer[Tensor[T], Tensor[T], T](Merge.calcBatchInputShape(inputShape, layers))
with Net {
override private[bigdl] def toKeras2(): String = {
var params = Net.inputShapeToString(inputShape) ++
Net.param(getName())
val kerasLayerName = mode match {
case "sum" => "Add"
case "mul" => "Multiply"
case "max" => "Maximum"
case "ave" => "Average"
case "sub" => "Subtract"
case "min" => "Minimum"
case "concat" =>
params ++= Net.param(concatAxis, "axis")
"Concatenate"
case "dot" => "Dot"
case _ =>
throw new IllegalArgumentException(s"Merge ${mode} is not supported in Keras2")
}
Net.kerasDef(kerasLayerName, params)
}
private val mergeMode = mode.toLowerCase()
private var axis = concatAxis
require(mergeMode == "sum" || mergeMode == "mul" || mergeMode == "concat" || mergeMode == "ave"
|| mergeMode == "cos" || mergeMode == "dot" || mergeMode == "max" || mergeMode == "sub"
|| mergeMode == "min" || mergeMode == "div", s"Invalid merge mode: $mergeMode")
if (layers != null) {
require(layers.length >= 2, s"Merge must take at least two input layers " +
s"but found ${layers.length}")
layers.foreach(layer => require(layer.isInstanceOf[KerasLayer[Activity, Activity, T]],
"Each input layer for Merge should be a Keras-Style layer"))
KerasLayerRef(this).excludeInvalidLayers(layers)
}
private def computeOutputShapeForConcat(input: List[Shape]): Shape = {
val input1 = input.head.toSingle().toArray
val output = input1.clone()
require(Math.abs(concatAxis) < output.length, s"Invalid concat axis $concatAxis")
axis = if (concatAxis < 0) concatAxis + output.length else concatAxis
var i = 1
while (i < input.length) {
val input_i = input(i).toSingle().toArray
var j = 0
while (j < input_i.length) {
if (j != axis && (input_i(j) != -1 || output(j) != -1)) require(input_i(j)==output(j),
s"Incompatible input dimension for merge " +
s"mode concat: (${output.deep.mkString(", ")}), " +
s"(${input_i.deep.mkString(", ")})")
j += 1
}
if (output(axis) != -1) {
output(axis) += input_i(axis)
}
i += 1
}
Shape(output)
}
private def checkSameInputShape(input: List[Shape]): Unit = {
val input1 = input.head.toSingle().toArray
var i = 1
while (i < input.length) {
val input_i = input(i).toSingle().toArray
require(input_i.sameElements(input1), s"Incompatible input dimension for " +
s"merge mode $mergeMode: (${input1.deep.mkString(", ")}), " +
s"(${input_i.deep.mkString(", ")})")
i += 1
}
}
override def computeOutputShape(inputShape: Shape): Shape = {
val input = inputShape.toMulti()
val input1 = input.head.toSingle().toArray
if (mergeMode == "concat") {
computeOutputShapeForConcat(input)
}
else {
checkSameInputShape(input)
if (mergeMode == "dot" || mergeMode == "cos") {
require(input.head.toSingle().length <=2, s"For merge mode $mergeMode, 3D input " +
s"or above is currently not supported, got input dim ${input.head.toSingle().length}")
require(input.length == 2, s"Merge mode $mergeMode takes exactly two layers, " +
s"but got ${input.length}")
if (mergeMode == "dot") Shape(-1, 1) else Shape(-1, 1, 1)
}
else {
input.head
}
}
}
override def doBuild(inputShape: Shape): AbstractModule[Tensor[T], Tensor[T], T] = {
val input = inputShape.toMulti()
val mergeLayer = mergeMode match {
case "sum" => CAddTable()
case "mul" => CMulTable()
case "max" => CMaxTable()
case "ave" => CAveTable()
case "sub" => CSubTable()
case "div" => CDivTable()
case "min" => CMinTable()
case "concat" =>
val input1 = input.head.toSingle().toArray
JoinTable(axis, input1.length -1)
case "dot" =>
val seq = TSequential[T]()
seq.add(DotProduct())
seq.add(com.intel.analytics.bigdl.dllib.nn.Reshape(Array(1), Some(true)))
seq
case "cos" =>
val seq = TSequential[T]()
seq.add(CosineDistance())
seq.add(com.intel.analytics.bigdl.dllib.nn.Reshape(Array(1, 1), Some(true)))
seq
}
if (layers != null) { // In the case `layers != null`, return a ParallelTable to merge layers.
val model = TSequential[T]()
val parallel = ParallelTable()
var i = 0
while(i < layers.length) {
parallel.add(layers(i).asInstanceOf[KerasLayer[Activity, Activity, T]].labor)
i += 1
}
model.add(parallel)
model.add(mergeLayer)
model.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
}
else { // In the case `layers == null`, only return a merge layer to merge nodes not layers.
mergeLayer.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
}
}
}
object Merge {
def calcBatchInputShape[T: ClassTag](
inputShape: Shape = null,
layers: Array[AbstractModule[Activity, Activity, T]]): Shape = {
val batchInputShape = KerasUtils.addBatch(inputShape)
val actualInputShape = if (layers != null) {
layers.foreach(layer => require(layer.isInstanceOf[KerasLayer[Activity, Activity, T]],
"Each input layer for Merge should be a Keras-Style layer"))
MultiShape(layers.map { layer =>
if (layer.asInstanceOf[KerasLayer[Activity, Activity, T]]
.isBuilt()) { // it's possible while reloaded from file
layer.getOutputShape()
} else {
layer.asInstanceOf[KerasLayer[Activity, Activity, T]].build(layer.getInputShape())
}
}.toList)
} else null
if (batchInputShape != null) {
require(batchInputShape.isInstanceOf[MultiShape],
"Merge requires inputShape to be MultiShape")
require(batchInputShape.toMulti().equals(actualInputShape.toMulti()),
"Actual layer input shapes are not the same as expected layer input shapes")
}
actualInputShape
}
def apply[@specialized(Float, Double) T: ClassTag](
layers: List[AbstractModule[Activity, Activity, T]] = null,
mode: String = "sum",
concatAxis: Int = -1,
inputShape: Shape = null)(implicit ev: TensorNumeric[T]): Merge[T] = {
val layersArray = if (layers != null) {
layers.toArray
} else null
new Merge[T](layersArray, mode, concatAxis, inputShape)
}
def merge[@specialized(Float, Double) T: ClassTag](
inputs: List[ModuleNode[T]],
mode: String = "sum",
concatAxis: Int = -1,
name: String = null)(implicit ev: TensorNumeric[T]): ModuleNode[T] = {
val mergeLayer = new Merge[T](mode = mode, concatAxis = concatAxis)
if (name != null) mergeLayer.setName(name)
mergeLayer.inputs(inputs.toArray)
}
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/keras/layers/Merge.scala | Scala | apache-2.0 | 9,617 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.integrationtest.backend
import io.fabric8.kubernetes.client.DefaultKubernetesClient
import org.apache.spark.deploy.k8s.integrationtest.ProcessUtils
import org.apache.spark.deploy.k8s.integrationtest.TestConstants._
import org.apache.spark.deploy.k8s.integrationtest.backend.cloud.KubeConfigBackend
import org.apache.spark.deploy.k8s.integrationtest.backend.docker.DockerForDesktopBackend
import org.apache.spark.deploy.k8s.integrationtest.backend.minikube.MinikubeTestBackend
private[spark] trait IntegrationTestBackend {
def initialize(): Unit
def getKubernetesClient: DefaultKubernetesClient
def cleanUp(): Unit = {}
def describePods(labels: String): Seq[String] =
ProcessUtils.executeProcess(
Array("bash", "-c", s"kubectl describe pods --all-namespaces -l $labels"),
timeout = 60, dumpOutput = false).filter { !_.contains("https://github.com/kubernetes") }
}
private[spark] object IntegrationTestBackendFactory {
def getTestBackend: IntegrationTestBackend = {
val deployMode = Option(System.getProperty(CONFIG_KEY_DEPLOY_MODE))
.getOrElse(BACKEND_MINIKUBE)
deployMode match {
case BACKEND_MINIKUBE => MinikubeTestBackend
case BACKEND_CLOUD =>
new KubeConfigBackend(System.getProperty(CONFIG_KEY_KUBE_CONFIG_CONTEXT))
case BACKEND_DOCKER_FOR_DESKTOP => DockerForDesktopBackend
case _ => throw new IllegalArgumentException("Invalid " +
CONFIG_KEY_DEPLOY_MODE + ": " + deployMode)
}
}
}
| shaneknapp/spark | resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/backend/IntegrationTestBackend.scala | Scala | apache-2.0 | 2,316 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming.sources
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.execution.streaming.{RateStreamOffset, Sink, StreamingQueryWrapper}
import org.apache.spark.sql.execution.streaming.continuous.ContinuousTrigger
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.{DataSourceRegister, StreamSinkProvider}
import org.apache.spark.sql.sources.v2._
import org.apache.spark.sql.sources.v2.reader.{InputPartition, PartitionReaderFactory, ScanConfig, ScanConfigBuilder}
import org.apache.spark.sql.sources.v2.reader.streaming._
import org.apache.spark.sql.sources.v2.writer.streaming.StreamingWriteSupport
import org.apache.spark.sql.streaming.{OutputMode, StreamingQuery, StreamTest, Trigger}
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils
case class FakeReadSupport() extends MicroBatchReadSupport with ContinuousReadSupport {
override def deserializeOffset(json: String): Offset = RateStreamOffset(Map())
override def commit(end: Offset): Unit = {}
override def stop(): Unit = {}
override def mergeOffsets(offsets: Array[PartitionOffset]): Offset = RateStreamOffset(Map())
override def fullSchema(): StructType = StructType(Seq())
override def newScanConfigBuilder(start: Offset, end: Offset): ScanConfigBuilder = null
override def initialOffset(): Offset = RateStreamOffset(Map())
override def latestOffset(): Offset = RateStreamOffset(Map())
override def newScanConfigBuilder(start: Offset): ScanConfigBuilder = null
override def createReaderFactory(config: ScanConfig): PartitionReaderFactory = {
throw new IllegalStateException("fake source - cannot actually read")
}
override def createContinuousReaderFactory(
config: ScanConfig): ContinuousPartitionReaderFactory = {
throw new IllegalStateException("fake source - cannot actually read")
}
override def planInputPartitions(config: ScanConfig): Array[InputPartition] = {
throw new IllegalStateException("fake source - cannot actually read")
}
}
trait FakeMicroBatchReadSupportProvider extends MicroBatchReadSupportProvider {
override def createMicroBatchReadSupport(
checkpointLocation: String,
options: DataSourceOptions): MicroBatchReadSupport = {
LastReadOptions.options = options
FakeReadSupport()
}
}
trait FakeContinuousReadSupportProvider extends ContinuousReadSupportProvider {
override def createContinuousReadSupport(
checkpointLocation: String,
options: DataSourceOptions): ContinuousReadSupport = {
LastReadOptions.options = options
FakeReadSupport()
}
}
trait FakeStreamingWriteSupportProvider extends StreamingWriteSupportProvider {
override def createStreamingWriteSupport(
queryId: String,
schema: StructType,
mode: OutputMode,
options: DataSourceOptions): StreamingWriteSupport = {
LastWriteOptions.options = options
throw new IllegalStateException("fake sink - cannot actually write")
}
}
class FakeReadMicroBatchOnly
extends DataSourceRegister
with FakeMicroBatchReadSupportProvider
with SessionConfigSupport {
override def shortName(): String = "fake-read-microbatch-only"
override def keyPrefix: String = shortName()
}
class FakeReadContinuousOnly
extends DataSourceRegister
with FakeContinuousReadSupportProvider
with SessionConfigSupport {
override def shortName(): String = "fake-read-continuous-only"
override def keyPrefix: String = shortName()
}
class FakeReadBothModes extends DataSourceRegister
with FakeMicroBatchReadSupportProvider with FakeContinuousReadSupportProvider {
override def shortName(): String = "fake-read-microbatch-continuous"
}
class FakeReadNeitherMode extends DataSourceRegister {
override def shortName(): String = "fake-read-neither-mode"
}
class FakeWriteSupportProvider
extends DataSourceRegister
with FakeStreamingWriteSupportProvider
with SessionConfigSupport {
override def shortName(): String = "fake-write-microbatch-continuous"
override def keyPrefix: String = shortName()
}
class FakeNoWrite extends DataSourceRegister {
override def shortName(): String = "fake-write-neither-mode"
}
case class FakeWriteV1FallbackException() extends Exception
class FakeSink extends Sink {
override def addBatch(batchId: Long, data: DataFrame): Unit = {}
}
class FakeWriteSupportProviderV1Fallback extends DataSourceRegister
with FakeStreamingWriteSupportProvider with StreamSinkProvider {
override def createSink(
sqlContext: SQLContext,
parameters: Map[String, String],
partitionColumns: Seq[String],
outputMode: OutputMode): Sink = {
new FakeSink()
}
override def shortName(): String = "fake-write-v1-fallback"
}
object LastReadOptions {
var options: DataSourceOptions = _
def clear(): Unit = {
options = null
}
}
object LastWriteOptions {
var options: DataSourceOptions = _
def clear(): Unit = {
options = null
}
}
class StreamingDataSourceV2Suite extends StreamTest {
override def beforeAll(): Unit = {
super.beforeAll()
val fakeCheckpoint = Utils.createTempDir()
spark.conf.set("spark.sql.streaming.checkpointLocation", fakeCheckpoint.getCanonicalPath)
}
override def afterEach(): Unit = {
LastReadOptions.clear()
LastWriteOptions.clear()
}
val readFormats = Seq(
"fake-read-microbatch-only",
"fake-read-continuous-only",
"fake-read-microbatch-continuous",
"fake-read-neither-mode")
val writeFormats = Seq(
"fake-write-microbatch-continuous",
"fake-write-neither-mode")
val triggers = Seq(
Trigger.Once(),
Trigger.ProcessingTime(1000),
Trigger.Continuous(1000))
private def testPositiveCase(readFormat: String, writeFormat: String, trigger: Trigger): Unit = {
testPositiveCaseWithQuery(readFormat, writeFormat, trigger)(() => _)
}
private def testPositiveCaseWithQuery(
readFormat: String,
writeFormat: String,
trigger: Trigger)(check: StreamingQuery => Unit): Unit = {
val query = spark.readStream
.format(readFormat)
.load()
.writeStream
.format(writeFormat)
.trigger(trigger)
.start()
check(query)
query.stop()
}
private def testNegativeCase(
readFormat: String,
writeFormat: String,
trigger: Trigger,
errorMsg: String) = {
val ex = intercept[UnsupportedOperationException] {
testPositiveCase(readFormat, writeFormat, trigger)
}
assert(ex.getMessage.contains(errorMsg))
}
private def testPostCreationNegativeCase(
readFormat: String,
writeFormat: String,
trigger: Trigger,
errorMsg: String) = {
val query = spark.readStream
.format(readFormat)
.load()
.writeStream
.format(writeFormat)
.trigger(trigger)
.start()
eventually(timeout(streamingTimeout)) {
assert(query.exception.isDefined)
assert(query.exception.get.cause != null)
assert(query.exception.get.cause.getMessage.contains(errorMsg))
}
}
test("disabled v2 write") {
    // Ensure the V2 path works normally and generates a V2 sink.
testPositiveCaseWithQuery(
"fake-read-microbatch-continuous", "fake-write-v1-fallback", Trigger.Once()) { v2Query =>
assert(v2Query.asInstanceOf[StreamingQueryWrapper].streamingQuery.sink
.isInstanceOf[FakeWriteSupportProviderV1Fallback])
}
// Ensure we create a V1 sink with the config. Note the config is a comma separated
// list, including other fake entries.
val fullSinkName = classOf[FakeWriteSupportProviderV1Fallback].getName
withSQLConf(SQLConf.DISABLED_V2_STREAMING_WRITERS.key -> s"a,b,c,test,$fullSinkName,d,e") {
testPositiveCaseWithQuery(
"fake-read-microbatch-continuous", "fake-write-v1-fallback", Trigger.Once()) { v1Query =>
assert(v1Query.asInstanceOf[StreamingQueryWrapper].streamingQuery.sink
.isInstanceOf[FakeSink])
}
}
}
Seq(
Tuple2(classOf[FakeReadMicroBatchOnly], Trigger.Once()),
Tuple2(classOf[FakeReadContinuousOnly], Trigger.Continuous(1000))
).foreach { case (source, trigger) =>
test(s"SPARK-25460: session options are respected in structured streaming sources - $source") {
// `keyPrefix` and `shortName` are the same in this test case
val readSource = source.newInstance().shortName()
val writeSource = "fake-write-microbatch-continuous"
val readOptionName = "optionA"
withSQLConf(s"spark.datasource.$readSource.$readOptionName" -> "true") {
testPositiveCaseWithQuery(readSource, writeSource, trigger) { _ =>
eventually(timeout(streamingTimeout)) {
// Write options should not be set.
assert(LastWriteOptions.options.getBoolean(readOptionName, false) == false)
assert(LastReadOptions.options.getBoolean(readOptionName, false) == true)
}
}
}
val writeOptionName = "optionB"
withSQLConf(s"spark.datasource.$writeSource.$writeOptionName" -> "true") {
testPositiveCaseWithQuery(readSource, writeSource, trigger) { _ =>
eventually(timeout(streamingTimeout)) {
// Read options should not be set.
assert(LastReadOptions.options.getBoolean(writeOptionName, false) == false)
assert(LastWriteOptions.options.getBoolean(writeOptionName, false) == true)
}
}
}
}
}
// Get a list of (read, write, trigger) tuples for test cases.
val cases = readFormats.flatMap { read =>
writeFormats.flatMap { write =>
triggers.map(t => (write, t))
}.map {
case (write, t) => (read, write, t)
}
}
for ((read, write, trigger) <- cases) {
testQuietly(s"stream with read format $read, write format $write, trigger $trigger") {
val readSource = DataSource.lookupDataSource(read, spark.sqlContext.conf).newInstance()
val writeSource = DataSource.lookupDataSource(write, spark.sqlContext.conf).newInstance()
(readSource, writeSource, trigger) match {
// Valid microbatch queries.
case (_: MicroBatchReadSupportProvider, _: StreamingWriteSupportProvider, t)
if !t.isInstanceOf[ContinuousTrigger] =>
testPositiveCase(read, write, trigger)
// Valid continuous queries.
case (_: ContinuousReadSupportProvider, _: StreamingWriteSupportProvider,
_: ContinuousTrigger) =>
testPositiveCase(read, write, trigger)
// Invalid - can't read at all
case (r, _, _)
if !r.isInstanceOf[MicroBatchReadSupportProvider]
&& !r.isInstanceOf[ContinuousReadSupportProvider] =>
testNegativeCase(read, write, trigger,
s"Data source $read does not support streamed reading")
// Invalid - can't write
case (_, w, _) if !w.isInstanceOf[StreamingWriteSupportProvider] =>
testNegativeCase(read, write, trigger,
s"Data source $write does not support streamed writing")
// Invalid - trigger is continuous but reader is not
case (r, _: StreamingWriteSupportProvider, _: ContinuousTrigger)
if !r.isInstanceOf[ContinuousReadSupportProvider] =>
testNegativeCase(read, write, trigger,
s"Data source $read does not support continuous processing")
// Invalid - trigger is microbatch but reader is not
case (r, _, t)
if !r.isInstanceOf[MicroBatchReadSupportProvider] &&
!t.isInstanceOf[ContinuousTrigger] =>
testPostCreationNegativeCase(read, write, trigger,
s"Data source $read does not support microbatch processing")
}
}
}
}
| michalsenkyr/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/sources/StreamingDataSourceV2Suite.scala | Scala | apache-2.0 | 12,607 |
package lightning.server
import scala.util.Properties
import scalaz.NonEmptyList
import scalaz.Validation
import lightning.configuration.loader.FallbackConfigurationLoader
object Server extends App {
private def loadConfiguration() = {
lazy val config: String = Properties.propOrNone("config").getOrElse {
val err =
s"""
          |No config was specified so the system will shut down.
|
|Please set system property config as follows:
|
| -Dconfig=local
|
|Where 'local' should reflect the configuration for your environment.
|
|Have a nice day.
""".stripMargin
println(err)
System.exit(1)
throw new RuntimeException(err)
}
Validation.fromTryCatchNonFatal(FallbackConfigurationLoader.load(config)).fold(shutdownErr, identity)
}
private def shutdownErr(err: Throwable): Nothing = {
err.printStackTrace()
die()
}
def shutdown(err: NonEmptyList[String]): Nothing = {
val errors: String = err.list.mkString("\\n")
println(errors)
die()
}
private def die(): Nothing = sys.exit(-1)
}
| lancewalton/lightning | server/src/main/scala/lightning/server/Server.scala | Scala | mit | 1,140 |
package com.tutorial.sparksql.DataSources
import com.tutorial.utils.SparkCommon
import org.apache.spark.sql.SQLContext
/**
* Created by ved on 29/1/16.
*/
object CsvFile {
val sc = SparkCommon.sparkContext
val sqlContext = SparkCommon.sparkSQLContext
def main(args: Array[String]) {
val sqlContext = new SQLContext(sc)
val df = sqlContext.read
.format("com.databricks.spark.csv")
.option("header", "true") // Use first line of all files as header
.option("inferSchema", "true") // Automatically infer data types
.load("src/main/resources/cars.csv")
df.show()
df.printSchema()
val selectedData = df.select("year", "model")
selectedData.write
.format("com.databricks.spark.csv")
.option("header", "true")
selectedData.show()
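    // Note: the writer above is only configured (format + header option); nothing is written to
    // disk unless a save(...) call, such as the one commented out below, is actually invoked.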
//.save(s"src/main/resources/${UUID.randomUUID()}")
//println("OK")
}
}
| rklick-solutions/spark-tutorial | src/main/scala/com/tutorial/sparksql/DataSources/CsvFile.scala | Scala | apache-2.0 | 892 |
package com.rayrobdod.boardGame
import org.scalatest.FunSpec
final class HorizontalHexagonalRoomTest extends FunSpec
with RoomTests
{
singleElementRoom("A Room containing a single space and no warps")(
idx = (0,0)
, unequalIndex = (1,1)
, clazz = " "
, generator = Room.horizontalHexagonalSpaceGenerator[String]
)
describe("A room with one space that is surrounded by warps") {
def targetSpaceFun(sc:String) = {() => new NoAdjacentsHorizHexSpace(sc)}
val field = HorizontalHexagonalRoom(
Map(
(0,0) -> "center"
),
Map(
(1,0) -> targetSpaceFun("east"),
(-1,0) -> targetSpaceFun("west"),
(0,-1) -> targetSpaceFun("northwest"),
(0,1) -> targetSpaceFun("southeast"),
(1,-1) -> targetSpaceFun("northeast"),
(-1,1) -> targetSpaceFun("southwest")
)
)
val center = field.space((0,0)).get
it ("center.northwest warps to northwest") {
assertResult("northwest"){center.northwest.get.typeOfSpace}
}
it ("center.southwest warps to southwest") {
assertResult("southwest"){center.southwest.get.typeOfSpace}
}
it ("center.northeast warps to northeast") {
assertResult("northeast"){center.northeast.get.typeOfSpace}
}
it ("center.southeast warps to southeast") {
assertResult("southeast"){center.southeast.get.typeOfSpace}
}
it ("center.east warps to east") {
assertResult("east"){center.east.get.typeOfSpace}
}
it ("center.west warps to west") {
assertResult("west"){center.west.get.typeOfSpace}
}
}
describe("A room with one space that is surrounded by local spaces") {
val field = HorizontalHexagonalRoom(
Map(
(0,0) -> "center",
(1,0) -> "east",
(-1,0) -> "west",
(0,-1) -> "northwest",
(0,1) -> "southeast",
(1,-1) -> "northeast",
(-1,1) -> "southwest"
),
Map.empty[RectangularIndex, Function0[HorizontalHexagonalSpace[String]]]
)
val center = field.space((0,0)).get
it ("center.northwest warps to northwest") {
assertResult("northwest"){center.northwest.get.typeOfSpace}
}
it ("center.southwest warps to southwest") {
assertResult("southwest"){center.southwest.get.typeOfSpace}
}
it ("center.northeast warps to northeast") {
assertResult("northeast"){center.northeast.get.typeOfSpace}
}
it ("center.southeast warps to southeast") {
assertResult("southeast"){center.southeast.get.typeOfSpace}
}
it ("center.east warps to east") {
assertResult("east"){center.east.get.typeOfSpace}
}
it ("center.west warps to west") {
assertResult("west"){center.west.get.typeOfSpace}
}
}
private final class NoAdjacentsHorizHexSpace(override val typeOfSpace:String) extends HorizontalHexagonalSpace[String] {
override def northwest:Option[Nothing] = None
override def northeast:Option[Nothing] = None
override def southwest:Option[Nothing] = None
override def southeast:Option[Nothing] = None
override def east:Option[Nothing] = None
override def west:Option[Nothing] = None
}
}
| rayrobdod/boardGame | Model/src/test/scala/HorizontalHexagonalRoomTest.scala | Scala | gpl-3.0 | 2,981 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.batch.table
import java.math.BigDecimal
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.util.CollectionDataSets
import org.apache.flink.table.api.{TableEnvironment, Types}
import org.apache.flink.table.runtime.utils.JavaUserDefinedAggFunctions.{CountDistinctWithMergeAndReset, WeightedAvgWithMergeAndReset}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.functions.aggfunctions.CountAggFunction
import org.apache.flink.table.runtime.utils.TableProgramsCollectionTestBase
import org.apache.flink.table.runtime.utils.TableProgramsTestBase.TableConfigMode
import org.apache.flink.table.utils.{NonMergableCount, Top10}
import org.apache.flink.test.util.TestBaseUtils
import org.apache.flink.types.Row
import org.junit._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import scala.collection.JavaConverters._
import scala.collection.mutable
@RunWith(classOf[Parameterized])
class AggregationsITCase(
configMode: TableConfigMode)
extends TableProgramsCollectionTestBase(configMode) {
@Test
def testAggregationWithCaseClass(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val inputTable = CollectionDataSets.getSmallNestedTupleDataSet(env).toTable(tEnv, 'a, 'b)
tEnv.registerDataSet("MyTable", inputTable)
val result = tEnv.scan("MyTable")
.where('a.get("_1") > 0)
.select('a.get("_1").avg, 'a.get("_2").sum, 'b.count)
val expected = "2,6,3"
val results = result.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregationTypes(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
.select('_1.sum, '_1.sum0, '_1.min, '_1.max, '_1.count, '_1.avg)
val results = t.toDataSet[Row].collect()
val expected = "231,231,1,21,21,11"
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testWorkingAggregationDataTypes(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = env.fromElements(
(1: Byte, 1: Short, 1, 1L, 1.0f, 1.0d, "Hello"),
(2: Byte, 2: Short, 2, 2L, 2.0f, 2.0d, "Ciao")).toTable(tEnv)
.select('_1.avg, '_2.avg, '_3.avg, '_4.avg, '_5.avg, '_6.avg, '_7.count)
val expected = "1,1,1,1,1.5,1.5,2"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testProjection(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = env.fromElements(
(1: Byte, 1: Short),
(2: Byte, 2: Short)).toTable(tEnv)
.select('_1.avg, '_1.sum, '_1.count, '_2.avg, '_2.sum)
val expected = "1,3,2,1,3"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregationWithArithmetic(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = env.fromElements((1f, "Hello"), (2f, "Ciao")).toTable(tEnv)
.select(('_1 + 2).avg + 2, '_2.count + 5)
val expected = "5.5,7"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregationWithTwoCount(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = env.fromElements((1f, "Hello"), (2f, "Ciao")).toTable(tEnv)
.select('_1.count, '_2.count)
val expected = "2,2"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregationAfterProjection(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = env.fromElements(
(1: Byte, 1: Short, 1, 1L, 1.0f, 1.0d, "Hello"),
(2: Byte, 2: Short, 2, 2L, 2.0f, 2.0d, "Ciao")).toTable(tEnv)
.select('_1, '_2, '_3)
.select('_1.avg, '_2.sum, '_3.count)
val expected = "1,3,2"
val result = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(result.asJava, expected)
}
@Test
def testSQLStyleAggregations(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.select(
"""Sum( a) as a1, a.sum as a2,
|Min (a) as b1, a.min as b2,
|Max (a ) as c1, a.max as c2,
|Avg ( a ) as d1, a.avg as d2,
|Count(a) as e1, a.count as e2
""".stripMargin)
val expected = "231,231,1,1,21,21,11,11,21,21"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testPojoAggregation(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val input = env.fromElements(
WC("hello", 1),
WC("hello", 1),
WC("ciao", 1),
WC("hola", 1),
WC("hola", 1))
val expr = input.toTable(tEnv)
val result = expr
.groupBy('word)
.select('word, 'frequency.sum as 'frequency)
.filter('frequency === 2)
.toDataSet[WC]
val mappedResult = result.map(w => (w.word, w.frequency * 10)).collect()
val expected = "(hello,20)\\n" + "(hola,20)"
TestBaseUtils.compareResultAsText(mappedResult.asJava, expected)
}
@Test
def testDistinct(): Unit = {
val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val ds = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
val distinct = ds.select('b).distinct()
val expected = "1\\n" + "2\\n" + "3\\n" + "4\\n" + "5\\n" + "6\\n"
val results = distinct.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testDistinctAfterAggregate(): Unit = {
val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val ds = CollectionDataSets.get5TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
val distinct = ds.groupBy('a, 'e).select('e).distinct()
val expected = "1\\n" + "2\\n" + "3\\n"
val results = distinct.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupedAggregate(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val countFun = new CountAggFunction
val wAvgFun = new WeightedAvgWithMergeAndReset
val countDistinct = new CountDistinctWithMergeAndReset
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, 'a.sum, countFun('c), wAvgFun('b, 'a), wAvgFun('a, 'a), countDistinct('c))
val expected = "1,1,1,1,1,1\\n" + "2,5,2,2,2,2\\n" + "3,15,3,3,5,3\\n" + "4,34,4,4,8,4\\n" +
"5,65,5,5,13,5\\n" + "6,111,6,6,18,6\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupingKeyForwardIfNotUsed(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('a.sum)
val expected = "1\\n" + "5\\n" + "15\\n" + "34\\n" + "65\\n" + "111\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupNoAggregation(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = CollectionDataSets.get3TupleDataSet(env)
.toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('a.sum as 'd, 'b)
.groupBy('b, 'd)
.select('b)
val expected = "1\\n" + "2\\n" + "3\\n" + "4\\n" + "5\\n" + "6\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAggregateEmptyDataSets(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val myAgg = new NonMergableCount
val t1 = env.fromCollection(new mutable.MutableList[(Int, String)]).toTable(tEnv, 'a, 'b)
.select('a.sum, 'a.count)
val t2 = env.fromCollection(new mutable.MutableList[(Int, String)]).toTable(tEnv, 'a, 'b)
.select('a.sum, myAgg('b), 'a.count)
val expected1 = "null,0"
val expected2 = "null,0,0"
val results1 = t1.toDataSet[Row].collect()
val results2 = t2.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results1.asJava, expected1)
TestBaseUtils.compareResultAsText(results2.asJava, expected2)
}
@Test
def testGroupedAggregateWithLongKeys(): Unit = {
// This uses very long keys to force serialized comparison.
// With short keys, the normalized key is sufficient.
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val ds = env.fromElements(
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhaa", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhaa", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhaa", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhaa", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhaa", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhab", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhab", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhab", 1, 2),
("hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhab", 1, 2))
.rebalance().setParallelism(2).toTable(tEnv, 'a, 'b, 'c)
.groupBy('a, 'b)
.select('c.sum)
val expected = "10\\n" + "8\\n"
val results = ds.collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupedAggregateWithConstant1(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.select('a, 4 as 'four, 'b)
.groupBy('four, 'a)
.select('four, 'b.sum)
val expected = "4,2\\n" + "4,3\\n" + "4,5\\n" + "4,5\\n" + "4,5\\n" + "4,6\\n" +
"4,6\\n" + "4,6\\n" + "4,3\\n" + "4,4\\n" + "4,6\\n" + "4,1\\n" + "4,4\\n" +
"4,4\\n" + "4,5\\n" + "4,6\\n" + "4,2\\n" + "4,3\\n" + "4,4\\n" + "4,5\\n" + "4,6\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupedAggregateWithConstant2(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.select('b, 4 as 'four, 'a)
.groupBy('b, 'four)
.select('four, 'a.sum)
val expected = "4,1\\n" + "4,5\\n" + "4,15\\n" + "4,34\\n" + "4,65\\n" + "4,111\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupedAggregateWithExpression(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = CollectionDataSets.get5TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c, 'd, 'e)
.groupBy('e, 'b % 3)
.select('c.min, 'e, 'a.avg, 'd.count)
val expected = "0,1,1,1\\n" + "3,2,3,3\\n" + "7,1,4,2\\n" + "14,2,5,1\\n" +
"5,3,4,2\\n" + "2,1,3,2\\n" + "1,2,3,3\\n" + "12,3,5,1"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testGroupedAggregateWithFilter(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, 'a.sum)
.where('b === 2)
val expected = "2,5\\n"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testAnalyticAggregation(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val ds = env.fromElements(
(1: Byte, 1: Short, 1, 1L, 1.0f, 1.0d, BigDecimal.ONE),
(2: Byte, 2: Short, 2, 2L, 2.0f, 2.0d, new BigDecimal(2))).toTable(tEnv)
val res = ds.select(
'_1.stddevPop, '_2.stddevPop, '_3.stddevPop, '_4.stddevPop, '_5.stddevPop,
'_6.stddevPop, '_7.stddevPop,
'_1.stddevSamp, '_2.stddevSamp, '_3.stddevSamp, '_4.stddevSamp, '_5.stddevSamp,
'_6.stddevSamp, '_7.stddevSamp,
'_1.varPop, '_2.varPop, '_3.varPop, '_4.varPop, '_5.varPop,
'_6.varPop, '_7.varPop,
'_1.varSamp, '_2.varSamp, '_3.varSamp, '_4.varSamp, '_5.varSamp,
'_6.varSamp, '_7.varSamp)
val expected =
"0,0,0," +
"0,0.5,0.5,0.5," +
"1,1,1," +
"1,0.70710677,0.7071067811865476,0.7071067811865476," +
"0,0,0," +
"0,0.25,0.25,0.25," +
"1,1,1," +
"1,0.5,0.5,0.5"
val results = res.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testComplexAggregate(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val top10Fun = new Top10
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, top10Fun('b.cast(Types.INT), 'a.cast(Types.FLOAT)))
val expected =
"1,[(1,1.0), null, null, null, null, null, null, null, null, null]\\n" +
"2,[(2,3.0), (2,2.0), null, null, null, null, null, null, null, null]\\n" +
"3,[(3,6.0), (3,5.0), (3,4.0), null, null, null, null, null, null, null]\\n" +
"4,[(4,10.0), (4,9.0), (4,8.0), (4,7.0), null, null, null, null, null, null]\\n" +
"5,[(5,15.0), (5,14.0), (5,13.0), (5,12.0), (5,11.0), null, null, null, null, null]\\n" +
"6,[(6,21.0), (6,20.0), (6,19.0), (6,18.0), (6,17.0), (6,16.0), null, null, null, null]"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
@Test
def testCollect(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env, config)
val t = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv, 'a, 'b, 'c)
.groupBy('b)
.select('b, 'a.collect)
val expected =
"1,{1=1}\\n2,{2=1, 3=1}\\n3,{4=1, 5=1, 6=1}\\n4,{8=1, 9=1, 10=1, 7=1}\\n" +
"5,{11=1, 12=1, 13=1, 14=1, 15=1}\\n6,{16=1, 17=1, 18=1, 19=1, 20=1, 21=1}"
val results = t.toDataSet[Row].collect()
TestBaseUtils.compareResultAsText(results.asJava, expected)
}
}
case class WC(word: String, frequency: Long)
| mylog00/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/batch/table/AggregateITCase.scala | Scala | apache-2.0 | 16,814 |
package mesosphere.marathon
package core.storage.backup
import akka.Done
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import ch.qos.logback.classic.{ Level, Logger }
import com.typesafe.scalalogging.StrictLogging
import kamon.Kamon
import mesosphere.marathon.core.base.LifecycleState
import mesosphere.marathon.storage.{ StorageConf, StorageModule }
import org.rogach.scallop.ScallopConf
import org.slf4j.LoggerFactory
import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, Future }
import scala.util.control.NonFatal
/**
* Base class for backup and restore command line utility.
*/
abstract class BackupRestoreAction extends StrictLogging {
class BackupConfig(args: Seq[String]) extends ScallopConf(args) with StorageConf with NetworkConf {
override def availableFeatures: Set[String] = Set.empty
verify()
require(backupLocation.isDefined, "--backup_location needs to be defined!")
}
/**
* Can either run a backup or restore operation.
*/
@SuppressWarnings(Array("AsInstanceOf"))
def action(conf: BackupConfig, fn: PersistentStoreBackup => Future[Done]): Unit = {
Kamon.start()
implicit val system = ActorSystem("Backup")
implicit val materializer = ActorMaterializer()
implicit val scheduler = system.scheduler
import scala.concurrent.ExecutionContext.Implicits.global
try {
val storageModule = StorageModule(conf, LifecycleState.WatchingJVM)
storageModule.persistenceStore.markOpen()
val backup = storageModule.persistentStoreBackup
Await.result(fn(backup), Duration.Inf)
storageModule.persistenceStore.markClosed()
logger.info("Action complete.")
} catch {
case NonFatal(ex) =>
logger.error(s"Error: ${ex.getMessage}", ex)
sys.exit(1) // signal a problem to the caller
} finally {
Await.result(Http().shutdownAllConnectionPools(), Duration.Inf)
Kamon.shutdown()
// akka http has an issue tearing down the connection pool: https://github.com/akka/akka-http/issues/907
// We will hide the fail message from the user until this is fixed
LoggerFactory.getLogger("akka.actor.ActorSystemImpl").asInstanceOf[Logger].setLevel(Level.OFF)
materializer.shutdown()
Await.ready(system.terminate(), Duration.Inf)
sys.exit(0)
}
}
}
/**
* Command line utility to backup the current Marathon state to an external storage location.
*
 * Please note: if you start Marathon with a backup location, it will automatically create a backup
 * for every new Marathon version before it runs a migration.
* This is the preferred way to handle upgrades.
*
 * Snapshot backups can be created at any time.
*
* There are several command line parameters to define the exact behaviour and location.
 * Please use --help to see all command line parameters.
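 *
 * A sketch of programmatic usage (only `--backup_location` is taken from this file; the location
 * value is a placeholder and any further storage flags come from StorageConf/NetworkConf):
 *
 * {{{
 *   Backup.main(Array("--backup_location", "file:///var/backups/marathon" /*, storage flags */))
 * }}}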
*/
object Backup extends BackupRestoreAction {
def main(args: Array[String]): Unit = {
val config = new BackupConfig(args.toVector)
action(config, _.backup(config.backupLocation()))
}
}
/**
* Command line utility to restore a Marathon state from an external storage location.
*
* Please note: restoring a backup will overwrite all existing data in the store.
 * All changes that were applied between the creation of this snapshot and the current state will be lost!
*
* There are several command line parameters to define the exact behaviour and location.
 * Please use --help to see all command line parameters.
*/
object Restore extends BackupRestoreAction {
def main(args: Array[String]): Unit = {
val config = new BackupConfig(args.toVector)
action(config, _.restore(config.backupLocation()))
}
}
| guenter/marathon | src/main/scala/mesosphere/marathon/core/storage/backup/Backup.scala | Scala | apache-2.0 | 3,741 |
package sangria.validation.rules
import sangria.ast
import sangria.ast.AstVisitorCommand._
import sangria.renderer.{QueryRenderer, SchemaRenderer}
import sangria.schema.LeafType
import sangria.validation._
import scala.language.postfixOps
/**
* Variables are input types
*
* A GraphQL operation is only valid if all the variables it defines are of
* input types (scalar, enum, or input object).
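 *
 * For example, `query ($id: String!) { ... }` declares a variable with a valid (scalar) input
 * type, whereas declaring a variable with an object or interface output type is rejected here.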
*/
class VariablesAreInputTypes extends ValidationRule {
override def visitor(ctx: ValidationContext) = new AstValidatingVisitor {
override val onEnter: ValidationVisit = {
case ast.VariableDefinition(name, tpe, _, pos) =>
ctx.schema.getInputType(tpe) match {
case Some(_) => Right(Continue)
case None => Left(Vector(
NonInputTypeOnVarViolation(name, QueryRenderer.render(tpe), ctx.sourceMapper, tpe.position.toList)))
}
}
}
} | narahari92/sangria | src/main/scala/sangria/validation/rules/VariablesAreInputTypes.scala | Scala | apache-2.0 | 895 |
/*
* Copyright 2009-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.norbert
package network
package netty
import java.util.concurrent.Executors
import org.jboss.netty.bootstrap.ServerBootstrap
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory
import org.jboss.netty.handler.logging.LoggingHandler
import org.jboss.netty.handler.codec.frame.{LengthFieldBasedFrameDecoder, LengthFieldPrepender}
import org.jboss.netty.handler.codec.protobuf.{ProtobufDecoder, ProtobufEncoder}
import org.jboss.netty.channel.group.DefaultChannelGroup
import server._
import cluster.{ClusterClient, ClusterClientComponent}
import protos.NorbertProtos
import norbertutils.NamedPoolThreadFactory
import org.jboss.netty.channel.{Channels, ChannelPipelineFactory}
import scala.beans.BeanProperty
class NetworkServerConfig {
@BeanProperty var clusterClient: ClusterClient = _
@BeanProperty var serviceName: String = _
@BeanProperty var zooKeeperConnectString: String = _
@BeanProperty var zooKeeperSessionTimeoutMillis = 30000
@BeanProperty var requestTimeoutMillis = NetworkDefaults.REQUEST_TIMEOUT_MILLIS
  @BeanProperty var responseGenerationTimeoutMillis = -1 // turned off by default
@BeanProperty var requestThreadCorePoolSize = NetworkDefaults.REQUEST_THREAD_CORE_POOL_SIZE
@BeanProperty var requestThreadMaxPoolSize = NetworkDefaults.REQUEST_THREAD_MAX_POOL_SIZE
@BeanProperty var requestThreadKeepAliveTimeSecs = NetworkDefaults.REQUEST_THREAD_KEEP_ALIVE_TIME_SECS
@BeanProperty var threadPoolQueueSize = NetworkDefaults.REQUEST_THREAD_POOL_QUEUE_SIZE
@BeanProperty var requestStatisticsWindow = NetworkDefaults.REQUEST_STATISTICS_WINDOW
@BeanProperty var avoidByteStringCopy = NetworkDefaults.AVOID_BYTESTRING_COPY
@BeanProperty var shutdownPauseMultiplier = NetworkDefaults.SHUTDOWN_PAUSE_MULTIPLIER
}
class NettyNetworkServer(serverConfig: NetworkServerConfig) extends NetworkServer with ClusterClientComponent with NettyClusterIoServerComponent
with MessageHandlerRegistryComponent with MessageExecutorComponent {
val clusterClient = if (serverConfig.clusterClient != null) serverConfig.clusterClient else ClusterClient(serverConfig.serviceName, serverConfig.zooKeeperConnectString,
serverConfig.zooKeeperSessionTimeoutMillis)
val messageHandlerRegistry = new MessageHandlerRegistry
val messageExecutor = new ThreadPoolMessageExecutor(clientName = clusterClient.clientName,
serviceName = clusterClient.serviceName,
messageHandlerRegistry = messageHandlerRegistry,
requestTimeout = serverConfig.requestTimeoutMillis,
corePoolSize = serverConfig.requestThreadCorePoolSize,
maxPoolSize = serverConfig.requestThreadMaxPoolSize,
keepAliveTime = serverConfig.requestThreadKeepAliveTimeSecs,
maxWaitingQueueSize = serverConfig.threadPoolQueueSize,
requestStatisticsWindow = serverConfig.requestStatisticsWindow,
responseGenerationTimeoutMillis = serverConfig.responseGenerationTimeoutMillis)
val executor = Executors.newCachedThreadPool(new NamedPoolThreadFactory("norbert-server-pool-%s".format(clusterClient.serviceName)))
val bootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(executor, executor))
val channelGroup = new DefaultChannelGroup("norbert-server-group-%s".format(clusterClient.serviceName))
val requestContextEncoder = new RequestContextEncoder()
bootstrap.setOption("reuseAddress", true)
bootstrap.setOption("tcpNoDelay", true)
bootstrap.setOption("child.tcpNoDelay", true)
bootstrap.setOption("child.reuseAddress", true)
val serverFilterChannelHandler = new ServerFilterChannelHandler(messageExecutor)
val serverChannelHandler = new ServerChannelHandler(
clientName = clusterClient.clientName,
serviceName = clusterClient.serviceName,
channelGroup = channelGroup,
messageHandlerRegistry = messageHandlerRegistry,
messageExecutor = messageExecutor,
requestStatisticsWindow = serverConfig.requestStatisticsWindow,
avoidByteStringCopy = serverConfig.avoidByteStringCopy)
bootstrap.setPipelineFactory(new ChannelPipelineFactory {
val loggingHandler = new LoggingHandler
val protobufDecoder = new ProtobufDecoder(NorbertProtos.NorbertMessage.getDefaultInstance)
val requestContextDecoder = new RequestContextDecoder
val frameEncoder = new LengthFieldPrepender(4)
val protobufEncoder = new ProtobufEncoder
val handler = serverChannelHandler
def getPipeline = {
val p = Channels.pipeline
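      // Netty 3 pipeline: inbound (upstream) requests flow frameDecoder -> protobufDecoder ->
      // requestContextDecoder -> requestFilterHandler -> requestHandler; outbound (downstream)
      // responses flow requestContextEncoder -> protobufEncoder -> frameEncoder.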
if (log.debugEnabled) p.addFirst("logging", loggingHandler)
p.addLast("frameDecoder", new LengthFieldBasedFrameDecoder(Int.MaxValue, 0, 4, 0, 4))
p.addLast("protobufDecoder", protobufDecoder)
p.addLast("frameEncoder", frameEncoder)
p.addLast("protobufEncoder", protobufEncoder)
p.addLast("requestContextDecoder", requestContextDecoder)
p.addLast("requestContextEncoder", requestContextEncoder)
p.addLast("requestFilterHandler", serverFilterChannelHandler)
p.addLast("requestHandler", handler)
p
}
})
def setRequestTimeoutMillis(newValue : Long) = {
messageExecutor.setRequestTimeout(newValue)
}
val clusterIoServer = new NettyClusterIoServer(bootstrap, channelGroup)
override def shutdown = {
if (serverConfig.shutdownPauseMultiplier > 0)
{
markUnavailable
Thread.sleep(serverConfig.shutdownPauseMultiplier * serverConfig.zooKeeperSessionTimeoutMillis)
}
if (serverConfig.clusterClient == null) clusterClient.shutdown else super.shutdown
    // Change the sequence so that we do not accept any more connections from clients,
    // as existing connections could feed us new norbert messages
serverChannelHandler.shutdown
messageExecutor.shutdown
// requestContextEncoder.shutdown
}
}
| nickhristov/norbert | network/src/main/scala/com/linkedin/norbert/network/netty/NettyNetworkServer.scala | Scala | apache-2.0 | 6,797 |
/*
* Copyright 2017 Zhang Di
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dizhang.seqspark.assoc
import breeze.numerics._
import breeze.linalg.{CSCMatrix => CM, DenseMatrix => DM, DenseVector => DV, _}
import breeze.numerics.{lgamma, pow}
import breeze.stats.distributions.ChiSquared
import org.dizhang.seqspark.assoc.SKATO._
import org.dizhang.seqspark.stat.HypoTest.{NullModel => NM}
import org.dizhang.seqspark.stat._
import org.dizhang.seqspark.util.General._
import org.dizhang.seqspark.numerics.Integrate
import org.slf4j.LoggerFactory
import scala.language.existentials
/**
* optimal SKAT test
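  * (SKAT-O: combines SKAT statistics over a grid of correlation values rho; the final p-value
  * integrates out the shared one-degree-of-freedom chi-square component, see `integrand` below)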
*/
object SKATO {
val logger = LoggerFactory.getLogger(getClass)
val RhosOld = (0 to 9).map(x => x * 1.0/10.0).toArray :+ 0.999
val RhosAdj = Array(0.0, 0.01, 0.04, 0.09, 0.16, 0.25, 0.5, 0.999)
/** the GK integration size */
val GKSize: Int = 21
def apply(nullModel: NM,
x: Encode.Coding,
method: String): SKATO = {
val nmf = nullModel match {
case NM.Simple(y, b) => NM.Fit(y, b)
case NM.Mutiple(y, c, b) => NM.Fit(y, c, b)
case nm: NM.Fitted => nm
}
method match {
case "liu"|"liu.mod"|"optimal.moment"|"optimal.moment.adj" =>
LiuModified(nmf, x.asInstanceOf[Encode.Rare], method)
case _ =>
Davies(nmf, x.asInstanceOf[Encode.Rare], method)
}
}
def getParameters(p0sqrtZ: DM[Double],
rs: Array[Double],
pi: Option[DV[Double]] = None,
resampled: Option[DM[Double]] = None) : Option[Parameters] = {
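    // Computes the moments (muQ, varQ, kurQ), eigenvalues and per-rho tau terms describing the
    // null distribution of the SKAT-O statistic. w3 below is the "mixture of chi-squares" part
    // left after projecting each column of p0sqrtZ onto the column mean; when resampled residuals
    // are supplied, the variance and kurtosis are estimated from them instead of the asymptotic
    // eigenvalue formulas.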
val numSamples = p0sqrtZ.rows
val numVars = p0sqrtZ.cols
val meanZ: DV[Double] = sum(p0sqrtZ(*, ::))/numVars.toDouble
val meanZmat: DM[Double] = DM.zeros[Double](numSamples, numVars)
meanZmat(::, *) := meanZ
//val cof1 = (meanZ.t * meanZmat).t/sum(pow(meanZ, 2))
val cof1 = (meanZ.t * p0sqrtZ).t / sum(pow(meanZ, 2))
val iterm1 = meanZmat * diag(cof1)
val iterm2 = p0sqrtZ - iterm1
/**
* w3 is the mixture chisq term
        */
val w3 = iterm2.t * iterm2
val varZeta = sum((iterm1.t * iterm1) *:* w3) * 4
val moments: Option[(Double, Double, Double, DV[Double])] =
if (resampled.isEmpty) {
SKAT.getLambda(w3).map{l =>
val m = sum(l)
val v = 2 * sum(pow(l, 2)) + varZeta
val k = sum(pow(l, 4))/sum(pow(l, 2)).square * 12
(m,v,k, l)
}
} else {
SKAT.getLambdaU(w3).map{
case (l, u) =>
val m = sum(l)
val qTmp = pow(resampled.get * iterm2, 2)
val qs = sum(qTmp(*, ::))
val dis = new LCCSResampling(l, u, pi.get, qs)
(m, dis.varQ + varZeta, dis.kurQ, l)
}
}
moments.map{
case (m, v, k, l) =>
val sumCof2 = sum(pow(cof1, 2))
val meanZ2 = sum(pow(meanZ, 2))
lazy val taus = rs.map{r =>
meanZ2 * (numVars.toDouble.square * r + (1 - r) * sumCof2)}
Parameters(m, v, k, l, varZeta, taus)
}
}
/**
* Why Not use the Kurtosis in the LCCSResampling???
*
* Compute the Kurtosis for the generalized SKAT statistic Q(rho)
* (1 - rho) * kappa + tau(rho) * eta
*
* kappa ~ LCCS(lambda, df = Ones) + Zeta
* eta ~ ChiSquared(df = 1)
* */
def getKurtosis(df1: Double, // df of kappa
df2: Double, // df of eta, usually 1.0
v1: Double, // varQ = Var(Qs) + Var(Zeta)
a1: Double, // 1 - rho(i)
a2: Double // tau(i)
): Double = {
val v2 = 2 * df2
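    // Fourth central moments of the two independent components: for a chi-square-style term with
    // variance v and df f, E[(X - mu)^4] = (12/f + 3) * v^2; s4 combines them for a1*kappa + a2*eta
    // using independence (cross term 6 * a1^2 * a2^2 * v1 * v2), and k is the resulting excess kurtosis.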
val s41 = (12/df1 + 3) * v1.square
val s42 = (12/df2 + 3) * v2.square
val s4 = pow(a1, 4) * s41 + pow(a2, 4) * s42 + 6 * a1.square * a2.square * v1 * v2
val s2 = a1.square * v1 + a2.square * v2
val k = s4/s2.square - 3
if (k < 0) 0.0001 else k
}
@SerialVersionUID(303L)
case class Parameters(muQ: Double,
varQ: Double,
kurQ: Double,
lambda: DV[Double],
varZeta: Double,
taus: Array[Double]) extends Serializable {
def df: Double = 12.0/kurQ
override def toString: String = {
s"""muQ: $muQ
|varQ: $varQ
|kurQ: $kurQ
|lambda: ${lambda.toArray.mkString(",")}
|varZeta: $varZeta
|taus: ${taus.mkString(",")}
""".stripMargin
}
}
final case class Moments(muQ: Double, varQ: Double, df: Double)
trait AsymptoticKur extends SKATO {
lazy val paramOpt = getParameters(P0SqrtZ, rhos)
lazy val lambdasOpt: Option[Array[DV[Double]]] = {
val res = vcs.map(vc => SKAT.getLambda(vc))
if (res.exists(_.isEmpty)) {
None
} else {
Some(res.map(_.get))
}
}
def isDefined = paramOpt.isDefined && lambdasOpt.isDefined
def lambdas = lambdasOpt.get
def pValues = {
method match {
case "optimal.mod"|"optimal.adj"|"optimal.moment.adj" =>
lambdas.zip(qScores).map{
case (l, q) =>
val cdf = LCCSDavies.Simple(l).cdf(q)
if (cdf.pvalue >= 1.0 || cdf.pvalue <= 0.0) {
1.0 - LCCSLiu.Modified(l).cdf(q).pvalue
} else {
1.0 - cdf.pvalue
}
}
case _ =>
lambdas.zip(qScores).map{case (l, q) =>
1.0 - LCCSLiu.Modified(l).cdf(q).pvalue
}
}
}
def pMinQuantiles = {
lambdas.map{lb =>
val lm = LCCSLiu.Modified(lb)
val df = lm.df
val chiDis = new ChiSquared(df)
val qOrig = chiDis.inverseCdf(1.0 - pMin)
(qOrig - df)/(2 * df).sqrt * lm.sigmaQ + lm.muQ
}
}
}
@SerialVersionUID(7727760101L)
case class Davies(nullModel: NM.Fitted,
x: Encode.Rare,
method: String) extends SKATO with AsymptoticKur {
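    // Integrand of the one-dimensional SKAT-O integral over the shared chi-square(1) variable x:
    // for each abscissa take the minimum over rho of (qmin(rho) - tau(rho) * x) / (1 - rho),
    // evaluate the Davies CDF of the remaining mixture of chi-squares there (falling back to 1.0
    // for very large cutoffs), and weight by the chi-square(1) density df1pdf(x).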
def integrand(x: DV[Double]): DV[Double] = {
require(x.length == GKSize)
val tmp = (pmqDM - (tauDM(*, ::) *:* x)) /:/ rhoDM
val kappa = min(tmp(::, *)).t
val F = kappa.map{k =>
if (k > sum(param.lambda) * 1e4) {
1.0
} else {
val cutoff = (k - param.muQ) * (param.varQ - param.varZeta).sqrt/param.varQ.sqrt + param.muQ
LCCSDavies.Simple(param.lambda).cdf(cutoff).pvalue
}
}
//logger.info(s"F: $F")
F *:* df1pdf(x)
}
}
@SerialVersionUID(7727760201L)
trait LiuPValue extends SKATO {
/**
def integralFunc(x: Double): Double = {
val tmp1 = tauDV * x
val tmp = (pmqDV - tmp1) :/ rDV
val tmpMin = min(tmp)
val tmpQ = (tmpMin - param.muQ)/param.varQ.sqrt * (2 * df).sqrt + df
term1.cdf(tmpQ) * term2.pdf(x)
}
*/
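    // Same structure as Davies.integrand, but the mixture CDF is replaced by the moment-matched
    // chi-square approximation dfcdf (Liu-style), which is also inherited by the small-sample adjustment.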
def integrand(x: DV[Double]): DV[Double] = {
require(x.length == GKSize)
val tmp = (pmqDM - (tauDM(*, ::) *:* x)) /:/ rhoDM
val kappa = min(tmp(::, *)).t
//val cutoff = (kappa - param.muQ) * (param.varQ - param.varZeta).sqrt/param.varQ.sqrt + param.muQ
val cutoff = (kappa - param.muQ)/param.varQ.sqrt * (2 * df).sqrt + df
//logger.debug(s"x: $x")
//logger.debug(s"tmpMin: $tmpMin")
//logger.debug(s"tmpQ: $tmpQ")
/**
if (tmpQ.exists(_.isInfinity)) {
(s"x:${x.toArray.mkString(",")}\n" +
s"lambdas: ${lambdas.map(_.toArray.mkString(",")).mkString("\n")}\n" +
s"lambdas2: ${lambdas2.map(_.toArray.mkString(",")).mkString("\n")}\n" +
s"qscores: ${qScores.mkString(",")}\n" +
s"pvalues: ${pValues.mkString(",")}\n" +
s"pmqDV: ${pmqDV.toArray.mkString(",")}\n" +
s"rDV: ${rDV.toArray.mkString(",")}\n" +
s"tauDV: ${tauDV.toArray.mkString(",")}\n" +
s"tmpMin: ${tmpMin.toArray.mkString(",")}\n" +
s"param: ${param.toString}\n" +
s"tmpQ: ${tmpQ.toArray.mkString(",")}\n").toDouble
}
*/
val res = dfcdf(cutoff.map(q => if (q < 0) 0.0 else q)) *:* df1pdf(x)
//logger.debug(s"res: $res")
res
}
}
@SerialVersionUID(7727760301L)
case class LiuModified(nullModel: NM.Fitted,
x: Encode.Rare,
method: String)
extends LiuPValue with AsymptoticKur
case class SmallSampleAdjust(nullModel: NM.Fitted,
x: Encode.Rare,
resampled: DM[Double],
method: String)
extends LiuPValue
{
lazy val paramOpt = getParameters(P0SqrtZ, rhos, Some(nullModel.b), Some(resampled))
lazy val lambdasUsOpt = {
val res = vcs.map { vc => SKAT.getLambdaU(vc) }
if (res.exists(_.isEmpty)) {
None
} else {
Some((res.map(_.get._1), res.map(_.get._2)))
}
}
def isDefined = paramOpt.isDefined && lambdasUsOpt.isDefined
lazy val lambdas = lambdasUsOpt.get._1
lazy val us = lambdasUsOpt.get._2
lazy val simScores = resampled * geno
lazy val pValues = {
rhos.indices.map{i =>
val simQs = simScores(*, ::).map(s => s.t * kernels(i) * s)
1.0 - new LCCSResampling(lambdas(i), us(i), nullModel.b, simQs).cdf(qScores(i)).pvalue
}.toArray
}
lazy val pMinQuantiles = {
rhos.indices.map{i =>
val varRho = (1 - rhos(i)).sqrt * param.varQ + param.taus(i).sqrt * 2
val kurRho = getKurtosis(param.df, 1.0, param.varQ, 1 - rhos(i), param.taus(i))
val dfRho = 12.0/kurRho
val qOrig = new ChiSquared(dfRho).inverseCdf(1 - pMin)
(qOrig - dfRho)/(2 * dfRho).sqrt * varRho.sqrt + sum(lambdas(i))
}.toArray
}
}
}
@SerialVersionUID(7727760001L)
trait SKATO extends AssocMethod with AssocMethod.AnalyticTest {
def nullModel: NM.Fitted
def x: Encode.Rare
def geno: CM[Double] = x.coding
def numVars: Int = x.coding.cols
//lazy val misc = x.config.misc
def method: String
lazy val rhos: Array[Double] = {
method match {
case "optimal.adj" => RhosAdj
case "optimal" => RhosOld
case _ => RhosAdj
//case _ => misc.rCorr
}
}
lazy val P0SqrtZ: DM[Double] = {
val z: CM[Double] = geno
val nm = nullModel
val sigma = sqrt(nm.b)
val xsInfoInv = (nm.xs(::, *) *:* sigma) * nm.invInfo * nm.a
(- xsInfoInv * (nm.xs.t * colMultiply(z, nm.b)) + colMultiply(z, sigma)) / sqrt(nm.a)
}
def paramOpt: Option[Parameters]
def param: Parameters = paramOpt.get
def df = param.df
lazy val scoreTest: ScoreTest = ScoreTest(nullModel, geno)
lazy val score = scoreTest.score
lazy val kernels: Array[DM[Double]] = {
val i = DM.eye[Double](numVars)
val o = DM.ones[Double](numVars, numVars)
rhos.map(r => (1 - r) * i + r * o)
}
lazy val LTs: Array[DM[Double]] = kernels.map(x => cholesky(x))
lazy val qScores: Array[Double] = kernels.map(k => score.t * k * score)
lazy val P0Z = P0SqrtZ.t * P0SqrtZ
lazy val vcs = LTs.map(lt => lt.t * P0Z * lt)
/**
lazy val vcs2 = kernels.map{k =>
ScoreTest(nullModel.STNullModel, geno * cholesky(k).t).variance
}
lazy val lambdas2 = vcs2.map(vc =>
eigSym.justEigenvalues(vc)
)
*/
def isDefined: Boolean
def pValues: Array[Double]
def pMin = min(pValues)
def lambdas: Array[DV[Double]]
def pMinQuantiles: Array[Double]
def result: AssocMethod.SKATOResult = {
if (isDefined) {
try {
val res = Integrate(integrand, 0.0, 40.0, 1e-25, 1e-4, 200)
val info = s"pvalues=${pValues.mkString(",")};abserr=${res.abserr};ier=${res.iEr};nsub=${res.nSub};neval=${res.nEval}"
AssocMethod.SKATOResult(x.vars, Some(pMin), Some(1 - res.value), info)
} catch {
case e: Exception =>
val info = s"InegrationError;pvalues=${pValues.mkString(",")}"
AssocMethod.SKATOResult(x.vars, Some(pMin), None, info)
}
} else {
AssocMethod.SKATOResult(x.vars, None, None, "failed to get the p values")
}
}
/**
* help variables for the integrand function
* basically
*
* */
//lazy val term1 = new ChiSquared(df)
//lazy val term2 = new ChiSquared(1)
//lazy val tauDV = DV(param.taus)
//lazy val pmqDV = DV(pMinQuantiles)
//lazy val rDV = DV(rhos.map(1.0 - _))
lazy val tauDM = tile(DV(param.taus), 1, GKSize)
lazy val pmqDM = tile(DV(pMinQuantiles), 1, GKSize)
lazy val rhoDM = tile(DV(rhos.map(1.0 - _)), 1, GKSize)
def df1pdf(x: DV[Double]): DV[Double] = {
(pow(x, -0.5) *:* exp(-x/2.0))/(2.sqrt * exp(lgamma(0.5)))
}
def dfcdf(x: DV[Double]): DV[Double] = {
try {
gammp(df/2, x/2.0)
} catch {
case e: Exception =>
val xs = x.toArray.mkString(",")
println(s"error: param: ${param.toString}")
DV[Double](s"param: ${param.toString}".toDouble)
}
}
def integrand(x: DV[Double]): DV[Double]
/** adaptive pvalue
* use the quadpack QAGS now
def pValue: Option[Double] = {
if (isDefined) {
val res = Integrate(integrand, 0.0, 40.0, 1e-25, 1e-6, 200)
if (res.iEr == 0) {
Some(1.0 - res.value)
} else {
None
}
} else {
None
}
}
*/
}
| statgenetics/seqspark | src/main/scala/org/dizhang/seqspark/assoc/SKATO.scala | Scala | apache-2.0 | 13,746 |
package pimpathon
import scala.language.{dynamics, higherKinds, implicitConversions}
import _root_.argonaut.Json.{jFalse, jNull, jString, jTrue}
import _root_.argonaut.JsonObjectMonocle.{jObjectEach, jObjectFilterIndex}
import _root_.argonaut.JsonMonocle.{jArrayPrism, jObjectPrism}
import _root_.argonaut.{CodecJson, DecodeJson, DecodeResult, EncodeJson, HCursor, Json, JsonMonocle, JsonNumber, JsonObject, JsonParser, Parse, PrettyParams}
import _root_.java.io.File
import _root_.scalaz.{Applicative, \/}
import io.gatling.jsonpath.AST._
import io.gatling.jsonpath._
import monocle.function.Each.{each, listEach}
import monocle.function.FilterIndex.{filterIndex, listFilterIndex}
import monocle.{Iso, Optional, Prism, Traversal}
import pimpathon.any.AnyPimps
import pimpathon.boolean.BooleanPimps
import pimpathon.file.FilePimps
import pimpathon.function.{Predicate, PredicatePimps}
import pimpathon.list.ListOfTuple2Pimps
import pimpathon.map.MapPimps
import pimpathon.string.StringPimps
import scala.{PartialFunction ⇒ ~>}
import scala.collection.immutable.{ListMap, Map ⇒ ▶:}
import scala.util.matching.Regex
import pimpathon.either._
import pimpathon.list._
import scala.util.Try
object argonaut {
private[pimpathon] implicit class RegexMatcher(val self: StringContext) extends AnyVal {
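    // Lets string interpolators be used as patterns, e.g. r"${head}\[${Int(index)}\]" below:
    // every interpolation hole is turned into a (.+) capturing group of the resulting Regex.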
def r: Regex = self.parts.mkString("(.+)").r
}
implicit class JsonCompanionFrils(val self: Json.type) extends AnyVal {
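    // Builds a nested Json object from properties-style data: each value is parsed as JSON and
    // appended under the path obtained by splitting the key on '.', e.g. "a.b" -> "1" becomes
    // {"a": {"b": 1}}; the first parse failure is returned as Left(key -> error).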
def fromProperties(properties: Map[String, String]): Either[(String, String), Json] = {
properties.apoFold[Json, (String, String)](Json.jEmptyObject) {
case (acc, (key, value)) => for {
json <- JsonParser.parse(value).leftMap(key -> _)
} yield acc.append(key.split("\\.").toList.emptyTo(List(key)), json)
}
}
def readFrom(file: File): Option[Json] = for {
content <- if (file.exists()) Some(file.readString) else None
json <- Parse.parse(content) match {
case Left(_) ⇒ None
case Right(json) ⇒ Some(json)
}
} yield json
}
implicit class JsonFrills(val self: Json) extends AnyVal {
def descendant: Descendant[Json, Json, Json] =
Descendant(self, List(Traversal.id[Json]), () ⇒ List("" -> Traversal.id[Json]))
def descendant(paths: String*): Descendant[Json, Json, Json] = Descendant(self,
paths.map(Descendant.Descender.traversal)(collection.breakOut),
() ⇒ paths.flatMap(Descendant.Descender.ancestors)(collection.breakOut)
)
def compact: Json = filterNulls
def filterNulls: Json = filterR(_ != jNull)
def filterKeys(p: Predicate[String]): Json = self.withObject(_.filterKeys(p))
def filterValues(p: Predicate[Json]): Json = self.withObject(_.filterValues(p))
def renameField(from: String, to: String): Json = self.withObject(_.renameField(from, to))
def renameFields(fromTos: (String, String)*): Json = self.withObject(_.renameFields(fromTos: _*))
def addIfMissing(name: String, value: Json): Json = self.withObject(_.addIfMissing(name, value))
def addIfMissing(assocs: Json.JsonAssoc*): Json = self.withObject(_.addIfMissing(assocs: _*))
def removeIfPresent(name: String, value: Json): Json = self.withObject(_.removeIfPresent(name, value))
def removeIfPresent(assocs: Json.JsonAssoc*): Json = self.withObject(_.removeIfPresent(assocs: _*))
def removeFields(names: String*): Json = self.withObject(_.removeFields(names: _*))
// def delete(path: String): Json = {
// path.split("/").toList.reverse match {
// case head :: Nil ⇒ descendant("").obj.delete(head)
// case head :: tail ⇒ descendant(tail.reverse.mkString("/")).obj.delete(head)
// case _ ⇒ Json.jNull
// }
// }
def filterR(p: Predicate[Json]): Json =
p.cond(self.withObject(_.filterR(p)).withArray(_.filterR(p)), jNull)(self)
def writeTo(file: File): Json =
self.tap(_ ⇒ file.writeString(indent2))
def indent2: String =
PrettyParams.spaces2.copy(preserveOrder = true).pretty(self)
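    // Appends `value` under the given key path, creating intermediate objects as needed; a final
    // segment of the form "name[i]" appends to the array at "name" (the index itself is ignored).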
def append(keys: List[String], value: Json): Json = keys match {
case Nil => self
case ArrayIndex(head, _) :: Nil ⇒ self.withObject(obj ⇒ {
obj + (head, obj(head).fold(Json.jArrayElements(value))(_.withArray(arr ⇒ arr :+ value)))
})
case head :: Nil => self.withObject(obj ⇒ obj + (head, value))
case head :: tail => self.withObject(obj ⇒ {
val subObject = obj(head).getOrElse(Json.jEmptyObject)
obj + (head, subObject.append(tail, value))
})
}
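    // Flattens nested Json into (path, leaf) pairs, e.g. {"a": {"b": 1}, "c": [true]} becomes
    // List("a/b" -> 1, "c[0]" -> true); object fields starting with ':' are joined with ':'
    // instead of '/'. `unpivot` below is the inverse, rebuilding the structure via `append`.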
def pivot: List[(String, Json)] = {
def recurse(path: String, current: Json): List[(String, Json)] = current match {
case jObjectPrism(obj) => obj.toList.flatMap {
case (r":$field", value) => recurse(s"$path:$field", value)
case (field, value) => recurse(s"$path/$field", value)
}
case jArrayPrism(arr) ⇒ arr.zipWithIndex.flatMap {
case (value, index) ⇒ recurse(s"$path[$index]", value)
}
case other => List(path -> other)
}
recurse("", self).mapFirst(_.stripPrefix("/"))
}
def unpivot: Json = self.obj.fold(Json.jNull)(_.toList.map {
case (path, value) => path.split("/").toList.filter(_.nonEmpty) -> value
}.sortBy(_._1)(Ordering.Implicits.seqDerivedOrdering[List, String](keyOrdering)).foldLeft(Json.jEmptyObject) {
case (json, (fragments, value)) => json.append(fragments, value)
})
def merge(other: Json): Json = {
def recurse(lhsJson: Json, rhsJson: Json): Option[Json] = (lhsJson, rhsJson) partialMatch {
case (jObjectPrism(lhs), jObjectPrism(rhs)) => Json.jObjectFields(lhs.toMap.zipWith[Json, Json](rhs.toMap) {
case (Some(l), Some(r)) if r != Json.jNull => recurse(l, r).getOrElse(r)
case (Some(l), None) => l
case (None, Some(r)) => r
}.toList: _*)
}
recurse(self, other).getOrElse(self)
}
}
private val keyOrdering: Ordering[String] = Ordering.fromLessThan {
case (ArrayIndex(lprefix, l), ArrayIndex(rprefix, r)) if lprefix == rprefix ⇒ l < r
case (l, r) ⇒ l < r
}
implicit class CodecJsonCompanionFrills(val self: CodecJson.type) extends AnyVal {
def defer[A](deferred: ⇒ CodecJson[A]): CodecJson[A] =
CodecJson.derived(EncodeJson.defer(deferred.Encoder), DecodeJson.defer(deferred.Decoder))
}
implicit class CodecJsonFrills[A](val self: CodecJson[A]) extends AnyVal {
def renameField(from: String, to: String): CodecJson[A] = afterEncode(_.renameField(from, to))
def renameFields(fromTos: (String, String)*): CodecJson[A] = afterEncode(_.renameFields(fromTos: _*))
def addIfMissing(name: String, value: Json): CodecJson[A] = afterEncode(_.addIfMissing(name, value))
def addIfMissing(assocs: Json.JsonAssoc*): CodecJson[A] = afterEncode(_.addIfMissing(assocs: _*))
def removeIfPresent(name: String, value: Json): CodecJson[A] = afterEncode(_.removeIfPresent(name, value))
def removeIfPresent(assocs: Json.JsonAssoc*): CodecJson[A] = afterEncode(_.removeIfPresent(assocs: _*))
def removeFields(names: String*): CodecJson[A] = afterEncode(_.removeFields(names: _*))
def beforeDecode(f: Json ⇒ Json): CodecJson[A] = compose(f)
def afterDecode(f: A ⇒ A): CodecJson[A] = derived(encoder ⇒ encoder)(_ map f)
def beforeEncode(f: A ⇒ A): CodecJson[A] = derived(_ contramap f)(decoder ⇒ decoder)
def afterEncode(f: Json ⇒ Json): CodecJson[A] = andThen(f)
def andThen(f: Json ⇒ Json): CodecJson[A] = derived(_ andThen f)(decoder ⇒ decoder)
def compose(f: Json ⇒ Json): CodecJson[A] = derived(encoder ⇒ encoder)(_ compose f)
def xmapDisjunction[B](f: A ⇒ String \/ B)(g: B ⇒ A): CodecJson[B] = derived(_ beforeEncode g)(_ afterDecode f)
def derived[B](f: EncodeJson[A] ⇒ EncodeJson[B])(g: DecodeJson[A] ⇒ DecodeJson[B]): CodecJson[B] =
CodecJson.derived[B](f(self.Encoder), g(self.Decoder))
def traversalToJson: Traversal[A, Json] = new Traversal[A, Json] {
def modifyF[F[_]](f: Json ⇒ F[Json])(a: A)(implicit F: Applicative[F]): F[A] = {
F.map[Json, A](f(self.encode(a)))(json ⇒ self.decodeJson(json).getOr(a))
}
}
def wrapExceptions(name: String): CodecJson[A] = CodecJson.derived[A](
EncodeJson[A](a => wrapExceptions(s"Encode($name)", self.encode(a))),
DecodeJson[A](c => wrapExceptions(s"Decode($name)", self.decode(c)))
)
private def wrapExceptions[X](description: String, f: => X): X = try { f } catch {
case ce: CodecException => throw description :: ce
case e: Exception => throw CodecException(List(description), e)
}
}
implicit class CodecJsonMapFrills[K, V](val self: CodecJson[K ▶: V]) extends AnyVal {
def xmapEntries[C, W](kvcw: (K, V) ⇒ (C, W))(cwkv: (C, W) ⇒ (K, V)): CodecJson[C ▶: W] =
self.derived[C ▶: W](_ contramapEntries cwkv)(_ mapEntries kvcw)
def xmapKeys[C](kc: K ⇒ C)(ck: C ⇒ K): CodecJson[C ▶: V] = self.derived(_ contramapKeys ck)(_ mapKeys kc)
def xmapValues[W](vw: V ⇒ W)(wv: W ⇒ V): CodecJson[K ▶: W] = self.derived(_ contramapValues wv)(_ mapValues vw)
}
implicit class DecodeJsonCompanionFrills(val self: DecodeJson.type) extends AnyVal {
def defer[A](deferred: ⇒ DecodeJson[A]): DecodeJson[A] = new DecodeJson[A] {
def decode(c: HCursor): DecodeResult[A] = _deferred.decode(c)
private lazy val _deferred: DecodeJson[A] = deferred
}
}
implicit class DecodeJsonFrills[A](val self: DecodeJson[A]) extends AnyVal {
def renameField(from: String, to: String): DecodeJson[A] = beforeDecode(_.renameField(from, to))
def renameFields(fromTos: (String, String)*): DecodeJson[A] = beforeDecode(_.renameFields(fromTos: _*))
def addIfMissing(name: String, value: Json): DecodeJson[A] = beforeDecode(_.addIfMissing(name, value))
def addIfMissing(assocs: Json.JsonAssoc*): DecodeJson[A] = beforeDecode(_.addIfMissing(assocs: _*))
def removeIfPresent(name: String, value: Json): DecodeJson[A] = beforeDecode(_.removeIfPresent(name, value))
def removeIfPresent(assocs: Json.JsonAssoc*): DecodeJson[A] = beforeDecode(_.removeIfPresent(assocs: _*))
def removeFields(names: String*): DecodeJson[A] = beforeDecode(_.removeFields(names: _*))
def beforeDecode(f: Json ⇒ Json): DecodeJson[A] = compose(f)
def compose(f: Json ⇒ Json): DecodeJson[A] = DecodeJson[A](hc ⇒ self.decode(hc >-> f))
def upcast[B >: A]: DecodeJson[B] = self.map[B](a ⇒ a: B)
private[argonaut] def afterDecode[B](f: A ⇒ String \/ B): DecodeJson[B] = // Probably publish later
DecodeJson[B](c ⇒ self.decode(c).flatMap(a ⇒ DecodeResult[B](f(a).leftMap(_ → c.history).toEither)))
}
implicit class DecodeJsonMapFrills[K, V](val self: DecodeJson[K ▶: V]) extends AnyVal {
def mapEntries[C, W](f: (K, V) ⇒ (C, W)): DecodeJson[C ▶: W] =
self.map(_.mapEntries(k ⇒ v ⇒ f(k, v)))
def mapKeys[C](f: K ⇒ C): DecodeJson[C ▶: V] = self.map(_.mapKeysEagerly(f))
def mapValues[W](f: V ⇒ W): DecodeJson[K ▶: W] = self.map(_.mapValuesEagerly(f))
}
implicit class EncodeJsonCompanionFrills(val self: EncodeJson.type) extends AnyVal {
def defer[A](deferred: ⇒ EncodeJson[A]): EncodeJson[A] = new EncodeJson[A] {
def encode(a: A): Json = _deferred.encode(a)
private lazy val _deferred: EncodeJson[A] = deferred
}
}
implicit class EncodeJsonFrills[A](val self: EncodeJson[A]) extends AnyVal {
def renameField(from: String, to: String): EncodeJson[A] = afterEncode(_.renameField(from, to))
def renameFields(fromTos: (String, String)*): EncodeJson[A] = afterEncode(_.renameFields(fromTos: _*))
def addIfMissing(name: String, value: Json): EncodeJson[A] = afterEncode(_.addIfMissing(name, value))
def addIfMissing(assocs: Json.JsonAssoc*): EncodeJson[A] = afterEncode(_.addIfMissing(assocs: _*))
def removeIfPresent(name: String, value: Json): EncodeJson[A] = afterEncode(_.removeIfPresent(name, value))
def removeIfPresent(assocs: Json.JsonAssoc*): EncodeJson[A] = afterEncode(_.removeIfPresent(assocs: _*))
def removeFields(names: String*): EncodeJson[A] = afterEncode(_.removeFields(names: _*))
def afterEncode(f: Json ⇒ Json): EncodeJson[A] = andThen(f)
def andThen(f: Json ⇒ Json): EncodeJson[A] = EncodeJson[A](a ⇒ f(self.encode(a)))
def downcast[B <: A]: EncodeJson[B] = self.contramap[B](b ⇒ b: A)
def add(assocsFn: (A ⇒ Json.JsonAssoc)*): EncodeJson[A] = {
EncodeJson[A](a ⇒ self.encode(a).addIfMissing(assocsFn.map(assoc ⇒ assoc.apply(a)): _*))
}
private[argonaut] def beforeEncode[B](f: B ⇒ A): EncodeJson[B] = self contramap f // Probably publish later
}
implicit class EncodeJsonMapFrills[K, V](val self: EncodeJson[K ▶: V]) extends AnyVal {
def contramapEntries[C, W](f: (C, W) ⇒ (K, V)): EncodeJson[C ▶: W] =
self.contramap[C ▶: W](_.mapEntries(c ⇒ w ⇒ f(c, w)))
def contramapKeys[C](f: C ⇒ K): EncodeJson[C ▶: V] = self.contramap[C ▶: V](_.mapKeysEagerly(f))
def contramapValues[W](f: W ⇒ V): EncodeJson[K ▶: W] = self.contramap[K ▶: W](_.mapValuesEagerly(f))
}
implicit class TraversalFrills[A, B](val self: Traversal[A, B]) extends AnyVal {
def bool[That]( implicit cpf: CanPrismFrom[B, Boolean, That]): Traversal[A, That] = apply(cpf)
def number[That](implicit cpf: CanPrismFrom[B, JsonNumber, That]): Traversal[A, That] = apply(cpf)
def string[That](implicit cpf: CanPrismFrom[B, String, That]): Traversal[A, That] = apply(cpf)
def array[That]( implicit cpf: CanPrismFrom[B, List[Json], That]): Traversal[A, That] = apply(cpf)
def obj[That]( implicit cpf: CanPrismFrom[B, JsonObject, That]): Traversal[A, That] = apply(cpf)
def double[That]( implicit cpf: CanPrismFrom[B, Double, That]): Traversal[A, That] = apply(cpf)
def int[That]( implicit cpf: CanPrismFrom[B, Int, That]): Traversal[A, That] = apply(cpf)
def float[That]( implicit cpf: CanPrismFrom[B, Float, That]): Traversal[A, That] = apply(cpf)
def short[That]( implicit cpf: CanPrismFrom[B, Short, That]): Traversal[A, That] = apply(cpf)
def byte[That]( implicit cpf: CanPrismFrom[B, Byte, That]): Traversal[A, That] = apply(cpf)
def bigDecimal[That](implicit cpf: CanPrismFrom[B, BigDecimal, That]): Traversal[A, That] = apply(cpf)
def bigInt[That]( implicit cpf: CanPrismFrom[B, BigInt, That]): Traversal[A, That] = apply(cpf)
private def apply[Elem, That](canPrismFrom: CanPrismFrom[B, Elem, That]): Traversal[A, That] =
self composePrism canPrismFrom.prism
}
implicit class JsonObjectFrills(val self: JsonObject) extends AnyVal {
def filterKeys(p: Predicate[String]): JsonObject = mapCollect { case entry@(k, _) if p(k) ⇒ entry }
def filterValues(p: Predicate[Json]): JsonObject = mapCollect { case entry@(_, j) if p(j) ⇒ entry }
def removeFields(names: String*): JsonObject = {
val namesSet: Set[String] = names.toSet
mapCollect { case entry@(k, _) if !namesSet.contains(k) ⇒ entry }
}
def renameFields(fromTos: (String, String)*): JsonObject = fromTos.foldLeft(self) {
case (acc, (from, to)) ⇒ acc.renameField(from, to)
}
def renameField(from: String, to: String): JsonObject =
self(from).fold(self)(value ⇒ (self - from) + (to, value))
def addIfMissing(assocs: Json.JsonAssoc*): JsonObject = assocs.foldLeft(self) {
case (acc, (name, value)) ⇒ acc.addIfMissing(name, value)
}
def addIfMissing(name: String, value: Json): JsonObject =
self(name).fold(self + (name, value))(_ ⇒ self)
def removeIfPresent(assocs: Json.JsonAssoc*): JsonObject = assocs.foldLeft(self) {
case (acc, (name, value)) ⇒ acc.removeIfPresent(name, value)
}
def removeIfPresent(name: String, value: Json): JsonObject =
self(name).fold(self)(existing ⇒ if (existing == value) self - name else self)
def filterR(p: Predicate[Json]): JsonObject =
mapCollect { case (k, j) if p(j) ⇒ k -> j.filterR(p) }
private def mapCollect(pf: (String, Json) ~> (String, Json)): JsonObject = {
val from: ListMap[String, Json] = ListMap[String, Json](self.toList: _*)
val to: ListMap[String, Json] = from.collect(pf)
JsonObject.fromIterable(to)
}
}
implicit class TraversalToJsonFrills[A](val self: Traversal[A, Json]) extends AnyVal {
def renameField(from: String, to: String): Traversal[A, Json] =
self composeIso Iso[Json, Json](_.renameField(from, to))(_.renameField(to, from))
def renameFields(fromTos: (String, String)*): Traversal[A, Json] =
self composeIso Iso[Json, Json](_.renameFields(fromTos: _*))(_.renameFields(fromTos.map(_.swap): _*))
def descendant(path: String): Traversal[A, Json] =
Descendant.Descender(path).traversal(self, path)
}
implicit class JsonArrayFrills(val self: List[Json]) extends AnyVal {
def filterR(p: Predicate[Json]): List[Json] = self.collect { case j if p(j) ⇒ j.filterR(p) }
}
private object ArrayIndex {
def unapply(value: String): Option[(String, Int)] = value partialMatch {
case r"${head}\[${Int(index)}\]" ⇒ (head, index)
}
}
private object Int {
def unapply(value: String): Option[Int] = Try(value.toInt).toOption
}
}
private case class CodecException(descriptions: List[String], cause: Exception) extends Exception("", cause) {
setStackTrace(descriptions.map(new StackTraceElement(_, "", "", 0)).toArray)
def ::(description: String): CodecException = copy(description :: descriptions)
}
case class CanPrismFrom[From, Elem, To](prism: Prism[From, To]) {
def toList: CanPrismFrom[List[From], Elem, List[To]] =
CanPrismFrom(Prism[List[From], List[To]](la ⇒ Some(la.flatMap(prism.getOption)))(_.map(prism.reverseGet)))
def toMap[K]: CanPrismFrom[K ▶: From, Elem, K ▶: To] = CanPrismFrom(Prism[K ▶: From, K ▶: To](mapKA ⇒ {
Some(mapKA.updateValues(a ⇒ prism.getOption(a)))
})((mapKB: K ▶: To) ⇒ {
mapKB.mapValuesEagerly(prism.reverseGet)
}))
}
object CanPrismFrom {
implicit val cpfJsonToBoolean: CanPrismFrom[Json, Boolean, Boolean] = apply(JsonMonocle.jBoolPrism)
implicit val cpfJsonToJsonNumber: CanPrismFrom[Json, JsonNumber, JsonNumber] = apply(JsonMonocle.jNumberPrism)
implicit val cpfJsonToString: CanPrismFrom[Json, String, String] = apply(JsonMonocle.jStringPrism)
implicit val cpfJsonToJsonArray: CanPrismFrom[Json, List[Json], List[Json]] = apply(JsonMonocle.jArrayPrism)
implicit val cpfJsonToJsonObject: CanPrismFrom[Json, JsonObject, JsonObject] = apply(JsonMonocle.jObjectPrism)
implicit val cpfJsonToBigDecimal: CanPrismFrom[Json, BigDecimal, BigDecimal] = apply(JsonMonocle.jBigDecimalPrism)
// implicit val cpfJsonToDouble: CanPrismFrom[Json, Double, Double] = apply(jDoublePrism)
// implicit val cpfJsonToFloat: CanPrismFrom[Json, Float, Float] = apply(jFloatPrism)
implicit val cpfJsonToBigInt: CanPrismFrom[Json, BigInt, BigInt] = apply(JsonMonocle.jBigIntPrism)
implicit val cpfJsonToLong: CanPrismFrom[Json, Long, Long] = apply(JsonMonocle.jLongPrism)
implicit val cpfJsonToInt: CanPrismFrom[Json, Int, Int] = apply(JsonMonocle.jIntPrism)
implicit val cpfJsonToShort: CanPrismFrom[Json, Short, Short] = apply(JsonMonocle.jShortPrism)
implicit val cpfJsonToByte: CanPrismFrom[Json, Byte, Byte] = apply(JsonMonocle.jBytePrism)
implicit def cpfJsonToCodec[A: CodecJson]: CanPrismFrom[Json, A, A] = {
val A = CodecJson.derived[A]
apply(Prism[Json, A](json ⇒ A.decodeJson(json).toOption)(A.encode))
}
implicit def cpfl[From, Elem, To](implicit cpf: CanPrismFrom[From, Elem, To])
: CanPrismFrom[List[From], Elem, List[To]] = cpf.toList
implicit def cpfm[From, Elem, To](implicit cpf: CanPrismFrom[From, Elem, To])
: CanPrismFrom[String ▶: From, Elem, String ▶: To] = cpf.toMap
implicit def cpfJsonObjectToTypedMap[V](implicit cpf: CanPrismFrom[Json, V, V])
: CanPrismFrom[JsonObject, V, String ▶: V] = apply(jsonObjectMapIso.composePrism(cpf.toMap[String].prism))
private val jsonObjectMapIso: Iso[JsonObject, String ▶: Json] =
Iso[JsonObject, String ▶: Json](_.toMap)(map ⇒ JsonObject.fromIterable(map))
}
object Descendant {
import pimpathon.argonaut.{JsonFrills, JsonObjectFrills, TraversalFrills}
implicit class DescendantToJsonFrills[From](private val self: Descendant[From, Json, Json]) {
def renameField(from: String, to: String): From = self.modify(_.renameField(from, to))
def renameFields(fromTos: (String, String)*): From = self.modify(_.renameFields(fromTos: _*))
def addIfMissing(name: String, value: Json): From = self.modify(_.addIfMissing(name, value))
def addIfMissing(assocs: Json.JsonAssoc*): From = self.modify(_.addIfMissing(assocs: _*))
def removeIfPresent(name: String, value: Json): From = self.modify(_.removeIfPresent(name, value))
def removeIfPresent(assocs: Json.JsonAssoc*): From = self.modify(_.removeIfPresent(assocs: _*))
def removeFields(names: String*): From = self.modify(_.removeFields(names: _*))
def each: Descendant[From, Json, Json] = self composeTraversal objectValuesOrArrayElements
}
implicit class DescendantToJsonObjectFrills[From](private val self: Descendant[From, Json, JsonObject]) {
def renameField(from: String, to: String): From = self.modify(_.renameField(from, to))
def renameFields(fromTos: (String, String)*): From = self.modify(_.renameFields(fromTos: _*))
def addIfMissing(name: String, value: Json): From = self.modify(_.addIfMissing(name, value))
def addIfMissing(assocs: Json.JsonAssoc*): From = self.modify(_.addIfMissing(assocs: _*))
def removeIfPresent(name: String, value: Json): From = self.modify(_.removeIfPresent(name, value))
def removeIfPresent(assocs: Json.JsonAssoc*): From = self.modify(_.removeIfPresent(assocs: _*))
def removeFields(names: String*): From = self.modify(_.removeFields(names: _*))
def each: Descendant[From, Json, Json] = self composeTraversal monocle.function.Each.each
// def delete(key: String): From = {
// (descendant.traversal composeLens At.at(key)).set(None).apply(descendant.from)
// }
}
implicit class DescendantViaJsonFrills[From, To](private val self: Descendant[From, Json, To]) {
def firstEmptyAt: Option[String] = ancestorsList.collectFirst {
case (path, Nil) ⇒ path
}
def ancestors: Json =
Json.jObjectAssocList(ancestorsList.mapSecond(Json.jArray))
private def ancestorsList: List[(String, List[Json])] =
self.ancestorsFn().mapSecond(ancestor ⇒ ancestor.getAll(self.from))
}
object Descender {
def apply(path: String): Descender = if (path.startsWith("$")) JsonPath else Pimpathon
def traversal(path: String): Traversal[Json, Json] = traversal(Traversal.id[Json], path)
def traversal[A](start: Traversal[A, Json], path: String): Traversal[A, Json] = apply(path).traversal(start, path)
def ancestors(path: String): List[(String, Traversal[Json, Json])] = ancestors(Traversal.id[Json], path)
def ancestors[A](start: Traversal[A, Json], path: String): List[(String, Traversal[A, Json])] = apply(path).ancestors(start, path)
}
sealed trait Descender {
def descendant[A](from: A, start: Traversal[A, Json], path: String): Descendant[A, Json, Json] =
Descendant(from, List(traversal(start, path)), () ⇒ ancestors(start, path))
def traversal[A](from: Traversal[A, Json], path: String): Traversal[A, Json]
def ancestors[A](from: Traversal[A, Json], path: String): List[(String, Traversal[A, Json])]
}
case object JsonPath extends Descender {
def traversal[A](from: Traversal[A, Json], path: String): Traversal[A, Json] = {
new Parser().compile(path) match {
case Parser.Success(pathTokens, _) ⇒ new JsonPathIntegration().traversal(pathTokens, from)
case Parser.NoSuccess(msg, _) ⇒ sys.error(s"Could not parse json path: $path, $msg")
}
}
def ancestors[A](from: Traversal[A, Json], path: String): List[(String, Traversal[A, Json])] = {
new Parser().compile(path) match {
case Parser.Success(pathTokens, _) ⇒ new JsonPathIntegration().ancestors(pathTokens, from)
case Parser.NoSuccess(msg, _) ⇒ sys.error(s"Could not parse json path: $path, $msg")
}
}
private class JsonPathIntegration[A] {
def traversal(tokens: List[PathToken], start: Traversal[A, Json]): Traversal[A, Json] = tokens.foldLeft(start)(step)
def ancestors(tokens: List[PathToken], start: Traversal[A, Json]): List[(String, Traversal[A, Json])] = {
val traversals = tokens.scanLeft(start)(step)
anotate(tokens, traversals).tail
}
private def step(acc: Traversal[A, Json], token: PathToken): Traversal[A, Json] = token match {
case RecursiveField(name) ⇒ notSupported(s"RecursiveField($name)")
case RootNode ⇒ acc
case AnyField ⇒ acc.obj composeTraversal each
case MultiField(names) ⇒ acc.obj composeTraversal filterIndex(names.toSet: Set[String])
case Field(name) ⇒ acc.obj composeTraversal filterIndex(Set(name))
case RecursiveAnyField ⇒ notSupported("RecursiveAnyField")
case CurrentNode ⇒ acc
case filterToken: FilterToken ⇒ filterArrayOrObject(filterObject(filterTokenStep(filterToken)))(acc)
case ArraySlice(None, None, 1) ⇒ acc.array composeTraversal each
case ArraySlice(begin, end, step) ⇒ notSupported(s"ArraySlice($begin, $end, $step)")
case ArrayRandomAccess(indecies) ⇒ acc.array composeTraversal filterIndex(indecies.toSet: Set[Int])
case RecursiveFilterToken(filter) ⇒ notSupported(s"RecursiveFilterToken($filter)")
}
private def filterTokenStep(token: FilterToken): Predicate[Json] = token match {
case ComparisonFilter(op, lhs, rhs) ⇒ comparisonFilter(op, lhs, rhs)
case BooleanFilter(AndOperator, lhs, rhs) ⇒ filterTokenStep(lhs) and filterTokenStep(rhs)
case BooleanFilter(OrOperator, lhs, rhs) ⇒ filterTokenStep(lhs) or filterTokenStep(rhs)
case HasFilter(SubQuery(subTokens)) ⇒ hasFilter(subTokens)
}
private def anotate(tokens: List[PathToken], traversals: List[Traversal[A, Json]]): List[(String, Traversal[A, Json])] = {
tokens.map(toString).inits.map(_.mkString("")).toList.reverse.zip(traversals)
}
private def toString(token: PathToken): String = token match {
case RootNode ⇒ "$"
case AnyField ⇒ ".*"
case MultiField(names) ⇒ names.map(_.quoteWith('\'')).mkString(", ")
case Field(name) ⇒ s".$name"
case CurrentNode ⇒ "@"
case ComparisonFilter(op, lhs, rhs) ⇒ s"?(${toString(lhs)} ${toString(op)} ${toString(rhs)})"
case HasFilter(SubQuery(subTokens)) ⇒ subTokens.map(toString).mkString("")
case ArraySlice(None, None, 1) ⇒ "[*]"
case ArrayRandomAccess(indecies) ⇒ indecies.mkString(", ")
case BooleanFilter(AndOperator, lhs, rhs) ⇒ s"${toString(lhs)} && ${toString(rhs)}"
case BooleanFilter(OrOperator, lhs, rhs) ⇒ s"${toString(lhs)} || ${toString(rhs)}"
case other ⇒ throw new MatchError(s"not implemented for: $other")
}
private def toString(op: ComparisonOperator): String = op match {
case EqOperator ⇒ "=="
case NotEqOperator ⇒ "!="
case GreaterOrEqOperator ⇒ ">="
case LessOperator ⇒ "<"
case LessOrEqOperator ⇒ "<="
case GreaterOperator ⇒ ">"
}
private def toString(fv: FilterValue): String = fv match {
case JPTrue ⇒ "true"
case JPFalse ⇒ "false"
case JPLong(value) ⇒ value.toString
case JPString(value) ⇒ value.quoteWith('\'')
case SubQuery(tokens) ⇒ tokens.map(toString).mkString("")
case _ ⇒ sys.error(fv.toString)
}
private def comparisonFilter(op: ComparisonOperator, lhs: FilterValue, rhs: FilterValue): Predicate[Json] = (op, lhs, rhs) match {
case (EqOperator, EQ(fn), value) ⇒ fn(value)
case (EqOperator, value, EQ(fn)) ⇒ fn(value)
case (GreaterOrEqOperator, GTEQ(fn), value) ⇒ fn(value)
case _ ⇒ notSupported((op, lhs, rhs))
}
// TODO make this fully recursive
val EQ = ComparisonArgument {
case SubQuery(List(CurrentNode)) ⇒ toPredicate(rhs ⇒ _ == json(rhs))
case SubQuery(List(CurrentNode, Field(name))) ⇒ toPredicate(rhs ⇒ _.field(name).contains(json(rhs)))
case SubQuery(List(CurrentNode, Field(first), Field(second))) ⇒ toPredicate(rhs ⇒ _.fieldOrEmptyObject(first).field(second).contains(json(rhs)))
}
val GTEQ = {
implicit val orderingJson: Ordering[Json] = {
Ordering.Tuple4[Option[Boolean], Option[Int], Option[Double], Option[String]].on[Json](json ⇒ {
(json.bool, json.number.flatMap(_.toInt), json.number.flatMap(_.toDouble), json.string)
})
}
implicit def orderOps[B](a: B)(implicit O: Ordering[B]): O.Ops = O.mkOrderingOps(a)
ComparisonArgument {
case SubQuery(List(CurrentNode, Field(name))) ⇒ toPredicate(rhs ⇒ lhs ⇒ lhs.field(name) >= Some(json(rhs)))
case other ⇒ notSupported(other)
}
}
private def notSupported[X](x: X): Nothing = sys.error(s"$x not supported !")
private def hasFilter(tokens: List[PathToken]): Predicate[Json] = tokens match {
case List(CurrentNode, Field(name)) ⇒ _.hasField(name)
}
trait ComparisonArgument {
def unapply(lhs: FilterValue): Option[FN]
}
object ComparisonArgument {
def apply(pf: PartialFunction[FilterValue, FN]): ComparisonArgument = (lhs: FilterValue) ⇒ pf.lift(lhs)
}
type FN = FilterValue ⇒ Predicate[Json]
private def toPredicate(f: FilterValue ⇒ Predicate[Json]): FN = f
private def json(fdv: FilterValue): Json = fdv match {
case JPTrue ⇒ jTrue
case JPFalse ⇒ jFalse
case JPDouble(value) ⇒ Json.jNumberOrNull(value)
case JPLong(value) ⇒ Json.jNumber(value)
case JPString(value) ⇒ jString(value)
case JPNull ⇒ jNull
case unknown ⇒ sys.error(s"boom: $unknown")
}
def filterArrayOrObject(prism: Prism[Json, Json])(acc: Traversal[A, Json]): Traversal[A, Json] =
acc composeTraversal objectValuesOrArrayElements composePrism prism
}
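    // Hedged summary (editor addition) of the JSONPath subset handled above: supported tokens are
    // the "$" root, ".name" fields, ".*" (any field), multi-fields, "[*]" (all elements),
    // "[0,1]" random access, and filters such as "?(@.id == 'i4')", "?(@.price >= 10)",
    // "?(@.a && @.b)" or the has-filter "?(@.name)"; recursive descent ("..") and array slices
    // with explicit bounds or step are rejected via notSupported.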
}
case object Pimpathon extends Descender {
def traversal[A](start: Traversal[A, Json], path: String): Traversal[A, Json] =
path.split("/").toList.filter(_.nonEmpty).foldLeft(start)(step)
def ancestors[A](start: Traversal[A, Json], path: String): List[(String, Traversal[A, Json])] = {
val tokens: List[String] = path.split("/").toList.filter(_.nonEmpty)
val traversals: List[Traversal[A, Json]] = tokens.scanLeft(start)(step)
tokens.inits.map(_.mkString("/")).toList.reverse.zip(traversals)
}
import argonaut.RegexMatcher
private def step[A](acc: Traversal[A, Json], token: String): Traversal[A, Json] = token match {
case "*" ⇒ acc composeTraversal objectValuesOrArrayElements
case r"""\*\[${key}='${value}'\]""" ⇒ acc.array composeTraversal each composePrism filterObject(key, jString(value))
case r"""\[${Split(indices)}\]""" ⇒ acc.array composeTraversal filterIndex(indices.map(_.toInt))
case r"""\{${Split(keys)}\}""" ⇒ acc.obj composeTraversal filterIndex(keys)
case key ⇒ acc.obj composeTraversal filterIndex(Set(key))
}
private object Split { def unapply(value: String): Option[Set[String]] = Some(value.split(",").map(_.trim).toSet) }
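    // Hedged examples (editor addition) of the path grammar accepted by step above; tokens are
    // separated by '/' and applied left to right:
    //   "order/items/*/name"   selects keys, then every element or value, then a key
    //   "items/*[id='42']"     selects array elements whose "id" field is the string "42"
    //   "items/[0, 2]"         selects array elements at indices 0 and 2
    //   "order/{id, total}"    selects only the "id" and "total" keys of an object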
}
private def filterObject(key: String, value: Json): Prism[Json, Json] =
filterObject(_.field(key).contains(value))
private def filterObject(p: Predicate[Json]): Prism[Json, Json] =
Prism[Json, Json](json ⇒ p(json).option(json))(json ⇒ json)
private final lazy val objectValuesOrArrayElements: Traversal[Json, Json] = new Traversal[Json, Json] {
def modifyF[F[_]](f: Json ⇒ F[Json])(j: Json)(implicit F: Applicative[F]): F[Json] = j.fold(
jsonNull = F.pure(j), jsonBool = _ ⇒ F.pure(j), jsonNumber = _ ⇒ F.pure(j), jsonString = _ ⇒ F.pure(j),
jsonArray = arr ⇒ F.map(each[List[Json], Json].modifyF(f)(arr))(Json.array(_: _*)),
jsonObject = obj ⇒ F.map(each[JsonObject, Json].modifyF(f)(obj))(Json.jObject)
)
}
case class As[From, Via, To, A: CodecJson](from: Descendant[From, Via, To])
object As {
implicit def asToDescendant[From, Via, To, A, That](as: As[From, Via, To, A])
(implicit cpf: CanPrismFrom[To, A, That]): Descendant[From, Via, That] = as.from.composePrism(cpf.prism)
}
}
case class Descendant[From, Via, To](
from: From, traversals: List[Traversal[From, To]], ancestorsFn: () ⇒ List[(String, Traversal[From, Via])]
) extends Dynamic {
def bool[That]( implicit cpf: CanPrismFrom[To, Boolean, That]): Descendant[From, Via, That] = apply(cpf)
def number[That](implicit cpf: CanPrismFrom[To, JsonNumber, That]): Descendant[From, Via, That] = apply(cpf)
def string[That](implicit cpf: CanPrismFrom[To, String, That]): Descendant[From, Via, That] = apply(cpf)
def array[That]( implicit cpf: CanPrismFrom[To, List[Json], That]): Descendant[From, Via, That] = apply(cpf)
def obj[That]( implicit cpf: CanPrismFrom[To, JsonObject, That]): Descendant[From, Via, That] = apply(cpf)
def double[That]( implicit cpf: CanPrismFrom[To, Double, That]): Descendant[From, Via, That] = apply(cpf)
def int[That]( implicit cpf: CanPrismFrom[To, Int, That]): Descendant[From, Via, That] = apply(cpf)
def float[That]( implicit cpf: CanPrismFrom[To, Float, That]): Descendant[From, Via, That] = apply(cpf)
def short[That]( implicit cpf: CanPrismFrom[To, Short, That]): Descendant[From, Via, That] = apply(cpf)
def byte[That]( implicit cpf: CanPrismFrom[To, Byte, That]): Descendant[From, Via, That] = apply(cpf)
def bigDecimal[That](implicit cpf: CanPrismFrom[To, BigDecimal, That]): Descendant[From, Via, That] = apply(cpf)
def bigInt[That]( implicit cpf: CanPrismFrom[To, BigInt, That]): Descendant[From, Via, That] = apply(cpf)
def as[A: CodecJson]: Descendant.As[From, Via, To, A] = Descendant.As[From, Via, To, A](this)
def selectDynamic(key: String)(implicit cpf: CanPrismFrom[To, JsonObject, JsonObject]): Descendant[From, Via, Json] =
obj[JsonObject] composeTraversal filterIndex(Set(key))
private def apply[Elem, That](cpf: CanPrismFrom[To, Elem, That]): Descendant[From, Via, That] = composePrism(cpf.prism)
def composePrism[That](next: Prism[To, That]): Descendant[From, Via, That] = withTraversal(_ composePrism next)
def composeTraversal[That](next: Traversal[To, That]): Descendant[From, Via, That] = withTraversal(_ composeTraversal next)
def composeOptional[That](next: Optional[To, That]): Descendant[From, Via, That] = withTraversal(_ composeOptional next)
def composeIso[That](next: Iso[To, That]): Descendant[From, Via, That] = withTraversal(_ composeIso next)
def headOption: Option[To] = traversals.flatMap(_.headOption(from)).headOption
def getAll: List[To] = traversals.flatMap(_.getAll(from))
def set(to: To): From = foldLeft(_.set(to))
def modify(f: To ⇒ To): From = foldLeft(_.modify(f))
private def foldLeft(f: Traversal[From, To] ⇒ From ⇒ From): From = traversals.foldLeft(from) {
case (acc, traversal) ⇒ f(traversal)(acc)
}
private def withTraversal[That](fn: Traversal[From, To] ⇒ Traversal[From, That]): Descendant[From, Via, That] =
copy(traversals = traversals.map(fn))
}
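// Hedged usage sketch (editor addition, not part of the upstream file): drives the machinery above
// from a plain monocle Traversal. The sample JSON and the path "people/*/name" are illustrative;
// only identifiers already defined or imported in this file are used.
private object DescendantUsageSketch {
  import pimpathon.argonaut.TraversalToJsonFrills

  private val json: Json = Json.jObjectAssocList(List(
    "people" -> Json.array(
      Json.jObjectAssocList(List("name" -> Json.jString("Alice"))),
      Json.jObjectAssocList(List("name" -> Json.jString("Bob")))
    )
  ))

  // Pimpathon-style path: object key, then every array element, then a key per element;
  // expected to yield the two name strings as Json values.
  private val names: List[Json] =
    Traversal.id[Json].descendant("people/*/name").getAll(json)
}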
| stacycurl/pimpathon | src/main/scala/pimpathon/argonaut.scala | Scala | apache-2.0 | 37,104 |
package memnets.model.impl
import memnets.linalg._
import memnets.model.Tick._
import memnets.model.VariableType.Continuous
import memnets.model._
import memnets.utils._
import scala.collection.mutable.ArrayBuffer
private[model] final class DynamicSystemImpl(private val _matrix: WMatrix, val params: Params = new ParamsImpl)
extends DynamicSystem
with ConfigMap
with Logging {
import DynamicSystem._
import collection.mutable.AnyRefMap
val elements = ArrayBuffer[Element]()
val triggers = ArrayBuffer[Trigger]()
val props = new AnyRefMap[String, Any]()
val noise: Param = params.create("noise", system = false)(this)
noise.desc = "Change the global noise for all variables. Use Y.noiseScale for individual variable"
var now: Tick = NullTick
protected val _layers = ArrayBuffer[AbstractLayer]()
protected val _links = ArrayBuffer[LayerLink]()
protected val _variables = ArrayBuffer[Y]()
protected var _onReset: Procedure = NULL_PROCEDURE
protected var _onTrial: Procedure = NULL_PROCEDURE
protected var _onTick: TickListener = NULL_TICK_LISTENER
protected var _onLayout: Option[ModelLayout] = None
protected var _oscCount = 0
def layers: scala.collection.IndexedSeq[AbstractLayer] = _layers
def links: scala.collection.IndexedSeq[LayerLink] = _links
def variables: scala.collection.IndexedSeq[Y] = _variables
lazy val variablesShown: Int = variables.iterator.count(_.ui.isShown)
def onLayout: Option[ModelLayout] = _onLayout
def onLayout_=(f: => Any): Unit = { _onLayout = Option(new ModelLayoutImpl(f)) }
def setOnLayout(f: ModelLayout): Unit = { _onLayout = Option(f) }
def onReset: Procedure = _onReset
def onReset_=(f: => Any): Unit = { _onReset = new ProcedureImpl(f) }
def setOnReset(f: Procedure): Unit = { _onReset = f }
def onTick: TickListener = _onTick
def onTick_=(tl: TickListener): Unit = { _onTick = tl ? NULL_TICK_LISTENER }
def setOnTick(tl: TickListener): Unit = { onTick = tl }
def onTrial: Procedure = _onTrial
def onTrial_=(f: => Any): Unit = { _onTrial = new ProcedureImpl(f) }
def setOnTrial(f: Procedure): Unit = { _onTrial = f }
override def destroy(): Unit = {
super.destroy()
_layers.clear()
_links.clear()
_variables.clear()
elements.clear()
triggers.clear()
props.clear()
_onTick = NULL_TICK_LISTENER
_onReset = NULL_PROCEDURE
_onLayout = None
_onTrial = NULL_PROCEDURE
sparse.destroy()
}
/** not a bad idea to call gc shortly after this... */
override def compact(): Unit = {
logger.debug("optimize")
sparse.matrix.sortWeights(variables.length)
for (link <- _links)
link.optimize()
}
def addLayer[T <: AbstractLayer](layer: T): T = {
val n = layer.length
require(n > 0, "layer size must be > 0")
require(n % 2 == 0, "layer size must be divisible by 2")
_layers += layer
layer
}
def addLink[T <: LayerLink](link: T, srcOut: Boolean = true): T = {
_links += link
if (srcOut) link.src.asInstanceOf[LayerBase].outLinks += link
link
}
def addOsc(osc: Osc): Unit = {
_oscCount += 1
osc._oscId = _oscCount
elements += osc
}
def oscCount = _oscCount
override def equals(obj: Any): Boolean = obj match {
case ref: AnyRef =>
this.eq(ref)
case default =>
false
}
override def toString = s"DynamicSystem[name= $name, props= ${props.take(16).mkString("[", ",", "]")}]"
object sparse extends Sparse with LayerLikeUI {
protected var _activation: Option[Activation] = None
protected var _onSpike: Option[SpikeListener] = None
protected val _funcs = ArrayBuffer[F]()
protected var _lastTopK: Option[TopK] = None
var name = "sparse"
val id = 0
val system: DynamicSystem = DynamicSystemImpl.this
var loc = Loc().down(150)
var viz = Viz.Default
var numericalType: VariableType = Continuous
var width = 1000.0
var height = 300.0
var showText = false
var format: IndexFormat = new Layer.DefaultIndexFormat
var plot = Plot(this)
plot.height = 250.0
plot.loc = Loc().down(125.0)
def rangeDefault: YRange = {
if (activation.find(_ == Activation.Relu).isDefined)
YRange(min = 0.0, max = YRange.scale)
else if (length > 0 && length < 200) {
val ranges = variables.map(_.ui.rangeDefault)
val rMin = ranges.map(_.min).min
val rMax = ranges.map(_.max).max
YRange(min = rMin, max = rMax)
} else
YRange(-YRange.scale, YRange.scale)
}
def owner = this
def gradient: Option[GradientHints] = get[GradientHints](Config.GRAD_HINTS)
def gradient_=(hints: GradientHints): Unit = update(Config.GRAD_HINTS, hints)
def gridHints: Option[GridHints] = get[GridHints](Config.GRID_HINTS)
def gridHints_=(hints: GridHints): Unit = update(Config.GRID_HINTS, hints)
def funcs: scala.collection.IndexedSeq[F] = _funcs
def y(i: Int) = variables(i)
def lastTopK = _lastTopK
def lastTopK_=(topK: TopK): Unit = { _lastTopK = Option(topK) }
def addFunc(f: F): Unit = _funcs += f
def matrix: WMatrix = _matrix
def props = DynamicSystemImpl.this.props
def length = _variables.length
def ui: LayerLikeUI = this
def outLinks: Iterable[LayerLink] = EMPTY_LAYERLINKS
def nextId: Int = variables.length
def activation: Option[Activation] = _activation
def activation_=(act: Activation): Unit = _activation = Option(act)
def onSpike = _onSpike
def onSpike_=(sl: SpikeListener) = _onSpike = Option(sl)
def setOnSpike(f: SpikeListener): Unit = { onSpike = f }
def destroy(): Unit = {
_onSpike = None
_lastTopK = None
_funcs.clear()
}
// Graph
def nodes: Iterable[Y] = _variables
def add(n: Y) = _variables += n
def remove(ys: Y): Unit = {
logger.debug(s"$ys")
val id = ys.id
val delEdges = matrix.weights.filter(e => e.src == id || e.tgt == id).toList
for (e <- delEdges)
matrix.remove(e)
ys.ui.viz = Viz.Dead
_variables -= ys // but remove from sparse
}
@inline def find(src: Y, tgt: Y) = matrix.outEdges(src.id).find(_.tgt == tgt.id)
@inline def inEdges(n: Y) = matrix.inEdges(n.id)
@inline def outEdges(n: Y) = matrix.outEdges(n.id)
def addEdgeUnique(src: Y, tgt: Y): E = find(src, tgt).getOrElse(addEdge(src, tgt))
override def addEdge(src: Y, tgt: Y): E = {
val e = matrix.create(src.id, tgt.id)
if (e.isLoop)
e.w = -1.0f
e
}
def removeEdge(e: E) = matrix.remove(e)
// NOTE : could be sending events out for this.
def modifyWs(edges: Array[E], w: Array[Double]): Unit = {
require(edges.length == w.length)
var i = 0
while (i < edges.length) {
edges(i).w = w(i)
i += 1
}
}
def modifyWs(edges: Iterable[E], w: Double) = for (e <- edges) e.w = w
}
}
private final class ModelLayoutImpl(f: => Unit) extends ModelLayout { @inline def layout() = { f } }
| MemoryNetworks/memnets | api/src/main/scala/memnets/model/impl/DynamicSystemImpl.scala | Scala | apache-2.0 | 7,002 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.iterators
import org.apache.accumulo.core.client.IteratorSetting
import org.locationtech.geomesa.index.filters.Z2Filter
import org.locationtech.geomesa.index.index.z2.Z2IndexValues
class Z2Iterator extends RowFilterIterator[Z2Filter](Z2Filter)
object Z2Iterator {
def configure(values: Z2IndexValues, offset: Int, priority: Int): IteratorSetting = {
val is = new IteratorSetting(priority, "z2", classOf[Z2Iterator])
// index space values for comparing in the iterator
Z2Filter.serializeToStrings(Z2Filter(values)).foreach { case (k, v) => is.addOption(k, v) }
// account for shard and table sharing bytes
is.addOption(RowFilterIterator.RowOffsetKey, offset.toString)
is
}
}
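// Hedged usage sketch (editor addition): attaching the configured iterator to an Accumulo scanner.
// Building Z2IndexValues is the job of the Z2 index during query planning, so it is taken as a
// parameter here; the offset and priority values are illustrative only.
private object Z2IteratorUsageSketch {
  import org.apache.accumulo.core.client.ScannerBase

  def attach(scanner: ScannerBase, values: Z2IndexValues): Unit =
    scanner.addScanIterator(Z2Iterator.configure(values, offset = 3, priority = 23))
}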
| locationtech/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/iterators/Z2Iterator.scala | Scala | apache-2.0 | 1,212 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.commons.lang
import java.text.SimpleDateFormat
import java.time.{ LocalDate, LocalTime }
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpec
import java.time.format.DateTimeFormatter
class DatesTest extends AnyFunSpec with Matchers {
describe("Dates") {
it("join") {
val date = LocalDate.parse("2014-09-09")
val time = LocalTime.parse("09:09:10")
val datetime = Dates.join(date, time)
datetime.format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH-mm-ss")) should equal("2014-09-09 09-09-10")
}
it("Normalize date string") {
Dates.normalize("1980-9-1") should equal("1980-09-01")
Dates.normalize("1980-09-1") should equal("1980-09-01")
Dates.normalize("1980-9-01") should equal("1980-09-01")
Dates.normalize("1980-09-01") should equal("1980-09-01")
Dates.normalize("1980.9.1") should equal("1980-09-01")
}
}
}
| beangle/commons | core/src/test/scala/org/beangle/commons/lang/DatesTest.scala | Scala | lgpl-3.0 | 1,661 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.planning
import java.util.Locale
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom.Geometry
import org.geotools.data.Query
import org.geotools.feature.AttributeTypeBuilder
import org.geotools.feature.simple.SimpleFeatureTypeBuilder
import org.geotools.filter.expression.PropertyAccessors
import org.geotools.filter.{FunctionExpressionImpl, MathExpressionImpl}
import org.geotools.process.vector.TransformProcess
import org.geotools.process.vector.TransformProcess.Definition
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.api.{GeoMesaFeatureIndex, QueryPlan, WrappedFeature}
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.index.conf.QueryHints.RichHints
import org.locationtech.geomesa.index.geotools.GeoMesaDataStore
import org.locationtech.geomesa.index.utils.{ExplainLogging, Explainer}
import org.locationtech.geomesa.utils.cache.SoftThreadLocal
import org.locationtech.geomesa.utils.collection.{CloseableIterator, SelfClosingIterator}
import org.locationtech.geomesa.utils.index.IndexMode
import org.locationtech.geomesa.utils.iterators.{DeduplicatingSimpleFeatureIterator, SortingSimpleFeatureIterator}
import org.locationtech.geomesa.utils.stats.{MethodProfiling, TimingsImpl}
import org.opengis.feature.`type`.{AttributeDescriptor, GeometryDescriptor}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.expression.PropertyName
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
/**
* Plans and executes queries against geomesa
*/
class QueryPlanner[DS <: GeoMesaDataStore[DS, F, W], F <: WrappedFeature, W](ds: DS)
extends QueryRunner with MethodProfiling with LazyLogging {
/**
* Plan the query, but don't execute it - used for m/r jobs and explain query
*
* @param sft simple feature type
* @param query query to plan
* @param index override index to use for executing the query
* @param output planning explanation output
* @return
*/
def planQuery(sft: SimpleFeatureType,
query: Query,
index: Option[GeoMesaFeatureIndex[DS, F, W]] = None,
output: Explainer = new ExplainLogging): Seq[QueryPlan[DS, F, W]] = {
getQueryPlans(sft, query, index, output).toList // toList forces evaluation of entire iterator
}
override def runQuery(sft: SimpleFeatureType, query: Query, explain: Explainer): CloseableIterator[SimpleFeature] =
runQuery(sft, query, None, explain)
/**
* Execute a query
*
* @param sft simple feature type
* @param query query to execute
* @param index override index to use for executing the query
* @param explain planning explanation output
* @return
*/
def runQuery(sft: SimpleFeatureType,
query: Query,
index: Option[GeoMesaFeatureIndex[DS, F, W]],
explain: Explainer): CloseableIterator[SimpleFeature] = {
import org.locationtech.geomesa.utils.conversions.ScalaImplicits.RichTraversableLike
val plans = getQueryPlans(sft, query, index, explain)
var iterator = SelfClosingIterator(plans.iterator).flatMap(p => p.scan(ds))
if (plans.exists(_.hasDuplicates)) {
iterator = new DeduplicatingSimpleFeatureIterator(iterator)
}
if (!query.getHints.isSkipReduce) {
// note: reduce must be the same across query plans
val reduce = plans.headOption.flatMap(_.reduce)
require(plans.tailOption.forall(_.reduce == reduce), "Reduce must be the same in all query plans")
reduce.foreach(r => iterator = r(iterator))
}
if (query.getSortBy != null && query.getSortBy.length > 0) {
iterator = new SortingSimpleFeatureIterator(iterator, query.getSortBy)
}
iterator
}
/**
* Set up the query plans and strategies used to execute them
*
* @param sft simple feature type
* @param original query to plan
* @param requested override index to use for executing the query
* @param output planning explanation output
* @return
*/
protected def getQueryPlans(sft: SimpleFeatureType,
original: Query,
requested: Option[GeoMesaFeatureIndex[DS, F, W]],
output: Explainer): Seq[QueryPlan[DS, F, W]] = {
import org.locationtech.geomesa.filter.filterToString
implicit val timings = new TimingsImpl
val plans = profile("all") {
// set hints that we'll need later on, fix the query filter so it meets our expectations going forward
val query = configureQuery(sft, original)
optimizeFilter(sft, query)
val hints = query.getHints
output.pushLevel(s"Planning '${query.getTypeName}' ${filterToString(query.getFilter)}")
output(s"Original filter: ${filterToString(original.getFilter)}")
output(s"Hints: bin[${hints.isBinQuery}] arrow[${hints.isArrowQuery}] density[${hints.isDensityQuery}] " +
s"stats[${hints.isStatsQuery}] map-aggregate[${hints.isMapAggregatingQuery}] " +
s"sampling[${hints.getSampling.map { case (s, f) => s"$s${f.map(":" + _).getOrElse("")}"}.getOrElse("none")}]")
output(s"Sort: ${Option(query.getSortBy).filter(_.nonEmpty).map(_.mkString(", ")).getOrElse("none")}")
output(s"Transforms: ${query.getHints.getTransformDefinition.getOrElse("None")}")
output.pushLevel("Strategy selection:")
val requestedIndex = requested.orElse(hints.getRequestedIndex.flatMap(toIndex(sft, _)))
val transform = query.getHints.getTransformSchema
val evaluation = query.getHints.getCostEvaluation
val strategies = StrategyDecider.getFilterPlan(ds, sft, query.getFilter, transform, evaluation, requestedIndex, output)
output.popLevel()
var strategyCount = 1
strategies.map { strategy =>
output.pushLevel(s"Strategy $strategyCount of ${strategies.length}: ${strategy.index}")
strategyCount += 1
output(s"Strategy filter: $strategy")
val plan = profile(s"s$strategyCount")(strategy.index.getQueryPlan(sft, ds, strategy, hints, output))
plan.explain(output)
output(s"Plan creation took ${timings.time(s"s$strategyCount")}ms").popLevel()
plan
}
}
output(s"Query planning took ${timings.time("all")}ms")
plans
}
private def toIndex(sft: SimpleFeatureType, name: String): Option[GeoMesaFeatureIndex[DS, F, W]] = {
val check = name.toLowerCase(Locale.US)
val indices = ds.manager.indices(sft, IndexMode.Read)
val value = if (check.contains(":")) {
indices.find(_.identifier.toLowerCase(Locale.US) == check)
} else {
indices.find(_.name.toLowerCase(Locale.US) == check)
}
if (value.isEmpty) {
logger.error(s"Ignoring invalid strategy name: $name. Valid values " +
s"are ${indices.map(i => s"${i.name}, ${i.identifier}").mkString(", ")}")
}
value
}
}
object QueryPlanner extends LazyLogging {
private [planning] val threadedHints = new SoftThreadLocal[Map[AnyRef, AnyRef]]
object CostEvaluation extends Enumeration {
type CostEvaluation = Value
val Stats, Index = Value
}
def setPerThreadQueryHints(hints: Map[AnyRef, AnyRef]): Unit = threadedHints.put(hints)
def getPerThreadQueryHints: Option[Map[AnyRef, AnyRef]] = threadedHints.get
def clearPerThreadQueryHints(): Unit = threadedHints.clear()
/**
* Checks for attribute transforms in the query and sets them as hints if found
*
* @param query query
* @param sft simple feature type
* @return
*/
def setQueryTransforms(query: Query, sft: SimpleFeatureType): Unit = {
val properties = query.getPropertyNames
query.setProperties(Query.ALL_PROPERTIES)
if (properties != null && properties.nonEmpty &&
properties.toSeq != sft.getAttributeDescriptors.map(_.getLocalName)) {
val (transforms, derivedSchema) = buildTransformSFT(sft, properties)
query.getHints.put(QueryHints.Internal.TRANSFORMS, transforms)
query.getHints.put(QueryHints.Internal.TRANSFORM_SCHEMA, derivedSchema)
}
}
def buildTransformSFT(sft: SimpleFeatureType, properties: Seq[String]): (String, SimpleFeatureType) = {
val (transformProps, regularProps) = properties.partition(_.contains('='))
val convertedRegularProps = regularProps.map { p => s"$p=$p" }
val allTransforms = convertedRegularProps ++ transformProps
// ensure that the returned props includes geometry, otherwise we get exceptions everywhere
val geomTransform = {
val allGeoms = sft.getAttributeDescriptors.collect {
case d if classOf[Geometry].isAssignableFrom(d.getType.getBinding) => d.getLocalName
}
val geomMatches = for (t <- allTransforms.iterator; g <- allGeoms) yield {
        t.matches(s"$g\\s*=.*")
}
if (geomMatches.contains(true)) {
Nil
} else {
Option(sft.getGeometryDescriptor).map(_.getLocalName).map(geom => s"$geom=$geom").toSeq
}
}
val transforms = (allTransforms ++ geomTransform).mkString(";")
val transformDefs = TransformProcess.toDefinition(transforms)
val derivedSchema = computeSchema(sft, transformDefs.asScala)
(transforms, derivedSchema)
}
private def computeSchema(origSFT: SimpleFeatureType, transforms: Seq[Definition]): SimpleFeatureType = {
val descriptors: Seq[AttributeDescriptor] = transforms.map { definition =>
val name = definition.name
val cql = definition.expression
cql match {
case p: PropertyName =>
val prop = p.getPropertyName
if (origSFT.getAttributeDescriptors.exists(_.getLocalName == prop)) {
val origAttr = origSFT.getDescriptor(prop)
val ab = new AttributeTypeBuilder()
ab.init(origAttr)
val descriptor = if (origAttr.isInstanceOf[GeometryDescriptor]) {
ab.buildDescriptor(name, ab.buildGeometryType())
} else {
ab.buildDescriptor(name, ab.buildType())
}
descriptor.getUserData.putAll(origAttr.getUserData)
descriptor
} else if (PropertyAccessors.findPropertyAccessors(new ScalaSimpleFeature(origSFT, ""), prop, null, null).nonEmpty) {
// note: we return String as we have to use a concrete type, but the json might return anything
val ab = new AttributeTypeBuilder().binding(classOf[String])
ab.buildDescriptor(name, ab.buildType())
} else {
throw new IllegalArgumentException(s"Attribute '$prop' does not exist in SFT '${origSFT.getTypeName}'.")
}
case f: FunctionExpressionImpl =>
val clazz = f.getFunctionName.getReturn.getType
val ab = new AttributeTypeBuilder().binding(clazz)
if (classOf[Geometry].isAssignableFrom(clazz)) {
ab.buildDescriptor(name, ab.buildGeometryType())
} else {
ab.buildDescriptor(name, ab.buildType())
}
// Do math ops always return doubles?
case a: MathExpressionImpl =>
val ab = new AttributeTypeBuilder().binding(classOf[java.lang.Double])
ab.buildDescriptor(name, ab.buildType())
// TODO: Add support for LiteralExpressionImpl and/or ClassificationFunction?
}
}
val geomAttributes = descriptors.filter(_.isInstanceOf[GeometryDescriptor]).map(_.getLocalName)
val sftBuilder = new SimpleFeatureTypeBuilder()
sftBuilder.setName(origSFT.getName)
sftBuilder.addAll(descriptors.toArray)
if (geomAttributes.nonEmpty) {
val defaultGeom = if (geomAttributes.size == 1) { geomAttributes.head } else {
// try to find a geom with the same name as the original default geom
val origDefaultGeom = origSFT.getGeometryDescriptor.getLocalName
geomAttributes.find(_ == origDefaultGeom).getOrElse(geomAttributes.head)
}
sftBuilder.setDefaultGeometry(defaultGeom)
}
val schema = sftBuilder.buildFeatureType()
schema.getUserData.putAll(origSFT.getUserData)
schema
}
}
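// Hedged usage sketch (editor addition): how requested properties containing "name=expression"
// definitions become transform hints. The feature type, attribute names and the strConcat
// expression below are illustrative only.
private object QueryTransformUsageSketch {
  import org.opengis.filter.Filter

  def example(sft: SimpleFeatureType): Query = {
    val query = new Query(sft.getTypeName, Filter.INCLUDE, Array("name", "label=strConcat(name, '-x')"))
    // Sets the TRANSFORMS / TRANSFORM_SCHEMA hints and resets the property list to ALL_PROPERTIES.
    QueryPlanner.setQueryTransforms(query, sft)
    query
  }
}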
| ronq/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/planning/QueryPlanner.scala | Scala | apache-2.0 | 12,652 |
package odfi.h2dl.indesign.top
import java.io.File
import odfi.h2dl.indesign.h2dl.interpreter.H2DLInterpreter
class TopScript(val path: File) {
  // Interpreter
val interpreter = new H2DLInterpreter
interpreter.open
// State
var error: Option[Throwable] = None
def clean = error == None
def rerun = {
try {
interpreter.eval(path)
this.error = None
} catch {
      case e: Throwable => this.error = Some(e)
}
}
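  // Hedged usage sketch (editor addition); the script path is illustrative:
  //
  //   val top = new TopScript(new File("scripts/top.h2dl"))
  //   top.rerun
  //   if (!top.clean) top.error.foreach(_.printStackTrace())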
}
| richnou/h2dl-indesign | src/main/scala/odfi/h2dl/indesign/top/TopScript.scala | Scala | gpl-2.0 | 453
package hr.fer.ztel.thesis.spark
import breeze.linalg.{SparseVector => BreezeSparseVector, DenseVector=>BreezeDenseVector}
import com.esotericsoftware.kryo.Kryo
import org.apache.spark.serializer.KryoRegistrator
class SparkKryoRegistrator extends KryoRegistrator {
  override def registerClasses(kryo: Kryo) {
kryo.register(classOf[Array[Int]]) // bool vector
kryo.register(classOf[Map[Int, Double]]) // vector
kryo.register(classOf[Map[(Int, Int), Double]]) // matrix
kryo.register(classOf[BreezeDenseVector[Double]])
kryo.register(classOf[BreezeSparseVector[Double]])
kryo.register(classOf[Array[Double]])
}
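  // Hedged usage sketch (editor addition): this registrator is wired up through standard Spark
  // configuration keys; the application name is illustrative:
  //
  //   val conf = new org.apache.spark.SparkConf()
  //     .setAppName("thesis-recommender")
  //     .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  //     .set("spark.kryo.registrator", classOf[SparkKryoRegistrator].getName)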
}
| fpopic/master_thesis | src/main/scala/hr/fer/ztel/thesis/spark/SparkKryoRegistrator.scala | Scala | mit | 642
package controllers
import play.api.Play
import play.api.mvc._
import models._
import scala.xml.PCData
import org.joda.time.format.DateTimeFormat
import org.joda.time.DateTime
import javax.inject.Inject
class Rss @Inject() (
dumpDb: DumpDB,
tagDb: TagDB) extends Controller {
def dumps = Action { request =>
val baseUrl = "http://" + request.host;
val feedUrl = baseUrl + request.uri;
val dumps = dumpDb.all;
val updatedTime =
if (dumps.isEmpty)
new DateTime()
else
dumps.map(d => d.timestamp).reduceLeft((lhs, rhs) => (if (lhs.compareTo(rhs) > 0) lhs else rhs))
val feedXml =
<feed xmlns="http://www.w3.org/2005/Atom">
<author>
<name>dmpster</name>
</author>
<title>All Dumps</title>
<id>{ feedUrl }</id>
<link href={ feedUrl } rel="self" type="application/atom+xml"/>
<updated>{ updatedTime }</updated>
{
dumps.map(d => {
val dumpDetailsUrl = baseUrl + "/dmpster/dmp/" + d.id + "/details"
val tagsAsText = tagDb.forDump(d).map(tag => tag.name).mkString(", ");
val timeFormatter = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss");
val formattedTimestamp = timeFormatter.print(d.timestamp);
val bucketName = d.bucket.name;
val entryContent =
<html>
<body>
<h1>{ bucketName }<br/><a href={ dumpDetailsUrl }>{ d.filename }</a></h1>
<p>{ formattedTimestamp }</p>
<p>Tags: { tagsAsText }</p>
<p><pre>{ d.content }</pre></p>
</body>
</html>.toString;
<entry>
<id>{ dumpDetailsUrl + "__" + d.timestamp.toString() }</id>
<title>{ d.filename }</title>
<updated>{ d.timestamp }</updated>
<link href={ dumpDetailsUrl }></link>
<summary>{ d.filename + " (" + d.timestamp + ") \n" + bucketName }</summary>
<content type="html">{ new PCData(entryContent) }</content>
</entry>
})
}
</feed>;
val prettyPrinter = new scala.xml.PrettyPrinter(100, 2)
val feedFormatted = prettyPrinter.format(feedXml);
Ok(feedFormatted).as("application/rss+xml, application/rdf+xml, application/atom+xml, application/xml, text/xml")
}
}
| alexanderfloh/dmpster | app/controllers/Rss.scala | Scala | mit | 2,402