code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.plugins
import java.io.File
import org.clapper.classutil.{Modifier, ClassFinder}
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.{OneInstancePerTest, Matchers, FunSpec}
import org.mockito.Mockito._
import test.utils.TestClassInfo
class PluginSearcherSpec extends FunSpec with Matchers
with OneInstancePerTest with MockitoSugar
{
private val mockClassFinder = mock[ClassFinder]
private val pluginSearcher = new PluginSearcher {
override protected def newClassFinder(): ClassFinder = mockClassFinder
override protected def newClassFinder(paths: Seq[File]): ClassFinder =
mockClassFinder
}
private val pluginClassInfo = TestClassInfo(
name = classOf[Plugin].getName,
modifiers = Set(Modifier.Interface)
)
private val directPluginClassInfo = TestClassInfo(
name = "direct.plugin",
superClassName = pluginClassInfo.name
)
private val directAsInterfacePluginClassInfo = TestClassInfo(
name = "direct.interface.plugin",
interfaces = List(pluginClassInfo.name)
)
private val indirectPluginClassInfo = TestClassInfo(
name = "indirect.plugin",
superClassName = directPluginClassInfo.name
)
private val indirectAsInterfacePluginClassInfo = TestClassInfo(
name = "indirect.interface.plugin",
interfaces = List(directAsInterfacePluginClassInfo.name)
)
private val traitPluginClassInfo = TestClassInfo(
name = "trait.plugin",
modifiers = Set(Modifier.Interface)
)
private val abstractClassPluginClassInfo = TestClassInfo(
name = "abstract.plugin",
modifiers = Set(Modifier.Abstract)
)
private val classInfos = Seq(
pluginClassInfo,
directPluginClassInfo, directAsInterfacePluginClassInfo,
indirectPluginClassInfo, indirectAsInterfacePluginClassInfo,
traitPluginClassInfo, abstractClassPluginClassInfo
)
describe("PluginSearcher") {
describe("#internal") {
it("should find any plugins directly extending the Plugin class") {
val expected = directPluginClassInfo.name
doReturn(classInfos.toStream).when(mockClassFinder).getClasses()
val actual = pluginSearcher.internal.map(_.name)
actual should contain (expected)
}
it("should find any plugins directly extending the Plugin trait") {
val expected = directAsInterfacePluginClassInfo.name
doReturn(classInfos.toStream).when(mockClassFinder).getClasses()
val actual = pluginSearcher.internal.map(_.name)
actual should contain (expected)
}
it("should find any plugins indirectly extending the Plugin class") {
val expected = indirectPluginClassInfo.name
doReturn(classInfos.toStream).when(mockClassFinder).getClasses()
val actual = pluginSearcher.internal.map(_.name)
actual should contain (expected)
}
it("should find any plugins indirectly extending the Plugin trait") {
val expected = indirectAsInterfacePluginClassInfo.name
doReturn(classInfos.toStream).when(mockClassFinder).getClasses()
val actual = pluginSearcher.internal.map(_.name)
actual should contain (expected)
}
it("should not include any traits or abstract classes") {
val expected = Seq(
abstractClassPluginClassInfo.name,
traitPluginClassInfo.name
)
doReturn(classInfos.toStream).when(mockClassFinder).getClasses()
val actual = pluginSearcher.internal.map(_.name)
actual should not contain atLeastOneOf (expected.head, expected.tail.head)
}
}
describe("#search") {
it("should find any plugins directly extending the Plugin class") {
val expected = directPluginClassInfo.name
doReturn(classInfos.toStream).when(mockClassFinder).getClasses()
val actual = pluginSearcher.search().map(_.name).toSeq
actual should contain (expected)
}
it("should find any plugins directly extending the Plugin trait") {
val expected = directAsInterfacePluginClassInfo.name
doReturn(classInfos.toStream).when(mockClassFinder).getClasses()
val actual = pluginSearcher.search().map(_.name).toSeq
actual should contain (expected)
}
it("should find any plugins indirectly extending the Plugin class") {
val expected = indirectPluginClassInfo.name
doReturn(classInfos.toStream).when(mockClassFinder).getClasses()
val actual = pluginSearcher.search().map(_.name).toSeq
actual should contain (expected)
}
it("should find any plugins indirectly extending the Plugin trait") {
val expected = indirectAsInterfacePluginClassInfo.name
doReturn(classInfos.toStream).when(mockClassFinder).getClasses()
val actual = pluginSearcher.search().map(_.name).toSeq
actual should contain (expected)
}
it("should not include any traits or abstract classes") {
val expected = Seq(
abstractClassPluginClassInfo.name,
traitPluginClassInfo.name
)
doReturn(classInfos.toStream).when(mockClassFinder).getClasses()
val actual = pluginSearcher.search().map(_.name).toSeq
actual should not contain atLeastOneOf (expected.head, expected.tail.head)
}
}
}
}
| lresende/incubator-toree | plugins/src/test/scala/org/apache/toree/plugins/PluginSearcherSpec.scala | Scala | apache-2.0 | 6,132 |
/**
* Created by scotts on 11/12/15.
*/
class EnergyMonitorRecord(val userId:Int, val usedAmount:Long, val time:Long)
| scottsappen/EnergyMonitorHBaseExample | src/main/scala/EnergyMonitorRecord.scala | Scala | apache-2.0 | 122 |
/*
* Copyright (c) 2014. Regents of the University of California
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.berkeley.cs.amplab.avocado.partitioners
import edu.berkeley.cs.amplab.adam.models.ReferenceRegion
import scala.annotation.tailrec
import scala.collection.immutable.{SortedMap, TreeSet, SortedSet}
class PartitionSet (protected val regionMapping: SortedMap[ReferenceRegion, Int]) extends Serializable {
/**
* Merges a region into a sorted set of regions.
*
* @note Assumes that the region being merged in is ordered after all regions in the
* current set of regions.
*
* @param set Sorted set of previously merged regions.
* @param region Region to merge in to set.
* @return New set with new region merged in.
*/
private def mergeRegions (set: TreeSet[ReferenceRegion],
region: ReferenceRegion): TreeSet[ReferenceRegion] = {
if (set.isEmpty) {
set + region
} else {
val t = set.last
if (t.overlaps(region) || t.isAdjacent(region)) {
set.dropRight(1) + t.merge(region)
} else {
set + region
}
}
}
/**
* Merges overlapping/adjacent regions together across the set of regions.
*
* @param regions Input set of regions.
* @return Sorted set with overlapping regions merged together.
*/
private def merge (regions: SortedSet[ReferenceRegion]): TreeSet[ReferenceRegion] = {
regions.foldLeft(TreeSet[ReferenceRegion]())(mergeRegions)
}
lazy val mergedPartitions = merge(regionMapping.keySet)
/**
* Returns a list of all integer partition mappings that a region overlaps with.
*
* @note Can be overridden if a more performant partition mapping function can be provided.
*
* @param region Region of interest.
* @return List of all partition indexes that this region overlaps with.
*/
def getPartition (region: ReferenceRegion): List[Int] = {
regionMapping.filterKeys(_.overlaps(region))
.values
.toList
}
/**
* Returns whether a region is contained in a partition inside of this set.
*
* @param region Region of interest.
* @return True if region overlaps with any region inside of this partition.
*/
def isInSet (region: ReferenceRegion): Boolean = {
!mergedPartitions.filter(_.refId == region.refId)
.forall(r => !(r.contains(region) || r.overlaps(region)))
}
/**
* Returns whether a region is not wholly contained inside of this set.
*
* @param region Region of interest.
* @return True if region is not wholly contained inside of this set.
*/
def isOutsideOfSet (region: ReferenceRegion): Boolean = {
mergedPartitions.filter(_.refId == region.refId)
.forall(r => !r.contains(region))
}
}
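
// A minimal usage sketch of the queries above, assuming a region -> partition-index mapping
// is available from elsewhere; how ReferenceRegion instances are constructed is not shown in
// this file, so that part stays abstract here. This object is illustrative only.
object PartitionSetUsageSketch {
  def overlappingPartitions(regionToPartition: SortedMap[ReferenceRegion, Int],
                            query: ReferenceRegion): List[Int] = {
    val partitions = new PartitionSet(regionToPartition)
    // isInSet checks against the merged (overlap/adjacency-collapsed) regions; getPartition
    // then returns every partition index whose original region overlaps the query.
    if (partitions.isInSet(query)) partitions.getPartition(query) else List.empty[Int]
  }
}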
| fnothaft/avocado | avocado-core/src/main/scala/edu/berkeley/cs/amplab/avocado/partitioners/PartitionSet.scala | Scala | apache-2.0 | 3,291 |
package eu.ace_design.island.viewer.svg
import java.awt.geom.Line2D
import java.awt.{BasicStroke, Color, Graphics2D}
import eu.ace_design.island.map.{PropertySet, IsWater, IslandMap, Property}
/**
* An heat-map displays the map as polygons, and map a color to each face, based on a given property. Faces tagged
* as water has a blue dot in their center.
*
* The drawn color is defined as a gradient between a color (e.g., blue for moisture, red for temperature) and white.
* The gradient factor is computed as 1 for the highest value, and 0 fot the lowest one. Faces that are not annotated
* with the property are painted in black (can be configured).
*
* By default, the heat map generator uses the face property set, and uses each face reference to look for prop value.
* One can specify a mapper to use another property set, and a selector to select the associated reference. This is for
* example useful for the Elevation property, as it is defined on vertices instead of faces.
*
* @param prop the property to map.
* @param c the color to use as a start, going from this color for high values of prop to white for low values.
* @param centers the color to be used to differentiate water faces (default to black)
* @param selector a function to select the property set that contains prop (default to face)
* @param mapper a function to map a face to the index associated to prop in the selected property set (default to face
* reference)
*/
case class HeatMap(prop: Property[Double], c: Color = Color.RED, centers: Color = Color.BLACK,
selector: IslandMap => PropertySet = Selectors.faces,
mapper: (IslandMap, Int) => Int = Mappers.faceRef ) extends SVGViewer {
protected def draw(m: IslandMap, g: Graphics2D): Unit = {
val propValues = selector(m).restrictedTo(prop)
def factor(v: Double): Double = v / propValues.values.max
// drawing each faces
m.faceRefs foreach { ref =>
val path = buildPath(ref, m)
g.setStroke(new BasicStroke(1f))
g.setColor(Color.BLACK); g.draw(path)
val color = try {
val value = propValues(mapper(m, ref))
gradient(c, Color.WHITE, factor(value))
} catch {
case e: NoSuchElementException => Color.BLACK
}
g.setColor(color); g.fill(path)
if(m.faceProps.check(ref, IsWater())) {
g.setColor(centers)
g.setStroke(new BasicStroke(2f))
val center = m.vertex(m.face(ref).center)
g.draw(new Line2D.Double(center.x, center.y,center.x, center.y))
}
}
}
}
object Selectors {
val vertices: IslandMap => PropertySet = m => m.vertexProps
val faces: IslandMap => PropertySet = m => m.faceProps
}
object Mappers {
val faceRef: (IslandMap, Int) => Int = (m,f) => f
val faceCenterRef: (IslandMap, Int) => Int = (m,f) => m.face(f).center
}
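
// A minimal usage sketch of the selector/mapper hooks described above, assuming some
// Property[Double] (here called `elevation`) is defined elsewhere on vertices; the property
// and the object below are illustrative, not taken from the project itself.
object HeatMapUsageSketch {
  def elevationHeatMap(elevation: Property[Double]): HeatMap =
    HeatMap(
      prop = elevation,
      c = Color.BLUE,                  // gradient runs from this color (high values) to white (low values)
      selector = Selectors.vertices,   // the property lives in the vertex property set, not the face one
      mapper = Mappers.faceCenterRef   // look the value up through each face's center vertex
    )
}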
| ace-design/island | engine/src/main/scala/eu/ace_design/island/viewer/svg/HeatMap.scala | Scala | lgpl-3.0 | 2,881 |
package org.au9ustine.merry.model.identicon
import java.net.InetAddress
import java.security.MessageDigest
import java.util.logging.Logger
import scala.util.Random
/**
* IdentIcon
*
* A Wrapper class for Donpark's original identicon implementation
*
* Created by au9ustine on Dec 31st, 2013.
*/
class IdentIcon {
// fields
private var _inetMask: Int = IdentIcon.DEFAULT_INET_MASK
private var _inetSalt: String = Random.nextString(64)
def inetMask = _inetMask
def inetMask_=(value: Int): Unit = _inetMask = value
def inetSalt = _inetSalt
def inetSalt_= (value: String): Unit = _inetSalt = value
/**
* Generate identicon code from IP address
* @param inetAddress
* @return
*/
def genCode(inetAddress: InetAddress): Int = {
// Check if inetSalt is valid
_inetSalt match {
case null => throw new IllegalArgumentException("Salt value cannot be null")
case s: String if s.length == 0 => throw new IllegalArgumentException("Salt value cannot be empty")
case x: String => x
}
// Prerequisites
val ip: Array[Byte] = inetAddress.getAddress
val s: StringBuilder = new StringBuilder // TODO: elaborate it with functional style
val ipInt: Int = (((ip(0) & 0xff) << 24) |
((ip(1) & 0xff) << 16) |
((ip(2) & 0xff) << 8) |
(ip(3) & 0xff)) & this.inetMask
val m: MessageDigest = MessageDigest.getInstance("SHA1")
// Append and assembly
s.append(ipInt)
s.append("+")
s.append(_inetSalt)
// Calculate hash and code
val buffer: Array[Byte] = m.digest(s.toString().getBytes("UTF-8"))
val code: Int = ((buffer(0) & 0xff) << 24) |
((buffer(1) & 0xff) << 16) |
((buffer(2) & 0xff) << 8) |
buffer(3) & 0xff
code
}
/**
* Generate identicon code from remote address (Wrapper)
* @param codeParam
* @param remoteAddress
* @return newly-generated identicon code
*/
def genCode(codeParam: String, remoteAddress: String): Int = {
try {
val code = codeParam match {
case null => this.genCode(InetAddress.getByName(remoteAddress))
case _ => Integer.parseInt(codeParam)
}
code
} catch {
case e: Exception => Logger.getGlobal.fine(e.toString)
-1
}
}
def getIdenticonSize(param: String): Int = {
param match {
case s: String if s != null && s.length > 0 =>
Integer.parseInt(param) match {
case least: Int if least < IdentIcon.MINIMUM_IDENTICON_SIZE => IdentIcon.MINIMUM_IDENTICON_SIZE
case most: Int if most > IdentIcon.MAXIMUM_IDENTICON_SIZE => IdentIcon.MAXIMUM_IDENTICON_SIZE
case x: Int => x
}
case _ => IdentIcon.DEFAULT_IDENTICON_SIZE
}
}
def getIdenticonETag(code: Int, size: Int, version: Int): String = {
Seq("W/\"", Integer.toHexString(code), "@", size, 'v', version, "\"").mkString
}
}
object IdentIcon {
val DEFAULT_IDENTICON_SIZE: Int = 16
val MINIMUM_IDENTICON_SIZE: Int = 15
val MAXIMUM_IDENTICON_SIZE: Int = 64
val DEFAULT_INET_MASK: Int = 0xFFFFFFFF
}
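
// A minimal usage sketch of the intended call sequence, assuming the salt below is replaced
// by a stable application secret; this object is illustrative and not part of the original code.
object IdentIconUsageSketch {
  def codeFor(remoteAddress: String): Int = {
    val identicon = new IdentIcon
    identicon.inetSalt = "replace-with-a-stable-secret-salt" // placeholder value (assumption)
    // Passing null for codeParam makes genCode derive the code from the masked IP plus the salt.
    identicon.genCode(null, remoteAddress)
  }
}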
| au9ustine/org.au9ustine.merry | src/main/scala/org/au9ustine/merry/model/identicon/IdentIcon.scala | Scala | apache-2.0 | 3,174 |
/**
* CSPFJ - CSP solving API for Java
* Copyright (C) 2006 Julien VION
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package concrete
package heuristic
package variable
class WDegOnDom(val pool: Seq[Variable])
extends ScoredVariableHeuristic with ConstraintWeighting {
def score(variable: Variable, dom: Domain, state: ProblemState): Double = {
state.wDeg(variable).toDouble / dom.size
}
override def toString = s"max-wdeg/dom"
override def shouldRestart = true
}
| concrete-cp/concrete | src/main/scala/concrete/heuristic/variable/WDegOnDom.scala | Scala | lgpl-2.1 | 1,206 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.box
import org.joda.time.LocalDate
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.box.retriever.BoxRetriever
import uk.gov.hmrc.ct.ct600.v3._
import uk.gov.hmrc.ct.ct600.v3.retriever.CT600BoxRetriever
import uk.gov.hmrc.ct.domain.ValidationConstants._
class ValidatableBoxSpec extends WordSpec with MockitoSugar with Matchers with ValidatableBox[BoxRetriever]{
override def validate(boxRetriever: BoxRetriever): Set[CtValidation] = ???
"validateBooleanAsMandatory" should {
"return error if None" in {
validateBooleanAsMandatory("testBox", testOptBooleanBox(None)) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.required"))
}
"return no errors if any value present" in {
validateBooleanAsMandatory("testBox", testOptBooleanBox(Some(true))) shouldBe Set()
validateBooleanAsMandatory("testBox", testOptBooleanBox(Some(false))) shouldBe Set()
}
}
"validateIntegerAsMandatory" should {
"return error if None" in {
validateIntegerAsMandatory("testBox", testOptIntegerBox(None)) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.required"))
}
"return no errors if any value present" in {
validateIntegerAsMandatory("testBox", testOptIntegerBox(Some(0))) shouldBe Set()
}
}
"validateStringAsMandatory" should {
"return error if None" in {
validateStringAsMandatory("testBox", testOptStringBox(None)) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.required"))
}
"return no errors if any value present" in {
validateStringAsMandatory("testBox", testOptStringBox(Some("wibble"))) shouldBe Set()
}
}
"validateStringAsBlank" should {
"return error if any value is present" in {
validateStringAsBlank("testBox", testOptStringBox(Some("wibble"))) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.nonBlankValue"))
}
"return no errors if None" in {
validateStringAsBlank("testBox", testOptStringBox(None)) shouldBe Set()
}
"return no errors if empty string passed" in {
validateStringAsBlank("testBox", testOptStringBox(Some(""))) shouldBe Set()
}
}
"validateDateAsMandatory" should {
"return error if None" in {
validateDateAsMandatory("testBox", testOptDateBox(None)) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.required"))
}
"return no errors if any value present" in {
validateDateAsMandatory("testBox", testOptDateBox(Some(new LocalDate))) shouldBe Set()
}
}
"validateDateAsBlank" should {
"return error if not blank" in {
validateDateAsBlank("testBox", testOptDateBox(Some(new LocalDate()))) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.nonBlankValue"))
}
"return no errors if blank" in {
validateDateAsBlank("testBox", testOptDateBox(None)) shouldBe Set()
}
}
"validateDateAsBefore" should {
"return error if date is after" in {
validateDateAsBefore("testBox", testOptDateBox(Some(new LocalDate("2013-01-01"))), new LocalDate("2012-12-31")) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.not.before"))
}
"return error if date is the same" in {
validateDateAsBefore("testBox", testOptDateBox(Some(new LocalDate("2013-12-31"))), new LocalDate("2012-12-31")) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.not.before"))
}
"return no errors if date is before" in {
validateDateAsBefore("testBox", testOptDateBox(Some(new LocalDate("2012-12-30"))), new LocalDate("2012-12-31")) shouldBe Set()
}
}
"validateDateAsAfter" should {
"return error if date is before" in {
validateDateAsAfter("testBox", testOptDateBox(Some(new LocalDate("2012-12-30"))), new LocalDate("2012-12-31")) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.not.after"))
}
"return error if date is the same" in {
validateDateAsAfter("testBox", testOptDateBox(Some(new LocalDate("2012-12-31"))), new LocalDate("2012-12-31")) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.not.after"))
}
"return no errors if date is after" in {
validateDateAsAfter("testBox", testOptDateBox(Some(new LocalDate("2013-01-01"))), new LocalDate("2012-12-31")) shouldBe Set()
}
}
"validateDateBetweenInclusive" should {
val minDate = new LocalDate("2012-12-31")
val maxDate = new LocalDate("2013-12-31")
"return error if date is before start date" in {
validateDateAsBetweenInclusive("testBox", testOptDateBox(Some(new LocalDate("2012-12-30"))), minDate, maxDate) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.not.betweenInclusive", Some(Seq(toErrorArgsFormat(minDate), toErrorArgsFormat(maxDate)))))
}
"return no errors if date is on start date" in {
validateDateAsBetweenInclusive("testBox", testOptDateBox(Some(new LocalDate("2012-12-31"))), minDate, maxDate) shouldBe Set()
}
"return error if date is after end date" in {
validateDateAsBetweenInclusive("testBox", testOptDateBox(Some(new LocalDate("2014-01-01"))), minDate, maxDate) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.not.betweenInclusive", Some(Seq(toErrorArgsFormat(minDate), toErrorArgsFormat(maxDate)))))
}
"return no errors if date is on end date" in {
validateDateAsBetweenInclusive("testBox", testOptDateBox(Some(new LocalDate("2013-12-31"))), minDate, maxDate) shouldBe Set()
}
}
"validateNumberRange" should {
"return error if number too small" in {
validateIntegerRange("testBox", testOptIntegerBox(Some(0)), min = 1, max = 2) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.outOfRange", Some(Seq("1","2"))))
}
"return error if number too large" in {
validateIntegerRange("testBox", testOptIntegerBox(Some(3)), min = 1, max = 2) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.outOfRange", Some(Seq("1","2"))))
}
"return no errors if within range" in {
validateIntegerRange("testBox", testOptIntegerBox(Some(0)), min = 0, max =2) shouldBe Set()
}
"return no errors if no value present" in {
validateIntegerRange("testBox", testOptIntegerBox(None), min = 0, max =2) shouldBe Set()
}
}
"validateOptionalStringByRegex" should {
"return error if it does not match" in {
validateOptionalStringByRegex("testBox", testOptStringBox(Some("1234567")), regexString) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.regexFailure"))
}
"return no errors if it matches" in {
validateOptionalStringByRegex("testBox", testOptStringBox(Some("12345678")), regexString) shouldBe Set()
}
"return no errors if no value set" in {
validateOptionalStringByRegex("testBox", testOptStringBox(None), regexString) shouldBe Set()
}
"return no errors if empty string" in {
validateOptionalStringByRegex("testBox", testOptStringBox(Some("")), regexString) shouldBe Set()
}
}
"validateStringByLength" should {
"pass if in range #1" in {
validateStringByLength("testBox", testStringBox("1234567"), 7,8) shouldBe Set()
}
"pass if in range #2" in {
validateStringByLength("testBox", testStringBox("12345678"), 7,8) shouldBe Set()
}
"pass if empty" in {
validateStringByLength("testBox", testStringBox(""), 7,8) shouldBe Set()
}
"return error if too short" in {
validateStringByLength("testBox", testStringBox("123456"), 7,8) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.text.sizeRange", Some(Seq("7","8"))))
}
"return error if too long" in {
validateStringByLength("testBox", testStringBox("123456789"), 7,8) shouldBe Set(CtValidation(Some("testBox"), "error.testBox.text.sizeRange", Some(Seq("7","8"))))
}
}
"validateAllFilledOrEmptyStrings" should {
"pass if all strings non-empty" in {
validateAllFilledOrEmptyStrings("testBox", Set(testStringBox("something"),testStringBox("something"))) shouldBe Set()
}
"pass if all string empty" in {
validateAllFilledOrEmptyStrings("testBox", Set(testStringBox(""),testStringBox(""))) shouldBe Set()
}
"return error if mix of empty and non-empty" in {
validateAllFilledOrEmptyStrings("testBox", Set(testStringBox("something"),testStringBox(""))) shouldBe Set(CtValidation(Some("testBox"),"error.testBox.allornone"))
}
}
"validateAllFilledOrEmptyStringsForBankDetails" should {
"return error if mixing empty and non-empty" in {
val mockBoxRetriever = mock[CT600BoxRetriever]
when(mockBoxRetriever.retrieveB920()).thenReturn(B920(""))
when(mockBoxRetriever.retrieveB925()).thenReturn(B925(""))
when(mockBoxRetriever.retrieveB930()).thenReturn(B930(""))
when(mockBoxRetriever.retrieveB935()).thenReturn(B935("something"))
validateAllFilledOrEmptyStringsForBankDetails(mockBoxRetriever, "testBox") shouldBe Set(CtValidation(Some("testBox"),"error.testBox.allornone"))
verify(mockBoxRetriever).retrieveB920()
verify(mockBoxRetriever).retrieveB925()
verify(mockBoxRetriever).retrieveB930()
verify(mockBoxRetriever).retrieveB935()
verifyNoMoreInteractions(mockBoxRetriever)
}
}
"validateStringAsMandatoryIfPAYEEQ1False" should {
"return is-required error if PAYEEQ1 is false" in {
val mockBoxRetriever = mock[CT600BoxRetriever]
when(mockBoxRetriever.retrievePAYEEQ1()).thenReturn(PAYEEQ1(Some(false)))
validateStringAsMandatoryIfPAYEEQ1False(mockBoxRetriever, "testBox",testOptStringBox(None)) shouldBe Set(CtValidation(Some("testBox"),"error.testBox.required"))
verify(mockBoxRetriever).retrievePAYEEQ1()
verifyNoMoreInteractions(mockBoxRetriever)
}
"do not return is-required error if PAYEEQ1 is true" in {
val mockBoxRetriever = mock[CT600BoxRetriever]
when(mockBoxRetriever.retrievePAYEEQ1()).thenReturn(PAYEEQ1(Some(true)))
validateStringAsMandatoryIfPAYEEQ1False(mockBoxRetriever, "testBox",testOptStringBox(None)) shouldBe Set()
verify(mockBoxRetriever).retrievePAYEEQ1()
verifyNoMoreInteractions(mockBoxRetriever)
}
}
"validateAsMandatory" should {
"return error if None" in {
validateAsMandatory( testOptStringBox(None)) shouldBe Set(CtValidation(Some("testOptStringBox"), "error.testOptStringBox.required"))
}
"return no errors if any value present" in {
validateAsMandatory(testOptStringBox(Some("This is a string."))) shouldBe Set()
}
}
"validatePositiveInteger" should {
"return error if number is negative" in {
validateZeroOrPositiveInteger(testOptIntegerBox(Some(-1))) shouldBe Set(CtValidation(Some("testOptIntegerBox"), "error.testOptIntegerBox.mustBeZeroOrPositive"))
}
"return no errors if positive" in {
validateZeroOrPositiveInteger(testOptIntegerBox(Some(0))) shouldBe Set()
validateZeroOrPositiveInteger(testOptIntegerBox(Some(1))) shouldBe Set()
}
"return no errors if no value present" in {
validateZeroOrPositiveInteger(testOptIntegerBox(None)) shouldBe Set()
}
}
case class testOptBooleanBox(value: Option[Boolean]) extends CtBoxIdentifier("testBox") with CtOptionalBoolean{}
case class testOptIntegerBox(value: Option[Int]) extends CtBoxIdentifier("testBox") with CtOptionalInteger{}
case class testOptStringBox(value: Option[String]) extends CtBoxIdentifier("testBox") with CtOptionalString{}
case class testStringBox(value: String) extends CtBoxIdentifier("testBox") with CtString{}
case class testOptDateBox(value: Option[LocalDate]) extends CtBoxIdentifier("testBox") with CtOptionalDate{}
val regexString = "[0-9]{8}" // numbers only and 8 numbers long
}
| ahudspith-equalexperts/ct-calculations | src/test/scala/uk/gov/hmrc/ct/box/ValidatableBoxSpec.scala | Scala | apache-2.0 | 12,405 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.logical
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan._
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core.Values
import org.apache.calcite.rel.logical.LogicalValues
import org.apache.calcite.rel.metadata.{RelMdCollation, RelMetadataQuery}
import org.apache.calcite.rel.{RelCollation, RelCollationTraitDef, RelNode}
import org.apache.calcite.rex.RexLiteral
import java.util
import java.util.function.Supplier
/**
* Sub-class of [[Values]] that is a relational expression
* whose value is a sequence of zero or more literal row values in Flink.
*/
class FlinkLogicalValues(
cluster: RelOptCluster,
traitSet: RelTraitSet,
rowRelDataType: RelDataType,
tuples: ImmutableList[ImmutableList[RexLiteral]])
extends Values(cluster, rowRelDataType, tuples, traitSet)
with FlinkLogicalRel {
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new FlinkLogicalValues(cluster, traitSet, rowRelDataType, tuples)
}
override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
val dRows = mq.getRowCount(this)
// Assume CPU is negligible since values are precomputed.
val dCpu = 1
val dIo = 0
planner.getCostFactory.makeCost(dRows, dCpu, dIo)
}
}
private class FlinkLogicalValuesConverter
extends ConverterRule(
classOf[LogicalValues],
Convention.NONE,
FlinkConventions.LOGICAL,
"FlinkLogicalValuesConverter") {
override def convert(rel: RelNode): RelNode = {
val values = rel.asInstanceOf[LogicalValues]
FlinkLogicalValues.create(
rel.getCluster, values.getTraitSet, values.getRowType, values.getTuples())
}
}
object FlinkLogicalValues {
val CONVERTER: ConverterRule = new FlinkLogicalValuesConverter()
def create(
cluster: RelOptCluster,
traitSet: RelTraitSet,
rowType: RelDataType,
tuples: ImmutableList[ImmutableList[RexLiteral]]): FlinkLogicalValues = {
val mq = cluster.getMetadataQuery
var newTraitSet = cluster.traitSetOf(FlinkConventions.LOGICAL)
if (tuples.isEmpty && traitSet != null) {
// ReduceExpressionsRule may produce an empty Values node, and PruneEmptyRules will then
// remove the Sort rel node. That leaves an empty Values carrying a now-useless collation
// trait, so we need to keep the original collation for this conversion to succeed.
newTraitSet = newTraitSet.replaceIf(
RelCollationTraitDef.INSTANCE.asInstanceOf[RelTraitDef[RelTrait]],
new Supplier[RelTrait]() {
def get: RelTrait = {
traitSet.getTrait(RelCollationTraitDef.INSTANCE)
}
})
} else {
newTraitSet = newTraitSet.replaceIfs(
RelCollationTraitDef.INSTANCE, new Supplier[util.List[RelCollation]]() {
def get: util.List[RelCollation] = {
RelMdCollation.values(mq, rowType, tuples)
}
})
}
new FlinkLogicalValues(cluster, newTraitSet.simplify(), rowType, tuples)
}
}
| apache/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/logical/FlinkLogicalValues.scala | Scala | apache-2.0 | 4,012 |
/*
,i::,
:;;;;;;;
;:,,::;.
1ft1;::;1tL
t1;::;1,
:;::; _____ __ ___ __
fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_
CLft11 :,, i1tffLi \__ \ ____ / /|_/ // __ `// ___// __ \
1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / /
CLt1i :,: .1tfL. /____/ /_/ /_/ \__,_/ \___//_/ /_/
Lft1,:;: , 1tfL:
;it1i ,,,:::;;;::1tti AeonDB
.t1i .,::;;; ;1tt Copyright (c) 2014 S-Mach, Inc.
Lft11ii;::;ii1tfL: Author: [email protected]
.L1 1tt1ttt,,Li
...1LLLL...
*/
package s_mach.aeondb.impl
import s_mach.aeondb.LocalProjection
object EmptyLocalProjection extends LocalProjection[Any,Nothing] {
override val size = 0
override val keys = Iterable.empty
override def find(key: Any) = None
override def filterKeys(f: (Any) => Boolean) = this
override val toMap = Map.empty[Any,Nothing]
}
| S-Mach/aeondb | src/main/scala/s_mach/aeondb/impl/EmptyLocalProjection.scala | Scala | apache-2.0 | 1,071 |
package shop.model
import shop.infrastructure._
case class ShoppingItem(id: Option[Long], name: String, description: Option[String]){
def this(id: Long,name: String) = this(Some(id),name,None)
def this(name: String) = this(None,name,None)
def update(implicit registry: ComponentRegistry): Option[ShoppingItem] = {
registry.shoppingItemRepository.update(this)
}
def save(list: ShoppingList)(implicit registry: ComponentRegistry): Option[ShoppingItem] = {
registry.shoppingItemRepository.save(list,this).map( newId => this.copy(id=Some(newId)) )
}
private def findList(implicit registry: ComponentRegistry): Option[ShoppingList] = {
id.flatMap(registry.shoppingListRepository.findItemList(_))
}
}
| flurdy/shoppinglist | shopservice/src/main/scala/model/ShoppingItem.scala | Scala | mit | 757 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io.File
/**
 * Resolves paths to files added through `SparkContext.addFile()`.
*/
object SparkFiles {
/**
 * Get the absolute path of a file added through `SparkContext.addFile()`.
*/
def get(filename: String): String =
new File(getRootDirectory(), filename).getAbsolutePath()
/**
 * Get the root directory that contains files added through `SparkContext.addFile()`.
*/
def getRootDirectory(): String =
SparkEnv.get.sparkFilesDir
}
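
/**
 * A minimal usage sketch (not part of Spark itself): the driver ships a file to every node
 * with `SparkContext.addFile()`, and task code resolves the node-local copy with
 * `SparkFiles.get()`. The file path used below is just a placeholder.
 */
private[spark] object SparkFilesUsageSketch {
  def example(sc: SparkContext): Unit = {
    sc.addFile("/tmp/lookup-table.txt") // placeholder path (assumption)
    sc.parallelize(1 to 4).foreach { _ =>
      // Inside a task, the local copy lives under SparkFiles.getRootDirectory()
      println(SparkFiles.get("lookup-table.txt"))
    }
  }
}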
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/SparkFiles.scala | Scala | apache-2.0 | 1,534 |
// A contains a field A.theValue$$local accessible using the
// generated getter A.theValue()
class A(val theValue: Int) {
val theValueInA = theValue // use the constructor parameter theValue
def getTheValue = theValue // virtual call to the getter theValue()
}
// B contains a field B.theValue$$local accessible using the getter
// B.theValue() which overrides A.theValue()
class B(override val theValue: Int) extends A(42) {
val theValueInB = theValue
}
// Bz contains a field Bz.theValue$$local accessible using the getter
// Bz.theValue() which overrides A.theValue()
class Bz extends A(42) {
override val theValue: Int = 10
val theValueInBz = theValue
}
// C does not contains a field C.theValue$$local, it contains
// a getter C.theValue() which only calls super.theValue()
class C(override val theValue: Int) extends A(theValue)
// D contains a field D.other$$local and a corresponding getter.
class D(val other: Int) extends A(other)
// NonVal does not contain a field NonVal.theValue$$local.
class NonVal(theValue: Int) extends A(theValue) {
def getTheValueInNonVal = theValue // use the constructor parameter theValue
}
// X contains a field X.theValue$$local accessible using the getter
// X.theValue() which overrides A.theValue()
class X(override val theValue: Int) extends NonVal(0)
// Y contains a field Y.theValue$$local accessible using the getter
// Y.theValue() which overrides A.theValue()
class Y(override val theValue: Int) extends NonVal(theValue)
object Test {
def printFields(obj: Any) =
println(obj.getClass.getDeclaredFields.map(_.toString).sorted.deep.mkString("\n"))
def main(args: Array[String]): Unit = {
val b10 = new B(10)
val bz = new Bz
val c11 = new C(11)
val d12 = new D(12)
val nv13 = new NonVal(13)
val x14 = new X(14)
val y15 = new Y(15)
println("B:")
printFields(b10)
println("Bz:")
printFields(bz)
println("C:")
printFields(c11)
println("D:")
printFields(d12)
println("NonVal:")
printFields(nv13)
println("X:")
printFields(x14)
println("Y:")
printFields(y15)
assert(b10.getTheValue == 10)
assert(b10.theValue == 10)
assert(b10.theValueInB == 10)
assert(b10.theValueInA == 42)
assert(bz.getTheValue == 10)
assert(bz.theValue == 10)
assert(bz.theValueInBz == 10)
assert(bz.theValueInA == 42)
assert(x14.theValue == 14)
assert(x14.getTheValue == 14)
assert(x14.getTheValueInNonVal == 0)
assert(x14.theValueInA == 0)
}
}
| yusuke2255/dotty | tests/run/paramForwarding.scala | Scala | bsd-3-clause | 2,531 |
/* sbt -- Simple Build Tool
* Copyright 2008, 2009, 2010 Mark Harrah
*/
package sbt
import java.io.ByteArrayInputStream
import java.net.URL
import org.apache.ivy.{ core, plugins }
import core.module.descriptor.{ DefaultDependencyDescriptor, DefaultModuleDescriptor }
import core.settings.IvySettings
import plugins.parser.xml.XmlModuleDescriptorParser
import plugins.repository.Resource
import plugins.repository.url.URLResource
/** Subclasses the default Ivy file parser in order to provide access to protected methods.*/
private[sbt] object CustomXmlParser extends XmlModuleDescriptorParser {
import XmlModuleDescriptorParser.Parser
class CustomParser(settings: IvySettings, defaultConfig: Option[String]) extends Parser(CustomXmlParser, settings) {
def setSource(url: URL) =
{
super.setResource(new URLResource(url))
super.setInput(url)
}
def setInput(bytes: Array[Byte]) { setInput(new ByteArrayInputStream(bytes)) }
/** Overridden because the super implementation overwrites the module descriptor.*/
override def setResource(res: Resource) {}
override def setMd(md: DefaultModuleDescriptor) =
{
super.setMd(md)
if (defaultConfig.isDefined) setDefaultConfMapping("*->default(compile)")
}
override def parseDepsConfs(confs: String, dd: DefaultDependencyDescriptor) = super.parseDepsConfs(confs, dd)
override def getDefaultConf = defaultConfig.getOrElse(super.getDefaultConf)
}
}
| xeno-by/old-scalameta-sbt | ivy/src/main/scala/sbt/CustomXmlParser.scala | Scala | bsd-3-clause | 1,477 |
/*
* Copyright 2007-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package widgets {
package logchanger {
import scala.xml._
import scala.xml.transform._
import common._
import http._
import js._
import SHtml._
import util.Helpers._
import sitemap._
import Loc._
import org.slf4j.{Logger => SLF4JLogger, LoggerFactory}
/**
* Abstraction of a logging backend where the loglevel can be set
*/
trait LoggingBackend {
type LoggerType
type LevelType
/**
* All the loggers defined by this backend
*/
def loggers: Seq[LoggerType]
/**
* Enable the specified level on the logger
*/
def enableTrace(logger: LoggerType)
def enableDebug(logger: LoggerType)
def enableInfo(logger: LoggerType)
def enableWarn(logger: LoggerType)
def enableError(logger: LoggerType)
/**
* Is the level enabled for the logger
*/
def isTraceEnabled(logger: LoggerType): Boolean
def isDebugEnabled(logger: LoggerType): Boolean
def isInfoEnabled(logger: LoggerType): Boolean
def isWarnEnabled(logger: LoggerType): Boolean
def isErrorEnabled(logger: LoggerType): Boolean
/**
* Get Logger name
*/
def getName(logger: LoggerType): String
/**
* Get the level that is explicitly set for this logger or Empty if
* level is inherited from parent logger
*/
def getLevel(logger: LoggerType): Box[LevelType]
}
/**
* The Logback backend
*/
trait LogbackLoggingBackend extends LoggingBackend {
import ch.qos.logback.classic.{Level, LoggerContext};
import scala.collection.JavaConversions._
type LoggerType = ch.qos.logback.classic.Logger
type LevelType = Level
def loggers: Seq[LoggerType] = {
val context = LoggerFactory.getILoggerFactory().asInstanceOf[LoggerContext]
context.getLoggerList
}
def enableTrace(logger: LoggerType) = logger.setLevel(Level.TRACE)
def enableDebug(logger: LoggerType) = logger.setLevel(Level.DEBUG)
def enableInfo(logger: LoggerType) = logger.setLevel(Level.INFO)
def enableWarn(logger: LoggerType) = logger.setLevel(Level.WARN)
def enableError(logger: LoggerType) = logger.setLevel(Level.ERROR)
def isTraceEnabled(logger: LoggerType) = logger.isTraceEnabled
def isDebugEnabled(logger: LoggerType) = logger.isDebugEnabled
def isInfoEnabled(logger: LoggerType) = logger.isInfoEnabled
def isWarnEnabled(logger: LoggerType) = logger.isWarnEnabled
def isErrorEnabled(logger: LoggerType) = logger.isErrorEnabled
def getName(logger: LoggerType) = logger.getName
def getLevel(logger: LoggerType) = (Box !! logger.getLevel)
}
/**
* The Log4j backend
*/
trait Log4jLoggingBackend extends LoggingBackend {
import org.apache.log4j.{Level, LogManager};
type LoggerType = org.apache.log4j.Logger
type LevelType = Level
def loggers: Seq[LoggerType] = {
val javaLoggers = LogManager.getCurrentLoggers()
var ls:List[LoggerType] = org.apache.log4j.Logger.getRootLogger() :: Nil
while (javaLoggers.hasMoreElements()) {
ls = javaLoggers.nextElement().asInstanceOf[LoggerType] :: ls
}
ls
}
def enableTrace(logger: LoggerType) = logger.setLevel(Level.TRACE)
def enableDebug(logger: LoggerType) = logger.setLevel(Level.DEBUG)
def enableInfo(logger: LoggerType) = logger.setLevel(Level.INFO)
def enableWarn(logger: LoggerType) = logger.setLevel(Level.WARN)
def enableError(logger: LoggerType) = logger.setLevel(Level.ERROR)
def isTraceEnabled(logger: LoggerType) = logger.isTraceEnabled
def isDebugEnabled(logger: LoggerType) = logger.isDebugEnabled
def isInfoEnabled(logger: LoggerType) = logger.isInfoEnabled
def isWarnEnabled(logger: LoggerType) = logger.isEnabledFor(Level.WARN)
def isErrorEnabled(logger: LoggerType) = logger.isEnabledFor(Level.ERROR)
def getName(logger: LoggerType) = logger.getName
def getLevel(logger: LoggerType) = (Box !! logger.getLevel)
}
object LogLevelChanger {
/**
* register the resources with lift (typically in boot)
*/
def init() {
ResourceServer.allow({
case "logchanger" :: _ => true
})
}
}
/**
* Mixin for creating a page that allows dynamic changing of log levels
*
* Generates a list of all defined loggers, their current level and links for
* changing the level.
*
* Must be mixed into a LoggingBackend for the logging system used
*
 * e.g.
*
* object LogLevelChanger extends Log4jLoggingBackend with LogLevelChanger
*
* Then add LogLevelChanger.menu to the SiteMap
*/
trait LogLevelChanger {
self: LoggingBackend =>
/**
* Override to include new Params for menu
*/
def menuLocParams: List[Loc.AnyLocParam] = Nil
/**
* Override to change the path
*/
def path = List("loglevel", "change")
/**
* Override to change the display of the list
*/
def screenWrap: Box[Node] = Full(<lift:surround with="default" at="content"><lift:bind /></lift:surround>)
protected def wrapIt(in: NodeSeq): NodeSeq = screenWrap.map(new RuleTransformer(new RewriteRule {
override def transform(n: Node) = n match {
case e: Elem if "bind" == e.label && "lift" == e.prefix => in
case _ => n
}
})) openOr in
/**
* Add this to the SiteMap in order to get access to the page
*/
def menu: Menu = Menu(Loc("Change Loglevels", path, "Change Loglevels",
Template(() => wrapIt(changeLogLevel))::menuLocParams))
/**
* CSS styles used to style the log levels
*/
def css:NodeSeq = <head>
<link rel="stylesheet" href={"/" + LiftRules.resourceServerPath +"/logchanger/logchanger.css"} type="text/css" />
</head>
/**
* Template used to render the loggers
*/
def xhtml:NodeSeq = css ++
<div id="logLevels">
<table>
<thead>
<tr>
<th>Logger name</th>
<th>Level</th>
</tr>
</thead>
<tbody>
<logLevels:rows>
<tr>
<td><row:name/></td><td><row:level/></td>
</tr>
</logLevels:rows>
</tbody>
</table>
</div>
def changeLogLevel: NodeSeq = {
def doRows(in: NodeSeq): NodeSeq = {
val ls = loggers.toList sort {getName(_) < getName(_)}
ls flatMap {l =>
def loggerChoices(logger: LoggerType): NodeSeq = {
val levelTexts:List[(String, Boolean, LoggerType => Unit)] = List(
("trace", isTraceEnabled(logger), enableTrace _),
("debug", isDebugEnabled(logger) && !isTraceEnabled(logger), enableDebug _),
("info", isInfoEnabled(logger) && !isDebugEnabled(logger), enableInfo _),
("warn", isWarnEnabled(logger) && !isInfoEnabled(logger), enableWarn _),
("error", isErrorEnabled(logger) && !isWarnEnabled(logger), enableError _))
val t:List[NodeSeq] = levelTexts.map(t =>
if (t._2) // Current level, render span with no action
<span class={"l_"+t._1}>{t._1}</span>
else // Not current level, render a tag that enables the level when clicked
a(() => {t._3(logger); JsCmds.Replace("logLevels", changeLogLevel)},
Text(t._1),
"class" -> ("l_"+t._1),
"title" -> "Set log level for [%s] to %s".format(getName(logger),t._1))
)
t.reduceLeft(_ ++ Text("|") ++ _)
}
bind("row", in,
"name" -> getLevel(l).dmap(Text(getName(l)):NodeSeq)(lv => <b>{getName(l)}</b>),
"level" -> loggerChoices(l))
}
}
bind("logLevels", xhtml, "rows" -> doRows _)
}
}
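
/**
 * A minimal wiring sketch, assuming a Logback-based application: combine one of the backends
 * above with the LogLevelChanger trait and register the menu from Boot, roughly as in the
 * commented lines below. The object name and the Boot snippet are illustrative only.
 *
 * LogLevelChanger.init() // allow the widget's CSS to be served
 * LiftRules.setSiteMap(SiteMap(ApplicationLogLevelChanger.menu)) // plus the app's other menus
 */
object ApplicationLogLevelChanger extends LogbackLoggingBackend with LogLevelChanger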
}}}
| wsaccaco/lift | framework/lift-modules/lift-widgets/src/main/scala/net/liftweb/widgets/logchanger/LogLevelChanger.scala | Scala | apache-2.0 | 8,311 |
import java.util.Dictionary
object HelloWorld {
// Function arguments are typed
// Note the implicit return statement
def add(firstVal: Int, secondVal: Int): Int = firstVal + secondVal
def myName(): String = "Luke"
// This is a "curried" function that allows us to apply it's arguments at different times.
def multiply(m: Int)(n: Int): Int = m * n
// The suffixed wildcard indicates that 0..* strings can be passed into this function
def capitalizeAll(args: String*) = {
// Map allows us to apply operations to all elements within a collection
args.map { arg =>
arg.capitalize
}
}
// A basic class definition
class Calculator {
def add(firstVal: Int, secondVal: Int): Int = firstVal + secondVal
def subtract(firstVal: Int, secondVal: Int): Int = firstVal - secondVal
def multiply(firstVal: Int, secondVal: Int): Int = firstVal * secondVal
def divide(firstVal: Int, secondVal: Int): Int = firstVal / secondVal
}
// A class with a constructor
// Unlike other languages, constructors aren't explicit (no "Register" method, no __init__)
// Instead all passed in values are immediately available as if they were fields in the class
// Any code outside function definitions is part of the constructor
// Note again the implicit return statement
class Register(names: String*) {
def getAttendees(): String = {
var attendees = "Today's attendees are: "
for (n <- names)
attendees += n + " "
attendees
}
}
// Demonstrates inheritance of a super class
class ScientificCalculator(brand: String) extends Calculator() {
def log(m: Double, base: Double) = math.log(m) / math.log(base)
}
// Demonstrates inheritance and method overloading (again it's implicit)
class EvenMoreScientificCalculator(brand: String) extends ScientificCalculator(brand) {
def log(m: Int): Double = log(m, math.exp(1))
}
// Abstract classes are lightweight. As with standard OOP these cannot be instantiated
abstract class Shape {
def getArea():Int // subclass should define this
}
// Concrete class. The compiler will not allow it to exist without implementing "getArea"
class Circle(r: Int) extends Shape {
def getArea():Int = { r * r * 3 }
}
// Traits allow for polymorphism. Any class inheriting both Car and Shiny will have "brand" and "shineRefraction"
trait Car { val brand: String }
trait Shiny { val shineRefraction: Int }
trait Clean { val amountOfDirt: Int }
// The "with" keyword allows for polymorphism using multiple traits
class BMW extends Car with Shiny with Clean {
val brand = "BMW"
val shineRefraction = 12
val amountOfDirt = 0
}
// Traits can also be used for generics
trait Cache[K, V] {
def get(key: K): V
def put(key: K, value: V)
def delete(key: K)
}
// By extending the trait and defining the expected types, we've got a powerful generics engine
// Note "???" is actually a thing in Scala, it's like NotImplementedException
// Also, Unit is an explicit "Void" return type
class MemoryCache extends Cache[Int, Int] {
override def get(key: Int): Int = ???
override def put(key: Int, value: Int): Unit = ???
override def delete(key: Int): Unit = ???
}
def main(args: Array[String]): Unit = {
// "val" are immutable constants
val two = 1 + 1
//two = 3 <- this would error, you cannot reassign a declared val
// "var" are mutable variables
var name = "steve"
name = "steve's mate" // Totally ok
val five = add(2,3)
// Functions without arguments can be called without parenthesis
println(myName)
// This is an anonymous inline function
var addANumber = (x: Int) => x + 1
// ...which can be reassigned if a var is used
addANumber = (x: Int) => x + 2
println(addANumber(1))
// "Curried" functions can be used as a normal function
println(multiply(2)(3))
// Alternatively, "curried" functions can be assigned arguments over time
// Note the trailing underscore
val timesTwo = multiply(2) _
println(timesTwo(3))
// This prints the varargs collection's toString, e.g. "ArrayBuffer(Bob, Jane, Dingo)"
// (the exact collection name depends on the Scala version). Note that map runs eagerly;
// the output only looks unchanged because the inputs are already capitalized.
val names = capitalizeAll("Bob", "Jane", "Dingo")
println(names)
// For allows us to iterate over the array
for (n <- names) println(n)
// Instantiating a basic class
val calc = new Calculator
calc.subtract(10, 2)
val register = new Register("Jim", "Carl", "Frank")
println(register.getAttendees())
}
}
| lukemerrett/LearningScala | learning/src/1-Basics.scala | Scala | mit | 4,604 |
/*******************************************************************************
Copyright 2009,2011, Oracle and/or its affiliates.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.scala_src.useful
import _root_.java.util.{HashMap => JHashMap}
import _root_.java.util.{HashSet => JHashSet}
import _root_.java.util.{List => JList}
import _root_.java.util.{ArrayList => JArrayList}
import _root_.java.util.{Map => JMap}
import _root_.java.util.{Set => JSet}
import _root_.java.util.{Collections => JCollections}
import _root_.java.lang.{Integer => JInteger}
import edu.rice.cs.plt.tuple.{Option => JOption}
import kr.ac.kaist.jsaf.nodes
import scala.collection.JavaConversions
object ASTGenHelper {
def needsScalafication(o: Any): Boolean = o match {
case o: JOption[_] => true
case o: JList[_] => true
case o: JMap[_, _] => true
case o: JSet[_] => true
case o: JInteger => true
case _ => false
}
def scalaify(typ: Any): Any = typ match {
case o: JOption[_] => {
if (o.isSome)
Some(scalaify(o.unwrap))
else
None
}
case l: JList[_] => {
if (l.isEmpty) {
Nil
} else {
val r = l.toArray.toList
if (needsScalafication(r.head)) {
r.map(scalaify)
} else {
r
}
}
}
case m: JMap[_, _] => {
var accum = Map[Any, Any]()
for (k <- (Map.empty ++ JavaConversions.mapAsScalaMap(m)).keySet) {
accum += ((scalaify(k), scalaify(m.get(k))))
}
accum
}
case s: JSet[_] => {
var accum = Set[Any]()
for (e <- Set.empty ++ JavaConversions.asScalaSet(s)) {
accum = accum + scalaify(e)
}
accum
}
case i: JInteger => i.intValue
case _ => typ
}
def needsJavafication(o: Any): Boolean = o match {
case o: Option[_] => true
case o: List[_] => true
case o: Map[_,_] => true
case o: Set[_] => true
case o: Int => true
case _ => false
}
def javaify(typ: Any): Object = typ match {
case Some(t) => JOption.some(javaify(t))
case None => JOption.none
case l: List[_] => {
val m = l match {
case head::_ if needsJavafication(l.head) => l.map(javaify)
case _ => l
}
Lists.toJavaList(m)
}
case m: Map[_, _] => {
val accum = new JHashMap[Object, Object]()
val keyset = m.keysIterator
for (k <- keyset) {
accum.put(javaify(k), javaify(m.apply(k)))
}
accum
}
case s: Set[_] => {
val accum = new JHashSet[Object]()
for (e <- s) {
accum.add(javaify(e))
}
accum
}
case i: Int => JInteger.valueOf(i)
case _ => typ.asInstanceOf[Object]
}
}
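
// A minimal round-trip sketch of the two conversions above; the helper object below is only
// illustrative and not part of the original utility.
object ASTGenHelperUsageSketch {
  def roundTrip(names: JList[String]): JList[_] = {
    val scalaNames = ASTGenHelper.scalaify(names)           // java.util.List -> scala List
    ASTGenHelper.javaify(scalaNames).asInstanceOf[JList[_]] // scala List -> java.util.List
  }
}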
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/scala_src/useful/ASTGenHelper.scala | Scala | bsd-3-clause | 2,947 |
package models
import play.api.libs.json._
case class JsonResponse(status:String, data:String) {
implicit val JsonResponseWrites = new Writes[JsonResponse] {
def writes(jsonresp: JsonResponse) = Json.obj(
"status" -> jsonresp.status,
"data" -> jsonresp.data
)
}
}
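
// A minimal usage sketch: because JsonResponseWrites lives on the instance rather than in a
// companion object, it has to be imported from a concrete value before Json.toJson can find
// it implicitly. (A companion-object implicit is the more common design.) The object below is
// illustrative only.
object JsonResponseUsageSketch {
  def render(status: String, data: String): JsValue = {
    val resp = JsonResponse(status, data)
    import resp.JsonResponseWrites
    Json.toJson(resp)
  }
}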
| odnanref/carros | app/models/JsonResponse.scala | Scala | apache-2.0 | 286 |
package com.automatak.render.dnp3.objects.generators
import java.nio.file.Path
import com.automatak.render.dnp3.objects._
import com.automatak.render._
import com.automatak.render.cpp._
import com.automatak.render.LicenseHeader
object GroupVariationFileGenerator {
def apply(path: Path) = {
implicit val indent = CppIndentation()
def headerPath(group: ObjectGroup): Path = path.resolve(group.name+".h")
def implPath(group: ObjectGroup): Path = path.resolve(group.name+".cpp")
def definitions(group: ObjectGroup): Iterator[String] = spaced(group.objects.iterator.map(o => comment(o.fullDesc) ++ o.declaration))
def implementations(group: ObjectGroup): Iterator[String] = spaced(group.objects.iterator.map(o => o.implLines))
def optionalIncludes(group: ObjectGroup) : Set[String] = {
def getEnums(gv: GroupVariation): List[String] = {
def extract(typ: FixedSizeFieldType): Option[String] = typ match {
case EnumFieldType(model) => Some(quoted("opendnp3/gen/"+model.name+".h"))
case _ => None
}
gv match {
case fs : FixedSize => fs.fields.flatMap(f => extract(f.typ))
case _ => Nil
}
}
def include(file: String): String = "#include " + file
group.objects.flatMap(o => getEnums(o)).map(include).toSet
}
def includeHeader(group: ObjectGroup): Iterator[String] = Iterator("#include " + quoted(group.name+".h"))
def headerFile(group: ObjectGroup): Iterator[String] = {
commented(LicenseHeader()) ++ space ++
includeGuards(group.name.toUpperCase) {
headerIncludes(group) ++
optionalIncludes(group) ++ space ++
namespace("opendnp3") {
definitions(group)
}
}
}
def implFile(group: ObjectGroup): Iterator[String] = {
val defs = implementations(group)
if(defs.isEmpty) Iterator.empty
else
{
commented(LicenseHeader()) ++ space ++
includeHeader(group) ++ space ++
implIncludes(group) ++ space ++
Iterator("using namespace openpal;") ++ space ++
namespace("opendnp3") {
defs
}
}
}
def headerIncludes(group: ObjectGroup): Iterator[String] = {
group.objects.flatMap(_.headerIncludes).distinct.map(x => "#include %s".format(x)).toIterator
}
def implIncludes(group: ObjectGroup): Iterator[String] = {
group.objects.flatMap(_.implIncludes).distinct.map(x => "#include %s".format(x)).toIterator
}
ObjectGroup.all.foreach { g =>
writeTo(headerPath(g))(headerFile(g))
(writeTo(implPath(g))(implFile(g)))
}
}
}
| thiagoralves/OpenPLC_v2 | dnp3/generation/dnp3/src/main/scala/com/automatak/render/dnp3/objects/generators/GroupVariationFileGenerator.scala | Scala | gpl-3.0 | 2,670 |
package com.anadathur.elastic.easymapping
import javax.lang.model.element.Modifier
import com.squareup.javapoet.MethodSpec.Builder
import com.squareup.javapoet._
class DefaultMappingGen extends TypeGenerator {
import scala.collection.JavaConversions._
val generators = List(
generateMappingType _,
generateDefaultMappingType _,
generateMappingTypeProperty _,
generateDefaultMappingTypeProperty _
)
override def apply(config: Config): List[TypeSpec] = {
generators.map(_.apply(config)).toList
}
def generateDefaultMappingType(config: Config): TypeSpec = {
val params = getParamsOfMappingType(config)
val builder =
TypeSpec.classBuilder("DefaultMappingType")
.addModifiers(Modifier.PUBLIC)
.addSuperinterface(config.mappingTypeClass)
.addFields(params.values.map {_.fieldBuilder.addModifiers(privFinal: _*).build() }.toList)
val constructorParams = params.values.map { _.paramBuilder.build() }
val codeBlockBuilder = CodeBlock.builder()
for (param <- params.values) {
codeBlockBuilder.addStatement(s"this.${param.name} = ${param.name}")
}
val constructorBuilder =
MethodSpec.constructorBuilder()
.addModifiers(Modifier.PUBLIC)
.addParameters(constructorParams.toList)
.addCode(codeBlockBuilder.build())
val methods = getMethodBuilders(params).map { case (name, typeName, bldr) =>
bldr.addCode(CodeBlock.builder().addStatement(s"return $name").build())
.build()
}
val hashSet: ClassName = ClassName.get("java.util", "HashSet")
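    // isSimpleType(): builds a HashSet of the primitive type names and checks this.typeName against it.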
val simpleMethod =
MethodSpec.methodBuilder("isSimpleType")
.returns(TypeName.BOOLEAN)
.addModifiers(Modifier.PUBLIC)
.addStatement("$T set = new $T<>()", hashSet, hashSet)
Array("string", "integer", "long", "float", "double", "boolean", "date").foreach {name =>
simpleMethod.addStatement("set.add($S)", name)
}
simpleMethod.addStatement("return set.contains(this.typeName)")
builder.addMethod(constructorBuilder.build())
.addMethods(methods.toList)
.addMethod(simpleMethod.build())
.build()
}
def getMethodBuilders(params: Map[String, MetaParam]) =
params.values.map { method =>
(method.name, method.typeName, method.methodBuilder)
}
def getMethodName(name: String, typeName: TypeName) =
(if (typeName.equals(TypeName.BOOLEAN)) "is" else "get") + name.capitalize
def generateMappingType(config: Config): TypeSpec = {
val fields =
List("string", "integer", "long", "float", "double", "boolean", "date", "binary", "object", "nested")
.map { name =>
FieldSpec.builder(classOf[String], name.toUpperCase + "_TYPE")
.addModifiers(Modifier.PUBLIC, Modifier.STATIC, Modifier.FINAL)
          .initializer("\"" + name + "\"")
.build()
}
val builders: Iterable[(String, TypeName, Builder)] = getMethodBuilders(getParamsOfMappingType(config))
val builder =
TypeSpec.interfaceBuilder("MappingType")
.addModifiers(Modifier.PUBLIC)
.addMethods(builders.map {_._3.addModifiers(Modifier.ABSTRACT, Modifier.PUBLIC).build()}.toList)
.addMethod(
MethodSpec.methodBuilder("isSimpleType")
.addModifiers(Modifier.PUBLIC, Modifier.ABSTRACT)
.returns(TypeName.BOOLEAN)
.build()
)
.addFields(fields)
builder.build()
}
def generateMappingTypeProperty(config: Config): TypeSpec = {
val methods = getMethodBuilders(getParamsOfMappingTypeParam(config)).map { case (name, tName, bldr) =>
bldr.addModifiers(pubAbs: _*).build()
}
TypeSpec.interfaceBuilder("MappingTypeProperty")
.addModifiers(Modifier.PUBLIC)
.addMethods(methods.toList)
.build()
}
def generateDefaultMappingTypeProperty(config: Config): TypeSpec = {
val param: Map[String, MetaParam] = getParamsOfMappingTypeParam(config)
val methods = getMethodBuilders(param).map { case (name, tName, bldr) =>
bldr.addModifiers(Modifier.PUBLIC)
.addStatement("return $N", name)
.build()
}
val block = CodeBlock.builder()
param.values.foreach { p => block.addStatement("this.$N = $N", p.name, p.name)}
TypeSpec.classBuilder("DefaultMappingTypeProperty")
.addModifiers(Modifier.PUBLIC)
.addMethod(
MethodSpec.constructorBuilder()
.addModifiers(Modifier.PUBLIC)
.addParameters(param.values.map(_.paramBuilder.build()).toList)
.addCode(block.build())
.build()
)
.addFields(param.values.map(_.fieldBuilder.addModifiers(privFinal: _*).build()).toList)
.addMethods(methods.toList)
.build()
}
} | ajaykumarns/easymapping | src/main/scala/com/anadathur/elastic/easymapping/generators.scala | Scala | apache-2.0 | 4,756 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.web
package services
import model.JsNodeId
import com.normation.inventory.domain._
import com.normation.inventory.ldap.core.LDAPConstants
import com.unboundid.ldap.sdk.DN
import com.normation.rudder.web.components.DateFormaterService
import com.normation.inventory.services.core.ReadOnlySoftwareDAO
import scala.xml._
import net.liftweb.common._
import net.liftweb.http._
import net.liftweb.util._
import Helpers._
import net.liftweb.http.js._
import JsCmds._
import JE.{JsRaw, JsVar, JsArray, Str}
import net.liftweb.http.SHtml._
import com.normation.exceptions.TechnicalException
import net.liftweb.http.Templates
import org.joda.time.DateTime
import com.normation.rudder.services.servers.RemoveNodeService
import com.normation.rudder.web.model.CurrentUser
import com.normation.rudder.batch.AsyncDeploymentAgent
import com.normation.rudder.batch.AutomaticStartDeployment
import com.normation.utils.StringUuidGenerator
import com.normation.eventlog.ModificationId
import bootstrap.liftweb.RudderConfig
import com.normation.rudder.web.model.JsInitContextLinkUtil
import com.normation.rudder.domain.nodes.NodeProperty
import com.normation.rudder.domain.nodes.{Node => RudderNode}
import com.normation.cfclerk.xmlparsers.CfclerkXmlConstants.DEFAULT_COMPONENT_KEY
import com.normation.rudder.domain.nodes.NodeInfo
import com.normation.rudder.domain.policies.PolicyMode._
import com.normation.rudder.domain.policies.PolicyModeOverrides._
import com.normation.rudder.domain.policies.GlobalPolicyMode
/**
* A service used to display details about a server
* inventory with tabs like:
* [general][software][network][file system]
*
* Use it by calling:
* # head if not yet called in that page
* # show(nodeId) : NodeSeq
* where you want to display node information
* # jsInit(nodeId) : Cmd
* to init javascript for it
*/
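// Illustrative usage from a snippet/page (identifiers here are hypothetical):
// val summary = DisplayNode.show(fullInventory, showExtraFields = true, salt = "popup")
// val jsSetup = DisplayNode.jsInit(fullInventory.node.main.id, softwareIds, salt = "popup")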
object DisplayNode extends Loggable {
private[this] val getSoftwareService = RudderConfig.readOnlySoftwareDAO
private[this] val removeNodeService = RudderConfig.removeNodeService
private[this] val asyncDeploymentAgent = RudderConfig.asyncDeploymentAgent
private[this] val uuidGen = RudderConfig.stringUuidGenerator
private[this] val nodeInfoService = RudderConfig.nodeInfoService
private[this] val configService = RudderConfig.configService
private[this] val deleteNodePopupHtmlId = "deleteNodePopupHtmlId"
private[this] val errorPopupHtmlId = "errorPopupHtmlId"
private[this] val successPopupHtmlId = "successPopupHtmlId"
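  // Software is loaded lazily: this builds the software grid DataTable only when requested
  // (see the tabsactivate handler wired up in jsInit below).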
private def loadSoftware(jsId:JsNodeId, softIds:Seq[SoftwareUuid])(nodeId:String):JsCmd = {
(for {
seq <- getSoftwareService.getSoftware(softIds)
gridDataId = htmlId(jsId,"soft_grid_data_")
gridId = "soft"
} yield SetExp(JsVar(gridDataId),JsArray(seq.map { x => JsArray(
Str(x.name.getOrElse("")),
Str(x.version.map(_.value).getOrElse("")),
Str(x.description.getOrElse(""))
)}:_*) ) & JsRaw(s"""
$$('#${htmlId(jsId,gridId+"_")}').dataTable({
"aaData":${gridDataId},
"bJQueryUI": true,
"bPaginate": true,
"bRetrieve": true,
"bLengthChange": true,
"sPaginationType": "full_numbers",
"asStripeClasses": [ 'color1', 'color2' ] ,
"oLanguage": {
"sSearch": ""
},
"bLengthChange": true,
"bStateSave": true,
"fnStateSave": function (oSettings, oData) {
localStorage.setItem( 'DataTables_${gridId}', JSON.stringify(oData) );
},
"fnStateLoad": function (oSettings) {
return JSON.parse( localStorage.getItem('DataTables_${gridId}') );
},
"bAutoWidth": false,
"aoColumns": [ {"sWidth": "200px"},{"sWidth": "150px"},{"sWidth": "350px"}],
"sDom": '<"dataTables_wrapper_top"fl>rt<"dataTables_wrapper_bottom"ip>',
"lengthMenu": [ [10, 25, 50, 100, 500, 1000, -1], [10, 25, 50, 100, 500, 1000, "All"] ],
"pageLength": 25
});
$$('.dataTables_filter input').attr("placeholder", "Filter");
""")
) match {
case Empty => Alert("No software found for that server")
case Failure(m,_,_) => Alert("Error when trying to fetch software. Reported message: "+m)
case Full(js) => js
}
}
def jsInit(nodeId:NodeId, softIds:Seq[SoftwareUuid], salt:String=""):JsCmd = {
val jsId = JsNodeId(nodeId,salt)
val detailsId = htmlId(jsId,"details_")
val softGridDataId = htmlId(jsId,"soft_grid_data_")
val softGridId = htmlId(jsId,"soft_")
val softPanelId = htmlId(jsId,"sd_soft_")
var eltIdswidth = List( ("process",List("50","50","50","60","120","50","100","850"),1),("var",List("200","800"),0))
val eltIds = List( "vm", "fs", "net","bios", "controllers", "memories", "ports", "processors", "slots", "sounds", "storages", "videos")
JsRaw("var "+softGridDataId +"= null") &
OnLoad(
JsRaw("$('#"+detailsId+"').tabs()") &
{ eltIds.map { i =>
JsRaw(s"""
$$('#${htmlId(jsId,i+"_")}').dataTable({
"bJQueryUI": true,
"bRetrieve": true,
"bFilter": true,
"asStripeClasses": [ 'color1', 'color2' ],
"oLanguage": {
"sSearch": ""
},
"bLengthChange": true,
"bStateSave": true,
"fnStateSave": function (oSettings, oData) {
localStorage.setItem( 'DataTables_${i}', JSON.stringify(oData) );
},
"fnStateLoad": function (oSettings) {
return JSON.parse( localStorage.getItem('DataTables_${i}') );
},
"sPaginationType": "full_numbers",
"bPaginate": true,
"bAutoWidth": false,
"bInfo":true,
"sDom": '<"dataTables_wrapper_top"fl>rt<"dataTables_wrapper_bottom"ip>',
"lengthMenu": [ [10, 25, 50, 100, 500, 1000, -1], [10, 25, 50, 100, 500, 1000, "All"] ],
"pageLength": 25
});
$$('.dataTables_filter input').attr("placeholder", "Filter");
| """.stripMargin('|')):JsCmd
}.reduceLeft( (i,acc) => acc & i )
} &
{ eltIdswidth.map { case (id,columns,sorting) =>
JsRaw(s"""
$$('#${htmlId(jsId,id+"_")}').dataTable({
"bJQueryUI": true,
"bRetrieve": true,
"sPaginationType": "full_numbers",
"bFilter": true,
"asStripeClasses": [ 'color1', 'color2' ],
"bPaginate": true,
"aoColumns": ${columns.map(col => s"{'sWidth': '${col}px'}").mkString("[",",","]")} ,
"aaSorting": [[ ${sorting}, "asc" ]],
"oLanguage": {
"sSearch": ""
},
"bLengthChange": true,
"bStateSave": true,
"fnStateSave": function (oSettings, oData) {
localStorage.setItem( 'DataTables_${id}', JSON.stringify(oData) );
},
"fnStateLoad": function (oSettings) {
return JSON.parse( localStorage.getItem('DataTables_${id}') );
},
"bAutoWidth": false,
"bInfo":true,
"sDom": '<"dataTables_wrapper_top"fl>rt<"dataTables_wrapper_bottom"ip>',
"lengthMenu": [ [10, 25, 50, 100, 500, 1000, -1], [10, 25, 50, 100, 500, 1000, "All"] ],
"pageLength": 25
});
$$('.dataTables_filter input').attr("placeholder", "Filter");
""") : JsCmd
}.reduceLeft( (i,acc) => acc & i )
} &
// for the software tab, we check for the panel id, and the firstChild id
// if the firstChild.id == softGridId, then it hasn't been loaded, otherwise it is softGridId_wrapper
JsRaw(s"""
$$("#${detailsId}").on( "tabsactivate", function(event, ui) {
if(ui.newPanel.attr('id')== '${softPanelId}' ){
${ SHtml.ajaxCall(JsRaw("'"+nodeId.value+"'"), loadSoftware(jsId, softIds) )._2.toJsCmd}
}
});
""")
)
}
/**
* Show details about the server in a tabbed fashion if
* the server exists, display an error message if the
* server is not found or if a problem occurred when fetching it
*
   * showExtraFields : if true, then everything is shown; otherwise, the extra fields are not in the main tabs.
   * To show them, look at showExtraHeader.
   *
   * Salt is a string added to every used id.
   * It is useful only if you have several DisplayNode elements on a single page.
*/
def show(sm:FullInventory, showExtraFields : Boolean = true, salt:String = "") : NodeSeq = {
val jsId = JsNodeId(sm.node.main.id,salt)
val mainTabDeclaration : List[NodeSeq] =
<li><a href={htmlId_#(jsId,"sd_bios_")}>BIOS</a></li> ::
<li><a href={htmlId_#(jsId,"sd_controllers_")}>Controllers</a></li> ::
<li><a href={htmlId_#(jsId,"sd_memories_")}>Memory</a></li> ::
<li><a href={htmlId_#(jsId,"sd_ports_")}>Ports</a></li> ::
<li><a href={htmlId_#(jsId,"sd_processors_")}>Processors</a></li> ::
<li><a href={htmlId_#(jsId,"sd_slots_")}>Slots</a></li> ::
<li><a href={htmlId_#(jsId,"sd_sounds_")}>Sound</a></li> ::
<li><a href={htmlId_#(jsId,"sd_storages_")}>Storage</a></li> ::
<li><a href={htmlId_#(jsId,"sd_videos_")}>Video</a></li> ::
Nil
val tabContent =
{ if (showExtraFields) displayTabFilesystems(jsId, sm) else Nil } ::
{ if (showExtraFields) displayTabNetworks(jsId, sm) else Nil } ::
{ if (showExtraFields) displayTabSoftware(jsId) else Nil } ::
displayTabBios(jsId, sm) ::
displayTabControllers(jsId, sm) ::
displayTabMemories(jsId, sm) ::
displayTabPorts(jsId, sm) ::
displayTabProcessors(jsId, sm) ::
displayTabSlots(jsId, sm) ::
displayTabSounds(jsId, sm) ::
displayTabStorages(jsId, sm) ::
displayTabVideos(jsId, sm) ::
Nil
val tabId = htmlId(jsId,"hardware_details_")
<div id={tabId} class="sInventory ui-tabs-vertical">
<ul>{mainTabDeclaration}</ul>
{tabContent.flatten}
</div> ++ Script(OnLoad(JsRaw(s"$$('#${tabId}').tabs()")))
}
/**
* show the extra tabs header part
*/
def showExtraHeader(sm:FullInventory, salt:String = "") : NodeSeq = {
val jsId = JsNodeId(sm.node.main.id,salt)
<xml:group>
<li><a href={htmlId_#(jsId,"sd_fs_")}>File systems</a></li>
<li><a href={htmlId_#(jsId,"sd_net_")}>Network interfaces</a></li>
<li><a href={htmlId_#(jsId,"sd_soft_")}>Software</a></li>
<li><a href={htmlId_#(jsId,"sd_var_")}>Environment</a></li>
<li><a href={htmlId_#(jsId,"sd_process_")}>Processes</a></li>
<li><a href={htmlId_#(jsId,"sd_vm_")}>Virtual machines</a></li>
<li><a href={htmlId_#(jsId,"sd_props_")}>Properties</a></li>
</xml:group>
}
/**
* show the extra part
* If there is no node available (pending inventory), there is nothing to show
*/
def showExtraContent(node: Option[NodeInfo], sm: FullInventory, salt:String = "") : NodeSeq = {
val jsId = JsNodeId(sm.node.main.id,salt)
displayTabFilesystems(jsId, sm) ++
displayTabNetworks(jsId, sm) ++
displayTabVariable(jsId, sm) ++
displayTabProcess(jsId, sm) ++
displayTabVM(jsId, sm) ++
node.map(displayTabProperties(jsId, _)).getOrElse(Nil) ++
displayTabSoftware(jsId)
}
/**
* Show the details in a panned version, with Node Summary, Inventory, Network, Software
* Should be used with jsInit(dn:String, softIds:Seq[SoftwareUuid], salt:String="")
*/
def showPannedContent(
nodeAndGlobalMode: Option[(NodeInfo,GlobalPolicyMode)]
, sm : FullInventory
, inventoryStatus : InventoryStatus
, salt : String = ""
) : NodeSeq = {
val jsId = JsNodeId(sm.node.main.id,salt)
val detailsId = htmlId(jsId,"details_")
<div id={detailsId} class="tabs">
<ul>
<li><a href={htmlId_#(jsId,"node_summary_")}>Summary</a></li>
<li><a href={htmlId_#(jsId,"node_inventory_")}>Hardware</a></li>
{showExtraHeader(sm, salt)}
</ul>
<div id="node_inventory">
<div id={htmlId(jsId,"node_inventory_")}>
{show(sm, false, "")}
</div>
</div>
{showExtraContent(nodeAndGlobalMode.map(_._1), sm, salt)}
<div id={htmlId(jsId,"node_summary_")}>
{showNodeDetails(sm, nodeAndGlobalMode, None, inventoryStatus, salt)}
</div>
</div>
}
// mimic the content of server_details/ShowNodeDetailsFromNode
def showNodeDetails(
sm : FullInventory
, nodeAndGlobalMode : Option[(NodeInfo,GlobalPolicyMode)]
, creationDate : Option[DateTime]
, inventoryStatus : InventoryStatus
, salt : String = ""
, isDisplayingInPopup: Boolean = false
) : NodeSeq = {
val nodePolicyMode = nodeAndGlobalMode match {
case Some((node,globalMode)) =>
Some((globalMode.overridable,node.policyMode) match {
case (Always, Some(mode)) =>
(mode,"<p>This mode is an override applied to this node. You can change it in the <i><b>node settings</b></i>.</p>")
case (Always,None) =>
val expl = """<p>This mode is the globally defined default. You can change it in <i><b>settings</b></i>.</p><p>You can also override it on this node in the <i><b>node's settings</b></i>.</p>"""
(globalMode.mode, expl)
case (Unoverridable,_) =>
(globalMode.mode, "<p>This mode is the globally defined default. You can change it in <i><b>Settings</b></i>.</p>")
}
)
case None =>
None
}
val deleteButton : NodeSeq= {
sm.node.main.status match {
case AcceptedInventory =>
<div class="tw-bs">
<div id={deleteNodePopupHtmlId} class="modal fade" data-keyboard="true" tabindex="-1" />
<div id={errorPopupHtmlId} class="modal fade" data-keyboard="true" tabindex="-1" />
<div id={successPopupHtmlId} class="modal fade" data-keyboard="true" tabindex="-1" />
</div>
<lift:authz role="node_write">
{
if(!isRootNode(sm.node.main.id)) {
<div class="tw-bs">
<div class="col-xs-12">
{ showDeleteButton(sm.node.main.id) }
</div>
</div>
} else {NodeSeq.Empty}
}
</lift:authz>
case _ => NodeSeq.Empty
}
}
<div id="nodeDetails" >
<h3> Node characteristics</h3>
<h4 class="tablemargin">General</h4>
<div class="tablepadding">
<b>Hostname:</b> {sm.node.main.hostname}<br/>
<b>Machine type:</b> {displayMachineType(sm.machine)}<br/>
<b>Manufacturer:</b> {sm.machine.flatMap(x => x.manufacturer).map(x => x.name).getOrElse("-")}<br/>
<b>Total physical memory (RAM):</b> {sm.node.ram.map( _.toStringMo).getOrElse("-")}<br/>
<b>Total swap space:</b> {sm.node.swap.map( _.toStringMo).getOrElse("-")}<br/>
<b>Motherboard UUID:</b> {sm.machine.map(_.id.value).getOrElse("-")}<br/>
<b>System Serial Number:</b> {sm.machine.flatMap(x => x.systemSerialNumber).getOrElse("-")}<br/>
</div>
<h4 class="tablemargin">Operating system details</h4>
<div class="tablepadding">
<b>Operating System:</b> {sm.node.main.osDetails.fullName}<br/>
<b>Operating System Type:</b> {sm.node.main.osDetails.os.kernelName}<br/>
<b>Operating System Name:</b> {S.?("os.name."+sm.node.main.osDetails.os.name)}<br/>
<b>Operating System Version:</b> {sm.node.main.osDetails.version.value}<br/>
<b>Operating System Service Pack:</b> {sm.node.main.osDetails.servicePack.getOrElse("None")}<br/>
<b>Operating System Architecture Description:</b> {sm.node.archDescription.getOrElse("None")}<br/>
</div>
<h4 class="tablemargin">Rudder information</h4>
<div class="tablepadding">
{ nodePolicyMode match {
case None => NodeSeq.Empty
case Some((mode,explanation)) =>
<b>Agent policy mode :</b><span id="badge-apm" class="tw-bs"></span><br/> ++
Script(OnLoad(JsRaw(s"""
$$('#badge-apm').append(createBadgeAgentPolicyMode('node',"${mode}","${explanation}"));
$$('.rudder-label').bsTooltip();
""")))
} }
{ displayServerRole(sm, inventoryStatus) }
<b>Inventory date:</b> {sm.node.inventoryDate.map(DateFormaterService.getFormatedDate(_)).getOrElse("Unknown")}<br/>
<b>Date inventory last received:</b> {sm.node.receiveDate.map(DateFormaterService.getFormatedDate(_)).getOrElse("Unknown")}<br/>
{creationDate.map { creation =>
<xml:group><b>Date first accepted in Rudder:</b> {DateFormaterService.getFormatedDate(creation)}<br/></xml:group>
}.getOrElse(NodeSeq.Empty) }
<b>Rudder agent version:</b> {sm.node.agents.map(_.version.map(_.value)).headOption.flatten.getOrElse("Not found")
}<br/>
<b>Agent name:</b> {sm.node.agents.map(_.name.fullname).mkString(";")}<br/>
<b>Rudder ID:</b> {sm.node.main.id.value}<br/>
{ displayPolicyServerInfos(sm) }<br/>
{
sm.node.publicKeys.headOption match {
case Some(key) =>
val checked = (sm.node.main.status, sm.node.main.keyStatus) match {
case (AcceptedInventory, CertifiedKey) => <span class="tw-bs">
<span class="glyphicon glyphicon-ok text-success tooltipable" title="" tooltipid={s"tooltip-key-${sm.node.main.id.value}"}></span>
<span class="tooltipContent" id={s"tooltip-key-${sm.node.main.id.value}"}>
Inventories for this Node must be signed with this key
</span>
</span>
case (AcceptedInventory, UndefinedKey) => <span class="tw-bs">
<span class="glyphicon glyphicon-ok tooltipable" title="" tooltipid={s"tooltip-key-${sm.node.main.id.value}"}></span>
<span class="tooltipContent" id={s"tooltip-key-${sm.node.main.id.value}"}>
Inventories for this Node are not signed
</span>
</span>
case _ => NodeSeq.Empty
}
val publicKeyId = s"publicKey-${sm.node.main.id.value}"
<b><a href="#" onclick={s"$$('#publicKey-${sm.node.main.id.value}').toggle(300); return false;"}>Display Node key {checked}</a></b>
<div style="width=100%; overflow:auto;"><pre id={s"publicKey-${sm.node.main.id.value}"} style="display:none;">{key.key}</pre></div> ++
Script(OnLoad(JsRaw(s"""createTooltip();""")))
case None => NodeSeq.Empty
}
}
</div>
<h4 class="tablemargin">Accounts</h4>
<div class="tablepadding">
<b>Administrator account:</b> {sm.node.main.rootUser}<br/>
<b>Local account(s):</b> {displayAccounts(sm.node)}<br/>
</div> <br/>
{deleteButton}
</div>
}
private def htmlId(jsId:JsNodeId, prefix:String="") : String = prefix + jsId.toString
private def htmlId_#(jsId:JsNodeId, prefix:String="") : String = "#" + prefix + jsId.toString
private def ?(in:Option[String]) : NodeSeq = in.map(Text(_)).getOrElse(NodeSeq.Empty)
// Display the role of the node
private def displayServerRole(sm:FullInventory, inventoryStatus : InventoryStatus) : NodeSeq = {
val nodeId = sm.node.main.id
inventoryStatus match {
case AcceptedInventory =>
val nodeInfoBox = nodeInfoService.getNodeInfo(nodeId)
nodeInfoBox match {
case Full(Some(nodeInfo)) =>
val kind = {
if(nodeInfo.isPolicyServer) {
if(isRootNode(nodeId) ) {
"server"
} else {
"relay server"
}
} else {
if (nodeInfo.serverRoles.isEmpty){
"node"
} else {
"server component"
}
}
}
val roles = if (nodeInfo.serverRoles.isEmpty) {
""
} else {
nodeInfo.serverRoles.map(_.value).mkString("(",", ",")")
}
<span><b>Role: </b>Rudder {kind} {roles}</span><br/>
case Full(None) =>
logger.error(s"Could not fetch node details for node with id ${sm.node.main.id}")
<span class="error"><b>Role: </b>Could not fetch Role for this node</span><br/>
case eb:EmptyBox =>
val e = eb ?~! s"Could not fetch node details for node with id ${sm.node.main.id}"
logger.error(e.messageChain)
<span class="error"><b>Role: </b>Could not fetch Role for this node</span><br/>
}
case RemovedInventory =>
<span><b>Role: </b>Deleted node</span><br/>
case PendingInventory =>
<span><b>Role: </b>Pending node</span><br/>
}
}
private def displayPolicyServerInfos(sm:FullInventory) : NodeSeq = {
nodeInfoService.getNodeInfo(sm.node.main.policyServerId) match {
case eb:EmptyBox =>
val e = eb ?~! s"Could not fetch policy server details (id ${sm.node.main.policyServerId}) for node with id ${sm.node.main.id}"
logger.error(e.messageChain)
<span class="error"><b>Rudder Policy Server: </b>Could not fetch details about the policy server</span>
case Full(Some(policyServerDetails)) =>
<span><b>Rudder Policy Server: </b><a href={JsInitContextLinkUtil.baseNodeLink(policyServerDetails.id)}>{policyServerDetails.hostname}</a></span>
case Full(None) =>
logger.error(s"Could not fetch policy server details (id ${sm.node.main.policyServerId}) for node with id ${sm.node.main.id}")
<span class="error"><b>Rudder Policy Server: </b>Could not fetch details about the policy server</span>
}
}
private def displayMachineType(opt:Option[MachineInventory]) : NodeSeq = {
opt match {
case None => NodeSeq.Empty
case Some(machine) => (
machine.machineType match {
case PhysicalMachineType => Text("Physical machine")
case VirtualMachineType(vmType) => Text("Virtual machine (%s)".format(S.?("vm.type." + vmType.name)))
}
)
}
}
private def displayPublicKeys(node:NodeInventory) : NodeSeq = <b>Public Key(s): </b> ++ {if(node.publicKeys.isEmpty) {
Text(DEFAULT_COMPONENT_KEY)
  } else <ul>{node.publicKeys.zipWithIndex.flatMap{ case (x,i) => (<b>{"[" + i + "] "}</b> ++ {Text(x.key.grouped(65).toList.mkString("\n"))})}}</ul> }
private def displayNodeInventoryInfo(node:NodeInventory) : NodeSeq = {
val details : NodeSeq = node.main.osDetails match {
case Linux(os, osFullName, osVersion, osServicePack, kernelVersion) => //display kernelVersion, distribution, distributionVersion
(<li><b>Distribution (version): </b> {os.name} ({osVersion.value})</li>
<li><b>Kernel version: </b> {kernelVersion.value}</li>
<li><b>Service Pack: </b> {?(osServicePack)}</li>)
case Windows(os, osFullName, osVersion, osServicePack, kernelVersion, domain, company, key, id) =>
(<li><b>Version:</b>: {osVersion.value}</li>
<li><b>Kernel version: </b> {kernelVersion.value}</li>
<li><b>Service Pack: </b> {?(osServicePack)}</li>
<li><b>User Domain:</b> {domain}</li>
<li><b>Company:</b> {company}</li>
<li><b>Id:</b> {id}</li>
<li><b>Key:</b> {key}</li>)
case _ => NodeSeq.Empty
}
<li><b>Complete name: </b> {node.main.osDetails.fullName}</li> ++
details
}
//show a comma separated list with description in tooltip
private def displayAccounts(node:NodeInventory) : NodeSeq = {
Text{if(node.accounts.isEmpty) {
"None"
} else {
node.accounts.sortWith(_ < _).mkString(", ")
}
}
}
private def displayTabGrid[T](jsId:JsNodeId)(eltName:String, optSeq:Box[Seq[T]],title:Option[String]=None)(columns:List[(String, T => NodeSeq)]) = {
<div id={htmlId(jsId,"sd_"+eltName +"_")} class="sInventory overflow_auto" style="display:none;">{
optSeq match {
case Empty =>
<div class="tw-bs">
<div class="col-xs-12 alert alert-warning">
<span>No matching components detected on this node</span>
</div>
</div>
case Failure(m,_,_) => <span class="error">Error when trying to fetch file systems. Reported message: {m}</span>
case Full(seq) if (seq.isEmpty && eltName != "soft") =>
<div class="tw-bs">
<div class="col-xs-12 alert alert-warning">
<span>No matching components detected on this node</span>
</div>
</div>
case Full(seq) =>
<table cellspacing="0" id={htmlId(jsId,eltName+"_")} class="tablewidth">
{ title match {
case None => NodeSeq.Empty
case Some(title) => <div style="text-align:center"><b>{title}</b></div>
}
}
<thead>
<tr class="head">
</tr>
<tr class="head">{
columns.map {h => <th>{h._1}</th> }.toSeq
}</tr>
</thead>
<tbody class="toggle-color">{ seq.flatMap { x =>
<tr>{ columns.flatMap{ case(header,renderLine) => <td>{renderLine(x)}</td> } }</tr>
} }</tbody>
</table>
}
}<div id={htmlId(jsId,eltName + "_grid_") + "_paginate_area"} class="paginate"/>
</div>
}
private def displayTabSoftware(jsId:JsNodeId) : NodeSeq =
displayTabGrid(jsId)("soft",
//do not retrieve software here
Full(Seq())
){
("Name", {x:Software => ?(x.name)} ) ::
("Version", {x:Software => ?(x.version.map(_.value)) } ) ::
("Description", {x:Software => ?(x.description) } ) ::
Nil
}
private def displayTabNetworks(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("net", Full(sm.node.networks)){
("Interface", {x:Network => Text(x.name)}) ::
("IP address", {x:Network => Text(x.ifAddresses.map{ _.getHostAddress }.mkString(", "))}) ::
("Mask", {x:Network => Text(x.ifMask.map{ _.getHostAddress }.mkString(", "))}) ::
("DHCP server", {x:Network => Text(x.ifDhcp.map{ _.getHostAddress }.mkString(", "))}) ::
("MAC address", {x:Network => ?(x.macAddress)}) ::
("Type", {x:Network => ?(x.ifType)}) ::
("Speed", {x:Network => ?(x.speed)}) ::
("Status", {x:Network => ?(x.status)}) ::
Nil
}
private def displayTabFilesystems(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("fs", Full(sm.node.fileSystems)){
("Mount point", {x:FileSystem => Text(x.mountPoint)}) ::
("Filesystem", {x:FileSystem => ?(x.name)}) ::
("Free space", {x:FileSystem => ?(x.freeSpace.map(_.toStringMo))}) ::
("Total space", {x:FileSystem => ?(x.totalSpace.map(_.toStringMo))}) ::
("File count", {x:FileSystem => ?(x.fileCount.map(_.toString))}) ::
Nil
}
private def displayTabVariable(jsId:JsNodeId,sm:FullInventory) : NodeSeq = {
val title = sm.node.inventoryDate.map(date => "Environment variable status on %s".format(DateFormaterService.getFormatedDate(date)))
displayTabGrid(jsId)("var", Full(sm.node.environmentVariables),title){
("Name", {x:EnvironmentVariable => Text(x.name)}) ::
("Value", {x:EnvironmentVariable => Text(x.value.getOrElse("Unspecified"))}) ::
Nil
}
}
private def displayTabProperties(jsId:JsNodeId, node: NodeInfo) : NodeSeq = {
import com.normation.rudder.domain.nodes.JsonSerialisation._
import com.normation.rudder.authorization._
import net.liftweb.json._
val nodeId = node.id.value
val jsonProperties = compactRender(node.properties.toApiJson())
val userHasRights = CurrentUser.checkRights(Write("node"))
def tabProperties = ChooseTemplate(List("templates-hidden", "components", "ComponentNodeProperties") , "nodeproperties-tab")
val css: CssSel = "#tabPropsId [id]" #> htmlId(jsId,"sd_props_")
css(tabProperties) ++ Script(OnLoad(JsRaw(s"""
angular.bootstrap('#nodeProp', ['nodeProperties']);
var scope = angular.element($$("#nodeProp")).scope();
scope.$$apply(function(){
scope.init(${jsonProperties},"${nodeId}",${userHasRights});
});
""")))
}
private def displayTabProcess(jsId:JsNodeId,sm:FullInventory) : NodeSeq = {
val title = sm.node.inventoryDate.map(date => "Process status on %s".format(DateFormaterService.getFormatedDate(date)))
displayTabGrid(jsId)("process", Full(sm.node.processes),title){
("User", {x:Process => ?(x.user)}) ::
("PID", {x:Process => Text(x.pid.toString())}) ::
("% CPU", {x:Process => ?(x.cpuUsage.map(_.toString()))}) ::
("% Memory", {x:Process => ?(x.memory.map(_.toString()))}) ::
("Virtual memory", {x:Process => ?(x.virtualMemory.map(memory => MemorySize(memory.toLong).toStringMo()))}) ::
("TTY", {x:Process => ?(x.tty)}) ::
("Started on", {x:Process => ?(x.started)}) ::
("Command", { x:Process => ?(x.commandName) }) ::
Nil
}
}
private def displayTabVM(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("vm", Full(sm.node.vms)){
("Name", {x:VirtualMachine => ?(x.name)}) ::
("Type", {x:VirtualMachine => ?(x.vmtype)}) ::
("SubSystem", {x:VirtualMachine => ?(x.subsystem)}) ::
("Uuid", {x:VirtualMachine => Text(x.uuid.value)}) ::
("Status", {x:VirtualMachine => ?(x.status)}) ::
("Owner", {x:VirtualMachine => ?(x.owner)}) ::
("# Cpu", {x:VirtualMachine => ?(x.vcpu.map(_.toString()))}) ::
("Memory", { x:VirtualMachine => ?(x.memory) }) ::
Nil
}
private def displayTabBios(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("bios", sm.machine.map(fm => fm.bios)){
("Name", {x:Bios => Text(x.name)}) ::
("Editor", {x:Bios => ?(x.editor.map( _.name))}) ::
("Version", {x:Bios => ?(x.version.map( _.value))}) ::
("Release Date", {x:Bios => ?(x.releaseDate.map(DateFormaterService.getFormatedDate(_)))}) ::
Nil
}
private def displayTabControllers(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("controllers", sm.machine.map(fm => fm.controllers)){
("Name", {x:Controller => Text(x.name)}) ::
("Manufacturer", {x:Controller => ?(x.manufacturer.map( _.name))}) ::
("Type", {x:Controller => ?(x.cType)}) ::
("Quantity", {x:Controller => Text(x.quantity.toString)}) ::
Nil
}
private def displayTabMemories(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("memories", sm.machine.map(fm => fm.memories)){
("Slot", {x:MemorySlot => Text(x.slotNumber)}) ::
("Capacity", {x:MemorySlot => ?(x.capacity.map( _.toStringMo ))}) ::
("Description", {x:MemorySlot => ?(x.description)}) ::
("Serial Number", {x:MemorySlot => ?(x.serialNumber)}) ::
("Speed", {x:MemorySlot => ?(x.speed)}) ::
("Type", {x:MemorySlot => ?(x.memType)}) ::
("Quantity", {x:MemorySlot => Text(x.quantity.toString)}) ::
Nil
}
private def displayTabPorts(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("ports", sm.machine.map(fm => fm.ports)){
("Name", {x:Port => Text(x.name)}) ::
("Type", {x:Port => ?(x.pType )}) ::
("Description", {x:Port => ?(x.description)}) ::
("Quantity", {x:Port => Text(x.quantity.toString)}) ::
Nil
}
private def displayTabProcessors(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("processors", sm.machine.map(fm => fm.processors)){
("Name", {x:Processor => Text(x.name)}) ::
("Speed", {x:Processor => ?(x.speed.map(_.toString))}) ::
("Model", {x:Processor => ?(x.model.map(_.toString()))}) ::
("Family", {x:Processor => ?(x.family.map(_.toString()))}) ::
("Family Name", {x:Processor => ?(x.familyName)}) ::
("Manufacturer", {x:Processor => ?(x.manufacturer.map(_.name))}) ::
("Thread", {x:Processor => ?(x.thread.map(_.toString()))}) ::
("Core", {x:Processor => ?(x.core.map(_.toString()))}) ::
("CPUID", {x:Processor => ?(x.cpuid)}) ::
("Architecture", {x:Processor => ?(x.arch)}) ::
("Stepping", {x:Processor => ?(x.stepping.map(_.toString))}) ::
("Quantity", {x:Processor => Text(x.quantity.toString)}) ::
Nil
}
private def displayTabSlots(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("slots", sm.machine.map(fm => fm.slots)){
("Name" , {x:Slot => Text(x.name)}) ::
( "Description" , {x:Slot => ?(x.description)}) ::
( "Status" , {x:Slot => ?(x.status)}) ::
( "Quantity" , {x:Slot => Text(x.quantity.toString)}) ::
Nil
}
private def displayTabSounds(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("sounds", sm.machine.map(fm => fm.sounds)){
("Name" , {x:Sound => Text(x.name)}) ::
( "Description" , {x:Sound => ?(x.description)}) ::
( "Quantity" , {x:Sound => Text(x.quantity.toString)}) ::
Nil
}
private def displayTabStorages(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("storages", sm.machine.map(fm => fm.storages)){
( "Name" , {x:Storage => Text(x.name)}) ::
( "Description" , {x:Storage => ?(x.description)}) ::
( "Size" , {x:Storage => ?(x.size.map( _.toStringMo))}) ::
( "Firmware" , {x:Storage => ?(x.firmware)}) ::
( "Manufacturer" , {x:Storage => ?(x.manufacturer.map(_.name))}) ::
( "Model" , {x:Storage => ?(x.model)}) ::
( "Serial" , {x:Storage => ?(x.serialNumber)}) ::
( "Type" , {x:Storage => ?(x.sType)}) ::
( "Quantity" , {x:Storage => Text(x.quantity.toString)}) ::
Nil
}
private def displayTabVideos(jsId:JsNodeId,sm:FullInventory) : NodeSeq =
displayTabGrid(jsId)("videos", sm.machine.map(fm => fm.videos)){
("Name" , {x:Video => Text(x.name)}) ::
( "Chipset" , {x:Video => ?(x.chipset)}) ::
( "Memory" , {x:Video => ?(x.memory.map( _.toStringMo))}) ::
( "Resolution" , {x:Video => ?(x.resolution)}) ::
( "Quantity" , {x:Video => Text(x.quantity.toString)}) ::
Nil
}
private[this] def showPopup(nodeId : NodeId) : JsCmd = {
val popupHtml =
<div class="modal-backdrop fade in" style="height: 100%;"></div>
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<div class="close" data-dismiss="modal">
<span aria-hidden="true">×</span>
<span class="sr-only">Close</span>
</div>
<h4 class="modal-title">
Remove a node from Rudder
</h4>
</div>
<div class="modal-body">
<h4 class="text-center">If you choose to remove this node from Rudder, it won't be managed anymore, and all information about it will be removed from the application</h4>
</div>
<div class="modal-footer">
{
SHtml.ajaxButton("Delete this node", { () => {removeNode(nodeId) } },("class","btn btn-danger"))
}
<button class="btn btn-default" type="button" data-dismiss="modal">
Cancel
</button>
</div>
</div>
      </div>;
SetHtml(deleteNodePopupHtmlId, popupHtml) &
JsRaw(s""" createPopup("${deleteNodePopupHtmlId}") """)
}
private[this] def showDeleteButton(nodeId : NodeId) = {
def toggleDeletion() : JsCmd = {
JsRaw(""" $('#deleteButton').toggle(300); $('#confirmDeletion').toggle(300) """)
}
SHtml.ajaxButton(
"Delete"
, { () => {toggleDeletion() } }
, ("id", "deleteButton")
, ("class", "btn btn-danger")
) ++ <div style="display:none" id="confirmDeletion">
<div style="margin:5px;">
<div>
<div>
<img src="/images/icWarn.png" alt="Warning!" height="25" width="25" class="warnicon"
style="vertical-align: middle; padding: 0px 0px 2px 0px;"
/>
<b>Are you sure you want to delete this node?</b>
</div>
<div style="margin-top:7px">If you choose to remove this node from Rudder, it won't be managed anymore,
and all information about it will be removed from the application.</div>
</div>
<div>
<div style="margin-top:7px">
<span >
{
SHtml.ajaxButton("Cancel", { () => { toggleDeletion } } , ("class", "btn btn-default"))
}
{
SHtml.ajaxButton("Confirm", { () => {removeNode(nodeId) }}, ("class", "btn btn-danger") )
}
</span>
</div>
</div>
</div>
</div>
}
private[this] def removeNode(nodeId: NodeId) : JsCmd = {
val modId = ModificationId(uuidGen.newUuid)
removeNodeService.removeNode(nodeId, modId, CurrentUser.getActor) match {
case Full(entry) =>
asyncDeploymentAgent ! AutomaticStartDeployment(modId, CurrentUser.getActor)
onSuccess
case eb:EmptyBox =>
val e = eb ?~! "Could not remove node %s from Rudder".format(nodeId.value)
logger.error(e.messageChain)
onFailure(nodeId)
}
}
private[this] def onFailure(nodeId: NodeId) : JsCmd = {
val popupHtml =
<div class="modal-backdrop fade in" style="height: 100%;"></div>
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<div class="close" data-dismiss="modal">
<span aria-hidden="true">×</span>
<span class="sr-only">Close</span>
</div>
<h4 class="modal-title">
Error while removing a node from Rudder
</h4>
</div>
<div class="modal-body">
<h4 class="text-center">
<p>There was an error while deleting the Node with ID :</p>
<p><b class="text-danger">{nodeId.value}</b></p>
<p>Please contact your administrator.</p>
</h4>
</div>
<div class="modal-footer">
<button class="btn btn-default" type="button" data-dismiss="modal">
Close
</button>
</div>
</div>
</div>;
JsRaw( """$('#errorPopupHtmlId').bsModal('hide');""") &
SetHtml(errorPopupHtmlId, popupHtml) &
JsRaw( s""" callPopupWithTimeout(200,"${errorPopupHtmlId}")""")
}
private[this] def onSuccess : JsCmd = {
val popupHtml =
<div class="modal-backdrop fade in" style="height: 100%;"></div>
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<div class="close" data-dismiss="modal">
<span aria-hidden="true">×</span>
<span class="sr-only">Close</span>
</div>
<h4 class="modal-title">
Success
</h4>
</div>
<div class="modal-body">
<h4 class="text-center">The node has been successfully removed from Rudder.</h4>
</div>
<div class="modal-footer">
<button class="btn btn-default" type="button" data-dismiss="modal">
Close
</button>
</div>
</div>
</div>
JsRaw(s"updateHashString('nodeId', undefined); forceParseHashtag()") &
SetHtml("serverDetails", NodeSeq.Empty) &
JsRaw( """$('#successPopupHtmlId').bsModal('hide');""") &
SetHtml(successPopupHtmlId, popupHtml) &
JsRaw( s""" callPopupWithTimeout(200,"${successPopupHtmlId}") """)
}
private [this] def isRootNode(n: NodeId): Boolean = {
    n.value == "root"
}
}
| armeniaca/rudder | rudder-web/src/main/scala/com/normation/rudder/web/services/DisplayNode.scala | Scala | gpl-3.0 | 43,100 |
package com.cdegroot.sgame
/**
* Minimal replacement for AWT Color class
*/
case class SGColor(red: Int, green: Int, blue: Int, alpha: Int = 255)
/**
* Some predefined colors
*/
object SGColor {
val Black = SGColor(0,0,0)
val White = SGColor(255,255,255)
val Yellow = SGColor(255,255,0)
val Green = SGColor(0,255,0)
val Cyan = SGColor(0,255,255)
val Blue = SGColor(0,0,255)
val Magenta= SGColor(255,0,255)
val Red = SGColor(255,0,0)
val Pink = SGColor(255,140,140)
val Orange = SGColor(255,140,0)
/**
* Alternative constructor with Double values between 0.0 and 1.0
*/
def apply(r: Double, g: Double, b: Double, a: Double): SGColor = new SGColor(
(r * 255.95).toInt,
(g * 255.95).toInt,
(b * 255.95).toInt,
(a * 255.95).toInt)
def apply(r: Double, g: Double, b: Double): SGColor = apply(r, g, b, 1.0)
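  // e.g. SGColor(0.5, 0.5, 0.5) yields SGColor(127, 127, 127, 255); the 255.95 factor maps 1.0 to 255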
}
| cdegroot/sgame | src/main/scala/com/cdegroot/sgame/SGColor.scala | Scala | bsd-3-clause | 860 |
package pl.touk.nussknacker.engine.definition.parameter.validator
import pl.touk.nussknacker.engine.api.definition
import javax.validation.constraints.{Max, Min, NotBlank}
import pl.touk.nussknacker.engine.api.definition.{MaximalNumberValidator, MinimalNumberValidator, NotBlankParameterValidator, ParameterValidator}
import pl.touk.nussknacker.engine.api.validation.JsonValidator
object ValidatorsExtractor {
def extract(params: ValidatorExtractorParameters): List[ParameterValidator] = {
val fromValidatorExtractors = List(
MandatoryValidatorExtractor,
EditorBasedValidatorExtractor,
AnnotationValidatorExtractor[JsonValidator](definition.JsonValidator),
LiteralValidatorExtractor,
AnnotationValidatorExtractor[NotBlank](NotBlankParameterValidator),
AnnotationValidatorExtractor[Min]((annotation: Min) => MinimalNumberValidator(annotation.value())),
AnnotationValidatorExtractor[Max]((annotation: Max) => MaximalNumberValidator(annotation.value()))
).flatMap(_.extract(params))
//TODO: should validators from config override or append those from annotations, types etc.?
(fromValidatorExtractors ++ params.parameterConfig.validators.toList.flatten).distinct
}
}
| TouK/nussknacker | interpreter/src/main/scala/pl/touk/nussknacker/engine/definition/parameter/validator/ValidatorsExtractor.scala | Scala | apache-2.0 | 1,229 |
package dbtarzan.db
import java.nio.file.Path
import akka.actor.{ActorContext, ActorRef, Props}
import akka.routing.RoundRobinPool
import dbtarzan.config.connections.ConnectionData
import dbtarzan.config.password.EncryptionKey
import dbtarzan.db.actor.{CopyActor, DatabaseActor}
import dbtarzan.localization.Localization
private class ConnectionBuilder(registerDriver: RegisterDriver, data : ConnectionData, encriptionKey : EncryptionKey, guiActor : ActorRef, connectionContext : ActorContext, localization : Localization, keyFilesDirPath: Path) {
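  // Builds a round-robin pool of DatabaseActor workers for this connection; pool size comes from
  // data.instances (defaulting to 1 below).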
def buildDBWorker() : ActorRef = try {
registerDriver.registerDriverIfNeeded(DriverSpec(data.jar, data.driver))
val instances = data.instances.getOrElse(1)
connectionContext.actorOf(RoundRobinPool(instances).props(buildSubWorkerProps()))
} catch {
case c: ClassNotFoundException => throw new Exception("Building the dbWorker with the driver "+data.driver+" got ClassNotFoundException:",c)
case t: Throwable => throw new Exception("Building the dbWorker with the driver "+data.driver+" got the exception of type "+t.getClass.getCanonicalName+":",t)
}
def buildCopyWorker() : ActorRef = try {
registerDriver.registerDriverIfNeeded(DriverSpec(data.jar, data.driver))
val name = "copyworker" + data.name
connectionContext.actorOf(Props(new CopyActor(data, encriptionKey, guiActor, localization, keyFilesDirPath)).withDispatcher("my-pinned-dispatcher"), name)
} catch {
case c: ClassNotFoundException => throw new Exception("Getting the copyworker with the driver "+data.driver+" got ClassNotFoundException:",c)
case t: Throwable => throw new Exception("Getting the copyworker with the driver "+data.driver+" got the exception of type "+t.getClass.getCanonicalName+":",t)
}
private def buildSubWorkerProps() : Props = {
Props(classOf[DatabaseActor], encriptionKey, data, guiActor, connectionContext.self, localization, keyFilesDirPath).withDispatcher("my-pinned-dispatcher")
}
private def buildSubWorkerName(index : Int) : String = {
"dbworker" + data.name + index
}
}
object ConnectionBuilder {
def buildDBWorker(registerDriver: RegisterDriver, data : ConnectionData, encriptionKey : EncryptionKey, guiActor : ActorRef, connectionContext : ActorContext, localization : Localization, keyFilesDirPath: Path) : ActorRef = {
val builder = new ConnectionBuilder(registerDriver, data, encriptionKey, guiActor, connectionContext, localization, keyFilesDirPath)
builder.buildDBWorker()
}
def buildCopyWorker(registerDriver: RegisterDriver, data : ConnectionData, encriptionKey : EncryptionKey, guiActor : ActorRef, connectionContext : ActorContext, localization : Localization, keyFilesDirPath: Path) : ActorRef = {
val builder = new ConnectionBuilder(registerDriver, data, encriptionKey, guiActor, connectionContext, localization, keyFilesDirPath)
builder.buildCopyWorker()
}
}
| aferrandi/dbtarzan | src/main/scala/dbtarzan/db/ConnectionBuilder.scala | Scala | apache-2.0 | 2,889 |
package xyz.hyperreal.prolog
import java.io.{File, PrintStream}
import jline.console.ConsoleReader
import jline.console.history.FileHistory
import xyz.hyperreal.bvm.VM
import xyz.hyperreal.char_reader.CharReader
import xyz.hyperreal.recursive_descent_parser.{Failure, Success}
object Main extends App {
REPL()
def REPL(): Unit = {
val reader = new ConsoleReader
val out = new PrintStream(reader.getTerminal.wrapOutIfNeeded(System.out), true)
var line: String = null
val historyFile = new File(System.getProperty("user.home") + "/.prolog-repl-history")
var program = new Program
implicit var vars: Vars = null
var block: Block = null
var vm: VM = null
var stackTrack = false
program.loadPredef
if (!historyFile.exists)
historyFile.createNewFile
val history = new FileHistory(historyFile)
sys.ShutdownHookThread {
history.flush()
}
reader.setBellEnabled(false)
reader.setPrompt("> ")
reader.setHistory(history)
out.println("""
|Welcome to the Prolog REPL v0.2
|Prolog comes with ABSOLUTELY NO WARRANTY. This is free software.
|Please type “;license” for legal details.
|
|Type “;help” for list of commands.
""".trim.stripMargin)
out.println()
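    // REPL loop: lines starting with ';' are commands (see ;help); anything else is compiled and run
    // as a Prolog query. A trailing '*' requests all solutions, and entering ';' alone afterwards
    // backtracks for the next solution.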
while ({ line = reader.readLine; line != null }) if (line.trim nonEmpty) {
try {
if (line.headOption contains ';') {
val command = line.drop(1).trim split "\\s+" toList
command match {
case List("help" | "h") =>
out.println("""
|help (h) print this summary
|compile (c) <file> save the compiled Prolog database as <file>.pcc
|import (i) <file> import (load) the compiled Prolog file <file>.pcc into current database
|license print the license
|load (l) <file> load and compile source file <file>.prolog into new database
|new (n) new database (current database is lost)
|quit (q) exit the REPL
""".trim.stripMargin)
case List("compile" | "c", module) =>
program.save(module + ".pcc")
case List("import" | "i", module) =>
out.println(program.load(module) mkString "\n")
case List("license") =>
out.println("""
|ISC License (ISC)
|
|Copyright 2018 Edward A. Maxedon, Sr.
|
|Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted,
|provided that the above copyright notice and this permission notice appear in all copies.
|
|THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
|IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
|THIS SOFTWARE.
""".trim.stripMargin)
case List("quit" | "q") => sys.exit()
case List("load" | "l", file) =>
program = new Program
program.loadPredef
PrologParser.parseSource(CharReader.fromFile(file + ".prolog")) match {
case Success(ast, _) =>
Compilation.compile(ast, program)
out.println(program.procedures map (_.ind) mkString "\n")
case f: Failure => sys.error(f.msg)
}
case List("new" | "n") =>
program = new Program
program.loadPredef
case List("stack" | "s", s @ ("on" | "off")) =>
stackTrack = s == "on"
case List("") =>
if (vm.fail)
vm.run(block) match {
case Some(r) if r isEmpty => println("yes")
case Some(r) => println(displayResult(r))
case None => println("no")
} else
println("no")
case _ => println("unrecognized command")
}
} else {
val all = line endsWith "*"
val queryline = if (all) line dropRight 1 else line
PrologParser.expression(PrologParser.lexer.tokenStream(CharReader.fromString(queryline))) match {
case Success(ast, _) =>
implicit val query: Program = new Program
vars = new Vars
block = query.block("query")
vm = new VM(program)
Compilation.compileGoal(ast, program)
val result = if (all) vm.runall(block) else vm.runfirst(block).toList
result match {
case Nil => println("no")
case List(r) if r isEmpty => println("yes")
case _ => println(result map displayResult mkString "\n\n")
}
case f: Failure => f.error
}
}
out.println()
} catch {
case e: Exception =>
if (stackTrack)
e.printStackTrace(out)
else
out.println(e)
//out.println( e.getMessage )
out.println()
}
}
def displayResult(r: Map[String, Any]) = r map { case (k, v) => s"$k = ${display(v)}" } mkString "\n"
}
}
| edadma/funl | prolog/src/main/scala/xyz/hyperreal/prolog/Main.scala | Scala | mit | 5,948 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.usc.irds.sparkler.pipeline
import java.io.ByteArrayInputStream
import java.text.{ParseException, SimpleDateFormat}
import java.util
import java.util.Date
import edu.usc.irds.sparkler.base.Loggable
import edu.usc.irds.sparkler.model.{CrawlData, ParsedData}
import org.apache.commons.io.IOUtils
import org.apache.tika.metadata.Metadata
import org.apache.tika.parser.AutoDetectParser
import org.apache.tika.sax.{BodyContentHandler, LinkContentHandler, WriteOutContentHandler}
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import scala.collection.mutable
/**
* This is a transformation function for transforming raw data from crawler to parsed data
*/
object ParseFunction extends ((CrawlData) => (ParsedData)) with Serializable with Loggable {
override def apply(data: CrawlData): (ParsedData) = {
val parseData = new ParsedData()
var stream = new ByteArrayInputStream(data.fetchedData.getContent)
val linkHandler = new LinkContentHandler()
val parser = new AutoDetectParser()
var meta = new Metadata()
val outHandler = new WriteOutContentHandler()
val contentHandler = new BodyContentHandler(outHandler)
LOG.info("PARSING {}", data.fetchedData.getResource.getUrl)
// parse outlinks
try {
// Parse OutLinks
meta.set("resourceName", data.fetchedData.getResource.getUrl)
parser.parse(stream, linkHandler, meta)
parseData.outlinks = linkHandler.getLinks.asScala.map(_.getUri.trim).filter(!_.isEmpty).toSet
} catch {
case e: Throwable =>
LOG.warn("PARSING-OUTLINKS-ERROR {}", data.fetchedData.getResource.getUrl)
LOG.warn(e.getMessage, e)
} finally { IOUtils.closeQuietly(stream) }
//parse main text content
try {
meta = new Metadata
meta.set("resourceName", data.fetchedData.getResource.getUrl)
// Parse Text
stream = new ByteArrayInputStream(data.fetchedData.getContent)
parser.parse(stream, contentHandler, meta)
parseData.extractedText = outHandler.toString
parseData.metadata = meta
} catch {
case e: Throwable =>
LOG.warn("PARSING-CONTENT-ERROR {}", data.fetchedData.getResource.getUrl)
LOG.warn(e.getMessage, e)
parseData
} finally { IOUtils.closeQuietly(stream) }
// parse headers
val headers = data.fetchedData.getHeaders
if (headers.containsKey("Location")) { // redirect
val redirectUrls = headers.get("Location")
parseData.outlinks ++= redirectUrls.asScala.filter(u => u != null && !u.isEmpty)
}
parseData.headers = parseHeaders(headers)
parseData
}
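  // Normalizes response headers: only single-valued headers are kept; known date headers are parsed
  // to java.util.Date and ContentLength to Long, falling back to the raw string on parse failure.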
def parseHeaders(headers: util.Map[String, util.List[String]]): Map[String, AnyRef] = {
val dateHeaders = Set("Date", "Last-Modified", "Expires")
val intHeaders = Set("ContentLength")
val dateFmt = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss zzz")
val result = mutable.Map[String, AnyRef]()
for (name <- headers.keySet()) {
val values = headers.get(name)
var parsed: AnyRef = values
if (values.size() == 1){
val value = values.get(0)
parsed = value
try {
if (dateHeaders contains name) {
parsed = parseDate(value)
} else if (intHeaders contains name) {
parsed = new java.lang.Long(value.toLong)
}
} catch {
case e: Exception => LOG.debug(e.getMessage, e)
} finally {
result(name) = parsed
}
}
}
result.toMap
}
/**
* Parse date string as per RFC7231 https://tools.ietf.org/html/rfc7231#section-7.1.1.1
*/
val httpDateFormat = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss zzz")
@throws[ParseException] //but be aware of errors
def parseDate(dateStr:String): Date = httpDateFormat.parse(dateStr.trim)
}
| sujen1412/sparkler | sparkler-app/src/main/scala/edu/usc/irds/sparkler/pipeline/ParseFunction.scala | Scala | apache-2.0 | 4,648 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package toplevel
package templates
import org.jetbrains.plugins.scala.lang.psi.api.base.ScConstructor
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
* Time: 9:24:03
*/
trait ScClassParents extends ScTemplateParents {
def constructor = findChild(classOf[ScConstructor])
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/toplevel/templates/ScClassParents.scala | Scala | apache-2.0 | 357 |
package com.rasterfoundry.batch.removescenes
import com.rasterfoundry.batch.Job
import com.rasterfoundry.database.util.RFTransactor
import cats.effect.IO
import cats.syntax.apply._
import cats.syntax.either._
import doobie._
import doobie.implicits._
import doobie.implicits.javasql._
import doobie.postgres.implicits._
import io.chrisdavenport.log4cats.Logger
import io.chrisdavenport.log4cats.slf4j.Slf4jLogger
import java.sql.Timestamp
import java.time.LocalDate
import java.time.temporal.TemporalAdjusters
import java.util.UUID
class RemoveScenes(
datasourceId: UUID,
startDate: LocalDate,
xa: Transactor[IO]
) {
val endDate = startDate `with` TemporalAdjusters.firstDayOfNextMonth()
val startTs = Timestamp.valueOf(startDate.atStartOfDay())
val endTs = Timestamp.valueOf(endDate.atStartOfDay())
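  // Deletes scenes of this datasource acquired between startDate and the first day of the next month
  // that no layer references (the LEFT JOIN leaves scene_id NULL for unreferenced scenes).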
def removeScenes: IO[Int] =
fr"""
DELETE FROM scenes WHERE id IN (
SELECT id FROM scenes LEFT JOIN scenes_to_layers ON scenes.id = scenes_to_layers.scene_id
WHERE
-- scene_id NULL means we didn't find a scene_id in the layers table
scene_id IS NULL
AND scenes.acquisition_date >= ${startTs}
AND scenes.acquisition_date <= ${endTs}
AND scenes.datasource = $datasourceId
)
""".update.run.transact(xa)
}
object RemoveScenes extends Job {
val name = "remove-scenes"
implicit val unsafeLoggerIO = Slf4jLogger.getLogger[IO]
val xa = RFTransactor.nonHikariTransactor(RFTransactor.TransactorConfig())
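  // Expected CLI args: <datasource UUID> <start date, YYYY-MM-DD>, e.g. (placeholder values):
  //   runJob(List("00000000-0000-0000-0000-000000000000", "2019-06-01"))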
def runJob(args: List[String]): IO[Unit] =
args match {
case datasourceIdString :: startDateString :: Nil =>
(
Either.catchNonFatal(UUID.fromString(datasourceIdString)),
Either.catchNonFatal(LocalDate.parse(startDateString))
).tupled
.traverse({
case (datasourceId, startDate) =>
val runner = new RemoveScenes(datasourceId, startDate, xa)
runner.removeScenes
}) flatMap {
          case Left(err) =>
            // sequence the log with the exit; otherwise the logging IO is discarded and never runs
            Logger[IO].error(err)(
              s"Failed to delete scenes for datasource $datasourceIdString"
            ) *> IO { sys.exit(1) }
case Right(nRemoved) =>
Logger[IO]
.info(
s"Removed $nRemoved scenes for datasource $datasourceIdString"
)
}
case _ =>
IO.raiseError(
new Exception(
"Incorrect arguments -- I expected a UUID datasource ID and a YYYY-MM-DD date"
)
)
}
}
| raster-foundry/raster-foundry | app-backend/batch/src/main/scala/removeScenes/RemoveScenes.scala | Scala | apache-2.0 | 2,512 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.optim
import java.nio.file.{Files, Paths}
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.dataset.{DistributedDataSet, MiniBatch}
import com.intel.analytics.bigdl.nn._
import com.intel.analytics.bigdl.tensor.{Storage, Tensor}
import com.intel.analytics.bigdl.utils._
import com.intel.analytics.bigdl.visualization.TrainSummary
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
object DistriOptimizerSpec {
val input1: Tensor[Double] = Tensor[Double](Storage[Double](Array(0.0, 1.0, 0.0, 1.0)))
val output1 = 0.0
val input2: Tensor[Double] = Tensor[Double](Storage[Double](Array(1.0, 0.0, 1.0, 0.0)))
val output2 = 1.0
var plusOne = 0.0
val nodeNumber = 4
val coreNumber = 4
Engine.init(nodeNumber, coreNumber, true)
val batchSize = 2 * coreNumber
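  // Builds a MiniBatch whose even rows are (input1 -> output1 + plusOne)
  // and whose odd rows are (input2 -> output2 + plusOne).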
val prepareData: Int => (MiniBatch[Double]) = index => {
val input = Tensor[Double]().resize(batchSize, 4)
val target = Tensor[Double]().resize(batchSize)
var i = 0
while (i < batchSize) {
if (i % 2 == 0) {
target.setValue(i + 1, output1 + plusOne)
input.select(1, i + 1).copy(input1)
} else {
target.setValue(i + 1, output2 + plusOne)
input.select(1, i + 1).copy(input2)
}
i += 1
}
MiniBatch(input, target)
}
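  // For reference: with coreNumber = 4 the batch size is 8, so each generated
  // MiniBatch interleaves the two fixed samples -- rows 1, 3, 5, 7 hold input1
  // labelled output1 (+ plusOne) and rows 2, 4, 6, 8 hold input2 labelled
  // output2 (+ plusOne).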
}
object DistriOptimizerSpecModel {
def mse: Module[Double] = {
val mlp = new Sequential[Double]
mlp.add(new Linear(4, 2))
mlp.add(new Sigmoid)
mlp.add(new Linear(2, 1))
mlp.add(new Sigmoid)
mlp
}
def bn: Module[Double] = {
val mlp = Sequential[Double]
mlp.add(Linear(4, 2))
mlp.add(BatchNormalization(2))
mlp.add(ReLU())
mlp.add(Linear(2, 1))
mlp.add(Sigmoid())
mlp
}
def cre: Module[Double] = {
val mlp = new Sequential[Double]
mlp.add(new Linear(4, 2))
mlp.add(new LogSoftMax)
mlp
}
def mserf(failCountNumberLists: Array[Int], sleep: Boolean = false): Module[Double] = {
val mlp = new Sequential[Double]
mlp.add(new Linear(4, 2))
mlp.add(new Sigmoid)
mlp.add(new Linear(2, 1))
mlp.add(new Sigmoid)
mlp.add(new ExceptionTest(failCountNumberLists, sleep))
mlp
}
}
@com.intel.analytics.bigdl.tags.Serial
class DistriOptimizerSpec extends FlatSpec with Matchers with BeforeAndAfter {
import DistriOptimizerSpec._
import DistriOptimizerSpecModel._
Logger.getLogger("org").setLevel(Level.WARN)
Logger.getLogger("akka").setLevel(Level.WARN)
var sc: SparkContext = null
var dataSet: DistributedDataSet[MiniBatch[Double]] = null
before {
sc = new SparkContext("local[1]", "RDDOptimizerSpec")
val rdd = sc.parallelize(1 to (256 * nodeNumber), nodeNumber).map(prepareData)
dataSet = new DistributedDataSet[MiniBatch[Double]] {
override def originRDD(): RDD[_] = rdd
override def data(train : Boolean): RDD[MiniBatch[Double]] = rdd
override def size(): Long = 256 * nodeNumber
override def shuffle(): Unit = {}
}
plusOne = 0.0
System.setProperty("bigdl.check.singleton", false.toString)
Engine.model.setPoolSize(1)
}
after {
if (sc != null) {
sc.stop()
}
}
"Train with MSE and LBFGS" should "be good" in {
RandomGenerator.RNG.setSeed(10)
val optimizer = new DistriOptimizer(
mse,
dataSet,
new MSECriterion[Double]())
.setOptimMethod(new LBFGS)
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1(Array(1)) should be(0.0 +- 1e-2)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2(Array(1)) should be(1.0 +- 1e-2)
}
"Train with MSE and SGD" should "be trained with good result" in {
val mm = mse
mm.getParameters()._1.fill(0.125)
val optimizer = new DistriOptimizer[Double](mm, dataSet, new MSECriterion[Double]())
.setState(T("learningRate" -> 20.0))
.setEndWhen(Trigger.maxEpoch(5))
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1(Array(1)) should be(0.0 +- 5e-2)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2(Array(1)) should be(1.0 +- 5e-2)
}
"Train with MSE and SGD" should "be trained with good result after reset model" in {
var mm = bn
val optimizer = new DistriOptimizer[Double](mm, dataSet, new MSECriterion[Double]())
.setState(T("learningRate" -> 20.0))
.setEndWhen(Trigger.maxEpoch(5))
optimizer.optimize()
mm = mse
mm.getParameters()._1.fill(0.125)
optimizer.setModel(mm)
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1(Array(1)) should be(0.0 +- 5e-2)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2(Array(1)) should be(1.0 +- 5e-2)
}
it should "be same compare to ref optimizer" in {
RandomGenerator.RNG.setSeed(10)
val optimizer = new DistriOptimizer(
mse,
dataSet,
new MSECriterion[Double]())
val model = optimizer.optimize()
RandomGenerator.RNG.setSeed(10)
val optimizerRef = new RefDistriOptimizer(
mse,
dataSet,
new MSECriterion[Double]()
)
val modelRef = optimizerRef.optimize()
model.getParameters()._1 should be(modelRef.getParameters()._1)
}
"An Artificial Neural Network with Cross Entropy and LBFGS" should
"be trained with good result" in {
plusOne = 1.0
val optimizer = new DistriOptimizer[Double](cre, dataSet,
new ClassNLLCriterion[Double]())
.setEndWhen(Trigger.maxEpoch(3)).setOptimMethod(new LBFGS)
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1.max(1)._2(Array(1)) should be(1.0)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2.max(1)._2(Array(1)) should be(2.0)
}
"An Artificial Neural Network with Cross Entropy and SGD" should
"be trained with good result" in {
plusOne = 1.0
RandomGenerator.RNG.setSeed(10)
val optimizer = new DistriOptimizer[Double](cre, dataSet,
new ClassNLLCriterion[Double]())
.setState(T("learningRate" -> 20.0))
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1.max(1)._2(Array(1)) should be(1.0)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2.max(1)._2(Array(1)) should be(2.0)
}
it should "be same compare to ref optimizer" in {
plusOne = 1.0
RandomGenerator.RNG.setSeed(10)
val optimizer = new DistriOptimizer[Double](
cre,
dataSet,
new ClassNLLCriterion[Double]()
).setState(T("learningRate" -> 20.0))
val model = optimizer.optimize()
RandomGenerator.RNG.setSeed(10)
val optimizerRef = new RefDistriOptimizer(
cre,
dataSet,
new ClassNLLCriterion[Double]()
).setState(T("learningRate" -> 20.0))
val modelRef = optimizerRef.optimize()
model.getParameters()._1 should be(modelRef.getParameters()._1)
}
"Train with BatchNormalization" should "return with state" in {
RandomGenerator.RNG.setSeed(10)
val mm = bn
mm.getParameters()._1.fill(0.125)
val optimizer = new DistriOptimizer[Double](mm, dataSet, new MSECriterion[Double]())
.setState(T("learningRate" -> 20.0))
.setEndWhen(Trigger.maxEpoch(5))
val model = optimizer.optimize()
val batchNormalization = model.asInstanceOf[Sequential[Double]].modules(1).
asInstanceOf[BatchNormalization[Double]]
batchNormalization.runningMean.storage().array() should be (
Array(0.37499998210083496, 0.37499998210083496)
)
batchNormalization.runningVar.storage().array() should be (
Array(1188.2811870277535, 1188.2811870277535)
)
  }
  "Train with one partition one executor" should "not throw a multi-task exception" in {
System.setProperty("bigdl.check.singleton", true.toString)
RandomGenerator.RNG.setSeed(10)
Engine.setNodeNumber(1)
val mm = bn
mm.getParameters()._1.fill(0.125)
val rdd = sc.parallelize(1 to (256 * nodeNumber), 1).map(prepareData)
val dataSet = new DistributedDataSet[MiniBatch[Double]] {
override def originRDD(): RDD[_] = rdd
override def data(train : Boolean): RDD[MiniBatch[Double]] = rdd
override def size(): Long = 256 * nodeNumber
override def shuffle(): Unit = {}
}
val optimizer = new DistriOptimizer[Double](mm, dataSet, new MSECriterion[Double]())
.setState(T("learningRate" -> 20.0))
.setEndWhen(Trigger.maxEpoch(5))
.optimize()
Engine.setNodeNumber(nodeNumber)
}
"DistriOptimizer checkpoint" should "work correctly" in {
val filePath = java.io.File.createTempFile("OptimizerSpec", "model").getAbsolutePath
Files.delete(Paths.get(filePath))
Files.createDirectory(Paths.get(filePath))
import com.intel.analytics.bigdl._
plusOne = 1.0
RandomGenerator.RNG.setSeed(10)
val optimizer = new DistriOptimizer[Double](
cre,
dataSet,
new ClassNLLCriterion[Double]()
)
optimizer.setState(T("learningRate" -> 20.0))
.setCheckpoint(filePath, Trigger.everyEpoch)
.setEndWhen(Trigger.maxEpoch(1))
.optimize()
val optimMethod =
OptimMethod.load[Double](optimizer.getCheckpointPath().get + "/optimMethod.33")
optimMethod.state.get[Int]("epoch").get should be (2)
optimMethod.state.get[Int]("neval").get should be (33)
}
"TrainSummary with MSE and LBFGS" should "work correctly" in {
TestUtils.cancelOnWindows()
RandomGenerator.RNG.setSeed(10)
val logdir = com.google.common.io.Files.createTempDir()
val trainSummary = TrainSummary(logdir.getPath, "lbfgs")
val optimizer = new DistriOptimizer(
mse,
dataSet,
new MSECriterion[Double]())
.setOptimMethod(new LBFGS)
.setTrainSummary(trainSummary)
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1(Array(1)) should be(0.0 +- 1e-2)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2(Array(1)) should be(1.0 +- 1e-2)
trainSummary.readScalar("Loss").last._2 should be (0.0f +- 1e-3f)
trainSummary.close()
}
"TrainSummary with MSE and SGD" should "work correctly" in {
TestUtils.cancelOnWindows()
RandomGenerator.RNG.setSeed(10)
val logdir = com.google.common.io.Files.createTempDir()
val trainSummary = TrainSummary(logdir.getPath, "sgd")
val mm = mse
mm.getParameters()._1.fill(0.125)
val optimizer = new DistriOptimizer[Double](mm, dataSet, new MSECriterion[Double]())
.setState(T("learningRate" -> 20.0))
.setEndWhen(Trigger.maxEpoch(5))
.setTrainSummary(trainSummary)
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1(Array(1)) should be(0.0 +- 5e-2)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2(Array(1)) should be(1.0 +- 5e-2)
trainSummary.readScalar("Loss").last._2 should be (0.0f +- 1e-3f)
trainSummary.close()
}
"TrainSummary with MSE and Adagrad" should "work correctly" in {
TestUtils.cancelOnWindows()
RandomGenerator.RNG.setSeed(10)
val logdir = com.google.common.io.Files.createTempDir()
val trainSummary = TrainSummary(logdir.getPath, "adagrad")
val mm = mse
mm.getParameters()._1.fill(0.125)
val optimizer = new DistriOptimizer[Double](mm, dataSet, new MSECriterion[Double]())
.setState(T("learningRate" -> 1.0))
.setOptimMethod(new Adagrad[Double]())
.setEndWhen(Trigger.maxEpoch(5))
.setTrainSummary(trainSummary)
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1(Array(1)) should be(0.0 +- 5e-2)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2(Array(1)) should be(1.0 +- 5e-2)
trainSummary.readScalar("Loss").last._2 should be (0.0f +- 1e-3f)
trainSummary.close()
}
"Train with MSE and SGD" should "be trained with good result with failures in small interval" in {
val filePath = java.io.File.createTempFile("OptimizerSpec", "model").getAbsolutePath
Files.delete(Paths.get(filePath))
Files.createDirectory(Paths.get(filePath))
val failCountNumberList = Array(800, 850, 900)
val mm = mserf(failCountNumberList)
mm.getParameters()._1.fill(0.125)
val optimizer = new DistriOptimizer[Double](mm, dataSet, new MSECriterion[Double]())
.setState(T("learningRate" -> 20.0))
.setEndWhen(Trigger.maxEpoch(5))
.setCheckpoint(filePath, Trigger.everyEpoch)
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1(Array(1)) should be(0.0 +- 5e-2)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2(Array(1)) should be(1.0 +- 5e-2)
ExceptionTest.resetCount()
}
"Train with MSE and SGD" should "be trained with good result with failures in big interval" in {
val filePath = java.io.File.createTempFile("OptimizerSpec", "model").getAbsolutePath
Files.delete(Paths.get(filePath))
Files.createDirectory(Paths.get(filePath))
val failCountNumberList = Array(800, 850, 900, 1500)
System.setProperty("bigdl.failure.retryTimeInterval", "3")
System.setProperty("bigdl.failure.retryTimes", "2")
val mm = mserf(failCountNumberList, true)
mm.getParameters()._1.fill(0.125)
val optimizer = new DistriOptimizer[Double](mm, dataSet, new MSECriterion[Double]())
.setState(T("learningRate" -> 20.0))
.setEndWhen(Trigger.maxEpoch(5))
.setCheckpoint(filePath, Trigger.everyEpoch)
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1(Array(1)) should be(0.0 +- 5e-2)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2(Array(1)) should be(1.0 +- 5e-2)
ExceptionTest.resetCount()
}
"Train with MSE and SGD" should "throw exception after retry times exceed settings" in {
val filePath = java.io.File.createTempFile("OptimizerSpec", "model").getAbsolutePath
Files.delete(Paths.get(filePath))
Files.createDirectory(Paths.get(filePath))
val failCountNumberList = Array(800, 850, 900)
System.setProperty("bigdl.failure.retryTimes", "3")
val mm = mserf(failCountNumberList)
mm.getParameters()._1.fill(0.125)
val optimizer = new DistriOptimizer[Double](mm, dataSet, new MSECriterion[Double]())
.setState(T("learningRate" -> 20.0))
.setEndWhen(Trigger.maxEpoch(5))
intercept[Exception] {
optimizer.optimize()
}
ExceptionTest.resetCount()
optimizer.setCheckpoint(filePath, Trigger.everyEpoch)
intercept[Exception] {
optimizer.optimize()
}
ExceptionTest.resetCount()
}
"Train with Plateau" should "work properly" in {
LoggerFilter.redirectSparkInfoLogs()
Logger.getLogger("com.intel.analytics.bigdl.optim").setLevel(Level.INFO)
Logger.getLogger("com.intel.analytics.bigdl").setLevel(Level.INFO)
RandomGenerator.RNG.setSeed(10)
val logdir = com.google.common.io.Files.createTempDir()
val mm = mse
mm.getParameters()._1.fill(0.125)
val optimizer = new DistriOptimizer[Double](
_model = mm,
dataset = dataSet,
criterion = new MSECriterion[Double]()
)
val optimMethod = new SGD[Double](learningRate = 20.0, learningRateSchedule =
SGD.Plateau("Loss", epsilon = 0, patience = 1, mode = "min"))
optimizer.setOptimMethod(optimMethod)
.setEndWhen(Trigger.maxEpoch(10))
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1(Array(1)) should be(0.0 +- 5e-2)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2(Array(1)) should be(1.0 +- 5e-2)
}
"Train with Plateau Score" should "work properly" in {
LoggerFilter.redirectSparkInfoLogs()
Logger.getLogger("com.intel.analytics.bigdl.optim").setLevel(Level.INFO)
Logger.getLogger("com.intel.analytics.bigdl").setLevel(Level.INFO)
RandomGenerator.RNG.setSeed(10)
val logdir = com.google.common.io.Files.createTempDir()
val mm = mse
mm.getParameters()._1.fill(0.125)
val optimizer = new DistriOptimizer[Double](
_model = mm,
dataset = dataSet,
criterion = new MSECriterion[Double]()
)
val optimMethod = new SGD[Double](learningRate = 20.0, learningRateSchedule =
SGD.Plateau("score", epsilon = 0, patience = 1, mode = "max"))
optimizer.setOptimMethod(optimMethod)
.setEndWhen(Trigger.maxEpoch(10))
optimizer.setValidation(Trigger.everyEpoch, dataSet,
Array(new Top1Accuracy[Double]()))
val model = optimizer.optimize()
val result1 = model.forward(input1).asInstanceOf[Tensor[Double]]
result1(Array(1)) should be(0.0 +- 5e-2)
val result2 = model.forward(input2).asInstanceOf[Tensor[Double]]
result2(Array(1)) should be(1.0 +- 5e-2)
}
}
| JerryYanWan/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/optim/DistriOptimizerSpec.scala | Scala | apache-2.0 | 17,936 |
import android.Keys._
import sbt.Keys._
import sbt._
import scala.collection.JavaConversions._
object BuildDependencies {
val appCompat = aar("com.android.support" % "appcompat-v7" % "21.0.0")
val cardView = aar("com.android.support" % "cardview-v7" % "21.0.0")
val support = aar("com.android.support" % "support-v4" % "21.0.0")
// Test Dependencies
val junit = "junit" % "junit" % "4.8.2" % "test"
val junit_interface = "com.novocode" % "junit-interface" % "0.8" % "test->default"
val robolectric = "org.robolectric" % "robolectric" % "2.3" % "test"
}
object BuildSettings {
import BuildDependencies._
val SCALA_VERSION = "2.11.4"
val APP_VERSION = "0.1"
lazy val commonSettings = Seq(
organization := "$package$",
version := APP_VERSION,
scalaVersion := SCALA_VERSION,
scalacOptions ++= Seq("-feature", "-deprecation"),
libraryDependencies ++= Seq(
appCompat,
support,
cardView,
junit,
junit_interface,
robolectric
),
// android-sbt-plugin settings
platformTarget in Android := "android-21",
minSdkVersion in Android := "16",
typedResources := true,
useProguard in Android := true,
apkbuildExcludes in Android += "LICENSE.txt",
proguardOptions in Android ++= IO.readLines(new File("project/proguard.txt")),
// android-sbt-plugin settings specific to testing
debugIncludesTests := false,
debugIncludesTests in Android := false,
// or else @Config throws an exception, yay
unmanagedClasspath in Test ++= (builder in Android).value.getBootClasspath map
Attributed.blank,
managedClasspath in Test <++= (platformJars in Android, baseDirectory) map {
case ((j,_), b) =>
Seq(Attributed.blank(b / "bin" / "classes"), Attributed.blank(file(j)))
},
fullClasspath in Test <+= (sourceDirectory in Test) map { s =>
Attributed.blank(s / "resources")
},
testOptions in Test ++= Seq(
Tests.Argument("-oD"),
Tests.Argument("sequential")
),
// Robolectric tests should run in main thread
parallelExecution in Test := false
)
}
object AndroidBuild extends android.AutoBuild {
import BuildSettings._
lazy val root = Project(
id = "$name$",
base = file(".")
)
.settings(commonSettings: _*)
}
| ryanbrozo/android-sbt-starter.g8 | src/main/g8/project/build.scala | Scala | mit | 2,468 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.planning
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.geomesa.filter._
import org.locationtech.geomesa.filter.visitor.IdDetectingFilterVisitor
import org.locationtech.geomesa.index.api.{FilterPlan, FilterStrategy, GeoMesaFeatureIndex, WrappedFeature}
import org.locationtech.geomesa.index.geotools.GeoMesaDataStore
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter._
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
/**
* Class for splitting queries up based on Boolean clauses and the available query strategies.
*/
class FilterSplitter[DS <: GeoMesaDataStore[DS, F, W], F <: WrappedFeature, W]
(sft: SimpleFeatureType, indices: Seq[GeoMesaFeatureIndex[DS, F, W]]) extends LazyLogging {
import FilterSplitter._
type TypedFilterPlan = FilterPlan[DS, F, W]
type TypedFilterStrategy = FilterStrategy[DS, F, W]
/**
* Splits the query up into different filter plans to be evaluated. Each filter plan will consist of one or
* more query plans. Each query plan will have a primary part (that would be used for query planning)
* and an optional secondary part (that would be applied as a secondary filter).
*
* Examples:
*
* bbox(geom) AND attr1 = ? =>
*
* Seq(FilterPlan(Seq(QueryFilter(ST,Some([ geom bbox ]),Some([ attr1 = ? ])))))
*
* bbox(geom) OR attr1 = ? =>
*
* Seq(FilterPlan(Seq(QueryFilter(ST,Some([ geom bbox ]),None), QueryFilter(ATTRIBUTE,Some([ attr1 = ? ]),None))))
*
* bbox(geom) AND dtg DURING ? AND attr1 = ? =>
*
* Seq(FilterPlan(Seq(QueryFilter(Z3,Some([ geom bbox AND dtg during ? ]),Some([ attr1 = ? ])))),
* FilterPlan(Seq(QueryFilter(ATTRIBUTE,Some([ attr1 = ? ]),Some([ geom bbox AND dtg during ? ])))))
*
* note: spatial and temporal filters are combined.
*
* (bbox(geom) OR geom INTERSECT) AND attr1 = ? =>
*
* Seq(FilterPlan(Seq(QueryFilter(ST,Some([ geom bbox OR geom intersect ]),Some([ attr1 = ? ]))))
* FilterPlan(Seq(QueryFilter(ATTRIBUTE,Seq([ attr1 = ? ]),Some([ geom bbox OR geom intersect ])))))
*
* note: ORs will not be split if they operate on a single attribute
*
*/
def getQueryOptions(filter: Filter, transform: Option[SimpleFeatureType] = None): Seq[TypedFilterPlan] = {
// cnf gives us a top level AND with ORs as first children
rewriteFilterInCNF(filter) match {
case a: And =>
// look for ORs across attributes, e.g. bbox OR dtg
val (complex, simple) = a.getChildren.partition(f => f.isInstanceOf[Or] && attributeAndIdCount(f, sft) > 1)
if (complex.isEmpty) {
// no cross-attribute ORs
getSimpleQueryOptions(a, transform).map(FilterPlan.apply[DS, F, W])
} else if (simple.nonEmpty) {
logger.warn("Not considering complex OR predicates in query planning: " +
s"${complex.map(filterToString).mkString("(", ") AND (", ")")}")
def addComplexPredicates(qf: TypedFilterStrategy) =
qf.copy(secondary = andOption(qf.secondary.toSeq ++ complex))
val simpleOptions = getSimpleQueryOptions(andFilters(simple), transform)
simpleOptions.map(addComplexPredicates).map(FilterPlan.apply[DS, F, W])
} else {
logger.warn(s"Falling back to expand/reduce query splitting for filter ${filterToString(filter)}")
val dnf = rewriteFilterInDNF(filter).asInstanceOf[Or]
expandReduceOrOptions(dnf, transform).map(makeDisjoint)
}
case o: Or =>
// there are no ands - just ors between fields
// this implies that each child has only a single property or ID
def getGroup(f: Filter) = (FilterHelper.propertyNames(f, sft), FilterHelper.hasIdFilter(f))
// group and then recombine the OR'd filters by the attribute they operate on
val groups = o.getChildren.groupBy(getGroup).values.map(ff.or(_)).toSeq
val perAttributeOptions = groups.flatMap { g =>
val options = getSimpleQueryOptions(g, transform)
require(options.length < 2, s"Expected only a single option for ${filterToString(g)} but got $options")
options.headOption
}
if (perAttributeOptions.exists(_.primary.isEmpty)) {
// we have to do a full table scan for part of the query, just append everything to that
Seq(FilterPlan(fullTableScanOption(o, transform)))
} else {
Seq(makeDisjoint(FilterPlan(perAttributeOptions)))
}
case f =>
getSimpleQueryOptions(f, transform).map(qf => FilterPlan(Seq(qf)))
}
}
/**
* Gets options for a 'simple' filter, where each OR is on a single attribute, e.g.
* (bbox1 OR bbox2) AND dtg
* bbox AND dtg AND (attr1 = foo OR attr = bar)
* not:
* bbox OR dtg
*
* Because the inputs are simple, each one can be satisfied with a single query filter.
* The returned values will each satisfy the query, using a different strategy.
*
* @param filter input filter
* @return sequence of options, any of which can satisfy the query
*/
private def getSimpleQueryOptions(filter: Filter, transform: Option[SimpleFeatureType]): Seq[TypedFilterStrategy] = {
val options = indices.flatMap(_.getFilterStrategy(sft, filter, transform))
if (options.isEmpty) {
Seq.empty
} else {
val (fullScans, indexScans) = options.partition(_.primary.isEmpty)
if (indexScans.nonEmpty) {
indexScans
} else {
Seq(fullScans.head)
}
}
}
/**
* Calculates all possible options for each part of the filter, then determines all permutations of
* the options. This can end up being expensive (O(2^n)), so is only used as a fall-back.
*/
private def expandReduceOrOptions(filter: Or, transform: Option[SimpleFeatureType]): Seq[TypedFilterPlan] = {
// for each child of the or, get the query options
// each filter plan should only have a single query filter
def getChildOptions: Seq[Seq[TypedFilterPlan]] =
filter.getChildren.map(getSimpleQueryOptions(_, transform).map(qf => FilterPlan(Seq(qf))))
// combine the filter plans so that each plan has multiple query filters
// use the permutations of the different options for each child
// TODO GEOMESA-941 Fix algorithmically dangerous (2^N exponential runtime)
def reduceChildOptions(childOptions: Seq[Seq[TypedFilterPlan]]): Seq[TypedFilterPlan] =
childOptions.reduce { (left, right) =>
left.flatMap(l => right.map(r => FilterPlan(l.strategies ++ r.strategies)))
}
// try to combine query filters in each filter plan if they have the same primary filter
// this avoids scanning the same ranges twice with different secondary predicates
def combineSecondaryFilters(options: Seq[TypedFilterPlan]): Seq[TypedFilterPlan] = options.map { r =>
// build up the result array instead of using a group by to preserve filter order
val groups = ArrayBuffer.empty[TypedFilterStrategy]
r.strategies.distinct.foreach { f =>
val i = groups.indexWhere(g => g.index == f.index && g.primary == f.primary)
if (i == -1) {
groups.append(f)
} else {
val current = groups(i).secondary match {
case Some(o) if o.isInstanceOf[Or] => o.asInstanceOf[Or].getChildren.toSeq
case Some(n) => Seq(n)
case None => Seq.empty
}
groups.update(i, f.copy(secondary = orOption(current ++ f.secondary)))
}
}
FilterPlan(groups)
}
// if a filter plan has any query filters that scan a subset of the range of a different query filter,
// then we can combine them, as we have to scan the larger range anyway
def mergeOverlappedFilters(options: Seq[TypedFilterPlan]): Seq[TypedFilterPlan] = options.map { filterPlan =>
val filters = ArrayBuffer(filterPlan.strategies: _*)
var merged: TypedFilterStrategy = null
var i = 0
while (i < filters.length) {
val toMerge = filters(i)
if (toMerge != null) {
var j = 0
while (j < filters.length && merged == null) {
if (i != j) {
val mergeTo = filters(j)
if (mergeTo != null) {
merged = tryMerge(toMerge, mergeTo)
}
}
j += 1
}
if (merged != null) {
// remove the merged query filter and replace the one merged into
filters.update(i, null)
filters.update(j - 1, merged)
merged = null
}
}
i += 1
}
// if we have replaced anything, recreate the filter plan
val overlapped = filters.filter(_ != null)
if (overlapped.length < filterPlan.strategies.length) {
FilterPlan(overlapped)
} else {
filterPlan
}
}
val childOpts = getChildOptions
val reducedOpts = reduceChildOptions(childOpts)
val combinedSec = combineSecondaryFilters(reducedOpts)
val merged = mergeOverlappedFilters(combinedSec)
if (merged.nonEmpty) {
merged
} else {
Seq(FilterPlan(Seq(fullTableScanOption(filter, transform))))
}
}
/**
* Will perform a full table scan - used when we don't have anything better. Currently z3, z2 and record
* tables support full table scans.
*/
private def fullTableScanOption(filter: Filter, transform: Option[SimpleFeatureType]): TypedFilterStrategy = {
val secondary = if (filter == Filter.INCLUDE) None else Some(filter)
val options = indices.toStream.flatMap(_.getFilterStrategy(sft, Filter.INCLUDE, transform))
options.headOption.map(o => o.copy(secondary = secondary)).getOrElse {
throw new UnsupportedOperationException(s"Configured indices do not support the query ${filterToString(filter)}")
}
}
}
object FilterSplitter {
/**
* Gets the count of distinct attributes being queried - ID is treated as an attribute
*/
def attributeAndIdCount(filter: Filter, sft: SimpleFeatureType): Int = {
val attributeCount = FilterHelper.propertyNames(filter, sft).size
val idCount = if (filter.accept(new IdDetectingFilterVisitor, false).asInstanceOf[Boolean]) 1 else 0
attributeCount + idCount
}
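  // Illustrative counts (filters written informally, not executed here):
  //   "bbox(geom, ...) AND dtg DURING ..."   -> 2  (geom and dtg)
  //   "bbox(geom, ...) OR IN ('id1')"        -> 2  (geom plus the ID filter)
  //   "name = 'foo' OR name = 'bar'"         -> 1  (single attribute)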
/**
* Try to merge the two query filters. Return the merged query filter if successful, else null.
*/
def tryMerge[DS <: GeoMesaDataStore[DS, F, W], F <: WrappedFeature, W]
(toMerge: FilterStrategy[DS, F, W], mergeTo: FilterStrategy[DS, F, W]): FilterStrategy[DS, F, W] = {
if (mergeTo.primary.forall(_ == Filter.INCLUDE)) {
// this is a full table scan, we can just append the OR to the secondary filter
val secondary = orOption(mergeTo.secondary.toSeq ++ toMerge.filter)
mergeTo.copy(secondary = secondary)
} else if (toMerge.index.name == "attr" && mergeTo.index.name == "attr") {
// TODO extract this out into the API?
tryMergeAttrStrategy(toMerge, mergeTo)
} else {
// overlapping geoms, date ranges, attribute ranges, etc will be handled when extracting bounds
null
}
}
/**
* Tries to merge the two filters that are OR'd together into a single filter that can be queried in one pass.
* Will return the merged filter, or null if they can't be merged.
*
* We can merge filters if they have the same secondary filter AND:
* 1. One of them does not have a primary filter
* 2. They both have a primary filter on the same attribute
*
* @param toMerge first filter
* @param mergeTo second filter
* @return merged filter that satisfies both inputs, or null if that isn't possible
*/
def tryMergeAttrStrategy[DS <: GeoMesaDataStore[DS, F, W], F <: WrappedFeature, W]
(toMerge: FilterStrategy[DS, F, W], mergeTo: FilterStrategy[DS, F, W]): FilterStrategy[DS, F, W] = {
// TODO this will be incorrect for multi-valued properties where we have an AND in the primary filter
val leftAttributes = toMerge.primary.map(FilterHelper.propertyNames(_, null))
val rightAttributes = mergeTo.primary.map(FilterHelper.propertyNames(_, null))
val canMergePrimary = (leftAttributes, rightAttributes) match {
case (Some(left), Some(right)) => left.length == 1 && right.length == 1 && left.head == right.head
case _ => true
}
if (canMergePrimary && toMerge.secondary == mergeTo.secondary) {
FilterStrategy(mergeTo.index, orOption(toMerge.primary.toSeq ++ mergeTo.primary), mergeTo.secondary)
} else {
null
}
}
/**
* Make a filter plan disjoint ORs - this way we don't have to deduplicate results
*
* @param option filter plan
* @return same filter plan with disjoint ORs
*/
def makeDisjoint[DS <: GeoMesaDataStore[DS, F, W], F <: WrappedFeature, W](option: FilterPlan[DS, F, W]): FilterPlan[DS, F, W] = {
if (option.strategies.length < 2) {
option
} else {
// A OR B OR C becomes... A OR (B NOT A) OR (C NOT A and NOT B)
def extractNot(qp: FilterStrategy[DS, F, W]) = qp.filter.map(ff.not)
// keep track of our current disjoint clause
val nots = ArrayBuffer[Filter]()
extractNot(option.strategies.head).foreach(nots.append(_))
val filters = Seq(option.strategies.head) ++ option.strategies.tail.map { filter =>
val sec = Some(andFilters(nots ++ filter.secondary))
extractNot(filter).foreach(nots.append(_)) // note - side effect
filter.copy(secondary = sec)
}
FilterPlan(filters)
}
}
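  // Worked example of the rewrite above, with strategies written informally:
  //   input:  [ bbox(geom) ], [ dtg DURING d1/d2 ], [ name = 'foo' ]
  //   output: [ bbox(geom) ],
  //           [ dtg DURING d1/d2 AND NOT bbox(geom) ],
  //           [ name = 'foo' AND NOT bbox(geom) AND NOT dtg DURING d1/d2 ]
  // so the unioned scans never return the same feature twice and no separate
  // deduplication step is needed.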
}
| ddseapy/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/planning/FilterSplitter.scala | Scala | apache-2.0 | 14,131 |
/*
* Copyright (C) 2020 MapRoulette contributors (see CONTRIBUTORS.md).
* Licensed under the Apache License, Version 2.0 (see LICENSE).
*/
package org.maproulette.data
/**
 * A collection of case classes used in place of Java's enums. This is better for pattern
 * matching. Enums in Scala are really only useful in the simplest of cases.
*
* @author cuthbertm
*/
/**
* This is the sealed base class for an Action Type, {@link Actions}
*
* @param id The id of the action { @see Actions}
* @param level The level at which the action will be stored in the database. The level is set in the
* application config. And any action at that level and below will be written to the
* database, anything above will be ignored.
*/
class ActionType(id: Int, level: Int) {
def getId: Int = id
def getLevel: Int = level
}
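// Example of the level-based filtering described above: if the configured level is 2
// (ACTION_LEVEL_2), then Created()/Updated() (level 2) and TaskStatusSet (level 1)
// are persisted, while TaskViewed() (level 3) is ignored.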
/**
* This is the sealed base class for the type of item for the action, {@link Actions}
*
* @param id The id of the action { @see Actions}
*/
class ItemType(id: Int) {
val typeId = id
def convertToItem(itemId: Long): Item with ItemType = {
this match {
case p: ProjectType => new ProjectItem(itemId)
case c: ChallengeType => new ChallengeItem(itemId)
case t: TaskType => new TaskItem(itemId)
case ta: TagType => new TagItem(itemId)
case u: UserType => new UserItem(itemId)
case group: GroupType => new GroupItem(itemId)
case vc: VirtualChallengeType => new VirtualChallengeItem(itemId)
case b: BundleType => new BundleItem(itemId)
}
}
}
trait Item {
def itemId: Long
}
case class ProjectType() extends ItemType(Actions.ITEM_TYPE_PROJECT)
case class ChallengeType() extends ItemType(Actions.ITEM_TYPE_CHALLENGE)
case class TaskType() extends ItemType(Actions.ITEM_TYPE_TASK)
case class TagType() extends ItemType(Actions.ITEM_TYPE_TAG)
case class UserType() extends ItemType(Actions.ITEM_TYPE_USER)
case class GroupType() extends ItemType(Actions.ITEM_TYPE_GROUP)
case class VirtualChallengeType() extends ItemType(Actions.ITEM_TYPE_VIRTUAL_CHALLENGE)
case class BundleType() extends ItemType(Actions.ITEM_TYPE_BUNDLE)
class ProjectItem(override val itemId: Long) extends ProjectType with Item
class ChallengeItem(override val itemId: Long) extends ChallengeType with Item
class TaskItem(override val itemId: Long) extends TaskType with Item
class TagItem(override val itemId: Long) extends TagType with Item
class UserItem(override val itemId: Long) extends UserType with Item
class GroupItem(override val itemId: Long) extends GroupType with Item
class VirtualChallengeItem(override val itemId: Long) extends VirtualChallengeType with Item
class BundleItem(override val itemId: Long) extends BundleType with Item
case class Updated() extends ActionType(Actions.ACTION_TYPE_UPDATED, Actions.ACTION_LEVEL_2)
case class Created() extends ActionType(Actions.ACTION_TYPE_CREATED, Actions.ACTION_LEVEL_2)
case class Deleted() extends ActionType(Actions.ACTION_TYPE_DELETED, Actions.ACTION_LEVEL_2)
case class TaskViewed() extends ActionType(Actions.ACTION_TYPE_TASK_VIEWED, Actions.ACTION_LEVEL_3)
case class TaskStatusSet(status: Int)
extends ActionType(Actions.ACTION_TYPE_TASK_STATUS_SET, Actions.ACTION_LEVEL_1)
case class TaskReviewStatusSet(status: Int)
extends ActionType(Actions.ACTION_TYPE_TASK_REVIEW_STATUS_SET, Actions.ACTION_LEVEL_1)
case class MetaReviewStatusSet(status: Int)
extends ActionType(Actions.ACTION_TYPE_META_REVIEW_STATUS_SET, Actions.ACTION_LEVEL_1)
case class TagAdded() extends ActionType(Actions.ACTION_TYPE_TAG_ADDED, Actions.ACTION_LEVEL_2)
case class TagRemoved() extends ActionType(Actions.ACTION_TYPE_TAG_REMOVED, Actions.ACTION_LEVEL_2)
case class QuestionAnswered(answerId: Long)
extends ActionType(Actions.ACTION_TYPE_QUESTION_ANSWERED, Actions.ACTION_LEVEL_1)
case class GrantType() extends ItemType(Actions.ITEM_TYPE_GRANT)
object Actions {
val ACTION_LEVEL_1 = 1
val ACTION_LEVEL_2 = 2
val ACTION_LEVEL_3 = 3
val ITEM_TYPE_PROJECT = 0
val ITEM_TYPE_PROJECT_NAME = "Project"
val ITEM_TYPE_CHALLENGE = 1
val ITEM_TYPE_CHALLENGE_NAME = "Challenge"
val ITEM_TYPE_TASK = 2
val ITEM_TYPE_TASK_NAME = "Task"
val ITEM_TYPE_TAG = 3
val ITEM_TYPE_TAG_NAME = "Tag"
val ITEM_TYPE_SURVEY = 4
val ITEM_TYPE_SURVEY_NAME = "Survey"
val ITEM_TYPE_USER = 5
val ITEM_TYPE_USER_NAME = "User"
val ITEM_TYPE_GROUP = 6
val ITEM_TYPE_GROUP_NAME = "Group"
val ITEM_TYPE_VIRTUAL_CHALLENGE = 7
val ITEM_TYPE_VIRTUAL_CHALLENGE_NAME = "VirtualChallenge"
val ITEM_TYPE_BUNDLE = 8
val ITEM_TYPE_BUNDLE_NAME = "Bundle"
val ITEM_TYPE_GRANT = 9
val ITEM_TYPE_GRANT_NAME = "Grant"
val itemIDMap = Map(
ITEM_TYPE_PROJECT -> (ITEM_TYPE_PROJECT_NAME, ProjectType()),
ITEM_TYPE_CHALLENGE -> (ITEM_TYPE_CHALLENGE_NAME, ChallengeType()),
ITEM_TYPE_TASK -> (ITEM_TYPE_TASK_NAME, TaskType()),
ITEM_TYPE_TAG -> (ITEM_TYPE_TAG_NAME, TagType()),
ITEM_TYPE_USER -> (ITEM_TYPE_USER_NAME, UserType()),
ITEM_TYPE_GROUP -> (ITEM_TYPE_GROUP_NAME, GroupType()),
ITEM_TYPE_VIRTUAL_CHALLENGE -> (ITEM_TYPE_VIRTUAL_CHALLENGE_NAME, VirtualChallengeType()),
ITEM_TYPE_BUNDLE -> (ITEM_TYPE_BUNDLE_NAME, BundleType()),
ITEM_TYPE_GRANT -> (ITEM_TYPE_GRANT_NAME, GrantType())
)
val ACTION_TYPE_UPDATED = 0
val ACTION_TYPE_UPDATED_NAME = "Updated"
val ACTION_TYPE_CREATED = 1
val ACTION_TYPE_CREATED_NAME = "Created"
val ACTION_TYPE_DELETED = 2
val ACTION_TYPE_DELETED_NAME = "Deleted"
val ACTION_TYPE_TASK_VIEWED = 3
val ACTION_TYPE_TASK_VIEWED_NAME = "Task_Viewed"
val ACTION_TYPE_TASK_STATUS_SET = 4
val ACTION_TYPE_TASK_STATUS_SET_NAME = "Task_Status_Set"
val ACTION_TYPE_TAG_ADDED = 5
val ACTION_TYPE_TAG_ADDED_NAME = "Tag_Added"
val ACTION_TYPE_TAG_REMOVED = 6
val ACTION_TYPE_TAG_REMOVED_NAME = "Tag_Removed"
val ACTION_TYPE_QUESTION_ANSWERED = 7
val ACTION_TYPE_QUESTION_ANSWERED_NAME = "Question_Answered"
val ACTION_TYPE_TASK_REVIEW_STATUS_SET = 8
val ACTION_TYPE_TASK_REVIEW_STATUS_SET_NAME = "Task_Review_Status_Set"
val ACTION_TYPE_META_REVIEW_STATUS_SET = 9
val ACTION_TYPE_META_REVIEW_STATUS_SET_NAME = "Task_Meta_Review_Status_Set"
val actionIDMap = Map(
ACTION_TYPE_UPDATED -> ACTION_TYPE_UPDATED_NAME,
ACTION_TYPE_CREATED -> ACTION_TYPE_CREATED_NAME,
ACTION_TYPE_DELETED -> ACTION_TYPE_DELETED_NAME,
ACTION_TYPE_TASK_VIEWED -> ACTION_TYPE_TASK_VIEWED_NAME,
ACTION_TYPE_TASK_STATUS_SET -> ACTION_TYPE_TASK_STATUS_SET_NAME,
ACTION_TYPE_TAG_ADDED -> ACTION_TYPE_TAG_ADDED_NAME,
ACTION_TYPE_TAG_REMOVED -> ACTION_TYPE_TAG_REMOVED_NAME,
ACTION_TYPE_QUESTION_ANSWERED -> ACTION_TYPE_QUESTION_ANSWERED_NAME,
ACTION_TYPE_TASK_REVIEW_STATUS_SET -> ACTION_TYPE_TASK_REVIEW_STATUS_SET_NAME,
ACTION_TYPE_META_REVIEW_STATUS_SET -> ACTION_TYPE_META_REVIEW_STATUS_SET_NAME
)
/**
* Validates whether the provided id is actually an action type id
*
* @param actionType The id to test
* @return true if valid action type id
*/
def validActionType(actionType: Int): Boolean = actionIDMap.contains(actionType)
/**
* Validates the provided action type name
*
* @param actionType The action type name to validate
* @return true if valid action type
*/
def validActionTypeName(actionType: String): Boolean = getActionID(actionType) match {
case Some(_) => true
case None => false
}
/**
* Based on a string will return the action id that it matches, None otherwise
*
* @param action The string to match against
* @return Option[Int] if found, None otherwise.
*/
def getActionID(action: String): Option[Int] =
actionIDMap.find(_._2.equalsIgnoreCase(action)) match {
case Some(a) => Some(a._1)
case None => None
}
/**
* Validates whether the provided id is actually an item type id
*
* @param itemType The id to test
* @return true if valid item type id
*/
  def validItemType(itemType: Int): Boolean = itemIDMap.contains(itemType)
/**
* Validates the provided item name
*
* @param itemType The item type name to test
* @return true if a valid item type
*/
def validItemTypeName(itemType: String): Boolean = getTypeID(itemType) match {
case Some(_) => true
case None => false
}
/**
* Based on a string will return the item type id that the string matches, None otherwise
*
* @param itemType The string to match against
* @return Option[Int] if found, None otherwise
*/
def getTypeID(itemType: String): Option[Int] =
itemIDMap.find(_._2._1.equalsIgnoreCase(itemType)) match {
case Some(it) => Some(it._1)
case None => None
}
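  // Example lookups (the results follow directly from the maps above):
  //   Actions.getActionID("created") // Some(1)
  //   Actions.getTypeID("task")      // Some(2)
  //   Actions.getTypeName(0)         // Some("Project")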
/**
* Based on an id will return the Item type name it matches, None otherwise
*
* @param itemType The id to find
* @return Option[String] if found, None otherwise
*/
def getTypeName(itemType: Int): Option[String] = itemIDMap.get(itemType) match {
case Some(it) => Some(it._1)
case None => None
}
/**
* Gets the ItemType based on the Item Type Id
*
* @param itemType The item type id
* @return The ItemType matching the supplied item type id
*/
def getItemType(itemType: Int): Option[ItemType] = itemIDMap.get(itemType) match {
case Some(it) => Some(it._2)
case None => None
}
/**
* Gets the ItemType based on the Item Type name
*
* @param itemType The item type name
* @return The ItemType matching the supplied item type name
*/
def getItemType(itemType: String): Option[ItemType] =
itemIDMap.find(_._2._1.equalsIgnoreCase(itemType)) match {
case Some(a) => Some(a._2._2)
case None => None
}
/**
* Based on an id will return the action name that the id matches, None otherwise
*
* @param action The id to match against
* @return Option[String] if found, None otherwise.
*/
def getActionName(action: Int): Option[String] = actionIDMap.get(action)
}
| mgcuthbert/maproulette2 | app/org/maproulette/data/Actions.scala | Scala | apache-2.0 | 10,839 |
package com.xiaoguangchen.spa
import com.typesafe.config._
import com.xiaoguangchen.spa.annotation.Column
import java.sql.Connection
/**
* author: Chester Chen
* Date: 1/23/13 9:15 AM
*/
class BaseTest {
val config = ConfigFactory.load()
val database = getDatabaseVendor
def getDatabaseVendor:Database = {
config.getString("db.vendor") match {
case "mysql" => MySQL
case "postgres" => PostgreSQL
case "oracle" => Oracle
case _ => OtherDatabase
}
}
def getConnectionProps : (String, String, String, String ) = {
val (usernameKey, passwordKey, urlKey, driverKey ) = getDatabaseVendor match {
case MySQL =>
( "db.mysql.username" , "db.mysql.password", "db.mysql.driver.url" , "db.mysql.driver.name")
case PostgreSQL =>
( "db.postgres.username" , "db.postgres.password", "db.postgres.driver.url" , "db.postgres.driver.name")
case Oracle =>
( "db.oracle.username" , "db.oracle.password", "db.oracle.driver.url" , "db.oracle.driver.name")
case OtherDatabase =>
( "db.username" , "db.password", "db.driver.url" , "db.driver.name")
}
val userName = config.getString(usernameKey)
val password = config.getString(passwordKey)
val url = config.getString(urlKey)
val driver = config.getString(driverKey)
(userName, password, url, driver)
}
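  // A minimal application.conf sketch that satisfies the keys above for MySQL
  // (all values are placeholders, not taken from the original project):
  //
  //   db.vendor = "mysql"
  //   db.mysql.username = "test"
  //   db.mysql.password = "secret"
  //   db.mysql.driver.url = "jdbc:mysql://localhost:3306/spa_test"
  //   db.mysql.driver.name = "com.mysql.jdbc.Driver"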
def getConnection: Option[Connection] = {
val (userName , password , url, driver) = getConnectionProps
QueryManager.getConnection(driver, url, userName, password)
}
}
//constructor annotation
case class Coffee(@Column("COF_NAME") name: String,
@Column("SUP_ID") supId: Int,
@Column("PRICE") price: Double)
case class CoffeePrice( name: String,
price: Double)
| chesterxgchen/spa | src/test/scala/com/xiaoguangchen/spa/BaseTest.scala | Scala | apache-2.0 | 1,828 |
package br.ufmg.cs.util.sorter
// command line parsing
import br.ufmg.cs.util.ParamsParser
// utils
import br.ufmg.cs.util.Common
import org.apache.log4j.{Logger, Level}
// spark
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.rdd.RDD
import org.apache.hadoop.io._
import org.apache.hadoop.fs._
import scala.concurrent._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Try, Success, Failure}
object KeySorter {
val appName = "KeySorter"
val log = Logger.getLogger(appName)
case class Params(inputFile: String = null,
logLevel: Level = Level.OFF) {
def getValues = (inputFile, logLevel)
override def toString =
"&%@ params " + getValues.productIterator.mkString (" ")
}
def main(args: Array[String]) {
val defaultParams = Params()
val parser = new ParamsParser[Params](appName)
parser.opt ("inputFile",
s"path to transactions input file, default: ${defaultParams.inputFile}",
(value,tmpParams) => tmpParams.copy (inputFile = value)
)
parser.opt ("logLevel",
s"log4j level, default: ${defaultParams.logLevel}",
(value,tmpParams) => tmpParams.copy (logLevel = Level.toLevel(value))
)
parser.parse (args, defaultParams) match {
case Some(params) => run(params)
case None => sys.exit(1)
}
}
def run(params: Params) {
val (inputFile, logLevel) = params.getValues
// set log levels
log.setLevel(logLevel)
val conf = new SparkConf().setAppName(appName).
set ("spark.instrumentation.keydist", "false")
val sc = new SparkContext(conf)
sc.hadoopConfiguration.set ("mapreduce.input.fileinputformat.input.dir.recursive", "true")
val fs = FileSystem.get (sc.hadoopConfiguration)
log.debug (s"applicationId = ${sc.applicationId}")
var jobFutures = List.empty[Future[Unit]]
val appsPath = new Path ("keydist/*")
val appStatuses = fs.globStatus (appsPath)
for (appStatus <- appStatuses) {
var lens = Map.empty[Long,String]
val stagesPath = new Path (appStatus.getPath, "*")
val stageStatuses = fs.globStatus (stagesPath)
for (stageStatus <- stageStatuses) {
val depsPath = new Path (stageStatus.getPath, "*")
val depStatuses = fs.globStatus (depsPath)
for (depStatus <- depStatuses) {
val parts = fs.globStatus (new Path(depStatus.getPath, "*"))
val newLen = parts.map (_.getLen).sum
if (!lens.contains(newLen)) {
// sort for all partitions
jobFutures = Future {
log.info (s"${newLen} reading for dependency ${depStatus.getPath.toString}")
val keys = sc.textFile (depStatus.getPath.toString)
val freqs = keys.map (k => (k,1L)).reduceByKey (_ + _)
freqs.sortByKey().map (tup => s"${tup._1} ${tup._2}").saveAsTextFile (
depStatus.getPath.toString.replaceAll ("keydist", "keydist-aggregated")
)
log.info (s"${newLen} finished for dependency ${depStatus.getPath.toString}")
} :: jobFutures
lens += (newLen -> depsPath.toString)
} else {
log.info (s"${newLen} skipping dependency ${depStatus.getPath.toString}")
}
}
}
}
val completionFuture = Future.sequence (jobFutures)
Await.ready (completionFuture, Duration.Inf)
completionFuture.value.get match {
case Success(_) =>
println (s"Success !!")
case Failure(e) =>
println (s"Failure !!")
throw e
}
sc.stop
}
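  // The job above writes each "keydist-aggregated" dependency as text files of
  // "<key> <count>" lines, sorted by key, e.g. (illustrative values):
  //   alpha 3
  //   beta 17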
}
| viniciusvdias/fim-spark | src/main/scala/br/ufmg/cs/util/sorter/KeySorter.scala | Scala | apache-2.0 | 3,738 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.Eq
import cats.data.EitherT
import cats.effect.IO
import cats.laws.discipline.{
CoflatMapTests,
DeferTests,
FunctorFilterTests,
MonadErrorTests,
MonoidKTests,
SemigroupalTests
}
import cats.laws.discipline.arbitrary.catsLawsArbitraryForPartialFunction
import monix.execution.schedulers.TestScheduler
object TypeClassLawsForIterantIOSuite extends BaseLawsSuite {
type F[α] = Iterant[IO, α]
implicit lazy val ec: TestScheduler = TestScheduler()
// Explicit instance due to weird implicit resolution problem
implicit lazy val iso: SemigroupalTests.Isomorphisms[F] =
SemigroupalTests.Isomorphisms.invariant[F]
// Explicit instance, since Scala can't figure it out below :-(
lazy val eqEitherT: Eq[EitherT[F, Throwable, Int]] =
implicitly[Eq[EitherT[F, Throwable, Int]]]
checkAllAsync("Defer[Iterant[IO]]") { implicit ec =>
DeferTests[F].defer[Int]
}
checkAllAsync("MonadError[Iterant[IO]]") { _ =>
implicit val eqE = eqEitherT
MonadErrorTests[F, Throwable].monadError[Int, Int, Int]
}
checkAllAsync("MonoidK[Iterant[IO]]") { implicit ec =>
MonoidKTests[F].monoidK[Int]
}
checkAllAsync("CoflatMap[Iterant[IO]]") { implicit ec =>
CoflatMapTests[F].coflatMap[Int, Int, Int]
}
checkAllAsync("FunctorFilter[Iterant[IO]]") { implicit ec =>
FunctorFilterTests[F].functorFilter[Int, Int, Int]
}
}
| monixio/monix | monix-tail/shared/src/test/scala/monix/tail/TypeClassLawsForIterantIOSuite.scala | Scala | apache-2.0 | 2,079 |
package com.lljv.analytics.analyzerengine
import java.util.Date
case class TextAnalysisSamples(
Samples: Array[TextAnalysisSample],
TimestampLatest: Option[Date],
KafkaKey: Option[String]
)
| dotdeb/Science-Finder | Analytics/AnalyzerEngine/src/main/scala/com/lljv/analytics/analyzerengine/TextAnalysisSamples.scala | Scala | apache-2.0 | 198 |
package io.eels.component.avro
import io.eels.schema.{ArrayType, Field, IntType, StructType}
import io.eels.Row
import org.apache.avro.SchemaBuilder
import org.scalatest.{Matchers, WordSpec}
class AvroSerializerTest extends WordSpec with Matchers {
val avroSchema = SchemaBuilder.record("row").fields().requiredString("s").requiredLong("l").requiredBoolean("b").endRecord()
val serializer = new RecordSerializer(avroSchema)
"AvroRecordMarshaller" should {
"createReader field from values in row" in {
val eelSchema = StructType(Field("s"), Field("l"), Field("b"))
val record = serializer.serialize(Row(eelSchema, "a", 1L, false))
record.get("s") shouldBe "a"
record.get("l") shouldBe 1L
record.get("b") shouldBe false
}
"only accept rows with same number of values as schema fields" in {
intercept[IllegalArgumentException] {
val eelSchema = StructType(Field("a"), Field("b"))
serializer.serialize(Row(eelSchema, "a", 1L))
}
intercept[IllegalArgumentException] {
val eelSchema = StructType(Field("a"), Field("b"), Field("c"), Field("d"))
serializer.serialize(Row(eelSchema, "1", "2", "3", "4"))
}
}
"support rows with a different ordering to the write schema" in {
val eelSchema = StructType(Field("l"), Field("b"), Field("s"))
val record = serializer.serialize(Row(eelSchema, 1L, false, "a"))
record.get("s") shouldBe "a"
record.get("l") shouldBe 1L
record.get("b") shouldBe false
}
"convert strings to longs" in {
val record = serializer.serialize(Row(AvroSchemaFns.fromAvroSchema(avroSchema), "1", "2", "true"))
record.get("l") shouldBe 2L
}
"convert strings to booleans" in {
val record = serializer.serialize(Row(AvroSchemaFns.fromAvroSchema(avroSchema), "1", "2", "true"))
record.get("b") shouldBe true
}
"convert longs to strings" in {
val record = serializer.serialize(Row(AvroSchemaFns.fromAvroSchema(avroSchema), 1L, "2", "true"))
record.get("s") shouldBe "1"
}
"convert booleans to strings" in {
val record = serializer.serialize(Row(AvroSchemaFns.fromAvroSchema(avroSchema), true, "2", "true"))
record.get("s") shouldBe "true"
}
"support arrays" in {
val schema = StructType(Field("a", ArrayType(IntType(true))))
val serializer = new RecordSerializer(AvroSchemaFns.toAvroSchema(schema))
val record = serializer.serialize(Row(schema, Array(1, 2)))
record.get("a").asInstanceOf[Array[_]].toList shouldBe List(1, 2)
}
"support lists" in {
val schema = StructType(Field("a", ArrayType(IntType(true))))
val serializer = new RecordSerializer(AvroSchemaFns.toAvroSchema(schema))
val record = serializer.serialize(Row(schema, Array(1, 2)))
record.get("a") shouldBe List(1, 2)
}
"support sets" in {
val schema = StructType(Field("a", ArrayType(IntType(true))))
val serializer = new RecordSerializer(AvroSchemaFns.toAvroSchema(schema))
val record = serializer.serialize(Row(schema, Set(1, 2)))
record.get("a").asInstanceOf[Array[_]].toSet shouldBe Set(1, 2)
}
"support iterables" in {
val schema = StructType(Field("a", ArrayType(IntType(true))))
val serializer = new RecordSerializer(AvroSchemaFns.toAvroSchema(schema))
val record = serializer.serialize(Row(schema, Iterable(1, 2)))
record.get("a").asInstanceOf[Array[_]].toList shouldBe List(1, 2)
}
}
}
// "AvroRecordFn" should
// in {
// "replace missing values if flag set" in in {
// val schema = Schema(Column("a"), Column("b"), Column("c"))
// toRecord(listOf("1", "3"), schema, Schema(Column("a"), Column("c")), config).toString shouldBe
// """{"a": "1", "b": null, "c": "3"}"""
// }
// }
//
| stheppi/eel | eel-components/src/test/scala/io/eels/component/avro/AvroSerializerTest.scala | Scala | apache-2.0 | 3,887 |
package konstructs.shard
import konstructs.api.Position
import konstructs.Db
case class ShardPosition(m: Int, n: Int, o: Int) {
def local(chunk: ChunkPosition): ChunkPosition =
ChunkPosition(
chunk.p - m * Db.ShardSize,
chunk.q - n * Db.ShardSize,
chunk.k - o * Db.ShardSize
)
}
object ShardPosition {
def apply(c: ChunkPosition): ShardPosition = {
// For negative values we need to "round down", i.e. -0.01 should be -1 and not 0
val m = (if (c.p < 0) (c.p - Db.ShardSize + 1) else c.p) / Db.ShardSize
val n = (if (c.q < 0) (c.q - Db.ShardSize + 1) else c.q) / Db.ShardSize
val o = (if (c.k < 0) (c.k - Db.ShardSize + 1) else c.k) / Db.ShardSize
ShardPosition(m, n, o)
}
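  // Worked example of the "round down" above, assuming Db.ShardSize == 8 (illustrative):
  //   c.p == -1 gives (-1 - 8 + 1) / 8 == -8 / 8 == -1, so chunks -8..-1 land in
  //   shard -1, mirroring how chunks 0..7 land in shard 0.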
def apply(p: Position): ShardPosition =
ShardPosition(ChunkPosition(p))
}
| konstructs/server | src/main/scala/konstructs/shard/ShardPosition.scala | Scala | mit | 809 |
import java.util.{Calendar, GregorianCalendar}
case class Meetup(month: Int, year: Int) {
private val thirteenth = new GregorianCalendar(year, month - 1, 13)
private val first = new GregorianCalendar(year, month - 1, 1)
private val nextMonth = first.addMonths(1)
def teenth(day: Int): Calendar = thirteenth.next(day)
def first(day: Int): Calendar = first.next(day)
def second(day: Int): Calendar = first(day).addDays(7)
def third(day: Int): Calendar = second(day).addDays(7)
def fourth(day: Int): Calendar = third(day).addDays(7)
def last(day: Int): Calendar = nextMonth.next(day).addDays(-7)
implicit class ImmutableCalendar(calendar: Calendar) {
def next(dayOfWeek: Int): Calendar = addDays(daysUntil(dayOfWeek))
def addDays(count: Int): Calendar = copyAnd(_.add(Calendar.DAY_OF_YEAR, count))
def addMonths(count: Int): Calendar = copyAnd(_.add(Calendar.MONTH, count))
def daysUntil(dayOfWeek: Int): Int = (Meetup.Sat - this.dayOfWeek + dayOfWeek) % 7
def dayOfWeek: Int = calendar.get(Calendar.DAY_OF_WEEK)
private def copy: Calendar = calendar.clone.asInstanceOf[Calendar]
private def copyAnd(f: Calendar => Unit) = {
val c = copy
f(c)
c
}
}
}
object Meetup {
val Mon = Calendar.MONDAY
val Tue = Calendar.TUESDAY
val Wed = Calendar.WEDNESDAY
val Thu = Calendar.THURSDAY
val Fri = Calendar.FRIDAY
val Sat = Calendar.SATURDAY
val Sun = Calendar.SUNDAY
}
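// Usage sketch: May 13, 2013 fell on a Monday, so it is the teenth Monday of that month.
//   val may2013 = Meetup(5, 2013)
//   may2013.teenth(Meetup.Mon).get(Calendar.DAY_OF_MONTH) // 13
//   may2013.first(Meetup.Tue).get(Calendar.DAY_OF_MONTH)  // 7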
| nlochschmidt/xscala | meetup/example.scala | Scala | mit | 1,452 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.mllib.regression
import scala.util.Random
import org.scalatest.BeforeAndAfterAll
import org.scalatest.FunSuite
import spark.SparkContext
import spark.SparkContext._
class LogisticRegressionSuite extends FunSuite with BeforeAndAfterAll {
val sc = new SparkContext("local", "test")
override def afterAll() {
sc.stop()
System.clearProperty("spark.driver.port")
}
// Generate input of the form Y = logistic(offset + scale*X)
def generateLogisticInput(
offset: Double,
scale: Double,
nPoints: Int) : Seq[(Double, Array[Double])] = {
val rnd = new Random(42)
val x1 = Array.fill[Double](nPoints)(rnd.nextGaussian())
// NOTE: if U is uniform[0, 1] then ln(u) - ln(1-u) is Logistic(0,1)
val unifRand = new scala.util.Random(45)
val rLogis = (0 until nPoints).map { i =>
val u = unifRand.nextDouble()
math.log(u) - math.log(1.0-u)
}
// y <- A + B*x + rLogis()
// y <- as.numeric(y > 0)
val y = (0 until nPoints).map { i =>
val yVal = offset + scale * x1(i) + rLogis(i)
if (yVal > 0) 1.0 else 0.0
}
val testData = (0 until nPoints).map(i => (y(i).toDouble, Array(x1(i))))
testData
}
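  // Equivalently, the labels above are Bernoulli draws with
  //   P(Y = 1 | x) = 1 / (1 + exp(-(offset + scale * x))),
  // since ln(u) - ln(1 - u) is Logistic(0, 1) for u ~ Uniform(0, 1), so thresholding
  // offset + scale * x + noise at zero reproduces the logistic regression model.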
// Test if we can correctly learn A, B where Y = logistic(A + B*X)
test("logistic regression") {
val nPoints = 10000
val A = 2.0
val B = -1.5
val testData = generateLogisticInput(A, B, nPoints)
val testRDD = sc.parallelize(testData, 2)
testRDD.cache()
val lr = new LogisticRegression().setStepSize(10.0)
.setNumIterations(20)
val model = lr.train(testRDD)
val weight0 = model.weights(0)
assert(weight0 >= -1.60 && weight0 <= -1.40, weight0 + " not in [-1.6, -1.4]")
assert(model.intercept >= 1.9 && model.intercept <= 2.1, model.intercept + " not in [1.9, 2.1]")
}
test("logistic regression with initial weights") {
val nPoints = 10000
val A = 2.0
val B = -1.5
val testData = generateLogisticInput(A, B, nPoints)
val initialB = -1.0
val initialWeights = Array(initialB)
val testRDD = sc.parallelize(testData, 2)
testRDD.cache()
// Use half as many iterations as the previous test.
val lr = new LogisticRegression().setStepSize(10.0)
.setNumIterations(10)
val model = lr.train(testRDD, initialWeights)
val weight0 = model.weights(0)
assert(weight0 >= -1.60 && weight0 <= -1.40, weight0 + " not in [-1.6, -1.4]")
assert(model.intercept >= 1.9 && model.intercept <= 2.1, model.intercept + " not in [1.9, 2.1]")
}
}
| wgpshashank/spark | mllib/src/test/scala/spark/mllib/regression/LogisticRegressionSuite.scala | Scala | apache-2.0 | 3,412 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.runtime.java8
@FunctionalInterface trait JFunction0$mcJ$sp extends Function0[Any] with Serializable {
def apply$mcJ$sp: Long
override def apply(): Any = scala.runtime.BoxesRunTime.boxToLong(apply$mcJ$sp)
}
| martijnhoekstra/scala | src/library/scala/runtime/java8/JFunction0$mcJ$sp.scala | Scala | apache-2.0 | 515 |
package us.my_family.metrics.networking
import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.nio.channels.DatagramChannel
import java.nio.charset.Charset
import java.util.concurrent.Executors
import java.util.concurrent.ThreadFactory
import java.util.concurrent.TimeUnit
import scala.util.Try
import scala.util.control.NonFatal
import com.typesafe.scalalogging.LazyLogging
import us.my_family.metrics.configuration.ConfigurationProvider
trait UdpConnectionProvider {
lazy val udpConnection = UdpConnection()
}
trait ExecutorFactory {
def newSingleThreadExecutor(factory : ThreadFactory) = Executors.newSingleThreadExecutor(factory)
def defaultThreadFactory() = Executors.defaultThreadFactory()
def threadFactory() : ThreadFactory = new ThreadFactory() {
val delegate = defaultThreadFactory()
def newThread(r : Runnable) = {
val result = delegate.newThread(r)
result.setName("MetricsClients-" + result.getName())
result.setDaemon(true)
result
}
}
}
trait DatagramFactory {
def openDatagramChannel() = DatagramChannel.open()
}
case class UdpConnection() extends ConfigurationProvider
with ExecutorFactory
with DatagramFactory
with LazyLogging {
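  // All sends are funnelled through a single daemon worker thread, so callers of send() never block on socket I/O.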
lazy val executor = newSingleThreadExecutor(threadFactory())
lazy val channel = {
val channel = openDatagramChannel()
channel.connect(new InetSocketAddress(configuration.host, configuration.port))
channel
}
def close() = {
Try {
executor.shutdown()
executor.awaitTermination(30000, TimeUnit.MILLISECONDS)
} recover {
case NonFatal(cause) => logger.warn("Failed to shut down thread executor", cause)
}
Try {
channel.close()
} recover {
case NonFatal(cause) => logger.warn("Failed to close datagram channel", cause)
}
}
def send(message : String) = {
Try {
executor.execute(new Runnable() {
def run() = {
logger.trace(s"Sending message: ${message}")
channel.write(ByteBuffer.wrap(message.getBytes(Charset.forName("UTF8"))))
}
})
} recover {
case NonFatal(cause) => {
logger.warn("Failed to send message", cause)
logger.debug(s"Failed message: ${message}")
}
}
}
}
| thrykol/metrics-client | client/src/main/scala/us/my_family/metrics/networking/UdpConnection.scala | Scala | gpl-3.0 | 2,156 |
package autolift.cats
import cats.{Functor, Foldable}
import autolift.{LiftForAll, LiftForAllSyntax, LiftForAllContext}
//TODO: syntax is currently forAll vs forall. Make consistent?
trait CatsLiftForAll[Obj, Fn] extends LiftForAll[Obj, Fn] with Serializable
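// The companion's base instance applies the predicate via Foldable.forall once the element type lines up;
// the lower-priority recur instance maps through an outer Functor layer and defers to an inner LiftForAll.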
object CatsLiftForAll extends LowPriorityCatsLiftForAll {
def apply[Obj, Fn](implicit lift: CatsLiftForAll[Obj, Fn]): Aux[Obj, Fn, lift.Out] = lift
implicit def base[F[_], A, C >: A](implicit fold: Foldable[F]): Aux[F[A], C => Boolean, Boolean] =
new CatsLiftForAll[F[A], C => Boolean]{
type Out = Boolean
def apply(fa: F[A], f: C => Boolean) = fold.forall(fa)(f)
}
}
trait LowPriorityCatsLiftForAll{
type Aux[Obj, Fn, Out0] = CatsLiftForAll[Obj, Fn]{ type Out = Out0 }
implicit def recur[F[_], G, Fn](implicit functor: Functor[F], lift: LiftForAll[G, Fn]): Aux[F[G], Fn, F[lift.Out]] =
new CatsLiftForAll[F[G], Fn]{
type Out = F[lift.Out]
def apply(fg: F[G], f: Fn) = functor.map(fg){ g: G => lift(g, f) }
}
}
trait LiftForAllPackage extends LiftForAllSyntax with LiftForAllContext{
implicit def mkAll[Obj, Fn](implicit lift: CatsLiftForAll[Obj, Fn]): CatsLiftForAll.Aux[Obj, Fn, lift.Out] = lift
} | wheaties/AutoLifts | autolift-cats/src/main/scala/autolift/cats/LiftForAll.scala | Scala | apache-2.0 | 1,219 |
import scala.math.Ordering
import scala.reflect.ClassTag
trait Sam { def apply(x: Int): String }
trait SamP[U] { def apply(x: Int): U }
class OverloadedFun[T](x: T) {
def foo(f: T => String): String = f(x)
def foo(f: Any => T): T = f("a")
def poly[U](f: Int => String): String = f(1)
def poly[U](f: Int => U): U = f(1)
def polySam[U](f: Sam): String = f(1)
def polySam[U](f: SamP[U]): U = f(1)
// check that we properly instantiate java.util.function.Function's type param to String
def polyJavaSam(f: String => String) = 1
def polyJavaSam(f: java.util.function.Function[String, String]) = 2
}
class StringLike(xs: String) {
def map[A](f: Char => A): Array[A] = ???
def map(f: Char => Char): String = ???
}
object Test {
val of = new OverloadedFun[Int](1)
// of.foo(_.toString) // not allowed -- different argument types for the hof arg
of.poly(x => x / 2 )
// of.polySam(x => x / 2) // not allowed -- need at least one regular function type in the mix
of.polyJavaSam(x => x)
val sl = new StringLike("a")
sl.map(_ == 'a') // : Array[Boolean]
sl.map(x => 'a') // : String
}
object sorting {
def stableSort[K: ClassTag](a: Seq[K], f: (K, K) => Boolean): Array[K] = ???
def stableSort[L: ClassTag](a: Array[L], f: (L, L) => Boolean): Unit = ???
stableSort(??? : Seq[Boolean], (x: Boolean, y: Boolean) => x && !y)
}
// trait Bijection[A, B] extends (A => B) {
// def andThen[C](g: Bijection[B, C]): Bijection[A, C] = ???
// def compose[T](g: Bijection[T, A]) = g andThen this
// }
object SI10194 {
trait X[A] {
def map[B](f: A => B): Unit
}
trait Y[A] extends X[A] {
def map[B](f: A => B)(implicit ordering: Ordering[B]): Unit
}
trait Z[A] extends Y[A]
(null: Y[Int]).map(x => x.toString) // compiled
(null: Z[Int]).map(x => x.toString) // didn't compile
}
| lrytz/scala | test/files/pos/overloaded_ho_fun.scala | Scala | apache-2.0 | 1,840 |
/*
* Copyright © 2014 TU Berlin ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package lib.ml
import api._
class KFoldFlinkSpec extends KFoldSpec with FlinkAware {
override protected def split(pdf: Seq[Double], seed: Long, xs: Seq[LDPoint[Int, Int]]) =
withDefaultFlinkEnv(implicit flink => emma.onFlink {
val folds = kfold.split(pdf)(DataBag(xs))(seed)
folds.collect()
})
override protected def splitAndCount(pdf: Seq[Double], seed: Long, xs: Seq[LDPoint[Int, Int]]) =
withDefaultFlinkEnv(implicit flink => emma.onFlink {
val folds = kfold.split(pdf)(DataBag(xs))(seed)
val sizes = for (g <- folds.groupBy(_.foldID)) yield g.key -> g.values.size
sizes.collect().toMap
})
override protected def splitAndProject(pdf: Seq[Double], seed: Long, xs: Seq[LDPoint[Int, Int]]) =
withDefaultFlinkEnv(implicit flink => emma.onFlink {
val folds = kfold.split(pdf)(DataBag(xs))(seed)
for (k <- pdf.indices) yield {
val us = kfold.select(k)(folds).collect()
val vs = kfold.except(k)(folds).collect()
(us, vs)
}
})
}
| emmalanguage/emma | emma-lib-flink/src/test/scala/org/emmalanguage/lib/ml/KFoldFlinkSpec.scala | Scala | apache-2.0 | 1,677 |
/*
* Copyright 2009-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.norbert
package jmx
import org.specs2.mutable.SpecificationWithJUnit
class JMXSpec extends SpecificationWithJUnit {
"JMX" should {
"add to map correctly" in {
JMX.getUniqueName("hello world") must be_==("hello world")
JMX.map.getOrElse("hello world", -1) must be_==(0)
}
}
} | linkedin/norbert | cluster/src/test/scala/com/linkedin/norbert/jmx/JMXUnregisterSpec.scala | Scala | apache-2.0 | 919 |
package org.multibot
import org.pircbotx.cap.SASLCapHandler
object Multibottest {
val cache = InterpretersCache(List("#scala", "#scalaz", "#scala/scala"))
val PRODUCTION = Option(System getenv "multibot.production") exists (_.toBoolean)
  // should be a def so that the security manager is enabled
private def gitterPass = Option(System getenv "multibot.gitter.pass").getOrElse(
"709182327498f5ee393dbb0bc6e440975fa316e5")
private def freenodePass = Option(System getenv "multibot.freenode.pass")
val NUMLINES = 5
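  // Gitter renders messages as markdown, so backticks in the text are replaced and each surviving line is wrapped in `...`.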
def gitterOutputSanitizer(message: String): Array[String] =
message
      .replace("\r", "")
      .replace("`", "'")
      .split("\n")
.filter(_.nonEmpty)
.take(NUMLINES)
.map(m => s"`$m`")
def ircOutputSanitizer(message: String): Array[String] =
message
      .replace("\r", "")
      .split("\n")
.filter(_.nonEmpty)
.take(NUMLINES)
val ircMultibot = Multibot(
inputSanitizer = identity,
outputSanitizer = ircOutputSanitizer,
cache = cache,
botname = if (PRODUCTION) "multibot" else "multibot_test",
channels = if (PRODUCTION)
List("#scala", "#scalaz", "#playframework", "#fp-in-scala", "#CourseraProgfun", "#sbt", "#scala.pl")
else
List("#multibottest", "#multibottest2"),
settings = _.addServer("irc.freenode.net")
.setCapEnabled(freenodePass.nonEmpty)
.addCapHandler(new SASLCapHandler("multibot", freenodePass.getOrElse("")))
)
val gitterMultibot = Multibot(
inputSanitizer = GitterInputSanitizer.sanitize,
outputSanitizer = gitterOutputSanitizer,
cache = cache,
botname = if (PRODUCTION) "multibot1" else "multibot2",
channels = if (PRODUCTION) List("#scala/scala", "#scalaz/scalaz", "#OlegYch/multibot") else List("#OlegYch/multibot"),
settings = _.addServer("irc.gitter.im").setServerPassword(gitterPass).
setSocketFactory(javax.net.ssl.SSLSocketFactory.getDefault)
)
def main(args: Array[String]): Unit = {
ircMultibot.start()
gitterMultibot.start()
while (scala.io.StdIn.readLine() != "exit") Thread.sleep(1000)
sys.exit()
}
}
| OlegYch/multibot | src/main/scala/org/multibot/Multibottest.scala | Scala | apache-2.0 | 2,122 |
package coder.simon.ch2
/**
* @author simon
*/
object E23 {
def curry[A, B, C](f: (A, B) => C): A => (B => C) = a => (b => f(a, b))
def uncurry[A, B, C](f: A => (B => C)): (A, B) => C = (a, b) => f(a)(b)
def compose[A, B, C](f: B => C, g: A => B): A => C = a => f(g(a))
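  // Quick sanity checks (illustrative):
  //   curry((a: Int, b: Int) => a + b)(1)(2) == 3
  //   uncurry(curry((a: Int, b: Int) => a + b))(1, 2) == 3
  //   compose((_: Int) + 1, (_: Int) * 2)(3) == 7 // f(g(3)) = (3 * 2) + 1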
} | erlangxk/fpscala | src/main/scala/coder/simon/ch2/E23.scala | Scala | mit | 282 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package java.util.regex
import scala.annotation.switch
import scala.scalajs.js
import Pattern.IndicesArray
final class Matcher private[regex] (
private var pattern0: Pattern, private var input0: String)
extends AnyRef with MatchResult {
import Matcher._
def pattern(): Pattern = pattern0
// Region configuration (updated by reset() and region())
private var regionStart0 = 0
private var regionEnd0 = input0.length()
private var inputstr = input0
// Match result (updated by successful matches)
private var position: Int = 0 // within `inputstr`, not `input0`
private var lastMatch: js.RegExp.ExecResult = null
private var lastMatchIsForMatches = false
// Append state (updated by replacement methods)
private var appendPos: Int = 0
// Lookup methods
def matches(): Boolean = {
resetMatch()
lastMatch = pattern().execMatches(inputstr)
lastMatchIsForMatches = true
lastMatch ne null
}
def lookingAt(): Boolean = {
resetMatch()
find()
if ((lastMatch ne null) && (ensureLastMatch.index != 0))
resetMatch()
lastMatch ne null
}
def find(): Boolean = {
val (mtch, end) = pattern().execFind(inputstr, position)
position =
if (mtch ne null) (if (end == mtch.index) end + 1 else end)
else inputstr.length() + 1 // cannot find anymore
lastMatch = mtch
lastMatchIsForMatches = false
mtch ne null
}
def find(start: Int): Boolean = {
reset()
position = start
find()
}
// Replace methods
def appendReplacement(sb: StringBuffer, replacement: String): Matcher = {
sb.append(inputstr.substring(appendPos, start()))
@inline def isDigit(c: Char) = c >= '0' && c <= '9'
val len = replacement.length
var i = 0
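    // Walk the replacement string, expanding $n group references and dropping the escaping backslashes as we copy.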
while (i < len) {
replacement.charAt(i) match {
case '$' =>
i += 1
val j = i
while (i < len && isDigit(replacement.charAt(i)))
i += 1
val group = Integer.parseInt(replacement.substring(j, i))
val replaced = this.group(group)
if (replaced != null)
sb.append(replaced)
        case '\\' =>
i += 1
if (i < len)
sb.append(replacement.charAt(i))
i += 1
case c =>
sb.append(c)
i += 1
}
}
appendPos = end()
this
}
def appendTail(sb: StringBuffer): StringBuffer = {
sb.append(inputstr.substring(appendPos))
appendPos = inputstr.length
sb
}
def replaceFirst(replacement: String): String = {
reset()
if (find()) {
val sb = new StringBuffer
appendReplacement(sb, replacement)
appendTail(sb)
sb.toString
} else {
inputstr
}
}
def replaceAll(replacement: String): String = {
reset()
val sb = new StringBuffer
while (find()) {
appendReplacement(sb, replacement)
}
appendTail(sb)
sb.toString
}
// Reset methods
private def resetMatch(): Matcher = {
position = 0
lastMatch = null
appendPos = 0
this
}
def reset(): Matcher = {
regionStart0 = 0
regionEnd0 = input0.length()
inputstr = input0
resetMatch()
}
@inline // `input` is almost certainly a String at call site
def reset(input: CharSequence): Matcher = {
input0 = input.toString()
reset()
}
def usePattern(pattern: Pattern): Matcher = {
// note that `position` and `appendPos` are left unchanged
pattern0 = pattern
lastMatch = null
this
}
// Query state methods - implementation of MatchResult
private def ensureLastMatch: js.RegExp.ExecResult = {
if (lastMatch == null)
throw new IllegalStateException("No match available")
lastMatch
}
def groupCount(): Int = pattern().groupCount
def start(): Int = ensureLastMatch.index + regionStart()
def end(): Int = start() + group().length
def group(): String = ensureLastMatch(0).get
private def indices: IndicesArray =
pattern().getIndices(ensureLastMatch, lastMatchIsForMatches)
private def startInternal(compiledGroup: Int): Int =
indices(compiledGroup).fold(-1)(_._1 + regionStart())
def start(group: Int): Int =
startInternal(pattern().numberedGroup(group))
def start(name: String): Int =
startInternal(pattern().namedGroup(name))
private def endInternal(compiledGroup: Int): Int =
indices(compiledGroup).fold(-1)(_._2 + regionStart())
def end(group: Int): Int =
endInternal(pattern().numberedGroup(group))
def end(name: String): Int =
endInternal(pattern().namedGroup(name))
def group(group: Int): String =
ensureLastMatch(pattern().numberedGroup(group)).orNull
def group(name: String): String =
ensureLastMatch(pattern().namedGroup(name)).orNull
// Seal the state
def toMatchResult(): MatchResult =
new SealedResult(lastMatch, lastMatchIsForMatches, pattern(), regionStart())
// Other query state methods
// Cannot be implemented (see #3454)
//def hitEnd(): Boolean
// Similar difficulties as with hitEnd()
//def requireEnd(): Boolean
// Region management
def regionStart(): Int = regionStart0
def regionEnd(): Int = regionEnd0
def region(start: Int, end: Int): Matcher = {
regionStart0 = start
regionEnd0 = end
inputstr = input0.substring(start, end)
resetMatch()
}
def hasTransparentBounds(): Boolean = false
//def useTransparentBounds(b: Boolean): Matcher
def hasAnchoringBounds(): Boolean = true
//def useAnchoringBounds(b: Boolean): Matcher
}
object Matcher {
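  // Escape backslashes and dollar signs so the result can be used as a literal replacement string.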
def quoteReplacement(s: String): String = {
var result = ""
var i = 0
while (i < s.length) {
val c = s.charAt(i)
result += ((c: @switch) match {
        case '\\' | '$' => "\\"+c
case _ => c
})
i += 1
}
result
}
private final class SealedResult(lastMatch: js.RegExp.ExecResult,
lastMatchIsForMatches: Boolean, pattern: Pattern, regionStart: Int)
extends MatchResult {
def groupCount(): Int = pattern.groupCount
def start(): Int = ensureLastMatch.index + regionStart
def end(): Int = start() + group().length
def group(): String = ensureLastMatch(0).get
private def indices: IndicesArray =
pattern.getIndices(ensureLastMatch, lastMatchIsForMatches)
/* Note that MatchResult does *not* define the named versions of `group`,
* `start` and `end`, so we don't have them here either.
*/
def start(group: Int): Int =
indices(pattern.numberedGroup(group)).fold(-1)(_._1 + regionStart)
def end(group: Int): Int =
indices(pattern.numberedGroup(group)).fold(-1)(_._2 + regionStart)
def group(group: Int): String =
ensureLastMatch(pattern.numberedGroup(group)).orNull
private def ensureLastMatch: js.RegExp.ExecResult = {
if (lastMatch == null)
throw new IllegalStateException("No match available")
lastMatch
}
}
}
| scala-js/scala-js | javalib/src/main/scala/java/util/regex/Matcher.scala | Scala | apache-2.0 | 7,208 |
package edison.cli
import java.io.FileNotFoundException
import edison.cli.actions.{ ResultRecorder, SampleGenerator }
import edison.cli.io.IO
import edison.model.domain._
import edison.util.IntBytes.IntBytes
import edison.util.SmartFreeSpec
import edison.yaml.project.ParseError
import org.scalamock.scalatest.MockFactory
import scaldi.Module
import scala.language.postfixOps
import scala.util.{ Failure, Success }
class MockableSampleGenerator extends SampleGenerator(null)
class MockableResultRecorder extends ResultRecorder(null)
class EdisonCliTest extends SmartFreeSpec with MockFactory {
val ioMock = mock[IO]
val sampleGeneratorMock = mock[MockableSampleGenerator]
val resultRecorderMock = mock[MockableResultRecorder]
def cli = {
implicit val injector = new Module {
bind[IO] to ioMock
bind[EdisonOptionParser] to new OptionParserForTests
bind[SampleGenerator] to sampleGeneratorMock
bind[ResultRecorder] to resultRecorderMock
} :: new CliModule
new EdisonCli
}
def createConfig(action: CliAction): Config =
Config(definitionFilePath = "projectFile", journalFilePath = "journalFile", action = action)
val sampleProject = Project(
"cache-tuning",
SearchDomain(ParamDefs(ParamDef("CacheSize", ParamDomainInteger(Range.inclusive(4 MB, 100 MB, 1 MB)))))
)
val sampleProjectDefinitionYaml =
"""
|project-name: cache-tuning
|search-domain:
| -
| param-name: CacheSize
| domain:
| type: Integer
| start: 4194304
| end: 104857600
| step: 1048576
| default-value: 20971520
""".strip
"EdisonCli" - {
"when parsing command line options" - {
"must handle invalid command line options" in {
cli.createEnvironment(Array.empty).failed.get.getMessage should include("Failed to parse command line options")
}
}
"when parsing project definition file" - {
val config = createConfig(GenerateSampleAction())
"must handle IO errors correctly" in {
(ioMock.readFile _).expects(config.definitionFilePath).returning(Failure(new FileNotFoundException))
cli.readProjectDefinitionFile(config).failed.get shouldBe a[FileNotFoundException]
}
"must handle YAML parser errors correctly" in {
(ioMock.readFile _).expects(*).returning(Success("!invalid-yaml"))
cli.readProjectDefinitionFile(config).failed.get shouldBe a[ParseError]
}
"must handle correct project definition" in {
(ioMock.readFile _).expects(*).returning(Success(sampleProjectDefinitionYaml))
cli.readProjectDefinitionFile(config).get shouldBe sampleProject
}
}
"must forward actions correctly" - {
"when sample generation is requested" in {
val env = Environment(createConfig(GenerateSampleAction()), sampleProject)
(sampleGeneratorMock.generateSample _).expects(env)
cli.executeAction(env)
}
"when storing result is requested" in {
val action = StoreResultAction("{ 'CacheSize': 123 }", 456.0)
val env = Environment(createConfig(action), sampleProject)
(resultRecorderMock.storeResult _).expects(action, env).returning(Success(()))
cli.executeAction(env)
}
}
"must coordinate option parsing, project parsing and action execution" in {
(ioMock.readFile _).expects("projectFilePath").returning(Success(sampleProjectDefinitionYaml))
(resultRecorderMock.storeResult _).expects(*, *).returning(Success(()))
cli.main(Array("store", "-d", "projectFilePath", "-j", "journalFilePath", "-s", "sample", "-r", "0.5"))
}
"must have valid Dependency Injection bindings defined" in {
new EdisonCli()(new CliModule) // will fail in case of missing/invalid DI bindings
}
}
}
| pawel-wiejacha/edison | service/src/test/scala/edison/cli/EdisonCliTest.scala | Scala | mit | 3,841 |
package models.support
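// Typed wrappers around raw Long ids; the short toString prefixes (Us, Or, Co, ...) make misrouted ids easy to spot in logs.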
case class UserId(v: Long) {
override def toString = "Us"+v
}
case class OrganizationId(v: Long) {
override def toString = "Or"+v
}
case class CourseId(v: Long) {
override def toString = "Co"+v
}
case class GameId(v: Long) {
override def toString = "Ga"+v
}
case class QuizId(v: Long) {
override def toString = "Qz"+v
}
case class QuestionId(v: Long) {
override def toString = "Qn"+v
}
case class AnswerId(v: Long) {
override def toString = "An"+v
}
case class AlertId(v: Long) {
override def toString = "Al"+v
} | kristiankime/web-education-games | app/models/support/Ids.scala | Scala | mit | 559 |
/**
* This file is part of mycollab-services.
*
* mycollab-services is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-services is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-services. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.schedule.email
import com.esofthead.mycollab.common.domain.SimpleRelayEmailNotification
/**
* @author MyCollab Ltd
* @since 5.1.0
*/
trait SendingRelayEmailNotificationAction {
def sendNotificationForCreateAction(notification: SimpleRelayEmailNotification): Unit
def sendNotificationForUpdateAction(notification: SimpleRelayEmailNotification): Unit
def sendNotificationForCommentAction(notification: SimpleRelayEmailNotification): Unit
}
| uniteddiversity/mycollab | mycollab-services/src/main/scala/com/esofthead/mycollab/schedule/email/SendingRelayEmailNotificationAction.scala | Scala | agpl-3.0 | 1,203 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.model.religions.{Amaunator, Religion}
/**
* Created by adarr on 5/2/2017.
*/
trait AmaunatorFeatBase extends ReligionFeatBase { self: Amaunator =>
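  // Stackable modification: `abstract override` appends Religion.Amaunator to whatever allowedReligions the rest of the mix-in stack provides.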
abstract override def allowedReligions: List[Religion] =
super.allowedReligions ++ List(Religion.Amaunator)
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/AmaunatorFeatBase.scala | Scala | apache-2.0 | 1,004 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.orc
import scala.collection.JavaConverters._
import org.apache.hadoop.fs.FileStatus
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.connector.write.{LogicalWriteInfo, Write, WriteBuilder}
import org.apache.spark.sql.execution.datasources.FileFormat
import org.apache.spark.sql.execution.datasources.orc.OrcUtils
import org.apache.spark.sql.execution.datasources.v2.FileTable
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
case class OrcTable(
name: String,
sparkSession: SparkSession,
options: CaseInsensitiveStringMap,
paths: Seq[String],
userSpecifiedSchema: Option[StructType],
fallbackFileFormat: Class[_ <: FileFormat])
extends FileTable(sparkSession, options, paths, userSpecifiedSchema) {
override def newScanBuilder(options: CaseInsensitiveStringMap): OrcScanBuilder =
new OrcScanBuilder(sparkSession, fileIndex, schema, dataSchema, options)
override def inferSchema(files: Seq[FileStatus]): Option[StructType] =
OrcUtils.inferSchema(sparkSession, files, options.asScala.toMap)
override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder =
new WriteBuilder {
override def build(): Write = OrcWrite(paths, formatName, supportsDataType, info)
}
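  // ORC accepts any atomic type plus arbitrarily nested structs/arrays/maps of supported types; UDTs are checked through their underlying sqlType.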
override def supportsDataType(dataType: DataType): Boolean = dataType match {
case _: AtomicType => true
case st: StructType => st.forall { f => supportsDataType(f.dataType) }
case ArrayType(elementType, _) => supportsDataType(elementType)
case MapType(keyType, valueType, _) =>
supportsDataType(keyType) && supportsDataType(valueType)
case udt: UserDefinedType[_] => supportsDataType(udt.sqlType)
case _ => false
}
override def formatName: String = "ORC"
}
| maropu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/orc/OrcTable.scala | Scala | apache-2.0 | 2,643 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.torch
import com.intel.analytics.bigdl.nn.{CAddTable, ConcatTable, Linear, Sequential}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.RandomGenerator._
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class CAddTableSpec extends TorchSpec {
"CAddTable with ConcatTable" should "return right output" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val model = new Sequential[Double]()
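    // ConcatTable feeds the same 5-element input to both Linear(5, 3) branches; CAddTable then sums their outputs element-wise.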
val ctable = new ConcatTable[Double]()
ctable.add(new Linear(5, 3))
ctable.add(new Linear(5, 3))
model.add(ctable)
model.add(CAddTable())
val input = Tensor[Double](5).apply1(_ => Random.nextDouble())
val gradOutput = Tensor[Double](3).apply1(_ => Random.nextDouble())
val output = model.forward(input)
val gradInput = model.updateGradInput(input, gradOutput)
    val code = "torch.manualSeed(" + seed + ")\n" +
"""model = nn.Sequential()
ctable = nn.ConcatTable():add(nn.Linear(5, 3)):add(nn.Linear(5, 3))
model:add(ctable)
model:add(nn.CAddTable())
output = model:forward(input)
gradInput = model:backward(input, gradOutput)
"""
val (luaTime, torchResult) = TH.run(code,
Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
output should be (luaOutput)
gradInput should be (luaGradInput)
}
"CAddTable inplace with ConcatTable" should "return right output" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val model = new Sequential[Double]()
val ctable = new ConcatTable[Double]()
ctable.add(new Linear(5, 3))
ctable.add(new Linear(5, 3))
model.add(ctable)
model.add(CAddTable(true))
val input = Tensor[Double](5).apply1(_ => Random.nextDouble())
val gradOutput = Tensor[Double](3).apply1(_ => Random.nextDouble())
val output = model.forward(input)
val gradInput = model.updateGradInput(input, gradOutput)
model.accGradParameters(input, gradOutput)
    val code = "torch.manualSeed(" + seed + ")\n" +
"""model = nn.Sequential()
ctable = nn.ConcatTable():add(nn.Linear(5, 3)):add(nn.Linear(5, 3))
model:add(ctable)
model:add(nn.CAddTable(true))
output = model:forward(input)
gradInput = model:backward(input, gradOutput)
"""
val (luaTime, torchResult) = TH.run(code,
Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
output should be (luaOutput)
gradInput should be (luaGradInput)
}
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/CAddTableSpec.scala | Scala | apache-2.0 | 3,513 |
package framian
package csv
import spire.algebra._
import spire.std.string._
import spire.std.double._
import spire.std.int._
import spire.std.iterable._
import shapeless._
import java.io.{File, BufferedReader, FileReader}
class CsvSpec extends FramianSpec {
val csvRoot = "framian/src/test/resources/csvs/"
val airPassengers = csvRoot +"AirPassengers-test.csv"
val airPassengersBadComma = csvRoot +"AirPassengers-badcomma.csv"
val autoMPG = csvRoot +"auto-mpg-test.tsv"
val defaultRowIndex = Index.fromKeys(0, 1, 2, 3, 4)
val withColumnRowIndex = Index.fromKeys(0, 1, 2, 3)
val defaultAPColumnIndex = Index.fromKeys(0, 1, 2)
val defaultAirPassengers = ColOrientedFrame(
Index.fromKeys(0, 1, 2, 3, 4),
Series(
0 -> TypedColumn(Column[Int](
NA,
Value(1),
Value(2),
Value(3),
Value(4))
).orElse(TypedColumn(Column[String](
Value("")
))),
1 -> TypedColumn(Column[BigDecimal](
NA,
Value(BigDecimal("1949")),
Value(BigDecimal("1949.08333333333")),
Value(BigDecimal("1949.16666666667")),
Value(BigDecimal("1949.25")))
).orElse(TypedColumn(Column[String](
Value("time")
))),
2 -> TypedColumn(Column[BigDecimal](
NA,
Value(BigDecimal("112")),
Value(BigDecimal("118")),
Value(BigDecimal("132")),
Value(BigDecimal("129")))
).orElse(TypedColumn(Column[String](
Value("AirPassengers")
)))))
val columnAirPassengers = Frame.fromRows(
1 :: BigDecimal(1949) :: 112 :: HNil,
2 :: BigDecimal(1949.08333333333) :: 118 :: HNil,
3 :: BigDecimal(1949.16666666667) :: 132 :: HNil,
4 :: BigDecimal(1949.25) :: 129 :: HNil)
.withColIndex(Index.fromKeys("", "time", "AirPassengers"))
.withRowIndex(withColumnRowIndex)
val defaultMPG = Frame.fromRows(
18.0 :: 8 :: 307.0 :: 130.0 :: 3504 :: 12.0 :: 70 :: 1 :: "chevrolet chevelle malibu" :: HNil,
15.0 :: 8 :: 350.0 :: 165.0 :: 3693 :: 11.5 :: 70 :: 1 :: "buick skylark 320" :: HNil,
18.0 :: 8 :: 318.0 :: 150.0 :: 3436 :: 11.0 :: 70 :: 1 :: "plymouth satellite" :: HNil,
16.0 :: 8 :: 304.0 :: 150.0 :: 3433 :: 12.0 :: 70 :: 1 :: "amc rebel sst" :: HNil,
17.0 :: 8 :: 302.0 :: 140.0 :: 3449 :: 10.5 :: 70 :: 1 :: "ford torino" :: HNil)
.withRowIndex(defaultRowIndex)
.withColIndex(Index.fromKeys(0, 1, 2, 3, 4, 5, 6, 7, 8))
"CsvParser" should {
import CsvCell._
val TestFormat = CsvFormat(
separator = ",",
quote = "'",
quoteEscape = "'",
empty = "N/A",
invalid = "N/M",
header = false,
rowDelim = CsvRowDelim.Custom("|"),
allowRowDelimInQuotes = true
)
"parse air passengers as unlabeled CSV" in {
Csv.parsePath(airPassengers).unlabeled.toFrame should === (defaultAirPassengers)
}
"parse air passengers as labeled CSV" in {
Csv.parsePath(airPassengers).labeled.toFrame should === (columnAirPassengers)
}
"parse autoMPG as unlabeled TSV" in {
Csv.parsePath(autoMPG).unlabeled.toFrame should === (defaultMPG)
}
"parse CSV with separator in quote" in {
val data = """a,"b","c,d"|"e,f,g""""
val csv = Csv.parseString(data, CsvFormat.Guess.withRowDelim("|"))
val frame = csv.unlabeled.toFrame
frame.getRow(0) should === (Some(Rec(0 -> "a", 1 -> "b", 2 -> "c,d")))
frame[String](1, 0) should === (Value("e,f,g"))
frame[String](1, 1) should === (NA)
frame[String](1, 2) should === (NA)
}
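    // With quote = ' and quoteEscape = ', a doubled '' inside a quoted cell decodes to a single literal quote,
    // while a cell that does not start with the quote character is taken verbatim.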
"parse escaped quotes" in {
Csv.parseString(
"a,'''','c'''|'''''d''''', ''''",
TestFormat
).rows should === (Vector(
Right(CsvRow(Vector(Data("a"), Data("'"), Data("c'")))),
Right(CsvRow(Vector(Data("''d''"), Data(" ''''")))))
)
}
"respect CsvFormat separator" in {
Csv.parseString("a,b,c|d,e,f", TestFormat).rows should === (
Csv.parseString("a;b;c|d;e;f", TestFormat.withSeparator(";")).rows)
}
"respect CsvFormat quote" in {
Csv.parseString("'a,b','b'|d,e", TestFormat).rows should === (
Csv.parseString("^a,b^,^b^|d,e", TestFormat.withQuote("^")).rows)
}
"respect CsvFormat quote escape" in {
Csv.parseString("'a''b',''''|' '", TestFormat).rows should === (
        Csv.parseString("'a\\'b','\\''|' '", TestFormat.withQuoteEscape("\\")).rows)
}
"respect CsvFormat empty" in {
Csv.parseString("a,N/A,b|N/A,N/A", TestFormat).rows should === (
Csv.parseString("a,,b|,", TestFormat.withEmpty("")).rows)
}
"respect CsvFormat invalid" in {
Csv.parseString("a,N/M,b|N/M,N/M", TestFormat).rows should === (
Csv.parseString("a,nm,b|nm,nm", TestFormat.withInvalid("nm")).rows)
}
"respect CsvFormat row delimiter" in {
Csv.parseString("a,b|c,d|e,f", TestFormat).rows should === (
        Csv.parseString("a,b\nc,d\ne,f", TestFormat.withRowDelim(CsvRowDelim.Unix)).rows)
}
"parse CSV with row delimiter in quote" in {
Csv.parseString("a,'b|c'|'d|e',f", TestFormat).rows should === (Vector(
Right(CsvRow(Vector(Data("a"), Data("b|c")))),
Right(CsvRow(Vector(Data("d|e"), Data("f"))))))
}
"parser respects whitespace" in {
val data = " a , , 'a','b'| b ,c , "
val csv = Csv.parseString(data, CsvFormat.Guess.withRowDelim("|"))
csv.rows should === (Vector(
Right(CsvRow(Vector(Data(" a "), Data(" "), Data(" 'a'"), Data("b")))),
Right(CsvRow(Vector(Data(" b "), Data("c "), Data(" "))))))
}
}
}
| tixxit/framian | framian/src/test/scala/framian/CsvSpec.scala | Scala | apache-2.0 | 5,673 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.io._
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{Files, Paths}
import java.util.{Locale, Optional}
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.JavaConverters._
import scala.io.Source
import scala.util.Random
import org.apache.commons.io.FileUtils
import org.apache.kafka.clients.producer.{ProducerRecord, RecordMetadata}
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.SpanSugar._
import org.apache.spark.sql.{Dataset, ForeachWriter, Row, SparkSession}
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.connector.read.streaming.SparkDataStream
import org.apache.spark.sql.execution.datasources.v2.StreamingDataSourceV2Relation
import org.apache.spark.sql.execution.exchange.ReusedExchangeExec
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.continuous.ContinuousExecution
import org.apache.spark.sql.functions.{count, window}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.kafka010.KafkaSourceProvider._
import org.apache.spark.sql.streaming.{StreamingQuery, StreamTest, Trigger}
import org.apache.spark.sql.streaming.util.StreamManualClock
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.Utils
abstract class KafkaSourceTest extends StreamTest with SharedSparkSession with KafkaTest {
protected var testUtils: KafkaTestUtils = _
override val streamingTimeout = 30.seconds
protected val brokerProps = Map[String, Object]()
override def beforeAll(): Unit = {
super.beforeAll()
testUtils = new KafkaTestUtils(brokerProps)
testUtils.setup()
}
override def afterAll(): Unit = {
if (testUtils != null) {
testUtils.teardown()
testUtils = null
}
super.afterAll()
}
protected def makeSureGetOffsetCalled = AssertOnQuery { q =>
// Because KafkaSource's initialPartitionOffsets is set lazily, we need to make sure
// its "getOffset" is called before pushing any data. Otherwise, because of the race condition,
// we don't know which data should be fetched when `startingOffsets` is latest.
q match {
case c: ContinuousExecution => c.awaitEpoch(0)
case m: MicroBatchExecution => m.processAllAvailable()
}
true
}
protected def setTopicPartitions(topic: String, newCount: Int, query: StreamExecution) : Unit = {
testUtils.addPartitions(topic, newCount)
}
/**
* Add data to Kafka.
*
* `topicAction` can be used to run actions for each topic before inserting data.
*/
case class AddKafkaData(topics: Set[String], data: Int*)
(implicit ensureDataInMultiplePartition: Boolean = false,
concurrent: Boolean = false,
message: String = "",
topicAction: (String, Option[Int]) => Unit = (_, _) => {}) extends AddData {
override def addData(query: Option[StreamExecution]): (SparkDataStream, Offset) = {
query match {
// Make sure no Spark job is running when deleting a topic
case Some(m: MicroBatchExecution) => m.processAllAvailable()
case _ =>
}
val existingTopics = testUtils.getAllTopicsAndPartitionSize().toMap
val newTopics = topics.diff(existingTopics.keySet)
for (newTopic <- newTopics) {
topicAction(newTopic, None)
}
for (existingTopicPartitions <- existingTopics) {
topicAction(existingTopicPartitions._1, Some(existingTopicPartitions._2))
}
require(
query.nonEmpty,
"Cannot add data when there is no query for finding the active kafka source")
val sources: Seq[SparkDataStream] = {
query.get.logicalPlan.collect {
case StreamingExecutionRelation(source: KafkaSource, _) => source
case r: StreamingDataSourceV2Relation if r.stream.isInstanceOf[KafkaMicroBatchStream] ||
r.stream.isInstanceOf[KafkaContinuousStream] =>
r.stream
}
}.distinct
if (sources.isEmpty) {
throw new Exception(
"Could not find Kafka source in the StreamExecution logical plan to add data to")
} else if (sources.size > 1) {
        throw new Exception(
          "Could not select the Kafka source in the StreamExecution logical plan as there" +
          "are multiple Kafka sources:\n\t" + sources.mkString("\n\t"))
}
val kafkaSource = sources.head
val topic = topics.toSeq(Random.nextInt(topics.size))
val sentMetadata = testUtils.sendMessages(topic, data.map { _.toString }.toArray)
def metadataToStr(m: (String, RecordMetadata)): String = {
s"Sent ${m._1} to partition ${m._2.partition()}, offset ${m._2.offset()}"
}
// Verify that the test data gets inserted into multiple partitions
if (ensureDataInMultiplePartition) {
require(
sentMetadata.groupBy(_._2.partition).size > 1,
s"Added data does not test multiple partitions: ${sentMetadata.map(metadataToStr)}")
}
val offset = KafkaSourceOffset(testUtils.getLatestOffsets(topics))
logInfo(s"Added data, expected offset $offset")
(kafkaSource, offset)
}
override def toString: String =
s"AddKafkaData(topics = $topics, data = $data, message = $message)"
}
object WithOffsetSync {
/**
* Run `func` to write some Kafka messages and wait until the latest offset of the given
* `TopicPartition` is not less than `expectedOffset`.
*/
def apply(
topicPartition: TopicPartition,
expectedOffset: Long)(func: () => Unit): StreamAction = {
Execute("Run Kafka Producer")(_ => {
func()
        // This is a hack for the race condition where the committed message may not be visible to
        // the consumer for a short time.
testUtils.waitUntilOffsetAppears(topicPartition, expectedOffset)
})
}
}
private val topicId = new AtomicInteger(0)
protected def newTopic(): String = s"topic-${topicId.getAndIncrement()}"
}
abstract class KafkaMicroBatchSourceSuiteBase extends KafkaSourceSuiteBase {
import testImplicits._
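  // Waits until the ManualClock-driven query is parked at the clock again (i.e. the current batch has
  // finished), rethrowing any query failure instead of hanging the test.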
private def waitUntilBatchProcessed(clock: StreamManualClock) = AssertOnQuery { q =>
eventually(Timeout(streamingTimeout)) {
if (!q.exception.isDefined) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}
test("Trigger.AvailableNow") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, (0 until 15).map { case x =>
s"foo-$x"
}.toArray, Some(0))
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("maxOffsetsPerTrigger", 5)
.option("subscribe", topic)
.option("startingOffsets", "earliest")
.load()
var index: Int = 0
def startTriggerAvailableNowQuery(): StreamingQuery = {
reader.writeStream
.foreachBatch((_: Dataset[Row], _: Long) => {
index += 1
})
.trigger(Trigger.AvailableNow)
.start()
}
val query = startTriggerAvailableNowQuery()
try {
assert(query.awaitTermination(streamingTimeout.toMillis))
} finally {
query.stop()
}
// should have 3 batches now i.e. 15 / 5 = 3
assert(index == 3)
}
test("(de)serialization of initial offsets") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
testStream(reader.load)(
makeSureGetOffsetCalled,
StopStream,
StartStream(),
StopStream)
}
test("SPARK-26718 Rate limit set to Long.Max should not overflow integer " +
"during end offset calculation") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 1)
// fill in 5 messages to trigger potential integer overflow
testUtils.sendMessages(topic, (0 until 5).map(_.toString).toArray, Some(0))
val partitionOffsets = Map(
new TopicPartition(topic, 0) -> 5L
)
val startingOffsets = JsonUtils.partitionOffsets(partitionOffsets)
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
// use latest to force begin to be 5
.option("startingOffsets", startingOffsets)
// use Long.Max to try to trigger overflow
.option("maxOffsetsPerTrigger", Long.MaxValue)
.option("subscribe", topic)
.option("kafka.metadata.max.age.ms", "1")
.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
testStream(mapped)(
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 30, 31, 32, 33, 34),
CheckAnswer(30, 31, 32, 33, 34),
StopStream
)
}
test("maxOffsetsPerTrigger") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 3)
testUtils.sendMessages(topic, (100 to 200).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (10 to 20).map(_.toString).toArray, Some(1))
testUtils.sendMessages(topic, Array("1"), Some(2))
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("maxOffsetsPerTrigger", 10)
.option("subscribe", topic)
.option("startingOffsets", "earliest")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
val clock = new StreamManualClock
testStream(mapped)(
StartStream(Trigger.ProcessingTime(100), clock),
waitUntilBatchProcessed(clock),
// 1 from smallest, 1 from middle, 8 from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107),
AdvanceManualClock(100),
waitUntilBatchProcessed(clock),
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116
),
StopStream,
StartStream(Trigger.ProcessingTime(100), clock),
waitUntilBatchProcessed(clock),
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116,
12, 117, 118, 119, 120, 121, 122, 123, 124, 125
),
AdvanceManualClock(100),
waitUntilBatchProcessed(clock),
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116,
12, 117, 118, 119, 120, 121, 122, 123, 124, 125,
13, 126, 127, 128, 129, 130, 131, 132, 133, 134
)
)
// When Trigger.Once() is used, the read limit should be ignored
val allData = Seq(1) ++ (10 to 20) ++ (100 to 200)
withTempDir { dir =>
testStream(mapped)(
StartStream(Trigger.Once(), checkpointLocation = dir.getCanonicalPath),
AssertOnQuery { q =>
q.processAllAvailable()
true
},
CheckAnswer(allData: _*),
StopStream,
AddKafkaData(Set(topic), 1000 to 1010: _*),
StartStream(Trigger.Once(), checkpointLocation = dir.getCanonicalPath),
AssertOnQuery { q =>
q.processAllAvailable()
true
},
CheckAnswer((allData ++ 1000.to(1010)): _*)
)
}
}
test("minOffsetsPerTrigger") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 3)
testUtils.sendMessages(topic, (100 to 109).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (10 to 14).map(_.toString).toArray, Some(1))
testUtils.sendMessages(topic, Array("1"), Some(2))
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("minOffsetsPerTrigger", 15)
.option("maxTriggerDelay", "5s")
.option("subscribe", topic)
.option("startingOffsets", "earliest")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
val clock = new StreamManualClock
testStream(mapped)(
StartStream(Trigger.ProcessingTime(100), clock),
waitUntilBatchProcessed(clock),
// First Batch is always processed
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 12, 13, 14),
// Adding more data but less than minOffsetsPerTrigger
Assert {
testUtils.sendMessages(topic, (15 to 20).map(_.toString).toArray, Some(1))
true
},
// No data is processed for next batch as data is less than minOffsetsPerTrigger
// and maxTriggerDelay is not expired
AdvanceManualClock(100),
waitUntilBatchProcessed(clock),
CheckNewAnswer(),
Assert {
testUtils.sendMessages(topic, (110 to 120).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, Array("2"), Some(2))
true
},
AdvanceManualClock(100),
waitUntilBatchProcessed(clock),
// Running batch now as number of records is greater than minOffsetsPerTrigger
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116,
12, 117, 118, 119, 120, 13, 14, 15, 16, 17, 18, 19, 2, 20),
// Testing maxTriggerDelay
// Adding more data but less than minOffsetsPerTrigger
Assert {
testUtils.sendMessages(topic, (121 to 125).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (21 to 25).map(_.toString).toArray, Some(1))
true
},
// No data is processed for next batch till maxTriggerDelay is expired
AdvanceManualClock(100),
waitUntilBatchProcessed(clock),
CheckNewAnswer(),
// Sleeping for 5s to let maxTriggerDelay expire
Assert {
Thread.sleep(5 * 1000)
true
},
AdvanceManualClock(100),
// Running batch as maxTriggerDelay is expired
waitUntilBatchProcessed(clock),
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116,
12, 117, 118, 119, 120, 121, 122, 123, 124, 125,
13, 14, 15, 16, 17, 18, 19, 2, 20, 21, 22, 23, 24, 25)
)
// When Trigger.Once() is used, the read limit should be ignored
val allData = Seq(1, 2) ++ (10 to 25) ++ (100 to 125)
withTempDir { dir =>
testStream(mapped)(
StartStream(Trigger.Once(), checkpointLocation = dir.getCanonicalPath),
AssertOnQuery { q =>
q.processAllAvailable()
true
},
CheckAnswer(allData: _*),
StopStream,
AddKafkaData(Set(topic), 1000 to 1010: _*),
StartStream(Trigger.Once(), checkpointLocation = dir.getCanonicalPath),
AssertOnQuery { q =>
q.processAllAvailable()
true
},
CheckAnswer((allData ++ 1000.to(1010)): _*)
)
}
}
test("compositeReadLimit") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 3)
testUtils.sendMessages(topic, (100 to 120).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (10 to 20).map(_.toString).toArray, Some(1))
testUtils.sendMessages(topic, Array("1"), Some(2))
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("minOffsetsPerTrigger", 15)
.option("maxTriggerDelay", "5s")
.option("maxOffsetsPerTrigger", 20)
.option("subscribe", topic)
.option("startingOffsets", "earliest")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
val clock = new StreamManualClock
testStream(mapped)(
StartStream(Trigger.ProcessingTime(100), clock),
waitUntilBatchProcessed(clock),
// First Batch is always processed but it will process only 20
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111,
12, 13, 14, 15),
// Pending data is less than minOffsetsPerTrigger
// No data is processed for next batch as data is less than minOffsetsPerTrigger
// and maxTriggerDelay is not expired
AdvanceManualClock(100),
waitUntilBatchProcessed(clock),
CheckNewAnswer(),
Assert {
testUtils.sendMessages(topic, (121 to 128).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (21 to 30).map(_.toString).toArray, Some(1))
testUtils.sendMessages(topic, Array("2"), Some(2))
true
},
AdvanceManualClock(100),
waitUntilBatchProcessed(clock),
// Running batch now as number of new records is greater than minOffsetsPerTrigger
// but reading limited data as per maxOffsetsPerTrigger
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116,
12, 117, 118, 119, 120, 121,
13, 14, 15, 16, 17, 18, 19, 2, 20, 21, 22, 23, 24),
// Testing maxTriggerDelay
// No data is processed for next batch till maxTriggerDelay is expired
AdvanceManualClock(100),
waitUntilBatchProcessed(clock),
CheckNewAnswer(),
// Sleeping for 5s to let maxTriggerDelay expire
Assert {
Thread.sleep(5 * 1000)
true
},
AdvanceManualClock(100),
// Running batch as maxTriggerDelay is expired
waitUntilBatchProcessed(clock),
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
12, 120, 121, 122, 123, 124, 125, 126, 127, 128,
13, 14, 15, 16, 17, 18, 19, 2, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)
)
// When Trigger.Once() is used, the read limit should be ignored
val allData = Seq(1, 2) ++ (10 to 30) ++ (100 to 128)
withTempDir { dir =>
testStream(mapped)(
StartStream(Trigger.Once(), checkpointLocation = dir.getCanonicalPath),
AssertOnQuery { q =>
q.processAllAvailable()
true
},
CheckAnswer(allData: _*),
StopStream,
AddKafkaData(Set(topic), 1000 to 1010: _*),
StartStream(Trigger.Once(), checkpointLocation = dir.getCanonicalPath),
AssertOnQuery { q =>
q.processAllAvailable()
true
},
CheckAnswer((allData ++ 1000.to(1010)): _*)
)
}
}
test("input row metrics") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, Array("-1"))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val kafka = spark
.readStream
.format("kafka")
.option("subscribe", topic)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
StartStream(trigger = Trigger.ProcessingTime(1)),
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 1, 2, 3),
CheckAnswer(2, 3, 4),
AssertOnQuery { query =>
val recordsRead = query.recentProgress.map(_.numInputRows).sum
recordsRead == 3
}
)
}
test("subscribing topic by pattern with topic deletions") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-seems"
val topic2 = topicPrefix + "-bad"
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, Array("-1"))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("kafka.request.timeout.ms", "3000")
.option("kafka.default.api.timeout.ms", "3000")
.option("subscribePattern", s"$topicPrefix-.*")
.option("failOnDataLoss", "false")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 1, 2, 3),
CheckAnswer(2, 3, 4),
Assert {
testUtils.deleteTopic(topic)
testUtils.createTopic(topic2, partitions = 5)
true
},
AddKafkaData(Set(topic2), 4, 5, 6),
CheckAnswer(2, 3, 4, 5, 6, 7)
)
}
test("subscribe topic by pattern with topic recreation between batches") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-good"
val topic2 = topicPrefix + "-bad"
testUtils.createTopic(topic, partitions = 1)
testUtils.sendMessages(topic, Array("1", "3"))
testUtils.createTopic(topic2, partitions = 1)
testUtils.sendMessages(topic2, Array("2", "4"))
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("kafka.request.timeout.ms", "3000")
.option("kafka.default.api.timeout.ms", "3000")
.option("startingOffsets", "earliest")
.option("subscribePattern", s"$topicPrefix-.*")
val ds = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
.map(kv => kv._2.toInt)
testStream(ds)(
StartStream(),
AssertOnQuery { q =>
q.processAllAvailable()
true
},
CheckAnswer(1, 2, 3, 4),
      // Restart the stream in this test to make it stable. When a topic is recreated while a
      // consumer is alive, that consumer may not see the recreated topic even though a fresh
      // consumer would.
StopStream,
// Recreate `topic2` and wait until it's available
WithOffsetSync(new TopicPartition(topic2, 0), expectedOffset = 1) { () =>
testUtils.deleteTopic(topic2)
testUtils.createTopic(topic2)
testUtils.sendMessages(topic2, Array("6"))
},
StartStream(),
ExpectFailure[IllegalStateException](e => {
// The offset of `topic2` should be changed from 2 to 1
assert(e.getMessage.contains("was changed from 2 to 1"))
})
)
}
test("ensure that initial offset are written with an extra byte in the beginning (SPARK-19517)") {
withTempDir { metadataPath =>
val topic = "kafka-initial-offset-current"
testUtils.createTopic(topic, partitions = 1)
val initialOffsetFile = Paths.get(s"${metadataPath.getAbsolutePath}/sources/0/0").toFile
val df = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
.option("startingOffsets", s"earliest")
.load()
      // Verify that the written initial offset file starts with a 0 byte, so that
      // Spark 2.1.0 can read the offsets (see SPARK-19517).
testStream(df)(
StartStream(checkpointLocation = metadataPath.getAbsolutePath),
makeSureGetOffsetCalled)
val binarySource = Source.fromFile(initialOffsetFile)
try {
assert(binarySource.next().toInt == 0) // first byte is binary 0
} finally {
binarySource.close()
}
}
}
test("deserialization of initial offset written by Spark 2.1.0 (SPARK-19517)") {
withTempDir { metadataPath =>
val topic = "kafka-initial-offset-2-1-0"
testUtils.createTopic(topic, partitions = 3)
testUtils.sendMessages(topic, Array("0", "1", "2"), Some(0))
testUtils.sendMessages(topic, Array("0", "10", "20"), Some(1))
testUtils.sendMessages(topic, Array("0", "100", "200"), Some(2))
// Copy the initial offset file into the right location inside the checkpoint root directory
// such that the Kafka source can read it for initial offsets.
val from = new File(
getClass.getResource("/kafka-source-initial-offset-version-2.1.0.bin").toURI).toPath
val to = Paths.get(s"${metadataPath.getAbsolutePath}/sources/0/0")
Files.createDirectories(to.getParent)
Files.copy(from, to)
val df = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
.option("startingOffsets", s"earliest")
.load()
.selectExpr("CAST(value AS STRING)")
.as[String]
.map(_.toInt)
// Test that the query starts from the expected initial offset (i.e. read older offsets,
// even though startingOffsets is latest).
testStream(df)(
StartStream(checkpointLocation = metadataPath.getAbsolutePath),
AddKafkaData(Set(topic), 1000),
CheckAnswer(0, 1, 2, 10, 20, 200, 1000))
}
}
test("deserialization of initial offset written by future version") {
withTempDir { metadataPath =>
val topic = "kafka-initial-offset-future-version"
testUtils.createTopic(topic, partitions = 3)
// Copy the initial offset file into the right location inside the checkpoint root directory
// such that the Kafka source can read it for initial offsets.
val from = new File(
getClass.getResource("/kafka-source-initial-offset-future-version.bin").toURI).toPath
val to = Paths.get(s"${metadataPath.getAbsolutePath}/sources/0/0")
Files.createDirectories(to.getParent)
Files.copy(from, to)
val df = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
.load()
.selectExpr("CAST(value AS STRING)")
.as[String]
.map(_.toInt)
testStream(df)(
StartStream(checkpointLocation = metadataPath.getAbsolutePath),
ExpectFailure[IllegalStateException](e => {
Seq(
s"maximum supported log version is v1, but encountered v99999",
"produced by a newer version of Spark and cannot be read by this version"
).foreach { message =>
assert(e.toString.contains(message))
}
}))
}
}
test("KafkaSource with watermark") {
val now = System.currentTimeMillis()
val topic = newTopic()
    testUtils.createTopic(topic, partitions = 1)
testUtils.sendMessages(topic, Array(1).map(_.toString))
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("startingOffsets", s"earliest")
.option("subscribe", topic)
.load()
val windowedAggregation = kafka
.withWatermark("timestamp", "10 seconds")
.groupBy(window($"timestamp", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start") as 'window, $"count")
val query = windowedAggregation
.writeStream
.format("memory")
.outputMode("complete")
.queryName("kafkaWatermark")
.start()
query.processAllAvailable()
val rows = spark.table("kafkaWatermark").collect()
assert(rows.length === 1, s"Unexpected results: ${rows.toList}")
val row = rows(0)
    // We cannot check the exact window start time as it depends on the time that messages were
    // inserted by the producer. So here we just use a lower bound to make sure the internal
    // conversion works.
assert(
row.getAs[java.sql.Timestamp]("window").getTime >= now - 5 * 1000,
s"Unexpected results: $row")
assert(row.getAs[Int]("count") === 1, s"Unexpected results: $row")
query.stop()
}
test("delete a topic when a Spark job is running") {
KafkaSourceSuite.collectedData.clear()
val topic = newTopic()
testUtils.createTopic(topic, partitions = 1)
testUtils.sendMessages(topic, (1 to 10).map(_.toString).toArray)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("kafka.request.timeout.ms", "3000")
.option("kafka.default.api.timeout.ms", "3000")
.option("subscribe", topic)
// If a topic is deleted and we try to poll data starting from offset 0,
// the Kafka consumer will just block until timeout and return an empty result.
// So set the timeout to 1 second to make this test fast.
.option("kafkaConsumer.pollTimeoutMs", "1000")
.option("startingOffsets", "earliest")
.option("failOnDataLoss", "false")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
KafkaSourceSuite.globalTestUtils = testUtils
// The following ForeachWriter will delete the topic before fetching data from Kafka
// in executors.
val query = kafka.map(kv => kv._2.toInt).writeStream.foreach(new ForeachWriter[Int] {
override def open(partitionId: Long, version: Long): Boolean = {
// Re-create topic since Kafka auto topic creation is not supported by Spark
KafkaSourceSuite.globalTestUtils.deleteTopic(topic)
KafkaSourceSuite.globalTestUtils.createTopic(topic)
true
}
override def process(value: Int): Unit = {
KafkaSourceSuite.collectedData.add(value)
}
override def close(errorOrNull: Throwable): Unit = {}
}).start()
query.processAllAvailable()
query.stop()
// `failOnDataLoss` is `false`, we should not fail the query
assert(query.exception.isEmpty)
}
test("SPARK-22956: currentPartitionOffsets should be set when no new data comes in") {
def getSpecificDF(range: Range.Inclusive): org.apache.spark.sql.Dataset[Int] = {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 1)
testUtils.sendMessages(topic, range.map(_.toString).toArray, Some(0))
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("maxOffsetsPerTrigger", 5)
.option("subscribe", topic)
.option("startingOffsets", "earliest")
reader.load()
.selectExpr("CAST(value AS STRING)")
.as[String]
.map(k => k.toInt)
}
val df1 = getSpecificDF(0 to 9)
val df2 = getSpecificDF(100 to 199)
val kafka = df1.union(df2)
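    // Each Kafka source in the union applies maxOffsetsPerTrigger independently, so every
    // micro-batch reads up to 5 records from each topic (matching the CheckLastBatch expectations).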
val clock = new StreamManualClock
testStream(kafka)(
StartStream(Trigger.ProcessingTime(100), clock),
waitUntilBatchProcessed(clock),
// 5 from smaller topic, 5 from bigger one
CheckLastBatch((0 to 4) ++ (100 to 104): _*),
AdvanceManualClock(100),
waitUntilBatchProcessed(clock),
// 5 from smaller topic, 5 from bigger one
CheckLastBatch((5 to 9) ++ (105 to 109): _*),
AdvanceManualClock(100),
waitUntilBatchProcessed(clock),
// smaller topic empty, 5 from bigger one
CheckLastBatch(110 to 114: _*),
StopStream,
StartStream(Trigger.ProcessingTime(100), clock),
waitUntilBatchProcessed(clock),
// smallest now empty, 5 from bigger one
CheckLastBatch(115 to 119: _*),
AdvanceManualClock(100),
waitUntilBatchProcessed(clock),
// smallest now empty, 5 from bigger one
CheckLastBatch(120 to 124: _*)
)
}
test("allow group.id prefix") {
// Group ID prefix is only supported by consumer based offset reader
if (spark.conf.get(SQLConf.USE_DEPRECATED_KAFKA_OFFSET_FETCHING)) {
testGroupId("groupIdPrefix", (expected, actual) => {
assert(actual.exists(_.startsWith(expected)) && !actual.exists(_ === expected),
"Valid consumer groups don't contain the expected group id - " +
s"Valid consumer groups: $actual / expected group id: $expected")
})
}
}
test("allow group.id override") {
// Group ID override is only supported by consumer based offset reader
if (spark.conf.get(SQLConf.USE_DEPRECATED_KAFKA_OFFSET_FETCHING)) {
testGroupId("kafka.group.id", (expected, actual) => {
assert(actual.exists(_ === expected), "Valid consumer groups don't " +
s"contain the expected group id - Valid consumer groups: $actual / " +
s"expected group id: $expected")
})
}
}
private def testGroupId(groupIdKey: String,
validateGroupId: (String, Iterable[String]) => Unit): Unit = {
// Tests code path KafkaSourceProvider.{sourceSchema(.), createSource(.)}
// as well as KafkaOffsetReader.createConsumer(.)
val topic = newTopic()
testUtils.createTopic(topic, partitions = 3)
testUtils.sendMessages(topic, (1 to 10).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (11 to 20).map(_.toString).toArray, Some(1))
testUtils.sendMessages(topic, (21 to 30).map(_.toString).toArray, Some(2))
val customGroupId = "id-" + Random.nextInt()
val dsKafka = spark
.readStream
.format("kafka")
.option(groupIdKey, customGroupId)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
.option("startingOffsets", "earliest")
.load()
.selectExpr("CAST(value AS STRING)")
.as[String]
.map(_.toInt)
testStream(dsKafka)(
makeSureGetOffsetCalled,
CheckAnswer(1 to 30: _*),
Execute { _ =>
val consumerGroups = testUtils.listConsumerGroups()
val validGroups = consumerGroups.valid().get()
val validGroupsId = validGroups.asScala.map(_.groupId())
validateGroupId(customGroupId, validGroupsId)
}
)
}
test("ensure stream-stream self-join generates only one offset in log and correct metrics") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 2)
require(testUtils.getLatestOffsets(Set(topic)).size === 2)
val kafka = spark
.readStream
.format("kafka")
.option("subscribe", topic)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.load()
val values = kafka
.selectExpr("CAST(CAST(value AS STRING) AS INT) AS value",
"CAST(CAST(value AS STRING) AS INT) % 5 AS key")
val join = values.join(values, "key")
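    // Self-join on `key` (which is value % 5); each output row has the shape (key, value, value).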
def checkQuery(check: AssertOnQuery): Unit = {
testStream(join)(
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 1, 2),
CheckAnswer((1, 1, 1), (2, 2, 2)),
AddKafkaData(Set(topic), 6, 3),
CheckAnswer((1, 1, 1), (2, 2, 2), (3, 3, 3), (1, 6, 1), (1, 1, 6), (1, 6, 6)),
check
)
}
withSQLConf(SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
checkQuery(AssertOnQuery { q =>
assert(q.availableOffsets.iterator.size == 1)
// The kafka source is scanned twice because of self-join
assert(q.recentProgress.map(_.numInputRows).sum == 8)
true
})
}
withSQLConf(SQLConf.EXCHANGE_REUSE_ENABLED.key -> "true") {
checkQuery(AssertOnQuery { q =>
assert(q.availableOffsets.iterator.size == 1)
assert(q.lastExecution.executedPlan.collect {
case r: ReusedExchangeExec => r
}.length == 1)
// The kafka source is scanned only once because of exchange reuse.
assert(q.recentProgress.map(_.numInputRows).sum == 4)
true
})
}
}
test("read Kafka transactional messages: read_committed") {
// This test will cover the following cases:
// 1. the whole batch contains no data messages
// 2. the first offset in a batch is not a committed data message
// 3. the last offset in a batch is not a committed data message
// 4. there is a gap in the middle of a batch
val topic = newTopic()
testUtils.createTopic(topic, partitions = 1)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("kafka.isolation.level", "read_committed")
.option("maxOffsetsPerTrigger", 3)
.option("subscribe", topic)
.option("startingOffsets", "earliest")
// Set a short timeout to make the test fast. When a batch doesn't contain any visible data
// messages, "poll" will wait until timeout.
.option("kafkaConsumer.pollTimeoutMs", 5000)
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
val clock = new StreamManualClock
// Wait until the manual clock is waiting on further instructions to move forward. Then we can
// ensure all batches we are waiting for have been processed.
val waitUntilBatchProcessed = Execute { q =>
eventually(Timeout(streamingTimeout)) {
if (!q.exception.isDefined) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
}
val topicPartition = new TopicPartition(topic, 0)
// The message values are the same as their offsets to make the test easy to follow
testUtils.withTransactionalProducer { producer =>
testStream(mapped)(
StartStream(Trigger.ProcessingTime(100), clock),
waitUntilBatchProcessed,
CheckAnswer(),
WithOffsetSync(topicPartition, expectedOffset = 5) { () =>
// Send 5 messages. They should be visible only after being committed.
producer.beginTransaction()
(0 to 4).foreach { i =>
producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
}
},
AdvanceManualClock(100),
waitUntilBatchProcessed,
// Should not see any uncommitted messages
CheckNewAnswer(),
WithOffsetSync(topicPartition, expectedOffset = 6) { () =>
producer.commitTransaction()
},
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(0, 1, 2), // offset 0, 1, 2
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(3, 4), // offset: 3, 4, 5* [* means it's not a committed data message]
WithOffsetSync(topicPartition, expectedOffset = 12) { () =>
// Send 5 messages and abort the transaction. They should not be read.
producer.beginTransaction()
(6 to 10).foreach { i =>
producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
}
producer.abortTransaction()
},
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(), // offset: 6*, 7*, 8*
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(), // offset: 9*, 10*, 11*
WithOffsetSync(topicPartition, expectedOffset = 18) { () =>
          // Send 5 messages again. The consumer should skip the aborted messages above and read
          // these new ones.
producer.beginTransaction()
(12 to 16).foreach { i =>
producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
}
producer.commitTransaction()
},
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(12, 13, 14), // offset: 12, 13, 14
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(15, 16), // offset: 15, 16, 17*
WithOffsetSync(topicPartition, expectedOffset = 25) { () =>
producer.beginTransaction()
producer.send(new ProducerRecord[String, String](topic, "18")).get()
producer.commitTransaction()
producer.beginTransaction()
producer.send(new ProducerRecord[String, String](topic, "20")).get()
producer.commitTransaction()
producer.beginTransaction()
producer.send(new ProducerRecord[String, String](topic, "22")).get()
producer.send(new ProducerRecord[String, String](topic, "23")).get()
producer.commitTransaction()
},
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(18, 20), // offset: 18, 19*, 20
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(22, 23), // offset: 21*, 22, 23
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer() // offset: 24*
)
}
}
test("read Kafka transactional messages: read_uncommitted") {
// This test will cover the following cases:
// 1. the whole batch contains no data messages
// 2. the first offset in a batch is not a committed data message
// 3. the last offset in a batch is not a committed data message
// 4. there is a gap in the middle of a batch
val topic = newTopic()
testUtils.createTopic(topic, partitions = 1)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("kafka.isolation.level", "read_uncommitted")
.option("maxOffsetsPerTrigger", 3)
.option("subscribe", topic)
.option("startingOffsets", "earliest")
// Set a short timeout to make the test fast. When a batch doesn't contain any visible data
// messages, "poll" will wait until timeout.
.option("kafkaConsumer.pollTimeoutMs", 5000)
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
val clock = new StreamManualClock
// Wait until the manual clock is waiting on further instructions to move forward. Then we can
// ensure all batches we are waiting for have been processed.
val waitUntilBatchProcessed = Execute { q =>
eventually(Timeout(streamingTimeout)) {
if (!q.exception.isDefined) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
}
val topicPartition = new TopicPartition(topic, 0)
// The message values are the same as their offsets to make the test easy to follow
testUtils.withTransactionalProducer { producer =>
testStream(mapped)(
StartStream(Trigger.ProcessingTime(100), clock),
waitUntilBatchProcessed,
CheckNewAnswer(),
WithOffsetSync(topicPartition, expectedOffset = 5) { () =>
          // Send 5 messages. In read_uncommitted mode they are visible even before the
          // transaction commits.
producer.beginTransaction()
(0 to 4).foreach { i =>
producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
}
},
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(0, 1, 2), // offset 0, 1, 2
WithOffsetSync(topicPartition, expectedOffset = 6) { () =>
producer.commitTransaction()
},
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(3, 4), // offset: 3, 4, 5* [* means it's not a committed data message]
WithOffsetSync(topicPartition, expectedOffset = 12) { () =>
          // Send 5 messages and abort the transaction. In read_uncommitted mode they are
          // still read.
producer.beginTransaction()
(6 to 10).foreach { i =>
producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
}
producer.abortTransaction()
},
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(6, 7, 8), // offset: 6, 7, 8
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(9, 10), // offset: 9, 10, 11*
WithOffsetSync(topicPartition, expectedOffset = 18) { () =>
          // Send 5 more messages after the aborted transaction; they are read as usual.
producer.beginTransaction()
(12 to 16).foreach { i =>
producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
}
producer.commitTransaction()
},
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(12, 13, 14), // offset: 12, 13, 14
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(15, 16), // offset: 15, 16, 17*
WithOffsetSync(topicPartition, expectedOffset = 25) { () =>
producer.beginTransaction()
producer.send(new ProducerRecord[String, String](topic, "18")).get()
producer.commitTransaction()
producer.beginTransaction()
producer.send(new ProducerRecord[String, String](topic, "20")).get()
producer.commitTransaction()
producer.beginTransaction()
producer.send(new ProducerRecord[String, String](topic, "22")).get()
producer.send(new ProducerRecord[String, String](topic, "23")).get()
producer.commitTransaction()
},
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(18, 20), // offset: 18, 19*, 20
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer(22, 23), // offset: 21*, 22, 23
AdvanceManualClock(100),
waitUntilBatchProcessed,
CheckNewAnswer() // offset: 24*
)
}
}
test("SPARK-25495: FetchedData.reset should reset all fields") {
val topic = newTopic()
val topicPartition = new TopicPartition(topic, 0)
testUtils.createTopic(topic, partitions = 1)
val ds = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("kafka.isolation.level", "read_committed")
.option("subscribe", topic)
.option("startingOffsets", "earliest")
.load()
.select($"value".as[String])
testUtils.withTransactionalProducer { producer =>
producer.beginTransaction()
(0 to 3).foreach { i =>
producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
}
producer.commitTransaction()
}
testUtils.waitUntilOffsetAppears(topicPartition, 5)
val q = ds.writeStream.foreachBatch { (ds: Dataset[String], epochId: Long) =>
if (epochId == 0) {
        // Send more messages before the tasks of the current batch start reading the current
        // batch's data, so that the executors will prefetch messages in the next batch and drop
        // them. In this case, if we forget to reset `FetchedData._nextOffsetInFetchedData` or
        // `FetchedData._offsetAfterPoll` (see SPARK-25495), the next batch will see incorrect
        // values, return wrong results and hence fail the test.
testUtils.withTransactionalProducer { producer =>
producer.beginTransaction()
(4 to 7).foreach { i =>
producer.send(new ProducerRecord[String, String](topic, i.toString)).get()
}
producer.commitTransaction()
}
testUtils.waitUntilOffsetAppears(topicPartition, 10)
checkDatasetUnorderly(ds, (0 to 3).map(_.toString): _*)
} else {
checkDatasetUnorderly(ds, (4 to 7).map(_.toString): _*)
}
}.start()
try {
q.processAllAvailable()
} finally {
q.stop()
}
}
test("SPARK-27494: read kafka record containing null key/values.") {
testNullableKeyValue(Trigger.ProcessingTime(100))
}
test("SPARK-30656: minPartitions") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 3)
testUtils.sendMessages(topic, (0 to 9).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (10 to 19).map(_.toString).toArray, Some(1))
testUtils.sendMessages(topic, Array("20"), Some(2))
val ds = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribe", topic)
.option("startingOffsets", "earliest")
.option("minPartitions", "6")
.load()
.select($"value".as[String])
val q = ds.writeStream.foreachBatch { (batch: Dataset[String], _: Long) =>
val partitions = batch.rdd.collectPartitions()
assert(partitions.length >= 6)
assert(partitions.flatten.toSet === (0 to 20).map(_.toString).toSet): Unit
}.start()
try {
q.processAllAvailable()
} finally {
q.stop()
}
}
}
class KafkaMicroBatchV1SourceWithAdminSuite extends KafkaMicroBatchV1SourceSuite {
override def beforeAll(): Unit = {
super.beforeAll()
spark.conf.set(SQLConf.USE_DEPRECATED_KAFKA_OFFSET_FETCHING.key, "false")
}
}
class KafkaMicroBatchV2SourceWithAdminSuite extends KafkaMicroBatchV2SourceSuite {
override def beforeAll(): Unit = {
super.beforeAll()
spark.conf.set(SQLConf.USE_DEPRECATED_KAFKA_OFFSET_FETCHING.key, "false")
}
}
class KafkaMicroBatchV1SourceSuite extends KafkaMicroBatchSourceSuiteBase {
override def beforeAll(): Unit = {
super.beforeAll()
spark.conf.set(
SQLConf.DISABLED_V2_STREAMING_MICROBATCH_READERS.key,
classOf[KafkaSourceProvider].getCanonicalName)
}
test("V1 Source is used when disabled through SQLConf") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribePattern", s"$topic.*")
.load()
testStream(kafka)(
makeSureGetOffsetCalled,
AssertOnQuery { query =>
query.logicalPlan.collect {
case StreamingExecutionRelation(_: KafkaSource, _) => true
}.nonEmpty
}
)
}
}
class KafkaMicroBatchV2SourceSuite extends KafkaMicroBatchSourceSuiteBase {
test("V2 Source is used by default") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribePattern", s"$topic.*")
.load()
testStream(kafka)(
makeSureGetOffsetCalled,
AssertOnQuery { query =>
query.logicalPlan.find {
case r: StreamingDataSourceV2Relation => r.stream.isInstanceOf[KafkaMicroBatchStream]
case _ => false
}.isDefined
}
)
}
testWithUninterruptibleThread("minPartitions is supported") {
val topic = newTopic()
val tp = new TopicPartition(topic, 0)
testUtils.createTopic(topic, partitions = 1)
def test(
minPartitions: String,
numPartitionsGenerated: Int,
reusesConsumers: Boolean): Unit = {
SparkSession.setActiveSession(spark)
withTempDir { dir =>
val provider = new KafkaSourceProvider()
val options = Map(
"kafka.bootstrap.servers" -> testUtils.brokerAddress,
"subscribe" -> topic
) ++ Option(minPartitions).map { p => "minPartitions" -> p}
val dsOptions = new CaseInsensitiveStringMap(options.asJava)
val table = provider.getTable(dsOptions)
val stream = table.newScanBuilder(dsOptions).build().toMicroBatchStream(dir.getAbsolutePath)
val inputPartitions = stream.planInputPartitions(
KafkaSourceOffset(Map(tp -> 0L)),
KafkaSourceOffset(Map(tp -> 100L))).map(_.asInstanceOf[KafkaBatchInputPartition])
withClue(s"minPartitions = $minPartitions generated factories $inputPartitions\\n\\t") {
assert(inputPartitions.size == numPartitionsGenerated)
}
}
}
// Test cases when minPartitions is used and not used
test(minPartitions = null, numPartitionsGenerated = 1, reusesConsumers = true)
test(minPartitions = "1", numPartitionsGenerated = 1, reusesConsumers = true)
test(minPartitions = "4", numPartitionsGenerated = 4, reusesConsumers = false)
// Test illegal minPartitions values
intercept[IllegalArgumentException] { test(minPartitions = "a", 1, true) }
intercept[IllegalArgumentException] { test(minPartitions = "1.0", 1, true) }
intercept[IllegalArgumentException] { test(minPartitions = "0", 1, true) }
intercept[IllegalArgumentException] { test(minPartitions = "-1", 1, true) }
}
test("default config of includeHeader doesn't break existing query from Spark 2.4") {
import testImplicits._
    // This topic name was migrated from a Spark 2.4.3 test run
    val topic = "spark-test-topic-2b8619f5-d3c4-4c2d-b5d1-8d9d9458aa62"
    // Create the same topic and messages as in that test run
testUtils.createTopic(topic, partitions = 5, overwrite = true)
testUtils.sendMessages(topic, Array(-20, -21, -22).map(_.toString), Some(0))
testUtils.sendMessages(topic, Array(-10, -11, -12).map(_.toString), Some(1))
testUtils.sendMessages(topic, Array(0, 1, 2).map(_.toString), Some(2))
testUtils.sendMessages(topic, Array(10, 11, 12).map(_.toString), Some(3))
testUtils.sendMessages(topic, Array(20, 21, 22).map(_.toString), Some(4))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val headers = Seq(("a", "b".getBytes(UTF_8)), ("c", "d".getBytes(UTF_8)))
(31 to 35).map { num =>
new RecordBuilder(topic, num.toString).partition(num - 31).headers(headers).build()
}.foreach { rec => testUtils.sendMessage(rec) }
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribePattern", topic)
.option("startingOffsets", "earliest")
.load()
val query = kafka.dropDuplicates()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
.map(kv => kv._2.toInt + 1)
val resourceUri = this.getClass.getResource(
"/structured-streaming/checkpoint-version-2.4.3-kafka-include-headers-default/").toURI
val checkpointDir = Utils.createTempDir().getCanonicalFile
    // Copy the checkpoint to a temp dir to prevent changes to the original.
    // Not doing this will lead to the test passing on the first run but failing on subsequent runs.
FileUtils.copyDirectory(new File(resourceUri), checkpointDir)
testStream(query)(
StartStream(checkpointLocation = checkpointDir.getAbsolutePath),
/*
Note: The checkpoint was generated using the following input in Spark version 2.4.3
testUtils.createTopic(topic, partitions = 5, overwrite = true)
testUtils.sendMessages(topic, Array(-20, -21, -22).map(_.toString), Some(0))
testUtils.sendMessages(topic, Array(-10, -11, -12).map(_.toString), Some(1))
testUtils.sendMessages(topic, Array(0, 1, 2).map(_.toString), Some(2))
testUtils.sendMessages(topic, Array(10, 11, 12).map(_.toString), Some(3))
testUtils.sendMessages(topic, Array(20, 21, 22).map(_.toString), Some(4))
*/
makeSureGetOffsetCalled,
CheckNewAnswer(32, 33, 34, 35, 36)
)
}
test("test custom metrics - with rate limit") {
import testImplicits._
val topic = newTopic()
val data = 1 to 10
testUtils.createTopic(topic, partitions = 2)
testUtils.sendMessages(topic, (1 to 5).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (6 to 10).map(_.toString).toArray, Some(1))
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
.option("maxOffsetsPerTrigger", 1)
.option(STARTING_OFFSETS_OPTION_KEY, "earliest")
.load()
.selectExpr("CAST(value AS STRING)")
.as[String]
.map(_.toInt)
testStream(kafka)(
StartStream(),
makeSureGetOffsetCalled,
CheckAnswer(data: _*),
Execute { query =>
// The rate limit is 1, so there must be some delay in offsets per partition.
val progressWithDelay = query.recentProgress.map(_.sources.head).reverse.find { progress =>
          // Find a progress entry whose average offsetsBehindLatest is greater than 0.
!progress.metrics.isEmpty && progress.metrics.get("avgOffsetsBehindLatest").toDouble > 0
}
assert(progressWithDelay.nonEmpty)
val metrics = progressWithDelay.get.metrics
assert(metrics.keySet() ===
Set("minOffsetsBehindLatest",
"maxOffsetsBehindLatest",
"avgOffsetsBehindLatest").asJava)
assert(metrics.get("minOffsetsBehindLatest").toLong > 0)
assert(metrics.get("maxOffsetsBehindLatest").toLong > 0)
assert(metrics.get("avgOffsetsBehindLatest").toDouble > 0)
}
)
}
test("test custom metrics - no rate limit") {
import testImplicits._
val topic = newTopic()
val data = 1 to 10
testUtils.createTopic(topic, partitions = 2)
testUtils.sendMessages(topic, (1 to 5).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (6 to 10).map(_.toString).toArray, Some(1))
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
.option(STARTING_OFFSETS_OPTION_KEY, "earliest")
.load()
.selectExpr("CAST(value AS STRING)")
.as[String]
.map(_.toInt)
testStream(kafka)(
StartStream(),
makeSureGetOffsetCalled,
CheckAnswer(data: _*),
Execute { query =>
val progress = query.recentProgress.map(_.sources.head).lastOption
assert(progress.nonEmpty)
val metrics = progress.get.metrics
// When there is no rate limit, there shouldn't be any delay in the current stream.
assert(metrics.keySet() ===
Set("minOffsetsBehindLatest",
"maxOffsetsBehindLatest",
"avgOffsetsBehindLatest").asJava)
assert(metrics.get("minOffsetsBehindLatest").toLong === 0)
assert(metrics.get("maxOffsetsBehindLatest").toLong === 0)
assert(metrics.get("avgOffsetsBehindLatest").toDouble === 0)
}
)
}
test("test custom metrics - corner cases") {
val topicPartition1 = new TopicPartition(newTopic(), 0)
val topicPartition2 = new TopicPartition(newTopic(), 0)
val latestOffset = Map[TopicPartition, Long]((topicPartition1, 3L), (topicPartition2, 6L))
// test empty offset.
assert(KafkaMicroBatchStream.metrics(Optional.ofNullable(null), latestOffset).isEmpty)
// test valid offsetsBehindLatest
val offset = KafkaSourceOffset(
Map[TopicPartition, Long]((topicPartition1, 1L), (topicPartition2, 2L)))
assert(
KafkaMicroBatchStream.metrics(Optional.ofNullable(offset), latestOffset) ===
Map[String, String](
"minOffsetsBehindLatest" -> "2",
"maxOffsetsBehindLatest" -> "4",
"avgOffsetsBehindLatest" -> "3.0").asJava)
// test null latestAvailablePartitionOffsets
assert(KafkaMicroBatchStream.metrics(Optional.ofNullable(offset), null).isEmpty)
}
}
abstract class KafkaSourceSuiteBase extends KafkaSourceTest {
import testImplicits._
test("cannot stop Kafka stream") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, (101 to 105).map { _.toString }.toArray)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribePattern", s"$topic.*")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
makeSureGetOffsetCalled,
StopStream
)
}
for (failOnDataLoss <- Seq(true, false)) {
test(s"assign from latest offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromLatestOffsets(
topic,
addPartitions = false,
failOnDataLoss = failOnDataLoss,
"assign" -> assignString(topic, 0 to 4))
}
test(s"assign from earliest offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromEarliestOffsets(
topic,
addPartitions = false,
failOnDataLoss = failOnDataLoss,
"assign" -> assignString(topic, 0 to 4))
}
test(s"assign from specific offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromSpecificOffsets(
topic,
failOnDataLoss = failOnDataLoss,
"assign" -> assignString(topic, 0 to 4))
}
test(s"assign from specific timestamps (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromSpecificTimestamps(
topic,
failOnDataLoss = failOnDataLoss,
addPartitions = false,
"assign" -> assignString(topic, 0 to 4))
}
test(s"assign from global timestamp per topic (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromGlobalTimestamp(
topic,
failOnDataLoss = failOnDataLoss,
addPartitions = false,
"assign" -> assignString(topic, 0 to 4))
}
test(s"subscribing topic by name from latest offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromLatestOffsets(
topic,
addPartitions = true,
failOnDataLoss = failOnDataLoss,
"subscribe" -> topic)
}
test(s"subscribing topic by name from earliest offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromEarliestOffsets(
topic,
addPartitions = true,
failOnDataLoss = failOnDataLoss,
"subscribe" -> topic)
}
test(s"subscribing topic by name from specific offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromSpecificOffsets(topic, failOnDataLoss = failOnDataLoss, "subscribe" -> topic)
}
test(s"subscribing topic by name from specific timestamps (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromSpecificTimestamps(topic, failOnDataLoss = failOnDataLoss, addPartitions = true,
"subscribe" -> topic)
}
test(s"subscribing topic by name from global timestamp per topic" +
s" (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromGlobalTimestamp(topic, failOnDataLoss = failOnDataLoss, addPartitions = true,
"subscribe" -> topic)
}
test(s"subscribing topic by pattern from latest offsets (failOnDataLoss: $failOnDataLoss)") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-suffix"
testFromLatestOffsets(
topic,
addPartitions = true,
failOnDataLoss = failOnDataLoss,
"subscribePattern" -> s"$topicPrefix-.*")
}
test(s"subscribing topic by pattern from earliest offsets (failOnDataLoss: $failOnDataLoss)") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-suffix"
testFromEarliestOffsets(
topic,
addPartitions = true,
failOnDataLoss = failOnDataLoss,
"subscribePattern" -> s"$topicPrefix-.*")
}
test(s"subscribing topic by pattern from specific offsets (failOnDataLoss: $failOnDataLoss)") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-suffix"
testFromSpecificOffsets(
topic,
failOnDataLoss = failOnDataLoss,
"subscribePattern" -> s"$topicPrefix-.*")
}
test(s"subscribing topic by pattern from specific timestamps " +
s"(failOnDataLoss: $failOnDataLoss)") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-suffix"
testFromSpecificTimestamps(
topic,
failOnDataLoss = failOnDataLoss,
addPartitions = true,
"subscribePattern" -> s"$topicPrefix-.*")
}
test(s"subscribing topic by pattern from global timestamp per topic " +
s"(failOnDataLoss: $failOnDataLoss)") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-suffix"
testFromGlobalTimestamp(
topic,
failOnDataLoss = failOnDataLoss,
addPartitions = true,
"subscribePattern" -> s"$topicPrefix-.*")
}
}
test("subscribing topic by name from specific timestamps with non-matching starting offset") {
val topic = newTopic()
testFromSpecificTimestampsWithNoMatchingStartingOffset(topic, "subscribe" -> topic)
}
test("subscribing topic by name from global timestamp per topic with " +
"non-matching starting offset") {
val topic = newTopic()
testFromGlobalTimestampWithNoMatchingStartingOffset(topic, "subscribe" -> topic)
}
test("subscribing topic by pattern from specific timestamps with " +
"non-matching starting offset") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-suffix"
testFromSpecificTimestampsWithNoMatchingStartingOffset(topic,
"subscribePattern" -> s"$topicPrefix-.*")
}
test("subscribing topic by pattern from global timestamp per topic with " +
"non-matching starting offset") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-suffix"
testFromGlobalTimestampWithNoMatchingStartingOffset(topic,
"subscribePattern" -> s"$topicPrefix-.*")
}
private def testFromSpecificTimestampsWithNoMatchingStartingOffset(
topic: String,
options: (String, String)*): Unit = {
testUtils.createTopic(topic, partitions = 5)
val firstTimestamp = System.currentTimeMillis() - 5000
val secondTimestamp = firstTimestamp + 1000
setupTestMessagesForTestOnTimestampOffsets(topic, firstTimestamp, secondTimestamp)
// no data after second timestamp for partition 4
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
    // Here we start from the second timestamp for all partitions, whereas we know there's
    // no data in partition 4 matching the second timestamp.
val startPartitionTimestamps: Map[TopicPartition, Long] =
(0 to 4).map(new TopicPartition(topic, _) -> secondTimestamp).toMap
val startingTimestamps = JsonUtils.partitionTimestamps(startPartitionTimestamps)
val mapped = setupDataFrameForTestOnTimestampOffsets(startingTimestamps, failOnDataLoss = true,
options: _*)
assertQueryFailOnStartOffsetStrategyAsError(mapped)
val mapped2 = setupDataFrameForTestOnTimestampOffsets(startingTimestamps, failOnDataLoss = true,
options :+ ("startingoffsetsbytimestampstrategy", "error"): _*)
assertQueryFailOnStartOffsetStrategyAsError(mapped2)
val mapped3 = setupDataFrameForTestOnTimestampOffsets(startingTimestamps, failOnDataLoss = true,
options :+ ("startingoffsetsbytimestampstrategy", "latest"): _*)
testStream(mapped3)(
makeSureGetOffsetCalled,
Execute { q =>
val partitions = (0 to 4).map(new TopicPartition(topic, _))
// wait to reach the last offset in every partition
q.awaitOffset(
0, KafkaSourceOffset(partitions.map(tp => tp -> 3L).toMap), streamingTimeout.toMillis)
},
CheckAnswer(-21, -22, -11, -12, 2, 12),
Execute { q =>
sendMessagesWithTimestamp(topic, Array(23, 24, 25).map(_.toString), 4, secondTimestamp)
// wait to reach the new last offset in every partition
val partitions = (0 to 3).map(new TopicPartition(topic, _)).map(tp => tp -> 3L) ++
Seq(new TopicPartition(topic, 4) -> 6L)
q.awaitOffset(
0, KafkaSourceOffset(partitions.toMap), streamingTimeout.toMillis)
},
CheckNewAnswer(23, 24, 25)
)
}
private def testFromGlobalTimestampWithNoMatchingStartingOffset(
topic: String,
options: (String, String)*): Unit = {
testUtils.createTopic(topic, partitions = 5)
val firstTimestamp = System.currentTimeMillis() - 5000
val secondTimestamp = firstTimestamp + 1000
setupTestMessagesForTestOnTimestampOffsets(topic, firstTimestamp, secondTimestamp)
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
    // Here we start from the second timestamp for all partitions, whereas we know there's
    // no data in partition 4 matching the second timestamp.
val mapped = setupDataFrameForTestOnGlobalTimestamp(secondTimestamp, failOnDataLoss = true,
options: _*)
assertQueryFailOnStartOffsetStrategyAsError(mapped)
val mapped2 = setupDataFrameForTestOnGlobalTimestamp(secondTimestamp, failOnDataLoss = true,
options :+ ("startingoffsetsbytimestampstrategy", "error"): _*)
assertQueryFailOnStartOffsetStrategyAsError(mapped2)
val mapped3 = setupDataFrameForTestOnGlobalTimestamp(secondTimestamp, failOnDataLoss = true,
options :+ ("startingoffsetsbytimestampstrategy", "latest"): _*)
testStream(mapped3)(
makeSureGetOffsetCalled,
Execute { q =>
val partitions = (0 to 4).map(new TopicPartition(topic, _))
// wait to reach the last offset in every partition
q.awaitOffset(
0, KafkaSourceOffset(partitions.map(tp => tp -> 3L).toMap), streamingTimeout.toMillis)
},
CheckAnswer(-21, -22, -11, -12, 2, 12),
Execute { q =>
sendMessagesWithTimestamp(topic, Array(23, 24, 25).map(_.toString), 4, secondTimestamp)
// wait to reach the new last offset in every partition
val partitions = (0 to 3).map(new TopicPartition(topic, _)).map(tp => tp -> 3L) ++
Seq(new TopicPartition(topic, 4) -> 6L)
q.awaitOffset(
0, KafkaSourceOffset(partitions.toMap), streamingTimeout.toMillis)
},
CheckNewAnswer(23, 24, 25)
)
}
private def assertQueryFailOnStartOffsetStrategyAsError(df: Dataset[_]): Unit = {
    // In continuous mode, the original exception is unfortunately not caught here, so we have to
    // check for a general exception instead of verifying IllegalArgumentException.
intercept[Exception] {
testStream(df)(makeSureGetOffsetCalled)
}
}
test("bad source options") {
def testBadOptions(options: (String, String)*)(expectedMsgs: String*): Unit = {
val ex = intercept[IllegalArgumentException] {
val reader = spark
.readStream
.format("kafka")
options.foreach { case (k, v) => reader.option(k, v) }
reader.load()
}
expectedMsgs.foreach { m =>
assert(ex.getMessage.toLowerCase(Locale.ROOT).contains(m.toLowerCase(Locale.ROOT)))
}
}
// Specifying an ending offset
testBadOptions("endingOffsets" -> "latest")("Ending offset not valid in streaming queries")
testBadOptions("subscribe" -> "t", "endingOffsetsByTimestamp" -> "{\\"t\\": {\\"0\\": 1000}}")(
"Ending timestamp not valid in streaming queries")
// No strategy specified
testBadOptions()("options must be specified", "subscribe", "subscribePattern")
// Multiple strategies specified
testBadOptions("subscribe" -> "t", "subscribePattern" -> "t.*")(
"only one", "options can be specified")
testBadOptions("subscribe" -> "t", "assign" -> """{"a":[0]}""")(
"only one", "options can be specified")
testBadOptions("assign" -> "")("no topicpartitions to assign")
testBadOptions("subscribe" -> "")("no topics to subscribe")
testBadOptions("subscribePattern" -> "")("pattern to subscribe is empty")
testBadOptions(
"kafka.bootstrap.servers" -> "fake", "subscribe" -> "t", "minOffsetsPerTrigger" -> "20",
"maxOffsetsPerTrigger" -> "15")(
"value of minOffsetPerTrigger(20) is higher than the maxOffsetsPerTrigger(15)")
}
test("unsupported kafka configs") {
def testUnsupportedConfig(key: String, value: String = "someValue"): Unit = {
val ex = intercept[IllegalArgumentException] {
val reader = spark
.readStream
.format("kafka")
.option("subscribe", "topic")
.option("kafka.bootstrap.servers", "somehost")
.option(s"$key", value)
reader.load()
}
assert(ex.getMessage.toLowerCase(Locale.ROOT).contains("not supported"))
}
testUnsupportedConfig("kafka.auto.offset.reset")
testUnsupportedConfig("kafka.enable.auto.commit")
testUnsupportedConfig("kafka.interceptor.classes")
testUnsupportedConfig("kafka.key.deserializer")
testUnsupportedConfig("kafka.value.deserializer")
testUnsupportedConfig("kafka.auto.offset.reset", "none")
testUnsupportedConfig("kafka.auto.offset.reset", "someValue")
testUnsupportedConfig("kafka.auto.offset.reset", "earliest")
testUnsupportedConfig("kafka.auto.offset.reset", "latest")
}
test("get offsets from case insensitive parameters") {
for ((optionKey, optionValue, answer) <- Seq(
(STARTING_OFFSETS_OPTION_KEY, "earLiEst", EarliestOffsetRangeLimit),
(ENDING_OFFSETS_OPTION_KEY, "laTest", LatestOffsetRangeLimit),
(STARTING_OFFSETS_OPTION_KEY, """{"topic-A":{"0":23}}""",
SpecificOffsetRangeLimit(Map(new TopicPartition("topic-A", 0) -> 23))))) {
val offset = getKafkaOffsetRangeLimit(
CaseInsensitiveMap[String](Map(optionKey -> optionValue)), "dummy", "dummy", optionKey,
answer)
assert(offset === answer)
}
for ((optionKey, answer) <- Seq(
(STARTING_OFFSETS_OPTION_KEY, EarliestOffsetRangeLimit),
(ENDING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit))) {
val offset = getKafkaOffsetRangeLimit(
CaseInsensitiveMap[String](Map.empty), "dummy", "dummy", optionKey, answer)
assert(offset === answer)
}
}
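  // Builds the JSON value for the "assign" option, mapping the topic to the given partitions.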
private def assignString(topic: String, partitions: Iterable[Int]): String = {
JsonUtils.partitions(partitions.map(p => new TopicPartition(topic, p)))
}
private def testFromSpecificOffsets(
topic: String,
failOnDataLoss: Boolean,
options: (String, String)*): Unit = {
val partitionOffsets = Map(
new TopicPartition(topic, 0) -> -2L,
new TopicPartition(topic, 1) -> -1L,
new TopicPartition(topic, 2) -> 0L,
new TopicPartition(topic, 3) -> 1L,
new TopicPartition(topic, 4) -> 2L
)
val startingOffsets = JsonUtils.partitionOffsets(partitionOffsets)
testUtils.createTopic(topic, partitions = 5)
// part 0 starts at earliest, these should all be seen
testUtils.sendMessages(topic, Array(-20, -21, -22).map(_.toString), Some(0))
// part 1 starts at latest, these should all be skipped
testUtils.sendMessages(topic, Array(-10, -11, -12).map(_.toString), Some(1))
// part 2 starts at 0, these should all be seen
testUtils.sendMessages(topic, Array(0, 1, 2).map(_.toString), Some(2))
// part 3 starts at 1, first should be skipped
testUtils.sendMessages(topic, Array(10, 11, 12).map(_.toString), Some(3))
// part 4 starts at 2, first and second should be skipped
testUtils.sendMessages(topic, Array(20, 21, 22).map(_.toString), Some(4))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark
.readStream
.format("kafka")
.option("startingOffsets", startingOffsets)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
testStream(mapped)(
makeSureGetOffsetCalled,
Execute { q =>
// wait to reach the last offset in every partition
q.awaitOffset(0,
KafkaSourceOffset(partitionOffsets.mapValues(_ => 3L).toMap), streamingTimeout.toMillis)
},
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22),
StopStream,
StartStream(),
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22), // Should get the data back on recovery
AddKafkaData(Set(topic), 30, 31, 32, 33, 34)(ensureDataInMultiplePartition = true),
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22, 30, 31, 32, 33, 34),
StopStream
)
}
private def testFromSpecificTimestamps(
topic: String,
failOnDataLoss: Boolean,
addPartitions: Boolean,
options: (String, String)*): Unit = {
testUtils.createTopic(topic, partitions = 5)
val firstTimestamp = System.currentTimeMillis() - 5000
val secondTimestamp = firstTimestamp + 1000
setupTestMessagesForTestOnTimestampOffsets(topic, firstTimestamp, secondTimestamp)
// no data after second timestamp for partition 4
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
    // We intentionally start from the second timestamp,
    // except for partition 4, which starts from the first timestamp.
val startPartitionTimestamps: Map[TopicPartition, Long] = Map(
(0 to 3).map(new TopicPartition(topic, _) -> secondTimestamp): _*
) ++ Map(new TopicPartition(topic, 4) -> firstTimestamp)
val startingTimestamps = JsonUtils.partitionTimestamps(startPartitionTimestamps)
val mapped = setupDataFrameForTestOnTimestampOffsets(startingTimestamps, failOnDataLoss,
options: _*)
testStream(mapped)(
makeSureGetOffsetCalled,
Execute { q =>
val partitions = (0 to 4).map(new TopicPartition(topic, _))
// wait to reach the last offset in every partition
q.awaitOffset(
0, KafkaSourceOffset(partitions.map(tp => tp -> 3L).toMap), streamingTimeout.toMillis)
},
CheckAnswer(-21, -22, -11, -12, 2, 12, 20, 21, 22),
StopStream,
StartStream(),
CheckAnswer(-21, -22, -11, -12, 2, 12, 20, 21, 22), // Should get the data back on recovery
StopStream,
AddKafkaData(Set(topic), 30, 31, 32), // Add data when stream is stopped
StartStream(),
CheckAnswer(-21, -22, -11, -12, 2, 12, 20, 21, 22, 30, 31, 32), // Should get the added data
AssertOnQuery("Add partitions") { query: StreamExecution =>
if (addPartitions) setTopicPartitions(topic, 10, query)
true
},
AddKafkaData(Set(topic), 40, 41, 42, 43, 44)(ensureDataInMultiplePartition = true),
CheckAnswer(-21, -22, -11, -12, 2, 12, 20, 21, 22, 30, 31, 32, 40, 41, 42, 43, 44),
StopStream
)
}
private def testFromGlobalTimestamp(
topic: String,
failOnDataLoss: Boolean,
addPartitions: Boolean,
options: (String, String)*): Unit = {
testUtils.createTopic(topic, partitions = 5)
val firstTimestamp = System.currentTimeMillis() - 5000
val secondTimestamp = firstTimestamp + 1000
setupTestMessagesForTestOnTimestampOffsets(topic, firstTimestamp, secondTimestamp)
    // Here we should add records to partition 4 that match the second timestamp,
    // as the query will break if there are no matching records.
sendMessagesWithTimestamp(topic, Array(23, 24).map(_.toString), 4, secondTimestamp)
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
    // We intentionally start from the second timestamp for all partitions
    // by setting a global starting timestamp.
val mapped = setupDataFrameForTestOnGlobalTimestamp(secondTimestamp, failOnDataLoss,
options: _*)
testStream(mapped)(
makeSureGetOffsetCalled,
Execute { q =>
// wait to reach the last offset in every partition
val partAndOffsets = (0 to 4).map(new TopicPartition(topic, _)).map { tp =>
if (tp.partition() < 4) {
tp -> 3L
} else {
tp -> 5L // we added 2 more records to partition 4
}
}.toMap
q.awaitOffset(0, KafkaSourceOffset(partAndOffsets), streamingTimeout.toMillis)
},
CheckAnswer(-21, -22, -11, -12, 2, 12, 23, 24),
StopStream,
StartStream(),
CheckAnswer(-21, -22, -11, -12, 2, 12, 23, 24), // Should get the data back on recovery
StopStream,
AddKafkaData(Set(topic), 30, 31, 32), // Add data when stream is stopped
StartStream(),
CheckAnswer(-21, -22, -11, -12, 2, 12, 23, 24, 30, 31, 32), // Should get the added data
AssertOnQuery("Add partitions") { query: StreamExecution =>
if (addPartitions) setTopicPartitions(topic, 10, query)
true
},
AddKafkaData(Set(topic), 40, 41, 42, 43, 44)(ensureDataInMultiplePartition = true),
CheckAnswer(-21, -22, -11, -12, 2, 12, 23, 24, 30, 31, 32, 40, 41, 42, 43, 44),
StopStream
)
}
private def sendMessagesWithTimestamp(
topic: String,
msgs: Seq[String],
part: Int,
ts: Long): Unit = {
val records = msgs.map { msg =>
new RecordBuilder(topic, msg).partition(part).timestamp(ts).build()
}
testUtils.sendMessages(records)
}
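  // Produces a fixed record layout: every partition gets data at firstTimestamp, partitions 0-3
  // also get data at secondTimestamp, and partition 4 intentionally gets none at secondTimestamp.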
private def setupTestMessagesForTestOnTimestampOffsets(
topic: String,
firstTimestamp: Long,
secondTimestamp: Long): Unit = {
sendMessagesWithTimestamp(topic, Array(-20).map(_.toString), 0, firstTimestamp)
sendMessagesWithTimestamp(topic, Array(-10).map(_.toString), 1, firstTimestamp)
sendMessagesWithTimestamp(topic, Array(0, 1).map(_.toString), 2, firstTimestamp)
sendMessagesWithTimestamp(topic, Array(10, 11).map(_.toString), 3, firstTimestamp)
sendMessagesWithTimestamp(topic, Array(20, 21, 22).map(_.toString), 4, firstTimestamp)
sendMessagesWithTimestamp(topic, Array(-21, -22).map(_.toString), 0, secondTimestamp)
sendMessagesWithTimestamp(topic, Array(-11, -12).map(_.toString), 1, secondTimestamp)
sendMessagesWithTimestamp(topic, Array(2).map(_.toString), 2, secondTimestamp)
sendMessagesWithTimestamp(topic, Array(12).map(_.toString), 3, secondTimestamp)
// no data after second timestamp for partition 4
}
private def setupDataFrameForTestOnTimestampOffsets(
startingTimestamps: String,
failOnDataLoss: Boolean,
options: (String, String)*): Dataset[_] = {
val reader = spark
.readStream
.format("kafka")
.option("startingOffsetsByTimestamp", startingTimestamps)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
mapped
}
private def setupDataFrameForTestOnGlobalTimestamp(
startingTimestamp: Long,
failOnDataLoss: Boolean,
options: (String, String)*): Dataset[_] = {
val reader = spark
.readStream
.format("kafka")
.option("startingTimestamp", startingTimestamp)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
mapped
}
test("Kafka column types") {
val now = System.currentTimeMillis()
val topic = newTopic()
    testUtils.createTopic(topic, partitions = 1)
testUtils.sendMessage(
new RecordBuilder(topic, "1")
.headers(Seq(("a", "b".getBytes(UTF_8)), ("c", "d".getBytes(UTF_8)))).build()
)
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("startingOffsets", s"earliest")
.option("subscribe", topic)
.option("includeHeaders", "true")
.load()
val query = kafka
.writeStream
.format("memory")
.queryName("kafkaColumnTypes")
.trigger(defaultTrigger)
.start()
eventually(timeout(streamingTimeout)) {
assert(spark.table("kafkaColumnTypes").count == 1,
s"Unexpected results: ${spark.table("kafkaColumnTypes").collectAsList()}")
}
val row = spark.table("kafkaColumnTypes").head()
assert(row.getAs[Array[Byte]]("key") === null, s"Unexpected results: $row")
assert(row.getAs[Array[Byte]]("value") === "1".getBytes(UTF_8), s"Unexpected results: $row")
assert(row.getAs[String]("topic") === topic, s"Unexpected results: $row")
assert(row.getAs[Int]("partition") === 0, s"Unexpected results: $row")
assert(row.getAs[Long]("offset") === 0L, s"Unexpected results: $row")
    // We cannot check the exact timestamp as it's the time that messages were inserted by the
    // producer. So here we just use a lower bound to make sure the internal conversion works.
assert(row.getAs[java.sql.Timestamp]("timestamp").getTime >= now, s"Unexpected results: $row")
assert(row.getAs[Int]("timestampType") === 0, s"Unexpected results: $row")
def checkHeader(row: Row, expected: Seq[(String, Array[Byte])]): Unit = {
// array<struct<key:string,value:binary>>
val headers = row.getList[Row](row.fieldIndex("headers")).asScala
assert(headers.length === expected.length)
(0 until expected.length).foreach { idx =>
val key = headers(idx).getAs[String]("key")
val value = headers(idx).getAs[Array[Byte]]("value")
assert(key === expected(idx)._1)
assert(value === expected(idx)._2)
}
}
checkHeader(row, Seq(("a", "b".getBytes(UTF_8)), ("c", "d".getBytes(UTF_8))))
query.stop()
}
private def testFromLatestOffsets(
topic: String,
addPartitions: Boolean,
failOnDataLoss: Boolean,
options: (String, String)*): Unit = {
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, Array("-1"))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark
.readStream
.format("kafka")
.option("startingOffsets", "latest")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 1, 2, 3),
CheckAnswer(2, 3, 4),
StopStream,
StartStream(),
CheckAnswer(2, 3, 4), // Should get the data back on recovery
StopStream,
AddKafkaData(Set(topic), 4, 5, 6), // Add data when stream is stopped
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7), // Should get the added data
AddKafkaData(Set(topic), 7, 8),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9),
AssertOnQuery("Add partitions") { query: StreamExecution =>
if (addPartitions) setTopicPartitions(topic, 10, query)
true
},
AddKafkaData(Set(topic), 9, 10, 11, 12, 13, 14, 15, 16),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
)
}
private def testFromEarliestOffsets(
topic: String,
addPartitions: Boolean,
failOnDataLoss: Boolean,
options: (String, String)*): Unit = {
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, (1 to 3).map { _.toString }.toArray)
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark.readStream
reader
.format(classOf[KafkaSourceProvider].getCanonicalName.stripSuffix("$"))
.option("startingOffsets", s"earliest")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
AddKafkaData(Set(topic), 4, 5, 6), // Add data when stream is stopped
CheckAnswer(2, 3, 4, 5, 6, 7),
StopStream,
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7),
StopStream,
AddKafkaData(Set(topic), 7, 8),
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9),
AssertOnQuery("Add partitions") { query: StreamExecution =>
if (addPartitions) setTopicPartitions(topic, 10, query)
true
},
AddKafkaData(Set(topic), 9, 10, 11, 12, 13, 14, 15, 16),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
)
}
protected def testNullableKeyValue(trigger: Trigger): Unit = {
val table = "kafka_null_key_value_source_test"
withTable(table) {
val topic = newTopic()
testUtils.createTopic(topic)
testUtils.withTransactionalProducer { producer =>
val df = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.isolation.level", "read_committed")
.option("startingOffsets", "earliest")
.option("subscribe", topic)
.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val q = df
.writeStream
.format("memory")
.queryName(table)
.trigger(trigger)
.start()
try {
var idx = 0
producer.beginTransaction()
val expected1 = Seq.tabulate(5) { _ =>
producer.send(new ProducerRecord[String, String](topic, null, null)).get()
(null, null)
}.asInstanceOf[Seq[(String, String)]]
val expected2 = Seq.tabulate(5) { _ =>
idx += 1
producer.send(new ProducerRecord[String, String](topic, idx.toString, null)).get()
(idx.toString, null)
}.asInstanceOf[Seq[(String, String)]]
val expected3 = Seq.tabulate(5) { _ =>
idx += 1
producer.send(new ProducerRecord[String, String](topic, null, idx.toString)).get()
(null, idx.toString)
}.asInstanceOf[Seq[(String, String)]]
producer.commitTransaction()
eventually(timeout(streamingTimeout)) {
checkAnswer(spark.table(table), (expected1 ++ expected2 ++ expected3).toDF())
}
} finally {
q.stop()
}
}
}
}
}
object KafkaSourceSuite {
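  // Shared state accessed from ForeachWriter callbacks, which run on executor threads; the test
  // suite runs Spark in local mode, so the "executors" share this JVM and can see these fields.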
@volatile var globalTestUtils: KafkaTestUtils = _
val collectedData = new ConcurrentLinkedQueue[Any]()
}
class KafkaSourceStressSuite extends KafkaSourceTest {
import testImplicits._
val topicId = new AtomicInteger(1)
@volatile var topics: Seq[String] = (1 to 5).map(_ => newStressTopic)
def newStressTopic: String = s"stress${topicId.getAndIncrement()}"
private def nextInt(start: Int, end: Int): Int = {
start + Random.nextInt(start + end - 1)
}
test("stress test with multiple topics and partitions") {
topics.foreach { topic =>
testUtils.createTopic(topic, partitions = nextInt(1, 6))
testUtils.sendMessages(topic, (101 to 105).map { _.toString }.toArray)
}
// Create Kafka source that reads from latest offset
val kafka =
spark.readStream
.format(classOf[KafkaSourceProvider].getCanonicalName.stripSuffix("$"))
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribePattern", "stress.*")
.option("failOnDataLoss", "false")
.option("kafka.request.timeout.ms", "3000")
.option("kafka.default.api.timeout.ms", "3000")
.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
runStressTest(
mapped,
Seq(makeSureGetOffsetCalled),
(d, running) => {
Random.nextInt(5) match {
case 0 => // Add a new topic
val newTopic = newStressTopic
topics = topics ++ Seq(newTopic)
AddKafkaData(topics.toSet, d: _*)(message = s"Add topic $newTopic",
topicAction = (topic, partition) => {
if (partition.isEmpty) {
testUtils.createTopic(topic, partitions = nextInt(1, 6))
}
})
case 1 if running =>
          // Only delete a topic when the query is running. Otherwise, we may lose data and
          // be unable to check correctness.
val deletedTopic = topics(Random.nextInt(topics.size))
if (deletedTopic != topics.head) {
topics = topics.filterNot(_ == deletedTopic)
}
AddKafkaData(topics.toSet, d: _*)(message = s"Delete topic $deletedTopic",
topicAction = (topic, partition) => {
// Never remove the first topic to make sure we have at least one topic
if (topic == deletedTopic && deletedTopic != topics.head) {
testUtils.deleteTopic(deletedTopic)
}
})
case 2 => // Add new partitions
AddKafkaData(topics.toSet, d: _*)(message = "Add partition",
topicAction = (topic, partition) => {
testUtils.addPartitions(topic, partition.get + nextInt(1, 6))
})
case _ => // Just add new data
AddKafkaData(topics.toSet, d: _*)
}
},
iterations = 50)
}
}
| WeichenXu123/spark | external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaMicroBatchSourceSuite.scala | Scala | apache-2.0 | 96,231 |
package io.getquill.context.sql.norm
import io.getquill.Spec
import io.getquill.context.sql.testContext._
import io.getquill.context.sql.testContext
class JoinSpec extends Spec {
"join + filter" in {
val q = quote {
qr1.leftJoin(qr2)
.on((a, b) => a.i == b.i)
.filter(_._2.map(_.i).forall(_ == 1))
}
testContext.run(q).string mustEqual
"SELECT a.s, a.i, a.l, a.o, a.b, b.s, b.i, b.l, b.o FROM TestEntity a LEFT JOIN TestEntity2 b ON a.i = b.i WHERE b.i IS NULL OR b.i = 1"
}
"join + filter with null-check" in {
val q = quote {
qr1.leftJoin(qr2)
.on((a, b) => a.i == b.i)
.filter(_._2.map(_.i).forall(b => if (b == 1) true else false))
}
testContext.run(q).string mustEqual
"SELECT a.s, a.i, a.l, a.o, a.b, b.s, b.i, b.l, b.o FROM TestEntity a LEFT JOIN TestEntity2 b ON a.i = b.i WHERE b.i IS NULL OR b.i IS NOT NULL AND CASE WHEN b.i = 1 THEN true ELSE false END"
}
"join + map + filter" in {
val q = quote {
qr1.leftJoin(qr2)
.on((a, b) => a.i == b.i)
.map(t => (t._1.i, t._2.map(_.i)))
.filter(_._2.forall(_ == 1))
}
testContext.run(q).string mustEqual
"SELECT a.i, b.i FROM TestEntity a LEFT JOIN TestEntity2 b ON a.i = b.i WHERE b.i IS NULL OR b.i = 1"
}
"join + map + filter with null-check" in {
val q = quote {
qr1.leftJoin(qr2)
.on((a, b) => a.i == b.i)
.map(t => (t._1.i, t._2.map(_.i)))
.filter(_._2.forall(b => if (b == 1) true else false))
}
testContext.run(q).string mustEqual
"SELECT a.i, b.i FROM TestEntity a LEFT JOIN TestEntity2 b ON a.i = b.i WHERE b.i IS NULL OR b.i IS NOT NULL AND CASE WHEN b.i = 1 THEN true ELSE false END"
}
"join + filter + leftjoin" in {
val q = quote {
qr1.leftJoin(qr2).on {
(a, b) => a.i == b.i
}.filter {
ab =>
ab._2.map(_.l).contains(3L)
}.leftJoin(qr3).on {
(ab, c) =>
ab._2.map(_.i).contains(ab._1.i) && ab._2.map(_.i).contains(c.i)
}
}
testContext.run(q).string mustEqual
"SELECT ab._1s, ab._1i, ab._1l, ab._1o, ab._1b, ab._2s, ab._2i, ab._2l, ab._2o, c.s, c.i, c.l, c.o FROM (SELECT a.s AS _1s, a.i AS _1i, a.l AS _1l, a.o AS _1o, a.b AS _1b, b.s AS _2s, b.i AS _2i, b.l AS _2l, b.o AS _2o FROM TestEntity a LEFT JOIN TestEntity2 b ON a.i = b.i WHERE b.l = 3) AS ab LEFT JOIN TestEntity3 c ON ab._2i = ab._1i AND ab._2i = c.i"
}
"join + distinct + leftjoin" in {
val q = quote {
qr1.leftJoin(qr2).on {
(a, b) => a.i == b.i
}.distinct.leftJoin(qr3).on {
(ab, c) =>
ab._2.map(_.i).contains(ab._1.i) && ab._2.map(_.i).contains(c.i)
}
}
testContext.run(q).string mustEqual
"SELECT ab._1s, ab._1i, ab._1l, ab._1o, ab._1b, ab._2s, ab._2i, ab._2l, ab._2o, c.s, c.i, c.l, c.o FROM (SELECT DISTINCT a.s AS _1s, a.i AS _1i, a.l AS _1l, a.o AS _1o, a.b AS _1b, b.s AS _2s, b.i AS _2i, b.l AS _2l, b.o AS _2o FROM TestEntity a LEFT JOIN TestEntity2 b ON a.i = b.i) AS ab LEFT JOIN TestEntity3 c ON ab._2i = ab._1i AND ab._2i = c.i"
}
"multiple joins + filter + map + distinct" in {
val q = quote {
qr1.join(qr2)
.on { (d, a) => d.i == a.i }
.join {
qr3.filter(rp => rp.s == lift("a"))
}
.on { (da, p) => da._1.i == p.i }
.leftJoin(qr4)
.on { (dap, n) => dap._2.l == n.i }
.map { case (dap, n) => (dap._1._2.s, dap._1._1.l, n.map(_.i)) }
.distinct
}
testContext.run(q).string mustEqual
"SELECT DISTINCT a.s, d.l, n.i FROM TestEntity d INNER JOIN TestEntity2 a ON d.i = a.i INNER JOIN (SELECT rp.i, rp.l FROM TestEntity3 rp WHERE rp.s = ?) AS rp ON d.i = rp.i LEFT JOIN TestEntity4 n ON rp.l = n.i"
}
"multiple joins + map" in {
val q = quote {
qr1.leftJoin(qr2).on((a, b) => a.s == b.s).leftJoin(qr2).on((a, b) => a._1.s == b.s).map(_._1._1)
}
testContext.run(q).string mustEqual
"SELECT a.s, a.i, a.l, a.o, a.b FROM TestEntity a LEFT JOIN TestEntity2 b ON a.s = b.s LEFT JOIN TestEntity2 b1 ON a.s = b1.s"
}
}
| getquill/quill | quill-sql/src/test/scala/io/getquill/context/sql/norm/JoinSpec.scala | Scala | apache-2.0 | 4,140
package org.jetbrains.plugins.scala.debugger.exactBreakpoints
import com.intellij.debugger.SourcePosition
import com.intellij.debugger.engine.SourcePositionHighlighter
import com.intellij.psi.PsiDocumentManager
import com.intellij.util.DocumentUtil
import com.intellij.xdebugger.XDebuggerUtil
import org.jetbrains.plugins.scala.debugger.{ScalaDebuggerTestCase, ScalaPositionManager, ScalaVersion_2_11, ScalaVersion_2_12}
import org.jetbrains.plugins.scala.extensions.inReadAction
import org.junit.Assert
import scala.collection.JavaConverters._
/**
* @author Nikolay.Tropin
*/
class ExactBreakpointTest extends ExactBreakpointTestBase with ScalaVersion_2_11
class ExactBreakpointTest_212 extends ExactBreakpointTestBase with ScalaVersion_2_12
abstract class ExactBreakpointTestBase extends ScalaDebuggerTestCase {
case class Breakpoint(line: Int, ordinal: Integer) {
override def toString: String = s"line = $line, ordinal=$ordinal"
}
private def addBreakpoint(b: Breakpoint): Unit = addBreakpoint(b.line, mainFileName, b.ordinal)
protected def checkVariants(lineNumber: Int, variants: String*) = {
val xSourcePosition = XDebuggerUtil.getInstance().createPosition(getVirtualFile(getFileInSrc(mainFileName)), lineNumber)
val foundVariants = scalaLineBreakpointType.computeVariants(getProject, xSourcePosition).asScala.map(_.getText)
Assert.assertEquals("Wrong set of variants found: ", variants, foundVariants)
}
protected def checkStoppedAtBreakpointAt(breakpoints: Breakpoint*)(sourcePositionText: String) = {
checkStopResumeSeveralTimes(breakpoints: _*)(sourcePositionText)
}
protected def checkStopResumeSeveralTimes(breakpoints: Breakpoint*)(sourcePositions: String*) = {
def message(expected: String, actual: String) = {
s"Wrong source position. Expected: $expected, actual: $actual"
}
clearBreakpoints()
breakpoints.foreach(addBreakpoint)
runDebugger() {
for (expected <- sourcePositions) {
waitForBreakpoint()
managed {
val location = suspendContext.getFrameProxy.getStackFrame.location
inReadAction {
val sourcePosition = new ScalaPositionManager(getDebugProcess).getSourcePosition(location)
val text: String = highlightedText(sourcePosition)
Assert.assertTrue(message(expected, text), text.startsWith(expected.stripSuffix("...")))
}
}
resume()
}
}
}
private def highlightedText(sourcePosition: SourcePosition): String = {
val elemRange = SourcePositionHighlighter.getHighlightRangeFor(sourcePosition)
val document = PsiDocumentManager.getInstance(getProject).getDocument(sourcePosition.getFile)
val lineRange = DocumentUtil.getLineTextRange(document, sourcePosition.getLine)
val textRange = if (elemRange != null) elemRange.intersection(lineRange) else lineRange
document.getText(textRange).trim
}
protected def checkNotStoppedAtBreakpointAt(breakpoint: Breakpoint) = {
clearBreakpoints()
addBreakpoint(breakpoint)
runDebugger() {
Assert.assertTrue(s"Stopped at breakpoint: $breakpoint", processTerminatedNoBreakpoints())
}
}
addSourceFile("OneLine.scala",
"""object OneLine {
| def main(args: Array[String]) {
| Seq(1).map(x => x + 1).filter(_ > 10).foreach(println)
| }
|}""".stripMargin.trim
)
def testOneLine(): Unit = {
checkVariants(lineNumber = 2, "All", "line in function main", "x => x + 1", "_ > 10", "println")
checkStopResumeSeveralTimes(Breakpoint(2, null))("Seq(1).map(...", "x => x + 1", "_ > 10")
checkStoppedAtBreakpointAt(Breakpoint(2, -1))("Seq(1).map(...")
checkStoppedAtBreakpointAt(Breakpoint(2, 0))("x => x + 1")
checkStoppedAtBreakpointAt(Breakpoint(2, 1))("_ > 10")
checkNotStoppedAtBreakpointAt(Breakpoint(2, 2))
}
addSourceFile("Either.scala",
"""object Either {
| def main(args: Array[String]) {
| val x: Either[String, Int] = Right(1)
| val y: Either[String, Int] = Left("aaa")
|
| x.fold(_.substring(1), _ + 1)
| y.fold(_.substring(2), _ + 2)
| }
|}""".stripMargin.trim
)
def testEither(): Unit = {
checkVariants(lineNumber = 5, "All", "line in function main", "_.substring(1)", "_ + 1")
checkStopResumeSeveralTimes(Breakpoint(5, null), Breakpoint(6, null))("x.fold(...", "_ + 1", "y.fold(...", "_.substring(2)")
checkStoppedAtBreakpointAt(Breakpoint(5, 1))("_ + 1")
checkStoppedAtBreakpointAt(Breakpoint(6, 0))("_.substring(2)")
checkNotStoppedAtBreakpointAt(Breakpoint(5, 0))
checkNotStoppedAtBreakpointAt(Breakpoint(6, 1))
}
addSourceFile("SeveralLines.scala",
"""object SeveralLines {
| def main(args: Array[String]) {
| Option("aaa").flatMap(_.headOption)
| .find(c => c.isDigit).getOrElse('0')
| }
|}""".stripMargin.trim
)
def testSeveralLines(): Unit = {
checkVariants(2, "All", "line in function main", "_.headOption")
checkVariants(3, "All", "line in function main", "c => c.isDigit", "'0'")
checkStopResumeSeveralTimes(Breakpoint(2, null), Breakpoint(3, null))("Option(\"aaa\")...", "_.headOption", ".find(...", "c => c.isDigit", "'0'")
checkStopResumeSeveralTimes(Breakpoint(2, -1), Breakpoint(3, -1))("Option(...", ".find(...")
checkStopResumeSeveralTimes(Breakpoint(2, 0), Breakpoint(3, 0))("_.headOption", "c => c.isDigit")
}
addSourceFile("NestedLambdas.scala",
"""object NestedLambdas {
| def main(args: Array[String]) {
| Seq("a").flatMap(x => x.find(_ == 'a').getOrElse('a').toString).foreach(c => println(Some(c).filter(_ == 'a').getOrElse('b')))
| }
|}""".stripMargin.trim
)
def testNestedLambdas(): Unit = {
checkVariants(2,
"All",
"line in function main",
"x => x.find(_ == 'a').getOrElse('a').toString",
"_ == 'a'",
"'a'",
"c => println(Some(c).filter(_ == 'a').getOrElse('b'))",
"_ == 'a'",
"'b'")
checkStopResumeSeveralTimes(Breakpoint(2, null))("Seq(\"a\").flatMap(...", "x => x.find(...", "_ == 'a'", "c => println...", "_ == 'a'")
checkNotStoppedAtBreakpointAt(Breakpoint(2, 2))
checkNotStoppedAtBreakpointAt(Breakpoint(2, 5))
}
addSourceFile("NestedLambdas2.scala",
"""object NestedLambdas2 {
| def main(args: Array[String]) {
| Seq("b").flatMap(x => x.find(_ == 'a').getOrElse('a').toString).foreach(c => println(Some(c).filter(_ == 'b').getOrElse('a')))
| }
|}""".stripMargin.trim
)
def testNestedLambdas2(): Unit = {
checkVariants(2,
"All",
"line in function main",
"x => x.find(_ == 'a').getOrElse('a').toString",
"_ == 'a'",
"'a'",
"c => println(Some(c).filter(_ == 'b').getOrElse('a'))",
"_ == 'b'",
"'a'")
checkStopResumeSeveralTimes(Breakpoint(2, null))("Seq(\"b\").flatMap(...", "x => x.find(...", "_ == 'a'", "'a'", "c => println...", "_ == 'b'", "'a'")
checkStoppedAtBreakpointAt(Breakpoint(2, 1))("_ == 'a'")
checkStoppedAtBreakpointAt(Breakpoint(2, 2))("'a'")
checkStoppedAtBreakpointAt(Breakpoint(2, 4))("_ == 'b'")
checkStoppedAtBreakpointAt(Breakpoint(2, 5))("'a'")
}
addSourceFile("ConstructorAndClassParam.scala",
"""object ConstructorAndClassParam {
| def main(args: Array[String]) {
| new BBB()
| }
|}
|
|class BBB extends AAA("a3".filter(_.isDigit)) {
| Seq(1).map(x => x + 1).filter(_ > 10)
|}
|
|class AAA(s: String)""".stripMargin.trim
)
def testConstructorAndClassParam(): Unit = {
checkVariants(6, "All", "constructor of BBB", "_.isDigit")
checkStopResumeSeveralTimes(Breakpoint(6, null), Breakpoint(10, null))("class BBB ...", "_.isDigit", "_.isDigit", "class AAA(...")
}
addSourceFile("EarlyDefAndTemplateBody.scala",
"""object EarlyDefAndTemplateBody {
| def main(args: Array[String]) {
| new CCC()
| }
|}
|
|class CCC extends {
| val x = None.getOrElse(Seq(1)).filter(_ > 0)
|} with DDD("") {
| Seq(1).map(x => x + 1).filter(_ > 10)
|}
|
|class DDD(s: String)""".stripMargin.trim
)
def testEarlyDefAndTemplateBody(): Unit = {
checkVariants(7, "All", "early definitions of CCC", "Seq(1)", "_ > 0")
checkVariants(9, "All", "line in containing block", "x => x + 1", "_ > 10")
checkStopResumeSeveralTimes(Breakpoint(7, null), Breakpoint(9, null))("val x = ...", "Seq(1)", "_ > 0", "Seq(1).map...", "x => x + 1", "_ > 10")
}
addSourceFile("NewTemplateDefinitionAsLambda.scala",
"""object NewTemplateDefinitionAsLambda {
| def main(args: Array[String]) {
| Seq("a").map(new ZZZ(_)).filter(_ => false).headOption.getOrElse(new ZZZ("1"))
| }
|}
|
|class ZZZ(s: String)""".stripMargin.trim
)
def testNewTemplateDefinitionAsLambda(): Unit = {
checkVariants(2, "All", "line in function main", "new ZZZ(_)", "_ => false", "new ZZZ(\"1\")")
checkStopResumeSeveralTimes(Breakpoint(2, null))("Seq(\"a\")...", "new ZZZ(_)", "_ => false", "new ZZZ(\"1\")")
}
addSourceFile("LineStartsWithDot.scala",
"""object LineStartsWithDot {
| def main(args: Array[String]) {
| Some(1)
| .map(_ + 1)
| .filter(i => i % 2 == 0)
| .foreach(println)
| }
|}""".stripMargin
)
def testLineStartsWithDot(): Unit = {
checkVariants(2) //no variants
checkVariants(3, "All", "line in function main", "_ + 1")
checkVariants(4, "All", "line in function main", "i => i % 2 == 0")
checkVariants(5, "All", "line in function main", "println")
checkStopResumeSeveralTimes(Breakpoint(2, null), Breakpoint(3, -1), Breakpoint(4, 0), Breakpoint(5, null))(
"Some(1)", ".map...", "i => i % 2 == 0", ".foreach...", "println"
)
}
addSourceFile("PartialFunctionArg.scala",
s"""object PartialFunctionArg {
| def main(args: Array[String]) {
| Seq(Option(1)).exists {
| case None =>
| true
| case Some(i) =>
| false
| }
| }
|}
""".stripMargin.trim)
def testPartialFunctionArg(): Unit = {
checkStopResumeSeveralTimes(Breakpoint(5, null), Breakpoint(6, null))(
"case Some(i) =>", "false"
)
}
}
| ghik/intellij-scala | test/org/jetbrains/plugins/scala/debugger/exactBreakpoints/ExactBreakpointTest.scala | Scala | apache-2.0 | 10,420 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.views.ihtHelpers.custom
import iht.views.ViewTestHelper
import iht.views.html.ihtHelpers.custom.generic_overview_table_item
class GenericOverviewTableItemTest extends ViewTestHelper {
lazy val genericOverviewTableItemView: generic_overview_table_item = app.injector.instanceOf[generic_overview_table_item]
"generic overview table item" must {
"contain correct message for date with an answer value" in {
val view = genericOverviewTableItemView(
id = "",
questionText = "",
questionScreenReaderText = "",
questionCategory = "date",
answerValue = "a",
link = None,
linkScreenReader = ""
)
val viewAsString = view.toString
messagesShouldBePresent(viewAsString, messagesApi("site.changeDate"))
}
"contain correct message for name with an answer value" in {
val view = genericOverviewTableItemView(
id = "",
questionText = "",
questionScreenReaderText = "",
questionCategory = "name",
answerValue = "a",
link = None,
linkScreenReader = ""
)
val viewAsString = view.toString
messagesShouldBePresent(viewAsString, messagesApi("site.changeName"))
}
"contain correct message for date with no answer value" in {
val view = genericOverviewTableItemView(
id = "",
questionText = "",
questionScreenReaderText = "",
questionCategory = "date",
link = None,
linkScreenReader = ""
)
val viewAsString = view.toString
messagesShouldBePresent(viewAsString, messagesApi("site.link.giveDate"))
}
"contain correct message for name with no answer value" in {
val view = genericOverviewTableItemView(
id = "",
questionText = "",
questionScreenReaderText = "",
questionCategory = "name",
link = None,
linkScreenReader = ""
)
val viewAsString = view.toString
messagesShouldBePresent(viewAsString, messagesApi("site.link.giveName"))
}
}
}
| hmrc/iht-frontend | test/iht/views/ihtHelpers/custom/GenericOverviewTableItemTest.scala | Scala | apache-2.0 | 2,676 |
package com.ponkotuy.proxy
import java.net.InetSocketAddress
import javax.net.ssl.SSLSession
import io.netty.handler.codec.http.{HttpRequest,HttpResponse}
import io.netty.handler.codec.http.HttpHeaders
import org.littleshoot.proxy._
import com.ponkotuy.util.Log
class LoggingActivityTracker extends ActivityTrackerAdapter with Log {
  override def requestReceivedFromClient(flowContext: FlowContext, httpRequest: HttpRequest): Unit = {
    logger.debug("request received from client to proxy. URL:{}", httpRequest.getUri)
  }
  override def requestSentToServer(flowContext: FullFlowContext, httpRequest: HttpRequest): Unit = {
    logger.debug("request sent from proxy to server. URL:{}", httpRequest.getUri)
  }
  override def bytesReceivedFromServer(flowContext: FullFlowContext, numberOfBytes: Int): Unit = {
    logger.trace("response received from server to proxy. {} bytes", numberOfBytes)
  }
  override def responseReceivedFromServer(flowContext: FullFlowContext, httpResponse: HttpResponse): Unit = {
    logger.debug(
      "response received from server to proxy. Status:{}, Transfer:{}, Content:{}",
      httpResponse.getStatus,
      httpResponse.headers.get(HttpHeaders.Names.TRANSFER_ENCODING),
      httpResponse.headers.get(HttpHeaders.Names.CONTENT_ENCODING)
    )
  }
  override def responseSentToClient(flowContext: FlowContext, httpResponse: HttpResponse): Unit = {
    logger.debug(
      "response sent to client from proxy. Status:{}, Transfer:{}, Content:{}",
      httpResponse.getStatus,
      httpResponse.headers.get(HttpHeaders.Names.TRANSFER_ENCODING),
      httpResponse.headers.get(HttpHeaders.Names.CONTENT_ENCODING)
    )
  }
  override def clientConnected(clientAddress: InetSocketAddress): Unit = {
    logger.info("Client connected from:{}", clientAddress)
  }
  override def clientDisconnected(clientAddress: InetSocketAddress, sslSession: SSLSession): Unit = {
    logger.info("Client disconnected from:{}", clientAddress)
  }
}
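// Hedged usage sketch (added for illustration, not part of the original file): one plausible
// way to attach this tracker to a LittleProxy server. The port and the object name are made up;
// the bootstrap()/withPort()/plusActivityTracker()/start() chain is the public LittleProxy
// bootstrap API, not code taken from this repository.
import org.littleshoot.proxy.impl.DefaultHttpProxyServer

object LoggingActivityTrackerExample {
  def main(args: Array[String]): Unit = {
    DefaultHttpProxyServer
      .bootstrap()
      .withPort(8080)                                  // hypothetical local port
      .plusActivityTracker(new LoggingActivityTracker) // register the tracker defined above
      .start()
  }
}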
| ttdoda/MyFleetGirls | client/src/main/scala/com/ponkotuy/proxy/LoggingActivityTracker.scala | Scala | mit | 1,966 |
package com.datastax.spark.connector.japi
import scala.reflect.runtime.universe._
import com.datastax.spark.connector.types.{NullableTypeConverter, TypeConverter}
import com.datastax.spark.connector.{CassandraRowMetadata, UDTValue => ConnectorUDTValue}
final class UDTValue(val metaData: CassandraRowMetadata, val columnValues: IndexedSeq[AnyRef])
extends JavaGettableData with Serializable
object UDTValue {
val UDTValueTypeTag = implicitly[TypeTag[UDTValue]]
implicit object UDTValueConverter extends NullableTypeConverter[UDTValue] {
def targetTypeTag = UDTValueTypeTag
def convertPF = {
case x: UDTValue => x
case x: ConnectorUDTValue =>
new UDTValue(x.metaData, x.columnValues)
}
}
TypeConverter.registerConverter(UDTValueConverter)
}
| ponkin/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/japi/UDTValue.scala | Scala | apache-2.0 | 789
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.secondaryindex.events
import scala.collection.JavaConverters._
import org.apache.log4j.Logger
import org.apache.spark.internal.Logging
import org.apache.spark.sql.CarbonEnv
import org.apache.spark.sql.execution.command.AlterTableDataTypeChangeModel
import org.apache.spark.sql.execution.command.schema.CarbonAlterTableColRenameDataTypeChangeCommand
import org.apache.spark.sql.hive.CarbonHiveIndexMetadataUtil
import org.apache.spark.sql.index.CarbonIndexUtil
import org.apache.spark.util.AlterTableUtil
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.metadata.index.IndexType
import org.apache.carbondata.core.metadata.schema.indextable.IndexTableInfo
import org.apache.carbondata.events._
import org.apache.carbondata.events.exception.PostEventException
import org.apache.carbondata.format.TableInfo
/**
* Listener class to rename the column present in index tables
*/
class AlterTableColumnRenameEventListener extends OperationEventListener with Logging {
val LOGGER: Logger = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
/**
* Called on a specified event occurrence
*
*/
override protected def onEvent(event: Event,
operationContext: OperationContext): Unit = {
event match {
case alterTableColRenameAndDataTypeChangePreEvent
: AlterTableColRenameAndDataTypeChangePreEvent =>
val carbonTable = alterTableColRenameAndDataTypeChangePreEvent.carbonTable
// direct column rename on index table is not allowed
if (carbonTable.isIndexTable) {
if (!operationContext.getProperty("childTableColumnRename").toString.toBoolean) {
throw new MalformedCarbonCommandException(
"Alter table column rename is not allowed on index table.")
}
}
case alterTableColRenameAndDataTypeChangePostEvent
: AlterTableColRenameAndDataTypeChangePostEvent
if alterTableColRenameAndDataTypeChangePostEvent
.alterTableDataTypeChangeModel.isColumnRename =>
val alterTableDataTypeChangeModel = alterTableColRenameAndDataTypeChangePostEvent
.alterTableDataTypeChangeModel
val sparkSession = alterTableColRenameAndDataTypeChangePostEvent.sparkSession
val databaseName = alterTableDataTypeChangeModel.databaseName
val carbonTable = alterTableColRenameAndDataTypeChangePostEvent.carbonTable
val catalog = CarbonEnv
.getInstance(alterTableColRenameAndDataTypeChangePostEvent.sparkSession).carbonMetaStore
val newColumnName = alterTableDataTypeChangeModel.newColumnName
val oldColumnName = alterTableDataTypeChangeModel.columnName
val dataTypeInfo = alterTableDataTypeChangeModel.dataTypeInfo
val carbonColumns = carbonTable
.getCreateOrderColumn.asScala
.filter(!_.isInvisible)
var indexTablesToRenameColumn: Seq[String] = Seq.empty
val secondaryIndexMap =
carbonTable.getIndexesMap.get(IndexType.SI.getIndexProviderName)
if (null != secondaryIndexMap) {
val iterator = secondaryIndexMap.entrySet().iterator()
while (iterator.hasNext) {
val indexTable = iterator.next()
val indexCols = indexTable.getValue.get(CarbonCommonConstants.INDEX_COLUMNS).split(",")
indexCols.foreach(column =>
if (oldColumnName.equalsIgnoreCase(column)) {
indexTablesToRenameColumn ++= Seq(indexTable.getKey)
})
}
}
val indexTablesRenamedSuccess = indexTablesToRenameColumn
.takeWhile { indexTable =>
val alterTableColRenameAndDataTypeChangeModel =
AlterTableDataTypeChangeModel(
dataTypeInfo,
databaseName,
indexTable,
oldColumnName,
newColumnName,
alterTableDataTypeChangeModel.isColumnRename
)
          // Fire CarbonAlterTableColRenameDataTypeChangeCommand for each index table
try {
CarbonAlterTableColRenameDataTypeChangeCommand(
alterTableColRenameAndDataTypeChangeModel, childTableColumnRename = true)
.run(alterTableColRenameAndDataTypeChangePostEvent.sparkSession)
LOGGER
.info(s"Column rename for index $indexTable is successful. Index column " +
s"$oldColumnName is successfully renamed to $newColumnName")
true
} catch {
case ex: Exception =>
LOGGER
.error(
"column rename is failed for index table, reverting the changes for all the " +
"successfully renamed index tables.",
ex)
false
}
}
        // the number of successfully renamed index table columns should equal the number of
        // index tables that need the column renamed; otherwise revert the ones that did succeed
val needRevert = indexTablesToRenameColumn.length != indexTablesRenamedSuccess.length
if (needRevert) {
indexTablesRenamedSuccess.foreach { indexTable =>
val indexCarbonTable = CarbonEnv.getCarbonTable(databaseName, indexTable)(sparkSession)
if (indexCarbonTable != null) {
            // failed tables are automatically taken care of in
            // CarbonAlterTableColRenameDataTypeChangeCommand; we only need to revert the tables
            // that succeeded, so get the latest timestamp from the schema evolution history
val thriftTable: TableInfo = catalog.getThriftTableInfo(indexCarbonTable)
val evolutionEntryList = thriftTable.fact_table.schema_evolution
.schema_evolution_history
AlterTableUtil
.revertColumnRenameAndDataTypeChanges(indexCarbonTable.getDatabaseName,
indexCarbonTable.getTableName,
evolutionEntryList.get(evolutionEntryList.size() - 1).time_stamp)(
alterTableColRenameAndDataTypeChangePostEvent.sparkSession)
}
}
throw PostEventException("Alter table column rename failed for index tables")
} else {
val database = sparkSession.catalog.currentDatabase
if (indexTablesRenamedSuccess.nonEmpty) {
// set the new indexInfo after column rename
val oldIndexInfo = carbonTable.getIndexInfo
val indexInfo = IndexTableInfo
.updateIndexColumns(oldIndexInfo, oldColumnName, newColumnName)
sparkSession.sql(
s"""ALTER TABLE $database.${
carbonTable.getTableName
} SET SERDEPROPERTIES ('indexInfo' = '$indexInfo')""".stripMargin).collect()
CarbonEnv.getInstance(sparkSession).carbonMetaStore
.removeTableFromMetadata(carbonTable.getDatabaseName, carbonTable.getTableName)
}
CarbonHiveIndexMetadataUtil.refreshTable(database, carbonTable.getTableName, sparkSession)
}
case _ =>
}
}
}
| zzcclp/carbondata | integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableColumnRenameEventListener.scala | Scala | apache-2.0 | 8,104 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package statements
import com.intellij.psi.PsiClass
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypeParametersOwner
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScObject, ScTrait}
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.{Invariant, TypeParameterType}
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.macroAnnotations.{CachedInUserData, ModCount}
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
trait ScTypeAliasDefinition extends ScTypeAlias {
override def isDefinition: Boolean = true
def aliasedTypeElement: Option[ScTypeElement]
@CachedInUserData(this, ModCount.getBlockModificationCount)
def aliasedType: TypeResult =
aliasedTypeElement.map {
_.`type`()
}.getOrElse(Failure("No alias type"))
def lowerBound: TypeResult = aliasedType
def upperBound: TypeResult = aliasedType
def isExactAliasFor(cls: PsiClass): Boolean = {
val isDefinedInObject = containingClass match {
case obj: ScObject if obj.isStatic => true
case _ => false
}
isDefinedInObject && isAliasFor(cls)
}
def isAliasFor(cls: PsiClass): Boolean = {
if (cls.getTypeParameters.length != typeParameters.length) false
else if (cls.hasTypeParameters) {
val typeParamsAreAppliedInOrderToCorrectClass = aliasedType.getOrAny match {
case pte: ScParameterizedType =>
val refersToClass = pte.designator.equiv(ScalaType.designator(cls))
val typeParamsAppliedInOrder = (pte.typeArguments corresponds typeParameters) {
case (tpt: TypeParameterType, tp) if tpt.psiTypeParameter == tp => true
case _ => false
}
refersToClass && typeParamsAppliedInOrder
case _ => false
}
val varianceAndBoundsMatch = cls match {
case sc0@(_: ScClass | _: ScTrait) =>
val sc = sc0.asInstanceOf[ScTypeParametersOwner]
(typeParameters corresponds sc.typeParameters) {
case (tp1, tp2) => tp1.variance == tp2.variance && tp1.upperBound == tp2.upperBound && tp1.lowerBound == tp2.lowerBound &&
tp1.contextBound.isEmpty && tp2.contextBound.isEmpty && tp1.viewBound.isEmpty && tp2.viewBound.isEmpty
}
case _ => // Java class
(typeParameters corresponds cls.getTypeParameters) {
case (tp1, tp2) => tp1.variance == Invariant && tp1.upperTypeElement.isEmpty && tp2.getExtendsListTypes.isEmpty &&
tp1.lowerTypeElement.isEmpty && tp1.contextBound.isEmpty && tp1.viewBound.isEmpty
}
}
typeParamsAreAppliedInOrderToCorrectClass && varianceAndBoundsMatch
}
else {
val clsType = ScalaType.designator(cls)
typeParameters.isEmpty && aliasedType.getOrElse(return false).equiv(clsType)
}
}
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/statements/ScTypeAliasDefinition.scala | Scala | apache-2.0 | 3,058
package ch.uzh.dyndco.algorithms.mgm
import collection.mutable.Map
import collection.mutable.Set
import com.signalcollect.AbstractVertex
import ch.uzh.dyndco.problems.MeetingConstraints
import scala.collection.mutable.MutableList
import com.signalcollect.Graph
import com.signalcollect.StateForwarderEdge
import ch.uzh.dyndco.problems.MeetingSchedulingProblem
import com.signalcollect.GraphBuilder
import ch.uzh.dyndco.problems.Problem
import ch.uzh.dyndco.stack.graph.DynamicGraph
import ch.uzh.dyndco.stack.vertex.DynamicVertex
import ch.uzh.dyndco.stack.graph.GraphFactory
import ch.uzh.dyndco.stack.vertex.MeetingSchedulingVertex
class MGMGraph (
vertices_ : Set[MGMVertex],
neighbourhoods_ : Map[Int, Set[MGMVertex]],
agentIndices_ : Map[Int, Map[Any,Int]],
meetingIndices_ : Map[Int, Map[Any,Int]],
graph_ : Graph[Any,Any]) extends DynamicGraph {
var vertices = vertices_
var neighbourhoods = neighbourhoods_
var agentIndices = agentIndices_
var meetingIndices = meetingIndices_
var graph = graph_
def nextNeighbourhood() : Int = neighbourhoods.size + 1
def nextAgent : Int = vertices.size + 1
def numOfAgents : Int = vertices.size
def numOfNeighbourhoods : Int = neighbourhoods.size
def getAgents : Set[DynamicVertex] = vertices.asInstanceOf[Set[DynamicVertex]]
def getFactory : GraphFactory[DynamicGraph, Problem] = MGMGraphFactory.asInstanceOf[GraphFactory[DynamicGraph, Problem]]
def show {
showMeetingResults(neighbourhoods
.asInstanceOf[Map[Int, Set[MeetingSchedulingVertex]]])
showAgentResults(vertices
.asInstanceOf[Set[MeetingSchedulingVertex]])
}
}
| danihegglin/DynDCO | src/main/scala/ch/uzh/dyndco/algorithms/mgm/MGMGraph.scala | Scala | apache-2.0 | 1,664 |
package dc.json
import spray.json.DefaultJsonProtocol
case class CheckpointRow(id: String,
key: (Int, String),
value: Checkpoint)
object CheckpointRow extends DefaultJsonProtocol {
implicit val checkpointRowFormat = jsonFormat3(CheckpointRow.apply)
}
| MagnusAk78/dynamic-checklist-server | tools/src/main/scala/dc/json/CheckpointRow.scala | Scala | gpl-3.0 | 305
/*
* Copyright (c) 2013-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the
* Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the Apache License Version 2.0 for the specific
* language governing permissions and limitations there under.
*/
import sbt._
import Keys._
object BuildSettings {
// Basic settings for our app
lazy val basicSettings = Seq[Setting[_]](
organization := "com.snowplowanalytics",
version := "0.7.0",
description := "Scala Stream Collector for Snowplow raw events",
scalaVersion := "2.10.5",
scalacOptions := Seq("-deprecation", "-encoding", "utf8",
"-unchecked", "-feature", "-target:jvm-1.7"),
scalacOptions in Test := Seq("-Yrangepos"),
maxErrors := 5,
// http://www.scala-sbt.org/0.13.0/docs/Detailed-Topics/Forking.html
fork in run := true,
resolvers ++= Dependencies.resolutionRepos
)
// Makes our SBT app settings available from within the app
lazy val scalifySettings = Seq(sourceGenerators in Compile <+=
(sourceManaged in Compile, version, name, organization) map
{ (d, v, n, o) =>
val file = d / "settings.scala"
IO.write(file, s"""package com.snowplowanalytics.snowplow.collectors.scalastream.generated
|object Settings {
| val organization = "$o"
| val version = "$v"
| val name = "$n"
| val shortName = "ssc"
|}
|""".stripMargin)
Seq(file)
})
// sbt-assembly settings for building an executable
import sbtassembly.Plugin._
import AssemblyKeys._
lazy val sbtAssemblySettings = assemblySettings ++ Seq(
// Executable jarfile
assemblyOption in assembly ~= { _.copy(prependShellScript = Some(defaultShellScript)) },
// Name it as an executable
jarName in assembly := { s"${name.value}-${version.value}" }
)
lazy val buildSettings = basicSettings ++ scalifySettings ++ sbtAssemblySettings
}
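// Hedged usage sketch (added for illustration, not part of the original file): how these
// settings are typically consumed from an sbt 0.13 Build.scala. The object and project names
// are assumptions; the real project definition in this repository may differ.
object ExampleBuild extends Build {
  lazy val collector = Project("scala-stream-collector-example", file("."))
    .settings(BuildSettings.buildSettings: _*)
}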
| bigdecisions/snowplow | 2-collectors/scala-stream-collector/project/BuildSettings.scala | Scala | apache-2.0 | 2,476 |
import sbt._
object Version {
final val Scala = "2.11.8"
final val ScalaTest = "3.0.0"
}
object Library {
val scalaTest = "org.scalatest" %% "scalatest" % Version.ScalaTest
}
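// Hedged usage sketch (added for illustration, not part of the original file): one way a build
// definition could splice these into libraryDependencies. The grouping object and the "test"
// scope are assumptions, not taken from this repository.
object ExampleDependencies {
  import Library._
  val testDependencies: Seq[ModuleID] = Seq(scalaTest % "test")
}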
| beikern/functional-programming-scala | project/Dependencies.scala | Scala | apache-2.0 | 187 |
def contains[T](elem: T): Boolean = this match {
case Niil => false
case head ::: tail => head == elem || tail.contains(elem)
}
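// Hedged context sketch (added for illustration, not part of the exercise step): the pattern
// match above presupposes a cons-list ADT roughly like the one below, with contains defined
// inside it. The names Liist, Niil and ::: are taken from the match; the real exercise
// definition may differ in details such as variance.
sealed trait Liist[+A] {
  def contains[T](elem: T): Boolean = this match {
    case Niil => false
    case head ::: tail => head == elem || tail.contains(elem)
  }
}
case object Niil extends Liist[Nothing]
final case class :::[+A](head: A, tail: Liist[A]) extends Liist[A]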
| grzegorzbalcerek/scala-exercises | Liist/stepLiistContains.scala | Scala | bsd-2-clause | 132 |
package de.choffmeister.auth.common.util
private[auth] object SequenceUtils {
  /** Compares two sequences in a way whose running time depends only on the longer length,
    * not on the position of the first mismatch, so timing cannot reveal how much of a
    * secret value matched. */
  def compareConstantTime[T](s1: Seq[T], s2: Seq[T]): Boolean = {
    var res = true
    val l = Math.max(s1.length, s2.length)
    for (i <- 0 until l) {
      // keep scanning even after a mismatch instead of returning early
      if (s1.length <= i || s2.length <= i || s1(i) != s2(i)) {
        res = false
      }
    }
    res
  }
}
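// Hedged usage sketch (added for illustration, not part of the original file): comparing a
// submitted token against the expected one without an early exit on the first differing
// character. The token values below are made up.
object SequenceUtilsExample {
  def main(args: Array[String]): Unit = {
    val expected = "s3cr3t-token".toSeq
    val provided = "s3cr3t-tokeX".toSeq
    // Scans max(expected.length, provided.length) positions regardless of where they diverge.
    println(SequenceUtils.compareConstantTime(expected, provided)) // prints: false
  }
}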
| choffmeister/auth-utils | auth-common/src/main/scala/de/choffmeister/auth/common/util/SequenceUtils.scala | Scala | mit | 346 |
package com.mindcandy.data.kryo.serializer
import com.esotericsoftware.kryo.io.{ Input, Output }
import com.esotericsoftware.kryo.{ Kryo, Serializer }
import com.googlecode.javaewah.{ EWAHCompressedBitmap => CBitSet }
import java.io.{ DataInputStream, DataOutputStream }
import scalaz.syntax.id._
class CBitSetSerializer extends Serializer[CBitSet] {
setAcceptsNull(false)
setImmutable(true)
def write(kryo: Kryo, output: Output, value: CBitSet): Unit =
value.serialize(new DataOutputStream(output))
def read(kryo: Kryo, input: Input, clazz: Class[CBitSet]): CBitSet =
new CBitSet() <| (bitset => bitset.deserialize(new DataInputStream(input)))
}
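// Hedged usage sketch (added for illustration, not part of the original file): a manual Kryo
// round trip with this serializer registered. The buffer size and bit positions are arbitrary;
// in the real project, registration presumably happens through the Kryo/Spark configuration
// rather than by hand.
object CBitSetSerializerExample {
  def main(args: Array[String]): Unit = {
    val kryo = new Kryo()
    kryo.register(classOf[CBitSet], new CBitSetSerializer)

    val bitmap = CBitSet.bitmapOf(1, 5, 42)   // compressed bitmap with three bits set
    val output = new Output(4096)             // fixed-size scratch buffer
    kryo.writeObject(output, bitmap)

    val input = new Input(output.toBytes)
    val restored = kryo.readObject(input, classOf[CBitSet])
    println(bitmap == restored)               // true if the round trip preserved the bitmap
  }
}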
| lvicentesanchez/fast-gt-perfect | src/main/scala/com/mindcandy/data/kryo/serializer/CBitSetSerializer.scala | Scala | mit | 667 |
package ilc
package feature
package maps
trait Types extends base.Types {
case class MapType(keyType: Type, valType: Type) extends Type {
override def traverse(f: Type => Type): Type = copy(f(keyType), f(valType))
}
}
| inc-lc/ilc-scala | src/main/scala/ilc/feature/maps/Types.scala | Scala | mit | 227 |
/*
* Copyright 2015 - 2016 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rbmhtechnology.eventuate.adapter.vertx
import akka.actor.{ ActorRef, Props }
import com.rbmhtechnology.eventuate.adapter.vertx.api.{ EndpointRouter, StorageProvider }
import io.vertx.core.Vertx
import scala.collection.immutable.Seq
import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.duration._
private[vertx] object VertxBatchConfirmationSender {
def props(id: String, eventLog: ActorRef, endpointRouter: EndpointRouter, vertx: Vertx, storageProvider: StorageProvider, batchSize: Int, confirmationTimeout: FiniteDuration): Props =
Props(new VertxBatchConfirmationSender(id, eventLog, endpointRouter, vertx, storageProvider, batchSize, confirmationTimeout))
}
private[vertx] class VertxBatchConfirmationSender(val id: String, val eventLog: ActorRef, val endpointRouter: EndpointRouter, val vertx: Vertx, val storageProvider: StorageProvider, batchSize: Int, val confirmationTimeout: FiniteDuration)
extends VertxEventDispatcher[Long, Long] with VertxSender with SequenceNumberProgressStore {
override def replayBatchSize: Int = batchSize
override def dispatch(events: Seq[EventEnvelope])(implicit ec: ExecutionContext): Future[Unit] =
Future.sequence(events.map(e => send[Any](e.address, e.evt, confirmationTimeout))).map(_ => Unit)
}
| RBMHTechnology/eventuate | eventuate-adapter-vertx/src/main/scala/com/rbmhtechnology/eventuate/adapter/vertx/VertxBatchConfirmationSender.scala | Scala | apache-2.0 | 1,970 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.sql.Date
import java.util.concurrent.ConcurrentHashMap
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.SparkException
import org.apache.spark.api.java.function.FlatMapGroupsWithStateFunction
import org.apache.spark.sql.Encoder
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeProjection, UnsafeRow}
import org.apache.spark.sql.catalyst.plans.logical.FlatMapGroupsWithState
import org.apache.spark.sql.catalyst.plans.physical.UnknownPartitioning
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.execution.RDDScanExec
import org.apache.spark.sql.execution.streaming.{FlatMapGroupsWithStateExec, GroupStateImpl, MemoryStream}
import org.apache.spark.sql.execution.streaming.state.{StateStore, StateStoreId, StateStoreMetrics, UnsafeRowPair}
import org.apache.spark.sql.streaming.FlatMapGroupsWithStateSuite.MemoryStateStore
import org.apache.spark.sql.streaming.util.StreamManualClock
import org.apache.spark.sql.types.{DataType, IntegerType}
/** Class to check custom state types */
case class RunningCount(count: Long)
case class Result(key: Long, count: Int)
class FlatMapGroupsWithStateSuite extends StateStoreMetricsTest with BeforeAndAfterAll {
import testImplicits._
import GroupStateImpl._
import GroupStateTimeout._
override def afterAll(): Unit = {
super.afterAll()
StateStore.stop()
}
test("GroupState - get, exists, update, remove") {
var state: GroupStateImpl[String] = null
def testState(
expectedData: Option[String],
shouldBeUpdated: Boolean = false,
shouldBeRemoved: Boolean = false): Unit = {
if (expectedData.isDefined) {
assert(state.exists)
assert(state.get === expectedData.get)
} else {
assert(!state.exists)
intercept[NoSuchElementException] {
state.get
}
}
assert(state.getOption === expectedData)
assert(state.hasUpdated === shouldBeUpdated)
assert(state.hasRemoved === shouldBeRemoved)
}
// === Tests for state in streaming queries ===
// Updating empty state
state = GroupStateImpl.createForStreaming(None, 1, 1, NoTimeout, hasTimedOut = false)
testState(None)
state.update("")
testState(Some(""), shouldBeUpdated = true)
    // Updating existing state
state = GroupStateImpl.createForStreaming(Some("2"), 1, 1, NoTimeout, hasTimedOut = false)
testState(Some("2"))
state.update("3")
testState(Some("3"), shouldBeUpdated = true)
// Removing state
state.remove()
testState(None, shouldBeRemoved = true, shouldBeUpdated = false)
state.remove() // should be still callable
state.update("4")
testState(Some("4"), shouldBeRemoved = false, shouldBeUpdated = true)
    // Updating with null throws an exception
intercept[IllegalArgumentException] {
state.update(null)
}
}
test("GroupState - setTimeout - with NoTimeout") {
for (initValue <- Seq(None, Some(5))) {
val states = Seq(
GroupStateImpl.createForStreaming(initValue, 1000, 1000, NoTimeout, hasTimedOut = false),
GroupStateImpl.createForBatch(NoTimeout)
)
for (state <- states) {
// for streaming queries
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
// for batch queries
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
}
}
}
test("GroupState - setTimeout - with ProcessingTimeTimeout") {
// for streaming queries
var state: GroupStateImpl[Int] = GroupStateImpl.createForStreaming(
None, 1000, 1000, ProcessingTimeTimeout, hasTimedOut = false)
assert(state.getTimeoutTimestamp === NO_TIMESTAMP)
state.setTimeoutDuration(500)
assert(state.getTimeoutTimestamp === 1500) // can be set without initializing state
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
state.update(5)
assert(state.getTimeoutTimestamp === 1500) // does not change
state.setTimeoutDuration(1000)
assert(state.getTimeoutTimestamp === 2000)
state.setTimeoutDuration("2 second")
assert(state.getTimeoutTimestamp === 3000)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
state.remove()
assert(state.getTimeoutTimestamp === 3000) // does not change
state.setTimeoutDuration(500) // can still be set
assert(state.getTimeoutTimestamp === 1500)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
// for batch queries
state = GroupStateImpl.createForBatch(ProcessingTimeTimeout).asInstanceOf[GroupStateImpl[Int]]
assert(state.getTimeoutTimestamp === NO_TIMESTAMP)
state.setTimeoutDuration(500)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
state.update(5)
state.setTimeoutDuration(1000)
state.setTimeoutDuration("2 second")
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
state.remove()
state.setTimeoutDuration(500)
testTimeoutTimestampNotAllowed[UnsupportedOperationException](state)
}
test("GroupState - setTimeout - with EventTimeTimeout") {
var state: GroupStateImpl[Int] = GroupStateImpl.createForStreaming(
None, 1000, 1000, EventTimeTimeout, false)
assert(state.getTimeoutTimestamp === NO_TIMESTAMP)
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
state.setTimeoutTimestamp(5000)
assert(state.getTimeoutTimestamp === 5000) // can be set without initializing state
state.update(5)
assert(state.getTimeoutTimestamp === 5000) // does not change
state.setTimeoutTimestamp(10000)
assert(state.getTimeoutTimestamp === 10000)
state.setTimeoutTimestamp(new Date(20000))
assert(state.getTimeoutTimestamp === 20000)
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
state.remove()
assert(state.getTimeoutTimestamp === 20000)
state.setTimeoutTimestamp(5000)
assert(state.getTimeoutTimestamp === 5000) // can be set after removing state
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
// for batch queries
state = GroupStateImpl.createForBatch(EventTimeTimeout).asInstanceOf[GroupStateImpl[Int]]
assert(state.getTimeoutTimestamp === NO_TIMESTAMP)
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
state.setTimeoutTimestamp(5000)
state.update(5)
state.setTimeoutTimestamp(10000)
state.setTimeoutTimestamp(new Date(20000))
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
state.remove()
state.setTimeoutTimestamp(5000)
testTimeoutDurationNotAllowed[UnsupportedOperationException](state)
}
test("GroupState - illegal params to setTimeout") {
var state: GroupStateImpl[Int] = null
// Test setTimeout****() with illegal values
def testIllegalTimeout(body: => Unit): Unit = {
intercept[IllegalArgumentException] {
body
}
assert(state.getTimeoutTimestamp === NO_TIMESTAMP)
}
state = GroupStateImpl.createForStreaming(
Some(5), 1000, 1000, ProcessingTimeTimeout, hasTimedOut = false)
testIllegalTimeout {
state.setTimeoutDuration(-1000)
}
testIllegalTimeout {
state.setTimeoutDuration(0)
}
testIllegalTimeout {
state.setTimeoutDuration("-2 second")
}
testIllegalTimeout {
state.setTimeoutDuration("-1 month")
}
testIllegalTimeout {
state.setTimeoutDuration("1 month -1 day")
}
state = GroupStateImpl.createForStreaming(
Some(5), 1000, 1000, EventTimeTimeout, hasTimedOut = false)
testIllegalTimeout {
state.setTimeoutTimestamp(-10000)
}
testIllegalTimeout {
state.setTimeoutTimestamp(10000, "-3 second")
}
testIllegalTimeout {
state.setTimeoutTimestamp(10000, "-1 month")
}
testIllegalTimeout {
state.setTimeoutTimestamp(10000, "1 month -1 day")
}
testIllegalTimeout {
state.setTimeoutTimestamp(new Date(-10000))
}
testIllegalTimeout {
state.setTimeoutTimestamp(new Date(-10000), "-3 second")
}
testIllegalTimeout {
state.setTimeoutTimestamp(new Date(-10000), "-1 month")
}
testIllegalTimeout {
state.setTimeoutTimestamp(new Date(-10000), "1 month -1 day")
}
}
test("GroupState - hasTimedOut") {
for (timeoutConf <- Seq(NoTimeout, ProcessingTimeTimeout, EventTimeTimeout)) {
// for streaming queries
for (initState <- Seq(None, Some(5))) {
val state1 = GroupStateImpl.createForStreaming(
initState, 1000, 1000, timeoutConf, hasTimedOut = false)
assert(state1.hasTimedOut === false)
val state2 = GroupStateImpl.createForStreaming(
initState, 1000, 1000, timeoutConf, hasTimedOut = true)
assert(state2.hasTimedOut === true)
}
// for batch queries
assert(GroupStateImpl.createForBatch(timeoutConf).hasTimedOut === false)
}
}
test("GroupState - primitive type") {
var intState = GroupStateImpl.createForStreaming[Int](
None, 1000, 1000, NoTimeout, hasTimedOut = false)
intercept[NoSuchElementException] {
intState.get
}
assert(intState.getOption === None)
intState = GroupStateImpl.createForStreaming[Int](
Some(10), 1000, 1000, NoTimeout, hasTimedOut = false)
assert(intState.get == 10)
intState.update(0)
assert(intState.get == 0)
intState.remove()
intercept[NoSuchElementException] {
intState.get
}
}
// Values used for testing StateStoreUpdater
val currentBatchTimestamp = 1000
val currentBatchWatermark = 1000
val beforeTimeoutThreshold = 999
val afterTimeoutThreshold = 1001
// Tests for StateStoreUpdater.updateStateForKeysWithData() when timeout = NoTimeout
for (priorState <- Seq(None, Some(0))) {
val priorStateStr = if (priorState.nonEmpty) "prior state set" else "no prior state"
val testName = s"NoTimeout - $priorStateStr - "
testStateUpdateWithData(
testName + "no update",
stateUpdates = state => { /* do nothing */ },
timeoutConf = GroupStateTimeout.NoTimeout,
priorState = priorState,
expectedState = priorState) // should not change
testStateUpdateWithData(
testName + "state updated",
stateUpdates = state => { state.update(5) },
timeoutConf = GroupStateTimeout.NoTimeout,
priorState = priorState,
expectedState = Some(5)) // should change
testStateUpdateWithData(
testName + "state removed",
stateUpdates = state => { state.remove() },
timeoutConf = GroupStateTimeout.NoTimeout,
priorState = priorState,
expectedState = None) // should be removed
}
// Tests for StateStoreUpdater.updateStateForKeysWithData() when timeout != NoTimeout
for (priorState <- Seq(None, Some(0))) {
for (priorTimeoutTimestamp <- Seq(NO_TIMESTAMP, 1000)) {
var testName = ""
if (priorState.nonEmpty) {
testName += "prior state set, "
if (priorTimeoutTimestamp == 1000) {
testName += "prior timeout set"
} else {
testName += "no prior timeout"
}
} else {
testName += "no prior state"
}
for (timeoutConf <- Seq(ProcessingTimeTimeout, EventTimeTimeout)) {
testStateUpdateWithData(
s"$timeoutConf - $testName - no update",
stateUpdates = state => { /* do nothing */ },
timeoutConf = timeoutConf,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = priorState, // state should not change
expectedTimeoutTimestamp = NO_TIMESTAMP) // timestamp should be reset
testStateUpdateWithData(
s"$timeoutConf - $testName - state updated",
stateUpdates = state => { state.update(5) },
timeoutConf = timeoutConf,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = NO_TIMESTAMP) // timestamp should be reset
testStateUpdateWithData(
s"$timeoutConf - $testName - state removed",
stateUpdates = state => { state.remove() },
timeoutConf = timeoutConf,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = None) // state should be removed
}
testStateUpdateWithData(
s"ProcessingTimeTimeout - $testName - state and timeout duration updated",
stateUpdates =
(state: GroupState[Int]) => { state.update(5); state.setTimeoutDuration(5000) },
timeoutConf = ProcessingTimeTimeout,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = currentBatchTimestamp + 5000) // timestamp should change
testStateUpdateWithData(
s"EventTimeTimeout - $testName - state and timeout timestamp updated",
stateUpdates =
(state: GroupState[Int]) => { state.update(5); state.setTimeoutTimestamp(5000) },
timeoutConf = EventTimeTimeout,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = 5000) // timestamp should change
testStateUpdateWithData(
s"EventTimeTimeout - $testName - timeout timestamp updated to before watermark",
stateUpdates =
(state: GroupState[Int]) => {
state.update(5)
intercept[IllegalArgumentException] {
state.setTimeoutTimestamp(currentBatchWatermark - 1) // try to set to < watermark
}
},
timeoutConf = EventTimeTimeout,
priorState = priorState,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = NO_TIMESTAMP) // timestamp should not update
}
}
  // Currently disallowed cases for StateStoreUpdater.updateStateForKeysWithData().
  // Try to remove these cases in the future.
for (priorTimeoutTimestamp <- Seq(NO_TIMESTAMP, 1000)) {
val testName =
if (priorTimeoutTimestamp != NO_TIMESTAMP) "prior timeout set" else "no prior timeout"
testStateUpdateWithData(
s"ProcessingTimeTimeout - $testName - setting timeout without init state not allowed",
stateUpdates = state => { state.setTimeoutDuration(5000) },
timeoutConf = ProcessingTimeTimeout,
priorState = None,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedException = classOf[IllegalStateException])
testStateUpdateWithData(
s"ProcessingTimeTimeout - $testName - setting timeout with state removal not allowed",
stateUpdates = state => { state.remove(); state.setTimeoutDuration(5000) },
timeoutConf = ProcessingTimeTimeout,
priorState = Some(5),
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedException = classOf[IllegalStateException])
testStateUpdateWithData(
s"EventTimeTimeout - $testName - setting timeout without init state not allowed",
stateUpdates = state => { state.setTimeoutTimestamp(10000) },
timeoutConf = EventTimeTimeout,
priorState = None,
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedException = classOf[IllegalStateException])
testStateUpdateWithData(
s"EventTimeTimeout - $testName - setting timeout with state removal not allowed",
stateUpdates = state => { state.remove(); state.setTimeoutTimestamp(10000) },
timeoutConf = EventTimeTimeout,
priorState = Some(5),
priorTimeoutTimestamp = priorTimeoutTimestamp,
expectedException = classOf[IllegalStateException])
}
// Tests for StateStoreUpdater.updateStateForTimedOutKeys()
val preTimeoutState = Some(5)
for (timeoutConf <- Seq(ProcessingTimeTimeout, EventTimeTimeout)) {
testStateUpdateWithTimeout(
s"$timeoutConf - should not timeout",
stateUpdates = state => { assert(false, "function called without timeout") },
timeoutConf = timeoutConf,
priorTimeoutTimestamp = afterTimeoutThreshold,
expectedState = preTimeoutState, // state should not change
expectedTimeoutTimestamp = afterTimeoutThreshold) // timestamp should not change
testStateUpdateWithTimeout(
s"$timeoutConf - should timeout - no update/remove",
stateUpdates = state => { /* do nothing */ },
timeoutConf = timeoutConf,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = preTimeoutState, // state should not change
expectedTimeoutTimestamp = NO_TIMESTAMP) // timestamp should be reset
testStateUpdateWithTimeout(
s"$timeoutConf - should timeout - update state",
stateUpdates = state => { state.update(5) },
timeoutConf = timeoutConf,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = NO_TIMESTAMP) // timestamp should be reset
testStateUpdateWithTimeout(
s"$timeoutConf - should timeout - remove state",
stateUpdates = state => { state.remove() },
timeoutConf = timeoutConf,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = None, // state should be removed
expectedTimeoutTimestamp = NO_TIMESTAMP)
}
testStateUpdateWithTimeout(
"ProcessingTimeTimeout - should timeout - timeout duration updated",
stateUpdates = state => { state.setTimeoutDuration(2000) },
timeoutConf = ProcessingTimeTimeout,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = preTimeoutState, // state should not change
expectedTimeoutTimestamp = currentBatchTimestamp + 2000) // timestamp should change
testStateUpdateWithTimeout(
"ProcessingTimeTimeout - should timeout - timeout duration and state updated",
stateUpdates = state => { state.update(5); state.setTimeoutDuration(2000) },
timeoutConf = ProcessingTimeTimeout,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = currentBatchTimestamp + 2000) // timestamp should change
testStateUpdateWithTimeout(
"EventTimeTimeout - should timeout - timeout timestamp updated",
stateUpdates = state => { state.setTimeoutTimestamp(5000) },
timeoutConf = EventTimeTimeout,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = preTimeoutState, // state should not change
expectedTimeoutTimestamp = 5000) // timestamp should change
testStateUpdateWithTimeout(
"EventTimeTimeout - should timeout - timeout and state updated",
stateUpdates = state => { state.update(5); state.setTimeoutTimestamp(5000) },
timeoutConf = EventTimeTimeout,
priorTimeoutTimestamp = beforeTimeoutThreshold,
expectedState = Some(5), // state should change
expectedTimeoutTimestamp = 5000) // timestamp should change
test("flatMapGroupsWithState - streaming") {
// Function to maintain running count up to 2, and then remove the count
// Returns the data and the count if state is defined, otherwise does not return anything
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
if (count == 3) {
state.remove()
Iterator.empty
} else {
state.update(RunningCount(count))
Iterator((key, count.toString))
}
}
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.flatMapGroupsWithState(Update, GroupStateTimeout.NoTimeout)(stateFunc)
testStream(result, Update)(
AddData(inputData, "a"),
CheckLastBatch(("a", "1")),
assertNumStateRows(total = 1, updated = 1),
AddData(inputData, "a", "b"),
CheckLastBatch(("a", "2"), ("b", "1")),
assertNumStateRows(total = 2, updated = 2),
StopStream,
StartStream(),
AddData(inputData, "a", "b"), // should remove state for "a" and not return anything for a
CheckLastBatch(("b", "2")),
assertNumStateRows(total = 1, updated = 2),
StopStream,
StartStream(),
AddData(inputData, "a", "c"), // should recreate state for "a" and return count as 1 and
CheckLastBatch(("a", "1"), ("c", "1")),
assertNumStateRows(total = 3, updated = 2)
)
}
test("flatMapGroupsWithState - streaming + func returns iterator that updates state lazily") {
// Function to maintain running count up to 2, and then remove the count
// Returns the data and the count if state is defined, otherwise does not return anything
// Additionally, it updates state lazily as the returned iterator get consumed
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
values.flatMap { _ =>
val count = state.getOption.map(_.count).getOrElse(0L) + 1
if (count == 3) {
state.remove()
None
} else {
state.update(RunningCount(count))
Some((key, count.toString))
}
}
}
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.flatMapGroupsWithState(Update, GroupStateTimeout.NoTimeout)(stateFunc)
testStream(result, Update)(
AddData(inputData, "a", "a", "b"),
CheckLastBatch(("a", "1"), ("a", "2"), ("b", "1")),
StopStream,
StartStream(),
AddData(inputData, "a", "b"), // should remove state for "a" and not return anything for a
CheckLastBatch(("b", "2")),
StopStream,
StartStream(),
AddData(inputData, "a", "c"), // should recreate state for "a" and return count as 1 and
CheckLastBatch(("a", "1"), ("c", "1"))
)
}
test("flatMapGroupsWithState - streaming + aggregation") {
// Function to maintain running count up to 2, and then remove the count
// Returns the data and the count (-1 if count reached beyond 2 and state was just removed)
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
if (count == 3) {
state.remove()
Iterator(key -> "-1")
} else {
state.update(RunningCount(count))
Iterator(key -> count.toString)
}
}
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.flatMapGroupsWithState(Append, GroupStateTimeout.NoTimeout)(stateFunc)
.groupByKey(_._1)
.count()
testStream(result, Complete)(
AddData(inputData, "a"),
CheckLastBatch(("a", 1)),
AddData(inputData, "a", "b"),
// mapGroups generates ("a", "2"), ("b", "1"); so increases counts of a and b by 1
CheckLastBatch(("a", 2), ("b", 1)),
StopStream,
StartStream(),
AddData(inputData, "a", "b"),
// mapGroups should remove state for "a" and generate ("a", "-1"), ("b", "2") ;
// so increment a and b by 1
CheckLastBatch(("a", 3), ("b", 2)),
StopStream,
StartStream(),
AddData(inputData, "a", "c"),
// mapGroups should recreate state for "a" and generate ("a", "1"), ("c", "1") ;
// so increment a and c by 1
CheckLastBatch(("a", 4), ("b", 2), ("c", 1))
)
}
test("flatMapGroupsWithState - batch") {
// Function that asserts that state is never defined in a batch query and returns the count of values
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
if (state.exists) throw new IllegalArgumentException("state.exists should be false")
Iterator((key, values.size))
}
val df = Seq("a", "a", "b").toDS
.groupByKey(x => x)
.flatMapGroupsWithState(Update, GroupStateTimeout.NoTimeout)(stateFunc).toDF
checkAnswer(df, Seq(("a", 2), ("b", 1)).toDF)
}
test("flatMapGroupsWithState - streaming with processing time timeout") {
// Function to maintain running count up to 2, and then remove the count
// Returns the data and the count (-1 if count reached beyond 2 and state was just removed)
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
if (state.hasTimedOut) {
state.remove()
Iterator((key, "-1"))
} else {
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
state.update(RunningCount(count))
state.setTimeoutDuration("10 seconds")
Iterator((key, count.toString))
}
}
val clock = new StreamManualClock
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.flatMapGroupsWithState(Update, ProcessingTimeTimeout)(stateFunc)
testStream(result, Update)(
StartStream(Trigger.ProcessingTime("1 second"), triggerClock = clock),
AddData(inputData, "a"),
AdvanceManualClock(1 * 1000),
CheckLastBatch(("a", "1")),
assertNumStateRows(total = 1, updated = 1),
AddData(inputData, "b"),
AdvanceManualClock(1 * 1000),
CheckLastBatch(("b", "1")),
assertNumStateRows(total = 2, updated = 1),
AddData(inputData, "b"),
AdvanceManualClock(10 * 1000),
CheckLastBatch(("a", "-1"), ("b", "2")),
assertNumStateRows(total = 1, updated = 2),
StopStream,
StartStream(Trigger.ProcessingTime("1 second"), triggerClock = clock),
AddData(inputData, "c"),
AdvanceManualClock(11 * 1000),
CheckLastBatch(("b", "-1"), ("c", "1")),
assertNumStateRows(total = 1, updated = 2),
AddData(inputData, "c"),
AdvanceManualClock(20 * 1000),
CheckLastBatch(("c", "2")),
assertNumStateRows(total = 1, updated = 1)
)
}
test("flatMapGroupsWithState - streaming with event time timeout + watermark") {
// Function to maintain the max event time
// Returns the max event time in the state, or -1 if the state was removed by timeout
val stateFunc = (
key: String,
values: Iterator[(String, Long)],
state: GroupState[Long]) => {
val timeoutDelay = 5
if (key != "a") {
Iterator.empty
} else {
if (state.hasTimedOut) {
state.remove()
Iterator((key, -1))
} else {
val valuesSeq = values.toSeq
val maxEventTime = math.max(valuesSeq.map(_._2).max, state.getOption.getOrElse(0L))
val timeoutTimestampMs = maxEventTime + timeoutDelay
state.update(maxEventTime)
state.setTimeoutTimestamp(timeoutTimestampMs * 1000)
Iterator((key, maxEventTime.toInt))
}
}
}
val inputData = MemoryStream[(String, Int)]
val result =
inputData.toDS
.select($"_1".as("key"), $"_2".cast("timestamp").as("eventTime"))
.withWatermark("eventTime", "10 seconds")
.as[(String, Long)]
.groupByKey(_._1)
.flatMapGroupsWithState(Update, EventTimeTimeout)(stateFunc)
testStream(result, Update)(
StartStream(Trigger.ProcessingTime("1 second")),
AddData(inputData, ("a", 11), ("a", 13), ("a", 15)), // Set timeout timestamp of ...
CheckLastBatch(("a", 15)), // "a" to 15 + 5 = 20s, watermark to 5s
AddData(inputData, ("a", 4)), // Add data older than watermark for "a"
CheckLastBatch(), // No output as data should get filtered by watermark
AddData(inputData, ("dummy", 35)), // Set watermark = 35 - 10 = 25s
CheckLastBatch(), // No output as no data for "a"
AddData(inputData, ("a", 24)), // Add data older than watermark, should be ignored
CheckLastBatch(("a", -1)) // State for "a" should timeout and emit -1
)
}
test("mapGroupsWithState - streaming") {
// Function to maintain running count up to 2, and then remove the count
// Returns the data and the count (-1 if count reached beyond 2 and state was just removed)
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
if (count == 3) {
state.remove()
(key, "-1")
} else {
state.update(RunningCount(count))
(key, count.toString)
}
}
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.mapGroupsWithState(stateFunc) // Types = State: MyState, Out: (Str, Str)
testStream(result, Update)(
AddData(inputData, "a"),
CheckLastBatch(("a", "1")),
assertNumStateRows(total = 1, updated = 1),
AddData(inputData, "a", "b"),
CheckLastBatch(("a", "2"), ("b", "1")),
assertNumStateRows(total = 2, updated = 2),
StopStream,
StartStream(),
AddData(inputData, "a", "b"), // should remove state for "a" and return count as -1
CheckLastBatch(("a", "-1"), ("b", "2")),
assertNumStateRows(total = 1, updated = 2),
StopStream,
StartStream(),
AddData(inputData, "a", "c"), // should recreate state for "a" and return count as 1
CheckLastBatch(("a", "1"), ("c", "1")),
assertNumStateRows(total = 3, updated = 2)
)
}
test("mapGroupsWithState - batch") {
// Test the following
// - no initial state
// - timeout operations work and do not throw any error [SPARK-20792]
// - works with primitive state type
val stateFunc = (key: String, values: Iterator[String], state: GroupState[Int]) => {
if (state.exists) throw new IllegalArgumentException("state.exists should be false")
state.setTimeoutTimestamp(0, "1 hour")
state.update(10)
(key, values.size)
}
checkAnswer(
spark.createDataset(Seq("a", "a", "b"))
.groupByKey(x => x)
.mapGroupsWithState(EventTimeTimeout)(stateFunc)
.toDF,
spark.createDataset(Seq(("a", 2), ("b", 1))).toDF)
}
testQuietly("StateStore.abort on task failure handling") {
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => {
if (FlatMapGroupsWithStateSuite.failInTask) throw new Exception("expected failure")
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
state.update(RunningCount(count))
(key, count)
}
val inputData = MemoryStream[String]
val result =
inputData.toDS()
.groupByKey(x => x)
.mapGroupsWithState(stateFunc) // Types = State: MyState, Out: (Str, Str)
def setFailInTask(value: Boolean): AssertOnQuery = AssertOnQuery { q =>
FlatMapGroupsWithStateSuite.failInTask = value
true
}
testStream(result, Update)(
setFailInTask(false),
AddData(inputData, "a"),
CheckLastBatch(("a", 1L)),
AddData(inputData, "a"),
CheckLastBatch(("a", 2L)),
setFailInTask(true),
AddData(inputData, "a"),
ExpectFailure[SparkException](), // task should fail but should not increment count
setFailInTask(false),
StartStream(),
CheckLastBatch(("a", 3L)) // task should not fail, and should show correct count
)
}
test("output partitioning is unknown") {
val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => key
val inputData = MemoryStream[String]
val result = inputData.toDS.groupByKey(x => x).mapGroupsWithState(stateFunc)
testStream(result, Update)(
AddData(inputData, "a"),
CheckLastBatch("a"),
AssertOnQuery(_.lastExecution.executedPlan.outputPartitioning === UnknownPartitioning(0))
)
}
test("disallow complete mode") {
val stateFunc = (key: String, values: Iterator[String], state: GroupState[Int]) => {
Iterator[String]()
}
var e = intercept[IllegalArgumentException] {
MemoryStream[String].toDS().groupByKey(x => x).flatMapGroupsWithState(
OutputMode.Complete, GroupStateTimeout.NoTimeout)(stateFunc)
}
assert(e.getMessage === "The output mode of function should be append or update")
val javaStateFunc = new FlatMapGroupsWithStateFunction[String, String, Int, String] {
import java.util.{Iterator => JIterator}
override def call(
key: String,
values: JIterator[String],
state: GroupState[Int]): JIterator[String] = { null }
}
e = intercept[IllegalArgumentException] {
MemoryStream[String].toDS().groupByKey(x => x).flatMapGroupsWithState(
javaStateFunc, OutputMode.Complete,
implicitly[Encoder[Int]], implicitly[Encoder[String]], GroupStateTimeout.NoTimeout)
}
assert(e.getMessage === "The output mode of function should be append or update")
}
def testWithTimeout(timeoutConf: GroupStateTimeout): Unit = {
test("SPARK-20714: watermark does not fail query when timeout = " + timeoutConf) {
// Function to maintain running count up to 2, and then remove the count
// Returns the data and the count (-1 if count reached beyond 2 and state was just removed)
val stateFunc =
(key: String, values: Iterator[(String, Long)], state: GroupState[RunningCount]) => {
if (state.hasTimedOut) {
state.remove()
Iterator((key, "-1"))
} else {
val count = state.getOption.map(_.count).getOrElse(0L) + values.size
state.update(RunningCount(count))
state.setTimeoutDuration("10 seconds")
Iterator((key, count.toString))
}
}
val clock = new StreamManualClock
val inputData = MemoryStream[(String, Long)]
val result =
inputData.toDF().toDF("key", "time")
.selectExpr("key", "cast(time as timestamp) as timestamp")
.withWatermark("timestamp", "10 second")
.as[(String, Long)]
.groupByKey(x => x._1)
.flatMapGroupsWithState(Update, ProcessingTimeTimeout)(stateFunc)
testStream(result, Update)(
StartStream(Trigger.ProcessingTime("1 second"), triggerClock = clock),
AddData(inputData, ("a", 1L)),
AdvanceManualClock(1 * 1000),
CheckLastBatch(("a", "1"))
)
}
}
testWithTimeout(NoTimeout)
testWithTimeout(ProcessingTimeTimeout)
def testStateUpdateWithData(
testName: String,
stateUpdates: GroupState[Int] => Unit,
timeoutConf: GroupStateTimeout,
priorState: Option[Int],
priorTimeoutTimestamp: Long = NO_TIMESTAMP,
expectedState: Option[Int] = None,
expectedTimeoutTimestamp: Long = NO_TIMESTAMP,
expectedException: Class[_ <: Exception] = null): Unit = {
if (priorState.isEmpty && priorTimeoutTimestamp != NO_TIMESTAMP) {
return // there can be no prior timestamp when there is no prior state
}
test(s"StateStoreUpdater - updates with data - $testName") {
val mapGroupsFunc = (key: Int, values: Iterator[Int], state: GroupState[Int]) => {
assert(state.hasTimedOut === false, "hasTimedOut not false")
assert(values.nonEmpty, "Some value is expected")
stateUpdates(state)
Iterator.empty
}
testStateUpdate(
testTimeoutUpdates = false, mapGroupsFunc, timeoutConf,
priorState, priorTimeoutTimestamp,
expectedState, expectedTimeoutTimestamp, expectedException)
}
}
def testStateUpdateWithTimeout(
testName: String,
stateUpdates: GroupState[Int] => Unit,
timeoutConf: GroupStateTimeout,
priorTimeoutTimestamp: Long,
expectedState: Option[Int],
expectedTimeoutTimestamp: Long = NO_TIMESTAMP): Unit = {
test(s"StateStoreUpdater - updates for timeout - $testName") {
val mapGroupsFunc = (key: Int, values: Iterator[Int], state: GroupState[Int]) => {
assert(state.hasTimedOut === true, "hasTimedOut not true")
assert(values.isEmpty, "values not empty")
stateUpdates(state)
Iterator.empty
}
testStateUpdate(
testTimeoutUpdates = true, mapGroupsFunc, timeoutConf = timeoutConf,
preTimeoutState, priorTimeoutTimestamp, expectedState, expectedTimeoutTimestamp, null)
}
}
def testStateUpdate(
testTimeoutUpdates: Boolean,
mapGroupsFunc: (Int, Iterator[Int], GroupState[Int]) => Iterator[Int],
timeoutConf: GroupStateTimeout,
priorState: Option[Int],
priorTimeoutTimestamp: Long,
expectedState: Option[Int],
expectedTimeoutTimestamp: Long,
expectedException: Class[_ <: Exception]): Unit = {
val store = newStateStore()
val mapGroupsSparkPlan = newFlatMapGroupsWithStateExec(
mapGroupsFunc, timeoutConf, currentBatchTimestamp)
val updater = new mapGroupsSparkPlan.StateStoreUpdater(store)
val key = intToRow(0)
// Prepare store with prior state configs
if (priorState.nonEmpty) {
val row = updater.getStateRow(priorState.get)
updater.setTimeoutTimestamp(row, priorTimeoutTimestamp)
store.put(key.copy(), row.copy())
}
// Call updating function to update state store
def callFunction() = {
val returnedIter = if (testTimeoutUpdates) {
updater.updateStateForTimedOutKeys()
} else {
updater.updateStateForKeysWithData(Iterator(key))
}
returnedIter.size // consume the iterator to force state updates
}
if (expectedException != null) {
// Call function and verify the exception type
val e = intercept[Exception] { callFunction() }
assert(e.getClass === expectedException, "Exception thrown but of the wrong type")
} else {
// Call function to update and verify updated state in store
callFunction()
val updatedStateRow = store.get(key)
assert(
Option(updater.getStateObj(updatedStateRow)).map(_.toString.toInt) === expectedState,
"final state not as expected")
if (updatedStateRow != null) {
assert(
updater.getTimeoutTimestamp(updatedStateRow) === expectedTimeoutTimestamp,
"final timeout timestamp not as expected")
}
}
}
def newFlatMapGroupsWithStateExec(
func: (Int, Iterator[Int], GroupState[Int]) => Iterator[Int],
timeoutType: GroupStateTimeout = GroupStateTimeout.NoTimeout,
batchTimestampMs: Long = NO_TIMESTAMP): FlatMapGroupsWithStateExec = {
MemoryStream[Int]
.toDS
.groupByKey(x => x)
.flatMapGroupsWithState[Int, Int](Append, timeoutConf = timeoutType)(func)
.logicalPlan.collectFirst {
case FlatMapGroupsWithState(f, k, v, g, d, o, s, m, _, t, _) =>
FlatMapGroupsWithStateExec(
f, k, v, g, d, o, None, s, m, t,
Some(currentBatchTimestamp), Some(currentBatchWatermark), RDDScanExec(g, null, "rdd"))
}.get
}
def testTimeoutDurationNotAllowed[T <: Exception: Manifest](state: GroupStateImpl[_]): Unit = {
val prevTimestamp = state.getTimeoutTimestamp
intercept[T] { state.setTimeoutDuration(1000) }
assert(state.getTimeoutTimestamp === prevTimestamp)
intercept[T] { state.setTimeoutDuration("2 second") }
assert(state.getTimeoutTimestamp === prevTimestamp)
}
def testTimeoutTimestampNotAllowed[T <: Exception: Manifest](state: GroupStateImpl[_]): Unit = {
val prevTimestamp = state.getTimeoutTimestamp
intercept[T] { state.setTimeoutTimestamp(2000) }
assert(state.getTimeoutTimestamp === prevTimestamp)
intercept[T] { state.setTimeoutTimestamp(2000, "1 second") }
assert(state.getTimeoutTimestamp === prevTimestamp)
intercept[T] { state.setTimeoutTimestamp(new Date(2000)) }
assert(state.getTimeoutTimestamp === prevTimestamp)
intercept[T] { state.setTimeoutTimestamp(new Date(2000), "1 second") }
assert(state.getTimeoutTimestamp === prevTimestamp)
}
def newStateStore(): StateStore = new MemoryStateStore()
val intProj = UnsafeProjection.create(Array[DataType](IntegerType))
def intToRow(i: Int): UnsafeRow = {
intProj.apply(new GenericInternalRow(Array[Any](i))).copy()
}
def rowToInt(row: UnsafeRow): Int = row.getInt(0)
}
object FlatMapGroupsWithStateSuite {
var failInTask = true
class MemoryStateStore extends StateStore() {
import scala.collection.JavaConverters._
private val map = new ConcurrentHashMap[UnsafeRow, UnsafeRow]
override def iterator(): Iterator[UnsafeRowPair] = {
map.entrySet.iterator.asScala.map { case e => new UnsafeRowPair(e.getKey, e.getValue) }
}
override def get(key: UnsafeRow): UnsafeRow = map.get(key)
override def put(key: UnsafeRow, newValue: UnsafeRow): Unit = {
map.put(key.copy(), newValue.copy())
}
override def remove(key: UnsafeRow): Unit = { map.remove(key) }
override def commit(): Long = version + 1
override def abort(): Unit = { }
override def id: StateStoreId = null
override def version: Long = 0
override def metrics: StateStoreMetrics = new StateStoreMetrics(map.size, 0, Map.empty)
override def hasCommitted: Boolean = true
}
}
| someorz/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/FlatMapGroupsWithStateSuite.scala | Scala | apache-2.0 | 42,956 |
package codesniffer.deckard.search
import java.io.File
import java.lang.reflect.Modifier
import java.util
import java.util.concurrent.atomic.{AtomicInteger, AtomicLong, AtomicReference}
import codesniffer.core._
import codesniffer.deckard.vgen.{SkipLocksVecGen, DirScanConfig, SrcScanner, Context}
import codesniffer.deckard._
import codesniffer.api.Node
import codesniffer.api.body.MethodDeclaration
import codesniffer.api.expr.ThisExpr
import codesniffer.api.stmt.EmptyStmt
import scala.collection.convert.wrapAsScala._
/**
* Created by Bowen Cai on 5/1/2015.
*/
object NodeCount {
val methodCount = new AtomicLong(0L)
// top level statement
val topStmtCount = new AtomicLong(0L)
val stmtGrp = new util.TreeMap[Int, Int]()
val nodeCount = new AtomicLong(0L)
val nodeGrp = new util.TreeMap[Int, Int]()
val maxNodeNumber = new AtomicInteger(0)
val maxNodeLoc = new AtomicReference[Location](null)
def main(args: Array[String]): Unit = {
// var path = "D:\\\\__TEMP__\\\\src\\\\Src1.java"
// var path: String = "D:\\\\Program Files\\\\adt-bundle-windows-x86_64-20130219\\\\sdk\\\\sources\\\\android-19"
// var path: String = "E:\\\\research\\\\top\\\\spring-framework"
var path = "E:\\\\research\\\\top\\\\guava"
// var path: String = _
// if (args != null && args.length == 1) {
// path = args(0)
// println(s"Scanning directory $path")
// } else {
// println("Usage: <path to source directory>")
// sys.exit(1)
// }
val dir = new File(path)
require(dir.exists() && dir.canRead)
val config = new DirScanConfig
config.filterDirName = (name: String) => (
name.equals("package-info.java") // filter out package file
|| name.endsWith("Tests.java") // filter out test file
|| name.endsWith("Test.java")
)
config.filterNode = (node: Node) => node.isInstanceOf[EmptyStmt] || node.isInstanceOf[ThisExpr]
config.filterMethod = (m: MethodDeclaration) => !Modifier.isPublic(m.getModifiers)
val vecCollector = new MemWriter[String]
val scanner = new SrcScanner(new Context(config, null, null, new Indexer[String], vecCollector))
val mv = new SkipLocksVecGen[String]
scanner.methodVisitor = mv;
mv.classVisitor = scanner.classVisitor
scanner.classVisitor.setMethodVisitor(mv)
mv.before = (method: MethodDeclaration, c: Context[String])=> {
methodCount.incrementAndGet()
val _stmtNum = method.getBody.getStmts.size()
topStmtCount.addAndGet(_stmtNum)
stmtGrp.synchronized {
val old = stmtGrp.getOrDefault(_stmtNum, 0)
stmtGrp.update(_stmtNum, old + 1)
}
new CounterVec[String](c.currentLocation)
}
mv.after = (method, ctx)=> {
val last = ctx.data.get.asInstanceOf[CharacVec[String]]
val c = last.count
nodeCount.addAndGet(c)
nodeGrp.synchronized{
val old = nodeGrp.getOrDefault(c, 0)
nodeGrp.update(c, old + 1)
}
if (c > maxNodeNumber.get()) {
maxNodeNumber.set(c)
maxNodeLoc.set(last.location)
}
}
dir match {
case where if where.isDirectory => scanner.scanDir(where, recursive = true)
case src if src.isFile => scanner.processFile(src)
}
println(s"method count: $methodCount, top level stmt count: $topStmtCount")
println(s"stmt per method: ${topStmtCount.doubleValue() / methodCount.doubleValue()}")
println("stmt count:")
stmtGrp.foreach(println)
println("nodeCount: " + methodCount)
println(s"node per method: ${nodeCount.doubleValue() / methodCount.doubleValue()}")
println("node count:")
nodeGrp.foreach(println)
println(s"max node ${maxNodeNumber.get()}, appeared at $maxNodeLoc")
println(scanner.context.indexer)
}
} | xkommando/CodeSniffer | deckard/src/main/scala/codesniffer/deckard/search/NodeCount.scala | Scala | lgpl-3.0 | 3,859 |
package views.vrm_assign
object Error {
final val StartAgainId = "start-again"
} | dvla/vrm-assign-online | app/views/vrm_assign/Error.scala | Scala | mit | 84 |
package igor
import igor.experiment.{Experiment, Stage}
/**
* @author jda
*/
class PrecomputeFeatures(val corpusPath: Int) extends Stage {
override def run(experiment: Experiment): Unit = {
logger.info("Hello, world!")
}
}
| dlwh/igor | src/test/scala/igor/PrecomputeFeatures.scala | Scala | apache-2.0 | 235 |
package shield.actors.config.domain
import akka.actor.{ActorSystem, Props}
import akka.testkit.{TestActorRef, TestKit, TestProbe}
import org.scalatest.{BeforeAndAfterAll, MustMatchers, WordSpecLike}
import shield.actors.ShieldActorMsgs
import shield.config.Settings
class StaticDomainWatcherSpec extends TestKit(ActorSystem("testSystem"))
with WordSpecLike
with MustMatchers
with BeforeAndAfterAll {
val settings = Settings(system)
"StaticDomainWatcher" should {
"notify shield about domains found" in {
val parent = TestProbe()
TestActorRef(Props(new StaticDomainWatcher()), parent.ref, "static-domain-watcher")
val msg: ShieldActorMsgs.DomainsUpdated = parent.expectMsgClass(classOf[ShieldActorMsgs.DomainsUpdated])
msg.domains.size must equal (settings.config.getConfigList("shield.domains").size)
}
}
}
| RetailMeNot/shield | src/test/scala/shield/actors/config/domain/StaticDomainWatcherSpec.scala | Scala | mit | 858 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.swagger
import com.wordnik.swagger.core.util.ReaderUtil
import com.wordnik.swagger.config.SwaggerConfig
import com.wordnik.swagger.model.ApiListing
import org.apache.camel.model.rest.RestDefinition
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
/**
* To cache the RestSwaggerReader
*/
object RestApiListingCache extends ReaderUtil {
var cache: Option[Map[String, ApiListing]] = None
val reader = new RestSwaggerReader()
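// Note: the result of the first successful, non-empty read is memoized in `cache` and
// returned by every subsequent call, even if the rest definitions change afterwards.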
def listing(rests: mutable.Buffer[RestDefinition], config: SwaggerConfig): Option[Map[String, ApiListing]] = {
cache.orElse {
val listings = new ListBuffer[ApiListing]
for (rest <- rests) {
val some = reader.read(rest, config)
if (!some.isEmpty) {
listings += some.get
}
}
if (listings.size > 0) {
val mergedListings = groupByResourcePath(listings.toList)
cache = Some(mergedListings.map(m => (m.resourcePath, m)).toMap)
}
cache
}
}
}
| snadakuduru/camel | components/camel-swagger/src/main/scala/org/apache/camel/component/swagger/RestApiListingCache.scala | Scala | apache-2.0 | 1,836 |
/**
* Copyright 2011 Havoc Pennington
* derived from Salat,
* Copyright 2010-2011 Novus Partners, Inc. <http://novus.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ometer
import java.lang.reflect.AnnotatedElement
import java.lang.annotation.Annotation
import scala.annotation.target.getter
import scala.tools.scalap.scalax.rules.scalasig._
import scala.reflect.generic.ByteCodecs
import scala.reflect.ScalaSignature
import java.lang.reflect.{ InvocationTargetException, Constructor, Method }
import java.math.{ RoundingMode, MathContext }
import scala.collection.mutable.{ Map => MMap, HashMap }
import java.lang.reflect.Modifier
class ClassAnalysis[X <: ClassAnalysis.CaseClass](val clazz : Class[X]) {
import ClassAnalysis._
/**
* Get field names in the object.
*/
def fieldNamesIterator : Iterator[String] = {
indexedFields.iterator.map(_.name)
}
/**
* Iterate over the types of fields.
*/
def fieldTypesIterator : Iterator[Type] = {
indexedFields.iterator.map(_.typeRefType)
}
/**
* Iterate over whether the fields are optional
*/
def fieldOptionalityIterator : Iterator[Boolean] = {
indexedFields.iterator.map(_.optional)
}
/**
* Iterate over field names and current values for an instance.
*/
def fieldIterator(o : X) : Iterator[(String, Any)] = {
val valuesAndFields = o.productIterator.zip(indexedFields.iterator)
valuesAndFields.map({ valueAndField =>
(valueAndField._2.name -> valueAndField._1)
})
}
/**
* Pull fields from an object as map from field names to values.
*/
def asMap(o : X) : Map[String, Any] = {
val builder = Map.newBuilder[String, Any]
for ((name, value) <- fieldIterator(o)) {
value match {
case None =>
// Option-valued field is not present, omit
case Some(x) =>
// Option-valued field is present, include
builder += name -> x
case _ =>
// regular (non optional) field
builder += name -> value
}
}
builder.result
}
/**
* Construct an object from a map of field names and values.
*/
def fromMap(m : Map[String, Any]) : X = {
if (sym.isModule) {
companionObject.asInstanceOf[X]
} else {
val args = indexedFields.map {
case field => {
val optionalValue = m.get(field.name)
if (field.optional) {
optionalValue
} else {
optionalValue match {
case Some(value) =>
value
case None =>
throw new Exception("%s requires value for '%s' map was '%s'".format(clazz, field.name, m))
}
}
}
}.map(_.asInstanceOf[AnyRef])
try {
constructor.newInstance(args : _*)
} catch {
// when something bad happens feeding args into constructor, catch these exceptions and
// wrap them in a custom exception that will provide detailed information about what's happening.
case e : InstantiationException => throw new ToObjectGlitch(this, sym, constructor, args, e)
case e : IllegalAccessException => throw new ToObjectGlitch(this, sym, constructor, args, e)
case e : IllegalArgumentException => throw new ToObjectGlitch(this, sym, constructor, args, e)
case e : InvocationTargetException => throw new ToObjectGlitch(this, sym, constructor, args, e)
case e => throw e
}
}
}
override def toString = "ClassAnalysis(%s)".format(clazz)
override def equals(that : Any) = that.isInstanceOf[ClassAnalysis[_]] && that.asInstanceOf[ClassAnalysis[_]].sym.path == this.sym.path
override def hashCode = sym.path.hashCode
private def parseScalaSig[A](clazz : Class[A]) : Option[ScalaSig] = {
val firstPass = parseScalaSig0(clazz)
firstPass match {
case Some(x) => {
Some(x)
}
case None if clazz.getName.endsWith("$") => {
val clayy = Class.forName(clazz.getName.replaceFirst("\\$$", ""))
val secondPass = parseScalaSig0(clayy)
secondPass
}
case x => x
}
}
// this returns something like: ClassSymbol(IntAndString, owner=com.example.somepackage, flags=40, info=9 ,None)
private def findSym[A](clazz : Class[A]) = {
val pss = parseScalaSig(clazz)
pss match {
case Some(x) => {
val topLevelClasses = x.topLevelClasses
topLevelClasses.headOption match {
case Some(tlc) => {
//System.out.println("tlc=" + tlc)
tlc
}
case None => {
val topLevelObjects = x.topLevelObjects
topLevelObjects.headOption match {
case Some(tlo) => {
tlo
}
case _ => throw new MissingExpectedType(clazz)
}
}
}
}
case None => throw new MissingPickledSig(clazz)
}
}
private lazy val sym = findSym(clazz)
// annotations on a getter don't actually inherit from a trait or an abstract superclass,
// but dragging them down manually allows for much nicer behaviour - this way you can specify @Persist or @Key
// on a trait and have it work all the way down
private def interestingClass(clazz : Class[_]) = clazz match {
case clazz if clazz == null => false // inconceivably, this happens!
case clazz if clazz.getName.startsWith("java.") => false
case clazz if clazz.getName.startsWith("javax.") => false
case clazz if clazz.getName.startsWith("scala.") => false
case clazz if clazz.getEnclosingClass != null => false // filter out nested traits and superclasses
case _ => true
}
private lazy val interestingInterfaces : List[(Class[_], SymbolInfoSymbol)] = {
val interfaces = clazz.getInterfaces // this should return an empty array, but... sometimes returns null!
if (interfaces != null && interfaces.nonEmpty) {
val builder = List.newBuilder[(Class[_], SymbolInfoSymbol)]
for (interface <- interfaces) {
if (interestingClass(interface)) {
builder += ((interface, findSym(interface)))
}
}
builder.result()
} else Nil
}
private lazy val interestingSuperclass : List[(Class[_], SymbolInfoSymbol)] = clazz.getSuperclass match {
case superClazz if interestingClass(superClazz) => List((superClazz, findSym(superClazz)))
case _ => Nil
}
// for use when you just want to find something and whether it was declared in clazz, some trait clazz extends, or clazz' own superclass
// is not a concern
private lazy val allTheChildren : Seq[Symbol] = sym.children ++ interestingInterfaces.map(_._2.children).flatten ++ interestingSuperclass.map(_._2.children).flatten
// sym.children would look at objects like these two for a field "foo"
// MethodSymbol(foo, owner=0, flags=29400200, info=32 ,None)
// MethodSymbol(foo , owner=0, flags=21080004, info=33 ,None)
private lazy val indexedFields = {
// don't use allTheChildren here! this is the indexed fields for clazz and clazz alone
sym.children
.filter({ c => c.isCaseAccessor && !c.isPrivate })
.map(_.asInstanceOf[MethodSymbol])
.zipWithIndex
.map {
case (ms, idx) => {
//printf("indexedFields: clazz=%s, ms=%s, idx=%s\\n", clazz, ms, idx)
Field(idx, ms.name, typeRefType(ms), clazz.getMethod(ms.name))
}
}
}
private lazy val companionClass = ClassAnalysis.companionClass(clazz)
private lazy val companionObject = ClassAnalysis.companionObject(clazz)
private lazy val constructor : Constructor[X] = {
val cl = clazz.getConstructors.asInstanceOf[Array[Constructor[X]]].filter(_.getParameterTypes().length > 0)
// I'm seeing two constructors, the regular case class one and one with no arguments. Above we filter the
// no arguments one (which I don't understand) and then we get upset if we find more.
// Case classes can have extra constructors but overloading apply() is more the usual thing to do.
if (cl.size > 1) {
throw new RuntimeException("constructor: clazz=%s, expected 1 constructor but found %d\\n%s".format(clazz, cl.size, cl.mkString("\\n")))
}
val c = cl.headOption.getOrElse(throw new MissingConstructor(sym))
//printf("constructor: clazz=%s ---> constructor=%s\\n", clazz, c)
c
}
private def typeRefType(ms : MethodSymbol) : TypeRefType = ms.infoType match {
case PolyType(tr @ TypeRefType(_, _, _), _) => tr
}
}
object ClassAnalysis {
class MissingPickledSig(clazz : Class[_]) extends Exception("Failed to parse pickled Scala signature from: %s".format(clazz))
class MissingExpectedType(clazz : Class[_]) extends Exception("Parsed pickled Scala signature, but no expected type found: %s"
.format(clazz))
class MissingTopLevelClass(clazz : Class[_]) extends Exception("Parsed pickled scala signature but found no top level class for: %s"
.format(clazz))
class NestingGlitch(clazz : Class[_], owner : String, outer : String, inner : String) extends Exception("Didn't find owner=%s, outer=%s, inner=%s in pickled scala sig for %s"
.format(owner, outer, inner, clazz))
class MissingConstructor(sym : SymbolInfoSymbol) extends Exception("Couldn't find a constructor for %s".format(sym.path))
class ToObjectGlitch[X <: ClassAnalysis.CaseClass](classAnalysis : ClassAnalysis[X], sym : SymbolInfoSymbol, constructor : Constructor[X], args : Seq[AnyRef], cause : Throwable) extends Exception(
"""
%s
%s toObject failed on:
SYM: %s
CONSTRUCTOR: %s
ARGS:
%s
""".format(cause.getMessage, classAnalysis.toString, sym.path, constructor, args), cause)
private type CaseClass = AnyRef with Product
private def annotation[A <: Annotation : Manifest](x : AnnotatedElement) : Option[A] = {
x.getAnnotation[A](manifest[A].erasure.asInstanceOf[Class[A]]) match {
case a if a != null => Some(a)
case _ => None
}
}
private def annotated_?[A <: Annotation : Manifest](x : AnnotatedElement) : Boolean = {
annotation[A](x)(manifest[A]).isDefined
}
private def extractSingleTypeArg(t : Type, typeName : String) : Option[Type] = {
t match {
case TypeRefType(_, symbol, Seq(typeArg)) if symbol.path == typeName =>
Some(typeArg)
case _ => None
}
}
private def companionClass(clazz : Class[_]) : Class[_] =
Class.forName(if (clazz.getName.endsWith("$")) clazz.getName else "%s$".format(clazz.getName))
private def companionObject(clazz : Class[_]) = companionClass(clazz).getField("MODULE$").get(null)
private object Field {
def apply(idx : Int, name : String, t : TypeRefType, method : Method) : Field = {
new Field(idx, name, t)
}
}
private class Field(val idx : Int, val name : String, val typeRefType : TypeRefType) {
override def toString = "Field[%d/%s]".format(idx, name)
val optionalType : Option[Type] = extractSingleTypeArg(typeRefType, "scala.Option")
val optional : Boolean = optionalType.isDefined
}
private def parseScalaSig0(clazz : Class[_]) : Option[ScalaSig] = {
//println("parseScalaSig0 " + clazz)
if (clazz != null) {
ScalaSigParser.parse(clazz) match {
case Some(sig) if sig != null => Some(sig)
case _ => annotation[ScalaSignature](clazz) match {
case Some(sig) if sig != null => {
val bytes = sig.bytes.getBytes("UTF-8")
val len = ByteCodecs.decode(bytes)
val parsedSig = ScalaSigAttributeParsers.parse(ByteCode(bytes.take(len)))
Option(parsedSig)
}
case _ => {
//log.error("parseScalaSig: could not parse clazz='%s' from class or scala.reflect.ScalaSignature", clazz.getName)
None
}
}
}
} else {
//log.error("parseScalaSig: clazz was null!")
None
}
}
}
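// Hypothetical usage sketch (the Person class below is illustrative and not part of this file):
//
//   case class Person(name: String, nickname: Option[String])
//   val analysis = new ClassAnalysis(classOf[Person])
//
//   analysis.fieldNamesIterator.toList            // List("name", "nickname")
//   analysis.asMap(Person("Ada", None))           // Map("name" -> "Ada")  (None fields are omitted)
//   analysis.asMap(Person("Ada", Some("A")))      // Map("name" -> "Ada", "nickname" -> "A")
//   analysis.fromMap(Map("name" -> "Ada"))        // Person("Ada", None)   (missing optional -> None)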
| havocp/mongo-scala-thingy | src/main/scala/com/ometer/ClassAnalysis.scala | Scala | apache-2.0 | 13,725 |
package com.twitter.finagle.stream
import com.twitter.concurrent._
import com.twitter.conversions.time._
import com.twitter.finagle.builder.{ClientBuilder, ServerBuilder}
import com.twitter.finagle.{Service, ServiceProxy, TooManyConcurrentRequestsException}
import com.twitter.util._
import java.net.{InetAddress, InetSocketAddress, SocketAddress}
import java.nio.charset.Charset
import java.util.concurrent.Executors
import org.jboss.netty.bootstrap.ClientBootstrap
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.channel._
import org.jboss.netty.handler.codec.http._
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class EndToEndTest extends FunSuite {
case class MyStreamResponse(
httpResponse: HttpResponse,
messages: Offer[ChannelBuffer],
error: Offer[Throwable]
) extends StreamResponse {
val released = new Promise[Unit]
def release() = released.updateIfEmpty(Return(()))
}
class MyService(response: StreamResponse) extends Service[HttpRequest, StreamResponse] {
def apply(request: HttpRequest) = Future.value(response)
}
class WorkItContext(){
val httpRequest: DefaultHttpRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/")
val httpResponse: DefaultHttpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK)
val messages: Broker[ChannelBuffer] = new Broker[ChannelBuffer]
val error: Broker[Throwable] = new Broker[Throwable]
val serverRes = MyStreamResponse(httpResponse, messages.recv, error.recv)
}
def workIt(what: String)(mkClient: (MyStreamResponse) => (Service[HttpRequest, StreamResponse], SocketAddress)) {
test("Streams %s: writes from the server arrive on the client's channel".format(what)) {
val c = new WorkItContext()
import c._
val (client, _) = mkClient(serverRes)
val clientRes = Await.result(client(httpRequest), 1.second)
var result = ""
val latch = new CountDownLatch(1)
(clientRes.error?) ensure {
Future { latch.countDown() }
}
clientRes.messages foreach { channelBuffer =>
Future {
result += channelBuffer.toString(Charset.defaultCharset)
}
}
messages !! ChannelBuffers.wrappedBuffer("1".getBytes)
messages !! ChannelBuffers.wrappedBuffer("2".getBytes)
messages !! ChannelBuffers.wrappedBuffer("3".getBytes)
error !! EOF
latch.within(1.second)
assert(result === "123")
client.close()
}
test("Streams %s: writes from the server are queued before the client responds".format(what)) {
val c = new WorkItContext()
import c._
val (client, _) = mkClient(serverRes)
val clientRes = Await.result(client(httpRequest), 1.second)
messages !! ChannelBuffers.wrappedBuffer("1".getBytes)
messages !! ChannelBuffers.wrappedBuffer("2".getBytes)
messages !! ChannelBuffers.wrappedBuffer("3".getBytes)
val latch = new CountDownLatch(3)
var result = ""
clientRes.messages foreach { channelBuffer =>
Future {
result += channelBuffer.toString(Charset.defaultCharset)
latch.countDown()
}
}
latch.within(1.second)
error !! EOF
assert(result === "123")
client.close()
}
test("Streams %s: the client does not admit concurrent requests".format(what)) {
val c = new WorkItContext()
import c._
val (client, _) = mkClient(serverRes)
val clientRes = Await.result(client(httpRequest), 15.seconds)
assert(client(httpRequest).poll match {
case Some(Throw(_: TooManyConcurrentRequestsException)) => true
case _ => false
})
client.close()
}
if (!sys.props.contains("SKIP_FLAKY"))
test("Streams %s: the server does not admit concurrent requests".format(what)) {
val c = new WorkItContext()
import c._
val (client, address) = mkClient(serverRes)
// The finagle client, by nature, doesn't allow for this, so
// we need to go through the trouble of establishing our own
// pipeline.
val recvd = new Broker[ChannelEvent]
val bootstrap = new ClientBootstrap(
new NioClientSocketChannelFactory(
Executors.newCachedThreadPool(),
Executors.newCachedThreadPool()))
bootstrap.setPipelineFactory(new ChannelPipelineFactory {
override def getPipeline() = {
val pipeline = Channels.pipeline()
pipeline.addLast("httpCodec", new HttpClientCodec)
pipeline.addLast("recvd", new ChannelUpstreamHandler {
override def handleUpstream(ctx: ChannelHandlerContext, e: ChannelEvent) {
val keep = e match {
case se: ChannelStateEvent =>
se.getState == ChannelState.OPEN
case _: WriteCompletionEvent => false
case _ => true
}
if (keep) recvd ! e
}
})
pipeline
}
})
val connectFuture = bootstrap
.connect(address)
.awaitUninterruptibly()
assert(connectFuture.isSuccess)
val channel = connectFuture.getChannel
// first request is accepted
assert(channel
.write(httpRequest)
.awaitUninterruptibly()
.isSuccess)
messages !! ChannelBuffers.wrappedBuffer("chunk1".getBytes)
assert(Await.result(recvd?, 1.second) match {
case e: ChannelStateEvent =>
e.getState == ChannelState.OPEN && (java.lang.Boolean.TRUE equals e.getValue)
case _ => false
})
assert(Await.result(recvd?, 1.second) match {
case m: MessageEvent =>
m.getMessage match {
case res: HttpResponse => res.isChunked
case _ => false
}
case _ => false
})
assert(Await.result(recvd?, 1.second) match {
case m: MessageEvent =>
m.getMessage match {
case res: HttpChunk => !res.isLast // get "chunk1"
case _ => false
}
case _ => false
})
// The following requests should be ignored
assert(channel
.write(httpRequest)
.awaitUninterruptibly()
.isSuccess)
// the streaming should continue
messages !! ChannelBuffers.wrappedBuffer("chunk2".getBytes)
assert(Await.result(recvd?, 1.second) match {
case m: MessageEvent =>
m.getMessage match {
case res: HttpChunk => !res.isLast // get "chunk2"
case _ => false
}
case _ => false
})
error !! EOF
assert(Await.result(recvd?, 1.second) match {
// Flaky because ChannelEvent can be an ExceptionEvent of
// "java.io.IOException: Connection reset by peer". Uncomment the
// following line to observe.
// case e: ExceptionEvent => throw new Exception(e.getCause)
case m: MessageEvent =>
m.getMessage match {
case res: HttpChunkTrailer => res.isLast
case _ => false
}
case _ => false
})
// And finally it's closed.
assert(Await.result(recvd?, 1.second) match {
case e: ChannelStateEvent =>
e.getState == ChannelState.OPEN && (java.lang.Boolean.FALSE equals e.getValue)
case _ => false
})
bootstrap.releaseExternalResources()
channel.close()
}
test("Streams %s: server ignores channel buffer messages after channel close".format(what)) {
val c = new WorkItContext()
import c._
val (client, address) = mkClient(serverRes)
val clientRes = Await.result(client(httpRequest), 1.second)
var result = ""
val latch = new CountDownLatch(1)
(clientRes.error?) ensure {
Future { latch.countDown() }
}
clientRes.messages foreach { channelBuffer =>
Future {
result += channelBuffer.toString(Charset.defaultCharset)
}
}
FuturePool.unboundedPool {
messages !! ChannelBuffers.wrappedBuffer("12".getBytes)
messages !! ChannelBuffers.wrappedBuffer("23".getBytes)
error !! EOF
messages !! ChannelBuffers.wrappedBuffer("34".getBytes)
}
latch.within(1.second)
assert(result === "1223")
}
}
workIt("straight") { serverRes =>
val server = ServerBuilder()
.codec(new Stream)
.bindTo(new InetSocketAddress(InetAddress.getLoopbackAddress, 0))
.name("Streams")
.build(new MyService(serverRes))
val address = server.boundAddress
val factory = ClientBuilder()
.codec(new Stream)
.hosts(Seq(address))
.hostConnectionLimit(1)
.buildFactory()
val underlying = Await.result(factory())
val client = new ServiceProxy[HttpRequest, StreamResponse](underlying) {
override def close(deadline: Time) =
Closable.all(underlying, server, factory).close(deadline)
}
(client, address)
}
workIt("proxy") { serverRes =>
val server = ServerBuilder()
.codec(new Stream)
.bindTo(new InetSocketAddress(InetAddress.getLoopbackAddress, 0))
.name("streamserver")
.build(new MyService(serverRes))
val serverClient = ClientBuilder()
.codec(new Stream)
.hosts(Seq(server.boundAddress))
.hostConnectionLimit(1)
.build()
val proxy = ServerBuilder()
.codec(new Stream)
.bindTo(new InetSocketAddress(InetAddress.getLoopbackAddress, 0))
.name("streamproxy")
.build(serverClient)
val factory = ClientBuilder()
.codec(new Stream)
.hosts(Seq(proxy.boundAddress))
.hostConnectionLimit(1)
.buildFactory()
val underlying = Await.result(factory())
val client = new ServiceProxy[HttpRequest, StreamResponse](underlying) {
override def close(deadline: Time) =
Closable.all(server, serverClient, proxy, factory).close(deadline)
}
(client, proxy.boundAddress)
}
test("Streams: delay release until complete response") {
@volatile var count: Int = 0
val c = new WorkItContext()
import c.{synchronized => _sync, _}
val server = ServerBuilder()
.codec(new Stream)
.bindTo(new InetSocketAddress(InetAddress.getLoopbackAddress, 0))
.name("Streams")
.build((new MyService(serverRes)) map { r: HttpRequest =>
synchronized { count += 1 }
r
})
val client = ClientBuilder()
.codec(new Stream)
.hosts(Seq(server.boundAddress))
.hostConnectionLimit(1)
.retries(2)
.build()
val res = Await.result(client(httpRequest), 1.second)
assert(count === 1)
val f2 = client(httpRequest)
assert(f2.poll.isEmpty) // because of the host connection limit
messages !! ChannelBuffers.wrappedBuffer("1".getBytes)
assert((res.messages??).toString(Charset.defaultCharset) === "1")
assert(count === 1)
error !! EOF
res.release()
val res2 = Await.result(f2, 1.second)
assert(count === 2)
res2.release()
Closable.all(client, server).close()
}
}
| mosesn/finagle | finagle-stream/src/test/scala/com/twitter/finagle/stream/EndToEndTest.scala | Scala | apache-2.0 | 11,272 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.IOException
import java.util.Locale
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning._
import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoDir, InsertIntoStatement, LogicalPlan, ScriptTransformation, Statistics}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.connector.catalog.CatalogV2Util.assertNoNullTypeInSchema
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.command.{CreateTableCommand, DDLUtils}
import org.apache.spark.sql.execution.datasources.{CreateTable, DataSourceStrategy}
import org.apache.spark.sql.hive.execution._
import org.apache.spark.sql.hive.execution.HiveScriptTransformationExec
import org.apache.spark.sql.internal.HiveSerDe
/**
* Determine the database, serde/format and schema of the Hive serde table, according to the storage
* properties.
*/
class ResolveHiveSerdeTable(session: SparkSession) extends Rule[LogicalPlan] {
private def determineHiveSerde(table: CatalogTable): CatalogTable = {
if (table.storage.serde.nonEmpty) {
table
} else {
if (table.bucketSpec.isDefined) {
throw new AnalysisException("Creating bucketed Hive serde table is not supported yet.")
}
val defaultStorage = HiveSerDe.getDefaultStorage(conf)
val options = new HiveOptions(table.storage.properties)
val fileStorage = if (options.fileFormat.isDefined) {
HiveSerDe.sourceToSerDe(options.fileFormat.get) match {
case Some(s) =>
CatalogStorageFormat.empty.copy(
inputFormat = s.inputFormat,
outputFormat = s.outputFormat,
serde = s.serde)
case None =>
throw new IllegalArgumentException(s"invalid fileFormat: '${options.fileFormat.get}'")
}
} else if (options.hasInputOutputFormat) {
CatalogStorageFormat.empty.copy(
inputFormat = options.inputFormat,
outputFormat = options.outputFormat)
} else {
CatalogStorageFormat.empty
}
val rowStorage = if (options.serde.isDefined) {
CatalogStorageFormat.empty.copy(serde = options.serde)
} else {
CatalogStorageFormat.empty
}
val storage = table.storage.copy(
inputFormat = fileStorage.inputFormat.orElse(defaultStorage.inputFormat),
outputFormat = fileStorage.outputFormat.orElse(defaultStorage.outputFormat),
serde = rowStorage.serde.orElse(fileStorage.serde).orElse(defaultStorage.serde),
properties = options.serdeProperties)
table.copy(storage = storage)
}
}
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case c @ CreateTable(t, _, query) if DDLUtils.isHiveTable(t) =>
// Finds the database name if the name does not exist.
val dbName = t.identifier.database.getOrElse(session.catalog.currentDatabase)
val table = t.copy(identifier = t.identifier.copy(database = Some(dbName)))
// Determines the serde/format of Hive tables
val withStorage = determineHiveSerde(table)
// Infers the schema, if empty, because the schema could be determined by Hive
// serde.
val withSchema = if (query.isEmpty) {
val inferred = HiveUtils.inferSchema(withStorage)
if (inferred.schema.length <= 0) {
throw new AnalysisException("Unable to infer the schema. " +
s"The schema specification is required to create the table ${inferred.identifier}.")
}
inferred
} else {
withStorage
}
c.copy(tableDesc = withSchema)
}
}
class DetermineTableStats(session: SparkSession) extends Rule[LogicalPlan] {
private def hiveTableWithStats(relation: HiveTableRelation): HiveTableRelation = {
val table = relation.tableMeta
val partitionCols = relation.partitionCols
// For partitioned tables, the partition directories may be outside of the table directory,
// which makes computing the table size from the file system expensive. See how this is
// implemented in AnalyzeTable.
val sizeInBytes = if (conf.fallBackToHdfsForStatsEnabled && partitionCols.isEmpty) {
try {
val hadoopConf = session.sessionState.newHadoopConf()
val tablePath = new Path(table.location)
val fs: FileSystem = tablePath.getFileSystem(hadoopConf)
fs.getContentSummary(tablePath).getLength
} catch {
case e: IOException =>
logWarning("Failed to get table size from HDFS.", e)
conf.defaultSizeInBytes
}
} else {
conf.defaultSizeInBytes
}
val stats = Some(Statistics(sizeInBytes = BigInt(sizeInBytes)))
relation.copy(tableStats = stats)
}
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case relation: HiveTableRelation
if DDLUtils.isHiveTable(relation.tableMeta) && relation.tableMeta.stats.isEmpty =>
hiveTableWithStats(relation)
// Handle InsertIntoStatement specially, because the table of InsertIntoStatement is not one of
// its children and is therefore not matched by the preceding HiveTableRelation case.
case i @ InsertIntoStatement(relation: HiveTableRelation, _, _, _, _, _)
if DDLUtils.isHiveTable(relation.tableMeta) && relation.tableMeta.stats.isEmpty =>
i.copy(table = hiveTableWithStats(relation))
}
}
/**
* Replaces generic operations with specific variants that are designed to work with Hive.
*
* Note that this rule must be run after `PreprocessTableCreation` and
* `PreprocessTableInsertion`.
*/
object HiveAnalysis extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case InsertIntoStatement(
r: HiveTableRelation, partSpec, _, query, overwrite, ifPartitionNotExists)
if DDLUtils.isHiveTable(r.tableMeta) =>
InsertIntoHiveTable(r.tableMeta, partSpec, query, overwrite,
ifPartitionNotExists, query.output.map(_.name))
case CreateTable(tableDesc, mode, None) if DDLUtils.isHiveTable(tableDesc) =>
CreateTableCommand(tableDesc, ignoreIfExists = mode == SaveMode.Ignore)
case CreateTable(tableDesc, mode, Some(query))
if DDLUtils.isHiveTable(tableDesc) && query.resolved =>
CreateHiveTableAsSelectCommand(tableDesc, query, query.output.map(_.name), mode)
case InsertIntoDir(isLocal, storage, provider, child, overwrite)
if DDLUtils.isHiveTable(provider) && child.resolved =>
val outputPath = new Path(storage.locationUri.get)
if (overwrite) DDLUtils.verifyNotReadPath(child, outputPath)
InsertIntoHiveDirCommand(isLocal, storage, child, overwrite, child.output.map(_.name))
}
}
/**
* Relation conversion from metastore relations to data source relations for better performance
*
* - When writing to non-partitioned Hive-serde Parquet/Orc tables
* - When scanning Hive-serde Parquet/ORC tables
*
* This rule must be run before all other DDL post-hoc resolution rules, i.e.
* `PreprocessTableCreation`, `PreprocessTableInsertion`, `DataSourceAnalysis` and `HiveAnalysis`.
*/
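// Illustrative example (assuming default settings): with spark.sql.hive.convertMetastoreParquet=true,
// a scan of a Hive table created via `CREATE TABLE t (i INT) STORED AS PARQUET` is planned with
// Spark's native Parquet reader instead of the Hive SerDe path; the analogous ORC flag is
// spark.sql.hive.convertMetastoreOrc.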
case class RelationConversions(
sessionCatalog: HiveSessionCatalog) extends Rule[LogicalPlan] {
private def isConvertible(relation: HiveTableRelation): Boolean = {
isConvertible(relation.tableMeta)
}
private def isConvertible(tableMeta: CatalogTable): Boolean = {
val serde = tableMeta.storage.serde.getOrElse("").toLowerCase(Locale.ROOT)
serde.contains("parquet") && conf.getConf(HiveUtils.CONVERT_METASTORE_PARQUET) ||
serde.contains("orc") && conf.getConf(HiveUtils.CONVERT_METASTORE_ORC)
}
private val metastoreCatalog = sessionCatalog.metastoreCatalog
override def apply(plan: LogicalPlan): LogicalPlan = {
plan resolveOperators {
// Write path
case InsertIntoStatement(
r: HiveTableRelation, partition, cols, query, overwrite, ifPartitionNotExists)
if query.resolved && DDLUtils.isHiveTable(r.tableMeta) &&
(!r.isPartitioned || conf.getConf(HiveUtils.CONVERT_INSERTING_PARTITIONED_TABLE))
&& isConvertible(r) =>
InsertIntoStatement(metastoreCatalog.convert(r), partition, cols,
query, overwrite, ifPartitionNotExists)
// Read path
case relation: HiveTableRelation
if DDLUtils.isHiveTable(relation.tableMeta) && isConvertible(relation) =>
metastoreCatalog.convert(relation)
// CTAS
case CreateTable(tableDesc, mode, Some(query))
if query.resolved && DDLUtils.isHiveTable(tableDesc) &&
tableDesc.partitionColumnNames.isEmpty && isConvertible(tableDesc) &&
conf.getConf(HiveUtils.CONVERT_METASTORE_CTAS) =>
// validation is required to be done here before relation conversion.
DDLUtils.checkDataColNames(tableDesc.copy(schema = query.schema))
// This is for CREATE TABLE .. STORED AS PARQUET/ORC AS SELECT null
assertNoNullTypeInSchema(query.schema)
OptimizedCreateHiveTableAsSelectCommand(
tableDesc, query, query.output.map(_.name), mode)
}
}
}
private[hive] trait HiveStrategies {
// Possibly being too clever with types here... or not clever enough.
self: SparkPlanner =>
val sparkSession: SparkSession
object HiveScripts extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case ScriptTransformation(script, output, child, ioschema) =>
val hiveIoSchema = ScriptTransformationIOSchema(ioschema)
HiveScriptTransformationExec(script, output, planLater(child), hiveIoSchema) :: Nil
case _ => Nil
}
}
/**
* Retrieves data using a HiveTableScan. Partition pruning predicates are also detected and
* applied.
*/
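// Illustrative example: for a table partitioned by `dt`, a predicate such as `dt = '2021-01-01'`
// is handed to HiveTableScanExec for partition pruning, while predicates on non-partition columns
// remain in the surrounding Filter produced by pruneFilterProject.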
object HiveTableScans extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case ScanOperation(projectList, filters, relation: HiveTableRelation) =>
// Filter out all predicates that only deal with partition keys, these are given to the
// hive table scan operator to be used for partition pruning.
val partitionKeyIds = AttributeSet(relation.partitionCols)
val normalizedFilters = DataSourceStrategy.normalizeExprs(
filters.filter(_.deterministic), relation.output)
val partitionKeyFilters = DataSourceStrategy.getPushedDownFilters(relation.partitionCols,
normalizedFilters)
pruneFilterProject(
projectList,
filters.filter(f => f.references.isEmpty || !f.references.subsetOf(partitionKeyIds)),
identity[Seq[Expression]],
HiveTableScanExec(_, relation, partitionKeyFilters.toSeq)(sparkSession)) :: Nil
case _ =>
Nil
}
}
}
| maropu/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala | Scala | apache-2.0 | 11,720 |
package com.faacets
package object families {
  def write(): Unit = {
Acin2004.write
CGLMP2002.write
AcinCGLMP2005.write
CollinsGisin2004.write
Grandjean2012.write
Mermin1990.write
Sliwa2003.write
}
}
| denisrosset/faacets-families | src/main/scala/families/package.scala | Scala | bsd-3-clause | 222 |
package de.csmath.scalog
import de.csmath.scalog.Types._
import de.csmath.scalog.substitution.Substitution
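/**
 * Doc sketch added for clarity (not part of the original file): syntactic
 * unification of two terms, returning a unifying substitution when one exists.
 *
 * Illustrative example, assuming `Var` wraps a variable name and `Struct` takes a
 * functor name with its argument list:
 * {{{
 * Unifier(Struct("f", List(Var("X"))), Struct("f", List(Struct("a", Nil))))
 * // ==> Some(substitution mapping Var("X") to Struct("a", Nil))
 * }}}
 */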
object Unifier {
def apply(t1: Term, t2: Term): Option[Substitution] = (t1,t2) match {
case (Var(_),_) if t1 == t2 =>
Some(Substitution())
case (Var(x),Var(_)) =>
Some(Substitution(Map(Var(x) -> t2)))
case (v@Var(x),_) if notOccurs(v,t2) =>
Some(Substitution(Map(Var(x) -> t2)))
case (x,v@Var(_)) if notOccurs(v,t1) =>
Some(Substitution(Map(v -> t1)))
case (_: Const,_) if t1 == t2 =>
Some(Substitution())
case (PlNil,PlNil) =>
Some(Substitution())
case (Struct(f1,elems1),Struct(f2,elems2)) if f1 == f2 =>
apply(elems1,elems2)
case (PlCons(head1,tail1),PlCons(head2,tail2)) =>
apply(List(head1,tail1),List(head2,tail2))
case _ =>
None
}
def apply(terms1: List[Term], terms2: List[Term]): Option[Substitution] = (terms1,terms2) match {
case (Nil,Nil) => Some(Substitution())
case (x :: tail1, y :: tail2) =>
val sub1 = apply(x,y)
if (sub1.isDefined) {
val sub2 = apply(tail1.map(sub1.get(_)), tail2.map(sub1.get(_)))
if (sub2.isDefined)
Some(sub2.get compose sub1.get)
else
None
} else
None
case _ =>
None
}
private def notOccurs(variable: Var, term: Term): Boolean = term match {
case v@Var(_) => v != variable
case Struct(_,terms) =>
terms.forall(notOccurs(variable,_))
case _ => true
}
}
| lpcsmath/scalog | src/main/scala/de/csmath/scalog/Unifier.scala | Scala | bsd-2-clause | 1,512 |
// scalac: -Werror -Wunused:nowarn -Xsource:3
//
class C extends java.lang.CharSequence {
def charAt(x$1: Int): Char = ???
def length: Int = ???
def subSequence(x$1: Int, x$2: Int): CharSequence = ???
}
// Welcome to the Happy J
class J { override def toString = "Happy J" }
| scala/scala | test/files/pos/nullary-override-3.scala | Scala | apache-2.0 | 282 |
package eu.phisikus.plotka.framework.fsm
import scala.beans.BeanProperty
/**
* It represents a finite-state machine (Mealy machine)
*/
trait StateMachine {
/**
   * Call this method to make the state machine aware of a new event.
*
* @param event event to be published
*/
def push(event: Event): Unit
/**
* @return current state of the machine
*/
@BeanProperty def currentState: State
}
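/**
 * Illustrative sketch (editor's addition, not part of the original API): an
 * implementation typically derives the next state from the current state and the
 * pushed event, along the lines of
 * {{{
 * class SimpleMachine(initial: State) extends StateMachine {
 *   @volatile private var state: State = initial
 *   override def push(event: Event): Unit =
 *     state = nextState(state, event) // nextState is an assumed transition function
 *   override def currentState: State = state
 * }
 * }}}
 */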
| phisikus/plotka | framework/src/main/scala/eu/phisikus/plotka/framework/fsm/StateMachine.scala | Scala | bsd-3-clause | 423 |
package io.github.marad.swt.builder
import org.eclipse.swt.SWT
import org.eclipse.swt.widgets._
import scala.util.DynamicVariable
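/**
 * Doc sketch added for clarity (not part of the original file): factory methods for
 * SWT widgets; the parent Composite of every created control is taken from the
 * surrounding `context` DynamicVariable supplied by the builder. Illustrative use,
 * inside a builder scope where `context` holds the parent:
 * {{{
 * val ok = button()
 * ok.setText("OK")
 * }}}
 */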
trait Controls {
protected def context: DynamicVariable[Composite]
def label() : Label = new Label(context.value, SWT.NONE)
def label(style:Int) : Label = new Label(context.value, style)
def button() : Button = new Button(context.value, SWT.PUSH)
def button(style:Int) : Button = new Button(context.value, style)
def radio() : Button = new Button(context.value, SWT.RADIO)
def checkbox() : Button = new Button(context.value, SWT.CHECK)
def edit() : Text = new Text(context.value, SWT.SINGLE)
def edit(style:Int) : Text = new Text(context.value, style)
def spinner() : Spinner = new Spinner(context.value, SWT.NONE)
def spinner(style:Int) : Spinner = new Spinner(context.value, style)
def composite() : Composite = new Composite(context.value, SWT.NONE)
def composite(style:Int) : Composite = new Composite(context.value, style)
def group() : Group = new Group(context.value, SWT.NONE)
def group(style:Int) : Group = new Group(context.value, style)
def canvas() : Canvas = new Canvas(context.value, SWT.NONE)
def canvas(style:Int) : Canvas = new Canvas(context.value, style)
}
| marad/scala-swt-builder | src/main/scala/io/github/marad/swt/builder/Controls.scala | Scala | lgpl-3.0 | 1,244 |
/**
* Copyright (c) 2015, Cloudera, Inc. All Rights Reserved.
*
* Cloudera, Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"). You may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for
* the specific language governing permissions and limitations under the
* License.
*/
package com.cloudera.sparkts
import java.nio.ByteBuffer
import java.time._
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.mllib.linalg.{DenseVector, Vector}
import org.apache.spark.api.java.function.{PairFunction, Function}
import PythonConnector._
/**
* This file contains utilities used by the spark-timeseries Python bindings to communicate with
* the JVM. BytesToKeyAndSeries and KeyAndSeriesToBytes write and read bytes in the format
* read and written by the Python TimeSeriesSerializer class.
*/
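// Byte layout, inferred from the readers/writers below and noted here for clarity:
//   key/series record: [key length: Int][key bytes: UTF-8][vector size: Int][values: Double * size]
//   instant record:    [timestamp: Long][vector size: Int][values: Double * size]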
private object PythonConnector {
val INT_SIZE = 4
val DOUBLE_SIZE = 8
val LONG_SIZE = 8
def putVector(buf: ByteBuffer, vec: Vector): Unit = {
buf.putInt(vec.size)
var i = 0
while (i < vec.size) {
buf.putDouble(vec(i))
i += 1
}
}
def arrayListToSeq(list: java.util.ArrayList[Any]): Seq[Any] = {
    // Copy the Java list into a Scala ArrayBuffer (empty when the list is null).
var result = ArrayBuffer[Any]()
if (list != null) {
result = ArrayBuffer[Any](list.toArray: _*)
}
result
}
}
private class BytesToKeyAndSeries extends PairFunction[Array[Byte], String, Vector] {
override def call(arr: Array[Byte]): (String, Vector) = {
val buf = ByteBuffer.wrap(arr)
val keySize = buf.getInt()
val keyBytes = new Array[Byte](keySize)
buf.get(keyBytes)
val seriesSize = buf.getInt()
val series = new Array[Double](seriesSize)
var i = 0
while (i < seriesSize) {
series(i) = buf.getDouble()
i += 1
}
(new String(keyBytes, "UTF8"), new DenseVector(series))
}
}
private class KeyAndSeriesToBytes extends Function[(String, Vector), Array[Byte]] {
override def call(keyVec: (String, Vector)): Array[Byte] = {
val keyBytes = keyVec._1.getBytes("UTF-8")
val vec = keyVec._2
val arr = new Array[Byte](INT_SIZE + keyBytes.length + INT_SIZE + DOUBLE_SIZE * vec.size)
val buf = ByteBuffer.wrap(arr)
buf.putInt(keyBytes.length)
buf.put(keyBytes)
putVector(buf, vec)
arr
}
}
private class InstantToBytes extends Function[(ZonedDateTime, Vector), Array[Byte]] {
override def call(instant: (ZonedDateTime, Vector)): Array[Byte] = {
val arr = new Array[Byte](LONG_SIZE + INT_SIZE + DOUBLE_SIZE * instant._2.size)
val buf = ByteBuffer.wrap(arr)
buf.putLong(TimeSeriesUtils.zonedDateTimeToLong(instant._1))
putVector(buf, instant._2)
arr
}
}
| cloudera/spark-timeseries | src/main/scala/com/cloudera/sparkts/PythonConnector.scala | Scala | apache-2.0 | 2,964 |
class Hi(name: String) {
def hi = "Hi "+name+"!"
}
| grzegorzbalcerek/scala-book-examples | examples/TraitParams1.scala | Scala | mit | 53 |
package blended.updater.config
import org.scalatest.FreeSpec
import org.scalatest.prop.PropertyChecks
class ProfileSpec extends FreeSpec with PropertyChecks {
import TestData._
"Profile maps to SingleProfile" in {
forAll { profile: ProfileGroup =>
val singles = profile.toSingle
assert(singles.size === profile.overlays.size)
assert(ProfileGroup.fromSingleProfiles(singles) === List(profile))
}
}
"SingleProfile maps to Profile" in {
forAll { singles: Seq[Profile] =>
val profiles = ProfileGroup.fromSingleProfiles(singles)
assert(profiles.flatMap(_.toSingle).toSet === singles.toSet)
}
}
}
| lefou/blended | blended.updater.config/shared/src/test/scala/blended/updater/config/ProfileSpec.scala | Scala | apache-2.0 | 653 |
package examples
object properConstantFunction extends App {
// https://apocalisp.wordpress.com/2010/04/21/a-proper-constant-function-in-scala/
def const[A, B](a: A) = (b: B) => a
// problems
// 1. b is always evaluated
// 2. it is not properly quantified
// > const(7)(error("too strict"))
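  // A hedged sketch (editor's addition, not necessarily the referenced post's fix):
  // taking the discarded argument by name avoids evaluating it, so a call like
  //   constByName(7)(sys.error("never evaluated")) == 7
  // does not blow up the way const does above.
  def constByName[A](a: A)(b: => Any): A = a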
type Id[A] = A
val x = 10
def xToString = x.toString
}
| adilakhter/scalaznoob | src/main/scala/examples/properConstantFunction.scala | Scala | apache-2.0 | 375 |
package remote
import java.net.URL
import java.net.HttpURLConnection
import java.util.Scanner
object `package` {
def get(url: String): String =
try {
val conn =
(new URL(url)).openConnection.asInstanceOf[HttpURLConnection]
conn.setInstanceFollowRedirects(false)
conn.setRequestMethod("GET")
conn.setDoOutput(false)
val s = new java.util.Scanner(conn.getInputStream)
        .useDelimiter("""\A""")
val response = if (s.hasNext()) s.next() else ""
conn.getInputStream.close
response
} catch {
case e: Exception => ""
}
}
| earldouglas/xsbt-web-plugin | src/sbt-test/container/multi-module-multi-webapp/remoteweb/src/main/scala/remote.scala | Scala | bsd-3-clause | 600 |
package chandu0101.scalajs.react.components.materialui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import materialui.Mui
import scala.scalajs.js
case class MuiRaisedButton(onBlur: js.UndefOr[ReactEventH => Unit] = js.undefined,
labelStyle: js.UndefOr[js.Any] = js.undefined,
onTouchStart: js.UndefOr[ReactEventH => Unit] = js.undefined,
rippleColor: js.UndefOr[String] = js.undefined,
style: js.UndefOr[js.Any] = js.undefined,
label: js.UndefOr[String] = js.undefined,
ref: js.UndefOr[String] = js.undefined,
secondary: js.UndefOr[Boolean] = js.undefined,
onMouseUp: js.UndefOr[ReactEventH => Unit] = js.undefined,
onTouchEnd: js.UndefOr[ReactEventH => Unit] = js.undefined,
key: js.UndefOr[String] = js.undefined,
onMouseOver: js.UndefOr[ReactEventH => Unit] = js.undefined,
linkButton: js.UndefOr[Boolean] = js.undefined,
onTouchTap: js.UndefOr[ReactEventH => Unit] = js.undefined,
className: js.UndefOr[String] = js.undefined,
onMouseOut: js.UndefOr[ReactEventH => Unit] = js.undefined,
hoverColor: js.UndefOr[String] = js.undefined,
onFocus: js.UndefOr[ReactEventH => Unit] = js.undefined,
disabled: js.UndefOr[Boolean] = js.undefined,
href: js.UndefOr[String] = js.undefined,
primary: js.UndefOr[Boolean] = js.undefined,
onMouseDown: js.UndefOr[ReactEventH => Unit] = js.undefined) {
def apply(children: ReactNode*) = {
val props = JSMacro[MuiRaisedButton](this)
val f = React.asInstanceOf[js.Dynamic].createFactory(Mui.RaisedButton)
f(props, children.toJsArray).asInstanceOf[ReactComponentU_]
}
}
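// Illustrative usage sketch (editor's addition): every prop is optional, so a call
// site typically sets only a few of them, e.g.
//   MuiRaisedButton(label = "Save", primary = true, onTouchTap = handleTap)()
// where `handleTap` is an assumed ReactEventH => Unit handler.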
| mproch/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/materialui/MuiRaisedButton.scala | Scala | apache-2.0 | 2,117 |
package com.telrikk.archivist.storage
case class Parent (`type`: String)
| Telrikk/Archivist | src/main/scala/com.telrikk/archivist/storage/Parent.scala | Scala | mit | 74 |
package playground
import org.scalatest.FunSuite
class FactorialPlaygroundTests extends FunSuite {
  test("Factorial with tail recursion of 3 should return 6"){
assert(new FactorialPlayground().fact(3) == 6)
}
  test("Factorial without tail recursion of 3 should return 6"){
assert(new FactorialPlayground().factNoTailRecursion(3) == 6)
}
} | sagasu/scalaPlayground | sbt/src/test/scala-2.11/playground/FactorialPlaygroundTests.scala | Scala | apache-2.0 | 340 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicemix.itests
import org.apache.camel.impl.DefaultProducerTemplate
import org.apache.camel.{ProducerTemplate, CamelContext}
/**
* Provides access to a Camel context and producer to use for integration testing.
*/
trait CamelTestSupport extends Await {
def camelContext = await(CamelContextHolder.context)
lazy val camelProducer : ProducerTemplate = {
val producer = new DefaultProducerTemplate(camelContext.getOrElse(throw new RuntimeException("Gave up waiting for a CamelContext")))
producer.start()
producer
}
/**
* Convenience method to perform a Camel request and return a String
*/
def requestString(url: String) : String = camelProducer.requestBody(url, null, classOf[String])
}
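// Illustrative usage (editor's addition): a test mixing in this trait can issue a
// request against any Camel endpoint URI once the context is available, e.g.
//   requestString("direct:someTestEndpoint")  // endpoint name is hypothetical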
/**
* Singleton object that gets a CamelContext injected through Blueprint
*/
object CamelContextHolder {
var context: Option[CamelContext] = None
def apply(c: CamelContext) = {
context = Option(c)
context
}
}
| mohanaraosv/servicemix | itests/src/test/scala/org/apache/servicemix/itests/CamelTestSupport.scala | Scala | apache-2.0 | 1,778 |
package com.twitter.server.handler
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.Service
import com.twitter.finagle.http.{Request, Response, Status, Uri}
import com.twitter.io.Buf
import com.twitter.jvm.CpuProfile
import com.twitter.server.util.HttpUtils.newResponse
import com.twitter.util.logging.Logger
import com.twitter.util.{Duration, Future, Return, Throw}
import java.io.ByteArrayOutputStream
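/**
 * Doc sketch added for clarity (not from the original source): collects a CPU
 * profile of threads in the given state and returns it as a pprof "raw" profile.
 * Sampling is tuned via the query parameters `seconds` (pause duration, default 10)
 * and `hz` (sampling frequency, default 100), e.g. `?seconds=5&hz=200`.
 */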
class ProfileResourceHandler(which: Thread.State) extends Service[Request, Response] {
private[this] val log = Logger[ProfileResourceHandler]
case class Params(pause: Duration, frequency: Int)
def apply(req: Request): Future[Response] = {
val uri = Uri.fromRequest(req)
val params = uri.params.foldLeft(Params(10.seconds, 100)) {
case (parameters, ("seconds", pauseVal)) =>
parameters.copy(pause = pauseVal.toInt.seconds)
case (parameters, ("hz", hz)) =>
parameters.copy(frequency = hz.toInt)
case (parameters, _) =>
parameters
}
log.info(
s"[${req.uri}] collecting CPU profile ($which) for ${params.pause} seconds at ${params.frequency}Hz"
)
CpuProfile.recordInThread(params.pause, params.frequency, which) transform {
case Return(prof) =>
// Write out the profile verbatim. It's a pprof "raw" profile.
val bos = new ByteArrayOutputStream
prof.writeGoogleProfile(bos)
newResponse(
contentType = "pprof/raw",
content = Buf.ByteArray.Owned(bos.toByteArray)
)
case Throw(exc) =>
newResponse(
status = Status.InternalServerError,
contentType = "text/plain;charset=UTF-8",
content = Buf.Utf8(exc.toString)
)
}
}
}
| twitter/twitter-server | server/src/main/scala/com/twitter/server/handler/ProfileResourceHandler.scala | Scala | apache-2.0 | 1,747 |
package io.gatling
import io.gatling.core.check.CheckBuilder
import io.gatling.tcp.check.TcpCheck
package object tcp {
type TcpCheckBuilder = CheckBuilder[TcpCheck, String, _, String]
}
| snripa/gatling-tcp-extensions | src/main/scala/io/gatling/tcp/package.scala | Scala | mit | 190 |
package edu.rice.habanero.actors
import java.util.concurrent.atomic.AtomicBoolean
import edu.rice.hj.runtime.util.ModCountDownLatch
import scala.actors.Actor
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> ([email protected])
*/
object ScalaActorState {
val actorLatch = new ModCountDownLatch(0)
def awaitTermination() {
try {
actorLatch.await()
} catch {
case ex: InterruptedException => {
ex.printStackTrace()
}
}
}
}
class ScalaActor[MsgType] extends Actor {
private val startTracker = new AtomicBoolean(false)
private val exitTracker = new AtomicBoolean(false)
final def act() {
loop {
react {
case msg: Any =>
process(msg.asInstanceOf[MsgType])
}
}
}
def process(msg: MsgType): Unit = {
throw new IllegalStateException("Must be overridden in child class")
}
def send(msg: MsgType) {
this ! msg
}
final def hasStarted() = {
startTracker.get()
}
override final def start() = {
if (!hasStarted()) {
ScalaActorState.actorLatch.updateCount()
onPreStart()
val res = super.start()
onPostStart()
startTracker.set(true)
res
}
this
}
/**
* Convenience: specify code to be executed before actor is started
*/
protected def onPreStart() = {
}
/**
* Convenience: specify code to be executed after actor is started
*/
protected def onPostStart() = {
}
final def hasExited() = {
exitTracker.get()
}
override final def exit() = {
val success = exitTracker.compareAndSet(false, true)
if (success) {
onPreExit()
onPostExit()
ScalaActorState.actorLatch.countDown()
}
super.exit()
}
/**
* Convenience: specify code to be executed before actor is terminated
*/
protected def onPreExit() = {
}
/**
* Convenience: specify code to be executed after actor is terminated
*/
protected def onPostExit() = {
}
}
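/**
 * Illustrative sketch (editor's addition, not part of the benchmark sources): a
 * concrete actor overrides `process` and calls `exit()` when finished, which counts
 * down the latch awaited by `ScalaActorState.awaitTermination()`.
 * {{{
 * class EchoActor extends ScalaActor[String] {
 *   override def process(msg: String): Unit = {
 *     println(msg)
 *     if (msg == "stop") exit()
 *   }
 * }
 * }}}
 */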
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/actors/ScalaActor.scala | Scala | gpl-2.0 | 1,986 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.sql.{Date, Timestamp}
import scala.collection.immutable.HashSet
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.RandomDataGenerator
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.ExamplePointUDT
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenContext
import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData}
import org.apache.spark.sql.types._
class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
private def booleanLogicTest(
name: String,
op: (Expression, Expression) => Expression,
truthTable: Seq[(Any, Any, Any)]) {
test(s"3VL $name") {
truthTable.foreach {
case (l, r, answer) =>
val expr = op(NonFoldableLiteral.create(l, BooleanType),
NonFoldableLiteral.create(r, BooleanType))
checkEvaluation(expr, answer)
}
}
}
// scalastyle:off
/**
   * Checks for three-valued logic. Based on:
* http://en.wikipedia.org/wiki/Null_(SQL)#Comparisons_with_NULL_and_the_three-valued_logic_.283VL.29
* I.e. in flat cpo "False -> Unknown -> True",
* OR is lowest upper bound,
* AND is greatest lower bound.
* p q p OR q p AND q p = q
* True True True True True
* True False True False False
* True Unknown True Unknown Unknown
* False True True False False
* False False False False True
* False Unknown Unknown False Unknown
* Unknown True True Unknown Unknown
* Unknown False Unknown False Unknown
* Unknown Unknown Unknown Unknown Unknown
*
* p NOT p
* True False
* False True
* Unknown Unknown
*/
// scalastyle:on
test("3VL Not") {
val notTrueTable =
(true, false) ::
(false, true) ::
(null, null) :: Nil
notTrueTable.foreach { case (v, answer) =>
checkEvaluation(Not(NonFoldableLiteral.create(v, BooleanType)), answer)
}
checkConsistencyBetweenInterpretedAndCodegen(Not, BooleanType)
}
test("AND, OR, EqualTo, EqualNullSafe consistency check") {
checkConsistencyBetweenInterpretedAndCodegen(And, BooleanType, BooleanType)
checkConsistencyBetweenInterpretedAndCodegen(Or, BooleanType, BooleanType)
DataTypeTestUtils.propertyCheckSupported.foreach { dt =>
checkConsistencyBetweenInterpretedAndCodegen(EqualTo, dt, dt)
checkConsistencyBetweenInterpretedAndCodegen(EqualNullSafe, dt, dt)
}
}
booleanLogicTest("AND", And,
(true, true, true) ::
(true, false, false) ::
(true, null, null) ::
(false, true, false) ::
(false, false, false) ::
(false, null, false) ::
(null, true, null) ::
(null, false, false) ::
(null, null, null) :: Nil)
booleanLogicTest("OR", Or,
(true, true, true) ::
(true, false, true) ::
(true, null, true) ::
(false, true, true) ::
(false, false, false) ::
(false, null, null) ::
(null, true, true) ::
(null, false, null) ::
(null, null, null) :: Nil)
booleanLogicTest("=", EqualTo,
(true, true, true) ::
(true, false, false) ::
(true, null, null) ::
(false, true, false) ::
(false, false, true) ::
(false, null, null) ::
(null, true, null) ::
(null, false, null) ::
(null, null, null) :: Nil)
test("basic IN predicate test") {
checkEvaluation(In(NonFoldableLiteral.create(null, IntegerType), Seq(Literal(1),
Literal(2))), null)
checkEvaluation(In(NonFoldableLiteral.create(null, IntegerType),
Seq(NonFoldableLiteral.create(null, IntegerType))), null)
checkEvaluation(In(NonFoldableLiteral.create(null, IntegerType), Seq.empty), null)
checkEvaluation(In(Literal(1), Seq.empty), false)
checkEvaluation(In(Literal(1), Seq(NonFoldableLiteral.create(null, IntegerType))), null)
checkEvaluation(In(Literal(1), Seq(Literal(1), NonFoldableLiteral.create(null, IntegerType))),
true)
checkEvaluation(In(Literal(2), Seq(Literal(1), NonFoldableLiteral.create(null, IntegerType))),
null)
checkEvaluation(In(Literal(1), Seq(Literal(1), Literal(2))), true)
checkEvaluation(In(Literal(2), Seq(Literal(1), Literal(2))), true)
checkEvaluation(In(Literal(3), Seq(Literal(1), Literal(2))), false)
checkEvaluation(
And(In(Literal(1), Seq(Literal(1), Literal(2))), In(Literal(2), Seq(Literal(1),
Literal(2)))),
true)
val ns = NonFoldableLiteral.create(null, StringType)
checkEvaluation(In(ns, Seq(Literal("1"), Literal("2"))), null)
checkEvaluation(In(ns, Seq(ns)), null)
checkEvaluation(In(Literal("a"), Seq(ns)), null)
checkEvaluation(In(Literal("^Ba*n"), Seq(Literal("^Ba*n"), ns)), true)
checkEvaluation(In(Literal("^Ba*n"), Seq(Literal("aa"), Literal("^Ba*n"))), true)
checkEvaluation(In(Literal("^Ba*n"), Seq(Literal("aa"), Literal("^n"))), false)
}
test("IN with different types") {
def testWithRandomDataGeneration(dataType: DataType, nullable: Boolean): Unit = {
val maybeDataGen = RandomDataGenerator.forType(dataType, nullable = nullable)
// Actually we won't pass in unsupported data types, this is a safety check.
val dataGen = maybeDataGen.getOrElse(
fail(s"Failed to create data generator for type $dataType"))
val inputData = Seq.fill(10) {
val value = dataGen.apply()
def cleanData(value: Any) = value match {
case d: Double if d.isNaN => 0.0d
case f: Float if f.isNaN => 0.0f
case _ => value
}
value match {
case s: Seq[_] => s.map(cleanData(_))
case m: Map[_, _] =>
val pair = m.unzip
val newKeys = pair._1.map(cleanData(_))
val newValues = pair._2.map(cleanData(_))
newKeys.zip(newValues).toMap
case _ => cleanData(value)
}
}
val input = inputData.map(NonFoldableLiteral.create(_, dataType))
val expected = if (inputData(0) == null) {
null
} else if (inputData.slice(1, 10).contains(inputData(0))) {
true
} else if (inputData.slice(1, 10).contains(null)) {
null
} else {
false
}
checkEvaluation(In(input(0), input.slice(1, 10)), expected)
}
val atomicTypes = DataTypeTestUtils.atomicTypes.filter { t =>
RandomDataGenerator.forType(t).isDefined && !t.isInstanceOf[DecimalType]
} ++ Seq(DecimalType.USER_DEFAULT)
val atomicArrayTypes = atomicTypes.map(ArrayType(_, containsNull = true))
// Basic types:
for (
dataType <- atomicTypes;
nullable <- Seq(true, false)) {
testWithRandomDataGeneration(dataType, nullable)
}
// Array types:
for (
arrayType <- atomicArrayTypes;
nullable <- Seq(true, false)
if RandomDataGenerator.forType(arrayType.elementType, arrayType.containsNull).isDefined) {
testWithRandomDataGeneration(arrayType, nullable)
}
// Struct types:
for (
colOneType <- atomicTypes;
colTwoType <- atomicTypes;
nullable <- Seq(true, false)) {
val structType = StructType(
StructField("a", colOneType) :: StructField("b", colTwoType) :: Nil)
testWithRandomDataGeneration(structType, nullable)
}
// Map types: not supported
for (
keyType <- atomicTypes;
valueType <- atomicTypes;
nullable <- Seq(true, false)) {
val mapType = MapType(keyType, valueType)
val e = intercept[Exception] {
testWithRandomDataGeneration(mapType, nullable)
}
if (e.getMessage.contains("Code generation of")) {
        // If the `value` expression is null, `eval` will be short-circuited;
        // the codegen version will then be the one that actually runs.
assert(e.getMessage.contains("cannot generate equality code for un-comparable type"))
} else {
assert(e.getMessage.contains("Exception evaluating"))
}
}
}
test("SPARK-22501: In should not generate codes beyond 64KB") {
val N = 3000
val sets = (1 to N).map(i => Literal(i.toDouble))
checkEvaluation(In(Literal(1.0D), sets), true)
}
test("SPARK-22705: In should use less global variables") {
val ctx = new CodegenContext()
In(Literal(1.0D), Seq(Literal(1.0D), Literal(2.0D))).genCode(ctx)
assert(ctx.inlinedMutableStates.isEmpty)
}
test("INSET") {
val hS = HashSet[Any]() + 1 + 2
val nS = HashSet[Any]() + 1 + 2 + null
val one = Literal(1)
val two = Literal(2)
val three = Literal(3)
val nl = Literal(null)
checkEvaluation(InSet(one, hS), true)
checkEvaluation(InSet(two, hS), true)
checkEvaluation(InSet(two, nS), true)
checkEvaluation(InSet(three, hS), false)
checkEvaluation(InSet(three, nS), null)
checkEvaluation(InSet(nl, hS), null)
checkEvaluation(InSet(nl, nS), null)
val primitiveTypes = Seq(IntegerType, FloatType, DoubleType, StringType, ByteType, ShortType,
LongType, BinaryType, BooleanType, DecimalType.USER_DEFAULT, TimestampType)
primitiveTypes.foreach { t =>
val dataGen = RandomDataGenerator.forType(t, nullable = true).get
val inputData = Seq.fill(10) {
val value = dataGen.apply()
value match {
case d: Double if d.isNaN => 0.0d
case f: Float if f.isNaN => 0.0f
case _ => value
}
}
val input = inputData.map(Literal(_))
val expected = if (inputData(0) == null) {
null
} else if (inputData.slice(1, 10).contains(inputData(0))) {
true
} else if (inputData.slice(1, 10).contains(null)) {
null
} else {
false
}
checkEvaluation(InSet(input(0), inputData.slice(1, 10).toSet), expected)
}
}
private case class MyStruct(a: Long, b: String)
private case class MyStruct2(a: MyStruct, b: Array[Int])
private val udt = new ExamplePointUDT
private val smallValues =
Seq(1.toByte, 1.toShort, 1, 1L, Decimal(1), Array(1.toByte), Date.valueOf("2000-01-01"),
new Timestamp(1), "a", 1f, 1d, 0f, 0d, false, Array(1L, 2L))
.map(Literal(_)) ++ Seq(Literal.create(MyStruct(1L, "b")),
Literal.create(MyStruct2(MyStruct(1L, "a"), Array(1, 1))),
Literal.create(ArrayData.toArrayData(Array(1.0, 2.0)), udt))
private val largeValues =
Seq(2.toByte, 2.toShort, 2, 2L, Decimal(2), Array(2.toByte), Date.valueOf("2000-01-02"),
new Timestamp(2), "b", 2f, 2d, Float.NaN, Double.NaN, true, Array(2L, 1L))
.map(Literal(_)) ++ Seq(Literal.create(MyStruct(2L, "b")),
Literal.create(MyStruct2(MyStruct(1L, "a"), Array(1, 2))),
Literal.create(ArrayData.toArrayData(Array(1.0, 3.0)), udt))
private val equalValues1 =
Seq(1.toByte, 1.toShort, 1, 1L, Decimal(1), Array(1.toByte), Date.valueOf("2000-01-01"),
new Timestamp(1), "a", 1f, 1d, Float.NaN, Double.NaN, true, Array(1L, 2L))
.map(Literal(_)) ++ Seq(Literal.create(MyStruct(1L, "b")),
Literal.create(MyStruct2(MyStruct(1L, "a"), Array(1, 1))),
Literal.create(ArrayData.toArrayData(Array(1.0, 2.0)), udt))
private val equalValues2 =
Seq(1.toByte, 1.toShort, 1, 1L, Decimal(1), Array(1.toByte), Date.valueOf("2000-01-01"),
new Timestamp(1), "a", 1f, 1d, Float.NaN, Double.NaN, true, Array(1L, 2L))
.map(Literal(_)) ++ Seq(Literal.create(MyStruct(1L, "b")),
Literal.create(MyStruct2(MyStruct(1L, "a"), Array(1, 1))),
Literal.create(ArrayData.toArrayData(Array(1.0, 2.0)), udt))
test("BinaryComparison consistency check") {
DataTypeTestUtils.ordered.foreach { dt =>
checkConsistencyBetweenInterpretedAndCodegen(LessThan, dt, dt)
checkConsistencyBetweenInterpretedAndCodegen(LessThanOrEqual, dt, dt)
checkConsistencyBetweenInterpretedAndCodegen(GreaterThan, dt, dt)
checkConsistencyBetweenInterpretedAndCodegen(GreaterThanOrEqual, dt, dt)
}
}
test("BinaryComparison: lessThan") {
for (i <- 0 until smallValues.length) {
checkEvaluation(LessThan(smallValues(i), largeValues(i)), true)
checkEvaluation(LessThan(equalValues1(i), equalValues2(i)), false)
checkEvaluation(LessThan(largeValues(i), smallValues(i)), false)
}
}
test("BinaryComparison: LessThanOrEqual") {
for (i <- 0 until smallValues.length) {
checkEvaluation(LessThanOrEqual(smallValues(i), largeValues(i)), true)
checkEvaluation(LessThanOrEqual(equalValues1(i), equalValues2(i)), true)
checkEvaluation(LessThanOrEqual(largeValues(i), smallValues(i)), false)
}
}
test("BinaryComparison: GreaterThan") {
for (i <- 0 until smallValues.length) {
checkEvaluation(GreaterThan(smallValues(i), largeValues(i)), false)
checkEvaluation(GreaterThan(equalValues1(i), equalValues2(i)), false)
checkEvaluation(GreaterThan(largeValues(i), smallValues(i)), true)
}
}
test("BinaryComparison: GreaterThanOrEqual") {
for (i <- 0 until smallValues.length) {
checkEvaluation(GreaterThanOrEqual(smallValues(i), largeValues(i)), false)
checkEvaluation(GreaterThanOrEqual(equalValues1(i), equalValues2(i)), true)
checkEvaluation(GreaterThanOrEqual(largeValues(i), smallValues(i)), true)
}
}
test("BinaryComparison: EqualTo") {
for (i <- 0 until smallValues.length) {
checkEvaluation(EqualTo(smallValues(i), largeValues(i)), false)
checkEvaluation(EqualTo(equalValues1(i), equalValues2(i)), true)
checkEvaluation(EqualTo(largeValues(i), smallValues(i)), false)
}
}
test("BinaryComparison: EqualNullSafe") {
for (i <- 0 until smallValues.length) {
checkEvaluation(EqualNullSafe(smallValues(i), largeValues(i)), false)
checkEvaluation(EqualNullSafe(equalValues1(i), equalValues2(i)), true)
checkEvaluation(EqualNullSafe(largeValues(i), smallValues(i)), false)
}
}
test("BinaryComparison: null test") {
// Use -1 (default value for codegen) which can trigger some weird bugs, e.g. SPARK-14757
val normalInt = Literal(-1)
val nullInt = NonFoldableLiteral.create(null, IntegerType)
val nullNullType = Literal.create(null, NullType)
def nullTest(op: (Expression, Expression) => Expression): Unit = {
checkEvaluation(op(normalInt, nullInt), null)
checkEvaluation(op(nullInt, normalInt), null)
checkEvaluation(op(nullInt, nullInt), null)
checkEvaluation(op(nullNullType, nullNullType), null)
}
nullTest(LessThan)
nullTest(LessThanOrEqual)
nullTest(GreaterThan)
nullTest(GreaterThanOrEqual)
nullTest(EqualTo)
checkEvaluation(EqualNullSafe(normalInt, nullInt), false)
checkEvaluation(EqualNullSafe(nullInt, normalInt), false)
checkEvaluation(EqualNullSafe(nullInt, nullInt), true)
checkEvaluation(EqualNullSafe(nullNullType, nullNullType), true)
}
test("EqualTo on complex type") {
val array = new GenericArrayData(Array(1, 2, 3))
val struct = create_row("a", 1L, array)
val arrayType = ArrayType(IntegerType)
val structType = new StructType()
.add("1", StringType)
.add("2", LongType)
.add("3", ArrayType(IntegerType))
val projection = UnsafeProjection.create(
new StructType().add("array", arrayType).add("struct", structType))
val unsafeRow = projection(InternalRow(array, struct))
val unsafeArray = unsafeRow.getArray(0)
val unsafeStruct = unsafeRow.getStruct(1, 3)
checkEvaluation(EqualTo(
Literal.create(array, arrayType),
Literal.create(unsafeArray, arrayType)), true)
checkEvaluation(EqualTo(
Literal.create(struct, structType),
Literal.create(unsafeStruct, structType)), true)
}
test("EqualTo double/float infinity") {
val infinity = Literal(Double.PositiveInfinity)
checkEvaluation(EqualTo(infinity, infinity), true)
}
test("SPARK-22693: InSet should not use global variables") {
val ctx = new CodegenContext
InSet(Literal(1), Set(1, 2, 3, 4)).genCode(ctx)
assert(ctx.inlinedMutableStates.isEmpty)
}
test("SPARK-24007: EqualNullSafe for FloatType and DoubleType might generate a wrong result") {
checkEvaluation(EqualNullSafe(Literal(null, FloatType), Literal(-1.0f)), false)
checkEvaluation(EqualNullSafe(Literal(-1.0f), Literal(null, FloatType)), false)
checkEvaluation(EqualNullSafe(Literal(null, DoubleType), Literal(-1.0d)), false)
checkEvaluation(EqualNullSafe(Literal(-1.0d), Literal(null, DoubleType)), false)
}
}
| ddna1021/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala | Scala | apache-2.0 | 17,394 |
package templemore.sbt
import _root_.sbt._
import java.io.File
/**
* @author Chris Turner
*/
class JRuby(val classpath: PathFinder,
val scalaLibraryPath: Path,
val defaultArgs: List[String],
val jRubyHome: Path,
val gemPath: Path,
val maxMemory: String,
val maxPermGen: String,
val log: Logger) {
if ( !jRubyHome.exists ) jRubyHome.asFile.mkdirs
def apply(args: List[String]): Int = {
log.debug("Launching JRuby")
log.debug("classpath: " + classpathAsString)
log.debug("javaArgs: " + defaultArgs)
log.debug("args: " + args)
log.debug("jRubyHome: " + jRubyHome)
log.debug("gemPath: " + gemPath)
Fork.java(None, javaArgs ++ args, None, jRubyEnv, LoggedOutput(log))
}
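  /**
   * Doc sketch added for clarity: installs a gem into `gemPath` via `gem install`.
   * The argument may carry extra whitespace-separated flags, e.g.
   * `installGem("cucumber --version 1.1.0")` (version shown is purely illustrative).
   */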
def installGem(gem:String) = {
    val args = List("-S", "gem", "install", "--no-ri", "--no-rdoc", "--install-dir", gemPath.absolutePath) ++ gem.split("\\s+")
if ( log.atLevel(Level.Debug) ) apply("-d" :: args)
else apply(args)
}
private def classpathAsString =
(scalaLibraryPath.asFile :: classpath.getFiles.toList).map(_.getAbsolutePath).mkString(File.pathSeparator)
private def javaArgs = defaultArgs ++ ("-Xmx%s".format(maxMemory) :: "-XX:MaxPermSize=%s".format(maxPermGen) ::
"-classpath" :: classpathAsString :: "org.jruby.Main" :: Nil)
private def jRubyEnv = Map("GEM_PATH" -> gemPath.absolutePath,
"HOME" -> jRubyHome.absolutePath)
}
| skipoleschris/cucumber-sbt-plugin | src/main/scala/templemore/sbt/JRuby.scala | Scala | apache-2.0 | 1,526 |