code stringlengths 5-1M | repo_name stringlengths 5-109 | path stringlengths 6-208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5-1M |
---|---|---|---|---|---|
//-----------------------------------------------------------------------
// FILE : MainSuite.scala
// SUBJECT : Class to do various testing operations on the Sprocket main.
// AUTHOR : (C) Copyright 2012 by Peter C. Chapin <[email protected]>
//
//-----------------------------------------------------------------------
package edu.uvm.sprocket
import org.scalatest.{Assertions, FunSuite}
import org.scalatest.Matchers
class MainSuite extends FunSuite with Assertions with Matchers {
test("Dummy Test") {
println("Hello, World!")
}
}
| pchapin/sprocket | test/edu/uvm/sprocket/MainSuite.scala | Scala | bsd-3-clause | 553 |
package com.sksamuel.elastic4s.searches.queries.matches
import com.sksamuel.elastic4s.searches.queries.Query
case class MatchAllQuery(boost: Option[Double] = None, queryName: Option[String] = None) extends Query {
def boost(boost: Double): MatchAllQuery = copy(boost = Option(boost))
def withBoost(boost: Double): MatchAllQuery = copy(boost = Option(boost))
def queryName(queryName: String): MatchAllQuery = copy(queryName = Option(queryName))
def withQueryName(queryName: String): MatchAllQuery = copy(queryName = Option(queryName))
}
case class MatchNoneQuery(queryName: Option[String] = None) extends Query {
def queryName(queryName: String): MatchNoneQuery = copy(queryName = Option(queryName))
def withQueryName(queryName: String): MatchNoneQuery = copy(queryName = Option(queryName))
}
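// Illustrative usage sketch (added comment, not part of the original file; the
// boost value and query names are arbitrary examples). Both case classes are
// immutable, so the builder-style methods return copies and can be chained:
//
//   val all  = MatchAllQuery().boost(2.0).queryName("match_everything")
//   val none = MatchNoneQuery().queryName("match_nothing")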
| Tecsisa/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/queries/matches/MatchAllQuery.scala | Scala | apache-2.0 | 835 |
package org.jetbrains.plugins.scala.editor.importOptimizer
/**
* @author Nikolay.Tropin
*/
case class RangeInfo(namesAtRangeStart: Set[String],
importInfos: Seq[ImportInfo],
usedImportedNames: Set[String],
isLocal: Boolean)
| katejim/intellij-scala | src/org/jetbrains/plugins/scala/editor/importOptimizer/RangeInfo.scala | Scala | apache-2.0 | 292 |
package com.whitepages.cloudmanager.operation
import org.apache.solr.client.solrj.impl.{CloudSolrClient}
import com.whitepages.cloudmanager.action.{StateCondition, Action}
import com.whitepages.cloudmanager.state.ClusterManager
import java.util.Calendar
import com.whitepages.cloudmanager.ManagerSupport
import scala.collection.JavaConverters._
object Operation {
def empty = Operation(Seq())
def apply(action: Action): Operation = Operation(Seq(action))
}
case class Operation(actions: Seq[Action]) extends ManagerSupport {
def this(actions: java.util.List[Action]) = this(actions.asScala)
private val calendar = Calendar.getInstance()
def prettyPrint = {
"Operation: \\n" + actions.map("\\t" + _.toString).mkString("\\n")
}
def execute(client: CloudSolrClient): Boolean = execute(ClusterManager(client))
def execute(clusterManager: ClusterManager): Boolean = {
if (actions.isEmpty) {
true
} else {
comment.debug(calendar.getTime + " Beginning " + this)
val success = actions.foldLeft(true)((goodSoFar, action) => {
if (goodSoFar) {
comment.debug(s"Starting to apply $action")
val actionSuccess = if (action.perform(clusterManager)) {
comment.debug(s"Finished applying $action")
true
}
else {
comment.warn(s"Could not apply $action")
false
}
actionSuccess
}
else goodSoFar
})
comment.debug(calendar.getTime + " Finished " + this)
success
}
}
// TODO: More collection-y api? These are all I really needed so far.
def isEmpty = actions.isEmpty
def nonEmpty = actions.nonEmpty
def ++(that: Operation) = Operation(this.actions ++ that.actions)
}
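// Hypothetical usage sketch (illustrative only; `actionA`, `actionB` and `client`
// are assumed to exist elsewhere). Operations compose with `++`, and `execute`
// runs the actions in order, skipping the remaining ones once one of them fails:
//
//   val op = Operation(actionA) ++ Operation(actionB)
//   val succeeded: Boolean = op.execute(client)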
| randomstatistic/solrcloud_manager | src/main/scala/com/whitepages/cloudmanager/operation/Operation.scala | Scala | apache-2.0 | 1,761 |
import sbt._
object Version {
val akka = "2.4.1"
val hadoop = "2.7.1"
val logback = "1.1.3"
val mockito = "1.10.19"
val scala = "2.11.7"
val scalaTest = "2.2.5"
val slf4j = "1.7.6"
val spark = "1.5.2"
}
object Library {
val akkaActor = "com.typesafe.akka" %% "akka-actor" % Version.akka
val akkaTestKit = "com.typesafe.akka" %% "akka-testkit" % Version.akka
val hadoopClient = "org.apache.hadoop" % "hadoop-client" % Version.hadoop
val logbackClassic = "ch.qos.logback" % "logback-classic" % Version.logback
val mockitoAll = "org.mockito" % "mockito-all" % Version.mockito
val scalaTest = "org.scalatest" %% "scalatest" % Version.scalaTest
val slf4jApi = "org.slf4j" % "slf4j-api" % Version.slf4j
val sparkStreaming = "org.apache.spark" %% "spark-streaming" % Version.spark
}
object Dependencies {
import Library._
val sparkAkkaHadoop = Seq(
sparkStreaming,
akkaActor,
akkaTestKit,
hadoopClient,
logbackClassic % "test",
scalaTest % "test",
mockitoAll % "test"
)
}
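// Hypothetical usage from a build definition (an assumption, not part of this file):
//
//   libraryDependencies ++= Dependencies.sparkAkkaHadoop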
| Bongani/series-monitor | project/Dependencies.scala | Scala | apache-2.0 | 1,146 |
package scala.meta.tests
package io
import java.io.File
import munit.FunSuite
import scala.meta._
class MultipathSuite extends FunSuite {
val tmp: File = File.createTempFile("src", "main")
assert(tmp.delete())
assert(tmp.mkdirs())
val files = List("a", "b")
files.foreach(file => {
assert(new File(tmp, file).createNewFile())
})
test("Classpath.syntax") {
val fromPath = Classpath(List(AbsolutePath(tmp)))
val fromSyntax = Classpath(tmp.getAbsolutePath)
assert(fromPath == fromSyntax)
assert(fromPath.syntax == fromSyntax.syntax)
}
}
| scalameta/scalameta | tests/jvm/src/test/scala/scala/meta/tests/io/MultipathSuite.scala | Scala | bsd-3-clause | 573 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.cancelables
import monix.execution.Cancelable
import monix.execution.Cancelable.IsDummy
import monix.execution.atomic.{AtomicAny, PaddingStrategy}
import scala.annotation.tailrec
/** Represents a [[monix.execution.Cancelable Cancelable]] whose
* underlying cancelable reference can be swapped for another.
*
* Example:
* {{{
* val s = MultiAssignmentCancelable()
* s := c1 // sets the underlying cancelable to c1
* s := c2 // swaps the underlying cancelable to c2
*
* s.cancel() // also cancels c2
*
* s := c3 // also cancels c3, because s is already canceled
* }}}
*
* Also see:
*
* - [[SerialCancelable]], which is similar, except that it
* cancels the old cancelable upon assigning a new cancelable
* - [[SingleAssignCancelable]] that is effectively a forward
* reference that can be assigned at most once
* - [[OrderedCancelable]] that's very similar with
* `MultiAssignCancelable`, but with the capability of forcing
* ordering on concurrent updates
*/
final class MultiAssignCancelable private (initial: Cancelable)
extends AssignableCancelable.Multi {
private[this] val state = {
AtomicAny.withPadding(initial, PaddingStrategy.LeftRight128)
}
override def isCanceled: Boolean =
state.get match {
case null => true
case _ => false
}
override def cancel(): Unit = {
// Using getAndSet, which on Java 8 should be faster than
// a compare-and-set.
val oldState: Cancelable = state.getAndSet(null)
if (oldState ne null) oldState.cancel()
}
@tailrec def `:=`(value: Cancelable): this.type =
state.get match {
case null =>
value.cancel()
this
case `value` =>
// ignore
this
case current =>
if (state.compareAndSet(current, value)) {
this
} else {
// $COVERAGE-OFF$
:=(value) // retry
// $COVERAGE-ON$
}
}
/** Clears the underlying reference, setting it to a
* [[Cancelable.empty]] (if not cancelled).
*
* This is equivalent with:
* {{{
* ref := Cancelable.empty
* }}}
*
* The purpose of this method is to release any underlying
* reference for GC purposes, however if the underlying reference
* is a [[monix.execution.Cancelable.IsDummy dummy]] then the
* assignment doesn't happen because we don't care about releasing
* dummy references.
*/
@tailrec def clear(): Cancelable = {
val current: Cancelable = state.get
if ((current ne null) && !current.isInstanceOf[IsDummy]) {
if (state.compareAndSet(current, Cancelable.empty)) {
current
} else {
// $COVERAGE-OFF$
clear() // retry
// $COVERAGE-ON$
}
} else {
Cancelable.empty
}
}
}
object MultiAssignCancelable {
/** Builder for [[MultiAssignCancelable]]. */
def apply(): MultiAssignCancelable =
new MultiAssignCancelable(Cancelable.empty)
/** Builder for [[MultiAssignCancelable]]. */
def apply(s: Cancelable): MultiAssignCancelable =
new MultiAssignCancelable(s)
} | Wogan/monix | monix-execution/shared/src/main/scala/monix/execution/cancelables/MultiAssignCancelable.scala | Scala | apache-2.0 | 3,820 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.dynamicpruning
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.expressions.{Alias, BindReferences, DynamicPruningExpression, DynamicPruningSubquery, Expression, ListQuery, Literal, PredicateHelper}
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan}
import org.apache.spark.sql.catalyst.plans.physical.BroadcastMode
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.{InSubqueryExec, QueryExecution, SparkPlan, SubqueryBroadcastExec}
import org.apache.spark.sql.execution.exchange.BroadcastExchangeExec
import org.apache.spark.sql.execution.joins._
import org.apache.spark.sql.internal.SQLConf
/**
* This planner rule aims at rewriting dynamic pruning predicates in order to reuse the
* results of broadcast. For joins that are not planned as broadcast hash joins we keep
* the fallback mechanism with subquery duplicate.
*/
case class PlanDynamicPruningFilters(sparkSession: SparkSession)
extends Rule[SparkPlan] with PredicateHelper {
private def reuseBroadcast: Boolean =
SQLConf.get.dynamicPartitionPruningReuseBroadcast && SQLConf.get.exchangeReuseEnabled
/**
* Identify the shape in which keys of a given plan are broadcasted.
*/
private def broadcastMode(keys: Seq[Expression], plan: LogicalPlan): BroadcastMode = {
val packedKeys = BindReferences.bindReferences(HashJoin.rewriteKeyExpr(keys), plan.output)
HashedRelationBroadcastMode(packedKeys)
}
override def apply(plan: SparkPlan): SparkPlan = {
if (!SQLConf.get.dynamicPartitionPruningEnabled) {
return plan
}
plan transformAllExpressions {
case DynamicPruningSubquery(
value, buildPlan, buildKeys, broadcastKeyIndex, onlyInBroadcast, exprId) =>
val qe = new QueryExecution(sparkSession, buildPlan)
// Using `sparkPlan` is a little hacky as it is based on the assumption that this rule is
// the first to be applied (apart from `InsertAdaptiveSparkPlan`).
val canReuseExchange = reuseBroadcast && buildKeys.nonEmpty &&
plan.find {
case BroadcastHashJoinExec(_, _, _, BuildLeft, _, left, _) =>
left.sameResult(qe.sparkPlan)
case BroadcastHashJoinExec(_, _, _, BuildRight, _, _, right) =>
right.sameResult(qe.sparkPlan)
case _ => false
}.isDefined
if (canReuseExchange) {
val mode = broadcastMode(buildKeys, buildPlan)
// plan a broadcast exchange of the build side of the join
val exchange = BroadcastExchangeExec(mode, qe.executedPlan)
val name = s"dynamicpruning#${exprId.id}"
// place the broadcast adaptor for reusing the broadcast results on the probe side
val broadcastValues =
SubqueryBroadcastExec(name, broadcastKeyIndex, buildKeys, exchange)
DynamicPruningExpression(InSubqueryExec(value, broadcastValues, exprId))
} else if (onlyInBroadcast) {
// it is not worthwhile to execute the query, so we fall-back to a true literal
DynamicPruningExpression(Literal.TrueLiteral)
} else {
// we need to apply an aggregate on the buildPlan in order to be column pruned
val alias = Alias(buildKeys(broadcastKeyIndex), buildKeys(broadcastKeyIndex).toString)()
val aggregate = Aggregate(Seq(alias), Seq(alias), buildPlan)
DynamicPruningExpression(expressions.InSubquery(
Seq(value), ListQuery(aggregate, childOutputs = aggregate.output)))
}
}
}
}
| pgandhi999/spark | sql/core/src/main/scala/org/apache/spark/sql/dynamicpruning/PlanDynamicPruningFilters.scala | Scala | apache-2.0 | 4,458 |
package ru.org.codingteam.cttalk
import java.security.MessageDigest
import org.specs2.concurrent.ExecutionEnv
import org.specs2.mock._
import play.api.test.PlaySpecification
import reactivemongo.api.commands.WriteResult
import ru.org.codingteam.cttalk.model.{Token, User, UserHandle}
import ru.org.codingteam.cttalk.services.messaging.MessageReceiver
import ru.org.codingteam.cttalk.services.{MessagesService, TokensRepository, UserRepository, UserServiceImpl}
import scala.concurrent.Future
/**
* Created by hgn on 25.10.2015.
*/
class UserServiceSpec extends PlaySpecification with Mockito {
sequential
def mockTokensRepository = {
val mockRepository = mock[TokensRepository]
mockRepository.create(any[User]) answers { user =>
Future.successful(Token("token", UserHandle(user.asInstanceOf[User].name)))
}
mockRepository
}
def mockMessagesService = {
val mockService = mock[MessagesService]
mockService.register(any[Token], any[MessageReceiver]) answers {args =>
args match {
case Array(token, _) => Future.successful(token.asInstanceOf[Token])
}
}
mockService
}
"UserService.createUser" should {
"-- write user to db if there is no user with given name" in { implicit ee: ExecutionEnv =>
val mockUserRepository = mock[UserRepository]
mockUserRepository.save(org.mockito.Matchers.any[User]) returns Future.successful(User("", "", Seq()))
val service = new UserServiceImpl(mockUserRepository, mockTokensRepository, mockMessagesService)
service.createUser("testname", "test") map { user => user must beAnInstanceOf[User] } await
}
"-- fail when user with given name already exists" in { implicit ee: ExecutionEnv =>
val mockUsersRepository = mock[UserRepository]
val username = "testname"
mockUsersRepository.save(org.mockito.Matchers.any[User]) answers { user =>
val u = user.asInstanceOf[User]
if (username.equals(u.name)) {
Future.failed(new RuntimeException)
} else {
Future.successful(u)
}
}
val service = new UserServiceImpl(mockUsersRepository, mockTokensRepository, mockMessagesService)
service.createUser(username, "test") must throwA[Exception].await
}
}
"UserService.auth" should {
"-- return auth token if auth is successful" in { implicit ee: ExecutionEnv =>
val digest: MessageDigest = MessageDigest.getInstance("SHA-256")
val hash: String = BigInt(digest.digest("testpassword".getBytes("UTF-8"))).toString(16)
val newToken = "token"
val mockUsersRepository = mock[UserRepository]
val successfulResult = mock[WriteResult]
successfulResult.ok returns true
mockUsersRepository.getByNameAndPasswordHash(anyString, anyString) returns Future.successful(Some(User("testname", hash, Seq())))
val service = new UserServiceImpl(mockUsersRepository, mockTokensRepository, mockMessagesService)
service.auth("testname", "testpassword") map { result => result must beAnInstanceOf[Token] } await
}
"-- register token after successful authentication" in { implicit ee: ExecutionEnv =>
val digest: MessageDigest = MessageDigest.getInstance("SHA-256")
val hash: String = BigInt(digest.digest("testpassword".getBytes("UTF-8"))).toString(16)
val newToken = "token"
val mockUsersRepository = mock[UserRepository]
val successfulResult = mock[WriteResult]
successfulResult.ok returns true
mockUsersRepository.getByNameAndPasswordHash(anyString, anyString) returns Future.successful(Some(User("testname", hash, Seq())))
val messagesService = mockMessagesService
val service = new UserServiceImpl(mockUsersRepository, mockTokensRepository, messagesService)
service.auth("testname", "testpassword") map { result => result must beAnInstanceOf[Token] } await
there was one(messagesService).register(any[Token], any[MessageReceiver])
}
"-- fail if user does not exists" in { implicit ee: ExecutionEnv =>
val digest: MessageDigest = MessageDigest.getInstance("SHA-256")
val hash: String = BigInt(digest.digest("testpassword".getBytes("UTF-8"))).toString(16)
val newToken = "token"
val mockUsersRepository = mock[UserRepository]
mockUsersRepository.getByNameAndPasswordHash(anyString, anyString) returns Future.successful(None)
val service = new UserServiceImpl(mockUsersRepository, mockTokensRepository, mockMessagesService)
service.auth("testname", "testpassword") must throwA[Exception].await
}
"-- fail if password does not match" in { implicit ee: ExecutionEnv =>
val digest: MessageDigest = MessageDigest.getInstance("SHA-256")
val hash: String = BigInt(digest.digest("testpassword".getBytes("UTF-8"))).toString(16)
val newToken = "token"
val mockUsersRepository = mock[UserRepository]
mockUsersRepository.getByNameAndPasswordHash(anyString, anyString) returns Future.successful(Some(User("testname", hash, Seq())))
val service = new UserServiceImpl(mockUsersRepository, mockTokensRepository, mockMessagesService)
service.auth("testname", "wrongpassword") must throwA[Exception].await
}
}
}
| hagane/cttalk | server/test/ru/org/codingteam/cttalk/UserServiceSpec.scala | Scala | mit | 5,248 |
package scanalyzer
package analysis
package interpreter
import cfg._
import util._
case class InterpretationException(msg:String) extends ScanalyzerException(msg)
/**
* An Interpreter for the SSA-CFG format that implicitly defines its semantics.
*/
class Interpreter(fun: Function) extends ValueAnalysis[Option[BigInt]](fun) {
private def err(msg: String) = throw new InterpretationException(msg)
private def acc[A](a: Option[A]): A =
a.getOrElse(err("Use of undefined Value!"))
private def eval(i: Named): Unit = {
i match {
case BinOp(n, op, a, b) => {
val aval = acc(getVal(a))
val bval = acc(getVal(b))
symtab(n) = Some(op match {
case ADD() => aval + bval
case SUB() => aval - bval
case MUL() => aval * bval
case DIV() => if (bval != 0) {
aval / bval
} else {
err("Division by zero!")
}
case SLT() => if (aval < bval) 1 else 0
})
}
case _ => err("Invalid named non-PHI Instruction: `" + i + "`!")
}
}
override protected def fromBigInt(x: BigInt) = Some(x)
override def run(): Unit = {
populateSymbolTable(None)
var prevBB: BasicBlock = null
var currBB: BasicBlock = fun.first
while (currBB != null) {
// PHIs are evaluated in parallel
val (phis, rest) = currBB.splitPhis
// collect the appropriate values from all PHIs
var phi_res = phis map (p => {
p.getValForBB(prevBB) match {
case Some(x) => Some(acc(getVal(x)))
case None => err("Insufficient PHI Instruction: `" + p +"`!")
}
})
// assign the new values
for ((p, r) <- phis zip phi_res)
symtab(p.Name) = r
// the non-PHI instructions are evaluated sequentially
for (i <- rest) {
i match {
case B(c, t, f) => {
prevBB = currBB
currBB = if (acc(getVal(c)) != 0) t else f
}
case RET(x) => {
symtab("__RES__") = getVal(x)
prevBB = currBB
currBB = null
}
case x: Named => eval (x)
case _ => err( "Invalid unnamed non-PHI Instruction: `" + i + "`!")
}
}
}
}
}
| fabian-r/scanalyzer | src/main/scala/analysis/Interpreter.scala | Scala | mit | 2,257 |
/*
* Copyright 2016 OSBI Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bi.meteorite.core.security.authentication
import javax.inject.{Named, Singleton}
import javax.ws.rs.container.{ContainerRequestContext, ContainerResponseContext, ContainerResponseFilter}
import javax.ws.rs.core.{HttpHeaders, NewCookie}
import bi.meteorite.core.api.security.tokenprovider.TokenProvider
/**
* Token Response Filter for JAAS.
*/
@Singleton
@Named("tokenResponseFilter")
class TokenResponseFilter extends ContainerResponseFilter {
override def filter(requestContext: ContainerRequestContext, responseContext: ContainerResponseContext) {
val value = requestContext.getProperty("token").asInstanceOf[String]
if (value != null) {
val newcookie = new NewCookie(TokenProvider.TOKEN_COOKIE_NAME, value)
responseContext.getHeaders.putSingle(HttpHeaders.SET_COOKIE, newcookie)
}
}
}
| OSBI/meteorite-core | security-scala/src/main/scala/bi/meteorite/core/security/authentication/TokenResponseFilter.scala | Scala | apache-2.0 | 1,421 |
/*
* Copyright (C) 2015 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.core.workflow.tools
import java.util.concurrent.ConcurrentHashMap
import collection.JavaConverters._
import org.osgi.framework.{ BundleContext, BundleActivator }
object PluginInfo {
val plugins = new ConcurrentHashMap[Class[_], PluginInfo]().asScala
def pluginsInfo = plugins.values
}
case class PluginInfo(namespaces: List[String])
trait PluginInfoActivator extends BundleActivator {
def info = PluginInfo(List(this.getClass.getPackage.getName))
override def start(bundleContext: BundleContext): Unit =
PluginInfo.plugins += this.getClass -> info
override def stop(bundleContext: BundleContext): Unit =
PluginInfo.plugins -= this.getClass
}
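// Illustrative sketch (the activator name below is hypothetical, not from this
// project): a plugin bundle would typically just mix the trait in, so that its
// package namespace is added to PluginInfo.plugins on start and removed on stop.
//
//   class MyPluginActivator extends PluginInfoActivator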
| ISCPIF/PSEExperiments | openmole-src/openmole/core/org.openmole.core.workflow/src/main/scala/org/openmole/core/workflow/tools/PluginInfoActivator.scala | Scala | agpl-3.0 | 1,404 |
package model.services
import com.mohiva.play.silhouette.api.services.IdentityService
import com.mohiva.play.silhouette.impl.providers.CommonSocialProfile
import model.dtos.User
import scala.concurrent.Future
/**
* Handles actions to users.
*/
trait UserService extends IdentityService[User] {
/**
* Saves a user.
*
* @param user The user to save.
* @return The saved user.
*/
def save(user: User): Future[User]
/**
* Saves the social profile for a user.
*
* If a user exists for this profile then update the user, otherwise create a new user with the given profile.
*
* @param profile The social profile to save.
* @return The user for whom the profile was saved.
*/
def save(profile: CommonSocialProfile): Future[User]
}
| scify/DemocracIT-Web | app/model/services/UserService.scala | Scala | apache-2.0 | 774 |
package com.sksamuel.elastic4s.fields.builders
import com.sksamuel.elastic4s.fields.GeoShapeField
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
object GeoShapeFieldBuilderFn {
def build(field: GeoShapeField): XContentBuilder = {
val builder = XContentFactory.jsonBuilder()
builder.field("type", field.`type`)
field.boost.foreach(builder.field("boost", _))
if (field.copyTo.nonEmpty) builder.array("copy_to", field.copyTo.toArray)
field.docValues.foreach(builder.field("doc_values", _))
field.ignoreMalformed.foreach(builder.field("ignore_malformed", _))
field.ignoreZValue.foreach(builder.field("ignore_z_value", _))
field.index.foreach(builder.field("index", _))
field.norms.foreach(builder.field("norms", _))
field.nullValue.foreach(builder.field("null_value", _))
field.tree.foreach(builder.field("tree", _))
field.precision.foreach(builder.field("precision", _))
field.treeLevels.foreach(builder.field("tree_levels", _))
field.strategy.foreach(builder.field("strategy", _))
field.distanceErrorPct.foreach(builder.field("distance_error_pct", _))
field.orientation.foreach(builder.field("orientation", _))
field.pointsOnly.foreach(builder.field("points_only", _))
field.store.foreach(builder.field("store", _))
builder.endObject()
}
}
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/fields/builders/GeoShapeFieldBuilderFn.scala | Scala | apache-2.0 | 1,344 |
package org.opencompare.stats.utils
import org.opencompare.io.wikipedia.io.MediaWikiAPI
import org.opencompare.stats.interfaces.RevisionsParserInterface
import play.api.libs.json._
import scala.collection.mutable.ListBuffer
/**
* Created by smangin on 23/07/15.
*
* Used to get all revisions from a single wikipedia page by abstracting Xpath calls
*
*/
class RevisionsParser (api : MediaWikiAPI, lang : String, title : String, direction : String = "newer") extends RevisionsParserInterface {
var skipUndo = false
var skipBlank = false
private val revisions = api.getRevisionFromTitle(lang, title, direction)
private val ids = for (revision <- revisions) yield {
(revision \\ "revid").as[JsNumber].value.toIntExact
}
private var currentId = -1
private val blankValues= List(
"WP:AES",
"WP:BLANK",
"WP:PAGEBLANKING",
"WP:AUTOSUMM"
)
private val undoValues= List(
"WP:UNDO",
"WP:CLUEBOT",
"WP:REVERT",
"WP:REV",
"WP:RV",
"Undid revision"
)
private def getRevision(id: Int): Option[JsObject] = {
revisions.find( revision => id == getId(revision))
}
private def getId(revision: JsObject): Int = {
(revision \\ "revid").as[JsNumber].value.toIntExact
}
def getIds(skipUndo : Boolean = false, skipBlank : Boolean = false): Map[String, List[Int]] = {
val undoBlackList = ListBuffer[Int]()
val suppressedBlackList = ListBuffer[Int]()
val blankBlackList = ListBuffer[Int]()
ids.foreach(id => {
if (skipUndo && isUndo(id)) {
undoBlackList.append(getParentId(id))
}
if (isSuppressed(id)) {
suppressedBlackList.append(id)
}
if (skipBlank && isBlank(id)) {
blankBlackList.append(id)
}
})
Map[String, List[Int]](
("ids", ids.diff(undoBlackList ++ blankBlackList).toList),
("undo", undoBlackList.toList),
("blank", blankBlackList.toList),
("suppressed", suppressedBlackList.toList)
)
}
def getDate(revid: Int): Option[String] = {
val revision = getRevision(revid)
if (revision.isDefined) {
Some((revision.get \ "timestamp").as[JsString].value)
} else {
None
}
}
def isSuppressed(revid: Int): Boolean = {
var suppressed = false
val revision = getRevision(revid)
if (revision.isDefined) {
suppressed = revision.get.keys.contains("suppressed")
}
suppressed
}
def isBlank(revid: Int): Boolean = {
var blank = false
val revision = getRevision(revid)
if (revision.isDefined) {
if (revision.get.keys.contains("comment")) {
val comment = (revision.get \ "comment")
blankValues.foreach(value => {
if (comment.as[JsString].value.contains(value)) {
blank = true
}
})
}
}
blank
}
def isUndo(revid: Int): Boolean = {
var undo = false
val revision = getRevision(revid)
if (revision.isDefined) {
if (revision.get.keys.contains("comment")) {
val comment = (revision.get \ "comment")
undoValues.foreach(value => {
if (comment.as[JsString].value.contains(value)) {
undo = true
}
})
}
}
undo
}
def getParentId(revid: Int): Int = {
val revision = getRevision(revid)
if (revision.isDefined) {
(revision.get \\ "parentid").as[JsNumber].value.toIntExact
} else {
0
}
}
def getAuthor(revid: Int): String = {
var author = ""
val revision = getRevision(revid)
if (revision.isDefined) {
if (revision.get.keys.contains("comment")) {
author = (revision.get \ "user").as[JsString].value
}
}
author
}
def getWikitext(revid: Int): String = {
api.getContentFromRevision(lang, revid)
}
}
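// Hypothetical usage sketch (the page title is an arbitrary example and `api` is
// assumed to be a configured MediaWikiAPI instance):
//
//   val parser = new RevisionsParser(api, "en", "Comparison_of_text_editors")
//   val keptIds = parser.getIds(skipUndo = true, skipBlank = true)("ids")
//   val dates   = keptIds.flatMap(parser.getDate)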
| OpenCompare/pcm-stats | src/main/scala/org/opencompare/stats/utils/RevisionsParser.scala | Scala | apache-2.0 | 3,781 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.stream
import org.apache.flink.api.dag.Transformation
import org.apache.flink.configuration.Configuration
import org.apache.flink.core.memory.ManagedMemoryUseCase
import org.apache.flink.streaming.api.operators.OneInputStreamOperator
import org.apache.flink.streaming.api.transformations.OneInputTransformation
import org.apache.flink.table.api.TableException
import org.apache.flink.table.data.RowData
import org.apache.flink.table.functions.python.PythonFunctionInfo
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.delegation.StreamPlanner
import org.apache.flink.table.planner.plan.nodes.exec.common.CommonExecPythonAggregate
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamExecPythonOverAggregate
.{ARROW_PYTHON_OVER_WINDOW_RANGE_PROC_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME,
ARROW_PYTHON_OVER_WINDOW_RANGE_ROW_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME,
ARROW_PYTHON_OVER_WINDOW_ROWS_PROC_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME,
ARROW_PYTHON_OVER_WINDOW_ROWS_ROW_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME}
import org.apache.flink.table.planner.plan.utils.{KeySelectorUtil, OverAggregateUtil}
import org.apache.flink.table.runtime.typeutils.InternalTypeInfo
import org.apache.flink.table.types.logical.RowType
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.RelFieldCollation.Direction.ASCENDING
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.Window.Group
import org.apache.calcite.rel.core.{AggregateCall, Window}
import java.util
import scala.collection.JavaConverters._
/**
* Stream physical RelNode for python time-based over [[Window]].
*/
class StreamExecPythonOverAggregate(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
outputRowType: RelDataType,
inputRowType: RelDataType,
logicWindow: Window)
extends StreamExecOverAggregateBase(
cluster,
traitSet,
inputRel,
outputRowType,
inputRowType,
logicWindow)
with CommonExecPythonAggregate {
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new StreamExecPythonOverAggregate(
cluster,
traitSet,
inputs.get(0),
outputRowType,
inputRowType,
logicWindow
)
}
override protected def translateToPlanInternal(
planner: StreamPlanner): Transformation[RowData] = {
val tableConfig = planner.getTableConfig
val overWindow: Group = logicWindow.groups.get(0)
val orderKeys = overWindow.orderKeys.getFieldCollations
if (orderKeys.size() != 1) {
throw new TableException(
"The window can only be ordered by a single time column.")
}
val orderKey = orderKeys.get(0)
if (!orderKey.direction.equals(ASCENDING)) {
throw new TableException(
"The window can only be ordered in ASCENDING mode.")
}
val inputDS = getInputNodes.get(0).translateToPlan(planner)
.asInstanceOf[Transformation[RowData]]
if (!logicWindow.groups.get(0).keys.isEmpty && tableConfig.getMinIdleStateRetentionTime < 0) {
LOG.warn(
"No state retention interval configured for a query which accumulates state. " +
"Please provide a query configuration with valid retention interval to prevent " +
"excessive state size. You may specify a retention time of 0 to not clean up the state.")
}
val timeType = outputRowType.getFieldList.get(orderKey.getFieldIndex).getType
// check time field
if (!FlinkTypeFactory.isRowtimeIndicatorType(timeType)
&& !FlinkTypeFactory.isProctimeIndicatorType(timeType)) {
throw new TableException(
"OVER windows' ordering in stream mode must be defined on a time attribute.")
}
// identify window rowtime attribute
val rowTimeIdx: Option[Int] = if (FlinkTypeFactory.isRowtimeIndicatorType(timeType)) {
Some(orderKey.getFieldIndex)
} else if (FlinkTypeFactory.isProctimeIndicatorType(timeType)) {
None
} else {
throw new TableException(
"OVER windows can only be applied on time attributes.")
}
if (overWindow.lowerBound.isPreceding
&& overWindow.lowerBound.isUnbounded) {
throw new TableException(
"Python UDAF is not supported to be used in UNBOUNDED PRECEDING OVER windows."
)
} else if (!overWindow.upperBound.isCurrentRow) {
throw new TableException(
"Python UDAF is not supported to be used in UNBOUNDED FOLLOWING OVER windows."
)
}
val aggregateCalls = logicWindow.groups.get(0).getAggregateCalls(logicWindow).asScala
val inRowType = FlinkTypeFactory.toLogicalRowType(inputRel.getRowType)
val outRowType = FlinkTypeFactory.toLogicalRowType(outputRowType)
val partitionKeys: Array[Int] = overWindow.keys.toArray
val inputTypeInfo = InternalTypeInfo.of(inRowType)
val selector = KeySelectorUtil.getRowDataSelector(partitionKeys, inputTypeInfo)
val boundValue = OverAggregateUtil.getBoundary(logicWindow, overWindow.lowerBound)
val isRowsClause = overWindow.isRows
if (boundValue.isInstanceOf[BigDecimal]) {
throw new TableException(
"the specific value is decimal which haven not supported yet.")
}
// bounded OVER window
val precedingOffset = -1 * boundValue.asInstanceOf[Long]
val ret = createPythonOneInputTransformation(
inputDS,
inRowType,
outRowType,
rowTimeIdx,
aggregateCalls,
precedingOffset,
isRowsClause,
partitionKeys,
tableConfig.getMinIdleStateRetentionTime,
tableConfig.getMaxIdleStateRetentionTime,
getConfig(planner.getExecEnv, tableConfig))
if (isPythonWorkerUsingManagedMemory(tableConfig.getConfiguration)) {
ret.declareManagedMemoryUseCaseAtSlotScope(ManagedMemoryUseCase.PYTHON)
}
if (inputsContainSingleton()) {
ret.setParallelism(1)
ret.setMaxParallelism(1)
}
// set KeyType and Selector for state
ret.setStateKeySelector(selector)
ret.setStateKeyType(selector.getProducedType)
ret
}
private[this] def createPythonOneInputTransformation(
inputTransform: Transformation[RowData],
inputRowType: RowType,
outputRowType: RowType,
rowTimeIdx: Option[Int],
aggCalls: Seq[AggregateCall],
lowerBoundary: Long,
isRowsClause: Boolean,
grouping: Array[Int],
minIdleStateRetentionTime: Long,
maxIdleStateRetentionTime: Long,
config: Configuration): OneInputTransformation[RowData, RowData] = {
val (pythonUdafInputOffsets, pythonFunctionInfos) =
extractPythonAggregateFunctionInfosFromAggregateCall(aggCalls)
val pythonOperator = getPythonOverWindowAggregateFunctionOperator(
config,
inputRowType,
outputRowType,
rowTimeIdx,
lowerBoundary,
isRowsClause,
grouping,
pythonUdafInputOffsets,
pythonFunctionInfos,
minIdleStateRetentionTime,
maxIdleStateRetentionTime)
new OneInputTransformation(
inputTransform,
"StreamExecPythonOverAggregate",
pythonOperator,
InternalTypeInfo.of(outputRowType),
inputTransform.getParallelism)
}
private[this] def getPythonOverWindowAggregateFunctionOperator(
config: Configuration,
inputRowType: RowType,
outputRowType: RowType,
rowTimeIdx: Option[Int],
lowerBinary: Long,
isRowsClause: Boolean,
grouping: Array[Int],
udafInputOffsets: Array[Int],
pythonFunctionInfos: Array[PythonFunctionInfo],
minIdleStateRetentionTime: Long,
maxIdleStateRetentionTime: Long): OneInputStreamOperator[RowData, RowData] = {
val inputTimeFieldIndex = if (rowTimeIdx.isDefined) {
rowTimeIdx.get
} else {
-1
}
if (isRowsClause) {
val className = if (rowTimeIdx.isDefined) {
ARROW_PYTHON_OVER_WINDOW_ROWS_ROW_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME
} else {
ARROW_PYTHON_OVER_WINDOW_ROWS_PROC_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME
}
val clazz = loadClass(className)
val ctor = clazz.getConstructor(
classOf[Configuration],
classOf[Long],
classOf[Long],
classOf[Array[PythonFunctionInfo]],
classOf[RowType],
classOf[RowType],
classOf[Int],
classOf[Long],
classOf[Array[Int]],
classOf[Array[Int]])
ctor.newInstance(
config,
minIdleStateRetentionTime.asInstanceOf[AnyRef],
maxIdleStateRetentionTime.asInstanceOf[AnyRef],
pythonFunctionInfos,
inputRowType,
outputRowType,
inputTimeFieldIndex.asInstanceOf[AnyRef],
lowerBinary.asInstanceOf[AnyRef],
grouping,
udafInputOffsets)
.asInstanceOf[OneInputStreamOperator[RowData, RowData]]
} else {
val className = if (rowTimeIdx.isDefined) {
ARROW_PYTHON_OVER_WINDOW_RANGE_ROW_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME
} else {
ARROW_PYTHON_OVER_WINDOW_RANGE_PROC_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME
}
val clazz = loadClass(className)
val ctor = clazz.getConstructor(
classOf[Configuration],
classOf[Array[PythonFunctionInfo]],
classOf[RowType],
classOf[RowType],
classOf[Int],
classOf[Long],
classOf[Array[Int]],
classOf[Array[Int]])
ctor.newInstance(
config,
pythonFunctionInfos,
inputRowType,
outputRowType,
inputTimeFieldIndex.asInstanceOf[AnyRef],
lowerBinary.asInstanceOf[AnyRef],
grouping,
udafInputOffsets)
.asInstanceOf[OneInputStreamOperator[RowData, RowData]]
}
}
}
object StreamExecPythonOverAggregate {
val ARROW_PYTHON_OVER_WINDOW_RANGE_ROW_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME : String =
"org.apache.flink.table.runtime.operators.python.aggregate.arrow.stream." +
"StreamArrowPythonRowTimeBoundedRangeOperator"
val ARROW_PYTHON_OVER_WINDOW_RANGE_PROC_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME : String =
"org.apache.flink.table.runtime.operators.python.aggregate.arrow.stream." +
"StreamArrowPythonProcTimeBoundedRangeOperator"
val ARROW_PYTHON_OVER_WINDOW_ROWS_ROW_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME : String =
"org.apache.flink.table.runtime.operators.python.aggregate.arrow.stream." +
"StreamArrowPythonRowTimeBoundedRowsOperator"
val ARROW_PYTHON_OVER_WINDOW_ROWS_PROC_TIME_AGGREGATE_FUNCTION_OPERATOR_NAME : String =
"org.apache.flink.table.runtime.operators.python.aggregate.arrow.stream." +
"StreamArrowPythonProcTimeBoundedRowsOperator"
}
| aljoscha/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/stream/StreamExecPythonOverAggregate.scala | Scala | apache-2.0 | 11,586 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.utils
import java.io.OutputStream
class ConditionalOutputStream(
private val outputStream: OutputStream,
condition: => Boolean
) extends OutputStream {
require(outputStream != null)
override def write(b: Int): Unit = if (condition) outputStream.write(b)
}
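// Minimal usage sketch (illustrative only; the `verbose` flag is an assumption):
// the condition is passed by name, so it is re-evaluated on every write, which
// lets output be muted or un-muted at runtime.
//
//   val verbose = new java.util.concurrent.atomic.AtomicBoolean(true)
//   val out = new ConditionalOutputStream(System.out, verbose.get)
//   out.write('x') // forwarded only while verbose.get returns true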
| bpburns/spark-kernel | kernel-api/src/main/scala/com/ibm/spark/utils/ConditionalOutputStream.scala | Scala | apache-2.0 | 883 |
package dsmoq.taskServer
import java.io.{ File, FileInputStream, IOException, InputStream }
import java.nio.file.{ Files, Paths }
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.services.s3.model.{ GetObjectRequest, ObjectMetadata }
import com.amazonaws.services.s3.transfer.{ TransferManager, TransferManagerConfiguration }
import scala.collection.JavaConversions._
object FileManager {
private val PART_SIZE = 5 * 1024L * 1024L
def moveFromLocalToS3(filePath: String, client: AmazonS3Client) {
if (client.listObjects(AppConf.s3UploadRoot).getObjectSummaries.map(_.getKey).contains(filePath)) {
return
}
if (!client.doesBucketExist(AppConf.s3UploadRoot)) {
throw new BucketNotFoundException("The target bucket (%s) has not been created.".format(AppConf.s3UploadRoot))
}
val path = Paths.get(AppConf.fileDir, filePath)
val fileSize = Files.size(path)
val putMetaData = new ObjectMetadata()
putMetaData.setContentLength(fileSize)
val c = new TransferManagerConfiguration()
c.setMinimumUploadPartSize(PART_SIZE)
val manager = new TransferManager(client)
manager.setConfiguration(c)
val in = Files.newInputStream(path)
try {
val upload = manager.upload(AppConf.s3UploadRoot, filePath, in, putMetaData)
upload.waitForCompletion()
} catch {
case e: InterruptedException => throw new IOException(e)
} finally {
in.close()
}
// val fullPath = Paths.get(AppConf.fileDir, filePath).toFile
// val file = new File(fullPath.toString)
// loanStream(file)( x => uploadToS3(filePath, x, client) )
}
// private def loanStream(file: File)(f: InputStream => Unit)
// {
// var stream :InputStream = null
// try
// {
// stream = new FileInputStream(file)
// f(stream)
// }
// finally
// {
// try {
// if (stream != null) {
// stream.close()
// }
// }
// catch
// {
// case _:IOException =>
// }
// }
// }
def moveFromS3ToLocal(filePath: String)(implicit client: AmazonS3Client) {
if (Paths.get(AppConf.fileDir, filePath).toFile.exists()) {
return
}
val request = new GetObjectRequest(AppConf.s3UploadRoot, filePath)
val manager = new TransferManager(client)
val pattern = """([^/]+)/([^/]+)/[^/]+""".r
filePath match {
case pattern(datasetId, fileId) => {
val datasetDir = Paths.get(AppConf.fileDir, datasetId).toFile
if (!datasetDir.exists()) datasetDir.mkdir()
val fileDir = datasetDir.toPath.resolve(fileId).toFile
if (!fileDir.exists()) fileDir.mkdir()
}
case _ => // do nothing
}
val download = manager.download(request, Paths.get(AppConf.fileDir, filePath).toFile)
try {
download.waitForCompletion()
} catch {
case e: InterruptedException => throw new IOException(e)
}
}
private def uploadToS3(filePath: String, in: InputStream, client: AmazonS3Client) {
if (client.listObjects(AppConf.s3UploadRoot).getObjectSummaries.map(_.getKey).contains(filePath)) {
return
}
if (!client.doesBucketExist(AppConf.s3UploadRoot)) {
throw new BucketNotFoundException("The target bucket (%s) has not been created.".format(AppConf.s3UploadRoot))
}
val contentLength = in.available
val putMetaData = new ObjectMetadata()
putMetaData.setContentLength(contentLength)
val manager = new TransferManager(client)
// Set the part size for multipart upload
val c = new TransferManagerConfiguration()
c.setMinimumUploadPartSize(PART_SIZE)
manager.setConfiguration(c)
val upload = manager.upload(AppConf.s3UploadRoot, filePath, in, putMetaData)
try {
upload.waitForCompletion()
} catch {
case e: InterruptedException => throw new IOException(e)
}
}
}
| nkawa/dsmoq | server/taskServer/src/main/scala/dsmoq/taskServer/FileManager.scala | Scala | apache-2.0 | 3,919 |
/*
* Copyright 2010-2011 Vilius Normantas <[email protected]>
*
* This file is part of Crossbow library.
*
* Crossbow is free software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* Crossbow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with Crossbow. If not,
* see <http://www.gnu.org/licenses/>.
*/
package lt.norma.crossbow.indicators
import lt.norma.crossbow.core._
import lt.norma.crossbow.messages.EmptyMessage
import org.scalatest.FunSuite
class FirstValueTest extends FunSuite {
test("name") {
val target = new Variable[Int] {
override def name = "T"
}
val indicator = new FirstValue(target)
expect("First value of T") {
indicator.name
}
}
test("dependency") {
val target = new Variable[Int] {
override def name = "T"
}
val indicator = new FirstValue(target)
expect(2) {
indicator.dependencies.size
}
}
test("value - Int") {
val target = new Variable[Int] {
override def name = "T"
}
val indicator = new FirstValue(target)
val list = new IndicatorList(indicator)
expect(None) {
indicator()
}
list.send(EmptyMessage)
expect(None) {
indicator()
}
target.set(5)
list.send(EmptyMessage)
expect(Some(5)) {
indicator()
}
target.set(8)
list.send(EmptyMessage)
expect(Some(5)) {
indicator()
}
target.set(10)
list.send(EmptyMessage)
expect(Some(5)) {
indicator()
}
target.unset()
list.send(EmptyMessage)
expect(Some(5)) {
indicator()
}
target.set(1)
list.send(EmptyMessage)
expect(Some(5)) {
indicator()
}
}
test("value - String") {
val target = new Variable[String] {
override def name = "T"
}
val indicator = new FirstValue(target)
val list = new IndicatorList(indicator)
expect(None) {
indicator()
}
list.send(EmptyMessage)
expect(None) {
indicator()
}
target.set("A")
list.send(EmptyMessage)
expect(Some("A")) {
indicator()
}
target.set("B")
list.send(EmptyMessage)
expect(Some("A")) {
indicator()
}
target.set("C")
list.send(EmptyMessage)
expect(Some("A")) {
indicator()
}
target.unset()
list.send(EmptyMessage)
expect(Some("A")) {
indicator()
}
target.set("D")
list.send(EmptyMessage)
expect(Some("A")) {
indicator()
}
}
test("initial value") {
val target = new Variable(55) {
override def name = "T"
}
val indicator = new FirstValue(target)
val list = new IndicatorList(indicator)
expect(Some(55)) {
indicator()
}
target.set(8)
list.send(EmptyMessage)
expect(Some(55)) {
indicator()
}
}
}
| ViliusN/Crossbow | crossbow-core/test/lt/norma/crossbow/indicators/FirstValueTest.scala | Scala | gpl-3.0 | 3,210 |
/*******************************************************************************
* Copyright 2010 Maxime Lévesque
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
***************************************************************************** */
package org.squeryl
import dsl.ast.ViewExpressionNode
import dsl.{TypedExpression, QueryDsl}
import internals._
import java.sql.ResultSet
/**
* This class can be used for read only tables or (database) views
* for an updatable view, or table use Table[T]
*/
class View[T] private [squeryl](_name: String, private[squeryl] val classOfT: Class[T], schema: Schema, _prefix: Option[String], val ked: Option[KeyedEntityDef[T,_]]) extends Queryable[T] {
//2.9.x approach for Lifecycle events:
// private [squeryl] var _callbacks: PosoLifecycleEventListener = NoOpPosoLifecycleEventListener
////2.8.x approach for Lifecycle events:
private [squeryl] lazy val _callbacks =
schema._callbacks.get(this).getOrElse(NoOpPosoLifecycleEventListener)
def name = schema.tableNameFromClassName(_name)
def prefix: Option[String] =
if(_prefix != None)
_prefix
else
schema.name
def prefixedName =
if(prefix != None)
prefix.get + "." + name
else
name
/**
* Suppose you have : prefix.MyTable
* myTable.prefixedPrefixedName("z") will yield : prefix.zMyTable
* used for creating names for objects derived from a table, ex.: a sequence
*/
def prefixedPrefixedName(s: String) =
if(prefix != None)
prefix.get + "." + s + name
else
s + name
private [squeryl] def findFieldMetaDataForProperty(name: String) = posoMetaData.findFieldMetaDataForProperty(name)
val posoMetaData = new PosoMetaData(classOfT, schema, this)
private [squeryl] def allFieldsMetaData: Iterable[FieldMetaData] = posoMetaData.fieldsMetaData
private val _emptyArray = new Array[Object](0);
protected val _setPersisted =
if(classOf[PersistenceStatus].isAssignableFrom(classOfT))
(t:T) => t.asInstanceOf[PersistenceStatus]._isPersisted = true
else
(t:T) => {}
private val _posoFactory =
FieldMetaData.factory.createPosoFactory(posoMetaData)
private [squeryl] def _createInstanceOfRowObject =
_posoFactory()
private [squeryl] def give(resultSetMapper: ResultSetMapper, resultSet: ResultSet) : T = {
var o = _callbacks.create
if(o == null)
o = _createInstanceOfRowObject
resultSetMapper.map(o, resultSet);
val t = o.asInstanceOf[T]
_setPersisted(t)
_callbacks.afterSelect(t.asInstanceOf[AnyRef]).asInstanceOf[T]
}
def lookup[K](k: K)(implicit ked: KeyedEntityDef[T,K], dsl: QueryDsl, toCanLookup: K => CanLookup): Option[T] = {
//TODO: find out why scalac won't let dsl be passed to another method
import dsl._
val q = from(this)(a => dsl.where {
FieldReferenceLinker.createEqualityExpressionWithLastAccessedFieldReferenceAndConstant(ked.getId(a), k, toCanLookup(k))
} select(a))
val it = q.iterator
if(it.hasNext) {
val ret = Some(it.next)
// Forces statement to be closed.
it.hasNext
ret
}
else
None
}
/**
* Will throw an exception if the given key (k) returns no row.
*/
def get[K](k: K)(implicit ked: KeyedEntityDef[T,K], dsl: QueryDsl, toCanLookup: K => CanLookup): T =
lookup(k).getOrElse(throw new NoSuchElementException("Found no row with key '"+ k + "' in " + name + "."))
def allRows(implicit dsl: QueryDsl): Iterable[T] = {
import dsl._
dsl.queryToIterable(from(this)(a=> select(a)))
}
def viewExpressionNode: ViewExpressionNode[T] = new ViewExpressionNode[T](this)
}
sealed trait CanLookup
private [squeryl] case object CompositeKeyLookup extends CanLookup
private [squeryl] case object UnknownCanLookup extends CanLookup
private [squeryl] case class SimpleKeyLookup[T](convert: T => TypedExpression[T, _]) extends CanLookup
| wukaikailive/squeryl | src/main/scala/org/squeryl/View.scala | Scala | apache-2.0 | 4,588 |
package models
import play.api.libs.json.Json
case class Wine(name: String, vintage: String)
object Wine {
implicit val wineFormat = Json.format[Wine]
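// Illustrative round-trip sketch (values are arbitrary examples, not from the app):
//
//   val json = Json.toJson(Wine("Margaux", "2015"))
//   val wine = Json.parse("""{"name":"Margaux","vintage":"2015"}""").as[Wine]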
} | benhalton/angular-play-mongo-starter | app/models/Wine.scala | Scala | gpl-3.0 | 159 |
/*
* (C) Copyright 2015 Atomic BITS (http://atomicbits.io).
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Affero General Public License
* (AGPL) version 3.0 which accompanies this distribution, and is available in
* the LICENSE file or at http://www.gnu.org/licenses/agpl-3.0.en.html
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Affero General Public License for more details.
*
* Contributors:
* Peter Rigole
*
*/
package io.atomicbits.scraml.parser.model
/**
* Created by peter on 17/05/15, Atomic BITS bvba (http://atomicbits.io).
*/
case class Response(headers: Map[String, Parameter], body: Map[String, MimeType])
object Response {
def apply(response: org.raml.model.Response): Response = {
val headers: Map[String, Parameter] =
Transformer.transformMap[org.raml.model.parameter.Header, Parameter](Parameter(_))(response.getHeaders)
val body: Map[String, MimeType] =
Transformer.transformMap[org.raml.model.MimeType, MimeType](MimeType(_))(response.getBody)
Response(headers, body)
}
}
| rcavalcanti/scraml | modules/scraml-parser/src/main/scala/io/atomicbits/scraml/parser/model/Response.scala | Scala | agpl-3.0 | 1,278 |
/**
* Copyright (C) 2015-2016 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.api.sword2
import java.io.{File, IOException}
import java.net.{MalformedURLException, URI, URL, UnknownHostException}
import java.nio.file._
import java.nio.file.attribute.{BasicFileAttributes, PosixFilePermissions}
import java.util.Collections
import java.util.regex.Pattern
import gov.loc.repository.bagit.FetchTxt.FilenameSizeUrl
import gov.loc.repository.bagit.utilities.SimpleResult
import gov.loc.repository.bagit.verify.CompleteVerifier
import gov.loc.repository.bagit.{Bag, BagFactory, FetchTxt}
import net.lingala.zip4j.core.ZipFile
import nl.knaw.dans.lib.error.{CompositeException, TraversableTryExtensions}
import org.apache.abdera.i18n.iri.IRI
import org.apache.commons.codec.digest.DigestUtils
import org.apache.commons.io.FileUtils._
import org.joda.time.{DateTime, DateTimeZone}
import org.slf4j.LoggerFactory
import org.swordapp.server.{Deposit, DepositReceipt, SwordError}
import resource.Using
import rx.lang.scala.schedulers.NewThreadScheduler
import rx.lang.scala.subjects.PublishSubject
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
object DepositHandler {
val log = LoggerFactory.getLogger(getClass)
implicit val bagFactory = new BagFactory
val depositProcessingStream = PublishSubject[(String, Deposit)]()
def startDepositProcessingStream(settings: Settings): Unit = {
depositProcessingStream
.onBackpressureBuffer
.observeOn(NewThreadScheduler())
.doOnEach(_ match { case (id, deposit) => finalizeDeposit(deposit.getMimeType)(settings, id) })
.subscribe(_ match { case (id, deposit) => log.info(s"Done finalizing deposit $id") })
}
def handleDeposit(deposit: Deposit)(implicit settings: Settings, id: String): Try[DepositReceipt] = {
val payload = Paths.get(settings.tempDir.toString, id, deposit.getFilename.split("/").last).toFile
for {
_ <- copyPayloadToFile(deposit, payload)
_ <- doesHashMatch(payload, deposit.getMd5)
_ <- handleDepositAsync(deposit)
} yield createDepositReceipt(deposit, settings, id)
}
def genericErrorMessage(implicit settings: Settings, id: String): String = {
val mailaddress = settings.supportMailAddress
val timestamp = DateTime.now(DateTimeZone.UTC).toString
s"""The server encountered an unexpected condition.
|Please contact the SWORD service administrator at $mailaddress.
|The error occurred at $timestamp. Your 'DepositID' is $id.
""".stripMargin
}
def finalizeDeposit(mimeType: String)(implicit settings: Settings, id: String): Try[Unit] = {
log.info(s"[$id] Finalizing deposit")
// TODO pass on the combination object of baseDir and baseURL
implicit val baseDir: File = new File(settings.bagStoreBaseDir)
implicit val baseUrl: URI = new URI(settings.bagStoreBaseUri)
val tempDir = new File(settings.tempDir, id)
val result = for {
_ <- checkBagStoreBaseDir()
_ <- extractBag(mimeType)
bagitDir <- getBagDir(tempDir)
_ <- checkFetchItemUrls(bagitDir, settings.urlPattern)
_ <- checkBagVirtualValidity(bagitDir)
_ <- DepositProperties.set(id, "SUBMITTED", "Deposit is valid and ready for post-submission processing", lookInTempFirst = true)
dataDir <- moveBagToStorage()
} yield ()
result.recover {
case InvalidDepositException(_, msg, cause) =>
log.error(s"[$id] Invalid deposit", cause)
DepositProperties.set(id, "INVALID", msg, lookInTempFirst = true)
case NonFatal(e) =>
log.error(s"[$id] Internal failure in deposit service", e)
DepositProperties.set(id, "FAILED", genericErrorMessage, lookInTempFirst = true)
}
}
private def extractBag(mimeType: String)(implicit settings: Settings, id: String): Try[File] = {
def extract(file: File, outputPath: String): Unit = new ZipFile(file.getPath).extractAll(outputPath)
def getSequenceNumber(f: File): Int = {
try {
val seqNumber = f.getName
.split('.')
.lastOption
.getOrElse(throw InvalidDepositException(id, s"Partial file ${f.getName} has no extension. It should be a positive sequence number."))
.toInt
if (seqNumber > 0) seqNumber
else throw InvalidDepositException(id, s"Partial file ${f.getName} has an incorrect extension. It should be a positive sequence number (> 0), but was: $seqNumber")
}
catch {
case _: NumberFormatException =>
throw InvalidDepositException(id, s"Partial file ${f.getName} has an incorrect extension. Should be a positive sequence number.")
}
}
Try {
log.debug(s"[$id] Extracting bag")
val depositDir: File = new File(settings.tempDir, id)
val files = depositDir.listFilesSafe.filter(isPartOfDeposit)
mimeType match {
case "application/zip" =>
files.foreach(file => {
if (!file.isFile)
throw InvalidDepositException(id, s"Inconsistent dataset: non-file object found: ${file.getName}")
extract(file, depositDir.getPath)
deleteQuietly(file)
})
case "application/octet-stream" =>
val mergedZip = new File(depositDir, "merged.zip")
files.foreach(f => log.debug(s"[$id] Merging file: ${f.getName}"))
MergeFiles.merge(mergedZip, files.sortBy(getSequenceNumber))
extract(mergedZip, depositDir.getPath)
files.foreach(deleteQuietly)
deleteQuietly(mergedZip)
case _ =>
throw InvalidDepositException(id, s"Invalid content type: $mimeType")
}
depositDir
}
}
def checkBagStoreBaseDir()(implicit id: String, baseDir: File): Try[Unit] = {
if (!baseDir.exists) Failure(new IOException(s"Bag store base directory ${baseDir.getAbsolutePath} doesn't exist"))
else if (!baseDir.canRead) Failure(new IOException(s"Bag store base directory ${baseDir.getAbsolutePath} is not readable"))
else Success(())
}
private def getBagDir(depositDir: File): Try[File] = Try {
depositDir.listFiles.find(f => f.isDirectory && isPartOfDeposit(f)).get
}
def checkDepositIsInDraft(id: String)(implicit settings: Settings): Try[Unit] =
DepositProperties.getState(id)
.filter(_.label == "DRAFT")
.map(_ => ())
.recoverWith {
case t => Failure(new SwordError("http://purl.org/net/sword/error/MethodNotAllowed", 405, s"Deposit $id is not in DRAFT state."))
}
def copyPayloadToFile(deposit: Deposit, zipFile: File)(implicit id: String): Try[Unit] =
try {
log.debug(s"[$id] Copying payload to: $zipFile")
Success(copyInputStreamToFile(deposit.getInputStream, zipFile))
} catch {
case t: Throwable => Failure(new SwordError("http://purl.org/net/sword/error/ErrorBadRequest", t))
}
def handleDepositAsync(deposit: Deposit)(implicit settings: Settings, id: String): Try[Unit] = Try {
if (!deposit.isInProgress) {
log.info(s"[$id] Scheduling deposit to be finalized")
DepositProperties.set(id, "FINALIZING", "Deposit is being reassembled and validated", lookInTempFirst = true)
depositProcessingStream.onNext((id, deposit))
} else {
log.info(s"[$id] Received continuing deposit: ${deposit.getFilename}")
}
}
private def getFetchTxt(bagitDir: File): Option[FetchTxt] = Option {
getBagFromDir(bagitDir).getFetchTxt
}
def formatMessages(seq: Seq[String], in: String): String = {
seq match {
case Seq() => s"No errors found in $in"
case Seq(msg) => s"One error found in $in:\\n\\t- $msg"
case msgs => msgs.map(msg => s"\\t- $msg").mkString(s"Multiple errors found in $in:\\n", "\\n", "")
}
}
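  /** Checks that every URL in fetch.txt is well-formed and matches the configured URL pattern. */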
def checkFetchItemUrls(bagitDir: File, urlPattern: Pattern)(implicit id: String): Try[Unit] = {
log.debug(s"[$id] Checking validity of urls in fetch.txt")
getFetchTxt(bagitDir)
.map(_.asScala) // Option map
.getOrElse(Seq.empty)
.map(item => checkUrlValidity(item.getUrl, urlPattern)) // Seq map
.collectResults
.map(_ => ()) // Try map
.recoverWith {
case e@CompositeException(throwables) => Failure(InvalidDepositException(id, formatMessages(throwables.map(_.getMessage).toSeq, "fetch.txt URLs"), e))
}
}
private def checkUrlValidity(url: String, urlPattern: Pattern)(implicit id: String): Try[Unit] = {
def checkUrlSyntax: Try[URL] = {
Try(new URL(url)).recoverWith {
case e: MalformedURLException => throw InvalidDepositException(id, s"Invalid url in Fetch Items ($url)")
}
}
def checkUrlAllowed: Try[Unit] = {
if (urlPattern.matcher(url).matches()) Success(())
else Failure(InvalidDepositException(id, s"Not allowed url in Fetch Items ($url)"))
}
for {
_ <- checkUrlSyntax
_ <- checkUrlAllowed
} yield ()
}
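  /**
   * Verifies that the bag is virtually valid: fetch.txt items outside the bag store are
   * downloaded, and the only files allowed to be missing afterwards are those referring to
   * the bag store, whose checksums are validated against the referred bags.
   */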
def checkBagVirtualValidity(bagitDir: File)(implicit id: String, bagStoreBaseDir: File, bagStoreBaseUri: URI): Try[Unit] = {
log.debug(s"[$id] Verifying bag validity")
val fetchItems = getFetchTxt(bagitDir).map(_.asScala).getOrElse(Seq())
val fetchItemsInBagStore = fetchItems.filter(_.getUrl.startsWith(bagStoreBaseUri.toString))
def handleValidationResult(bag: Bag, validationResult: SimpleResult, fetchItemsInBagStore: Seq[FilenameSizeUrl]): Try[Unit] = {
(fetchItemsInBagStore, validationResult.isSuccess) match {
case (Seq(), true) => Success(())
case (Seq(), false) => Failure(InvalidDepositException(id, validationResult.messagesToString))
case (items, true) => Failure(InvalidDepositException(id, s"There is a fetch.txt file, but all the files are present in the bag."))
case (itemsFromBagStore, false) =>
val otherThanMissingPayloadFilesMessages = validationResult.getSimpleMessages
.asScala
.filterNot(_.getCode == CompleteVerifier.CODE_PAYLOAD_MANIFEST_CONTAINS_MISSING_FILE)
if (otherThanMissingPayloadFilesMessages.isEmpty) {
val missingPayloadFiles = validationResult.getSimpleMessages
.asScala
.flatMap(_.getObjects.asScala)
val fetchItemFilesFromBagStore = itemsFromBagStore.map(_.getFilename)
val missingFilesNotInFetchText = missingPayloadFiles diff fetchItemFilesFromBagStore
if (missingFilesNotInFetchText.isEmpty)
noFetchItemsAlreadyInBag(bagitDir, itemsFromBagStore)
.flatMap(_ => validateChecksumsFetchItems(bag, itemsFromBagStore))
else
Failure(InvalidDepositException(id, s"Missing payload files not in the fetch.txt: ${missingFilesNotInFetchText.mkString}."))
}
else
Failure(InvalidDepositException(id, s"Validation of bag did not succeed: ${otherThanMissingPayloadFilesMessages.mkString("\\n")}"))
}
}
for {
_ <- resolveFetchItems(bagitDir, fetchItems diff fetchItemsInBagStore)
bag = getBagFromDir(bagitDir)
validationResult = bag.verifyValid
_ <- handleValidationResult(bag, validationResult, fetchItemsInBagStore)
} yield ()
}
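  /** Downloads the given fetch.txt items (those not referring to the bag store) into the bag directory. */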
private def resolveFetchItems(bagitDir: File, fetchItems: Seq[FetchTxt.FilenameSizeUrl])(implicit id: String): Try[Unit] = {
    if (fetchItems.nonEmpty) log.debug(s"[$id] Resolving files in fetch.txt that refer to locations outside the bag store.")
fetchItems
.map(item => Using.urlInputStream(new URL(item.getUrl))
.map(src => {
val file = new File(bagitDir.getAbsoluteFile, item.getFilename)
if (file.exists)
Failure(InvalidDepositException(id, s"File ${item.getFilename} in the fetch.txt is already present in the bag."))
else
Try {
file.getParentFile.mkdirs()
Files.copy(src, file.toPath)
}
})
.tried
.flatten
.recoverWith {
case e: UnknownHostException => Failure(InvalidDepositException(id, s"The URL for ${item.getFilename} contains an unknown host.", e))
case e: IOException => Failure(InvalidDepositException(id, s"File ${item.getFilename} in the fetch.txt could not be downloaded.", e))
})
.collectResults
.map(_ => ())
.recoverWith {
case e@CompositeException(throwables) => Failure(InvalidDepositException(id, formatMessages(throwables.map(_.getMessage).toSeq, "resolving files from fetch.txt"), e))
}
}
private def noFetchItemsAlreadyInBag(bagitDir: File, fetchItems: Seq[FetchTxt.FilenameSizeUrl])(implicit id: String): Try[Unit] = {
log.debug(s"[$id] Checking that the files in fetch.txt are absent in the bag.")
val presentFiles = fetchItems.filter(item => new File(bagitDir.getAbsoluteFile, item.getFilename).exists)
if (presentFiles.nonEmpty)
Failure(InvalidDepositException(id, s"Fetch.txt file ${presentFiles.head.getFilename} is already present in the bag."))
else
Success(())
}
private def validateChecksumsFetchItems(bag: Bag, fetchItems: Seq[FetchTxt.FilenameSizeUrl])(implicit id: String, baseDir: File, baseUrl: URI): Try[Unit] = {
log.debug(s"[$id] Validating checksums of those files in fetch.txt, that refer to the bag store.")
val fetchItemFiles = fetchItems.map(_.getFilename)
val urls = fetchItems.map(file => file.getFilename -> file.getUrl).toMap
val checksumMapping = bag.getPayloadManifests.asScala
.flatMap(_.asScala)
.filter { case (file, _) => fetchItemFiles.contains(file) }
.map { case (file, checksum) =>
urls.get(file)
.map(url => Try(file, checksum, url))
.getOrElse(Failure(InvalidDepositException(id, s"Checksum validation failed: missing Payload Manifest file $file not found in the fetch.txt.")))
}
.collectResults
.recoverWith {
case e@CompositeException(throwables) => Failure(InvalidDepositException(id, formatMessages(throwables.map(_.getMessage).toSeq, "validating checksums of files in fetch.txt"), e))
}
for {
csMap <- checksumMapping
valid <- validateChecksums(csMap)
} yield ()
}
private def validateChecksums(checksumMapping: Seq[(String, String, String)])(implicit id: String, baseDir: File, baseUrl: URI): Try[Unit] = {
    val errors = checksumMapping.flatMap { // an Option acts as a collection with 0 or 1 elements here, so only actual errors remain
case (file, checksum, url) => compareChecksumAgainstReferredBag(file, checksum, url)
}
if (errors.isEmpty) Success(())
else Failure(InvalidDepositException(id, errors.mkString))
}
private def compareChecksumAgainstReferredBag(file: String, checksum: String, url: String)(implicit id: String, baseDir: File, baseUrl: URI): Option[String] = {
val referredFile = getReferredFile(url, baseUrl)
val referredBagChecksums = getReferredBagChecksums(url)
if (referredBagChecksums.contains(referredFile -> checksum))
Option.empty
else if (referredBagChecksums.map { case (rFile, _) => rFile }.contains(referredFile))
Option(s"Checksum $checksum of the file $file differs from checksum of the file $referredFile in the referred bag.")
else
Option(s"While validating checksums, the file $referredFile was not found in the referred bag.")
}
private def getReferredFile(url: String, baseUrl: URI): String = {
val afterBaseUrl = url.stripPrefix(baseUrl.toString)
afterBaseUrl.substring(afterBaseUrl.indexOf("/data/") + 1)
}
private def getBagFromDir(dir: File): Bag = {
bagFactory.createBag(dir, BagFactory.Version.V0_97, BagFactory.LoadOption.BY_MANIFESTS)
}
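  /** File visitor that applies the configured POSIX permissions to every file and directory it visits. */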
case class MakeAllGroupWritable(permissions: String) extends SimpleFileVisitor[Path] {
override def visitFile(path: Path, attrs: BasicFileAttributes): FileVisitResult = {
log.debug(s"Setting the following permissions $permissions on file $path")
try {
Files.setPosixFilePermissions(path, PosixFilePermissions.fromString(permissions))
FileVisitResult.CONTINUE
} catch {
case usoe: UnsupportedOperationException => log.error("Not on a POSIX supported file system"); FileVisitResult.TERMINATE
case cce: ClassCastException => log.error("Non file permission elements in set"); FileVisitResult.TERMINATE
case ioe: IOException => log.error(s"Could not set file permissions on $path"); FileVisitResult.TERMINATE
case se: SecurityException => log.error(s"Not enough privileges to set file permissions on $path"); FileVisitResult.TERMINATE
}
}
override def postVisitDirectory(dir: Path, ex: IOException): FileVisitResult = {
log.debug(s"Setting the following permissions $permissions on directory $dir")
Files.setPosixFilePermissions(dir, PosixFilePermissions.fromString(permissions))
if (ex == null) FileVisitResult.CONTINUE
else FileVisitResult.TERMINATE
}
}
def isOnPosixFileSystem(file: File): Boolean = {
try {
Files.getPosixFilePermissions(file.toPath)
true
}
catch {
case e: UnsupportedOperationException => false
}
}
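  /**
   * Moves the finalized deposit from the temporary directory to permanent storage, first
   * adjusting permissions when running on a POSIX file system.
   */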
def moveBagToStorage()(implicit settings: Settings, id: String): Try[File] =
Try {
log.debug("Moving bag to permanent storage")
val tempDir = new File(settings.tempDir, id)
val storageDir = new File(settings.depositRootDir, id)
if (isOnPosixFileSystem(tempDir))
Files.walkFileTree(tempDir.toPath, MakeAllGroupWritable(settings.depositPermissions))
if (!tempDir.renameTo(storageDir)) throw new SwordError(s"Cannot move $tempDir to $storageDir")
storageDir
}.recover { case e => throw new SwordError("Failed to move dataset to storage", e) }
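  /** Compares the MD5 of the received zip file with the Content-MD5 value supplied with the deposit. */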
def doesHashMatch(zipFile: File, MD5: String): Try[Unit] = {
log.debug(s"Checking Content-MD5 (Received: $MD5)")
lazy val fail = Failure(new SwordError("http://purl.org/net/sword/error/ErrorChecksumMismatch"))
Using.fileInputStream(zipFile)
.map(is => {
if (MD5 == DigestUtils.md5Hex(is)) Success(())
else fail
})
.tried
.flatten
}
def createDepositReceipt(deposit: Deposit, settings: Settings, id: String): DepositReceipt = {
val dr = new DepositReceipt
val editIRI = new IRI(settings.serviceBaseUrl + "/container/" + id)
val editMediaIri = new IRI(settings.serviceBaseUrl + "/media/" + id)
val stateIri = settings.serviceBaseUrl + "/statement/" + id
dr.setEditIRI(editIRI)
dr.setLocation(editIRI)
dr.setEditMediaIRI(editMediaIri)
dr.setSwordEditIRI(editMediaIri)
dr.setAtomStatementURI(stateIri)
dr.setPackaging(Collections.singletonList("http://purl.org/net/sword/package/BagIt"))
dr.setTreatment("[1] unpacking [2] verifying integrity [3] storing persistently")
dr.setVerboseDescription("received successfully: " + deposit.getFilename + "; MD5: " + deposit.getMd5)
dr
}
// TODO: RETRIEVE VIA AN INTERFACE
private def getReferredBagChecksums(url: String)(implicit baseDir: File, baseUrl: URI): Seq[(String, String)] = {
getBagFromDir(getReferredBagDir(url))
.getPayloadManifests
.asScala
.flatMap(_.asScala)
}
private def getReferredBagDir(url: String)(implicit baseDir: File, baseUrl: URI): File = {
// http://deasy.dans.knaw.nl/aips/31aef203-55ed-4b1f-81f6-b9f67f324c87.2/data/x -> 31/aef20355ed4b1f81f6b9f67f324c87/2
val Array(uuid, version) = url.stripPrefix(baseUrl.toString)
.split("/data").head.replaceAll("-", "")
.split("\\\\.")
val (topDir, uuidDir) = uuid.splitAt(3)
getFile(baseDir, topDir, uuidDir, version)
}
}
| vesaakerman/easy-sword2 | src/main/scala/nl/knaw/dans/api/sword2/DepositHandler.scala | Scala | apache-2.0 | 20,302 |
/*******************************************************************************
* Copyright (c) 2014 Łukasz Szpakowski.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
******************************************************************************/
package pl.luckboy.purfuncor.common
import scala.annotation.tailrec
import scala.util.parsing.input.Position
import scalaz._
import scalaz.Scalaz._
trait Evaluator[-T, E, V]
{
def evaluateSimpleTermS(simpleTerm: T)(env: E): (E, V)
def valueFromTermS(term: Term[T])(env: E): (E, V)
def valueArgCount(value: V): Int
def fullyAppS(funValue: V, argValues: Seq[V])(env: E): (E, V)
def partiallyAppS(funValue: V, argValues: Seq[V])(env: E): (E, V)
def isNoValue(value: V): Boolean
def forceS(value: V)(env: E): (E, V)
def withPos(res: (E, V))(pos: Position): (E, V)
}
object Evaluator
{
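  /**
   * Evaluates a term: an application evaluates the function and its arguments and then applies
   * them, a simple term is delegated to the evaluator instance.
   */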
def evaluateS[T, E, V](term: Term[T])(env: E)(implicit eval: Evaluator[T, E, V]): (E, V) = {
val res = term match {
case App(fun, args, _) =>
val (env2, funValue) = evaluateS(fun)(env)
valuesFromTermsS(args.list)(env2) match {
case (env3, Success(argValues)) => appS(funValue, argValues)(env3)
case (env3, Failure(noValue)) => (env3, noValue)
}
case Simple(simpleTerm, _) =>
eval.evaluateSimpleTermS(simpleTerm)(env)
}
eval.withPos(res)(term.pos)
}
def evaluate[T, E, V](term: Term[T])(implicit eval: Evaluator[T, E, V]) =
State(evaluateS[T, E, V](term))
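  /**
   * Applies a (forced) function value to argument values, handling exact, partial and
   * over-application; in the last case the leftover arguments are applied to the result.
   */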
@tailrec
def appS[T, E, V](funValue: V, argValues: Seq[V])(env: E)(implicit eval: Evaluator[T, E, V]): (E, V) = {
val (env2, funValue2) = eval.forceS(funValue)(env)
val argCount = eval.valueArgCount(funValue2)
if(!eval.isNoValue(funValue2))
if(argCount === argValues.size) {
eval.fullyAppS(funValue2, argValues)(env2)
} else if(argCount > argValues.size) {
eval.partiallyAppS(funValue2, argValues)(env2)
} else {
val (passedArgValues, otherArgValues) = argValues.splitAt(argCount)
val (env3, retValue) = eval.fullyAppS(funValue2, passedArgValues)(env2)
appS[T, E, V](retValue, otherArgValues)(env3)
}
else
(env2, funValue2)
}
def app[T, E, V](funValue: V, argValues: Seq[V])(implicit eval: Evaluator[T, E, V]) =
State(appS[T, E, V](funValue, argValues))
def valuesFromTermsS[T, E, V](terms: List[Term[T]])(env: E)(implicit eval: Evaluator[T, E, V]) =
terms.foldLeft((env, Seq[V]().success[V])) {
case ((newEnv, Success(values)), term) =>
val (newEnv2, value) = eval.valueFromTermS(term)(newEnv)
(newEnv2, if(!eval.isNoValue(value)) (values :+ value).success else value.failure)
case ((newEnv, Failure(noValue)), _) =>
(newEnv, Failure(noValue))
}
def valuesFromTerms[T, E, V](terms: List[Term[T]])(implicit eval: Evaluator[T, E, V]) =
State(valuesFromTermsS[T, E, V](terms))
}
| luckboy/Purfuncor | src/main/scala/pl/luckboy/purfuncor/common/Evaluator.scala | Scala | mpl-2.0 | 3,127 |
package org.etl.util
import org.etl.command.Context
import org.apache.commons.text.StrSubstitutor
object ParameterisationEngine {
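  /** Substitutes `${...}` placeholders in the input string with values taken from the execution context. */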
def resolve(inputString: String, context: Context): String = {
val configMap = context.getMe
val paramEngine = new StrSubstitutor(configMap)
val output = paramEngine.replace(inputString)
output
}
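  /**
   * Evaluates a guard expression: a null expression yields true, otherwise lhs and rhs are
   * compared with the given operator (<, >, == or contains).
   */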
def doYieldtoTrue(expression: org.etl.sparrow.Expression) = {
expression match {
case null => {
true
}
case _ => {
val lhs = expression.getLhs
val rhs = expression.getRhs
val oper = expression.getOperator
        oper match {
          case "<" => {
            val lhsInt = Integer.parseInt(lhs)
            val rhsInt = Integer.parseInt(rhs)
            lhsInt < rhsInt
          }
          case ">" => {
            val lhsInt = Integer.parseInt(lhs)
            val rhsInt = Integer.parseInt(rhs)
            lhsInt > rhsInt
          }
          case "==" => {
            lhs.equals(rhs)
          }
          case "contains" => {
            lhs.contains(rhs)
          }
          case unsupported => {
            // fail fast with a descriptive error instead of an opaque MatchError
            throw new IllegalArgumentException(s"Unsupported operator: $unsupported")
          }
        }
}
}
}
} | jpvelsamy/sparrow | sparrow-server/src/main/scala/org/etl/util/ParameterisationEngine.scala | Scala | apache-2.0 | 1,119 |
/*
* Copyright 2015 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.concurrent.{Callable, Executors}
import java.util.concurrent.atomic.AtomicInteger
import com.google.common.util.concurrent.{AtomicDouble, AtomicDoubleArray}
import collection.JavaConversions._
/**
* Static object for functions related to set cover, specifically for computing the objective value.
*/
object SetCoverFunctions{
/**
* Computes the set cover objective value, given a graph and the output of the double greedy algorithm
* @param nElements Number of elements in graph
* @param nGroups Number of groups / covers
* @param A Output of double greedy algorithm
* @param lambda Weight of cardinality term in objective function
* @param graph Graph on which set cover is evaluated
* @param nThreads Number of threads to use to compute objective value
* @return Objective value, i.e. the set cover value
*/
def computeSetCover(nElements: Int, nGroups: Int, A: Array[Boolean], lambda: Double, graph: SparseGraph, nThreads: Int) : Double = {
val alpha = new AtomicDoubleArray(nGroups)
var i = 0
while (i < nElements){
if (A(i)){
var k = 0
val succLen = graph.nSuccessor(i)
while (k < succLen){
val ll = graph.succ(i, k)
alpha.getAndAdd(ll, 1.0)
k += 1
}
}
i += 1
}
computeSetCover(nElements, nGroups, A, lambda, alpha, nThreads)
}
/**
* Computes the set cover objective value, given the output of the double greedy algorithm
* @param nElements Number of elements in graph
* @param nGroups Number of groups / covers
* @param A Output of double greedy algorithm
* @param lambda Weight of cardinality term in objective function
* @param alpha Sketch alpha: sum of weights of elements in A
* @param nThreads Number of threads to use to compute objective value
* @return Objective value, i.e. the set cover value
*/
def computeSetCover(nElements: Int, nGroups: Int, A: Array[Boolean], lambda: Double, alpha: AtomicDoubleArray, nThreads: Int) : Double = {
val threads = Executors.newFixedThreadPool(nThreads)
val coverVal = new AtomicDouble(0.0)
val indexElement = new AtomicInteger(0)
val indexGroup = new AtomicInteger(0)
val tasks = (0 until nThreads).map(threadId => new Callable[Unit]{
override def call() = {
try{
var hasElementToProcess = true
while (hasElementToProcess){
val i = indexElement.getAndIncrement
if (i < nElements){
if (A(i)) coverVal.getAndAdd(-lambda)
}else{
hasElementToProcess = false
}
}
var hasGroupToProcess = true
while (hasGroupToProcess){
val i = indexGroup.getAndIncrement
if (i < nGroups){
if (alpha.get(i) >= 1.0) coverVal.getAndAdd(1.0)
}else{
hasGroupToProcess = false
}
}
}catch{
case e: Exception => e.printStackTrace()
}
}
}).toList
threads.invokeAll(tasks)
threads.shutdown()
coverVal.get()
}
} | pxinghao/ParallelSubmodularMaximization | src/main/scala/SetCoverFunctions.scala | Scala | apache-2.0 | 3,813 |
package node
import org.junit.Assert
import common.CommonTest
import node.api.SkillState
import node.internal.PoolSizeMissmatchError
import node.internal.SkillException
/**
* Tests the file reading capabilities.
*/
class ParseTest extends CommonTest {
@inline def read(s: String) = SkillState.read("src/test/resources/"+s)
test("two dates") {
read("date-example.sf").Node.all
}
test("simple nodes") { Assert.assertNotNull(read("node.sf")) }
test("simple test") { Assert.assertNotNull(read("date-example.sf")) }
/**
* @see § 6.2.3.Fig.3
*/
test("two node blocks") { Assert.assertNotNull(read("twoNodeBlocks.sf")) }
/**
* @see § 6.2.3.Fig.4
*/
test("colored nodes") { Assert.assertNotNull(read("coloredNodes.sf")) }
test("four colored nodes") { Assert.assertNotNull(read("fourColoredNodes.sf")) }
test("empty blocks") { Assert.assertNotNull(read("emptyBlocks.sf")) }
test("two types") { Assert.assertNotNull(read("twoTypes.sf")) }
test("trivial type definition") { Assert.assertNotNull(read("trivialType.sf")) }
/**
* null pointers are legal in regular fields if restricted to be nullable
*/
test("nullable restricted null pointer") { read("nullableNode.sf").Node.all }
/**
* null pointers are legal in annotations
*/
test("null pointer in an annotation") { read("nullAnnotation.sf").Node.all }
/**
* null pointers are not legal in regular fields
*
* @note this is the lazy case, i.e. the node pointer is never evaluated
*/
test("null pointer in a nonnull field; lazy case!") {
read("illformed/nullNode.sf").Node.all
}
test("data chunk is too long") {
val thrown = intercept[PoolSizeMissmatchError] {
read("illformed/longerDataChunk.sf").Node.all
}
assert(thrown.getMessage === "expected: 3, was: 2, field type: v64")
}
test("data chunk is too short") {
val thrown = intercept[PoolSizeMissmatchError] {
read("illformed/shorterDataChunk.sf").Node.all
}
assert(thrown.getMessage === "expected: 1, was: 2, field type: v64")
}
test("incompatible field types; lazy case!") {
read("illformed/incompatibleType.sf").Node.all
}
test("reserved type ID") {
intercept[SkillException] {
read("illformed/illegalTypeID.sf").Node.all
}
}
test("missing user type") {
intercept[SkillException] {
read("illformed/missingUserType.sf").Node.all
}
}
test("illegal string pool offset") {
intercept[SkillException] {
read("illformed/illegalStringPoolOffsets.sf").Node.all
}
}
test("missing field declarations in second block") {
intercept[SkillException] {
read("illformed/missingFieldInSecondBlock.sf").Node.all
}
}
test("duplicate type definition in the first block") {
intercept[SkillException] {
read("illformed/duplicateDefinition.sf").Node.all
}
}
test("append in the first block") {
intercept[SkillException] {
read("illformed/duplicateDefinitionMixed.sf").Node.all
}
}
test("duplicate append in the same block") {
intercept[SkillException] {
read("illformed/duplicateDefinitionSecondBlock.sf").Node.all
}
}
test("missing type block") {
intercept[SkillException] {
read("illformed/missingTypeBlock.sf").Node.all
}
}
} | XyzNobody/skillScalaTestSuite | src/test/scala/node/ParseTest.scala | Scala | bsd-3-clause | 3,290 |
class Y(x: String) {
def this(x: Double, y: String) = this(y)
}
class Test(y: String) extends Y(5.0, y) { self =>
def this(x: Int, y: String, z: Double) = this("ff")
def this() = this("gg")
def this(y: String, z: Double) = this(5, y, z)
} | VladimirNik/tasty | exttests/tests/overloadedConstr3/Test.scala | Scala | bsd-3-clause | 246 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.{TypeTag, runtimeMirror}
import org.apache.spark.sql.catalyst.ScalaReflectionLock
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.util.Utils
/**
* A non-concrete data type, reserved for internal uses.
*/
private[sql] abstract class AbstractDataType {
/**
* The default concrete type to use if we want to cast a null literal into this type.
*/
private[sql] def defaultConcreteType: DataType
/**
* Returns true if `other` is an acceptable input type for a function that expects this,
* possibly abstract DataType.
*
* {{{
* // this should return true
* DecimalType.acceptsType(DecimalType(10, 2))
*
* // this should return true as well
* NumericType.acceptsType(DecimalType(10, 2))
* }}}
*/
private[sql] def acceptsType(other: DataType): Boolean
/** Readable string representation for the type. */
private[sql] def simpleString: String
}
/**
* A collection of types that can be used to specify type constraints. The sequence also specifies
* precedence: an earlier type takes precedence over a latter type.
*
* {{{
* TypeCollection(StringType, BinaryType)
* }}}
*
* This means that we prefer StringType over BinaryType if it is possible to cast to StringType.
*/
private[sql] class TypeCollection(private val types: Seq[AbstractDataType])
extends AbstractDataType {
require(types.nonEmpty, s"TypeCollection ($types) cannot be empty")
override private[sql] def defaultConcreteType: DataType = types.head.defaultConcreteType
override private[sql] def acceptsType(other: DataType): Boolean =
types.exists(_.acceptsType(other))
override private[sql] def simpleString: String = {
types.map(_.simpleString).mkString("(", " or ", ")")
}
}
private[sql] object TypeCollection {
/**
* Types that can be ordered/compared. In the long run we should probably make this a trait
* that can be mixed into each data type, and perhaps create an [[AbstractDataType]].
*/
// TODO: Should we consolidate this with RowOrdering.isOrderable?
val Ordered = TypeCollection(
BooleanType,
ByteType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType,
TimestampType, DateType,
StringType, BinaryType)
/**
* Types that include numeric types and interval type. They are only used in unary_minus,
* unary_positive, add and subtract operations.
*/
val NumericAndInterval = TypeCollection(NumericType, CalendarIntervalType)
def apply(types: AbstractDataType*): TypeCollection = new TypeCollection(types)
def unapply(typ: AbstractDataType): Option[Seq[AbstractDataType]] = typ match {
case typ: TypeCollection => Some(typ.types)
case _ => None
}
}
/**
* An [[AbstractDataType]] that matches any concrete data types.
*/
protected[sql] object AnyDataType extends AbstractDataType {
// Note that since AnyDataType matches any concrete types, defaultConcreteType should never
// be invoked.
override private[sql] def defaultConcreteType: DataType = throw new UnsupportedOperationException
override private[sql] def simpleString: String = "any"
override private[sql] def acceptsType(other: DataType): Boolean = true
}
/**
* An internal type used to represent everything that is not null, UDTs, arrays, structs, and maps.
*/
protected[sql] abstract class AtomicType extends DataType {
private[sql] type InternalType
private[sql] val tag: TypeTag[InternalType]
private[sql] val ordering: Ordering[InternalType]
@transient private[sql] val classTag = ScalaReflectionLock.synchronized {
val mirror = runtimeMirror(Utils.getSparkClassLoader)
ClassTag[InternalType](mirror.runtimeClass(tag.tpe))
}
}
/**
* :: DeveloperApi ::
* Numeric data types.
*/
abstract class NumericType extends AtomicType {
  // Unfortunately we can't get this implicitly as that breaks Spark Serialization. In order for
  // implicitly[Numeric[JvmType]] to be valid, we have to change JvmType from a type variable to a
  // type parameter and add a numeric annotation (i.e., [JvmType : Numeric]). This gets
  // desugared by the compiler into an argument to the object's constructor. This means there is no
  // longer a no-argument constructor and thus the JVM cannot serialize the object anymore.
private[sql] val numeric: Numeric[InternalType]
}
private[sql] object NumericType extends AbstractDataType {
/**
* Enables matching against NumericType for expressions:
* {{{
* case Cast(child @ NumericType(), StringType) =>
* ...
* }}}
*/
def unapply(e: Expression): Boolean = e.dataType.isInstanceOf[NumericType]
override private[sql] def defaultConcreteType: DataType = DoubleType
override private[sql] def simpleString: String = "numeric"
override private[sql] def acceptsType(other: DataType): Boolean = other.isInstanceOf[NumericType]
}
private[sql] object IntegralType extends AbstractDataType {
/**
* Enables matching against IntegralType for expressions:
* {{{
* case Cast(child @ IntegralType(), StringType) =>
* ...
* }}}
*/
def unapply(e: Expression): Boolean = e.dataType.isInstanceOf[IntegralType]
override private[sql] def defaultConcreteType: DataType = IntegerType
override private[sql] def simpleString: String = "integral"
override private[sql] def acceptsType(other: DataType): Boolean = other.isInstanceOf[IntegralType]
}
private[sql] abstract class IntegralType extends NumericType {
private[sql] val integral: Integral[InternalType]
}
private[sql] object FractionalType {
/**
* Enables matching against FractionalType for expressions:
* {{{
* case Cast(child @ FractionalType(), StringType) =>
* ...
* }}}
*/
def unapply(e: Expression): Boolean = e.dataType.isInstanceOf[FractionalType]
}
private[sql] abstract class FractionalType extends NumericType {
private[sql] val fractional: Fractional[InternalType]
private[sql] val asIntegral: Integral[InternalType]
}
| chenc10/Spark-PAF | sql/catalyst/src/main/scala/org/apache/spark/sql/types/AbstractDataType.scala | Scala | apache-2.0 | 6,906 |
package chat.tox.antox.utils
import android.database.sqlite.SQLiteDatabase
object DatabaseUtil {
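  /**
   * Returns true if the given column exists in the table, determined by inspecting a
   * zero-row query; any failure is treated as "column not present".
   */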
def isColumnInTable(mDb: SQLiteDatabase, table: String, column: String): Boolean = {
try {
val cursor = mDb.rawQuery("SELECT * FROM " + table + " LIMIT 0", null)
val result = cursor.getColumnIndex(column) != -1
cursor.close()
result
} catch {
case e: Exception => false
}
}
} | wiiam/Antox | app/src/main/scala/chat/tox/antox/utils/DatabaseUtil.scala | Scala | gpl-3.0 | 420 |
package monocle.std
import monocle.MonocleSuite
import monocle.function.all._
import monocle.std.byte._
import monocle.law.discipline.{OptionalTests, PrismTests}
class ByteSpec extends MonocleSuite {
checkAll("Byte index bit", OptionalTests(index[Byte, Int, Boolean](0)))
checkAll("Byte to Boolean", PrismTests(byteToBoolean))
}
| NightRa/Monocle | test/src/test/scala/monocle/std/ByteSpec.scala | Scala | mit | 335 |
package controllers
import utils.WithApplication
import java.io.InputStream
import java.net.HttpURLConnection
import java.util.UUID._
import org.specs2.mock.Mockito
import org.specs2.mutable._
import play.api.Logger
import play.api.test.FakeRequest
import play.api.test.Helpers._
/**
* Created by valtechuk on 25/03/2015.
*/
class ChannelShiftSpec extends Specification with Mockito{
section ("unit", "slow")
"Channel Shift Controller" should {
"preserve parameter data" in new WithApplication {
val params = Map("v" -> "1",
"tid" -> "UA-43115970-1",
"cid" -> randomUUID().toString(),
"t" -> "event",
"dh" -> "localhost",
"ec" -> "channelshift",
"ea" -> "card-referral")
val url = mock[URLWrapper]
url.openConnection() returns mock[HttpURLConnection]
url.openConnection().getInputStream returns mock[InputStream]
url.openConnection().getInputStream().available() returns -1
val cs = new ChannelShiftTestable(params, url)
val result = cs.redirect(FakeRequest())
status(result) mustEqual SEE_OTHER
ChannelShiftTestable.synchronized{
ChannelShiftTestable.params mustEqual params
}
}
}
section ("unit", "slow")
}
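/**
 * Test double that captures the parameters that would have been posted to the analytics
 * endpoint instead of writing them to the connection, and returns the mocked URL wrapper.
 */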
class ChannelShiftTestable(params:Map[String,String],url:URLWrapper) extends ChannelShift(params) {
override protected def writeData(cox: HttpURLConnection, postData: Array[Byte]): Unit = {
Logger.info(s"postData Array: $postData")
val s = new String(postData,"UTF-8")
Logger.info(s"postData as string:$s")
val a = s.split("&")
val map = a.map(_.split("=")).map(array => array(0)->array(1)).toMap
ChannelShiftTestable.synchronized{
ChannelShiftTestable.params = map
}
}
override protected def getUrl(): URLWrapper = {
url
}
}
object ChannelShiftTestable{
var params = Map.empty[String,String]
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/controllers/ChannelShiftSpec.scala | Scala | mit | 1,885 |
package breeze.linalg
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import scala.{specialized=>spec}
import breeze.storage.Zero
import breeze.util.Terminal
import breeze.linalg.support._
import breeze.math._
import breeze.linalg.operators._
import scala.reflect.ClassTag
import scala.annotation.unchecked.uncheckedVariance
import breeze.stats.distributions.Rand
/**
*
* @author dlwh
*/
trait MatrixLike[@spec(Double, Int, Float, Long) V, +Self <: Matrix[V]] extends Tensor[(Int, Int), V] with TensorLike[(Int, Int), V, Self] {
def map[V2, That](fn: V=>V2)(implicit canMapValues: CanMapValues[Self @uncheckedVariance , V, V2, That]):That = values map fn
}
trait Matrix[@spec(Double, Int, Float, Long) V] extends MatrixLike[V, Matrix[V]] {
final def apply(i: (Int, Int)) = apply(i._1, i._2)
final def update(i: (Int, Int), e: V): Unit = {
update(i._1, i._2, e)
}
def apply(i: Int, j: Int): V
def update(i: Int, j: Int, e: V): Unit
def size = rows * cols
def rows: Int
def cols: Int
def keySet: Set[(Int, Int)] = new Set[(Int, Int)] {
def contains(elem: (Int, Int)): Boolean = elem._1 >= 0 && elem._1 < rows && elem._2 >= 0 && elem._2 < cols
def +(elem: (Int, Int)): Set[(Int, Int)] = Set() ++ iterator + elem
def -(elem: (Int, Int)): Set[(Int, Int)] = Set() ++ iterator - elem
def iterator: Iterator[(Int, Int)] = for{ j <- Iterator.range(0, cols); i <- Iterator.range(0, rows)} yield (i, j)
}
def iterator = for(i <- Iterator.range(0, rows); j <- Iterator.range(0, cols)) yield (i -> j) -> apply(i, j)
def valuesIterator = for(i <- Iterator.range(0, rows); j <- Iterator.range(0, cols)) yield apply(i, j)
def keysIterator = for(i <- Iterator.range(0, rows); j <- Iterator.range(0, cols)) yield (i -> j)
def toString(maxLines : Int = Terminal.terminalHeight - 3,
maxWidth : Int = Terminal.terminalWidth) : String = {
val showRows = if (rows > maxLines) maxLines - 1 else rows
def colWidth(col : Int) =
if (showRows > 0) (0 until showRows).map(row => if(this(row,col)!=null) this(row,col).toString.length+2 else 3).max else 0
val colWidths = new scala.collection.mutable.ArrayBuffer[Int]
var col = 0
while (col < cols && colWidths.sum < maxWidth) {
colWidths += colWidth(col)
col += 1
}
// make space for "... (K total)"
if (colWidths.size < cols) {
while (colWidths.sum + cols.toString.length + 12 >= maxWidth) {
if (colWidths.isEmpty) {
return "%d x %d matrix".format(rows, cols)
}
colWidths.remove(colWidths.length - 1)
}
}
val newline = Terminal.newline
val rv = new scala.StringBuilder
for (row <- 0 until showRows; col <- 0 until colWidths.length) {
val cell = if (this(row,col)!=null) this(row,col).toString else "--"
rv.append(cell)
rv.append(" " * (colWidths(col) - cell.length))
if (col == colWidths.length - 1) {
if (col < cols - 1) {
rv.append("...")
if (row == 0) {
rv.append(" (")
rv.append(cols)
rv.append(" total)")
}
}
if (row + 1 < showRows) {
rv.append(newline)
}
}
}
if (rows > showRows) {
rv.append(newline)
rv.append("... (")
rv.append(rows)
rv.append(" total)")
}
rv.toString
}
override def toString : String = toString(Terminal.terminalHeight, Terminal.terminalWidth)
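  /** Copies this matrix into a newly allocated DenseMatrix. */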
def toDenseMatrix(implicit cm: ClassTag[V], zero: Zero[V]) = {
DenseMatrix.tabulate(rows, cols){ (i,j) => apply(i, j)}
}
def copy: Matrix[V]
def flatten(view: View=View.Prefer): Vector[V]
override def equals(p1: Any) = p1 match {
case x: Matrix[_] =>
this.rows == x.rows && this.cols == x.cols &&
keysIterator.forall(k => this(k) == x(k))
case _ => false
}
}
object Matrix extends MatrixConstructors[Matrix]
with MatrixGenericOps
with MatrixOpsLowPrio
with MatrixOps
with MatrixMultOps {
def zeros[@spec(Double, Int, Float, Long) V: ClassTag:Zero](rows: Int, cols: Int): Matrix[V] = DenseMatrix.zeros(rows, cols)
def create[@spec(Double, Int, Float, Long) V:Zero](rows: Int, cols: Int, data: Array[V]): Matrix[V] = DenseMatrix.create(rows, cols, data)
private[linalg] def zeroRows[V:ClassTag](cols: Int):Matrix[V] = emptyMatrix(0, cols)
private[linalg] def zeroCols[V:ClassTag](rows: Int):Matrix[V] = emptyMatrix(rows, 0)
private[linalg] def emptyMatrix[V:ClassTag](_rows: Int, _cols: Int):Matrix[V] = new Matrix[V] {
def activeIterator: Iterator[((Int, Int), V)] = Iterator.empty
def activeValuesIterator: Iterator[V] = Iterator.empty
def activeKeysIterator: Iterator[(Int, Int)] = Iterator.empty
def apply(i: Int, j: Int): V = throw new IndexOutOfBoundsException("Empty matrix!")
def update(i: Int, j: Int, e: V) {
throw new IndexOutOfBoundsException("Empty matrix!")
}
def rows: Int = _rows
def cols: Int = _cols
def copy: Matrix[V] = this
def activeSize: Int = 0
def repr: Matrix[V] = this
def flatten(view: View) = Vector[V]()
}
}
trait MatrixConstructors[Mat[T]<:Matrix[T]] {
def zeros[@spec(Double, Int, Float, Long) V:ClassTag:Zero](rows: Int, cols: Int):Mat[V]
def create[@spec(Double, Int, Float, Long) V:Zero](rows: Int, cols: Int, data: Array[V]):Mat[V]
/**
* Creates a matrix of all ones.
* @param rows
* @param cols
* @tparam V
* @return
*/
def ones[@spec(Double, Int, Float, Long) V:ClassTag:Zero:Semiring](rows: Int, cols: Int):Mat[V] = {
fill(rows,cols)(implicitly[Semiring[V]].one)
}
def fill[@spec(Double, Int, Float, Long) V:ClassTag:Zero](rows: Int, cols: Int)(v: =>V):Mat[V] = create(rows, cols, Array.fill(rows * cols)(v))
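  /** Builds a rows x cols matrix whose (r, c) entry is f(r, c). */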
def tabulate[@spec(Double, Int, Float, Long) V:ClassTag:Zero](rows: Int, cols: Int)(f: (Int,Int)=>V):Mat[V]= {
val z = zeros(rows, cols)
for(c <- 0 until cols; r <- 0 until rows) {
z(r, c) = f(r, c)
}
z
}
def rand[T:ClassTag:Zero](rows: Int, cols: Int, rand: Rand[T] = Rand.uniform): Mat[T] = {
fill(rows, cols)(rand.draw())
}
// @specialized() R because of https://issues.scala-lang.org/browse/SI-8886
/** Static constructor for a literal matrix. */
def apply[@specialized(/* Don't remove until SI-8886 is closed*/) R,
@spec(Double, Int, Float, Long) V](rows : R*)(implicit rl : LiteralRow[R,V], man : ClassTag[V], zero: Zero[V]) = {
val nRows = rows.length
val ns = rl.length(rows(0))
val rv = zeros(nRows, ns)
finishLiteral(rv, rl, rows)
rv
}
implicit def canCreateZeros[T:ClassTag:Zero]: CanCreateZeros[Mat[T],(Int,Int)] =
new CanCreateZeros[Mat[T],(Int,Int)] {
def apply(dims: (Int,Int)): Mat[T] = {
zeros[T](dims._1,dims._2)
}
}
implicit def canTabulate[T:ClassTag:Zero] = new CanTabulate[(Int,Int),Mat[T],T] {
def apply(d: (Int, Int), f: ((Int, Int)) => T): Mat[T] = tabulate[T](d._1,d._2)((r: Int, c: Int) => f((r,c)))
}
// This method only exists because of trouble in Scala-specialization land.
// basically, we turn off specialization for this loop, since it's not going to be fast anyway.
private def finishLiteral[V, R](rv: Matrix[V], rl : LiteralRow[R,V], rows: Seq[R]) {
for ((row,i) <- rows.zipWithIndex) {
rl.foreach(row, {(j, v) => rv(i,j) = v})
}
}
}
| mredaelli/breeze | math/src/main/scala/breeze/linalg/Matrix.scala | Scala | apache-2.0 | 8,000 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import com.twitter.scalding.serialization.macros.impl.ordered_serialization.runtime_helpers.MacroEqualityOrderedSerialization
import com.twitter.scalding.serialization.OrderedSerialization
import java.nio.file.Files
import java.io.File
import java.util
import java.util.concurrent.CountDownLatch
import org.scalatest.{Matchers, WordSpec}
import scala.collection.JavaConverters._
import scala.concurrent.{ExecutionContext => ConcurrentExecutionContext, Future, Promise}
import scala.util.{Failure, Success, Try}
import cascading.flow.{Flow, FlowDef, FlowListener}
import com.twitter.scalding.typed.cascading_backend.AsyncFlowDefRunner.TempFileCleanup
import com.twitter.scalding.cascading_interop.FlowListenerPromise.FlowStopException
import org.apache.hadoop.conf.Configuration
object ExecutionTestJobs {
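  /**
   * Small executions exercised by the tests below: word counts, zipped sums, a fan-out merge
   * and a forced write that uses a custom temporary directory.
   */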
def wordCount(in: String, out: String) =
TypedPipe
.from(TextLine(in))
.flatMap(_.split("\\\\s+"))
.map((_, 1L))
.sumByKey
.writeExecution(TypedTsv(out))
def wordCount2(in: TypedPipe[String]) =
in
.flatMap(_.split("\\\\s+"))
.map((_, 1L))
.sumByKey
.toIterableExecution
def zipped(in1: TypedPipe[Int], in2: TypedPipe[Int]) =
in1.groupAll.sum.values.toIterableExecution
.zip(in2.groupAll.sum.values.toIterableExecution)
def mergeFanout(in: List[Int]): Execution[Iterable[(Int, Int)]] = {
// Force a reduce, so no fancy optimizations kick in
val source = TypedPipe.from(in).groupBy(_ % 3).head
(source.mapValues(_ * 2) ++ (source.mapValues(_ * 3))).toIterableExecution
}
def writeExecutionWithTempFile(tempFile: String, testData: List[String]): Execution[List[String]] = {
val forced = TypedPipe.from(testData).map(s => s).forceToDiskExecution
Execution
.withConfig(forced)(conf => conf + ("hadoop.tmp.dir" -> tempFile))
.flatMap(_.toIterableExecution)
.map(_.toList)
}
}
abstract class TestExecutionJob[+T](args: Args) extends ExecutionJob[T](args) {
// In tests, classloader issues with sbt mean we should not
// really use threads, so we run immediately
override def concurrentExecutionContext = new scala.concurrent.ExecutionContext {
def execute(r: Runnable) = r.run
def reportFailure(t: Throwable) = ()
}
}
class WordCountEc(args: Args) extends TestExecutionJob[Unit](args) {
def execution = ExecutionTestJobs.wordCount(args("input"), args("output"))
}
class ExecutionWithTempFiles(args: Args, tempFile: String, testData: List[String])
extends TestExecutionJob[List[String]](args) {
override def execution = ExecutionTestJobs.writeExecutionWithTempFile(tempFile, testData)
}
class ZippedExecutionWithTempFiles(
args: Args,
tempFileOne: String,
tempFileTwo: String,
testDataOne: List[String],
testDataTwo: List[String]
) extends TestExecutionJob[(List[String], List[String])](args) {
override def execution = {
val executionOne = ExecutionTestJobs.writeExecutionWithTempFile(tempFileOne, testDataOne)
val executionTwo = ExecutionTestJobs.writeExecutionWithTempFile(tempFileTwo, testDataTwo)
executionOne.zip(executionTwo)
}
}
case class MyCustomType(s: String)
class NormalJobToExecutionTestJob(args: Args) extends Job(args) {
TypedPipe
.from(0 to 100)
.groupBy(_ % 3)
.sum
.write(source.NullSink)
}
class FlowListenerWithException extends FlowListener {
override def onStarting(flow: Flow[_]): Unit =
throw new RuntimeException("something goes wrong")
override def onCompleted(flow: Flow[_]): Unit = {}
override def onStopping(flow: Flow[_]): Unit = {}
override def onThrowable(flow: Flow[_], throwable: Throwable): Boolean = false
}
class ExecutionTest extends WordSpec with Matchers {
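  /** Convenience assertions that run an Execution (in local or Hadoop mode) and require it to succeed or fail. */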
implicit class ExecutionTestHelper[T](ex: Execution[T]) {
def shouldSucceed(): T = {
val r = ex.waitFor(Config.default, Local(true))
r match {
case Success(s) => s
case Failure(e) => fail(s"Failed running execution, exception:\\n$e")
}
}
def shouldSucceedHadoop(): T = {
val mode = Hdfs(true, new Configuration)
val r = ex.waitFor(Config.defaultFrom(mode), mode)
r match {
case Success(s) => s
case Failure(e) => fail(s"Failed running execution, exception:\\n$e")
}
}
def shouldFail(): Unit = {
val r = ex.waitFor(Config.default, Local(true))
assert(r.isFailure)
}
def shouldFailWith(message: String): Unit = {
val r = ex.waitFor(Config.default, Local(true))
assert(r.isFailure)
r.failed.get.getMessage shouldBe message
}
}
def getShutdownHooks: Seq[Thread] = {
    // The list of attached shutdown hooks is not accessible normally, so we must use reflection to get them
val clazz = Class.forName("java.lang.ApplicationShutdownHooks")
val hooksField = clazz.getDeclaredField("hooks")
hooksField.setAccessible(true)
hooksField.get(null).asInstanceOf[util.IdentityHashMap[Thread, Thread]].asScala.keys.toSeq
}
def isTempFileCleanupHook(hook: Thread): Boolean =
classOf[TempFileCleanup].isAssignableFrom(hook.getClass)
"An Execution" should {
"run" in {
ExecutionTestJobs
.wordCount2(TypedPipe.from(List("a b b c c c", "d d d d")))
.waitFor(Config.default, Local(false))
.get
.toMap shouldBe Map("a" -> 1L, "b" -> 2L, "c" -> 3L, "d" -> 4L)
}
"run with zip" in {
(ExecutionTestJobs
.zipped(TypedPipe.from(0 until 100), TypedPipe.from(100 until 200))
.shouldSucceed() match {
case (it1, it2) => (it1.head, it2.head)
}) shouldBe ((0 until 100).sum, (100 until 200).sum)
}
"run with exception in flow listener" in {
val exec = ExecutionTestJobs.wordCount2(TypedPipe.from(List("a", "b")))
Execution
.withConfig(exec) { config =>
config.addFlowListener((_, _) => new FlowListenerWithException())
}
.shouldFailWith("Flow was stopped")
}
"lift to try" in {
val res = ExecutionTestJobs
.wordCount2(TypedPipe.from(List("a", "b")))
.liftToTry
.shouldSucceed()
assert(res.isSuccess)
}
"lift to try on exception" in {
val res: Try[Nothing] = ExecutionTestJobs
.wordCount2(TypedPipe.from(List("a", "b")))
.map(_ => throw new RuntimeException("Something went wrong"))
.liftToTry
.shouldSucceed()
assert(res.isFailure)
}
"merge fanouts without error" in {
def unorderedEq[T](l: Iterable[T], r: Iterable[T]): Boolean =
(l.size == r.size) && (l.toSet == r.toSet)
def correct(l: List[Int]): List[(Int, Int)] = {
val in = l.groupBy(_ % 3).mapValues(_.head)
in.mapValues(_ * 2).toList ++ in.mapValues(_ * 3)
}
val input = (0 to 100).toList
val result = ExecutionTestJobs.mergeFanout(input).waitFor(Config.default, Local(false)).get
val cres = correct(input)
unorderedEq(cres, result.toList) shouldBe true
}
"If either fails, zip fails, else we get success" in {
val neverHappens = Promise[Int]().future
Execution
.fromFuture(_ => neverHappens)
.zip(Execution.failed(new Exception("oh no")))
.shouldFail()
Execution
.failed(new Exception("oh no"))
.zip(Execution.fromFuture(_ => neverHappens))
.shouldFail()
// If both are good, we succeed:
Execution
.from(1)
.zip(Execution.from("1"))
.shouldSucceed() shouldBe (1, "1")
}
"If one write fails, the other gets cancelled" in {
@volatile var cancelledEx: Option[Throwable] = None
val failedTp: TypedPipe[Int] =
TypedPipe.from(Seq(0)).groupAll.sum.values.map(_ => throw new Exception("oh no"))
val failedEx: Execution[Iterable[Int]] = failedTp.toIterableExecution
val mapCountDownLatch = new CountDownLatch(1)
val blockingTp: TypedPipe[Int] = TypedPipe.from(Seq(1)).groupAll.sum.values.map { i =>
// block until we are done
mapCountDownLatch.await()
i
}
val onCompleteCountDownLatch = new CountDownLatch(1)
val otherEx: Execution[Iterable[Int]] = blockingTp.toIterableExecution.onComplete { t =>
if (t.isFailure) {
// capture the exception
cancelledEx = t.failed.toOption
}
onCompleteCountDownLatch.countDown()
}
val zipped = failedEx.zip(otherEx)
zipped.shouldFail()
// wait for onComplete to finish
onCompleteCountDownLatch.await()
// execution should be cancelled and the flow stopped
assert(cancelledEx.get.isInstanceOf[FlowStopException])
// finish counting down on the map to release the thread
mapCountDownLatch.countDown()
// do the same on the other side
@volatile var cancelledEx2: Option[Throwable] = None
val failedTp2: TypedPipe[Int] =
TypedPipe.from(Seq(0)).groupAll.sum.values.map(_ => throw new Exception("oh no"))
val failedEx2: Execution[Iterable[Int]] = failedTp2.toIterableExecution
val mapCountDownLatch2 = new CountDownLatch(1)
val blockingTp2: TypedPipe[Int] = TypedPipe.from(Seq(1)).groupAll.sum.values.map { i =>
// block until we are done
mapCountDownLatch2.await()
i
}
val onCompleteCountDownLatch2 = new CountDownLatch(1)
val otherEx2: Execution[Iterable[Int]] = blockingTp2.toIterableExecution.onComplete { t =>
if (t.isFailure) {
// capture the exception
cancelledEx2 = t.failed.toOption
}
onCompleteCountDownLatch2.countDown()
}
val zipped2 = otherEx2.zip(failedEx2)
zipped2.shouldFail()
// wait for onComplete to finish
onCompleteCountDownLatch2.await()
// execution should be cancelled and the flow stopped
assert(cancelledEx2.get.isInstanceOf[FlowStopException])
// finish counting down on the map to release the thread
mapCountDownLatch2.countDown()
}
"If one write fails, the flatmapped execution gets cancelled" in {
@volatile var cancelledEx: Option[Throwable] = None
val failedTp: TypedPipe[Int] =
TypedPipe.from(Seq(0)).groupAll.sum.values.map(_ => throw new Exception("oh no"))
val failedEx: Execution[Iterable[Int]] = failedTp.toIterableExecution
val mapCountDownLatch = new CountDownLatch(1)
val otherTp: TypedPipe[Int] = TypedPipe.from(Seq(1)).groupAll.sum.values
val onCompleteCountDownLatch = new CountDownLatch(1)
val otherEx: Execution[Iterable[Int]] = otherTp.toIterableExecution
.flatMap { _ =>
TypedPipe
.from(Seq(2))
.groupAll
.sum
.values
.map { i =>
// block until we are done
mapCountDownLatch.await()
i
}
.toIterableExecution
}
.onComplete { t =>
if (t.isFailure) {
// capture the exception
cancelledEx = t.failed.toOption
}
onCompleteCountDownLatch.countDown()
}
val zipped = failedEx.zip(otherEx)
zipped.shouldFail()
// wait for onComplete to finish
onCompleteCountDownLatch.await()
// execution should be cancelled and the flow stopped
assert(cancelledEx.get.isInstanceOf[FlowStopException])
// finish counting down on the map to release the thread
mapCountDownLatch.countDown()
// do the same on the other side
@volatile var cancelledEx2: Option[Throwable] = None
val failedTp2: TypedPipe[Int] =
TypedPipe.from(Seq(0)).groupAll.sum.values.map(_ => throw new Exception("oh no"))
val failedEx2: Execution[Iterable[Int]] = failedTp2.toIterableExecution
val mapCountDownLatch2 = new CountDownLatch(1)
val otherTp2: TypedPipe[Int] = TypedPipe.from(Seq(1)).groupAll.sum.values
val onCompleteCountDownLatch2 = new CountDownLatch(1)
val otherEx2: Execution[Iterable[Int]] = otherTp2.toIterableExecution
.flatMap { _ =>
TypedPipe
.from(Seq(2))
.groupAll
.sum
.values
.map { i =>
// block until we are done
mapCountDownLatch2.await()
i
}
.toIterableExecution
}
.onComplete { t =>
if (t.isFailure) {
// capture the exception
cancelledEx2 = t.failed.toOption
}
onCompleteCountDownLatch2.countDown()
}
val zipped2 = otherEx2.zip(failedEx2)
zipped2.shouldFail()
// wait for onComplete to finish
onCompleteCountDownLatch2.await()
// execution should be cancelled and the flow stopped
assert(cancelledEx2.get.isInstanceOf[FlowStopException])
// finish counting down on the map to release the thread
mapCountDownLatch2.countDown()
}
"recoverWith may fail to match" in {
val exception = new RuntimeException()
val result = Execution
.from[Unit] {
throw exception
}
.recoverWith { case _: NullPointerException =>
Execution.unit
}
.waitFor(Config.default, Local(true))
result shouldBe Failure(exception)
}
"recover from failure" in {
val tp = TypedPipe.from(Seq(1)).groupAll.sum.values.map(_ => throw new Exception("oh no"))
val recoveredTp = TypedPipe.from(Seq(2)).groupAll.sum.values
val recoveredEx = tp.toIterableExecution.recoverWith { case t: Throwable =>
recoveredTp.toIterableExecution
}
val res = recoveredEx.shouldSucceed()
res shouldBe List(2)
}
"not recover when cancelled by another execution" in {
@volatile var cancelledEx: Option[Throwable] = None
val failedTp: TypedPipe[Int] =
TypedPipe.from(Seq(0)).groupAll.sum.values.map(_ => throw new Exception("oh no"))
val failedEx: Execution[Iterable[Int]] = failedTp.toIterableExecution
val mapCountDownLatch = new CountDownLatch(1)
val blockingTp: TypedPipe[Int] = TypedPipe.from(Seq(1)).groupAll.sum.values.map { i =>
// block until we are done
mapCountDownLatch.await()
i
}
val onCompleteCountDownLatch = new CountDownLatch(1)
val recoveredTp = TypedPipe.from(Seq(2))
val otherEx: Execution[Iterable[Int]] = blockingTp.toIterableExecution
.recoverWith { case t: Throwable =>
recoveredTp.toIterableExecution
}
.onComplete { t =>
if (t.isFailure) {
// capture the exception
cancelledEx = t.failed.toOption
}
onCompleteCountDownLatch.countDown()
}
val zipped = failedEx.zip(otherEx)
zipped.shouldFail()
// wait for onComplete to finish
onCompleteCountDownLatch.await()
// execution should be cancelled and the flow stopped
assert(cancelledEx.get.isInstanceOf[FlowStopException])
// finish counting down on the map to release the thread
mapCountDownLatch.countDown()
}
"Config transformer will isolate Configs" in {
def doesNotHaveVariable(message: String) = Execution.getConfig.flatMap { cfg =>
if (cfg.get("test.cfg.variable").isDefined)
Execution.failed(new Exception(s"$message\\n: var: ${cfg.get("test.cfg.variable")}"))
else
Execution.from(())
}
val hasVariable = Execution.getConfig.flatMap { cfg =>
if (cfg.get("test.cfg.variable").isEmpty)
Execution.failed(new Exception("Should see variable inside of transform"))
else
Execution.from(())
}
def addOption(cfg: Config) = cfg.+("test.cfg.variable", "dummyValue")
doesNotHaveVariable("Should not see variable before we've started transforming")
.flatMap(_ => Execution.withConfig(hasVariable)(addOption))
.flatMap(_ => doesNotHaveVariable("Should not see variable in flatMap's after the isolation"))
.map(_ => true)
.shouldSucceed() shouldBe true
}
"Config transformer will interact correctly with the cache" in {
var incrementIfDefined = 0
var totalEvals = 0
val incrementor = Execution.getConfig.flatMap { cfg =>
totalEvals += 1
if (cfg.get("test.cfg.variable").isDefined)
incrementIfDefined += 1
Execution.from(())
}
def addOption(cfg: Config) = cfg.+("test.cfg.variable", "dummyValue")
// Here we run without the option, with the option, and finally without again.
incrementor
.flatMap(_ => Execution.withConfig(incrementor)(addOption))
.flatMap(_ => incrementor)
.map(_ => true)
.shouldSucceed() shouldBe true
assert(incrementIfDefined === 1)
// We should evaluate once for the default config, and once for the modified config.
assert(totalEvals === 2)
}
"Config transformer will interact correctly with the cache when writing" in {
import java.io._
val srcF = File.createTempFile("tmpoutputLocation", ".tmp").getAbsolutePath
val sinkF = File.createTempFile("tmpoutputLocation2", ".tmp").getAbsolutePath
def writeNums(nums: List[Int]): Unit = {
val pw = new PrintWriter(new File(srcF))
pw.write(nums.mkString("\\n"))
pw.close
}
writeNums(List(1, 2, 3))
val sink = TypedTsv[Int](sinkF)
val src = TypedTsv[Int](srcF)
val operationTP = (TypedPipe.from(src) ++ TypedPipe
.from((1 until 100).toList)).writeExecution(sink).getCounters.map(_._2.toMap)
def addOption(cfg: Config) = cfg.+("test.cfg.variable", "dummyValue")
// Here we run without the option, with the option, and finally without again.
val (oldCounters, newCounters) = operationTP
.flatMap { oc =>
writeNums(List(1, 2, 3, 4, 5, 6, 7))
Execution.withConfig(operationTP)(addOption).map(nc => (oc, nc))
}
.shouldSucceed()
assert(
oldCounters != newCounters,
"With new configs given the source changed we shouldn't cache so the counters should be different"
)
}
"correctly add cached file into config" in {
val execution = Execution.withCachedFile("/path/to/your/file.txt") { cachedFile =>
Execution.getConfig.map { config =>
config.getDistributedCachedFiles should contain only cachedFile
}
}
execution.waitFor(Config.default, Hdfs(strict = true, new Configuration(false))) match {
case Success(s) => s
case Failure(e) => fail(s"Failed running execution, exception:\\n$e")
}
}
"correctly add cached files into config" in {
val execution =
Execution.withCachedFile("/path/to/your/one.txt") { one =>
Execution.withCachedFile("/path/to/your/second.txt") { second =>
Execution.getConfig.map { config =>
config.getDistributedCachedFiles should contain only (one, second)
}
}
}
execution.waitFor(Config.default, Hdfs(strict = true, new Configuration(false))) match {
case Success(s) => s
case Failure(e) => fail(s"Failed running execution, exception:\\n$e")
}
}
}
"ExecutionApp" should {
val parser = new ExecutionApp { def job = Execution.from(()) }
"parse hadoop args correctly" in {
val conf = parser.config(Array("-Dmapred.reduce.tasks=100", "--local"))._1
conf.get("mapred.reduce.tasks") should contain("100")
conf.getArgs.boolean("local") shouldBe true
val (conf1, Hdfs(_, hconf)) = parser.config(Array("--test", "-Dmapred.reduce.tasks=110", "--hdfs"))
conf1.get("mapred.reduce.tasks") should contain("110")
conf1.getArgs.boolean("test") shouldBe true
hconf.get("mapred.reduce.tasks") shouldBe "110"
}
}
"An ExecutionJob" should {
"run correctly" in {
JobTest(new WordCountEc(_))
.arg("input", "in")
.arg("output", "out")
.source(TextLine("in"), List((0, "hello world"), (1, "goodbye world")))
.typedSink(TypedTsv[(String, Long)]("out")) { outBuf =>
outBuf.toMap shouldBe Map("hello" -> 1L, "world" -> 2L, "goodbye" -> 1L)
}
.run
.runHadoop
.finish()
}
}
"Executions" should {
"work correctly with flowDef from user" in {
class PipeBuilderJob(args: Args) extends TestExecutionJob[Unit](args) {
override def execution: Execution[Unit] =
Execution.getMode.flatMap { mode =>
val flowDef: FlowDef = new FlowDef
pipeBuilder(flowDef, mode)
Execution.fromFn((_, _) => flowDef)
}
def pipeBuilder(implicit flowDef: FlowDef, mode: Mode): TypedPipe[Int] =
TypedPipe
.from(TextLine(args("input")))
.map(_.toInt)
.map(_ * 2)
.write(TypedTsv[Int]("out"))
}
val input = List((0, "1"), (1, "2"), (2, "3"), (3, "4"), (4, "5"))
val expected = input.map(_._2).map(_.toInt).map(_ * 2)
JobTest(new PipeBuilderJob(_))
.arg("input", "in")
.source(TextLine("in"), input)
.typedSink(TypedTsv[Int]("out")) { outBuf =>
outBuf.toList shouldBe expected
}
.run
.runHadoop
.finish()
}
"shutdown hook should clean up temporary files" in {
val tempFileOne = Files.createTempDirectory("scalding-execution-test")
val tempFileTwo = Files.createTempDirectory("scalding-execution-test")
val mode = Test(Map())
Files.exists(tempFileOne) should be(true)
Files.exists(tempFileTwo) should be(true)
val cleanupThread =
TempFileCleanup(List(tempFileOne.toFile.getAbsolutePath, tempFileTwo.toFile.getAbsolutePath), mode)
cleanupThread.run()
Files.exists(tempFileOne) should be(false)
Files.exists(tempFileTwo) should be(false)
}
"clean up temporary files on exit" in {
val tempFile = Files.createTempDirectory("scalding-execution-test").toFile.getAbsolutePath
val testData = List("a", "b", "c")
getShutdownHooks.foreach { hook: Thread =>
isTempFileCleanupHook(hook) should be(false)
}
ExecutionTestJobs
.writeExecutionWithTempFile(tempFile, testData)
.shouldSucceedHadoop()
// This is hacky, but there's a small chance that the new cleanup hook isn't registered by the time we get here
// A small sleep like this appears to be sufficient to ensure we can see it
Thread.sleep(1000)
val cleanupHook = getShutdownHooks.find(isTempFileCleanupHook)
cleanupHook shouldBe defined
val files = cleanupHook.get.asInstanceOf[TempFileCleanup].filesToCleanup
assert(files.size == 1)
assert(files.head.contains(tempFile))
cleanupHook.get.run()
// Remove the hook so it doesn't show up in the list of shutdown hooks for other tests
Runtime.getRuntime.removeShutdownHook(cleanupHook.get)
}
"clean up temporary files on finish" in {
val tempFile = Files.createTempDirectory("scalding-execution-test").toFile.getAbsolutePath
val testData = List("a", "b", "c")
val ex = ExecutionTestJobs.writeExecutionWithTempFile(tempFile, testData)
val onFinish = Execution.withConfig(ex)(_.setExecutionCleanupOnFinish(true))
onFinish.shouldSucceedHadoop()
// This is hacky, but there's a small chance that the cleanup thread has not finished
// running by the time we check below
// A small sleep like this appears to be sufficient to ensure we can see it
Thread.sleep(1000)
val f = new File(tempFile)
def allChildren(f: File): List[File] =
if (f.isDirectory) f.listFiles().toList.flatMap(allChildren(_))
else List(f)
assert(allChildren(f).isEmpty, f.toString)
}
"clean up temporary files on exit with a zip" in {
val tempFileOne = Files.createTempDirectory("scalding-execution-test").toFile.getAbsolutePath
val tempFileTwo = Files.createTempDirectory("scalding-execution-test").toFile.getAbsolutePath
val testDataOne = List("a", "b", "c")
val testDataTwo = List("x", "y", "z")
getShutdownHooks.foreach { hook: Thread =>
isTempFileCleanupHook(hook) should be(false)
}
ExecutionTestJobs
.writeExecutionWithTempFile(tempFileOne, testDataOne)
.zip(ExecutionTestJobs.writeExecutionWithTempFile(tempFileTwo, testDataTwo))
.shouldSucceedHadoop()
// This is hacky, but there's a small chance that the new cleanup hook isn't registered by the time we get here
// A small sleep like this appears to be sufficient to ensure we can see it
Thread.sleep(1000)
val cleanupHook = getShutdownHooks.find(isTempFileCleanupHook)
cleanupHook shouldBe defined
val files = cleanupHook.get.asInstanceOf[TempFileCleanup].filesToCleanup
assert(files.size == 2)
assert(files.head.contains(tempFileOne) || files.head.contains(tempFileTwo))
assert(files(1).contains(tempFileOne) || files(1).contains(tempFileTwo))
cleanupHook.get.run()
// Remove the hook so it doesn't show up in the list of shutdown hooks for other tests
Runtime.getRuntime.removeShutdownHook(cleanupHook.get)
}
"evaluate once per run" in {
var first = 0
var second = 0
var third = 0
val e1 = Execution.from { first += 1; 42 }
val e2 = e1.flatMap { x =>
second += 1
Execution.from(2 * x)
}
val e3 = e1.map { x => third += 1; x * 3 }
/**
* Notice both e3 and e2 need to evaluate e1.
*/
val res = e3.zip(e2)
res.shouldSucceed()
assert((first, second, third) == (1, 1, 1))
}
"zip does not duplicate counters" in {
val c1 = Execution
.withId { implicit uid =>
val stat = Stat("test")
val e1 = TypedPipe
.from(0 until 100)
.map { x =>
stat.inc
x
}
.writeExecution(source.NullSink)
e1.zip(e1)
}
.getCounters
.map { case (_, c) => c("test") }
val c2 = Execution
.withId { implicit uid =>
val stat = Stat("test")
val e2 = TypedPipe
.from(0 until 100)
.map { x =>
stat.inc
x
}
.writeExecution(source.NullSink)
e2.flatMap(Execution.from(_)).zip(e2)
}
.getCounters
.map { case (_, c) => c("test") }
c1.shouldSucceed() should ===(100)
c2.shouldSucceed() should ===(100)
}
"zip does not duplicate pure counters" in {
val c1 = {
val e1 = TypedPipe
.from(0 until 100)
.tallyAll("scalding", "test")
.writeExecution(source.NullSink)
e1.zip(e1).getCounters.map { case (_, c) =>
println(c.toMap)
c(("test", "scalding"))
}
}
val c2 = {
val e2 = TypedPipe
.from(0 until 100)
.tallyAll("scalding", "test")
.writeExecution(source.NullSink)
e2.flatMap(Execution.from(_)).zip(e2).getCounters.map { case (_, c) =>
println(c.toMap)
c(("test", "scalding"))
}
}
c1.shouldSucceed() should ===(100)
c2.shouldSucceed() should ===(100)
}
"Running a large loop won't exhaust boxed instances" in {
var timesEvaluated = 0
import com.twitter.scalding.serialization.macros.impl.BinaryOrdering._
// Attempt to use up 4 boxed classes for every execution
def baseExecution(idx: Int): Execution[Unit] = TypedPipe
.from(0 until 1000)
.map(_.toShort)
.flatMap { i =>
timesEvaluated += 1
List((i, i), (i, i))
}
.sumByKey
.map { case (k, v) =>
(k.toInt, v)
}
.sumByKey
.map { case (k, v) =>
(k.toLong, v)
}
.sumByKey
.map { case (k, v) =>
(k.toString, v)
}
.sumByKey
.map { case (k, v) =>
(MyCustomType(k), v)
}
.sumByKey
.writeExecution(TypedTsv(s"/tmp/asdf_$idx"))
implicitly[OrderedSerialization[MyCustomType]] match {
case mos: MacroEqualityOrderedSerialization[_] =>
assert(mos.uniqueId == "com.twitter.scalding.MyCustomType")
case _ =>
sys.error(
"Ordered serialization should have been the MacroEqualityOrderedSerialization for this test"
)
}
def executionLoop(idx: Int): Execution[Unit] =
if (idx > 0)
baseExecution(idx).flatMap(_ => executionLoop(idx - 1))
else
Execution.unit
executionLoop(55).shouldSucceed()
assert(timesEvaluated == 55 * 1000, "Should run the 55 execution loops for 1000 elements")
}
"evaluate shared portions just once, writeExecution" in {
var timesEvaluated = 0
val baseTp = TypedPipe
.from(0 until 1000)
.flatMap { i =>
timesEvaluated += 1
List(i, i)
}
.fork
val fde1 = baseTp.map(_ * 3).writeExecution(TypedTsv("/tmp/asdf"))
val fde2 = baseTp.map(_ * 5).writeExecution(TypedTsv("/tmp/asdf2"))
val res = fde1.zip(fde2)
res.shouldSucceed()
assert(
timesEvaluated == 1000,
"Should share the common sub section of the graph when we zip two write Executions"
)
}
"evaluate shared portions just once, forceToDiskExecution" in {
var timesEvaluated = 0
val baseTp = TypedPipe
.from(0 until 1000)
.flatMap { i =>
timesEvaluated += 1
List(i, i)
}
.fork
val fde1 = baseTp.map(_ * 3).forceToDiskExecution
val fde2 = baseTp.map(_ * 5).forceToDiskExecution
val res = fde1.zip(fde2)
res.shouldSucceed()
assert(
timesEvaluated == 1000,
"Should share the common sub section of the graph when we zip two write Executions"
)
}
"evaluate shared portions just once, forceToDiskExecution with execution cache" in {
var timesEvaluated = 0
val baseTp = TypedPipe
.from(0 until 1000)
.flatMap { i =>
timesEvaluated += 1
List(i, i)
}
.fork
val fde1 = baseTp.map(_ * 3).forceToDiskExecution
val fde2 = baseTp.map(_ * 5).forceToDiskExecution
val res = fde1.zip(fde2).flatMap(_ => fde1).flatMap(_.toIterableExecution)
res.shouldSucceed()
assert(
timesEvaluated == 1000,
"Should share the common sub section of the graph when we zip two write Executions and then flatmap"
)
}
"Ability to do isolated caches so we don't exhaust memory" in {
def memoryWastingExecutionGenerator(id: Int): Execution[Array[Long]] =
Execution.withNewCache(Execution.from(id).flatMap { idx =>
Execution.from(Array.fill(4000000)(idx.toLong))
})
def writeAll(numExecutions: Int): Execution[Unit] =
if (numExecutions > 0) {
memoryWastingExecutionGenerator(numExecutions).flatMap { _ =>
writeAll(numExecutions - 1)
}
} else {
Execution.from(())
}
writeAll(400).shouldSucceed()
}
"handle failure" in {
val result = Execution.withParallelism(Seq(Execution.failed(new Exception("failed"))), 1)
result.shouldFail()
}
"handle an error running in parallel" in {
val executions =
Execution.failed(new Exception("failed")) :: 0.to(10).map(i => Execution.from[Int](i)).toList
val result = Execution.withParallelism(executions, 3)
result.shouldFail()
}
"run in parallel" in {
val executions = 0.to(10).map(i => Execution.from[Int](i)).toList
val result = Execution.withParallelism(executions, 3)
assert(result.shouldSucceed() == 0.to(10).toSeq)
}
"block correctly" in {
var seen = 0
def updateSeen(idx: Int): Unit = {
assert(seen === idx)
seen += 1
}
val executions = 0
.to(10)
.map { i =>
Execution
.from[Int](i)
.map { i => Thread.sleep(10 - i); i }
.onComplete(t => updateSeen(t.get))
}
.toList
.reverse
val result = Execution.withParallelism(executions, 1)
assert(result.shouldSucceed() == 0.to(10).reverse)
}
"can hashCode, compare, and run a long sequence" in {
val execution = Execution.sequence((1 to 100000).toList.map(Execution.from(_)))
assert(execution.hashCode == execution.hashCode)
assert(execution == execution)
assert(execution.shouldSucceed() == (1 to 100000).toList)
}
"caches a withId Execution computation" in {
var called = false
val execution = Execution.withId { id =>
assert(!called)
called = true
Execution.from("foobar")
}
val doubleExecution = execution.zip(execution)
assert(doubleExecution.shouldSucceed() == ("foobar", "foobar"))
assert(called)
}
"maintains equality and hashCode after reconstruction" when {
// Make two copies of these. Comparison by reference
// won't match between the two.
val futureF = { _: ConcurrentExecutionContext => Future.successful(10) }
val futureF2 = { _: ConcurrentExecutionContext => Future.successful(10) }
val fnF = { (_: Config, _: Mode) => null }
val fnF2 = { (_: Config, _: Mode) => null }
val withIdF = { _: UniqueID => Execution.unit }
val withIdF2 = { _: UniqueID => Execution.unit }
val mapF = { _: Int => 12 }
val mapF2 = { _: Int => 12 }
def reconstructibleLaws[T](ex: => Execution[T], ex2: Execution[T]): Unit = {
assert(ex == ex)
assert(ex.hashCode == ex.hashCode)
assert(ex != ex2)
}
"Execution.fromFuture" in {
reconstructibleLaws(Execution.fromFuture(futureF), Execution.fromFuture(futureF2))
}
"Execution.fromFn" in {
reconstructibleLaws(Execution.fromFn(fnF), Execution.fromFn(fnF2))
}
"Execution.withId" in {
reconstructibleLaws(Execution.withId(withIdF), Execution.withId(withIdF2))
}
"Execution#map" in {
reconstructibleLaws(Execution.fromFuture(futureF).map(mapF), Execution.fromFuture(futureF).map(mapF2))
}
"Execution.zip" in {
reconstructibleLaws(
Execution.zip(Execution.fromFuture(futureF2), Execution.withId(withIdF)),
Execution.zip(Execution.fromFuture(futureF2), Execution.withId(withIdF2))
)
}
"Execution.sequence" in {
reconstructibleLaws(
Execution.sequence(
Seq(
Execution.fromFuture(futureF),
Execution.withId(withIdF),
Execution.fromFuture(futureF2).map(mapF)
)
),
Execution.sequence(
Seq(Execution.fromFuture(futureF), Execution.withId(withIdF), Execution.fromFn(fnF))
)
)
}
}
"Has consistent hashCode and equality for mutable" when {
// These cases are a bit convoluted, but we still
// want equality to be consistent
trait MutableX[T] {
protected var x: Int
def setX(newX: Int): Unit = x = newX
def makeExecution: Execution[T]
}
case class FromFutureMutable(var x: Int = 0)
extends Function1[ConcurrentExecutionContext, Future[Int]]
with MutableX[Int] {
def apply(context: ConcurrentExecutionContext) = Future.successful(x)
def makeExecution = Execution.fromFuture(this)
}
case class FromFnMutable(var x: Int = 0) extends Function2[Config, Mode, Null] with MutableX[Unit] {
def apply(config: Config, mode: Mode) = null
def makeExecution = Execution.fromFn(this)
}
case class WithIdMutable(var x: Int = 0)
extends Function1[UniqueID, Execution[Int]]
with MutableX[Int] {
def apply(id: UniqueID) = Execution.fromFuture(FromFutureMutable(x))
def makeExecution = Execution.withId(this)
}
val mapFunction = { x: Int => x * x }
case class MapMutable(var x: Int = 0) extends MutableX[Int] {
val m = FromFutureMutable(x)
override def setX(newX: Int) = {
x = newX
m.setX(x)
}
def makeExecution = m.makeExecution.map(mapFunction)
}
case class ZipMutable(var x: Int = 0) extends MutableX[(Int, Int)] {
val m1 = FromFutureMutable(x)
val m2 = WithIdMutable(x)
override def setX(newX: Int) = {
x = newX
m1.setX(x)
m2.setX(x + 20)
}
def makeExecution = m1.makeExecution.zip(m2.makeExecution)
}
case class SequenceMutable(var x: Int = 0) extends MutableX[Seq[Int]] {
val m1 = FromFutureMutable(x)
val m2 = WithIdMutable(x)
override def setX(newX: Int) = {
x = newX
m1.setX(x)
m2.setX(x * 3)
}
def makeExecution = Execution.sequence(Seq(m1.makeExecution, m2.makeExecution))
}
def mutableLaws[T, U <: MutableX[T]](mutableGen: => U, expectedOpt: Option[Int => T] = None): Unit = {
expectedOpt.foreach { expected =>
require(expected(10) != expected(20))
}
def validate(ex: Execution[T], seed: Int): Unit =
expectedOpt.foreach { expected =>
assert(ex.shouldSucceed() == expected(seed))
}
val mutable1 = mutableGen
mutable1.setX(10)
val ex1 = mutable1.makeExecution
val mutable2 = mutableGen
mutable2.setX(10)
val ex2 = mutable2.makeExecution
assert(ex1 == ex2)
assert(ex1.hashCode == ex2.hashCode)
validate(ex1, 10)
validate(ex2, 10)
mutable2.setX(20)
// We may have the same hashCode still, but we don't need to
assert(ex1 != ex2)
validate(ex2, 20)
val mutable3 = mutableGen
mutable3.setX(20)
val ex3 = mutable3.makeExecution
assert(ex1 != ex3)
validate(ex3, 20)
mutable3.setX(10)
if (ex1 == ex3) {
// If they are made equal again, the hashCodes must match
assert(ex1.hashCode == ex3.hashCode)
}
validate(ex3, 10)
}
"Execution.fromFuture" in {
mutableLaws(FromFutureMutable(), Some { x: Int => x })
}
"Execution.fromFn" in {
mutableLaws(FromFnMutable(), Option.empty[Int => Unit])
}
"Execution.withId" in {
mutableLaws(WithIdMutable(), Some { x: Int => x })
}
"Execution#map" in {
mutableLaws(MapMutable(), Some { x: Int => x * x })
}
"Execution#zip" in {
mutableLaws(ZipMutable(), Some { x: Int => (x, x + 20) })
}
"Execution.sequence" in {
mutableLaws(SequenceMutable(), Some { x: Int => Seq(x, x * 3) })
}
}
}
"Simple jobs" should {
"convert to Execution and run" in {
val ex = Job.toExecutionFromClass(
classOf[NormalJobToExecutionTestJob],
Execution.failed(new Exception("couldn't run"))
)
val res = ex.waitFor(Config.empty, Local(true))
assert(res.isSuccess)
}
"convert ExecutionJob to Execution" in {
val test = JobTest(new WordCountEc(_))
.arg("input", "in")
.arg("output", "out")
.source(TextLine("in"), List((0, "hello world"), (1, "goodbye world")))
.typedSink(TypedTsv[(String, Long)]("out")) { outBuf =>
outBuf.toMap shouldBe Map("hello" -> 1L, "world" -> 2L, "goodbye" -> 1L)
}
val ex = Job.toExecutionFromClass(classOf[WordCountEc], Execution.failed(new Exception("oh no")))
val check =
for {
_ <- ex
mode <- Execution.getMode
_ = test.postRunChecks(mode)
} yield ()
val conf = Config.empty.setArgs(test.getArgs)
val mode = test.getTestMode(useHadoop = false)
assert(check.waitFor(conf, mode).isSuccess)
}
}
"toIterableExecution" should {
"work in TypedSource" in {
val workingDir = System.getProperty("user.dir")
val job = TypedPipe.from(TextLine(workingDir + "/../tutorial/data/hello.txt")).toIterableExecution
assert(job.waitFor(Config.empty, Local(true)).get.toList == List("Hello world", "Goodbye world"))
}
"work in a mapped TypedSource" in {
val workingDir = System.getProperty("user.dir")
val job =
TypedPipe.from(TextLine(workingDir + "/../tutorial/data/hello.txt")).map(_.size).toIterableExecution
assert(
job.waitFor(Config.empty, Local(true)).get.toList == List("Hello world", "Goodbye world").map(_.size)
)
}
}
}
| twitter/scalding | scalding-core/src/test/scala/com/twitter/scalding/ExecutionTest.scala | Scala | apache-2.0 | 41,667 |
package org.jetbrains.sbt
package resolvers
import java.io.{IOException, File}
import com.intellij.notification.{Notification, NotificationType, Notifications}
import com.intellij.openapi.Disposable
import com.intellij.openapi.application.PathManager
import com.intellij.openapi.components.ServiceManager
import com.intellij.openapi.progress.{ProcessCanceledException, ProgressIndicator, ProgressManager, Task}
import com.intellij.openapi.util.io.FileUtil
import com.intellij.util.io.PersistentEnumeratorBase
import org.apache.lucene.store.LockReleaseFailedException
import org.jetbrains.plugins.scala.util.NotificationUtil
import scala.collection.mutable
/**
* @author Nikolay Obedin
* @since 7/25/14.
*/
class SbtResolverIndexesManager(val testIndexesDir: Option[File]) extends Disposable {
import org.jetbrains.sbt.resolvers.SbtResolverIndexesManager._
def this() = this(None)
private val indexesDir = testIndexesDir getOrElse SbtResolverIndexesManager.DEFAULT_INDEXES_DIR
private val indexes: mutable.Set[SbtResolverIndex] = mutable.Set.empty
private val updatingIndexes: mutable.Set[SbtResolverIndex] = mutable.Set.empty
loadIndexes()
def add(resolver: SbtResolver) = find(resolver) match {
case Some(index) => index
case None =>
val newIndex = SbtResolverIndex.create(resolver.kind, resolver.root, getIndexDirectory(resolver.root))
indexes.add(newIndex)
newIndex
}
def find(resolver: SbtResolver): Option[SbtResolverIndex] =
indexes find { _.root == resolver.root }
def dispose() =
indexes foreach { _.close() }
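  // Starts a background task that (re)indexes every resolver not already being updated; indexing failures
  // are surfaced as warnings, and each index is removed from the in-progress set when its update finishes.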
def update(resolvers: Seq[SbtResolver]) {
var indexesToUpdate = Seq.empty[SbtResolverIndex]
updatingIndexes synchronized {
indexesToUpdate = resolvers.filterNot(r => updatingIndexes.exists(r.root == _.root)).map(add)
updatingIndexes ++= indexesToUpdate
}
if (indexesToUpdate.isEmpty) return
ProgressManager.getInstance().run(new Task.Backgroundable(null, "Indexing resolvers") {
def run(progressIndicator: ProgressIndicator): Unit =
indexesToUpdate.foreach { index =>
progressIndicator.setFraction(0.0)
progressIndicator.setText(index.root)
try {
index.update(Some(progressIndicator))
} catch {
case exc : ResolverException =>
notifyWarning(exc.getMessage)
case exc : LockReleaseFailedException =>
notifyWarning(SbtBundle("sbt.resolverIndexer.luceneLockException", exc.getMessage))
} finally {
updatingIndexes synchronized {
updatingIndexes -= index
}
}
}
})
}
private def loadIndexes() {
indexesDir.mkdirs()
if (!indexesDir.exists || !indexesDir.isDirectory) {
notifyWarning(SbtBundle("sbt.resolverIndexer.cantCreateIndexesDir", indexesDir.absolutePath))
return
}
val indices = indexesDir.listFiles()
if (indices == null) return
indices foreach { indexDir =>
if (indexDir.isDirectory) {
try {
val index = SbtResolverIndex.load(indexDir)
indexes.add(index)
} catch {
case exc : ResolverException =>
notifyWarning(exc.getMessage)
case _: PersistentEnumeratorBase.CorruptedException | _: IOException =>
cleanUpCorruptedIndex(indexDir)
}
}
}
}
private def cleanUpCorruptedIndex(indexDir: File): Unit = {
try {
FileUtil.delete(indexDir)
notifyWarning(SbtBundle("sbt.resolverIndexer.indexDirIsCorruptedAndRemoved", indexDir.getAbsolutePath))
} catch {
case _ : Throwable =>
notifyWarning(SbtBundle("sbt.resolverIndexer.indexDirIsCorruptedCantBeRemoved", indexDir.getAbsolutePath))
}
}
private def getIndexDirectory(root: String) = new File(indexesDir, root.shaDigest)
}
object SbtResolverIndexesManager {
val DEFAULT_INDEXES_DIR = new File(PathManager.getSystemPath) / "sbt" / "indexes"
def notifyWarning(message: String) =
NotificationUtil.showMessage(null, message, title = "Resolver Indexer")
def apply() = ServiceManager.getService(classOf[SbtResolverIndexesManager])
}
| whorbowicz/intellij-scala | src/org/jetbrains/sbt/resolvers/SbtResolverIndexesManager.scala | Scala | apache-2.0 | 4,189 |
package scala.generator
import com.bryzek.apidoc.generator.v0.models.InvocationForm
import scala.models.Play23ClientGenerator
import scala.models.ning.Ning18ClientGenerator
import models.TestHelper
import org.scalatest.{FunSpec, Matchers}
class ReferenceSpec extends FunSpec with Matchers {
lazy val ssd = new ScalaService(models.TestHelper.referenceApiService)
it("user case classes") {
val model = ssd.models.find(_.name == "User").get
val code = ScalaCaseClasses.generateCaseClassWithDoc(model, Seq.empty)
models.TestHelper.assertEqualsFile("/generators/reference-spec-user-case-class.txt", code)
}
it("member case classes") {
val model = ssd.models.find(_.name == "Member").get
val code = ScalaCaseClasses.generateCaseClassWithDoc(model, Seq.empty)
models.TestHelper.assertEqualsFile("/generators/reference-spec-member-case-class.txt", code)
}
it("generates expected code for play 2.3 client") {
Play23ClientGenerator.invoke(InvocationForm(service = models.TestHelper.referenceApiService)) match {
case Left(errors) => fail(errors.mkString(", "))
case Right(sourceFiles) => {
sourceFiles.size shouldBe 1
models.TestHelper.assertEqualsFile("/generators/reference-spec-play-23.txt", sourceFiles.head.contents)
}
}
}
it("generates expected code for ning client") {
Ning18ClientGenerator.invoke(InvocationForm(service = models.TestHelper.referenceApiService)) match {
case Left(errors) => fail(errors.mkString(", "))
case Right(sourceFiles) => {
sourceFiles.size shouldBe 1
models.TestHelper.assertEqualsFile("/generators/reference-spec-ning-client.txt", sourceFiles.head.contents)
}
}
}
}
| Seanstoppable/apidoc-generator | scala-generator/src/test/scala/models/generator/ReferenceSpec.scala | Scala | mit | 1,727 |
package io.straight.radg.randomtypes
import io.straight.radg.Context
/**
* @author rbuckland
*/
case class RowNumberGenerator(name: String, startingNumber: Int = 1) extends DataGenerator[Int] {
var num = startingNumber
override def description: String = "Simple incrementing row number"
override def internalGenerate(context: Context): Int = {
val ret = num
num = num+1
ret
}
}
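// Usage sketch (illustrative only; how a Context is obtained depends on the rest of this project):
//   val rowNum = RowNumberGenerator("row_id", startingNumber = 100)
//   rowNum.internalGenerate(ctx) // => 100
//   rowNum.internalGenerate(ctx) // => 101
// Note that `num` is unsynchronised mutable state, so an instance should not be shared across threads.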
| rbuckland/random-data-generator | src/main/scala/io/straight/radg/randomtypes/RowNumberGenerator.scala | Scala | apache-2.0 | 402 |
package com.joescii
object PrimeScala {
def isPrime(n:Int) = (2 to (n-1))
.forall(i => n % i != 0)
def first(n:Int) = Stream.from(2)
.filter(isPrime _)
.take(n)
def first(n:Int, p:Int=>Boolean) = Stream.from(2)
.filter(p)
.take(n)
def first2(n:Int) = first(n, isPrime _)
}
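// A minimal sketch, not part of the original object: trial division only needs to test divisors up to
// sqrt(n), which makes the predicate usable for larger inputs. `FasterPrimeScala` is an illustrative
// name, not an existing API; it reuses the predicate-taking overload of `first` above.
object FasterPrimeScala {
  def isPrime(n: Int): Boolean =
    n > 1 && (2 to math.sqrt(n.toDouble).toInt).forall(i => n % i != 0)
  // e.g. the first ten primes, computed lazily
  def firstTen = PrimeScala.first(10, isPrime)
}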
| joescii/fp-renaissance | src/main/scala/com/joescii/PrimeScala.scala | Scala | apache-2.0 | 307 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.api.Request
import kafka.cluster.BrokerEndPoint
import kafka.log.{LeaderOffsetIncremented, LogAppendInfo}
import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions}
import kafka.server.QuotaFactory.UnboundedQuota
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.KafkaStorageException
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, RequestUtils}
import java.util
import java.util.Optional
import scala.collection.{Map, Seq, Set, mutable}
import scala.compat.java8.OptionConverters._
import scala.jdk.CollectionConverters._
class ReplicaAlterLogDirsThread(name: String,
sourceBroker: BrokerEndPoint,
brokerConfig: KafkaConfig,
failedPartitions: FailedPartitions,
replicaMgr: ReplicaManager,
quota: ReplicationQuotaManager,
brokerTopicStats: BrokerTopicStats)
extends AbstractFetcherThread(name = name,
clientId = name,
sourceBroker = sourceBroker,
failedPartitions,
fetchBackOffMs = brokerConfig.replicaFetchBackoffMs,
isInterruptible = false,
brokerTopicStats) {
private val replicaId = brokerConfig.brokerId
private val maxBytes = brokerConfig.replicaFetchResponseMaxBytes
private val fetchSize = brokerConfig.replicaFetchMaxBytes
private var inProgressPartition: Option[TopicPartition] = None
override protected def latestEpoch(topicPartition: TopicPartition): Option[Int] = {
replicaMgr.futureLocalLogOrException(topicPartition).latestEpoch
}
override protected def logStartOffset(topicPartition: TopicPartition): Long = {
replicaMgr.futureLocalLogOrException(topicPartition).logStartOffset
}
override protected def logEndOffset(topicPartition: TopicPartition): Long = {
replicaMgr.futureLocalLogOrException(topicPartition).logEndOffset
}
override protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch] = {
replicaMgr.futureLocalLogOrException(topicPartition).endOffsetForEpoch(epoch)
}
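  // Despite the name, the "leader" here is the current replica on this broker's original log directory:
  // the fetch is served locally via ReplicaManager.fetchMessages rather than over the network.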
def fetchFromLeader(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = {
var partitionData: Seq[(TopicPartition, FetchData)] = null
val request = fetchRequest.build()
def processResponseCallback(responsePartitionData: Seq[(TopicPartition, FetchPartitionData)]): Unit = {
partitionData = responsePartitionData.map { case (tp, data) =>
val abortedTransactions = data.abortedTransactions.map(_.asJava).orNull
val lastStableOffset = data.lastStableOffset.getOrElse(FetchResponse.INVALID_LAST_STABLE_OFFSET)
tp -> new FetchResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setErrorCode(data.error.code)
.setHighWatermark(data.highWatermark)
.setLastStableOffset(lastStableOffset)
.setLogStartOffset(data.logStartOffset)
.setAbortedTransactions(abortedTransactions)
.setRecords(data.records)
}
}
replicaMgr.fetchMessages(
0L, // timeout is 0 so that the callback will be executed immediately
Request.FutureLocalReplicaId,
request.minBytes,
request.maxBytes,
false,
request.fetchData.asScala.toSeq,
UnboundedQuota,
processResponseCallback,
request.isolationLevel,
None)
if (partitionData == null)
throw new IllegalStateException(s"Failed to fetch data for partitions ${request.fetchData.keySet().toArray.mkString(",")}")
partitionData.toMap
}
// process fetched data
override def processPartitionData(topicPartition: TopicPartition,
fetchOffset: Long,
partitionData: FetchData): Option[LogAppendInfo] = {
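    // Append the locally fetched records to the future replica; once it has fully caught up,
    // maybeReplaceCurrentWithFutureReplica swaps it in and fetching for this partition stops.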
val partition = replicaMgr.getPartitionOrException(topicPartition)
val futureLog = partition.futureLocalLogOrException
val records = toMemoryRecords(FetchResponse.recordsOrFail(partitionData))
if (fetchOffset != futureLog.logEndOffset)
throw new IllegalStateException("Offset mismatch for the future replica %s: fetched offset = %d, log end offset = %d.".format(
topicPartition, fetchOffset, futureLog.logEndOffset))
val logAppendInfo = if (records.sizeInBytes() > 0)
partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = true)
else
None
futureLog.updateHighWatermark(partitionData.highWatermark)
futureLog.maybeIncrementLogStartOffset(partitionData.logStartOffset, LeaderOffsetIncremented)
if (partition.maybeReplaceCurrentWithFutureReplica())
removePartitions(Set(topicPartition))
quota.record(records.sizeInBytes)
logAppendInfo
}
override def addPartitions(initialFetchStates: Map[TopicPartition, InitialFetchState]): Set[TopicPartition] = {
partitionMapLock.lockInterruptibly()
try {
// It is possible that the log dir fetcher completed just before this call, so we
// filter only the partitions which still have a future log dir.
val filteredFetchStates = initialFetchStates.filter { case (tp, _) =>
replicaMgr.futureLogExists(tp)
}
super.addPartitions(filteredFetchStates)
} finally {
partitionMapLock.unlock()
}
}
override protected def fetchEarliestOffsetFromLeader(topicPartition: TopicPartition, leaderEpoch: Int): Long = {
val partition = replicaMgr.getPartitionOrException(topicPartition)
partition.localLogOrException.logStartOffset
}
override protected def fetchLatestOffsetFromLeader(topicPartition: TopicPartition, leaderEpoch: Int): Long = {
val partition = replicaMgr.getPartitionOrException(topicPartition)
partition.localLogOrException.logEndOffset
}
/**
   * Fetches the offset for the leader epoch from the local replica for each of the given topic partitions.
* @param partitions map of topic partition -> leader epoch of the future replica
* @return map of topic partition -> end offset for a requested leader epoch
*/
override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = {
partitions.map { case (tp, epochData) =>
try {
val endOffset = if (epochData.leaderEpoch == UNDEFINED_EPOCH) {
new EpochEndOffset()
.setPartition(tp.partition)
.setErrorCode(Errors.NONE.code)
} else {
val partition = replicaMgr.getPartitionOrException(tp)
partition.lastOffsetForLeaderEpoch(
currentLeaderEpoch = RequestUtils.getLeaderEpoch(epochData.currentLeaderEpoch),
leaderEpoch = epochData.leaderEpoch,
fetchOnlyFromLeader = false)
}
tp -> endOffset
} catch {
case t: Throwable =>
warn(s"Error when getting EpochEndOffset for $tp", t)
tp -> new EpochEndOffset()
.setPartition(tp.partition)
.setErrorCode(Errors.forException(t).code)
}
}
}
override protected val isOffsetForLeaderEpochSupported: Boolean = true
override protected val isTruncationOnFetchSupported: Boolean = false
/**
* Truncate the log for each partition based on current replica's returned epoch and offset.
*
* The logic for finding the truncation offset is the same as in ReplicaFetcherThread
* and mainly implemented in AbstractFetcherThread.getOffsetTruncationState. One difference is
   * that the initial fetch offset for the topic partition could be set to the truncation offset of
   * the current replica if that replica truncates. Otherwise, it is the high watermark, as in ReplicaFetcherThread.
*
* The reason we have to follow the leader epoch approach for truncating a future replica is to
* cover the case where a future replica is offline when the current replica truncates and
* re-replicates offsets that may have already been copied to the future replica. In that case,
   * the future replica may miss the "mark for truncation" event and must use the offset for leader epoch
* exchange with the current replica to truncate to the largest common log prefix for the topic partition
*/
override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = {
val partition = replicaMgr.getPartitionOrException(topicPartition)
partition.truncateTo(truncationState.offset, isFuture = true)
}
override protected def truncateFullyAndStartAt(topicPartition: TopicPartition, offset: Long): Unit = {
val partition = replicaMgr.getPartitionOrException(topicPartition)
partition.truncateFullyAndStartAt(offset, isFuture = true)
}
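  // Among the partitions that are ready to fetch, picks the smallest (topic, partition) pair,
  // ordering by topic name first and then by partition id.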
private def nextReadyPartition(partitionMap: Map[TopicPartition, PartitionFetchState]): Option[(TopicPartition, PartitionFetchState)] = {
partitionMap.filter { case (_, partitionFetchState) =>
partitionFetchState.isReadyForFetch
}.reduceLeftOption { (left, right) =>
if ((left._1.topic < right._1.topic) || (left._1.topic == right._1.topic && left._1.partition < right._1.partition))
left
else
right
}
}
private def selectPartitionToFetch(partitionMap: Map[TopicPartition, PartitionFetchState]): Option[(TopicPartition, PartitionFetchState)] = {
// Only move one partition at a time to increase its catch-up rate and thus reduce the time spent on
// moving any given replica. Replicas are selected in ascending order (lexicographically by topic) from the
// partitions that are ready to fetch. Once selected, we will continue fetching the same partition until it
// becomes unavailable or is removed.
inProgressPartition.foreach { tp =>
val fetchStateOpt = partitionMap.get(tp)
fetchStateOpt.filter(_.isReadyForFetch).foreach { fetchState =>
return Some((tp, fetchState))
}
}
inProgressPartition = None
val nextPartitionOpt = nextReadyPartition(partitionMap)
nextPartitionOpt.foreach { case (tp, fetchState) =>
inProgressPartition = Some(tp)
info(s"Beginning/resuming copy of partition $tp from offset ${fetchState.fetchOffset}. " +
s"Including this partition, there are ${partitionMap.size} remaining partitions to copy by this thread.")
}
nextPartitionOpt
}
private def buildFetchForPartition(tp: TopicPartition, fetchState: PartitionFetchState): ResultWithPartitions[Option[ReplicaFetch]] = {
val requestMap = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
val partitionsWithError = mutable.Set[TopicPartition]()
try {
val logStartOffset = replicaMgr.futureLocalLogOrException(tp).logStartOffset
val lastFetchedEpoch = if (isTruncationOnFetchSupported)
fetchState.lastFetchedEpoch.map(_.asInstanceOf[Integer]).asJava
else
Optional.empty[Integer]
requestMap.put(tp, new FetchRequest.PartitionData(fetchState.fetchOffset, logStartOffset,
fetchSize, Optional.of(fetchState.currentLeaderEpoch), lastFetchedEpoch))
} catch {
case e: KafkaStorageException =>
debug(s"Failed to build fetch for $tp", e)
partitionsWithError += tp
}
val fetchRequestOpt = if (requestMap.isEmpty) {
None
} else {
// Set maxWait and minBytes to 0 because the response should return immediately if
// the future log has caught up with the current log of the partition
val requestBuilder = FetchRequest.Builder.forReplica(ApiKeys.FETCH.latestVersion, replicaId, 0, 0, requestMap).setMaxBytes(maxBytes)
Some(ReplicaFetch(requestMap, requestBuilder))
}
ResultWithPartitions(fetchRequestOpt, partitionsWithError)
}
def buildFetch(partitionMap: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] = {
    // Only include the replica in the fetch request if it is not throttled.
if (quota.isQuotaExceeded) {
ResultWithPartitions(None, Set.empty)
} else {
selectPartitionToFetch(partitionMap) match {
case Some((tp, fetchState)) =>
buildFetchForPartition(tp, fetchState)
case None =>
ResultWithPartitions(None, Set.empty)
}
}
}
}
| Chasego/kafka | core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala | Scala | apache-2.0 | 13,553 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.api._
import kafka.common._
import kafka.utils._
import kafka.cluster.{Broker, Partition, Replica}
import kafka.log.{LogAppendInfo, LogManager}
import kafka.metrics.KafkaMetricsGroup
import kafka.controller.KafkaController
import kafka.common.TopicAndPartition
import kafka.message.{ByteBufferMessageSet, MessageSet}
import java.util.concurrent.atomic.AtomicBoolean
import java.io.{IOException, File}
import java.util.concurrent.TimeUnit
import org.apache.kafka.common.protocol.Errors
import scala.Predef._
import scala.collection._
import scala.collection.mutable.HashMap
import scala.collection.Map
import scala.collection.Set
import org.I0Itec.zkclient.ZkClient
import com.yammer.metrics.core.Gauge
/*
* Result metadata of a log append operation on the log
*/
case class LogAppendResult(info: LogAppendInfo, error: Option[Throwable] = None) {
def errorCode = error match {
case None => ErrorMapping.NoError
case Some(e) => ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])
}
}
/*
* Result metadata of a log read operation on the log
*/
case class LogReadResult(info: FetchDataInfo, hw: Long, readSize: Int, error: Option[Throwable] = None) {
def errorCode = error match {
case None => ErrorMapping.NoError
case Some(e) => ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])
}
}
object ReplicaManager {
val HighWatermarkFilename = "replication-offset-checkpoint"
}
class ReplicaManager(val config: KafkaConfig,
time: Time,
val zkClient: ZkClient,
scheduler: Scheduler,
val logManager: LogManager,
val isShuttingDown: AtomicBoolean ) extends Logging with KafkaMetricsGroup {
/* epoch of the controller that last changed the leader */
@volatile var controllerEpoch: Int = KafkaController.InitialControllerEpoch - 1
private val localBrokerId = config.brokerId
private val allPartitions = new Pool[(String, Int), Partition]
private val replicaStateChangeLock = new Object
val replicaFetcherManager = new ReplicaFetcherManager(config, this)
private val highWatermarkCheckPointThreadStarted = new AtomicBoolean(false)
val highWatermarkCheckpoints = config.logDirs.map(dir => (new File(dir).getAbsolutePath, new OffsetCheckpoint(new File(dir, ReplicaManager.HighWatermarkFilename)))).toMap
private var hwThreadInitialized = false
this.logIdent = "[Replica Manager on Broker " + localBrokerId + "]: "
val stateChangeLogger = KafkaController.stateChangeLogger
val delayedProducePurgatory = new DelayedOperationPurgatory[DelayedProduce](
purgatoryName = "Produce", config.brokerId, config.producerPurgatoryPurgeIntervalRequests)
val delayedFetchPurgatory = new DelayedOperationPurgatory[DelayedFetch](
purgatoryName = "Fetch", config.brokerId, config.fetchPurgatoryPurgeIntervalRequests)
newGauge(
"LeaderCount",
new Gauge[Int] {
def value = {
getLeaderPartitions().size
}
}
)
newGauge(
"PartitionCount",
new Gauge[Int] {
def value = allPartitions.size
}
)
newGauge(
"UnderReplicatedPartitions",
new Gauge[Int] {
def value = underReplicatedPartitionCount()
}
)
val isrExpandRate = newMeter("IsrExpandsPerSec", "expands", TimeUnit.SECONDS)
val isrShrinkRate = newMeter("IsrShrinksPerSec", "shrinks", TimeUnit.SECONDS)
def underReplicatedPartitionCount(): Int = {
getLeaderPartitions().count(_.isUnderReplicated)
}
def startHighWaterMarksCheckPointThread() = {
if(highWatermarkCheckPointThreadStarted.compareAndSet(false, true))
scheduler.schedule("highwatermark-checkpoint", checkpointHighWatermarks, period = config.replicaHighWatermarkCheckpointIntervalMs, unit = TimeUnit.MILLISECONDS)
}
/**
* Try to complete some delayed produce requests with the request key;
* this can be triggered when:
*
* 1. The partition HW has changed (for acks = -1)
* 2. A follower replica's fetch operation is received (for acks > 1)
*/
def tryCompleteDelayedProduce(key: DelayedOperationKey) {
val completed = delayedProducePurgatory.checkAndComplete(key)
debug("Request key %s unblocked %d producer requests.".format(key.keyLabel, completed))
}
/**
* Try to complete some delayed fetch requests with the request key;
* this can be triggered when:
*
* 1. The partition HW has changed (for regular fetch)
* 2. A new message set is appended to the local log (for follower fetch)
*/
def tryCompleteDelayedFetch(key: DelayedOperationKey) {
val completed = delayedFetchPurgatory.checkAndComplete(key)
debug("Request key %s unblocked %d fetch requests.".format(key.keyLabel, completed))
}
def startup() {
// start ISR expiration thread
scheduler.schedule("isr-expiration", maybeShrinkIsr, period = config.replicaLagTimeMaxMs, unit = TimeUnit.MILLISECONDS)
}
def stopReplica(topic: String, partitionId: Int, deletePartition: Boolean): Short = {
stateChangeLogger.trace("Broker %d handling stop replica (delete=%s) for partition [%s,%d]".format(localBrokerId,
deletePartition.toString, topic, partitionId))
val errorCode = ErrorMapping.NoError
getPartition(topic, partitionId) match {
case Some(partition) =>
if(deletePartition) {
val removedPartition = allPartitions.remove((topic, partitionId))
if (removedPartition != null)
removedPartition.delete() // this will delete the local log
}
case None =>
        // Delete the log and corresponding folders in case the replica manager doesn't hold them anymore.
        // This could happen when a topic is being deleted while the broker is down and then recovers.
if(deletePartition) {
val topicAndPartition = TopicAndPartition(topic, partitionId)
if(logManager.getLog(topicAndPartition).isDefined) {
logManager.deleteLog(topicAndPartition)
}
}
stateChangeLogger.trace("Broker %d ignoring stop replica (delete=%s) for partition [%s,%d] as replica doesn't exist on broker"
.format(localBrokerId, deletePartition, topic, partitionId))
}
stateChangeLogger.trace("Broker %d finished handling stop replica (delete=%s) for partition [%s,%d]"
.format(localBrokerId, deletePartition, topic, partitionId))
errorCode
}
def stopReplicas(stopReplicaRequest: StopReplicaRequest): (mutable.Map[TopicAndPartition, Short], Short) = {
replicaStateChangeLock synchronized {
val responseMap = new collection.mutable.HashMap[TopicAndPartition, Short]
if(stopReplicaRequest.controllerEpoch < controllerEpoch) {
stateChangeLogger.warn("Broker %d received stop replica request from an old controller epoch %d."
.format(localBrokerId, stopReplicaRequest.controllerEpoch) +
" Latest known controller epoch is %d " + controllerEpoch)
(responseMap, ErrorMapping.StaleControllerEpochCode)
} else {
controllerEpoch = stopReplicaRequest.controllerEpoch
// First stop fetchers for all partitions, then stop the corresponding replicas
replicaFetcherManager.removeFetcherForPartitions(stopReplicaRequest.partitions.map(r => TopicAndPartition(r.topic, r.partition)))
for(topicAndPartition <- stopReplicaRequest.partitions){
val errorCode = stopReplica(topicAndPartition.topic, topicAndPartition.partition, stopReplicaRequest.deletePartitions)
responseMap.put(topicAndPartition, errorCode)
}
(responseMap, ErrorMapping.NoError)
}
}
}
def getOrCreatePartition(topic: String, partitionId: Int): Partition = {
var partition = allPartitions.get((topic, partitionId))
if (partition == null) {
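      // putIfNotExists guards against a concurrent creation; the follow-up get returns whichever instance won the race.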
allPartitions.putIfNotExists((topic, partitionId), new Partition(topic, partitionId, time, this))
partition = allPartitions.get((topic, partitionId))
}
partition
}
def getPartition(topic: String, partitionId: Int): Option[Partition] = {
val partition = allPartitions.get((topic, partitionId))
if (partition == null)
None
else
Some(partition)
}
def getReplicaOrException(topic: String, partition: Int): Replica = {
val replicaOpt = getReplica(topic, partition)
if(replicaOpt.isDefined)
return replicaOpt.get
else
throw new ReplicaNotAvailableException("Replica %d is not available for partition [%s,%d]".format(config.brokerId, topic, partition))
}
def getLeaderReplicaIfLocal(topic: String, partitionId: Int): Replica = {
val partitionOpt = getPartition(topic, partitionId)
partitionOpt match {
case None =>
throw new UnknownTopicOrPartitionException("Partition [%s,%d] doesn't exist on %d".format(topic, partitionId, config.brokerId))
case Some(partition) =>
partition.leaderReplicaIfLocal match {
case Some(leaderReplica) => leaderReplica
case None =>
throw new NotLeaderForPartitionException("Leader not local for partition [%s,%d] on broker %d"
.format(topic, partitionId, config.brokerId))
}
}
}
def getReplica(topic: String, partitionId: Int, replicaId: Int = config.brokerId): Option[Replica] = {
val partitionOpt = getPartition(topic, partitionId)
partitionOpt match {
case None => None
case Some(partition) => partition.getReplica(replicaId)
}
}
/**
* Append messages to leader replicas of the partition, and wait for them to be replicated to other replicas;
* the callback function will be triggered either when timeout or the required acks are satisfied
*/
def appendMessages(timeout: Long,
requiredAcks: Short,
internalTopicsAllowed: Boolean,
messagesPerPartition: Map[TopicAndPartition, MessageSet],
responseCallback: Map[TopicAndPartition, ProducerResponseStatus] => Unit) {
if (isValidRequiredAcks(requiredAcks)) {
val sTime = SystemTime.milliseconds
val localProduceResults = appendToLocalLog(internalTopicsAllowed, messagesPerPartition, requiredAcks)
debug("Produce to local log in %d ms".format(SystemTime.milliseconds - sTime))
val produceStatus = localProduceResults.map { case (topicAndPartition, result) =>
topicAndPartition ->
ProducePartitionStatus(
result.info.lastOffset + 1, // required offset
ProducerResponseStatus(result.errorCode, result.info.firstOffset)) // response status
}
if (delayedRequestRequired(requiredAcks, messagesPerPartition, localProduceResults)) {
// create delayed produce operation
val produceMetadata = ProduceMetadata(requiredAcks, produceStatus)
val delayedProduce = new DelayedProduce(timeout, produceMetadata, this, responseCallback)
// create a list of (topic, partition) pairs to use as keys for this delayed produce operation
val producerRequestKeys = messagesPerPartition.keys.map(new TopicPartitionOperationKey(_)).toSeq
// try to complete the request immediately, otherwise put it into the purgatory
// this is because while the delayed produce operation is being created, new
// requests may arrive and hence make this operation completable.
delayedProducePurgatory.tryCompleteElseWatch(delayedProduce, producerRequestKeys)
} else {
// we can respond immediately
val produceResponseStatus = produceStatus.mapValues(status => status.responseStatus)
responseCallback(produceResponseStatus)
}
} else {
// If required.acks is outside accepted range, something is wrong with the client
// Just return an error and don't handle the request at all
val responseStatus = messagesPerPartition.map {
case (topicAndPartition, messageSet) =>
(topicAndPartition ->
ProducerResponseStatus(Errors.INVALID_REQUIRED_ACKS.code,
LogAppendInfo.UnknownLogAppendInfo.firstOffset))
}
responseCallback(responseStatus)
}
}
// If all the following conditions are true, we need to put a delayed produce request and wait for replication to complete
//
// 1. required acks = -1
// 2. there is data to append
// 3. at least one partition append was successful (fewer errors than partitions)
private def delayedRequestRequired(requiredAcks: Short, messagesPerPartition: Map[TopicAndPartition, MessageSet],
localProduceResults: Map[TopicAndPartition, LogAppendResult]): Boolean = {
requiredAcks == -1 &&
messagesPerPartition.size > 0 &&
localProduceResults.values.count(_.error.isDefined) < messagesPerPartition.size
}
private def isValidRequiredAcks(requiredAcks: Short): Boolean = {
requiredAcks == -1 || requiredAcks == 1 || requiredAcks == 0
}
/**
* Append the messages to the local replica logs
*/
private def appendToLocalLog(internalTopicsAllowed: Boolean,
messagesPerPartition: Map[TopicAndPartition, MessageSet],
requiredAcks: Short): Map[TopicAndPartition, LogAppendResult] = {
trace("Append [%s] to local log ".format(messagesPerPartition))
messagesPerPartition.map { case (topicAndPartition, messages) =>
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).totalProduceRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().totalProduceRequestRate.mark()
// reject appending to internal topics if it is not allowed
if (Topic.InternalTopics.contains(topicAndPartition.topic) && !internalTopicsAllowed) {
(topicAndPartition, LogAppendResult(
LogAppendInfo.UnknownLogAppendInfo,
Some(new InvalidTopicException("Cannot append to internal topic %s".format(topicAndPartition.topic)))))
} else {
try {
val partitionOpt = getPartition(topicAndPartition.topic, topicAndPartition.partition)
val info = partitionOpt match {
case Some(partition) =>
partition.appendMessagesToLeader(messages.asInstanceOf[ByteBufferMessageSet], requiredAcks)
case None => throw new UnknownTopicOrPartitionException("Partition %s doesn't exist on %d"
.format(topicAndPartition, localBrokerId))
}
val numAppendedMessages =
if (info.firstOffset == -1L || info.lastOffset == -1L)
0
else
info.lastOffset - info.firstOffset + 1
// update stats for successfully appended bytes and messages as bytesInRate and messageInRate
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesInRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerAllTopicsStats.bytesInRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).messagesInRate.mark(numAppendedMessages)
BrokerTopicStats.getBrokerAllTopicsStats.messagesInRate.mark(numAppendedMessages)
trace("%d bytes written to log %s-%d beginning at offset %d and ending at offset %d"
.format(messages.sizeInBytes, topicAndPartition.topic, topicAndPartition.partition, info.firstOffset, info.lastOffset))
(topicAndPartition, LogAppendResult(info))
} catch {
          // NOTE: Failed produce requests metric is not incremented for known exceptions since
          // it is supposed to indicate unexpected failures of a broker in handling a produce request
case e: KafkaStorageException =>
fatal("Halting due to unrecoverable I/O error while handling produce request: ", e)
Runtime.getRuntime.halt(1)
(topicAndPartition, null)
case utpe: UnknownTopicOrPartitionException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(utpe)))
case nle: NotLeaderForPartitionException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(nle)))
case mtl: MessageSizeTooLargeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mtl)))
case mstl: MessageSetSizeTooLargeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mstl)))
case imse : InvalidMessageSizeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(imse)))
case t: Throwable =>
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).failedProduceRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats.failedProduceRequestRate.mark()
error("Error processing append operation on partition %s".format(topicAndPartition), t)
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(t)))
}
}
}
}
/**
* Fetch messages from the leader replica, and wait until enough data can be fetched and return;
* the callback function will be triggered either when timeout or required fetch info is satisfied
*/
def fetchMessages(timeout: Long,
replicaId: Int,
fetchMinBytes: Int,
fetchInfo: Map[TopicAndPartition, PartitionFetchInfo],
responseCallback: Map[TopicAndPartition, FetchResponsePartitionData] => Unit) {
val isFromFollower = replicaId >= 0
val fetchOnlyFromLeader: Boolean = replicaId != Request.DebuggingConsumerId
val fetchOnlyCommitted: Boolean = ! Request.isValidBrokerId(replicaId)
// read from local logs
val logReadResults = readFromLocalLog(fetchOnlyFromLeader, fetchOnlyCommitted, fetchInfo)
// if the fetch comes from the follower,
// update its corresponding log end offset
if(Request.isValidBrokerId(replicaId))
updateFollowerLEOs(replicaId, logReadResults.mapValues(_.info.fetchOffset))
// check if this fetch request can be satisfied right away
val bytesReadable = logReadResults.values.map(_.info.messageSet.sizeInBytes).sum
val errorReadingData = logReadResults.values.foldLeft(false) ((errorIncurred, readResult) =>
errorIncurred || (readResult.errorCode != ErrorMapping.NoError))
// respond immediately if 1) fetch request does not want to wait
// 2) fetch request does not require any data
// 3) has enough data to respond
// 4) some error happens while reading data
if(timeout <= 0 || fetchInfo.size <= 0 || bytesReadable >= fetchMinBytes || errorReadingData) {
val fetchPartitionData = logReadResults.mapValues(result =>
FetchResponsePartitionData(result.errorCode, result.hw, result.info.messageSet))
responseCallback(fetchPartitionData)
} else {
// construct the fetch results from the read results
val fetchPartitionStatus = logReadResults.map { case (topicAndPartition, result) =>
(topicAndPartition, FetchPartitionStatus(result.info.fetchOffset, fetchInfo.get(topicAndPartition).get))
}
val fetchMetadata = FetchMetadata(fetchMinBytes, fetchOnlyFromLeader, fetchOnlyCommitted, isFromFollower, fetchPartitionStatus)
val delayedFetch = new DelayedFetch(timeout, fetchMetadata, this, responseCallback)
// create a list of (topic, partition) pairs to use as keys for this delayed fetch operation
val delayedFetchKeys = fetchPartitionStatus.keys.map(new TopicPartitionOperationKey(_)).toSeq
// try to complete the request immediately, otherwise put it into the purgatory;
// this is because while the delayed fetch operation is being created, new requests
// may arrive and hence make this operation completable.
delayedFetchPurgatory.tryCompleteElseWatch(delayedFetch, delayedFetchKeys)
}
}
/**
   * Read from a single topic/partition at the given offset, up to fetchSize bytes
*/
def readFromLocalLog(fetchOnlyFromLeader: Boolean,
readOnlyCommitted: Boolean,
readPartitionInfo: Map[TopicAndPartition, PartitionFetchInfo]): Map[TopicAndPartition, LogReadResult] = {
readPartitionInfo.map { case (TopicAndPartition(topic, partition), PartitionFetchInfo(offset, fetchSize)) =>
BrokerTopicStats.getBrokerTopicStats(topic).totalFetchRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().totalFetchRequestRate.mark()
val partitionDataAndOffsetInfo =
try {
trace("Fetching log segment for topic %s, partition %d, offset %d, size %d".format(topic, partition, offset, fetchSize))
// decide whether to only fetch from leader
val localReplica = if (fetchOnlyFromLeader)
getLeaderReplicaIfLocal(topic, partition)
else
getReplicaOrException(topic, partition)
// decide whether to only fetch committed data (i.e. messages below high watermark)
val maxOffsetOpt = if (readOnlyCommitted)
Some(localReplica.highWatermark.messageOffset)
else
None
// read on log
val logReadInfo = localReplica.log match {
case Some(log) =>
log.read(offset, fetchSize, maxOffsetOpt)
case None =>
error("Leader for partition [%s,%d] does not have a local log".format(topic, partition))
FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty)
}
LogReadResult(logReadInfo, localReplica.highWatermark.messageOffset, fetchSize, None)
} catch {
        // NOTE: the failed-fetch-requests metric is not incremented for known exceptions, since it
        // is supposed to indicate an unexpected failure of a broker in handling a fetch request
case utpe: UnknownTopicOrPartitionException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, Some(utpe))
case nle: NotLeaderForPartitionException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, Some(nle))
case rnae: ReplicaNotAvailableException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, Some(rnae))
case oor : OffsetOutOfRangeException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, Some(oor))
case e: Throwable =>
BrokerTopicStats.getBrokerTopicStats(topic).failedFetchRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().failedFetchRequestRate.mark()
error("Error processing fetch operation on partition [%s,%d] offset %d".format(topic, partition, offset))
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, Some(e))
}
(TopicAndPartition(topic, partition), partitionDataAndOffsetInfo)
}
}
def maybeUpdateMetadataCache(updateMetadataRequest: UpdateMetadataRequest, metadataCache: MetadataCache) {
replicaStateChangeLock synchronized {
if(updateMetadataRequest.controllerEpoch < controllerEpoch) {
val stateControllerEpochErrorMessage = ("Broker %d received update metadata request with correlation id %d from an " +
"old controller %d with epoch %d. Latest known controller epoch is %d").format(localBrokerId,
updateMetadataRequest.correlationId, updateMetadataRequest.controllerId, updateMetadataRequest.controllerEpoch,
controllerEpoch)
stateChangeLogger.warn(stateControllerEpochErrorMessage)
throw new ControllerMovedException(stateControllerEpochErrorMessage)
} else {
metadataCache.updateCache(updateMetadataRequest, localBrokerId, stateChangeLogger)
controllerEpoch = updateMetadataRequest.controllerEpoch
}
}
}
def becomeLeaderOrFollower(leaderAndISRRequest: LeaderAndIsrRequest,
offsetManager: OffsetManager): (collection.Map[(String, Int), Short], Short) = {
leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partition), stateInfo) =>
stateChangeLogger.trace("Broker %d received LeaderAndIsr request %s correlation id %d from controller %d epoch %d for partition [%s,%d]"
.format(localBrokerId, stateInfo, leaderAndISRRequest.correlationId,
leaderAndISRRequest.controllerId, leaderAndISRRequest.controllerEpoch, topic, partition))
}
replicaStateChangeLock synchronized {
val responseMap = new collection.mutable.HashMap[(String, Int), Short]
if(leaderAndISRRequest.controllerEpoch < controllerEpoch) {
leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partition), stateInfo) =>
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d since " +
"its controller epoch %d is old. Latest known controller epoch is %d").format(localBrokerId, leaderAndISRRequest.controllerId,
leaderAndISRRequest.correlationId, leaderAndISRRequest.controllerEpoch, controllerEpoch))
}
(responseMap, ErrorMapping.StaleControllerEpochCode)
} else {
val controllerId = leaderAndISRRequest.controllerId
val correlationId = leaderAndISRRequest.correlationId
controllerEpoch = leaderAndISRRequest.controllerEpoch
// First check partition's leader epoch
val partitionState = new HashMap[Partition, PartitionStateInfo]()
leaderAndISRRequest.partitionStateInfos.foreach{ case ((topic, partitionId), partitionStateInfo) =>
val partition = getOrCreatePartition(topic, partitionId)
val partitionLeaderEpoch = partition.getLeaderEpoch()
// If the leader epoch is valid record the epoch of the controller that made the leadership decision.
// This is useful while updating the isr to maintain the decision maker controller's epoch in the zookeeper path
if (partitionLeaderEpoch < partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch) {
if(partitionStateInfo.allReplicas.contains(config.brokerId))
partitionState.put(partition, partitionStateInfo)
else {
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d " +
"epoch %d for partition [%s,%d] as itself is not in assigned replica list %s")
.format(localBrokerId, controllerId, correlationId, leaderAndISRRequest.controllerEpoch,
topic, partition.partitionId, partitionStateInfo.allReplicas.mkString(",")))
}
} else {
// Otherwise record the error code in response
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d " +
"epoch %d for partition [%s,%d] since its associated leader epoch %d is old. Current leader epoch is %d")
.format(localBrokerId, controllerId, correlationId, leaderAndISRRequest.controllerEpoch,
topic, partition.partitionId, partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch, partitionLeaderEpoch))
responseMap.put((topic, partitionId), ErrorMapping.StaleLeaderEpochCode)
}
}
      val partitionsToBeLeader = partitionState
.filter{ case (partition, partitionStateInfo) => partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leader == config.brokerId}
      val partitionsToBeFollower = (partitionState -- partitionsToBeLeader.keys)
      if (!partitionsToBeLeader.isEmpty)
        makeLeaders(controllerId, controllerEpoch, partitionsToBeLeader, leaderAndISRRequest.correlationId, responseMap, offsetManager)
if (!partitionsToBeFollower.isEmpty)
makeFollowers(controllerId, controllerEpoch, partitionsToBeFollower, leaderAndISRRequest.leaders, leaderAndISRRequest.correlationId, responseMap, offsetManager)
        // we initialize the high watermark checkpointing thread after the first LeaderAndIsr request. This ensures that all the partitions
        // have been completely populated before starting the checkpointing, thereby avoiding weird race conditions
if (!hwThreadInitialized) {
startHighWaterMarksCheckPointThread()
hwThreadInitialized = true
}
replicaFetcherManager.shutdownIdleFetcherThreads()
(responseMap, ErrorMapping.NoError)
}
}
}
/*
   * Make the current broker become the leader for a given set of partitions by:
*
* 1. Stop fetchers for these partitions
* 2. Update the partition metadata in cache
* 3. Add these partitions to the leader partitions set
*
* If an unexpected error is thrown in this function, it will be propagated to KafkaApis where
* the error message will be set on each partition since we do not know which partition caused it
* TODO: the above may need to be fixed later
*/
private def makeLeaders(controllerId: Int, epoch: Int,
partitionState: Map[Partition, PartitionStateInfo],
correlationId: Int, responseMap: mutable.Map[(String, Int), Short],
offsetManager: OffsetManager) = {
partitionState.foreach(state =>
stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"starting the become-leader transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId))))
for (partition <- partitionState.keys)
responseMap.put((partition.topic, partition.partitionId), ErrorMapping.NoError)
try {
// First stop fetchers for all the partitions
replicaFetcherManager.removeFetcherForPartitions(partitionState.keySet.map(new TopicAndPartition(_)))
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d stopped fetchers as part of become-leader request from controller " +
"%d epoch %d with correlation id %d for partition %s")
.format(localBrokerId, controllerId, epoch, correlationId, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
// Update the partition information to be the leader
partitionState.foreach{ case (partition, partitionStateInfo) =>
partition.makeLeader(controllerId, partitionStateInfo, correlationId, offsetManager)}
} catch {
case e: Throwable =>
partitionState.foreach { state =>
val errorMsg = ("Error on broker %d while processing LeaderAndIsr request correlationId %d received from controller %d" +
" epoch %d for partition %s").format(localBrokerId, correlationId, controllerId, epoch,
TopicAndPartition(state._1.topic, state._1.partitionId))
stateChangeLogger.error(errorMsg, e)
}
// Re-throw the exception for it to be caught in KafkaApis
throw e
}
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d completed LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"for the become-leader transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
}
/*
   * Make the current broker become a follower for a given set of partitions by:
*
* 1. Remove these partitions from the leader partitions set.
* 2. Mark the replicas as followers so that no more data can be added from the producer clients.
* 3. Stop fetchers for these partitions so that no more data can be added by the replica fetcher threads.
* 4. Truncate the log and checkpoint offsets for these partitions.
* 5. If the broker is not shutting down, add the fetcher to the new leaders.
*
   * The ordering of these steps ensures that the replicas in transition will not
   * take any more messages before the offsets are checkpointed, so that all messages before the checkpoint
   * are guaranteed to be flushed to disk
*
* If an unexpected error is thrown in this function, it will be propagated to KafkaApis where
* the error message will be set on each partition since we do not know which partition caused it
*/
private def makeFollowers(controllerId: Int, epoch: Int, partitionState: Map[Partition, PartitionStateInfo],
leaders: Set[Broker], correlationId: Int, responseMap: mutable.Map[(String, Int), Short],
offsetManager: OffsetManager) {
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"starting the become-follower transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
for (partition <- partitionState.keys)
responseMap.put((partition.topic, partition.partitionId), ErrorMapping.NoError)
try {
var partitionsToMakeFollower: Set[Partition] = Set()
// TODO: Delete leaders from LeaderAndIsrRequest in 0.8.1
partitionState.foreach{ case (partition, partitionStateInfo) =>
val leaderIsrAndControllerEpoch = partitionStateInfo.leaderIsrAndControllerEpoch
val newLeaderBrokerId = leaderIsrAndControllerEpoch.leaderAndIsr.leader
leaders.find(_.id == newLeaderBrokerId) match {
// Only change partition state when the leader is available
case Some(leaderBroker) =>
if (partition.makeFollower(controllerId, partitionStateInfo, correlationId, offsetManager))
partitionsToMakeFollower += partition
else
stateChangeLogger.info(("Broker %d skipped the become-follower state change after marking its partition as follower with correlation id %d from " +
"controller %d epoch %d for partition [%s,%d] since the new leader %d is the same as the old leader")
.format(localBrokerId, correlationId, controllerId, leaderIsrAndControllerEpoch.controllerEpoch,
partition.topic, partition.partitionId, newLeaderBrokerId))
case None =>
// The leader broker should always be present in the leaderAndIsrRequest.
// If not, we should record the error message and abort the transition process for this partition
stateChangeLogger.error(("Broker %d received LeaderAndIsrRequest with correlation id %d from controller" +
" %d epoch %d for partition [%s,%d] but cannot become follower since the new leader %d is unavailable.")
.format(localBrokerId, correlationId, controllerId, leaderIsrAndControllerEpoch.controllerEpoch,
partition.topic, partition.partitionId, newLeaderBrokerId))
// Create the local replica even if the leader is unavailable. This is required to ensure that we include
// the partition's high watermark in the checkpoint file (see KAFKA-1647)
partition.getOrCreateReplica()
}
}
replicaFetcherManager.removeFetcherForPartitions(partitionsToMakeFollower.map(new TopicAndPartition(_)))
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d stopped fetchers as part of become-follower request from controller " +
"%d epoch %d with correlation id %d for partition %s")
.format(localBrokerId, controllerId, epoch, correlationId, TopicAndPartition(partition.topic, partition.partitionId)))
}
logManager.truncateTo(partitionsToMakeFollower.map(partition => (new TopicAndPartition(partition), partition.getOrCreateReplica().highWatermark.messageOffset)).toMap)
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d truncated logs and checkpointed recovery boundaries for partition [%s,%d] as part of " +
"become-follower request with correlation id %d from controller %d epoch %d").format(localBrokerId,
partition.topic, partition.partitionId, correlationId, controllerId, epoch))
}
if (isShuttingDown.get()) {
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d skipped the adding-fetcher step of the become-follower state change with correlation id %d from " +
"controller %d epoch %d for partition [%s,%d] since it is shutting down").format(localBrokerId, correlationId,
controllerId, epoch, partition.topic, partition.partitionId))
}
}
else {
// we do not need to check if the leader exists again since this has been done at the beginning of this process
val partitionsToMakeFollowerWithLeaderAndOffset = partitionsToMakeFollower.map(partition =>
new TopicAndPartition(partition) -> BrokerAndInitialOffset(
leaders.find(_.id == partition.leaderReplicaIdOpt.get).get,
partition.getReplica().get.logEndOffset.messageOffset)).toMap
replicaFetcherManager.addFetcherForPartitions(partitionsToMakeFollowerWithLeaderAndOffset)
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d started fetcher to new leader as part of become-follower request from controller " +
"%d epoch %d with correlation id %d for partition [%s,%d]")
.format(localBrokerId, controllerId, epoch, correlationId, partition.topic, partition.partitionId))
}
}
} catch {
case e: Throwable =>
val errorMsg = ("Error on broker %d while processing LeaderAndIsr request with correlationId %d received from controller %d " +
"epoch %d").format(localBrokerId, correlationId, controllerId, epoch)
stateChangeLogger.error(errorMsg, e)
// Re-throw the exception for it to be caught in KafkaApis
throw e
}
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d completed LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"for the become-follower transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
}
private def maybeShrinkIsr(): Unit = {
trace("Evaluating ISR list of partitions to see which replicas can be removed from the ISR")
allPartitions.values.foreach(partition => partition.maybeShrinkIsr(config.replicaLagTimeMaxMs, config.replicaLagMaxMessages))
}
private def updateFollowerLEOs(replicaId: Int, offsets: Map[TopicAndPartition, LogOffsetMetadata]) {
debug("Recording follower broker %d log end offsets: %s ".format(replicaId, offsets))
offsets.foreach { case (topicAndPartition, offset) =>
getPartition(topicAndPartition.topic, topicAndPartition.partition) match {
case Some(partition) =>
partition.updateReplicaLEO(replicaId, offset)
// for producer requests with ack > 1, we need to check
// if they can be unblocked after some follower's log end offsets have moved
tryCompleteDelayedProduce(new TopicPartitionOperationKey(topicAndPartition))
case None =>
warn("While recording the replica LEO, the partition %s hasn't been created.".format(topicAndPartition))
}
}
}
private def getLeaderPartitions() : List[Partition] = {
allPartitions.values.filter(_.leaderReplicaIfLocal().isDefined).toList
}
  // Flush the high watermark value of all partitions to the high watermark checkpoint file in each log directory
def checkpointHighWatermarks() {
val replicas = allPartitions.values.map(_.getReplica(config.brokerId)).collect{case Some(replica) => replica}
val replicasByDir = replicas.filter(_.log.isDefined).groupBy(_.log.get.dir.getParentFile.getAbsolutePath)
for((dir, reps) <- replicasByDir) {
val hwms = reps.map(r => (new TopicAndPartition(r) -> r.highWatermark.messageOffset)).toMap
try {
highWatermarkCheckpoints(dir).write(hwms)
} catch {
case e: IOException =>
fatal("Error writing to highwatermark file: ", e)
Runtime.getRuntime().halt(1)
}
}
}
  // High watermark checkpointing is skipped only when running under unit tests (checkpointHW = false)
def shutdown(checkpointHW: Boolean = true) {
info("Shutting down")
replicaFetcherManager.shutdown()
delayedFetchPurgatory.shutdown()
delayedProducePurgatory.shutdown()
if (checkpointHW)
checkpointHighWatermarks()
info("Shut down completely")
}
}
| cran/rkafkajars | java/kafka/server/ReplicaManager.scala | Scala | apache-2.0 | 41,964 |
/*
Copyright (c) 2014 by Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ml.dmlc.xgboost4j.scala
import scala.collection.JavaConverters._
import ml.dmlc.xgboost4j.java.{DMatrix => JDMatrix}
import ml.dmlc.xgboost4j.java.IObjective
trait ObjectiveTrait extends IObjective {
  /**
   * User-defined objective function that returns the gradient and second-order gradient.
   *
   * @param predicts untransformed margin predictions
   * @param dtrain training data
   * @return List of two float arrays, corresponding to grad and hess
   */
def getGradient(predicts: Array[Array[Float]], dtrain: DMatrix): List[Array[Float]]
private[scala] def getGradient(predicts: Array[Array[Float]], dtrain: JDMatrix):
java.util.List[Array[Float]] = {
getGradient(predicts, new DMatrix(dtrain)).asJava
}
}
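
/**
 * A minimal sketch of a custom objective implemented on top of ObjectiveTrait, using a
 * squared-error loss. It assumes the Scala DMatrix exposes the labels via getLabel and
 * that each prediction row carries a single margin value; the object name is illustrative.
 */
object ExampleSquaredErrorObjective extends ObjectiveTrait {
  def getGradient(predicts: Array[Array[Float]], dtrain: DMatrix): List[Array[Float]] = {
    val labels = dtrain.getLabel
    // first-order gradient of 0.5 * (pred - label)^2 with respect to the prediction
    val grad = predicts.zip(labels).map { case (pred, label) => pred(0) - label }
    // second-order gradient (hessian) of the squared error is constant 1
    val hess = Array.fill(predicts.length)(1.0f)
    List(grad, hess)
  }
}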
| dmlc/xgboost | jvm-packages/xgboost4j/src/main/scala/ml/dmlc/xgboost4j/scala/ObjectiveTrait.scala | Scala | apache-2.0 | 1,302 |
package me.hawkweisman.alexandria
package model
import org.json4s._
import scala.language.postfixOps
import scala.util.Try
trait Ownable {
def owner: User
}
/**
* Internal model for a book.
*
* "A library is just a box with strings in it"
* ~ Hawk Weisman
*
* @param isbn The book's International Standard Book Number, used to uniquely identify it
* @param title The book's title
* @param subtitle An optional subtitle
* @param byline A String listing the book's authors
* @param pages The number of pages in the book
* @param published_date The date the book was published.
* @param publisher The book's publisher, as a String
* @param weight The book's weight, as a String
*
* @author Hawk Weisman
* @since v0.1.0
*/
case class Book(
isbn: String, // ISBNs are unique identifiers for a book in the database
title: String,
subtitle: Option[String],
byline: String,
pages: Int,
  published_date: String, // TODO: find a way to model this that's machine-readable
publisher: String,
weight: Option[String]
) {
/**
* Removes the word "the" from the book's title.
*
* This is to be used for sorting purposes.
* @return the book's title, with the word "The" stripped
*/
  protected[model] def mungedTitle =
    (if (title startsWith "The") title.stripPrefix("The") + ", The"
     else title).toLowerCase
}
object Book {
implicit val formats = DefaultFormats
/**
* Parses a Book from OpenLibrary JSON.
*
* @param json the JSON blob from OpenLibrary
* @param isbn the book's ISBN
* @return A `Success[Book]` if the book was parsed successfully,
* otherwise a `Failure.`
*/
def fromJson(json: JValue, isbn: ISBN): Try[Book] = for {
book <- Try(json \\ isbn.toString)
title <- Try((book \\ "title").extract[String])
subOpt <- Try(book \\ "subtitle")
byline <- Try((book \\ "by_statement").extract[String])
pages <- Try((book \\ "number_of_pages").extract[Int])
pubDate <- Try((book \\ "publish_date").extract[String])
pubBy <- Try((book \\ "publishers" \\\\ "name").extract[String])
weightOpt <- Try(book \\ "weight")
} yield { Book(
isbn = isbn.format,
title = title,
subtitle = subOpt.toOption flatMap {
case JString(s) => Some(s)
case _ => None
},
byline = byline.stripPrefix("by ").stripSuffix("."),
pages = pages,
published_date = pubDate,
publisher = pubBy,
weight = weightOpt.toOption flatMap {
case JString(s) => Some(s)
case _ => None
}
)}
}
/**
* Ordering for ordering books by their titles alphabetically.
*
* @author Hawk Weisman
* @since v0.1.0
*/
object TitleOrdering extends Ordering[Book] {
def compare(a: Book, b: Book) = a.mungedTitle compare b.mungedTitle
}
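
/**
 * A minimal usage sketch: sorting books for display with TitleOrdering, which compares
 * on mungedTitle so that a leading "The" does not dominate the sort order. The helper
 * name below is illustrative only.
 */
object TitleOrderingExample {
  def titlesInShelfOrder(books: Seq[Book]): Seq[String] =
    books.sorted(TitleOrdering).map(_.title)
}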
/* // TODO finish
object PublicationOrdering extends Ordering[Book] {
def compare(a: Book, b: Book) = a.published compare b.published
}*/
| alexandrialibrary/Alexandria | src/main/scala/me/hawkweisman/alexandria/model/Book.scala | Scala | mit | 2,942 |
package com.wixpress.guineapig
import com.wixpress.guineapig.drivers.SpecificationWithEnvSupport
import org.springframework.http.HttpStatus
class PetriAppIT extends SpecificationWithEnvSupport {
"PetriApp" should {
"redirect on HTTP call" in {
val response = httpDriver.get("http://127.0.0.1:9901/petri/")
response.getStatusCode === HttpStatus.FOUND
response.getRedirectUrl === "https://127.0.0.1/petri"
}.pendingUntilFixed("Determine whether the functionality tested in PetriAppIT is required - seems like it was previously supported by framework code (the FW DispatcherServlet has been replaced with Spring's DispatcherServlet)")
"render on HTTPS call" in {
val response = httpDriver.get("https://127.0.0.1:9901/petri/")
response.getStatusCode === HttpStatus.OK
response.getBodyRaw === "https://127.0.0.1/petri"
} // this test was ignored in the original project as well
.pendingUntilFixed("Ignoring this new test until i will add support for making ssl calls to the test framework ")
}
}
| wix/petri | guineapig-webapp-os/src/it/java/com/wixpress/guineapig/PetriAppIT.scala | Scala | bsd-3-clause | 1,057 |
package com.artclod.play
/**
* See http://chocksaway.com/blog/?p=547
*/
object CommonsMailerHelper {
// TODO
// def sendEmail(body : String, subject: String, from: String, recipients : String*) = {
// val mail = use[MailerPlugin].email
// mail.setSubject(subject)
// mail.setRecipient(recipients: _*)
// mail.setFrom(from)
// mail.send(body)
// }
//
// def emailDefault(body : String, subject: String, recipients : String*) = {
// sendEmail(body, subject, fromAddress, recipients :_*)
// }
//
// def fromAddress = Play.current.configuration.getString("smtp.from").get
//
// def defaultMailSetup(recipients: String*) : MailerAPI = {
// val mail = use[MailerPlugin].email
// val from = fromAddress
// mail.setFrom(from)
// if(recipients.nonEmpty){ mail.setRecipient(recipients:_*) }
// mail
// }
//
// def serverAddress(request: play.api.mvc.Request[_]) = {
// val url = request.host
// val secure = false // TODO when upgraded to play 2.3+ use request.secure
// val protocol = if(secure) { "https://" } else { "http://" }
// protocol + url
// }
}
| kristiankime/calc-tutor | app/com/artclod/play/CommonsMailerHelper.scala | Scala | mit | 1,107 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.message.MessageSet
import kafka.security.auth.Topic
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.SecurityProtocol
import org.apache.kafka.common.TopicPartition
import kafka.api._
import kafka.admin.AdminUtils
import kafka.common._
import kafka.controller.KafkaController
import kafka.coordinator.ConsumerCoordinator
import kafka.log._
import kafka.network._
import kafka.network.RequestChannel.{Session, Response}
import org.apache.kafka.common.requests.{JoinGroupRequest, JoinGroupResponse, HeartbeatRequest, HeartbeatResponse, ResponseHeader, ResponseSend}
import kafka.utils.{ZkUtils, ZKGroupTopicDirs, SystemTime, Logging}
import scala.collection._
import org.I0Itec.zkclient.ZkClient
import kafka.security.auth.{Authorizer, Read, Write, Create, ClusterAction, Describe, Resource, Topic, Operation, ConsumerGroup}
/**
* Logic to handle the various Kafka requests
*/
class KafkaApis(val requestChannel: RequestChannel,
val replicaManager: ReplicaManager,
val coordinator: ConsumerCoordinator,
val controller: KafkaController,
val zkClient: ZkClient,
val brokerId: Int,
val config: KafkaConfig,
val metadataCache: MetadataCache,
val metrics: Metrics,
val authorizer: Option[Authorizer]) extends Logging {
this.logIdent = "[KafkaApi-%d] ".format(brokerId)
// Store all the quota managers for each type of request
private val quotaManagers = instantiateQuotaManagers(config)
/**
* Top-level method that handles all requests and multiplexes to the right api
*/
def handle(request: RequestChannel.Request) {
try{
trace("Handling request:%s from connection %s;securityProtocol:%s,principal:%s".
format(request.requestObj, request.connectionId, request.securityProtocol, request.session.principal))
request.requestId match {
case RequestKeys.ProduceKey => handleProducerRequest(request)
case RequestKeys.FetchKey => handleFetchRequest(request)
case RequestKeys.OffsetsKey => handleOffsetRequest(request)
case RequestKeys.MetadataKey => handleTopicMetadataRequest(request)
case RequestKeys.LeaderAndIsrKey => handleLeaderAndIsrRequest(request)
case RequestKeys.StopReplicaKey => handleStopReplicaRequest(request)
case RequestKeys.UpdateMetadataKey => handleUpdateMetadataRequest(request)
case RequestKeys.ControlledShutdownKey => handleControlledShutdownRequest(request)
case RequestKeys.OffsetCommitKey => handleOffsetCommitRequest(request)
case RequestKeys.OffsetFetchKey => handleOffsetFetchRequest(request)
case RequestKeys.ConsumerMetadataKey => handleConsumerMetadataRequest(request)
case RequestKeys.JoinGroupKey => handleJoinGroupRequest(request)
case RequestKeys.HeartbeatKey => handleHeartbeatRequest(request)
case requestId => throw new KafkaException("Unknown api code " + requestId)
}
} catch {
case e: Throwable =>
if ( request.requestObj != null)
request.requestObj.handleError(e, requestChannel, request)
else {
val response = request.body.getErrorResponse(request.header.apiVersion, e)
val respHeader = new ResponseHeader(request.header.correlationId)
/* If request doesn't have a default error response, we just close the connection.
For example, when produce request has acks set to 0 */
if (response == null)
requestChannel.closeConnection(request.processor, request)
else
requestChannel.sendResponse(new Response(request, new ResponseSend(request.connectionId, respHeader, response)))
}
error("error when handling request %s".format(request.requestObj), e)
} finally
request.apiLocalCompleteTimeMs = SystemTime.milliseconds
}
def handleLeaderAndIsrRequest(request: RequestChannel.Request) {
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val leaderAndIsrRequest = request.requestObj.asInstanceOf[LeaderAndIsrRequest]
authorizeClusterAction(request)
try {
// call replica manager to handle updating partitions to become leader or follower
val result = replicaManager.becomeLeaderOrFollower(leaderAndIsrRequest)
val leaderAndIsrResponse = new LeaderAndIsrResponse(leaderAndIsrRequest.correlationId, result.responseMap, result.errorCode)
// for each new leader or follower, call coordinator to handle
// consumer group migration
result.updatedLeaders.foreach { case partition =>
if (partition.topic == ConsumerCoordinator.OffsetsTopicName)
coordinator.handleGroupImmigration(partition.partitionId)
}
result.updatedFollowers.foreach { case partition =>
partition.leaderReplicaIdOpt.foreach { leaderReplica =>
if (partition.topic == ConsumerCoordinator.OffsetsTopicName &&
leaderReplica == brokerId)
coordinator.handleGroupEmigration(partition.partitionId)
}
}
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, leaderAndIsrResponse)))
} catch {
case e: KafkaStorageException =>
fatal("Disk error during leadership change.", e)
Runtime.getRuntime.halt(1)
}
}
def handleStopReplicaRequest(request: RequestChannel.Request) {
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val stopReplicaRequest = request.requestObj.asInstanceOf[StopReplicaRequest]
authorizeClusterAction(request)
val (response, error) = replicaManager.stopReplicas(stopReplicaRequest)
val stopReplicaResponse = new StopReplicaResponse(stopReplicaRequest.correlationId, response.toMap, error)
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, stopReplicaResponse)))
replicaManager.replicaFetcherManager.shutdownIdleFetcherThreads()
}
def handleUpdateMetadataRequest(request: RequestChannel.Request) {
val updateMetadataRequest = request.requestObj.asInstanceOf[UpdateMetadataRequest]
authorizeClusterAction(request)
replicaManager.maybeUpdateMetadataCache(updateMetadataRequest, metadataCache)
val updateMetadataResponse = new UpdateMetadataResponse(updateMetadataRequest.correlationId)
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, updateMetadataResponse)))
}
def handleControlledShutdownRequest(request: RequestChannel.Request) {
// ensureTopicExists is only for client facing requests
// We can't have the ensureTopicExists check here since the controller sends it as an advisory to all brokers so they
// stop serving data to clients for the topic being deleted
val controlledShutdownRequest = request.requestObj.asInstanceOf[ControlledShutdownRequest]
authorizeClusterAction(request)
val partitionsRemaining = controller.shutdownBroker(controlledShutdownRequest.brokerId)
val controlledShutdownResponse = new ControlledShutdownResponse(controlledShutdownRequest.correlationId,
ErrorMapping.NoError, partitionsRemaining)
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, controlledShutdownResponse)))
}
/**
* Handle an offset commit request
*/
def handleOffsetCommitRequest(request: RequestChannel.Request) {
val offsetCommitRequest = request.requestObj.asInstanceOf[OffsetCommitRequest]
// filter non-exist topics
val invalidRequestsInfo = offsetCommitRequest.requestInfo.filter { case (topicAndPartition, offsetMetadata) =>
!metadataCache.contains(topicAndPartition.topic)
}
val filteredRequestInfo = (offsetCommitRequest.requestInfo -- invalidRequestsInfo.keys)
val (authorizedRequestInfo, unauthorizedRequestInfo) = filteredRequestInfo.partition {
case (topicAndPartition, offsetMetadata) =>
authorize(request.session, Read, new Resource(Topic, topicAndPartition.topic)) &&
authorize(request.session, Read, new Resource(ConsumerGroup, offsetCommitRequest.groupId))
}
// the callback for sending an offset commit response
def sendResponseCallback(commitStatus: immutable.Map[TopicAndPartition, Short]) {
val mergedCommitStatus = commitStatus ++ unauthorizedRequestInfo.mapValues(_ => ErrorMapping.AuthorizationCode)
mergedCommitStatus.foreach { case (topicAndPartition, errorCode) =>
// we only print warnings for known errors here; only replica manager could see an unknown
// exception while trying to write the offset message to the local log, and it will log
// an error message and write the error code in this case; hence it can be ignored here
if (errorCode != ErrorMapping.NoError && errorCode != ErrorMapping.UnknownCode) {
debug("Offset commit request with correlation id %d from client %s on partition %s failed due to %s"
.format(offsetCommitRequest.correlationId, offsetCommitRequest.clientId,
topicAndPartition, ErrorMapping.exceptionNameFor(errorCode)))
}
}
val combinedCommitStatus = mergedCommitStatus ++ invalidRequestsInfo.map(_._1 -> ErrorMapping.UnknownTopicOrPartitionCode)
val response = OffsetCommitResponse(combinedCommitStatus, offsetCommitRequest.correlationId)
requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response)))
}
if (offsetCommitRequest.versionId == 0) {
// for version 0 always store offsets to ZK
val responseInfo = authorizedRequestInfo.map {
case (topicAndPartition, metaAndError) => {
val topicDirs = new ZKGroupTopicDirs(offsetCommitRequest.groupId, topicAndPartition.topic)
try {
if (metadataCache.getTopicMetadata(Set(topicAndPartition.topic), request.securityProtocol).size <= 0) {
(topicAndPartition, ErrorMapping.UnknownTopicOrPartitionCode)
} else if (metaAndError.metadata != null && metaAndError.metadata.length > config.offsetMetadataMaxSize) {
(topicAndPartition, ErrorMapping.OffsetMetadataTooLargeCode)
} else {
ZkUtils.updatePersistentPath(zkClient, topicDirs.consumerOffsetDir + "/" +
topicAndPartition.partition, metaAndError.offset.toString)
(topicAndPartition, ErrorMapping.NoError)
}
} catch {
case e: Throwable => (topicAndPartition, ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]))
}
}
}
sendResponseCallback(responseInfo)
} else {
// for version 1 and beyond store offsets in offset manager
// compute the retention time based on the request version:
// if it is v1 or not specified by user, we can use the default retention
val offsetRetention =
if (offsetCommitRequest.versionId <= 1 ||
offsetCommitRequest.retentionMs == org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_RETENTION_TIME) {
coordinator.offsetConfig.offsetsRetentionMs
} else {
offsetCommitRequest.retentionMs
}
// commit timestamp is always set to now.
// "default" expiration timestamp is now + retention (and retention may be overridden if v2)
// expire timestamp is computed differently for v1 and v2.
// - If v1 and no explicit commit timestamp is provided we use default expiration timestamp.
// - If v1 and explicit commit timestamp is provided we calculate retention from that explicit commit timestamp
// - If v2 we use the default expiration timestamp
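      // For example (illustrative numbers), with offsetRetention = 1000 ms and currentTimestamp = 5000:
      //   - a v2 commit, or a v1 commit without an explicit commit timestamp, expires at 6000 (now + retention)
      //   - a v1 commit carrying commitTimestamp = 4000 expires at 5000 (4000 + 1000)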
val currentTimestamp = SystemTime.milliseconds
val defaultExpireTimestamp = offsetRetention + currentTimestamp
val offsetData = authorizedRequestInfo.mapValues(offsetAndMetadata =>
offsetAndMetadata.copy(
commitTimestamp = currentTimestamp,
expireTimestamp = {
if (offsetAndMetadata.commitTimestamp == org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_TIMESTAMP)
defaultExpireTimestamp
else
offsetRetention + offsetAndMetadata.commitTimestamp
}
)
)
// call coordinator to handle commit offset
coordinator.handleCommitOffsets(
offsetCommitRequest.groupId,
offsetCommitRequest.consumerId,
offsetCommitRequest.groupGenerationId,
offsetData,
sendResponseCallback)
}
}
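  // when no Authorizer is configured, every operation is allowed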
private def authorize(session: Session, operation: Operation, resource: Resource): Boolean =
authorizer.map(_.authorize(session, operation, resource)).getOrElse(true)
/**
* Handle a produce request
*/
def handleProducerRequest(request: RequestChannel.Request) {
val produceRequest = request.requestObj.asInstanceOf[ProducerRequest]
val numBytesAppended = produceRequest.sizeInBytes
val (authorizedRequestInfo, unauthorizedRequestInfo) = produceRequest.data.partition {
case (topicAndPartition, _) => authorize(request.session, Write, new Resource(Topic, topicAndPartition.topic))
}
// the callback for sending a produce response
def sendResponseCallback(responseStatus: Map[TopicAndPartition, ProducerResponseStatus]) {
var errorInResponse = false
val mergedResponseStatus = responseStatus ++ unauthorizedRequestInfo.mapValues(_ => ProducerResponseStatus(ErrorMapping.AuthorizationCode, -1))
mergedResponseStatus.foreach { case (topicAndPartition, status) =>
// we only print warnings for known errors here; if it is unknown, it will cause
// an error message in the replica manager
if (status.error != ErrorMapping.NoError && status.error != ErrorMapping.UnknownCode) {
debug("Produce request with correlation id %d from client %s on partition %s failed due to %s".format(
produceRequest.correlationId,
produceRequest.clientId,
topicAndPartition,
ErrorMapping.exceptionNameFor(status.error)))
errorInResponse = true
}
}
def produceResponseCallback(delayTimeMs: Int) {
if (produceRequest.requiredAcks == 0) {
// no operation needed if producer request.required.acks = 0; however, if there is any error in handling
// the request, since no response is expected by the producer, the server will close socket server so that
// the producer client will know that some error has happened and will refresh its metadata
if (errorInResponse) {
info(
"Close connection due to error handling produce request with correlation id %d from client id %s with ack=0".format(
produceRequest.correlationId,
produceRequest.clientId))
requestChannel.closeConnection(request.processor, request)
} else {
requestChannel.noOperation(request.processor, request)
}
} else {
val response = ProducerResponse(produceRequest.correlationId,
mergedResponseStatus,
produceRequest.versionId,
delayTimeMs)
requestChannel.sendResponse(new RequestChannel.Response(request,
new RequestOrResponseSend(request.connectionId,
response)))
}
}
quotaManagers(RequestKeys.ProduceKey).recordAndMaybeThrottle(produceRequest.clientId,
numBytesAppended,
produceResponseCallback)
}
    // appending to internal topic partitions is only allowed
    // when the request comes from the admin client
val internalTopicsAllowed = produceRequest.clientId == AdminUtils.AdminClientId
// call the replica manager to append messages to the replicas
replicaManager.appendMessages(
produceRequest.ackTimeoutMs.toLong,
produceRequest.requiredAcks,
internalTopicsAllowed,
authorizedRequestInfo,
sendResponseCallback)
    // if the request is put into the purgatory, a reference to it is held and it cannot be
    // garbage collected; we therefore clear its data here so that the GC can reclaim its
    // memory, since the data has already been appended to the log
produceRequest.emptyData()
}
/**
* Handle a fetch request
*/
def handleFetchRequest(request: RequestChannel.Request) {
val fetchRequest = request.requestObj.asInstanceOf[FetchRequest]
val (authorizedRequestInfo, unauthorizedRequestInfo) = fetchRequest.requestInfo.partition {
case (topicAndPartition, _) => authorize(request.session, Read, new Resource(Topic, topicAndPartition.topic))
}
val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ => FetchResponsePartitionData(ErrorMapping.AuthorizationCode, -1, MessageSet.Empty))
// the callback for sending a fetch response
def sendResponseCallback(responsePartitionData: Map[TopicAndPartition, FetchResponsePartitionData]) {
val mergedResponseStatus = responsePartitionData ++ unauthorizedResponseStatus
mergedResponseStatus.foreach { case (topicAndPartition, data) =>
// we only print warnings for known errors here; if it is unknown, it will cause
// an error message in the replica manager already and hence can be ignored here
if (data.error != ErrorMapping.NoError && data.error != ErrorMapping.UnknownCode) {
debug("Fetch request with correlation id %d from client %s on partition %s failed due to %s"
.format(fetchRequest.correlationId, fetchRequest.clientId,
topicAndPartition, ErrorMapping.exceptionNameFor(data.error)))
}
// record the bytes out metrics only when the response is being sent
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesOutRate.mark(data.messages.sizeInBytes)
BrokerTopicStats.getBrokerAllTopicsStats().bytesOutRate.mark(data.messages.sizeInBytes)
}
def fetchResponseCallback(delayTimeMs: Int) {
val response = FetchResponse(fetchRequest.correlationId, responsePartitionData, fetchRequest.versionId, delayTimeMs)
requestChannel.sendResponse(new RequestChannel.Response(request, new FetchResponseSend(request.connectionId, response)))
}
// Do not throttle replication traffic
if (fetchRequest.isFromFollower) {
fetchResponseCallback(0)
} else {
quotaManagers(RequestKeys.FetchKey).recordAndMaybeThrottle(fetchRequest.clientId,
FetchResponse.responseSize(responsePartitionData
.groupBy(_._1.topic),
fetchRequest.versionId),
fetchResponseCallback)
}
}
// call the replica manager to fetch messages from the local replica
replicaManager.fetchMessages(
fetchRequest.maxWait.toLong,
fetchRequest.replicaId,
fetchRequest.minBytes,
authorizedRequestInfo,
sendResponseCallback)
}
/**
* Handle an offset request
*/
def handleOffsetRequest(request: RequestChannel.Request) {
val offsetRequest = request.requestObj.asInstanceOf[OffsetRequest]
val (authorizedRequestInfo, unauthorizedRequestInfo) = offsetRequest.requestInfo.partition {
case (topicAndPartition, _) => authorize(request.session, Describe, new Resource(Topic, topicAndPartition.topic))
}
val unauthorizedResponseStatus = unauthorizedRequestInfo.mapValues(_ => PartitionOffsetsResponse(ErrorMapping.AuthorizationCode, Nil))
val responseMap = authorizedRequestInfo.map(elem => {
val (topicAndPartition, partitionOffsetRequestInfo) = elem
try {
// ensure leader exists
val localReplica = if (!offsetRequest.isFromDebuggingClient)
replicaManager.getLeaderReplicaIfLocal(topicAndPartition.topic, topicAndPartition.partition)
else
replicaManager.getReplicaOrException(topicAndPartition.topic, topicAndPartition.partition)
val offsets = {
val allOffsets = fetchOffsets(replicaManager.logManager,
topicAndPartition,
partitionOffsetRequestInfo.time,
partitionOffsetRequestInfo.maxNumOffsets)
if (!offsetRequest.isFromOrdinaryClient) {
allOffsets
} else {
val hw = localReplica.highWatermark.messageOffset
if (allOffsets.exists(_ > hw))
hw +: allOffsets.dropWhile(_ > hw)
else
allOffsets
}
}
(topicAndPartition, PartitionOffsetsResponse(ErrorMapping.NoError, offsets))
} catch {
      // NOTE: UnknownTopicOrPartitionException and NotLeaderForPartitionException are special cased since these errors
      // are typically transient and there is no value in logging the entire stack trace for them
case utpe: UnknownTopicOrPartitionException =>
debug("Offset request with correlation id %d from client %s on partition %s failed due to %s".format(
offsetRequest.correlationId, offsetRequest.clientId, topicAndPartition, utpe.getMessage))
(topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(utpe.getClass.asInstanceOf[Class[Throwable]]), Nil) )
case nle: NotLeaderForPartitionException =>
debug("Offset request with correlation id %d from client %s on partition %s failed due to %s".format(
offsetRequest.correlationId, offsetRequest.clientId, topicAndPartition,nle.getMessage))
(topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(nle.getClass.asInstanceOf[Class[Throwable]]), Nil) )
case e: Throwable =>
error("Error while responding to offset request", e)
(topicAndPartition, PartitionOffsetsResponse(ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]]), Nil) )
}
})
val mergedResponseMap = responseMap ++ unauthorizedResponseStatus
val response = OffsetResponse(offsetRequest.correlationId, mergedResponseMap)
requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response)))
}
def fetchOffsets(logManager: LogManager, topicAndPartition: TopicAndPartition, timestamp: Long, maxNumOffsets: Int): Seq[Long] = {
logManager.getLog(topicAndPartition) match {
case Some(log) =>
fetchOffsetsBefore(log, timestamp, maxNumOffsets)
case None =>
if (timestamp == OffsetRequest.LatestTime || timestamp == OffsetRequest.EarliestTime)
Seq(0L)
else
Nil
}
}
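
  /*
   * Build a (baseOffset, lastModified) entry for every log segment, plus an entry for the
   * current log end offset when the active segment is non-empty. Then pick a starting index:
   * the newest entry for LatestTime, the oldest for EarliestTime, or otherwise the newest
   * entry whose modification time is <= the requested timestamp. Finally return up to
   * maxNumOffsets offsets walking backwards from that index, in descending offset order.
   */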
private def fetchOffsetsBefore(log: Log, timestamp: Long, maxNumOffsets: Int): Seq[Long] = {
val segsArray = log.logSegments.toArray
var offsetTimeArray: Array[(Long, Long)] = null
if (segsArray.last.size > 0)
offsetTimeArray = new Array[(Long, Long)](segsArray.length + 1)
else
offsetTimeArray = new Array[(Long, Long)](segsArray.length)
for(i <- 0 until segsArray.length)
offsetTimeArray(i) = (segsArray(i).baseOffset, segsArray(i).lastModified)
if (segsArray.last.size > 0)
offsetTimeArray(segsArray.length) = (log.logEndOffset, SystemTime.milliseconds)
var startIndex = -1
timestamp match {
case OffsetRequest.LatestTime =>
startIndex = offsetTimeArray.length - 1
case OffsetRequest.EarliestTime =>
startIndex = 0
case _ =>
var isFound = false
debug("Offset time array = " + offsetTimeArray.foreach(o => "%d, %d".format(o._1, o._2)))
startIndex = offsetTimeArray.length - 1
while (startIndex >= 0 && !isFound) {
if (offsetTimeArray(startIndex)._2 <= timestamp)
isFound = true
else
startIndex -=1
}
}
val retSize = maxNumOffsets.min(startIndex + 1)
val ret = new Array[Long](retSize)
for(j <- 0 until retSize) {
ret(j) = offsetTimeArray(startIndex)._1
startIndex -= 1
}
// ensure that the returned seq is in descending order of offsets
ret.toSeq.sortBy(- _)
}
private def getTopicMetadata(topics: Set[String], securityProtocol: SecurityProtocol): Seq[TopicMetadata] = {
val topicResponses = metadataCache.getTopicMetadata(topics, securityProtocol)
if (topics.size > 0 && topicResponses.size != topics.size) {
val nonExistentTopics = topics -- topicResponses.map(_.topic).toSet
val responsesForNonExistentTopics = nonExistentTopics.map { topic =>
if (topic == ConsumerCoordinator.OffsetsTopicName || config.autoCreateTopicsEnable) {
try {
if (topic == ConsumerCoordinator.OffsetsTopicName) {
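            // use the configured offsets topic replication factor, capped at the number of
            // currently alive brokers so the auto-created topic does not request more replicas
            // than there are brokers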
val aliveBrokers = metadataCache.getAliveBrokers
val offsetsTopicReplicationFactor =
if (aliveBrokers.length > 0)
Math.min(config.offsetsTopicReplicationFactor.toInt, aliveBrokers.length)
else
config.offsetsTopicReplicationFactor.toInt
AdminUtils.createTopic(zkClient, topic, config.offsetsTopicPartitions,
offsetsTopicReplicationFactor,
coordinator.offsetsTopicConfigs)
info("Auto creation of topic %s with %d partitions and replication factor %d is successful!"
.format(topic, config.offsetsTopicPartitions, offsetsTopicReplicationFactor))
}
else {
AdminUtils.createTopic(zkClient, topic, config.numPartitions, config.defaultReplicationFactor)
info("Auto creation of topic %s with %d partitions and replication factor %d is successful!"
.format(topic, config.numPartitions, config.defaultReplicationFactor))
}
new TopicMetadata(topic, Seq.empty[PartitionMetadata], ErrorMapping.LeaderNotAvailableCode)
} catch {
case e: TopicExistsException => // let it go, possibly another broker created this topic
new TopicMetadata(topic, Seq.empty[PartitionMetadata], ErrorMapping.LeaderNotAvailableCode)
case itex: InvalidTopicException =>
new TopicMetadata(topic, Seq.empty[PartitionMetadata], ErrorMapping.InvalidTopicCode)
}
} else {
new TopicMetadata(topic, Seq.empty[PartitionMetadata], ErrorMapping.UnknownTopicOrPartitionCode)
}
}
topicResponses.appendAll(responsesForNonExistentTopics)
}
topicResponses
}
/**
* Handle a topic metadata request
*/
def handleTopicMetadataRequest(request: RequestChannel.Request) {
val metadataRequest = request.requestObj.asInstanceOf[TopicMetadataRequest]
    // if topics is empty -> fetch metadata for all topics, but filter out the topic responses that are not authorized
val topics = if (metadataRequest.topics.isEmpty) {
val topicResponses = metadataCache.getTopicMetadata(metadataRequest.topics.toSet, request.securityProtocol)
topicResponses.map(_.topic).filter(topic => authorize(request.session, Describe, new Resource(Topic, topic))).toSet
} else {
metadataRequest.topics.toSet
}
    // when topics is empty this will be a duplicate authorization check, but since it should just be a cache lookup, it should not matter.
var (authorizedTopics, unauthorizedTopics) = topics.partition(topic => authorize(request.session, Describe, new Resource(Topic, topic)))
if (!authorizedTopics.isEmpty) {
val topicResponses = metadataCache.getTopicMetadata(authorizedTopics, request.securityProtocol)
if (config.autoCreateTopicsEnable && topicResponses.size != authorizedTopics.size) {
val nonExistentTopics: Set[String] = topics -- topicResponses.map(_.topic).toSet
authorizer.foreach {
az => if (!az.authorize(request.session, Create, Resource.ClusterResource)) {
authorizedTopics --= nonExistentTopics
unauthorizedTopics ++= nonExistentTopics
}
}
}
}
val unauthorizedTopicMetaData = unauthorizedTopics.map(topic => new TopicMetadata(topic, Seq.empty[PartitionMetadata], ErrorMapping.AuthorizationCode))
val topicMetadata = getTopicMetadata(authorizedTopics, request.securityProtocol)
val brokers = metadataCache.getAliveBrokers
trace("Sending topic metadata %s and brokers %s for correlation id %d to client %s".format(topicMetadata.mkString(","), brokers.mkString(","), metadataRequest.correlationId, metadataRequest.clientId))
val response = new TopicMetadataResponse(brokers.map(_.getBrokerEndPoint(request.securityProtocol)), topicMetadata ++ unauthorizedTopicMetaData, metadataRequest.correlationId)
requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response)))
}
/*
* Handle an offset fetch request
*/
def handleOffsetFetchRequest(request: RequestChannel.Request) {
val offsetFetchRequest = request.requestObj.asInstanceOf[OffsetFetchRequest]
val (authorizedTopicPartitions, unauthorizedTopicPartitions) = offsetFetchRequest.requestInfo.partition { topicAndPartition =>
authorize(request.session, Describe, new Resource(Topic, topicAndPartition.topic)) &&
authorize(request.session, Read, new Resource(ConsumerGroup, offsetFetchRequest.groupId))
}
val authorizationError = OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata, ErrorMapping.AuthorizationCode)
val unauthorizedStatus = unauthorizedTopicPartitions.map(topicAndPartition => (topicAndPartition, authorizationError)).toMap
val response = if (offsetFetchRequest.versionId == 0) {
// version 0 reads offsets from ZK
val responseInfo = authorizedTopicPartitions.map( topicAndPartition => {
val topicDirs = new ZKGroupTopicDirs(offsetFetchRequest.groupId, topicAndPartition.topic)
try {
if (metadataCache.getTopicMetadata(Set(topicAndPartition.topic), request.securityProtocol).size <= 0) {
(topicAndPartition, OffsetMetadataAndError.UnknownTopicOrPartition)
} else {
val payloadOpt = ZkUtils.readDataMaybeNull(zkClient, topicDirs.consumerOffsetDir + "/" + topicAndPartition.partition)._1
payloadOpt match {
case Some(payload) => (topicAndPartition, OffsetMetadataAndError(payload.toLong))
case None => (topicAndPartition, OffsetMetadataAndError.UnknownTopicOrPartition)
}
}
} catch {
case e: Throwable =>
(topicAndPartition, OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata,
ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])))
}
})
val unauthorizedTopics = unauthorizedTopicPartitions.map( topicAndPartition =>
(topicAndPartition, OffsetMetadataAndError(OffsetMetadata.InvalidOffsetMetadata,ErrorMapping.AuthorizationCode)))
OffsetFetchResponse(collection.immutable.Map(responseInfo: _*) ++ unauthorizedTopics, offsetFetchRequest.correlationId)
} else {
// version 1 reads offsets from Kafka;
val offsets = coordinator.handleFetchOffsets(offsetFetchRequest.groupId, authorizedTopicPartitions).toMap
// Note that we do not need to filter the partitions in the
// metadata cache as the topic partitions will be filtered
// in coordinator's offset manager through the offset cache
OffsetFetchResponse(offsets ++ unauthorizedStatus, offsetFetchRequest.correlationId)
}
trace("Sending offset fetch response %s for correlation id %d to client %s."
.format(response, offsetFetchRequest.correlationId, offsetFetchRequest.clientId))
requestChannel.sendResponse(new RequestChannel.Response(request, new RequestOrResponseSend(request.connectionId, response)))
}
/*
* Handle a consumer metadata request
*/
def handleConsumerMetadataRequest(request: RequestChannel.Request) {
val consumerMetadataRequest = request.requestObj.asInstanceOf[ConsumerMetadataRequest]
if (!authorize(request.session, Read, new Resource(ConsumerGroup, consumerMetadataRequest.group))) {
val response = ConsumerMetadataResponse(None, ErrorMapping.AuthorizationCode, consumerMetadataRequest.correlationId)
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, response)))
} else {
val partition = coordinator.partitionFor(consumerMetadataRequest.group)
//get metadata (and create the topic if necessary)
val offsetsTopicMetadata = getTopicMetadata(Set(ConsumerCoordinator.OffsetsTopicName), request.securityProtocol).head
val errorResponse = ConsumerMetadataResponse(None, ErrorMapping.ConsumerCoordinatorNotAvailableCode, consumerMetadataRequest.correlationId)
val response =
offsetsTopicMetadata.partitionsMetadata.find(_.partitionId == partition).map { partitionMetadata =>
partitionMetadata.leader.map { leader =>
ConsumerMetadataResponse(Some(leader), ErrorMapping.NoError, consumerMetadataRequest.correlationId)
}.getOrElse(errorResponse)
}.getOrElse(errorResponse)
trace("Sending consumer metadata %s for correlation id %d to client %s."
.format(response, consumerMetadataRequest.correlationId, consumerMetadataRequest.clientId))
requestChannel.sendResponse(new Response(request, new RequestOrResponseSend(request.connectionId, response)))
}
}
def handleJoinGroupRequest(request: RequestChannel.Request) {
import JavaConversions._
val joinGroupRequest = request.body.asInstanceOf[JoinGroupRequest]
val respHeader = new ResponseHeader(request.header.correlationId)
// the callback for sending a join-group response
def sendResponseCallback(partitions: Set[TopicAndPartition], consumerId: String, generationId: Int, errorCode: Short) {
val partitionList = if (errorCode == ErrorMapping.NoError)
partitions.map(tp => new TopicPartition(tp.topic, tp.partition)).toBuffer
else
List.empty.toBuffer
val responseBody = new JoinGroupResponse(errorCode, generationId, consumerId, partitionList)
trace("Sending join group response %s for correlation id %d to client %s."
.format(responseBody, request.header.correlationId, request.header.clientId))
requestChannel.sendResponse(new Response(request, new ResponseSend(request.connectionId, respHeader, responseBody)))
}
// ensure that the client is authorized to join the group and read from all subscribed topics
if (!authorize(request.session, Read, new Resource(ConsumerGroup, joinGroupRequest.groupId())) ||
joinGroupRequest.topics().exists(topic => !authorize(request.session, Read, new Resource(Topic, topic)))) {
val responseBody = new JoinGroupResponse(ErrorMapping.AuthorizationCode, 0, joinGroupRequest.consumerId(), List.empty[TopicPartition])
requestChannel.sendResponse(new Response(request, new ResponseSend(request.connectionId, respHeader, responseBody)))
} else {
      // let the coordinator handle the join-group request
coordinator.handleJoinGroup(
joinGroupRequest.groupId(),
joinGroupRequest.consumerId(),
joinGroupRequest.topics().toSet,
joinGroupRequest.sessionTimeout(),
joinGroupRequest.strategy(),
sendResponseCallback)
}
}
def handleHeartbeatRequest(request: RequestChannel.Request) {
val heartbeatRequest = request.body.asInstanceOf[HeartbeatRequest]
val respHeader = new ResponseHeader(request.header.correlationId)
// the callback for sending a heartbeat response
def sendResponseCallback(errorCode: Short) {
val response = new HeartbeatResponse(errorCode)
trace("Sending heartbeat response %s for correlation id %d to client %s."
.format(response, request.header.correlationId, request.header.clientId))
requestChannel.sendResponse(new RequestChannel.Response(request, new ResponseSend(request.connectionId, respHeader, response)))
}
if (!authorize(request.session, Read, new Resource(ConsumerGroup, heartbeatRequest.groupId))) {
val heartbeatResponse = new HeartbeatResponse(ErrorMapping.AuthorizationCode)
requestChannel.sendResponse(new Response(request, new ResponseSend(request.connectionId, respHeader, heartbeatResponse)))
}
else {
      // let the coordinator handle the heartbeat
coordinator.handleHeartbeat(
heartbeatRequest.groupId(),
heartbeatRequest.consumerId(),
heartbeatRequest.groupGenerationId(),
sendResponseCallback)
}
}
  /*
   * Returns a Map of all configured quota managers, keyed by request API key.
   */
private def instantiateQuotaManagers(cfg: KafkaConfig): Map[Short, ClientQuotaManager] = {
val producerQuotaManagerCfg = ClientQuotaManagerConfig(
quotaBytesPerSecondDefault = cfg.producerQuotaBytesPerSecondDefault,
quotaBytesPerSecondOverrides = cfg.producerQuotaBytesPerSecondOverrides,
numQuotaSamples = cfg.numQuotaSamples,
quotaWindowSizeSeconds = cfg.quotaWindowSizeSeconds
)
val consumerQuotaManagerCfg = ClientQuotaManagerConfig(
quotaBytesPerSecondDefault = cfg.consumerQuotaBytesPerSecondDefault,
quotaBytesPerSecondOverrides = cfg.consumerQuotaBytesPerSecondOverrides,
numQuotaSamples = cfg.numQuotaSamples,
quotaWindowSizeSeconds = cfg.quotaWindowSizeSeconds
)
val quotaManagers = Map[Short, ClientQuotaManager](
RequestKeys.ProduceKey ->
new ClientQuotaManager(producerQuotaManagerCfg, metrics, RequestKeys.nameForKey(RequestKeys.ProduceKey), new org.apache.kafka.common.utils.SystemTime),
RequestKeys.FetchKey ->
new ClientQuotaManager(consumerQuotaManagerCfg, metrics, RequestKeys.nameForKey(RequestKeys.FetchKey), new org.apache.kafka.common.utils.SystemTime)
)
quotaManagers
}
def close() {
quotaManagers.foreach { case (apiKey, quotaManager) =>
quotaManager.shutdown()
}
info("Shutdown complete.")
}
def authorizeClusterAction(request: RequestChannel.Request): Unit = {
if (!authorize(request.session, ClusterAction, Resource.ClusterResource))
throw new AuthorizationException(s"Request $request is not authorized.")
}
}
| robort/kafka | core/src/main/scala/kafka/server/KafkaApis.scala | Scala | apache-2.0 | 40,341 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.anyvals
import scala.collection.GenTraversable
import scala.collection.mutable.Buffer
import scala.collection.mutable.ListBuffer
import org.scalactic.{Every, One, Many, StringNormalizations}
import org.scalactic.UnitSpec
import org.scalactic.NormalizingEquality
import org.scalatest.CompatParColls.Converters._
class NonEmptySetSpec extends UnitSpec {
"A NonEmptySet" can "be constructed with one element" in {
val onesie = NonEmptySet(3)
onesie.size shouldBe 1
onesie(3) shouldBe true
onesie(2) shouldBe false
}
it can "be constructed with many elements" in {
val twosie = NonEmptySet(2, 3)
twosie.size shouldBe 2
twosie(2) shouldBe true
twosie(3) shouldBe true
val threesie = NonEmptySet(1, 2, 3)
threesie.size shouldBe 3
threesie(1) shouldBe true
threesie(2) shouldBe true
threesie(3) shouldBe true
threesie(4) shouldBe false
}
it can "be constructed from a GenTraversable via the from method on NonEmptySet singleton" in {
NonEmptySet.from(Set.empty[String]) shouldBe None
NonEmptySet.from(Set("1")) shouldBe Some(NonEmptySet("1"))
NonEmptySet.from(Set(1, 2, 3)) shouldBe Some(NonEmptySet(1, 2, 3))
// SKIP-SCALATESTJS,NATIVE-START
NonEmptySet.from(Set.empty[String].par) shouldBe None
NonEmptySet.from(Set("1").par) shouldBe Some(NonEmptySet("1"))
NonEmptySet.from(Set(1, 2, 3).par) shouldBe Some(NonEmptySet(1, 2, 3))
// SKIP-SCALATESTJS,NATIVE-END
}
it can "be constructed with null elements" in {
noException should be thrownBy NonEmptySet("hi", null, "ho")
noException should be thrownBy NonEmptySet(null)
noException should be thrownBy NonEmptySet("ho", null)
}
it can "be deconstructed with NonEmptySet" in {
NonEmptySet(1) match {
case NonEmptySet(x) => x shouldEqual 1
case _ => fail()
}
NonEmptySet("hi") match {
case NonEmptySet(s) => s shouldEqual "hi"
case _ => fail()
}
}
it can "be deconstructed with Many" in {
NonEmptySet(1, 2, 3) match {
case NonEmptySet(x, y, z) => (x, y, z) shouldEqual (2, 3, 1)
case _ => fail()
}
NonEmptySet("hi", "there") match {
case NonEmptySet(s, t) => (s, t) shouldEqual ("there", "hi")
case _ => fail()
}
NonEmptySet(1, 2, 3) match {
case NonEmptySet(x, y, _) => (x, y) shouldEqual (2, 3)
case _ => fail()
}
NonEmptySet(1, 2, 3, 4, 5) match {
case NonEmptySet(x, y, _*) => (x, y) shouldEqual (5, 1)
case _ => fail()
}
}
it can "be deconstructed with Every" in {
NonEmptySet(1, 2, 3) match {
case NonEmptySet(x, y, z) => (x, y, z) shouldEqual (2, 3, 1)
case _ => fail()
}
NonEmptySet("hi", "there") match {
case NonEmptySet(s, t) => (s, t) shouldEqual ("there", "hi")
case _ => fail()
}
NonEmptySet(1, 2, 3) match {
case NonEmptySet(x, y, _) => (x, y) shouldEqual (2, 3)
case _ => fail()
}
NonEmptySet(1, 2, 3, 4, 5) match {
case NonEmptySet(x, y, _*) => (x, y) shouldEqual (5, 1)
case _ => fail()
}
NonEmptySet(1, 2, 3) match {
case NonEmptySet(x, _*) => x shouldEqual 2
case _ => fail()
}
NonEmptySet("hi") match {
case NonEmptySet(s) => s shouldEqual "hi"
case _ => fail()
}
NonEmptySet(1, 2, 3) match {
case NonEmptySet(x, y, z) => (x, y, z) shouldEqual (2, 3, 1)
case _ => fail()
}
NonEmptySet("hi", "there") match {
case NonEmptySet(s, t) => (s, t) shouldEqual ("there", "hi")
case _ => fail()
}
NonEmptySet(1, 2, 3) match {
case NonEmptySet(x, y, _) => (x, y) shouldEqual (2, 3)
case _ => fail()
}
NonEmptySet(1, 2, 3, 4, 5) match {
case NonEmptySet(x, y, _*) => (x, y) shouldEqual (5, 1)
case _ => fail()
}
NonEmptySet(1, 2, 3) match {
case NonEmptySet(x, _*) => x shouldEqual 2
case _ => fail()
}
}
it should "have a ++ method that takes another NonEmptySet" in {
NonEmptySet(1, 2, 3) ++ NonEmptySet(4) shouldEqual NonEmptySet(1, 2, 3, 4)
NonEmptySet(1, 2, 3) ++ NonEmptySet(4, 5) shouldEqual NonEmptySet(1, 2, 3, 4, 5)
NonEmptySet(1, 2, 3) ++ NonEmptySet(4, 5, 6) shouldEqual NonEmptySet(1, 2, 3, 4, 5, 6)
}
it should "have a ++ method that takes an Every" in {
NonEmptySet(1, 2, 3) ++ One(4) shouldEqual NonEmptySet(1, 2, 3, 4)
NonEmptySet(1, 2, 3) ++ Every(4) shouldEqual NonEmptySet(1, 2, 3, 4)
NonEmptySet(1, 2, 3) ++ Every(4, 5, 6) shouldEqual NonEmptySet(1, 2, 3, 4, 5, 6)
NonEmptySet(1, 2, 3) ++ One(4) shouldEqual NonEmptySet(1, 2, 3, 4)
NonEmptySet(1, 2, 3) ++ One(4) shouldEqual NonEmptySet(1, 2, 3, 4)
NonEmptySet(1, 2, 3) ++ Every(4) shouldEqual NonEmptySet(1, 2, 3, 4)
NonEmptySet(1, 2, 3) ++ Every(4) shouldEqual NonEmptySet(1, 2, 3, 4)
NonEmptySet(1, 2, 3) ++ One(4) shouldEqual NonEmptySet(1, 2, 3, 4)
}
it should "have a ++ method that takes a GenTraversableOnce" in {
NonEmptySet(1, 2, 3) ++ Set(4) shouldEqual NonEmptySet(1, 2, 3, 4)
NonEmptySet(1, 2, 3) ++ Vector(4, 5, 6) shouldEqual NonEmptySet(1, 2, 3, 4, 5, 6)
NonEmptySet(1, 2, 3) ++ GenTraversable(4) shouldEqual NonEmptySet(1, 2, 3, 4)
NonEmptySet(1, 2, 3) ++ Set(4, 5) shouldEqual NonEmptySet(1, 2, 3, 4, 5)
NonEmptySet(1, 2, 3) ++ Set(4, 5).iterator shouldEqual NonEmptySet(1, 2, 3, 4, 5)
}
it should "have a + method" in {
NonEmptySet(1) + 0 shouldBe NonEmptySet(0, 1)
NonEmptySet(1, 2) + 0 shouldBe NonEmptySet(0, 1, 2)
NonEmptySet("one", "two") + "zero" shouldBe NonEmptySet("zero", "one", "two")
}
it should "have 3 addString methods" in {
NonEmptySet("hi").addString(new StringBuilder) shouldBe new StringBuilder("hi")
NonEmptySet(1, 2, 3).addString(new StringBuilder) shouldBe new StringBuilder("231")
NonEmptySet("hi").addString(new StringBuilder, "#") shouldBe new StringBuilder("hi")
NonEmptySet(1, 2, 3).addString(new StringBuilder, "#") shouldBe new StringBuilder("2#3#1")
NonEmptySet(1, 2, 3).addString(new StringBuilder, ", ") shouldBe new StringBuilder("2, 3, 1")
NonEmptySet("hi").addString(new StringBuilder, "<", "#", ">") shouldBe new StringBuilder("<hi>")
NonEmptySet(1, 2, 3).addString(new StringBuilder, "<", "#", ">") shouldBe new StringBuilder("<2#3#1>")
NonEmptySet(1, 2, 3).addString(new StringBuilder, " ( ", ", ", " ) ") shouldBe new StringBuilder(" ( 2, 3, 1 ) ")
}
it should "have an canEqual method" is pending
// it should "have an charAt method" is pending
// Could have an implicit conversion from Every[Char] to CharSequence like
// there is for Seq in Predef.
/*
    scala> Vector(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).collect { case i if i > 10 => i / 2 }
res1: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have an collectFirst method" in {
NonEmptySet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) collectFirst { case i if i > 10 => i / 2 } shouldBe None
NonEmptySet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) collectFirst { case i if i > 10 => i / 2 } shouldBe Some(6)
}
/*
scala> Vector(1).combinations(2).toVector
res2: Vector[scala.collection.immutable.Vector[Int]] = Vector()
*/
/*
  companion method not relevant. Has an empty and other GenTraversable stuff.
*/
it should "have a contains method" in {
val e = NonEmptySet(1, 2, 3)
e.contains(-1) shouldBe false
e.contains(0) shouldBe false
e.contains(1) shouldBe true
e.contains(2) shouldBe true
e.contains(3) shouldBe true
e.contains(4) shouldBe false
val es = NonEmptySet("one", "two", "three")
es.contains("one") shouldBe true
es.contains("ONE") shouldBe false
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6114
implicit val strEq = StringNormalizations.lowerCased.toEquality
//DOTTY-ONLY implicit val strEq: NormalizingEquality[String] = StringNormalizations.lowerCased.toEquality
es.contains("one") shouldBe true
es.contains("ONE") shouldBe false
// SKIP-DOTTY-END
}
it should "have 3 copyToArray methods" in {
val arr1 = Array.fill(5)(-1)
NonEmptySet(1, 2, 3, 4, 5).copyToArray(arr1)
arr1 shouldEqual Array(5, 1, 2, 3, 4)
val arr2 = Array.fill(5)(-1)
NonEmptySet(1, 2, 3, 4, 5).copyToArray(arr2, 1)
arr2 shouldEqual Array(-1, 5, 1, 2, 3)
val arr3 = Array.fill(5)(-1)
NonEmptySet(1, 2, 3, 4, 5).copyToArray(arr3, 1, 2)
arr3 shouldEqual Array(-1, 5, 1, -1, -1)
}
it should "have a copyToBuffer method" in {
val buf = ListBuffer.fill(3)(-1)
NonEmptySet(1, 2, 3, 4, 5).copyToBuffer(buf)
buf shouldEqual Buffer(-1, -1, -1, 5, 1, 2, 3, 4)
}
it should "have a count method" in {
val nonEmptySet = NonEmptySet(1, 2, 3, 4, 5)
nonEmptySet.count(_ > 10) shouldBe 0
nonEmptySet.count(_ % 2 == 0) shouldBe 2
nonEmptySet.count(_ % 2 == 1) shouldBe 3
}
/*
it should not have a diff method
scala> Vector(1, 2, 3).diff(Vector(1, 2, 3))
res0: scala.collection.immutable.Vector[Int] = Vector()
*/
/*
it should not have an drop method
scala> Vector(1, 2, 3).drop(3)
res1: scala.collection.immutable.Vector[Int] = Vector()
it should not have an dropRight method
scala> Vector(1, 2, 3).dropRight(3)
res0: scala.collection.immutable.Vector[Int] = Vector()
it should not have an dropWhile method
scala> Vector(1, 2, 3).dropWhile(_ < 10)
res2: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have an equals method" in {
NonEmptySet(1) shouldEqual NonEmptySet(1)
NonEmptySet(1) should not equal NonEmptySet(2)
NonEmptySet(1, 2) should not equal NonEmptySet(2, 3)
}
it should "have an exists method" in {
NonEmptySet(1, 2, 3).exists(_ == 2) shouldBe true
NonEmptySet(1, 2, 3).exists(_ == 5) shouldBe false
}
/*
it should not have a filter method
scala> Vector(1, 2, 3).filter(_ > 10)
res12: scala.collection.immutable.Vector[Int] = Vector()
it should not have a filterNot method
scala> Vector(1, 2, 3).filterNot(_ < 10)
res13: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have a find method" in {
NonEmptySet(1, 2, 3).find(_ == 5) shouldBe None
NonEmptySet(1, 2, 3).find(_ == 2) shouldBe Some(2)
}
it should "have a flatMap method" in {
NonEmptySet(1, 2, 3) flatMap (i => NonEmptySet(i + 1)) shouldBe NonEmptySet(2, 3, 4)
val ss = NonEmptySet("hi", "ho")
val is = NonEmptySet(1, 2, 3)
(for (s <- ss; i <- is) yield (s, i)) shouldBe
NonEmptySet(
("hi",1), ("hi",2), ("hi",3), ("ho",1), ("ho",2), ("ho",3)
)
NonEmptySet(5) flatMap (i => NonEmptySet(i + 3)) shouldBe NonEmptySet(8)
NonEmptySet(8) flatMap (i => NonEmptySet(i.toString)) shouldBe NonEmptySet("8")
}
/*
Can only flatten NonEmptySets
scala> Vector(Set.empty[Int], Set.empty[Int]).flatten
res17: scala.collection.immutable.Vector[Int] = Vector()
*/
// TODO: Actually it would make sense to flatten Everys too
it should "have a flatten method that works on nested NonEmptySets" in {
NonEmptySet(NonEmptySet(1, 2, 3), NonEmptySet(1, 2, 3)).flatten shouldBe NonEmptySet(1, 2, 3, 1, 2, 3)
NonEmptySet(NonEmptySet(1)).flatten shouldBe NonEmptySet(1)
}
it can "be flattened when in a GenTraversableOnce" in {
Vector(NonEmptySet(1, 2, 3), NonEmptySet(1, 2, 3)).flatten shouldBe Vector(2, 3, 1, 2, 3, 1)
Set(NonEmptySet(1, 2, 3), NonEmptySet(1, 2, 3)).flatten shouldBe Set(1, 2, 3, 1, 2, 3)
Set(NonEmptySet(1, 2, 3), NonEmptySet(1, 2, 3)).toIterator.flatten.toStream shouldBe Set(2, 3, 1, 2, 3, 1).toIterator.toStream
// SKIP-SCALATESTJS,NATIVE-START
Set(NonEmptySet(1, 2, 3), NonEmptySet(1, 2, 3)).par.flatten shouldBe Set(1, 2, 3, 1, 2, 3).par
// SKIP-SCALATESTJS,NATIVE-END
}
it should "have a fold method" in {
NonEmptySet(1).fold(0)(_ + _) shouldBe 1
NonEmptySet(1).fold(1)(_ * _) shouldBe 1
NonEmptySet(2).fold(0)(_ + _) shouldBe 2
NonEmptySet(2).fold(1)(_ * _) shouldBe 2
NonEmptySet(3).fold(0)(_ + _) shouldBe 3
NonEmptySet(3).fold(1)(_ * _) shouldBe 3
NonEmptySet(1, 2, 3).fold(0)(_ + _) shouldBe 6
NonEmptySet(1, 2, 3).fold(1)(_ * _) shouldBe 6
NonEmptySet(1, 2, 3, 4, 5).fold(0)(_ + _) shouldBe 15
NonEmptySet(1, 2, 3, 4, 5).fold(1)(_ * _) shouldBe 120
}
it should "have a foldLeft method" in {
NonEmptySet(1).foldLeft(0)(_ + _) shouldBe 1
NonEmptySet(1).foldLeft(1)(_ + _) shouldBe 2
NonEmptySet(1, 2, 3).foldLeft(0)(_ + _) shouldBe 6
NonEmptySet(1, 2, 3).foldLeft(1)(_ + _) shouldBe 7
}
it should "have a foldRight method" in {
NonEmptySet(1).foldRight(0)(_ + _) shouldBe 1
NonEmptySet(1).foldRight(1)(_ + _) shouldBe 2
NonEmptySet(1, 2, 3).foldRight(0)(_ + _) shouldBe 6
NonEmptySet(1, 2, 3).foldRight(1)(_ + _) shouldBe 7
}
it should "have a forall method" in {
NonEmptySet(1, 2, 3, 4, 5).forall(_ > 0) shouldBe true
NonEmptySet(1, 2, 3, 4, 5).forall(_ < 0) shouldBe false
}
it should "have a foreach method" in {
var num = 0
NonEmptySet(1, 2, 3) foreach (num += _)
num shouldBe 6
for (i <- NonEmptySet(1, 2, 3))
num += i
num shouldBe 12
NonEmptySet(5) foreach (num *= _)
num shouldBe 60
}
it should "have a groupBy method" in {
NonEmptySet(1, 2, 3, 4, 5).groupBy(_ % 2) shouldBe Map(1 -> NonEmptySet(1, 3, 5), 0 -> NonEmptySet(2, 4))
NonEmptySet(1, 2, 3, 3, 3).groupBy(_ % 2) shouldBe Map(1 -> NonEmptySet(1, 3, 3, 3), 0 -> NonEmptySet(2))
NonEmptySet(1, 1, 3, 3, 3).groupBy(_ % 2) shouldBe Map(1 -> NonEmptySet(1, 1, 3, 3, 3))
NonEmptySet(1, 2, 3, 5, 7).groupBy(_ % 2) shouldBe Map(1 -> NonEmptySet(1, 3, 5, 7), 0 -> NonEmptySet(2))
}
it should "have a grouped method" in {
NonEmptySet(1, 2, 3).grouped(2).toSet shouldBe Set(NonEmptySet(2, 3), NonEmptySet(1))
NonEmptySet(1, 2, 3).grouped(1).toSet shouldBe Set(NonEmptySet(2), NonEmptySet(3), NonEmptySet(1))
an [IllegalArgumentException] should be thrownBy { NonEmptySet(1, 2, 3).grouped(0).toSet }
NonEmptySet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).grouped(2).toSet shouldBe Set(NonEmptySet(1, 6), NonEmptySet(7, 3), NonEmptySet(8, 4), NonEmptySet(9, 2), NonEmptySet(5, 10))
NonEmptySet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).grouped(3).toSet shouldBe Set(NonEmptySet(5, 10, 1), NonEmptySet(6, 9, 2), NonEmptySet(7, 3, 8), NonEmptySet(4))
NonEmptySet(1, 2, 3, 4, 5, 6, 7, 8, 9, 10).grouped(4).toSet shouldBe Set(NonEmptySet(5, 10, 1, 6), NonEmptySet(9, 2, 7, 3), NonEmptySet(8, 4))
NonEmptySet(1).grouped(2).toSet shouldBe Set(NonEmptySet(1))
NonEmptySet(1).grouped(1).toSet shouldBe Set(NonEmptySet(1))
}
it should "have a hasDefiniteSize method" in {
NonEmptySet(1).hasDefiniteSize shouldBe true
NonEmptySet(1, 2).hasDefiniteSize shouldBe true
}
it should "have a hashCode method" in {
NonEmptySet(1).hashCode shouldEqual NonEmptySet(1).hashCode
NonEmptySet(1, 2).hashCode shouldEqual NonEmptySet(1, 2).hashCode
}
it should "have a head method" in {
NonEmptySet("hi").head shouldBe "hi"
NonEmptySet(1, 2, 3).head shouldBe 2
}
it should "have a headOption method" in {
NonEmptySet("hi").headOption shouldBe Some("hi")
NonEmptySet(1, 2, 3).headOption shouldBe Some(2)
}
/*
it should not have an init method
scala> Vector(1).init
res30: scala.collection.immutable.Vector[Int] = Vector()
it should "have an inits method" is pending
scala> Vector(1).inits.toSet
res32: Set[scala.collection.immutable.Vector[Int]] = Set(Vector(1), Vector())
it should "have an intersect method" is pending
scala> Vector(1, 2, 3) intersect Vector(4, 5)
res33: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have an isEmpty method" in {
NonEmptySet("hi").isEmpty shouldBe false
NonEmptySet(1, 2, 3).isEmpty shouldBe false
}
it should "have an isTraversableAgain method" in {
NonEmptySet("hi").isTraversableAgain shouldBe true
NonEmptySet(1, 2, 3).isTraversableAgain shouldBe true
}
it should "have an iterator method" in {
NonEmptySet("hi").iterator.toSet shouldBe Set("hi")
NonEmptySet(1, 2, 3).iterator.toSet shouldBe Set(1, 2, 3)
}
it should "have a last method" in {
NonEmptySet("hi").last shouldBe "hi"
NonEmptySet(1, 2, 3).last shouldBe 1
}
it should "have an lastOption method" in {
NonEmptySet("hi").lastOption shouldBe Some("hi")
NonEmptySet(1, 2, 3).lastOption shouldBe Some(1)
}
it should "have a map method" in {
NonEmptySet(1, 2, 3) map (_ + 1) shouldBe NonEmptySet(2, 3, 4)
(for (ele <- NonEmptySet(1, 2, 3)) yield ele * 2) shouldBe NonEmptySet(2, 4, 6)
NonEmptySet(5) map (_ + 3) shouldBe NonEmptySet(8)
NonEmptySet(8) map (_.toString) shouldBe NonEmptySet("8")
}
it should "have a max method" in {
NonEmptySet(1, 2, 3, 4, 5).max shouldBe 5
NonEmptySet(1).max shouldBe 1
NonEmptySet(-1).max shouldBe -1
NonEmptySet("aaa", "ccc", "bbb").max shouldBe "ccc"
}
it should "have a maxBy method" in {
NonEmptySet(1, 2, 3, 4, 5).maxBy(_.abs) shouldBe 5
NonEmptySet(1, 2, 3, 4, -5).maxBy(_.abs) shouldBe -5
}
it should "have a min method" in {
NonEmptySet(1, 2, 3, 4, 5).min shouldBe 1
NonEmptySet(1).min shouldBe 1
NonEmptySet(-1).min shouldBe -1
NonEmptySet("aaa", "ccc", "bbb").min shouldBe "aaa"
}
it should "have a minBy method" in {
NonEmptySet(1, 2, 3, 4, 5).minBy(_.abs) shouldBe 1
NonEmptySet(-1, -2, 3, 4, 5).minBy(_.abs) shouldBe -1
}
it should "have a mkString method" in {
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6705
NonEmptySet("hi").mkString shouldBe "hi"
NonEmptySet(1, 2, 3).mkString shouldBe "231"
// SKIP-DOTTY-END
NonEmptySet("hi").mkString("#") shouldBe "hi"
NonEmptySet(1, 2, 3).mkString("#") shouldBe "2#3#1"
NonEmptySet(1, 2, 3).mkString(", ") shouldBe "2, 3, 1"
NonEmptySet("hi").mkString("<", "#", ">") shouldBe "<hi>"
NonEmptySet(1, 2, 3).mkString("<", "#", ">") shouldBe "<2#3#1>"
NonEmptySet(1, 2, 3).mkString(" ( ", ", ", " ) ") shouldBe " ( 2, 3, 1 ) "
}
it should "have an nonEmpty method" in {
NonEmptySet("hi").nonEmpty shouldBe true
NonEmptySet(1, 2, 3).nonEmpty shouldBe true
}
// it should not have a par method, because I don't want to support that. If the user
// needs a parallel collection, they can use a parallel collection: nonEmptySet.toVector.par...
/*
it should not have an partition method
scala> Vector(1, 2, 3, 4, 5).partition(_ > 10)
res10: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
*/
it should "have a product method" in {
NonEmptySet(1, 2, 3).product shouldBe 6
NonEmptySet(3).product shouldBe 3
NonEmptySet(3, 4, 5).product shouldBe 60
NonEmptySet(3, 4, 5).product shouldBe 60
NonEmptySet(3.1, 4.2, 5.3).product shouldBe 69.006
}
it should "have a reduce method" in {
NonEmptySet(1, 2, 3, 4, 5).reduce(_ + _) shouldBe 15
NonEmptySet(1, 2, 3, 4, 5).reduce(_ * _) shouldBe 120
NonEmptySet(5).reduce(_ + _) shouldBe 5
NonEmptySet(5).reduce(_ * _) shouldBe 5
}
it should "have a reduceLeft method" in {
NonEmptySet(1).reduceLeft(_ + _) shouldBe 1
NonEmptySet(1).reduceLeft(_ * _) shouldBe 1
NonEmptySet(1, 2, 3).reduceLeft(_ + _) shouldBe 6
NonEmptySet(1, 2, 3).reduceLeft(_ * _) shouldBe 6
NonEmptySet(1, 2, 3, 4, 5).reduceLeft(_ * _) shouldBe 120
}
it should "have a reduceLeftOption method" in {
NonEmptySet(1).reduceLeftOption(_ + _) shouldBe Some(1)
NonEmptySet(1).reduceLeftOption(_ * _) shouldBe Some(1)
NonEmptySet(1, 2, 3).reduceLeftOption(_ + _) shouldBe Some(6)
NonEmptySet(1, 2, 3).reduceLeftOption(_ * _) shouldBe Some(6)
NonEmptySet(1, 2, 3, 4, 5).reduceLeftOption(_ * _) shouldBe Some(120)
}
it should "have a reduceOption method" in {
NonEmptySet(1, 2, 3, 4, 5).reduceOption(_ + _) shouldBe Some(15)
NonEmptySet(1, 2, 3, 4, 5).reduceOption(_ * _) shouldBe Some(120)
NonEmptySet(5).reduceOption(_ + _) shouldBe Some(5)
NonEmptySet(5).reduceOption(_ * _) shouldBe Some(5)
}
it should "have a reduceRight method" in { One(1).reduceRight(_ + _) shouldBe 1
NonEmptySet(1).reduceRight(_ * _) shouldBe 1
NonEmptySet(1, 2, 3).reduceRight(_ + _) shouldBe 6
NonEmptySet(1, 2, 3).reduceRight(_ * _) shouldBe 6
NonEmptySet(1, 2, 3, 4, 5).reduceRight(_ * _) shouldBe 120
}
it should "have a reduceRightOption method" in {
NonEmptySet(1).reduceRightOption(_ + _) shouldBe Some(1)
NonEmptySet(1).reduceRightOption(_ * _) shouldBe Some(1)
NonEmptySet(1, 2, 3).reduceRightOption(_ + _) shouldBe Some(6)
NonEmptySet(1, 2, 3).reduceRightOption(_ * _) shouldBe Some(6)
NonEmptySet(1, 2, 3, 4, 5).reduceRightOption(_ * _) shouldBe Some(120)
}
it should "have a sameElements method that takes a GenIterable" in {
NonEmptySet(1, 2, 3, 4, 5).sameElements(Set(1, 2, 3, 4, 5)) shouldBe true
NonEmptySet(1, 2, 3, 4, 5).sameElements(Set(1, 2, 3, 4)) shouldBe false
NonEmptySet(1, 2, 3, 4, 5).sameElements(Set(1, 2, 3, 4, 5, 6)) shouldBe false
NonEmptySet(1, 2, 3, 4, 5).sameElements(Set(1, 2, 3, 4, 4)) shouldBe false
NonEmptySet(3).sameElements(Set(1, 2, 3, 4, 5)) shouldBe false
NonEmptySet(3).sameElements(Set(1)) shouldBe false
NonEmptySet(3).sameElements(Set(3)) shouldBe true
}
it should "have a sameElements method that takes an Every" in {
NonEmptySet(1, 2, 3, 4, 5).sameElements(Every(5, 1, 2, 3, 4)) shouldBe true
NonEmptySet(1, 2, 3, 4, 5).sameElements(Every(1, 2, 3, 4)) shouldBe false
NonEmptySet(1, 2, 3, 4, 5).sameElements(Every(1, 2, 3, 4, 5, 6)) shouldBe false
NonEmptySet(1, 2, 3, 4, 5).sameElements(Every(1, 2, 3, 4, 4)) shouldBe false
NonEmptySet(3).sameElements(Every(1, 2, 3, 4, 5)) shouldBe false
NonEmptySet(3).sameElements(Every(1)) shouldBe false
NonEmptySet(3).sameElements(Every(3)) shouldBe true
}
it should "have a sameElements method that takes a NonEmptySet" in {
NonEmptySet(1, 2, 3, 4, 5).sameElements(NonEmptySet(1, 2, 3, 4, 5)) shouldBe true
NonEmptySet(1, 2, 3, 4, 5).sameElements(NonEmptySet(1, 2, 3, 4)) shouldBe false
NonEmptySet(1, 2, 3, 4, 5).sameElements(NonEmptySet(1, 2, 3, 4, 5, 6)) shouldBe false
NonEmptySet(1, 2, 3, 4, 5).sameElements(NonEmptySet(1, 2, 3, 4, 4)) shouldBe false
NonEmptySet(3).sameElements(NonEmptySet(1, 2, 3, 4, 5)) shouldBe false
NonEmptySet(3).sameElements(NonEmptySet(1)) shouldBe false
NonEmptySet(3).sameElements(NonEmptySet(3)) shouldBe true
}
it should "have a scan method" in {
NonEmptySet(1).scan(0)(_ + _) shouldBe NonEmptySet(0, 1)
NonEmptySet(1, 2, 3).scan(0)(_ + _) shouldBe NonEmptySet(0, 2, 5, 6)
NonEmptySet(1, 2, 3).scan("z")(_.toString + _.toString) shouldBe NonEmptySet("z", "z2", "z23", "z231")
NonEmptySet(0).scan("z")(_.toString + _.toString) shouldBe NonEmptySet("z", "z0")
}
it should "have a scanLeft method" in {
NonEmptySet(1).scanLeft(0)(_ + _) shouldBe NonEmptySet(0, 1)
NonEmptySet(1, 2, 3).scanLeft(0)(_ + _) shouldBe NonEmptySet(0, 2, 5, 6)
NonEmptySet(1, 2, 3).scanLeft("z")(_ + _) shouldBe NonEmptySet("z", "z2", "z23", "z231")
NonEmptySet(0).scanLeft("z")(_ + _) shouldBe NonEmptySet("z", "z0")
}
it should "have a scanRight method" in {
NonEmptySet(1).scanRight(0)(_ + _) shouldBe NonEmptySet(1, 0)
NonEmptySet(1, 2, 3).scanRight(0)(_ + _) shouldBe NonEmptySet(6, 4, 1, 0)
NonEmptySet(1, 2, 3).scanRight("z")(_ + _) shouldBe NonEmptySet("231z", "31z", "1z", "z")
NonEmptySet(0).scanRight("z")(_ + _) shouldBe NonEmptySet("0z", "z")
}
// it should "have a seq method" is pending
it should "have a size method" in {
NonEmptySet(5).size shouldBe 1
NonEmptySet(1, 2, 3).size shouldBe 3
}
/*
it should not have a slice method
scala> Vector(3).slice(0, 0)
res83: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3, 4, 5).slice(2, 1)
res84: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have 2 sliding methods" in {
NonEmptySet(1).sliding(1).toSet shouldBe Set(NonEmptySet(1))
NonEmptySet(1).sliding(2).toSet shouldBe Set(NonEmptySet(1))
NonEmptySet(1, 2, 3).sliding(2).toSet shouldBe Set(NonEmptySet(2, 3), NonEmptySet(3, 1))
NonEmptySet(1, 2, 3).sliding(1).toSet shouldBe Set(NonEmptySet(2), NonEmptySet(3), NonEmptySet(1))
NonEmptySet(1, 2, 3).sliding(3).toSet shouldBe Set(NonEmptySet(2, 3, 1))
NonEmptySet(1, 2, 3, 4, 5).sliding(3).toSet shouldBe Set(NonEmptySet(5, 1, 2), NonEmptySet(1, 2, 3), NonEmptySet(2, 3, 4))
NonEmptySet(1, 2, 3, 4, 5).sliding(2).toSet shouldBe Set(NonEmptySet(5, 1), NonEmptySet(1, 2), NonEmptySet(2, 3), NonEmptySet(3, 4))
NonEmptySet(1, 2, 3, 4, 5).sliding(1).toSet shouldBe Set(NonEmptySet(1), NonEmptySet(2), NonEmptySet(3), NonEmptySet(4), NonEmptySet(5))
NonEmptySet(1, 2, 3, 4, 5).sliding(4).toSet shouldBe Set(NonEmptySet(5, 1, 2, 3), NonEmptySet(1, 2, 3, 4))
NonEmptySet(1, 2, 3, 4, 5).sliding(5).toSet shouldBe Set(NonEmptySet(1, 2, 3, 4, 5))
NonEmptySet(1).sliding(1, 1).toSet shouldBe Set(NonEmptySet(1))
NonEmptySet(1).sliding(1, 2).toSet shouldBe Set(NonEmptySet(1))
NonEmptySet(1, 2, 3).sliding(1, 1).toSet shouldBe Set(NonEmptySet(1), NonEmptySet(2), NonEmptySet(3))
NonEmptySet(1, 2, 3).sliding(2, 1).toSet shouldBe Set(NonEmptySet(2, 3), NonEmptySet(3, 1))
NonEmptySet(1, 2, 3).sliding(2, 2).toSet shouldBe Set(NonEmptySet(2, 3), NonEmptySet(1))
NonEmptySet(1, 2, 3).sliding(3, 2).toSet shouldBe Set(NonEmptySet(1, 2, 3))
NonEmptySet(1, 2, 3).sliding(3, 1).toSet shouldBe Set(NonEmptySet(1, 2, 3))
NonEmptySet(1, 2, 3, 4, 5).sliding(3, 1).toSet shouldBe Set(NonEmptySet(5, 1, 2), NonEmptySet(1, 2, 3), NonEmptySet(2, 3, 4))
NonEmptySet(1, 2, 3, 4, 5).sliding(2, 2).toSet shouldBe Set(NonEmptySet(5, 1), NonEmptySet(2, 3), NonEmptySet(4))
NonEmptySet(1, 2, 3, 4, 5).sliding(2, 3).toSet shouldBe Set(NonEmptySet(5, 1), NonEmptySet(3, 4))
NonEmptySet(1, 2, 3, 4, 5).sliding(2, 4).toSet shouldBe Set(NonEmptySet(5, 1), NonEmptySet(4))
NonEmptySet(1, 2, 3, 4, 5).sliding(3, 1).toSet shouldBe Set(NonEmptySet(5, 1, 2), NonEmptySet(1, 2, 3), NonEmptySet(2, 3, 4))
NonEmptySet(1, 2, 3, 4, 5).sliding(3, 2).toSet shouldBe Set(NonEmptySet(5, 1, 2), NonEmptySet(2, 3, 4))
NonEmptySet(1, 2, 3, 4, 5).sliding(3, 3).toSet shouldBe Set(NonEmptySet(5, 1, 2), NonEmptySet(3, 4))
NonEmptySet(1, 2, 3, 4, 5).sliding(3, 4).toSet shouldBe Set(NonEmptySet(5, 1, 2), NonEmptySet(4))
}
/*
it should not have a span method
scala> Vector(1, 2, 3, 4, 5).span(_ > 10)
res105: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
it should not have a splitAt method
scala> Vector(1, 2, 3, 4, 5).splitAt(0)
res106: (scala.collection.immutable.Vector[Int], scala.collection.immutable.Vector[Int]) = (Vector(),Vector(1, 2, 3, 4, 5))
*/
it should "have a stringPrefix method" in {
NonEmptySet(1).stringPrefix shouldBe "NonEmptySet"
NonEmptySet(1, 2, 3).stringPrefix shouldBe "NonEmptySet"
}
it should "have a sum method" in {
NonEmptySet(1).sum shouldBe 1
NonEmptySet(5).sum shouldBe 5
NonEmptySet(1, 2, 3).sum shouldBe 6
NonEmptySet(1, 2, 3, 4, 5).sum shouldBe 15
NonEmptySet(1.1, 2.2, 3.3).sum shouldBe 6.6
}
/*
it should not have a tail method
scala> Vector(1).tail
res7: scala.collection.immutable.Vector[Int] = Vector()
it should not have a tails method
scala> Vector(1).tails.toSet
res8: Set[scala.collection.immutable.Vector[Int]] = Set(Vector(1), Vector())
it should not have a take method
scala> Vector(1).take(0)
res10: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).take(0)
res11: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).take(-1)
res12: scala.collection.immutable.Vector[Int] = Vector()
it should not have a takeRight method
scala> Vector(1).takeRight(1)
res13: scala.collection.immutable.Vector[Int] = Vector(1)
scala> Vector(1).takeRight(0)
res14: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1, 2, 3).takeRight(0)
res15: scala.collection.immutable.Vector[Int] = Vector()
it should not have a takeWhile method
scala> Vector(1, 2, 3).takeWhile(_ > 10)
res17: scala.collection.immutable.Vector[Int] = Vector()
scala> Vector(1).takeWhile(_ > 10)
res18: scala.collection.immutable.Vector[Int] = Vector()
*/
it should "have a to method" in {
import org.scalactic.ColCompatHelper.Factory._
NonEmptySet(1).to(Set) shouldBe Set(1)
NonEmptySet(1, 2, 3).to(Set) shouldBe Set(1, 2, 3)
NonEmptySet(1, 2, 3).to(scala.collection.mutable.ListBuffer) shouldBe ListBuffer(2, 3, 1)
NonEmptySet(1, 2, 3).to(Vector) shouldBe Vector(2, 3, 1)
}
it should "have a toArray method" in {
NonEmptySet(1, 2, 3).toArray should === (Array(2, 3, 1))
NonEmptySet("a", "b").toArray should === (Array("b", "a"))
NonEmptySet(1).toArray should === (Array(1))
}
it should "have a toBuffer method" in {
NonEmptySet(1, 2, 3).toBuffer should === (Buffer(2, 3, 1))
NonEmptySet("a", "b").toBuffer should === (Buffer("b", "a"))
NonEmptySet(1).toBuffer should === (Buffer(1))
}
it should "have a toIndexedSeq method" in {
NonEmptySet(1, 2, 3).toIndexedSeq should === (IndexedSeq(2, 3, 1))
NonEmptySet("a", "b").toIndexedSeq should === (IndexedSeq("b", "a"))
NonEmptySet(1).toIndexedSeq should === (IndexedSeq(1))
}
it should "have a toIterable method" in {
NonEmptySet(1, 2, 3).toIterable should === (Set(2, 3, 1))
NonEmptySet("a", "b").toIterable should === (Set("b", "a"))
NonEmptySet(1).toIterable should === (Set(1))
}
it should "have a toIterator method" in {
NonEmptySet(1, 2, 3).toIterator.toSet should === (Iterator(2, 3, 1).toSet)
NonEmptySet("a", "b").toIterator.toSet should === (Iterator("b", "a").toSet)
NonEmptySet(1).toIterator.toSet should === (Iterator(1).toSet)
NonEmptySet(1, 2, 3).toIterator shouldBe an [Iterator[_]]
NonEmptySet("a", "b").toIterator shouldBe an [Iterator[_]]
NonEmptySet(1).toIterator shouldBe an [Iterator[_]]
}
it should "have a toList method" in {
NonEmptySet(1, 2, 3).toList should === (List(2, 3, 1))
NonEmptySet("a", "b").toList should === (List("b", "a"))
NonEmptySet(1).toList should === (List(1))
}
it should "have a toMap method" in {
NonEmptySet("1" -> 1, "2" -> 2, "3" -> 3).toMap should === (Map("1" -> 1, "2" -> 2, "3" -> 3))
NonEmptySet('A' -> "a", 'B' -> "b").toMap should === (Map('A' -> "a", 'B' -> "b"))
NonEmptySet("1" -> 1).toMap should === (Map("1" -> 1))
}
it should "have a toSeq method" in {
NonEmptySet(1, 2, 3).toSeq should === (Seq(2, 3, 1))
NonEmptySet("a", "b").toSeq should === (Seq("b", "a"))
NonEmptySet(1).toSeq should === (Seq(1))
}
it should "have a toSet method" in {
NonEmptySet(1, 2, 3).toSet should === (Set(2, 3, 1))
NonEmptySet("a", "b").toSet should === (Set("b", "a"))
NonEmptySet(1).toSet should === (Set(1))
}
it should "have a toStream method" in {
NonEmptySet(1, 2, 3).toStream should === (Stream(2, 3, 1))
NonEmptySet("a", "b").toStream should === (Stream("b", "a"))
NonEmptySet(1).toStream should === (Stream(1))
}
it should "have a toString method" in {
NonEmptySet(1, 2, 3).toString should === ("NonEmptySet(2, 3, 1)")
NonEmptySet(1, 2, 3).toString should === ("NonEmptySet(2, 3, 1)")
NonEmptySet(1).toString should === ("NonEmptySet(1)")
}
it should "have a toVector method" in {
NonEmptySet(1, 2, 3).toVector should === (Vector(2, 3, 1))
NonEmptySet("a", "b").toVector should === (Vector("b", "a"))
NonEmptySet(1).toVector should === (Vector(1))
}
it should "have a transpose method" in {
NonEmptySet(NonEmptySet(1, 2, 3), NonEmptySet(4, 5, 6), NonEmptySet(7, 8, 9)).transpose shouldBe NonEmptySet(NonEmptySet(1, 4, 7), NonEmptySet(2, 5, 8), NonEmptySet(3, 6, 9))
NonEmptySet(NonEmptySet(1, 2), NonEmptySet(3, 4), NonEmptySet(5, 6), NonEmptySet(7, 8)).transpose shouldBe NonEmptySet(NonEmptySet(1, 3, 5, 7), NonEmptySet(2, 4, 6, 8))
NonEmptySet(NonEmptySet(1, 2), NonEmptySet(3, 4), NonEmptySet(5, 6), NonEmptySet(7, 8)).transpose.transpose shouldBe NonEmptySet(NonEmptySet(1, 2), NonEmptySet(3, 4), NonEmptySet(5, 6), NonEmptySet(7, 8))
NonEmptySet(NonEmptySet(1, 2, 3), NonEmptySet(4, 5, 6), NonEmptySet(7, 8, 9)).transpose.transpose shouldBe NonEmptySet(NonEmptySet(1, 2, 3), NonEmptySet(4, 5, 6), NonEmptySet(7, 8, 9))
}
it should "have a union method that takes a GenSeq" in {
NonEmptySet(1) union Set(1) shouldBe NonEmptySet(1, 1)
NonEmptySet(1) union Set(1, 2) shouldBe NonEmptySet(1, 1, 2)
NonEmptySet(1, 2) union Set(1, 2) shouldBe NonEmptySet(1, 2, 1, 2)
NonEmptySet(1, 2) union Set(1) shouldBe NonEmptySet(1, 2, 1)
NonEmptySet(1, 2) union Set(3, 4, 5) shouldBe NonEmptySet(1, 2, 3, 4, 5)
NonEmptySet(1, 2, 3) union Set(3, 4, 5) shouldBe NonEmptySet(1, 2, 3, 3, 4, 5)
}
it should "have a union method that takes an Every" in {
NonEmptySet(1) union Every(1) shouldBe NonEmptySet(1, 1)
NonEmptySet(1) union Every(1, 2) shouldBe NonEmptySet(1, 1, 2)
NonEmptySet(1, 2) union Every(1, 2) shouldBe NonEmptySet(1, 2, 1, 2)
NonEmptySet(1, 2) union Every(1) shouldBe NonEmptySet(1, 2, 1)
NonEmptySet(1, 2) union Every(3, 4, 5) shouldBe NonEmptySet(1, 2, 3, 4, 5)
NonEmptySet(1, 2, 3) union Every(3, 4, 5) shouldBe NonEmptySet(1, 2, 3, 3, 4, 5)
}
it should "have a union method that takes a NonEmptySet" in {
NonEmptySet(1) union NonEmptySet(1) shouldBe NonEmptySet(1, 1)
NonEmptySet(1) union NonEmptySet(1, 2) shouldBe NonEmptySet(1, 1, 2)
NonEmptySet(1, 2) union NonEmptySet(1, 2) shouldBe NonEmptySet(1, 2, 1, 2)
NonEmptySet(1, 2) union NonEmptySet(1) shouldBe NonEmptySet(1, 2, 1)
NonEmptySet(1, 2) union NonEmptySet(3, 4, 5) shouldBe NonEmptySet(1, 2, 3, 4, 5)
NonEmptySet(1, 2, 3) union NonEmptySet(3, 4, 5) shouldBe NonEmptySet(1, 2, 3, 3, 4, 5)
}
it should "have an unzip method" in {
NonEmptySet((1, 2)).unzip shouldBe (NonEmptySet(1),NonEmptySet(2))
NonEmptySet((1, 2), (3, 4)).unzip shouldBe (NonEmptySet(1, 3), NonEmptySet(2, 4))
NonEmptySet((1, 2), (3, 4), (5, 6)).unzip shouldBe (NonEmptySet(1, 3, 5), NonEmptySet(2, 4, 6))
}
it should "have an unzip3 method" in {
NonEmptySet((1, 2, 3)).unzip3 shouldBe (NonEmptySet(1), NonEmptySet(2), NonEmptySet(3))
NonEmptySet((1, 2, 3), (4, 5, 6)).unzip3 shouldBe (NonEmptySet(1, 4), NonEmptySet(2, 5), NonEmptySet(3, 6))
NonEmptySet((1, 2, 3), (4, 5, 6), (7, 8, 9)).unzip3 shouldBe (NonEmptySet(1, 4, 7), NonEmptySet(2, 5, 8), NonEmptySet(3, 6, 9))
}
/*
  it should not have 2 view methods, because I don't want to support views in NonEmptySet
*/
/*
it should not have a zip method
scala> Set(1) zip Nil
res0: Set[(Int, Nothing)] = Set()
*/
it should "have a zipAll method that takes an Iterable" in {
// Empty on right
NonEmptySet(1).zipAll(Nil, -1, -2) shouldBe NonEmptySet((1, -2))
NonEmptySet(1, 2).zipAll(Nil, -1, -2) shouldBe NonEmptySet((1, -2), (2, -2))
// Same length
NonEmptySet(1).zipAll(Set(1), -1, -2) shouldBe NonEmptySet((1, 1))
NonEmptySet(1, 2).zipAll(Set(1, 2), -1, -2) shouldBe NonEmptySet((2, 1), (1, 2))
// Non-empty, longer on right
NonEmptySet(1).zipAll(Set(10, 20), -1, -2) shouldBe NonEmptySet((1,10), (-1,20))
NonEmptySet(1, 2).zipAll(Set(10, 20, 30), -1, -2) shouldBe NonEmptySet((2, 10), (1, 20), (-1, 30))
// Non-empty, shorter on right
NonEmptySet(1, 2, 3).zipAll(Set(10, 20), -1, -2) shouldBe NonEmptySet((2, 10), (3, 20), (1, -2))
NonEmptySet(1, 2, 3, 4).zipAll(Set(10, 20, 30), -1, -2) shouldBe NonEmptySet((2,10), (3,20), (4,30), (1,-2))
}
it should "have a zipAll method that takes an Every" in {
// Same length
NonEmptySet(1).zipAll(Every(1), -1, -2) shouldBe NonEmptySet((1, 1))
NonEmptySet(1, 2).zipAll(Every(1, 2), -1, -2) shouldBe NonEmptySet((2, 1), (1, 2))
// Non-empty, longer on right
NonEmptySet(1).zipAll(Every(10, 20), -1, -2) shouldBe NonEmptySet((1,10), (-1,20))
NonEmptySet(1, 2).zipAll(Every(10, 20, 30), -1, -2) shouldBe NonEmptySet((2,10), (1,20), (-1,30))
// Non-empty, shorter on right
NonEmptySet(1, 2, 3).zipAll(Every(10, 20), -1, -2) shouldBe NonEmptySet((2,10), (3,20), (1,-2))
NonEmptySet(1, 2, 3, 4).zipAll(Every(10, 20, 30), -1, -2) shouldBe NonEmptySet((2,10), (3,20), (4,30), (1,-2))
}
it should "have a zipAll method that takes a NonEmptySet" in {
// Same length
NonEmptySet(1).zipAll(NonEmptySet(1), -1, -2) shouldBe NonEmptySet((1, 1))
NonEmptySet(1, 2).zipAll(NonEmptySet(1, 2), -1, -2) shouldBe NonEmptySet((1, 1), (2, 2))
// Non-empty, longer on right
NonEmptySet(1).zipAll(NonEmptySet(10, 20), -1, -2) shouldBe NonEmptySet((1,20), (-1,10))
NonEmptySet(1, 2).zipAll(NonEmptySet(10, 20, 30), -1, -2) shouldBe NonEmptySet((2,20), (1,30), (-1,10))
// Non-empty, shorter on right
NonEmptySet(1, 2, 3).zipAll(NonEmptySet(10, 20), -1, -2) shouldBe NonEmptySet((2,20), (3,10), (1,-2))
NonEmptySet(1, 2, 3, 4).zipAll(NonEmptySet(10, 20, 30), -1, -2) shouldBe NonEmptySet((2,20), (3,30), (4,10), (1,-2))
}
it should "have a zipWithIndex method" in {
NonEmptySet(99).zipWithIndex shouldBe NonEmptySet((99,0))
NonEmptySet(1, 2, 3, 4, 5).zipWithIndex shouldBe NonEmptySet((5,0), (4,4), (1,1), (2,2), (3,3))
}
"End" should "have a pretty toString" in {
End.toString shouldBe "End"
}
}
| dotty-staging/scalatest | scalactic-test/src/test/scala/org/scalactic/anyvals/NonEmptySetSpec.scala | Scala | apache-2.0 | 38,593 |
package interretis.intro
import interretis.utils.SparkContextBuilder.buildContext
import org.apache.spark.rdd.RDD
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
class WordCount {
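  /**
   * Splits each line on single spaces, pairs every word with a count of 1 (cached for reuse),
   * and sums the counts per word with reduceByKey.
   */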
def wordCount(lines: RDD[String]): RDD[(String, Int)] = {
val words = lines flatMap (_ split " ")
val occurences = words map ((_, 1)) cache ()
val wordCounts = occurences reduceByKey (_ + _)
wordCounts
}
}
object WordCount {
def main(args: Array[String]): Unit = {
val (input, output) = processArguments(args)
val sc = buildContext(appName = "WordCount")
val lines = sc textFile input
val app = new WordCount
val counts = app wordCount lines
counts saveAsTextFile output
}
private def processArguments(args: Array[String]) = {
val expected = 2
val actual = args.length
if (actual != expected) {
sys error s"$expected arguments required and $actual given"
sys exit 1
}
val input = args(0)
val output = args(1)
(input, output)
}
}
| MarekDudek/spark-certification | src/main/scala/interretis/intro/WordCount.scala | Scala | mit | 1,022 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.python
import java.io._
import java.net._
import java.util.concurrent.atomic.AtomicBoolean
import scala.collection.JavaConverters._
import org.apache.arrow.vector.VectorSchemaRoot
import org.apache.arrow.vector.stream.{ArrowStreamReader, ArrowStreamWriter}
import org.apache.spark._
import org.apache.spark.api.python._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.arrow.{ArrowUtils, ArrowWriter}
import org.apache.spark.sql.execution.vectorized.{ArrowColumnVector, ColumnarBatch, ColumnVector}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* Similar to `PythonUDFRunner`, but exchange data with Python worker via Arrow stream.
*/
class ArrowPythonRunner(
funcs: Seq[ChainedPythonFunctions],
bufferSize: Int,
reuseWorker: Boolean,
evalType: Int,
argOffsets: Array[Array[Int]],
schema: StructType,
timeZoneId: String,
respectTimeZone: Boolean)
extends BasePythonRunner[Iterator[InternalRow], ColumnarBatch](
funcs, bufferSize, reuseWorker, evalType, argOffsets) {
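  // Writer side: sends the chained UDFs and the session time zone (or a null marker when time
  // zones are not respected), then streams the input rows to the Python worker as Arrow record
  // batches, one batch per inner iterator.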
protected override def newWriterThread(
env: SparkEnv,
worker: Socket,
inputIterator: Iterator[Iterator[InternalRow]],
partitionIndex: Int,
context: TaskContext): WriterThread = {
new WriterThread(env, worker, inputIterator, partitionIndex, context) {
protected override def writeCommand(dataOut: DataOutputStream): Unit = {
PythonUDFRunner.writeUDFs(dataOut, funcs, argOffsets)
if (respectTimeZone) {
PythonRDD.writeUTF(timeZoneId, dataOut)
} else {
dataOut.writeInt(SpecialLengths.NULL)
}
}
protected override def writeIteratorToStream(dataOut: DataOutputStream): Unit = {
val arrowSchema = ArrowUtils.toArrowSchema(schema, timeZoneId)
val allocator = ArrowUtils.rootAllocator.newChildAllocator(
s"stdout writer for $pythonExec", 0, Long.MaxValue)
val root = VectorSchemaRoot.create(arrowSchema, allocator)
val arrowWriter = ArrowWriter.create(root)
var closed = false
context.addTaskCompletionListener { _ =>
if (!closed) {
root.close()
allocator.close()
}
}
val writer = new ArrowStreamWriter(root, null, dataOut)
writer.start()
Utils.tryWithSafeFinally {
while (inputIterator.hasNext) {
val nextBatch = inputIterator.next()
while (nextBatch.hasNext) {
arrowWriter.write(nextBatch.next())
}
arrowWriter.finish()
writer.writeBatch()
arrowWriter.reset()
}
} {
writer.end()
root.close()
allocator.close()
closed = true
}
}
}
}
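  // Reader side: opens an Arrow stream once the worker signals START_ARROW_STREAM, then surfaces
  // each incoming Arrow batch as a ColumnarBatch; timing data, Python exceptions and the
  // end-of-data marker are handled between batches.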
protected override def newReaderIterator(
stream: DataInputStream,
writerThread: WriterThread,
startTime: Long,
env: SparkEnv,
worker: Socket,
released: AtomicBoolean,
context: TaskContext): Iterator[ColumnarBatch] = {
new ReaderIterator(stream, writerThread, startTime, env, worker, released, context) {
private val allocator = ArrowUtils.rootAllocator.newChildAllocator(
s"stdin reader for $pythonExec", 0, Long.MaxValue)
private var reader: ArrowStreamReader = _
private var root: VectorSchemaRoot = _
private var schema: StructType = _
private var vectors: Array[ColumnVector] = _
private var closed = false
context.addTaskCompletionListener { _ =>
// todo: we need something like `reader.end()`, which release all the resources, but leave
// the input stream open. `reader.close()` will close the socket and we can't reuse worker.
// So here we simply not close the reader, which is problematic.
if (!closed) {
if (root != null) {
root.close()
}
allocator.close()
}
}
private var batchLoaded = true
protected override def read(): ColumnarBatch = {
if (writerThread.exception.isDefined) {
throw writerThread.exception.get
}
try {
if (reader != null && batchLoaded) {
batchLoaded = reader.loadNextBatch()
if (batchLoaded) {
val batch = new ColumnarBatch(schema, vectors, root.getRowCount)
batch.setNumRows(root.getRowCount)
batch
} else {
root.close()
allocator.close()
closed = true
// Reach end of stream. Call `read()` again to read control data.
read()
}
} else {
stream.readInt() match {
case SpecialLengths.START_ARROW_STREAM =>
reader = new ArrowStreamReader(stream, allocator)
root = reader.getVectorSchemaRoot()
schema = ArrowUtils.fromArrowSchema(root.getSchema())
vectors = root.getFieldVectors().asScala.map { vector =>
new ArrowColumnVector(vector)
}.toArray[ColumnVector]
read()
case SpecialLengths.TIMING_DATA =>
handleTimingData()
read()
case SpecialLengths.PYTHON_EXCEPTION_THROWN =>
throw handlePythonException()
case SpecialLengths.END_OF_DATA_SECTION =>
handleEndOfDataSection()
null
}
}
} catch handleException
}
}
}
}
| ron8hu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/python/ArrowPythonRunner.scala | Scala | apache-2.0 | 6,438 |
package unfiltered.filter
import unfiltered.response._
import unfiltered.Async
import javax.servlet.http.HttpServletResponse
trait AsyncBinding extends Async.Responder[HttpServletResponse] {
self: RequestBinding =>
private[filter] val con: org.eclipse.jetty.continuation.Continuation
private[filter] val filterChain: javax.servlet.FilterChain
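  // A Pass response defers to the remainder of the filter chain; any other response function is
  // applied to the underlying servlet response. In both cases the Jetty continuation is completed.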
def respond(rf: ResponseFunction[HttpServletResponse]) {
rf match {
case Pass =>
filterChain.doFilter(self.underlying, con.getServletResponse)
case rf =>
rf(new ResponseBinding(
con.getServletResponse.asInstanceOf[HttpServletResponse]
))
}
con.complete
}
}
| benhutchison/unfiltered | filter-async/src/main/scala/bindings.scala | Scala | mit | 673 |
package scavlink.task.schema
import scavlink.task._
import scala.collection.immutable.ListMap
/**
* Simplified definition for top-level request, response, and type reference schemas.
*/
case class RootSchema(title: String,
description: Option[String],
messages: List[Schema]) {
val schema = "http://json-schema.org/draft-04/schema#"
}
object RootSchema {
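  /**
   * Request-side schema: a start-task message derived from the supplied APIs, a stop-task message
   * keyed by its context, and the telemetry start/stop messages, each given a context property.
   */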
def requests(apis: Seq[TaskAPI]): RootSchema = {
val schemas = List(
Schema.singleProperty(propertyNameOf[StartTask], Schema.fromAPIs(apis)),
Schema.singleProperty(propertyNameOf[StopTask],
Schema(PropertyType.Object, ListMap("context" -> Right(Schema(PropertyType.Any))), List("context"))),
Schema.fromClass[StartTelemetry],
Schema.fromClass[StopTelemetry]
)
RootSchema("requests", None, schemas.map(addContext))
}
def responses(): RootSchema = {
val schemas = List(
Schema.fromClass[VehicleUp],
Schema.fromClass[VehicleDown],
Schema.fromClass[StatusMessage],
Schema.fromClass[Telemetry],
addContext(Schema.fromClass[TaskProgress]),
addContext(Schema.fromClass[TaskComplete]),
addContext(Schema.singleProperty("error", Schema(PropertyType.String)))
)
RootSchema("responses", None, schemas)
}
private def addContext(schema: Schema): Schema =
schema.copy(properties = ListMap("context" -> Right(Schema(PropertyType.Any))) ++ schema.properties)
} | nickolasrossi/scavlink | src/main/scala/scavlink/task/schema/RootSchema.scala | Scala | mit | 1,468 |
trait Foo {
type Repr[+O] <: Foo {
type Repr[+OO] = Foo.this.Repr[OO]
}
def foo[T](f: Repr[T]): f.Repr[T] = ???
} | lampepfl/dotty | tests/pos-deep-subtype/i9346.scala | Scala | apache-2.0 | 124 |
package io.buoyant.namerd.storage.etcd
import com.twitter.finagle.Dtab
import com.twitter.io.Buf
import com.twitter.util.Activity
import io.buoyant.etcd.{Etcd, EtcdFixture}
import io.buoyant.namer.RichActivity
import io.buoyant.namerd.DtabStore.{DtabNamespaceAlreadyExistsException, DtabNamespaceDoesNotExistException, DtabVersionMismatchException}
import io.buoyant.namerd.VersionedDtab
import io.buoyant.test.Exceptions
class EtcdDtabStoreIntegrationTest extends EtcdFixture with Exceptions {
def mkStore(etcd: Etcd) = {
val key = etcd.key("/dtabs")
new EtcdDtabStore(key)
}
def extractDtab(obs: Activity[Option[VersionedDtab]]): Dtab =
await(obs.values.map(_.get.get.dtab).toFuture)
test("observe follows changes") { etcd =>
val store = mkStore(etcd)
val obs = store.observe("hello")
assert(await(obs.toFuture).isEmpty)
await(store.create("hello", Dtab.read("/hello => /world")))
val dtab = await(obs.toFuture).get
assert(dtab.dtab == Dtab.read("/hello => /world"))
await(store.update("hello", Dtab.read("/goodbye => /world"), dtab.version))
val updatedDtab = await(obs.toFuture).get
assert(updatedDtab.dtab == Dtab.read("/goodbye => /world"))
assert(updatedDtab.version != dtab.version)
}
test("fail to create duplicate namespace") { etcd =>
val store = mkStore(etcd)
assert(
await(store.create("duplicate", Dtab.read("/hello => /world"))).equals(())
)
assertThrows[DtabNamespaceAlreadyExistsException] {
await(store.create("duplicate", Dtab.read("/hello => /world")))
}
}
test("fail to update with incorrect version") { etcd =>
val store = mkStore(etcd)
await(store.create("test", Dtab.read("/hello => /world")))
assertThrows[DtabVersionMismatchException] {
await(store.update("test", Dtab.read("/hello => /world"), Buf.Utf8("999")))
}
}
test("fail to update non-existent namespace") { etcd =>
val store = mkStore(etcd)
assertThrows[DtabNamespaceDoesNotExistException] {
await(store.update("nothing", Dtab.read("/hello => /world"), Buf.Utf8("1")))
}
}
test("put creates new namespace") { etcd =>
val store = mkStore(etcd)
await(store.put("hello", Dtab.read("/hello => /world")))
assert(
extractDtab(store.observe("hello")) == Dtab.read("/hello => /world")
)
}
test("put updates existing namespace") { etcd =>
val store = mkStore(etcd)
await(store.put("test", Dtab.read("/hello => /world")))
assert(
extractDtab(store.observe("test")) == Dtab.read("/hello => /world")
)
}
test("delete deletes dtab") { etcd =>
val store = mkStore(etcd)
val obs = store.observe("test")
assert(await(obs.toFuture).isDefined)
await(store.delete("test"))
assert(await(obs.toFuture).isEmpty)
assert(!await(store.list().toFuture).contains("test"))
}
}
| linkerd/linkerd | namerd/storage/etcd/src/integration/scala/io/buoyant/namerd/storage/etcd/EtcdDtabStoreIntegrationTest.scala | Scala | apache-2.0 | 2,870 |
/*
* IJ-Plugins
* Copyright (C) 2002-2021 Jarek Sacha
* Author's email: jpsacha at gmail dot com
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Latest release available at https://github.com/ij-plugins/ijp-toolkit/
*/
package ij_plugins.toolkit.ui.progress
/**
* Example of creating and listening to `ProgressReporter`
*/
object ProgressReporterExample extends App {
// Create counter
val counter = new CounterWithProgress4J('+')
// Add progress listener
counter.addProgressListener((e: ProgressEvent) => println(f"\nProgress notification: ${e.progressPercent}%3.0f%%"))
// Count
counter.count(100)
}
/**
* Example of using `ProgressReporter`
*/
class CounterWithProgress(marker: Char) extends ProgressReporter4J {
def count(max: Int): Unit = {
val progressIncrement = Math.max(max / 10, 1)
println("Counting " + max + " '" + marker + "'.")
for (i <- 1 to max) {
print(marker)
if (i % progressIncrement == 0) notifyProgressListeners(i, max)
}
println("\nCounting done.")
}
} | ij-plugins/ijp-toolkit | examples/scala/ij_plugins/toolkit/ui/progress/ProgressReporterExample.scala | Scala | lgpl-2.1 | 1,754 |
/**
* Copyright (c) 2013 Saddle Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package org.saddle.io
import java.io.{InputStreamReader, BufferedReader, RandomAccessFile}
import java.nio.channels.FileChannel
import it.unimi.dsi.io.ByteBufferInputStream
import org.saddle.UTF8
/**
* CsvFile provides an implementation of a [[org.saddle.io.CsvSource]] for
* parsing a CSV file.
*
* For example,
*
* {{{
* val fs = CsvFile("tmp.csv")
* val data = CsvParser.parse(CsvParser.parseInt)(fs)
* ...
* data.toFrame
* }}}
*
* @param path Path to file
* @param encoding Encoding of text file
*/
class CsvFile(path: String, encoding: String = UTF8) extends CsvSource {
private val file = new RandomAccessFile(path, "r")
private val chan = file.getChannel
private val stream = ByteBufferInputStream.map(chan, FileChannel.MapMode.READ_ONLY)
private val reader = new BufferedReader(new InputStreamReader(stream, encoding))
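  /** Reads the next line, returning null at end of input, at which point the backing file is closed. */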
def readLine = {
val line = reader.readLine()
if (line == null) file.close()
line
}
override def toString = "CsvFile(%s, encoding: %s)".format(path, encoding)
}
object CsvFile {
def apply(path: String, encoding: String = UTF8) = new CsvFile(path, encoding)
} | saddle/saddle | saddle-core/src/main/scala/org/saddle/io/CsvFile.scala | Scala | apache-2.0 | 1,756 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.rest
import java.io.DataOutputStream
import java.net.{HttpURLConnection, URL}
import javax.servlet.http.HttpServletResponse
import scala.collection.mutable
import com.google.common.base.Charsets
import org.scalatest.BeforeAndAfterEach
import org.json4s.JsonAST._
import org.json4s.jackson.JsonMethods._
import org.apache.spark._
import org.apache.spark.rpc._
import org.apache.spark.util.Utils
import org.apache.spark.deploy.DeployMessages._
import org.apache.spark.deploy.{SparkSubmit, SparkSubmitArguments}
import org.apache.spark.deploy.master.DriverState._
/**
* Tests for the REST application submission protocol used in standalone cluster mode.
*/
class StandaloneRestSubmitSuite extends SparkFunSuite with BeforeAndAfterEach {
private var rpcEnv: Option[RpcEnv] = None
private var server: Option[RestSubmissionServer] = None
override def afterEach() {
rpcEnv.foreach(_.shutdown())
server.foreach(_.stop())
}
test("construct submit request") {
val appArgs = Array("one", "two", "three")
val sparkProperties = Map("spark.app.name" -> "pi")
val environmentVariables = Map("SPARK_ONE" -> "UN", "SPARK_TWO" -> "DEUX")
val request = new RestSubmissionClient("spark://host:port").constructSubmitRequest(
"my-app-resource", "my-main-class", appArgs, sparkProperties, environmentVariables)
assert(request.action === Utils.getFormattedClassName(request))
assert(request.clientSparkVersion === SPARK_VERSION)
assert(request.appResource === "my-app-resource")
assert(request.mainClass === "my-main-class")
assert(request.appArgs === appArgs)
assert(request.sparkProperties === sparkProperties)
assert(request.environmentVariables === environmentVariables)
}
test("create submission") {
val submittedDriverId = "my-driver-id"
val submitMessage = "your driver is submitted"
val masterUrl = startDummyServer(submitId = submittedDriverId, submitMessage = submitMessage)
val appArgs = Array("one", "two", "four")
val request = constructSubmitRequest(masterUrl, appArgs)
assert(request.appArgs === appArgs)
assert(request.sparkProperties("spark.master") === masterUrl)
val response = new RestSubmissionClient(masterUrl).createSubmission(request)
val submitResponse = getSubmitResponse(response)
assert(submitResponse.action === Utils.getFormattedClassName(submitResponse))
assert(submitResponse.serverSparkVersion === SPARK_VERSION)
assert(submitResponse.message === submitMessage)
assert(submitResponse.submissionId === submittedDriverId)
assert(submitResponse.success)
}
test("create submission from main method") {
val submittedDriverId = "your-driver-id"
val submitMessage = "my driver is submitted"
val masterUrl = startDummyServer(submitId = submittedDriverId, submitMessage = submitMessage)
val conf = new SparkConf(loadDefaults = false)
conf.set("spark.master", masterUrl)
conf.set("spark.app.name", "dreamer")
val appArgs = Array("one", "two", "six")
// main method calls this
val response = RestSubmissionClient.run("app-resource", "main-class", appArgs, conf)
val submitResponse = getSubmitResponse(response)
assert(submitResponse.action === Utils.getFormattedClassName(submitResponse))
assert(submitResponse.serverSparkVersion === SPARK_VERSION)
assert(submitResponse.message === submitMessage)
assert(submitResponse.submissionId === submittedDriverId)
assert(submitResponse.success)
}
test("kill submission") {
val submissionId = "my-lyft-driver"
val killMessage = "your driver is killed"
val masterUrl = startDummyServer(killMessage = killMessage)
val response = new RestSubmissionClient(masterUrl).killSubmission(submissionId)
val killResponse = getKillResponse(response)
assert(killResponse.action === Utils.getFormattedClassName(killResponse))
assert(killResponse.serverSparkVersion === SPARK_VERSION)
assert(killResponse.message === killMessage)
assert(killResponse.submissionId === submissionId)
assert(killResponse.success)
}
test("request submission status") {
val submissionId = "my-uber-driver"
val submissionState = KILLED
val submissionException = new Exception("there was an irresponsible mix of alcohol and cars")
val masterUrl = startDummyServer(state = submissionState, exception = Some(submissionException))
val response = new RestSubmissionClient(masterUrl).requestSubmissionStatus(submissionId)
val statusResponse = getStatusResponse(response)
assert(statusResponse.action === Utils.getFormattedClassName(statusResponse))
assert(statusResponse.serverSparkVersion === SPARK_VERSION)
assert(statusResponse.message.contains(submissionException.getMessage))
assert(statusResponse.submissionId === submissionId)
assert(statusResponse.driverState === submissionState.toString)
assert(statusResponse.success)
}
test("create then kill") {
val masterUrl = startSmartServer()
val request = constructSubmitRequest(masterUrl)
val client = new RestSubmissionClient(masterUrl)
val response1 = client.createSubmission(request)
val submitResponse = getSubmitResponse(response1)
assert(submitResponse.success)
assert(submitResponse.submissionId != null)
// kill submission that was just created
val submissionId = submitResponse.submissionId
val response2 = client.killSubmission(submissionId)
val killResponse = getKillResponse(response2)
assert(killResponse.success)
assert(killResponse.submissionId === submissionId)
}
test("create then request status") {
val masterUrl = startSmartServer()
val request = constructSubmitRequest(masterUrl)
val client = new RestSubmissionClient(masterUrl)
val response1 = client.createSubmission(request)
val submitResponse = getSubmitResponse(response1)
assert(submitResponse.success)
assert(submitResponse.submissionId != null)
// request status of submission that was just created
val submissionId = submitResponse.submissionId
val response2 = client.requestSubmissionStatus(submissionId)
val statusResponse = getStatusResponse(response2)
assert(statusResponse.success)
assert(statusResponse.submissionId === submissionId)
assert(statusResponse.driverState === RUNNING.toString)
}
test("create then kill then request status") {
val masterUrl = startSmartServer()
val request = constructSubmitRequest(masterUrl)
val client = new RestSubmissionClient(masterUrl)
val response1 = client.createSubmission(request)
val response2 = client.createSubmission(request)
val submitResponse1 = getSubmitResponse(response1)
val submitResponse2 = getSubmitResponse(response2)
assert(submitResponse1.success)
assert(submitResponse2.success)
assert(submitResponse1.submissionId != null)
assert(submitResponse2.submissionId != null)
val submissionId1 = submitResponse1.submissionId
val submissionId2 = submitResponse2.submissionId
// kill only submission 1, but not submission 2
val response3 = client.killSubmission(submissionId1)
val killResponse = getKillResponse(response3)
assert(killResponse.success)
assert(killResponse.submissionId === submissionId1)
// request status for both submissions: 1 should be KILLED but 2 should be RUNNING still
val response4 = client.requestSubmissionStatus(submissionId1)
val response5 = client.requestSubmissionStatus(submissionId2)
val statusResponse1 = getStatusResponse(response4)
val statusResponse2 = getStatusResponse(response5)
assert(statusResponse1.submissionId === submissionId1)
assert(statusResponse2.submissionId === submissionId2)
assert(statusResponse1.driverState === KILLED.toString)
assert(statusResponse2.driverState === RUNNING.toString)
}
test("kill or request status before create") {
val masterUrl = startSmartServer()
val doesNotExist = "does-not-exist"
val client = new RestSubmissionClient(masterUrl)
// kill a non-existent submission
val response1 = client.killSubmission(doesNotExist)
val killResponse = getKillResponse(response1)
assert(!killResponse.success)
assert(killResponse.submissionId === doesNotExist)
// request status for a non-existent submission
val response2 = client.requestSubmissionStatus(doesNotExist)
val statusResponse = getStatusResponse(response2)
assert(!statusResponse.success)
assert(statusResponse.submissionId === doesNotExist)
}
/* ---------------------------------------- *
| Aberrant client / server behavior |
* ---------------------------------------- */
test("good request paths") {
val masterUrl = startSmartServer()
val httpUrl = masterUrl.replace("spark://", "http://")
val v = RestSubmissionServer.PROTOCOL_VERSION
val json = constructSubmitRequest(masterUrl).toJson
val submitRequestPath = s"$httpUrl/$v/submissions/create"
val killRequestPath = s"$httpUrl/$v/submissions/kill"
val statusRequestPath = s"$httpUrl/$v/submissions/status"
val (response1, code1) = sendHttpRequestWithResponse(submitRequestPath, "POST", json)
val (response2, code2) = sendHttpRequestWithResponse(s"$killRequestPath/anything", "POST")
val (response3, code3) = sendHttpRequestWithResponse(s"$killRequestPath/any/thing", "POST")
val (response4, code4) = sendHttpRequestWithResponse(s"$statusRequestPath/anything", "GET")
val (response5, code5) = sendHttpRequestWithResponse(s"$statusRequestPath/any/thing", "GET")
// these should all succeed and the responses should be of the correct types
getSubmitResponse(response1)
val killResponse1 = getKillResponse(response2)
val killResponse2 = getKillResponse(response3)
val statusResponse1 = getStatusResponse(response4)
val statusResponse2 = getStatusResponse(response5)
assert(killResponse1.submissionId === "anything")
assert(killResponse2.submissionId === "any")
assert(statusResponse1.submissionId === "anything")
assert(statusResponse2.submissionId === "any")
assert(code1 === HttpServletResponse.SC_OK)
assert(code2 === HttpServletResponse.SC_OK)
assert(code3 === HttpServletResponse.SC_OK)
assert(code4 === HttpServletResponse.SC_OK)
assert(code5 === HttpServletResponse.SC_OK)
}
test("good request paths, bad requests") {
val masterUrl = startSmartServer()
val httpUrl = masterUrl.replace("spark://", "http://")
val v = RestSubmissionServer.PROTOCOL_VERSION
val submitRequestPath = s"$httpUrl/$v/submissions/create"
val killRequestPath = s"$httpUrl/$v/submissions/kill"
val statusRequestPath = s"$httpUrl/$v/submissions/status"
val goodJson = constructSubmitRequest(masterUrl).toJson
    val badJson1 = goodJson.replaceAll("action", "fraction") // syntactically valid JSON, but the required "action" field is gone
val badJson2 = goodJson.substring(goodJson.size / 2) // malformed JSON
    val notJson = "\"hello, world\""
val (response1, code1) = sendHttpRequestWithResponse(submitRequestPath, "POST") // missing JSON
val (response2, code2) = sendHttpRequestWithResponse(submitRequestPath, "POST", badJson1)
val (response3, code3) = sendHttpRequestWithResponse(submitRequestPath, "POST", badJson2)
val (response4, code4) = sendHttpRequestWithResponse(killRequestPath, "POST") // missing ID
val (response5, code5) = sendHttpRequestWithResponse(s"$killRequestPath/", "POST")
val (response6, code6) = sendHttpRequestWithResponse(statusRequestPath, "GET") // missing ID
val (response7, code7) = sendHttpRequestWithResponse(s"$statusRequestPath/", "GET")
val (response8, code8) = sendHttpRequestWithResponse(submitRequestPath, "POST", notJson)
// these should all fail as error responses
getErrorResponse(response1)
getErrorResponse(response2)
getErrorResponse(response3)
getErrorResponse(response4)
getErrorResponse(response5)
getErrorResponse(response6)
getErrorResponse(response7)
getErrorResponse(response8)
assert(code1 === HttpServletResponse.SC_BAD_REQUEST)
assert(code2 === HttpServletResponse.SC_BAD_REQUEST)
assert(code3 === HttpServletResponse.SC_BAD_REQUEST)
assert(code4 === HttpServletResponse.SC_BAD_REQUEST)
assert(code5 === HttpServletResponse.SC_BAD_REQUEST)
assert(code6 === HttpServletResponse.SC_BAD_REQUEST)
assert(code7 === HttpServletResponse.SC_BAD_REQUEST)
assert(code8 === HttpServletResponse.SC_BAD_REQUEST)
}
test("bad request paths") {
val masterUrl = startSmartServer()
val httpUrl = masterUrl.replace("spark://", "http://")
val v = RestSubmissionServer.PROTOCOL_VERSION
val (response1, code1) = sendHttpRequestWithResponse(httpUrl, "GET")
val (response2, code2) = sendHttpRequestWithResponse(s"$httpUrl/", "GET")
val (response3, code3) = sendHttpRequestWithResponse(s"$httpUrl/$v", "GET")
val (response4, code4) = sendHttpRequestWithResponse(s"$httpUrl/$v/", "GET")
val (response5, code5) = sendHttpRequestWithResponse(s"$httpUrl/$v/submissions", "GET")
val (response6, code6) = sendHttpRequestWithResponse(s"$httpUrl/$v/submissions/", "GET")
val (response7, code7) = sendHttpRequestWithResponse(s"$httpUrl/$v/submissions/bad", "GET")
val (response8, code8) = sendHttpRequestWithResponse(s"$httpUrl/bad-version", "GET")
assert(code1 === HttpServletResponse.SC_BAD_REQUEST)
assert(code2 === HttpServletResponse.SC_BAD_REQUEST)
assert(code3 === HttpServletResponse.SC_BAD_REQUEST)
assert(code4 === HttpServletResponse.SC_BAD_REQUEST)
assert(code5 === HttpServletResponse.SC_BAD_REQUEST)
assert(code6 === HttpServletResponse.SC_BAD_REQUEST)
assert(code7 === HttpServletResponse.SC_BAD_REQUEST)
assert(code8 === RestSubmissionServer.SC_UNKNOWN_PROTOCOL_VERSION)
// all responses should be error responses
val errorResponse1 = getErrorResponse(response1)
val errorResponse2 = getErrorResponse(response2)
val errorResponse3 = getErrorResponse(response3)
val errorResponse4 = getErrorResponse(response4)
val errorResponse5 = getErrorResponse(response5)
val errorResponse6 = getErrorResponse(response6)
val errorResponse7 = getErrorResponse(response7)
val errorResponse8 = getErrorResponse(response8)
// only the incompatible version response should have server protocol version set
assert(errorResponse1.highestProtocolVersion === null)
assert(errorResponse2.highestProtocolVersion === null)
assert(errorResponse3.highestProtocolVersion === null)
assert(errorResponse4.highestProtocolVersion === null)
assert(errorResponse5.highestProtocolVersion === null)
assert(errorResponse6.highestProtocolVersion === null)
assert(errorResponse7.highestProtocolVersion === null)
assert(errorResponse8.highestProtocolVersion === RestSubmissionServer.PROTOCOL_VERSION)
}
test("server returns unknown fields") {
val masterUrl = startSmartServer()
val httpUrl = masterUrl.replace("spark://", "http://")
val v = RestSubmissionServer.PROTOCOL_VERSION
val submitRequestPath = s"$httpUrl/$v/submissions/create"
val oldJson = constructSubmitRequest(masterUrl).toJson
val oldFields = parse(oldJson).asInstanceOf[JObject].obj
val newFields = oldFields ++ Seq(
JField("tomato", JString("not-a-fruit")),
JField("potato", JString("not-po-tah-to"))
)
val newJson = pretty(render(JObject(newFields)))
// send two requests, one with the unknown fields and the other without
val (response1, code1) = sendHttpRequestWithResponse(submitRequestPath, "POST", oldJson)
val (response2, code2) = sendHttpRequestWithResponse(submitRequestPath, "POST", newJson)
val submitResponse1 = getSubmitResponse(response1)
val submitResponse2 = getSubmitResponse(response2)
assert(code1 === HttpServletResponse.SC_OK)
assert(code2 === HttpServletResponse.SC_OK)
// only the response to the modified request should have unknown fields set
assert(submitResponse1.unknownFields === null)
assert(submitResponse2.unknownFields === Array("tomato", "potato"))
}
test("client handles faulty server") {
val masterUrl = startFaultyServer()
val client = new RestSubmissionClient(masterUrl)
val httpUrl = masterUrl.replace("spark://", "http://")
val v = RestSubmissionServer.PROTOCOL_VERSION
val submitRequestPath = s"$httpUrl/$v/submissions/create"
val killRequestPath = s"$httpUrl/$v/submissions/kill/anything"
val statusRequestPath = s"$httpUrl/$v/submissions/status/anything"
val json = constructSubmitRequest(masterUrl).toJson
// server returns malformed response unwittingly
// client should throw an appropriate exception to indicate server failure
val conn1 = sendHttpRequest(submitRequestPath, "POST", json)
intercept[SubmitRestProtocolException] { client.readResponse(conn1) }
// server attempts to send invalid response, but fails internally on validation
// client should receive an error response as server is able to recover
val conn2 = sendHttpRequest(killRequestPath, "POST")
val response2 = client.readResponse(conn2)
getErrorResponse(response2)
assert(conn2.getResponseCode === HttpServletResponse.SC_INTERNAL_SERVER_ERROR)
// server explodes internally beyond recovery
// client should throw an appropriate exception to indicate server failure
val conn3 = sendHttpRequest(statusRequestPath, "GET")
intercept[SubmitRestProtocolException] { client.readResponse(conn3) } // empty response
assert(conn3.getResponseCode === HttpServletResponse.SC_INTERNAL_SERVER_ERROR)
}
/* --------------------- *
| Helper methods |
* --------------------- */
/** Start a dummy server that responds to requests using the specified parameters. */
private def startDummyServer(
submitId: String = "fake-driver-id",
submitMessage: String = "driver is submitted",
killMessage: String = "driver is killed",
state: DriverState = FINISHED,
exception: Option[Exception] = None): String = {
startServer(new DummyMaster(_, submitId, submitMessage, killMessage, state, exception))
}
/** Start a smarter dummy server that keeps track of submitted driver states. */
private def startSmartServer(): String = {
startServer(new SmarterMaster(_))
}
/** Start a dummy server that is faulty in many ways... */
private def startFaultyServer(): String = {
startServer(new DummyMaster(_), faulty = true)
}
/**
* Start a [[StandaloneRestServer]] that communicates with the given endpoint.
 * If `faulty` is true, start a [[FaultyStandaloneRestServer]] instead.
* Return the master URL that corresponds to the address of this server.
*/
private def startServer(
makeFakeMaster: RpcEnv => RpcEndpoint, faulty: Boolean = false): String = {
val name = "test-standalone-rest-protocol"
val conf = new SparkConf
val localhost = Utils.localHostName()
val securityManager = new SecurityManager(conf)
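    // Port 0 below asks the OS for ephemeral ports, both for the RPC env and for
    // the REST server, so concurrent test runs never collide on a fixed port.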
val _rpcEnv = RpcEnv.create(name, localhost, 0, conf, securityManager)
val fakeMasterRef = _rpcEnv.setupEndpoint("fake-master", makeFakeMaster(_rpcEnv))
val _server =
if (faulty) {
new FaultyStandaloneRestServer(localhost, 0, conf, fakeMasterRef, "spark://fake:7077")
} else {
new StandaloneRestServer(localhost, 0, conf, fakeMasterRef, "spark://fake:7077")
}
val port = _server.start()
// set these to clean them up after every test
rpcEnv = Some(_rpcEnv)
server = Some(_server)
s"spark://$localhost:$port"
}
/** Create a submit request with real parameters using Spark submit. */
private def constructSubmitRequest(
masterUrl: String,
appArgs: Array[String] = Array.empty): CreateSubmissionRequest = {
val mainClass = "main-class-not-used"
val mainJar = "dummy-jar-not-used.jar"
val commandLineArgs = Array(
"--deploy-mode", "cluster",
"--master", masterUrl,
"--name", mainClass,
"--class", mainClass,
mainJar) ++ appArgs
val args = new SparkSubmitArguments(commandLineArgs)
val (_, _, sparkProperties, _) = SparkSubmit.prepareSubmitEnvironment(args)
new RestSubmissionClient("spark://host:port").constructSubmitRequest(
mainJar, mainClass, appArgs, sparkProperties.toMap, Map.empty)
}
/** Return the response as a submit response, or fail with error otherwise. */
private def getSubmitResponse(response: SubmitRestProtocolResponse): CreateSubmissionResponse = {
response match {
case s: CreateSubmissionResponse => s
case e: ErrorResponse => fail(s"Server returned error: ${e.message}")
case r => fail(s"Expected submit response. Actual: ${r.toJson}")
}
}
/** Return the response as a kill response, or fail with error otherwise. */
private def getKillResponse(response: SubmitRestProtocolResponse): KillSubmissionResponse = {
response match {
case k: KillSubmissionResponse => k
case e: ErrorResponse => fail(s"Server returned error: ${e.message}")
case r => fail(s"Expected kill response. Actual: ${r.toJson}")
}
}
/** Return the response as a status response, or fail with error otherwise. */
private def getStatusResponse(response: SubmitRestProtocolResponse): SubmissionStatusResponse = {
response match {
case s: SubmissionStatusResponse => s
case e: ErrorResponse => fail(s"Server returned error: ${e.message}")
case r => fail(s"Expected status response. Actual: ${r.toJson}")
}
}
/** Return the response as an error response, or fail if the response was not an error. */
private def getErrorResponse(response: SubmitRestProtocolResponse): ErrorResponse = {
response match {
case e: ErrorResponse => e
case r => fail(s"Expected error response. Actual: ${r.toJson}")
}
}
/**
* Send an HTTP request to the given URL using the method and the body specified.
* Return the connection object.
*/
private def sendHttpRequest(
url: String,
method: String,
body: String = ""): HttpURLConnection = {
val conn = new URL(url).openConnection().asInstanceOf[HttpURLConnection]
conn.setRequestMethod(method)
if (body.nonEmpty) {
conn.setDoOutput(true)
val out = new DataOutputStream(conn.getOutputStream)
out.write(body.getBytes(Charsets.UTF_8))
out.close()
}
conn
}
/**
* Send an HTTP request to the given URL using the method and the body specified.
* Return a 2-tuple of the response message from the server and the response code.
*/
private def sendHttpRequestWithResponse(
url: String,
method: String,
body: String = ""): (SubmitRestProtocolResponse, Int) = {
val conn = sendHttpRequest(url, method, body)
(new RestSubmissionClient("spark://host:port").readResponse(conn), conn.getResponseCode)
}
}
/**
* A mock standalone Master that responds with dummy messages.
* In all responses, the success parameter is always true.
*/
private class DummyMaster(
override val rpcEnv: RpcEnv,
submitId: String = "fake-driver-id",
submitMessage: String = "submitted",
killMessage: String = "killed",
state: DriverState = FINISHED,
exception: Option[Exception] = None)
extends RpcEndpoint {
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
case RequestSubmitDriver(driverDesc) =>
context.reply(SubmitDriverResponse(self, success = true, Some(submitId), submitMessage))
case RequestKillDriver(driverId) =>
context.reply(KillDriverResponse(self, driverId, success = true, killMessage))
case RequestDriverStatus(driverId) =>
context.reply(DriverStatusResponse(found = true, Some(state), None, None, exception))
}
}
/**
* A mock standalone Master that keeps track of drivers that have been submitted.
*
* If a driver is submitted, its state is immediately set to RUNNING.
* If an existing driver is killed, its state is immediately set to KILLED.
* If an existing driver's status is requested, its state is returned in the response.
* Submits are always successful while kills and status requests are successful only
* if the driver was submitted in the past.
*/
private class SmarterMaster(override val rpcEnv: RpcEnv) extends ThreadSafeRpcEndpoint {
private var counter: Int = 0
private val submittedDrivers = new mutable.HashMap[String, DriverState]
override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
case RequestSubmitDriver(driverDesc) =>
val driverId = s"driver-$counter"
submittedDrivers(driverId) = RUNNING
counter += 1
context.reply(SubmitDriverResponse(self, success = true, Some(driverId), "submitted"))
case RequestKillDriver(driverId) =>
val success = submittedDrivers.contains(driverId)
if (success) {
submittedDrivers(driverId) = KILLED
}
context.reply(KillDriverResponse(self, driverId, success, "killed"))
case RequestDriverStatus(driverId) =>
val found = submittedDrivers.contains(driverId)
val state = submittedDrivers.get(driverId)
context.reply(DriverStatusResponse(found, state, None, None, None))
}
}
/**
* A [[StandaloneRestServer]] that is faulty in many ways.
*
 * When handling a submit request, the server returns malformed JSON.
 * When handling a kill request, the server attempts to return an invalid response, which fails validation internally.
 * When handling a status request, the server throws an internal exception.
 * The purpose of this class is to test that the client handles these cases gracefully.
*/
private class FaultyStandaloneRestServer(
host: String,
requestedPort: Int,
masterConf: SparkConf,
masterEndpoint: RpcEndpointRef,
masterUrl: String)
extends RestSubmissionServer(host, requestedPort, masterConf) {
protected override val submitRequestServlet = new MalformedSubmitServlet
protected override val killRequestServlet = new InvalidKillServlet
protected override val statusRequestServlet = new ExplodingStatusServlet
/** A faulty servlet that produces malformed responses. */
class MalformedSubmitServlet
extends StandaloneSubmitRequestServlet(masterEndpoint, masterUrl, masterConf) {
protected override def sendResponse(
responseMessage: SubmitRestProtocolResponse,
responseServlet: HttpServletResponse): Unit = {
val badJson = responseMessage.toJson.drop(10).dropRight(20)
responseServlet.getWriter.write(badJson)
}
}
/** A faulty servlet that produces invalid responses. */
class InvalidKillServlet extends StandaloneKillRequestServlet(masterEndpoint, masterConf) {
protected override def handleKill(submissionId: String): KillSubmissionResponse = {
val k = super.handleKill(submissionId)
k.submissionId = null
k
}
}
/** A faulty status servlet that explodes. */
class ExplodingStatusServlet extends StandaloneStatusRequestServlet(masterEndpoint, masterConf) {
private def explode: Int = 1 / 0
protected override def handleStatus(submissionId: String): SubmissionStatusResponse = {
val s = super.handleStatus(submissionId)
s.workerId = explode.toString
s
}
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/test/scala/org/apache/spark/deploy/rest/StandaloneRestSubmitSuite.scala | Scala | apache-2.0 | 28,117 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.env
object DummyProdEnvironmentResolver extends EnvironmentResolver {
override def resolve: Environment = PROD
override def name: String = "DummyProdEnvironmentResolver"
/**
* Java API for obtaining the instance.
* @return This instance
*/
def get: EnvironmentResolver = this
}
object DummyQAEnvironmentResolver extends EnvironmentResolver {
override def resolve: Environment = QA
override def name: String = "DummyQAEnvironmentResolver"
/**
* Java API for obtaining the instance.
* @return This instance
*/
def get: EnvironmentResolver = this
}
object DummyNotResolveEnvironmentResolver extends EnvironmentResolver {
override def resolve: Environment = Default
override def name: String = "DummyNotResolveEnvironmentResolver"
/**
* Java API for obtaining the instance.
* @return This instance
*/
def get: EnvironmentResolver = this
}
| akara/squbs | squbs-ext/src/test/scala/org/squbs/env/DummyEnvironmentResolver.scala | Scala | apache-2.0 | 1,516 |
package lila.tournament
import scala.concurrent.duration.FiniteDuration
import org.joda.time.DateTime
import lila.db.BSON._
import lila.user.{ User, UserRepo }
final class Winners(
mongoCache: lila.memo.MongoCache.Builder,
ttl: FiniteDuration) {
private implicit val WinnerBSONHandler =
reactivemongo.bson.Macros.handler[Winner]
private val scheduledCache = mongoCache[Int, List[Winner]](
prefix = "tournament:winner",
f = fetchScheduled,
timeToLive = ttl)
import Schedule.Freq
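  // Builds the winners list over the last month: up to four recently finished
  // tournaments for each major frequency (Marathon, Monthly, Weekly, Daily),
  // topped up with Hourly tournament winners until roughly `nb` entries.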
private def fetchScheduled(nb: Int): Fu[List[Winner]] = {
val since = DateTime.now minusMonths 1
List(Freq.Marathon, Freq.Monthly, Freq.Weekly, Freq.Daily).map { freq =>
TournamentRepo.lastFinishedScheduledByFreq(freq, since, 4) flatMap toursToWinners
}.sequenceFu map (_.flatten) flatMap { winners =>
TournamentRepo.lastFinishedScheduledByFreq(
Freq.Hourly, since, math.max(0, nb - winners.size)
) flatMap toursToWinners map (winners ::: _)
}
}
private def toursToWinners(tours: List[Tournament]): Fu[List[Winner]] =
tours.map { tour =>
PlayerRepo winner tour.id flatMap {
case Some(player) => UserRepo isEngine player.userId map { engine =>
!engine option Winner(tour.id, tour.name, player.userId)
}
case _ => fuccess(none)
}
}.sequenceFu map (_.flatten)
def scheduled(nb: Int): Fu[List[Winner]] = scheduledCache apply nb
}
| r0k3/lila | modules/tournament/src/main/Winners.scala | Scala | mit | 1,446 |
package skinny.mailer
import org.scalatest._
class BodyTypeSpec extends FlatSpec with Matchers {
it should "have Text type" in {
Text.extension should equal("text")
}
it should "have Html type" in {
Html.extension should equal("html")
}
}
| holycattle/skinny-framework | mailer/src/test/scala/skinny/mailer/BodyTypeSpec.scala | Scala | mit | 260 |
package io.udash.testing
import io.udash._
import scala.collection.mutable
class TestViewFactoryRegistry(vp: Map[TestState, () => ViewFactory[_ <: TestState]],
default: () => ViewFactory[_ <: TestState]) extends ViewFactoryRegistry[TestState] {
var statesHistory: mutable.ArrayBuffer[TestState] = mutable.ArrayBuffer.empty
override def matchStateToResolver(state: TestState): ViewFactory[_ <: TestState] = {
if (state == ThrowExceptionState) throw new RuntimeException("ThrowExceptionState")
statesHistory.append(state)
vp.get(state).map(_.apply()).getOrElse(default())
}
}
| UdashFramework/udash-core | core/.js/src/test/scala/io/udash/testing/TestViewFactoryRegistry.scala | Scala | apache-2.0 | 598 |
package com.omega.config
import org.springframework.context.annotation.Bean
import org.springframework.context.annotation.ComponentScan
import org.springframework.context.annotation.Configuration
import org.springframework.web.servlet.config.annotation.DefaultServletHandlerConfigurer
import org.springframework.web.servlet.config.annotation.EnableWebMvc
import org.springframework.web.servlet.config.annotation.ResourceHandlerRegistry
import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter
import org.springframework.web.servlet.view.InternalResourceViewResolver
import org.springframework.web.servlet.view.JstlView
@Configuration("OmegaWebApplicationConfig")
@EnableWebMvc
@ComponentScan(basePackages = Array("com.omega.controllers"))
class OmegaWebApplicationConfig extends WebMvcConfigurerAdapter {
override def addResourceHandlers(registry: ResourceHandlerRegistry): Unit = {
// registry.addResourceHandler("/assets/**").addResourceLocations("classpath:/META-INF/resources/webjars/").setCachePeriod(31556926)
registry.addResourceHandler("/resources/**").addResourceLocations("/resources/").setCachePeriod(31556926)
}
override def configureDefaultServletHandling(configurer: DefaultServletHandlerConfigurer) {
configurer.enable()
}
@Bean
def theJspViewResolver(): InternalResourceViewResolver = {
val viewResolver = new InternalResourceViewResolver
viewResolver.setViewClass(classOf[JstlView])
viewResolver.setPrefix("/WEB-INF/views/jsp/")
viewResolver.setSuffix(".jsp")
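    // With this prefix/suffix, a view name such as "home" resolves to /WEB-INF/views/jsp/home.jsp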
viewResolver.setOrder(1)
viewResolver
}
}
| milind-chawla/Omega | src/main/scala/com/omega/config/OmegaWebApplicationConfig.scala | Scala | mit | 1,655
/** soar
*
* Copyright (c) 2017 Hugo Firth
* Email: <[email protected]/>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.ac.ncl.la.soar
import cats._
import cats.implicits._
import simulacrum.typeclass
import uk.ac.ncl.la.soar.data.{ModuleRecords, StudentRecords}
import scala.collection.immutable.SortedMap
/** Record typeclass. Types which form a Record include [[StudentRecords]] (which is a record of a student's performance
* across a collection of modules) and [[ModuleRecords]] (which is a record of the performance of a collection of
* students on a single module).
*
 * Records require one primary feature: a collection of entries (which are Tuple2s / Map entries).
*
 * As such the typeclass is parameterised by a binary type constructor `F[_, _]`: the key and value types of an entry in a collection of type `F`.
*
* TODO: Look into Fix and CoFree datatypes
* TODO: Look into which typeclasses this TC should extend, if any? Foldable if Traverse assumes Functor? There are
* useful Records which are not valid Functors
*
* @author hugofirth
*/
trait Record[F[_, _]] extends Any with Serializable { self =>
/** Import companion object */
import Record._
/** Import filter syntax */
import Filter._
//TODO: Look to define typeclass in terms of foldable if possible. Currently defined in terms of Iterable
  /** Get a specific element of a record. Returns None if that element does not exist.
*
* Decided against an `iterator(r).find(_ == k)` based implementation as complexity is still O(N) and most records will be
* map types, in which case `toMap(r).get` is amortized O(1)
*/
def get[A, B](r: F[A, B], k: A): Option[B] = self.toMap(r).get(k)
/** Return iterator of tuples for record entries. Cannot define in terms of Traverse instance as that fixes on `A` */
def iterator[A, B](r: F[A, B]): Iterator[(A, B)]
/** Produce a List of tuples from a record instance */
def toList[A, B](r: F[A, B]): List[(A, B)] = self.iterator(r).toList
  /** Produce a Map from a record instance - should be overridden by types whose internal data structure *is* a Map */
def toMap[A, B](r: F[A, B]): Map[A, B] = self.iterator(r).toMap
  /** Truncate records by key up to a given key, inclusive. Note that the key need not explicitly exist in the record */
//TODO: Look into partial unification to work out why inference is falling over for *Key methods
def toKey[A: Order, B](r: F[A, B], lim: A)(implicit ev: Filter[F[?, B]]): F[A, B] = ev.filter(r)(_ <= lim)
  /** Truncate records by key from a given key, inclusive. Note that the key need not explicitly exist in the record */
def fromKey[A: Order, B](r: F[A, B], lim: A)(implicit ev: Filter[F[?, B]]): F[A, B] = ev.filter(r)(_ >= lim)
  /** Truncate records by value up to a given value, inclusive. */
def to[A, B: Order](r: F[A, B], lim: B)(implicit ev: Filter[F[A, ?]]): F[A, B] = r.filter(_ <= lim)
  /** Truncate records by value from a given value, inclusive. */
def from[A, B: Order](r: F[A, B], lim: B)(implicit ev: Filter[F[A, ?]]): F[A, B] = r.filter(_ >= lim)
}
/** Record */
object Record extends RecordInstances {
/** Access an implicit `Record`. */
@inline final def apply[F[_, _]](implicit ev: Record[F]): Record[F] = ev
/** Implicit syntax enrichment */
final implicit class RecordOps[F[_,_], A, B](val r: F[A, B]) extends AnyVal {
def get(k: A)(implicit ev: Record[F]) = ev.get(r, k)
def iterator(implicit ev: Record[F]) = ev.iterator(r)
def toList(implicit ev: Record[F]) = ev.toList(r)
def toMap(implicit ev: Record[F]) = ev.toMap(r)
def toKey(lim: A)(implicit ev: Record[F], ev2: Filter[F[?, B]], ev3: Order[A]) = ev.toKey(r, lim)
}
}
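/*
 * Illustrative usage sketch (not part of the original file): with the
 * `sortedMapRecord` instance below in implicit scope, a SortedMap can be used
 * through the typeclass directly, e.g.
 *
 *   import scala.collection.immutable.SortedMap
 *   val grades = SortedMap("CSC1021" -> 64.0, "CSC2024" -> 71.0)
 *   Record[SortedMap].get(grades, "CSC1021")   // Some(64.0)
 *   Record[SortedMap].toList(grades)           // List((CSC1021,64.0), (CSC2024,71.0))
 */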
/** Highest priority Record instances */
//TODO: Find out the right way to do this - sealed abstract class or sealed trait - if the latter then why?
sealed abstract class RecordInstances extends LowPriorityRecordInstances {
//TODO: Move this to an instances package for easy global import - how to work out implicit resolution then?
implicit val sortedMapRecord: Record[SortedMap] = new Record[SortedMap] {
override def iterator[A, B](r: SortedMap[A, B]): Iterator[(A, B)] = r.iterator
override def toMap[A, B](r: SortedMap[A, B]): Map[A, B] = r
    /** Truncate records by key up to a given key, inclusive. Note that the key need not explicitly exist in the record */
override def toKey[A: Order, B](r: SortedMap[A, B], lim: A)
(implicit ev: Filter[SortedMap[?, B]]): SortedMap[A, B] = r.to(lim)
    /** Truncate records by key from a given key, inclusive. Note that the key need not explicitly exist in the record */
override def fromKey[A: Order, B](r: SortedMap[A, B], lim: A)
(implicit ev: Filter[SortedMap[?, B]]): SortedMap[A, B] = r.from(lim)
}
}
/** Lower priority Record instances */
sealed abstract class LowPriorityRecordInstances {
//Define a Record instance for any Iterable type - possibly a massively unprincipled thing to do?
implicit def iterableRecord[F[A, B] <: Iterable[(A, B)]]: Record[F] = new Record[F] {
def iterator[A, B](r: F[A, B]): Iterator[(A, B)] = r.iterator
}
}
| NewcastleComputingScience/student-outcome-accelerator | core/src/main/scala/uk/ac/ncl/la/soar/Record.scala | Scala | apache-2.0 | 5,708 |
package controllers
import play.api._
import play.api.mvc._
import play.api.mvc.Session
import play.api.db.slick._
import play.api.data.{ Form }
import play.api.data.Forms._
import models.LoginInfo
import models.LDAPAuthentication
import models.persistance.UserDAO
object Login extends Controller {
def showLogin = Action { implicit request =>
val form = if (request.session.get("errorMessage").isDefined && request.session.get("errorMessage").get.equals("Not authorized")) {
loginForm.bind(request.session.data)
} else {
loginForm
}
Ok(views.html.loginPage(form))
}
def processForm = DBAction { implicit rs =>
loginForm.bindFromRequest().fold(
formWithErrors => Redirect(routes.Login.showLogin()).withSession("errorMessage" -> "You must provide a username"),
login => {
if (authenticateLogin(login)) {
if (!UserDAO.existsUserWithName(login.username)(rs.dbSession)) {
UserDAO.createUserWithName(login.username)(rs.dbSession);
}
Redirect(routes.Application.entrance()).withSession(("loginTime" -> "Just a few seconds ago"), ("username" -> login.username))
} else {
Redirect(routes.Login.showLogin()).withSession(("errorMessage" -> "Not authorized"), ("username" -> login.username), ("password" -> ""))
}
})
}
  def authenticateLogin(login: LoginInfo): Boolean = {
    LDAPAuthentication.validateUser(login.username, login.password)
  }
val loginForm = Form(mapping(
"username" -> nonEmptyText,
"password" -> text)(LoginInfo.apply)(LoginInfo.unapply))
}
| seqprodbio/restoule | app/controllers/Login.scala | Scala | gpl-3.0 | 1,694
package service.bitcoin
import akka.event.Logging
import akka.http.scaladsl.marshalling.ToResponseMarshallable
import akka.http.scaladsl.model.HttpEntity
import akka.http.scaladsl.model.MediaTypes._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.stream.scaladsl.{Flow, Source}
import akka.util.ByteString
import model.{Periodic, Trade}
import service.bitcoin.FlowGraphs._
import service.http.HttpService
import stream.csv._
import stream.http._
/**
* Calculates Open-High-Low-Close-Volume aggregates of Bitcoin trades.
*
* This class demonstrates the use of flows in transforming a response entity from an underlying service and
* the parsing of complex path segments.
*
*/
trait BitcoinTradesService extends HttpService {
private lazy val log = Logging(system, classOf[BitcoinTradesService])
protected lazy val tradesClient: BitcoinTradesClient = BitcoinChartsTradesClient()
abstract override def route =
(get & pathPrefix("bitcoin")) {
path("price" / PeriodicSegment / Segment / Segment) {
(periodic, exchange, currency) =>
val symbol = exchange + currency
complete(fetch(symbol, periodicOHLCV(periodic)))
} ~
(path("trades" / Segment / Segment) & parameter('raw.as[Boolean] ? false)) {
(exchange, currency, raw) =>
val symbol = exchange + currency
raw match {
case true => complete(fetchRaw(symbol))
case false => complete(fetch(symbol, tradeToCsv))
}
}
} ~ super.route
// Parse a path segment as a Periodic
val PeriodicSegment = Segment.tflatMap {
case Tuple1(s) => Periodic.unapply(s).map(p => Tuple1(p))
case _ => None
}
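  // Example request shapes served by the route above (illustrative only; the
  // <periodic> segment must be a token that model.Periodic.unapply recognises):
  //   GET /bitcoin/price/<periodic>/<exchange>/<currency>   -> periodic OHLCV rows as CSV
  //   GET /bitcoin/trades/<exchange>/<currency>             -> individual trades as CSV
  //   GET /bitcoin/trades/<exchange>/<currency>?raw=true    -> raw upstream history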
private val tradeToCsv = Flow[Trade].via(trade.toRow()).via(formatRow)
private def periodicOHLCV(periodic: Periodic): Flow[Trade, ByteString, Unit] =
ohlcv.periodic(periodic).via(ohlcv.intervalToRow).via(formatRow)
private def fetch(symbol: String, transformer: Flow[Trade, ByteString, Any]) =
handle(tradesClient.history(symbol), transformer)
private def fetchRaw(symbol: String) =
handle(tradesClient.rawHistory(symbol), Flow[ByteString])
private def handle[T](response: BitcoinTradesClient#Response[Source[T, _]], transformer: Flow[T, ByteString, Any]) = {
response.map[ToResponseMarshallable] {
// Map to text/plain instead of stream.csv for easy display in browser
case Right(source) => HttpEntity.Chunked.fromData(`text/plain`, source.via(transformer).via(chunkSize()))
case Left(err @ (NotFound, _)) => err
case Left(_) => ServiceUnavailable -> "Service unavailable - error calling an underlying service"
}
}
}
| lancearlaus/akka-streams-http-presentation | src/main/scala/service/bitcoin/BitcoinTradesService.scala | Scala | apache-2.0 | 2,716 |
package cn.hjmao.learning.akka.http.demo.service
import cn.hjmao.learning.akka.http.demo.model.{UserEntity, UserEntityUpdate}
import cn.hjmao.learning.akka.http.demo.model.db.{DataSource, UserEntityTable}
import scala.concurrent.{ExecutionContext, Future}
/**
* Created by hjmao on 17-5-10.
*/
class UserService(val datasource: DataSource)
(implicit executionContext: ExecutionContext) extends UserEntityTable {
import datasource._
import datasource.driver.api._
def getUsers(): Future[Seq[UserEntity]] = db.run(users.result)
def getUserByUsername(username: String): Future[Option[UserEntity]] = {
db.run(users.filter(_.username === username).result.headOption)
}
def createUser(user: UserEntity): Future[UserEntity] = {
db.run(users returning users += user)
}
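  // Read-merge-write: fetch the current row, merge in the partial update and
  // persist it; resolves to None when no user with that username exists.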
def updateUser(username: String, userUpdate: UserEntityUpdate): Future[Option[UserEntity]] = {
getUserByUsername(username).flatMap {
case Some(user) =>
val updatedUser = userUpdate.merge(user)
db.run(users.filter(_.username === username).update(updatedUser)).map(_ => Some(updatedUser))
case None => Future.successful(None)
}
}
def deleteUser(username: String): Future[Int] = {
db.run(users.filter(_.username === username).delete)
}
}
| huajianmao/learning | framework/akka-http/demo/src/main/scala/cn/hjmao/learning/akka/http/demo/service/UserService.scala | Scala | mit | 1,294 |
package ml.sparkling.graph.operators.measures.vertex.betweenness.hua
import java.nio.file.Files
import ml.sparkling.graph.operators.MeasureTest
import ml.sparkling.graph.operators.measures.vertex.betweenness.edmonds.EdmondsBC
import org.apache.commons.io.FileUtils
import org.apache.spark.SparkContext
import org.apache.spark.graphx.Graph
import org.scalatest.tagobjects.Slow
/**
* Created by mth on 6/29/17.
*/
class BetweennessHua$Test (implicit sc: SparkContext) extends MeasureTest {
val tempDir = Files.createTempDirectory("spark-checkpoint")
override def beforeAll() = {
sc.setCheckpointDir(tempDir.toAbsolutePath.toString)
}
override def afterAll() = {
FileUtils.deleteDirectory(tempDir.toFile)
}
"Hua betweenness centrality for random graph" should "be correctly calculated" in {
Given("graph")
val filePath = getClass.getResource("/graphs/graph_ER_15")
val graph: Graph[Int, Int] = loadGraph(filePath.toString)
When("Computes betweenness")
val result = HuaBC.computeBC(graph)
Then("Should calculate betweenness correctly")
val bcFile = getClass.getResource("/graphs/graph_ER_15_bc")
val bcCorrectValues = sc.textFile(bcFile.getPath)
.filter(_.nonEmpty)
      .map(l => { val t = l.split("\t", 2); (t(0).toInt, t(1).toDouble) })
.sortBy({ case (vId, data) => vId })
.map({ case (vId, data) => data}).collect()
val bcValues = result.sortBy({ case (vId, data) => vId })
.map({ case (vId, data) => data }).collect()
bcCorrectValues.zip(bcValues).foreach({ case (a, b) =>
a should be(b +- 1e-5)
})
result.unpersist(false)
}
"Hua betweenness centrality for random graph" should "take no longer then Edmonds" taggedAs(Slow) in {
Given("graph")
val filePath = getClass.getResource("/graphs/graph_ER_15")
val graph: Graph[Int, Int] = loadGraph(filePath.toString)
When("computes betwenness centrality")
val (_, edmondsTime) = time("Edmonds algorithm for betweenness centrality")(EdmondsBC.computeBC(graph))
val (_, huaTime) = time("Hua algorithm for betweenness centrality")(HuaBC.computeBC(graph))
Then("Hua algorithm should be faster")
huaTime should be <= edmondsTime
}
}
| sparkling-graph/sparkling-graph | operators/src/test/scala/ml/sparkling/graph/operators/measures/vertex/betweenness/hua/BetweennessHua$Test.scala | Scala | bsd-2-clause | 2,222
package com.lateralthoughts.points.controllers
import java.util.UUID
import com.lateralthoughts.points.model.JsonFormatter
import com.lateralthoughts.points.model.records.RewardingAction
import org.json4s.jackson.JsonMethods
import org.scalatra.test.scalatest._
class RewardingActionControllerTest extends ScalatraSuite with InitServlet with JsonFormatter {
"Calling get /actions/" should "retrieve list of available rewarding actions" in {
get("/actions/") {
status should equal(200)
val listOfRewardingActions = JsonMethods.parse(body).extract[List[RewardingAction]]
listOfRewardingActions should have length 3
}
}
"Calling get /actions/:actionId" should "return bad request when action id is not valid" in {
val notValidId = "notValidId"
get(s"/actions/$notValidId") {
status should equal(400)
body should equal( s"""{"code":"UUIDNotValid","message":"Invalid UUID string: $notValidId"}""")
}
}
it should "return not found when action is not found" in {
val nonExistentActionId = "00000000-0000-0000-0000-000000000000"
get(s"/actions/$nonExistentActionId") {
status should equal(404)
body should equal( s"""{"code":"RecordNotFound","message":"No element with id $nonExistentActionId found"}""")
}
}
it should "return a rewarding action when action is found" in {
val existentActionId = "1210955b-27b1-40c2-9d33-81601fbcfc31"
get(s"/actions/$existentActionId") {
status should equal(200)
val rewardingAction = JsonMethods.parse(body).extract[RewardingAction]
rewardingAction.id.toString should equal (existentActionId)
}
}
"Calling post /actions/" should "return bad request when body is empty" in {
post("/actions/") {
status should equal(400)
body should equal( """{"code":"JsonNotValid","message":"The request body is not a valid JSON object"}""")
}
}
it should "return bad request when body is not a rewarding action input" in {
val json = "{}"
post("/actions/", json.toCharArray.map(_.toByte)) {
status should equal(400)
body should equal( """{"code":"InputObjectNotValid","message":"The request body is not a JSON object representing rewardingAction"}""")
}
}
it should "return bad request when body does not contain a valid category" in {
val categoryId = UUID.randomUUID()
val json =
s"""
{
"name":"myAction",
"category": {
"id":"$categoryId"
},
"description":"Description of my action",
"points":1
}
""".stripMargin
post("/actions/", json.toCharArray.map(_.toByte)) {
status should equal(400)
body should equal( """{"code":"InputObjectIncomplete","message":"Unable to create category due to : The following fields weren't correctly filled in the request : name, description"}""")
}
}
it should "return created when a rewarding action is created" in {
val categoryId = UUID.randomUUID()
val json =
s"""
{
"name":"myAction",
"category": {
"id":"$categoryId",
"name":"myCategory",
"description":"Description of my category"
},
"description":"Description of my action",
"points":1
}
""".stripMargin
post("/actions/", json.toCharArray.map(_.toByte)) {
status should equal(201)
}
}
"Calling put /actions/:actionId" should "return bad request when trying to update an action with a not valid action id" in {
val notValidId = "notValidId"
put(s"/actions/$notValidId") {
status should equal(400)
body should equal( s"""{"code":"UUIDNotValid","message":"Invalid UUID string: $notValidId"}""")
}
}
it should "return bad request when body is empty" in {
val actionId = "C56A4180-65AA-42EC-A945-5FD21DEC0538"
put(s"/actions/$actionId") {
status should equal(400)
body should equal( """{"code":"JsonNotValid","message":"The request body is not a valid JSON object"}""")
}
}
it should "return not found when rewarding action to be updated is not found" in {
val nonExistentActionId = "00000000-0000-0000-0000-000000000000"
val json = "{}"
put(s"/actions/$nonExistentActionId", json.toCharArray.map(_.toByte)) {
status should equal(404)
body should equal( s"""{"code":"RecordNotFound","message":"No rewarding action with id $nonExistentActionId found"}""")
}
}
it should "return ok when rewarding action is updated" in {
val actionId = "1210955b-27b1-40c2-9d33-81601fbcfc31"
val json = """{"description":"my new description"}"""
put(s"/actions/$actionId", json.toCharArray.map(_.toByte)) {
status should equal(200)
}
}
"Calling delete /actions/:actionId" should "return bad request when trying to delete an action with a not valid action id" in {
val notValidId = "notValidId"
delete(s"/actions/$notValidId") {
status should equal(400)
body should equal( s"""{"code":"UUIDNotValid","message":"Invalid UUID string: $notValidId"}""")
}
}
it should "return no content when trying to delete an action that doesn't exist" in {
val nonExistentActionId = "00000000-0000-0000-0000-000000000000"
delete(s"/actions/$nonExistentActionId") {
status should equal(204)
}
}
it should "return no content when deleting an action" in {
val nonExistentActionId = "a6c31aff-a227-403d-9ace-f90ef993262d"
delete(s"/actions/$nonExistentActionId") {
status should equal(204)
}
}
}
| vincentdoba/points | points-server/src/test/scala/com/lateralthoughts/points/controllers/RewardingActionControllerTest.scala | Scala | mit | 5,587 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package sys
import scala.collection.{mutable, Iterator}
import scala.jdk.CollectionConverters._
import java.security.AccessControlException
import scala.language.implicitConversions
/** A bidirectional map wrapping the java System properties.
* Changes to System properties will be immediately visible in the map,
* and modifications made to the map will be immediately applied to the
* System properties. If a security manager is in place which prevents
* the properties from being read or written, the AccessControlException
* will be caught and discarded.
* @define Coll `collection.mutable.Map`
* @define coll mutable map
*/
class SystemProperties
extends mutable.AbstractMap[String, String] {
override def empty: mutable.Map[String, String] = mutable.Map[String, String]()
override def default(key: String): String = null
def iterator: Iterator[(String, String)] = wrapAccess {
val ps = System.getProperties()
names map (k => (k, ps getProperty k)) filter (_._2 ne null)
} getOrElse Iterator.empty
override def isEmpty: Boolean = iterator.isEmpty
def names: Iterator[String] = wrapAccess (
System.getProperties().stringPropertyNames().asScala.iterator
) getOrElse Iterator.empty
def get(key: String): Option[String] =
wrapAccess(Option(System.getProperty(key))) flatMap (x => x)
override def contains(key: String): Boolean =
wrapAccess(super.contains(key)) exists (x => x)
override def clear(): Unit = wrapAccess(System.getProperties().clear())
def subtractOne (key: String): this.type = { wrapAccess(System.clearProperty(key)) ; this }
def addOne (kv: (String, String)): this.type = { wrapAccess(System.setProperty(kv._1, kv._2)) ; this }
def wrapAccess[T](body: => T): Option[T] =
try Some(body) catch { case _: AccessControlException => None }
}
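/*
 * Illustrative usage sketch (not part of the original source): the wrapper
 * behaves like any other mutable.Map, with reads and writes applied directly
 * to the JVM system properties, e.g.
 *
 *   val props = new SystemProperties
 *   props.get("java.version")        // Some(...) on a typical JVM
 *   props("example.key") = "on"      // delegates to System.setProperty
 *   props -= "example.key"           // delegates to System.clearProperty
 */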
/** The values in SystemProperties can be used to access and manipulate
* designated system properties. See `scala.sys.Prop` for particulars.
* @example {{{
* if (!headless.isSet) headless.enable()
* }}}
*/
object SystemProperties {
/** An unenforceable, advisory only place to do some synchronization when
* mutating system properties.
*/
def exclusively[T](body: => T): T = this synchronized body
implicit def systemPropertiesToCompanion(p: SystemProperties): SystemProperties.type = this
private final val HeadlessKey = "java.awt.headless"
private final val PreferIPv4StackKey = "java.net.preferIPv4Stack"
private final val PreferIPv6AddressesKey = "java.net.preferIPv6Addresses"
private final val NoTraceSuppressionKey = "scala.control.noTraceSuppression"
def help(key: String): String = key match {
case HeadlessKey => "system should not utilize a display device"
case PreferIPv4StackKey => "system should prefer IPv4 sockets"
case PreferIPv6AddressesKey => "system should prefer IPv6 addresses"
case NoTraceSuppressionKey => "scala should not suppress any stack trace creation"
case _ => ""
}
lazy val headless: BooleanProp = BooleanProp.keyExists(HeadlessKey)
lazy val preferIPv4Stack: BooleanProp = BooleanProp.keyExists(PreferIPv4StackKey)
lazy val preferIPv6Addresses: BooleanProp = BooleanProp.keyExists(PreferIPv6AddressesKey)
lazy val noTraceSuppression: BooleanProp = BooleanProp.valueIsTrue(NoTraceSuppressionKey)
}
| lrytz/scala | src/library/scala/sys/SystemProperties.scala | Scala | apache-2.0 | 3,703 |
/*
* Copyright 2015 RONDHUIT Co.,LTD.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.nlp4l.stats
import org.apache.lucene.analysis.core.KeywordAnalyzer
import org.apache.lucene.analysis.standard.StandardAnalyzer
import org.nlp4l.core._
import org.nlp4l.core.analysis.Analyzer
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfter, FunSuite}
import scala.util.Try
import scalax.file.Path
class WordCountsSuite extends FunSuite with BeforeAndAfterAll {
val indexDir = "/tmp/testindex_wordcountsuite"
val contentAnalyzer = Analyzer(new StandardAnalyzer)
val fieldTypes = Map("title" -> FieldType(null, true, true),
"content1" -> FieldType(contentAnalyzer, true, true, termVectors = true),
"content2" -> FieldType(contentAnalyzer, true, true, termVectors = false))
val schema = Schema(Analyzer(new KeywordAnalyzer), fieldTypes)
val docs = List(
Document(Set(
Field("title", "London Bridge A"),
Field("content1", "London Bridge is falling down, Falling down, Falling down. London Bridge is falling down, My fair lady."),
Field("content2", "Take a key and lock her up, Lock her up, Lock her up. Take a key and lock her up, My fair lady. "))),
Document(Set(
Field("title", "London Bridge B"),
Field("content1", "How will we build it up, Build it up, Build it up? How will we build it up, My fair lady?"),
Field("content2", "Build it up with silver and gold, Silver and gold, Silver and gold. Build it up with silver and gold, My fair lady."))),
Document(Set(
Field("title", "London Bridge A"),
Field("content1", "Gold and silver I have none, I have none, I have none. Gold and silver I have none, My fair lady."),
Field("content2", "Build it up with needles and pins, Needles and pins, Needles and pins. Build it up with needles and pins, My fair lady."))),
Document(Set(
Field("title", "London Bridge B"),
Field("content1", "Pins and needles bend and break, Bend and break, Bend and break. Pins and needles bend and break, My fair lady."),
Field("content2", "Build it up with wood and clay, Wood and clay, Wood and clay. Build it up with wood and clay, My fair lady."))),
Document(Set(
Field("title", "London Bridge A"),
Field("content1", "Wood and clay will wash away, Wash away, Wash away. Wood and clay will wash away, My fair lady."),
Field("content2", "Build it up with stone so strong, Stone so strong, Stone so strong. Build it up with stone so strong, My fair lady.")))
)
override def beforeAll {
deleteIndexDir()
val iw1 = IWriter(indexDir, schema)
docs.foreach{ iw1.write(_) }
iw1.close()
}
override def afterAll {
deleteIndexDir()
}
private def deleteIndexDir(): Unit = {
val path = Path.fromString(indexDir)
Try(path.deleteRecursively(continueOnFailure = false))
}
test("counts all word frequencies in all documents") {
val reader = IReader(indexDir, schema)
val counts = WordCounts.count(reader, "content1", Set.empty[String], Set.empty[Int])
assert(counts.size > 0)
assertResult(5)(counts.getOrElse("lady", 0))
assertResult(2)(counts.getOrElse("wood", 0))
assertResult(4)(counts.getOrElse("up", 0))
}
test("counts all word frequencies in one document (with term vectors)") {
val reader = IReader(indexDir, schema)
val counts = WordCounts.count(reader, "content1", Set.empty[String], Set(0))
assert(counts.size > 0)
assertResult(1)(counts.getOrElse("lady", 0))
assertResult(2)(counts.getOrElse("bridge", 0))
}
test("empty map returned if the field not exists") {
val reader = IReader(indexDir, schema)
val counts = WordCounts.count(reader, "unknown", Set.empty[String], Set.empty[Int])
assert(counts.isEmpty)
}
test("counts word frequencies for top N words") {
val reader = IReader(indexDir, schema)
val counts = WordCounts.count(reader, "content1", Set.empty[String], Set.empty[Int], maxWords = 10)
assertResult(10)(counts.size)
assertResult(5)(counts.getOrElse("fair", 0))
assertResult(5)(counts.getOrElse("lady", 0))
assertResult(5)(counts.getOrElse("my", 0))
}
test("counts all word frequencies in specified documents set (with term vectors)") {
val reader = IReader(indexDir, schema)
val docset = reader.subset(TermFilter("title", "London Bridge A"))
val counts = WordCounts.count(reader, "content1", Set.empty[String], docset)
assert(counts.size > 0)
assertResult(3)(counts.getOrElse("lady", 0))
assertResult(2)(counts.getOrElse("wood", 0))
assertResult(0)(counts.getOrElse("up", 0))
}
test("counts all word frequencies in specified documents set (without term vectors)") {
val reader = IReader(indexDir, schema)
val docset = reader.subset(TermFilter("title", "London Bridge A"))
val counts = WordCounts.count(reader, "content2", Set.empty[String], docset)
assert(counts.size > 0)
assertResult(3)(counts.getOrElse("lady", 0))
assertResult(0)(counts.getOrElse("wood", 0))
assertResult(8)(counts.getOrElse("up", 0))
// do same stuff with RawReader and Analyzer
val reader2 = RawReader(indexDir)
val counts2 = WordCounts.count(reader2, "content2", Set.empty[String], docset, -1, Analyzer(new StandardAnalyzer()))
assert(counts2.size > 0)
assertResult(3)(counts2.getOrElse("lady", 0))
assertResult(0)(counts2.getOrElse("wood", 0))
assertResult(8)(counts2.getOrElse("up", 0))
}
test("counts all word frequencies for specified words (with term vectors)") {
val reader = IReader(indexDir, schema)
val words = Set("london", "gold", "build")
val counts = WordCounts.count(reader, "content1", words, Set.empty[Int])
assertResult(3)(counts.size)
assertResult(2)(counts.getOrElse("london", 0))
assertResult(2)(counts.getOrElse("gold", 0))
assertResult(4)(counts.getOrElse("build", 0))
}
test("total counts") {
val reader = IReader(indexDir, schema)
val counts1 = WordCounts.totalCount(reader, "content1", Set.empty[Int])
assertResult(79)(counts1)
val counts2 = WordCounts.totalCount(reader, "content2", Set.empty[Int])
assertResult(83)(counts2)
}
test("unique word count after the specific prefix") {
val reader = IReader(indexDir, schema)
val counts = WordCounts.countPrefix(reader, "content2", "st")
assertResult(2)(counts)
}
test("counts document frequencies for all words") {
val reader = IReader(indexDir, schema)
val dfs = WordCounts.countDF(reader, "content2", Set.empty[String])
assert(dfs.size > 0)
assertResult(4)(dfs.getOrElse("build", 0))
assertResult(1)(dfs.getOrElse("silver", 0))
assertResult(5)(dfs.getOrElse("up", 0))
}
test("counts document frequencies for top N words") {
val reader = IReader(indexDir, schema)
val dfs = WordCounts.countDF(reader, "content2", Set.empty[String], 10)
assert(dfs.size == 10)
}
test("counts document frequencies for specified words") {
val reader = IReader(indexDir, schema)
val words = Set("gold", "fair", "build")
val dfs = WordCounts.countDF(reader, "content2", words)
assert(dfs.size == 3)
assertResult(1)(dfs.getOrElse("gold", 0))
assertResult(5)(dfs.getOrElse("fair", 0))
assertResult(4)(dfs.getOrElse("build", 0))
}
}
| gazimahmud/nlp4l | src/test/scala/org/nlp4l/stats/WordCountsSuite.scala | Scala | apache-2.0 | 7,862 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.integration.torch
import com.intel.analytics.bigdl.nn.{Log, Power}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.RandomGenerator
@com.intel.analytics.bigdl.tags.Serial
class LogSpec extends TorchSpec {
"A Log()" should "generate correct output and grad" in {
torchCheck()
def randomn(): Double = RandomGenerator.RNG.uniform(2, 10)
val layer = new Log[Double]()
val input = Tensor[Double](2, 2, 2)
input.apply1(x => randomn())
val gradOutput = Tensor[Double](2, 2, 2)
gradOutput.apply1(x => randomn())
val start = System.nanoTime()
val output = layer.forward(input)
val gradInput = layer.backward(input, gradOutput)
val end = System.nanoTime()
val scalaTime = end - start
val code = "module = nn.Log()\\n" +
"output = module:forward(input)\\n" +
"gradInput = module:backward(input,gradOutput)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
output should be (luaOutput)
gradInput should be (luaGradInput)
println("Test case : Log, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s")
}
}
| zhangxiaoli73/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/integration/torch/LogSpec.scala | Scala | apache-2.0 | 1,994 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.api
import play.api.libs.json.Json
/**
* Represents a linked login for an identity (i.e. a local username/password or a Facebook/Google account).
*
* The login info contains the data about the provider that authenticated that identity.
*
* @param providerID The ID of the provider.
* @param providerKey A unique key which identifies a user on this provider (userID, email, ...).
*/
case class LoginInfo(providerID: String, providerKey: String)
/**
* The companion object of the login info.
*/
object LoginInfo extends ((String, String) => LoginInfo) {
/**
* Converts the [[com.mohiva.play.silhouette.api.LoginInfo]] to Json and vice versa.
*/
implicit val jsonFormat = Json.format[LoginInfo]
}
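
// Illustrative usage sketch, not part of the original Silhouette sources: it shows the
// implicit `jsonFormat` above serializing a LoginInfo and reading it back. The provider
// ID "credentials" and the key below are arbitrary example values.
object LoginInfoJsonExample {
  def main(args: Array[String]): Unit = {
    val loginInfo = LoginInfo("credentials", "[email protected]")
    val json = Json.toJson(loginInfo)     // {"providerID":"credentials","providerKey":"[email protected]"}
    val parsed = json.validate[LoginInfo] // JsSuccess containing the original LoginInfo
    println(json)
    println(parsed)
  }
}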
| mohiva/play-silhouette | silhouette/app/com/mohiva/play/silhouette/api/LoginInfo.scala | Scala | apache-2.0 | 1,384 |
package org.nexbook.app
import com.softwaremill.macwire._
import org.nexbook.concepts.akka.AkkaModule
import org.nexbook.concepts.pubsub.PubSubModule
import org.nexbook.fix.{FixEngineRunner, FixMessageHandler}
import org.slf4j.LoggerFactory
object OrderBookApp extends BasicComponentProvider {
val logger = LoggerFactory.getLogger(classOf[App])
val mode = AppConfig.mode
val runningMode = AppConfig.runningMode
val module: Module = mode match {
case PubSub => wire[PubSubModule]
case Akka => wire[AkkaModule]
}
val fixMessageHandler: FixMessageHandler = wire[FixMessageHandler]
val applicationRunner: ApplicationRunner = if (Live == runningMode) new FixEngineRunner(fixMessageHandler, AppConfig.fixConfigPath) else new WaitingLoopRunner
def main(args: Array[String]) {
logger.info(s"NexBook starting, config name: ${AppConfig.configName}, app mode: $mode, running mode: $runningMode")
applicationRunner.start()
}
def stop(): Unit = {
logger.info("Stop App")
applicationRunner.stop()
}
}
| milczarekIT/nexbook | src/main/scala/org/nexbook/app/OrderBookApp.scala | Scala | apache-2.0 | 1,025 |
package fi.pyppe.ircbot.slave
import com.google.code.chatterbotapi.{ChatterBotType, ChatterBotFactory}
import fi.pyppe.ircbot.LoggerSupport
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}
object BotWithinBot extends LoggerSupport {
private val botSession = {
val factory = new ChatterBotFactory()
factory.create(ChatterBotType.JABBERWACKY).createSession()
}
def think(message: String)(implicit ec: ExecutionContext): Future[String] = {
val t = System.currentTimeMillis
val future = Future {
org.jsoup.Jsoup.parse(botSession.think(message)).text()
}
future.onFailure {
case err =>
logger.warn(s"Failed in ${System.currentTimeMillis - t} ms", err)
}
future
}
def main(args: Array[String]) {
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Await
import scala.concurrent.duration._
val text = Await.result(think("hello?"), 10.seconds)
println(text)
}
}
| Pyppe/akka-ircbot | slave/src/main/scala/fi/pyppe/ircbot/slave/BotWithinBot.scala | Scala | mit | 1,016 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.assertion
import io.gatling.BaseSpec
import io.gatling.commons.shared.unstable.model.stats.{ GeneralStats, GeneralStatsSource, Group, GroupStatsPath, RequestStatsPath, StatsPath }
import io.gatling.commons.shared.unstable.model.stats.assertion.AssertionValidator
import io.gatling.commons.stats._
import io.gatling.commons.stats.assertion.Assertion
import io.gatling.commons.util.StringHelper._
import io.gatling.core.config.GatlingConfiguration
import org.mockito.Mockito.when
@SuppressWarnings(Array("org.wartremover.warts.DefaultArguments"))
final case class Stats(
generalStats: GeneralStats,
requestName: String = "",
groupPath: List[String] = Nil,
status: Option[Status] = None
) {
def request: Option[String] = requestName.trimToOption
def group: Option[Group] = if (groupPath.nonEmpty) Some(Group(groupPath)) else None
}
class AssertionValidatorSpec extends BaseSpec with AssertionSupport {
implicit val configuration: GatlingConfiguration = GatlingConfiguration.loadForTest()
private type Conditions[T] = List[AssertionWithPathAndTarget[T] => Assertion]
private type StatsModifiers = List[Stats => Stats]
private val SetRequestThenGroupModifiers: StatsModifiers =
List(_.copy(requestName = "foo"), _.copy(groupPath = List("foo")))
private def generalStatsSource[T: Numeric](
metric: AssertionWithPathAndTarget[T],
conditions: Conditions[T],
stats: Stats*
): GeneralStatsSource = {
def mockAssertion(source: GeneralStatsSource): Unit =
when(source.assertions) thenReturn conditions.map(_(metric))
def mockStats(stat: Stats, source: GeneralStatsSource): Unit = {
when(source.requestGeneralStats(stat.request, stat.group, stat.status)) thenReturn stat.generalStats
stat.group.foreach { group =>
when(source.groupCumulatedResponseTimeGeneralStats(group, stat.status)) thenReturn stat.generalStats
}
}
def statsPaths: List[StatsPath] =
stats
.map(stat => (stat.request, stat.group))
.map {
case (Some(request), group) => RequestStatsPath(request, group)
case (None, Some(group)) => GroupStatsPath(group)
case _ => throw new AssertionError("Can't have neither a request or group stats path")
}
.toList
def mockStatsPath(source: GeneralStatsSource) =
when(source.statsPaths) thenReturn statsPaths
val mockedGeneralStatsSource = mock[GeneralStatsSource]
mockAssertion(mockedGeneralStatsSource)
stats.foreach(mockStats(_, mockedGeneralStatsSource))
mockStatsPath(mockedGeneralStatsSource)
mockedGeneralStatsSource
}
private def validateAssertions(source: GeneralStatsSource) =
AssertionValidator.validateAssertions(source).map(_.result).forall(identity)
"AssertionValidator" should "fail the assertion when the request path does not exist" in {
val requestStats = Stats(GeneralStats.NoPlot, requestName = "bar")
val source1 = generalStatsSource[Double](details("foo").requestsPerSec, List(_.is(100)), requestStats)
validateAssertions(source1) shouldBe false
val groupStats = Stats(GeneralStats.NoPlot, groupPath = List("bar"))
val source2 = generalStatsSource[Double](details("foo").requestsPerSec, List(_.is(100)), groupStats)
validateAssertions(source2) shouldBe false
val requestAndGroupStats = Stats(GeneralStats.NoPlot, requestName = "baz", groupPath = List("bar"))
val source3 = generalStatsSource[Double](details("baz").requestsPerSec, List(_.is(100)), requestAndGroupStats)
validateAssertions(source3) shouldBe false
}
//TODO : add test on global and forAll
it should "be able to validate a meanRequestsPerSec assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestAndGroupStats = modifier(Stats(GeneralStats.NoPlot.copy(meanRequestsPerSec = 5)))
val conditions: Conditions[Double] = List(_.lte(10), _.gte(3), _.is(5), _.between(4, 6), _.in(1, 3, 5, 7))
val source3 = generalStatsSource(details("foo").requestsPerSec, conditions, requestAndGroupStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a successfulRequests.count assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(count = 5), status = Some(OK)))
val conditions: Conditions[Long] = List(_.lte(10), _.gte(3), _.is(5), _.between(4, 6), _.in(1, 3, 5, 7))
val source3 = generalStatsSource(details("foo").successfulRequests.count, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a failedRequests.count assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(count = 5), status = Some(KO)))
val conditions: Conditions[Long] = List(_.lte(10), _.gte(3), _.is(5), _.between(4, 6), _.in(1, 3, 5, 7))
val source3 = generalStatsSource(details("foo").failedRequests.count, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a allRequests.count assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(count = 10)))
val conditions: Conditions[Long] = List(_.lte(15), _.gte(8), _.is(10), _.between(8, 12), _.in(1, 3, 10, 13))
val source3 = generalStatsSource(details("foo").allRequests.count, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a successfulRequests.percent assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val successful = modifier(Stats(GeneralStats.NoPlot.copy(count = 10)))
val failed = modifier(Stats(GeneralStats.NoPlot.copy(count = 5), status = Some(OK)))
val conditions: Conditions[Double] = List(_.lte(60), _.gte(30), _.is(50), _.between(40, 60), _.in(20, 40, 50, 80))
val source3 = generalStatsSource(details("foo").successfulRequests.percent, conditions, successful, failed)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a failedRequests.percent assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val failed = modifier(Stats(GeneralStats.NoPlot.copy(count = 10)))
val successful = modifier(Stats(GeneralStats.NoPlot.copy(count = 5), status = Some(KO)))
val conditions: Conditions[Double] = List(_.lte(60), _.gte(30), _.is(50), _.between(40, 60), _.in(20, 40, 50, 80))
val source3 = generalStatsSource(details("foo").failedRequests.percent, conditions, failed, successful)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a allRequests.percent assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(count = 10)))
val conditions: Conditions[Double] = List(_.lte(110), _.gte(90), _.is(100), _.between(80, 120), _.in(90, 100, 130))
val source3 = generalStatsSource(details("foo").allRequests.percent, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a responseTime.min assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(min = 10)))
val conditions: Conditions[Int] = List(_.lte(15), _.gte(8), _.is(10), _.between(8, 12), _.in(1, 3, 10, 13))
val source3 = generalStatsSource(details("foo").responseTime.min, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a responseTime.max assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(max = 10)))
val conditions: Conditions[Int] = List(_.lte(15), _.gte(8), _.is(10), _.between(8, 12), _.in(1, 3, 10, 13))
val source3 = generalStatsSource(details("foo").responseTime.max, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a responseTime.mean assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(mean = 10)))
val conditions: Conditions[Int] = List(_.lte(15), _.gte(8), _.is(10), _.between(8, 12), _.in(1, 3, 10, 13))
val source3 = generalStatsSource(details("foo").responseTime.mean, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a responseTime.stdDev assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(stdDev = 10)))
val conditions: Conditions[Int] = List(_.lte(15), _.gte(8), _.is(10), _.between(8, 12), _.in(1, 3, 10, 13))
val source3 = generalStatsSource(details("foo").responseTime.stdDev, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a responseTime.percentiles1 assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(percentile = _ => 10)))
val conditions: Conditions[Int] = List(_.lte(15), _.gte(8), _.is(10), _.between(8, 12), _.in(1, 3, 10, 13))
val source3 = generalStatsSource(details("foo").responseTime.percentile1, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a responseTime.percentiles2 assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(percentile = _ => 10)))
val conditions: Conditions[Int] = List(_.lte(15), _.gte(8), _.is(10), _.between(8, 12), _.in(1, 3, 10, 13))
val source3 = generalStatsSource(details("foo").responseTime.percentile2, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a responseTime.percentiles3 assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(percentile = _ => 10)))
val conditions: Conditions[Int] = List(_.lte(15), _.gte(8), _.is(10), _.between(8, 12), _.in(1, 3, 10, 13))
val source3 = generalStatsSource(details("foo").responseTime.percentile3, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
//TODO : add test on global and forAll
it should "be able to validate a responseTime.percentiles4 assertion for requests and groups" in {
for (modifier <- SetRequestThenGroupModifiers) {
val requestStats = modifier(Stats(GeneralStats.NoPlot.copy(percentile = _ => 10)))
val conditions: Conditions[Int] = List(_.lte(15), _.gte(8), _.is(10), _.between(8, 12), _.in(1, 3, 10, 13))
val source3 = generalStatsSource(details("foo").responseTime.percentile4, conditions, requestStats)
validateAssertions(source3) shouldBe true
}
}
}
| gatling/gatling | gatling-core/src/test/scala/io/gatling/core/assertion/AssertionSpec.scala | Scala | apache-2.0 | 12,749 |
package de.metacoder.edwardthreadlocal.analysis.datamodel
import de.metacoder.edwardthreadlocal
import scala.language.postfixOps
// Unlike a StackTraceElement array, this can be compared with equals and has a meaningful toString
case class StackTrace(elements:Seq[StackTraceElement])
object StackTrace {
  private val trivialThreadLocalMethodNames = Set("set", "remove")
def current():StackTrace = {
def removeInitialStackTraceCall(st:Seq[StackTraceElement]):Seq[StackTraceElement] = st match {
case Seq(getStackTraceCall, rst@_*) if getStackTraceCall.getClassName == classOf[Thread].getName ⇒ rst
case _ ⇒ st
}
def removeInitialEdwardThreadLocalCalls(st:Seq[StackTraceElement]):Seq[StackTraceElement] = st match {
case Seq(edwardThreadLocalCall, rst@_*) if edwardThreadLocalCall.getClassName.startsWith(edwardthreadlocal.packageName) ⇒
removeInitialEdwardThreadLocalCalls(rst)
case _ ⇒ st
}
def removeInitialTrivialThreadLocalCalls(st:Seq[StackTraceElement]):Seq[StackTraceElement] = st match {
      case Seq(tlCall, rst@_*) if tlCall.getClassName == classOf[ThreadLocal[_]].getName && trivialThreadLocalMethodNames(tlCall.getMethodName) ⇒
removeInitialTrivialThreadLocalCalls(rst)
case _ ⇒ st
}
    val removeRedundantInitials:Seq[StackTraceElement] ⇒ Seq[StackTraceElement] =
      removeInitialStackTraceCall _ andThen removeInitialEdwardThreadLocalCalls andThen removeInitialTrivialThreadLocalCalls
    StackTrace(removeRedundantInitials(Thread.currentThread().getStackTrace toSeq))
}
}
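
// Illustrative sketch, not part of the original sources: captures the current, cleaned-up
// stack trace and prints its top frames. Because StackTrace is a case class wrapping a Seq,
// captured traces compare with == and print readably, which is the point of the class.
object StackTraceExample {
  def main(args: Array[String]): Unit = {
    val trace = StackTrace.current()
    trace.elements.take(5).foreach(println)
  }
}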
| metacoder/edward-tl | agent-impl/src/main/scala/de/metacoder/edwardthreadlocal/analysis/datamodel/StackTrace.scala | Scala | apache-2.0 | 1,589 |
package org.f100ded.play.fakews
import akka.util.ByteString
import play.api.libs.ws.{WSCookie, WSProxyServer}
case class FakeRequest
(
method: String,
url: String,
body: ByteString = ByteString.empty,
headers: Map[String, Seq[String]] = Map(),
cookies: Seq[WSCookie] = Seq(),
proxyServer: Option[WSProxyServer] = None
) {
lazy val bodyAsString: String = body.decodeString(ByteString.UTF_8)
} | f100ded/play-fake-ws-standalone | src/main/scala/org/f100ded/play/fakews/FakeRequest.scala | Scala | apache-2.0 | 407 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.message.MessageSet
case class FetchDataInfo(fetchOffset: LogOffsetMetadata, messageSet: MessageSet)
| cran/rkafkajars | java/kafka/server/FetchDataInfo.scala | Scala | apache-2.0 | 939 |
import org.apache.spark.{SparkContext, SparkConf}
/**
* Created by jie on 4/13/16.
* userid, itemid, behavior, geohash, category, time
* item, geohash, category
*/
object featuresExtraction {
def main(args: Array[String]): Unit ={
if(args.length < 2){
println("Usage args")
System.exit(1)
}
val conf = new SparkConf().setAppName("features")
val sc = new SparkContext(conf)
//user.csv
val rawUserData = sc.textFile(args(0)).map{
line =>
(line.split(",")(0), line.split(",")(1), line.split(",")(2).toInt, line.split(",")(5))
}.cache()
//Global
val lastDay = "2014-12-18" //args()
val lastDis = if(lastDay.split(" ")(0).split("-")(1).toInt >= 12){
(lastDay.split(" ")(0).split("-")(2).toInt + 12)
}else{
(lastDay.split(" ")(0).split("-")(2).toInt - 18)
}
println("lastDay Dis : " + lastDis)
//*counting
//[(user,item),(sum1,sum2,sum3,sum4 - 1.1.1),(day1,day2,day3,day4 - 1.1.2), afterbuyclickcount - 1.1.3,
// (Conversion ratio - 1.2.1), ((existAction) - 1.3.1), (twiceBuy - 1.3.2),
// (firstDis, finalDis, finalBuyDis - 1.4.(1,2,3) )]
val userGroup = rawUserData.map(data => ((data._1, data._2), data._3, data._4))
.groupBy(_._1).map{
case(k, v) =>
val arr = v.seq.toList
val sum1 = arr.count(_._2 == 1)
val sum2 = arr.count(_._2 == 2)
val sum3 = arr.count(_._2 == 3)
val sum4 = arr.count(_._2 == 4)
val exist1 = if(arr.count(_._2 == 1) > 0) true else false
val exist2 = if(arr.count(_._2 == 2) > 0) true else false
val exist3 = if(arr.count(_._2 == 3) > 0) true else false
val exist4 = if(arr.count(_._2 == 4) > 0) true else false
val day = v.seq.toList.distinct
val day1 = day.count(_._2 == 1)
val day2 = day.count(_._2 == 2)
val day3 = day.count(_._2 == 3)
val day4 = day.count(_._2 == 4)
val sortarr = arr.sortBy(_._3)
val clickcount = if(sortarr.indexWhere(_._2 == 4) != -1)
arr.length - 1 - sortarr.indexWhere(_._2 == 4)
else 0
val arry = sortarr
val firstDay = arry.apply(0)._3
val finalDay = arry.last._3
val firstDis = if(firstDay.split(" ")(0).split("-")(1).toInt >= 12){
(firstDay.split(" ")(0).split("-")(2).toInt + 12)
}else{
(firstDay.split(" ")(0).split("-")(2).toInt - 18)
}
val finalDis = if(finalDay.split(" ")(0).split("-")(1).toInt >= 12){
(finalDay.split(" ")(0).split("-")(2).toInt + 12)
}else{
(finalDay.split(" ")(0).split("-")(2).toInt - 18)
}
val finalBuyDay = {
val t = arry.indexWhere(_._2 == 4)
if(t != -1)
arry.apply(t)._3
else lastDay
}
val finalBuyDis = if(finalBuyDay.split(" ")(0).split("-")(1).toInt >= 12){
(finalBuyDay.split(" ")(0).split("-")(2).toInt + 12)
}else{
(finalBuyDay.split(" ")(0).split("-")(2).toInt - 18)
}
//1.3.2
val twiceBuy = arr.filter(_._2 == 4)
(k._1, (k._2, ((sum1, sum2, sum3, sum4), (day1, day2, day3, day4), clickcount,
(("%.2f").format(sum4 * 1.0 / arr.length), ("%.2f").format(day4 * 1.0 / arr.length)),
(exist1, exist2, exist3, exist4), twiceBuy.length >= 2),
((lastDis - firstDis, lastDis - finalDis), lastDis - finalBuyDis, finalDis - firstDis)))
}.persist()
userGroup.repartition(1).saveAsTextFile("/user/features/counting.txt")
//1.2.3 [(user, action1, action2, action3, action4)]
val userActionCount = rawUserData.map(data => (data._1, data._3)).groupBy(_._1).map{
case(u, v) =>
val arr = v.seq.toList
(u, (arr.count(_._2 == 1), arr.count(_._2 == 2), arr.count(_._2 == 3), arr.count(_._2 == 4)))
}.persist()
//[(user,item),(sum1,sum2,sum3,sum4 - 1.1.1),(day1,day2,day3,day4 - 1.1.2), afterbuyclickcount - 1.1.3,
// (Conversion ratio(c1, c2) - 1.2.1), ((existAction) - 1.3.1), (twiceBuy - 1.3.2), *(Cross ratio - 1.2.3),
// (firstDis, finalDis, finalBuyDis - 1.4.(1,2,3) )]
val userItemGroup = userGroup.join(userActionCount).map{
case(u, ((p, item, dis), count)) =>
val a1 = if(item._1._1 > 0) ("%.2f").format(count._1 * 1.0 / item._1._1) else -1
val a2 = if(item._1._2 > 0) ("%.2f").format(count._2 * 1.0 / item._1._2) else -1
val a3 = if(item._1._3 > 0) ("%.2f").format(count._3 * 1.0 / item._1._3) else -1
val a4 = if(item._1._4 > 0) ("%.2f").format(count._4 * 1.0 / item._1._4) else -1
((u, p), item, (a1, a2, a3, a4), dis)
}
userItemGroup.repartition(1).saveAsTextFile("/user/features/useritemgroup.csv")
// */
//*
//(user, (action:product - 1.1.4), (r1,r2 - 1.2.1), (1.2.2), (1.3.1), (1.3.2), (1.4.1, 1.4.2, 1.4.3))
val userAction = rawUserData.distinct().groupBy(_._1).map{
case(k, v) =>
val a1 = v.seq.count(_._3 == 1)
val a2 = v.seq.count(_._3 == 2)
val a3 = v.seq.count(_._3 == 3)
val a4 = v.seq.count(_._3 == 4)
val exist1 = if(a1 > 0) true else false
val exist2 = if(a2 > 0) true else false
val exist3 = if(a3 > 0) true else false
val exist4 = if(a4 > 0) true else false
val day = v.seq.toList.map(x => (x._3, x._4)).distinct
val day1 = day.count(_._1 == 1)
val day2 = day.count(_._1 == 2)
val day3 = day.count(_._1 == 3)
val day4 = day.count(_._1 == 4)
val d1 = if(day1 > 0) ("%.2f").format(a1 * 1.0 / day1) else -1
val d2 = if(day2 > 0) ("%.2f").format(a2 * 1.0 / day2) else -1
val d3 = if(day3 > 0) ("%.2f").format(a3 * 1.0 / day3) else -1
val d4 = if(day4 > 0) ("%.2f").format(a4 * 1.0 / day4) else -1
val len = v.seq.toList.length
val arry = v.seq.toList.sortBy(_._4)
val firstDay = arry.apply(0)._4
val finalDay = arry.last._4
val firstDis = if(firstDay.split(" ")(0).split("-")(1).toInt >= 12){
(firstDay.split(" ")(0).split("-")(2).toInt + 12)
}else{
(firstDay.split(" ")(0).split("-")(2).toInt - 18)
}
val finalDis = if(finalDay.split(" ")(0).split("-")(1).toInt >= 12){
(finalDay.split(" ")(0).split("-")(2).toInt + 12)
}else{
(finalDay.split(" ")(0).split("-")(2).toInt - 18)
}
val finalBuyDay = {
val t = arry.indexWhere(_._3 == 4)
if(t != -1)
arry.apply(t)._4
else lastDay
}
val finalBuyDis = if(finalBuyDay.split(" ")(0).split("-")(1).toInt >= 12){
(finalBuyDay.split(" ")(0).split("-")(2).toInt + 12)
}else{
(finalBuyDay.split(" ")(0).split("-")(2).toInt - 18)
}
val twiceBuy = v.seq.toList.filter(_._3 == 4).distinct
(k, (a1, a2, a3, a4), (("%.2f").format(a4 * 1.0 / len), ("%.2f").format(day4 * 1.0 / len)),
(d1, d2, d3, d4), (exist1, exist2, exist3, exist4), (twiceBuy.length >= 2),
((lastDis - firstDis, lastDis - finalDis), lastDis - finalBuyDis, finalDis - firstDis))
}
userAction.repartition(1).saveAsTextFile("/user/features/useraction.csv")
// */
//[(product, (action:user - 1.1.4), (r1,r2 - 1.2.1), (1.2.2), (1.3.1), (1.3.2), ((buyuser.length >= 2 else -1)- 1.4.4)]
val productAction = rawUserData.distinct().groupBy(_._2).map{
case(k, v) =>
val a1 = v.seq.count(_._3 == 1)
val a2 = v.seq.count(_._3 == 2)
val a3 = v.seq.count(_._3 == 3)
val a4 = v.seq.count(_._3 == 4)
val exist1 = if(a1 > 0) true else false
val exist2 = if(a2 > 0) true else false
val exist3 = if(a3 > 0) true else false
val exist4 = if(a4 > 0) true else false
val day = v.seq.toList.map(x => (x._3, x._4)).distinct
val day1 = day.count(_._1 == 1)
val day2 = day.count(_._1 == 2)
val day3 = day.count(_._1 == 3)
val day4 = day.count(_._1 == 4)
val d1 = if(day1 > 0) ("%.2f").format(a1 * 1.0 / day1) else -1
val d2 = if(day2 > 0) ("%.2f").format(a2 * 1.0 / day2) else -1
val d3 = if(day3 > 0) ("%.2f").format(a3 * 1.0 / day3) else -1
val d4 = if(day4 > 0) ("%.2f").format(a4 * 1.0 / day4) else -1
val len = v.seq.toList.length
val buyuser = v.seq.toList.filter(_._3 == 4).map(x => x._1).distinct
val buyuserlen = if(buyuser.length >= 2) buyuser.length else -1
val twiceBuy = v.seq.toList.filter(_._3 == 4).distinct
(k, (a1, a2, a3, a4), (("%.2f").format(a4 * 1.0 / len), ("%.2f").format(day4 * 1.0 / len)),
(d1, d2, d3, d4), (exist1, exist2, exist3, exist4), (twiceBuy.length >= 2), buyuserlen)
}
productAction.repartition(1).saveAsTextFile("/user/features/productaction.csv")
// */
    /* Temporarily unused (kept for reference)
val actionData = rawUserData.map(data => (data._1, data._3)).distinct().cache()
(1 to 4).foreach{
i =>
val ac = actionData.filter(_._2 == i)
ac.repartition(1).saveAsTextFile("/user/features/useraction" + i + "_" + ac.count + ".txt")
}
val productAction = rawUserData.map(data => (data._2, data._3)).distinct().cache()
(1 to 4).foreach{
i =>
val ac = productAction.filter(_._2 == i)
ac.repartition(1).saveAsTextFile("/user/features/productaction" + i + "_" + ac.count + ".txt")
}
// */
sc.stop()
}
}
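
// Illustrative sketch, not part of the original job: the date-distance expression repeated
// inline above can be read as "days since 2014-11-18" for dates inside the job's
// 2014-11-18 .. 2014-12-18 window (December days get +12, November days get -18). The helper
// and the sample dates below are only for clarification and are not used by the job.
object DayDistanceExample {
  def dayDistance(date: String): Int = {
    val parts = date.split(" ")(0).split("-")
    if (parts(1).toInt >= 12) parts(2).toInt + 12 else parts(2).toInt - 18
  }

  def main(args: Array[String]): Unit = {
    println(dayDistance("2014-11-20")) // 2
    println(dayDistance("2014-12-18")) // 30
  }
}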
| JensenFeng/tianchi | src/main/featuresExtraction.scala | Scala | apache-2.0 | 9,521 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.stream
import akka.actor.ActorSystem
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex._
import scala.concurrent.duration._
object UnicomplexActorPublisherSpec {
val myConfig: Config = ConfigFactory.parseString(
"""
| squbs.actorsystem-name = UnicomplexActorPublisherSpec
""".stripMargin)
val boot = UnicomplexBoot(myConfig).createUsing((name, config) => ActorSystem(name, config))
.scanResources("/")
.initExtensions
.start()
}
final class UnicomplexActorPublisherSpec extends TestKit(UnicomplexActorPublisherSpec.boot.actorSystem)
with AnyFlatSpecLike with Matchers with BeforeAndAfterAll {
val duration = 10.second
val in = TestSource.probe[String]
// expose probe port(s)
val ((pubIn, pubTrigger), sub) = LifecycleManaged().source(in).toMat(TestSink.probe[String](system))(Keep.both).run()
override def afterAll(): Unit = {
Unicomplex(system).uniActor ! GracefulStop
}
"UnicomplexTrigger" should "activate flow by unicomplex" in {
// send 2 elements to in
pubIn.sendNext("1")
pubIn.sendNext("2")
sub.request(2)
sub.expectNext(duration, "1")
sub.expectNext("2")
// re-send Active to unicomplex trigger, flow continues
sub.request(2)
sub.expectNoMessage(remainingOrDefault)
pubTrigger() ! SystemState
pubIn.sendNext("3")
pubIn.sendNext("4")
sub.expectNext("3", "4")
}
}
| akara/squbs | squbs-unicomplex/src/test/scala/org/squbs/stream/UnicomplexActorPublisherSpec.scala | Scala | apache-2.0 | 2,306 |
package org.ndc.ndc
class SimHash {
  /** Folds the per-feature hash codes into a single 32-bit SimHash fingerprint. */
  def fromFeatures(features: Array[Int]): Int = {
    val bitcount = 32
    val table = Array.fill(bitcount)(0)
    // For each bit position, tally +1 for every feature hash that has the bit set
    // and -1 for every feature hash that does not.
    for (value <- features) {
      for (bit <- Range(0, bitcount)) {
        if (((value >> bit) & 1) == 1) {
          table(bit) += 1
        }
        else {
          table(bit) -= 1
        }
      }
    }
    // A bit is set in the fingerprint iff its tally is positive, i.e. more features
    // had the bit set than unset.
    var simhash = 0
    for (bit <- Range(0, bitcount)) {
      if (table(bit) > 0) {
        simhash = simhash | (1 << bit)
      }
    }
    return simhash
  }
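
  // Illustrative usage (comment-only sketch, not part of the original class): near-duplicate
  // detection compares two fingerprints by Hamming distance, e.g.
  //   val h1 = new SimHash().fromFeatures(Array("the", "quick", "fox").map(_.hashCode))
  //   val h2 = new SimHash().fromFeatures(Array("the", "quick", "dog").map(_.hashCode))
  //   val distance = Integer.bitCount(h1 ^ h2) // small distance suggests near-duplicates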
} | NikolajLeischner/near_duplicates | src/main/scala/org/ndc/ndc/SimHash.scala | Scala | mit | 580 |
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.aws
import java.net.InetAddress
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.netflix.atlas.config.ConfigManager
import org.scalatest.FunSuite
class DefaultAwsClientFactorySuite extends FunSuite {
import scala.collection.JavaConversions._
val config = ConfigManager.current
// Double check that the endpoints can be resolved, helps catch silly mistakes and typos
val endpoints = config.getConfig("atlas.aws.endpoint").entrySet.toList
endpoints.sortWith(_.getKey < _.getKey).foreach { endpoint =>
val service = endpoint.getKey
// Can be slow (~30s) to run and we don't update the list that often. Can be enabled when
// the list is updated.
ignore(s"resolve: $service") {
val host = endpoint.getValue.unwrapped.asInstanceOf[String]
InetAddress.getByName(host)
}
}
// Try a few key services
val clientClasses = List(
classOf[com.amazonaws.services.cloudwatch.AmazonCloudWatchClient],
classOf[com.amazonaws.services.cloudwatch.AmazonCloudWatchAsyncClient],
classOf[com.amazonaws.services.ec2.AmazonEC2Client],
classOf[com.amazonaws.services.ec2.AmazonEC2AsyncClient],
classOf[com.amazonaws.services.s3.AmazonS3Client]
)
clientClasses.foreach { c =>
val credentials = new DefaultAWSCredentialsProviderChain
val factory = new DefaultAwsClientFactory(credentials, config.getConfig("atlas.aws"))
test(s"newInstance: ${c.getSimpleName}") {
val client = factory.newInstance(c)
client.shutdown()
}
}
}
| jasimmk/atlas | atlas-aws/src/test/scala/com/netflix/atlas/aws/DefaultAwsClientFactorySuite.scala | Scala | apache-2.0 | 2,150 |
object A {
def x: { def q: Int } = error("not important")
} | jamesward/xsbt | sbt/src/sbt-test/source-dependencies/struct-usage/A.scala | Scala | bsd-3-clause | 60 |
package nak.classify
import org.scalatest.FunSuite
import nak.data.{DataMatrix, Example}
import nak.stats.ContingencyStats
import breeze.linalg._
/**
*
* @author dlwh
*/
trait ClassifierTrainerTestHarness extends FunSuite {
def trainer[L,F]: Classifier.Trainer[L,Counter[F,Double]]
test("simple example") {
val trainingData = Array (
Example("cat",Counter.count("fuzzy","claws","small").mapValues(_.toDouble)),
Example("bear",Counter.count("fuzzy","claws","big").mapValues(_.toDouble)),
Example("cat",Counter.count("claws","medium").mapValues(_.toDouble))
)
val testData = Array(
Example("cat", Counter.count("claws","small").mapValues(_.toDouble))
)
val r = trainer[String,String].train(trainingData).classify(testData(0).features)
assert(r == testData(0).label)
}
}
trait ContinuousTestHarness extends ClassifierTrainerTestHarness {
test("prml") {
val classifier = trainer[Int,Int].train(PRMLData.classification)
val contingencyStats = ContingencyStats(classifier, PRMLData.classification)
assert(contingencyStats.microaveraged.precision > 0.65,contingencyStats)
}
}
// Data from Bishop
object PRMLData {
val classification = {
val url = PRMLData.getClass().getClassLoader().getResource("data/classify/prml")
val datamatrix = DataMatrix.fromURL(url,3)
datamatrix.rows.map { ex =>
ex.map{row =>
val r = Counter[Int,Double]()
for( (v,k) <- row.zipWithIndex) {
r(k) = v
}
r
}.relabel(_.toInt)
}
}
}
| seanlgoldberg/nak | src/test/scala/nak/classify/ClassifierTrainerTestHarness.scala | Scala | apache-2.0 | 1,554 |
/**
* (C) Copyright IBM Corp. 2015 - 2017
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.ibm.sparktc.sparkbench.sparklaunch.submission.livy
import com.ibm.sparktc.sparkbench.sparklaunch.confparse.SparkJobConf
import com.ibm.sparktc.sparkbench.sparklaunch.submission.livy.LivySubmit._
import com.ibm.sparktc.sparkbench.sparklaunch.submission.Submitter
import com.ibm.sparktc.sparkbench.utils.SparkBenchException
import com.softwaremill.sttp.{Id, SttpBackend}
import org.slf4j.{Logger, LoggerFactory}
import scala.annotation.tailrec
import scala.sys.ShutdownHookThread
object LivySubmit {
val log: Logger = LoggerFactory.getLogger(this.getClass)
val successCode = 200
import com.softwaremill.sttp._
val emptyBodyException: SparkBenchException = SparkBenchException("REST call returned empty message body")
  val nonSuccessCodeException: Int => SparkBenchException = (code: Int) => SparkBenchException(s"REST call returned non-success code: $code")
def apply(): LivySubmit = {
new LivySubmit()(HttpURLConnectionBackend())
}
def cancelAllBatches(livyWithID: LivyRequestWithID)(implicit backend: SttpBackend[Id, Nothing]): Response[ResponseBodyDelete] = {
log.info(s"Cancelling batch request id: ${livyWithID.id}")
val response = livyWithID.deleteRequest.send()
(response.is200, response.body) match {
case (true, Right(bod)) => if (bod.msg == "deleted") response else throw SparkBenchException(s"Unexpected status for delete request: ${bod.msg}")
case (true, Left(b)) => throw emptyBodyException
case (_, _) => throw nonSuccessCodeException(response.code)
}
}
def sendPostBatchRequest(conf: SparkJobConf)
(implicit backend: SttpBackend[Id, Nothing]):
(LivyRequestWithID, Response[ResponseBodyBatch]) = {
val livyRequest = LivyRequest(conf)
log.info(s"Sending Livy POST request:\\n${livyRequest.postRequest.toString}")
val response: Id[Response[ResponseBodyBatch]] = livyRequest.postRequest.send()
(response.isSuccess, response.body) match {
case (true, Left(_)) => throw emptyBodyException
case (false, Left(_)) => throw nonSuccessCodeException(response.code)
case (false, Right(bod)) => throw SparkBenchException(s"POST Request to ${livyRequest.postBatchUrl} failed:\\n" +
s"${bod.log.mkString("\\n")}")
case (_,_) => // no exception thrown
}
val livyWithID = LivyRequestWithID(livyRequest, response.body.right.get.id)
(livyWithID, response)
}
private def pollHelper(request: LivyRequestWithID)(implicit backend: SttpBackend[Id, Nothing]): Response[ResponseBodyState] = {
Thread.sleep(request.pollSeconds * 1000)
log.info(s"Sending Livy status GET request:\\n${request.statusRequest.toString}")
val response: Id[Response[ResponseBodyState]] = request.statusRequest.send()
response
}
@tailrec
def poll(request: LivyRequestWithID, response: Response[ResponseBodyState])
(implicit backend: SttpBackend[Id, Nothing]): Response[ResponseBodyState] = (response.isSuccess, response.body) match {
case (false, _) => throw SparkBenchException(s"Request failed with code ${response.code}")
case (_, Left(_)) => throw emptyBodyException
case (true, Right(bod)) => bod.state match {
case "success" => response
case "dead" => throw SparkBenchException(s"Poll request failed with state: dead\\n" + getLogs(request))
case "running" => poll(request, pollHelper(request))
case st => throw SparkBenchException(s"Poll request failed with state: $st")
}
}
def getLogs(request: LivyRequestWithID)(implicit backend: SttpBackend[Id, Nothing]): String = {
val response = request.logRequest.send()
(response.is200, response.body) match {
case (true, Right(bod)) => bod.log.mkString("\\n")
case (false, Right(_)) => throw SparkBenchException(s"Log request failed with code: ${response.code}")
case (_, Left(_)) => throw emptyBodyException
}
}
}
class LivySubmit()(implicit val backend: SttpBackend[Id, Nothing]) extends Submitter {
override def launch(conf: SparkJobConf): Unit = {
val (livyWithID, postResponse) = sendPostBatchRequest(conf)(backend)
val shutdownHook: ShutdownHookThread = sys.ShutdownHookThread {
// interrupt any batches
cancelAllBatches(livyWithID)(backend)
}
val pollResponse = poll(livyWithID, pollHelper(livyWithID))(backend)
// The request has completed, so we're going to remove the shutdown hook.
shutdownHook.remove()
}
}
| SparkTC/spark-bench | spark-launch/src/main/scala/com/ibm/sparktc/sparkbench/sparklaunch/submission/livy/LivySubmit.scala | Scala | apache-2.0 | 5,104 |
/*
* Sentries
* Copyright (c) 2012-2015 Erik van Oosten All rights reserved.
*
* The primary distribution site is https://github.com/erikvanoosten/sentries
*
* This software is released under the terms of the BSD 2-Clause License.
* There is NO WARRANTY. See the file LICENSE for the full text.
*/
package nl.grons.sentries.core
import org.specs2.mutable.Specification
import nl.grons.sentries.support.NotAvailableException
import org.specs2.specification.Scope
import java.util.concurrent.{ExecutionException, Future, Callable, Executors}
import scala.collection.JavaConverters._
/**
* Tests [[nl.grons.sentries.core.ConcurrencyLimitSentry]].
*/
class ConcurrencyLimitSentryTest extends Specification {
"The concurrency limit sentry" should {
"return value" in new SentryContext {
sentry("value") must_== "value"
}
"rethrow exception" in new SentryContext {
sentry(throwAnIllegalArgumentException) must throwA[IllegalArgumentException]
}
"throw NotAvailableException for too many invocations" in new SentryContext {
val executor = Executors.newFixedThreadPool(10)
val options = try {
val task = new Callable[String] {
def call() = sentry(slowCode)
}
val tasks = Vector.fill(10)(task).asJava
// Start all 10 tasks simultaneously:
val futures = executor.invokeAll(tasks).asScala
futuresToOptions(futures)
} finally {
executor.shutdown()
}
options.filter(_ == Some("slow")).size must_== 4
options.filter(_ == None).size must_== 6
}
}
private trait SentryContext extends Scope {
val sentry = new ConcurrencyLimitSentry(classOf[ConcurrencyLimitSentryTest], "testSentry", 4)
def slowCode = {
Thread.sleep(300L)
"slow"
}
def throwAnIllegalArgumentException: String = {
throw new IllegalArgumentException("fail")
}
// Convert normal Future results to a Some, and NotAvailableExceptions
// (wrapped in ExecutionException) to a None.
def futuresToOptions[A](futures: Seq[Future[A]]): Seq[Option[A]] = futures.map { future =>
try Some(future.get())
catch {
case e: ExecutionException if e.getCause.isInstanceOf[NotAvailableException] => None
}
}
}
}
| erikvanoosten/sentries | src/test/scala/nl/grons/sentries/core/ConcurrencyLimitSentryTest.scala | Scala | bsd-2-clause | 2,290 |
package concrete.constraint
import concrete.{Domain, Event, Outcome, ProblemState}
trait Residues extends Constraint with EnumerateVariables {
  // Choose the residue store: a flat array-backed implementation when the combined domain
  // span is small enough, otherwise a map-based implementation.
  val residues: ResidueManager = {
if (scope.map(v => v.initDomain.last - v.initDomain.head).sum < 30000) {
new ResidueManagerFast(scope)
} else {
new ResidueManagerMap(scope)
}
}
def init(ps: ProblemState): Outcome = ps
  def reviseDomain(doms: Array[Domain], position: Int): Domain = {
    // Keep a value iff its cached residue (the last support found for it) still holds,
    // or a fresh support can be found, in which case that support becomes the new residue.
    doms(position).filter { value =>
val residue = residues.getResidue(position, value)
((residue ne null) && ctp(doms, residue)) || {
findSupport(doms, position, value) match {
case Some(tuple) =>
assert(check(tuple))
residues.updateResidue(tuple)
true
case None =>
false
}
}
}
}
def findSupport(doms: Array[Domain], position: Int, value: Int): Option[Array[Int]]
override def advise(ps: ProblemState, event: Event, pos: Int): Int = advise(ps, pos)
def advise(ps: ProblemState, pos: Int): Int
}
| concrete-cp/concrete | src/main/scala/concrete/constraint/Residues.scala | Scala | lgpl-2.1 | 1,086 |
/**
* Copyright (C) 2007 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.processor.pipeline
import org.orbeon.oxf.xforms.function.xxforms._
import PipelineProcessor.PIPELINE_NAMESPACE_URI
import org.orbeon.oxf.util.NetUtils
import org.orbeon.saxon.sxpath.XPathEvaluator
import org.orbeon.oxf.common.Version
import org.w3c.dom.Node
import org.orbeon.oxf.xml.OrbeonFunctionLibrary
import org.orbeon.oxf.xforms.library._
import org.orbeon.oxf.xforms.XFormsUtils
import org.orbeon.oxf.xforms.state.DynamicState
import org.orbeon.saxon.om.{NamespaceConstant, NodeInfo}
// For backward compatibility
object PipelineFunctionLibrary extends PipelineFunctionLibrary
/**
* Function library for XPath expressions in XPL.
*
* TODO:
*
* - add Java-accessible functions below to XXFormsIndependentFunctions
* - then remove them from below
* - then update XSLT stylesheets to use the p:* functions instead of direct Java calls
*/
class PipelineFunctionLibrary extends {
// Namespace the functions (we wish we had trait constructors!)
val XFormsIndependentFunctionsNS = Seq(PIPELINE_NAMESPACE_URI)
val XXFormsIndependentFunctionsNS = Seq(PIPELINE_NAMESPACE_URI)
val XSLTFunctionsNS = Seq(NamespaceConstant.FN, PIPELINE_NAMESPACE_URI)
}
with OrbeonFunctionLibrary
with XFormsIndependentFunctions
with XXFormsIndependentFunctions
with XSLTFunctions {
// === Functions made accessible to XSLT via Java calls
// Add these to XXFormsIndependentFunctions?
def decodeXML(encodedXML: String) = XFormsUtils.decodeXML(encodedXML)
def encodeXML(node: Node) = XFormsUtils.encodeXMLAsDOM(node)
def decodeDynamicStateString(dynamicState: String) = DynamicState.apply(dynamicState).toXML // for unit tests only
def newEvaluator(context: NodeInfo) = new XPathEvaluator(context.getConfiguration)
def isPE = Version.isPE
def isPortlet = "portlet" == NetUtils.getExternalContext.getRequest.getContainerType
def setTitle(title: String): String = {
NetUtils.getExternalContext.getResponse.setTitle(title)
null
}
// These are already available in XXFormsIndependentFunctions
def property(name: String) = XXFormsProperty.property(name)
def propertiesStartsWith(name: String) = XXFormsPropertiesStartsWith.propertiesStartsWith(name)
def rewriteServiceURI(uri: String, absolute: Boolean) = XXFormsRewriteServiceURI.rewriteServiceURI(uri, absolute)
def rewriteResourceURI(uri: String, absolute: Boolean) = XXFormsRewriteResourceURI.rewriteResourceURI(uri, absolute)
} | martinluther/orbeon-forms | src/main/scala/org/orbeon/oxf/processor/pipeline/PipelineFunctionLibrary.scala | Scala | lgpl-2.1 | 3,167 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.config
import java.net.URI
import java.io.File
import java.util.Properties
import kafka.consumer.ConsumerConfig
import org.apache.samza.config.factories.PropertiesConfigFactory
import org.junit.Assert._
import org.junit.Test
import scala.collection.JavaConversions._
import org.apache.kafka.common.serialization.ByteArraySerializer
import org.apache.kafka.clients.producer.ProducerConfig
import org.junit.Before
import org.junit.BeforeClass
class TestKafkaConfig {
var props : Properties = new Properties
val SYSTEM_NAME = "kafka";
val KAFKA_PRODUCER_PROPERTY_PREFIX = "systems." + SYSTEM_NAME + ".producer."
@Before
def setupProperties() {
props = new Properties
props.setProperty(KAFKA_PRODUCER_PROPERTY_PREFIX + "bootstrap.servers", "localhost:9092")
props.setProperty("systems." + SYSTEM_NAME + ".consumer.zookeeper.connect", "localhost:2181/")
}
@Test
def testIdGeneration = {
val factory = new PropertiesConfigFactory()
props.setProperty("systems." + SYSTEM_NAME + ".samza.factory", "org.apache.samza.system.kafka.KafkaSystemFactory")
val mapConfig = new MapConfig(props.toMap[String, String])
val kafkaConfig = new KafkaConfig(mapConfig)
val consumerConfig1 = kafkaConfig.getKafkaSystemConsumerConfig(SYSTEM_NAME)
val consumerClientId1 = consumerConfig1.clientId
val groupId1 = consumerConfig1.groupId
val consumerConfig2 = kafkaConfig.getKafkaSystemConsumerConfig(SYSTEM_NAME)
val consumerClientId2 = consumerConfig2.clientId
val groupId2 = consumerConfig2.groupId
assert(consumerClientId1.startsWith("undefined-samza-consumer-"))
assert(consumerClientId2.startsWith("undefined-samza-consumer-"))
assert(groupId1.startsWith("undefined-samza-consumer-group-"))
assert(groupId2.startsWith("undefined-samza-consumer-group-"))
assert(consumerClientId1 != consumerClientId2)
assert(groupId1 != groupId2)
val consumerConfig3 = kafkaConfig.getKafkaSystemConsumerConfig(SYSTEM_NAME, "TestClientId", "TestGroupId")
val consumerClientId3 = consumerConfig3.clientId
val groupId3 = consumerConfig3.groupId
assert(consumerClientId3 == "TestClientId")
assert(groupId3 == "TestGroupId")
val producerConfig1 = kafkaConfig.getKafkaSystemProducerConfig(SYSTEM_NAME)
val producerClientId1 = producerConfig1.clientId
val producerConfig2 = kafkaConfig.getKafkaSystemProducerConfig(SYSTEM_NAME)
val producerClientId2 = producerConfig2.clientId
assert(producerClientId1.startsWith("undefined-samza-producer-"))
assert(producerClientId2.startsWith("undefined-samza-producer-"))
assert(producerClientId1 != producerClientId2)
val producerConfig3 = kafkaConfig.getKafkaSystemProducerConfig(SYSTEM_NAME, "TestClientId")
val producerClientId3 = producerConfig3.clientId
assert(producerClientId3 == "TestClientId")
}
@Test
def testStreamLevelFetchSizeOverride() {
val mapConfig = new MapConfig(props.toMap[String, String])
val kafkaConfig = new KafkaConfig(mapConfig)
val consumerConfig = kafkaConfig.getKafkaSystemConsumerConfig(SYSTEM_NAME)
// default fetch size
assertEquals(1024*1024, consumerConfig.fetchMessageMaxBytes)
props.setProperty("systems." + SYSTEM_NAME + ".consumer.fetch.message.max.bytes", "262144")
val mapConfig1 = new MapConfig(props.toMap[String, String])
val kafkaConfig1 = new KafkaConfig(mapConfig1)
val consumerConfig1 = kafkaConfig1.getKafkaSystemConsumerConfig(SYSTEM_NAME)
// shared fetch size
assertEquals(512*512, consumerConfig1.fetchMessageMaxBytes)
props.setProperty("systems." + SYSTEM_NAME + ".streams.topic1.consumer.fetch.message.max.bytes", "65536")
val mapConfig2 = new MapConfig(props.toMap[String, String])
val kafkaConfig2 = new KafkaConfig(mapConfig2)
val consumerConfig2 = kafkaConfig2.getFetchMessageMaxBytesTopics(SYSTEM_NAME)
// topic fetch size
assertEquals(256*256, consumerConfig2 getOrElse ("topic1", 1024*1024))
}
@Test
def testChangeLogProperties() {
props.setProperty("systems." + SYSTEM_NAME + ".samza.factory", "org.apache.samza.system.kafka.KafkaSystemFactory")
props.setProperty("stores.test1.changelog", "kafka.mychangelog1")
props.setProperty("stores.test2.changelog", "kafka.mychangelog2")
props.setProperty("stores.test1.changelog.kafka.cleanup.policy", "delete")
val mapConfig = new MapConfig(props.toMap[String, String])
val kafkaConfig = new KafkaConfig(mapConfig)
assertEquals(kafkaConfig.getChangelogKafkaProperties("test1").getProperty("cleanup.policy"), "delete")
assertEquals(kafkaConfig.getChangelogKafkaProperties("test2").getProperty("cleanup.policy"), "compact")
val storeToChangelog = kafkaConfig.getKafkaChangelogEnabledStores()
assertEquals(storeToChangelog.get("test1").getOrElse(""), "mychangelog1")
assertEquals(storeToChangelog.get("test2").getOrElse(""), "mychangelog2")
}
@Test
def testDefaultValuesForProducerProperties() {
val mapConfig = new MapConfig(props.toMap[String, String])
val kafkaConfig = new KafkaConfig(mapConfig)
val kafkaProducerConfig = kafkaConfig.getKafkaSystemProducerConfig(SYSTEM_NAME)
val producerProperties = kafkaProducerConfig.getProducerProperties
assertEquals(classOf[ByteArraySerializer].getCanonicalName, producerProperties.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG))
assertEquals(classOf[ByteArraySerializer].getCanonicalName, producerProperties.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG))
assertEquals(kafkaProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION_DEFAULT, producerProperties.get(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION))
assertEquals(kafkaProducerConfig.RETRIES_DEFAULT, producerProperties.get(ProducerConfig.RETRIES_CONFIG))
}
@Test
def testMaxInFlightRequestsPerConnectionOverride() {
val expectedValue = "200";
props.setProperty(KAFKA_PRODUCER_PROPERTY_PREFIX + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, expectedValue);
val mapConfig = new MapConfig(props.toMap[String, String])
val kafkaConfig = new KafkaConfig(mapConfig)
val kafkaProducerConfig = kafkaConfig.getKafkaSystemProducerConfig(SYSTEM_NAME)
val producerProperties = kafkaProducerConfig.getProducerProperties
assertEquals(expectedValue, producerProperties.get(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION))
}
@Test
def testRetriesOverride() {
val expectedValue = "200";
props.setProperty(KAFKA_PRODUCER_PROPERTY_PREFIX + ProducerConfig.RETRIES_CONFIG, expectedValue);
val mapConfig = new MapConfig(props.toMap[String, String])
val kafkaConfig = new KafkaConfig(mapConfig)
val kafkaProducerConfig = kafkaConfig.getKafkaSystemProducerConfig(SYSTEM_NAME)
val producerProperties = kafkaProducerConfig.getProducerProperties
assertEquals(expectedValue, producerProperties.get(ProducerConfig.RETRIES_CONFIG))
}
@Test(expected = classOf[NumberFormatException])
def testMaxInFlightRequestsPerConnectionWrongNumberFormat() {
props.setProperty(KAFKA_PRODUCER_PROPERTY_PREFIX + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "Samza");
val mapConfig = new MapConfig(props.toMap[String, String])
val kafkaConfig = new KafkaConfig(mapConfig)
val kafkaProducerConfig = kafkaConfig.getKafkaSystemProducerConfig(SYSTEM_NAME)
kafkaProducerConfig.getProducerProperties
}
@Test(expected = classOf[NumberFormatException])
def testRetriesWrongNumberFormat() {
props.setProperty(KAFKA_PRODUCER_PROPERTY_PREFIX + ProducerConfig.RETRIES_CONFIG, "Samza");
val mapConfig = new MapConfig(props.toMap[String, String])
val kafkaConfig = new KafkaConfig(mapConfig)
val kafkaProducerConfig = kafkaConfig.getKafkaSystemProducerConfig(SYSTEM_NAME)
kafkaProducerConfig.getProducerProperties
}
}
| zcan/samza | samza-kafka/src/test/scala/org/apache/samza/config/TestKafkaConfig.scala | Scala | apache-2.0 | 8,744 |
package com.github.bzumhagen.sct
import java.time.LocalDate
import com.github.zafarkhaja.semver.Version
/** A changelog change
*
* @param description change description
* @param version change version
* @param changeType change type (i.e. Added)
* @param reference change reference (i.e. XYZ-123)
* @param date change date
*/
object ChangelogChange {
def apply(description: String, version: Version, changeType: String, date: LocalDate, reference: Option[String] = None) =
new ChangelogChange(description, version, changeType, reference.map(ChangelogReference), date)
}
case class ChangelogChange(description: String, version: Version, changeType: String, reference: Option[ChangelogReference], date: LocalDate)
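
// Illustrative sketch, not part of the original sources: building a change through the
// companion apply above, which wraps the optional reference string into a ChangelogReference.
// The description, version, change type and reference are arbitrary example values.
object ChangelogChangeExample {
  def main(args: Array[String]): Unit = {
    val change = ChangelogChange(
      "Support configurable change types", Version.valueOf("1.2.0"), "Added", LocalDate.now(), Some("XYZ-123"))
    println(change)
  }
}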
case class ChangelogReference(value: String) | bzumhagen/sct | src/main/scala/com/github/bzumhagen/sct/ChangelogChange.scala | Scala | mit | 780 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features
import com.esotericsoftware.kryo.io.{Input, Output}
import org.locationtech.geomesa.utils.collection.IntBitSet
import org.locationtech.geomesa.utils.conf.GeoMesaSystemProperties.SystemProperty
import scala.concurrent.duration.Duration
package object kryo {
val SerializerCacheExpiry: Duration = SystemProperty("geomesa.serializer.cache.expiry", "1 hour").toDuration.get
/**
* Metadata for serialized simple features
*
* @param input kryo input
* @param count number of attributes serialized in this feature (may be less than the current sft)
* @param size size of each offset - either 2 or 4 bytes
* @param offset attribute positions are stored relative to this offset into the serialized bytes
* @param nulls null bit set
*/
case class Metadata(input: Input, count: Int, size: Int, offset: Int, nulls: IntBitSet) {
/**
* Position the input to read an attribute
*
* @param i attribute to read
* @return the relative position being set
*/
def setPosition(i: Int): Int = {
input.setPosition(offset + i * size)
val pos = if (size == 2) { input.readShortUnsigned() } else { input.readInt() }
input.setPosition(offset + pos)
pos
}
/**
* Position the input to read the feature ID
*
* @return the relative position being set
*/
def setIdPosition(): Int = {
val pos = size * (count + 1) + (IntBitSet.size(count) * 4)
input.setPosition(offset + pos)
pos
}
/**
* Position the input to read the user data
*
* @return the relative position being set
*/
def setUserDataPosition(): Int = setPosition(count)
}
object Metadata {
/**
* Read the metadata from a kryo input. The input should be positioned at the start of the serialized
* simple feature, just after reading the 'version' byte
*
* @param input input
* @return
*/
def apply(input: Input): Metadata = {
val count = input.readShortUnsigned()
val size = input.readByte()
val offset = input.position()
// read our null mask
input.setPosition(offset + size * (count + 1))
val nulls = IntBitSet.deserialize(input, count)
Metadata(input, count, size, offset, nulls)
}
/**
* Write metadata to the output. After this call, the output will be positioned to write the feature ID
*
* @param output output
* @param count number of serialized attributes
* @param size size of each offset (2 or 4 bytes)
* @return the relative offset used to track attribute offsets
*/
def write(output: Output, count: Int, size: Int): Int = {
output.writeShort(count) // track the number of attributes
output.write(size) // size of each offset
val offset = output.position()
output.setPosition(offset + (size * (count + 1)) + (IntBitSet.size(count) * 4))
offset
}
}
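  // Hedged usage sketch, not part of the original file: the round trip implied by the scaladoc
  // above. `output`, `input`, and `count` are assumed to exist; offsets use 2 bytes per entry.
  //
  //   val offset = Metadata.write(output, count, size = 2) // reserves the offset table + null mask
  //   // ... caller writes the feature id, attributes and user data, then back-fills the
  //   // offset table and null bit set starting at `offset` ...
  //   val meta = Metadata(input)     // input positioned just after the version byte
  //   meta.setPosition(0)            // jump to the first attribute
  //   meta.setUserDataPosition()     // or jump straight to the user data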
}
| locationtech/geomesa | geomesa-features/geomesa-feature-kryo/src/main/scala/org/locationtech/geomesa/features/kryo/package.scala | Scala | apache-2.0 | 3,471 |
package example
import akka.actor.{PoisonPill, Props, Actor}
import akka.event.Logging
class Routee extends Actor {
val log = Logging(context.system, this)
def receive = {
    case i: Int =>
      println("Received something wallah............ClusterTest2")
      log.error(s"${self.path} routing a calculation of $i")
      context.actorOf(Props[FactorialCalculator]) ! i
    case _ => log.error(s"${self.path.address} received unknown message")
}
}
| taamneh/akka-cassandra-cluster-test-V2 | src/main/scala/example/Routee.scala | Scala | apache-2.0 | 482 |
package homepage.model
import net.liftweb.mapper._
object ImpressingItem extends ImpressingItem with KeyedMetaMapper[Long, ImpressingItem] {
override def dbTableName = "impressing_item"
override def fieldOrder = id :: title :: content :: youtubeIds :: urls :: tag :: Nil
}
class ImpressingItem extends KeyedMapper[Long, ImpressingItem] {
def getSingleton = ImpressingItem
def primaryKeyField = id
object id extends MappedLongIndex(this)
object title extends MappedString(this, 1024)
object content extends MappedText(this)
object youtubeIds extends MappedString(this, 64)
object urls extends MappedString(this, 1024)
object tag extends MappedString(this, 64)
} | bbiletskyy/homepage | src/main/scala/homepage/model/ImpressingItem.scala | Scala | apache-2.0 | 685 |
object Test {
trait Fili[A]
trait Kili[M] {
def fili: Fili[M]
}
trait A extends Kili[A] {
def fili: Fili[A]
}
trait Ori[M] extends Kili[M] {
val ori: Fili[M]
def fili: ori.type
}
trait B extends Ori[B]
def foo(a: A, b: B) = if (true) a else b
}
| scala/scala | test/files/pos/t7612.scala | Scala | apache-2.0 | 282 |
package ca.hyperreal.sscheme
object MiscPrimitives extends Primitives
{
val list = Seq(
new Primitive( "display" )( {case SList(obj) => println(obj)} ),
new Primitive( "begin" )( {case exps: SList => exps.last} )
)
} | edadma/sscheme | src/main/scala/MiscPrimitives.scala | Scala | mit | 225 |
/*
* Copyright (C) 2014 Szu-Hsien Lee ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.a30corner.twculture
import android.os.Bundle
import scala.language.postfixOps
import android.view.{View, LayoutInflater, ViewGroup}
import scala.concurrent.Future
import android.widget.GridView
import scala.concurrent.ExecutionContext.Implicits.global
class CategoryFragment extends AbstractGridFragment with CommonFragment {
override def onActivityCreated(savedInstanceState: Bundle): Unit = {
super.onActivityCreated(savedInstanceState)
setTitle(getString(R.string.event_category))
}
def populateData = OpenData.getCategories(getActivity)
override def onItemSelected(category: Category): Unit = changePage(category)(InfoListFragment(_))
}
class PlaceTypeFragment extends AbstractGridFragment with CommonFragment {
override def onActivityCreated(savedInstanceState: Bundle): Unit = {
super.onActivityCreated(savedInstanceState)
setTitle(getString(R.string.place_category))
}
def populateData = Future {
OpenData.getPlacesType
}
override def onCreateView(a: LayoutInflater, b: ViewGroup, c: Bundle): View = {
val v = super.onCreateView(a, b, c)
v.findViewById(R.id.gridview).asInstanceOf[GridView].setNumColumns(2)
v
}
override def onItemSelected(category: Category): Unit = changePage(category)(PlaceListFragment(_))
}
| misgod/twculture | src/main/scala/com/a30corner/twculture/CategoryFragment.scala | Scala | apache-2.0 | 1,909 |
package speedcam
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
class LicenseFilter(next : ActorRef) extends Actor with ActorLogging {
override def receive : Receive = {
case msg : PhotoMessage =>
if (msg.license.isDefined) {
next ! msg
}
}
}
object LicenseFilter {
def props(next : ActorRef) = Props(new LicenseFilter(next))
}
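// Illustrative wiring, not from the original exercise: `system` and a downstream `recorder`
// actor are assumed to exist.
//   val filter = system.actorOf(LicenseFilter.props(recorder))
//   filter ! msg // only PhotoMessages with a defined license reach `recorder`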
| jvorhauer/akka-workshop | exercises/speedcam/src/main/scala/speedcam/LicenseFilter.scala | Scala | apache-2.0 | 371 |
package com.airbnb.scheduler.jobs.graph
import com.airbnb.scheduler.graph.JobGraph
import com.airbnb.scheduler.jobs.{DependencyBasedJob, BaseJob}
import org.jgrapht.experimental.dag.DirectedAcyclicGraph.CycleFoundException
import org.specs2.mock._
import org.specs2.mutable._
class JobGraphSpec extends SpecificationWithJUnit with Mockito {
"JobGraph" should {
"Adding a parent and child to the graph works as expected" in {
val a = new DependencyBasedJob(Set(), "A", "noop")
val b = new DependencyBasedJob(Set(), "B", "noop")
val g = new JobGraph()
g.addVertex(a)
g.addVertex(b)
g.addDependency(a.name, b.name)
g.getChildren(a.name) must contain(b.name)
}
"Adding a circular dependency should not be allowed" in {
val a = new DependencyBasedJob(Set(), "A", "noop")
val b = new DependencyBasedJob(Set(), "B", "noop")
val c = new DependencyBasedJob(Set(), "C", "noop")
val g = new JobGraph()
g.addVertex(a)
g.addVertex(b)
g.addVertex(c)
g.addDependency(a.name, b.name)
g.addDependency(b.name, c.name)
g.addDependency(c.name, a.name) must throwA[CycleFoundException]
}
"Adding nodes with the same name should not be allowed" in {
val a = new DependencyBasedJob(Set(), "A", "noop")
val b = new DependencyBasedJob(Set(), "A", "noop")
val g = new JobGraph()
g.addVertex(a)
g.addVertex(b) must throwA[Exception]
}
"Adding the same edge twice is idempotent" in {
val a = new DependencyBasedJob(Set(), "A", "noop")
val b = new DependencyBasedJob(Set(), "A", "noop")
val g = new JobGraph()
g.addVertex(a)
g.addVertex(b) must throwA[Exception]
}
"Adding dependencies should create proper edges" in {
val a = new DependencyBasedJob(Set(), "A", "noop")
val b = new DependencyBasedJob(Set(), "B", "noop")
val c = new DependencyBasedJob(Set(), "C", "noop")
val d = new DependencyBasedJob(Set(), "D", "noop")
val g = new JobGraph()
g.addVertex(a)
g.addVertex(b)
g.addVertex(c)
g.addVertex(d)
g.addDependency(a.name, b.name)
g.addDependency(b.name, d.name)
g.addDependency(c.name, d.name)
g.getEdgesToParents(d.name).toSet.size must_== 2
}
"A complex graph should be traversable in correct order" in {
      /**
       *   A -> B -> D
       *   |     \
       *   |      \-> E
       *   |         /
       *   \-> C ---/
       */
val a = new DependencyBasedJob(Set(), "A", "noop")
val b = new DependencyBasedJob(Set(), "B", "noop")
val c = new DependencyBasedJob(Set(), "C", "noop")
val d = new DependencyBasedJob(Set(), "D", "noop")
val e = new DependencyBasedJob(Set(), "E", "noop")
val graph = new JobGraph()
graph.addVertex(a)
graph.addVertex(b)
graph.addVertex(c)
graph.addVertex(d)
graph.addVertex(e)
graph.addDependency(a.name, b.name)
graph.addDependency(a.name, c.name)
graph.addDependency(b.name, d.name)
graph.addDependency(b.name, e.name)
graph.addDependency(c.name, e.name)
val aCompleted = graph.getExecutableChildren(a.name)
aCompleted.toSet must_== Set(b.name, c.name)
val bCompleted = graph.getExecutableChildren(b.name)
bCompleted.toSet must_== Set(d.name)
val cCompleted = graph.getExecutableChildren(c.name)
cCompleted.toSet must_== Set(e.name)
val aCompleted2 = graph.getExecutableChildren(a.name)
aCompleted2.toSet must_== Set(b.name, c.name)
val cCompleted2 = graph.getExecutableChildren(c.name)
cCompleted2.toSet must_== Set()
val bCompleted2 = graph.getExecutableChildren(b.name)
bCompleted2.toSet must_== Set(d.name, e.name)
}
"Replacing a vertex works" in {
val a = new DependencyBasedJob(Set(), "A", "noopA")
val b = new DependencyBasedJob(Set(), "B", "noopB")
val c = new DependencyBasedJob(Set(), "C", "noopC")
val d = new DependencyBasedJob(Set(), "C", "noopD")
val graph = new JobGraph()
graph.addVertex(a)
graph.addVertex(b)
graph.addVertex(c)
graph.addDependency(a.name, b.name)
graph.addDependency(a.name, c.name)
graph.addDependency(b.name, c.name)
graph.getChildren(a.name).map(x => graph.lookupVertex(x).get.command).toSet must_== Set("noopB", "noopC")
graph.replaceVertex(c, d)
graph.getChildren(a.name).toSet must_== Set(b.name, d.name)
graph.getChildren(a.name).map(x => graph.lookupVertex(x).get.command).toSet must_== Set("noopB", "noopD")
graph.getChildren(b.name).toSet must_== Set(d.name)
}
}
}
| meelapshah/chronos | src/test/scala/com/airbnb/scheduler/jobs/graph/JobGraphSpec.scala | Scala | apache-2.0 | 4,688 |
package com.pygmalios.rawKafkaCassandra
import akka.actor.ActorSystem
import com.pygmalios.rawKafkaCassandra.actors.KafkaToCassandra
import com.pygmalios.rawKafkaCassandra.cassandra.CassandraSessionFactoryImpl
/**
* Application entry point.
*
* To load an external config file named `raw-kafka-cassandra.config`,
* provide `-Dconfig.file=raw-kafka-cassandra.config` argument to JVM.
*/
object RawKafkaCassandraApp extends App {
// Bootstrap
val actorSystem = ActorSystem("raw-kafka-cassandra")
val config = new SimpleRawKafkaCassandraConfig(actorSystem)
val cassandraSessionFactory = new CassandraSessionFactoryImpl(config)
// Create root KafkaToCassandra actor
KafkaToCassandra.factory(actorSystem, cassandraSessionFactory)
}
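// Hedged example, not part of the original file: how the external config mentioned in the
// scaladoc above would typically be supplied on the JVM command line (the jar name is an assumption):
//   java -Dconfig.file=raw-kafka-cassandra.config -jar raw-kafka-cassandra.jar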
| pygmalios/raw-kafka-cassandra | src/main/scala/com/pygmalios/rawKafkaCassandra/RawKafkaCassandraApp.scala | Scala | apache-2.0 | 752 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import org.apache.spark.SparkConf
import org.apache.spark.rpc.{RpcAddress, RpcEndpointRef, RpcEnv, RpcTimeout}
private[spark] object RpcUtils {
/**
* Retrieve a [[RpcEndpointRef]] which is located in the driver via its name.
*/
def makeDriverRef(name: String, conf: SparkConf, rpcEnv: RpcEnv): RpcEndpointRef = {
val driverHost: String = conf.get("spark.driver.host", "localhost")
val driverPort: Int = conf.getInt("spark.driver.port", 7077)
Utils.checkHost(driverHost, "Expected hostname")
rpcEnv.setupEndpointRef(RpcAddress(driverHost, driverPort), name)
}
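  // Illustrative call, not part of the original file: resolving a driver-side endpoint from an
  // executor's RpcEnv; the endpoint name is only an example.
  //   val driverRef = RpcUtils.makeDriverRef("CoarseGrainedScheduler", conf, rpcEnv)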
/** Returns the configured number of times to retry connecting */
def numRetries(conf: SparkConf): Int = {
conf.getInt("spark.rpc.numRetries", 3)
}
/** Returns the configured number of milliseconds to wait on each retry */
def retryWaitMs(conf: SparkConf): Long = {
conf.getTimeAsMs("spark.rpc.retry.wait", "3s")
}
/** Returns the default Spark timeout to use for RPC ask operations. */
def askRpcTimeout(conf: SparkConf): RpcTimeout = {
RpcTimeout(conf, Seq("spark.rpc.askTimeout", "spark.network.timeout"), "120s")
}
/** Returns the default Spark timeout to use for RPC remote endpoint lookup. */
def lookupRpcTimeout(conf: SparkConf): RpcTimeout = {
RpcTimeout(conf, Seq("spark.rpc.lookupTimeout", "spark.network.timeout"), "120s")
}
private val MAX_MESSAGE_SIZE_IN_MB = Int.MaxValue / 1024 / 1024
/** Returns the configured max message size for messages in bytes. */
def maxMessageSizeBytes(conf: SparkConf): Int = {
val maxSizeInMB = conf.getInt("spark.rpc.message.maxSize", 128)
if (maxSizeInMB > MAX_MESSAGE_SIZE_IN_MB) {
throw new IllegalArgumentException(
s"spark.rpc.message.maxSize should not be greater than $MAX_MESSAGE_SIZE_IN_MB MB")
}
maxSizeInMB * 1024 * 1024
}
}
| likithkailas/StreamingSystems | core/src/main/scala/org/apache/spark/util/RpcUtils.scala | Scala | apache-2.0 | 2,687 |
package com.twitter.finagle.httpx
import com.twitter.util.{Await, Promise, Future}
import com.twitter.finagle.client.Transporter
import com.twitter.finagle.{Service, ServiceFactory, Stack}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TlsFilterTest extends FunSuite {
import Version._
import Method._
def svc(p: Promise[Request]) = Service.mk { (req: Request) =>
p.setValue(req)
Future.never
}
test("filter") {
val host = "test.host"
val tls = new TlsFilter(host)
val req = Request(Http11, Get, "/")
val p = new Promise[Request]
(tls andThen svc(p))(req)
assert(Await.result(p).headerMap.get("Host") === Some(host))
}
test("module") {
val host = "test.host"
val p = new Promise[Request]
val stk = TlsFilter.module.toStack(
Stack.Leaf(TlsFilter.role, ServiceFactory.const(svc(p))))
val fac = stk.make(Stack.Params.empty + Transporter.TLSHostname(Some(host)))
Await.result(fac())(Request(Http11, Get, "/"))
assert(Await.result(p).headerMap.get("Host") === Some(host))
}
}
| lysu/finagle | finagle-httpx/src/test/scala/com/twitter/finagle/httpx/TlsFilterTest.scala | Scala | apache-2.0 | 1,145 |
package com.lucidchart.open.nark.controllers
import com.lucidchart.open.nark.models.UserModel
import com.lucidchart.open.nark.models.records.{Pagination,User}
import com.lucidchart.open.nark.request.{AppAction, AppFlash, AuthAction}
import com.lucidchart.open.nark.views
import java.util.UUID
import play.api.data._
import play.api.data.Forms._
import play.api.data.format.Formats._
object UsersController extends UsersController
class UsersController extends AppController {
private case class EditFormSubmission(
errorAddress: String,
errorEnable: Boolean,
warnAddress: String,
warnEnable: Boolean
)
private val editAddressesForm = Form(
mapping(
"error_address" -> email,
"error_enable" -> boolean,
"warn_address" -> email,
"warn_enable" -> boolean
)(EditFormSubmission.apply)(EditFormSubmission.unapply)
)
/**
* Get the page to change a user's alert notification addresses
*/
def addresses = AuthAction.authenticatedUser { implicit user =>
AppAction { implicit request =>
val form = editAddressesForm.fill(EditFormSubmission(user.errorAddress, user.errorEnable, user.warnAddress, user.warnEnable))
Ok(views.html.users.addresses(form))
}
}
def manageAdmin(page: Int) = AuthAction.authenticatedUser { implicit user =>
AppAction { implicit request =>
val ids = UserModel.getAdminUserId()
val realPage = page.max(1)
if ( ids.length > 0 ) {
if( ids.contains(user.id) ){
val (found,matches) = UserModel.getAllUsers( realPage-1, user.id )
Ok( views.html.users.admin(Pagination[User](realPage, found, UserModel.configuredLimit , matches),false) )
} else {
Redirect(routes.HomeController.index).flashing(AppFlash.error("Please Contact the administrator to gain Admin Privileges."))
}
}
else {
Ok(views.html.users.admin(Pagination[User](realPage,0,UserModel.configuredLimit,Nil),true))
}
}
}
def manageAdminSubmit = AuthAction.authenticatedUser { implicit user =>
AppAction { implicit request =>
println( UserModel.getAdminUserId().length )
if ( UserModel.isAdmin(user.id) || (UserModel.getAdminUserId().length == 0) ){
val formData = request.body.asFormUrlEncoded
val userIds = formData.get("userIds")
val action = formData.get("action").head
val admin = action match {
case "Revoke" => false
case "Grant" | "Yes" => true
}
userIds.map { id =>
UserModel.manageAdmin( UUID.fromString(id), admin )
}
Redirect(routes.UsersController.manageAdmin()).flashing(AppFlash.success("Administrative Privileges Changed Successfully"))
} else {
Redirect(routes.HomeController.index).flashing(AppFlash.error("Please Contact the administrator to gain Admin Privileges"))
}
}
}
/**
* Handle the form submitted by the user and edit the addresses
*/
def addressesSubmit = AuthAction.authenticatedUser { implicit user =>
AppAction { implicit request =>
editAddressesForm.bindFromRequest().fold(
formWithErrors => {
Ok(views.html.users.addresses(formWithErrors))
},
data => {
UserModel.editUser(user.copy(
errorAddress = data.errorAddress,
errorEnable = data.errorEnable,
warnAddress = data.warnAddress,
warnEnable = data.warnEnable
))
Redirect(routes.UsersController.addresses()).flashing(AppFlash.success("Alert addresses saved."))
}
)
}
}
} | lucidsoftware/nark | app/com/lucidchart/open/nark/controllers/UsersController.scala | Scala | apache-2.0 | 3,390 |