Dataset schema (one row per source file):
  code        string  (length 5 to 1M)
  repo_name   string  (length 5 to 109)
  path        string  (length 6 to 208)
  language    string  (1 distinct value)
  license     string  (15 distinct values)
  size        int64   (5 to 1M)
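As a rough illustration of the schema above, a single row could be modelled in Scala as sketched below. The class and field names are assumptions chosen for illustration only; they are not defined by the dataset itself.

// Hypothetical row model mirroring the columns listed above.
final case class CodeSample(
  code: String,      // full source file contents (5 B to 1 MB)
  repoName: String,  // repository slug, e.g. "mohiva/play-silhouette"
  path: String,      // file path inside the repository
  language: String,  // always "Scala" in this split
  license: String,   // one of 15 license identifiers, e.g. "apache-2.0"
  size: Long         // file size in bytes
)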
/** * Copyright 2015 Mohiva Organisation (license at mohiva dot com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.mohiva.play.silhouette.api.actions import javax.inject.Inject import akka.actor.{ Actor, ActorSystem, Props } import akka.testkit.TestProbe import com.mohiva.play.silhouette.api._ import com.mohiva.play.silhouette.api.actions.SecuredActionSpec._ import com.mohiva.play.silhouette.api.exceptions.{ NotAuthenticatedException, NotAuthorizedException } import com.mohiva.play.silhouette.api.services.{ AuthenticatorResult, AuthenticatorService, IdentityService } import net.codingwell.scalaguice.ScalaModule import org.specs2.control.NoLanguageFeatures import org.specs2.matcher.JsonMatchers import org.specs2.mock.Mockito import org.specs2.specification.Scope import play.api.inject.bind import play.api.inject.guice.GuiceApplicationBuilder import play.api.libs.json.Json import play.api.mvc.Results._ import play.api.mvc._ import play.api.test.{ FakeRequest, PlaySpecification, WithApplication } import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future import scala.concurrent.duration._ import scala.language.postfixOps import scala.reflect.ClassTag /** * Test case for the [[com.mohiva.play.silhouette.api.actions.SecuredActionSpec]]. 
*/ class SecuredActionSpec extends PlaySpecification with Mockito with JsonMatchers with NoLanguageFeatures { "The `SecuredAction` action" should { "restrict access if no valid authenticator can be retrieved" in new InjectorContext { new WithApplication(app) with Context { withEvent[NotAuthenticatedEvent] { env.authenticatorService.retrieve(any()) returns Future.successful(None) val result = controller.defaultAction(request) status(result) must equalTo(UNAUTHORIZED) contentAsString(result) must contain("global.not.authenticated") theProbe.expectMsg(500 millis, NotAuthenticatedEvent(request)) } } } "restrict access and discard authenticator if an invalid authenticator can be retrieved" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator.copy(isValid = false))) env.authenticatorService.discard(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } withEvent[NotAuthenticatedEvent] { val result = controller.defaultAction(request) status(result) must equalTo(UNAUTHORIZED) contentAsString(result) must contain("global.not.authenticated") there was one(env.authenticatorService).discard(any(), any())(any()) theProbe.expectMsg(500 millis, NotAuthenticatedEvent(request)) } } } "restrict access and discard authenticator if no identity could be found for an authenticator" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.discard(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(None) withEvent[NotAuthenticatedEvent] { val result = controller.defaultAction(request) status(result) must equalTo(UNAUTHORIZED) contentAsString(result) must contain("global.not.authenticated") there was one(env.authenticatorService).discard(any(), any())(any()) theProbe.expectMsg(500 millis, NotAuthenticatedEvent(request)) } } } "display local not-authenticated result if user isn't authenticated[authorization and error handler]" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.discard(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(None) val result = controller.actionWithAuthorizationAndErrorHandler(request) status(result) must equalTo(UNAUTHORIZED) contentAsString(result) must contain("local.not.authenticated") } } "display local not-authenticated result if user isn't authenticated[error handler only]" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.discard(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(None) val result = controller.actionWithErrorHandler(request) status(result) must equalTo(UNAUTHORIZED) contentAsString(result) must contain("local.not.authenticated") } } "display global 
not-authenticated result if user isn't authenticated" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.discard(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(None) val result = controller.defaultAction(request) status(result) must equalTo(UNAUTHORIZED) contentAsString(result) must contain("global.not.authenticated") } } "restrict access and update authenticator if a user is authenticated but not authorized" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.touch(any()) returns Left(authenticator) env.authenticatorService.update(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) authorization.isAuthorized(any(), any())(any()) returns Future.successful(false) withEvent[NotAuthorizedEvent[FakeIdentity]] { val result = controller.actionWithAuthorization(request) status(result) must equalTo(FORBIDDEN) contentAsString(result) must contain("global.not.authorized") there was one(env.authenticatorService).update(any(), any())(any()) theProbe.expectMsg(500 millis, NotAuthorizedEvent(identity, request)) } } } "display local not-authorized result if user isn't authorized" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.touch(any()) returns Left(authenticator) env.authenticatorService.update(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) authorization.isAuthorized(any(), any())(any()) returns Future.successful(false) val result = controller.actionWithAuthorizationAndErrorHandler(request) status(result) must equalTo(FORBIDDEN) contentAsString(result) must contain("local.not.authorized") there was one(env.authenticatorService).touch(any()) there was one(env.authenticatorService).update(any(), any())(any()) } } "display global not-authorized result if user isn't authorized" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.touch(any()) returns Left(authenticator) env.authenticatorService.update(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) authorization.isAuthorized(any(), any())(any()) returns Future.successful(false) val result = controller.actionWithAuthorization(request) status(result) must equalTo(FORBIDDEN) contentAsString(result) must contain("global.not.authorized") there was one(env.authenticatorService).touch(any()) there was one(env.authenticatorService).update(any(), any())(any()) } } "invoke action without authorization if user is authenticated" in new InjectorContext { new WithApplication(app) with Context 
{ env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.touch(any()) returns Left(authenticator) env.authenticatorService.update(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) withEvent[AuthenticatedEvent[FakeIdentity]] { val result = controller.defaultAction(request) status(result) must equalTo(OK) contentAsString(result) must contain("full.access") there was one(env.authenticatorService).touch(any()) there was one(env.authenticatorService).update(any(), any())(any()) theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request)) } } } "invoke action with authorization if user is authenticated but not authorized" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.touch(any()) returns Left(authenticator) env.authenticatorService.update(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) withEvent[AuthenticatedEvent[FakeIdentity]] { val result = controller.actionWithAuthorization(request) status(result) must equalTo(OK) contentAsString(result) must contain("full.access") there was one(env.authenticatorService).touch(any()) there was one(env.authenticatorService).update(any(), any())(any()) theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request)) } } } "use next request provider in the chain if first isn't responsible" in new InjectorContext with WithRequestProvider { new WithApplication(app) with Context { tokenRequestProvider.authenticate(any()) returns Future.successful(None) basicAuthRequestProvider.authenticate(any()) returns Future.successful(Some(identity.loginInfo)) env.authenticatorService.retrieve(any()) returns Future.successful(None) env.authenticatorService.create(any())(any()) returns Future.successful(authenticator) env.authenticatorService.init(any())(any[RequestHeader]()) answers { p: Any => Future.successful(p.asInstanceOf[FakeAuthenticator#Value]) } env.authenticatorService.embed(any(), any[Result]())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) withEvent[AuthenticatedEvent[FakeIdentity]] { val result = controller.actionWithAuthorization(request) status(result) must equalTo(OK) contentAsString(result) must contain("full.access") there was one(env.authenticatorService).create(any())(any()) there was one(env.authenticatorService).init(any())(any()) theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request)) } } } "update an initialized authenticator if it was touched" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.touch(any()) returns Left(authenticator) env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) env.authenticatorService.update(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } 
withEvent[AuthenticatedEvent[FakeIdentity]] { val result = controller.actionWithAuthorization(request) status(result) must equalTo(OK) contentAsString(result) must contain("full.access") there was one(env.authenticatorService).touch(any()) there was one(env.authenticatorService).update(any(), any())(any()) theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request)) } } } "do not update an initialized authenticator if it was not touched" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.touch(any()) returns Right(authenticator) env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) withEvent[AuthenticatedEvent[FakeIdentity]] { val result = controller.actionWithAuthorization(request) status(result) must equalTo(OK) contentAsString(result) must contain("full.access") there was one(env.authenticatorService).touch(any()) there was no(env.authenticatorService).update(any(), any())(any()) theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request)) } } } "init an uninitialized authenticator" in new InjectorContext with WithRequestProvider { new WithApplication(app) with Context { tokenRequestProvider.authenticate(any()) returns Future.successful(Some(identity.loginInfo)) env.authenticatorService.retrieve(any()) returns Future.successful(None) env.authenticatorService.create(any())(any()) returns Future.successful(authenticator) env.authenticatorService.init(any())(any[RequestHeader]()) answers { p: Any => Future.successful(p.asInstanceOf[FakeAuthenticator#Value]) } env.authenticatorService.embed(any(), any[Result]())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) withEvent[AuthenticatedEvent[FakeIdentity]] { val result = controller.actionWithAuthorization(request) status(result) must equalTo(OK) contentAsString(result) must contain("full.access") there was one(env.authenticatorService).create(any())(any()) there was one(env.authenticatorService).init(any())(any()) theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request)) } } } "renew an initialized authenticator" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.touch(any()) returns Left(authenticator) env.authenticatorService.renew(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) withEvent[AuthenticatedEvent[FakeIdentity]] { val result = controller.renewAction(request) status(result) must equalTo(OK) contentAsString(result) must contain("renewed") there was one(env.authenticatorService).touch(any()) there was one(env.authenticatorService).renew(any(), any())(any()) there was no(env.authenticatorService).update(any(), any())(any()) theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request)) } } } "renew an uninitialized authenticator" in new InjectorContext with WithRequestProvider { new WithApplication(app) with Context { tokenRequestProvider.authenticate(any()) returns Future.successful(Some(identity.loginInfo)) env.authenticatorService.retrieve(any()) returns Future.successful(None) 
env.authenticatorService.create(any())(any()) returns Future.successful(authenticator) env.authenticatorService.renew(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) withEvent[AuthenticatedEvent[FakeIdentity]] { val result = controller.renewAction(request) status(result) must equalTo(OK) contentAsString(result) must contain("renewed") there was one(env.authenticatorService).create(any())(any()) there was one(env.authenticatorService).renew(any(), any())(any()) theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request)) } } } "discard an initialized authenticator" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.touch(any()) returns Left(authenticator) env.authenticatorService.discard(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) withEvent[AuthenticatedEvent[FakeIdentity]] { val result = controller.discardAction(request) status(result) must equalTo(OK) contentAsString(result) must contain("discarded") there was one(env.authenticatorService).touch(any()) there was one(env.authenticatorService).discard(any(), any())(any()) there was no(env.authenticatorService).update(any(), any())(any()) theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request)) } } } "discard an uninitialized authenticator" in new InjectorContext with WithRequestProvider { new WithApplication(app) with Context { tokenRequestProvider.authenticate(any()) returns Future.successful(Some(identity.loginInfo)) env.authenticatorService.retrieve(any()) returns Future.successful(None) env.authenticatorService.create(any())(any()) returns Future.successful(authenticator) env.authenticatorService.discard(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) withEvent[AuthenticatedEvent[FakeIdentity]] { val result = controller.discardAction(request) status(result) must equalTo(OK) there was one(env.authenticatorService).create(any())(any()) there was one(env.authenticatorService).discard(any(), any())(any()) theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, request)) } } } "handle an Ajax request" in new InjectorContext { new WithApplication(app) with Context { implicit val req = FakeRequest().withHeaders("Accept" -> "application/json") env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.touch(any()) returns Left(authenticator) env.authenticatorService.update(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) withEvent[AuthenticatedEvent[FakeIdentity]] { val result = controller.defaultAction(req) status(result) must equalTo(OK) contentType(result) must beSome("application/json") contentAsString(result) must /("result" -> "full.access") there was one(env.authenticatorService).touch(any()) there was 
one(env.authenticatorService).update(any(), any())(any()) theProbe.expectMsg(500 millis, AuthenticatedEvent(identity, req)) } } } } "The `SecuredRequestHandler`" should { "return status 401 if authentication was not successful" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(None) val result = controller.defaultHandler(request) status(result) must equalTo(UNAUTHORIZED) there was no(env.authenticatorService).touch(any()) there was no(env.authenticatorService).update(any(), any())(any()) } } "return the user if authentication was successful" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(Some(authenticator)) env.authenticatorService.touch(any()) returns Left(authenticator) env.authenticatorService.update(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } env.identityService.retrieve(identity.loginInfo) returns Future.successful(Some(identity)) val result = controller.defaultHandler(request) status(result) must equalTo(OK) contentAsString(result) must */("providerID" -> "test") and */("providerKey" -> "1") there was one(env.authenticatorService).touch(any()) there was one(env.authenticatorService).update(any(), any())(any()) } } } "The `exceptionHandler` method of the SecuredErrorHandler" should { "translate an ForbiddenException into a 403 Forbidden result" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(None) env.authenticatorService.discard(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } val failed = Future.failed(new NotAuthorizedException("Access denied")) val result = controller.recover(failed) status(result) must equalTo(FORBIDDEN) } } "translate an UnauthorizedException into a 401 Unauthorized result" in new InjectorContext { new WithApplication(app) with Context { env.authenticatorService.retrieve(any()) returns Future.successful(None) env.authenticatorService.discard(any(), any())(any()) answers { (a, m) => Future.successful(AuthenticatorResult(a.asInstanceOf[Array[Any]](1).asInstanceOf[Result])) } val failed = Future.failed(new NotAuthenticatedException("Not authenticated")) val result = controller.recover(failed) status(result) must equalTo(UNAUTHORIZED) } } } /** * The injector context. */ trait InjectorContext extends Scope { /** * The Silhouette environment. */ lazy val env = Environment[SecuredEnv]( mock[IdentityService[SecuredEnv#I]], mock[AuthenticatorService[SecuredEnv#A]], Seq(), new EventBus ) /** * An authorization mock. */ lazy val authorization = { val a = mock[Authorization[SecuredEnv#I, SecuredEnv#A]] a.isAuthorized(any(), any())(any()) returns Future.successful(true) a } /** * The guice application builder. */ lazy val app = new GuiceApplicationBuilder() .bindings(new GuiceModule) .overrides(bind[SecuredErrorHandler].to[GlobalSecuredErrorHandler]) .build() /** * The guice module. */ class GuiceModule extends ScalaModule { override def configure(): Unit = { bind[Environment[SecuredEnv]].toInstance(env) bind[Authorization[SecuredEnv#I, SecuredEnv#A]].toInstance(authorization) bind[Silhouette[SecuredEnv]].to[SilhouetteProvider[SecuredEnv]] bind[SecuredController] } } /** * The context. */ trait Context { self: WithApplication => /** * An identity. 
*/ lazy val identity = FakeIdentity(LoginInfo("test", "1")) /** * An authenticator. */ lazy val authenticator = FakeAuthenticator(LoginInfo("test", "1")) /** * A fake request. */ lazy implicit val request = FakeRequest() /** * The secured controller. */ lazy implicit val controller = app.injector.instanceOf[SecuredController] /** * The Play actor system. */ lazy implicit val system = app.injector.instanceOf[ActorSystem] /** * The test probe. */ lazy val theProbe = TestProbe() /** * Executes a block after event bus initialization, so that the event can be handled inside the given block. * * @param ct The class tag of the event. * @tparam T The type of the event to handle. * @return The result of the block. */ def withEvent[T <: SilhouetteEvent](block: => Any)(implicit ct: ClassTag[T]) = { val listener = system.actorOf(Props(new Actor { def receive = { case e: T => theProbe.ref ! e } })) env.eventBus.subscribe(listener, ct.runtimeClass.asInstanceOf[Class[T]]) block } } } /** * Adds some request providers in scope. * * We add two providers in scope to test the chaining of this providers. */ trait WithRequestProvider { self: InjectorContext => /** * A mock that simulates a token request provider. */ lazy val tokenRequestProvider = mock[RequestProvider] /** * A mock that simulates a basic auth request provider. */ lazy val basicAuthRequestProvider = mock[RequestProvider] /** * A non request provider. */ lazy val nonRequestProvider = mock[RequestProvider] /** * The Silhouette environment. */ override lazy val env = Environment[SecuredEnv]( mock[IdentityService[FakeIdentity]], mock[AuthenticatorService[FakeAuthenticator]], Seq( tokenRequestProvider, basicAuthRequestProvider, nonRequestProvider ), new EventBus ) } } /** * The companion object. */ object SecuredActionSpec { /** * The environment type. */ trait SecuredEnv extends Env { type I = FakeIdentity type A = FakeAuthenticator } /** * A test identity. * * @param loginInfo The linked login info. */ case class FakeIdentity(loginInfo: LoginInfo) extends Identity /** * A test authenticator. * * @param loginInfo The linked login info. */ case class FakeAuthenticator(loginInfo: LoginInfo, isValid: Boolean = true) extends Authenticator /** * A simple authorization class. * * @param isAuthorized True if the access is authorized, false otherwise. */ case class SimpleAuthorization(isAuthorized: Boolean = true) extends Authorization[FakeIdentity, FakeAuthenticator] { /** * Checks whether the user is authorized to execute an action or not. * * @param identity The current identity instance. * @param authenticator The current authenticator instance. * @param request The current request header. * @tparam B The type of the request body. * @return True if the user is authorized, false otherwise. */ def isAuthorized[B](identity: FakeIdentity, authenticator: FakeAuthenticator)( implicit request: Request[B]): Future[Boolean] = { Future.successful(isAuthorized) } } /** * The global secured error handler. */ class GlobalSecuredErrorHandler extends SecuredErrorHandler { /** * Called when a user is not authenticated. * * As defined by RFC 2616, the status code of the response should be 401 Unauthorized. * * @param request The request header. * @return The result to send to the client. */ def onNotAuthenticated(implicit request: RequestHeader): Future[Result] = { Future.successful(Unauthorized("global.not.authenticated")) } /** * Called when a user is authenticated but not authorized. 
* * As defined by RFC 2616, the status code of the response should be 403 Forbidden. * * @param request The request header. * @return The result to send to the client. */ def onNotAuthorized(implicit request: RequestHeader) = { Future.successful(Forbidden("global.not.authorized")) } } /** * A secured controller. * * @param silhouette The Silhouette stack. * @param authorization An authorization implementation. * @param components The Play controller components. */ class SecuredController @Inject() ( silhouette: Silhouette[SecuredEnv], authorization: Authorization[FakeIdentity, FakeAuthenticator], components: ControllerComponents ) extends AbstractController(components) { /** * A local error handler. */ lazy val errorHandler = new SecuredErrorHandler { override def onNotAuthenticated(implicit request: RequestHeader) = { Future.successful(Unauthorized("local.not.authenticated")) } override def onNotAuthorized(implicit request: RequestHeader) = { Future.successful(Forbidden("local.not.authorized")) } } /** * A secured action. * * @return The result to send to the client. */ def defaultAction = silhouette.SecuredAction { implicit request => render { case Accepts.Json() => Ok(Json.obj("result" -> "full.access")) case Accepts.Html() => Ok("full.access") } } /** * A secured action with an authorization and a custom error handler. * * @return The result to send to the client. */ def actionWithAuthorizationAndErrorHandler = silhouette.SecuredAction(authorization)(errorHandler) { Ok } /** * A secured action with a custom error handler. * * @return The result to send to the client. */ def actionWithErrorHandler = silhouette.SecuredAction(errorHandler) { Ok("full.access") } /** * A secured action with authorization. * * @return The result to send to the client. */ def actionWithAuthorization = silhouette.SecuredAction(authorization) { Ok("full.access") } /** * A secured renew action. * * @return The result to send to the client. */ def renewAction = silhouette.SecuredAction.async { implicit request => silhouette.env.authenticatorService.renew(request.authenticator, Ok("renewed")) } /** * A secured discard action. * * @return The result to send to the client. */ def discardAction = silhouette.SecuredAction.async { implicit request => silhouette.env.authenticatorService.discard(request.authenticator, Ok("discarded")) } /** * A secured request handler. */ def defaultHandler = Action.async { implicit request => silhouette.SecuredRequestHandler { securedRequest => Future.successful(HandlerResult(Ok, Some(securedRequest.identity))) }.map { case HandlerResult(r, Some(user)) => Ok(Json.toJson(user.loginInfo)) case HandlerResult(r, None) => Unauthorized } } /** * Method to test the `exceptionHandler` method of the [[SecuredErrorHandler]]. * * @param f The future to recover from. * @param request The request header. * @return The result to send to the client. */ def recover(f: Future[Result])(implicit request: RequestHeader): Future[Result] = { f.recoverWith(silhouette.SecuredAction.requestHandler.errorHandler.exceptionHandler) } } }
mohiva/play-silhouette
silhouette/test/com/mohiva/play/silhouette/api/actions/SecuredActionSpec.scala
Scala
apache-2.0
34,087
/*
 * Copyright 2019 Spotify AB.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package com.spotify.scio.avro

import org.apache.avro.Schema
import org.apache.avro.generic.{GenericData, GenericRecord}

import scala.jdk.CollectionConverters._

object AvroUtils {

  private def f(name: String, tpe: Schema.Type) =
    new Schema.Field(
      name,
      Schema.createUnion(List(Schema.create(Schema.Type.NULL), Schema.create(tpe)).asJava),
      null: String,
      null: AnyRef
    )

  private def fArr(name: String, tpe: Schema.Type) =
    new Schema.Field(name, Schema.createArray(Schema.create(tpe)), null: String, null: AnyRef)

  val schema: Schema = Schema.createRecord("GenericTestRecord", null, null, false)
  schema.setFields(
    List(
      f("int_field", Schema.Type.INT),
      f("long_field", Schema.Type.LONG),
      f("float_field", Schema.Type.FLOAT),
      f("double_field", Schema.Type.DOUBLE),
      f("boolean_field", Schema.Type.BOOLEAN),
      f("string_field", Schema.Type.STRING),
      fArr("array_field", Schema.Type.STRING)
    ).asJava
  )

  def newGenericRecord(i: Int): GenericRecord = {
    val r = new GenericData.Record(schema)
    r.put("int_field", 1 * i)
    r.put("long_field", 1L * i)
    r.put("float_field", 1f * i)
    r.put("double_field", 1.0 * i)
    r.put("boolean_field", true)
    r.put("string_field", "hello")
    r.put("array_field", List[CharSequence]("a", "b", "c").asJava)
    r
  }

  def newSpecificRecord(i: Int): TestRecord =
    new TestRecord(
      i,
      i.toLong,
      i.toFloat,
      i.toDouble,
      true,
      "hello",
      List[CharSequence]("a", "b", "c").asJava
    )
}
spotify/scio
scio-schemas/src/main/scala/com/spotify/scio/avro/AvroUtils.scala
Scala
apache-2.0
2,167
package com.programmaticallyspeaking.ncd.nashorn

import com.programmaticallyspeaking.ncd.host.{Script, ScriptLocation}
import com.sun.jdi.Location
import com.sun.jdi.request.{BreakpointRequest, EventRequest, EventRequestManager}

object BreakableLocation {
  private def scriptLocationFromScriptAndLocation(script: Script, location: Location): ScriptLocation = {
    ScriptLocation.fromScriptAndLine(script, location.lineNumber())
  }
}

/**
 * Represents a location in a script that the debugger can break at.
 *
 * @param script the script that contains the location
 * @param location the location
 */
class BreakableLocation private(val script: Script, val scriptLocation: ScriptLocation, location: Location) {

  def hasLocation(loc: Location): Boolean = loc == location

  import JDIExtensions._

  def this(script: Script, location: Location) =
    this(script, BreakableLocation.scriptLocationFromScriptAndLocation(script, location), location)

  def sameMethodAndLineAs(l: Location): Boolean = l.sameMethodAndLineAs(Some(location))

  def createBreakpointRequest(): BreakpointRequest = {
    val eventRequestManager = location.virtualMachine().eventRequestManager()
    val breakpointRequest = eventRequestManager.createBreakpointRequest(location)
    breakpointRequest
  }

  override def toString: String = s"${script.id}:${scriptLocation.lineNumber1Based} ($location)"
}
provegard/ncdbg
src/main/scala/com/programmaticallyspeaking/ncd/nashorn/BreakableLocation.scala
Scala
bsd-3-clause
1,388
package com.datamountaineer.streamreactor.connect.cassandra.source import java.net.InetAddress import java.nio.ByteBuffer import java.util.UUID import com.datamountaineer.streamreactor.connect.cassandra.TestConfig import com.datamountaineer.streamreactor.connect.cassandra.config.{CassandraConfigConstants, CassandraConfigSource, CassandraSettings} import com.datastax.driver.core.{CodecRegistry, _} import org.apache.kafka.connect.data.{Decimal, Schema, Struct, Timestamp} import org.apache.kafka.connect.errors.DataException import org.mockito.MockitoSugar import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpec import scala.collection.JavaConverters._ class TestCassandraTypeConverter extends AnyWordSpec with TestConfig with Matchers with MockitoSugar { val OPTIONAL_DATE_SCHEMA = org.apache.kafka.connect.data.Date.builder().optional().build() val OPTIONAL_TIMESTAMP_SCHEMA = Timestamp.builder().optional().build() val OPTIONAL_DECIMAL_SCHEMA = Decimal.builder(18).optional().build() val uuid = UUID.randomUUID() val codecRegistry: CodecRegistry = new CodecRegistry(); "should handle null when converting a Cassandra row schema to a Connect schema" in { val cassandraTypeConverter = new CassandraTypeConverter(codecRegistry = codecRegistry, setting = getSettings(false)) val schema = cassandraTypeConverter.convertToConnectSchema(null, "test") val schemaFields = schema.fields().asScala schemaFields.size shouldBe 0 schema.name() shouldBe "test" } "should convert a Cassandra row schema to a Connect schema" in { val cassandraTypeConverter = new CassandraTypeConverter(codecRegistry = codecRegistry, setting = getSettings(false)) val cols: ColumnDefinitions = TestUtils.getColumnDefs val schema = cassandraTypeConverter.convertToConnectSchema(cols.asScala.toList, "test") val schemaFields = schema.fields().asScala schemaFields.size shouldBe cols.asList().size() schema.name() shouldBe "test" checkCols(schema) } "should convert a Cassandra row to a Struct" in { val cassandraTypeConverter = new CassandraTypeConverter(codecRegistry = codecRegistry, setting = getSettings(false)) val row = mock[Row] val cols = TestUtils.getColumnDefs when(row.getColumnDefinitions).thenReturn(cols) mockRow(row) val colDefList = cassandraTypeConverter.getStructColumns(row, Set.empty) val sr: Struct = cassandraTypeConverter.convert(row, "test", colDefList, None) val schema = sr.schema() checkCols(schema) sr.get("timeuuidCol").toString shouldBe uuid.toString sr.get("intCol") shouldBe 0 sr.get("mapCol") shouldBe ('empty) } "should convert a Cassandra row to a Struct with map in sub struct" in { val cassandraTypeConverter = new CassandraTypeConverter(codecRegistry = codecRegistry, setting = getSettings(false)) val row = mock[Row] val cols = TestUtils.getColumnDefs when(row.getColumnDefinitions).thenReturn(cols) mockRow(row) when(row.getMap("mapCol", classOf[String], classOf[String])).thenReturn(new java.util.HashMap[String,String] { put("sub1","sub1value"); }) val colDefList = cassandraTypeConverter.getStructColumns(row, Set.empty) val sr: Struct = cassandraTypeConverter.convert(row, "test", colDefList, None) val schema = sr.schema() checkCols(schema) sr.getMap("mapCol").get("sub1").toString shouldBe "sub1value" } "should convert a Cassandra row to a Struct with map in json" in { val cassandraTypeConverter = new CassandraTypeConverter(codecRegistry = codecRegistry, setting = getSettings(true)) val row = mock[Row] val cols = TestUtils.getColumnDefs when(row.getColumnDefinitions).thenReturn(cols) mockRow(row) 
when(row.getMap("mapCol", classOf[String], classOf[String])).thenReturn(new java.util.HashMap[String,String] { put("sub1","sub1value"); }) val colDefList = cassandraTypeConverter.getStructColumns(row, Set.empty) val sr: Struct = cassandraTypeConverter.convert(row, "test", colDefList, None) val schema = sr.schema() checkCols(schema) sr.get("mapCol") shouldBe "{\\"sub1\\":\\"sub1value\\"}" } "should convert a Cassandra row to a Struct with list in sub struct" in { val cassandraTypeConverter = new CassandraTypeConverter(codecRegistry = codecRegistry, setting = getSettings(false)) val row = mock[Row] val cols = TestUtils.getColumnDefs when(row.getColumnDefinitions).thenReturn(cols) mockRow(row) when(row.getList("listCol", classOf[String])).thenReturn(new java.util.ArrayList[String]{ add("A"); add("B"); add("C"); }) val colDefList = cassandraTypeConverter.getStructColumns(row, Set.empty) val sr: Struct = cassandraTypeConverter.convert(row, "test", colDefList, None) val schema = sr.schema() checkCols(schema) sr.getArray("listCol").get(0).toString shouldBe "A" sr.getArray("listCol").get(1).toString shouldBe "B" sr.getArray("listCol").get(2).toString shouldBe "C" } "should convert a Cassandra row to a Struct with list in json" in { val cassandraTypeConverter = new CassandraTypeConverter(codecRegistry = codecRegistry, setting = getSettings(true)) val row = mock[Row] val cols = TestUtils.getColumnDefs when(row.getColumnDefinitions).thenReturn(cols) mockRow(row) when(row.getList("listCol", classOf[String])).thenReturn(new java.util.ArrayList[String]{ add("A"); add("B"); add("C"); }) val colDefList = cassandraTypeConverter.getStructColumns(row, Set.empty) val sr: Struct = cassandraTypeConverter.convert(row, "test", colDefList, None) val schema = sr.schema() checkCols(schema) sr.get("listCol") shouldBe "[\\"A\\",\\"B\\",\\"C\\"]" } "should convert a Cassandra row to a Struct with set" in { val cassandraTypeConverter = new CassandraTypeConverter(codecRegistry = codecRegistry, setting = getSettings(false)) val row = mock[Row] val cols = TestUtils.getColumnDefs when(row.getColumnDefinitions).thenReturn(cols) mockRow(row) when(row.getSet("setCol", classOf[String])).thenReturn(new java.util.HashSet[String]{ add("A"); add("B"); add("C"); }) val colDefSet = cassandraTypeConverter.getStructColumns(row, Set.empty) val sr: Struct = cassandraTypeConverter.convert(row, "test", colDefSet, None) val schema = sr.schema() checkCols(schema) sr.getArray("setCol").get(0).toString shouldBe "A" sr.getArray("setCol").get(1).toString shouldBe "B" sr.getArray("setCol").get(2).toString shouldBe "C" } "should convert a Cassandra row to a Struct no columns" in { val cassandraTypeConverter = new CassandraTypeConverter(codecRegistry = codecRegistry, setting = getSettings(false)) val row = mock[Row] val cols = TestUtils.getColumnDefs when(row.getColumnDefinitions).thenReturn(cols) mockRow(row) val colDefList = null val sr: Struct = cassandraTypeConverter.convert(row, "test", colDefList, None) val schema = sr.schema() schema.defaultValue() shouldBe null } "should convert a Cassandra row to a Struct and ignore some" in { val cassandraTypeConverter = new CassandraTypeConverter(codecRegistry = codecRegistry, setting = getSettings(false)) val row = mock[Row] val cols = TestUtils.getColumnDefs when(row.getColumnDefinitions).thenReturn(cols) mockRow(row) val ignoreList = Set("intCol", "floatCol") val colDefList = cassandraTypeConverter.getStructColumns(row, ignoreList) val sr: Struct = cassandraTypeConverter.convert(row, "test", 
colDefList, None) sr.get("timeuuidCol").toString shouldBe uuid.toString sr.get("mapCol") shouldBe ('empty) try { sr.get("intCol") fail() } catch { case _: DataException => // Expected, so continue } try { sr.get("floatCol") fail() } catch { case _: DataException => // Expected, so continue } } def mockRow(row: Row) = { when(row.getString("uuid")).thenReturn("string") when(row.getInet("inetCol")).thenReturn(InetAddress.getByName("127.0.0.1")) when(row.getString("asciiCol")).thenReturn("string") when(row.getString("textCol")).thenReturn("string") when(row.getString("varcharCol")).thenReturn("string") when(row.getBool("booleanCol")).thenReturn(true) when(row.getShort("smallintCol")).thenReturn(0.toShort) when(row.getInt("intCol")).thenReturn(0) when(row.getDecimal("decimalCol")).thenReturn(new java.math.BigDecimal(0)) when(row.getFloat("floatCol")).thenReturn(0) when(row.getLong("counterCol")).thenReturn(0.toLong) when(row.getLong("bigintCol")).thenReturn(0.toLong) when(row.getLong("varintCol")).thenReturn(0.toLong) when(row.getDouble("doubleCol")).thenReturn(0.toDouble) when(row.getString("timeuuidCol")).thenReturn("111111") when(row.getBytes("blobCol")).thenReturn(ByteBuffer.allocate(10)) when(row.getList("listCol", classOf[String])).thenReturn(new java.util.ArrayList[String]) when(row.getSet("setCol", classOf[String])).thenReturn(new java.util.HashSet[String]) when(row.getMap("mapCol", classOf[String], classOf[String])).thenReturn(new java.util.HashMap[String, String]) when(row.getDate("dateCol")).thenReturn(com.datastax.driver.core.LocalDate.fromDaysSinceEpoch(1)) when(row.getTime("timeCol")).thenReturn(0) when(row.getTimestamp("timestampCol")).thenReturn(new java.util.Date) when(row.getUUID("timeuuidCol")).thenReturn(uuid) //when(row.getTupleValue("tupleCol")).thenReturn(new TupleValue("tuple")) } def checkCols(schema: Schema) = { schema.field("uuidCol").schema().`type`() shouldBe Schema.OPTIONAL_STRING_SCHEMA.`type`() schema.field("inetCol").schema().`type`() shouldBe Schema.OPTIONAL_STRING_SCHEMA.`type`() schema.field("asciiCol").schema().`type`() shouldBe Schema.OPTIONAL_STRING_SCHEMA.`type`() schema.field("textCol").schema().`type`() shouldBe Schema.OPTIONAL_STRING_SCHEMA.`type`() schema.field("varcharCol").schema().`type`() shouldBe Schema.OPTIONAL_STRING_SCHEMA.`type`() schema.field("booleanCol").schema().`type`() shouldBe Schema.OPTIONAL_BOOLEAN_SCHEMA.`type`() schema.field("smallintCol").schema().`type`() shouldBe Schema.INT16_SCHEMA.`type`() schema.field("intCol").schema().`type`() shouldBe Schema.OPTIONAL_INT32_SCHEMA.`type`() schema.field("decimalCol").schema().`type`() shouldBe OPTIONAL_DECIMAL_SCHEMA.`type`() schema.field("floatCol").schema().`type`() shouldBe Schema.OPTIONAL_FLOAT32_SCHEMA.`type`() schema.field("counterCol").schema().`type`() shouldBe Schema.OPTIONAL_INT64_SCHEMA.`type`() schema.field("bigintCol").schema().`type`() shouldBe Schema.OPTIONAL_INT64_SCHEMA.`type`() schema.field("varintCol").schema().`type`() shouldBe Schema.OPTIONAL_INT64_SCHEMA.`type`() schema.field("doubleCol").schema().`type`() shouldBe Schema.OPTIONAL_FLOAT64_SCHEMA.`type`() schema.field("timeuuidCol").schema().`type`() shouldBe Schema.OPTIONAL_STRING_SCHEMA.`type`() schema.field("blobCol").schema().`type`() shouldBe Schema.OPTIONAL_BYTES_SCHEMA.`type`() schema.field("timeCol").schema().`type`() shouldBe Schema.OPTIONAL_INT64_SCHEMA.`type`() schema.field("timestampCol").schema().`type`() shouldBe OPTIONAL_TIMESTAMP_SCHEMA.`type`() schema.field("dateCol").schema().`type`() shouldBe 
OPTIONAL_DATE_SCHEMA.`type`() } def getSettings(mappingCollectionToJson: Boolean) = { val config = Map( CassandraConfigConstants.CONTACT_POINTS -> CONTACT_POINT, CassandraConfigConstants.KEY_SPACE -> CASSANDRA_SINK_KEYSPACE, CassandraConfigConstants.USERNAME -> USERNAME, CassandraConfigConstants.PASSWD -> PASSWD, CassandraConfigConstants.KCQL -> "INSERT INTO cassandra-source SELECT * FROM orders PK created", CassandraConfigConstants.POLL_INTERVAL -> "1000", CassandraConfigConstants.MAPPING_COLLECTION_TO_JSON -> mappingCollectionToJson.toString ) val taskConfig = CassandraConfigSource(config.asJava); CassandraSettings.configureSource(taskConfig).toList.head } }
datamountaineer/stream-reactor
kafka-connect-cassandra/src/test/scala/com/datamountaineer/streamreactor/connect/cassandra/source/TestCassandraTypeConverter.scala
Scala
apache-2.0
12,192
package no.netcompany.testdatagen.aggreg

// Copyright (C) 2014 Lars Reed -- GNU GPL 2.0 -- see LICENSE.txt

import no.netcompany.testdatagen.{GeneratorImpl, Generator}
import no.netcompany.testdatagen.generators.FromList

/**
 * This generator takes one generator and a generator function as input; it draws values from the first
 * and uses that to generate values for the second.
 * Format functions are not supported.
 */
class TwoFromFunction[T, U](gen: Generator[T], genFun: T => U) extends GeneratorImpl[(T, U)] {

  def getStream: Stream[(T, U)] = gen.gen map { v => (v, genFun(v)) }

  override def genStrings: Stream[String] = genFormatted map { _.toString }

  /**
   * Returns pairs where both values are Strings, the first value formatted by the input
   * generator, the second by toString.
   */
  def genFormatted: Stream[(String, String)] = {
    gen.gen.map { v =>
      val p1 = gen.formatOne(v)
      val p2 = genFun(v)
      (p1, if (p2 == null) null else p2.toString)
    }
  }

  /**
   * Return two ListGenerators corresponding to the parts of the generated tuples,
   * with a given sample size (number of occurrences read from the original).
   * TODO: Streams?
   */
  def asListGens(sampleSize: Int): (FromList[T], FromList[U]) = {
    val tuples = get(sampleSize)
    val gen1 = FromList(tuples map { _._1 }).sequential
    val gen2 = FromList(tuples map { _._2 }).sequential
    (gen1, gen2)
  }

  override def formatWith(f: ((T, U)) => String) = throw new UnsupportedOperationException

  override def formatOne[S >: (T, U)](v: S) = throw new UnsupportedOperationException
}

object TwoFromFunction {
  def apply[T, U](gen: Generator[T], genFun: T => U): TwoFromFunction[T, U] =
    new TwoFromFunction(gen, genFun)
}
lre-mesan/testdata
src/main/scala/no/netcompany/testdatagen/aggreg/TwoFromFunction.scala
Scala
gpl-2.0
1,729
package ru.maizy.ambient7.mt8057agent

/**
 * Copyright (c) Nikita Kovaliov, maizy.ru, 2015-2017
 * See LICENSE.txt for details.
 */

import scala.util.{ Failure, Success, Try }
import scalaj.http.{ BaseHttp, HttpOptions, HttpRequest, HttpResponse }
import com.typesafe.scalalogging.LazyLogging
import ru.maizy.ambient7.core.config.Ambient7Options
import ru.maizy.ambient7.core.config.options.InfluxDbOptions
import ru.maizy.ambient7.core.data.{ AgentTag, AgentTags }

class InfluxDbWriter(opts: Ambient7Options) extends Writer with LazyLogging {

  import InfluxDbWriter._

  val OK_NO_CONTENT = 204

  override def write(event: Event): Unit = {
    formatLine(event).foreach { data =>
      opts.influxDb match {
        case None =>
          logger.error(s"Unable to perform influxdb request: options not available")
        case Some(influxDbOptions) =>
          val request = buildWriteRequest(data, influxDbOptions)
          val responseRes = performRequest(request)
          // TODO: buffer for N events if a failure happens (iss #14)
          responseRes match {
            case Failure(e) =>
              logger.warn(s"Unable to perform influxdb request: ${e.getClass}")
              logger.debug(s"Request error", e)
            case Success(response) if response.code != OK_NO_CONTENT =>
              logger.warn(s"Unable to write event to influxdb: HTTP ${response.code} ${response.body}")
            case _ =>
          }
      }
    }
  }

  override def onInit(): Unit = {}

  private[mt8057agent] def formatLine(event: Event): Option[String] = {
    event match {
      case Co2Updated(Co2(co2, high), ts) =>
        Some(s"co2$tags ppm=${co2}i,high=$high $ts")
      case TempUpdated(Temp(temp), ts) =>
        val formattedTemp = temp.formatted("%.2f").replace(",", ".")
        Some(s"temp$tags celsius=$formattedTemp $ts")
      case _ =>
        None
    }
  }

  private lazy val tags: String = {
    opts.selectedCo2Device match {
      case Some(device) =>
        val finalTags = AgentTags(
          device.agent.tags.tags ++ IndexedSeq(
            AgentTag("agent", device.agent.agentName),
            AgentTag("device", "mt8057")
          )
        )
        "," + finalTags.encoded
      case _ =>
        ""
    }
  }

  private[mt8057agent] def performRequest(request: HttpRequest): Try[HttpResponse[String]] =
    Try(request.asString)

  private def buildWriteRequest(data: String, influxDbOpts: InfluxDbOptions): HttpRequest = {
    // TODO: migrate to influxdb client
    val baseUrl = influxDbOpts.baseUrl.stripSuffix("/")
    var request = HttpClient(s"$baseUrl/write")
      .postData(data)

    request = influxDbOpts.database match {
      case Some(dbName) => request.param("db", dbName)
      case _ => request
    }

    request = (influxDbOpts.user, influxDbOpts.password) match {
      case (Some(user), Some(pass)) => request.auth(user, pass)
      case _ => request
    }

    request
  }
}

object InfluxDbWriter {
  object HttpClient extends BaseHttp(
    userAgent = "ambient7",  // TODO: app version
    options = Seq(
      HttpOptions.connTimeout(200),
      HttpOptions.readTimeout(200),
      HttpOptions.followRedirects(false)
    )
  )
}
maizy/ambient7
mt8057-agent/src/main/scala/ru/maizy/ambient7/mt8057agent/InfluxDbWriter.scala
Scala
apache-2.0
3,211
package org.ensime.client

import akka.actor.ActorSystem
import ammonite.ops._
import org.ensime.api.SourceFileInfo
import org.slf4j.LoggerFactory

object EnsimeClientTestMain {
  val logger = LoggerFactory.getLogger("EnsimeClientTestMain")

  import EnsimeClientHelper._

  def main(args: Array[String]): Unit = {
    logger.info("EnsimeClientTestMain started")
    val system = ActorSystem()
    val projectPath = Path("/workspace/ensime-server/")
    startServer(system, projectPath, MemoryConfig(), skipCreate = true)
    val (_, api) = connectToServer(system, projectPath)

    logger.info("Connection ready - requesting info")
    val ci = api.connectionInfo()
    logger.info(s"Connection Info received $ci")
    val ci2 = api.connectionInfo()

    val srcPath = projectPath / "api" / "src" / "main" / "scala"
    val files = ls.rec ! srcPath |? (_.ext == "scala")
    files.foreach { f =>
      logger.info("Asking symbols for " + f)
      val file = new java.io.File(f.toString())
      // val symbols = api.symbolDesignations(file, 0, Integer.MAX_VALUE, SourceSymbol.allSymbols)
      println(" xxxxx - sending request " + f.relativeTo(projectPath))
      api.typecheckFile(SourceFileInfo(file))
      println(" xxxxx - after request")
      Thread.sleep(10000)
    }
  }

  def startServer(actorSystem: ActorSystem, projectPath: Path, memoryConfig: MemoryConfig, skipCreate: Boolean = false): Unit = {
    logger.info("Initialising ensime")
    val wsc = new EnsimeServerStartup(actorSystem, projectPath, memoryConfig)
    if (!skipCreate)
      wsc.create()
    logger.info("Starting server")
    wsc.startServer()
    logger.info("Project startup complete")
  }
}
rorygraves/ensime-client
src/main/scala/org/ensime/client/EnsimeClientTestMain.scala
Scala
apache-2.0
1,687
/*
 * Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
 */
package play.core.server.common

import java.net.{ Inet4Address, Inet6Address, InetAddress }

import com.google.common.net.InetAddresses
import play.core.server.common.ForwardedHeaderHandler.{ ForwardedHeaderVersion, Rfc7239, Xforwarded }
import play.core.server.common.NodeIdentifierParser._

import scala.util.Try
import scala.util.parsing.combinator.RegexParsers

/**
 * The NodeIdentifierParser object can parse node identifiers described in RFC 7239.
 *
 * @param version The version of the forwarded headers that we want to parse nodes for.
 *                The version is used to switch between IP address parsing behavior.
 */
private[common] class NodeIdentifierParser(version: ForwardedHeaderVersion) extends RegexParsers {

  def parseNode(s: String): Either[String, (IpAddress, Option[Port])] = {
    parse(node, s) match {
      case Success(matched, _) => Right(matched)
      case Failure(msg, _) => Left("failure: " + msg)
      case Error(msg, _) => Left("error: " + msg)
    }
  }

  private lazy val node = phrase(nodename ~ opt(":" ~> nodeport)) ^^ {
    case x ~ y => x -> y
  }

  private lazy val nodename = version match {
    case Rfc7239 =>
      // RFC 7239 recognizes IPv4 addresses, escaped IPv6 addresses, unknown and obfuscated addresses
      (ipv4Address | "[" ~> ipv6Address <~ "]" | "unknown" | obfnode) ^^ {
        case x: Inet4Address => Ip(x)
        case x: Inet6Address => Ip(x)
        case "unknown" => UnknownIp
        case x => ObfuscatedIp(x.toString)
      }
    case Xforwarded =>
      // X-Forwarded-For recognizes IPv4 and escaped or unescaped IPv6 addresses
      (ipv4Address | "[" ~> ipv6Address <~ "]" | ipv6Address) ^^ {
        case x: Inet4Address => Ip(x)
        case x: Inet6Address => Ip(x)
      }
  }

  private lazy val ipv4Address = regex("[\\d\\.]{7,15}".r) ^? inetAddress

  private lazy val ipv6Address = regex("[\\da-fA-F:\\.]+".r) ^? inetAddress

  private lazy val obfnode = regex("_[\\p{Alnum}\\._-]+".r)

  private lazy val nodeport = (port | obfport) ^^ {
    case x: Int => PortNumber(x)
    case x => ObfuscatedPort(x.toString)
  }

  private lazy val port = regex("\\d{1,5}".r) ^? {
    case x if x.toInt <= 65535 => x.toInt
  }

  private def obfport = regex("_[\\p{Alnum}\\._-]+".r)

  private def inetAddress = new PartialFunction[String, InetAddress] {
    def isDefinedAt(s: String) = Try { InetAddresses.forString(s) }.isSuccess
    def apply(s: String) = Try { InetAddresses.forString(s) }.get
  }
}

private[common] object NodeIdentifierParser {

  sealed trait Port
  case class PortNumber(number: Int) extends Port
  case class ObfuscatedPort(s: String) extends Port

  sealed trait IpAddress
  case class Ip(ip: InetAddress) extends IpAddress
  case class ObfuscatedIp(s: String) extends IpAddress
  case object UnknownIp extends IpAddress
}
Shruti9520/playframework
framework/src/play-server/src/main/scala/play/core/server/common/NodeIdentifierParser.scala
Scala
apache-2.0
2,894
// A fatal error of the Scala compiler
// Scala compiler version 2.7.1-final -- (c) 2002-2011 LAMP/EPFL
// Carlos Loria [email protected]
// 7/10/2008

class A {
  var name: String = compiletime.uninitialized
  def getName() = name
  def this(name: String, age: Int) = { this(); this.name = name }
}

class B(name: String) extends A(name, 0) {
}

class D {
  object A {
    def unapply(p: A) = Some(p.getName())
  }
  object B {
    def unapply(p: B) = Some(p.getName())
  }
  def foo(p: Any) = p match {
    case B(n) => println("B")
    case A(n) => println("A")
  }
}
dotty-staging/dotty
tests/pos/t1035.scala
Scala
apache-2.0
583
package scala.meta.converter import scala.meta.TreeConverterTestBaseNoLibrary import scala.meta._ class TreeConverterDeclTest extends TreeConverterTestBaseNoLibrary { def testVal(): Unit = { doTest( "val x,y: Int", Decl.Val(Nil, List(Pat.Var(Term.Name("x")), Pat.Var(Term.Name("y"))), Type.Name("Int")) ) } def testVar(): Unit = { doTest( "var x: Int", Decl.Var(Nil, List(Pat.Var(Term.Name("x"))), Type.Name("Int")) ) } def testMultiVal(): Unit = { doTest( "val x, y: Int", Decl.Val(Nil, List(Pat.Var(Term.Name("x")), Pat.Var(Term.Name("y"))), Type.Name("Int")) ) } def testMultiVar(): Unit = { doTest( "var x, y: Int", Decl.Var(Nil, List(Pat.Var(Term.Name("x")), Pat.Var(Term.Name("y"))), Type.Name("Int")) ) } def testTypeT(): Unit = { doTest( "type T", Decl.Type(Nil, Type.Name("T"), Nil, Type.Bounds(None, None)) ) } def testTypeUpperBound(): Unit = { doTest( "type T <: Any", Decl.Type(Nil, Type.Name("T"), Nil, Type.Bounds(None, Some(Type.Name("Any")))) ) } def testTypeLowerBound(): Unit = { doTest( "type T >: Any", Decl.Type(Nil, Type.Name("T"), Nil, Type.Bounds(Some(Type.Name("Any")), None)) ) } def testBothTypeBounds(): Unit = { doTest( "type T >: Any <: Int", Decl.Type(Nil, Type.Name("T"), Nil, Type.Bounds(Some(Type.Name("Any")), Some(Type.Name("Int")))) ) } def testParametrizedType(): Unit = { doTest( "type F[T]", Decl.Type(Nil, Type.Name("F"), Type.Param(Nil, Type.Name("T"), Nil, Type.Bounds(None, None), Nil, Nil) :: Nil, Type.Bounds(None, None)) ) } def testParametrizedAnonType(): Unit = { doTest( "type F[_]", Decl.Type(Nil, Type.Name("F"), Type.Param(Nil, Name.Anonymous(), Nil, Type.Bounds(None, None), Nil, Nil) :: Nil, Type.Bounds(None, None)) ) } def testParametrizedWithUpperBoundType(): Unit = { doTest( "type F[T <: Any]", Decl.Type(Nil, Type.Name("F"), Type.Param(Nil, Type.Name("T"), Nil, Type.Bounds(None, Some(Type.Name("Any"))), Nil, Nil) :: Nil, Type.Bounds(None, None)) ) } def testCovariantType(): Unit = { doTest( "type F[+T]", Decl.Type(Nil, Type.Name("F"), Type.Param(Mod.Covariant() :: Nil, Type.Name("T"), Nil, Type.Bounds(None, None), Nil, Nil) :: Nil, Type.Bounds(None, None)) ) } def testContravariantType(): Unit = { doTest( "type F[-T]", Decl.Type(Nil, Type.Name("F"), Type.Param(Mod.Contravariant() :: Nil, Type.Name("T"), Nil, Type.Bounds(None, None), Nil, Nil) :: Nil, Type.Bounds(None, None)) ) } def testDefNoReturnType(): Unit = { doTest( "def f", Decl.Def(Nil, Term.Name("f"), Nil, Nil, Type.Name("Unit")) ) } def testDefWithReturnType(): Unit = { doTest( "def f: Int", Decl.Def(Nil, Term.Name("f"), Nil, Nil, Type.Name("Int")) ) } def testDefOneParameter(): Unit = { doTest( "def f(x: Int)", Decl.Def(Nil, Term.Name("f"), Nil, (Term.Param(Nil, Term.Name("x"), Some(Type.Name("Int")), None) :: Nil) :: Nil, Type.Name("Unit")) ) } def testDefManyParameters(): Unit = { doTest( "def f(x: Int, y: Int)", Decl.Def(Nil, Term.Name("f"), Nil, (Term.Param(Nil, Term.Name("x"), Some(Type.Name("Int")), None) :: Term.Param(Nil, Term.Name("y"), Some(Type.Name("Int")), None) :: Nil) :: Nil, Type.Name("Unit")) ) } def testDefMiltiParameterList(): Unit = { doTest( "def f(x: Int)(y: Int)", Decl.Def(Nil, Term.Name("f"), Nil, (Term.Param(Nil, Term.Name("x"), Some(Type.Name("Int")), None) :: Nil) :: (Term.Param(Nil, Term.Name("y"), Some(Type.Name("Int")), None) :: Nil) :: Nil, Type.Name("Unit")) ) } def testDefVararg(): Unit = { doTest( "def f (a: Int*)", Decl.Def(Nil, Term.Name("f"), Nil, List(List(Term.Param(Nil, Term.Name("a"), Some(Type.Repeated(Type.Name("Int"))), None))), Type.Name("Unit")) ) } 
def testImplicitArgument(): Unit = { doTest( "def f(implicit x: Int)", Decl.Def(Nil, Term.Name("f"), Nil, (Term.Param(Mod.Implicit() :: Nil, Term.Name("x"), Some(Type.Name("Int")), None) :: Nil) :: Nil, Type.Name("Unit")) ) } def testDefTypeArgs(): Unit = { doTest( "def f[T]: T", Decl.Def(Nil, Term.Name("f"), Type.Param(Nil, Type.Name("T"), Nil, Type.Bounds(None, None), Nil, Nil) :: Nil, Nil, Type.Name("T")) ) } def testLocalDeclarations(): Unit = { doTest( "def f = { val x = 42 }", Defn.Def(Nil, Term.Name("f"), Nil, Nil, None, Term.Block(List(Defn.Val(Nil, List(Pat.Var(Term.Name("x"))), None, Lit.Int(42))))) ) } }
JetBrains/intellij-scala
scala/scala-impl/test/scala/meta/converter/TreeConverterDeclTest.scala
Scala
apache-2.0
4,913
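The test above checks IntelliJ's PSI-to-scala.meta converter against hand-built trees. As a rough illustration of what one of those expectations encodes, the sketch below uses scalameta's own parser instead of the IntelliJ converter (assuming a recent scalameta version whose default dialect is in implicit scope); `doTest` and the plugin infrastructure are not involved.

import scala.meta._

object DeclValStructureCheck extends App {
  // The expected tree from testMultiVal above, built by hand.
  val expected =
    Decl.Val(Nil, List(Pat.Var(Term.Name("x")), Pat.Var(Term.Name("y"))), Type.Name("Int"))

  // Parse the same source text with scalameta's parser and compare structures,
  // which is the notion of equality these converter tests rely on.
  val parsed = "val x, y: Int".parse[Stat].get
  assert(parsed.structure == expected.structure)
}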
package ca.uwo.eng.sel.cepsim.gen import scala.concurrent.duration.Duration /** UniformIncreaseGenerator companion object. */ object UniformIncreaseGenerator { def apply(increaseDuration: Duration, maxRate: Double) = new UniformIncreaseGenerator(increaseDuration, maxRate) } /** * Event generator in which the generation rate increases uniformly during a period, until it reaches * a maximum rate. From then until the end of the simulation, the generation rate is kept at * the maximum. * * @param increaseDuration Period of time when the generation rate increases. * @param maxRate Maximum generation rate (in events per second). */ class UniformIncreaseGenerator(val increaseDuration: Duration, val maxRate: Double) extends Generator { /** Alias. */ type Point = (Double, Double) /** Keeps track of the current simulation time. */ var currentPos = 0.0 /** The maximum rate converted to events per millisecond. */ val maxRateInMs = maxRate / 1000 /** The increaseDuration converted to milliseconds. */ val durationInMs = increaseDuration.toMillis /** Multiplier used during the rate growth period. */ val multiplier = maxRateInMs / durationInMs override def doGenerate(interval: Double): Double = { val nextPos = currentPos + interval var area = 0.0 // Position is still in the increasing part of the graph if (currentPos < durationInMs) { // the sampling interval starts at the increasing part, and ends at the constant part if (nextPos > durationInMs) { area = trapezoidArea((currentPos, 0), (currentPos, multiplier * currentPos), (durationInMs, 0), (durationInMs, maxRateInMs)) + rectangleArea((durationInMs, 0), (durationInMs, maxRateInMs), (nextPos, 0), (nextPos, maxRateInMs)) } else { area = trapezoidArea((currentPos, 0), (currentPos, multiplier * currentPos), (nextPos, 0), (nextPos, multiplier * nextPos)) } // position is in the constant part } else { // the integral is simply the area of the rectangle area = rectangleArea((currentPos, 0), (currentPos, maxRateInMs), (nextPos, 0), (nextPos, maxRateInMs)) } currentPos = nextPos area } /** * Calculates the area of a right triangle. It assumes the following vertex layout: * * + v3 * /| * / | * / | * v1 +---+v2 * * @param v1 First vertex. * @param v2 Second vertex. * @param v3 Third vertex. * @return The triangle area. */ def triangleRectangleArea(v1: Point, v2: Point, v3: Point): Double = ((v2._1 - v1._1) * (v3._2 - v2._2 )) / 2 /** * * Calculates the area of a rectangle. It assumes the following vertex layout: * * v2 +---+ v4 * | | * | | * | | * v1 +---+v3 * * @param v1 First vertex. * @param v2 Second vertex. * @param v3 Third vertex. * @param v4 Fourth vertex. * @return The rectangle area. */ def rectangleArea(v1: Point, v2: Point, v3: Point, v4: Point): Double = (v2._2 - v1._2) * (v3._1 - v1._1) /** * Calculates the area of a trapezoid. It assumes the following vertex layout: * * + v4 * /| * / | * / | * v2 + + tempPoint * | | * v1 +---+ v3 * * @param v1 First vertex. * @param v2 Second vertex. * @param v3 Third vertex. * @param v4 Fourth vertex. * @return The area of the trapezoid. */ def trapezoidArea(v1: Point, v2: Point, v3: Point, v4: Point): Double = { val tempPoint = (v3._1, v2._2) rectangleArea(v1, v2, v3, tempPoint) + triangleRectangleArea(v2, tempPoint, v4) } }
virsox/cepsim
cepsim-core/src/main/scala/ca/uwo/eng/sel/cepsim/gen/UniformIncreaseGenerator.scala
Scala
mit
3,877
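A minimal usage sketch for the generator above, assuming the `Generator` base trait leaves `doGenerate` callable by client code (in the real trait it may be wrapped by a public sampling method). Intervals are in milliseconds; with a 10-second ramp-up to 1000 events/s, three consecutive 5-second samples cover the triangular start, the remaining trapezoid, and the constant part of the rate curve.

import scala.concurrent.duration._

// maxRateInMs = 1.0 event/ms, durationInMs = 10000, multiplier = 0.0001
val gen = UniformIncreaseGenerator(10.seconds, 1000.0)

val ramp1  = gen.doGenerate(5000) // triangle:  5000 ms * 0.5 events/ms / 2 = 1250 events
val ramp2  = gen.doGenerate(5000) // trapezoid: (0.5 + 1.0) / 2 * 5000 ms   = 3750 events
val steady = gen.doGenerate(5000) // rectangle: 1.0 events/ms * 5000 ms     = 5000 events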
package argonaut /** * Utility for building the argonaut API over * various types. This is used to implement * StringWrap, and it is expected that it would * be used by integrations with other toolkits * to provide an argonaut API on their types. */ class ParseWrap[A](value: A, parser: Parse[A]) { /** * Parses the string value and either returns the failure message from parsing the string * or an instance of the Json type if parsing succeeds. */ def parse: Either[String, Json] = { parser.parse(value) } /** * Parses the string value and executes one of the given functions, depending on the parse outcome. * * @param success Run this function if the parse succeeds. * @param failure Run this function if the parse produces a failure. */ def parseWith[X](success: Json => X, failure: String => X): X = { parser.parseWith(value, success, failure) } /** * Parses the string value and executes one of the given functions, depending on the parse outcome. * Any error message is ignored. * * @param success Run this function if the parse succeeds. * @param failure Run this function if the parse produces a failure. */ def parseOr[X](success: Json => X, failure: => X): X = parser.parseOr(value, success, failure) /** * Parses the string value to a possible JSON value. */ def parseOption: Option[Json] = parser.parseOption(value) /** * Parses the string value and decodes it, returning the failure stemming from * either the JSON parsing or the decoding. */ def decode[X: DecodeJson]: Either[Either[String, (String, CursorHistory)], X] = parser.decode(value) /** * Parses the string value into a JSON value and, if it succeeds, decodes it to a data type. * * @param success Run this function if the parse produces a success. * @param parsefailure Run this function if the parse produces a failure. * @param decodefailure Run this function if the decode produces a failure. */ def decodeWith[Y, X: DecodeJson](success: X => Y, parsefailure: String => Y, decodefailure: (String, CursorHistory) => Y): Y = parser.decodeWith(value, success, parsefailure, decodefailure) /** * Parses the string value into a JSON value and, if it succeeds, decodes it to a data type. * * @param success Run this function if the parse produces a success. * @param failure Run this function if the parse produces a failure. */ def decodeWithEither[Y, X: DecodeJson](success: X => Y, failure: Either[String, (String, CursorHistory)] => Y): Y = parser.decodeWithEither(value, success, failure) /** * Parses the string value into a JSON value and, if it succeeds, decodes it to a data type. * * @param success Run this function if the parse produces a success. * @param failure Run this function if the parse produces a failure. */ def decodeWithMessage[Y, X: DecodeJson](success: X => Y, failure: String => Y): Y = parser.decodeWithMessage(value, success, failure) /** * Parses the string value into a JSON value and, if it succeeds, decodes it to a data type. * * @param success Run this function if the parse produces a success. * @param default Return this value if the parse or decode fails. */ def decodeOr[Y, X: DecodeJson](success: X => Y, default: => Y): Y = parser.decodeOr(value, success, default) /** * Parses and decodes the string value to a possible decoded value. */ def decodeOption[X: DecodeJson]: Option[X] = parser.decodeOption(value) /** * Parses and decodes the string value, returning either the failure message or the decoded value. */ def decodeEither[X: DecodeJson]: Either[String, X] = parser.decodeEither(value) }
jedws/argonaut
argonaut/src/main/scala/argonaut/ParseWrap.scala
Scala
bsd-3-clause
3,721
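Because `ParseWrap` is the machinery behind `StringWrap`, the usual way to exercise this API is through Argonaut's string syntax. A small sketch with the standard Argonaut imports (the JSON literals and target types are just illustrations):

import argonaut._, Argonaut._

// StringWrap puts a ParseWrap[String] behind every String.
val ok: Option[Json]        = """{"name":"Ada"}""".parseOption      // Some(...)
val bad: Option[Json]       = """{"name": """.parseOption           // None (invalid JSON)
val nums: Option[List[Int]] = "[1, 2, 3]".decodeOption[List[Int]]   // Some(List(1, 2, 3))
val strict: Either[String, List[Int]] = "[1, 2, 3]".decodeEither[List[Int]]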
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.mapreduce import org.apache.hadoop.conf.Configuration import task.{TaskAttemptContextImpl, JobContextImpl} trait HadoopMapReduceUtil { def newJobContext(conf: Configuration, jobId: JobID): JobContext = new JobContextImpl(conf, jobId) def newTaskAttemptContext(conf: Configuration, attemptId: TaskAttemptID): TaskAttemptContext = new TaskAttemptContextImpl(conf, attemptId) def newTaskAttemptID(jtIdentifier: String, jobId: Int, isMap: Boolean, taskId: Int, attemptId: Int) = new TaskAttemptID(jtIdentifier, jobId, isMap, taskId, attemptId) }
wgpshashank/spark
core/src/hadoop2/scala/org/apache/hadoop/mapreduce/HadoopMapReduceUtil.scala
Scala
apache-2.0
1,384
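The trait above exists so that code targeting the Hadoop 2 `mapreduce` API can construct job and task-attempt contexts without depending on constructor differences between Hadoop versions. A hedged sketch of mixing it in (the object name and the id values are illustrative, not Spark's actual call sites):

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.HadoopMapReduceUtil

object ContextFactoryExample extends HadoopMapReduceUtil {
  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    // Build a task-attempt id and wrap it in a TaskAttemptContext for the new MapReduce API.
    val attempt = newTaskAttemptID("local", jobId = 1, isMap = true, taskId = 0, attemptId = 0)
    val context = newTaskAttemptContext(conf, attempt)
    println(context.getTaskAttemptID)
  }
}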
package org.littlewings.infinispan.distiterator import java.util.Map import org.infinispan.Cache import org.infinispan.commons.util.CloseableIterator import org.infinispan.manager.DefaultCacheManager import org.infinispan.util.function.{SerializableConsumer, SerializableFunction} import org.scalatest.{FunSuite, Matchers} import scala.collection.JavaConverters._ class DistributedIteratorSuite extends FunSuite with Matchers { test("distributed cache iterator") { withCache[String, Integer]("distributedCache", 3) { cache => (1 to 10).foreach(i => cache.put(s"key${i}", i)) val iterator: CloseableIterator[java.util.Map.Entry[String, Integer]] = cache.entrySet.iterator.asInstanceOf[CloseableIterator[java.util.Map.Entry[String, Integer]]] /* while (iterator.hasNext()) { println(iterator.next()) } */ iterator.asScala.toList.map(e => e.getKey -> e.getValue) should contain theSameElementsAs Array( "key1" -> 1, "key2" -> 2, "key3" -> 3, "key4" -> 4, "key5" -> 5, "key6" -> 6, "key7" -> 7, "key8" -> 8, "key9" -> 9, "key10" -> 10 ) iterator.close() } } test("replicated cache iterator") { withCache[String, Integer]("replicatedCache", 3) { cache => (1 to 10).foreach(i => cache.put(s"key${i}", i)) val iterator: CloseableIterator[java.util.Map.Entry[String, Integer]] = cache.entrySet.iterator.asInstanceOf[CloseableIterator[java.util.Map.Entry[String, Integer]]] /* while (iterator.hasNext()) { println(iterator.next()) } */ iterator.asScala.toList.map(e => e.getKey -> e.getValue) should contain theSameElementsAs Array( "key1" -> 1, "key2" -> 2, "key3" -> 3, "key4" -> 4, "key5" -> 5, "key6" -> 6, "key7" -> 7, "key8" -> 8, "key9" -> 9, "key10" -> 10 ) iterator.close() } } test("local cache iterator") { withCache[String, Integer]("localCache") { cache => (1 to 10).foreach(i => cache.put(s"key${i}", i)) val iterator: CloseableIterator[java.util.Map.Entry[String, Integer]] = cache.entrySet.iterator.asInstanceOf[CloseableIterator[java.util.Map.Entry[String, Integer]]] /* while (iterator.hasNext()) { println(iterator.next()) } */ iterator.asScala.toList.map(e => e.getKey -> e.getValue) should contain theSameElementsAs Array( "key1" -> 1, "key2" -> 2, "key3" -> 3, "key4" -> 4, "key5" -> 5, "key6" -> 6, "key7" -> 7, "key8" -> 8, "key9" -> 9, "key10" -> 10 ) iterator.close() } } test("distributed cache stream") { withCache[String, Integer]("distributedCache", 3) { cache => (1 to 10).foreach(i => cache.put(s"key${i}", i)) val stream = cache.entrySet.stream try { stream .map[String](new SerializableFunction[java.util.Map.Entry[String, Integer], String] { override def apply(e: Map.Entry[String, Integer]): String = e.getKey }) .forEach(new SerializableConsumer[String] { override def accept(v: String): Unit = println(v) }) } finally { stream.close() } } } protected def withCache[K, V](cacheName: String, numInstances: Int = 1)(fun: Cache[K, V] => Unit): Unit = { val managers = (1 to numInstances).map(_ => new DefaultCacheManager("infinispan.xml")) try { managers.foreach(_.getCache[K, V](cacheName)) val cache = managers(0).getCache[K, V](cacheName) fun(cache) cache.stop() } finally { managers.foreach(_.stop()) } } }
kazuhira-r/infinispan-getting-started
embedded-distributed-iterator/src/test/scala/org/littlewings/infinispan/distiterator/DistributedIteratorSuite.scala
Scala
mit
3,621
class Constructor { def <caret>this(i: Int) { this() } val c = new Constructor(1) } class ConstructorChild extends Constructor(0)
ilinum/intellij-scala
testdata/changeSignature/fromScala/SecConstructor.scala
Scala
apache-2.0
142
/** * Copyright 2011-2012 @WalmartLabs, a division of Wal-Mart Stores, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.walmartlabs.mupd8; import org.junit.runner.RunWith import org.junit.Assert._ import org.scalatest.junit.JUnitRunner import org.scalatest.FunSuite import org.scale7.cassandra.pelops._ import com.walmartlabs.mupd8.Mupd8Main._ import com.walmartlabs.mupd8.examples._ @RunWith(classOf[JUnitRunner]) class AppInfoTest extends FunSuite { val cfgDir = this.getClass().getClassLoader().getResource("testapp").getPath() val appInfo = new AppStaticInfo(Some(cfgDir), None, None) test("test ttl extraction") { val k3 = "K3Updater" val k3TTL = appInfo.performers(appInfo.performerName2ID(k3)).ttl assertTrue(k3TTL == Mutator.NO_TTL) val k4 = "K4Updater" val k4TTL = appInfo.performers(appInfo.performerName2ID(k4)).ttl assertTrue(k4TTL == 300) } }
walmartlabs/mupd8
src/test/scala/com/walmartlabs/mupd8/AppInfoTest.scala
Scala
apache-2.0
1,428
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive.execution import org.apache.spark.sql.QueryTest import org.apache.spark.sql.catalyst.parser.ParseException import org.apache.spark.sql.hive.test.TestHiveSingleton import org.apache.spark.sql.test.SQLTestUtils /** * A set of tests that validates support for Hive Explain command. */ class HiveExplainSuite extends QueryTest with SQLTestUtils with TestHiveSingleton { test("explain extended command") { checkKeywordsExist(sql(" explain select * from src where key=123 "), "== Physical Plan ==") checkKeywordsNotExist(sql(" explain select * from src where key=123 "), "== Parsed Logical Plan ==", "== Analyzed Logical Plan ==", "== Optimized Logical Plan ==") checkKeywordsExist(sql(" explain extended select * from src where key=123 "), "== Parsed Logical Plan ==", "== Analyzed Logical Plan ==", "== Optimized Logical Plan ==", "== Physical Plan ==") } test("explain create table command") { checkKeywordsExist(sql("explain create table temp__b as select * from src limit 2"), "== Physical Plan ==", "InsertIntoHiveTable", "Limit", "src") checkKeywordsExist(sql("explain extended create table temp__b as select * from src limit 2"), "== Parsed Logical Plan ==", "== Analyzed Logical Plan ==", "== Optimized Logical Plan ==", "== Physical Plan ==", "CreateHiveTableAsSelect", "InsertIntoHiveTable", "Limit", "src") checkKeywordsExist(sql( """ | EXPLAIN EXTENDED CREATE TABLE temp__b | ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" | WITH SERDEPROPERTIES("serde_p1"="p1","serde_p2"="p2") | STORED AS RCFile | TBLPROPERTIES("tbl_p1"="p11", "tbl_p2"="p22") | AS SELECT * FROM src LIMIT 2 """.stripMargin), "== Parsed Logical Plan ==", "== Analyzed Logical Plan ==", "== Optimized Logical Plan ==", "== Physical Plan ==", "CreateHiveTableAsSelect", "InsertIntoHiveTable", "Limit", "src") } test("SPARK-17409: The EXPLAIN output of CTAS only shows the analyzed plan") { withTempView("jt") { val rdd = sparkContext.parallelize((1 to 10).map(i => s"""{"a":$i, "b":"str$i"}""")) spark.read.json(rdd).createOrReplaceTempView("jt") val outputs = sql( s""" |EXPLAIN EXTENDED |CREATE TABLE t1 |AS |SELECT * FROM jt """.stripMargin).collect().map(_.mkString).mkString val shouldContain = "== Parsed Logical Plan ==" :: "== Analyzed Logical Plan ==" :: "Subquery" :: "== Optimized Logical Plan ==" :: "== Physical Plan ==" :: "CreateHiveTableAsSelect" :: "InsertIntoHiveTable" :: "jt" :: Nil for (key <- shouldContain) { assert(outputs.contains(key), s"$key doesn't exist in result") } val physicalIndex = outputs.indexOf("== Physical Plan ==") assert(outputs.substring(physicalIndex).contains("Subquery"), "Physical Plan should contain SubqueryAlias since the query should not be optimized") } } test("EXPLAIN CODEGEN command") 
{ checkKeywordsExist(sql("EXPLAIN CODEGEN SELECT 1"), "WholeStageCodegen", "Generated code:", "/* 001 */ public Object generate(Object[] references) {", "/* 002 */ return new GeneratedIterator(references);", "/* 003 */ }" ) checkKeywordsNotExist(sql("EXPLAIN CODEGEN SELECT 1"), "== Physical Plan ==" ) intercept[ParseException] { sql("EXPLAIN EXTENDED CODEGEN SELECT 1") } } }
ZxlAaron/mypros
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveExplainSuite.scala
Scala
apache-2.0
4,622
package com.typesafe.slick.testkit.tests import com.typesafe.slick.testkit.util.{RelationalTestDB, AsyncTest} class CountTest extends AsyncTest[RelationalTestDB] { import tdb.profile.api._ def testSimple = { class TestTable(tag: Tag) extends Table[Int](tag, "TEST") { def id = column[Int]("ID") def * = id } val testTable = TableQuery(new TestTable(_)) for { _ <- testTable.schema.create _ <- testTable ++= Seq(1, 2, 3, 4, 5) q1 = Query(testTable.length) _ <- q1.result.map(_ shouldBe Vector(5)) q2 = testTable.length _ <- q2.result.map(_ shouldBe 5) q3 = testTable.filter(_.id < 3).length _ <- q3.result.map(_ shouldBe 2) q4 = testTable.take(2).length _ <- q4.result.map(_ shouldBe 2) } yield () } def testJoin = { class Categories(tag: Tag) extends Table[(Int, String)](tag, "cat_j") { def id = column[Int]("id") def name = column[String]("name") def * = (id, name) } val categories = TableQuery[Categories] class Posts(tag: Tag) extends Table[(Int, String, Int)](tag, "posts_j") { def id = column[Int]("id", O.PrimaryKey, O.AutoInc) def title = column[String]("title") def category = column[Int]("category") def * = (id, title, category) } val posts = TableQuery[Posts] for { _ <- (categories.schema ++ posts.schema).create _ <- categories ++= Seq((1, "Scala"), (2, "JVM"), (3, "Java"), (4, "Erlang"), (5, "Haskell")) _ <- posts ++= Seq((1, "Shiny features", 1), (2, "HotSpot", 2)) joinedQuery = for { c <- categories p <- posts if p.category === c.id } yield (c, p) q1 = joinedQuery.length _ <- q1.result.map(_ shouldBe 2) q2 = Query(joinedQuery.length) _ <- q2.result.map(_ shouldBe Vector(2)) } yield () } def testJoinCount = { class A(tag: Tag) extends Table[Long](tag, "a_j") { def id = column[Long]("id", O.PrimaryKey) def * = id } lazy val as = TableQuery[A] class B(tag: Tag) extends Table[(Long, String)](tag, "b_j") { def aId = column[Long]("a_id") def data = column[String]("data") def * = (aId, data) } lazy val bs = TableQuery[B] DBIO.seq( (as.schema ++ bs.schema).create, as ++= Seq(1L, 2L), bs ++= Seq((1L, "1a"), (1L, "1b"), (2L, "2")), (for { a <- as if a.id === 1L } yield (a, (for { b <- bs if b.aId === a.id } yield b).length)).result.named("directLength").map(_ shouldBe Seq((1L, 2))), (for { a <- as if a.id === 1L l <- Query((for { b <- bs if b.aId === a.id } yield b).length) } yield (a, l)).result.named("joinLength").map(_ shouldBe Seq((1L, 2))), (for { (a, b) <- as joinLeft bs on (_.id === _.aId) } yield (a.id, b.map(_.data))).length.result.named("outerJoinLength").map(_ shouldBe 3) ) } def testTableCount = { class T(tag: Tag) extends Table[(Long, String, Long, Option[Long], Option[Long])](tag, "TABLECOUNT_T") { def a = column[Long]("ID") def b = column[String]("B") def c = column[Long]("C") def d = column[Option[Long]]("DISCONTINUED") def e = column[Option[Long]]("E") def * = (a, b, c, d, e) } val ts = TableQuery[T] DBIO.seq( ts.schema.create, ts += (1L, "a", 1L, None, None), ts.length.result.map(_ shouldBe 1) ).withPinnedSession } }
jkutner/slick
slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/CountTest.scala
Scala
bsd-2-clause
3,506
import play.api.http.HttpFilters import play.filters.csrf.CSRFFilter import javax.inject.Inject class Filters @Inject() (csrfFilter: CSRFFilter) extends HttpFilters { def filters = Seq(csrfFilter) }
adrianhurt/play-bootstrap3
play26-bootstrap4/sample/app/Filters.scala
Scala
apache-2.0
201
package gov.wicourts.json.formlet import argonaut.Json import argonaut.ArgonautScalaz._ import scalaz.{Equal, Monoid} import scalaz.std.list._ import scalaz.std.string._ import scalaz.std.tuple._ import scalaz.syntax.equal._ sealed trait JsonBuilder { def toJson: Json } class JsonArrayBuilder private [formlet] (private val items: List[Json]) extends JsonBuilder { def toJson: Json = Json.array(items: _*) } object JsonArrayBuilder { implicit val jsonArrayBuilderMonoid: Monoid[JsonArrayBuilder] = new Monoid[JsonArrayBuilder] { def zero: JsonArrayBuilder = new JsonArrayBuilder(Nil) def append(f1: JsonArrayBuilder, f2: => JsonArrayBuilder): JsonArrayBuilder = new JsonArrayBuilder(f1.items ++ f2.items) } implicit val jsonArrayBuilderEqual: Equal[JsonArrayBuilder] = Equal.equal((a1, a2) => a1.items === a2.items ) def item(json: Json): JsonArrayBuilder = new JsonArrayBuilder(List(json)) } class JsonObjectBuilder private [formlet] ( private val items: List[(String, Json)] ) extends JsonBuilder { def toJson: Json = Json.obj(items: _*) override def toString: String = items.mkString("JsonObjectBuilder(", ", ", ")") } object JsonObjectBuilder { implicit val jsonObjectBuilderMonoid: Monoid[JsonObjectBuilder] = new Monoid[JsonObjectBuilder] { def zero: JsonObjectBuilder = new JsonObjectBuilder(Nil) def append(f1: JsonObjectBuilder, f2: => JsonObjectBuilder): JsonObjectBuilder = new JsonObjectBuilder(f1.items ++ f2.items) } implicit val jsonObjectBuildEqual: Equal[JsonObjectBuilder] = Equal.equal((a1, a2) => a1.items === a2.items ) def row(name: String, json: Json): JsonObjectBuilder = new JsonObjectBuilder(List((name, json))) }
ccap/json-formlets
src/main/scala/gov/wicourts/json/formlet/JsonBuilder.scala
Scala
mit
1,729
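The builders above are meant to be accumulated through their scalaz `Monoid` instances. A short sketch of building an object row by row and an array item by item (field names and values are made up):

import argonaut.Json
import scalaz.syntax.semigroup._

val person =
  JsonObjectBuilder.row("name", Json.jString("Ada")) |+|
  JsonObjectBuilder.row("language", Json.jString("Scala"))

val tags =
  JsonArrayBuilder.item(Json.jString("json")) |+| JsonArrayBuilder.item(Json.jString("formlet"))

person.toJson.nospaces // {"name":"Ada","language":"Scala"}
tags.toJson.nospaces   // ["json","formlet"]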
package com.andy import javax.ws.rs.{GET, Path, Produces} import javax.ws.rs.core.MediaType @Path("/bonjour") class BonjourResource { @GET @Produces(Array[String](MediaType.TEXT_PLAIN)) def hello() = "Hello RESTEasy" }
quarkusio/quarkus
independent-projects/tools/devtools-testing/src/test/resources/__snapshots__/QuarkusCodestartGenerationTest/generateRESTEasyScalaCustom/src_main_scala_com_andy_BonjourResource.scala
Scala
apache-2.0
233
/* * ****************************************************************************** * Copyright 2012-2013 SpotRight * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ****************************************************************************** */ package com.spotright.polidoro package modelish abstract class ColumnPathish[K: Manifest, N: Manifest] extends RowPathish[K] { val colname: N }
SpotRight/Polidoro
src/main/scala/com/spotright/polidoro/modelish/ColumnPathish.scala
Scala
apache-2.0
940
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.carbondata.spark.rdd import java.util.concurrent.ExecutorService import scala.collection.JavaConverters._ import org.apache.spark.sql.{CarbonSession, SQLContext} import org.apache.spark.sql.execution.command.CompactionModel import org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand import org.apache.spark.sql.execution.command.preaaggregate.PreAggregateUtil import org.apache.carbondata.core.constants.CarbonCommonConstants import org.apache.carbondata.core.datamap.Segment import org.apache.carbondata.core.datastore.impl.FileFactory import org.apache.carbondata.core.statusmanager.{SegmentStatus, SegmentStatusManager} import org.apache.carbondata.core.util.path.CarbonTablePath import org.apache.carbondata.events.OperationContext import org.apache.carbondata.processing.loading.model.CarbonLoadModel import org.apache.carbondata.processing.merger.{CarbonDataMergerUtil, CompactionType} /** * Used to perform compaction on Aggregate data map. */ class AggregateDataMapCompactor(carbonLoadModel: CarbonLoadModel, compactionModel: CompactionModel, executor: ExecutorService, sqlContext: SQLContext, storeLocation: String, operationContext: OperationContext) extends Compactor(carbonLoadModel, compactionModel, executor, sqlContext, storeLocation) { override def executeCompaction(): Unit = { val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable val loadMetaDataDetails = identifySegmentsToBeMerged() // If segmentFile name is specified in load details then segment is for partition table // therefore the segment file name should be loadName#segmentFileName.segment val segments = loadMetaDataDetails.asScala.map { loadDetail => new Segment(loadDetail.getLoadName, loadDetail.getSegmentFile, null).toString } if (segments.nonEmpty) { val mergedLoadName = CarbonDataMergerUtil.getMergedLoadName(loadMetaDataDetails).split("_")(1) CarbonSession.threadSet( CarbonCommonConstants.CARBON_INPUT_SEGMENTS + carbonLoadModel.getDatabaseName + "." + carbonLoadModel.getTableName, segments.mkString(",")) CarbonSession.threadSet( CarbonCommonConstants.VALIDATE_CARBON_INPUT_SEGMENTS + carbonLoadModel.getDatabaseName + "." 
+ carbonLoadModel.getTableName, "false") val loadCommand = operationContext.getProperty(carbonTable.getTableName + "_Compaction") .asInstanceOf[CarbonLoadDataCommand] val uuid = Option(loadCommand.operationContext.getProperty("uuid")).getOrElse("").toString try { val newInternalOptions = loadCommand.internalOptions ++ Map("mergedSegmentName" -> mergedLoadName) loadCommand.internalOptions = newInternalOptions loadCommand.dataFrame = Some(PreAggregateUtil.getDataFrame( sqlContext.sparkSession, loadCommand.logicalPlan.get)) CarbonSession.threadSet(CarbonCommonConstants.SUPPORT_DIRECT_QUERY_ON_DATAMAP, "true") loadCommand.processData(sqlContext.sparkSession) val newLoadMetaDataDetails = SegmentStatusManager.readLoadMetadata( carbonTable.getMetadataPath, uuid) val updatedLoadMetaDataDetails = newLoadMetaDataDetails collect { case load if loadMetaDataDetails.contains(load) => load.setMergedLoadName(mergedLoadName) load.setSegmentStatus(SegmentStatus.COMPACTED) load.setModificationOrdeletionTimesStamp(System.currentTimeMillis()) load case other => other } SegmentStatusManager.writeLoadDetailsIntoFile( CarbonTablePath.getTableStatusFilePathWithUUID(carbonTable.getTablePath, uuid), updatedLoadMetaDataDetails) carbonLoadModel.setLoadMetadataDetails(updatedLoadMetaDataDetails.toList.asJava) } finally { // check if any other segments needs compaction on in case of MINOR_COMPACTION. // For example: after 8.1 creation 0.1, 4.1, 8.1 have to be merged to 0.2 if threshhold // allows it. // Also as the load which will be fired for 2nd level compaction will read the // tablestatus file and not the tablestatus_UUID therefore we have to commit the // intermediate tablestatus file for 2nd level compaction to be successful. // This is required because: // 1. after doing 12 loads and a compaction after every 4 loads the table status file will // have 0.1, 4.1, 8, 9, 10, 11 as Success segments. While tablestatus_UUID will have // 0.1, 4.1, 8.1. // 2. Now for 2nd level compaction 0.1, 8.1, 4.1 have to be merged to 0.2. therefore we // need to read the tablestatus_UUID. But load flow should always read tablestatus file // because it contains the actual In-Process status for the segments. // 3. If we read the tablestatus then 8, 9, 10, 11 will keep getting compacted into 8.1. // 4. Therefore tablestatus file will be committed in between multiple commits. if (!compactionModel.compactionType.equals(CompactionType.MAJOR) && !compactionModel.compactionType.equals(CompactionType.CUSTOM)) { if (!identifySegmentsToBeMerged().isEmpty) { val uuidTableStaus = CarbonTablePath.getTableStatusFilePathWithUUID( carbonTable.getTablePath, uuid) val tableStatus = CarbonTablePath.getTableStatusFilePath(carbonTable.getTablePath) if (!uuidTableStaus.equalsIgnoreCase(tableStatus)) { FileFactory.getCarbonFile(uuidTableStaus).renameForce(tableStatus) } executeCompaction() } } CarbonSession .threadUnset(CarbonCommonConstants.CARBON_INPUT_SEGMENTS + carbonLoadModel.getDatabaseName + "." + carbonLoadModel.getTableName) CarbonSession.threadUnset(CarbonCommonConstants.VALIDATE_CARBON_INPUT_SEGMENTS + carbonLoadModel.getDatabaseName + "." + carbonLoadModel.getTableName) LOGGER .info(s"Compaction request for datamap ${ carbonTable.getTableUniqueName } is successful") LOGGER .audit(s"Compaction request for datamap ${carbonTable.getTableUniqueName} is successful") } } } }
sgururajshetty/carbondata
integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/AggregateDataMapCompactor.scala
Scala
apache-2.0
7,253
package project_euler /* * Problem 1 * If we list all the natural numbers below 10 that are multiples of 3 or 5, * we get 3, 5, 6 and 9. * The sum of these multiples is 23. * Find the sum of all the multiples of 3 or 5 below 1000. * */ object Problem_1 {;import org.scalaide.worksheet.runtime.library.WorksheetSupport._; def main(args: Array[String])=$execute{;$skip(335); def func = (0 until 1000).filter(x => x % 3 ==0 || x % 5 ==0).sum;System.out.println("""func: => Int""");$skip(193); def imper = { //collect all values into a vector {for { i <- 0 until 1000 if( i % 3 ==0 || i % 5 ==0) }yield i }.sum //sum the values of the collected vector };System.out.println("""imper: => Int""");$skip(160); //java style imperative def imper2 = { var sum = 0 for (i <- 0 until 1000) if( i % 3 ==0 || i % 5 ==0) sum +=i sum };System.out.println("""imper2: => Int""");$skip(160); def imper3 = { var i,sum =0 while (i < 1000) { if( i % 3 ==0 || i % 5 ==0) sum +=i i+=1 //increase counter } sum };System.out.println("""imper3: => Int""");$skip(33); val t1 = System.nanoTime();System.out.println("""t1 : Long = """ + $show(t1 ));$skip(18); val res = func;System.out.println("""res : Int = """ + $show(res ));$skip(43); val t2 = (System.nanoTime() - t1 )/1000;System.out.println("""t2 : Long = """ + $show(t2 ));$skip(54); println(s"The result is: $res time taken $t2 ms ")} }
firephil/scala-math-problems
.worksheet/src/project_euler.Problem_1.scala
Scala
mit
1,528
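The file above is the instrumented source that the Scala IDE worksheet plugin generates (the `$skip`/`$show` calls and the `WorksheetSupport` import are bookkeeping, not hand-written code). For readability, an equivalent plain Scala version of the same Problem 1 solution looks roughly like this:

object Problem1 {
  def main(args: Array[String]): Unit = {
    // Sum of all multiples of 3 or 5 below 1000.
    val result = (0 until 1000).filter(x => x % 3 == 0 || x % 5 == 0).sum
    println(s"The result is: $result") // 233168
  }
}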
/* * Copyright (c) 2014-2015 by its authors. Some rights reserved. * See the project homepage at: http://www.monifu.org * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monifu.reactive.observers import minitest.TestSuite import monifu.concurrent.Scheduler import monifu.concurrent.schedulers.TestScheduler import monifu.reactive.Ack.{Cancel, Continue} import monifu.reactive.OverflowStrategy.ClearBuffer import monifu.reactive.exceptions.DummyException import monifu.reactive.{Subscriber, Ack, Observer} import scala.concurrent.{Future, Promise} object BufferClearBufferThenSignalSuite extends TestSuite[TestScheduler] { def setup() = TestScheduler() def tearDown(s: TestScheduler) = { assert(s.state.get.tasks.isEmpty, "TestScheduler should have no pending tasks") } def buildNew(bufferSize: Int, underlying: Observer[Int]) (implicit s: Scheduler): BufferedSubscriber[Int] = { BufferedSubscriber(Subscriber(underlying, s), ClearBuffer(bufferSize), nr => nr.toInt) } test("should not lose events, test 1") { implicit s => var number = 0 var wasCompleted = false val underlying = new Observer[Int] { def onNext(elem: Int): Future[Ack] = { number += 1 Continue } def onError(ex: Throwable): Unit = { s.reportFailure(ex) } def onComplete(): Unit = { wasCompleted = true } } val buffer = buildNew(1000, underlying) for (i <- 0 until 1000) buffer.onNext(i) buffer.onComplete() assert(!wasCompleted) s.tick() assertEquals(number, 1000) assert(wasCompleted) } test("should not lose events, test 2") { implicit s => var number = 0 var completed = false val underlying = new Observer[Int] { def onNext(elem: Int): Future[Ack] = { number += 1 Continue } def onError(ex: Throwable): Unit = { s.reportFailure(ex) } def onComplete(): Unit = { completed = true } } val buffer = buildNew(1000, underlying) def loop(n: Int): Unit = if (n > 0) s.execute { buffer.onNext(n); loop(n-1) } else buffer.onComplete() loop(10000) assert(!completed) assertEquals(number, 0) s.tick() assert(completed) assertEquals(number, 10000) } test("should drop old events when over capacity") { implicit s => var received = 0 var wasCompleted = false val promise = Promise[Ack]() val underlying = new Observer[Int] { def onNext(elem: Int) = { received += elem if (elem < 7) Continue else promise.future } def onError(ex: Throwable) = () def onComplete() = { wasCompleted = true } } val buffer = buildNew(5, underlying) for (i <- 1 to 7) assertEquals(buffer.onNext(i), Continue) s.tick() assertEquals(received, 28) for (i <- 0 to 150) { assertEquals(buffer.onNext(100 + i), Continue) s.tick() } assertEquals(received, 28) promise.success(Continue); s.tick() assertEquals(received, 28 + (247 to 250).sum + 147) buffer.onComplete(); s.tick() assert(wasCompleted, "wasCompleted should be true") } test("should send onError when empty") { implicit s => var errorThrown: Throwable = null val buffer = buildNew(5, new Observer[Int] { def onError(ex: Throwable) = { errorThrown = ex } def onNext(elem: Int) = throw new IllegalStateException() def onComplete() = throw new IllegalStateException() 
}) buffer.onError(DummyException("dummy")) s.tickOne() assertEquals(errorThrown, DummyException("dummy")) val r = buffer.onNext(1) assertEquals(r, Cancel) } test("should send onError when in flight") { implicit s => var errorThrown: Throwable = null val buffer = buildNew(5, new Observer[Int] { def onError(ex: Throwable) = { errorThrown = ex } def onNext(elem: Int) = Continue def onComplete() = throw new IllegalStateException() }) buffer.onNext(1) buffer.onError(DummyException("dummy")) s.tick() assertEquals(errorThrown, DummyException("dummy")) } test("should send onError when at capacity") { implicit s => var errorThrown: Throwable = null val promise = Promise[Ack]() val buffer = buildNew(5, new Observer[Int] { def onError(ex: Throwable) = { errorThrown = ex } def onNext(elem: Int) = promise.future def onComplete() = throw new IllegalStateException() }) for (i <- 1 to 10) assertEquals(buffer.onNext(i), Continue) buffer.onError(DummyException("dummy")) promise.success(Continue) s.tick() assertEquals(errorThrown, DummyException("dummy")) } test("should do onComplete only after all the queue was drained") { implicit s => var sum = 0L var wasCompleted = false val startConsuming = Promise[Continue]() val buffer = buildNew(10000, new Observer[Int] { def onNext(elem: Int) = { sum += elem startConsuming.future } def onError(ex: Throwable) = throw ex def onComplete() = wasCompleted = true }) (0 until 9999).foreach(x => buffer.onNext(x)) buffer.onComplete() startConsuming.success(Continue) s.tick() assert(wasCompleted) assert(sum == (0 until 9999).sum) } test("should do onComplete only after all the queue was drained, test2") { implicit s => var sum = 0L var wasCompleted = false val buffer = buildNew(10000, new Observer[Int] { def onNext(elem: Int) = { sum += elem Continue } def onError(ex: Throwable) = throw ex def onComplete() = wasCompleted = true }) (0 until 9999).foreach(x => buffer.onNext(x)) buffer.onComplete() s.tick() assert(wasCompleted) assert(sum == (0 until 9999).sum) } test("should do onError only after the queue was drained") { implicit s => var sum = 0L var errorThrown: Throwable = null val startConsuming = Promise[Continue]() val buffer = buildNew(10000, new Observer[Int] { def onNext(elem: Int) = { sum += elem startConsuming.future } def onError(ex: Throwable) = errorThrown = ex def onComplete() = throw new IllegalStateException() }) (0 until 9999).foreach(x => buffer.onNext(x)) buffer.onError(DummyException("dummy")) startConsuming.success(Continue) s.tick() assertEquals(errorThrown, DummyException("dummy")) assertEquals(sum, (0 until 9999).sum) } test("should do onError only after all the queue was drained, test2") { implicit s => var sum = 0L var errorThrown: Throwable = null val buffer = buildNew(10000, new Observer[Int] { def onNext(elem: Int) = { sum += elem Continue } def onError(ex: Throwable) = errorThrown = ex def onComplete() = throw new IllegalStateException() }) (0 until 9999).foreach(x => buffer.onNext(x)) buffer.onError(DummyException("dummy")) s.tick() assertEquals(errorThrown, DummyException("dummy")) assertEquals(sum, (0 until 9999).sum) } test("should do synchronous execution in batches") { implicit s => var received = 0L var wasCompleted = false val buffer = buildNew(s.env.batchSize * 3, new Observer[Int] { def onNext(elem: Int) = { received += 1 Continue } def onError(ex: Throwable) = () def onComplete() = wasCompleted = true }) for (i <- 0 until (s.env.batchSize * 2)) buffer.onNext(i) buffer.onComplete() assertEquals(received, 0) s.tickOne() 
assertEquals(received, s.env.batchSize) s.tickOne() assertEquals(received, s.env.batchSize * 2) s.tickOne() assertEquals(wasCompleted, true) } }
virtualirfan/monifu
monifu/shared/src/test/scala/monifu/reactive/observers/BufferClearBufferThenSignalSuite.scala
Scala
apache-2.0
8,378
package com.linkedin.norbert /* * Copyright 2009-2010 LinkedIn, Inc * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ import annotation.tailrec import java.util.concurrent.{ConcurrentMap} package object norbertutils { def binarySearch[T](array: Seq[T], value: T)(implicit ordering: Ordering[T]): Int = binarySearch(array, value, 0, array.length - 1) @tailrec private def binarySearch[T](array: Seq[T], value: T, lo: Int, hi: Int)(implicit ordering: Ordering[T]): Int = { if (lo > hi) -lo - 1 else { val mid = lo + ((hi - lo) >> 2) val middleValue = array(mid) if (ordering.gt(value, middleValue)) binarySearch(array, value, mid + 1, hi) else if (ordering.lt(value, middleValue)) binarySearch(array, value, lo, mid - 1) else mid } } def getOrElse[T](seq: Seq[T], index: Int, other: T): T = { if (0 <= index && index < seq.size) seq(index) else other } // TODO: Put this into a utility somewhere? Scala's concurrent getOrElseUpdate is not atomic, unlike this guy def atomicCreateIfAbsent[K, V](map: ConcurrentMap[K, V], key: K)(fn: K => V): V = { val oldValue = map.get(key) if (oldValue == null) { map.synchronized { val oldValue2 = map.get(key) if (oldValue2 == null) { val newValue = fn(key) map.putIfAbsent(key, newValue) map.get(key) } else { oldValue2 } } } else { oldValue } } def safeDivide(num: Double, den: Double)(orElse: Double): Double = { if (den == 0) orElse else num / den } // Apparently the map in JavaConversions isn't serializable... def toJMap[K, V](map: Map[K, V]): java.util.Map[K, V] = { val m = new java.util.HashMap[K, V](map.size) map.foreach { case (k, v) => m.put(k, v) } m } def toJMap[K, V](map: Option[Map[K, V]]): java.util.Map[K, V] = toJMap(map.getOrElse(Map.empty[K, V])) def calculatePercentile[T](values: Array[T], percentile: Double, default: Double = 0.0)(implicit n: Numeric[T]): Double = { import math._ if (values.isEmpty) return default val p = max(0.0, min(1.0, percentile)) var idx = p * (values.size - 1) idx = max(0.0, min(values.size - 1, idx)) val (lIdx, rIdx) = (idx.floor.toInt, idx.ceil.toInt) if (idx == lIdx) n.toDouble(values(lIdx)) else { // Linearly Interpolate between the two (idx - lIdx) * n.toDouble(values(rIdx)) + (rIdx - idx) * n.toDouble(values(lIdx)) } } def continueOnError(block: => Unit): Unit = { try { block } catch { case e : Exception => // nothing } } }
linkedin/norbert
cluster/src/main/scala/com/linkedin/norbert/norbertutils/package.scala
Scala
apache-2.0
3,172
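A few of the helpers above in action, as a quick sketch (the inputs are arbitrary; note the `-(insertionPoint) - 1` encoding that `binarySearch` uses for missing elements):

import com.linkedin.norbert.norbertutils._

val sorted = Seq(1, 3, 5, 7, 9)
binarySearch(sorted, 7)                         // 3  (index of the element)
binarySearch(sorted, 4)                         // -3 (absent; insertion point would be 2)

calculatePercentile(Array(1, 2, 3, 4, 5), 0.5)  // 3.0 (the median)
safeDivide(10.0, 0.0)(orElse = 0.0)             // 0.0 instead of Infinity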
package ipfix.actors import akka.actor.{ActorRef, Terminated} import akka.routing.Router trait BaseRouter extends BaseActor { var router: Router def terminate: Receive = { case Terminated(a) => remRoutee(a) } def addRoutee(a: ActorRef): Unit = { log.info(s"adding routee: $a") router = router.addRoutee(a) context watch a } def remRoutee(a: ActorRef): Unit = { log.info(s"removing routee: $a") router = router.removeRoutee(a) } }
ConnorDillon/ipfix
src/main/scala/ipfix/actors/BaseRouter.scala
Scala
gpl-3.0
473
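A sketch of a concrete routing actor built on the trait above. The `Worker` actor, the round-robin logic and the pool size are illustrative assumptions, and `BaseActor` is assumed to extend `Actor` (with logging), as the `log` and `context` calls in the trait imply.

import akka.actor.{Actor, Props}
import akka.routing.{RoundRobinRoutingLogic, Router}

class Worker extends Actor {
  def receive = { case _ => () } // accept and process anything
}

class WorkerRouter extends BaseRouter {
  var router = Router(RoundRobinRoutingLogic(), Vector.empty)

  // Populate the pool; addRoutee also starts death-watching each routee.
  override def preStart(): Unit =
    (1 to 3).foreach(_ => addRoutee(context.actorOf(Props[Worker])))

  def handle: Receive = { case msg => router.route(msg, sender()) }

  // `terminate` drops dead routees; everything else is routed round-robin.
  def receive = terminate orElse handle
}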
/* sbt -- Simple Build Tool * Copyright 2010 Mark Harrah */ package sbt import Relation._ object Relation { /** Constructs a new immutable, finite relation that is initially empty. */ def empty[A,B]: Relation[A,B] = make(Map.empty, Map.empty) def make[A,B](forward: Map[A,Set[B]], reverse: Map[B, Set[A]]): Relation[A,B] = new MRelation(forward, reverse) def reconstruct[A,B](forward: Map[A, Set[B]]): Relation[A,B] = { val reversePairs = for( (a,bs) <- forward.view; b <- bs.view) yield (b, a) val reverse = (Map.empty[B,Set[A]] /: reversePairs) { case (m, (b, a)) => add(m, b, a :: Nil) } make(forward, reverse) } private[sbt] def remove[X,Y](map: M[X,Y], from: X, to: Y): M[X,Y] = map.get(from) match { case Some(tos) => val newSet = tos - to if(newSet.isEmpty) map - from else map.updated(from, newSet) case None => map } private[sbt] def combine[X,Y](a: M[X,Y], b: M[X,Y]): M[X,Y] = (a /: b) { (map, mapping) => add(map, mapping._1, mapping._2) } private[sbt] def add[X,Y](map: M[X,Y], from: X, to: Traversable[Y]): M[X,Y] = map.updated(from, get(map, from) ++ to) private[sbt] def get[X,Y](map: M[X,Y], t: X): Set[Y] = map.getOrElse(t, Set.empty[Y]) private[sbt] type M[X,Y] = Map[X, Set[Y]] } /** Binary relation between A and B. It is a set of pairs (_1, _2) for _1 in A, _2 in B. */ trait Relation[A,B] { /** Returns the set of all _2s such that (_1, _2) is in this relation. */ def forward(_1: A): Set[B] /** Returns the set of all _1s such that (_1, _2) is in this relation. */ def reverse(_2: B): Set[A] /** Includes the relation given by `pair`. */ def +(pair: (A, B)): Relation[A,B] /** Includes the relation (a, b). */ def +(a: A, b: B): Relation[A,B] /** Includes the relations (a, b) for all b in bs. */ def +(a: A, bs: Traversable[B]): Relation[A,B] /** Returns the union of the relation r with this relation. */ def ++(r: Relation[A,B]): Relation[A,B] /** Includes the given relations. */ def ++(rs: Traversable[(A,B)]): Relation[A,B] /** Removes all relations (_1, _2) for all _1 in _1s. */ def --(_1s: Traversable[A]): Relation[A,B] /** Removes all `pairs` from this relation. */ def --(pairs: TraversableOnce[(A,B)]): Relation[A,B] /** Removes all pairs (_1, _2) from this relation. */ def -(_1: A): Relation[A,B] /** Removes `pair` from this relation. */ def -(pair: (A,B)): Relation[A,B] /** Returns the set of all _1s such that (_1, _2) is in this relation. */ def _1s: collection.Set[A] /** Returns the set of all _2s such that (_1, _2) is in this relation. 
*/ def _2s: collection.Set[B] /** Returns the number of pairs in this relation */ def size: Int /** Returns true iff (a,b) is in this relation*/ def contains(a: A, b: B): Boolean /** Returns a relation with only pairs (a,b) for which f(a,b) is true.*/ def filter(f: (A,B) => Boolean): Relation[A,B] /** Returns all pairs in this relation.*/ def all: Traversable[(A,B)] def forwardMap: Map[A, Set[B]] def reverseMap: Map[B, Set[A]] } private final class MRelation[A,B](fwd: Map[A, Set[B]], rev: Map[B, Set[A]]) extends Relation[A,B] { def forwardMap = fwd def reverseMap = rev def forward(t: A) = get(fwd, t) def reverse(t: B) = get(rev, t) def _1s = fwd.keySet def _2s = rev.keySet def size = fwd.size def all: Traversable[(A,B)] = fwd.iterator.flatMap { case (a, bs) => bs.iterator.map( b => (a,b) ) }.toTraversable def +(pair: (A,B)) = this + (pair._1, Set(pair._2)) def +(from: A, to: B) = this + (from, to :: Nil) def +(from: A, to: Traversable[B]) = new MRelation( add(fwd, from, to), (rev /: to) { (map, t) => add(map, t, from :: Nil) }) def ++(rs: Traversable[(A,B)]) = ((this: Relation[A,B]) /: rs) { _ + _ } def ++(other: Relation[A,B]) = new MRelation[A,B]( combine(fwd, other.forwardMap), combine(rev, other.reverseMap) ) def --(ts: Traversable[A]): Relation[A,B] = ((this: Relation[A,B]) /: ts) { _ - _ } def --(pairs: TraversableOnce[(A,B)]): Relation[A,B] = ((this: Relation[A,B]) /: pairs) { _ - _ } def -(pair: (A,B)): Relation[A,B] = new MRelation( remove(fwd, pair._1, pair._2), remove(rev, pair._2, pair._1) ) def -(t: A): Relation[A,B] = fwd.get(t) match { case Some(rs) => val upRev = (rev /: rs) { (map, r) => remove(map, r, t) } new MRelation(fwd - t, upRev) case None => this } def filter(f: (A,B) => Boolean): Relation[A,B] = Relation.empty[A,B] ++ all.filter(f.tupled) def contains(a: A, b: B): Boolean = forward(a)(b) override def toString = all.map { case (a,b) => a + " -> " + b }.mkString("Relation [", ", ", "]") }
jamesward/xsbt
util/relation/Relation.scala
Scala
bsd-3-clause
4,581
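A quick sketch of the `Relation` API above, using the kind of source-file-to-class mapping sbt tracks (the file and class names are made up):

val rel = Relation.empty[String, String] +
  ("A.scala" -> "A") + ("A.scala" -> "A$") + ("B.scala" -> "B")

rel.forward("A.scala")        // Set(A, A$)
rel.reverse("B")              // Set(B.scala)
rel.contains("A.scala", "A")  // true
(rel - "A.scala")._1s         // Set(B.scala)
rel.filter((src, cls) => !cls.endsWith("$")).all.toList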
package spark.storage import java.nio.ByteBuffer import scala.collection.mutable.StringBuilder import scala.collection.mutable.ArrayBuffer import spark._ import spark.network._ private[spark] class BlockMessageArray(var blockMessages: Seq[BlockMessage]) extends Seq[BlockMessage] with Logging { def this(bm: BlockMessage) = this(Array(bm)) def this() = this(null.asInstanceOf[Seq[BlockMessage]]) def apply(i: Int) = blockMessages(i) def iterator = blockMessages.iterator def length = blockMessages.length initLogging() def set(bufferMessage: BufferMessage) { val startTime = System.currentTimeMillis val newBlockMessages = new ArrayBuffer[BlockMessage]() val buffer = bufferMessage.buffers(0) buffer.clear() /* println() println("BlockMessageArray: ") while(buffer.remaining > 0) { print(buffer.get()) } buffer.rewind() println() println() */ while (buffer.remaining() > 0) { val size = buffer.getInt() logDebug("Creating block message of size " + size + " bytes") val newBuffer = buffer.slice() newBuffer.clear() newBuffer.limit(size) logDebug("Trying to convert buffer " + newBuffer + " to block message") val newBlockMessage = BlockMessage.fromByteBuffer(newBuffer) logDebug("Created " + newBlockMessage) newBlockMessages += newBlockMessage buffer.position(buffer.position() + size) } val finishTime = System.currentTimeMillis logDebug("Converted block message array from buffer message in " + (finishTime - startTime) / 1000.0 + " s") this.blockMessages = newBlockMessages } def toBufferMessage: BufferMessage = { val buffers = new ArrayBuffer[ByteBuffer]() blockMessages.foreach(blockMessage => { val bufferMessage = blockMessage.toBufferMessage logDebug("Adding " + blockMessage) val sizeBuffer = ByteBuffer.allocate(4).putInt(bufferMessage.size) sizeBuffer.flip buffers += sizeBuffer buffers ++= bufferMessage.buffers logDebug("Added " + bufferMessage) }) logDebug("Buffer list:") buffers.foreach((x: ByteBuffer) => logDebug("" + x)) /* println() println("BlockMessageArray: ") buffers.foreach(b => { while(b.remaining > 0) { print(b.get()) } b.rewind() }) println() println() */ return Message.createBufferMessage(buffers) } } private[spark] object BlockMessageArray { def fromBufferMessage(bufferMessage: BufferMessage): BlockMessageArray = { val newBlockMessageArray = new BlockMessageArray() newBlockMessageArray.set(bufferMessage) newBlockMessageArray } def main(args: Array[String]) { val blockMessages = (0 until 10).map(i => { if (i % 2 == 0) { val buffer = ByteBuffer.allocate(100) buffer.clear BlockMessage.fromPutBlock(PutBlock(i.toString, buffer, StorageLevel.MEMORY_ONLY_SER)) } else { BlockMessage.fromGetBlock(GetBlock(i.toString)) } }) val blockMessageArray = new BlockMessageArray(blockMessages) println("Block message array created") val bufferMessage = blockMessageArray.toBufferMessage println("Converted to buffer message") val totalSize = bufferMessage.size val newBuffer = ByteBuffer.allocate(totalSize) newBuffer.clear() bufferMessage.buffers.foreach(buffer => { newBuffer.put(buffer) buffer.rewind() }) newBuffer.flip val newBufferMessage = Message.createBufferMessage(newBuffer) println("Copied to new buffer message, size = " + newBufferMessage.size) val newBlockMessageArray = BlockMessageArray.fromBufferMessage(newBufferMessage) println("Converted back to block message array") newBlockMessageArray.foreach(blockMessage => { blockMessage.getType match { case BlockMessage.TYPE_PUT_BLOCK => { val pB = PutBlock(blockMessage.getId, blockMessage.getData, blockMessage.getLevel) println(pB) } case 
BlockMessage.TYPE_GET_BLOCK => { val gB = new GetBlock(blockMessage.getId) println(gB) } } }) } }
koeninger/spark
core/src/main/scala/spark/storage/BlockMessageArray.scala
Scala
bsd-3-clause
4,222
/* * Ported from https://github.com/hamcrest/JavaHamcrest/ */ package org.hamcrest.core import org.hamcrest.BaseMatcher import org.hamcrest.Description import org.hamcrest.Matcher import org.hamcrest.core.IsEqual.equalTo class IsNot[T](matcher: Matcher[T]) extends BaseMatcher[T] { override def matches(arg: AnyRef): Boolean = !matcher.matches(arg) override def describeTo(description: Description): Unit = description.appendText("not ").appendDescriptionOf(matcher) } object IsNot { def not[T](matcher: Matcher[T]): Matcher[T] = new IsNot[T](matcher) def not[T](value: T): Matcher[T] = not(equalTo(value)) }
nicolasstucki/scala-js-junit
runtime/src/main/scala/org/hamcrest/core/IsNot.scala
Scala
bsd-3-clause
641
/* * Copyright (c) 2014-2018 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.reactive.subjects import minitest.TestSuite import monix.eval.Task import monix.execution.Ack.{Continue, Stop} import monix.execution.exceptions.DummyException import monix.execution.schedulers.TestScheduler import monix.reactive.{Observable, Observer} import scala.util.Random trait BaseSubjectSuite extends TestSuite[TestScheduler] { case class Sample(subject: Subject[Long, Long], expectedSum: Long) def setup() = TestScheduler() def tearDown(s: TestScheduler) = { assert(s.state.tasks.isEmpty, "TestScheduler should have no pending tasks") } /** * Returns a sample subject that needs testing. */ def alreadyTerminatedTest(expectedElems: Seq[Long]): Sample /** * Returns a sample subject for the test of * continuous streaming. */ def continuousStreamingTest(expectedElems: Seq[Long]): Option[Sample] test("already completed and empty subject terminates observers") { implicit s => var wereCompleted = 0 var sum = 0L def createObserver = new Observer[Long] { def onNext(elem: Long) = { sum += elem Continue } def onError(ex: Throwable) = () def onComplete() = { wereCompleted += 1 } } val Sample(subject, expectedSum) = alreadyTerminatedTest(Seq.empty) subject.onComplete() subject.unsafeSubscribeFn(createObserver) subject.unsafeSubscribeFn(createObserver) subject.unsafeSubscribeFn(createObserver) s.tick() assertEquals(sum, expectedSum * 3) assertEquals(wereCompleted, 3) } test("failed empty subject terminates observers with an error") { implicit s => var wereCompleted = 0 var sum = 0L def createObserver = new Observer[Long] { def onNext(elem: Long) = { sum += elem Continue } def onComplete() = () def onError(ex: Throwable) = ex match { case DummyException("dummy") => wereCompleted += 1 case _ => () } } val Sample(subject, _) = alreadyTerminatedTest(Seq.empty) subject.onError(DummyException("dummy")) subject.unsafeSubscribeFn(createObserver) subject.unsafeSubscribeFn(createObserver) subject.unsafeSubscribeFn(createObserver) s.tick() assertEquals(sum, 0) assertEquals(wereCompleted, 3) } test("already completed but non-empty subject terminates new observers") { implicit s => val elems = (0 until 20).map(_ => Random.nextLong()) var wereCompleted = 0 var sum = 0L def createObserver = new Observer[Long] { def onNext(elem: Long) = { sum += elem Continue } def onError(ex: Throwable) = () def onComplete() = { wereCompleted += 1 } } val Sample(subject, expectedSum) = alreadyTerminatedTest(elems) Observable.fromIterable(elems).unsafeSubscribeFn(subject) s.tick() subject.unsafeSubscribeFn(createObserver) subject.unsafeSubscribeFn(createObserver) subject.unsafeSubscribeFn(createObserver) s.tick() assertEquals(sum, expectedSum * 3) assertEquals(wereCompleted, 3) } test("already failed but non-empty subject terminates new observers") { implicit s => val elems = (0 until 20).map(_ => Random.nextLong()) var wereCompleted = 0 def createObserver = new Observer[Long] 
{ def onNext(elem: Long) = Continue def onComplete() = () def onError(ex: Throwable) = ex match { case DummyException("dummy") => wereCompleted += 1 case _ => () } } val Sample(subject, _) = alreadyTerminatedTest(elems) Observable.fromIterable(elems) .endWithError(DummyException("dummy")) .unsafeSubscribeFn(subject) s.tick() subject.unsafeSubscribeFn(createObserver) subject.unsafeSubscribeFn(createObserver) subject.unsafeSubscribeFn(createObserver) s.tick() assertEquals(wereCompleted, 3) } test("should remove subscribers that triggered errors") { implicit s => val elemsLength = Random.nextInt(300) + 100 val elems = (0 until elemsLength).map(_.toLong) var wereCompleted = 0 var totalReceived = 0 def createObserver = new Observer[Long] { var received = 0 def onNext(elem: Long) = { totalReceived += 1 received += 1 if (received > 10) throw DummyException("dummy") else Continue } def onComplete() = () def onError(ex: Throwable) = ex match { case DummyException("dummy") => wereCompleted += 1 case _ => () } } continuousStreamingTest(elems) match { case None => ignore() case Some(Sample(subject, expectedSum)) => var totalEmitted = 0L subject.doOnNext(x => Task(totalEmitted += x)).subscribe() subject.subscribe(createObserver) subject.subscribe(createObserver) subject.subscribe(createObserver) s.tick() Observable.fromIterable(elems).unsafeSubscribeFn(subject) s.tick() assertEquals(wereCompleted, 3) assertEquals(totalEmitted, expectedSum) assertEquals(totalReceived, 33) } } test("should protect onNext after onCompleted") { implicit s => val Sample(subject, _) = alreadyTerminatedTest(Seq.empty) subject.onComplete() assertEquals(subject.onNext(1), Stop) assertEquals(subject.onNext(2), Stop) assertEquals(subject.onNext(2), Stop) } test("should protect onNext after onError") { implicit s => val Sample(subject, _) = alreadyTerminatedTest(Seq.empty) subject.onError(DummyException("dummy")) assertEquals(subject.onNext(1), Stop) assertEquals(subject.onNext(2), Stop) assertEquals(subject.onNext(2), Stop) } }
Wogan/monix
monix-reactive/shared/src/test/scala/monix/reactive/subjects/BaseSubjectSuite.scala
Scala
apache-2.0
6,508
package linguistic import scala.annotation.tailrec import scala.collection.JavaConverters._ import scala.collection.mutable import scala.collection.mutable.ListBuffer //https://github.com/mauricio/scala-sandbox/blob/master/src/main/scala/trie/Trie.scala package object trie { object Trie { def apply(): Trie = new TrieNode() } sealed trait Trie extends Traversable[String] { def append(key: String): Unit def findByPrefix(prefix: String): scala.collection.Seq[String] def contains(word: String): Boolean def remove(word: String): Boolean } private[trie] class TrieNode(val char: Option[Char] = None, var word: Option[String] = None) extends Trie { private[trie] val children: mutable.Map[Char, TrieNode] = new java.util.TreeMap[Char, TrieNode]().asScala override def append(key: String) = { @tailrec def go(node: TrieNode, currentIndex: Int): Unit = if (currentIndex == key.length) node.word = Some(key) else { val pref = key.charAt(currentIndex).toLower val result = node.children.getOrElseUpdate(pref, new TrieNode(Some(pref))) go(result, currentIndex + 1) } go(this, 0) } override def foreach[U](f: String => U): Unit = { @tailrec def go(nodes: TrieNode*): Unit = if (nodes.size != 0) { nodes.foreach(node => node.word.foreach(f)) go(nodes.flatMap(node => node.children.values): _*) } go(this) } override def findByPrefix(prefix: String): scala.collection.Seq[String] = { @tailrec def go(currentIndex: Int, node: TrieNode, items: ListBuffer[String]): ListBuffer[String] = if (currentIndex == prefix.length) { items ++ node } else { node.children.get(prefix.charAt(currentIndex).toLower) match { case Some(child) => go(currentIndex + 1, child, items) case None => items } } go(0, this, new ListBuffer[String]()) } override def contains(word: String): Boolean = { @tailrec def go(currentIndex: Int, node: TrieNode): Boolean = if (currentIndex == word.length) { node.word.isDefined } else { node.children.get(word.charAt(currentIndex).toLower) match { case Some(child) => go(currentIndex + 1, child) case None => false } } go(0, this) } override def remove(word: String): Boolean = { def loop(index: Int, continue: Boolean, path: ListBuffer[TrieNode]): Unit = if (index > 0 && continue) { val current = path(index) if (current.word.isDefined) loop(index, false, path) else { val parent = path(index - 1) if (current.children.isEmpty) { parent.children.remove(word.charAt(index - 1).toLower) } loop(index - 1, true, path) } } pathTo(word) match { case Some(path) => { var index = path.length - 1 var continue = true path(index).word = None // while (index > 0 && continue) { val current = path(index) if (current.word.isDefined) continue = false else { val parent = path(index - 1) if (current.children.isEmpty) { parent.children.remove(word.charAt(index - 1).toLower) } index -= 1 } } true } case None => false } } private[trie] def pathTo(word: String): Option[ListBuffer[TrieNode]] = { def go(buffer: ListBuffer[TrieNode], currentIndex: Int, node: TrieNode): Option[ListBuffer[TrieNode]] = if (currentIndex == word.length) { node.word.map(word => buffer += node) } else { node.children.get(word.charAt(currentIndex).toLower) match { case Some(found) => { buffer += node go(buffer, currentIndex + 1, found) } case None => None } } go(new ListBuffer[TrieNode](), 0, this) } override def toString(): String = s"Trie(char=${char},word=${word})" } }
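A short usage sketch of the trie defined above (the words are arbitrary examples):

import linguistic.trie.Trie

object TrieExample extends App {
  val trie = Trie()
  List("car", "cart", "cat", "dog").foreach(trie.append)

  println(trie.contains("cat"))      // true
  println(trie.findByPrefix("ca"))   // car, cat, cart (in traversal order)

  trie.remove("cart")
  println(trie.findByPrefix("car"))  // car
}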
haghard/linguistic
server/src/main/scala/linguistic/trie/package.scala
Scala
apache-2.0
4,263
/*
 * Copyright 2015 Webtrends (http://www.webtrends.com)
 *
 * See the LICENCE.txt file distributed with this work for additional
 * information regarding copyright ownership.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.webtrends.harness.component.zookeeper

import akka.actor.{Actor, Identify}
import akka.pattern.ask
import akka.util.Timeout
import com.webtrends.harness.component.zookeeper.config.ZookeeperSettings

import scala.concurrent.Await
import scala.concurrent.duration._

trait Zookeeper { this: Actor =>
  import context.system

  implicit val zookeeperSettings: ZookeeperSettings

  def startZookeeper(clusterEnabled: Boolean = false) = {
    // Load the zookeeper actor
    val zk = context.actorOf(ZookeeperActor.props(zookeeperSettings, clusterEnabled), Zookeeper.ZookeeperName)
    // We need to block here during startup so that we know that things
    // are up and running for other services that depend on this
    implicit val to = Timeout(20 seconds)
    Await.result(zk ? Identify("StartZK"), to.duration)
  }
}

object Zookeeper {
  val ZookeeperName = "zookeeper"
}
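A hedged sketch of how a component actor might mix the trait in; the class name is hypothetical and the settings value would normally come from configuration:

import akka.actor.Actor
import com.webtrends.harness.component.zookeeper.Zookeeper
import com.webtrends.harness.component.zookeeper.config.ZookeeperSettings

// Hypothetical component actor; it satisfies the trait's self-type (Actor)
// and supplies the abstract implicit settings member.
class MyZookeeperComponent(settings: ZookeeperSettings) extends Actor with Zookeeper {
  implicit val zookeeperSettings: ZookeeperSettings = settings

  // Block once on startup until the ZookeeperActor child responds.
  override def preStart(): Unit = startZookeeper()

  def receive: Receive = {
    case msg => sender() ! msg
  }
}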
mjwallin1/wookiee-zookeeper
src/main/scala/com/webtrends/harness/component/zookeeper/Zookeeper.scala
Scala
apache-2.0
1,624
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.datasources.v2.jdbc

import java.util

import scala.collection.JavaConverters._

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.connector.catalog._
import org.apache.spark.sql.connector.catalog.TableCapability._
import org.apache.spark.sql.connector.write.{LogicalWriteInfo, WriteBuilder}
import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcOptionsInWrite}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap

case class JDBCTable(ident: Identifier, schema: StructType, jdbcOptions: JDBCOptions)
  extends Table with SupportsRead with SupportsWrite {

  override def name(): String = ident.toString

  override def capabilities(): util.Set[TableCapability] = {
    util.EnumSet.of(BATCH_READ, V1_BATCH_WRITE, TRUNCATE)
  }

  override def newScanBuilder(options: CaseInsensitiveStringMap): JDBCScanBuilder = {
    val mergedOptions = new JDBCOptions(
      jdbcOptions.parameters.originalMap ++ options.asCaseSensitiveMap().asScala)
    JDBCScanBuilder(SparkSession.active, schema, mergedOptions)
  }

  override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder = {
    val mergedOptions = new JdbcOptionsInWrite(
      jdbcOptions.parameters.originalMap ++ info.options.asCaseSensitiveMap().asScala)
    JDBCWriteBuilder(schema, mergedOptions)
  }
}
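Tables like this one back Spark's Data Source V2 JDBC catalog. A rough usage sketch from the user side, assuming Spark 3.x; the catalog name `h2` and all connection values are placeholders:

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder()
  .master("local[*]")
  // Register a JDBC-backed catalog; tables resolved through it are
  // represented by JDBCTable instances like the one above.
  .config("spark.sql.catalog.h2", "org.apache.spark.sql.execution.datasources.v2.jdbc.JDBCTableCatalog")
  .config("spark.sql.catalog.h2.url", "jdbc:h2:mem:testdb")
  .config("spark.sql.catalog.h2.driver", "org.h2.Driver")
  .getOrCreate()

spark.sql("SELECT * FROM h2.test.people").show()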
chuckchen/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTable.scala
Scala
apache-2.0
2,194
package atto

import Atto._
import scala.util.Random
import org.scalacheck._
import scalaz.\/._

object CharacterTest extends Properties("Character") {
  import Prop._
  import Parser._

  property("satisfy") = forAll { (w: Char, s: String) =>
    satisfy(_ <= w).parse(w +: s).option == Some(w)
  }

  property("oneOf") = forAll { (s: String) =>
    s.nonEmpty ==> {
      val randomChar = s(Random.nextInt(s.size))
      oneOf(s).parse(randomChar.toString).option == Some(randomChar)
    }
  }

  property("noneOf") = forAll { (s: String, c: Char) =>
    s.nonEmpty ==> {
      val randomChar = s(Random.nextInt(s.size))
      noneOf(s).parse(randomChar.toString).option == None
    }
  }

  property("char") = forAll { (w: Char, s: String) =>
    char(w).parse(w +: s).option == Some(w)
  }

  property("anyChar") = forAll { (s: String) =>
    val p = anyChar.parse(s).option
    if (s.isEmpty) p == None
    else p == Some(s.head)
  }

  property("notChar") = forAll { (w: Char, s: String) =>
    (!s.isEmpty) ==> {
      val v = s.head
      notChar(w).parse(s).option == (if (v == w) None else Some(v))
    }
  }

  property("charRange") = forAll { (ps: List[(Char, Char)], c: Char) =>
    val rs = ps.map(p => p._1 to p._2)
    val in = rs.exists(_.contains(c))
    charRange(rs: _*).parseOnly(c.toString).option match {
      case Some(`c`) if in => true
      case None if !in => true
      case _ => false
    }
  }

  property("optElem") = forAll { (c: Char, d: Char) =>
    optElem(c => Some(c).filter(_ < d)).parseOnly(c.toString).option == Some(c).filter(_ < d)
  }

  property("optElem + many") = forAll { (s: String, c: Char) =>
    val p = many(optElem(ch => Some(ch).filter(_ < c)))
    p.parseOnly(s).option == Some(s.toList.takeWhile(_ < c))
  }
}
coltfred/atto
core/src/test/scala/atto/CharacterTest.scala
Scala
mit
1,734
package com.twitter.finagle.http.netty4 import com.twitter.conversions.DurationOps._ import com.twitter.finagle.http.Cookie import com.twitter.finagle.http.CookieCodec import com.twitter.finagle.http.CookieMap import com.twitter.finagle.http.cookie.SameSiteCodec import io.netty.handler.codec.http.cookie.{ClientCookieDecoder => NettyClientCookieDecoder} import io.netty.handler.codec.http.cookie.{ClientCookieEncoder => NettyClientCookieEncoder} import io.netty.handler.codec.http.cookie.{Cookie => NettyCookie} import io.netty.handler.codec.http.cookie.{DefaultCookie => NettyDefaultCookie} import io.netty.handler.codec.http.cookie.{ServerCookieDecoder => NettyServerCookieDecoder} import io.netty.handler.codec.http.cookie.{ServerCookieEncoder => NettyServerCookieEncoder} import java.util.{BitSet => JBitSet} import scala.collection.JavaConverters._ private[finagle] object Netty4CookieCodec extends CookieCodec { // not stateful, so safe to re-use private[this] val clientEncoder = NettyClientCookieEncoder.STRICT private[this] val serverEncoder = NettyServerCookieEncoder.STRICT private[this] val clientDecoder = NettyClientCookieDecoder.STRICT private[this] val serverDecoder = NettyServerCookieDecoder.STRICT // These are the chars that trigger double-quote-wrapping of values in Netty 3, minus the // characters that are prohibited in Netty 4. private[this] val ShouldWrapCharsBitSet: JBitSet = { val bs = new JBitSet "()/:<?@[]=>{}".foreach(bs.set(_)) bs } def encodeClient(cookies: Iterable[Cookie]): String = // N4 Encoder returns null if cookies is empty if (cookies.isEmpty) "" else clientEncoder.encode(cookies.map(cookieToNetty).asJava) def encodeServer(cookie: Cookie): String = { val encoded = serverEncoder.encode(cookieToNetty(cookie)) if (CookieMap.includeSameSite) SameSiteCodec.encodeSameSite(cookie, encoded) else encoded } def decodeClient(header: String): Option[Iterable[Cookie]] = { val cookie = clientDecoder.decode(header) if (cookie != null) { val decoded = cookieToFinagle(cookie) val finagleCookie = if (CookieMap.includeSameSite) SameSiteCodec.decodeSameSite(header, decoded) else decoded Some(Seq(finagleCookie)) } else None } def decodeServer(header: String): Option[Iterable[Cookie]] = { val cookies = serverDecoder.decodeAll(header).asScala.map(cookieToFinagle) if (!cookies.isEmpty) Some(cookies) else None } private[this] def shouldWrap(cookie: Cookie): Boolean = Cookie.stringContains(cookie.value, ShouldWrapCharsBitSet) private[netty4] val cookieToNetty: Cookie => NettyCookie = c => { val nc = new NettyDefaultCookie(c.name, c.value) nc.setDomain(c.domain) nc.setPath(c.path) // We convert the Durations to Ints to circumvent maxAge being // Int.MinValue.seconds, which does not equal Duration.Bottom, even though // they have the same integer value in seconds. if (c.maxAge.inSeconds != Cookie.DefaultMaxAge.inSeconds) { nc.setMaxAge(c.maxAge.inSeconds) } nc.setSecure(c.secure) nc.setHttpOnly(c.httpOnly) if (shouldWrap(c)) nc.setWrap(true) nc } private[netty4] val cookieToFinagle: NettyCookie => Cookie = nc => { val cookie = new Cookie( name = nc.name, value = nc.value, domain = Option(nc.domain()), path = Option(nc.path()), secure = nc.isSecure(), httpOnly = nc.isHttpOnly() ) // Note: Long.MinValue is what Netty 4 uses to indicate "never expires." if (nc.maxAge() != Long.MinValue) cookie.maxAge(Some(nc.maxAge().seconds)) else cookie } }
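The codec encodes Finagle cookies via Netty 4 and decodes them back. Since the object is `private[finagle]`, a sketch like the one below would have to live inside the `com.twitter.finagle` namespace; the cookie values are arbitrary:

import com.twitter.finagle.http.Cookie
import com.twitter.finagle.http.netty4.Netty4CookieCodec

val original = new Cookie("session", "abc123")

// The client-side encoder produces a Cookie request-header value...
val cookieHeader: String = Netty4CookieCodec.encodeClient(Seq(original))

// ...which the server-side decoder can read back into Finagle cookies.
val decoded: Option[Iterable[Cookie]] = Netty4CookieCodec.decodeServer(cookieHeader)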
twitter/finagle
finagle-base-http/src/main/scala/com/twitter/finagle/http/netty4/Netty4CookieCodec.scala
Scala
apache-2.0
3,655
package pcf.pcl import scala.collection.mutable.ListBuffer import pcf.pcl._ import org.slf4j.LoggerFactory import org.scalatest.FunSuite class PCLParserTestSpec extends FunSuite { val logger = LoggerFactory.getLogger(getClass()) test("test1") { val input = "constraint test1\\n" + "context t1:Task1, t2:Task2\\n" + "pre a->b.c->d.location < 60.0\\n" + " and distance(a.location, b.location) < 100" + " or b < c" logger.info("********** test 1 **********\\n" + input + "\\n") logger.info("********** test 1 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint.get.name == "test1") } test("test2") { val input = "constraint if_expressionTest\\n" + "context t:Task, task2, task3\\n" + "post if aa.size < bb.size\\n" + " then bb.size - aa.size" + " else aa.size - bb.size endif" logger.info("********** test 2 **********\\n" + input + "\\n") logger.info("********** test 2 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint.get.name == "if_expressionTest") } test("test2 1") { val input = "constraint if_expressionTest\\n" + "context t:Task, task2, task3\\n" + "post if aa.size < bb.size\\n" + " then bb.size - aa.size" + " endif" logger.info("********** test 2.1 **********\\n" + input + "\\n") logger.info("********** test 2.1 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint.get.name == "if_expressionTest") } test("test3 unsupported keyword") { val input = "Constraint test\\n" + "context t:Task, task2, task3\\n" + "pre aa.size < bb.size\\n" logger.info("********** test 3 **********\\n" + input + "\\n") logger.info("********** test 3 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint == None) } test("test4 unsupported keyword") { val input = "constraint test\\n" + "Context t:Task, task2, task3\\n" + "pre aa.size < bb.size\\n" logger.info("********** test 4 **********\\n" + input + "\\n") logger.info("********** test 4 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint == None) } test("test5 unsupported keyword") { val input = "constraint test\\n" + "context t:Task, task2, task3\\n" + "Pre aa.size < bb.size\\n" logger.info("********** test 5 **********\\n" + input + "\\n") logger.info("********** test 5 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint == None) } test("test6 unsupported expression") { val input = "constraint test\\n" + "context t:Task, task2, task3\\n" + "pre aa.size <<<< bb.size\\n" logger.info("********** test 6 **********\\n" + input + "\\n") logger.info("********** test 6 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint == None) } test("test7") { val input = "constraint if_expressionTest\\n" + "context t:Task, task2, task3\\n" + "post if aa.size < bb.size\\n" + " then bb.size -- aa.size" + " else aa.size * bb.size endif" logger.info("********** test 7 **********\\n" + input + "\\n") logger.info("********** test 7 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) 
logger.info(constraint.toString) assert(constraint.constraint == None) } test("test8") { val input = "constraint if_expressionTest\\n" + "context t:Task, task2, task3\\n" + "post aa.size < bb.size and bb.size >= aa.size \\n" logger.info("********** test 8 **********\\n" + input + "\\n") logger.info("********** test 8 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint.get.name == "if_expressionTest") } test("test9 exception parser") { val input = "constraint exception_expressionTest\\n" + "context t:Task, task2, task3\\n" + "post aa.size < bb.size and bb.size >= aa.size \\n" + " raise HospitalNotFoundException" logger.info("********** test 9 **********\\n" + input + "\\n") logger.info("********** test 9 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint.get.name == "exception_expressionTest") } test("test9_1 raise clause with exception type") { val input = "constraint exception_expressionTest\\n" + "context t:Task, task2, task3\\n" + "post aa.size < bb.size and bb.size >= aa.size \\n" + " raise" logger.info("********** test 9 **********\\n" + input + "\\n") logger.info("********** test 9 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint == None) } test("test9_2 raise clause with multiple exception types") { val input = "constraint exception_expressionTest\\n" + "context t:Task, task2, task3\\n" + "post aa.size < bb.size and bb.size >= aa.size \\n" + " raise HospitalNotFoundException, DetailNotAvailableException" logger.info("********** test 9 **********\\n" + input + "\\n") logger.info("********** test 9 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint.get.name == "exception_expressionTest") } test("test10 exception handler clause") { val input = "constraint exception_handler_clauseTest\\n" + "context t:Task, task2, task3\\n" + "post aa.size < bb.size and bb.size >= aa.size \\n" + " raise HospitalNotFoundException, DetailNotAvailableException\\n" + "exception " logger.info("********** test 10 **********\\n" + input + "\\n") logger.info("********** test 10 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint == None) } test("test10_1") { val input = "constraint exception_handler_clauseTest\\n" + "context t:Task, task2, task3\\n" + "post aa.size < bb.size and bb.size >= aa.size \\n" + " raise HospitalNotFoundException, DetailNotAvailableException\\n" + "exception when HospitalNotFoundException\\n" + " then extendDistance" logger.info("********** test 10 **********\\n" + input + "\\n") logger.info("********** test 10 result **********") val parser = new PCLParser val constraint = parser.pclParse(input) logger.info(constraint.toString) assert(constraint.constraint.get.name == "exception_handler_clauseTest") } }
shasha-amy-liu/process-constraint-framework
pcf/src/test/scala/pcf/pcl/PCLParserTest.scala
Scala
mit
7,397
/** * Copyright (C) 2017 Orbeon, Inc. * * This program is free software; you can redistribute it and/or modify it under the terms of the * GNU Lesser General Public License as published by the Free Software Foundation; either version * 2.1 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Lesser General Public License for more details. * * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html */ package org.orbeon.builder import org.orbeon.datatypes.Orientation import org.orbeon.jquery.Offset import org.orbeon.oxf.util.CoreUtils.asUnit import org.orbeon.oxf.util.StringUtils._ import org.orbeon.xforms._ import org.orbeon.xforms.facade.{Events, Globals} import org.scalajs.dom.{document, window} import org.scalajs.jquery.{JQuery, JQueryEventObject} import scala.scalajs.js object Position { // Keeps track of pointer position var pointerPos: Offset = Offset(0, 0) $(document).on("mousemove.orbeon.builder", (event: JQueryEventObject) ⇒ asUnit { pointerPos = Offset( left = event.pageX, top = event.pageY ) }) // How much we need to add to offset to account for the form having been scrolled def scrollTop() : Double = $(".fb-main").scrollTop () def scrollLeft(): Double = $(".fb-main").scrollLeft() // Gets an element offset, normalizing for scrolling, so the offset can be stored in a cache def adjustedOffset(el: JQuery): Offset = { val rawOffset = Offset(el) Offset( left = rawOffset.left, top = rawOffset.top + scrollTop() ) } // Calls listener when what is under the pointer has potentially changed def onUnderPointerChange(fn: ⇒ Unit): Unit = { $(document).on("mousemove.orbeon.builder", fn _) // Resizing the window might change what is under the pointer the last time we saw it in the window $(window).on("resize.orbeon.builder", fn _) Events.ajaxResponseProcessedEvent.subscribe(fn _) } // Call listener when anything on the page that could change element positions happened def onOffsetMayHaveChanged(fn: () ⇒ Unit): Unit = { Events.orbeonLoadedEvent.subscribe(fn) Events.ajaxResponseProcessedEvent.subscribe(fn) Events.componentChangedLayoutEvent.subscribe(fn) $(window).on("resize.orbeon.builder", fn) } // Finds the container, if any, based on a vertical position def findInCache( containerCache : BlockCache, top : Double, left : Double ): Option[Block] = containerCache.elems find { container ⇒ // Rounding when comparing as the offset of an element isn't always exactly the same as the offset it was set to val horizontalPosInside = Math.round(container.left) <= Math.round(left) && Math.round(left) <= Math.round(container.left + container.width) val verticalPosInside = Math.round(container.top ) <= Math.round(top) && Math.round(top) <= Math.round(container.top + container.height) horizontalPosInside && verticalPosInside } // Container is either a section or grid; calls listeners passing old/new container def currentContainerChanged( containerCache : BlockCache, wasCurrent : (Block) ⇒ Unit, becomesCurrent : (Block) ⇒ Unit ): Unit = { val notifyChange = notifyOnChange(wasCurrent, becomesCurrent) onUnderPointerChange { val top = pointerPos.top + Position.scrollTop() val left = pointerPos.left + Position.scrollLeft() val dialogVisible = Globals.dialogs.exists { case (_: String, yuiDialog: js.Dynamic) ⇒ yuiDialog.cfg.config.visible.value.asInstanceOf[Boolean] } val newContainer = if 
(dialogVisible) // Ignore container under the pointer if a dialog is visible None else findInCache(containerCache, top, left) notifyChange(newContainer) } } // Returns a function, which is expected to be called every time the value changes passing the new value, and which // will when appropriate notify the listeners `was` and `becomes` of the old and new value // TODO: replace `Any` by `Unit` once callers are all in Scala def notifyOnChange[T]( was : (Block) ⇒ Unit, becomes : (Block) ⇒ Unit ): (Option[Block]) ⇒ Unit = { var currentBlockOpt: Option[Block] = None (newBlockOpt: Option[Block]) ⇒ { newBlockOpt match { case Some(newBlock) ⇒ val doNotify = currentBlockOpt match { case None ⇒ true case Some(currentBlock) ⇒ // Typically after an Ajax request, maybe a column/row was added/removed, so we might consequently // need to update the icon position ! newBlock.el.is(currentBlock.el) || // The elements could be the same, but their position could have changed, in which case want to // reposition relative icons, so we don't consider the value to be the "same" newBlock.left != currentBlock.left || newBlock.top != currentBlock.top } if (doNotify) { currentBlockOpt.foreach(was) currentBlockOpt = newBlockOpt becomes(newBlock) } case None ⇒ currentBlockOpt.foreach(was) currentBlockOpt = None } } } // Get the height of each row track def tracksWidth( gridBody : JQuery, orientation : Orientation ): List[Double] = { val cssProperty = orientation match { case Orientation.Horizontal ⇒ "grid-template-rows" case Orientation.Vertical ⇒ "grid-template-columns" } gridBody .css(cssProperty) .splitTo[List]() .map((w) ⇒ w.substring(0, w.indexOf("px"))) .map(_.toDouble) } }
brunobuzzi/orbeon-forms
form-builder/js/src/main/scala/org/orbeon/builder/Position.scala
Scala
lgpl-2.1
6,115
package lila.lobby import chess.{ Mode, Clock, Speed } import org.joda.time.DateTime import ornicar.scalalib.Random import play.api.libs.json._ import actorApi.LobbyUser import lila.game.PerfPicker import lila.rating.RatingRange import lila.user.{ User, Perfs } // correspondence chess, persistent case class Seek( _id: String, variant: Int, daysPerTurn: Option[Int], mode: Int, color: String, user: LobbyUser, ratingRange: String, createdAt: DateTime) { def id = _id val realColor = Color orDefault color val realVariant = chess.variant.Variant orDefault variant val realMode = Mode orDefault mode def compatibleWith(h: Seek) = user.id != h.user.id && compatibilityProperties == h.compatibilityProperties && (realColor compatibleWith h.realColor) && ratingRangeCompatibleWith(h) && h.ratingRangeCompatibleWith(this) private def ratingRangeCompatibleWith(h: Seek) = realRatingRange.fold(true) { range => h.rating ?? range.contains } private def compatibilityProperties = (variant, mode, daysPerTurn) lazy val realRatingRange: Option[RatingRange] = RatingRange noneIfDefault ratingRange def rating = perfType map (_.key) flatMap user.ratingMap.get def render: JsObject = Json.obj( "id" -> _id, "username" -> user.username, "rating" -> rating, "variant" -> Json.obj( "key" -> realVariant.key, "short" -> realVariant.shortName, "name" -> realVariant.name), "mode" -> realMode.id, "days" -> daysPerTurn, "color" -> chess.Color(color).??(_.name), "perf" -> Json.obj( "icon" -> perfType.map(_.iconChar.toString), "name" -> perfType.map(_.name)) ) lazy val perfType = PerfPicker.perfType(Speed.Correspondence, realVariant, daysPerTurn) } object Seek { val idSize = 8 def make( variant: chess.variant.Variant, daysPerTurn: Option[Int], mode: Mode, color: String, user: User, ratingRange: RatingRange, blocking: Set[String]): Seek = new Seek( _id = Random nextStringUppercase idSize, variant = variant.id, daysPerTurn = daysPerTurn, mode = mode.id, color = color, user = LobbyUser.make(user, blocking), ratingRange = ratingRange.toString, createdAt = DateTime.now) def renew(seek: Seek) = new Seek( _id = Random nextStringUppercase idSize, variant = seek.variant, daysPerTurn = seek.daysPerTurn, mode = seek.mode, color = seek.color, user = seek.user, ratingRange = seek.ratingRange, createdAt = DateTime.now) import reactivemongo.bson.Macros import lila.db.BSON.MapValue.MapHandler import lila.db.BSON.BSONJodaDateTimeHandler private[lobby] implicit val lobbyUserBSONHandler = Macros.handler[LobbyUser] private[lobby] implicit val seekBSONHandler = Macros.handler[Seek] }
Happy0/lila
modules/lobby/src/main/Seek.scala
Scala
mit
2,837
package pl.newicom.dddd.aggregate import pl.newicom.dddd.aggregate.error.DomainException import pl.newicom.dddd.office.{LocalOfficeId, OfficeListener} import pl.newicom.dddd.utils.ImplicitUtils._ import scala.concurrent.duration.FiniteDuration object AggregateRootSupport { sealed trait AbstractReaction[+R] sealed trait Reaction[+E] extends AbstractReaction[Seq[E]] { def flatMap[B](f: Seq[E] => Reaction[B]): Reaction[B] def flatMapMatching[B](f: PartialFunction[E, Reaction[B]]): Reaction[B] = flatMap { case Seq(e) if f.isDefinedAt(e) => f(e) case Seq(e, _*) if f.isDefinedAt(e) => f(e) case _ => this.asParameterizedBy[B] } def recoverWith[B](f: => Reaction[B]): Reaction[B] def reversed: Reaction[E] = this } case object NoReaction extends Reaction[Nothing] { def flatMap[B](f: Seq[Nothing] => Reaction[B]): Reaction[B] = f(Seq()) def recoverWith[B](f: => Reaction[B]): Reaction[B] = f } trait Collaborate[E] extends Reaction[E] { type HandleResponse = PartialFunction[Any, Reaction[E]] def apply(receive: HandleResponse)(implicit timeout: FiniteDuration): Collaborate[E] } case class AcceptC[E](events: Seq[E]) extends Reaction[E] { def &(next: E): AcceptC[E] = AcceptC(events :+ next) def &(next: Seq[E]): AcceptC[E] = AcceptC(events ++ next) override def reversed: Reaction[E] = copy(events.reverse) def flatMap[B](f: Seq[E] => Reaction[B]): Reaction[B] = { (f(events).asParameterizedBy[E] match { case AcceptC(es) => AcceptC(events ++ es) case c: Collaborate[E] => c.flatMap(_ => this).reversed case r => r.flatMap(_ => this) }).asParameterizedBy[B] } def recoverWith[B](f: => Reaction[B]): Reaction[B] = this.asParameterizedBy[B] } case class AcceptQ[R](response: R) extends AbstractReaction[R] object Reject { private[aggregate] def apply(reason: Throwable): Reject = new Reject(reason) def unapply(arg: Reject): Option[Throwable] = Some(arg.reason) } class Reject private[aggregate] (val reason: Throwable) extends Reaction[Nothing] { def flatMap[B](f: Seq[Nothing] => Reaction[B]): Reaction[B] = this def recoverWith[B](f: => Reaction[B]): Reaction[B] = f match { case NoReaction => this case o => o } } class RejectConditionally(condition: Boolean, reject: => Reject) { def orElse[E <: DomainEvent](reaction: => Reaction[E]): Reaction[E] = if (condition) reject else reaction def orElse(alternative: => RejectConditionally): RejectConditionally = if (condition) this else alternative def isRejected: Boolean = !condition } class AcceptConditionally[E <: DomainEvent](condition: Boolean, reaction: => Reaction[E]) { def orElse(rejectionReason: String): Reaction[E] = orElse(new DomainException(rejectionReason)) def orElse(rejectionReason: => DomainException): Reaction[E] = if (condition) reaction else Reject(rejectionReason) def isAccepted: Boolean = condition } } trait AggregateRootSupport extends BehaviorSupport[DomainEvent] { implicit def officeListener[A <: AggregateRoot[_, _, _] : LocalOfficeId]: OfficeListener[A] = new OfficeListener[A] }
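A small sketch of how the `Reaction` combinators above compose; the event type is hypothetical, and constructing `AcceptC` directly is only done here to illustrate the ADT (inside an aggregate root it would normally be produced by the DSL):

import pl.newicom.dddd.aggregate.AggregateRootSupport.{AcceptC, Reaction}

// Hypothetical event type used only for illustration.
case class Credited(amount: Int)

// Accept one event, then append further events with `&`.
val reaction: AcceptC[Credited] =
  AcceptC(Seq(Credited(10))) & Credited(20) & Seq(Credited(30), Credited(40))

// flatMap sequences a follow-up reaction after the already accepted events.
val followedUp: Reaction[Credited] =
  reaction.flatMap(events => AcceptC(events.map(e => Credited(e.amount * 2))))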
pawelkaczor/akka-ddd
akka-ddd-core/src/main/scala/pl/newicom/dddd/aggregate/AggregateRootSupport.scala
Scala
mit
3,327
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.{Attribute, UnsafeProjection} import org.apache.spark.sql.execution.metric.SQLMetrics /** * Physical plan node for scanning data from a local collection. */ case class LocalTableScanExec( output: Seq[Attribute], @transient rows: Seq[InternalRow]) extends LeafExecNode { override lazy val metrics = Map( "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows")) @transient private lazy val unsafeRows: Array[InternalRow] = { if (rows.isEmpty) { Array.empty } else { val proj = UnsafeProjection.create(output, output) rows.map(r => proj(r).copy()).toArray } } private lazy val numParallelism: Int = math.min(math.max(unsafeRows.length, 1), sqlContext.sparkContext.defaultParallelism) private lazy val rdd = sqlContext.sparkContext.parallelize(unsafeRows, numParallelism) protected override def doExecute(): RDD[InternalRow] = { val numOutputRows = longMetric("numOutputRows") rdd.map { r => numOutputRows += 1 r } } override protected def stringArgs: Iterator[Any] = { if (rows.isEmpty) { Iterator("<empty>", output) } else { Iterator(output) } } override def executeCollect(): Array[InternalRow] = { longMetric("numOutputRows").add(unsafeRows.size) unsafeRows } override def executeTake(limit: Int): Array[InternalRow] = { val taken = unsafeRows.take(limit) longMetric("numOutputRows").add(taken.size) taken } }
minixalpha/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/LocalTableScanExec.scala
Scala
apache-2.0
2,460
object HelloWorld {

  /* Impure program */
  def hello(): Unit =
    println("Hello, world!")

  /* Functional solution */
  object Fun {

    // Effect language
    type IOProgram = Write
    case class Write(msg: String)

    // Program
    def pureHello(): IOProgram = Write("Hello, world!")

    // Interpreter
    def run(program: IOProgram): Unit = program match {
      case Write(msg) => println(msg)
    }

    // Composition
    def hello() = run(pureHello())
  }
}
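The point of the split is that `pureHello` only builds a value describing the effect; nothing is printed until the interpreter runs it. A short sketch:

object Main extends App {
  // Build the description first; nothing has been printed yet.
  val program: HelloWorld.Fun.IOProgram = HelloWorld.Fun.pureHello()

  // Only the interpreter performs the side effect.
  HelloWorld.Fun.run(program)

  // Equivalent to the composed version.
  HelloWorld.Fun.hello()
}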
jserranohidalgo/hellomonad
src/main/scala/proposal1/Step1-HelloWorld.scala
Scala
apache-2.0
508
/** * Various functions we will test. */ package object props { /** Advanced encryption function. */ def rot13(s: String): String = s map { case c if 'a' <= c.toLower && c.toLower <= 'm' => c + 13 toChar case c if 'n' <= c.toLower && c.toLower <= 'z' => c - 13 toChar case c => c } /** Just the logistic curve. What could possibly go wrong? * Write some properties to test your assumptions. * see also: http://en.wikipedia.org/wiki/Sigmoid_function */ def logistic(t: Double): Double = 1 / (1 + Math.exp(-t)) /** Greatest common divisor function. * Implement it and test against the specification. * Is the implementation correct? Is the specification? */ def gcd(a: Int, b: Int): Int = if (b==0) a else gcd(b, a%b) /** Black-box unit-test this! */ object WeirdlyBehavedObject { var acc = 0 def accumulate(n: Int): Unit = { // sure, why not the 999th prime if (acc % 7907 == 0) acc = 23 acc += n } } /** * Union set of two sets. * Implement it using only add/remove operations against the predefined property tests. */ def union[T](a: Set[T], b: Set[T]): Set[T] = a.foldLeft(b) { (acc,e) => acc + e } /** Datatype for game of rock/paper/scissors. */ sealed trait RockPaperScissors case object Rock extends RockPaperScissors case object Paper extends RockPaperScissors case object Scissors extends RockPaperScissors /** * Play a game of rock, paper, scissors. Returns the winning object. * Write properties to check your assumptions. */ def rockPaperScissors(a: RockPaperScissors, b: RockPaperScissors): RockPaperScissors = (a,b) match { // manually enumerating the possibilities because it's not that many case (Rock,Scissors) => Rock case (Rock,Paper) => Paper case (Scissors,Rock) => Rock case (Scissors,Paper) => Scissors case (Paper,Rock) => Paper case (Paper,Scissors) => Scissors case _ => a // both are the same } /** Simplest definition of a linked list. */ sealed trait ConsList[+T] case class Cons[T](head: T, tail: ConsList[T]) extends ConsList[T] case object Empty extends ConsList[Nothing] /** * Calculate the size of a list. * Write properties and an implementation. */ def size[T](list: ConsList[T]): Int = list match { case Empty => 0 case Cons(h,t) => 1 + size(t) } def contains[T](elem: T, list: ConsList[T]): Boolean = list match { case Empty => false case Cons(e,list) => elem == e && contains(elem, list) } /** * Test if the list `list` starts with the elements of the list `start`. * Write properties and an implementation. */ def startsWith[T](list: ConsList[T], start: ConsList[T]): Boolean = (list,start) match { case (_,Empty) => true case (Empty,_) => false case (Cons(h1,t1), Cons(h2,t2)) => h1 == h2 && startsWith(t1,t2) } /** * Append b to a. * Write properties and an implementation. */ def append[T](a: ConsList[T], b: ConsList[T]): ConsList[T] = a match { case Empty => b case Cons(h,t) => Cons(h, append(t,b)) } /** * Reverse a list. * Write properties and an implementation. */ def reverse[T](list: ConsList[T]): ConsList[T] = list match { case Empty => Empty case Cons(h,t) => Cons(h, reverse(t)) } /** * Sum up the repeated numbers in a list. * Write properties and am implementation. */ def sumRepeated(list: ConsList[Int]): ConsList[Int] = sumRepeatedAcc(Empty,Empty,0) private def sumRepeatedAcc(list:ConsList[Int], stack: ConsList[Int], prev: Int): ConsList[Int] = (list,stack) match { case (Empty,_) => stack case (Cons(h,t), Cons(sh,st)) if h==prev => sumRepeatedAcc(t, Cons(sh+h,st),prev) case (Cons(h,t), _) => sumRepeatedAcc(t, Cons(h,stack),h) } }
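A hedged sketch of the kind of ScalaCheck properties these functions are meant to be exercised with; the suite and property names are made up:

import org.scalacheck.Prop.forAll
import org.scalacheck.Properties
import props._

object UnionSpec extends Properties("union") {
  // Every element of either input set must appear in the union.
  property("contains all elements of both inputs") = forAll { (a: Set[Int], b: Set[Int]) =>
    val u = union(a, b)
    a.forall(u.contains) && b.forall(u.contains)
  }

  // The union never contains anything that was not in one of the inputs.
  property("contains nothing else") = forAll { (a: Set[Int], b: Set[Int]) =>
    union(a, b).forall(x => a.contains(x) || b.contains(x))
  }
}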
jastice/proptests
src/main/scala/props/package.scala
Scala
bsd-2-clause
3,863
package maliki import upickle.default._ import org.scalajs.jquery.{jQuery, JQuery} object Parse { def jsToScala(ns: List[js.Node]): List[interface.Node] = { (for(n <- ns) yield Parse(n)) toList } def scalaToJs(ns: List[interface.Node]): List[js.Node] = { (for(n <- ns) yield Parse(n)) toList } /* Javascript Node -> Interface Node */ def apply(n: js.Node): interface.Node = { /* We need to update this for all values. I don't think there will be much of a client performance problem doing so many node lookups for a single Ajax request. */ n.value = n.jqSelect.value().toString new interface.Node(n.tag, n.attributes, n.style, n.text, n.value, n.items map { x => Parse(x) }, n.javascript, n.id) } /* Interface Node -> Javascript Node */ def apply(n: interface.Node): js.Node = maliki.js.Lola.getById(n.id) match { case Some(node) => node case None => new js.Node(n.tag, n.attributes, n.style, n.text, n.value, n.items map { x => Parse(x) }, n.javascript, n.id) } /* Interface Command -> Javascript Unit Execution -> Interface Node */ def apply(c: interface.Command): Unit = c match { case interface.Create.Create(n: interface.Node) => Parse(n).create() case interface.Delete.Delete(n: interface.Node) => Parse(n).remove case interface.OnClick.OnClick(n: interface.Node, c: List[interface.Command]) => Parse(n).onClick(() => Parse(c)) case interface.OnHover.OnHover(n: interface.Node, c: interface.Command, c2: interface.Command) => Parse(n).onHover(() => Parse(c), () => Parse(c2)) case interface.OnKeyUp.OnKeyUp(n: interface.Node, c: List[interface.Command]) => Parse(n).onKeyUp(() => Parse(c)) case interface.SlideUp.SlideUp(n: interface.Node, mili: Int) => Parse(n).slideUp(mili) case interface.SlideDown.SlideDown(n: interface.Node, mili: Int) => Parse(n).slideDown(mili) case interface.FadeIn.FadeIn(n: interface.Node, mili: Int) => Parse(n).fadeIn(mili) case interface.FadeOut.FadeOut(n: interface.Node, mili: Int) => Parse(n).fadeOut(mili) case interface.Get.Get(url: String) => js.Lola.get(url) case interface.Post.Post(url: String, n: List[interface.Node]) => js.Lola.post(url, Parse.scalaToJs(n)) case interface.Update.Update(n: interface.Node) => { val neu = n.copy() neu.id = maliki.interface.Lola.assign Parse(n).update(Parse(neu)) } case interface.Clear.Clear(s: String) => jQuery(s).empty() } def apply(cms: List[interface.Command]): Unit = { for(c <- cms) Parse(c) } }
jamesreinke/Lola-Beta
src/main/scala/Parser.scala
Scala
mit
2,485
/*
   Copyright 2013 Stephen K Samuel

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
 */
package com.sksamuel.scrimage.filter

import thirdparty.marvin.image.color.Sepia

/** @author Stephen Samuel */
object SepiaFilter extends MarvinFilter {
  val plugin = new Sepia(20)
}
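A usage sketch, assuming scrimage's usual `Image.filter` pipeline; the resource path is a placeholder:

import com.sksamuel.scrimage.Image
import com.sksamuel.scrimage.filter.SepiaFilter

// Load an image and apply the sepia filter, producing a new image.
val image = Image.fromResource("/example.png")
val sepia = image.filter(SepiaFilter)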
carlosFattor/scrimage
scrimage-filters/src/main/scala/com/sksamuel/scrimage/filter/SepiaFilter.scala
Scala
apache-2.0
778
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.stat import scala.collection.mutable.{Map => MutableMap} import org.apache.spark.internal.Logging import org.apache.spark.sql.{Column, DataFrame, Dataset, Row} import org.apache.spark.sql.catalyst.plans.logical.LocalRelation import org.apache.spark.sql.types._ object FrequentItems extends Logging { /** A helper class wrapping `MutableMap[Any, Long]` for simplicity. */ private class FreqItemCounter(size: Int) extends Serializable { val baseMap: MutableMap[Any, Long] = MutableMap.empty[Any, Long] /** * Add a new example to the counts if it exists, otherwise deduct the count * from existing items. */ def add(key: Any, count: Long): this.type = { if (baseMap.contains(key)) { baseMap(key) += count } else { if (baseMap.size < size) { baseMap += key -> count } else { val minCount = if (baseMap.values.isEmpty) 0 else baseMap.values.min val remainder = count - minCount if (remainder >= 0) { baseMap += key -> count // something will get kicked out, so we can add this baseMap.retain((k, v) => v > minCount) baseMap.transform((k, v) => v - minCount) } else { baseMap.transform((k, v) => v - count) } } } this } /** * Merge two maps of counts. * @param other The map containing the counts for that partition */ def merge(other: FreqItemCounter): this.type = { other.baseMap.foreach { case (k, v) => add(k, v) } this } } /** * Finding frequent items for columns, possibly with false positives. Using the * frequent element count algorithm described in * <a href="https://doi.org/10.1145/762471.762473">here</a>, proposed by Karp, Schenker, * and Papadimitriou. * The `support` should be greater than 1e-4. * For Internal use only. * * @param df The input DataFrame * @param cols the names of the columns to search frequent items in * @param support The minimum frequency for an item to be considered `frequent`. Should be greater * than 1e-4. * @return A Local DataFrame with the Array of frequent items for each column. 
*/ def singlePassFreqItems( df: DataFrame, cols: Seq[String], support: Double): DataFrame = { require(support >= 1e-4 && support <= 1.0, s"Support must be in [1e-4, 1], but got $support.") val numCols = cols.length // number of max items to keep counts for val sizeOfMap = (1 / support).toInt val countMaps = Seq.tabulate(numCols)(i => new FreqItemCounter(sizeOfMap)) val freqItems = df.select(cols.map(Column(_)) : _*).rdd.treeAggregate(countMaps)( seqOp = (counts, row) => { var i = 0 while (i < numCols) { val thisMap = counts(i) val key = row.get(i) thisMap.add(key, 1L) i += 1 } counts }, combOp = (baseCounts, counts) => { var i = 0 while (i < numCols) { baseCounts(i).merge(counts(i)) i += 1 } baseCounts } ) val justItems = freqItems.map(m => m.baseMap.keys.toArray) val resultRow = Row(justItems : _*) val originalSchema = df.schema val outputCols = cols.map { name => val index = originalSchema.fieldIndex(name) val originalField = originalSchema.fields(index) // append frequent Items to the column name for easy debugging StructField(name + "_freqItems", ArrayType(originalField.dataType, originalField.nullable)) }.toArray val schema = StructType(outputCols).toAttributes Dataset.ofRows(df.sparkSession, LocalRelation.fromExternalRows(schema, Seq(resultRow))) } }
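The algorithm above backs the public `DataFrameStatFunctions.freqItems` API. A short usage sketch, assuming an active `SparkSession` named `spark`; the columns are synthetic:

// Build a small DataFrame whose column values repeat with known frequencies.
val df = spark.range(0, 1000).selectExpr("id % 3 AS a", "id % 5 AS b")

// Items occurring in at least 40% of rows (the result may contain false positives).
val freq = df.stat.freqItems(Seq("a", "b"), 0.4)
freq.show()
// Result columns are named a_freqItems and b_freqItems, as built above.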
goldmedal/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/stat/FrequentItems.scala
Scala
apache-2.0
4,611
import sounder.Sounder._ import sounder.Util._ import scala.math.sin import scala.math.min import scala.math.max import scala.math.floor import scala.math.ceil import scala.math.Pi print("Plug in the multiplier circuit and press ENTER") System.in.read println("Playing signal for 2 seconds and recording input") val f1 = 100 val f2 = 233 val Fs = 44100 val xtrue : Double => Double = t => sin(2*Pi*f1*t)/3 + sin(2*Pi*f2*t)/3 val (right,left) = playRecord(xtrue, 0, 2.0, Fs) //playSamples(left) //playSamples(right) //total number of samples recorded val L = min(left.length, right.length) //value at which we truncate sinc function (makes things faster) val sinc_truncate = 200 //reconstructed input signal x def x(t : Double) : Double = { val mini = max(0, floor(Fs*t - sinc_truncate).toInt) val maxi = min(L-1, ceil(Fs*t + sinc_truncate).toInt) (mini to maxi).foldLeft(0.0){ (sum, i) => sum + left(i)*sinc(Fs*t - i) } } //reconstructed output signal y def y(t : Double) : Double = { val mini = max(0, floor(Fs*t - sinc_truncate).toInt) val maxi = min(L-1, ceil(Fs*t + sinc_truncate).toInt) (mini to maxi).foldLeft(0.0){ (sum, i) => sum + right(i)*sinc(Fs*t - i) } } //H(x) val R1 = 12e3 val R2 = 56e3 def Hx(t : Double) : Double = (1 + R2/R1)*x(t) println("Writing data to file data.csv") val tmin = 0.998 val tmax = 1.022 val filetfun = new java.io.FileWriter("data.csv") (tmin to tmax by 0.00005) foreach { t => filetfun.write(t.toString.replace('E', 'e') + "\\t" + x(t).toString.replace('E', 'e') + "\\t" + y(t).toString.replace('E', 'e') + "\\t" + Hx(t).toString.replace('E', 'e') + "\\n") } filetfun.close println("Scala finished")
robbymckilliam/testablelinearsystems
tests/multiplier/multiplier.scala
Scala
agpl-3.0
1,662
/*
 * Copyright 2014–2017 SlamData Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package quasar.fs.mount

import quasar.contrib.pathy.{ADir, AFile, APath}

import pathy.scalacheck.PathyArbitrary._
import org.scalacheck.{Arbitrary, Gen}

trait MountingsConfigArbitrary {
  implicit val mountingsConfigArbitrary: Arbitrary[MountingsConfig] =
    Arbitrary(Gen.listOf(Gen.oneOf(genFileSystemConfigEntry, genViewConfigEntry)).map(l => MountingsConfig(l.toMap)))

  private def genFileSystemConfigEntry: Gen[(APath, MountConfig)] =
    for {
      dir    <- Arbitrary.arbitrary[ADir]
      config <- MountConfigArbitrary.genFileSystemConfig
    } yield (dir, config)

  private def genViewConfigEntry: Gen[(APath, MountConfig)] =
    for {
      file   <- Arbitrary.arbitrary[AFile]
      config <- MountConfigArbitrary.genViewConfig
    } yield (file, config)
}

object MountingsConfigArbitrary extends MountingsConfigArbitrary
drostron/quasar
core/src/test/scala/quasar/fs/mount/MountingsConfigArbitrary.scala
Scala
apache-2.0
1,449
package de.choffmeister.microserviceutils.auth import java.time.Instant import akka.http.scaladsl.model._ import akka.http.scaladsl.model.headers._ import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.{Directive1, Route, StandardRoute} import akka.http.scaladsl.testkit.ScalatestRouteTest import de.choffmeister.microserviceutils.auth.consumer.{AuthConsumer, AuthConsumerSettings} import de.choffmeister.microserviceutils.auth.grants.PasswordGrant import de.choffmeister.microserviceutils.auth.models.{AccessToken, AuthError, AuthorizationCode, RefreshToken} import de.choffmeister.microserviceutils.auth.utils.SecretGenerator import de.heikoseeberger.akkahttpplayjson.PlayJsonSupport import io.jsonwebtoken.security.Keys import org.scalatest.EitherValues import org.scalatest.concurrent.ScalaFutures import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpec import scala.concurrent.Future import scala.concurrent.duration._ import scala.language.reflectiveCalls class CookieAuthProviderTest extends AnyWordSpec with ScalatestRouteTest with Matchers with EitherValues with ScalaFutures with PlayJsonSupport { def prepare = { val realm = "test" val signKey = Keys.hmacShaKeyFor(SecretGenerator.generate(32)) val verifyKey = signKey val providerSettings = AuthProviderSettings(5.minutes, signKey) val provider = new AuthProvider[TestResourceOwner, TestClient, AccessToken](providerSettings) with TestAuthProvider with CookieAuthProvider[TestResourceOwner, TestClient, AccessToken, RefreshToken, AuthorizationCode] { override val authClientId: String = "auth" override val cookieNamePrefix: String = "test" val authorizeUri: Uri = Uri("/oauth/authorize") val loginUri: Uri = Uri("/login") val consentUri: Uri = Uri("/consent") def handleUnauthorizeResourceOwner: StandardRoute = redirectWithParameters(loginUri) def selectAuthCookie( authCookies: List[AuthCookie[TestResourceOwner]] ): Directive1[Option[AuthCookie[TestResourceOwner]]] = parameter("authuser".?) .map(_.flatMap(resourceOwnerId => authCookies.find(_.resourceOwner.id == resourceOwnerId))) def loginRoute: Route = (toStrictEntity(3.second, 128 * 1024) & extractAllAuthCookies) { _ => concat( get { complete("login") }, post { formFields("username", "password", "team") { case (username, password, team) => resourceOwners .find(ro => ro._1.id == username && ro._1.team.contains(team)) match { case Some((resourceOwner, resourceOwnerPassword)) if password == resourceOwnerPassword => provideAuthCookie(resourceOwner) { redirectWithParameters(consentUri, "authuser" -> resourceOwner.id) } case _ => complete("login") } } } ) } def consentRoute: Route = toStrictEntity(3.second, 128 * 1024) { extractAllAuthCookies { authCookies => selectAuthCookie(authCookies) { case Some(authCookie) if authCookie.valid => extractClient { case Right(client) => verifyConsentCookie(authCookie.resourceOwner, client) { case true => redirectWithParameters(authorizeUri) case false => concat( get { complete("consent") }, post { formField("consent".as[Boolean].?) { case Some(true) => provideConsentCookie(authCookie.resourceOwner, client) { redirectWithParameters(authorizeUri) } case _ => complete("login - consent") } } ) } case Left(_) => ??? 
} case None => redirectWithParameters(loginUri) } } } private def redirectWithParameters(uri: Uri, additionalParameters: (String, String)*): StandardRoute = StandardRoute { parameterMap { parameters => redirect(uri.withQuery(Uri.Query(parameters ++ additionalParameters)), StatusCodes.Found) } } override val grants = List( refreshTokenGrant, authorizationCodeGrant, new PasswordGrant[TestResourceOwner, TestClient, AccessToken](this) { override def verifyCredentials( username: String, password: String, parameters: Map[String, String] ): Future[AuthResult[Option[String]]] = parameters.get("team") match { case Some(team) => Future.successful( Right( resourceOwners .find(p => p._1.team.contains(team) && p._1.id == username && p._2 == password) .map(_._1.id) ) ) case None => Future.successful(Left(AuthError.invalidRequest("Parameter team is missing"))) } } ) private lazy val clients = List( TestClient( id = "public", secret = None, grantTypes = grants.map(_.id).toSet, scopes = Set("admin", "read", "write"), refreshTokenLifetime = Some(30.days), redirectUris = "http://public/callback" :: Nil ), TestClient( id = "public-no-consent", secret = None, grantTypes = grants.map(_.id).toSet, scopes = Set("admin", "read", "write"), refreshTokenLifetime = Some(30.days), redirectUris = "http://public/callback" :: Nil ) ) override def findClient(id: String): Future[Option[TestClient]] = Future.successful(clients.find(_.id == id)) } val consumerSettings = AuthConsumerSettings(realm, verifyKey) val consumer = new AuthConsumer(consumerSettings) val routes = concat( (path("login"))(provider.loginRoute), (path("consent"))(provider.consentRoute), (path("oauth" / "access_token") & post)(provider.accessTokenRoute), (path("oauth" / "authorize") & get)(provider.authorizeRoute) ) (provider, consumer, routes) } "keeps cookies of known resource owners with valid refresh token" in { val (provider, _, routes) = prepare provider.createOrUpdateRefreshTokenTest( RefreshToken( refreshToken = "refreshToken", expiresAt = Some(Instant.now.plusSeconds(30L * 24L * 60L * 60L)), scopes = Set.empty, clientId = "auth", resourceOwnerId = "user1", revoked = false ) ) val lastUsedAt1 = provider.refreshTokens.find(_.refreshToken == "refreshToken").get.expiresAt val cookie = HttpCookiePair("test_user1_token", "refreshToken") Get("/login") ~> addHeader(Cookie(cookie)) ~> routes ~> check { val setCookies = headers.collect { case h: `Set-Cookie` => h }.toSet setCookies should be(Set.empty) } val lastUsedAt2 = provider.refreshTokens.find(_.refreshToken == "refreshToken").get.expiresAt lastUsedAt2 should not be (lastUsedAt1) } "keeps cookies of known resource owners with revoked refresh token" in { val (provider, _, routes) = prepare provider.createOrUpdateRefreshTokenTest( RefreshToken( refreshToken = "refreshToken", expiresAt = Some(Instant.now.plusSeconds(60)), scopes = Set.empty, clientId = "auth", resourceOwnerId = "user1", revoked = true ) ) val lastUsedAt1 = provider.refreshTokens.find(_.refreshToken == "refreshToken").get.expiresAt val cookie = HttpCookiePair("test_user1_token", "refreshToken") Get("/login") ~> addHeader(Cookie(cookie)) ~> routes ~> check { val setCookies = headers.collect { case h: `Set-Cookie` => h }.toSet setCookies should be(Set.empty) } val lastUsedAt2 = provider.refreshTokens.find(_.refreshToken == "refreshToken").get.expiresAt lastUsedAt2 should be(lastUsedAt1) } "keeps cookies of known resource owners with expired refresh token" in { val (provider, _, routes) = prepare provider.createOrUpdateRefreshTokenTest( 
RefreshToken( refreshToken = "refreshToken", expiresAt = Some(Instant.now.minusSeconds(60)), scopes = Set.empty, clientId = "auth", resourceOwnerId = "user1", revoked = false ) ) val lastUsedAt1 = provider.refreshTokens.find(_.refreshToken == "refreshToken").get.expiresAt val cookie = HttpCookiePair("test_user1_token", "refreshToken") Get("/login") ~> addHeader(Cookie(cookie)) ~> routes ~> check { val setCookies = headers.collect { case h: `Set-Cookie` => h }.toSet setCookies should be(Set.empty) } val lastUsedAt2 = provider.refreshTokens.find(_.refreshToken == "refreshToken").get.expiresAt lastUsedAt2 should be(lastUsedAt1) } "removes cookies of unknown resource owners" in { val (_, _, routes) = prepare val otherCookie = HttpCookiePair("other_unknown_token", "token1") val unknown1ResourceOwnerCookie = HttpCookiePair("test_unknown1_token", "unknown1_token") val unknown2ResourceOwnerCookie = HttpCookiePair("test_unknown2_token", "unknown2_token") Get("/login") ~> addHeader( Cookie(otherCookie, unknown1ResourceOwnerCookie, unknown2ResourceOwnerCookie) ) ~> routes ~> check { val setCookies = headers.collect { case h: `Set-Cookie` => h }.toSet setCookies should be( Set( `Set-Cookie`( HttpCookie( unknown1ResourceOwnerCookie.name, "deleted", path = Some("/"), httpOnly = true, expires = Some(DateTime.MinValue) ) ), `Set-Cookie`( HttpCookie( unknown2ResourceOwnerCookie.name, "deleted", path = Some("/"), httpOnly = true, expires = Some(DateTime.MinValue) ) ) ) ) } } "removes cookies of other clients" in { val (provider, _, routes) = prepare provider.createOrUpdateRefreshTokenTest( RefreshToken( refreshToken = "refreshToken", expiresAt = None, scopes = Set.empty, clientId = "public", resourceOwnerId = "user1", revoked = true ) ) val cookie = HttpCookiePair("test_user1_token", "refreshToken") Get("/login") ~> addHeader(Cookie(cookie)) ~> routes ~> check { val setCookies = headers.collect { case h: `Set-Cookie` => h }.toSet setCookies should be( Set( `Set-Cookie`( HttpCookie(cookie.name, "deleted", path = Some("/"), httpOnly = true, expires = Some(DateTime.MinValue)) ) ) ) } } "removes cookies of unknown refresh token" in { val (_, _, routes) = prepare val cookie = HttpCookiePair("test_user1_token", "refreshToken") Get("/login") ~> addHeader(Cookie(cookie)) ~> routes ~> check { val setCookies = headers.collect { case h: `Set-Cookie` => h }.toSet setCookies should be( Set( `Set-Cookie`( HttpCookie(cookie.name, "deleted", path = Some("/"), httpOnly = true, expires = Some(DateTime.MinValue)) ) ) ) } } "supports resource owner ids with special characters" in { val (provider, _, routes) = prepare provider.createOrUpdateRefreshTokenTest( RefreshToken( refreshToken = "refreshToken", expiresAt = None, scopes = Set.empty, clientId = "auth", resourceOwnerId = "user-special:%=", revoked = false ) ) val validUser1Cookie = HttpCookiePair("test_user-special%3A%25%3D_token", "refreshToken") Get("/login") ~> addHeader(Cookie(validUser1Cookie)) ~> routes ~> check { val setCookies = headers.collect { case h: `Set-Cookie` => h }.toSet setCookies should be(Set.empty) } } "works with full flow" in { val (provider, _, routes) = prepare val cookies = new TestCookieStore() val location0 = Uri("/oauth/authorize?client_id=public&redirect_uri=http://public/callback&response_type=code&scope=read&foo=bar") val location1 = cookies(Get(location0)) ~> routes ~> check { cookies.update(headers) status should be(StatusCodes.Found) header[Location].get.uri } location1.path.toString should be("/login") location1.query().toMap should be( 
Map( "client_id" -> "public", "redirect_uri" -> "http://public/callback", "response_type" -> "code", "scope" -> "read", "foo" -> "bar" ) ) val location2 = cookies( Post(location1, FormData("username" -> "user1", "password" -> "pass1", "team" -> "team1")) ) ~> routes ~> check { cookies.update(headers) status should be(StatusCodes.Found) header[Location].get.uri } location2.path.toString should be("/consent") location2.query().toMap should be( Map( "client_id" -> "public", "redirect_uri" -> "http://public/callback", "response_type" -> "code", "authuser" -> "user1", "scope" -> "read", "foo" -> "bar" ) ) val location3 = cookies(Post(location2, FormData("consent" -> "on"))) ~> routes ~> check { cookies.update(headers) status should be(StatusCodes.Found) header[Location].get.uri } location3.path.toString should be("/oauth/authorize") location3.query().toMap should be( Map( "client_id" -> "public", "redirect_uri" -> "http://public/callback", "response_type" -> "code", "authuser" -> "user1", "scope" -> "read", "foo" -> "bar" ) ) val location4 = cookies(Get(location3)) ~> routes ~> check { cookies.update(headers) status should be(StatusCodes.Found) header[Location].get.uri } location4.path.toString should be("/callback") location4.query().toMap should be(Map("code" -> location4.query().get("code").get)) val code = provider.findAuthorizationCode(location4.query().get("code").get).futureValue.get code.clientId should be("public") code.resourceOwnerId should be("user1") code.scopes should be(Set("read")) } } private[auth] class TestCookieStore(initial: List[HttpCookiePair] = List.empty) { private var cookies = initial def apply(req: HttpRequest): HttpRequest = { if (cookies.nonEmpty) req.withHeaders(req.headers :+ Cookie(cookies)) else req } def update(headers: scala.collection.immutable.Seq[HttpHeader]): Unit = { headers .collect { case `Set-Cookie`(c) => c } .foreach { case cookie if cookie.expires.exists(_.compare(DateTime.now) < 0) => cookies = cookies.filter(_.name != cookie.name) case cookie => cookies = cookies.filter(_.name != cookie.name) :+ cookie.pair } } }
choffmeister/microservice-utils
microservice-utils-auth/src/test/scala/de/choffmeister/microserviceutils/auth/CookieAuthProviderTest.scala
Scala
mit
15,677
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package spark.bagel import spark._ import spark.SparkContext._ import scala.collection.mutable.ArrayBuffer import storage.StorageLevel object Bagel extends Logging { val DEFAULT_STORAGE_LEVEL = StorageLevel.MEMORY_AND_DISK /** * Runs a Bagel program. * @param sc [[spark.SparkContext]] to use for the program. * @param vertices vertices of the graph represented as an RDD of (Key, Vertex) pairs. Often the Key will be * the vertex id. * @param messages initial set of messages represented as an RDD of (Key, Message) pairs. Often this will be an * empty array, i.e. sc.parallelize(Array[K, Message]()). * @param combiner [[spark.bagel.Combiner]] combines multiple individual messages to a given vertex into one * message before sending (which often involves network I/O). * @param aggregator [[spark.bagel.Aggregator]] performs a reduce across all vertices after each superstep, * and provides the result to each vertex in the next superstep. * @param partitioner [[spark.Partitioner]] partitions values by key * @param numPartitions number of partitions across which to split the graph. * Default is the default parallelism of the SparkContext * @param storageLevel [[spark.storage.StorageLevel]] to use for caching of intermediate RDDs in each superstep. * Defaults to caching in memory. 
* @param compute function that takes a Vertex, optional set of (possibly combined) messages to the Vertex, * optional Aggregator and the current superstep, * and returns a set of (Vertex, outgoing Messages) pairs * @tparam K key * @tparam V vertex type * @tparam M message type * @tparam C combiner * @tparam A aggregator * @return an RDD of (K, V) pairs representing the graph after completion of the program */ def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest, C: Manifest, A: Manifest]( sc: SparkContext, vertices: RDD[(K, V)], messages: RDD[(K, M)], combiner: Combiner[M, C], aggregator: Option[Aggregator[V, A]], partitioner: Partitioner, numPartitions: Int, storageLevel: StorageLevel = DEFAULT_STORAGE_LEVEL )( compute: (V, Option[C], Option[A], Int) => (V, Array[M]) ): RDD[(K, V)] = { val splits = if (numPartitions != 0) numPartitions else sc.defaultParallelism var superstep = 0 var verts = vertices var msgs = messages var noActivity = false do { logInfo("Starting superstep "+superstep+".") val startTime = System.currentTimeMillis val aggregated = agg(verts, aggregator) val combinedMsgs = msgs.combineByKey( combiner.createCombiner _, combiner.mergeMsg _, combiner.mergeCombiners _, partitioner) val grouped = combinedMsgs.groupWith(verts) val superstep_ = superstep // Create a read-only copy of superstep for capture in closure val (processed, numMsgs, numActiveVerts) = comp[K, V, M, C](sc, grouped, compute(_, _, aggregated, superstep_), storageLevel) val timeTaken = System.currentTimeMillis - startTime logInfo("Superstep %d took %d s".format(superstep, timeTaken / 1000)) verts = processed.mapValues { case (vert, msgs) => vert } msgs = processed.flatMap { case (id, (vert, msgs)) => msgs.map(m => (m.targetId, m)) } superstep += 1 noActivity = numMsgs == 0 && numActiveVerts == 0 } while (!noActivity) verts } /** Runs a Bagel program with no [[spark.bagel.Aggregator]] and the default storage level */ def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest, C: Manifest]( sc: SparkContext, vertices: RDD[(K, V)], messages: RDD[(K, M)], combiner: Combiner[M, C], partitioner: Partitioner, numPartitions: Int )( compute: (V, Option[C], Int) => (V, Array[M]) ): RDD[(K, V)] = run(sc, vertices, messages, combiner, numPartitions, DEFAULT_STORAGE_LEVEL)(compute) /** Runs a Bagel program with no [[spark.bagel.Aggregator]] */ def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest, C: Manifest]( sc: SparkContext, vertices: RDD[(K, V)], messages: RDD[(K, M)], combiner: Combiner[M, C], partitioner: Partitioner, numPartitions: Int, storageLevel: StorageLevel )( compute: (V, Option[C], Int) => (V, Array[M]) ): RDD[(K, V)] = { run[K, V, M, C, Nothing]( sc, vertices, messages, combiner, None, partitioner, numPartitions, storageLevel)( addAggregatorArg[K, V, M, C](compute)) } /** * Runs a Bagel program with no [[spark.bagel.Aggregator]], default [[spark.HashPartitioner]] * and default storage level */ def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest, C: Manifest]( sc: SparkContext, vertices: RDD[(K, V)], messages: RDD[(K, M)], combiner: Combiner[M, C], numPartitions: Int )( compute: (V, Option[C], Int) => (V, Array[M]) ): RDD[(K, V)] = run(sc, vertices, messages, combiner, numPartitions, DEFAULT_STORAGE_LEVEL)(compute) /** Runs a Bagel program with no [[spark.bagel.Aggregator]] and the default [[spark.HashPartitioner]]*/ def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest, C: Manifest]( sc: SparkContext, vertices: RDD[(K, 
V)], messages: RDD[(K, M)], combiner: Combiner[M, C], numPartitions: Int, storageLevel: StorageLevel )( compute: (V, Option[C], Int) => (V, Array[M]) ): RDD[(K, V)] = { val part = new HashPartitioner(numPartitions) run[K, V, M, C, Nothing]( sc, vertices, messages, combiner, None, part, numPartitions, storageLevel)( addAggregatorArg[K, V, M, C](compute)) } /** * Runs a Bagel program with no [[spark.bagel.Aggregator]], default [[spark.HashPartitioner]], * [[spark.bagel.DefaultCombiner]] and the default storage level */ def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest]( sc: SparkContext, vertices: RDD[(K, V)], messages: RDD[(K, M)], numPartitions: Int )( compute: (V, Option[Array[M]], Int) => (V, Array[M]) ): RDD[(K, V)] = run(sc, vertices, messages, numPartitions, DEFAULT_STORAGE_LEVEL)(compute) /** * Runs a Bagel program with no [[spark.bagel.Aggregator]], the default [[spark.HashPartitioner]] * and [[spark.bagel.DefaultCombiner]] */ def run[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest]( sc: SparkContext, vertices: RDD[(K, V)], messages: RDD[(K, M)], numPartitions: Int, storageLevel: StorageLevel )( compute: (V, Option[Array[M]], Int) => (V, Array[M]) ): RDD[(K, V)] = { val part = new HashPartitioner(numPartitions) run[K, V, M, Array[M], Nothing]( sc, vertices, messages, new DefaultCombiner(), None, part, numPartitions, storageLevel)( addAggregatorArg[K, V, M, Array[M]](compute)) } /** * Aggregates the given vertices using the given aggregator, if it * is specified. */ private def agg[K, V <: Vertex, A: Manifest]( verts: RDD[(K, V)], aggregator: Option[Aggregator[V, A]] ): Option[A] = aggregator match { case Some(a) => Some(verts.map { case (id, vert) => a.createAggregator(vert) }.reduce(a.mergeAggregators(_, _))) case None => None } /** * Processes the given vertex-message RDD using the compute * function. Returns the processed RDD, the number of messages * created, and the number of active vertices. */ private def comp[K: Manifest, V <: Vertex, M <: Message[K], C]( sc: SparkContext, grouped: RDD[(K, (Seq[C], Seq[V]))], compute: (V, Option[C]) => (V, Array[M]), storageLevel: StorageLevel ): (RDD[(K, (V, Array[M]))], Int, Int) = { var numMsgs = sc.accumulator(0) var numActiveVerts = sc.accumulator(0) val processed = grouped.flatMapValues { case (_, vs) if vs.size == 0 => None case (c, vs) => val (newVert, newMsgs) = compute(vs(0), c match { case Seq(comb) => Some(comb) case Seq() => None }) numMsgs += newMsgs.size if (newVert.active) numActiveVerts += 1 Some((newVert, newMsgs)) }.persist(storageLevel) // Force evaluation of processed RDD for accurate performance measurements processed.foreach(x => {}) (processed, numMsgs.value, numActiveVerts.value) } /** * Converts a compute function that doesn't take an aggregator to * one that does, so it can be passed to Bagel.run. */ private def addAggregatorArg[K: Manifest, V <: Vertex : Manifest, M <: Message[K] : Manifest, C]( compute: (V, Option[C], Int) => (V, Array[M]) ): (V, Option[C], Option[Nothing], Int) => (V, Array[M]) = { (vert: V, msgs: Option[C], aggregated: Option[Nothing], superstep: Int) => compute(vert, msgs, superstep) } } trait Combiner[M, C] { def createCombiner(msg: M): C def mergeMsg(combiner: C, msg: M): C def mergeCombiners(a: C, b: C): C } trait Aggregator[V, A] { def createAggregator(vert: V): A def mergeAggregators(a: A, b: A): A } /** Default combiner that simply appends messages together (i.e. 
performs no aggregation) */ class DefaultCombiner[M: Manifest] extends Combiner[M, Array[M]] with Serializable { def createCombiner(msg: M): Array[M] = Array(msg) def mergeMsg(combiner: Array[M], msg: M): Array[M] = combiner :+ msg def mergeCombiners(a: Array[M], b: Array[M]): Array[M] = a ++ b } /** * Represents a Bagel vertex. * * Subclasses may store state along with each vertex and must * inherit from java.io.Serializable or scala.Serializable. */ trait Vertex { def active: Boolean } /** * Represents a Bagel message to a target vertex. * * Subclasses may contain a payload to deliver to the target vertex * and must inherit from java.io.Serializable or scala.Serializable. */ trait Message[K] { def targetId: K }
rjpower/spark
bagel/src/main/scala/spark/bagel/Bagel.scala
Scala
apache-2.0
10,855
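For reference, a minimal sketch of how the `Bagel.run` API above might be driven, assuming a hypothetical `CountVertex`/`Ping` vertex and message pair (neither is part of the file). The simplest overload shown above supplies a `DefaultCombiner` and a `HashPartitioner` automatically:

import spark.SparkContext
import spark.bagel._

// Hypothetical vertex and message types, defined only for this sketch.
case class CountVertex(id: String, count: Int, active: Boolean) extends Vertex with Serializable
case class Ping(targetId: String) extends Message[String] with Serializable

object BagelCountExample {
  def main(args: Array[String]) {
    val sc = new SparkContext("local", "bagel-count-example")

    val vertices = sc.parallelize(Seq("a", "b", "c").map(id => (id, CountVertex(id, 0, active = true))))
    val messages = sc.parallelize(Seq.empty[(String, Ping)])

    // Every active vertex counts the messages it received and pings itself;
    // all vertices halt after superstep 2, ending the run (no messages, no active vertices).
    val result = Bagel.run(sc, vertices, messages, numPartitions = 2) {
      (v: CountVertex, msgs: Option[Array[Ping]], superstep: Int) =>
        val received = msgs.map(_.length).getOrElse(0)
        val outgoing = if (superstep < 2) Array(Ping(v.id)) else Array.empty[Ping]
        (v.copy(count = v.count + received, active = superstep < 2), outgoing)
    }

    result.collect().foreach(println)
    sc.stop()
  }
}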
package com.twitter.concurrent import com.twitter.conversions.time._ import com.twitter.io.{Buf, Reader} import com.twitter.util._ import org.junit.runner.RunWith import org.scalacheck.{Arbitrary, Gen} import org.scalatest.FunSuite import org.scalatest.junit.JUnitRunner import org.scalatest.prop.GeneratorDrivenPropertyChecks @RunWith(classOf[JUnitRunner]) class AsyncStreamTest extends FunSuite with GeneratorDrivenPropertyChecks { import AsyncStream.{mk, of} import AsyncStreamTest._ test("strict head") { intercept[Exception] { (undefined: Unit) +:: AsyncStream.empty } intercept[Exception] { mk(undefined, AsyncStream.empty) } intercept[Exception] { of(undefined) } } test("lazy tail") { var forced = false val s = () +:: { forced = true; AsyncStream.empty[Unit] } assert(await(s.head) == Some(())) assert(!forced) await(s.tail) assert(forced) var forced1 = false val t = mk((), { forced1 = true; AsyncStream.empty[Unit] }) assert(await(t.head) == Some(())) assert(!forced1) await(t.tail) assert(forced1) } test("call-by-name tail evaluated at most once") { val p = new Promise[Unit] val s = () +:: { if (p.setDone()) of(()) else AsyncStream.empty[Unit] } assert(toSeq(s) == toSeq(s)) } test("ops that force tail evaluation") { def isForced(f: AsyncStream[_] => Future[_]): Unit = { var forced = false Await.ready(f(() +:: { forced = true; AsyncStream.empty })) assert(forced) } isForced(_.foldLeft(0)((_, _) => 0)) isForced(_.foldLeftF(0)((_, _) => Future.value(0))) isForced(_.tail) } test("observe: failure") { val s = 1 +:: 2 +:: (undefined: AsyncStream[Int]) val (x +: y +: Nil, exc) = await(s.observe()) assert(x == 1) assert(y == 2) assert(exc.isDefined) } test("observe: no failure") { val s = 1 +:: 2 +:: AsyncStream.empty[Int] val (x +: y +: Nil, exc) = await(s.observe()) assert(x == 1) assert(y == 2) assert(exc.isEmpty) } test("fromSeq works on infinite streams") { def ones: Stream[Int] = 1 #:: ones assert(toSeq(fromSeq(ones).take(3)) == Seq(1, 1, 1)) } test("foreach") { val x = new Promise[Unit] val y = new Promise[Unit] def f() = { x.setDone(); () } def g() = { y.setDone(); () } val s = () +:: f() +:: g() +:: AsyncStream.empty[Unit] assert(!x.isDefined) assert(!y.isDefined) s.foreach(_ => ()) assert(x.isDefined) assert(y.isDefined) } test("lazy ops") { val p = new Promise[Unit] val s = () +:: { p.setDone() undefined: AsyncStream[Unit] } s.map(x => 0) assert(!p.isDefined) s.mapF(x => Future.True) assert(!p.isDefined) s.flatMap(x => of(x)) assert(!p.isDefined) s.filter(_ => true) assert(!p.isDefined) s.withFilter(_ => true) assert(!p.isDefined) s.take(Int.MaxValue) assert(!p.isDefined) assert(toSeq(s.take(1)) == Seq(())) assert(!p.isDefined) s.takeWhile(_ => true) assert(!p.isDefined) s.uncons assert(!p.isDefined) s.foldRight(Future.Done) { (_, _) => Future.Done } assert(!p.isDefined) s.scanLeft(Future.Done) { (_, _) => Future.Done } assert(!p.isDefined) s ++ s assert(!p.isDefined) assert(await(s.head) == Some(())) assert(!p.isDefined) intercept[Exception] { await(s.tail).isEmpty } assert(p.isDefined) } test("memoized stream") { class Ctx[A](ops: AsyncStream[Int] => AsyncStream[A]) { var once = 0 val s: AsyncStream[Int] = 2 +:: { once = once + 1 if (once > 1) throw new Exception("evaluated more than once") AsyncStream.of(1) } val ss = ops(s) ss.foreach(_ => ()) // does not throw ss.foreach(_ => ()) } new Ctx(s => s.map(_ => 0)) new Ctx(s => s.mapF(_ => Future.value(1))) new Ctx(s => s.flatMap(of(_))) new Ctx(s => s.filter(_ => true)) new Ctx(s => s.withFilter(_ => true)) new Ctx(s => s.take(2)) new 
Ctx(s => s.takeWhile(_ => true)) new Ctx(s => s.scanLeft(Future.Done) { (_, _) => Future.Done }) new Ctx(s => s ++ s) } // Note: We could use ScalaCheck's Arbitrary[Function1] for some of the tests // below, however ScalaCheck generates only constant functions which return // the same value for any input. This makes it quite useless to us. We'll take // another look since https://github.com/rickynils/scalacheck/issues/136 might // have solved this issue. test("map") { forAll { (s: List[Int]) => def f(n: Int) = n.toString assert(toSeq(fromSeq(s).map(f)) == s.map(f)) } } test("mapF") { forAll { (s: List[Int]) => def f(n: Int) = n.toString val g = f _ andThen Future.value assert(toSeq(fromSeq(s).mapF(g)) == s.map(f)) } } test("flatMap") { forAll { (s: List[Int]) => def f(n: Int) = n.toString def g(a: Int): AsyncStream[String] = of(f(a)) def h(a: Int): List[String] = List(f(a)) assert(toSeq(fromSeq(s).flatMap(g)) == s.flatMap(h)) } } test("filter") { forAll { (s: List[Int]) => def f(n: Int) = n % 3 == 0 assert(toSeq(fromSeq(s).filter(f)) == s.filter(f)) } } test("++") { forAll { (a: List[Int], b: List[Int]) => assert(toSeq(fromSeq(a) ++ fromSeq(b)) == a ++ b) } } test("++ with a long stream") { var count = 0 def genLongStream(len: Int): AsyncStream[Int] = if (len == 0) { AsyncStream.of(1) } else { count = count + 1 1 +:: genLongStream(len - 1) } // concat a long stream does not stack overflow val s = genLongStream(1000000) ++ genLongStream(3) s.foreach(_ => ()) val first = count s.foreach(_ => ()) // the values are evaluated once assert(count == first) } test("foldRight") { forAll { (a: List[Int]) => def f(n: Int, s: String) = (s.toLong + n).toString def g(q: Int, p: => Future[String]): Future[String] = p.map(f(q, _)) val m = fromSeq(a).foldRight(Future.value("0"))(g) assert(await(m) == a.foldRight("0")(f)) } } test("scanLeft") { forAll { (a: List[Int]) => def f(s: String, n: Int) = (s.toLong + n).toString assert(toSeq(fromSeq(a).scanLeft("0")(f)) == a.scanLeft("0")(f)) } } test("scanLeft is eager") { val never = AsyncStream.fromFuture(Future.never) val hd = never.scanLeft("hi")((_,_) => ???).head assert(hd.isDefined) assert(await(hd) == Some("hi")) } test("foldLeft") { forAll { (a: List[Int]) => def f(s: String, n: Int) = (s.toLong + n).toString assert(await(fromSeq(a).foldLeft("0")(f)) == a.foldLeft("0")(f)) } } test("foldLeftF") { forAll { (a: List[Int]) => def f(s: String, n: Int) = (s.toLong + n).toString val g: (String, Int) => Future[String] = (q, p) => Future.value(f(q, p)) assert(await(fromSeq(a).foldLeftF("0")(g)) == a.foldLeft("0")(f)) } } test("flatten") { val small = Gen.resize(10, Arbitrary.arbitrary[List[List[Int]]]) forAll(small) { s => assert(toSeq(fromSeq(s.map(fromSeq)).flatten) == s.flatten) } } test("head") { forAll { (a: List[Int]) => assert(await(fromSeq(a).head) == a.headOption) } } test("isEmpty") { val s = AsyncStream.of(1) val tail = await(s.tail) assert(tail == None) } test("tail") { forAll(Gen.nonEmptyListOf(Arbitrary.arbitrary[Int])) { (a: List[Int]) => val tail = await(fromSeq(a).tail) a.tail match { case Nil => assert(tail == None) case _ => assert(toSeq(tail.get) == a.tail) } } } test("uncons") { assert(await(AsyncStream.empty.uncons) == None) forAll(Gen.nonEmptyListOf(Arbitrary.arbitrary[Int])) { (a: List[Int]) => val Some((h, t)) = await(fromSeq(a).uncons) assert(h == a.head) assert(toSeq(t()) == a.tail) } } test("take") { forAll(genListAndN) { case (as, n) => assert(toSeq(fromSeq(as).take(n)) == as.take(n)) } } test("drop") { forAll(genListAndN) { case (as, 
n) => assert(toSeq(fromSeq(as).drop(n)) == as.drop(n)) } } test("takeWhile") { forAll(genListAndSentinel) { case (as, x) => assert(toSeq(fromSeq(as).takeWhile(_ != x)) == as.takeWhile(_ != x)) } } test("dropWhile") { forAll(genListAndSentinel) { case (as, x) => assert(toSeq(fromSeq(as).dropWhile(_ != x)) == as.dropWhile(_ != x)) } } test("toSeq") { forAll { (as: List[Int]) => assert(await(fromSeq(as).toSeq()) == as) } } test("identity") { val small = Gen.resize(10, Arbitrary.arbitrary[List[Int]]) forAll(small) { s => val a = fromSeq(s) def f(x: Int) = x +:: a assert(toSeq(of(1).flatMap(f)) == toSeq(f(1))) assert(toSeq(a.flatMap(of)) == toSeq(a)) } } test("associativity") { val small = Gen.resize(10, Arbitrary.arbitrary[List[Int]]) forAll(small, small, small) { (s, t, u) => val a = fromSeq(s) val b = fromSeq(t) val c = fromSeq(u) def f(x: Int) = x +:: b def g(x: Int) = x +:: c val v = a.flatMap(f).flatMap(g) val w = a.flatMap(x => f(x).flatMap(g)) assert(toSeq(v) == toSeq(w)) } } test("buffer() works like Seq.splitAt") { forAll { (items: List[Char], bufferSize: Int) => val (expectedBuffer, expectedRest) = items.splitAt(bufferSize) val (buffer, rest) = await(fromSeq(items).buffer(bufferSize)) assert(expectedBuffer == buffer) assert(expectedRest == toSeq(rest())) } } test("buffer() has the same properties as take() and drop()") { // We need items to be non-empty, because AsyncStream.empty ++ // <something> forces the future to be created. val gen = Gen.zip(Gen.nonEmptyListOf(Arbitrary.arbitrary[Char]), Arbitrary.arbitrary[Int]) forAll(gen) { case (items, n) => var forced1 = false val stream1 = fromSeq(items) ++ { forced1 = true; AsyncStream.empty[Char] } var forced2 = false val stream2 = fromSeq(items) ++ { forced2 = true; AsyncStream.empty[Char] } val takeResult = toSeq(stream2.take(n)) val (bufferResult, bufferRest) = await(stream1.buffer(n)) assert(takeResult == bufferResult) // Strictness property: we should only need to force the full // stream if we asked for more items that were present in the // stream. assert(forced1 == (n > items.size)) assert(forced1 == forced2) val wasForced = forced1 // Strictness property: Since AsyncStream contains a Future // rather than a thunk, we need to evaluate the next element in // order to get the result of drop and the rest of the stream // after buffering. val bufferTail = bufferRest() val dropTail = stream2.drop(n) assert(forced1 == (n >= items.size)) assert(forced1 == forced2) // This is the only case that should have caused the item to be forced. assert((wasForced == forced1) || n == items.size) // Forcing the rest of the sequence should always cause evaluation. assert(toSeq(bufferTail) == toSeq(dropTail)) assert(forced1) assert(forced2) } } test("grouped() works like Seq.grouped") { forAll { (items: Seq[Char], groupSize: Int) => // This is a Try so that we can test that bad inputs act the // same. (Zero or negative group sizes throw the same // exception.) val expected = Try(items.grouped(groupSize).toSeq) val actual = Try(toSeq(fromSeq(items).grouped(groupSize))) // If they are both exceptions, then pass if the exceptions are // the same type (don't require them to define equality or have // the same exception message) (actual, expected) match { case (Throw(e1), Throw(e2)) => assert(e1.getClass == e2.getClass) case _ => assert(actual == expected) } } } test("grouped should be lazy") { val gen = for { // We need items to be non-empty, because AsyncStream.empty ++ // <something> forces the future to be created. 
items <- Gen.nonEmptyListOf(Arbitrary.arbitrary[Char]) // We need to make sure that the chunk size (1) is valid and (2) // is short enough that forcing the first group does not force // the exception. groupSize <- Gen.chooseNum(1, items.size) } yield (items, groupSize) forAll(gen) { case (items, groupSize) => var forced = false val stream: AsyncStream[Char] = fromSeq(items) ++ { forced = true; AsyncStream.empty } val expected = items.grouped(groupSize).toSeq.headOption // This will take up to items.size items from the stream. This // does not require forcing the tail. val actual = await(stream.grouped(groupSize).head) assert(actual == expected) assert(!forced) val expectedChunks = items.grouped(groupSize).toSeq val allChunks = toSeq(stream.grouped(groupSize)) assert(allChunks == expectedChunks) assert(forced) } } test("mapConcurrent preserves items") { forAll(Arbitrary.arbitrary[List[Int]], Gen.choose(1, 10)) { (xs, conc) => assert(toSeq(AsyncStream.fromSeq(xs).mapConcurrent(conc)(Future.value)).sorted == xs.sorted) } } test("mapConcurrent makes progress when an item is blocking") { forAll(Arbitrary.arbitrary[List[Int]], Gen.choose(2, 10)) { (xs, conc) => // This promise is not satisfied, which would block the evaluation // of .map, and should not block .mapConcurrent when conc > 1 val first = new Promise[Int] // This function will return a blocking future the first time it // is called and an immediately-available future thereafter. var used = false def f(x: Int) = if (used) { Future.value(x) } else { used = true first } // Concurrently map over the stream. The whole stream should be // available, except for one item which is still blocked. val mapped = AsyncStream.fromSeq(xs).mapConcurrent(conc)(f) // All but the first value, which is still blocking, has been returned assert(toSeq(mapped.take(xs.length - 1)).sorted == xs.drop(1).sorted) if (xs.nonEmpty) { // The stream as a whole is still blocking on the unsatisfied promise assert(!mapped.foreach(_ => ()).isDefined) // Unblock the first value first.setValue(xs.head) } // Now the whole stream should be available and should contain all // of the items, ignoring order (but preserving repetition) assert(mapped.foreach(_ => ()).isDefined) assert(toSeq(mapped).sorted == xs.sorted) } } test("mapConcurrent is lazy once it reaches its concurrency limit") { forAll(Gen.choose(2, 10), Arbitrary.arbitrary[Seq[Int]]) { (conc, xs) => val q = new scala.collection.mutable.Queue[Promise[Unit]] val mapped = AsyncStream.fromSeq(xs).mapConcurrent(conc) { _ => val p = new Promise[Unit] q.enqueue(p) p } // If there are at least `conc` items in the queue, then we should // have started exactly `conc` of them. Otherwise, we should have // started all of them. assert(q.size == conc.min(xs.size)) if (xs.nonEmpty) { assert(!mapped.head.isDefined) val p = q.dequeue() p.setDone() } // Satisfying that promise makes the head of the queue available. assert(mapped.head.isDefined) if (xs.size > 1) { // We do not add another element to the queue until the next // element is forced. assert(q.size == (conc.min(xs.size) - 1)) val tl = mapped.drop(1) assert(!tl.head.isDefined) // Forcing the next element of the queue causes us to enqueue // one more element (if there are more elements to enqueue) assert(q.size == conc.min(xs.size - 1)) val p = q.dequeue() p.setDone() // Satisfying that promise causes the head to be available. 
assert(tl.head.isDefined) } } } test("mapConcurrent makes progress, even with blocking streams and blocking work") { val gen = Gen.zip( Gen.choose(0, 10).label("numActions"), Gen.choose(0, 10).flatMap(Gen.listOfN(_, Arbitrary.arbitrary[Int])), Gen.choose(1, 11).label("concurrency") ) forAll(gen) { case (numActions, items, concurrency) => val input: AsyncStream[Int] = AsyncStream.fromSeq(items) ++ AsyncStream.fromFuture(Future.never) var workStarted = 0 var workFinished = 0 val result = input.mapConcurrent(concurrency) { i => workStarted += 1 if (workFinished < numActions) { workFinished += 1 Future.value(i) } else { // After numActions evaluations, return a Future that // will never be satisfied. Future.never } } // How much work should have been started by mapConcurrent. val expectedStarted = items.size.min(concurrency) assert(workStarted == expectedStarted, "work started") val expectedFinished = numActions.min(expectedStarted) assert(workFinished == expectedFinished, "expected finished") // Make sure that all of the finished items are now // available. (As a side-effect, this will force more work to // be done if concurrency was the limiting factor.) val completed = toSeq(result.take(workFinished)).sorted val expectedCompleted = items.take(expectedFinished).sorted assert(completed == expectedCompleted) } } test("fromReader") { forAll { l: List[Byte] => val buf = Buf.ByteArray.Owned(l.toArray) val as = AsyncStream.fromReader(Reader.fromBuf(buf), chunkSize = 1) assert(toSeq(as).map(b => Buf.ByteArray.Owned.extract(b).head) == l) } } test("sum") { forAll { xs: List[Int] => assert(xs.sum == await(AsyncStream.fromSeq(xs).sum)) } } test("size") { forAll { xs: List[Int] => assert(xs.size == await(AsyncStream.fromSeq(xs).size)) } } test("force") { forAll { xs: List[Int] => val p = new Promise[Unit] // The promise will be defined iff the tail is forced. val s = AsyncStream.fromSeq(xs) ++ { p.setDone() ; AsyncStream.empty } // If the input is empty, then the tail will be forced right away. assert(p.isDefined == xs.isEmpty) // Unconditionally force the whole stream await(s.force) assert(p.isDefined) } } test("withEffect") { forAll(genListAndN) { case (xs, n) => var i = 0 val s = AsyncStream.fromSeq(xs).withEffect(_ => i += 1) // Is lazy on initial application (with the exception of the first element) assert(i == (if (xs.isEmpty) 0 else 1)) // Is lazy when consuming the stream await(s.take(n).force) // If the list is empty, no effects should occur. If the list is // non-empty, the effect will occur for the first item right away, // since the head is not lazy. Otherwise, we expect the same // number of effects as items demanded. 
val expected = if (xs.isEmpty) 0 else 1.max(xs.length.min(n)) assert(i == expected) // Preserves the elements in the stream assert(toSeq(s) == xs) } } test("merge generates a stream equal to all input streams") { forAll { (lists: Seq[List[Int]]) => val streams = lists.map(fromSeq) val merged = AsyncStream.merge(streams: _*) val input = AsyncStream(streams: _*).flatten assert(toSeq(input).sorted == toSeq(merged).sorted) } } test("merge fails the result stream if an input stream fails") { forAll() { (lists: Seq[List[Int]]) => val s = mk(1, undefined: AsyncStream[Int]) val streams = s +: lists.map(fromSeq) val merged = AsyncStream.merge(streams: _*) intercept[Exception](toSeq(merged)) } } test("merged stream contains elements as they become available from input streams") { forAll { (input: List[Int]) => val promises = List.fill(input.size)(Promise[Int]()) // grouped into lists of 10 elements each val grouped = promises.grouped(10).toList // merged list of streams val streams = grouped.map(fromSeq(_).flatMap(AsyncStream.fromFuture)) val merged = AsyncStream.merge(streams: _*).toSeq() // build an interleaved list of the promises for the stream // [s1(1), s2(1), s3(1), s1(2), s2(2), s3(2), ...] val interleavedHeads = grouped.flatMap(_.zipWithIndex).sortBy(_._2).map(_._1) interleavedHeads.zip(input).foreach { case (p, i) => p.update(Return(i)) } assert(Await.result(merged) == input) } } } private object AsyncStreamTest { val genListAndN = for { as <- Arbitrary.arbitrary[List[Int]] n <- Gen.choose(0, as.length) } yield (as, n) val genListAndSentinel = for { as <- Arbitrary.arbitrary[List[Int]] n <- Gen.choose(0, as.length - 1) } yield (as, as(n)) def await[T](fut: Future[T]) = Await.result(fut, 100.milliseconds) def undefined[A]: A = throw new Exception def toSeq[A](s: AsyncStream[A]): Seq[A] = await(s.toSeq()) def fromSeq[A](s: Seq[A]): AsyncStream[A] = // Test all AsyncStream constructors: Empty, FromFuture, Cons, Embed. s match { case Nil => AsyncStream.empty case a +: Nil => AsyncStream.of(a) case a +: b +: Nil => AsyncStream.embed(Future.value(a +:: AsyncStream.of(b))) case a +: as => a +:: fromSeq(as) } }
folone/util
util-core/src/test/scala/com/twitter/concurrent/AsyncStreamTest.scala
Scala
apache-2.0
21,811
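A small sketch of the laziness the test above relies on: `+::` takes its tail by name, transformations such as `map` stay lazy, and only forcing the stream (here via `toSeq()`) evaluates the tail thunk. Assumes the same twitter-util version as the test:

import com.twitter.concurrent.AsyncStream
import com.twitter.conversions.time._
import com.twitter.util.Await

object AsyncStreamExample {
  def main(args: Array[String]): Unit = {
    var tailForced = false
    val s: AsyncStream[Int] = 1 +:: 2 +:: { tailForced = true; AsyncStream.of(3) }

    val doubled = s.map(_ * 2) // lazy: the tail thunk is still untouched
    println(tailForced)        // false

    val all = Await.result(doubled.toSeq(), 1.second)
    println(all)               // List(2, 4, 6)
    println(tailForced)        // true: toSeq() forced the whole stream
  }
}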
/* ASIB - A Scala IRC Bot
   Copyright (C) 2012 Iain Cambridge

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package asib.mocks.util.connector

import asib.util.connector.Irc

class MockIrcConnector extends Irc {
  var lastSentMessage = List[String]()

  override def connect = {}

  override def send(message: String) = {
    lastSentMessage ::= message
  }

  override def isConnected: Boolean = true
}
icambridge-old/asib
src/test/scala/asib/mocks/util/connector/MockIrcConnector.scala
Scala
gpl-3.0
1,015
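A tiny usage sketch for the mock above (plain assertions, no test framework assumed); note that `send` prepends, so the most recent message sits at the head of `lastSentMessage`:

import asib.mocks.util.connector.MockIrcConnector

object MockIrcConnectorExample extends App {
  val irc = new MockIrcConnector
  irc.connect
  irc.send("PRIVMSG #scala :hello")
  irc.send("PRIVMSG #scala :world")

  assert(irc.isConnected)
  assert(irc.lastSentMessage == List("PRIVMSG #scala :world", "PRIVMSG #scala :hello"))
}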
package index.annotations import com.spatial4j.core.distance.DistanceUtils import com.spatial4j.core.shape.Rectangle import com.spatial4j.core.shape.impl.RectangleImpl import com.vividsolutions.jts.geom.Coordinate import global.Global import index.{ Heatmap, Index, IndexBase, IndexFields, SearchParameters } import index.places.IndexedPlaceNetwork import models.core.{ AnnotatedThings, Datasets } import models.geo.BoundingBox import org.apache.lucene.facet.FacetsCollector import org.apache.lucene.facet.taxonomy.{ FastTaxonomyFacetCounts, TaxonomyReader } import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader import org.apache.lucene.facet.taxonomy.SearcherTaxonomyManager.SearcherAndTaxonomy import org.apache.lucene.index.Term import org.apache.lucene.queryparser.classic.MultiFieldQueryParser import org.apache.lucene.search.{ BooleanClause, BooleanQuery, IndexSearcher, NumericRangeQuery, Query, QueryWrapperFilter, TermQuery } import org.apache.lucene.spatial.query.{ SpatialArgs, SpatialOperation } import org.apache.lucene.spatial.prefix.HeatmapFacetCounter import play.api.Play import play.api.Play.current import play.api.db.slick._ import play.api.Logger trait AnnotationReader extends IndexBase { def calculateTopPlaces(query: Query, limit: Int, searcher: IndexSearcher, taxonomyReader: TaxonomyReader): Seq[(IndexedPlaceNetwork, Int)] = { val fc = new FacetsCollector() searcher.search(query, fc) val facets = new FastTaxonomyFacetCounts(taxonomyReader, Index.facetsConfig, fc) val topURIs = Option(facets.getTopChildren(limit, IndexFields.PLACE_URI)).map(result => { result.labelValues.toSeq.map(lv => (lv.label, lv.value.intValue)) }).getOrElse(Seq.empty[(String, Int)]) topURIs.map { case (uri, count) => Global.index.findNetworkByPlaceURI(uri).map((_, count)) }.flatten } def getSnippets(thingId: String, phrase: String, places: Seq[String], limit: Int, searcher: IndexSearcher): Seq[String] = { DB.withSession { implicit session: Session => val rootId = AnnotatedThings.getParentHierarchy(thingId).lastOption.getOrElse(thingId) val allIds = rootId +: AnnotatedThings.listChildrenRecursive(rootId) val idQuery = if (allIds.size > 1) { val q = new BooleanQuery() allIds.foreach(id => q.add(new TermQuery(new Term(IndexFields.ANNOTATION_THING, id)), BooleanClause.Occur.SHOULD)) q } else { new TermQuery(new Term(IndexFields.ANNOTATION_THING, allIds.head)) } val query = new BooleanQuery() query.add(idQuery, BooleanClause.Occur.MUST) val fields = Seq( IndexFields.ANNOTATION_QUOTE, IndexFields.ANNOTATION_FULLTEXT_PREFIX, IndexFields.ANNOTATION_FULLTEXT_SUFFIX).toArray query.add(new MultiFieldQueryParser(fields, analyzer).parse(phrase), BooleanClause.Occur.MUST) if (places.size > 0) { val q = new BooleanQuery() places.foreach(uri => q.add(new TermQuery(new Term(IndexFields.PLACE_URI, Index.normalizeURI(uri))), BooleanClause.Occur.SHOULD)) query.add(q, BooleanClause.Occur.MUST) } val topDocs = searcher.search(query, 3) val snippetTuples = topDocs.scoreDocs.foldLeft(Seq.empty[Seq[String]])((listOfSegments, scoreDoc) => { val allPreviousSegments = listOfSegments.flatten val annotation = new IndexedAnnotation(searcher.doc(scoreDoc.doc)) val segments = Seq(annotation.prefix, annotation.quote, annotation.suffix).flatten // We don't want segments (prefix, quote, suffix) which are already in the segments list val newSegments = segments.filter(str => !allPreviousSegments.exists(_.equals(str))) // We don't want segments that don't contain the query phrase val segmentsToKeep = 
newSegments.filter(_.toLowerCase.contains(phrase.toLowerCase)) listOfSegments :+ segmentsToKeep }).filter(_.size > 0) snippetTuples.map(segments => { val snippet = segments.mkString(" ") // Trim around the search term val idx = snippet.toLowerCase.indexOf(phrase.toLowerCase) val start = Math.max(0, idx - 200) val end = Math.min(snippet.size, idx + 200) if (end > start) Some(snippet.substring(start, end).replace(phrase, "<strong>" + phrase + "</strong>")) else None }).flatten } } def calculateAnnotationHeatmap( query: Option[String] = None, datasets: Seq[String] = Seq.empty[String], excludeDatasets: Seq[String] = Seq.empty[String], fromYear: Option[Int] = None, toYear: Option[Int] = None, places: Seq[String] = Seq.empty[String], bbox: Option[Rectangle] = None, coord: Option[Coordinate] = None, radius: Option[Double] = None, level: Int, searcher: SearcherAndTaxonomy )(implicit s: Session): Heatmap = { val q = new BooleanQuery() // Keyword query if (query.isDefined) { val fields = Seq(IndexFields.ANNOTATION_QUOTE, IndexFields.ANNOTATION_FULLTEXT_PREFIX, IndexFields.ANNOTATION_FULLTEXT_SUFFIX).toArray q.add(new MultiFieldQueryParser(fields, analyzer).parse(query.get), BooleanClause.Occur.MUST) } /* Dataset filter if (dataset.isDefined) { val datasetHierarchy = dataset.get +: Datasets.listSubsetsRecursive(dataset.get) if (datasetHierarchy.size == 1) { q.add(new TermQuery(new Term(IndexFields.SOURCE_DATASET, dataset.get)), BooleanClause.Occur.MUST) } else { val datasetQuery = new BooleanQuery() datasetHierarchy.foreach(id => { datasetQuery.add(new TermQuery(new Term(IndexFields.SOURCE_DATASET, id)), BooleanClause.Occur.SHOULD) }) q.add(datasetQuery, BooleanClause.Occur.MUST) } } */ // Timespan filter if (fromYear.isDefined || toYear.isDefined) { val timeIntervalQuery = new BooleanQuery() if (fromYear.isDefined) timeIntervalQuery.add(NumericRangeQuery.newIntRange(IndexFields.DATE_TO, fromYear.get, null, true, true), BooleanClause.Occur.MUST) if (toYear.isDefined) timeIntervalQuery.add(NumericRangeQuery.newIntRange(IndexFields.DATE_FROM, null, toYear.get, true, true), BooleanClause.Occur.MUST) q.add(timeIntervalQuery, BooleanClause.Occur.MUST) } // Places filter places.foreach(uri => q.add(new TermQuery(new Term(IndexFields.PLACE_URI, uri)), BooleanClause.Occur.MUST)) // Spatial filter if (bbox.isDefined) { q.add(Index.rptStrategy.makeQuery(new SpatialArgs(SpatialOperation.IsWithin, bbox.get)), BooleanClause.Occur.MUST) } else if (coord.isDefined) { // Warning - there appears to be a bug in Lucene spatial that flips coordinates! 
val circle = Index.spatialCtx.makeCircle(coord.get.y, coord.get.x, DistanceUtils.dist2Degrees(radius.getOrElse(10), DistanceUtils.EARTH_MEAN_RADIUS_KM)) q.add(Index.rptStrategy.makeQuery(new SpatialArgs(SpatialOperation.IsWithin, circle)), BooleanClause.Occur.MUST) } execute(q, bbox, level, searcher) } private def execute(query: Query, bbox: Option[Rectangle], level: Int, searcher: SearcherAndTaxonomy): Heatmap = { val rect = bbox.getOrElse(new RectangleImpl(-90, 90, -90, 90, null)) val filter = new QueryWrapperFilter(query) val heatmap = HeatmapFacetCounter.calcFacets(Index.rptStrategy, searcher.searcher.getTopReaderContext, filter, rect, level, 100000) // Heatmap grid cells with non-zero count, in the form of a tuple (x, y, count) val nonEmptyCells = Seq.range(0, heatmap.rows).flatMap(row => { Seq.range(0, heatmap.columns).map(column => (column, row, heatmap.getCount(column, row))) }).filter(_._3 > 0) // Convert non-zero grid cells to map points val region = heatmap.region val (minX, minY) = (region.getMinX, region.getMinY) val cellWidth = region.getWidth / heatmap.columns val cellHeight = region.getHeight / heatmap.rows Heatmap(nonEmptyCells.map { case (x, y, count) => val lon = DistanceUtils.normLonDEG(minX + x * cellWidth + cellWidth / 2) val lat = DistanceUtils.normLatDEG(minY + y * cellHeight + cellHeight / 2) (lon, lat, count) }, cellWidth, cellHeight) } }
pelagios/peripleo
app/index/annotations/AnnotationReader.scala
Scala
gpl-3.0
8,593
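The grid-to-coordinate step at the end of `execute` above is plain arithmetic; here is a self-contained sketch of the same cell-centre mapping (Lucene types and the `normLonDEG`/`normLatDEG` normalisation omitted):

object HeatmapCellCenters {
  // Centre (lon, lat) of grid cell (x, y) in a columns x rows grid over [minX, maxX] x [minY, maxY].
  def cellCenter(x: Int, y: Int, columns: Int, rows: Int,
                 minX: Double, maxX: Double, minY: Double, maxY: Double): (Double, Double) = {
    val cellWidth = (maxX - minX) / columns
    val cellHeight = (maxY - minY) / rows
    val lon = minX + x * cellWidth + cellWidth / 2
    val lat = minY + y * cellHeight + cellHeight / 2
    (lon, lat)
  }

  def main(args: Array[String]): Unit = {
    // Cell (0, 0) of a 4 x 2 grid over the whole world has its centre at (-135.0, -45.0).
    println(cellCenter(x = 0, y = 0, columns = 4, rows = 2, minX = -180, maxX = 180, minY = -90, maxY = 90))
  }
}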
package java package lang import scala.scalajs.js object Math { final val E = 2.718281828459045 final val PI = 3.141592653589793 @inline def abs(a: scala.Int): scala.Int = if (a < 0) -a else a @inline def abs(a: scala.Long): scala.Long = if (a < 0) -a else a @inline def abs(a: scala.Float): scala.Float = js.Math.abs(a).toFloat @inline def abs(a: scala.Double): scala.Double = js.Math.abs(a) @inline def max(a: scala.Int, b: scala.Int): scala.Int = if (a > b) a else b @inline def max(a: scala.Long, b: scala.Long): scala.Long = if (a > b) a else b @inline def max(a: scala.Float, b: scala.Float): scala.Float = js.Math.max(a, b).toFloat @inline def max(a: scala.Double, b: scala.Double): scala.Double = js.Math.max(a, b) @inline def min(a: scala.Int, b: scala.Int): scala.Int = if (a < b) a else b @inline def min(a: scala.Long, b: scala.Long): scala.Long = if (a < b) a else b @inline def min(a: scala.Float, b: scala.Float): scala.Float = js.Math.min(a, b).toFloat @inline def min(a: scala.Double, b: scala.Double): scala.Double = js.Math.min(a, b) @inline def ceil(a: scala.Double): scala.Double = js.Math.ceil(a) @inline def floor(a: scala.Double): scala.Double = js.Math.floor(a) def rint(a: scala.Double): scala.Double = { val rounded = js.Math.round(a) val mod = a % 1.0 // The following test is also false for specials (0's, Infinities and NaN) if (mod == 0.5 || mod == -0.5) { // js.Math.round(a) rounds up but we have to round to even if (rounded % 2.0 == 0.0) rounded else rounded - 1.0 } else { rounded } } @inline def round(a: scala.Float): scala.Int = js.Math.round(a).toInt @inline def round(a: scala.Double): scala.Long = js.Math.round(a).toLong @inline def sqrt(a: scala.Double): scala.Double = js.Math.sqrt(a) @inline def pow(a: scala.Double, b: scala.Double): scala.Double = js.Math.pow(a, b) @inline def exp(a: scala.Double): scala.Double = js.Math.exp(a) @inline def log(a: scala.Double): scala.Double = js.Math.log(a) @inline def log10(a: scala.Double): scala.Double = log(a) / 2.302585092994046 @inline def log1p(a: scala.Double): scala.Double = log(a + 1) @inline def sin(a: scala.Double): scala.Double = js.Math.sin(a) @inline def cos(a: scala.Double): scala.Double = js.Math.cos(a) @inline def tan(a: scala.Double): scala.Double = js.Math.tan(a) @inline def asin(a: scala.Double): scala.Double = js.Math.asin(a) @inline def acos(a: scala.Double): scala.Double = js.Math.acos(a) @inline def atan(a: scala.Double): scala.Double = js.Math.atan(a) @inline def atan2(y: scala.Double, x: scala.Double): scala.Double = js.Math.atan2(y, x) @inline def random(): scala.Double = js.Math.random() @inline def toDegrees(a: scala.Double): scala.Double = a * 180.0 / PI @inline def toRadians(a: scala.Double): scala.Double = a / 180.0 * PI @inline def signum(a: scala.Double): scala.Double = { if (a > 0) 1.0 else if (a < 0) -1.0 else a } @inline def signum(a: scala.Float): scala.Float = { if (a > 0) 1.0f else if (a < 0) -1.0f else a } def cbrt(a: scala.Double): scala.Double = { if (a == 0 || a.isNaN) { a } else { val sign = if (a < 0.0) -1.0 else 1.0 val value = sign * a //Initial Approximation var x = 0.0 var xi = pow(value, 0.3333333333333333) //Halley's Method (http://metamerist.com/cbrt/cbrt.htm) while (abs(x - xi) >= 1E-16) { x = xi val x3 = js.Math.pow(x, 3) val x3Plusa = x3 + value xi = x * (x3Plusa + value) / (x3Plusa + x3) } sign * xi } } def nextUp(a: scala.Double): scala.Double = { // js implementation of nextUp https://gist.github.com/Yaffle/4654250 import scala.Double._ if (a != a || a == PositiveInfinity) a else 
if (a == NegativeInfinity) -MaxValue else if (a == MaxValue) PositiveInfinity else if (a == 0) MinValue else { def iter(x: scala.Double, xi: scala.Double, n: scala.Double): scala.Double = { if (Math.abs(xi - x) >= 1E-16) { val c0 = (xi + x) / 2 val c = if (c0 == NegativeInfinity || c0 == PositiveInfinity) x + (xi - x) / 2 else c0 if (n == c) xi else if (a < c) iter(x = x, xi = c, n = c) else iter(x = c, xi = xi, n = c) } else xi } val d = Math.max(Math.abs(a) * 2E-16, MinValue) val ad = a + d val xi0 = if (ad == PositiveInfinity) MaxValue else ad iter(x = a, xi = xi0, n = a) } } def nextAfter(a: scala.Double, b: scala.Double): scala.Double = { if (b < a) -nextUp(-a) else if (a < b) nextUp(a) else if (a != a || b != b) scala.Double.NaN else b } def ulp(a: scala.Double): scala.Double = { if (abs(a) == scala.Double.PositiveInfinity) scala.Double.PositiveInfinity else if (abs(a) == scala.Double.MaxValue) pow(2, 971) else nextAfter(abs(a), scala.Double.MaxValue) - a } def hypot(a: scala.Double, b: scala.Double): scala.Double = { // http://en.wikipedia.org/wiki/Hypot#Implementation if (abs(a) == scala.Double.PositiveInfinity || abs(b) == scala.Double.PositiveInfinity) scala.Double.PositiveInfinity else if (a.isNaN || b.isNaN) scala.Double.NaN else if (a == 0 && b == 0) 0.0 else { //To Avoid Overflow and UnderFlow // calculate |x| * sqrt(1 - (y/x)^2) instead of sqrt(x^2 + y^2) val x = abs(a) val y = abs(b) val m = max(x, y) val t = min(x, y) / m m * sqrt(1 + t * t) } } def expm1(a: scala.Double): scala.Double = { // https://github.com/ghewgill/picomath/blob/master/javascript/expm1.js if (a == 0 || a.isNaN) a // Power Series http://en.wikipedia.org/wiki/Power_series // for small values of a, exp(a) = 1 + a + (a*a)/2 else if (abs(a) < 1E-5) a + 0.5 * a * a else exp(a) - 1.0 } def sinh(a: scala.Double): scala.Double = { if (a.isNaN || a == 0.0 || abs(a) == scala.Double.PositiveInfinity) a else (exp(a) - exp(-a)) / 2.0 } def cosh(a: scala.Double): scala.Double = { if (a.isNaN) a else if (a == 0.0) 1.0 else if (abs(a) == scala.Double.PositiveInfinity) scala.Double.PositiveInfinity else (exp(a) + exp(-a)) / 2.0 } def tanh(a: scala.Double): scala.Double = { if (a.isNaN || a == 0.0) a else if (abs(a) == scala.Double.PositiveInfinity) signum(a) else { // sinh(a) / cosh(a) = // 1 - 2 * (exp(-a)/ (exp(-a) + exp (a))) val expma = exp(-a) if (expma == scala.Double.PositiveInfinity) //Infinity / Infinity -1.0 else { val expa = exp(a) val ret = expma / (expa + expma) 1.0 - (2.0 * ret) } } } // TODO The methods not available in the JavaScript Math object }
CapeSepias/scala-js
javalanglib/src/main/scala/java/lang/Math.scala
Scala
bsd-3-clause
7,041
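The `rint` implementation above rounds exact halves to the nearest even value, matching `java.lang.Math.rint` on the JVM, whereas `round` always rounds halves up. A quick illustrative sketch:

object RintExample {
  def main(args: Array[String]): Unit = {
    // Halfway cases go to the nearest even integer...
    println(Math.rint(2.5))  // 2.0
    println(Math.rint(3.5))  // 4.0
    println(Math.rint(-2.5)) // -2.0
    // ...while round rounds halves up.
    println(Math.round(2.5)) // 3
  }
}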
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions.aggregate import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types._ /** * Base class for computing Pearson correlation between two expressions. * When applied on empty data (i.e., count is zero), it returns NULL. * * Definition of Pearson correlation can be found at * http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient */ abstract class PearsonCorrelation(x: Expression, y: Expression, nullOnDivideByZero: Boolean) extends DeclarativeAggregate with ImplicitCastInputTypes { override def children: Seq[Expression] = Seq(x, y) override def nullable: Boolean = true override def dataType: DataType = DoubleType override def inputTypes: Seq[AbstractDataType] = Seq(DoubleType, DoubleType) protected val n = AttributeReference("n", DoubleType, nullable = false)() protected val xAvg = AttributeReference("xAvg", DoubleType, nullable = false)() protected val yAvg = AttributeReference("yAvg", DoubleType, nullable = false)() protected val ck = AttributeReference("ck", DoubleType, nullable = false)() protected val xMk = AttributeReference("xMk", DoubleType, nullable = false)() protected val yMk = AttributeReference("yMk", DoubleType, nullable = false)() protected def divideByZeroEvalResult: Expression = { if (nullOnDivideByZero) Literal.create(null, DoubleType) else Double.NaN } override def stringArgs: Iterator[Any] = super.stringArgs.filter(_.isInstanceOf[Expression]) override val aggBufferAttributes: Seq[AttributeReference] = Seq(n, xAvg, yAvg, ck, xMk, yMk) override val initialValues: Seq[Expression] = Array.fill(6)(Literal(0.0)) override lazy val updateExpressions: Seq[Expression] = updateExpressionsDef override val mergeExpressions: Seq[Expression] = { val n1 = n.left val n2 = n.right val newN = n1 + n2 val dx = xAvg.right - xAvg.left val dxN = If(newN === 0.0, 0.0, dx / newN) val dy = yAvg.right - yAvg.left val dyN = If(newN === 0.0, 0.0, dy / newN) val newXAvg = xAvg.left + dxN * n2 val newYAvg = yAvg.left + dyN * n2 val newCk = ck.left + ck.right + dx * dyN * n1 * n2 val newXMk = xMk.left + xMk.right + dx * dxN * n1 * n2 val newYMk = yMk.left + yMk.right + dy * dyN * n1 * n2 Seq(newN, newXAvg, newYAvg, newCk, newXMk, newYMk) } protected def updateExpressionsDef: Seq[Expression] = { val newN = n + 1.0 val dx = x - xAvg val dxN = dx / newN val dy = y - yAvg val dyN = dy / newN val newXAvg = xAvg + dxN val newYAvg = yAvg + dyN val newCk = ck + dx * (y - newYAvg) val newXMk = xMk + dx * (x - newXAvg) val newYMk = yMk + dy * (y - newYAvg) val isNull = x.isNull || y.isNull Seq( If(isNull, n, newN), If(isNull, xAvg, newXAvg), If(isNull, 
yAvg, newYAvg), If(isNull, ck, newCk), If(isNull, xMk, newXMk), If(isNull, yMk, newYMk) ) } } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(expr1, expr2) - Returns Pearson coefficient of correlation between a set of number pairs.", examples = """ Examples: > SELECT _FUNC_(c1, c2) FROM VALUES (3, 2), (3, 3), (6, 4) as tab(c1, c2); 0.8660254037844387 """, group = "agg_funcs", since = "1.6.0") // scalastyle:on line.size.limit case class Corr( x: Expression, y: Expression, nullOnDivideByZero: Boolean = !SQLConf.get.legacyStatisticalAggregate) extends PearsonCorrelation(x, y, nullOnDivideByZero) { def this(x: Expression, y: Expression) = this(x, y, !SQLConf.get.legacyStatisticalAggregate) override val evaluateExpression: Expression = { If(n === 0.0, Literal.create(null, DoubleType), If(n === 1.0, divideByZeroEvalResult, ck / sqrt(xMk * yMk))) } override def prettyName: String = "corr" }
witgo/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Corr.scala
Scala
apache-2.0
4,771
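`Corr` expresses the classic single-pass co-moment update as Catalyst expressions; the following is a plain-Scala sketch of the same update and final evaluation (null handling and `nullOnDivideByZero` omitted), reproducing the result from the ExpressionDescription example:

object StreamingCorr {
  // Mirrors the aggregation buffer (n, xAvg, yAvg, ck, xMk, yMk) used above.
  final case class Buf(n: Double, xAvg: Double, yAvg: Double, ck: Double, xMk: Double, yMk: Double)

  val zero = Buf(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)

  def update(b: Buf, x: Double, y: Double): Buf = {
    val newN = b.n + 1.0
    val dx = x - b.xAvg
    val dy = y - b.yAvg
    val newXAvg = b.xAvg + dx / newN
    val newYAvg = b.yAvg + dy / newN
    Buf(newN, newXAvg, newYAvg,
      b.ck + dx * (y - newYAvg),   // co-moment of x and y
      b.xMk + dx * (x - newXAvg),  // sum of squared deviations of x
      b.yMk + dy * (y - newYAvg))  // sum of squared deviations of y
  }

  def evaluate(b: Buf): Option[Double] =
    if (b.n < 2.0) None else Some(b.ck / math.sqrt(b.xMk * b.yMk))

  def main(args: Array[String]): Unit = {
    val pairs = Seq((3.0, 2.0), (3.0, 3.0), (6.0, 4.0)) // same pairs as the documented example
    val buf = pairs.foldLeft(zero) { case (b, (x, y)) => update(b, x, y) }
    println(evaluate(buf)) // approximately Some(0.8660254037844387), the documented value
  }
}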
package net.room271

object ScalaSchool extends App {
  println("Welcome to Scala School!")
}
nicl/scala-school
src/main/scala/net/room271/ScalaSchool.scala
Scala
gpl-3.0
94
package monocle.law

import monocle.Prism
import org.scalacheck.Prop._
import org.scalacheck.{Arbitrary, Properties}

import scalaz.Equal
import scalaz.Id._
import scalaz.std.option._
import scalaz.syntax.equal._

object PrismLaws {

  def apply[S: Arbitrary: Equal, A: Arbitrary: Equal](prism: Prism[S, A]) = new Properties("Prism") {

    property("reverseGet produces a value") = forAll { a: A =>
      prism.getOption(prism.reverseGet(a)) === Some(a)
    }

    property("if a Prism matches, you can always go back to the source") = forAll { s: S =>
      prism.getOrModify(s).fold(identity, prism.reverseGet) === s
    }

    /** modifyF does not change the number of targets */
    property("modifyF with Id does not do anything") = forAll { s: S =>
      prism.modifyF[Id](id.point[A](_))(s) === s
    }

    /** modify does not change the number of targets */
    property("modify with id does not do anything") = forAll { s: S =>
      prism.modify(identity)(s) === s
    }

    property("setOption only succeeds when the prism is matching") = forAll { (s: S, a: A) =>
      prism.setOption(a)(s) === prism.getOption(s).map(_ => prism.set(a)(s))
    }

    property("modifyOption with id is isomorphic to isMatching") = forAll { s: S =>
      prism.modifyOption(identity)(s) === prism.getOption(s).map(_ => s)
    }
  }
}
CapeSepias/Monocle
law/src/main/scala/monocle/law/PrismLaws.scala
Scala
mit
1,332
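A sketch of exercising `PrismLaws` above against a hypothetical `Json`/`JNumber` prism (none of these names come from the file), assuming a Monocle 1.x `Prism.apply` and ScalaCheck's `Properties#check`:

import monocle.Prism
import monocle.law.PrismLaws
import org.scalacheck.{Arbitrary, Gen}
import scalaz.Equal
import scalaz.std.anyVal._

object PrismLawsExample extends App {
  sealed trait Json
  case class JNumber(value: Int) extends Json
  case class JString(value: String) extends Json

  // A lawful prism: JNumber round-trips exactly, everything else fails to match.
  val jNumber: Prism[Json, Int] =
    Prism[Json, Int] {
      case JNumber(n) => Some(n)
      case _          => None
    }(JNumber(_))

  implicit val arbJson: Arbitrary[Json] = Arbitrary(
    Gen.oneOf(
      Arbitrary.arbitrary[Int].map(JNumber(_): Json),
      Arbitrary.arbitrary[String].map(JString(_): Json)
    )
  )
  implicit val jsonEqual: Equal[Json] = Equal.equalA

  PrismLaws(jNumber).check()
}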
package peschke.console.progressbar import java.io.PrintStream import java.util.concurrent.{ConcurrentHashMap, LinkedBlockingQueue} import peschke.{Complete => CompletedUpdate} import peschke.console.progressbar.Command._ import scala.annotation.tailrec import scala.concurrent.{ExecutionContext, Future, Promise} import scala.collection.JavaConverters._ import scala.util.Failure /** * Create, display, control, and terminate a console progress bar. * * @param initialState Initial state of the progress bar * @param commandBufferSize Maximum of changes to the bar (increments, etc) per display * @param output This is a [[java.io.PrintStream]] to match [[java.lang.System.out]] * @param ec Execution context where the worker thread will run */ class ProgressBar(initialState: ProgressBarState, commandBufferSize: Int, output: PrintStream) (implicit ec: ExecutionContext) { private [progressbar] val commandQueue: LinkedBlockingQueue[Command] = new LinkedBlockingQueue[Command](commandBufferSize) private [progressbar] val updateObservers = ConcurrentHashMap.newKeySet[Promise[CompletedUpdate]]() private [progressbar] val future = Future { @tailrec def loop(state: ProgressBarState): ProgressBarState = { val willNotify = updateObservers.iterator.asScala.toList updateObservers.removeAll(willNotify.asJava) state.draw(output) if (state.isFinished) { output.println() willNotify.foreach(_.trySuccess(CompletedUpdate)) // We want to make sure that, if we're finished, we notify everyone. updateObservers.iterator.asScala.foreach(_.trySuccess(CompletedUpdate)) state } else { val nextState = Command.takeFrom(commandQueue).foldLeft(state) { case (prevState, _) if prevState.isFinished => prevState case (prevState, Terminate) => prevState.terminated case (prevState, Complete) => prevState.completed case (prevState, Refresh) => prevState case (prevState, IncrementCount(delta)) => prevState.incrementCount(delta) case (prevState, IncrementTotal(delta)) => prevState.incrementTotal(delta) } // Any added during the update can be notified next time around. willNotify.foreach(_.trySuccess(peschke.Complete)) loop(nextState) } } loop(initialState) } private def maybeThrowExceptionFromWorker(): Unit = { future.value match { case Some(Failure(ex)) => throw ex case _ => () } } private def queueCommandWithCompletion(command: Command): Future[CompletedUpdate] = { val completionPromise = Promise[CompletedUpdate]() updateObservers.add(completionPromise) commandQueue.put(command) completionPromise.future } /** * Increment (or decrement) the progress of the bar * @param delta can be positive or negative */ def incrementCount(delta: Long = 1): Unit = { maybeThrowExceptionFromWorker() commandQueue.put(IncrementCount(delta)) } /** * Set the progress of the bar * @param count must be between 0 and the total value of the bar */ def setCount(count: Long): Unit = { maybeThrowExceptionFromWorker() commandQueue.put(SetCount(count)) } /** * Increment (or decrement) the total value of the bar * * This won't modify the count, but will modify the percent completion. * * @param delta can be positive or negative */ def incrementTotal(delta: Long = 1): Unit = { maybeThrowExceptionFromWorker() commandQueue.put(IncrementTotal(delta)) } /** * Set the total value of the bar * * This won't modify the count, but will modify the percent completion. 
* * @param total must be greater than or equal to the current count */ def setTotal(total: Long): Unit = { maybeThrowExceptionFromWorker() commandQueue.put(SetTotal(total)) } /** * Force a redraw */ def redraw(): Future[CompletedUpdate] = { maybeThrowExceptionFromWorker() queueCommandWithCompletion(Refresh) } /** * Terminate the progress bar. * * Prints a final bar update, and drops a newline so printing to the output stream can continue without issue. */ def terminate(): Future[CompletedUpdate] = { maybeThrowExceptionFromWorker() queueCommandWithCompletion(Terminate) } /** * Complete the progress bar. * * Sets the bar value to the total, prints a final bar update, and drops a newline so printing to the output stream * can continue without issue. */ def complete(): Future[CompletedUpdate] = { maybeThrowExceptionFromWorker() queueCommandWithCompletion(Complete) } } object ProgressBar { /** * Create a new [[ProgressBar]] * * @param initialCount Initial counter value, must be less than or equal to totalCount * @param totalCount Maximum count value, must be greater than or equal to initialCount * @param width Display width of the bar, manually set to avoid depending on JLine * @param commandBufferSize Maximum of changes to the bar (increments, etc) per display * @param output This is a [[java.io.PrintStream]] to match [[java.lang.System.out]] * @param ec Execution context where the worker thread will run */ def apply(initialCount: Long = 0L, totalCount: Long = 100L, width: Int = 80, commandBufferSize: Int = 50, output: PrintStream = System.out) (implicit ec: ExecutionContext): ProgressBar = { new ProgressBar( ProgressBarState(count = initialCount, total = totalCount, width = width), commandBufferSize, output) } }
morgen-peschke/scala-progress-bar
src/main/scala/peschke/console/progressbar/ProgressBar.scala
Scala
mit
5,750
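A minimal usage sketch for the ProgressBar above, assuming the companion `apply` defaults shown in the file and the global execution context for the worker thread; the simulated work loop is illustrative only.

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

import peschke.console.progressbar.ProgressBar

object ProgressBarExample extends App {
  // 100 units of work rendered in a 40-column bar on stdout.
  val bar = ProgressBar(totalCount = 100L, width = 40)

  (1 to 100).foreach { _ =>
    Thread.sleep(10)     // stand-in for real work
    bar.incrementCount() // enqueues an IncrementCount(1) command for the worker thread
  }

  // complete() moves the count to the total, draws a final bar, and prints a trailing
  // newline; block on the returned Future so the JVM does not exit mid-draw.
  Await.result(bar.complete(), 5.seconds)
}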
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.api.stream.table import org.apache.flink.api.scala._ import org.apache.flink.table.api.scala._ import org.apache.flink.table.api.Tumble import org.apache.flink.table.expressions.utils.{Func1, Func23, Func24} import org.apache.flink.table.utils.TableTestBase import org.apache.flink.table.utils.TableTestUtil._ import org.junit.Test class CalcTest extends TableTestBase { // ---------------------------------------------------------------------------------------------- // Tests for all the situations when we can do fields projection. Like selecting few fields // from a large field count source. // ---------------------------------------------------------------------------------------------- @Test def testSelectFromWindow(): Unit = { val util = streamTestUtil() val sourceTable = util.addTable[(Int, Long, String, Double)]("MyTable", 'a, 'b, 'c, 'd, 'rowtime.rowtime) val resultTable = sourceTable .window(Tumble over 5.millis on 'rowtime as 'w) .groupBy('w) .select('c.upperCase().count, 'a.sum) val expected = unaryNode( "DataStreamGroupWindowAggregate", unaryNode( "DataStreamCalc", streamTableNode(sourceTable), term("select", "a", "rowtime", "UPPER(c) AS $f5") ), term("window", "TumblingGroupWindow('w, 'rowtime, 5.millis)"), term("select", "COUNT($f5) AS EXPR$0", "SUM(a) AS EXPR$1") ) util.verifyTable(resultTable, expected) } @Test def testSelectFromGroupedWindow(): Unit = { val util = streamTestUtil() val sourceTable = util.addTable[(Int, Long, String, Double)]("MyTable", 'a, 'b, 'c, 'd, 'rowtime.rowtime) val resultTable = sourceTable .window(Tumble over 5.millis on 'rowtime as 'w) .groupBy('w, 'b) .select('c.upperCase().count, 'a.sum, 'b) val expected = unaryNode( "DataStreamCalc", unaryNode( "DataStreamGroupWindowAggregate", unaryNode( "DataStreamCalc", streamTableNode(sourceTable), term("select", "a", "b", "rowtime", "UPPER(c) AS $f5") ), term("groupBy", "b"), term("window", "TumblingGroupWindow('w, 'rowtime, 5.millis)"), term("select", "b", "COUNT($f5) AS EXPR$0", "SUM(a) AS EXPR$1") ), term("select", "EXPR$0", "EXPR$1", "b") ) util.verifyTable(resultTable, expected) } @Test def testMultiFilter(): Unit = { val util = streamTestUtil() val sourceTable = util.addTable[(Int, Long, String, Double)]("MyTable", 'a, 'b, 'c, 'd) val resultTable = sourceTable.select('a, 'b) .filter('a > 0) .filter('b < 2) .filter(('a % 2) === 1) val expected = unaryNode( "DataStreamCalc", streamTableNode(sourceTable), term("select", "a", "b"), term("where", "AND(AND(>(a, 0), <(b, 2)), =(MOD(a, 2), 1))") ) util.verifyTable(resultTable, expected) } @Test def testIn(): Unit = { val util = streamTestUtil() val sourceTable = util.addTable[(Int, Long, String)]("MyTable", 'a, 'b, 'c) val resultTable = sourceTable.select('a, 'b, 
'c) .where((1 to 30).map($"b" === _).reduce((ex1, ex2) => ex1 || ex2) && ($"c" === "xx")) val expected = unaryNode( "DataStreamCalc", streamTableNode(sourceTable), term("select", "a", "b", "c"), term("where", s"AND(IN(b, ${(1 to 30).mkString(", ")}), =(c, 'xx'))") ) util.verifyTable(resultTable, expected) } @Test def testNotIn(): Unit = { val util = streamTestUtil() val sourceTable = util.addTable[(Int, Long, String)]("MyTable", 'a, 'b, 'c) val resultTable = sourceTable.select('a, 'b, 'c) .where((1 to 30).map($"b" !== _).reduce((ex1, ex2) => ex1 && ex2) || ($"c" !== "xx")) val expected = unaryNode( "DataStreamCalc", streamTableNode(sourceTable), term("select", "a", "b", "c"), term("where", s"OR(NOT IN(b, ${(1 to 30).mkString(", ")}), <>(c, 'xx'))") ) util.verifyTable(resultTable, expected) } @Test def testAddColumns(): Unit = { val util = streamTestUtil() val sourceTable = util.addTable[(Int, Long, String)]("MyTable", 'a, 'b, 'c) val resultTable = sourceTable .addColumns("concat(c, 'sunny') as kid") .addColumns('a + 2, 'b as 'b2) .addOrReplaceColumns(concat('c, "_kid") as 'kid, concat('c, "kid") as 'kid) .addOrReplaceColumns("concat(c, '_kid_last') as kid") .addColumns("'literal_value'") val expected = unaryNode( "DataStreamCalc", streamTableNode(sourceTable), term( "select", "a", "b", "c", "CONCAT(c, '_kid_last') AS kid", "+(a, 2) AS _c4, b AS b2", "'literal_value' AS _c6") ) util.verifyTable(resultTable, expected) } @Test def testRenameColumns(): Unit = { val util = streamTestUtil() val sourceTable = util.addTable[(Int, Long, String)]("MyTable", 'a, 'b, 'c) val resultTable = sourceTable.renameColumns('a as 'a2, 'b as 'b2).select('a2, 'b2) val expected = unaryNode( "DataStreamCalc", streamTableNode(sourceTable), term("select", "a AS a2", "b AS b2") ) util.verifyTable(resultTable, expected) } @Test def testDropColumns(): Unit = { val util = streamTestUtil() val sourceTable = util.addTable[(Int, Long, String)]("MyTable", 'a, 'b, 'c) val resultTable = sourceTable.dropColumns('a, 'b) val expected = unaryNode( "DataStreamCalc", streamTableNode(sourceTable), term("select", "c") ) util.verifyTable(resultTable, expected) } @Test def testSimpleMap(): Unit = { val util = streamTestUtil() val sourceTable = util.addTable[(Int, Long, String)]("MyTable", 'a, 'b, 'c) val resultTable = sourceTable.map(Func23('a, 'b, 'c)) val expected = unaryNode( "DataStreamCalc", streamTableNode(sourceTable), term("select", "Func23$(a, b, c).f0 AS _c0, Func23$(a, b, c).f1 AS _c1, " + "Func23$(a, b, c).f2 AS _c2, Func23$(a, b, c).f3 AS _c3") ) util.verifyTable(resultTable, expected) } @Test def testScalarResult(): Unit = { val util = streamTestUtil() val sourceTable = util.addTable[(Int, Long, String)]("MyTable", 'a, 'b, 'c) val resultTable = sourceTable.map(Func1('a)) val expected = unaryNode( "DataStreamCalc", streamTableNode(sourceTable), term("select", "Func1$(a) AS _c0") ) util.verifyTable(resultTable, expected) } @Test def testMultiMap(): Unit = { val util = streamTestUtil() val sourceTable = util.addTable[(Int, Long, String)]("MyTable", 'a, 'b, 'c) val resultTable = sourceTable .map(Func23('a, 'b, 'c)) .map(Func24('_c0, '_c1, '_c2, '_c3)) val expected = unaryNode( "DataStreamCalc", streamTableNode(sourceTable), term("select", "Func24$(Func23$(a, b, c).f0, Func23$(a, b, c).f1, " + "Func23$(a, b, c).f2, Func23$(a, b, c).f3).f0 AS _c0, " + "Func24$(Func23$(a, b, c).f0, Func23$(a, b, c).f1, " + "Func23$(a, b, c).f2, Func23$(a, b, c).f3).f1 AS _c1, " + "Func24$(Func23$(a, b, c).f0, Func23$(a, b, c).f1, " + "Func23$(a, 
b, c).f2, Func23$(a, b, c).f3).f2 AS _c2, " + "Func24$(Func23$(a, b, c).f0, Func23$(a, b, c).f1, " + "Func23$(a, b, c).f2, Func23$(a, b, c).f3).f3 AS _c3") ) util.verifyTable(resultTable, expected) } }
hequn8128/flink
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/table/CalcTest.scala
Scala
apache-2.0
8,354
/* * Copyright 2014 Databricks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.databricks.spark.avro import java.io.{File, IOException} import java.nio.ByteBuffer import java.util import scala.collection.immutable.HashSet import scala.collection.mutable.ArrayBuffer import scala.util.Random import com.google.common.io.Files import org.apache.spark.sql.SparkSession private[avro] object TestUtils { /** * This function checks that all records in a file match the original * record. */ def checkReloadMatchesSaved(spark: SparkSession, testFile: String, avroDir: String) = { def convertToString(elem: Any): String = { elem match { case null => "NULL" // HashSets can't have null in them, so we use a string instead case arrayBuf: ArrayBuffer[_] => arrayBuf.asInstanceOf[ArrayBuffer[Any]].toArray.deep.mkString(" ") case arrayByte: Array[Byte] => arrayByte.deep.mkString(" ") case other => other.toString } } val originalEntries = spark.read.avro(testFile).collect() val newEntries = spark.read.avro(avroDir).collect() assert(originalEntries.length == newEntries.length) val origEntrySet = Array.fill(originalEntries(0).size)(new HashSet[Any]()) for (origEntry <- originalEntries) { var idx = 0 for (origElement <- origEntry.toSeq) { origEntrySet(idx) += convertToString(origElement) idx += 1 } } for (newEntry <- newEntries) { var idx = 0 for (newElement <- newEntry.toSeq) { assert(origEntrySet(idx).contains(convertToString(newElement))) idx += 1 } } } def withTempDir(f: File => Unit): Unit = { val dir = Files.createTempDir() dir.delete() try f(dir) finally deleteRecursively(dir) } /** * This function deletes a file or a directory with everything that's in it. This function is * copied from Spark with minor modifications made to it. See original source at: * github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/util/Utils.scala */ def deleteRecursively(file: File) { def listFilesSafely(file: File): Seq[File] = { if (file.exists()) { val files = file.listFiles() if (files == null) { throw new IOException("Failed to list files for dir: " + file) } files } else { List() } } if (file != null) { try { if (file.isDirectory) { var savedIOException: IOException = null for (child <- listFilesSafely(file)) { try { deleteRecursively(child) } catch { // In case of multiple exceptions, only last one will be thrown case ioe: IOException => savedIOException = ioe } } if (savedIOException != null) { throw savedIOException } } } finally { if (!file.delete()) { // Delete can also fail if the file simply did not exist if (file.exists()) { throw new IOException("Failed to delete: " + file.getAbsolutePath) } } } } } /** * This function generates a random map(string, int) of a given size. */ private[avro] def generateRandomMap(rand: Random, size: Int): java.util.Map[String, Int] = { val jMap = new util.HashMap[String, Int]() for (i <- 0 until size) { jMap.put(rand.nextString(5), i) } jMap } /** * This function generates a random array of booleans of a given size. 
*/ private[avro] def generateRandomArray(rand: Random, size: Int): util.ArrayList[Boolean] = { val vec = new util.ArrayList[Boolean]() for (i <- 0 until size) { vec.add(rand.nextBoolean()) } vec } /** * This function generates a random ByteBuffer of a given size. */ private[avro] def generateRandomByteBuffer(rand: Random, size: Int): ByteBuffer = { val bb = ByteBuffer.allocate(size) val arrayOfBytes = new Array[Byte](size) rand.nextBytes(arrayOfBytes) bb.put(arrayOfBytes) } }
tresata-opensource/spark-avro
src/test/scala/com/databricks/spark/avro/TestUtils.scala
Scala
apache-2.0
4,641
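A hedged sketch of how the utilities above might be exercised. It is placed in the `com.databricks.spark.avro` package because `TestUtils` and `withTempDir` are package-private, and it assumes the library's `.avro` reader/writer implicits from that package object (the reader form is already used inside `checkReloadMatchesSaved`); the file names and the local SparkSession are illustrative.

package com.databricks.spark.avro

import org.apache.spark.sql.SparkSession

object TestUtilsExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("avro-roundtrip").getOrCreate()
    import spark.implicits._

    TestUtils.withTempDir { dir =>
      val original = s"$dir/original.avro"
      val copy = s"$dir/copy.avro"

      // Write a tiny DataFrame as Avro, copy it, then assert the copy matches the original.
      Seq((1, "a"), (2, "b")).toDF("id", "value").write.avro(original)
      spark.read.avro(original).write.avro(copy)
      TestUtils.checkReloadMatchesSaved(spark, original, copy)
    }

    spark.stop()
  }
}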
package eu.shiftforward.J1

class j1cpu(program: Seq[Short], ramSize: Int = 65536, stackSize: Int = 32) {
  def this(program: Seq[Microcode]) = this(program.map(_.encode))

  // Memory Structures
  val ram = program.toArray ++ Array.fill(ramSize - program.length)(0.toShort)
  val dstack = Array.fill(stackSize)(0.toShort)
  val rstack = Array.fill(stackSize)(0.toShort)

  // Registers
  var st0 = 0.toShort
  def st1 = dstack(dsp)
  def rst0 = rstack(rsp)

  // Pointers
  var pc, rsp, dsp = 0.toShort

  def step() = {
    val instruction = Microcode(ram(pc))

    val _st0 = (instruction match {
      case Ldc(v) ⇒ v & 0x7FFF
      case Alu(Fst, _, _, _, _, _, _) ⇒ st0
      case Alu(Snd, _, _, _, _, _, _) ⇒ st1
      case Alu(Add, _, _, _, _, _, _) ⇒ st0 + st1
      case Alu(And, _, _, _, _, _, _) ⇒ st0 & st1
      case Alu(Or, _, _, _, _, _, _) ⇒ st0 | st1
      case Alu(Xor, _, _, _, _, _, _) ⇒ st0 ^ st1
      case Alu(Not, _, _, _, _, _, _) ⇒ st0 ^ 0xFFFF
      case Alu(Eq, _, _, _, _, _, _) ⇒ if (st1 == st0) 1 else 0
      case Alu(Lt, _, _, _, _, _, _) ⇒ if (st1 < st0) 1 else 0
      case Alu(ULt, _, _, _, _, _, _) ⇒ if (st1 < st0) 1 else 0 // FIXME: unsigned comparison
      case Alu(Shr, _, _, _, _, _, _) ⇒ st1 >> st0
      case Alu(Shl, _, _, _, _, _, _) ⇒ st1 << st0
      case Alu(Dec, _, _, _, _, _, _) ⇒ st0 - 1
      case Alu(Ret, _, _, _, _, _, _) ⇒ rst0
      case Alu(Ld, _, _, _, _, _, _) ⇒ ram(st0)
      case Alu(Dph, _, _, _, _, _, _) ⇒ dstack.length // FIXME: + 000 + rstack.length
      case _ ⇒ st0
    }).toShort

    instruction match {
      case Ldc(_) ⇒
        dsp = (dsp + 1).toShort
        dstack(dsp) = st0
        pc = (pc + 1).toShort

      case Jmp(a) ⇒
        pc = a

      case Jmz(a) ⇒
        dsp = (dsp - 1).toShort
        pc = if (st0 == 0) a else (pc + 1).toShort

      case Call(a) ⇒
        rstack(rsp) = (pc + 1).toShort
        rsp = (rsp + 1).toShort
        pc = a

      case Alu(_, ret, push, pushAddr, store, rsd, dsd) ⇒
        val r = rstack(dsp)
        if (pushAddr) { rstack(rsp) = st0 }
        if (push) { dstack(dsp) = st0 }
        if (store) { ram(st0) = st1 }
        dsp = (dsp + dsd).toShort
        rsp = (rsp + rsd).toShort
        pc = if (ret) r else (pc + 1).toShort
    }

    st0 = _st0

    println(s"$pc: $instruction\t(${instruction.encode.toBinaryString.reverse.padTo(16, '0').take(16).reverse}, rsp $rsp, dsp $dsp, st0 $st0, st1 $st1)")
  }
}
hugoferreira/from-zero-to-computer
src/main/scala/eu/shiftforward/J1/cpu.scala
Scala
mit
2,517
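A hedged sketch of driving the CPU above. The `Microcode` constructors and encodings live elsewhere in this project, so the `Ldc`/`Alu` arguments below (in the `(op, ret, push, pushAddr, store, rsd, dsd)` order used by the pattern match in `step()`) are assumptions for illustration, not the project's verified opcodes.

import eu.shiftforward.J1._

object J1Example extends App {
  // Push two literals and add them; the ALU flag/delta values mirror the
  // pattern Alu(op, ret, push, pushAddr, store, rsd, dsd) matched in step().
  val program: Seq[Microcode] = Seq(
    Ldc(2),                                    // push 2
    Ldc(3),                                    // push 3
    Alu(Add, false, true, false, false, 0, -1) // st0 := st0 + st1, shrink the data stack by one
  )

  val cpu = new j1cpu(program)
  program.indices.foreach(_ => cpu.step())
  println(s"top of stack: ${cpu.st0}") // 5, if the assumed encodings are right
}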
package com.twitter.finagle.tracing.opencensus

import com.twitter.finagle.{Http, ThriftMux}
import org.scalatest.FunSuite

class StackClientOpsTest extends FunSuite {

  test("Http.withOpenCensusTracing") {
    import StackClientOps._

    val client = Http.client
    assert(!client.stack.contains(ClientTraceContextFilter.role))
    assert(!client.stack.contains(StackClientOps.HttpSerializationStackRole))

    val clientWithOC = client.withOpenCensusTracing
    assert(clientWithOC.stack.contains(ClientTraceContextFilter.role))
    assert(clientWithOC.stack.contains(StackClientOps.HttpSerializationStackRole))
  }

  test("ThriftMux.withOpenCensusTracing") {
    import StackClientOps._

    val client = ThriftMux.client
    assert(!client.stack.contains(ClientTraceContextFilter.role))

    val clientWithOC = client.withOpenCensusTracing
    assert(clientWithOC.stack.contains(ClientTraceContextFilter.role))
  }
}
luciferous/finagle
finagle-opencensus-tracing/src/test/scala/com/twitter/finagle/tracing/opencensus/StackClientOpsTest.scala
Scala
apache-2.0
926
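The tests above only assert on stack roles; as a usage sketch, an application would enable the filter the same way and then build a service as usual. The destination address is a placeholder, and the sketch assumes Finagle's standard `newService` API.

import com.twitter.finagle.{Http, Service}
import com.twitter.finagle.http.{Request, Response}
import com.twitter.finagle.tracing.opencensus.StackClientOps._

object TracedClientExample {
  // An HTTP client whose stack propagates OpenCensus trace contexts.
  val tracedClient: Http.Client = Http.client.withOpenCensusTracing

  // Placeholder destination; resolved by Finagle's usual name resolution.
  val service: Service[Request, Response] = tracedClient.newService("api.example.com:80")
}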
package com.twitter.finagle.httpx.codec import com.twitter.finagle.Service import com.twitter.finagle.dispatch.GenSerialServerDispatcher import com.twitter.finagle.httpx._ import com.twitter.finagle.httpx.netty.Bijections._ import com.twitter.finagle.netty3.ChannelBufferBuf import com.twitter.finagle.stats.{StatsReceiver, DefaultStatsReceiver, RollupStatsReceiver} import com.twitter.finagle.transport.Transport import com.twitter.io.{Reader, Buf, BufReader} import com.twitter.logging.Logger import com.twitter.util.{Future, NonFatal, Promise, Return, Throw, Throwables} import java.net.InetSocketAddress import org.jboss.netty.handler.codec.frame.TooLongFrameException import org.jboss.netty.handler.codec.http.{HttpRequest, HttpResponse, HttpHeaders} class HttpServerDispatcher( trans: Transport[Any, Any], service: Service[Request, Response], stats: StatsReceiver) extends GenSerialServerDispatcher[Request, Response, Any, Any](trans) { def this( trans: Transport[Any, Any], service: Service[Request, Response]) = this(trans, service, DefaultStatsReceiver) private[this] val failureReceiver = new RollupStatsReceiver(stats.scope("stream")).scope("failures") import ReaderUtils.{readChunk, streamChunks} trans.onClose ensure { service.close() } private[this] def BadRequestResponse = Response(Version.Http10, Status.BadRequest) private[this] def RequestUriTooLongResponse = Response(Version.Http10, Status.RequestURITooLong) private[this] def RequestHeaderFieldsTooLarge = Response(Version.Http10, Status.RequestHeaderFieldsTooLarge) protected def dispatch(m: Any, eos: Promise[Unit]) = m match { case badReq: BadHttpRequest => eos.setDone() val response = badReq.exception match { case ex: TooLongFrameException => // this is very brittle :( if (ex.getMessage().startsWith("An HTTP line is larger than ")) RequestUriTooLongResponse else RequestHeaderFieldsTooLarge case _ => BadRequestResponse } // The connection in unusable so we close it here. // Note that state != Idle while inside dispatch // so state will be set to Closed but trans.close // will not be called. Instead isClosing will be // set to true, keep-alive headers set correctly // in handle, and trans.close will be called in // the respond statement of loop(). close() Future.value(response) case reqIn: HttpRequest => val reader = if (reqIn.isChunked) { val coll = Transport.collate(trans, readChunk) coll.proxyTo(eos) coll: Reader } else { eos.setDone() BufReader(ChannelBufferBuf.Owned(reqIn.getContent)) } val addr = trans.remoteAddress match { case ia: InetSocketAddress => ia case _ => new InetSocketAddress(0) } val req = Request(reqIn, reader, addr) service(req) case invalid => eos.setDone() Future.exception(new IllegalArgumentException("Invalid message "+invalid)) } protected def handle(rep: Response): Future[Unit] = { setKeepAlive(rep, !isClosing) if (rep.isChunked) { // We remove content length here in case the content is later // compressed. This is a pretty bad violation of modularity: // this is likely an issue with the Netty content // compressors, which (should?) adjust headers regardless of // transfer encoding. rep.headers.remove(HttpHeaders.Names.CONTENT_LENGTH) val p = new Promise[Unit] val f = trans.write(from[Response, HttpResponse](rep)) before streamChunks(trans, rep.reader) f.proxyTo(p) // This awkwardness is unfortunate but necessary for now as you may be // interrupted in the middle of a write, or when there otherwise isn’t // an outstanding read (e.g. read-write race). 
f.onFailure { t => Logger.get(this.getClass.getName).debug(t, "Failed mid-stream. Terminating stream, closing connection") failureReceiver.counter(Throwables.mkString(t): _*).incr() rep.reader.discard() } p.setInterruptHandler { case intr => rep.reader.discard() f.raise(intr) } p } else { // Ensure Content-Length is set if not chunked if (!rep.headers.contains(Fields.ContentLength)) rep.contentLength = rep.content.length trans.write(from[Response, HttpResponse](rep)) } } protected def setKeepAlive(rep: Response, keepAlive: Boolean) { rep.version match { case Version.Http10 => if (keepAlive) { rep.headers.set(Fields.Connection, "keep-alive") } else { rep.headers.remove(Fields.Connection) } case Version.Http11 => if (keepAlive) { rep.headers.remove(Fields.Connection) } else { rep.headers.set(Fields.Connection, "close") } } } }
nkhuyu/finagle
finagle-httpx/src/main/scala/com/twitter/finagle/httpx/codec/HttpServerDispatcher.scala
Scala
apache-2.0
4,976
package com.rrdinsights.scalabrine.models

private[rrdinsights] trait RawResponse {
  def resource: String
  def resultSets: Array[ResultSetResponse]
}

private[rrdinsights] case class ResultSetResponse(name: String,
                                                  headers: Array[String],
                                                  rowSet: Array[Array[Any]])

private[models] trait ResultSetRawResponseConverters {

  protected def converters: Seq[_ <: ResultSetRawResponseConverter[_]]

  protected def converterOf(name: String): ResultSetRawResponseConverter[_] =
    converterOfOpt(name)
      .getOrElse(throw new IllegalArgumentException(s"$name is not a valid result set response object"))

  protected def converterOfOpt(name: String): Option[ResultSetRawResponseConverter[_]] =
    converters.find(_.name == name)

  protected def convert[T <: ConvertedResultSetResponse](rawResponses: Seq[ResultSetResponse],
                                                         converter: ResultSetRawResponseConverter[T]): Seq[T] =
    rawResponses
      .find(_.name == converter.name)
      .map(v => converter.convertRaw(v.rowSet))
      .getOrElse(Seq.empty)
}

private[models] trait ResultSetRawResponseConverter[T <: ConvertedResultSetResponse] {
  def name: String
  def convertRaw(rows: Array[Array[Any]]): Seq[T]
}

private[models] trait ConvertedResultSetResponse
rd11490/Scalabrine
src/main/scala/com/rrdinsights/scalabrine/models/RawResponse.scala
Scala
mit
1,307
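A hypothetical converter wired into the traits above, to show how the name-keyed pattern is meant to be used; the result-set name, the `TeamScore` fields, and the column positions are invented for illustration.

package com.rrdinsights.scalabrine.models

private[models] final case class TeamScore(teamId: Int, points: Int) extends ConvertedResultSetResponse

private[models] object TeamScoreConverter extends ResultSetRawResponseConverter[TeamScore] {
  override val name: String = "TeamScores"

  // Each raw row is positional; columns 0 and 1 are assumed to hold the id and the score.
  override def convertRaw(rows: Array[Array[Any]]): Seq[TeamScore] =
    rows.toSeq.map(row => TeamScore(row(0).toString.toInt, row(1).toString.toInt))
}

private[models] object TeamScoreResponse extends ResultSetRawResponseConverters {
  override protected val converters: Seq[ResultSetRawResponseConverter[_]] = Seq(TeamScoreConverter)

  // Pick out the "TeamScores" result set from a full raw response and convert it.
  def fromRaw(raw: RawResponse): Seq[TeamScore] =
    convert(raw.resultSets.toSeq, TeamScoreConverter)
}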
/*
  Deduction Tactics
  Copyright (C) 2012-2015  Raymond Dodge

  This program is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
package com.rayrobdod.boardGame.view

import org.scalatest.FunSpec

class NilTilesheetTest extends FunSpec {
  describe ("NilTilesheet") {
    val dut = new NilTilesheet[Int, Float, Float](() => 1f, 2f)

    it ("getIconFor(null, 0, 0, null).aboveFrames is BlankIcon") {
      assertResult(Seq(1f)){dut.getIconFor(null, -1, null).aboveFrames}
    }
    it ("getIconFor(null, 0, 1, null).aboveFrames is BlankIcon") {
      assertResult(Seq(1f)){dut.getIconFor(null, 0, null).aboveFrames}
    }
    it ("getIconFor(null, 0, 0, null)._2 is BlankIcon") {
      assertResult(Seq(1f)){dut.getIconFor(null, 1, null).belowFrames}
    }
    it ("dimension is dimension") {
      assertResult(2f){dut.iconDimensions}
    }
  }
}
rayrobdod/boardGame
View/src/test/scala/NilTilesheetTest.scala
Scala
gpl-3.0
1,376
package org.jetbrains.plugins.scala package debugger import com.intellij.concurrency.ConcurrentCollectionFactory import java.{util => ju} import com.intellij.debugger.engine._ import com.intellij.debugger.impl.DebuggerUtilsEx import com.intellij.debugger.jdi.VirtualMachineProxyImpl import com.intellij.debugger.requests.ClassPrepareRequestor import com.intellij.debugger.{MultiRequestPositionManager, NoDataException, PositionManager, SourcePosition} import com.intellij.openapi.diagnostic.ControlFlowException import com.intellij.openapi.editor.Document import com.intellij.openapi.fileTypes.FileType import com.intellij.openapi.project.{DumbService, Project} import com.intellij.openapi.util.Ref import com.intellij.psi._ import com.intellij.psi.search.{FilenameIndex, GlobalSearchScope} import com.intellij.psi.util.CachedValueProvider.Result import com.intellij.psi.util.{CachedValueProvider, CachedValuesManager, PsiTreeUtil} import com.intellij.util.containers.{ConcurrentIntObjectMap, ContainerUtil} import com.sun.jdi._ import com.sun.jdi.request.ClassPrepareRequest import org.jetbrains.annotations.{NotNull, Nullable} import org.jetbrains.plugins.scala.caches.ScalaShortNamesCacheManager import org.jetbrains.plugins.scala.debugger.ScalaPositionManager._ import org.jetbrains.plugins.scala.debugger.TopLevelMembers.{findFileWithTopLevelMembers, topLevelMemberClassName} import org.jetbrains.plugins.scala.debugger.evaluation.ScalaEvaluatorBuilderUtil import org.jetbrains.plugins.scala.debugger.evaluation.evaluator.ScalaCompilingEvaluator import org.jetbrains.plugins.scala.debugger.evaluation.util.DebuggerUtil._ import org.jetbrains.plugins.scala.extensions._ import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes import org.jetbrains.plugins.scala.lang.macros.MacroDef import org.jetbrains.plugins.scala.lang.psi.ElementScope import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScBindingPattern, ScConstructorPattern, ScInfixPattern} import org.jetbrains.plugins.scala.lang.psi.api.expr._ import org.jetbrains.plugins.scala.lang.psi.api.statements._ import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScParameters} import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScPackaging import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._ import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager import org.jetbrains.plugins.scala.lang.psi.types.ValueClassType import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil import org.jetbrains.plugins.scala.macroAnnotations.CachedInUserData import scala.annotation.tailrec import scala.collection.immutable.ArraySeq import scala.collection.mutable import scala.jdk.CollectionConverters._ import scala.reflect.NameTransformer import scala.util.Try /** * @author ilyas */ class ScalaPositionManager(val debugProcess: DebugProcess) extends PositionManager with MultiRequestPositionManager with LocationLineManager { protected[debugger] val caches = new ScalaPositionManagerCaches(debugProcess) private val outerAndNestedTypePartsPattern = """([^\\$]*)(\\$.*)?""".r import caches._ private val debugProcessScope: ElementScope = ElementScope(debugProcess.getProject, debugProcess.getSearchScope) ScalaPositionManager.cacheInstance(this) override def getAcceptedFileTypes: ju.Set[_ <: FileType] = ju.Collections.singleton(ScalaFileType.INSTANCE) @Nullable override def getSourcePosition(@Nullable location: Location): SourcePosition = { if 
(debugProcess.getProject.isDisposed || shouldSkip(location)) return null val position = for { loc <- location.toOption psiFile <- getPsiFileByReferenceType(debugProcess.getProject, loc.declaringType).toOption lineNumber = exactLineNumber(location) if lineNumber >= 0 } yield { calcPosition(psiFile, location, lineNumber).getOrElse { SourcePosition.createFromLine(psiFile, lineNumber) } } position.getOrThrow(NoDataException.INSTANCE) } @NotNull override def getAllClasses(@NotNull position: SourcePosition): ju.List[ReferenceType] = { val file = position.getFile throwIfNotScalaFile(file) val generatedClassName = file.getUserData(ScalaCompilingEvaluator.classNameKey) def hasLocations(refType: ReferenceType, position: SourcePosition): Boolean = { try { val generated = isGeneratedClass(generatedClassName, refType) lazy val sameFile = getPsiFileByReferenceType(file.getProject, refType) == file generated || sameFile && locationsOfLine(refType, position).size > 0 } catch { case _: NoDataException | _: AbsentInformationException | _: ClassNotPreparedException | _: ObjectCollectedException => false } } val exactClasses = mutable.ArrayBuffer.empty[ReferenceType] val namePatterns = mutable.Set[NamePattern]() var packageName: Option[String] = None inReadAction { val possiblePositions = positionsOnLine(file, position.getLine) packageName = possiblePositions.headOption.flatMap(findPackageName) val onTheLine = possiblePositions.map(findGeneratingClassOrMethodParent) if (onTheLine.isEmpty) return ju.Collections.emptyList() val nonLambdaParent = if (isCompiledWithIndyLambdas(file)) { val nonStrictParents = onTheLine.head.withParentsInFile nonStrictParents.find(p => ScalaEvaluatorBuilderUtil.isGenerateNonAnonfunClass(p)) } else None def addExactClasses(td: ScTypeDefinition): Unit = { val qName = getSpecificNameForDebugger(td) val additional = td match { case _: ScTrait => qName.stripSuffix("$class") :: Nil case c: ScClass if ValueClassType.isValueClass(c) => s"$qName$$" :: Nil case c if isDelayedInit(c) => s"$qName$delayedInitBody" :: Nil case _ => Nil } (qName :: additional).foreach { name => exactClasses ++= debugProcess.getVirtualMachineProxy.classesByName(name).asScala } } val sourceImages = onTheLine ++ nonLambdaParent sourceImages.foreach { case null => case td: ScTypeDefinition if !isLocalClass(td) => addExactClasses(td) case elem => val namePattern = NamePattern.forElement(elem) namePatterns ++= Option(namePattern) } } val foundWithPattern = if (namePatterns.isEmpty) Nil else filterAllClasses(c => hasLocations(c, position) && namePatterns.exists(_.matches(c)), packageName) val distinctExactClasses = exactClasses.distinct val loadedNestedClasses = getNestedClasses(distinctExactClasses).filter(hasLocations(_, position)) (distinctExactClasses ++ foundWithPattern ++ loadedNestedClasses).distinct.asJava } @NotNull override def locationsOfLine(@NotNull refType: ReferenceType, @NotNull position: SourcePosition): ju.List[Location] = { throwIfNotScalaFile(position.getFile) checkForIndyLambdas(refType) try { inReadAction { val line: Int = position.getLine locationsOfLine(refType, line).asJava } } catch { case _: AbsentInformationException => ju.Collections.emptyList() } } override def createPrepareRequest(@NotNull requestor: ClassPrepareRequestor, @NotNull position: SourcePosition): ClassPrepareRequest = { throw new IllegalStateException("This class implements MultiRequestPositionManager, corresponding createPrepareRequests version should be used") } override def createPrepareRequests(requestor: 
ClassPrepareRequestor, position: SourcePosition): ju.List[ClassPrepareRequest] = { def isLocalOrUnderDelayedInit(definition: PsiClass): Boolean = { isLocalClass(definition) || isDelayedInit(definition) } def findEnclosingTypeDefinition: Option[ScTypeDefinition] = { @tailrec def notLocalEnclosingTypeDefinition(element: PsiElement): Option[ScTypeDefinition] = { PsiTreeUtil.getParentOfType(element, classOf[ScTypeDefinition]) match { case null => None case td if isLocalClass(td) => notLocalEnclosingTypeDefinition(td.getParent) case td => Some(td) } } val element = nonWhitespaceElement(position) notLocalEnclosingTypeDefinition(element) } def createClassPrepareRequests(classPrepareRequestor: ClassPrepareRequestor, classPreparePattern: String): Seq[ClassPrepareRequest] = { val reqManager = debugProcess.getRequestsManager val patternCoversNestedTypes = classPreparePattern.endsWith("*") if (patternCoversNestedTypes) { List(reqManager.createClassPrepareRequest(classPrepareRequestor, classPreparePattern)) } else { val nestedTypesSuffix = if (classPreparePattern.endsWith("$")) "*" else "$*" val nestedTypesPattern = classPreparePattern + nestedTypesSuffix List(reqManager.createClassPrepareRequest(classPrepareRequestor, classPreparePattern), reqManager.createClassPrepareRequest(classPrepareRequestor, nestedTypesPattern)) } } def createPrepareRequests(position: SourcePosition): Seq[ClassPrepareRequest] = { val qName = new Ref[String](null) val waitRequestor = new Ref[ClassPrepareRequestor](null) inReadAction { val sourceImage = findReferenceTypeSourceImage(position) val insideMacro: Boolean = isInsideMacro(nonWhitespaceElement(position)) sourceImage match { case cl: ScClass if ValueClassType.isValueClass(cl) => //there are no instances of value classes, methods from companion object are used qName.set(getSpecificNameForDebugger(cl) + "$") case tr: ScTrait if !isLocalClass(tr) => //to handle both trait methods encoding qName.set(tr.getQualifiedNameForDebugger + "*") case typeDef: ScTypeDefinition if !isLocalOrUnderDelayedInit(typeDef) => val specificName = getSpecificNameForDebugger(typeDef) qName.set(if (insideMacro) specificName + "*" else specificName) case file: ScalaFile => //top level member in default package qName.set(topLevelMemberClassName(file, None)) case pckg: ScPackaging => qName.set(topLevelMemberClassName(pckg.getContainingFile, Some(pckg))) case _ => findEnclosingTypeDefinition.foreach(typeDef => qName.set(typeDef.getQualifiedNameForDebugger + "*")) } // Enclosing type definition is not found if (qName.get == null) { qName.set(SCRIPT_HOLDER_CLASS_NAME + "*") } waitRequestor.set(new ScalaPositionManager.MyClassPrepareRequestor(position, requestor)) } createClassPrepareRequests(waitRequestor.get, qName.get) } val file = position.getFile throwIfNotScalaFile(file) val possiblePositions = inReadAction { positionsOnLine(file, position.getLine).map(SourcePosition.createFromElement) } possiblePositions.flatMap(createPrepareRequests).asJava } private def throwIfNotScalaFile(file: PsiFile): Unit = { if (!checkScalaFile(file)) throw NoDataException.INSTANCE } private def checkScalaFile(file: PsiFile): Boolean = file match { case sf: ScalaFile => !sf.isCompiled case _ => false } private def findPackageName(position: PsiElement): Option[String] = { def packageWithName(e: PsiElement): Option[String] = e match { case p: ScPackaging => Some(p.fullPackageName) case obj: ScObject if obj.isPackageObject => Some(obj.qualifiedName.stripSuffix("package$")) case _ => None } 
position.parentsInFile.flatMap(packageWithName).headOption } private def filterAllClasses(condition: ReferenceType => Boolean, packageName: Option[String]): collection.Seq[ReferenceType] = { def samePackage(refType: ReferenceType) = { val name = nonLambdaName(refType) val lastDot = name.lastIndexOf('.') val refTypePackageName = if (lastDot < 0) "" else name.substring(0, lastDot) packageName.isEmpty || packageName.contains(refTypePackageName) } def isAppropriate(refType: ReferenceType) = { Try(samePackage(refType) && refType.isInitialized && condition(refType)).getOrElse(false) } for { refType <- debugProcess.getVirtualMachineProxy.allClasses.asScala if isAppropriate(refType) } yield { refType } } @Nullable private def findReferenceTypeSourceImage(@NotNull position: SourcePosition): PsiElement = { val element = nonWhitespaceElement(position) findGeneratingClassOrMethodParent(element) } protected def nonWhitespaceElement(@NotNull position: SourcePosition): PsiElement = { val file = position.getFile @tailrec def nonWhitespaceInner(element: PsiElement, document: Document): PsiElement = { element match { case null => null case _: PsiWhiteSpace if document.getLineNumber(element.getTextRange.getEndOffset) == position.getLine => val nextElement = file.findElementAt(element.getTextRange.getEndOffset) nonWhitespaceInner(nextElement, document) case _ => element } } if (!file.isInstanceOf[ScalaFile]) null else { val firstElement = file.findElementAt(position.getOffset) try { val document = PsiDocumentManager.getInstance(file.getProject).getDocument(file) nonWhitespaceInner(firstElement, document) } catch { case c: ControlFlowException => throw c case _: Throwable => firstElement } } } private def calcPosition(file: PsiFile, location: Location, lineNumber: Int): Option[SourcePosition] = { throwIfNotScalaFile(file) def isDefaultArgument(method: Method) = { val methodName = method.name() val lastDollar = methodName.lastIndexOf("$") if (lastDollar >= 0) { val (start, index) = methodName.splitAt(lastDollar + 1) (start.endsWith("$default$"), index) } else (false, "") } def findDefaultArg(possiblePositions: Seq[PsiElement], defaultArgIndex: String) : Option[PsiElement] = { try { val paramNumber = defaultArgIndex.toInt - 1 possiblePositions.find { e => val scParameters = PsiTreeUtil.getParentOfType(e, classOf[ScParameters]) if (scParameters != null) { val param = scParameters.params(paramNumber) param.isDefaultParam && param.isAncestorOf(e) } else false } } catch { case c: ControlFlowException => throw c case _: Exception => None } } def calcElement(): Option[PsiElement] = { val possiblePositions = positionsOnLine(file, lineNumber) val currentMethod = location.method() lazy val (isDefaultArg, defaultArgIndex) = isDefaultArgument(currentMethod) def findPsiElementForIndyLambda(): Option[PsiElement] = { val lambdas = lambdasOnLine(file, lineNumber) val methods = indyLambdaMethodsOnLine(location.declaringType(), lineNumber) val methodsToLambdas = methods.zip(lambdas).toMap methodsToLambdas.get(currentMethod) } def functionExprBody(element: PsiElement): PsiElement = element match { case ScFunctionExpr(_, Some(body)) => body case _ => element } if (possiblePositions.size <= 1) { possiblePositions.headOption } else if (isIndyLambda(currentMethod)) { findPsiElementForIndyLambda().map(functionExprBody) } else if (isDefaultArg) { findDefaultArg(possiblePositions, defaultArgIndex) } else if (!isAnonfun(currentMethod)) { possiblePositions.find { case e: PsiElement if isLambda(e) => false case (_: ScExpression) childOf 
(_: ScParameter) => false case _ => true } } else { val generatingPsiElem = findElementByReferenceType(location.declaringType()) possiblePositions .find(p => generatingPsiElem.contains(findGeneratingClassOrMethodParent(p))) .map(functionExprBody) } } calcElement().filter(_.isValid).map(SourcePosition.createFromElement) } @Nullable private def getPsiFileByReferenceType(project: Project, refType: ReferenceType): PsiFile = { if (refType == null) return null if (refTypeToFileCache.contains(refType)) return refTypeToFileCache(refType) def findFile() = { def withDollarTestName(originalQName: String): Option[String] = { val dollarTestSuffix = "$Test" //See SCL-9340 if (originalQName.endsWith(dollarTestSuffix)) Some(originalQName) else if (originalQName.contains(dollarTestSuffix + "$")) { val index = originalQName.indexOf(dollarTestSuffix) + dollarTestSuffix.length Some(originalQName.take(index)) } else None } def topLevelClassName(originalQName: String): String = { if (originalQName.endsWith(packageSuffix)) originalQName else originalQName.replace(packageSuffix, ".").takeWhile(_ != '$') } def tryToFindClass(name: String) = { val classes = findClassesByQName(name, debugProcessScope, fallbackToProjectScope = true) classes.find(!_.isInstanceOf[ScObject]) .orElse(classes.headOption) } val originalQName = NameTransformer.decode(nonLambdaName(refType)) if (originalQName.endsWith("$package$")) findFileWithTopLevelMembers(debugProcessScope, originalQName).orNull else { val clazz = withDollarTestName(originalQName) .flatMap(tryToFindClass) .orElse(tryToFindClass(topLevelClassName(originalQName))) clazz.map(_.getNavigationElement.getContainingFile).orNull } } val file = inReadAction(findFile()) if (file != null && refType.methods().asScala.exists(isIndyLambda)) { isCompiledWithIndyLambdasCache.put(file, true) } refTypeToFileCache.put(refType, file) file } private def nameMatches(elem: PsiElement, refType: ReferenceType): Boolean = { val pattern = NamePattern.forElement(elem) pattern != null && pattern.matches(refType) } private def checkForIndyLambdas(refType: ReferenceType) = { if (!refTypeToFileCache.contains(refType)) { getPsiFileByReferenceType(debugProcess.getProject, refType) } } def findElementByReferenceType(refType: ReferenceType): Option[PsiElement] = { refTypeToElementCache.get(refType) match { case Some(Some(p)) if p.getElement != null => Some(p.getElement) case Some(Some(_)) | None => val found = findElementByReferenceTypeInner(refType) refTypeToElementCache.update(refType, found.map { element => implicit val manager: SmartPointerManager = SmartPointerManager.getInstance(debugProcess.getProject) element.createSmartPointer }) found case Some(None) => None } } private def findElementByReferenceTypeInner(refType: ReferenceType): Option[PsiElement] = { val byName = findPsiClassByQName(refType, debugProcessScope) orElse findByShortName(refType) if (byName.isDefined) return byName val project = debugProcess.getProject val allLocations = Try(refType.allLineLocations().asScala).getOrElse(Seq.empty) val refTypeLineNumbers = allLocations.map(checkedLineNumber).filter(_ > 0) if (refTypeLineNumbers.isEmpty) return None val firstRefTypeLine = refTypeLineNumbers.min val lastRefTypeLine = refTypeLineNumbers.max val refTypeLines = firstRefTypeLine to lastRefTypeLine val file = getPsiFileByReferenceType(project, refType) if (!checkScalaFile(file)) return None val document = PsiDocumentManager.getInstance(project).getDocument(file) if (document == null) return None def elementLineRange(elem: PsiElement, 
document: Document) = { val startLine = document.getLineNumber(elem.getTextRange.getStartOffset) val endLine = document.getLineNumber(elem.getTextRange.getEndOffset) startLine to endLine } def checkLines(elem: PsiElement, document: Document) = { val lineRange = elementLineRange(elem, document) //intersection, very loose check because sometimes first line for <init> method is after range of the class firstRefTypeLine <= lineRange.end && lastRefTypeLine >= lineRange.start } def isAppropriateCandidate(elem: PsiElement) = { checkLines(elem, document) && ScalaEvaluatorBuilderUtil.isGenerateClass(elem) && nameMatches(elem, refType) } def findCandidates(): Seq[PsiElement] = { def findAt(offset: Int): Option[PsiElement] = { val startElem = file.findElementAt(offset) startElem.parentsInFile.find(isAppropriateCandidate) } if (lastRefTypeLine - firstRefTypeLine >= 2 && firstRefTypeLine + 1 <= document.getLineCount - 1) { val offsetsInTheMiddle = Seq( document.getLineEndOffset(firstRefTypeLine), document.getLineEndOffset(firstRefTypeLine + 1) ) offsetsInTheMiddle.flatMap(findAt).distinct } else { val firstLinePositions = positionsOnLine(file, firstRefTypeLine) val allPositions = if (firstRefTypeLine == lastRefTypeLine) firstLinePositions else firstLinePositions ++ positionsOnLine(file, lastRefTypeLine) allPositions.distinct.filter(isAppropriateCandidate) } } def filterWithSignature(candidates: Seq[PsiElement]): Seq[PsiElement] = { val applySignature = refType.methodsByName("apply").asScala.find(m => !m.isSynthetic).map(_.signature()) if (applySignature.isEmpty) candidates else { candidates.filter(l => applySignature == lambdaJVMSignature(l)) } } val candidates = findCandidates() if (candidates.size <= 1) return candidates.headOption if (refTypeLines.size > 1) { val withExactlySameLines = candidates.filter(elementLineRange(_, document) == refTypeLines) if (withExactlySameLines.size == 1) return withExactlySameLines.headOption } if (candidates.exists(!isLambda(_))) return candidates.headOption val filteredWithSignature = filterWithSignature(candidates) if (filteredWithSignature.size == 1) return filteredWithSignature.headOption val byContainingClasses = filteredWithSignature.groupBy(c => findGeneratingClassOrMethodParent(c.getParent)) if (byContainingClasses.size > 1) { findContainingClass(refType) match { case Some(e) => return byContainingClasses.get(e).flatMap(_.headOption) case None => } } filteredWithSignature.headOption } private def findByShortName(refType: ReferenceType): Option[PsiClass] = { val project = debugProcess.getProject if (DumbService.getInstance(project).isDumb) return None lazy val sourceName = cachedSourceName(refType).getOrElse("") def sameFileName(elem: PsiElement) = { val containingFile = elem.getContainingFile containingFile != null && containingFile.name == sourceName } val originalQName = NameTransformer.decode(refType.name) val withoutSuffix = if (originalQName.endsWith(packageSuffix)) originalQName else originalQName.replace(packageSuffix, ".").stripSuffix("$").stripSuffix("$class") val lastDollar = withoutSuffix.lastIndexOf('$') val lastDot = withoutSuffix.lastIndexOf('.') val index = Seq(lastDollar, lastDot, 0).max + 1 val name = withoutSuffix.drop(index) val isScalaObject = originalQName.endsWith("$") val cacheManager = ScalaShortNamesCacheManager.getInstance(project) val classes = cacheManager.getClassesByName(name, GlobalSearchScope.allScope(project)).toSeq val inSameFile = classes.filter(c => c.isValid && sameFileName(c)) if (inSameFile.length == 1) classes.headOption 
else if (inSameFile.length >= 2) { if (isScalaObject) inSameFile.find(_.isInstanceOf[ScObject]) else inSameFile.find(!_.isInstanceOf[ScObject]) } else None } private def findContainingClass(refType: ReferenceType): Option[PsiElement] = { def classesByName(s: String) = { val vm = debugProcess.getVirtualMachineProxy vm.classesByName(s) } val fullName = refType.name() val containingClassName = DebuggerUtilsEx.getLambdaBaseClassName(fullName) match { case baseClassName: String => Some(baseClassName) case null => val decoded = NameTransformer.decode(fullName) val index = decoded.lastIndexOf("$$") if (index < 0) None else Some(NameTransformer.encode(decoded.substring(0, index))) } for { name <- containingClassName clazz <- classesByName(name).asScala.headOption elem <- findElementByReferenceType(clazz) } yield elem } private def nonLambdaName(refType: ReferenceType): String = { val fullName = refType.name() //typeName can be SomeClass$$Lambda$1.1836643189 DebuggerUtilsEx.getLambdaBaseClassName(fullName) match { case null => fullName case name => name } } /** * Retrieve potentially nested classes currently loaded to VM just by iterating all classes and taking into account * the name mangling - instead of using VirtualMachineProxy's nestedTypes method (with caches etc.). */ private def getNestedClasses(outerClasses: collection.Seq[ReferenceType]) = { for { outer <- outerClasses nested <- debugProcess.getVirtualMachineProxy.allClasses().asScala if outer != nested && extractOuterTypeName(nested.name) == outer.name } yield nested } private def extractOuterTypeName(typeName: String) = typeName match { case outerAndNestedTypePartsPattern(outerTypeName, _) => outerTypeName } } object ScalaPositionManager { private val SCRIPT_HOLDER_CLASS_NAME: String = "Main$$anon$1" private val delayedInitBody = "delayedInit$body" private val isCompiledWithIndyLambdasCache = mutable.HashMap[PsiFile, Boolean]() private val instances = mutable.HashMap[DebugProcess, ScalaPositionManager]() private def cacheInstance(scPosManager: ScalaPositionManager): Unit = { val debugProcess = scPosManager.debugProcess instances.put(debugProcess, scPosManager) debugProcess.addDebugProcessListener(new DebugProcessListener { override def processDetached(process: DebugProcess, closedByUser: Boolean): Unit = { ScalaPositionManager.instances.remove(process) debugProcess.removeDebugProcessListener(this) } }) } def instance(vm: VirtualMachine): Option[ScalaPositionManager] = instances.collectFirst { case (process, manager) if getVM(process).contains(vm) => manager } def instance(debugProcess: DebugProcess): Option[ScalaPositionManager] = instances.get(debugProcess) def instance(mirror: Mirror): Option[ScalaPositionManager] = instance(mirror.virtualMachine()) private def getVM(debugProcess: DebugProcess) = { if (!DebuggerManagerThreadImpl.isManagerThread) None else { debugProcess.getVirtualMachineProxy match { case impl: VirtualMachineProxyImpl => Option(impl.getVirtualMachine) case _ => None } } } def positionsOnLine(file: PsiFile, lineNumber: Int): Seq[PsiElement] = { //stored in `file`, invalidated on `file` change @CachedInUserData(file, file) def cachedMap: ConcurrentIntObjectMap[Seq[PsiElement]] = ConcurrentCollectionFactory.createConcurrentIntObjectMap() if (lineNumber < 0) return Seq.empty val scFile: ScalaFile = file match { case sf: ScalaFile => sf case _ => return Seq.empty } val map = cachedMap Option(map.get(lineNumber)) .getOrElse(map.cacheOrGet(lineNumber, positionsOnLineInner(scFile, lineNumber))) } def 
checkedLineNumber(location: Location): Int = try location.lineNumber() - 1 catch {case _: InternalError => -1} def cachedSourceName(refType: ReferenceType): Option[String] = { ScalaPositionManager.instance(refType).map(_.caches).flatMap(_.cachedSourceName(refType)) } private def positionsOnLineInner(file: ScalaFile, lineNumber: Int): Seq[PsiElement] = { inReadAction { val document = PsiDocumentManager.getInstance(file.getProject).getDocument(file) if (document == null || lineNumber >= document.getLineCount) return Seq.empty val startLine = document.getLineStartOffset(lineNumber) val endLine = document.getLineEndOffset(lineNumber) def elementsOnTheLine(file: ScalaFile, lineNumber: Int): Seq[PsiElement] = { val builder = ArraySeq.newBuilder[PsiElement] var elem = file.findElementAt(startLine) while (elem != null && elem.getTextOffset <= endLine) { elem match { case ChildOf(_: ScUnitExpr) | ChildOf(ScBlock()) => builder += elem case ElementType(t) if ScalaTokenTypes.WHITES_SPACES_AND_COMMENTS_TOKEN_SET.contains(t) || ScalaTokenTypes.ANY_BRACKETS_TOKEN_SET.contains(t) => case _ => builder += elem } elem = PsiTreeUtil.nextLeaf(elem, true) } builder.result() } def findParent(element: PsiElement): Option[PsiElement] = { val parentsOnTheLine = element.withParentsInFile.takeWhile(e => e.getTextOffset > startLine).toIndexedSeq val anon = parentsOnTheLine.collectFirst { case e if isLambda(e) => e case newTd: ScNewTemplateDefinition if generatesAnonClass(newTd) => newTd } val filteredParents = parentsOnTheLine.reverse.filter { case _: ScExpression => true case _: ScConstructorPattern | _: ScInfixPattern | _: ScBindingPattern => true case callRefId childOf ((ref: ScReferenceExpression) childOf (_: ScMethodCall)) if ref.nameId == callRefId && ref.getTextRange.getStartOffset < startLine => true case _: ScTypeDefinition => true case _ => false } val maxExpressionPatternOrTypeDef = filteredParents.find(!_.isInstanceOf[ScBlock]).orElse(filteredParents.headOption) Seq(anon, maxExpressionPatternOrTypeDef).flatten.sortBy(_.getTextLength).headOption } elementsOnTheLine(file, lineNumber).flatMap(findParent).distinct } } def isLambda(element: PsiElement): Boolean = { ScalaEvaluatorBuilderUtil.isGenerateAnonfun211(element) && !isInsideMacro(element) } def lambdasOnLine(file: PsiFile, lineNumber: Int): Seq[PsiElement] = { positionsOnLine(file, lineNumber).filter(isLambda) } def isIndyLambda(m: Method): Boolean = { val name = m.name() name.contains("$anonfun$") && name.charAt(name.length - 1).isDigit } def isAnonfunType(refType: ReferenceType): Boolean = { refType match { case ct: ClassType => val supClass = ct.superclass() supClass != null && supClass.name().startsWith("scala.runtime.AbstractFunction") case _ => false } } def isAnonfun(m: Method): Boolean = { isIndyLambda(m) || m.name.startsWith("apply") && isAnonfunType(m.declaringType()) } def indyLambdaMethodsOnLine(refType: ReferenceType, lineNumber: Int): Seq[Method] = { def ordinal(m: Method) = { val name = m.name() val lastDollar = name.lastIndexOf('$') Try(name.substring(lastDollar + 1).toInt).getOrElse(-1) } val all = refType.methods().asScala.iterator.filter(isIndyLambda) val onLine = all.filter(m => Try(!m.locationsOfLine(lineNumber + 1).isEmpty).getOrElse(false)) onLine.toSeq.sortBy(ordinal) } def isCompiledWithIndyLambdas(file: PsiFile): Boolean = { if (file == null) false else { val originalFile = Option(file.getUserData(ScalaCompilingEvaluator.originalFileKey)).getOrElse(file) isCompiledWithIndyLambdasCache.getOrElse(originalFile, false) } } @tailrec 
def findGeneratingClassOrMethodParent(element: PsiElement): PsiElement = { element match { case null => null case f: ScalaFile => f case p: ScPackaging => p case elem if ScalaEvaluatorBuilderUtil.isGenerateClass(elem) || isLambda(elem) => elem case elem if isMacroCall(elem) => elem case elem => findGeneratingClassOrMethodParent(elem.getParent) } } private object InsideMacro { def unapply(elem: PsiElement): Option[ScMethodCall] = { elem.parentsInFile.collectFirst { case mc: ScMethodCall if isMacroCall(mc) => mc } } } def isInsideMacro(elem: PsiElement): Boolean = elem.parentsInFile.exists(isMacroCall) private def isMacroCall(elem: PsiElement): Boolean = elem match { case ScMethodCall(ResolvesTo(MacroDef(_)), _) => true case _ => false } object InsideAsync { def unapply(elem: PsiElement): Option[ScMethodCall] = elem match { case InsideMacro(call @ ScMethodCall(ref: ScReferenceExpression, _)) if ref.refName == "async" => Some(call) case _ => None } } def shouldSkip(location: Location, debugProcess: DebugProcess): Boolean = { ScalaPositionManager.instance(debugProcess).forall(_.shouldSkip(location)) } private def getSpecificNameForDebugger(td: ScTypeDefinition): String = { val name = td.getQualifiedNameForDebugger td match { case _: ScObject => s"$name$$" case _: ScTrait => s"$name$$class" case _ => name } } def isDelayedInit(cl: PsiClass): Boolean = cl match { case obj: ScObject => val manager: ScalaPsiManager = ScalaPsiManager.instance(obj.getProject) val clazz: PsiClass = manager.getCachedClass(obj.resolveScope, "scala.DelayedInit").orNull clazz != null && obj.isInheritor(clazz, true) case _ => false } private def isGeneratedClass(generatedClassName: String, refType: ReferenceType): Boolean = { if (generatedClassName == null) return false val name = refType.name() val index = name.lastIndexOf(generatedClassName) index >= 0 && { val suffix = name.substring(index + generatedClassName.length) //we need exact class, not possible lambdas inside //but local classes may have suffices like $1 !suffix.exists(_.isLetter) } } private class MyClassPrepareRequestor(position: SourcePosition, requestor: ClassPrepareRequestor) extends ClassPrepareRequestor { private val sourceFile = position.getFile private val sourceName = sourceFile.getName private def sourceNameOf(refType: ReferenceType): Option[String] = ScalaPositionManager.cachedSourceName(refType) override def processClassPrepare(debuggerProcess: DebugProcess, referenceType: ReferenceType): Unit = { val positionManager: CompoundPositionManager = debuggerProcess.asInstanceOf[DebugProcessImpl].getPositionManager if (!sourceNameOf(referenceType).contains(sourceName)) return if (positionManager.locationsOfLine(referenceType, position).size > 0) { requestor.processClassPrepare(debuggerProcess, referenceType) } else { val positionClasses: ju.List[ReferenceType] = positionManager.getAllClasses(position) if (positionClasses.contains(referenceType)) { requestor.processClassPrepare(debuggerProcess, referenceType) } } } } private class NamePattern(elem: PsiElement) { private val containingFile = elem.getContainingFile private val sourceName = containingFile.getName private val isGeneratedForCompilingEvaluator = containingFile.getUserData(ScalaCompilingEvaluator.classNameKey) != null private var compiledWithIndyLambdas = isCompiledWithIndyLambdas(containingFile) private val exactName: Option[String] = { elem match { case td: ScTypeDefinition if !isLocalClass(td) => Some(getSpecificNameForDebugger(td)) case _ => None } } private var classJVMNameParts: 
Seq[String] = _ private def computeClassJVMNameParts(elem: PsiElement): Seq[String] = { if (exactName.isDefined) Seq.empty else inReadAction { elem match { case InsideMacro(call) => computeClassJVMNameParts(call.getParent) case _ => val parts = elem.withParentsInFile.flatMap(partsFor) parts.toSeq.reverse } } } private def partsFor(elem: PsiElement): Seq[String] = { elem match { case o: ScObject if o.isPackageObject => Seq("package$") case td: ScTypeDefinition => Seq(ScalaNamesUtil.toJavaName(td.name)) case newTd: ScNewTemplateDefinition if generatesAnonClass(newTd) => Seq("$anon") case e if ScalaEvaluatorBuilderUtil.isGenerateClass(e) => partsForAnonfun(e) case _ => Seq.empty } } private def partsForAnonfun(elem: PsiElement): Seq[String] = { val anonfunCount = ScalaEvaluatorBuilderUtil.anonClassCount(elem) val lastParts = Seq.fill(anonfunCount - 1)(Seq("$apply", "$anonfun")).flatten val containingClass = findGeneratingClassOrMethodParent(elem.getParent) val owner = PsiTreeUtil.getParentOfType(elem, classOf[ScFunctionDefinition], classOf[ScTypeDefinition], classOf[ScPatternDefinition], classOf[ScVariableDefinition]) val firstParts = if (PsiTreeUtil.isAncestor(owner, containingClass, true)) Seq("$anonfun") else owner match { case fun: ScFunctionDefinition => val name = if (fun.name == "this") JVMNameUtil.CONSTRUCTOR_NAME else fun.name val encoded = NameTransformer.encode(name) Seq(s"$$$encoded", "$anonfun") case _ => Seq("$anonfun") } lastParts ++ firstParts } private def checkParts(name: String): Boolean = { var nameTail = name updateParts() for (part <- classJVMNameParts) { val index = nameTail.indexOf(part) if (index >= 0) { nameTail = nameTail.substring(index + part.length) } else return false } nameTail.indexOf("$anon") == -1 } def updateParts(): Unit = { val newValue = isCompiledWithIndyLambdas(containingFile) if (newValue != compiledWithIndyLambdas || classJVMNameParts == null) { compiledWithIndyLambdas = newValue classJVMNameParts = computeClassJVMNameParts(elem) } } def matches(refType: ReferenceType): Boolean = { val refTypeSourceName = cachedSourceName(refType).getOrElse("") if (refTypeSourceName != sourceName && !isGeneratedForCompilingEvaluator) return false val name = refType.name() exactName match { case Some(qName) => qName == name || qName.stripSuffix("$class") == name case None => checkParts(name) } } } private object NamePattern { def forElement(elem: PsiElement): NamePattern = { if (elem == null || !ScalaEvaluatorBuilderUtil.isGenerateClass(elem)) return null val cacheProvider = new CachedValueProvider[NamePattern] { override def compute(): Result[NamePattern] = Result.create(new NamePattern(elem), elem) } CachedValuesManager.getCachedValue(elem, cacheProvider) } } private[debugger] class ScalaPositionManagerCaches(debugProcess: DebugProcess) { debugProcess.addDebugProcessListener(new DebugProcessListener { override def processDetached(process: DebugProcess, closedByUser: Boolean): Unit = { clear() process.removeDebugProcessListener(this) } }) val refTypeToFileCache: mutable.HashMap[ReferenceType, PsiFile] = mutable.HashMap[ReferenceType, PsiFile]() val refTypeToElementCache: mutable.HashMap[ReferenceType, Option[SmartPsiElementPointer[PsiElement]]] = mutable.HashMap[ReferenceType, Option[SmartPsiElementPointer[PsiElement]]]() val customizedLocationsCache: mutable.HashMap[Location, Int] = mutable.HashMap[Location, Int]() val lineToCustomizedLocationCache: mutable.HashMap[(ReferenceType, Int), Seq[Location]] = mutable.HashMap[(ReferenceType, Int), Seq[Location]]() val 
seenRefTypes: mutable.Set[ReferenceType] = mutable.Set[ReferenceType]() val sourceNames: mutable.HashMap[ReferenceType, Option[String]] = mutable.HashMap[ReferenceType, Option[String]]() def cachedSourceName(refType: ReferenceType): Option[String] = sourceNames.getOrElseUpdate(refType, Try(refType.sourceName()).toOption) def clear(): Unit = { isCompiledWithIndyLambdasCache.clear() refTypeToFileCache.clear() refTypeToElementCache.clear() customizedLocationsCache.clear() lineToCustomizedLocationCache.clear() seenRefTypes.clear() sourceNames.clear() } } }
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/debugger/ScalaPositionManager.scala
Scala
apache-2.0
40,870
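The NamePattern logic in the file above decides whether a JVM ReferenceType can correspond to a given PSI element by checking that the computed name parts occur, in order, inside the type name and that no unexpected anonymous-class suffix is left over afterwards. A minimal, self-contained sketch of that ordered-substring check, with illustrative (not plugin-derived) part lists and type names:

// Standalone sketch of the ordered-substring check performed by NamePattern.checkParts.
// Example inputs below are illustrative only.
object NamePartsCheck {
  def matchesParts(jvmName: String, parts: Seq[String]): Boolean = {
    var tail = jvmName
    val allFound = parts.forall { part =>
      val i = tail.indexOf(part)
      if (i >= 0) { tail = tail.substring(i + part.length); true } else false
    }
    // after consuming every part, no further anonymous-class marker may remain
    allFound && !tail.contains("$anon")
  }

  def main(args: Array[String]): Unit = {
    println(matchesParts("com.example.Outer$$anonfun$foo$1", Seq("Outer", "$anonfun"))) // true
    println(matchesParts("com.example.Outer$$anon$1", Seq("Outer")))                    // false: anonymous class remains
  }
}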
/*******************************************************************************
 * Copyright (c) 2019. Carl Minden
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 ******************************************************************************/
package com.anathema_roguelike
package entities.characters.player.perks.abilities.potions

class FleetFootedElixir() extends Elixir(null, null) {
}
carlminden/anathema-roguelike
src/com/anathema_roguelike/entities/characters/player/perks/abilities/potions/FleetFootedElixir.scala
Scala
gpl-3.0
994
import sbt._
import Keys._

import play.twirl.sbt.SbtTwirl
import play.twirl.sbt.Import.TwirlKeys

object Resolvers {
  val typesafe = "Typesafe repository" at "http://repo.typesafe.com/typesafe/releases/"
}

object Dependencies {
  // for autodoc-sbt
  val playPlugin = "com.typesafe.play" %% "sbt-plugin" % System.getProperty("play.version", "2.4.0")

  // for autodoc-core
  val playTest = "com.typesafe.play" %% "play-test" % System.getProperty("play.version", "2.4.0")
  val scalaTest = "org.scalatest" %% "scalatest" % "2.2.5" % "test"
  val scalaTestPlus = "org.scalatestplus" %% "play" % "1.4.0-M2" % "test"
}

object BuildSettings {
  import xerial.sbt.Sonatype.SonatypeKeys.sonatypeProfileName
  import scala.Console.{CYAN, RESET}

  val buildSettings = com.typesafe.sbt.SbtScalariform.scalariformSettings ++ Seq(
    organization := "com.krrrr38",
    scalaVersion := "2.10.5",
    version := "0.2.1-SNAPSHOT",
    scalacOptions ++= (
      "-deprecation" ::
      "-feature" ::
      "-unchecked" ::
      "-Xlint" ::
      Nil
    ),
    scalacOptions ++= {
      if (scalaVersion.value.startsWith("2.11")) Seq("-Ywarn-unused", "-Ywarn-unused-import") else Nil
    },
    shellPrompt := { state => s"$CYAN${name.value}$RESET > " }
  )

  val publishSettings = Seq(
    isSnapshot := false,
    sonatypeProfileName := "com.krrrr38",
    pomExtra := {
      <url>http://github.com/krrrr38/play-autodoc</url>
      <scm>
        <url>[email protected]:krrrr38/play-autodoc.git</url>
        <connection>scm:git:[email protected]:krrrr38/play-autodoc.git</connection>
      </scm>
      <licenses>
        <license>
          <name>MIT License</name>
          <url>http://www.opensource.org/licenses/mit-license.php</url>
          <distribution>repo</distribution>
        </license>
      </licenses>
      <developers>
        <developer>
          <id>krrrr38</id>
          <name>Ken Kaizu</name>
          <url>http://www.krrrr38.com</url>
        </developer>
      </developers>
    },
    publishArtifact in Test := false,
    publishMavenStyle := true,
    pomIncludeRepository := { _ => false },
    publishTo <<= version { (v: String) =>
      val nexus = "https://oss.sonatype.org/"
      if (v.trim.endsWith("SNAPSHOT")) Some("snapshots" at nexus + "content/repositories/snapshots")
      else Some("releases" at nexus + "service/local/staging/deploy/maven2")
    }
  )
}

object PlayAutodocBuild extends Build {
  import BuildSettings._
  import Resolvers._
  import Dependencies._

  lazy val autodoc = Project(
    "play-autodoc-core",
    file("play-autodoc-core"),
    settings = buildSettings ++ publishSettings ++ Seq(
      name := "play-autodoc-core",
      description := "Generate documentation from your play application request tests.",
      crossScalaVersions := scalaVersion.value :: "2.11.6" :: Nil,
      resolvers += typesafe,
      libraryDependencies ++= Seq(
        playTest,
        scalaTest,
        scalaTestPlus
      ),
      TwirlKeys.templateImports ++= Seq(
        "com.krrrr38.play.autodoc.{ Request, Response }"
      ),
      TwirlKeys.templateFormats += ("md" -> "com.krrrr38.play.autodoc.twirl.MarkdownFormat")
    )
  ).enablePlugins(SbtTwirl)

  lazy val autodocPlugin = Project(
    "play-autodoc-sbt",
    file("play-autodoc-sbt"),
    settings = buildSettings ++ publishSettings ++ Seq(
      name := "play-autodoc-sbt",
      description := "Generate documentation from your play application request tests.",
      sbtPlugin := true,
      resolvers += typesafe,
      addSbtPlugin(playPlugin),
      sourceGenerators in Compile <+= (version, sourceManaged in Compile) map Tasks.AutodocVersion
    )
  )

  lazy val root = Project(
    "root",
    file("."),
    settings = Defaults.coreDefaultSettings ++ Seq(
      shellPrompt := { status => "There are no contents on root project, see `projects` to change project\\n> " },
      packagedArtifacts := Map.empty // prevent publishing
    )
  ).aggregate(autodoc, autodocPlugin)
}

object Tasks {
  def AutodocVersion(version: String, dir: File): Seq[File] = {
    val file = dir / "AutodocVersion.scala"
    IO.write(file,
      """package com.krrrr38.play.autodoc
        |
        |object AutodocVersion {
        | val value = "%s"
        |}
      """.stripMargin.format(version))
    Seq(file)
  }
}
krrrr38/play-autodoc
project/Build.scala
Scala
mit
4,428
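The play-autodoc-sbt project wires `sourceGenerators in Compile` to Tasks.AutodocVersion, which writes a single managed source file carrying the build version. Substituting the version declared in the build ("0.2.1-SNAPSHOT") into the template above yields, modulo whitespace, the following generated AutodocVersion.scala:

package com.krrrr38.play.autodoc

object AutodocVersion {
  val value = "0.2.1-SNAPSHOT"
}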
// Copyright (C) 2015, codejitsu.

package net.codejitsu.tasks

import java.net.URL

import net.codejitsu.tasks.dsl._

/**
 * Download url.
 *
 * @param hosts hosts.
 * @param url url to download.
 * @param destinationPath destination path on hosts.
 * @param usingSudo true, if sudo needed.
 * @param usingPar true, if parallel execution required.
 * @param user user.
 */
final case class Download[S <: Stage](hosts: Hosts,
                                      url: URL,
                                      destinationPath: Option[String] = None,
                                      usingSudo: Boolean = false,
                                      usingPar: Boolean = false,
                                      exec: String = Download.getExec(),
                                      params: List[String] = List("-P"),
                                      verbose: VerbosityLevel = NoOutput)(implicit user: User, stage: S, rights: S Allow Download[S])
  extends GenericTask("wget", "download url", hosts, exec,
    params ++ List(url.toString) ++ Download.getDestination(destinationPath),
    usingSudo, usingPar, taskRepr = s"download url '${url.toString}'", verbose = Option(verbose))
  with UsingSudo[Download[S]] with UsingParallelExecution[Download[S]] {

  override def sudo: Download[S] = copy[S](usingSudo = true)
  override def par: Download[S] = copy[S](usingPar = true)
}

object Download {
  def getExec(): String = OS.getCurrentOs() match {
    case Linux => "/usr/bin/wget"
    case MacOS => "/usr/local/bin/wget"
    case _ => throw new IllegalArgumentException("Not supported OS")
  }

  def getDestination(destinationPath: Option[String]): List[String] =
    destinationPath.fold(List.empty[String])(d => List(d))
}
codejitsu/tasks
tasks-dsl/src/main/scala/net/codejitsu/tasks/Download.scala
Scala
apache-2.0
1,662
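The task above delegates command construction to GenericTask, passing `params ++ List(url.toString) ++ Download.getDestination(destinationPath)` as the wget argument list. A small standalone sketch of that assembly (example URL and path are hypothetical, and the function is not part of the library):

// Standalone sketch of how Download builds the wget argument list it hands to GenericTask.
object DownloadArgsSketch {
  def wgetArgs(url: String, destinationPath: Option[String], params: List[String] = List("-P")): List[String] =
    params ++ List(url) ++ destinationPath.fold(List.empty[String])(d => List(d))

  def main(args: Array[String]): Unit = {
    println(wgetArgs("http://example.com/archive.tgz", Some("/tmp")))
    // List(-P, http://example.com/archive.tgz, /tmp)
  }
}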
package com.sksamuel.avro4s

import java.io.{ByteArrayOutputStream, File}

import org.scalatest.{Matchers, WordSpec}

case class Version1(string: String)
case class Version2(string: String, int: Int = 3)

class AvroBinaryTest extends WordSpec with Matchers {

  val tgtbtu = Score("The good, the bad and the ugly", "ennio", Rating(10000))

  "AvroBinary" should {
    "be able to read its own output" in {
      val pepperoni = Pizza("pepperoni", Seq(Ingredient("pepperoni", 12, 4.4), Ingredient("onions", 1, 0.4)), false, false, 98)

      val file: File = new File("pizzas.avro.binary")
      val os = AvroOutputStream.binary[Pizza](file)
      os.write(pepperoni)
      os.close()

      val is = AvroInputStream.binary[Pizza](file)
      val pizzas = is.iterator.toList
      pizzas shouldBe List(pepperoni)
      is.close()
      file.delete()
    }
    "support value classes" in {
      val baos = new ByteArrayOutputStream()
      val output = AvroOutputStream.binary[Score](baos)
      output.write(tgtbtu)
      output.close()

      val is = AvroInputStream.binary[Score](baos.toByteArray)
      val scores = is.iterator.toList
      scores shouldBe List(tgtbtu)
      is.close()
    }
    "support schema evolution" in {
      val v1 = Version1("hello")

      val baos = new ByteArrayOutputStream()
      val output = AvroOutputStream.binary[Version1](baos)
      output.write(v1)
      output.close()

      val is = AvroInputStream.binary[Version2](baos.toByteArray, AvroSchema[Version1])
      val v2 = is.iterator.toList.head
      is.close()

      v2.string shouldEqual v1.string
      v2.int shouldEqual 3
    }
  }
}
YuvalItzchakov/avro4s
avro4s-core/src/test/scala/com/sksamuel/avro4s/AvroBinaryTest.scala
Scala
mit
1,643
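The schema-evolution test works because records written with Version1's schema are decoded as Version2, and the added `int` field falls back to the case-class default of 3. A hedged sketch for inspecting the two derived schemas, using the same AvroSchema derivation the test already relies on (the case classes simply mirror the test; exact schema output depends on the avro4s version):

import com.sksamuel.avro4s.AvroSchema

object SchemaEvolutionSketch {
  case class Version1(string: String)
  case class Version2(string: String, int: Int = 3)

  def main(args: Array[String]): Unit = {
    // Print both schemas to compare the writer (Version1) and reader (Version2) shapes.
    println(AvroSchema[Version1])
    println(AvroSchema[Version2])
  }
}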
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.server import java.io.{File, IOException} import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong} import com.yammer.metrics.core.Gauge import kafka.api._ import kafka.cluster.{Partition, Replica} import kafka.common._ import kafka.controller.KafkaController import kafka.log.{LogAppendInfo, LogManager} import kafka.message.{ByteBufferMessageSet, InvalidMessageException, Message, MessageSet} import kafka.metrics.KafkaMetricsGroup import kafka.server.QuotaFactory.UnboundedQuota import kafka.utils._ import org.apache.kafka.common.errors.{ControllerMovedException, CorruptRecordException, InvalidTimestampException, InvalidTopicException, NotLeaderForPartitionException, OffsetOutOfRangeException, RecordBatchTooLargeException, RecordTooLargeException, ReplicaNotAvailableException, UnknownTopicOrPartitionException} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{LeaderAndIsrRequest, PartitionState, StopReplicaRequest, UpdateMetadataRequest} import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.requests.FetchRequest.PartitionData import org.apache.kafka.common.utils.{Time => JTime} import scala.collection._ import scala.collection.JavaConverters._ /* * Result metadata of a log append operation on the log */ case class LogAppendResult(info: LogAppendInfo, error: Option[Throwable] = None) { def errorCode = error match { case None => Errors.NONE.code case Some(e) => Errors.forException(e).code } } /* * Result metadata of a log read operation on the log * @param info @FetchDataInfo returned by the @Log read * @param hw high watermark of the local replica * @param readSize amount of data that was read from the log i.e. 
size of the fetch * @param isReadFromLogEnd true if the request read up to the log end offset snapshot * when the read was initiated, false otherwise * @param error Exception if error encountered while reading from the log */ case class LogReadResult(info: FetchDataInfo, hw: Long, readSize: Int, isReadFromLogEnd : Boolean, error: Option[Throwable] = None) { def errorCode = error match { case None => Errors.NONE.code case Some(e) => Errors.forException(e).code } override def toString = { "Fetch Data: [%s], HW: [%d], readSize: [%d], isReadFromLogEnd: [%b], error: [%s]" .format(info, hw, readSize, isReadFromLogEnd, error) } } object LogReadResult { val UnknownLogReadResult = LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, -1, false) } case class BecomeLeaderOrFollowerResult(responseMap: collection.Map[TopicPartition, Short], errorCode: Short) { override def toString = { "update results: [%s], global error: [%d]".format(responseMap, errorCode) } } object ReplicaManager { val HighWatermarkFilename = "replication-offset-checkpoint" val IsrChangePropagationBlackOut = 5000L val IsrChangePropagationInterval = 60000L } class ReplicaManager(val config: KafkaConfig, metrics: Metrics, time: Time, jTime: JTime, val zkUtils: ZkUtils, scheduler: Scheduler, val logManager: LogManager, val isShuttingDown: AtomicBoolean, quotaManager: ReplicationQuotaManager, threadNamePrefix: Option[String] = None) extends Logging with KafkaMetricsGroup { /* epoch of the controller that last changed the leader */ @volatile var controllerEpoch: Int = KafkaController.InitialControllerEpoch - 1 private val localBrokerId = config.brokerId private val allPartitions = new Pool[(String, Int), Partition](valueFactory = Some { case (t, p) => new Partition(t, p, time, this) }) private val replicaStateChangeLock = new Object val replicaFetcherManager = new ReplicaFetcherManager(config, this, metrics, jTime, threadNamePrefix, quotaManager) private val highWatermarkCheckPointThreadStarted = new AtomicBoolean(false) val highWatermarkCheckpoints = config.logDirs.map(dir => (new File(dir).getAbsolutePath, new OffsetCheckpoint(new File(dir, ReplicaManager.HighWatermarkFilename)))).toMap private var hwThreadInitialized = false this.logIdent = "[Replica Manager on Broker " + localBrokerId + "]: " val stateChangeLogger = KafkaController.stateChangeLogger private val isrChangeSet: mutable.Set[TopicAndPartition] = new mutable.HashSet[TopicAndPartition]() private val lastIsrChangeMs = new AtomicLong(System.currentTimeMillis()) private val lastIsrPropagationMs = new AtomicLong(System.currentTimeMillis()) val delayedProducePurgatory = DelayedOperationPurgatory[DelayedProduce]( purgatoryName = "Produce", config.brokerId, config.producerPurgatoryPurgeIntervalRequests) val delayedFetchPurgatory = DelayedOperationPurgatory[DelayedFetch]( purgatoryName = "Fetch", config.brokerId, config.fetchPurgatoryPurgeIntervalRequests) val leaderCount = newGauge( "LeaderCount", new Gauge[Int] { def value = { getLeaderPartitions().size } } ) val partitionCount = newGauge( "PartitionCount", new Gauge[Int] { def value = allPartitions.size } ) val underReplicatedPartitions = newGauge( "UnderReplicatedPartitions", new Gauge[Int] { def value = underReplicatedPartitionCount() } ) val isrExpandRate = newMeter("IsrExpandsPerSec", "expands", TimeUnit.SECONDS) val isrShrinkRate = newMeter("IsrShrinksPerSec", "shrinks", TimeUnit.SECONDS) def underReplicatedPartitionCount(): Int = { getLeaderPartitions().count(_.isUnderReplicated) } 
def startHighWaterMarksCheckPointThread() = { if(highWatermarkCheckPointThreadStarted.compareAndSet(false, true)) scheduler.schedule("highwatermark-checkpoint", checkpointHighWatermarks, period = config.replicaHighWatermarkCheckpointIntervalMs, unit = TimeUnit.MILLISECONDS) } def recordIsrChange(topicAndPartition: TopicAndPartition) { isrChangeSet synchronized { isrChangeSet += topicAndPartition lastIsrChangeMs.set(System.currentTimeMillis()) } } /** * This function periodically runs to see if ISR needs to be propagated. It propagates ISR when: * 1. There is ISR change not propagated yet. * 2. There is no ISR Change in the last five seconds, or it has been more than 60 seconds since the last ISR propagation. * This allows an occasional ISR change to be propagated within a few seconds, and avoids overwhelming controller and * other brokers when large amount of ISR change occurs. */ def maybePropagateIsrChanges() { val now = System.currentTimeMillis() isrChangeSet synchronized { if (isrChangeSet.nonEmpty && (lastIsrChangeMs.get() + ReplicaManager.IsrChangePropagationBlackOut < now || lastIsrPropagationMs.get() + ReplicaManager.IsrChangePropagationInterval < now)) { ReplicationUtils.propagateIsrChanges(zkUtils, isrChangeSet) isrChangeSet.clear() lastIsrPropagationMs.set(now) } } } /** * Try to complete some delayed produce requests with the request key; * this can be triggered when: * * 1. The partition HW has changed (for acks = -1) * 2. A follower replica's fetch operation is received (for acks > 1) */ def tryCompleteDelayedProduce(key: DelayedOperationKey) { val completed = delayedProducePurgatory.checkAndComplete(key) debug("Request key %s unblocked %d producer requests.".format(key.keyLabel, completed)) } /** * Try to complete some delayed fetch requests with the request key; * this can be triggered when: * * 1. The partition HW has changed (for regular fetch) * 2. A new message set is appended to the local log (for follower fetch) */ def tryCompleteDelayedFetch(key: DelayedOperationKey) { val completed = delayedFetchPurgatory.checkAndComplete(key) debug("Request key %s unblocked %d fetch requests.".format(key.keyLabel, completed)) } def startup() { // start ISR expiration thread scheduler.schedule("isr-expiration", maybeShrinkIsr, period = config.replicaLagTimeMaxMs, unit = TimeUnit.MILLISECONDS) scheduler.schedule("isr-change-propagation", maybePropagateIsrChanges, period = 2500L, unit = TimeUnit.MILLISECONDS) } def stopReplica(topic: String, partitionId: Int, deletePartition: Boolean): Short = { stateChangeLogger.trace("Broker %d handling stop replica (delete=%s) for partition [%s,%d]".format(localBrokerId, deletePartition.toString, topic, partitionId)) val errorCode = Errors.NONE.code getPartition(topic, partitionId) match { case Some(_) => if (deletePartition) { val removedPartition = allPartitions.remove((topic, partitionId)) if (removedPartition != null) { removedPartition.delete() // this will delete the local log val topicHasPartitions = allPartitions.keys.exists { case (t, _) => topic == t } if (!topicHasPartitions) BrokerTopicStats.removeMetrics(topic) } } case None => // Delete log and corresponding folders in case replica manager doesn't hold them anymore. // This could happen when topic is being deleted while broker is down and recovers. 
if (deletePartition) { val topicAndPartition = TopicAndPartition(topic, partitionId) if(logManager.getLog(topicAndPartition).isDefined) { logManager.deleteLog(topicAndPartition) } } stateChangeLogger.trace("Broker %d ignoring stop replica (delete=%s) for partition [%s,%d] as replica doesn't exist on broker" .format(localBrokerId, deletePartition, topic, partitionId)) } stateChangeLogger.trace("Broker %d finished handling stop replica (delete=%s) for partition [%s,%d]" .format(localBrokerId, deletePartition, topic, partitionId)) errorCode } def stopReplicas(stopReplicaRequest: StopReplicaRequest): (mutable.Map[TopicPartition, Short], Short) = { replicaStateChangeLock synchronized { val responseMap = new collection.mutable.HashMap[TopicPartition, Short] if(stopReplicaRequest.controllerEpoch() < controllerEpoch) { stateChangeLogger.warn("Broker %d received stop replica request from an old controller epoch %d. Latest known controller epoch is %d" .format(localBrokerId, stopReplicaRequest.controllerEpoch, controllerEpoch)) (responseMap, Errors.STALE_CONTROLLER_EPOCH.code) } else { val partitions = stopReplicaRequest.partitions.asScala controllerEpoch = stopReplicaRequest.controllerEpoch // First stop fetchers for all partitions, then stop the corresponding replicas replicaFetcherManager.removeFetcherForPartitions(partitions) for (topicPartition <- partitions){ val errorCode = stopReplica(topicPartition.topic, topicPartition.partition, stopReplicaRequest.deletePartitions) responseMap.put(topicPartition, errorCode) } (responseMap, Errors.NONE.code) } } } def getOrCreatePartition(topic: String, partitionId: Int): Partition = { allPartitions.getAndMaybePut((topic, partitionId)) } def getPartition(topic: String, partitionId: Int): Option[Partition] = { val partition = allPartitions.get((topic, partitionId)) if (partition == null) None else Some(partition) } def getReplicaOrException(topic: String, partition: Int): Replica = { val replicaOpt = getReplica(topic, partition) if(replicaOpt.isDefined) replicaOpt.get else throw new ReplicaNotAvailableException("Replica %d is not available for partition [%s,%d]".format(config.brokerId, topic, partition)) } def getLeaderReplicaIfLocal(topic: String, partitionId: Int): Replica = { val partitionOpt = getPartition(topic, partitionId) partitionOpt match { case None => throw new UnknownTopicOrPartitionException("Partition [%s,%d] doesn't exist on %d".format(topic, partitionId, config.brokerId)) case Some(partition) => partition.leaderReplicaIfLocal match { case Some(leaderReplica) => leaderReplica case None => throw new NotLeaderForPartitionException("Leader not local for partition [%s,%d] on broker %d" .format(topic, partitionId, config.brokerId)) } } } def getReplica(topic: String, partitionId: Int, replicaId: Int = config.brokerId): Option[Replica] = { val partitionOpt = getPartition(topic, partitionId) partitionOpt match { case None => None case Some(partition) => partition.getReplica(replicaId) } } /** * Append messages to leader replicas of the partition, and wait for them to be replicated to other replicas; * the callback function will be triggered either when timeout or the required acks are satisfied */ def appendMessages(timeout: Long, requiredAcks: Short, internalTopicsAllowed: Boolean, messagesPerPartition: Map[TopicPartition, MessageSet], responseCallback: Map[TopicPartition, PartitionResponse] => Unit) { if (isValidRequiredAcks(requiredAcks)) { val sTime = SystemTime.milliseconds val localProduceResults = appendToLocalLog(internalTopicsAllowed, 
messagesPerPartition, requiredAcks) debug("Produce to local log in %d ms".format(SystemTime.milliseconds - sTime)) val produceStatus = localProduceResults.map { case (topicPartition, result) => topicPartition -> ProducePartitionStatus( result.info.lastOffset + 1, // required offset new PartitionResponse(result.errorCode, result.info.firstOffset, result.info.logAppendTime)) // response status } if (delayedRequestRequired(requiredAcks, messagesPerPartition, localProduceResults)) { // create delayed produce operation val produceMetadata = ProduceMetadata(requiredAcks, produceStatus) val delayedProduce = new DelayedProduce(timeout, produceMetadata, this, responseCallback) // create a list of (topic, partition) pairs to use as keys for this delayed produce operation val producerRequestKeys = messagesPerPartition.keys.map(new TopicPartitionOperationKey(_)).toSeq // try to complete the request immediately, otherwise put it into the purgatory // this is because while the delayed produce operation is being created, new // requests may arrive and hence make this operation completable. delayedProducePurgatory.tryCompleteElseWatch(delayedProduce, producerRequestKeys) } else { // we can respond immediately val produceResponseStatus = produceStatus.mapValues(status => status.responseStatus) responseCallback(produceResponseStatus) } } else { // If required.acks is outside accepted range, something is wrong with the client // Just return an error and don't handle the request at all val responseStatus = messagesPerPartition.map { case (topicAndPartition, _) => topicAndPartition -> new PartitionResponse(Errors.INVALID_REQUIRED_ACKS.code, LogAppendInfo.UnknownLogAppendInfo.firstOffset, Message.NoTimestamp) } responseCallback(responseStatus) } } // If all the following conditions are true, we need to put a delayed produce request and wait for replication to complete // // 1. required acks = -1 // 2. there is data to append // 3. 
at least one partition append was successful (fewer errors than partitions) private def delayedRequestRequired(requiredAcks: Short, messagesPerPartition: Map[TopicPartition, MessageSet], localProduceResults: Map[TopicPartition, LogAppendResult]): Boolean = { requiredAcks == -1 && messagesPerPartition.nonEmpty && localProduceResults.values.count(_.error.isDefined) < messagesPerPartition.size } private def isValidRequiredAcks(requiredAcks: Short): Boolean = { requiredAcks == -1 || requiredAcks == 1 || requiredAcks == 0 } /** * Append the messages to the local replica logs */ private def appendToLocalLog(internalTopicsAllowed: Boolean, messagesPerPartition: Map[TopicPartition, MessageSet], requiredAcks: Short): Map[TopicPartition, LogAppendResult] = { trace("Append [%s] to local log ".format(messagesPerPartition)) messagesPerPartition.map { case (topicPartition, messages) => BrokerTopicStats.getBrokerTopicStats(topicPartition.topic).totalProduceRequestRate.mark() BrokerTopicStats.getBrokerAllTopicsStats().totalProduceRequestRate.mark() // reject appending to internal topics if it is not allowed if (Topic.isInternal(topicPartition.topic) && !internalTopicsAllowed) { (topicPartition, LogAppendResult( LogAppendInfo.UnknownLogAppendInfo, Some(new InvalidTopicException("Cannot append to internal topic %s".format(topicPartition.topic))))) } else { try { val partitionOpt = getPartition(topicPartition.topic, topicPartition.partition) val info = partitionOpt match { case Some(partition) => partition.appendMessagesToLeader(messages.asInstanceOf[ByteBufferMessageSet], requiredAcks) case None => throw new UnknownTopicOrPartitionException("Partition %s doesn't exist on %d" .format(topicPartition, localBrokerId)) } val numAppendedMessages = if (info.firstOffset == -1L || info.lastOffset == -1L) 0 else info.lastOffset - info.firstOffset + 1 // update stats for successfully appended bytes and messages as bytesInRate and messageInRate BrokerTopicStats.getBrokerTopicStats(topicPartition.topic).bytesInRate.mark(messages.sizeInBytes) BrokerTopicStats.getBrokerAllTopicsStats.bytesInRate.mark(messages.sizeInBytes) BrokerTopicStats.getBrokerTopicStats(topicPartition.topic).messagesInRate.mark(numAppendedMessages) BrokerTopicStats.getBrokerAllTopicsStats.messagesInRate.mark(numAppendedMessages) trace("%d bytes written to log %s-%d beginning at offset %d and ending at offset %d" .format(messages.sizeInBytes, topicPartition.topic, topicPartition.partition, info.firstOffset, info.lastOffset)) (topicPartition, LogAppendResult(info)) } catch { // NOTE: Failed produce requests metric is not incremented for known exceptions // it is supposed to indicate un-expected failures of a broker in handling a produce request case e: KafkaStorageException => fatal("Halting due to unrecoverable I/O error while handling produce request: ", e) Runtime.getRuntime.halt(1) (topicPartition, null) case e@ (_: UnknownTopicOrPartitionException | _: NotLeaderForPartitionException | _: RecordTooLargeException | _: RecordBatchTooLargeException | _: CorruptRecordException | _: InvalidMessageException | _: InvalidTimestampException) => (topicPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(e))) case t: Throwable => BrokerTopicStats.getBrokerTopicStats(topicPartition.topic).failedProduceRequestRate.mark() BrokerTopicStats.getBrokerAllTopicsStats.failedProduceRequestRate.mark() error("Error processing append operation on partition %s".format(topicPartition), t) (topicPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, 
Some(t))) } } } } /** * Fetch messages from the leader replica, and wait until enough data can be fetched and return; * the callback function will be triggered either when timeout or required fetch info is satisfied */ def fetchMessages(timeout: Long, replicaId: Int, fetchMinBytes: Int, fetchMaxBytes: Int, hardMaxBytesLimit: Boolean, fetchInfos: Seq[(TopicPartition, PartitionData)], quota: ReplicaQuota = UnboundedQuota, responseCallback: Seq[(TopicAndPartition, FetchResponsePartitionData)] => Unit) { val isFromFollower = replicaId >= 0 val fetchOnlyFromLeader: Boolean = replicaId != Request.DebuggingConsumerId val fetchOnlyCommitted: Boolean = ! Request.isValidBrokerId(replicaId) // read from local logs val logReadResults = readFromLocalLog( replicaId = replicaId, fetchOnlyFromLeader = fetchOnlyFromLeader, readOnlyCommitted = fetchOnlyCommitted, fetchMaxBytes = fetchMaxBytes, hardMaxBytesLimit = hardMaxBytesLimit, readPartitionInfo = fetchInfos, quota = quota) // if the fetch comes from the follower, // update its corresponding log end offset if(Request.isValidBrokerId(replicaId)) updateFollowerLogReadResults(replicaId, logReadResults) // check if this fetch request can be satisfied right away val logReadResultValues = logReadResults.map { case (_, v) => v } val bytesReadable = logReadResultValues.map(_.info.messageSet.sizeInBytes).sum val errorReadingData = logReadResultValues.foldLeft(false) ((errorIncurred, readResult) => errorIncurred || (readResult.errorCode != Errors.NONE.code)) // respond immediately if 1) fetch request does not want to wait // 2) fetch request does not require any data // 3) has enough data to respond // 4) some error happens while reading data if (timeout <= 0 || fetchInfos.isEmpty || bytesReadable >= fetchMinBytes || errorReadingData) { val fetchPartitionData = logReadResults.map { case (tp, result) => tp -> FetchResponsePartitionData(result.errorCode, result.hw, result.info.messageSet) } responseCallback(fetchPartitionData) } else { // construct the fetch results from the read results val fetchPartitionStatus = logReadResults.map { case (topicAndPartition, result) => val fetchInfo = fetchInfos.collectFirst { case (tp, v) if TopicAndPartition(tp.topic, tp.partition) == topicAndPartition => v }.getOrElse(sys.error(s"Partition $topicAndPartition not found in fetchInfos")) (topicAndPartition, FetchPartitionStatus(result.info.fetchOffsetMetadata, fetchInfo)) } val fetchMetadata = FetchMetadata(fetchMinBytes, fetchMaxBytes, hardMaxBytesLimit, fetchOnlyFromLeader, fetchOnlyCommitted, isFromFollower, replicaId, fetchPartitionStatus) val delayedFetch = new DelayedFetch(timeout, fetchMetadata, this, quota, responseCallback) // create a list of (topic, partition) pairs to use as keys for this delayed fetch operation val delayedFetchKeys = fetchPartitionStatus.map { case (tp, _) => new TopicPartitionOperationKey(tp) } // try to complete the request immediately, otherwise put it into the purgatory; // this is because while the delayed fetch operation is being created, new requests // may arrive and hence make this operation completable. 
delayedFetchPurgatory.tryCompleteElseWatch(delayedFetch, delayedFetchKeys) } } /** * Read from multiple topic partitions at the given offset up to maxSize bytes */ def readFromLocalLog(replicaId: Int, fetchOnlyFromLeader: Boolean, readOnlyCommitted: Boolean, fetchMaxBytes: Int, hardMaxBytesLimit: Boolean, readPartitionInfo: Seq[(TopicPartition, PartitionData)], quota: ReplicaQuota): Seq[(TopicAndPartition, LogReadResult)] = { def read(tp: TopicPartition, fetchInfo: PartitionData, limitBytes: Int, minOneMessage: Boolean): LogReadResult = { val topic = tp.topic val partition = tp.partition val offset = fetchInfo.offset val partitionFetchSize = fetchInfo.maxBytes BrokerTopicStats.getBrokerTopicStats(topic).totalFetchRequestRate.mark() BrokerTopicStats.getBrokerAllTopicsStats().totalFetchRequestRate.mark() try { trace(s"Fetching log segment for partition $tp, offset $offset, partition fetch size $partitionFetchSize, " + s"remaining response limit $limitBytes" + (if (minOneMessage) s", ignoring response/partition size limits" else "")) // decide whether to only fetch from leader val localReplica = if (fetchOnlyFromLeader) getLeaderReplicaIfLocal(topic, partition) else getReplicaOrException(topic, partition) // decide whether to only fetch committed data (i.e. messages below high watermark) val maxOffsetOpt = if (readOnlyCommitted) Some(localReplica.highWatermark.messageOffset) else None /* Read the LogOffsetMetadata prior to performing the read from the log. * We use the LogOffsetMetadata to determine if a particular replica is in-sync or not. * Using the log end offset after performing the read can lead to a race condition * where data gets appended to the log immediately after the replica has consumed from it * This can cause a replica to always be out of sync. */ val initialLogEndOffset = localReplica.logEndOffset val logReadInfo = localReplica.log match { case Some(log) => val adjustedFetchSize = math.min(partitionFetchSize, limitBytes) // Try the read first, this tells us whether we need all of adjustedFetchSize for this partition val fetch = log.read(offset, adjustedFetchSize, maxOffsetOpt, minOneMessage) // If the partition is being throttled, simply return an empty set. 
if (shouldLeaderThrottle(quota, TopicAndPartition(tp.topic, tp.partition), replicaId)) FetchDataInfo(fetch.fetchOffsetMetadata, MessageSet.Empty) // For FetchRequest version 3, we replace incomplete message sets with an empty one as consumers can make // progress in such cases and don't need to report a `RecordTooLargeException` else if (!hardMaxBytesLimit && fetch.firstMessageSetIncomplete) FetchDataInfo(fetch.fetchOffsetMetadata, MessageSet.Empty) else fetch case None => error(s"Leader for partition $tp does not have a local log") FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty) } val readToEndOfLog = initialLogEndOffset.messageOffset - logReadInfo.fetchOffsetMetadata.messageOffset <= 0 LogReadResult(logReadInfo, localReplica.highWatermark.messageOffset, partitionFetchSize, readToEndOfLog, None) } catch { // NOTE: Failed fetch requests metric is not incremented for known exceptions since it // is supposed to indicate un-expected failure of a broker in handling a fetch request case e@ (_: UnknownTopicOrPartitionException | _: NotLeaderForPartitionException | _: ReplicaNotAvailableException | _: OffsetOutOfRangeException) => LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, partitionFetchSize, false, Some(e)) case e: Throwable => BrokerTopicStats.getBrokerTopicStats(topic).failedFetchRequestRate.mark() BrokerTopicStats.getBrokerAllTopicsStats().failedFetchRequestRate.mark() error(s"Error processing fetch operation on partition ${tp}, offset $offset", e) LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, partitionFetchSize, false, Some(e)) } } var limitBytes = fetchMaxBytes val result = new mutable.ArrayBuffer[(TopicAndPartition, LogReadResult)] var minOneMessage = !hardMaxBytesLimit readPartitionInfo.foreach { case (tp, fetchInfo) => val readResult = read(tp, fetchInfo, limitBytes, minOneMessage) val messageSetSize = readResult.info.messageSet.sizeInBytes // Once we read from a non-empty partition, we stop ignoring request and partition level size limits if (messageSetSize > 0) minOneMessage = false limitBytes = math.max(0, limitBytes - messageSetSize) result += (TopicAndPartition(tp.topic, tp.partition) -> readResult) } result } /** * To avoid ISR thrashing, we only throttle a replica on the leader if it's in the throttled replica list, * the quota is exceeded and the replica is not in sync. */ def shouldLeaderThrottle(quota: ReplicaQuota, topicPartition: TopicAndPartition, replicaId: Int): Boolean = { val isReplicaInSync = getPartition(topicPartition.topic, topicPartition.partition).flatMap { partition => partition.getReplica(replicaId).map(partition.inSyncReplicas.contains) }.getOrElse(false) quota.isThrottled(topicPartition) && quota.isQuotaExceeded && !isReplicaInSync } def getMessageFormatVersion(topicAndPartition: TopicAndPartition): Option[Byte] = getReplica(topicAndPartition.topic, topicAndPartition.partition).flatMap { replica => replica.log.map(_.config.messageFormatVersion.messageFormatVersion) } def maybeUpdateMetadataCache(correlationId: Int, updateMetadataRequest: UpdateMetadataRequest, metadataCache: MetadataCache) { replicaStateChangeLock synchronized { if(updateMetadataRequest.controllerEpoch < controllerEpoch) { val stateControllerEpochErrorMessage = ("Broker %d received update metadata request with correlation id %d from an " + "old controller %d with epoch %d. 
Latest known controller epoch is %d").format(localBrokerId, correlationId, updateMetadataRequest.controllerId, updateMetadataRequest.controllerEpoch, controllerEpoch) stateChangeLogger.warn(stateControllerEpochErrorMessage) throw new ControllerMovedException(stateControllerEpochErrorMessage) } else { metadataCache.updateCache(correlationId, updateMetadataRequest) controllerEpoch = updateMetadataRequest.controllerEpoch } } } def becomeLeaderOrFollower(correlationId: Int,leaderAndISRRequest: LeaderAndIsrRequest, metadataCache: MetadataCache, onLeadershipChange: (Iterable[Partition], Iterable[Partition]) => Unit): BecomeLeaderOrFollowerResult = { leaderAndISRRequest.partitionStates.asScala.foreach { case (topicPartition, stateInfo) => stateChangeLogger.trace("Broker %d received LeaderAndIsr request %s correlation id %d from controller %d epoch %d for partition [%s,%d]" .format(localBrokerId, stateInfo, correlationId, leaderAndISRRequest.controllerId, leaderAndISRRequest.controllerEpoch, topicPartition.topic, topicPartition.partition)) } replicaStateChangeLock synchronized { val responseMap = new mutable.HashMap[TopicPartition, Short] if (leaderAndISRRequest.controllerEpoch < controllerEpoch) { stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d since " + "its controller epoch %d is old. Latest known controller epoch is %d").format(localBrokerId, leaderAndISRRequest.controllerId, correlationId, leaderAndISRRequest.controllerEpoch, controllerEpoch)) BecomeLeaderOrFollowerResult(responseMap, Errors.STALE_CONTROLLER_EPOCH.code) } else { val controllerId = leaderAndISRRequest.controllerId controllerEpoch = leaderAndISRRequest.controllerEpoch // First check partition's leader epoch val partitionState = new mutable.HashMap[Partition, PartitionState]() leaderAndISRRequest.partitionStates.asScala.foreach { case (topicPartition, stateInfo) => val partition = getOrCreatePartition(topicPartition.topic, topicPartition.partition) val partitionLeaderEpoch = partition.getLeaderEpoch() // If the leader epoch is valid record the epoch of the controller that made the leadership decision. 
// This is useful while updating the isr to maintain the decision maker controller's epoch in the zookeeper path if (partitionLeaderEpoch < stateInfo.leaderEpoch) { if(stateInfo.replicas.contains(config.brokerId)) partitionState.put(partition, stateInfo) else { stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d " + "epoch %d for partition [%s,%d] as itself is not in assigned replica list %s") .format(localBrokerId, controllerId, correlationId, leaderAndISRRequest.controllerEpoch, topicPartition.topic, topicPartition.partition, stateInfo.replicas.asScala.mkString(","))) responseMap.put(topicPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code) } } else { // Otherwise record the error code in response stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d " + "epoch %d for partition [%s,%d] since its associated leader epoch %d is not higher than the current leader epoch %d") .format(localBrokerId, controllerId, correlationId, leaderAndISRRequest.controllerEpoch, topicPartition.topic, topicPartition.partition, stateInfo.leaderEpoch, partitionLeaderEpoch)) responseMap.put(topicPartition, Errors.STALE_CONTROLLER_EPOCH.code) } } val partitionsTobeLeader = partitionState.filter { case (_, stateInfo) => stateInfo.leader == config.brokerId } val partitionsToBeFollower = partitionState -- partitionsTobeLeader.keys val partitionsBecomeLeader = if (partitionsTobeLeader.nonEmpty) makeLeaders(controllerId, controllerEpoch, partitionsTobeLeader, correlationId, responseMap) else Set.empty[Partition] val partitionsBecomeFollower = if (partitionsToBeFollower.nonEmpty) makeFollowers(controllerId, controllerEpoch, partitionsToBeFollower, correlationId, responseMap, metadataCache) else Set.empty[Partition] // we initialize highwatermark thread after the first leaderisrrequest. This ensures that all the partitions // have been completely populated before starting the checkpointing there by avoiding weird race conditions if (!hwThreadInitialized) { startHighWaterMarksCheckPointThread() hwThreadInitialized = true } replicaFetcherManager.shutdownIdleFetcherThreads() onLeadershipChange(partitionsBecomeLeader, partitionsBecomeFollower) BecomeLeaderOrFollowerResult(responseMap, Errors.NONE.code) } } } /* * Make the current broker to become leader for a given set of partitions by: * * 1. Stop fetchers for these partitions * 2. Update the partition metadata in cache * 3. Add these partitions to the leader partitions set * * If an unexpected error is thrown in this function, it will be propagated to KafkaApis where * the error message will be set on each partition since we do not know which partition caused it. 
Otherwise, * return the set of partitions that are made leader due to this method * * TODO: the above may need to be fixed later */ private def makeLeaders(controllerId: Int, epoch: Int, partitionState: Map[Partition, PartitionState], correlationId: Int, responseMap: mutable.Map[TopicPartition, Short]): Set[Partition] = { partitionState.foreach(state => stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " + "starting the become-leader transition for partition %s") .format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))) for (partition <- partitionState.keys) responseMap.put(new TopicPartition(partition.topic, partition.partitionId), Errors.NONE.code) val partitionsToMakeLeaders: mutable.Set[Partition] = mutable.Set() try { // First stop fetchers for all the partitions replicaFetcherManager.removeFetcherForPartitions(partitionState.keySet.map(p => new TopicPartition(p.topic, p.partitionId))) // Update the partition information to be the leader partitionState.foreach{ case (partition, partitionStateInfo) => if (partition.makeLeader(controllerId, partitionStateInfo, correlationId)) partitionsToMakeLeaders += partition else stateChangeLogger.info(("Broker %d skipped the become-leader state change after marking its partition as leader with correlation id %d from " + "controller %d epoch %d for partition %s since it is already the leader for the partition.") .format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(partition.topic, partition.partitionId))); } partitionsToMakeLeaders.foreach { partition => stateChangeLogger.trace(("Broker %d stopped fetchers as part of become-leader request from controller " + "%d epoch %d with correlation id %d for partition %s") .format(localBrokerId, controllerId, epoch, correlationId, TopicAndPartition(partition.topic, partition.partitionId))) } } catch { case e: Throwable => partitionState.foreach { state => val errorMsg = ("Error on broker %d while processing LeaderAndIsr request correlationId %d received from controller %d" + " epoch %d for partition %s").format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)) stateChangeLogger.error(errorMsg, e) } // Re-throw the exception for it to be caught in KafkaApis throw e } partitionState.foreach { state => stateChangeLogger.trace(("Broker %d completed LeaderAndIsr request correlationId %d from controller %d epoch %d " + "for the become-leader transition for partition %s") .format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId))) } partitionsToMakeLeaders } /* * Make the current broker to become follower for a given set of partitions by: * * 1. Remove these partitions from the leader partitions set. * 2. Mark the replicas as followers so that no more data can be added from the producer clients. * 3. Stop fetchers for these partitions so that no more data can be added by the replica fetcher threads. * 4. Truncate the log and checkpoint offsets for these partitions. * 5. Clear the produce and fetch requests in the purgatory * 6. If the broker is not shutting down, add the fetcher to the new leaders. 
* * The ordering of doing these steps make sure that the replicas in transition will not * take any more messages before checkpointing offsets so that all messages before the checkpoint * are guaranteed to be flushed to disks * * If an unexpected error is thrown in this function, it will be propagated to KafkaApis where * the error message will be set on each partition since we do not know which partition caused it. Otherwise, * return the set of partitions that are made follower due to this method */ private def makeFollowers(controllerId: Int, epoch: Int, partitionState: Map[Partition, PartitionState], correlationId: Int, responseMap: mutable.Map[TopicPartition, Short], metadataCache: MetadataCache) : Set[Partition] = { partitionState.foreach { state => stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " + "starting the become-follower transition for partition %s") .format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId))) } for (partition <- partitionState.keys) responseMap.put(new TopicPartition(partition.topic, partition.partitionId), Errors.NONE.code) val partitionsToMakeFollower: mutable.Set[Partition] = mutable.Set() try { // TODO: Delete leaders from LeaderAndIsrRequest partitionState.foreach{ case (partition, partitionStateInfo) => val newLeaderBrokerId = partitionStateInfo.leader metadataCache.getAliveBrokers.find(_.id == newLeaderBrokerId) match { // Only change partition state when the leader is available case Some(_) => if (partition.makeFollower(controllerId, partitionStateInfo, correlationId)) partitionsToMakeFollower += partition else stateChangeLogger.info(("Broker %d skipped the become-follower state change after marking its partition as follower with correlation id %d from " + "controller %d epoch %d for partition [%s,%d] since the new leader %d is the same as the old leader") .format(localBrokerId, correlationId, controllerId, partitionStateInfo.controllerEpoch, partition.topic, partition.partitionId, newLeaderBrokerId)) case None => // The leader broker should always be present in the metadata cache. // If not, we should record the error message and abort the transition process for this partition stateChangeLogger.error(("Broker %d received LeaderAndIsrRequest with correlation id %d from controller" + " %d epoch %d for partition [%s,%d] but cannot become follower since the new leader %d is unavailable.") .format(localBrokerId, correlationId, controllerId, partitionStateInfo.controllerEpoch, partition.topic, partition.partitionId, newLeaderBrokerId)) // Create the local replica even if the leader is unavailable. 
This is required to ensure that we include // the partition's high watermark in the checkpoint file (see KAFKA-1647) partition.getOrCreateReplica() } } replicaFetcherManager.removeFetcherForPartitions(partitionsToMakeFollower.map(p => new TopicPartition(p.topic, p.partitionId))) partitionsToMakeFollower.foreach { partition => stateChangeLogger.trace(("Broker %d stopped fetchers as part of become-follower request from controller " + "%d epoch %d with correlation id %d for partition %s") .format(localBrokerId, controllerId, epoch, correlationId, TopicAndPartition(partition.topic, partition.partitionId))) } logManager.truncateTo(partitionsToMakeFollower.map(partition => (new TopicAndPartition(partition), partition.getOrCreateReplica().highWatermark.messageOffset)).toMap) partitionsToMakeFollower.foreach { partition => val topicPartitionOperationKey = new TopicPartitionOperationKey(partition.topic, partition.partitionId) tryCompleteDelayedProduce(topicPartitionOperationKey) tryCompleteDelayedFetch(topicPartitionOperationKey) } partitionsToMakeFollower.foreach { partition => stateChangeLogger.trace(("Broker %d truncated logs and checkpointed recovery boundaries for partition [%s,%d] as part of " + "become-follower request with correlation id %d from controller %d epoch %d").format(localBrokerId, partition.topic, partition.partitionId, correlationId, controllerId, epoch)) } if (isShuttingDown.get()) { partitionsToMakeFollower.foreach { partition => stateChangeLogger.trace(("Broker %d skipped the adding-fetcher step of the become-follower state change with correlation id %d from " + "controller %d epoch %d for partition [%s,%d] since it is shutting down").format(localBrokerId, correlationId, controllerId, epoch, partition.topic, partition.partitionId)) } } else { // we do not need to check if the leader exists again since this has been done at the beginning of this process val partitionsToMakeFollowerWithLeaderAndOffset = partitionsToMakeFollower.map(partition => new TopicPartition(partition.topic, partition.partitionId) -> BrokerAndInitialOffset( metadataCache.getAliveBrokers.find(_.id == partition.leaderReplicaIdOpt.get).get.getBrokerEndPoint(config.interBrokerSecurityProtocol), partition.getReplica().get.logEndOffset.messageOffset)).toMap replicaFetcherManager.addFetcherForPartitions(partitionsToMakeFollowerWithLeaderAndOffset) partitionsToMakeFollower.foreach { partition => stateChangeLogger.trace(("Broker %d started fetcher to new leader as part of become-follower request from controller " + "%d epoch %d with correlation id %d for partition [%s,%d]") .format(localBrokerId, controllerId, epoch, correlationId, partition.topic, partition.partitionId)) } } } catch { case e: Throwable => val errorMsg = ("Error on broker %d while processing LeaderAndIsr request with correlationId %d received from controller %d " + "epoch %d").format(localBrokerId, correlationId, controllerId, epoch) stateChangeLogger.error(errorMsg, e) // Re-throw the exception for it to be caught in KafkaApis throw e } partitionState.foreach { state => stateChangeLogger.trace(("Broker %d completed LeaderAndIsr request correlationId %d from controller %d epoch %d " + "for the become-follower transition for partition %s") .format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId))) } partitionsToMakeFollower } private def maybeShrinkIsr(): Unit = { trace("Evaluating ISR list of partitions to see which replicas can be removed from the ISR") 
allPartitions.values.foreach(partition => partition.maybeShrinkIsr(config.replicaLagTimeMaxMs)) } private def updateFollowerLogReadResults(replicaId: Int, readResults: Seq[(TopicAndPartition, LogReadResult)]) { debug("Recording follower broker %d log read results: %s ".format(replicaId, readResults)) readResults.foreach { case (topicAndPartition, readResult) => getPartition(topicAndPartition.topic, topicAndPartition.partition) match { case Some(partition) => partition.updateReplicaLogReadResult(replicaId, readResult) // for producer requests with ack > 1, we need to check // if they can be unblocked after some follower's log end offsets have moved tryCompleteDelayedProduce(new TopicPartitionOperationKey(topicAndPartition)) case None => warn("While recording the replica LEO, the partition %s hasn't been created.".format(topicAndPartition)) } } } private def getLeaderPartitions() : List[Partition] = { allPartitions.values.filter(_.leaderReplicaIfLocal().isDefined).toList } // Flushes the highwatermark value for all partitions to the highwatermark file def checkpointHighWatermarks() { val replicas = allPartitions.values.flatMap(_.getReplica(config.brokerId)) val replicasByDir = replicas.filter(_.log.isDefined).groupBy(_.log.get.dir.getParentFile.getAbsolutePath) for ((dir, reps) <- replicasByDir) { val hwms = reps.map(r => new TopicAndPartition(r) -> r.highWatermark.messageOffset).toMap try { highWatermarkCheckpoints(dir).write(hwms) } catch { case e: IOException => fatal("Error writing to highwatermark file: ", e) Runtime.getRuntime().halt(1) } } } // High watermark do not need to be checkpointed only when under unit tests def shutdown(checkpointHW: Boolean = true) { info("Shutting down") replicaFetcherManager.shutdown() delayedFetchPurgatory.shutdown() delayedProducePurgatory.shutdown() if (checkpointHW) checkpointHighWatermarks() info("Shut down completely") } }
geeag/kafka
core/src/main/scala/kafka/server/ReplicaManager.scala
Scala
apache-2.0
50,179
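As the comment on maybePropagateIsrChanges explains, ReplicaManager batches ISR changes and only propagates them after a five-second quiet period since the last change, or once a minute since the last propagation, to avoid overwhelming the controller. A minimal standalone sketch of that decision predicate (not Kafka code; constants mirror ReplicaManager.IsrChangePropagationBlackOut and IsrChangePropagationInterval):

object IsrPropagationSketch {
  val BlackOutMs = 5000L
  val IntervalMs = 60000L

  def shouldPropagate(pendingChanges: Boolean, lastChangeMs: Long, lastPropagationMs: Long, nowMs: Long): Boolean =
    pendingChanges &&
      (lastChangeMs + BlackOutMs < nowMs ||       // quiet for 5s since the last ISR change
       lastPropagationMs + IntervalMs < nowMs)    // or 60s since the last propagation

  def main(args: Array[String]): Unit = {
    val now = System.currentTimeMillis()
    println(shouldPropagate(pendingChanges = true, lastChangeMs = now - 6000, lastPropagationMs = now - 1000, nowMs = now)) // true
    println(shouldPropagate(pendingChanges = true, lastChangeMs = now - 1000, lastPropagationMs = now - 1000, nowMs = now)) // false
  }
}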
import sbt._, Keys._
import de.johoop.jacoco4sbt.JacocoPlugin._

object TestCoverage {
  val settings = jacoco.settings
}
longcao/framian
project/TestCoverage.scala
Scala
apache-2.0
123
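TestCoverage simply re-exports the jacoco4sbt default settings. A build definition in the same sbt 0.13 style would mix them into a project roughly as follows; the project name and layout are assumptions, only TestCoverage.settings comes from the file above:

import sbt._
import Keys._

object CoverageWiring extends Build {
  // hypothetical project wiring that applies the jacoco coverage settings
  lazy val framian = Project("framian", file("framian"))
    .settings(TestCoverage.settings: _*)
}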
package com.guidewire.tools.chronos.client.api.v2

import scalaz._
import scala.concurrent.{ExecutionContext, Future}
import play.api.libs.json._

import com.guidewire.tools.chronos.client._
import com.guidewire.tools.chronos.client.api._

/**
 *
 */
object Debug {
  import JsonUtils._
  import HttpUtils._

  /**
   * Constructs a [[scala.Predef.String]] representing the URI for this resource.
   *
   * @param connection used to construct the full URI
   * @return a [[scala.Predef.String]] representing the URI for this resource
   */
  def uriPing(connection: Connection): String = {
    require(connection ne null, s"Missing connection")
    connection.uri(s"/ping")
  }

  /**
   * Makes the equivalent call to `GET /ping` and provides the response at a future time.
   *
   * @param connection used to construct the full URI
   * @param executor the [[scala.concurrent.ExecutionContext]] used to process the request
   * @return A [[scala.concurrent.Future]] with a scalaz [[scalaz.Validation]] object providing the results of
   *         the request or an error
   */
  def ping(implicit connection: Connection, executor: ExecutionContext = ExecutionContext.Implicits.global): Future[Validation[Error, Boolean]] =
    httpGet[Boolean](connection)(uriPing)(processPing)

  /**
   * Performs the processing of the payload from a call to `GET /ping`.
   *
   * @param statusCode the HTTP status code of the response
   * @param response the raw response payload
   * @return a [[scalaz.Validation]] holding `true` if the returned payload is the string `pong`,
   *         composable using normal scalaz methods
   */
  def processPing(statusCode: Int, response: Array[Byte]): Validation[Error, Boolean] =
    processSingleStringHttpGetResponse(statusCode, response).map(x => (x ne null) && x.trim() == "pong")
}
Guidewire/chronos-client
src/main/scala/com/guidewire/tools/chronos/client/api/v2/Debug.scala
Scala
apache-2.0
1,725
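A hedged caller sketch for Debug.ping: how a Connection instance is obtained is an assumption (it is simply passed in here), and only the ping call and the Validation handling mirror the signatures shown above.

import scala.concurrent.ExecutionContext.Implicits.global
import com.guidewire.tools.chronos.client._
import com.guidewire.tools.chronos.client.api.v2.Debug

object PingExample {
  // Fold the scalaz Validation carried by the Future: failure first, success second.
  def checkChronos(implicit connection: Connection): Unit =
    Debug.ping.foreach { validation =>
      validation.fold(
        error => println(s"ping failed: $error"),
        alive => println(s"chronos alive: $alive")
      )
    }
}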
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive import java.util.Locale import scala.util.{Failure, Success, Try} import scala.util.control.NonFatal import org.apache.hadoop.conf.Configuration import org.apache.hadoop.hive.ql.exec.{UDAF, UDF} import org.apache.hadoop.hive.ql.exec.{FunctionRegistry => HiveFunctionRegistry} import org.apache.hadoop.hive.ql.udf.generic.{AbstractGenericUDAFResolver, GenericUDF, GenericUDTF} import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.FunctionIdentifier import org.apache.spark.sql.catalyst.analysis.FunctionRegistry import org.apache.spark.sql.catalyst.catalog.{CatalogFunction, FunctionResourceLoader, GlobalTempViewManager, SessionCatalog} import org.apache.spark.sql.catalyst.expressions.{Cast, Expression} import org.apache.spark.sql.catalyst.parser.ParserInterface import org.apache.spark.sql.hive.HiveShim.HiveFunctionWrapper import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types.{DecimalType, DoubleType} private[sql] class HiveSessionCatalog( externalCatalog: HiveExternalCatalog, globalTempViewManager: GlobalTempViewManager, val metastoreCatalog: HiveMetastoreCatalog, functionRegistry: FunctionRegistry, conf: SQLConf, hadoopConf: Configuration, parser: ParserInterface, functionResourceLoader: FunctionResourceLoader) extends SessionCatalog( externalCatalog, globalTempViewManager, functionRegistry, conf, hadoopConf, parser, functionResourceLoader) { /** * Constructs a [[Expression]] based on the provided class that represents a function. * * This performs reflection to decide what type of [[Expression]] to return in the builder. */ override def makeFunctionExpression( name: String, clazz: Class[_], input: Seq[Expression]): Expression = { Try(super.makeFunctionExpression(name, clazz, input)).getOrElse { var udfExpr: Option[Expression] = None try { // When we instantiate hive UDF wrapper class, we may throw exception if the input // expressions don't satisfy the hive UDF, such as type mismatch, input number // mismatch, etc. Here we catch the exception and throw AnalysisException instead. if (classOf[UDF].isAssignableFrom(clazz)) { udfExpr = Some(HiveSimpleUDF(name, new HiveFunctionWrapper(clazz.getName), input)) udfExpr.get.dataType // Force it to check input data types. } else if (classOf[GenericUDF].isAssignableFrom(clazz)) { udfExpr = Some(HiveGenericUDF(name, new HiveFunctionWrapper(clazz.getName), input)) udfExpr.get.dataType // Force it to check input data types. } else if (classOf[AbstractGenericUDAFResolver].isAssignableFrom(clazz)) { udfExpr = Some(HiveUDAFFunction(name, new HiveFunctionWrapper(clazz.getName), input)) udfExpr.get.dataType // Force it to check input data types. 
} else if (classOf[UDAF].isAssignableFrom(clazz)) { udfExpr = Some(HiveUDAFFunction( name, new HiveFunctionWrapper(clazz.getName), input, isUDAFBridgeRequired = true)) udfExpr.get.dataType // Force it to check input data types. } else if (classOf[GenericUDTF].isAssignableFrom(clazz)) { udfExpr = Some(HiveGenericUDTF(name, new HiveFunctionWrapper(clazz.getName), input)) udfExpr.get.asInstanceOf[HiveGenericUDTF].elementSchema // Force it to check data types. } } catch { case NonFatal(e) => val noHandlerMsg = s"No handler for UDF/UDAF/UDTF '${clazz.getCanonicalName}': $e" val errorMsg = if (classOf[GenericUDTF].isAssignableFrom(clazz)) { s"$noHandlerMsg\nPlease make sure your function overrides " + "`public StructObjectInspector initialize(ObjectInspector[] args)`." } else { noHandlerMsg } val analysisException = new AnalysisException(errorMsg) analysisException.setStackTrace(e.getStackTrace) throw analysisException } udfExpr.getOrElse { throw new AnalysisException(s"No handler for UDF/UDAF/UDTF '${clazz.getCanonicalName}'") } } } override def lookupFunction(name: FunctionIdentifier, children: Seq[Expression]): Expression = { try { lookupFunction0(name, children) } catch { case NonFatal(_) => // SPARK-16228 ExternalCatalog may recognize `double`-type only. val newChildren = children.map { child => if (child.dataType.isInstanceOf[DecimalType]) Cast(child, DoubleType) else child } lookupFunction0(name, newChildren) } } private def lookupFunction0(name: FunctionIdentifier, children: Seq[Expression]): Expression = { val database = name.database.map(formatDatabaseName) val funcName = name.copy(database = database) Try(super.lookupFunction(funcName, children)) match { case Success(expr) => expr case Failure(error) => if (functionRegistry.functionExists(funcName)) { // If the function actually exists in functionRegistry, it means that there is an // error when we create the Expression using the given children. // We need to throw the original exception. throw error } else { // This function is not in functionRegistry, let's try to load it as a Hive's // built-in function. // Hive is case insensitive. val functionName = funcName.unquotedString.toLowerCase(Locale.ROOT) if (!hiveFunctions.contains(functionName)) { failFunctionLookup(funcName) } // TODO: Remove this fallback path once we implement the list of fallback functions // defined below in hiveFunctions. val functionInfo = { try { Option(HiveFunctionRegistry.getFunctionInfo(functionName)).getOrElse( failFunctionLookup(funcName)) } catch { // If HiveFunctionRegistry.getFunctionInfo throws an exception, // we are failing to load a Hive builtin function, which means that // the given function is not a Hive builtin function. case NonFatal(e) => failFunctionLookup(funcName) } } val className = functionInfo.getFunctionClass.getName val functionIdentifier = FunctionIdentifier(functionName.toLowerCase(Locale.ROOT), database) val func = CatalogFunction(functionIdentifier, className, Nil) // Put this Hive built-in function to our function registry. registerFunction(func, overrideIfExists = false) // Now, we need to create the Expression. functionRegistry.lookupFunction(functionIdentifier, children) } } } // TODO Removes this method after implementing Spark native "histogram_numeric". override def functionExists(name: FunctionIdentifier): Boolean = { super.functionExists(name) || hiveFunctions.contains(name.funcName) } /** List of functions we pass over to Hive. Note that over time this list should go to 0. 
*/ // We have a list of Hive built-in functions that we do not support. So, we will check // Hive's function registry and lazily load needed functions into our own function registry. // List of functions we are explicitly not supporting are: // compute_stats, context_ngrams, create_union, // current_user, ewah_bitmap, ewah_bitmap_and, ewah_bitmap_empty, ewah_bitmap_or, field, // in_file, index, matchpath, ngrams, noop, noopstreaming, noopwithmap, // noopwithmapstreaming, parse_url_tuple, reflect2, windowingtablefunction. // Note: don't forget to update SessionCatalog.isTemporaryFunction private val hiveFunctions = Seq( "histogram_numeric" ) }
cin/spark
sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala
Scala
apache-2.0
8,719
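The session catalog above decides how to wrap a Hive function by walking a chain of isAssignableFrom checks over the UDF base classes before falling back to Hive's registry. A minimal, self-contained sketch of that reflection-based dispatch, using hypothetical stand-in traits rather than the real Hive classes:

trait SimpleUdf
trait GenericUdf

object UdfDispatchSketch extends App {
  class Upper extends SimpleUdf
  class Explode extends GenericUdf

  // Mirrors the classOf[...].isAssignableFrom(clazz) chain used by makeFunctionExpression:
  // the first base type the class conforms to decides which wrapper would be built.
  def describe(clazz: Class[_]): String =
    if (classOf[SimpleUdf].isAssignableFrom(clazz)) "simple UDF wrapper"
    else if (classOf[GenericUdf].isAssignableFrom(clazz)) "generic UDF wrapper"
    else s"no handler for '${clazz.getCanonicalName}'"

  println(describe(classOf[Upper]))   // simple UDF wrapper
  println(describe(classOf[Explode])) // generic UDF wrapper
  println(describe(classOf[String]))  // no handler for 'java.lang.String'
}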
object Test extends App {
  (new PlaceboAssorted).combo
  (new placeboannot.klass.PlaceboClass).combo
  (new placeboannot.objekt.PlaceboClass).combo
  (new PlaceboParameters).combo
}
scala/scala
test/macro-annot/run/placebo/test_2.scala
Scala
apache-2.0
183
/**
 * Copyright 2012-2013 StackMob
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import io.Source
import java.io.PrintWriter
import sbtrelease._
import ReleasePlugin._
import ReleaseKeys._
import sbt._

object LucidReleaseSteps {
  val launchConfig = "src/main/conscript/lucid/launchconfig"
  val readme = "README.md"

  lazy val setReadmeReleaseVersion: ReleaseStep = { st: State =>
    val releaseVersions = getReleasedVersion(st)
    updateReadme(st, releaseVersions._1)
    commitReadme(st, releaseVersions._1)
    st
  }

  lazy val setLaunchConfigReleaseVersion: ReleaseStep = { st: State =>
    val releaseVersions = getReleasedVersion(st)
    updateLaunchConfig(st, "%s-SNAPSHOT".format(releaseVersions._1), releaseVersions._1)
    commitLaunchConfig(st, releaseVersions._1)
    st
  }

  lazy val setLaunchConfigNextVersion: ReleaseStep = { st: State =>
    val releaseVersions = getReleasedVersion(st)
    updateLaunchConfig(st, releaseVersions._1, releaseVersions._2)
    commitLaunchConfig(st, releaseVersions._2)
    st
  }

  private def getReleasedVersion(st: State): (String, String) = {
    st.get(versions).getOrElse(sys.error("No versions are set."))
  }

  private def updateLaunchConfig(st: State, oldVersion: String, newVersion: String) {
    val oldLaunchConfig = Source.fromFile(launchConfig).mkString
    val out = new PrintWriter(launchConfig, "UTF-8")
    try {
      val newLaunchConfig = oldLaunchConfig.replaceAll(oldVersion, newVersion)
      newLaunchConfig.foreach(out.write(_))
    } finally {
      out.close()
    }
  }

  private def updateReadme(st: State, newVersion: String) {
    val conscriptR = """stackmob/lucid/\d+\.\d+\.\d+""".r
    val oldReadme = Source.fromFile(readme).mkString
    val out = new PrintWriter(readme, "UTF-8")
    try {
      val newReadme = conscriptR.replaceFirstIn(oldReadme, "stackmob/lucid/%s".format(newVersion))
      newReadme.foreach(out.write(_))
    } finally {
      out.close()
    }
  }

  private def commitLaunchConfig(st: State, newVersion: String) {
    val vcs = Project.extract(st).get(versionControlSystem).getOrElse(sys.error("Unable to get version control system."))
    vcs.add(launchConfig) !! st.log
    vcs.commit("launchconfig updated to %s".format(newVersion)) ! st.log
  }

  private def commitReadme(st: State, newVersion: String) {
    val vcs = Project.extract(st).get(versionControlSystem).getOrElse(sys.error("Unable to get version control system."))
    vcs.add(readme) !! st.log
    vcs.commit("README.md updated to %s".format(newVersion)) ! st.log
  }
}
stackmob/lucid
project/Build.scala
Scala
apache-2.0
3,073
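The values above are ordinary sbt-release ReleaseStep instances, so they are intended to be spliced into the plugin's release pipeline. A hedged sketch of one possible ordering; the stock step names come from sbt-release's ReleaseStateTransformations, and how the sequence gets assigned to the release process (e.g. via ReleaseKeys.releaseProcess) depends on the sbt-release version in use:

import sbtrelease._
import sbtrelease.ReleaseStateTransformations._

object LucidReleasePipelineSketch {
  // Illustrative ordering only, mixing the stock steps with the custom ones above.
  lazy val steps: Seq[ReleaseStep] = Seq(
    inquireVersions,
    setReleaseVersion,
    LucidReleaseSteps.setLaunchConfigReleaseVersion,
    LucidReleaseSteps.setReadmeReleaseVersion,
    commitReleaseVersion,
    tagRelease,
    setNextVersion,
    LucidReleaseSteps.setLaunchConfigNextVersion,
    commitNextVersion,
    pushChanges
  )
}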
package hintsAll

import common.CommonTest
import de.ust.skill.common.scala.api.Create
import de.ust.skill.common.scala.api.Read
import de.ust.skill.common.scala.api.Write
import hintsAll.api.SkillFile

/**
 * Tests interface API.
 */
class BasicTest extends CommonTest {
  test("create a node with distributed fields and access a field") {
    val path = tmpFile("hints");

    val sf = SkillFile.open(path, Create, Write)
    val n = sf.User.make(30, "ich", "ich nicht")
    println(n.age)
    sf.close
  }

  test("create a node write and read") {
    val path = tmpFile("hints");

    locally {
      val sf = SkillFile.open(path, Create, Write)
      val n = sf.User.make(30, "ich", "ich nicht")
      println(n.age)
      sf.close
    }

    locally {
      val sf = SkillFile.open(path, Read, Write)
      assert(30 === sf.User.head.age)
      sf.close
    }
  }
}
skill-lang/skillScalaTestSuite
src/test/scala/hintsAll/BasicTest.scala
Scala
bsd-3-clause
874
/***
 * Copyright 2014 Rackspace US, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.rackspace.com.papi.components.checker.step

import javax.servlet.FilterChain
import javax.xml.parsers.DocumentBuilder
import javax.xml.transform.dom.{DOMResult, DOMSource}
import javax.xml.validation.{Schema, Validator}

import com.rackspace.com.papi.components.checker.servlet._
import com.rackspace.com.papi.components.checker.step.base.{ConnectedStep, Step, StepContext}
import com.rackspace.com.papi.components.checker.util.ValidatorPool.{borrowValidator, returnValidator}
import com.rackspace.com.papi.components.checker.util.XMLParserPool.{borrowParser, returnParser}

class XSD(id : String, label : String, schema : Schema, transform : Boolean, val priority : Long, next : Array[Step])
  extends ConnectedStep(id, label, next) {

  override val mismatchMessage : String = "The XML does not validate against the schema."

  override def checkStep(req : CheckerServletRequest, resp : CheckerServletResponse,
                         chain : FilterChain, context : StepContext) : Option[StepContext] = {
    var ret : Option[StepContext] = None
    var validator : Validator = null
    var parser : DocumentBuilder = null
    val capture = new ErrorCapture //Used to capture parse errors
    var error : Exception = null   //Other errors may be caught here

    try {
      validator = borrowValidator(schema)
      validator.setErrorHandler(capture)

      if (transform) {
        //
        // We create a new document because Saxon doesn't pool
        // DocumentBuilders and letting saxon create a new builder
        // slows things down.
        //
        parser = borrowParser
        val result = parser.newDocument()
        returnParser(parser); parser = null

        validator.validate (new DOMSource (req.parsedXML), new DOMResult(result))
        req.parsedXML = result
      } else {
        validator.validate (new DOMSource (req.parsedXML))
      }
      ret = Some(context)
    } catch {
      case e : Exception => error = e
    } finally {
      if (validator != null) returnValidator (schema, validator)
      if (parser != null) returnParser(parser)
    }

    //
    // Always give precedence to parse errors.
    //
    if (capture.error.isDefined) {
      req.contentError = capture.error.get
      req.contentErrorPriority = priority
      ret = None
    } else if (error != null) {
      req.contentError = error
      req.contentErrorPriority = priority
      ret = None
    }

    ret
  }
}
wdschei/api-checker
core/src/main/scala/com/rackspace/com/papi/components/checker/step/XSD.scala
Scala
apache-2.0
3,039
/*
Copyright 2012 Twitter, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding

import java.io.Serializable
import java.lang.reflect.{ Type, ParameterizedType }

import cascading.pipe.Pipe
import cascading.tap.SinkMode
import cascading.tuple.{ Tuple, TupleEntry, Fields }
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.module.scala._
import com.fasterxml.jackson.databind.ObjectMapper

/**
 * This Source writes out the TupleEntry as a simple JSON object, using the field
 * names as keys and the string representation of the values.
 *
 * TODO: it would be nice to have a way to add read/write transformations to pipes
 * that doesn't require extending the sources and overriding methods.
 *
 * @param failOnEmptyLines When set to false, it just skips empty lines instead of failing the jobs.
 *                         Defaults to true for backwards compatibility.
 */
case class JsonLine(p: String, fields: Fields = Fields.ALL, override val sinkMode: SinkMode = SinkMode.REPLACE,
  override val transformInTest: Boolean = false, failOnEmptyLines: Boolean = true)
  extends FixedPathSource(p) with TextLineScheme {

  import Dsl._
  import JsonLine._

  override def transformForWrite(pipe: Pipe) = pipe.mapTo(fields -> 'json) {
    t: TupleEntry => mapper.writeValueAsString(TupleConverter.ToMap(t))
  }

  override def transformForRead(pipe: Pipe) = {
    @scala.annotation.tailrec
    def nestedRetrieval(node: Option[Map[String, AnyRef]], path: List[String]): AnyRef = {
      (path, node) match {
        case (_, None) => null
        case (h :: Nil, Some(fs)) => fs.get(h).orNull
        case (h :: tail, Some(fs)) =>
          fs.get(h).orNull match {
            case fs: Map[String, AnyRef] => nestedRetrieval(Option(fs), tail)
            case _ => null
          }
        case (Nil, _) => null
      }
    }

    val splitFields = (0 until fields.size).map { i: Int => fields.get(i).toString.split('.').toList }

    pipe.collectTo[String, Tuple]('line -> fields) {
      case line: String if failOnEmptyLines || line.trim.nonEmpty =>
        val fs: Map[String, AnyRef] = mapper.readValue(line, mapTypeReference)
        val values = splitFields.map { nestedRetrieval(Option(fs), _) }
        new cascading.tuple.Tuple(values: _*)
    }
  }

  override def toString = "JsonLine(" + p + ", " + fields.toString + ")"
}

/**
 * TODO: at the next binary incompatible version remove the AbstractFunction2/scala.Serializable jank which
 * was added to get mima to not report binary errors
 */
object JsonLine extends scala.runtime.AbstractFunction5[String, Fields, SinkMode, Boolean, Boolean, JsonLine]
  with Serializable with scala.Serializable {

  val mapTypeReference = typeReference[Map[String, AnyRef]]

  private[this] def typeReference[T: Manifest] = new TypeReference[T] {
    override def getType = typeFromManifest(manifest[T])
  }

  private[this] def typeFromManifest(m: Manifest[_]): Type = {
    if (m.typeArguments.isEmpty) { m.runtimeClass }
    else new ParameterizedType {
      def getRawType = m.runtimeClass
      def getActualTypeArguments = m.typeArguments.map(typeFromManifest).toArray
      def getOwnerType = null
    }
  }

  val mapper = new ObjectMapper()
  mapper.registerModule(DefaultScalaModule)
}
soundcloud/scalding
scalding-json/src/main/scala/com/twitter/scalding/JsonLine.scala
Scala
apache-2.0
3,778
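transformForRead above splits each requested field name on '.' and walks the parsed JSON map, so a dotted field name reaches into nested objects. A hedged sketch of a job using that behaviour; the input schema and field names are invented for illustration, and Symbol("address.city") is spelled out because a dotted name cannot be written as a plain symbol literal:

import com.twitter.scalding._

// Reads lines like {"user": "...", "address": {"city": "..."}} and writes the
// top-level "user" value next to the nested "address.city" value as TSV.
class UserCityJob(args: Args) extends Job(args) {
  JsonLine(args("input"), ('user, Symbol("address.city")))
    .read
    .write(Tsv(args("output")))
}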
/***********************************************************************
 * Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.index.filters

import java.nio.ByteBuffer

import org.locationtech.geomesa.index.filters.RowFilter.RowFilterFactory
import org.locationtech.geomesa.index.filters.Z3Filter._
import org.locationtech.geomesa.index.index.z2.Z2IndexValues
import org.locationtech.geomesa.utils.index.ByteArrays
import org.locationtech.sfcurve.zorder.Z2

class Z2Filter(val xy: Array[Array[Int]]) extends RowFilter {

  override def inBounds(buf: Array[Byte], offset: Int): Boolean = {
    val z = ByteArrays.readLong(buf, offset)
    val x = Z2(z).d0
    val y = Z2(z).d1

    var i = 0
    while (i < xy.length) {
      val xyi = xy(i)
      if (x >= xyi(0) && x <= xyi(2) && y >= xyi(1) && y <= xyi(3)) {
        return true
      }
      i += 1
    }
    false
  }

  override def toString: String = Z2Filter.serializeToStrings(this).toSeq.sortBy(_._1).mkString(",")
}

object Z2Filter extends RowFilterFactory[Z2Filter] {

  private val RangeSeparator = ":"
  private val TermSeparator = ";"

  def apply(values: Z2IndexValues): Z2Filter = {
    val sfc = values.sfc
    val xy: Array[Array[Int]] = values.bounds.map { case (xmin, ymin, xmax, ymax) =>
      Array(sfc.lon.normalize(xmin), sfc.lat.normalize(ymin), sfc.lon.normalize(xmax), sfc.lat.normalize(ymax))
    }.toArray
    new Z2Filter(xy)
  }

  override def serializeToBytes(filter: Z2Filter): Array[Byte] = {
    // 4 bytes for length plus 16 bytes for each xy val (4 ints)
    val xyLength = 4 + filter.xy.length * 16
    val buffer = ByteBuffer.allocate(xyLength)

    buffer.putInt(filter.xy.length)
    filter.xy.foreach(bounds => bounds.foreach(buffer.putInt))

    buffer.array()
  }

  override def deserializeFromBytes(serialized: Array[Byte]): Z2Filter = {
    val buffer = ByteBuffer.wrap(serialized)
    val xy = Array.fill(buffer.getInt())(Array.fill(4)(buffer.getInt))
    new Z2Filter(xy)
  }

  override def serializeToStrings(filter: Z2Filter): Map[String, String] = {
    val xy = filter.xy.map(bounds => bounds.mkString(RangeSeparator)).mkString(TermSeparator)
    Map(XYKey -> xy)
  }

  override def deserializeFromStrings(serialized: scala.collection.Map[String, String]): Z2Filter = {
    val xy = serialized(XYKey).split(TermSeparator).map(_.split(RangeSeparator).map(_.toInt))
    new Z2Filter(xy)
  }
}
elahrvivaz/geomesa
geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/filters/Z2Filter.scala
Scala
apache-2.0
2,770
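serializeToBytes above writes a box count followed by four ints per box, and inBounds decodes the row's z-value and tests it against each box. A small hedged round-trip sketch; it assumes the z-value sits at the given offset as a big-endian long, which is how ByteArrays.readLong is being used above:

import java.nio.ByteBuffer

import org.locationtech.geomesa.index.filters.Z2Filter
import org.locationtech.sfcurve.zorder.Z2

object Z2FilterRoundTripSketch extends App {
  // One box covering normalized cells (0,0)..(10,10), the same already-normalized
  // ints that Z2Filter.apply(values: Z2IndexValues) would produce.
  val filter = new Z2Filter(Array(Array(0, 0, 10, 10)))

  // Round-trip through the compact byte layout: a 4-byte count plus 4 ints per box.
  val restored = Z2Filter.deserializeFromBytes(Z2Filter.serializeToBytes(filter))

  // Assumption: the row stores the z-value as a big-endian long, so ByteBuffer.putLong
  // produces exactly what ByteArrays.readLong will decode.
  def row(x: Int, y: Int): Array[Byte] = ByteBuffer.allocate(8).putLong(Z2(x, y).z).array()

  println(restored.inBounds(row(5, 5), 0))   // expected: true  (inside the box)
  println(restored.inBounds(row(50, 50), 0)) // expected: false (outside the box)
}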
/*
 * Copyright 2011-2022 GatlingCorp (https://gatling.io)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.gatling.jms.jndi

import java.util
import javax.jms.{ Connection, ConnectionFactory, JMSContext }
import javax.naming.Context
import javax.naming.spi.InitialContextFactory

import org.apache.activemq.jndi.ReadOnlyContext

class DummyContextFactory extends InitialContextFactory {

  override def getInitialContext(environment: util.Hashtable[_, _]): Context = {
    val bindings = new util.HashMap[String, Object]()
    bindings.put("DummyConnectionFactory", new DummyConnectionFactory(environment))
    new ReadOnlyContext(environment, bindings)
  }
}

class DummyConnectionFactory(env: util.Hashtable[_, _]) extends ConnectionFactory {

  val environment: util.Hashtable[_, _] = env

  override def createConnection(): Connection = null

  override def createConnection(userName: String, password: String): Connection = null

  override def createContext(sessionMode: Int): JMSContext = null

  override def createContext(userName: String, password: String, sessionMode: Int): JMSContext = null

  override def createContext(userName: String, password: String): JMSContext = null

  override def createContext(): JMSContext = null
}
gatling/gatling
gatling-jms/src/test/scala/io/gatling/jms/jndi/DummyContextFactory.scala
Scala
apache-2.0
1,763
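The factory above exists so tests can resolve a ConnectionFactory through plain JNDI. A short sketch of the lookup it enables; the property key is the standard javax.naming one, and the binding name is the one registered in getInitialContext above:

import java.util.Properties
import javax.naming.{ Context, InitialContext }

import io.gatling.jms.jndi.DummyContextFactory

object DummyJndiLookupSketch extends App {
  // Point the JNDI initial context at the dummy factory.
  val props = new Properties()
  props.put(Context.INITIAL_CONTEXT_FACTORY, classOf[DummyContextFactory].getName)

  val ctx = new InitialContext(props)
  val cf = ctx.lookup("DummyConnectionFactory")
  println(cf.getClass.getSimpleName) // DummyConnectionFactory
}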
package com.azavea.opentransit.database import com.azavea.opentransit._ import com.azavea.gtfs._ import geotrellis.slick._ import geotrellis.vector._ import scala.slick.driver.{JdbcDriver, JdbcProfile, PostgresDriver} import scala.slick.jdbc.JdbcBackend.DatabaseDef // The performance characteristics of postgres enums should be the same as integers. // Instead of introducing a dependency for PG enums (something we should do from the start), // I'll use typesafety and the `DeltaType` sum type to achieve parity. sealed trait DeltaType { val intRep: Int } case object GTFSAddition extends DeltaType { val intRep = 1 } case object GTFSRemoval extends DeltaType { val intRep = -1 } object DeltaType { def apply(intRep: Int) = intRep match { case GTFSAddition.intRep => GTFSAddition case GTFSRemoval.intRep => GTFSRemoval case _ => throw new Exception("GTFSDelta must be 1 or -1") } } case class TripDelta(deltaType: DeltaType, tripShape: TripShape) case class StopDelta(deltaType: DeltaType, stop: Stop) object TripDeltaStore { import PostgresDriver.simple._ private val gisSupport = new PostGisProjectionSupport(PostgresDriver) import gisSupport._ def serialize(tripDelta: TripDelta): Option[(Int, String, Projected[Line])] = Some((tripDelta.deltaType.intRep, tripDelta.tripShape.id, tripDelta.tripShape.line)) def deserialize(tripDeltaTuple: (Int, String, Projected[Line])): TripDelta = TripDelta( DeltaType(tripDeltaTuple._1), TripShape(tripDeltaTuple._2, tripDeltaTuple._3) ) class TripDeltaTable(tag: Tag) extends Table[TripDelta](tag, "trip_deltas") { def id = column[String]("id", O.PrimaryKey) def geom = column[Projected[Line]]("geom") def deltaType = column[Int]("delta_type") def * = (deltaType, id, geom) <> (deserialize, serialize) } val tripDeltas = TableQuery[TripDeltaTable] def addTripShape(tripShape: TripShape)(implicit sess: Session): Unit = { val query = tripDeltas.filter(_.id === tripShape.id).map(_.deltaType) query.firstOption match { case Some(_) => () case None => tripDeltas.insert(TripDelta(GTFSAddition, tripShape)) } } def removeTripShape(tripShape: TripShape)(implicit sess: Session): Unit = { val query = tripDeltas.filter(_.id === tripShape.id) query.firstOption match { case Some(_) => query.delete case None => tripDeltas.insert(TripDelta(GTFSRemoval, tripShape)) } } def tripHighlights(deltaType: DeltaType)(implicit sess: Session): MultiLine = { val query = for { d <- tripDeltas if d.deltaType === deltaType.intRep } yield d.geom query.run.map(_.geom).foldLeft(MultiLine.EMPTY) { (union, geom) => union.union(geom) match { case LineResult(l) => MultiLine(l) case MultiLineResult(ml) => ml } } } } object StopDeltaStore { import PostgresDriver.simple._ private val gisSupport = new PostGisProjectionSupport(PostgresDriver) import gisSupport._ def serialize(stopDelta: StopDelta): Option[(Int, String, String, Option[String], Projected[Point])] = Some(Tuple5( stopDelta.deltaType.intRep, stopDelta.stop.id, stopDelta.stop.name, stopDelta.stop.description, stopDelta.stop.point )) def deserialize(stopDeltaTuple: (Int, String, String, Option[String], Projected[Point])): StopDelta = StopDelta( DeltaType(stopDeltaTuple._1), Stop(stopDeltaTuple._2, stopDeltaTuple._3, stopDeltaTuple._4, stopDeltaTuple._5) ) class StopDeltaTable(tag: Tag) extends Table[StopDelta](tag, "stop_deltas") { def id = column[String]("id", O.PrimaryKey) def name = column[String]("name") def description = column[Option[String]]("description") def geom = column[Projected[Point]]("geom") def deltaType = column[Int]("delta_type") def * = 
(deltaType, id, name, description, geom) <> (deserialize, serialize) } val stopDeltas = TableQuery[StopDeltaTable] def addStop(stop: Stop)(implicit sess: Session): Unit = { val query = stopDeltas.filter(_.id === stop.id).map(_.deltaType) query.firstOption match { case Some(_) => () case None => stopDeltas.insert(StopDelta(GTFSAddition, stop)) } } def removeStop(stop: Stop)(implicit sess: Session): Unit = { val query = stopDeltas.filter(_.id === stop.id) query.firstOption match { case Some(_) => query.delete case None => stopDeltas.insert(StopDelta(GTFSRemoval, stop)) } } def stopHighlights(deltaType: DeltaType)(implicit sess: Session): MultiPoint = { val query = for { d <- stopDeltas if d.deltaType === deltaType.intRep } yield d.geom query.run.map(_.geom).foldLeft(MultiPoint.EMPTY) { (union, geom) => union.union(geom) match { case PointResult(p) => MultiPoint(p) case MultiPointResult(mp) => mp } } } }
flibbertigibbet/open-transit-indicators
scala/opentransit/src/main/scala/com/azavea/opentransit/database/GTFSDeltaTables.scala
Scala
gpl-3.0
4,871
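Both stores above follow the same delta bookkeeping: addTripShape/addStop insert a GTFSAddition row unless a delta already exists, the remove methods either delete a pending addition or insert a GTFSRemoval, and the highlight queries union the geometries for one delta type. A hedged usage sketch, assuming a configured Slick Database and an existing trip_deltas table (neither is part of the record):

import scala.slick.driver.PostgresDriver.simple._

import com.azavea.gtfs._
import com.azavea.opentransit.database.{ GTFSAddition, TripDeltaStore }

object GtfsDeltaSketch {
  // Illustrative only: `db` and `shape` are assumed to be supplied by the caller.
  def recordShapeChange(db: Database, shape: TripShape): Unit =
    db.withSession { implicit session: Session =>
      TripDeltaStore.addTripShape(shape)                      // no-op if a delta row already exists
      val added = TripDeltaStore.tripHighlights(GTFSAddition) // union of pending addition geometries
      println(s"pending additions: $added")
    }
}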
/*
 * Copyright 2010-2014 WorldWide Conferencing, LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package net.liftweb
package http

import scala.xml.NodeSeq

import org.specs2.mutable.Specification

import actor.LAScheduler
import common._
import js.JsCmds._

object CometActorSpec extends Specification {
  private case object TestMessage

  private val testSession = new LiftSession("Test Session", "", Empty)

  private class SpecCometActor extends CometActor {
    var receivedMessages = List[Any]()

    def render = NodeSeq.Empty
    override def theSession = testSession

    override def !(msg: Any) = {
      receivedMessages ::= msg

      LAScheduler.onSameThread = true

      super.!(msg)

      LAScheduler.onSameThread = false
    }
  }

  "A CometActor" should {
    class RedirectingComet extends SpecCometActor {
      override def lowPriority = {
        case TestMessage =>
          S.redirectTo("place")
      }
    }

    "redirect the user when a ResponseShortcutException with redirect occurs" in {
      val comet = new RedirectingComet

      comet ! TestMessage

      comet.receivedMessages.exists {
        case PartialUpdateMsg(update) if update() == RedirectTo("place") => true
        case _ => false
      } must beTrue
    }

    class FunctionRedirectingComet extends SpecCometActor {
      override def lowPriority = {
        case TestMessage =>
          S.redirectTo("place", () => "do stuff")
      }
    }

    "redirect the user with a function when a ResponseShortcutException with redirect+function occurs" in {
      val comet = new FunctionRedirectingComet

      comet ! TestMessage

      val matchingMessage = comet.receivedMessages.collect {
        case PartialUpdateMsg(update) => update()
      }

      matchingMessage must beLike {
        case List(RedirectTo(redirectUri)) =>
          redirectUri must startWith("place")
          redirectUri must beMatching("^[^?]+\\?F[^=]+=_$".r)
      }
    }
  }
}
lzpfmh/framework-2
web/webkit/src/test/scala/net/liftweb/http/CometActorSpec.scala
Scala
apache-2.0
2,525
package org.codeswarm.aksync import akka.testkit.{TestActorRef, TestKit} import akka.actor.{Actor, Props, ActorSystem, ActorRef, ActorContext}, akka.event.Logging import org.scalatest._ import scala.concurrent.duration.{Duration, DurationDouble, FiniteDuration} import com.typesafe.config.ConfigFactory import collection.mutable.ArrayBuffer class ServerSpec extends FunSpec { class Token(val id: Int, var alive: Boolean = true) class LogActor(val buffer: ArrayBuffer[String]) extends Actor { def receive = { case e: akka.event.Logging.LogEvent => buffer += List(e.logSource, e.message).mkString(" ") } } class Fixture( val poolSizeRange: PoolSizeRange, val leaseTimeout: LeaseTimeout, val tokenRetryInterval: TokenRetryInterval.Fixed = (0.25).seconds, val fast: Duration = (0.05).seconds ) { implicit val system = ActorSystem("test", ConfigFactory.parseString( """ |akka { | event-handlers = [org.codeswarm.aksync.NoActorLogging] | loglevel = DEBUG | daemonic = on |} """.stripMargin)) val testKit = new TestKit(system) implicit val self = testKit.testActor val lifecycle = new Lifecycle[Token]() { override def isAlive(a: Token): Boolean = a.alive override def actor(implicit context: ActorContext): ActorRef = self } val serverProps = Props(new Server[Token]( lifecycle = lifecycle, leaseTimeout = leaseTimeout, poolSizeRange = poolSizeRange, tokenRetryInterval = tokenRetryInterval )) lazy val server = TestActorRef(serverProps, "server") def expectNoMsg() { testKit.expectNoMsg() } def expectNoMsg(max: FiniteDuration) { testKit.expectNoMsg(max) } def expectMsg[T](obj: T): T = testKit.expectMsg(obj) def expectMsg[T](obj: T, max: FiniteDuration): T = testKit.expectMsg(max, obj) def expectLease[A](token: A, max: Duration = Duration.Undefined): Lease[A] = testKit.expectMsgPF (max = max) { case x: Lease[_] if x.token == token => x.asInstanceOf[Lease[A]] } def expectDead[A](token: A, max: Duration = Duration.Undefined) { testKit.expectMsgPF (max = max) { case x: Lifecycle.Dead[_] if x.token == token => x.asInstanceOf[Lifecycle.Dead[A]] } } def expectRevoked[A](token: A, max: Duration = Duration.Undefined) { testKit.expectMsgPF (max = max) { case x: Lifecycle.Revoked[_] if x.token == token => x.asInstanceOf[Lifecycle.Revoked[A]] } } object Tokens { val map = collection.mutable.HashMap[Int, Token]() def apply(i: Int): Token = map.get(i) match { case Some(token) => token case None => val token = new Token(i) map += i -> token token } } } def withFixture(test: Fixture => Any)(implicit poolSizeRange: PoolSizeRange, leaseTimeout: LeaseTimeout = LeaseTimeout.Fixed(30.seconds)) { val fixture = new Fixture(poolSizeRange, leaseTimeout) import fixture.system val logBuffer = new ArrayBuffer[String] val logActor = system.actorOf(Props(new LogActor(logBuffer))) akka.event.Logging.AllLogLevels.foreach(l => system.eventStream.subscribe(logActor, akka.event.Logging.classFor(l))) fixture.server try { test(fixture) system.eventStream.setLogLevel(Logging.WarningLevel) system.shutdown() system.awaitTermination() } finally { logBuffer.foreach(info(_)) } } describe ("A Server with pool size 0-1") { implicit val poolSizeRange: PoolSizeRange = 0 to 1 it ("should initially do nothing") { withFixture { f => import f._ expectNoMsg() } } it ("should request a token when a client requests a lease") { withFixture { f => import f._ server ! Lease.Request expectMsg(Lifecycle.TokenRequest) } } it ("should issue a lease when one is becomes available") { withFixture { f => import f._ server ! Lease.Request expectMsg(Lifecycle.TokenRequest) server ! 
Lifecycle.NewToken(Tokens(1)) expectLease(Tokens(1)) } } it ("""should not request a second token, and should not respond to a lease request | while the token is already leased""".stripMargin) { withFixture { f => import f._ server ! Lease.Request expectMsg(Lifecycle.TokenRequest) server ! Lease.Request server ! Lifecycle.NewToken(Tokens(1)) expectLease(Tokens(1)) expectNoMsg() } } it ("should re-issue the same token to a second client after the first client releases") { withFixture { f => import f._ server ! Lease.Request expectMsg(Lifecycle.TokenRequest) server ! Lease.Request server ! Lifecycle.NewToken(Tokens(1)) val lease = expectLease(Tokens(1)) lease.release() expectLease(Tokens(1)) } } it ("should destroy and replace the token after it dies") { withFixture { f => import f._ server ! Lease.Request expectMsg(Lifecycle.TokenRequest) server ! Lease.Request server ! Lifecycle.NewToken(Tokens(1)) val lease = expectLease(Tokens(1)) Tokens(1).alive = false lease.release() expectDead(Tokens(1)) expectMsg(Lifecycle.TokenRequest) server ! Lifecycle.NewToken(Tokens(2)) expectLease(Tokens(2)) } } } describe ("A Server with pool size 1-2") { implicit val poolSizeRange: PoolSizeRange = 1 to 2 it ("should immediately request a token") { withFixture { f => import f._ expectMsg(Lifecycle.TokenRequest, max = 1.second) } } it ("should not immediately retry when a token request fails") { withFixture { f => import f._ expectMsg(Lifecycle.TokenRequest) server ! Lifecycle.TokenUnavailable expectNoMsg(tokenRetryInterval/2) } } it ("should delay and retry after a token request fails") { withFixture { f => import f._ expectMsg(Lifecycle.TokenRequest) server ! Lifecycle.TokenUnavailable expectMsg(Lifecycle.TokenRequest, max = tokenRetryInterval*3/2) } } } describe ("A server with a 0.2-second initial and 1-second subsequent lease timeout") { implicit val poolSizeRange: PoolSizeRange = 0 to 1 implicit val leaseTimeout: LeaseTimeout = LeaseTimeout.FirstAndSubsequent(first = (0.2).seconds, subsequent = 1.second) it ("should revoke a lease not acknowledged within 0.2 seconds") { withFixture { f => import f._ server ! Lease.Request expectMsg(Lifecycle.TokenRequest) server ! Lifecycle.NewToken(Tokens(1)) expectLease(Tokens(1)) expectRevoked(Tokens(1), max = (0.3).seconds) } } it ("""should revoke a lease that was immediately (but not subsequently) acknowledged, | after 1 second""".stripMargin) { withFixture { f => import f._ server ! Lease.Request expectMsg(Lifecycle.TokenRequest) server ! Lifecycle.NewToken(Tokens(1)) val lease = expectLease(Tokens(1)) lease.acknowledge() expectNoMsg(max = (0.8).seconds) expectRevoked(Tokens(1), max = (0.3).seconds) } } it ("should not revoke a lease that is acknowledged every half-second then released") { withFixture { f => import f._ server ! Lease.Request expectMsg(Lifecycle.TokenRequest) server ! Lifecycle.NewToken(Tokens(1)) val lease = expectLease(Tokens(1)) lease.acknowledge() Thread.sleep(500) lease.acknowledge() Thread.sleep(500) lease.acknowledge() Thread.sleep(500) lease.acknowledge() Thread.sleep(500) lease.release() expectNoMsg(max = 2.seconds) } } } describe ("A server with pool size 1") { implicit val poolSizeRange: PoolSizeRange = 1 to 1 implicit val leaseTimeout: LeaseTimeout = (0.2).seconds it ("should request a new token if the only token is revoked") { withFixture { f => import f._ expectMsg(Lifecycle.TokenRequest) server ! Lifecycle.NewToken(Tokens(1)) server ! 
Lease.Request expectLease(Tokens(1)) expectRevoked(Tokens(1), max = 1.second) expectMsg(Lifecycle.TokenRequest) } } } describe ("A server with pool size 2") { implicit val poolSizeRange: PoolSizeRange = 2 to 2 it ("should refill itself when both of its tokens die") { withFixture { f => import f._ expectMsg(Lifecycle.TokenRequest) server ! Lifecycle.NewToken(Tokens(1)) expectMsg(Lifecycle.TokenRequest) server ! Lifecycle.NewToken(Tokens(2)) Tokens(1).alive = false Tokens(2).alive = false server ! Lease.Request expectDead(Tokens(2)) expectDead(Tokens(1)) expectMsg(Lifecycle.TokenRequest) server ! Lifecycle.NewToken(Tokens(3)) expectLease(Tokens(3)) expectMsg(Lifecycle.TokenRequest) } } } }
chris-martin/aksync
src/test/scala/ServerSpec.scala
Scala
apache-2.0
9,345
// code-examples/ObjectSystem/linearization/linearization2-script.scala

var clist = List[String]()

class C1 {
  clist ::= "C1"
}

trait T1 extends C1 {
  clist ::= "T1"
}

trait T2 extends C1 {
  clist ::= "T2"
}

trait T3 extends C1 {
  clist ::= "T3"
}

class C2 extends T1 with T2 with T3 {
  clist ::= "C2"
}

val c2 = new C2
println(clist.reverse)
XClouded/t4f-core
scala/src/tmp/ObjectSystem/linearization/linearization2-script.scala
Scala
apache-2.0
355
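For reference, the linearization of C2 in the script above is C2, T3, T2, T1, C1 (AnyRef and Any omitted); constructor bodies run in the reverse of that order, so the script should print List(C1, T1, T2, T3, C2).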
package com.nidkil.downloader.validator

import org.scalatest.Matchers
import org.scalatest.FunSpec
import java.io.File
import org.scalatest.Tag

class FileSizeValidatorTest extends FunSpec with Matchers {

  def curDir = new java.io.File(".").getCanonicalPath

  def testFile = new File(curDir, "LICENSE")

  describe("A FileSizeValidator") {

    it("should throw an IllegalArgumentException if the file size is negative", Tag("unit")) {
      intercept[IllegalArgumentException] {
        val validator = new FileSizeValidator(-1L)
      }
    }

    it("should throw an IllegalArgumentException if the file is null", Tag("unit")) {
      intercept[IllegalArgumentException] {
        val validator = new FileSizeValidator(0L)
        validator.validate(null)
      }
    }

    it("should throw an IllegalArgumentException if the file does not exist", Tag("unit")) {
      intercept[IllegalArgumentException] {
        val validator = new FileSizeValidator(0L)
        validator.validate(new File("does_no_exist"))
      }
    }

    it("should return true if the file size matches", Tag("unit")) {
      // File size determined using system properties
      val validator = new FileSizeValidator(11527L)
      assert(validator.validate(testFile))
    }

    it("should return false if the file size does not match", Tag("unit")) {
      val validator = new FileSizeValidator(11500L)
      assert(validator.validate(testFile) == false)
    }
  }
}
nidkil/scala-downloader
src/test/scala/com/nidkil/downloader/validator/FileSizeValidatorTest.scala
Scala
apache-2.0
1,448
package geek.lawsof.physics.init

import geek.lawsof.physics.lib.util.helpers.Log

/**
 * Created by anshuman on 26-05-2014.
 */
object ModRecipies {
  def init() = {
    Log.info("Adding Recipes")
  }
}
GeckoTheGeek42/TheLawsOfPhysics
src/main/scala/geek/lawsof/physics/init/ModRecipies.scala
Scala
mit
204
package gloving import java.net.URI import java.io.{File, FileInputStream, BufferedInputStream, DataInputStream, InputStream} import java.nio.file.Paths import java.nio.charset.StandardCharsets import java.nio.{ByteBuffer,ByteOrder} import java.util.zip.GZIPInputStream import scala.collection.mutable.ArrayBuffer import org.apache.spark.SparkContext import org.apache.spark.sql.SQLContext import org.apache.spark.sql.SaveMode import org.apache.spark.rdd.RDD import org.apache.spark.storage.StorageLevel import org.apache.spark.util.StatCounter import org.apache.spark.mllib.clustering.{KMeans, KMeansModel} import breeze.linalg.DenseVector import org.slf4j.LoggerFactory import com.typesafe.scalalogging.slf4j.Logger import gloving.WordVectorRDD._ class Word2VecWordVectorLoader(val path: URI) extends WordVectorLoader { val SpaceCharacter = 0x20 val NewLineCharacter = 0x0a val MaxString = 2000 //From word2vec source code val WordsPerChunk = 100000 val ReadBufferSize = 1024 * 1024 //Aggressively buffer the input if it's compressed val BytesPerFloat = 4 @transient lazy val logger = Logger(LoggerFactory.getLogger(getClass.getName)) def load(sc: SparkContext): WordVectorRDD = { val name = new File(path.getPath()).getName val inputStream = openVectorFile(path) val words = readWords(inputStream) //It's not practical to try to read all of the word vectors into memory, at least not on //a Mac laptop with a Java heap limit of a few gigs. So process chunks of words at a time, loading them //into a dataframe and then appending each chunk one at a time. val tempDataFrameFile = File.createTempFile(s"$name-working-dataframe", ".tmp") tempDataFrameFile.delete() val tempDataFrameUri = tempDataFrameFile.toURI() logger.info(s"Loading word2vec vector $path into temporary data frame file $tempDataFrameUri") //First, create an empty data frame on disk sc.parallelize(Seq[WordVector]()).save(tempDataFrameUri, SaveMode.Overwrite) var wordCount: Long = 0 words.grouped(WordsPerChunk).foreach { chunk => val chunkSeq = chunk.toSeq sc.parallelize(chunkSeq).save(tempDataFrameUri, SaveMode.Append) wordCount += chunkSeq.length logger.info(s"Processed $wordCount words so far") } //When loading the temp file must repartition since it will be broken up into too many small partitions due to the way //it was constructed val rdd = WordVectorRDD.load(sc, tempDataFrameUri) .repartition(sc.defaultParallelism * 3) rdd.setName(s"$name-wordvectors") tempDataFrameFile.delete() rdd } def readWords(stream: DataInputStream): Iterator[WordVector] = { //The pre-trained word2vec files are in a very strange format. First is an ASCII string representation of the number //of words in the file, followed by a space, and an ASCII string representation of the number of dimensions in each vector, followed by a newline. //After that, it's an array of records, consisting of the ASCII representation of the word, terminated by a space, followed //by binary representations of each vector dimension's value, stored as a single precision floating point. //It's just nutty. 
val numWords = readWord(stream).toInt val numDimensions = readWord(stream).toInt logger.info(s"Reading $numWords word vectors, $numDimensions dimensions per vector") val readBuffer = ByteBuffer.allocate(numDimensions * BytesPerFloat) readBuffer.order(ByteOrder.LITTLE_ENDIAN) for (wordIndex <- (0 until numWords).iterator) yield { val word = readWord(stream) val values = new Array[Double](numDimensions) //Read all of the bytes for this vector into the ByteBuffer first, and then and only then //use ByteBuffer to decode the float values. This is needed because the floats are encoded little-endian //but DataInputStream only handles big-endian encoding readBuffer.rewind() stream.read(readBuffer.array(), readBuffer.arrayOffset(), numDimensions * BytesPerFloat) for (dim <- 0 until numDimensions) { val value = readBuffer.getFloat() values(dim) = value.toDouble } WordVector(wordIndex, word, DenseVector[Double](values)) } } def readWord(stream: DataInputStream): String = { var readBuffer: Array[Byte] = new Array(MaxString) var wordCountStr: String = "" var b: Byte = 0x00 var idx: Int = 0 do { b = stream.readByte() readBuffer(idx) = b idx+=1 } while (b != SpaceCharacter && b != NewLineCharacter) val word = new String(readBuffer, 0, idx-1, StandardCharsets.US_ASCII) logger.debug(s"Read word $word") word } private def openVectorFile(path: URI): DataInputStream = { //If the file is gzip-compressed, run it through a GZIPInputStfream first val rawStream = new FileInputStream(path.toString()) if (path.getPath().endsWith(".gz")) { new DataInputStream(new BufferedInputStream(new GZIPInputStream(rawStream))) } else { new DataInputStream(new BufferedInputStream(rawStream, ReadBufferSize)) } } }
anelson/gloving
src/main/scala/Word2VecWordVectorLoader.scala
Scala
apache-2.0
5,145
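The comments in readWords above describe the binary layout the loader expects: an ASCII header of "<numWords> <numDimensions>\n", then for each word its ASCII token terminated by a space, followed by numDimensions little-endian 4-byte floats. A hedged sketch that writes a two-word file in exactly that layout (the file name and vector values are made up), which can be handy for exercising readWords:

import java.io.{ DataOutputStream, FileOutputStream }
import java.nio.{ ByteBuffer, ByteOrder }
import java.nio.charset.StandardCharsets

object TinyWord2VecWriterSketch extends App {
  val out = new DataOutputStream(new FileOutputStream("tiny-vectors.bin"))
  def ascii(s: String): Unit = out.write(s.getBytes(StandardCharsets.US_ASCII))

  val dims = 3
  ascii(s"2 $dims\n") // header: word count, then dimension count

  Seq("cat" -> Array(0.1f, 0.2f, 0.3f), "dog" -> Array(0.4f, 0.5f, 0.6f)).foreach { case (word, vec) =>
    ascii(word + " ") // word terminated by a single space
    val buf = ByteBuffer.allocate(dims * 4).order(ByteOrder.LITTLE_ENDIAN)
    vec.foreach(v => buf.putFloat(v))
    out.write(buf.array()) // little-endian float32 values, with no trailing newline,
                           // which is what readWords/readWord above expect
  }
  out.close()
}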
package me.shengmin.graph

import org.scalatest._

class FlowNetworkSpec extends FlatSpec with Matchers {
  it should "have maximum network flow value of 3" in {
    val x = NodeReference("x")
    val y = NodeReference("y")
    val a = NodeReference("a")
    val b = NodeReference("b")
    val c = NodeReference("c")
    val d = NodeReference("d")
    val e = NodeReference("e")
    val network = new FlowNetwork(x, y)
      .addEdge(x, a, 3)
      .addEdge(x, b, 1)
      .addEdge(a, c, 3)
      .addEdge(b, c, 5)
      .addEdge(b, d, 4)
      .addEdge(c, y, 2)
      .addEdge(d, e, 2)
      .addEdge(e, y, 3)

    network.findMaximumFlow() should be (3)

    val nodes = network.nodes
    nodes.size should be (7)
    nodes(x).edges(nodes(a)).capacity should be (1)
    nodes(x).edges(nodes(b)).capacity should be (0)
    nodes(a).edges(nodes(c)).capacity should be (1)
    nodes(b).edges(nodes(c)).capacity should be (5)
    nodes(b).edges(nodes(d)).capacity should be (3)
    nodes(c).edges(nodes(y)).capacity should be (0)
    nodes(d).edges(nodes(e)).capacity should be (1)
    nodes(e).edges(nodes(y)).capacity should be (2)
  }
}
shengmin/coding-problem
common/src/test/scala/me/shengmin/graph/FlowNetworkSpec.scala
Scala
mit
1,135
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.cloudml.zen.ml.semiSupervised import java.util.concurrent.ConcurrentLinkedQueue import breeze.linalg._ import breeze.numerics._ import com.github.cloudml.zen.ml.semiSupervised.GLDADefines._ import com.github.cloudml.zen.ml.util.Concurrent._ import com.github.cloudml.zen.ml.util._ import org.apache.hadoop.fs.Path import org.apache.spark._ import org.apache.spark.broadcast.Broadcast import org.apache.spark.rdd.RDD import org.apache.spark.storage.StorageLevel import scala.collection.JavaConverters._ import scala.collection.concurrent.TrieMap import scala.collection.mutable import scala.concurrent.Future class GLDA(@transient var dataBlocks: RDD[(Int, DataBlock)], @transient var paraBlocks: RDD[(Int, ParaBlock)], val numTopics: Int, val numGroups: Int, val numThreads: Int, val numTerms: Int, val numDocs: Long, val numTokens: Long, val params: HyperParams, var algo: GLDATrainer, var storageLevel: StorageLevel) extends Serializable { @transient var globalVarsBc: Broadcast[GlobalVars] = _ @transient var extraVarsBc: Broadcast[ExtraVars] = _ @transient lazy val seed = new XORShiftRandom().nextInt() @transient var dataCpFile: String = _ @transient var paraCpFile: String = _ @inline def scContext: SparkContext = dataBlocks.context @inline def scConf: SparkConf = scContext.getConf def init(): GLDA = { val initParaBlocks = algo.updateParaBlocks(dataBlocks, paraBlocks) initParaBlocks.persist(storageLevel).setName("ParaBlocks-0") initParaBlocks.count() paraBlocks.unpersist(blocking=false) paraBlocks = initParaBlocks this } def fit(totalIter: Int): Unit = { val evalMetrics = scConf.get(cs_evalMetric).split(raw"\\+") val toEval = !evalMetrics.contains("none") val saveIntv = scConf.get(cs_saveInterval).toInt if (toEval) { println("Before Gibbs sampling:") globalVarsBc = scContext.broadcast(algo.collectGlobalVariables(dataBlocks, params, numTerms)) GLDAMetrics(this, evalMetrics).foreach(_.output(println)) globalVarsBc.unpersist(blocking=false) } val burninIter = scConf.get(cs_burninIter).toInt val chkptIntv = scConf.get(cs_chkptInterval).toInt val canChkpt = chkptIntv > 0 && scContext.getCheckpointDir.isDefined for (iter <- 1 to totalIter) { println(s"\\nStart Gibbs sampling (Iteration $iter/$totalIter)") val startedAt = System.nanoTime val needChkpt = canChkpt && iter % chkptIntv == 1 val globalVars = algo.collectGlobalVariables(dataBlocks, params, numTerms) val GlobalVars(piGK, sigGW, nK, dG) = globalVars assert(sum(convert(nK, Long)) == numTokens && sum(dG) == numDocs) globalVarsBc = scContext.broadcast(globalVars) extraVarsBc = scContext.broadcast(calcExtraVars(piGK, sigGW)) fitIteration(iter, burninIter, needChkpt) if (toEval) { GLDAMetrics(this, evalMetrics).foreach(_.output(println)) } 
globalVarsBc.unpersist(blocking=false) extraVarsBc.unpersist(blocking=false) if (saveIntv > 0 && iter % saveIntv == 0 && iter < totalIter) { val model = toGLDAModel val savPath = new Path(scConf.get(cs_outputpath) + s"-iter$iter") val fs = SparkUtils.getFileSystem(scConf, savPath) fs.delete(savPath, true) model.save(scContext, savPath.toString) println(s"Model saved after Iteration $iter") } val elapsedSeconds = (System.nanoTime - startedAt) / 1e9 println(s"End Gibbs sampling (Iteration $iter/$totalIter) takes total: $elapsedSeconds secs") } } def fitIteration(sampIter: Int, burninIter: Int, needChkpt: Boolean): Unit = { val startedAt = System.nanoTime val shippeds = algo.ShipParaBlocks(dataBlocks, paraBlocks) val newDataBlocks = algo.SampleNGroup(dataBlocks, shippeds, globalVarsBc, extraVarsBc, params, seed, sampIter, burninIter) newDataBlocks.persist(storageLevel).setName(s"DataBlocks-$sampIter") if (needChkpt) { newDataBlocks.checkpoint() newDataBlocks.count() } val newParaBlocks = algo.updateParaBlocks(newDataBlocks, paraBlocks) newParaBlocks.persist(storageLevel).setName(s"ParaBlocks-$sampIter") if (needChkpt) { newParaBlocks.checkpoint() } newParaBlocks.count() dataBlocks.unpersist(blocking=false) paraBlocks.unpersist(blocking=false) dataBlocks = newDataBlocks paraBlocks = newParaBlocks if (needChkpt) { if (dataCpFile != null && paraCpFile != null) { SparkUtils.deleteChkptDirs(scConf, Array(dataCpFile, paraCpFile)) } dataCpFile = newDataBlocks.getCheckpointFile.get paraCpFile = newParaBlocks.getCheckpointFile.get } val elapsedSeconds = (System.nanoTime - startedAt) / 1e9 println(s"Sampling & grouping & updating paras $sampIter takes: $elapsedSeconds secs") } def calcExtraVars(piGK: DenseMatrix[Float], sigGW: DenseMatrix[Float]): ExtraVars = { val lnPiGK = DenseMatrix.zeros[Float](numGroups, numTopics) val lnSigGW = DenseMatrix.zeros[Float](numGroups, numTopics) Range(0, numGroups).par.foreach { g => lnPiGK(g, ::) := log(piGK(g, ::)) lnSigGW(g, ::) := log(sigGW(g, ::)) } ExtraVars(lnPiGK, lnSigGW) } def toGLDAModel: DistributedGLDAModel = { val termTopicsRDD = paraBlocks.mapPartitions(_.flatMap { case (_, ParaBlock(routes, index, attrs)) => val totalSize = attrs.length val results = new Array[Vector[Int]](totalSize) val sizePerthrd = { val npt = totalSize / numThreads if (npt * numThreads == totalSize) npt else npt + 1 } implicit val es = initExecutionContext(numThreads) val allDecomp = Range(0, numThreads).map(thid => withFuture { val decomp = new BVDecompressor(numTopics) val posN = math.min(sizePerthrd * (thid + 1), totalSize) var pos = sizePerthrd * thid while (pos < posN) { results(pos) = decomp.CV2BV(attrs(pos)) pos += 1 } }) withAwaitReadyAndClose(Future.sequence(allDecomp)) index.iterator.map { case (termId, termIdx) => (termId, results(termIdx)) } }, preservesPartitioning=true) termTopicsRDD.persist(storageLevel) new DistributedGLDAModel(termTopicsRDD, numTopics, numGroups, numTerms, params, storageLevel) } } object GLDA { def apply(corpus: (RDD[(Int, DataBlock)], RDD[(Int, ParaBlock)]), numTopics: Int, numGroups: Int, numThreads: Int, params: HyperParams, storageLevel: StorageLevel): GLDA = { val (dataBlocks, paraBlocks) = corpus val numTerms = paraBlocks.map(_._2.index.keySet.max).max() + 1 val activeTerms = paraBlocks.map(_._2.attrs.length).reduce(_ + _) println(s"terms in the corpus: $numTerms, $activeTerms of which are active") val numDocs = dataBlocks.map(_._2.DocRecs.length.toLong).reduce(_ + _) println(s"docs in the corpus: $numDocs") val numTokens = 
dataBlocks.mapPartitions(_.map { dbp => val docRecs = dbp._2.DocRecs val totalDocSize = docRecs.length val sizePerThrd = { val npt = totalDocSize / numThreads if (npt * numThreads == totalDocSize) npt else npt + 1 } implicit val es = initExecutionContext(numThreads) val allToken = Range(0, numThreads).map(thid => withFuture { var numTokensThrd = 0L val posN = math.min(sizePerThrd * (thid + 1), totalDocSize) var pos = sizePerThrd * thid while (pos < posN) { val docData = docRecs(pos).docData var i = 0 while (i < docData.length) { val ind = docData(i) if (ind >= 0) { numTokensThrd += 1 i += 2 } else { numTokensThrd += -ind i += 2 - ind } } pos += 1 } numTokensThrd }) withAwaitResultAndClose(Future.reduce(allToken)(_ + _)) }).reduce(_ + _) println(s"tokens in the corpus: $numTokens") val algo = new GLDATrainer(numTopics, numGroups, numThreads) val glda = new GLDA(dataBlocks, paraBlocks, numTopics, numGroups, numThreads, numTerms, numDocs, numTokens, params, algo, storageLevel) glda.init() } def initCorpus(rawDocsRDD: RDD[String], numTopics: Int, numGroups: Int, numThreads: Int, labelsRate: Float, storageLevel: StorageLevel): (RDD[(Int, DataBlock)], RDD[(Int, ParaBlock)]) = { val bowDocsRDD = GLDA.parseRawDocs(rawDocsRDD, numGroups, numThreads, labelsRate) initCorpus(bowDocsRDD, numTopics, numThreads, storageLevel) } def initCorpus(bowDocsRDD: RDD[DocBow], numTopics: Int, numThreads: Int, storageLevel: StorageLevel): (RDD[(Int, DataBlock)], RDD[(Int, ParaBlock)]) = { val dataBlocks = GLDA.convertBowDocs(bowDocsRDD, numTopics, numThreads) dataBlocks.persist(storageLevel).setName("DataBlocks-0") val paraBlocks = GLDA.buildParaBlocks(dataBlocks) (dataBlocks, paraBlocks) } def parseRawDocs(rawDocsRDD: RDD[String], numGroups: Int, numThreads: Int, labelsRate: Float): RDD[DocBow] = { rawDocsRDD.mapPartitions { iter => val docs = iter.toArray val totalDocSize = docs.length val docBows = new Array[DocBow](totalDocSize) val sizePerThrd = { val npt = totalDocSize / numThreads if (npt * numThreads == totalDocSize) npt else npt + 1 } implicit val es = initExecutionContext(numThreads) val allParsing = Range(0, numThreads).map(thid => withFuture { val gen = new XORShiftRandom(System.nanoTime * numThreads + thid) val posN = math.min(sizePerThrd * (thid + 1), totalDocSize) var pos = sizePerThrd * thid while (pos < posN) { val line = docs(pos) val fields = line.split(raw"\\t|\\s+").view val docInfo = fields.head.split(":") val docId = docInfo(0).toLong val docGrp = if (docInfo.length > 1 && gen.nextFloat() < labelsRate) { docInfo(1).toInt | 0x10000 } else { gen.nextInt(numGroups) } val docTerms = SparseVector.zeros[Int](Int.MaxValue) fields.tail.foreach { field => val pair = field.split(":") val termId = pair(0).toInt var termCnt = if (pair.length > 1) pair(1).toInt else 1 if (termCnt > 0) { docTerms(termId) += termCnt } } docBows(pos) = DocBow(docId, docGrp, docTerms) pos += 1 } }) withAwaitReadyAndClose(Future.sequence(allParsing)) docBows.iterator } } def convertBowDocs(bowDocsRDD: RDD[DocBow], numTopics: Int, numThreads: Int): RDD[(Int, DataBlock)] = { val numParts = bowDocsRDD.partitions.length bowDocsRDD.mapPartitionsWithIndex { (pid, iter) => val docs = iter.toArray val totalDocSize = docs.length val docRecs = new Array[DocRec](totalDocSize) val termSet = new TrieMap[Int, Null]() val sizePerThrd = { val npt = totalDocSize / numThreads if (npt * numThreads == totalDocSize) npt else npt + 1 } implicit val es = initExecutionContext(numThreads) val allConv = Range(0, numThreads).map(thid => withFuture { val 
gen = new XORShiftRandom(System.nanoTime * numThreads + thid) val posN = math.min(sizePerThrd * (thid + 1), totalDocSize) var pos = sizePerThrd * thid while (pos < posN) { val DocBow(docId, docGrp, docTerms) = docs(pos) val docData = new mutable.ArrayBuffer[Int]() docTerms.activeIterator.foreach { case (termId, termCnt) => if (termCnt == 1) { docData += termId docData += gen.nextInt(numTopics) } else if (termCnt > 1) { docData += -termCnt docData += termId var c = 0 while (c < termCnt) { docData += gen.nextInt(numTopics) c += 1 } } termSet.putIfAbsent(termId, null) } docRecs(pos) = DocRec(docId, IntWrapper(docGrp), docData.toArray) pos += 1 } }) withAwaitReady(Future.sequence(allConv)) val localTerms = termSet.keys.toArray val numLocalTerms = localTerms.length val l2g = new Array[Int](numLocalTerms) val g2l = new mutable.HashMap[Int, Int]() val tqs = Array.fill(numLocalTerms)(new ConcurrentLinkedQueue[(Int, Int)]()) for ((termId, localIdx) <- localTerms.iterator.zipWithIndex) { l2g(localIdx) = termId g2l(termId) = localIdx } val allInvIdx = Range(0, numThreads).map(thid => withFuture { val posN = math.min(sizePerThrd * (thid + 1), totalDocSize) var pos = sizePerThrd * thid while (pos < posN) { val docData = docRecs(pos).docData var i = 0 while (i < docData.length) { val ind = docData(i) if (ind >= 0) { val termIdx = g2l(ind) docData(i) = termIdx tqs(termIdx).add((pos, i)) i += 2 } else { val termIdx = g2l(docData(i + 1)) docData(i + 1) = termIdx tqs(termIdx).add((pos, i)) i += 2 - ind } } pos += 1 } }) withAwaitReadyAndClose(Future.sequence(allInvIdx)) val termRecs = new Array[TermRec](numLocalTerms) Range(0, numLocalTerms).par.foreach { li => termRecs(li) = TermRec(l2g(li), tqs(li).asScala.flatMap(t => Iterator(t._1, t._2)).toArray) } Iterator.single((pid, DataBlock(termRecs, docRecs))) }.partitionBy(new HashPartitioner(numParts)) } def buildParaBlocks(dataBlocks: RDD[(Int, DataBlock)]): RDD[(Int, ParaBlock)] = { val numParts = dataBlocks.partitions.length val routesRdd = dataBlocks.mapPartitions(_.flatMap { case (pid, db) => db.termRecs.iterator.map(tr => (tr.termId, pid)) }).partitionBy(new HashPartitioner(numParts)) routesRdd.mapPartitionsWithIndex((pid, iter) => { val routes = Array.fill(numParts)(new mutable.ArrayBuffer[Int]()) var cnt = 0 val index = new mutable.HashMap[Int, Int]() iter.foreach { case (termId, termPid) => routes(termPid) += termId index.getOrElseUpdate(termId, { cnt += 1 cnt - 1 }) } val attrs = new Array[CompressedVector](cnt) Iterator.single((pid, ParaBlock(routes.map(_.toArray), index, attrs))) }, preservesPartitioning=true) } }
bhoppi/zen
ml/src/main/scala/com/github/cloudml/zen/ml/semiSupervised/GLDA.scala
Scala
apache-2.0
15,488
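convertBowDocs above packs each document into a flat Int array: a term occurring once contributes the pair (termIndex, topic), while a term occurring n > 1 times contributes (-n, termIndex, topic_1 .. topic_n); the token-counting loop in GLDA.apply decodes the same layout. A small self-contained decoder showing that encoding with made-up values:

object DocDataLayoutSketch extends App {
  // Term 7 occurs once (topic 3); term 12 occurs three times (topics 1, 4, 2).
  val docData = Array(7, 3, -3, 12, 1, 4, 2)

  var tokens = 0
  var i = 0
  while (i < docData.length) {
    val ind = docData(i)
    if (ind >= 0) { tokens += 1; i += 2 }  // single occurrence: (term, topic)
    else { tokens += -ind; i += 2 - ind }  // run: (-count, term, count topics)
  }
  println(tokens) // 4
}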
package scala.models

import io.apibuilder.generator.v0.models.InvocationForm
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers

class ExampleUnionTypesWithDiscriminatorSpec extends AnyFunSpec with Matchers {

  private lazy val service = models.TestHelper.parseFile(s"/examples/apidoc-example-union-types-discriminator.json")

  it("generates expected code for play 2.4 client") {
    Play24ClientGenerator.invoke(InvocationForm(service = service)) match {
      case Left(errors) => fail(errors.mkString(", "))
      case Right(sourceFiles) => {
        sourceFiles.size shouldBe 1
        models.TestHelper.assertEqualsFile("/union-types-discriminator-service-play-24.txt", sourceFiles.head.contents)
      }
    }
  }

  it("generates expected code for play 2.7 client") {
    Play27ClientGenerator.invoke(InvocationForm(service = service)) match {
      case Left(errors) => fail(errors.mkString(", "))
      case Right(sourceFiles) => {
        sourceFiles.size shouldBe 1
        models.TestHelper.assertEqualsFile("/union-types-discriminator-service-play-27.txt", sourceFiles.head.contents)
      }
    }
  }
}
gheine/apidoc-generator
scala-generator/src/test/scala/models/ExampleUnionTypesWithDiscriminatorSpec.scala
Scala
mit
1,156
package org.jetbrains.plugins.scala package lang package resolve package processor import com.intellij.openapi.progress.ProgressManager import com.intellij.openapi.util.Key import com.intellij.psi._ import com.intellij.psi.scope._ import org.jetbrains.plugins.scala.extensions._ import org.jetbrains.plugins.scala.lang.psi.api._ import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeProjection import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAlias import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.usages.ImportUsed import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTemplateDefinition} import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.{ScSyntheticFunction, SyntheticClasses} import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef.TypeDefinitionMembers import org.jetbrains.plugins.scala.lang.psi.types._ import org.jetbrains.plugins.scala.lang.psi.types.api._ import org.jetbrains.plugins.scala.lang.psi.types.api.designator.{ScDesignatorType, ScProjectionType, ScThisType} import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypeResult, TypingContext} import org.jetbrains.plugins.scala.lang.resolve.processor.PrecedenceHelper.PrecedenceTypes import org.jetbrains.plugins.scala.project.ProjectContext import scala.collection.{Set, mutable} object BaseProcessor { def unapply(p: BaseProcessor) = Some(p.kinds) val boundClassKey: Key[PsiClass] = Key.create("bound.class.key") val FROM_TYPE_KEY: Key[ScType] = Key.create("from.type.key") val UNRESOLVED_TYPE_PARAMETERS_KEY: Key[Seq[TypeParameter]] = Key.create("unresolved.type.parameters.key") val COMPOUND_TYPE_THIS_TYPE_KEY: Key[Option[ScType]] = Key.create("compound.type.this.type.key") val FORWARD_REFERENCE_KEY: Key[java.lang.Boolean] = Key.create("forward.reference.key") def isImplicitProcessor(processor: PsiScopeProcessor): Boolean = { processor match { case b: BaseProcessor => b.isImplicitProcessor case _ => false } } } abstract class BaseProcessor(val kinds: Set[ResolveTargets.Value]) (implicit val projectContext: ProjectContext) extends PsiScopeProcessor { protected val candidatesSet: mutable.HashSet[ScalaResolveResult] = new mutable.HashSet[ScalaResolveResult] def isImplicitProcessor: Boolean = false def changedLevel: Boolean = true private var knownPriority: Option[Int] = None def definePriority(p: Int)(body: => Unit) { val oldPriority = knownPriority knownPriority = Some(p) try { body } finally { knownPriority = oldPriority } } def isPredefPriority: Boolean = knownPriority.contains(PrecedenceTypes.SCALA_PREDEF) def specialPriority: Option[Int] = knownPriority protected var accessibility = true def doNotCheckAccessibility() {accessibility = false} def rrcandidates: Array[ResolveResult] = { val set = candidatesS val size = set.size val res = JavaArrayFactoryUtil.ResolveResultFactory.create(size) if (size == 0) return res val iter = set.iterator var count = 0 while (iter.hasNext) { val next = iter.next() res(count) = next count += 1 } res } def candidates: Array[ScalaResolveResult] = { val set = candidatesS val size = set.size val res = JavaArrayFactoryUtil.ScalaResolveResultFactory.create(size) if (size == 0) return res val iter = set.iterator var count = 0 while (iter.hasNext) { val next = iter.next() res(count) = next count += 
1 } res } def candidatesS: Set[ScalaResolveResult] = candidatesSet //todo: fix this ugly performance improvement private var classKind = true def setClassKind(classKind: Boolean) { this.classKind = classKind } def getClassKind: Boolean = { classKind && getClassKindInner } def getClassKindInner: Boolean = { (kinds contains ResolveTargets.CLASS) || (kinds contains ResolveTargets.OBJECT) || (kinds contains ResolveTargets.METHOD) } //java compatibility object MyElementClassHint extends ElementClassHint { import com.intellij.psi.scope.ElementClassHint.DeclarationKind def shouldProcess(kind: DeclarationKind): Boolean = { kind match { case null => true case DeclarationKind.PACKAGE => kinds contains ResolveTargets.PACKAGE case DeclarationKind.CLASS if classKind => (kinds contains ResolveTargets.CLASS) || (kinds contains ResolveTargets.OBJECT) || (kinds contains ResolveTargets.METHOD) //case classes get 'apply' generated case DeclarationKind.VARIABLE => (kinds contains ResolveTargets.VAR) || (kinds contains ResolveTargets.VAL) case DeclarationKind.FIELD => (kinds contains ResolveTargets.VAR) || (kinds contains ResolveTargets.VAL) case DeclarationKind.METHOD => kinds contains ResolveTargets.METHOD case _ => false } } } def getHint[T](hintKey: Key[T]): T = { hintKey match { case ElementClassHint.KEY => MyElementClassHint.asInstanceOf[T] case _ => null.asInstanceOf[T] } } def handleEvent(event: PsiScopeProcessor.Event, associated: Object) {} protected def kindMatches(element: PsiElement): Boolean = ResolveUtils.kindMatches(element, kinds) def processType(t: ScType, place: PsiElement, state: ResolveState = ResolveState.initial(), updateWithProjectionSubst: Boolean = true, //todo ugly recursion breakers, maybe we need general for type? What about performance? visitedProjections: Set[PsiNamedElement] = Set.empty, visitedTypeParameter: Set[TypeParameterType] = Set.empty): Boolean = { ProgressManager.checkCanceled() t match { case ScDesignatorType(clazz: PsiClass) if clazz.qualifiedName == "java.lang.String" => val plusMethod: ScType => ScSyntheticFunction = SyntheticClasses.get(place.getProject).stringPlusMethod if (plusMethod != null) execute(plusMethod(t), state) //add + method case _ => } t match { case ScThisType(clazz) => if (clazz.selfType.isEmpty) { processElement(clazz, ScSubstitutor.empty, place, state, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) } else { val selfType = clazz.selfType.get val clazzType: ScType = clazz.getTypeWithProjections(TypingContext.empty).getOrElse(return true) if (selfType == ScThisType(clazz)) { //to prevent SOE, let's process Element processElement(clazz, ScSubstitutor.empty, place, state, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) } else if (selfType.conforms(clazzType)) { processType(selfType, place, state.put(BaseProcessor.COMPOUND_TYPE_THIS_TYPE_KEY, Some(t)). 
put(ScSubstitutor.key, ScSubstitutor(ScThisType(clazz))), visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) } else if (clazzType.conforms(selfType)) { processElement(clazz, ScSubstitutor.empty, place, state, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) } else { processType(clazz.selfType.map(_.glb(clazzType)).get, place, state.put(BaseProcessor.COMPOUND_TYPE_THIS_TYPE_KEY, Some(t)), visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) } } case d@ScDesignatorType(e: PsiClass) if d.asInstanceOf[ScDesignatorType].isStatic && !e.isInstanceOf[ScTemplateDefinition] => //not scala from scala var break = true for (method <- e.getMethods if break && method.hasModifierProperty("static")) { if (!execute(method, state)) break = false } for (cl <- e.getInnerClasses if break && cl.hasModifierProperty("static")) { if (!execute(cl, state)) break = false } for (field <- e.getFields if break && field.hasModifierProperty("static")) { if (!execute(field, state)) break = false } if (!break) return false TypeDefinitionMembers.processEnum(e, execute(_, state)) case ScDesignatorType(o: ScObject) => processElement(o, ScSubstitutor.empty, place, state, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) case ScDesignatorType(e: ScTypedDefinition) if place.isInstanceOf[ScTypeProjection] => val result: TypeResult[ScType] = e match { case p: ScParameter => p.getRealParameterType(TypingContext.empty) case _ => e.getType(TypingContext.empty) } result match { case Success(tp, _) => processType(tp, place, state, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) case _ => true } case ScDesignatorType(e) => processElement(e, ScSubstitutor.empty, place, state, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) case TypeParameterType(Nil, _, upper, _) => processType(upper, place, state, updateWithProjectionSubst = false, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) case j: JavaArrayType => implicit val elementScope = place.elementScope processType(j.getParameterizedType.getOrElse(return true), place, state, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) case p@ParameterizedType(designator, typeArgs) => designator match { case tpt@TypeParameterType(_, _, upper, _) => if (visitedTypeParameter.contains(tpt)) return true processType(p.substitutor.subst(ParameterizedType(upper, typeArgs)), place, state.put(ScSubstitutor.key, ScSubstitutor(p)), visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter + tpt) case _ => p.extractDesignatedType(expandAliases = false) match { case Some((designator, subst)) => processElement(designator, subst, place, state, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) case None => true } } case proj@ScProjectionType(_, _, _) if proj.actualElement.isInstanceOf[ScTypeAlias] => val ta = proj.actualElement.asInstanceOf[ScTypeAlias] val subst = proj.actualSubst val upper = ta.upperBound.getOrElse(return true) processType(subst.subst(upper), place, state.put(ScSubstitutor.key, ScSubstitutor.empty), visitedProjections = visitedProjections + ta, visitedTypeParameter = visitedTypeParameter) case proj@ScProjectionType(_, _, _) => val s: ScSubstitutor = if (updateWithProjectionSubst) ScSubstitutor(proj) followed proj.actualSubst else proj.actualSubst val 
actualElement = proj.actualElement processElement(actualElement, s, place, state, visitedProjections = visitedProjections + actualElement, visitedTypeParameter = visitedTypeParameter) case StdType(name, tSuper) => SyntheticClasses.get(place.getProject).byName(name) match { case Some(c) => if (!c.processDeclarations(this, state, null, place) || !(tSuper match { case Some(ts) => processType(ts, place, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) case _ => true })) return false case None => //nothing to do } val scope = place.resolveScope val obj: PsiClass = ScalaPsiManager.instance(place.getProject).getCachedClass(scope, "java.lang.Object").orNull if (obj != null) { val namesSet = Set("hashCode", "toString", "equals", "getClass") val methods = obj.getMethods.iterator while (methods.hasNext) { val method = methods.next() if (name == "AnyRef" || namesSet.contains(method.name)) { if (!execute(method, state)) return false } } } true case comp@ScCompoundType(_, _, _) => TypeDefinitionMembers.processDeclarations(comp, this, state, null, place) case ex: ScExistentialType => processType(ex.quantified, place, state.put(ScSubstitutor.key, ScSubstitutor.empty), visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) case ScExistentialArgument(_, _, _, upper) => processType(upper, place, state, updateWithProjectionSubst, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) case _ => true } } private def processElement(e: PsiNamedElement, s: ScSubstitutor, place: PsiElement, state: ResolveState, visitedProjections: Set[PsiNamedElement], visitedTypeParameter: Set[TypeParameterType]): Boolean = { val subst = state.get(ScSubstitutor.key) val compound = state.get(BaseProcessor.COMPOUND_TYPE_THIS_TYPE_KEY) //todo: looks like ugly workaround val newSubst = compound match { case Some(_) => subst case _ => if (subst != null) subst followed s else s } e match { case ta: ScTypeAlias => if (visitedProjections.contains(ta)) return true processType(s.subst(ta.upperBound.getOrAny), place, state.put(ScSubstitutor.key, ScSubstitutor.empty), visitedProjections = visitedProjections + ta, visitedTypeParameter = visitedTypeParameter) //need to process scala way case clazz: PsiClass => TypeDefinitionMembers.processDeclarations(clazz, BaseProcessor.this, state.put(ScSubstitutor.key, newSubst), null, place) case des: ScTypedDefinition => val typeResult: TypeResult[ScType] = des match { case p: ScParameter => p.getRealParameterType(TypingContext.empty) case _ => des.getType(TypingContext.empty) } typeResult match { case Success(tp, _) => processType(newSubst subst tp, place, state.put(ScSubstitutor.key, ScSubstitutor.empty), updateWithProjectionSubst = false, visitedProjections = visitedProjections, visitedTypeParameter = visitedTypeParameter) case _ => true } case pack: ScPackage => pack.processDeclarations(BaseProcessor.this, state.put(ScSubstitutor.key, newSubst), null, place) case des => des.processDeclarations(BaseProcessor.this, state.put(ScSubstitutor.key, newSubst), null, place) } } protected def getSubst(state: ResolveState): ScSubstitutor = { val subst: ScSubstitutor = state.get(ScSubstitutor.key) if (subst == null) ScSubstitutor.empty else subst } protected def getImports(state: ResolveState): Set[ImportUsed] = { val used = state.get(ImportUsed.key) if (used == null) Set[ImportUsed]() else used } protected def getBoundClass(state: ResolveState): PsiClass = { state.get(BaseProcessor.boundClassKey) } protected def 
getFromType(state: ResolveState): Option[ScType] = { state.get(BaseProcessor.FROM_TYPE_KEY).toOption } protected def isForwardReference(state: ResolveState): Boolean = { val res: java.lang.Boolean = state.get(BaseProcessor.FORWARD_REFERENCE_KEY) if (res != null) res else false } }
loskutov/intellij-scala
src/org/jetbrains/plugins/scala/lang/resolve/processor/BaseProcessor.scala
Scala
apache-2.0
15,799
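The processType/processElement pair in BaseProcessor above threads visitedProjections and visitedTypeParameter sets through every recursive call so that a type alias or type parameter whose bound refers back to itself cannot send resolution into infinite recursion. A minimal, self-contained sketch of that visited-set recursion-breaker pattern; Alias and widen are hypothetical stand-ins, not part of the plugin API:

object RecursionBreakerSketch {
  // Hypothetical stand-in for a type alias whose upper bound may refer back to itself.
  final class Alias(val name: String, upper: => Alias) {
    def upperBound: Alias = upper
  }

  // Walk the chain of upper bounds, carrying the set of aliases already visited;
  // bailing out on a revisit is what keeps a cyclic bound from overflowing the stack.
  def widen(a: Alias, visited: Set[Alias] = Set.empty): List[String] =
    if (visited.contains(a)) Nil
    else a.name :: widen(a.upperBound, visited + a)

  lazy val a: Alias = new Alias("A", b)
  lazy val b: Alias = new Alias("B", a)

  def main(args: Array[String]): Unit =
    println(widen(a)) // List(A, B): terminates despite the A <-> B cycle
}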
/*
 * Copyright 2014–2018 SlamData Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package quasar.metastore

import slamdata.Predef._

import java.sql.SQLException

/** Thrown when doobie applies a custom mapping to a value which should never
  * appear in the metastore DB.
  */
final class UnexpectedValueException(msg: String) extends SQLException(msg)
jedesah/Quasar
core/src/main/scala/quasar/metastore/UnexpectedValueException.scala
Scala
apache-2.0
871
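UnexpectedValueException above exists only to signal that a custom column mapping met a value the metastore schema should never contain. A hedged sketch of the kind of decoder that would raise it; Status and decodeStatus are illustrative names, not part of Quasar:

import quasar.metastore.UnexpectedValueException

object UnexpectedValueSketch {
  // Illustrative only: a decoded column with a closed set of legal values.
  sealed trait Status
  case object Ok     extends Status
  case object Failed extends Status

  // A custom mapping funnels any value outside the legal set into the exception above,
  // so it surfaces through the JDBC machinery as a SQLException.
  def decodeStatus(raw: String): Status = raw match {
    case "ok"     => Ok
    case "failed" => Failed
    case other    => throw new UnexpectedValueException(s"unexpected metastore status: $other")
  }
}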
/*
 * Copyright 2009 Twitter, Inc.
 * Copyright 2009 Robey Pointer <[email protected]>
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package net.lag.kestrel

import java.io.FileOutputStream
import scala.collection.mutable
import com.twitter.logging.Logger

/**
 * Pack one or more journal files into a single new file that only consists of the queue's current
 * contents, as of the end of the last journal file processed.
 */
class JournalPacker(filenames: Seq[String], newFilename: String) {
  private val log = Logger.get

  val journals = filenames.map { filename => new Journal(filename, false) }
  val remover = journals.map { _.walk() }.iterator.flatten
  val adder = journals.map { _.walk() }.iterator.flatten
  val writer = new FileOutputStream(newFilename, false).getChannel

  val adderStack = new mutable.ListBuffer[QItem]
  val openTransactions = new mutable.HashMap[Int, QItem]
  var currentXid = 0

  var offset = 0L
  var adderOffset = 0L
  var lastUpdate = 0L
  var lastAdderUpdate = 0L
  private var statusCallback: ((Long, Long) => Unit) = (_, _) => ()

  private def advanceAdder(): Option[QItem] = {
    if (!adderStack.isEmpty) {
      Some(adderStack.remove(0))
    } else {
      if (!adder.hasNext) {
        None
      } else {
        val (item, itemsize) = adder.next()
        adderOffset += itemsize
        if (adderOffset - lastAdderUpdate > 1024 * 1024) {
          statusCallback(offset, adderOffset)
          lastAdderUpdate = adderOffset
        }
        item match {
          case JournalItem.Add(qitem) => Some(qitem)
          case _ => advanceAdder()
        }
      }
    }
  }

  def apply(statusCallback: (Long, Long) => Unit) = {
    this.statusCallback = statusCallback

    for ((item, itemsize) <- remover) {
      item match {
        case JournalItem.Add(qitem) =>
        case JournalItem.Continue(qitem, xid) =>
          openTransactions -= xid
        case JournalItem.Remove =>
          advanceAdder().get
        case JournalItem.RemoveTentative =>
          do {
            currentXid += 1
          } while (openTransactions contains currentXid)
          val qitem = advanceAdder().get
          qitem.xid = currentXid
          openTransactions(currentXid) = qitem
        case JournalItem.SavedXid(xid) =>
          currentXid = xid
        case JournalItem.Unremove(xid) =>
          adderStack prepend openTransactions.remove(xid).get
        case JournalItem.ConfirmRemove(xid) =>
          openTransactions -= xid
      }
      offset += itemsize
      if (offset - lastUpdate > 1024 * 1024) {
        statusCallback(offset, adderOffset)
        lastUpdate = offset
      }
    }

    // now write the new journal.
    statusCallback(0, 0)
    val remaining = new Iterable[QItem] {
      def iterator = new tools.PythonIterator[QItem] {
        def apply() = advanceAdder()
      }
    }
    val out = new Journal(newFilename, false)
    out.open()
    out.dump(currentXid, openTransactions.values.toList, remaining)
    out.close()
    out
  }
}
kmiku7/gizzard
src/main/scala/net/lag/kestrel/JournalPacker.scala
Scala
apache-2.0
3,539
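JournalPacker's public surface is the constructor plus apply, which takes a progress callback and returns the newly written journal. A usage sketch assuming the rotated journal files already exist on disk; the file names are placeholders:

import net.lag.kestrel.JournalPacker

object PackerSketch extends App {
  // Placeholder file names; a real caller passes the queue's rotated journals in order.
  val packer = new JournalPacker(Seq("work.1", "work.2", "work.3"), "work.packed")

  // apply replays the old journals and writes the packed one, reporting progress
  // as (removerOffset, adderOffset) byte counts roughly once per megabyte.
  val packed = packer { (offset, adderOffset) =>
    println(s"packing: remover at $offset bytes, adder at $adderOffset bytes")
  }
  // `packed` is the freshly written Journal backing "work.packed".
}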
/* * Copyright © 2016 - 2019 Schlichtherle IT Services * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package global.namespace.neuron.di.guice.java import com.google.inject._ import com.google.inject.binder.{AnnotatedBindingBuilder, AnnotatedConstantBindingBuilder, ConstantBindingBuilder, ScopedBindingBuilder} import com.google.inject.name.Names.named import global.namespace.neuron.di.guice.java.BinderLikeSpec._ import global.namespace.neuron.di.java.Neuron import org.mockito.ArgumentCaptor import org.mockito.ArgumentMatchers._ import org.mockito.Mockito._ import org.scalatest.matchers.should.Matchers._ import org.scalatest.wordspec.AnyWordSpec import org.scalatestplus.mockito.MockitoSugar.mock import scala.reflect._ class BinderLikeSpec extends AnyWordSpec { val binderLike: BinderLike = new BinderLike { val binder: Binder = mock[Binder] when(binder skipSources any[Class[_]]) thenReturn binder } import binderLike.binder "A BinderLike" should afterWord("bind a") { "named constant" in { val builder1 = mock[AnnotatedConstantBindingBuilder] val builder2 = mock[ConstantBindingBuilder] when(binder.bindConstant) thenReturn builder1 when(builder1 annotatedWith named("foo")) thenReturn builder2 binderLike bindConstantNamed "foo" should be theSameInstanceAs builder2 } "neuron interface using a class" in { testBindNeuronUsingClass[NeuronInterface] } "neuron interface using a type literal" in { testBindNeuronUsingTypeLiteral[NeuronInterface] } "neuron interface using a key" in { testBindNeuronUsingKey[NeuronInterface] } "neuron class using a class" in { testBindNeuronUsingClass[NeuronClass] } "neuron class using a type literal" in { testBindNeuronUsingTypeLiteral[NeuronClass] } "neuron clazz using a key" in { testBindNeuronUsingKey[NeuronClass] } "neuron classes and interfaces using classes" in { testBindNeuronsUsingClasses(classOf[NeuronInterface], classOf[NeuronClass]) } "neuron classes and interfaces using type literals" in { testBindNeuronsUsingTypeLiterals(classOf[NeuronInterface], classOf[NeuronClass]) } "neuron classes and interfaces using keys" in { testBindNeuronsUsingKeys(classOf[NeuronInterface], classOf[NeuronClass]) } } private def testBindNeuronUsingClass[A](implicit classTag: ClassTag[A]): Unit = { testBindNeuron[A] { binderLike bindNeuron runtimeClassOf[A] } } private def testBindNeuronUsingTypeLiteral[A](implicit classTag: ClassTag[A]): Unit = { testBindNeuron[A] { binderLike bindNeuron (TypeLiteral get runtimeClassOf[A]) } } private def testBindNeuronUsingKey[A](implicit classTag: ClassTag[A]): Unit = { testBindNeuron[A] { binderLike bindNeuron (Key get runtimeClassOf[A]) } } private def testBindNeuron[A: ClassTag](bindingCall: => ScopedBindingBuilder): Unit = { val injectorProvider = mock[Provider[Injector]] val injector = mock[Injector] when(binder getProvider classOf[Injector]) thenReturn injectorProvider when(injectorProvider.get) thenReturn injector val clazz = runtimeClassOf[A] val typeLiteral = TypeLiteral get clazz val key = Key get clazz val builder1 = mock[AnnotatedBindingBuilder[A]] 
val builder2 = mock[ScopedBindingBuilder] val membersInjector = mock[MembersInjector[A]] when(binder bind clazz) thenReturn builder1 when(binder bind typeLiteral) thenReturn builder1 when(binder bind key) thenReturn builder1 when(builder1 toProvider any[Provider[A]]) thenReturn builder2 when(binder getMembersInjector typeLiteral) thenReturn membersInjector bindingCall should be theSameInstanceAs builder2 val neuronProviderCaptor = ArgumentCaptor forClass classOf[NeuronProvider[A]] verify(builder1) toProvider neuronProviderCaptor.capture val neuronProvider = neuronProviderCaptor.getValue neuronProvider.injector should be theSameInstanceAs injector if (clazz.isInterface) { neuronProvider.membersInjector should not be theSameInstanceAs(membersInjector) neuronProvider.membersInjector should not be null } else { neuronProvider.membersInjector should be theSameInstanceAs membersInjector } neuronProvider.typeLiteral shouldBe typeLiteral } private def testBindNeuronsUsingClasses(classes: Class[_]*): Unit = { testBindNeurons(classes: _*) { binderLike.bindNeurons(classes.head, classes.tail: _*) } } private def testBindNeuronsUsingTypeLiterals(classes: Class[_]*): Unit = { testBindNeurons(classes: _*) { binderLike.bindNeurons(TypeLiteral get classes.head, classes.tail.map(TypeLiteral get _): _*) } } private def testBindNeuronsUsingKeys(classes: Class[_]*): Unit = { testBindNeurons(classes: _*) { binderLike.bindNeurons(Key get classes.head, classes.tail.map(Key get _): _*) } } private def testBindNeurons(classes: Class[_]*)(bindingCall: => Unit): Unit = { val injectorProvider = mock[Provider[Injector]] val injector = mock[Injector] when(binder getProvider classOf[Injector]) thenReturn injectorProvider when(injectorProvider.get) thenReturn injector def wiring[A](clazz: Class[A]) = { val typeLiteral = TypeLiteral get clazz val key = Key get clazz val builder = mock[AnnotatedBindingBuilder[A]] val membersInjector = mock[MembersInjector[A]] when(binder bind clazz) thenReturn builder when(binder bind typeLiteral) thenReturn builder when(binder bind key) thenReturn builder when(binder getMembersInjector typeLiteral) thenReturn membersInjector (clazz, builder, membersInjector) } def verification[A](clazz: Class[A], builder: AnnotatedBindingBuilder[A], membersInjector: MembersInjector[A]): Unit = { val typeLiteral = TypeLiteral get clazz val neuronProviderCaptor = ArgumentCaptor forClass classOf[NeuronProvider[A]] verify(builder) toProvider neuronProviderCaptor.capture val neuronProvider = neuronProviderCaptor.getValue neuronProvider.injector should be theSameInstanceAs injector if (clazz.isInterface) { neuronProvider.membersInjector should not be theSameInstanceAs(membersInjector) neuronProvider.membersInjector should not be null } else { neuronProvider.membersInjector should be theSameInstanceAs membersInjector } neuronProvider.typeLiteral shouldBe typeLiteral } val wirings = classes map (wiring(_)) bindingCall wirings foreach { case (clazz, builder, membersInjector) => verification(clazz, builder, membersInjector) } } } private object BinderLikeSpec { @Neuron trait NeuronInterface @Neuron abstract class NeuronClass def runtimeClassOf[A](implicit tag: ClassTag[A]): Class[A] = { require(tag != classTag[Nothing], "Missing type parameter.") tag.runtimeClass.asInstanceOf[Class[A]] } }
christian-schlichtherle/neuron-di
guice/src/test/scala/global/namespace/neuron/di/guice/java/BinderLikeSpec.scala
Scala
apache-2.0
7,551
package gui

import java.awt.{BorderLayout, Insets}
import java.io.{BufferedReader, InputStreamReader}
import javax.swing._

import connections.usb.Adb

class Console extends JFrame {
  private val consoleTextField: JTextArea = new JTextArea()

  {
    this.setTitle("Output")
    this.setBounds(100, 100, 400, 200)
    consoleTextField.setEditable(false)
    consoleTextField.setMargin(new Insets(10, 10, 10, 10))
    consoleTextField.setAlignmentX(0)
    this.getContentPane.add(consoleTextField, BorderLayout.CENTER)
    consoleTextField.setCaretPosition(0)

    val scroll: JScrollPane = new JScrollPane(consoleTextField,
      ScrollPaneConstants.VERTICAL_SCROLLBAR_ALWAYS,
      ScrollPaneConstants.HORIZONTAL_SCROLLBAR_NEVER)
    this.getContentPane.add(scroll)
  }

  def append(text: String): Unit = {
    if (consoleTextField.getText == "")
      consoleTextField.append(text)
    else
      consoleTextField.append("\n" + text)
  }

  def showConsole(): Unit = this.setVisible(true)

  def runProcess(pr: Process): Unit = {
    var consoleOut: String = null
    var stdInput: BufferedReader = null
    var stdError: BufferedReader = null

    if (pr != null) {
      stdInput = new BufferedReader(new InputStreamReader(pr.getInputStream))
      stdError = new BufferedReader(new InputStreamReader(pr.getErrorStream))

      while ( {
        consoleOut = stdInput.readLine()
        consoleOut != null
      }) {
        this.append(consoleOut)
      }

      var errorOut: String = null
      while ( {
        errorOut = stdError.readLine()
        errorOut != null
      }) {
        this.append(errorOut)
      }
    }
    showConsole()
  }

  def runAdbProcess(pr: Process): Unit = {
    var deviceAvailable: Boolean = false
    var consoleOut: String = null
    var stdInput: BufferedReader = null
    var stdError: BufferedReader = null

    if (Adb.isAdbFilePresent && pr != null) {
      stdInput = new BufferedReader(new InputStreamReader(pr.getInputStream))
      stdError = new BufferedReader(new InputStreamReader(pr.getErrorStream))

      while ( {
        consoleOut = stdInput.readLine()
        consoleOut != null
      }) {
        if (consoleOut.contains(" device")) {
          deviceAvailable = true
        }
        this.append(consoleOut)
      }

      var errorOut: String = null
      while ( {
        errorOut = stdError.readLine()
        errorOut != null
      }) {
        this.append(errorOut)
      }
    } else {
      this.append("Error: ADB is not installed")
    }
    showConsole()
  }
}
LorenK96/slide-desktop
src/main/scala/gui/Console.scala
Scala
gpl-2.0
2,909
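Console above wires a JTextArea to the stdout and stderr of an external process. A small usage sketch; the command is an arbitrary example and is assumed to be available on the PATH:

import gui.Console

object ConsoleSketch extends App {
  // Launch an external command and stream its output into the Swing console window.
  val console = new Console
  val process = new ProcessBuilder("ping", "-c", "4", "localhost").start()
  console.runProcess(process) // reads stdout, then stderr, then shows the window
}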
package com.thangiee.lolhangouts.ui.utils.thirdpartylibsugar

import android.view.View
import com.afollestad.materialdialogs.MaterialDialog
import com.afollestad.materialdialogs.MaterialDialog.{ButtonCallback, InputCallback, ListCallbackMultiChoice, ListCallbackSingleChoice}

import scala.language.implicitConversions

trait MaterialDialogSugar {
  type MultiChoice = (MaterialDialog, Array[Int], Array[CharSequence]) => Boolean
  type SingleChoice = (MaterialDialog, View, Int, CharSequence) => Boolean

  implicit class MaterialDialogSugar(builder: MaterialDialog.Builder) {
    private var positiveListener: Option[MaterialDialog => Unit] = None
    private var negativeListener: Option[MaterialDialog => Unit] = None
    private var neutralListener: Option[MaterialDialog => Unit] = None
    private var multiChoice: Option[MultiChoice] = None
    private var singleChoice: Option[SingleChoice] = None

    builder.callback(new ButtonCallback {
      override def onPositive(dialog: MaterialDialog): Unit = positiveListener.foreach(l => l(dialog))

      override def onNegative(dialog: MaterialDialog): Unit = negativeListener.foreach(l => l(dialog))

      override def onNeutral(dialog: MaterialDialog): Unit = neutralListener.foreach(l => l(dialog))
    })

    def onSingleChoice(f: SingleChoice): MaterialDialog.Builder = {
      singleChoice = Some(f)
      builder.itemsCallbackSingleChoice(0, f)
    }

    def onMultiChoice(f: MultiChoice): MaterialDialog.Builder = {
      multiChoice = Some(f)
      builder.itemsCallbackMultiChoice(null, f)
    }

    def onPositive(f: MaterialDialog => Unit): MaterialDialog.Builder = {
      positiveListener = Some(f)
      builder
    }

    def onNegative(f: MaterialDialog => Unit): MaterialDialog.Builder = {
      negativeListener = Some(f)
      builder
    }

    def onNeutral(f: MaterialDialog => Unit): MaterialDialog.Builder = {
      neutralListener = Some(f)
      builder
    }
  }

  implicit def func2InputCallback(f: (MaterialDialog, CharSequence) => Unit): InputCallback =
    (dialog: MaterialDialog, input: CharSequence) => f(dialog, input)

  implicit def func2ListCallbackMultiChoice(f: MultiChoice): ListCallbackMultiChoice =
    (dialog: MaterialDialog, which: Array[Integer], text: Array[CharSequence]) => f(dialog, which.map(_.toInt), text)

  implicit def func2ListCallbackSingleChoice(f: SingleChoice): ListCallbackSingleChoice =
    (dialog: MaterialDialog, view: View, i: Int, selection: CharSequence) => f(dialog, view, i, selection)
}

object MaterialDialogSugar extends MaterialDialogSugar
Thangiee/LoL-Hangouts
src/com/thangiee/lolhangouts/ui/utils/thirdpartylibsugar/MaterialDialogSugar.scala
Scala
apache-2.0
2,577
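The implicit class above lets plain Scala functions stand in for the library's ButtonCallback and list callbacks. A sketch of a possible call site, assuming the stock Builder methods (title, positiveText, show) of the underlying dialog library; ctx is assumed to be supplied by the surrounding Activity and the strings are placeholders:

import android.content.Context
import com.afollestad.materialdialogs.MaterialDialog
import com.thangiee.lolhangouts.ui.utils.thirdpartylibsugar.MaterialDialogSugar._

object DialogSketch {
  // ctx is assumed to come from the surrounding Activity; the dialog text is arbitrary.
  def confirmSignOut(ctx: Context)(onConfirmed: () => Unit): Unit =
    new MaterialDialog.Builder(ctx)
      .title("Sign out?")
      .positiveText("Sign out")
      .onPositive { dialog: MaterialDialog => // sugar: a plain function instead of a ButtonCallback
        dialog.dismiss()
        onConfirmed()
      }
      .show()
}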
package org.http4s
package servlet

import java.util.EnumSet
import javax.servlet.{DispatcherType, Filter}
import javax.servlet.http.HttpServlet

import org.http4s.server.{AsyncTimeoutSupport, ServerBuilder}

trait ServletContainer extends ServerBuilder with AsyncTimeoutSupport {
  type Self <: ServletContainer

  /**
   * Mounts a servlet to the server.
   *
   * The http4s way is to create an [[HttpService]], which runs not just on servlet containers,
   * but all supported backends. This method is good for legacy scenarios, or for reusing parts
   * of the servlet ecosystem for an app that is committed to running on a servlet container.
   */
  def mountServlet(servlet: HttpServlet, urlMapping: String, name: Option[String] = None): Self

  /**
   * Mounts a filter to the server.
   *
   * The http4s way is to create a middleware around an [[HttpService]], which runs not just on
   * servlet containers, but all supported backends. This method is good for legacy scenarios,
   * or for reusing parts of the servlet ecosystem for an app that is committed to running on
   * a servlet container.
   */
  def mountFilter(filter: Filter,
                  urlMapping: String,
                  name: Option[String] = None,
                  dispatches: EnumSet[DispatcherType] = EnumSet.of(
                    DispatcherType.REQUEST,
                    DispatcherType.FORWARD,
                    DispatcherType.INCLUDE,
                    DispatcherType.ASYNC)): Self

  /**
   * Sets the servlet I/O mode for reads and writes within the servlet.
   * Not to be confused with the server connectors.
   *
   * @see [[org.http4s.servlet.ServletIo]]
   */
  def withServletIo(servletIo: ServletIo): Self
}

object ServletContainer {
  val DefaultServletIo = NonBlockingServletIo(DefaultChunkSize)

  /**
   * Trims an optional trailing slash and then appends "/\\u002b'. Translates an argument to
   * mountService into a standard servlet prefix mapping.
   */
  def prefixMapping(prefix: String) = prefix.replaceAll("/?$", "") + "/*"
}
ZizhengTai/http4s
servlet/src/main/scala/org/http4s/servlet/ServletContainer.scala
Scala
apache-2.0
1,989
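prefixMapping in the companion object is plain string manipulation, so its behaviour is easy to pin down. A few illustrative inputs (the prefixes are arbitrary):

import org.http4s.servlet.ServletContainer.prefixMapping

object PrefixMappingExamples extends App {
  // prefixMapping turns a mount prefix into a standard servlet url-pattern:
  assert(prefixMapping("/api") == "/api/*")
  assert(prefixMapping("/api/") == "/api/*") // the trailing slash is trimmed first
  assert(prefixMapping("/") == "/*")
}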
import reflect.runtime.universe

class C {
  private val yyy: Any = 1

  @inline def foo = yyy
}

object Test extends dotty.runtime.LegacyApp {
  import universe._
  val access = typeOf[C].decls
    .toList
    .filter(_.name.toString.endsWith("yyy"))
    .map(x => (x.name, x.isPrivate))
  println(access.head)
}
yusuke2255/dotty
tests/pending/run/t6608.scala
Scala
bsd-3-clause
314
package com.raquo.domtypes.jsdom.defs.events

import org.scalajs.dom

import scala.scalajs.js

@js.native
trait TypedTargetPointerEvent[+T <: dom.EventTarget]
  extends dom.PointerEvent
  with TypedTargetEvent[T]
raquo/scala-dom-types
js/src/main/scala/com/raquo/domtypes/jsdom/defs/events/TypedTargetPointerEvent.scala
Scala
mit
213
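TypedTargetPointerEvent only mixes a refined target type into dom.PointerEvent. A sketch of the kind of handler that benefits, assuming TypedTargetEvent refines target to T as its name suggests; the element and handler names are illustrative:

import org.scalajs.dom
import com.raquo.domtypes.jsdom.defs.events.TypedTargetPointerEvent

object PointerSketch {
  // Illustrative handler: the refined event type lets us read the target without a cast.
  def onCanvasPointerDown(ev: TypedTargetPointerEvent[dom.html.Canvas]): Unit = {
    val canvas = ev.target // statically a dom.html.Canvas if TypedTargetEvent refines `target`
    println(s"pointer down at (${ev.clientX}, ${ev.clientY}) on a ${canvas.width}x${canvas.height} canvas")
  }
}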