code (stringlengths 5–1M) | repo_name (stringlengths 5–109) | path (stringlengths 6–208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5–1M)
---|---|---|---|---|---
/*
*************************************************************************************
* Copyright 2015 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.web.rest.node
import com.normation.inventory.domain.NodeId
import net.liftweb.common.Box
import net.liftweb.common.Loggable
import net.liftweb.http.LiftResponse
import net.liftweb.http.Req
import net.liftweb.http.rest.RestHelper
import com.normation.rudder.web.rest.RestExtractorService
import com.normation.rudder.web.rest.RestUtils._
import net.liftweb.common._
import net.liftweb.json.JsonDSL._
import com.normation.inventory.domain._
import com.normation.rudder.web.rest.ApiVersion
case class NodeAPI6 (
apiV5 : NodeAPI5
, serviceV6 : NodeApiService6
, restExtractor : RestExtractorService
) extends NodeAPI with Loggable{
def v6Dispatch(version : ApiVersion) : PartialFunction[Req, () => Box[LiftResponse]] = {
case Get(Nil, req) =>
implicit val prettify = restExtractor.extractPrettify(req.params)
restExtractor.extractNodeDetailLevel(req.params) match {
case Full(level) =>
restExtractor.extractQuery(req.params) match {
case Full(None) =>
serviceV6.listNodes(AcceptedInventory, level, None, version)
case Full(Some(query)) =>
serviceV6.queryNodes(query,AcceptedInventory, level, version)
case eb:EmptyBox =>
val failMsg = eb ?~ "Node query not correctly sent"
toJsonError(None, failMsg.msg)("listAcceptedNodes",prettify)
}
case eb:EmptyBox =>
val failMsg = eb ?~ "Node detail level not correctly sent"
toJsonError(None, failMsg.msg)("listAcceptedNodes",prettify)
}
case Get("pending" :: Nil, req) =>
implicit val prettify = restExtractor.extractPrettify(req.params)
restExtractor.extractNodeDetailLevel(req.params) match {
case Full(level) => serviceV6.listNodes(PendingInventory, level, None, version)
case eb:EmptyBox =>
val failMsg = eb ?~ "node detail level not correctly sent"
toJsonError(None, failMsg.msg)("listAcceptedNodes",prettify)
}
}
// Node API Version 6 fallback to Node API v5 if request is not handled in V6
def requestDispatch(apiVersion: ApiVersion) : PartialFunction[Req, () => Box[LiftResponse]] = {
v6Dispatch(apiVersion) orElse apiV5.requestDispatch(apiVersion)
}
}
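// Illustrative sketch (not Rudder code): how chaining PartialFunctions with `orElse`, as done in
// `requestDispatch` above, lets requests that v6 does not handle fall through to the previous API
// version. The Lift Req/LiftResponse types are replaced by plain strings here.
object VersionFallbackSketch {
  val v6: PartialFunction[String, String] = { case "GET /" => "handled by v6" }
  val v5: PartialFunction[String, String] = { case _       => "handled by v5" }
  val dispatch: PartialFunction[String, String] = v6 orElse v5
  def demo(): Unit = {
    println(dispatch("GET /"))          // handled by v6
    println(dispatch("POST /settings")) // not matched by v6, falls through to v5
  }
}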
| armeniaca/rudder | rudder-web/src/main/scala/com/normation/rudder/web/rest/node/NodeAPI6.scala | Scala | gpl-3.0 | 3,980 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.graph
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.model.headers.Host
import com.netflix.atlas.chart.util.GraphAssertions
import com.netflix.atlas.chart.util.PngImage
import com.netflix.atlas.chart.util.SrcPath
import com.netflix.atlas.core.db.StaticDatabase
import com.netflix.atlas.core.util.Hash
import com.netflix.atlas.core.util.Strings
import com.netflix.atlas.json.Json
import com.typesafe.config.ConfigFactory
import munit.FunSuite
class GrapherSuite extends FunSuite {
private val bless = false
// When tests are forked, SBT sets the working directory to the project directory
private val baseDir = SrcPath.forProject("atlas-eval")
private val goldenDir = s"$baseDir/src/test/resources/graph/${getClass.getSimpleName}"
private val targetDir = s"$baseDir/target/${getClass.getSimpleName}"
private val graphAssertions =
new GraphAssertions(goldenDir, targetDir, (a, b) => assertEquals(a, b))
private val db = StaticDatabase.demo
private val grapher = Grapher(ConfigFactory.load())
override def afterAll(): Unit = {
graphAssertions.generateReport(getClass)
}
def imageTest(name: String)(uri: => String): Unit = {
test(name) {
val fname = Strings.zeroPad(Hash.sha1bytes(name), 40).substring(0, 8) + ".png"
val result = grapher.evalAndRender(Uri(uri), db)
val image = PngImage(result.data)
graphAssertions.assertEquals(image, fname, bless)
}
}
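// Illustrative sketch of how the golden-image file name above is derived: the test name is SHA-1
// hashed, rendered as a 40-character zero-padded hex string, and the first 8 characters become the
// ".png" file name. This re-implements the idea with java.security.MessageDigest under the
// assumption that the atlas-core Hash/Strings helpers compute a standard SHA-1 hex digest.
private def goldenFileNameSketch(testName: String): String = {
  val digest = java.security.MessageDigest.getInstance("SHA-1")
  val hex = digest.digest(testName.getBytes("UTF-8")).map("%02x".format(_)).mkString
  hex.take(8) + ".png"
}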
imageTest("simple expr") {
"/api/v1/graph?e=2012-01-01T00:00&q=name,sps,:eq,nf.cluster,nccp-silverlight,:eq,:and,:sum"
}
imageTest("timezone: UTC") {
"/api/v1/graph?e=2012-01-01T00:00&q=name,sps,:eq,:sum&tz=UTC"
}
imageTest("timezone: US/Pacific") {
"/api/v1/graph?e=2012-01-01T00:00&s=e-1d&q=name,sps,:eq,:sum&tz=US/Pacific"
}
imageTest("timezone: UTC and US/Pacific") {
"/api/v1/graph?e=2012-01-01T00:00&s=e-1d&q=name,sps,:eq,:sum&tz=UTC&tz=US/Pacific"
}
imageTest("line colors") {
"/api/v1/graph?e=2012-01-01T00:00&q=name,sps,:eq,:sum,:dup,1000,:add,f00,:color"
}
imageTest("legend text") {
"/api/v1/graph?e=2012-01-01T00:00&q=name,sps,:eq,:sum,starts+per+second,:legend"
}
imageTest("legend text using exact match var") {
"/api/v1/graph?e=2012-01-01T00:00&q=name,sps,:eq,:sum,$name,:legend"
}
imageTest("legend text using group by var") {
"/api/v1/graph?e=2012-01-01T00:00&q=name,sps,:eq,(,nf.cluster,),:by,$nf.cluster,:legend"
}
imageTest("legend text using atlas.offset") {
"/api/v1/graph?e=2012-01-01T00:00&q=" +
"(,0h,1d,1w,),(," +
"name,sps,:eq,nf.cluster,nccp-silverlight,:eq,:and,:sum," +
":swap,:offset," +
"$(name)+(offset%3D$(atlas.offset)),:legend," +
"),:each"
}
imageTest("group by and stack") {
"/api/v1/graph?e=2012-01-01T00:00&q=name,sps,:eq,(,nf.cluster,),:by,$nf.cluster,:legend&stack=1"
}
imageTest("group by, pct, and stack") {
"/api/v1/graph?e=2012-01-01T00:00&q=" +
"name,sps,:eq,(,nf.cluster,),:by,:pct,$nf.cluster,:legend" +
"&stack=1"
}
imageTest("upper and lower bounds") {
"/api/v1/graph?e=2012-01-01T00:00&q=" +
"name,sps,:eq,nf.cluster,nccp-.*,:re,:and,:sum,(,nf.cluster,),:by,$nf.cluster,:legend" +
"&stack=1&l=0&u=50e3"
}
private val baseAxisScaleQuery = "/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,nf.cluster,nccp-silverlight,:eq,:and," +
":dup,:sum," +
":swap,:count," +
":over,:over,:div,average,:legend," +
":rot,sum,:legend," +
":rot,count,:legend"
imageTest("axis using legacy o=1 param") {
baseAxisScaleQuery + "&o=1"
}
imageTest("axis using log scale") {
baseAxisScaleQuery + "&scale=log"
}
imageTest("axis using pow2 scale") {
baseAxisScaleQuery + "&scale=pow2"
}
imageTest("axis using sqrt scale") {
baseAxisScaleQuery + "&scale=sqrt"
}
imageTest("axis using linear scale") {
baseAxisScaleQuery + "&scale=linear"
}
imageTest("axis using legacy scale param overides o param") {
baseAxisScaleQuery + "&scale=linear&o=1"
}
private val baseStatAxisScaleQuery = "/api/v1/graph?e=2012-01-01T00:00&s=e-2d" +
"&q=name,sps,:eq,:sum,:dup,:dup,min,:stat,:sub,:swap,max,:stat,0.5,:mul,:sub"
imageTest("stat query with axis using log scale") {
baseStatAxisScaleQuery + "&scale=log"
}
imageTest("stat query with axis using pow2 scale") {
baseStatAxisScaleQuery + "&scale=pow2"
}
imageTest("stat query with axis using sqrt scale") {
baseStatAxisScaleQuery + "&scale=sqrt"
}
imageTest("stat query with axis using linear scale") {
baseStatAxisScaleQuery + "&scale=linear"
}
imageTest("average") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,nf.cluster,nccp-silverlight,:eq,:and,:avg," +
"avg+sps+for+silverlight,:legend"
}
imageTest("title and legends") {
"/api/v1/graph?s=e-1w&e=2012-01-01T00:00" +
"&q=name,sps,:eq,nf.cluster,nccp-silverlight,:eq,:and,:avg,avg+sps+for+silverlight,:legend" +
"&no_legend=1&title=Silverlight+SPS&ylabel=Starts+per+second"
}
imageTest("line on area") {
// Area must be drawn first or line will be covered up
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=nf.cluster,nccp-silverlight,:eq,name,sps,:eq,:and,:sum,:dup,10000,:add,:area,:swap"
}
imageTest("stack, areas, and lines") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,nf.cluster,nccp-silverlight,:eq,:and,:sum,:area," +
"name,sps,:eq,nf.cluster,nccp-ps3,:eq,:and,:sum,:stack," +
"name,sps,:eq,:avg,100,:mul"
}
imageTest("transparency as part of color") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=nf.cluster,nccp-silverlight,:eq,name,sps,:eq,:and,:sum," +
":dup,10000,:add,:area,400000ff,:color"
}
imageTest("transparency using alpha") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=nf.cluster,nccp-silverlight,:eq,name,sps,:eq,:and,:sum," +
":dup,10000,:add,:area,40,:alpha"
}
imageTest("DES: delta as area") {
"/api/v1/graph?tz=UTC&e=2012-01-01T12:00&s=e-12h&w=750&h=150&l=0" +
"&q=nf.cluster,alerttest,:eq,name,requestsPerSecond,:eq,:and,:sum," +
":dup,:des-simple,0.9,:mul," +
":2over,:sub,:abs,:area,40,:alpha," +
":rot,$name,:legend," +
":rot,prediction,:legend," +
":rot,delta,:legend"
}
imageTest("DES: vspan showing trigger") {
"/api/v1/graph?tz=UTC&e=2012-01-01T12:00&s=e-12h&w=750&h=150&l=0" +
"&q=nf.cluster,alerttest,:eq,name,requestsPerSecond,:eq,:and,:sum" +
",:dup,:des-simple,0.9,:mul," +
":2over,:lt," +
":rot,$name,:legend," +
":rot,prediction,:legend," +
":rot,:vspan,60,:alpha,alert+triggered,:legend"
}
imageTest("smoothing using DES") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,nf.cluster,nccp-silverlight,:eq,:and,type,high-noise,:eq,:and,:sum," +
"10,0.145,0.01,:des" +
"&w=750&h=100&no_legend=1&s=e-12h"
}
imageTest("smoothing using trend") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,nf.cluster,nccp-ps3,:eq,:and,:avg," +
":dup,:dup,:dup,5m,:trend,100,:add,5m+trend,:legend," +
":rot,10m,:trend,200,:add,10m+trend,:legend," +
":rot,20m,:trend,300,:add,20m+trend,:legend," +
":rot,original+line,:legend,:-rot" +
"&w=750&h=300&s=e-12h"
}
imageTest("smoothing using step 5m") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,nf.cluster,nccp-silverlight,:eq,:and,type,high-noise,:eq,:and,:sum" +
"&step=PT5M&w=750&h=100&no_legend=1&s=e-12h"
}
imageTest("smoothing using step 20m") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,nf.cluster,nccp-silverlight,:eq,:and,type,high-noise,:eq,:and,:sum" +
"&step=PT20M&w=750&h=100&no_legend=1&s=e-12h"
}
imageTest("math with time shifts") {
"/api/v1/graph?e=2012-01-01T12:00&s=e-12h&tz=UTC" +
"&q=nf.cluster,alerttest,:eq,name,requestsPerSecond,:eq,:and,:sum," +
":dup,1w,:offset,:sub,:area,delta+week+over+week,:legend" +
"&h=150&w=750"
}
imageTest("average over last 3w") {
"/api/v1/graph?e=2012-01-01T12:00&s=e-12h&tz=UTC" +
"&q=nf.cluster,alerttest,:eq,name,requestsPerSecond,:eq,:and,:sum," +
":dup,1w,:offset,:over,2w,:offset,:add,:over,3w,:offset,:add,3,:div," +
":2over,:swap,:over,:sub,:abs,:swap,:div,100,:mul," +
":rot,requestsPerSecond,:legend," +
":rot,average+for+previous+3+weeks,:legend," +
":rot,:area,40,:alpha,percent+delta,:legend" +
"&h=150&w=750"
}
imageTest("multi-Y") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=nf.node,alert1,:eq,:sum,nf.node,alert1,:eq,:count,1,:axis" +
"&ylabel.0=Axis%200&ylabel.1=Axis%201"
}
imageTest("significant time boundaries and tz=US/Pacific") {
"/api/v1/graph?q=name,sps,:eq,:sum&s=e-2d&e=2015-06-17T13:13&no_legend=1&tz=US/Pacific"
}
imageTest("significant time boundaries and tz=UTC") {
"/api/v1/graph?q=name,sps,:eq,:sum&s=e-2d&e=2015-06-17T13:13&no_legend=1&tz=UTC"
}
imageTest("daylight savings time transition") {
"/api/v1/graph?q=name,sps,:eq,:sum&s=e-4d&e=2015-03-10T13:13&no_legend=1&tz=US/Pacific"
}
imageTest("daylight savings time transition, with 1d step") {
"/api/v1/graph?q=name,sps,:eq,:sum&s=e-1d&e=2015-03-10T13:13&no_legend=1&tz=US/Pacific&step=1d"
}
imageTest("daylight savings time transition, US/Pacific and UTC") {
"/api/v1/graph?q=name,sps,:eq,:sum&s=e-4d&e=2015-03-10T13:13&no_legend=1&tz=US/Pacific&tz=UTC"
}
imageTest("daylight savings time transition, UTC, Pacific, and Eastern") {
"/api/v1/graph?q=name,sps,:eq,:sum&s=e-4d&e=2015-03-10T13:13&no_legend=1" +
"&tz=UTC&tz=US/Pacific&tz=US/Eastern&step=1d"
}
imageTest("vision flag") {
"/api/v1/graph?s=e-1d&e=2015-03-10T13:13" +
"&q=(,1,2,3,4,5,6,7,8,9,),(,nf.cluster,nccp-silverlight,:eq,name,sps,:eq,:and,:sum,:swap,:legend,),:each" +
"&vision=protanopia&no_legend=1&stack=1"
}
imageTest("z-order of stacked lines") {
"/api/v1/graph" +
"?q=t,name,sps,:eq,:sum,:set,t,:get,:stack,t,:get,1.1,:mul,6h,:offset,t,:get,4,:div,:stack" +
"&s=e-2d&e=2015-03-10T13:13"
}
imageTest("issue-1146") {
"/api/v1/graph?s=e-1d&e=2012-01-01T00:00&q=name,sps,:eq,:sum,:dup,30m,:offset,4,:axis&l.4=0"
}
imageTest("expr scoped palette") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,reds,:palette,:stack,name,sps,:eq,2,:lw,50e3,45e3"
}
imageTest("expr scoped palette in the middle") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,4,:lw,name,sps,:eq,(,nf.cluster,),:by,reds,:palette,:stack,50e3,45e3"
}
imageTest("multiple expressions with scoped palettes") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,:dup,reds,:palette,:stack,:swap,greens,:palette,:stack"
}
imageTest("expr palette overrides axis param") {
"/api/v1/graph?e=2012-01-01T00:00&palette=greens" +
"&q=name,sps,:eq,(,nf.cluster,),:by,reds,:palette,:stack"
}
imageTest("expr palette then color") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,reds,:palette,00f,:color,:stack"
}
imageTest("color then expr palette") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,00f,:color,reds,:palette,:stack"
}
imageTest("expr palette with alpha") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=50e3,4,:lw,name,sps,:eq,(,nf.cluster,),:by,reds,:palette,40,:alpha,:stack"
}
imageTest("expr scoped hashed palette") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,hash:reds,:palette,:stack"
}
imageTest("substitute max stat in legend") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,$atlas.max+is+max,:legend"
}
imageTest("substitute max stat in legend honors label mode") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,$atlas.max+is+max,:legend&tick_labels=binary"
}
imageTest("empty legend string") {
"/api/v1/graph?e=2012-01-01T00:00&q=name,sps,:eq,(,nf.cluster,),:by,$(),:legend"
}
imageTest("substitutions for ylabel, name present, cluster missing") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,nf.cluster,nccp-silver,:lt,:and,(,nf.cluster,),:by" +
"&ylabel=$name+$nf.cluster"
}
imageTest("substitutions for ylabel, name present, cluster present") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,nf.cluster,nccp-silver,:lt,:and,(,nf.cluster,),:by" +
"&ylabel=$name+$nf.cluster&axis_per_line=1"
}
imageTest("substitutions for title, name present, cluster missing") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,nf.cluster,nccp-silver,:lt,:and,(,nf.cluster,),:by" +
"&title=$name+$nf.cluster&axis_per_line=1"
}
imageTest("using dark24 palette") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,$nf.cluster,:legend" +
"&palette=dark24"
}
imageTest("using light24 palette") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,$nf.cluster,:legend" +
"&palette=light24"
}
imageTest("using dark theme") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,$nf.cluster,:legend" +
"&theme=dark"
}
imageTest("using dark theme with multi-Y") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,$nf.cluster,:legend,42,1,:axis" +
"&theme=dark"
}
imageTest("using dark theme with offset") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,:dup,1w,:offset" +
"&theme=dark"
}
imageTest("topk") {
"/api/v1/graph?e=2012-01-01&q=name,sps,:eq,(,nf.cluster,),:by,max,2,:topk&features=unstable"
}
imageTest("topk-others-min") {
"/api/v1/graph?e=2012-01-01&q=name,sps,:eq,(,nf.cluster,),:by,max,2,:topk-others-min&features=unstable"
}
imageTest("topk-others-max") {
"/api/v1/graph?e=2012-01-01&q=name,sps,:eq,(,nf.cluster,),:by,max,2,:topk-others-max&features=unstable"
}
imageTest("topk-others-sum") {
"/api/v1/graph?e=2012-01-01&q=name,sps,:eq,(,nf.cluster,),:by,max,2,:topk-others-sum&features=unstable"
}
imageTest("topk-others-avg") {
"/api/v1/graph?e=2012-01-01&q=name,sps,:eq,(,nf.cluster,),:by,max,2,:topk-others-avg&features=unstable"
}
imageTest("bottomk") {
"/api/v1/graph?e=2012-01-01&q=name,sps,:eq,(,nf.cluster,),:by,max,2,:bottomk&features=unstable"
}
imageTest("bottomk-others-min") {
"/api/v1/graph?e=2012-01-01&q=name,sps,:eq,(,nf.cluster,),:by,max,2,:bottomk-others-min&features=unstable"
}
imageTest("bottomk-others-max") {
"/api/v1/graph?e=2012-01-01&q=name,sps,:eq,(,nf.cluster,),:by,max,2,:bottomk-others-max&features=unstable"
}
imageTest("bottomk-others-sum") {
"/api/v1/graph?e=2012-01-01&q=name,sps,:eq,(,nf.cluster,),:by,max,2,:bottomk-others-sum&features=unstable"
}
imageTest("bottomk-others-avg") {
"/api/v1/graph?e=2012-01-01&q=name,sps,:eq,(,nf.cluster,),:by,max,2,:bottomk-others-avg&features=unstable"
}
test("invalid stuff on stack") {
val uri = "/api/v1/graph?e=2012-01-01&q=name,sps,:eq,(,nf.cluster,),:by,foo"
val e = intercept[IllegalArgumentException] {
grapher.toGraphConfig(Uri(uri)).parsedQuery.get
}
assertEquals(e.getMessage, "expecting time series expr, found String 'foo'")
}
def renderTest(name: String)(uri: => String): Unit = {
test(name) {
val fname = Strings.zeroPad(Hash.sha1bytes(name), 40).substring(0, 8) + ".png"
val config = grapher.toGraphConfig(Uri(uri))
val styleData = config.exprs.map { styleExpr =>
val dataResults = styleExpr.expr.dataExprs.distinct.map { dataExpr =>
dataExpr -> db.execute(config.evalContext, dataExpr)
}.toMap
styleExpr -> styleExpr.expr.eval(config.evalContext, dataResults).data
}.toMap
val result = grapher.render(Uri(uri), styleData)
val image = PngImage(result.data)
graphAssertions.assertEquals(image, fname, bless)
}
}
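// renderTest exercises the two-phase API: the DataExprs behind each StyleExpr are evaluated
// against the database first, and `grapher.render` then draws from that pre-computed map instead
// of querying the database itself, as `grapher.evalAndRender` does in imageTest above.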
renderTest("rendering with pre-evaluated data set, legends") {
"/api/v1/graph?e=2012-01-01T00:00" +
"&q=name,sps,:eq,(,nf.cluster,),:by,$nf.cluster,:legend,10e3,threshold,:legend,3,:lw"
}
renderTest("rendering with pre-evaluated data set, multi-y") {
"/api/v1/graph?e=2012-01-01T00:00&u.1=56e3" +
"&q=name,sps,:eq,(,nf.cluster,),:by,$nf.cluster,:legend,:stack,20e3,1,:axis,:area,50,:alpha"
}
test("stat vars are not included in tag map") {
val uri = "/api/v1/graph?e=2012-01-01&q=name,sps,:eq,nf.app,nccp,:eq,:and,:sum&format=json"
val json = grapher.evalAndRender(Uri(uri), db).data
val response = Json.decode[GrapherSuite.GraphResponse](json)
List("avg", "last", "max", "min", "total").foreach { stat =>
response.metrics.foreach { m =>
assert(!m.contains(s"atlas.$stat"))
}
}
}
private def mkRequest(host: String, expr: String = "name,sps,:eq,:sum"): HttpRequest = {
HttpRequest(uri = Uri(s"/api/v1/graph?q=$expr"), headers = List(Host(host)))
}
test("host rewrite: no match") {
val request = mkRequest("foo.example.com")
val config = grapher.toGraphConfig(request)
assertEquals(config, grapher.toGraphConfig(request.uri))
}
test("host rewrite: match") {
val request = mkRequest("foo.us-east-1.example.com")
val config = grapher.toGraphConfig(request)
assertNotEquals(config, grapher.toGraphConfig(request.uri))
assert(config.query.contains("region,us-east-1,:eq"))
assert(config.parsedQuery.get.forall(_.toString.contains("region,us-east-1,:eq")))
}
test("host rewrite: bad query") {
val request = mkRequest("foo.us-east-1.example.com", "a,b,:foo")
val config = grapher.toGraphConfig(request)
assertEquals(config.query, "a,b,:foo")
assert(config.parsedQuery.isFailure)
}
}
object GrapherSuite {
case class GraphResponse(
start: Long,
step: Long,
legend: List[String],
metrics: List[Map[String, String]],
values: Array[Array[Double]],
notices: List[String],
explain: Map[String, Long]
)
}
| Netflix/atlas | atlas-eval/src/test/scala/com/netflix/atlas/eval/graph/GrapherSuite.scala | Scala | apache-2.0 | 18,802 |
package org.sbuild.plugins.http
import java.io.File
import java.io.FileNotFoundException
import java.net.URL
import org.sbuild.internal.I18n
import org.sbuild.Path
import org.sbuild.SchemeResolver
import org.sbuild.SchemeHandler
import org.sbuild.SchemeHandler.SchemeContext
import org.sbuild.TargetContext
import org.sbuild.CmdlineMonitor
import org.sbuild.SBuildVersion
import org.sbuild.Project
import org.sbuild.Logger
/**
 * An HTTP scheme handler that downloads the given URI into a directory, preserving the URI as the local path.
 * Example:
 * If the HttpSchemeHandler is configured to use '.sbuild/http' as its download directory,
 * the file 'http://example.com/downloads/example.jar' will be downloaded into
 * '.sbuild/http/example.com/downloads/example.jar'.
 */
class HttpSchemeHandler(downloadDir: File = null,
forceDownload: Boolean = false,
proxySettings: ProxySettings = ProxySettings.AutoProxy)(implicit project: Project) extends HttpSchemeHandlerBase(
Option(downloadDir).getOrElse(Path(".sbuild/http")),
forceDownload)
with SchemeResolver {
Logger[HttpSchemeHandler].debug("Created " + this)
override def resolve(schemeCtx: SchemeContext, targetContext: TargetContext) = {
val lastModified = download(schemeCtx.path, project.monitor)
targetContext.targetLastModified = lastModified
}
override def toString() = super.toStringBase("project=" + project.projectFile)
}
class HttpSchemeHandlerBase(val downloadDir: File, val forceDownload: Boolean = false) extends SchemeHandler {
var online: Boolean = true
private val userAgent = s"SBuild/${SBuildVersion.osgiVersion} (HttpSchemeHandler)"
def url(path: String): URL = new URL("http:" + path)
override def localPath(schemeCtx: SchemeContext): String = "file:" + localFile(schemeCtx.path).getPath
def localFile(path: String): File = {
url(path)
// ok, path is a valid URL
new File(downloadDir, path)
}
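// Example mapping (see the class comment above): with downloadDir = ".sbuild/http", the path
// "//example.com/downloads/example.jar" resolves to ".sbuild/http/example.com/downloads/example.jar".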
/**
* @return The last modified time stamp of the file.
*/
def download(path: String, monitor: CmdlineMonitor): Long = {
val target = localFile(path)
if (online) {
if (!forceDownload && target.exists) {
target.lastModified
} else {
val url = this.url(path)
// println("Downloading " + url + "...")
HttpSupport.download(url.toString, target.getPath, monitor, Some(userAgent)) match {
case Some(e) => throw e
case _ => target.lastModified
}
}
} else {
if (target.exists) {
target.lastModified
} else {
val msg = I18n.marktr("File is not present and can not be downloaded in offline-mode: {0}")
throw new FileNotFoundException(I18n.notr(msg, target.getPath)) {
override def getLocalizedMessage: String = I18n[HttpSchemeHandlerBase].tr(msg, target.getPath)
}
}
}
}
def toStringBase(extra: String = "") = getClass.getSimpleName +
"(downloadDir=" + downloadDir +
",forceDownload=" + forceDownload +
",online=" + online +
"," + extra +
")"
override def toString() = toStringBase()
}
| SBuild-org/sbuild-http-plugin | org.sbuild.plugins.http/src/main/scala/org/sbuild/plugins/http/HttpSchemeHandler.scala | Scala | apache-2.0 | 3,138 |
@main def hello: Unit = {
val x: Formatt.ToFormat['a' *: EmptyTuple] = ""
}
| dotty-staging/dotty | tests/pos/i11393/Test_2.scala | Scala | apache-2.0 | 81 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.calcite
import java.util.Collections
import org.apache.calcite.plan.volcano.VolcanoPlanner
import java.lang.Iterable
import org.apache.calcite.jdbc.CalciteSchema
import org.apache.calcite.plan._
import org.apache.calcite.prepare.CalciteCatalogReader
import org.apache.calcite.rel.logical.LogicalAggregate
import org.apache.calcite.rex.RexBuilder
import org.apache.calcite.tools.RelBuilder.{AggCall, GroupKey}
import org.apache.calcite.tools.{FrameworkConfig, RelBuilder}
import org.apache.flink.table.calcite.FlinkRelBuilder.NamedWindowProperty
import org.apache.flink.table.expressions.WindowProperty
import org.apache.flink.table.plan.logical.LogicalWindow
import org.apache.flink.table.plan.logical.rel.LogicalWindowAggregate
/**
* Flink specific [[RelBuilder]] that changes the default type factory to a [[FlinkTypeFactory]].
*/
class FlinkRelBuilder(
context: Context,
relOptCluster: RelOptCluster,
relOptSchema: RelOptSchema)
extends RelBuilder(
context,
relOptCluster,
relOptSchema) {
def getPlanner: RelOptPlanner = cluster.getPlanner
def getCluster: RelOptCluster = relOptCluster
override def getTypeFactory: FlinkTypeFactory =
super.getTypeFactory.asInstanceOf[FlinkTypeFactory]
def aggregate(
window: LogicalWindow,
groupKey: GroupKey,
namedProperties: Seq[NamedWindowProperty],
aggCalls: Iterable[AggCall])
: RelBuilder = {
// build logical aggregate
val aggregate = super.aggregate(groupKey, aggCalls).build().asInstanceOf[LogicalAggregate]
// build logical window aggregate from it
push(LogicalWindowAggregate.create(window, namedProperties, aggregate))
this
}
}
object FlinkRelBuilder {
def create(config: FrameworkConfig): FlinkRelBuilder = {
// create Flink type factory
val typeSystem = config.getTypeSystem
val typeFactory = new FlinkTypeFactory(typeSystem)
// create context instances with Flink type factory
val planner = new VolcanoPlanner(config.getCostFactory, Contexts.empty())
planner.setExecutor(config.getExecutor)
planner.addRelTraitDef(ConventionTraitDef.INSTANCE)
val cluster = FlinkRelOptClusterFactory.create(planner, new RexBuilder(typeFactory))
val calciteSchema = CalciteSchema.from(config.getDefaultSchema)
val relOptSchema = new CalciteCatalogReader(
calciteSchema,
config.getParserConfig.caseSensitive(),
Collections.emptyList(),
typeFactory)
new FlinkRelBuilder(config.getContext, cluster, relOptSchema)
}
/**
* Information necessary to create a window aggregate.
*
* Similar to [[RelBuilder.AggCall]] or [[RelBuilder.GroupKey]].
*/
case class NamedWindowProperty(name: String, property: WindowProperty)
}
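// Illustrative sketch (not part of Flink): obtaining a FlinkRelBuilder from a minimal Calcite
// FrameworkConfig. The root schema used here is an assumption for demonstration purposes; Flink
// normally supplies its own catalog and schema when assembling the config.
object FlinkRelBuilderSketch {
  import org.apache.calcite.tools.Frameworks
  def main(args: Array[String]): Unit = {
    val config = Frameworks.newConfigBuilder()
      .defaultSchema(Frameworks.createRootSchema(true))
      .build()
    val builder = FlinkRelBuilder.create(config)
    println(builder.getTypeFactory) // the FlinkTypeFactory backing all relational expressions
  }
}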
| DieBauer/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/calcite/FlinkRelBuilder.scala | Scala | apache-2.0 | 3,585 |
package controllers
import java.util.concurrent.TimeoutException
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import javax.inject.Inject
import javax.inject.Singleton
import models.Gallery
import play.api.Logger
import play.api.i18n.I18nSupport
import play.api.i18n.MessagesApi
import play.api.mvc.Action
import play.api.mvc.Controller
import reactivemongo.bson.BSONObjectID
import models.services.GalleryService
/**
* @author carlos
*/
@Singleton
class GalleryControl @Inject() (galService: GalleryService, val messagesApi: MessagesApi) extends Controller with I18nSupport {
implicit val timeout = 10.seconds
def gallery = Action.async { implicit request =>
val galls = galService.findListGall()
galls.map {
gall =>
Ok(views.html.gallery.list_gallery(gall))
}.recover {
case t: TimeoutException =>
Logger.error("Problem found in gallery list process")
InternalServerError(t.getMessage)
}
}
def galleryManager = Authenticated.async { implicit request =>
val galls = galService.findListGall()
galls.map {
gall =>
Ok(views.html.manager.gallery.list_gallery(gall))
}.recover {
case t: TimeoutException =>
Logger.error("Problem found in gallery list process")
InternalServerError(t.getMessage)
}
}
def add = Authenticated.async { implicit request =>
Gallery.formGall.bindFromRequest.fold(
formErr => Future.successful(Ok(views.html.manager.gallery.create_gallery(formErr)).flashing("fail" -> messagesApi("fail.add"))),
data => {
galService.find(data._id.getOrElse("")).flatMap {
case Some(_) =>
galService.updateGall(data).map {
case Some(x) => Redirect(routes.GalleryControl.galleryManager()).flashing("success" -> messagesApi("success.update"))
case None => Redirect(routes.GalleryControl.galleryManager()).flashing("fail" -> messagesApi("fail.update"))
}
case None =>
val gall = Gallery(
_id = Some(BSONObjectID.generate.stringify),
galName = Option.apply(data.galName).orNull,
galDesc = data.galDesc,
galURLSmall = data.galURLSmall,
galURLLarge = data.galURLLarge)
galService.addGall(gall)
Future.successful(Redirect(routes.GalleryControl.galleryManager()).flashing("success" -> messagesApi("success.add")))
}
}).recover {
case t: TimeoutException =>
Logger.error("Problem adding in gallery list process")
InternalServerError(t.getMessage)
}
}
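// Note: `add` doubles as an upsert: if the submitted form carries an _id that already exists, the
// gallery is updated; otherwise a new document is inserted with a freshly generated BSONObjectID.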
def edit(id: String) = Authenticated.async { implicit request =>
galService.find(id).map {
case Some(gall) => Ok(views.html.manager.gallery.create_gallery(Gallery.formGall.fill(gall)))
case None => Redirect(routes.GalleryControl.galleryManager())
}
}
def remove(id: String) = Authenticated.async { implicit request =>
galService.removeGall(id).map {
case Some(_) => Redirect(routes.GalleryControl.galleryManager()).flashing("success" -> messagesApi("success.remove"))
case None => Redirect(routes.GalleryControl.galleryManager()).flashing("fail" -> messagesApi("fail.update"))
}
}
}
| carlosFattor/DoceTentacaoScala | app/controllers/GalleryControl.scala | Scala | apache-2.0 | 3,357 |
package sk.scalagine.math
/**
* Created with IntelliJ IDEA.
* User: zladovan
* Date: 13.09.14
* Time: 23:15
*/
object MatrixImplicitConversions {
final implicit def matrixToMatrix2x2(m: Matrix[Vector2, Vector2]): Matrix2x2 =
new Matrix2x2(
m.data(0)(0), m.data(0)(1),
m.data(1)(0), m.data(1)(1))
final implicit def matrixToMatrix3x3(m: Matrix[Vector3, Vector3]): Matrix3x3 =
new Matrix3x3(
m.data(0)(0), m.data(0)(1), m.data(0)(2),
m.data(1)(0), m.data(1)(1), m.data(1)(2),
m.data(2)(0), m.data(2)(1), m.data(2)(2))
final implicit def matrixToMatrix4x4(m: Matrix[Vector4, Vector4]): Matrix4x4 =
new Matrix4x4(
m.data(0)(0), m.data(0)(1), m.data(0)(2), m.data(0)(3),
m.data(1)(0), m.data(1)(1), m.data(1)(2), m.data(1)(3),
m.data(2)(0), m.data(2)(1), m.data(2)(2), m.data(2)(3),
m.data(3)(0), m.data(3)(1), m.data(3)(2), m.data(3)(3))
}
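// Illustrative note (the rotation values are assumptions, not part of this file): with these
// conversions in scope, a generic Matrix[Vector3, Vector3], such as the result of composing two
// transformations, can be used directly where a concrete Matrix3x3 is expected:
//   val composed: Matrix3x3 = rotationA * rotationB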
| zladovan/scalagine | engine/math/src/main/scala/sk/scalagine/math/MatrixImplicitConversions.scala | Scala | mit | 917 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.nio.ByteBuffer
import java.util.{List => JList, Map => JMap}
import scala.collection.JavaConverters.seqAsJavaListConverter
import scala.collection.JavaConverters.mapAsJavaMapConverter
import org.apache.avro.Schema
import org.apache.avro.generic.IndexedRecord
import org.apache.hadoop.fs.Path
import org.apache.parquet.avro.AvroParquetWriter
import org.apache.spark.sql.Row
import org.apache.spark.sql.execution.datasources.parquet.test.avro._
import org.apache.spark.sql.test.SharedSQLContext
class ParquetAvroCompatibilitySuite extends ParquetCompatibilityTest with SharedSQLContext {
private def withWriter[T <: IndexedRecord]
(path: String, schema: Schema)
(f: AvroParquetWriter[T] => Unit): Unit = {
logInfo(
s"""Writing Avro records with the following Avro schema into Parquet file:
|
|${schema.toString(true)}
""".stripMargin)
val writer = new AvroParquetWriter[T](new Path(path), schema)
try f(writer) finally writer.close()
}
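// The helper above writes records through parquet-avro's AvroParquetWriter, so the resulting files
// carry Avro-derived Parquet schemas; each test below reads them back with sqlContext.read.parquet
// to verify Spark SQL's compatibility with that layout.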
test("required primitives") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[AvroPrimitives](path, AvroPrimitives.getClassSchema) { writer =>
(0 until 10).foreach { i =>
writer.write(
AvroPrimitives.newBuilder()
.setBoolColumn(i % 2 == 0)
.setIntColumn(i)
.setLongColumn(i.toLong * 10)
.setFloatColumn(i.toFloat + 0.1f)
.setDoubleColumn(i.toDouble + 0.2d)
.setBinaryColumn(ByteBuffer.wrap(s"val_$i".getBytes("UTF-8")))
.setStringColumn(s"val_$i")
.build())
}
}
logParquetSchema(path)
checkAnswer(sqlContext.read.parquet(path), (0 until 10).map { i =>
Row(
i % 2 == 0,
i,
i.toLong * 10,
i.toFloat + 0.1f,
i.toDouble + 0.2d,
s"val_$i".getBytes("UTF-8"),
s"val_$i")
})
}
}
test("optional primitives") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[AvroOptionalPrimitives](path, AvroOptionalPrimitives.getClassSchema) { writer =>
(0 until 10).foreach { i =>
val record = if (i % 3 == 0) {
AvroOptionalPrimitives.newBuilder()
.setMaybeBoolColumn(null)
.setMaybeIntColumn(null)
.setMaybeLongColumn(null)
.setMaybeFloatColumn(null)
.setMaybeDoubleColumn(null)
.setMaybeBinaryColumn(null)
.setMaybeStringColumn(null)
.build()
} else {
AvroOptionalPrimitives.newBuilder()
.setMaybeBoolColumn(i % 2 == 0)
.setMaybeIntColumn(i)
.setMaybeLongColumn(i.toLong * 10)
.setMaybeFloatColumn(i.toFloat + 0.1f)
.setMaybeDoubleColumn(i.toDouble + 0.2d)
.setMaybeBinaryColumn(ByteBuffer.wrap(s"val_$i".getBytes("UTF-8")))
.setMaybeStringColumn(s"val_$i")
.build()
}
writer.write(record)
}
}
logParquetSchema(path)
checkAnswer(sqlContext.read.parquet(path), (0 until 10).map { i =>
if (i % 3 == 0) {
Row.apply(Seq.fill(7)(null): _*)
} else {
Row(
i % 2 == 0,
i,
i.toLong * 10,
i.toFloat + 0.1f,
i.toDouble + 0.2d,
s"val_$i".getBytes("UTF-8"),
s"val_$i")
}
})
}
}
test("non-nullable arrays") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[AvroNonNullableArrays](path, AvroNonNullableArrays.getClassSchema) { writer =>
(0 until 10).foreach { i =>
val record = {
val builder =
AvroNonNullableArrays.newBuilder()
.setStringsColumn(Seq.tabulate(3)(i => s"val_$i").asJava)
if (i % 3 == 0) {
builder.setMaybeIntsColumn(null).build()
} else {
builder.setMaybeIntsColumn(Seq.tabulate(3)(Int.box).asJava).build()
}
}
writer.write(record)
}
}
logParquetSchema(path)
checkAnswer(sqlContext.read.parquet(path), (0 until 10).map { i =>
Row(
Seq.tabulate(3)(i => s"val_$i"),
if (i % 3 == 0) null else Seq.tabulate(3)(identity))
})
}
}
ignore("nullable arrays (parquet-avro 1.7.0 does not properly support this)") {
// TODO Complete this test case after upgrading to parquet-mr 1.8+
}
test("SPARK-10136 array of primitive array") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[AvroArrayOfArray](path, AvroArrayOfArray.getClassSchema) { writer =>
(0 until 10).foreach { i =>
writer.write(AvroArrayOfArray.newBuilder()
.setIntArraysColumn(
Seq.tabulate(3, 3)((i, j) => i * 3 + j: Integer).map(_.asJava).asJava)
.build())
}
}
logParquetSchema(path)
checkAnswer(sqlContext.read.parquet(path), (0 until 10).map { i =>
Row(Seq.tabulate(3, 3)((i, j) => i * 3 + j))
})
}
}
test("map of primitive array") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[AvroMapOfArray](path, AvroMapOfArray.getClassSchema) { writer =>
(0 until 10).foreach { i =>
writer.write(AvroMapOfArray.newBuilder()
.setStringToIntsColumn(
Seq.tabulate(3) { i =>
i.toString -> Seq.tabulate(3)(j => i + j: Integer).asJava
}.toMap.asJava)
.build())
}
}
logParquetSchema(path)
checkAnswer(sqlContext.read.parquet(path), (0 until 10).map { i =>
Row(Seq.tabulate(3)(i => i.toString -> Seq.tabulate(3)(j => i + j)).toMap)
})
}
}
test("various complex types") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[ParquetAvroCompat](path, ParquetAvroCompat.getClassSchema) { writer =>
(0 until 10).foreach(i => writer.write(makeParquetAvroCompat(i)))
}
logParquetSchema(path)
checkAnswer(sqlContext.read.parquet(path), (0 until 10).map { i =>
Row(
Seq.tabulate(3)(n => s"arr_${i + n}"),
Seq.tabulate(3)(n => n.toString -> (i + n: Integer)).toMap,
Seq.tabulate(3) { n =>
(i + n).toString -> Seq.tabulate(3) { m =>
Row(Seq.tabulate(3)(j => i + j + m), s"val_${i + m}")
}
}.toMap)
})
}
}
def makeParquetAvroCompat(i: Int): ParquetAvroCompat = {
def makeComplexColumn(i: Int): JMap[String, JList[Nested]] = {
Seq.tabulate(3) { n =>
(i + n).toString -> Seq.tabulate(3) { m =>
Nested
.newBuilder()
.setNestedIntsColumn(Seq.tabulate(3)(j => i + j + m: Integer).asJava)
.setNestedStringColumn(s"val_${i + m}")
.build()
}.asJava
}.toMap.asJava
}
ParquetAvroCompat
.newBuilder()
.setStringsColumn(Seq.tabulate(3)(n => s"arr_${i + n}").asJava)
.setStringToIntColumn(Seq.tabulate(3)(n => n.toString -> (i + n: Integer)).toMap.asJava)
.setComplexColumn(makeComplexColumn(i))
.build()
}
test("SPARK-9407 Push down predicates involving Parquet ENUM columns") {
import testImplicits._
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[ParquetEnum](path, ParquetEnum.getClassSchema) { writer =>
(0 until 4).foreach { i =>
writer.write(ParquetEnum.newBuilder().setSuit(Suit.values.apply(i)).build())
}
}
checkAnswer(sqlContext.read.parquet(path).filter('suit === "SPADES"), Row("SPADES"))
}
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetAvroCompatibilitySuite.scala | Scala | apache-2.0 | 8,726 |
package org.jetbrains.plugins.scala
package actions
import java.util.Properties
import com.intellij.ide.IdeView
import com.intellij.ide.actions.{CreateFileFromTemplateDialog, CreateTemplateInPackageAction}
import com.intellij.ide.fileTemplates.{FileTemplate, FileTemplateManager, JavaTemplateUtil}
import com.intellij.openapi.actionSystem._
import com.intellij.openapi.diagnostic.ControlFlowException
import com.intellij.openapi.fileTypes.ex.FileTypeManagerEx
import com.intellij.openapi.module.{Module, ModuleType}
import com.intellij.openapi.project.{DumbAware, Project}
import com.intellij.openapi.roots.ProjectRootManager
import com.intellij.openapi.ui.InputValidatorEx
import com.intellij.openapi.util.text.StringUtil
import com.intellij.psi._
import com.intellij.psi.codeStyle.CodeStyleManager
import org.jetbrains.annotations.NonNls
import org.jetbrains.jps.model.java.JavaModuleSourceRootTypes
import org.jetbrains.plugins.scala.codeInspection.ScalaInspectionBundle
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.project._
import org.jetbrains.sbt.project.module.SbtModuleType
/**
* User: Alexander Podkhalyuzin
* Date: 15.09.2009
*/
class NewScalaTypeDefinitionAction extends CreateTemplateInPackageAction[ScTypeDefinition](
ScalaBundle.message("newclass.menu.action.text"),
ScalaBundle.message("newclass.menu.action.description"),
Icons.CLASS,
JavaModuleSourceRootTypes.SOURCES
) with DumbAware {
override protected def buildDialog(project: Project, directory: PsiDirectory,
builder: CreateFileFromTemplateDialog.Builder): Unit = {
//noinspection ScalaExtractStringToBundle
{
builder.addKind("Class", Icons.CLASS, ScalaFileTemplateUtil.SCALA_CLASS)
builder.addKind("Case Class", Icons.CASE_CLASS, ScalaFileTemplateUtil.SCALA_CASE_CLASS)
builder.addKind("Object", Icons.OBJECT, ScalaFileTemplateUtil.SCALA_OBJECT)
builder.addKind("Case Object", Icons.CASE_OBJECT, ScalaFileTemplateUtil.SCALA_CASE_OBJECT)
builder.addKind("Trait", Icons.TRAIT, ScalaFileTemplateUtil.SCALA_TRAIT)
}
for {
template <- FileTemplateManager.getInstance(project).getAllTemplates
fileType = FileTypeManagerEx.getInstanceEx.getFileTypeByExtension(template.getExtension)
if fileType == ScalaFileType.INSTANCE && checkPackageExists(directory)
templateName = template.getName
} builder.addKind(templateName, fileType.getIcon, templateName)
builder.setTitle(ScalaBundle.message("create.new.scala.class"))
builder.setValidator(new InputValidatorEx {
override def getErrorText(inputString: String): String = {
if (inputString.length > 0 && !ScalaNamesUtil.isQualifiedName(inputString)) {
return ScalaBundle.message("this.is.not.a.valid.scala.qualified.name")
}
// Specifically make sure that the input string doesn't repeat an existing package prefix (twice).
// Technically, "org.example.application.org.example.application.Main" is not an error, but most likely it's so (and there's no way to display a warning).
for (sourceFolder <- Option(ProjectRootManager.getInstance(project).getFileIndex.getSourceFolder(directory.getVirtualFile));
packagePrefix = sourceFolder.getPackagePrefix if !packagePrefix.isEmpty
if (inputString + ".").startsWith(packagePrefix + ".")) {
return ScalaInspectionBundle.message("package.names.does.not.correspond.to.directory.structure.package.prefix", sourceFolder.getFile.getName, packagePrefix)
}
null
}
override def checkInput(inputString: String): Boolean = {
true
}
override def canClose(inputString: String): Boolean = {
!StringUtil.isEmptyOrSpaces(inputString) && getErrorText(inputString) == null
}
})
}
override def getActionName(directory: PsiDirectory, newName: String, templateName: String): String = {
ScalaBundle.message("newclass.menu.action.text")
}
override def getNavigationElement(createdElement: ScTypeDefinition): PsiElement = createdElement.extendsBlock
override def doCreate(directory: PsiDirectory, newName: String, templateName: String): ScTypeDefinition = {
createClassFromTemplate(directory, newName, templateName) match {
case scalaFile: ScalaFile =>
scalaFile.typeDefinitions.headOption.orNull
case _ => null
}
}
override def isAvailable(dataContext: DataContext): Boolean = {
super.isAvailable(dataContext) && isUnderSourceRoots(dataContext)
}
private def isUnderSourceRoots(dataContext: DataContext): Boolean = {
val module: Module = dataContext.getData(PlatformCoreDataKeys.MODULE.getName).asInstanceOf[Module]
val validModule =
if (module == null) false
else
ModuleType.get(module) match {
case _: SbtModuleType => true
case _ => module.hasScala
}
validModule && isUnderSourceRoots0(dataContext)
}
private def isUnderSourceRoots0(dataContext: DataContext) = {
val view = dataContext.getData(LangDataKeys.IDE_VIEW.getName).asInstanceOf[IdeView]
val project = dataContext.getData(CommonDataKeys.PROJECT.getName).asInstanceOf[Project]
if (view != null && project != null) {
val projectFileIndex = ProjectRootManager.getInstance(project).getFileIndex
val dirs = view.getDirectories
dirs.exists { dir =>
val aPackage = JavaDirectoryService.getInstance.getPackage(dir)
projectFileIndex.isInSourceContent(dir.getVirtualFile) && aPackage != null
}
} else false
}
private def createClassFromTemplate(directory: PsiDirectory, className: String, templateName: String,
parameters: String*): PsiFile = {
NewScalaTypeDefinitionAction.createFromTemplate(directory, className, templateName, parameters: _*)
}
override def checkPackageExists(directory: PsiDirectory): Boolean = JavaDirectoryService.getInstance.getPackage(directory) != null
}
object NewScalaTypeDefinitionAction {
@NonNls private[actions] val NAME_TEMPLATE_PROPERTY: String = "NAME"
@NonNls private[actions] val LOW_CASE_NAME_TEMPLATE_PROPERTY: String = "lowCaseName"
def createFromTemplate(directory: PsiDirectory, name: String, templateName: String, parameters: String*): PsiFile = {
val project = directory.getProject
val template: FileTemplate = FileTemplateManager.getInstance(project).getInternalTemplate(templateName)
val properties: Properties = new Properties(FileTemplateManager.getInstance(project).getDefaultProperties())
properties.setProperty(FileTemplate.ATTRIBUTE_PACKAGE_NAME,
ScalaNamesUtil.escapeKeywordsFqn(JavaTemplateUtil.getPackageName(directory)))
properties.setProperty(NAME_TEMPLATE_PROPERTY, name)
properties.setProperty(LOW_CASE_NAME_TEMPLATE_PROPERTY, name.substring(0, 1).toLowerCase + name.substring(1))
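// The remaining `parameters` are treated as alternating key/value pairs and exposed to the
// template as additional properties.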
var i: Int = 0
while (i < parameters.length) {
{
properties.setProperty(parameters(i), parameters(i + 1))
}
i += 2
}
var text: String = null
try {
text = template.getText(properties)
}
catch {
case c: ControlFlowException => throw c
case e: Exception =>
throw new RuntimeException("Unable to load template for " + FileTemplateManager.getInstance(project).internalTemplateToSubject(templateName), e)
}
val factory: PsiFileFactory = PsiFileFactory.getInstance(project)
val scalaFileType = ScalaFileType.INSTANCE
val file: PsiFile = factory.createFileFromText(s"$name.${scalaFileType.getDefaultExtension}", scalaFileType, text)
CodeStyleManager.getInstance(project).reformat(file)
directory.add(file).asInstanceOf[PsiFile]
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/actions/NewScalaTypeDefinitionAction.scala | Scala | apache-2.0 | 7,978 |
package com.davidlukac.learn.scala
/**
* Created by davidlukac on 24/03/16.
*/
object LearnScalaRunner {
def main(args: Array[String]) {
LogicalFunctions.main(Array())
MathFunctions.main(Array())
}
}
| davidlukac/learning-scala | src/main/scala/com/davidlukac/learn/scala/LearnScalaRunner.scala | Scala | mit | 219 |
package io.scalajs.nodejs.readline
import io.scalajs.RawOptions
import io.scalajs.nodejs.events.IEventEmitter
import scala.scalajs.js
import scala.scalajs.js.annotation.JSImport
import scala.scalajs.js.|
/**
* Readline allows reading of a stream (such as process.stdin) on a line-by-line basis.
* To use this module, do require('readline').
* Note that once you've invoked this module, your Node.js program will not terminate until you've closed the interface.
* @see https://nodejs.org/api/readline.html
* @author [email protected]
*/
@js.native
trait Readline extends IEventEmitter {
/**
* Clears current line of given TTY stream in a specified direction. <tt>dir</tt> should have one of following values:
* <ul>
* <li>-1 - to the left from cursor</li>
* <li>0 - the entire line</li>
* <li>1 - to the right from cursor</li>
* </ul>
* @example readline.clearLine(stream, dir)
*/
def clearLine(stream: js.Any, dir: Int): Unit = js.native
/**
* Clears the screen from the current position of the cursor down.
* @example readline.clearScreenDown(stream)
*/
def clearScreenDown(stream: js.Any): Unit = js.native
/**
* Creates a readline Interface instance.
* @example readline.createInterface(options)
*/
def createInterface(options: ReadlineOptions | RawOptions): Interface = js.native
/**
* Move cursor to the specified position in a given TTY stream.
* @example readline.cursorTo(stream, x, y)
*/
def cursorTo(stream: js.Any, x: Int, y: Int): Unit = js.native
/**
* Move cursor relative to it's current position in a given TTY stream.
* @example readline.moveCursor(stream, dx, dy)
*/
def moveCursor(stream: js.Any, dx: Int, dy: Int): Unit = js.native
}
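// Minimal usage sketch (illustrative only; it assumes the returned Interface exposes the
// EventEmitter `on` method and that ReadlineOptions accepts an `input` stream):
// val rl = Readline.createInterface(new ReadlineOptions(input = process.stdin))
// rl.on("line", (line: String) => println(s"Received: $line"))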
/**
* Readline Singleton
* @author [email protected]
*/
@js.native
@JSImport("readline", JSImport.Namespace)
object Readline extends Readline
| scalajs-io/nodejs | app/common/src/main/scala/io/scalajs/nodejs/readline/Readline.scala | Scala | apache-2.0 | 1,941 |
/*
Copyright (c) 2013-2016 Karol M. Stasiak
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package io.github.karols.units
import language.higherKinds
import language.implicitConversions
import language.existentials
import io.github.karols.units.internal.ratios._
import io.github.karols.units.internal.Bools._
import io.github.karols.units.internal.Integers._
import io.github.karols.units.internal.Strings._
import io.github.karols.units.internal.SingleUnits._
import io.github.karols.units.internal.UnitImpl._
import io.github.karols.units.internal.Conversions._
import io.github.karols.units.internal.UnionType._
import io.github.karols.units.internal.UnitName
import scala.math
import scala.math.{Numeric, Fractional}
object WithU {
// TODO: circumvent erasure-related problems
implicit def _orderingInstance[N, U<:MUnit](implicit o:Ordering[N]): Ordering[WithU[N,U]] =
new Ordering[WithU[N,U]]{
override def compare(x:WithU[N,U], y:WithU[N,U]) =
o.compare(x.value, y.value)
}
}
/**
A value with a unit of measure.
'''Warning: this is an experimental feature
and may be subject to removal or severe redesign.'''
*/
case class WithU[@specialized N, U<:MUnit](val value:N) {
@inline
def mkString(implicit name: UnitName[U]) = value.toString + name.toString
@inline
override def toString = value.toString
/** Add a value with the same unit. */
def +(i: WithU[N,U])(implicit n: Numeric[N]) = WithU[N,U](n.plus(value,i.value))
/** Subtract a value with the same unit. */
def -(i: WithU[N,U])(implicit n: Numeric[N]) = WithU[N,U](n.minus(value,i.value))
/** Negate this value. */
def unary_-(implicit n: Numeric[N]) = WithU[N,U](n.negate(value))
/** Multiply by a value with a unit. */
def *[V<:MUnit](i: WithU[N,V])(implicit n: Numeric[N]) =
WithU[N,U#Mul[V]](n.times(value,i.value))
/** Multiply by a dimensionless value. */
def *(i: N)(implicit n: Numeric[N]) =
WithU[N,U](n.times(value,i))
/** Multiply by a dimensionless value. */
def times(i: Int)(implicit n: Numeric[N]) =
WithU[N,U](n.times(value,n.fromInt(i)))
/** Divide by a value with a unit. */
def /[V<:MUnit](i: WithU[N,V])(implicit n: Fractional[N]) =
WithU[N,U#Mul[V#Invert]](n.div(value,i.value))
/** Divide by a dimensionless value. */
def /(i: N)(implicit n: Fractional[N]) =
WithU[N,U](n.div(value,i))
/** Divide by a dimensionless value. */
def dividedBy(i: Int)(implicit n: Fractional[N]) =
WithU[N,U](n.div(value,n.fromInt(i)))
def < (i: WithU[N,U])(implicit o:Ordering[N]) = o.compare(value,i.value) < 0
def <=(i: WithU[N,U])(implicit o:Ordering[N]) = o.compare(value,i.value) <= 0
def > (i: WithU[N,U])(implicit o:Ordering[N]) = o.compare(value,i.value) > 0
def >=(i: WithU[N,U])(implicit o:Ordering[N]) = o.compare(value,i.value) >= 0
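// Illustrative usage (the `metre` unit below is an assumption, not defined in this file):
//   val a = WithU[Double, metre](2.0)
//   val b = WithU[Double, metre](3.5)
//   a + b   // WithU[Double, metre] holding 5.5
//   a * b   // WithU[Double, metre#Mul[metre]]; the unit algebra is tracked in the type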
}
| KarolS/units | units/src/main/scala/io/github/karols/units/WithU.scala | Scala | mit | 3,761 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc
import java.net.URL
import sttp.model.UriInterpolator
package object http {
implicit class StringContextOps(val sc: StringContext) extends AnyVal {
def url(args: Any*): URL =
UriInterpolator.interpolate(sc, args: _*).toJavaUri.toURL
}
}
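// A minimal usage sketch (the host and id below are illustrative values): with StringContextOps in
// scope, the `url` interpolator parses the literal, percent-encodes interpolated arguments via
// sttp's UriInterpolator, and returns a java.net.URL.
object UrlInterpolatorSketch {
  import uk.gov.hmrc.http.StringContextOps
  def main(args: Array[String]): Unit = {
    val userId = "123 456"
    val endpoint: URL = url"https://example.com/users/$userId"
    println(endpoint) // the space in userId is percent-encoded in the resulting URL
  }
}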
| hmrc/http-verbs | http-verbs-common/src/main/scala/uk/gov/hmrc/http/package.scala | Scala | apache-2.0 | 879 |
/*
* Copyright (C) 2014-2015 Really Inc. <http://really.io>
*/
package io.really.gorilla
import akka.actor.Props
import akka.testkit.{ EventFilter, TestProbe, TestActorRef }
import _root_.io.really._
import _root_.io.really.gorilla.SubscriptionManager.{ UpdateSubscriptionFields, Unsubscribe }
import _root_.io.really.protocol.ProtocolFormats.PushMessageWrites.{ Updated, Deleted }
import _root_.io.really.protocol.{ UpdateOp, SubscriptionFailure, UpdateCommand, FieldUpdatedOp }
import _root_.io.really.protocol.SubscriptionFailure.SubscriptionFailureWrites
import _root_.io.really.gorilla.GorillaEventCenter.ReplayerSubscribed
import _root_.io.really.fixture.PersistentModelStoreFixture
import _root_.io.really.model.persistent.ModelRegistry.RequestModel
import _root_.io.really.model.persistent.ModelRegistry.ModelResult
import _root_.io.really.model.persistent.PersistentModelStore
import _root_.io.really.model.persistent.ModelRegistry.ModelOperation.{ ModelUpdated, ModelDeleted }
import _root_.io.really.gorilla.mock.TestObjectSubscriber
import _root_.io.really.gorilla.mock.TestObjectSubscriber.{ Ping, GetFieldList, Pong }
import akka.persistence.{ Update => PersistenceUpdate }
import com.typesafe.config.ConfigFactory
import _root_.io.really.model._
import play.api.libs.json._
class ObjectSubscriberSpec(config: ReallyConfig) extends BaseActorSpecWithMongoDB(config) {
def this() = this(new ReallyConfig(ConfigFactory.parseString("""
really.core.akka.loggers = ["akka.testkit.TestEventListener"],
really.core.akka.loglevel = WARNING
really.core.gorilla.wait-for-replayer = 1ms
""").withFallback(TestConf.getConfig().getRawConfig)))
override lazy val globals = new TestReallyGlobals(config, system) {
override def objectSubscriberProps(rSubscription: RSubscription): Props =
Props(classOf[TestObjectSubscriber], rSubscription, this)
}
val userInfo = UserInfo(AuthProvider.Anonymous, "34567890", Some(R("/_anonymous/1234567")), Json.obj())
implicit val session = globals.session
val friendOnGetJs: JsScript =
"""
|hide("lName");
""".stripMargin
val friendModel = Model(
R / 'friend,
CollectionMetadata(23),
Map(
"fName" -> ValueField("fName", DataType.RString, None, None, true),
"lName" -> ValueField("lName", DataType.RString, None, None, true),
"age" -> ValueField("age", DataType.RLong, None, None, true)
),
JsHooks(
None,
Some(friendOnGetJs),
None,
None,
None,
None,
None
),
null,
List.empty
)
val brandOnGetJs: JsScript =
"""
|cancel(23, "Unexpected Error");
""".stripMargin
val brandModel = Model(
R / 'brand,
CollectionMetadata(23),
Map(
"name" -> ValueField("name", DataType.RString, None, None, true),
"since" -> ValueField("since", DataType.RLong, None, None, true)
),
JsHooks(
None,
Some(brandOnGetJs),
None,
None,
None,
None,
None
),
null,
List.empty
)
val models: List[Model] = List(BaseActorSpec.userModel, BaseActorSpec.carModel,
BaseActorSpec.companyModel, BaseActorSpec.authorModel, BaseActorSpec.postModel, friendModel, brandModel)
override def beforeAll() = {
super.beforeAll()
globals.persistentModelStore ! PersistentModelStore.UpdateModels(models)
globals.persistentModelStore ! PersistentModelStoreFixture.GetState
expectMsg(models)
globals.modelRegistry ! PersistenceUpdate(await = true)
globals.modelRegistry ! RequestModel.GetModel(BaseActorSpec.userModel.r, self)
expectMsg(ModelResult.ModelObject(BaseActorSpec.userModel, List.empty))
globals.modelRegistry ! RequestModel.GetModel(brandModel.r, self)
expectMsg(ModelResult.ModelObject(brandModel, List.empty))
}
"Object Subscriber" should "Initialized Successfully" in {
val testProbe = TestProbe()
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub1 = RSubscription(ctx, r, Set("name"), rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor1 = TestActorRef[TestObjectSubscriber](globals.objectSubscriberProps(rSub1))
objectSubscriberActor1.underlyingActor.fields shouldEqual Set("name")
objectSubscriberActor1.underlyingActor.r shouldEqual r
objectSubscriberActor1.underlyingActor.logTag shouldEqual s"ObjectSubscriber ${pushChannel.ref.path}$$$r"
val rSub2 = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor2 = system.actorOf(globals.objectSubscriberProps(rSub2))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriberActor2, rSub2, Some(rev))))
objectSubscriberActor2 ! ReplayerSubscribed(replayer)
objectSubscriberActor2.tell(Ping, testProbe.ref)
testProbe.expectMsg(Pong)
objectSubscriberActor2.tell(GetFieldList, testProbe.ref)
testProbe.expectMsg(Set("name", "age"))
}
it should "handle Unsubscribe successfully during starter receiver and self termination" in {
val testProbe = TestProbe()
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val deathProbe = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub = RSubscription(ctx, r, Set("name"), rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor = system.actorOf(globals.objectSubscriberProps(rSub))
deathProbe.watch(objectSubscriberActor)
objectSubscriberActor ! SubscriptionManager.Unsubscribe
requestDelegate.expectMsg(SubscriptionManager.Unsubscribe)
deathProbe.expectTerminated(objectSubscriberActor)
}
it should "handle Unsubscribe during withModel and self termination" in {
val testProbe = TestProbe()
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val deathProbe = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 602
val rSub = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor = system.actorOf(globals.objectSubscriberProps(rSub))
deathProbe.watch(objectSubscriberActor)
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriberActor, rSub, Some(rev))))
objectSubscriberActor ! ReplayerSubscribed(replayer)
objectSubscriberActor.tell(Ping, testProbe.ref)
testProbe.expectMsg(Pong)
objectSubscriberActor.tell(GetFieldList, testProbe.ref)
testProbe.expectMsg(Set("name", "age"))
objectSubscriberActor ! Unsubscribe
requestDelegate.expectMsg(Unsubscribe)
deathProbe.expectTerminated(objectSubscriberActor)
}
it should "update Internal field List Successfully" in {
val testProbe = TestProbe()
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub = RSubscription(ctx, r, Set("name"), rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor = system.actorOf(globals.objectSubscriberProps(rSub))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriberActor, rSub, Some(rev))))
objectSubscriberActor ! ReplayerSubscribed(replayer)
objectSubscriberActor.tell(Ping, testProbe.ref)
testProbe.expectMsg(Pong)
objectSubscriberActor ! UpdateSubscriptionFields(Set("age"))
objectSubscriberActor.tell(GetFieldList, testProbe.ref)
testProbe.expectMsg(Set("name", "age"))
}
it should "pass delete updates to push channel actor correctly and then terminates" in {
val testProbe = TestProbe()
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val deathProbe = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub = RSubscription(ctx, r, Set("name"), rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor = system.actorOf(globals.objectSubscriberProps(rSub))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriberActor, rSub, Some(rev))))
objectSubscriberActor ! ReplayerSubscribed(replayer)
objectSubscriberActor.tell(Ping, testProbe.ref)
testProbe.expectMsg(Pong)
objectSubscriberActor.tell(GetFieldList, testProbe.ref)
testProbe.expectMsg(Set("name"))
    objectSubscriberActor ! GorillaLogDeletedEntry(r, rev, 1L, userInfo)
pushChannel.expectMsg(Deleted.toJson(r, userInfo))
deathProbe.watch(objectSubscriberActor)
deathProbe.expectTerminated(objectSubscriberActor)
}
it should "for empty list subscription change the internal state from empty list to all model fields" in {
val testProbe = TestProbe()
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriber = system.actorOf(globals.objectSubscriberProps(rSub))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriber, rSub, Some(rev))))
objectSubscriber ! ReplayerSubscribed(replayer)
objectSubscriber.tell(Ping, testProbe.ref)
testProbe.expectMsg(Pong)
objectSubscriber.tell(GetFieldList, testProbe.ref)
testProbe.expectMsg(Set("name", "age"))
}
it should "fail with internal server error if the model version was inconsistent" in {
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val deathProbe = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor = system.actorOf(globals.objectSubscriberProps(rSub))
deathProbe.watch(objectSubscriberActor)
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriberActor, rSub, Some(rev))))
objectSubscriberActor ! ReplayerSubscribed(replayer)
EventFilter.error(occurrences = 1, message = s"ObjectSubscriber ${pushChannel.ref.path}$$$r is going to die since" +
s" the subscription failed because of: Model Version inconsistency\\n error code: 502") intercept {
objectSubscriberActor ! GorillaLogUpdatedEntry(rSub.r, Json.obj(), 2L, 1L, ctx.auth, List())
}
}
it should "filter the hidden fields from the an empty list subscription and sent the rest of model fields" in {
val testProbe = TestProbe()
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val rev: Revision = 1L
val r = R / 'friend / 1
val rSub = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val friendSub = rSub.copy(r = r)
val objectSubscriber = system.actorOf(globals.objectSubscriberProps(friendSub))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriber, friendSub, Some(rev))))
objectSubscriber ! ReplayerSubscribed(replayer)
val friendObject = Json.obj("_r" -> r, "_rev" -> 1L, "fName" -> "Ahmed", "age" -> 23, "lName" -> "Refaey")
val createdEvent = GorillaLogCreatedEntry(r, friendObject, 1L, 23L, ctx.auth)
objectSubscriber ! GorillaLogUpdatedEntry(rSub.r, Json.obj(), 1L, 23L, ctx.auth, List.empty)
pushChannel.expectMsg(Updated.toJson(r, 1L, List.empty, ctx.auth))
objectSubscriber.tell(GetFieldList, testProbe.ref)
testProbe.expectMsg(Set("fName", "lName", "age"))
}
it should "filter the hidden fields from the the subscription list and sent the rest of the subscription list" in {
val testProbe = TestProbe()
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val rev: Revision = 1L
val r: R = R / 'friend / 2
val friendSub = RSubscription(ctx, r, Set("fName", "lName"), rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriber = system.actorOf(globals.objectSubscriberProps(friendSub))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriber, friendSub, Some(rev))))
objectSubscriber ! ReplayerSubscribed(replayer)
objectSubscriber.tell(GetFieldList, testProbe.ref)
testProbe.expectMsg(Set("fName", "lName"))
val updates: List[UpdateOp] = List(
UpdateOp(UpdateCommand.Set, "fName", JsString("Ahmed")),
UpdateOp(UpdateCommand.Set, "lName", JsString("Refaey"))
)
objectSubscriber ! GorillaLogUpdatedEntry(friendSub.r, Json.obj(), 1L, 23L, ctx.auth, updates)
pushChannel.expectMsg(Updated.toJson(r, 1L, List(FieldUpdatedOp("fName", UpdateCommand.Set, Some(JsString("Ahmed")))), ctx.auth))
}
ignore should "pass nothing if the model.executeOnGet evaluated to Terminated" in {
val testProbe = TestProbe()
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val rev: Revision = 1L
val r = R / 'brand / 1
val rSub = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val brandSub = rSub.copy(r = r, fields = Set("name", "since"))
val objectSubscriber = system.actorOf(globals.objectSubscriberProps(brandSub))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriber, brandSub, Some(rev))))
objectSubscriber ! ReplayerSubscribed(replayer)
objectSubscriber.tell(GetFieldList, testProbe.ref)
testProbe.expectMsg(Set("name", "since"))
val brandObject = Json.obj("_r" -> r, "_rev" -> 1L, "name" -> "QuickSilver", "since" -> 1969)
val createdEvent = GorillaLogCreatedEntry(r, brandObject, 1L, 23L, ctx.auth)
objectSubscriber ! createdEvent
pushChannel.expectNoMsg()
}
it should "in case of subscription failure, log and acknowledge the delegate and then stop" in {
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val deathProbe = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor = system.actorOf(globals.objectSubscriberProps(rSub))
deathProbe.watch(objectSubscriberActor)
EventFilter.error(occurrences = 1, message = s"ObjectSubscriber ${rSub.pushChannel.path}$$${r} is going to die since the subscription failed because of: Internal Server Error\\n error code: 401") intercept {
objectSubscriberActor ! SubscriptionFailure(r, 401, "Test Error Reason")
}
pushChannel.expectMsg(SubscriptionFailureWrites.writes(SubscriptionFailure(r, 401, "Internal Server Error")))
deathProbe.expectTerminated(objectSubscriberActor)
}
it should "handle associated replayer termination" in {
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val deathProbe = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor = system.actorOf(globals.objectSubscriberProps(rSub))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriberActor, rSub, Some(rev))))
objectSubscriberActor ! ReplayerSubscribed(replayer)
deathProbe.watch(objectSubscriberActor)
EventFilter.error(occurrences = 1, message = s"ObjectSubscriber ${rSub.pushChannel.path}$$${r} is going to die since the subscription failed because of: Associated replayer stopped\\n error code: 505") intercept {
system.stop(replayer)
}
pushChannel.expectMsg(SubscriptionFailureWrites.writes(SubscriptionFailure(r, 505, "Internal Server Error")))
deathProbe.expectTerminated(objectSubscriberActor)
}
it should "handle ModelUpdated correctly" in {
val testProbe = TestProbe()
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val rev: Revision = 1L
val r: R = R / 'friend / 601
val rSub = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor = system.actorOf(globals.objectSubscriberProps(rSub))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriberActor, rSub, Some(rev))))
objectSubscriberActor ! ReplayerSubscribed(replayer)
val updates1: List[UpdateOp] = List(
UpdateOp(UpdateCommand.Set, "age", JsNumber(23)),
UpdateOp(UpdateCommand.Set, "fName", JsString("Ahmed")),
UpdateOp(UpdateCommand.Set, "lName", JsString("Refaey"))
)
objectSubscriberActor ! GorillaLogUpdatedEntry(rSub.r, Json.obj(), 1L, 23L, ctx.auth, updates1)
pushChannel.expectMsg(Updated.toJson(r, 1L, List(
FieldUpdatedOp("fName", UpdateCommand.Set, Some(JsString("Ahmed"))),
FieldUpdatedOp("age", UpdateCommand.Set, Some(JsNumber(23)))
), ctx.auth))
objectSubscriberActor.tell(GetFieldList, testProbe.ref)
testProbe.expectMsg(Set("fName", "lName", "age"))
val friendOnGetJs: JsScript =
"""
|hide("fName");
""".stripMargin
val newFriendModel = friendModel.copy(jsHooks = JsHooks(
None,
Some(friendOnGetJs),
None,
None,
None,
None,
None
))
objectSubscriberActor ! ModelUpdated(rSub.r.skeleton, newFriendModel, List())
val updates2: List[UpdateOp] = List(
UpdateOp(UpdateCommand.Set, "age", JsNumber(29)),
UpdateOp(UpdateCommand.Set, "fName", JsString("Neo")),
UpdateOp(UpdateCommand.Set, "lName", JsString("Anderson"))
)
objectSubscriberActor ! GorillaLogUpdatedEntry(rSub.r, Json.obj(), 1L, 23L, ctx.auth, updates2)
pushChannel.expectMsg(Updated.toJson(r, 1L, List(
FieldUpdatedOp("age", UpdateCommand.Set, Some(JsNumber(29))),
FieldUpdatedOp("lName", UpdateCommand.Set, Some(JsString("Anderson")))
), ctx.auth))
objectSubscriberActor.tell(GetFieldList, testProbe.ref)
testProbe.expectMsg(Set("fName", "lName", "age"))
}
it should "handle ModelDeleted, send subscription failed and terminates" in {
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val deathProbe = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor = system.actorOf(globals.objectSubscriberProps(rSub))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriberActor, rSub, Some(rev))))
objectSubscriberActor ! ReplayerSubscribed(replayer)
deathProbe.watch(objectSubscriberActor)
EventFilter.error(occurrences = 1, message = s"ObjectSubscriber ${rSub.pushChannel.path}$$${r} is going to die" +
s" since the subscription failed because of: received a DeletedModel message for: $r\\n error code: 501") intercept {
objectSubscriberActor ! ModelDeleted(rSub.r.skeleton)
}
pushChannel.expectMsg(SubscriptionFailureWrites.writes(SubscriptionFailure(r, 501, "Internal Server Error")))
deathProbe.expectTerminated(objectSubscriberActor)
}
it should "handle if the pushed update model version is not equal to the state version" in {
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val deathProbe = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor = system.actorOf(globals.objectSubscriberProps(rSub))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriberActor, rSub, Some(rev))))
objectSubscriberActor ! ReplayerSubscribed(replayer)
deathProbe.watch(objectSubscriberActor)
EventFilter.error(occurrences = 1, message = s"ObjectSubscriber ${rSub.pushChannel.path}$$${r} is going to die since the subscription failed because of: Model Version inconsistency\\n error code: 502") intercept {
objectSubscriberActor ! GorillaLogUpdatedEntry(rSub.r, Json.obj(), 2L, 1L, ctx.auth, List())
}
pushChannel.expectMsg(SubscriptionFailureWrites.writes(SubscriptionFailure(r, 502, "Internal Server Error")))
deathProbe.expectTerminated(objectSubscriberActor)
}
it should "suicide if the associated Replayer did not send a ReplayerSubscribed for a configurable time" in {
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val deathProbe = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub = RSubscription(ctx, r, Set.empty, rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriberActor = system.actorOf(globals.objectSubscriberProps(rSub))
deathProbe.watch(objectSubscriberActor)
objectSubscriberActor ! "To stash message 1"
objectSubscriberActor ! "To stash message 2"
objectSubscriberActor ! "To stash message 3"
deathProbe.expectTerminated(objectSubscriberActor)
}
it should "fail the subscription not a field in the subscription body relates to the object's model" in {
val requestDelegate = TestProbe()
val pushChannel = TestProbe()
val rev: Revision = 1L
val r: R = R / 'users / 601
val rSub = RSubscription(ctx, r, Set("notAField1", "notAField2"), rev, requestDelegate.ref, pushChannel.ref)
val objectSubscriber = system.actorOf(globals.objectSubscriberProps(rSub))
val replayer = system.actorOf(Props(new Replayer(globals, objectSubscriber, rSub, Some(rev))))
objectSubscriber ! ReplayerSubscribed(replayer)
val failureMessage = SubscriptionFailureWrites.writes(SubscriptionFailure(r, 506, "Internal Server Error"))
pushChannel.expectMsg(failureMessage)
}
}
| reallylabs/really | modules/really-core/src/test/scala/io/really/gorilla/ObjectSubscriberSpec.scala | Scala | apache-2.0 | 21,469 |
/*
* scala-swing (https://www.scala-lang.org)
*
* Copyright EPFL, Lightbend, Inc., contributors
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.swing
import scala.collection.mutable
/**
* A button mutex. At most one of its associated buttons is selected
* at a time.
*
* @see javax.swing.ButtonGroup
*/
class ButtonGroup(initialButtons: AbstractButton*) {
val peer: javax.swing.ButtonGroup = new javax.swing.ButtonGroup
val buttons: mutable.Set[AbstractButton] = new SetWrapper[AbstractButton] {
override def subtractOne(b: AbstractButton): this.type = { peer.remove(b.peer); this }
override def addOne (b: AbstractButton): this.type = { peer.add (b.peer); this }
def contains(b: AbstractButton): Boolean = this.iterator.contains(b)
override def size: Int = peer.getButtonCount
def iterator: Iterator[AbstractButton] = new Iterator[AbstractButton] {
private val elements = peer.getElements
def next(): AbstractButton = UIElement.cachedWrapper[AbstractButton](elements.nextElement())
def hasNext: Boolean = elements.hasMoreElements
}
}
buttons ++= initialButtons
//1.6: def deselectAll() { peer.clearSelection }
def selected: Option[AbstractButton] = buttons.find(_.selected)
def select(b: AbstractButton): Unit = peer.setSelected(b.peer.getModel, true)
}
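// Hedged usage sketch (illustrative only; the button labels are assumptions):
// wiring two radio buttons into a group so that selecting one deselects the other.
//
//   import scala.swing._
//   val small = new RadioButton("Small")
//   val large = new RadioButton("Large")
//   val sizes = new ButtonGroup(small, large)
//   sizes.select(small)                  // at most one associated button is selected
//   val choice: Option[AbstractButton] = sizes.selected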
| scala/scala-swing | src/main/scala/scala/swing/ButtonGroup.scala | Scala | apache-2.0 | 1,506 |
package mappings.common
object ErrorCodes {
// val MicroServiceErrorCode = "PR001"
val PostcodeMismatchErrorCode = "PR002"
val VrmLockedErrorCode = "PR003"
} | dvla/vrm-retention-online | app/mappings/common/ErrorCodes.scala | Scala | mit | 163 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.web
import com.twitter.finagle.httpx.Request
import com.twitter.util.Time
import org.scalatest.FunSuite
class QueryExtractorTest extends FunSuite {
val queryExtractor = new QueryExtractor(10)
def request(p: (String, String)*) = Request(p:_*)
test("require serviceName") {
assert(!queryExtractor(request()).isDefined)
}
test("parse params") {
val endTs = Time.now
val endTimestamp = endTs.inMicroseconds.toString
val r = request(
"serviceName" -> "myService",
"spanName" -> "mySpan",
"timestamp" -> endTimestamp,
"limit" -> "1000")
val actual = queryExtractor(r).get
assert(actual.serviceName === "myService")
assert(actual.spanName.get === "mySpan")
assert(actual.endTs === endTs.inMicroseconds)
assert(actual.limit === 1000)
}
test("default endDateTime") {
Time.withCurrentTimeFrozen { tc =>
val actual = queryExtractor(request("serviceName" -> "myService")).get
assert(actual.endTs === Time.now.sinceEpoch.inMicroseconds)
}
}
test("parse limit") {
val r = request("serviceName" -> "myService", "limit" -> "199")
val actual = queryExtractor(r).get
assert(actual.limit === 199)
}
test("default limit") {
val actual = new QueryExtractor(100).apply(request("serviceName" -> "myService")).get
assert(actual.limit === 100)
}
test("parse spanName 'all'") {
val r = request("serviceName" -> "myService", "spanName" -> "all")
val actual = queryExtractor(r).get
assert(!actual.spanName.isDefined)
}
test("parse spanName ''") {
val r = request("serviceName" -> "myService", "spanName" -> "")
val actual = queryExtractor(r).get
assert(!actual.spanName.isDefined)
}
test("parse spanName") {
val r = request("serviceName" -> "myService", "spanName" -> "something")
val actual = queryExtractor(r).get
assert(actual.spanName.get === "something")
}
test("parse annotations") {
val r = request(
"serviceName" -> "myService",
"annotationQuery" -> "finagle.retry and finagle.timeout")
val actual = queryExtractor(r).get
assert(actual.annotations.get.contains("finagle.retry"))
assert(actual.annotations.get.contains("finagle.timeout"))
}
test("parse key value annotations") {
val r = request(
"serviceName" -> "myService",
"annotationQuery" -> "http.responsecode=500")
val actual = queryExtractor(r).get
assert(
actual.binaryAnnotations.get === Map("http.responsecode" -> "500"))
}
test("parse key value annotations with slash") {
val r = request(
"serviceName" -> "myService",
"annotationQuery" -> "http.uri=/sessions")
val actual = queryExtractor(r).get
assert(
actual.binaryAnnotations.get === Map("http.uri" -> "/sessions"))
}
}
| jstanier/zipkin | zipkin-web/src/test/scala/com/twitter/zipkin/web/QueryExtractorTest.scala | Scala | apache-2.0 | 3,437 |
package org.template.recommendation
import org.apache.predictionio.controller.PPreparator
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import scala.io.Source // ADDED
import org.apache.predictionio.controller.Params // ADDED
// ADDED CustomPreparatorParams case class
case class CustomPreparatorParams(
filepath: String
) extends Params
class Preparator(pp: CustomPreparatorParams) // ADDED CustomPreparatorParams
extends PPreparator[TrainingData, PreparedData] {
def prepare(sc: SparkContext, trainingData: TrainingData): PreparedData = {
    val noTrainSource = Source.fromFile(pp.filepath) // CHANGED
    val noTrainItems = try noTrainSource.getLines.toSet finally noTrainSource.close() // CHANGED: materialise the exclusion set, then close the file handle
val ratings = trainingData.ratings.filter( r =>
!noTrainItems.contains(r.item)
)
new PreparedData(ratings)
}
}
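// Hedged illustration (assumption, not from the original template): the file at
// `filepath` is read line by line above, so it is expected to hold one item id
// per line, e.g.
//
//   i4
//   i21
//
// Every rating whose item appears in that file is excluded from the prepared data.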
class PreparedData(
val ratings: RDD[Rating]
) extends Serializable
| alex9311/PredictionIO | examples/scala-parallel-recommendation/custom-prepartor/src/main/scala/Preparator.scala | Scala | apache-2.0 | 894 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.columnar.compression
import java.nio.{ByteBuffer, ByteOrder}
import java.nio.charset.StandardCharsets
import org.apache.commons.lang3.RandomStringUtils
import org.apache.commons.math3.distribution.LogNormalDistribution
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, GenericMutableRow}
import org.apache.spark.sql.execution.columnar.{BOOLEAN, INT, LONG, NativeColumnType, SHORT, STRING}
import org.apache.spark.sql.types.AtomicType
import org.apache.spark.util.Benchmark
import org.apache.spark.util.Utils._
/**
 * Benchmark for encoders and decoders using various compression schemes.
*/
object CompressionSchemeBenchmark extends AllCompressionSchemes {
private[this] def allocateLocal(size: Int): ByteBuffer = {
ByteBuffer.allocate(size).order(ByteOrder.nativeOrder)
}
private[this] def genLowerSkewData() = {
val rng = new LogNormalDistribution(0.0, 0.01)
() => rng.sample
}
private[this] def genHigherSkewData() = {
val rng = new LogNormalDistribution(0.0, 1.0)
() => rng.sample
}
private[this] def prepareEncodeInternal[T <: AtomicType](
count: Int,
tpe: NativeColumnType[T],
supportedScheme: CompressionScheme,
input: ByteBuffer): ((ByteBuffer, ByteBuffer) => ByteBuffer, Double, ByteBuffer) = {
assert(supportedScheme.supports(tpe))
def toRow(d: Any) = new GenericInternalRow(Array[Any](d))
val encoder = supportedScheme.encoder(tpe)
for (i <- 0 until count) {
encoder.gatherCompressibilityStats(toRow(tpe.extract(input)), 0)
}
input.rewind()
val compressedSize = if (encoder.compressedSize == 0) {
input.remaining()
} else {
encoder.compressedSize
}
(encoder.compress, encoder.compressionRatio, allocateLocal(4 + compressedSize))
}
private[this] def runEncodeBenchmark[T <: AtomicType](
name: String,
iters: Int,
count: Int,
tpe: NativeColumnType[T],
input: ByteBuffer): Unit = {
val benchmark = new Benchmark(name, iters * count)
schemes.filter(_.supports(tpe)).map { scheme =>
val (compressFunc, compressionRatio, buf) = prepareEncodeInternal(count, tpe, scheme, input)
val label = s"${getFormattedClassName(scheme)}(${compressionRatio.formatted("%.3f")})"
benchmark.addCase(label)({ i: Int =>
for (n <- 0L until iters) {
compressFunc(input, buf)
input.rewind()
buf.rewind()
}
})
}
benchmark.run()
}
private[this] def runDecodeBenchmark[T <: AtomicType](
name: String,
iters: Int,
count: Int,
tpe: NativeColumnType[T],
input: ByteBuffer): Unit = {
val benchmark = new Benchmark(name, iters * count)
schemes.filter(_.supports(tpe)).map { scheme =>
val (compressFunc, _, buf) = prepareEncodeInternal(count, tpe, scheme, input)
val compressedBuf = compressFunc(input, buf)
val label = s"${getFormattedClassName(scheme)}"
input.rewind()
benchmark.addCase(label)({ i: Int =>
val rowBuf = new GenericMutableRow(1)
for (n <- 0L until iters) {
compressedBuf.rewind.position(4)
val decoder = scheme.decoder(compressedBuf, tpe)
while (decoder.hasNext) {
decoder.next(rowBuf, 0)
}
}
})
}
benchmark.run()
}
def bitEncodingBenchmark(iters: Int): Unit = {
val count = 65536
val testData = allocateLocal(count * BOOLEAN.defaultSize)
val g = {
val rng = genLowerSkewData()
() => (rng().toInt % 2).toByte
}
for (i <- 0 until count) {
testData.put(i * BOOLEAN.defaultSize, g())
}
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// BOOLEAN Encode: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough(1.000) 3 / 4 19300.2 0.1 1.0X
// RunLengthEncoding(2.491) 923 / 939 72.7 13.8 0.0X
// BooleanBitSet(0.125) 359 / 363 187.1 5.3 0.0X
runEncodeBenchmark("BOOLEAN Encode", iters, count, BOOLEAN, testData)
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// BOOLEAN Decode: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough 129 / 136 519.8 1.9 1.0X
// RunLengthEncoding 613 / 623 109.4 9.1 0.2X
// BooleanBitSet 1196 / 1222 56.1 17.8 0.1X
runDecodeBenchmark("BOOLEAN Decode", iters, count, BOOLEAN, testData)
}
def shortEncodingBenchmark(iters: Int): Unit = {
val count = 65536
val testData = allocateLocal(count * SHORT.defaultSize)
val g1 = genLowerSkewData()
for (i <- 0 until count) {
testData.putShort(i * SHORT.defaultSize, g1().toShort)
}
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// SHORT Encode (Lower Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough(1.000) 6 / 7 10971.4 0.1 1.0X
// RunLengthEncoding(1.510) 1526 / 1542 44.0 22.7 0.0X
runEncodeBenchmark("SHORT Encode (Lower Skew)", iters, count, SHORT, testData)
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// SHORT Decode (Lower Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough 811 / 837 82.8 12.1 1.0X
// RunLengthEncoding 1219 / 1266 55.1 18.2 0.7X
runDecodeBenchmark("SHORT Decode (Lower Skew)", iters, count, SHORT, testData)
val g2 = genHigherSkewData()
for (i <- 0 until count) {
testData.putShort(i * SHORT.defaultSize, g2().toShort)
}
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// SHORT Encode (Higher Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough(1.000) 7 / 7 10112.4 0.1 1.0X
// RunLengthEncoding(2.009) 1623 / 1661 41.4 24.2 0.0X
runEncodeBenchmark("SHORT Encode (Higher Skew)", iters, count, SHORT, testData)
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// SHORT Decode (Higher Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough 818 / 827 82.0 12.2 1.0X
// RunLengthEncoding 1202 / 1237 55.8 17.9 0.7X
runDecodeBenchmark("SHORT Decode (Higher Skew)", iters, count, SHORT, testData)
}
def intEncodingBenchmark(iters: Int): Unit = {
val count = 65536
val testData = allocateLocal(count * INT.defaultSize)
val g1 = genLowerSkewData()
for (i <- 0 until count) {
testData.putInt(i * INT.defaultSize, g1().toInt)
}
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// INT Encode (Lower Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough(1.000) 18 / 19 3716.4 0.3 1.0X
// RunLengthEncoding(1.001) 1992 / 2056 33.7 29.7 0.0X
// DictionaryEncoding(0.500) 723 / 739 92.8 10.8 0.0X
// IntDelta(0.250) 368 / 377 182.2 5.5 0.0X
runEncodeBenchmark("INT Encode (Lower Skew)", iters, count, INT, testData)
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// INT Decode (Lower Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough 821 / 845 81.8 12.2 1.0X
// RunLengthEncoding 1246 / 1256 53.9 18.6 0.7X
// DictionaryEncoding 757 / 766 88.6 11.3 1.1X
// IntDelta 680 / 689 98.7 10.1 1.2X
runDecodeBenchmark("INT Decode (Lower Skew)", iters, count, INT, testData)
val g2 = genHigherSkewData()
for (i <- 0 until count) {
testData.putInt(i * INT.defaultSize, g2().toInt)
}
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// INT Encode (Higher Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough(1.000) 17 / 19 3888.4 0.3 1.0X
// RunLengthEncoding(1.339) 2127 / 2148 31.5 31.7 0.0X
// DictionaryEncoding(0.501) 960 / 972 69.9 14.3 0.0X
// IntDelta(0.250) 362 / 366 185.5 5.4 0.0X
runEncodeBenchmark("INT Encode (Higher Skew)", iters, count, INT, testData)
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// INT Decode (Higher Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough 838 / 884 80.1 12.5 1.0X
// RunLengthEncoding 1287 / 1311 52.1 19.2 0.7X
// DictionaryEncoding 844 / 859 79.5 12.6 1.0X
// IntDelta 764 / 784 87.8 11.4 1.1X
runDecodeBenchmark("INT Decode (Higher Skew)", iters, count, INT, testData)
}
def longEncodingBenchmark(iters: Int): Unit = {
val count = 65536
val testData = allocateLocal(count * LONG.defaultSize)
val g1 = genLowerSkewData()
for (i <- 0 until count) {
testData.putLong(i * LONG.defaultSize, g1().toLong)
}
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// LONG Encode (Lower Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough(1.000) 37 / 38 1804.8 0.6 1.0X
// RunLengthEncoding(0.748) 2065 / 2094 32.5 30.8 0.0X
// DictionaryEncoding(0.250) 950 / 962 70.6 14.2 0.0X
// LongDelta(0.125) 475 / 482 141.2 7.1 0.1X
runEncodeBenchmark("LONG Encode (Lower Skew)", iters, count, LONG, testData)
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// LONG Decode (Lower Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough 888 / 894 75.5 13.2 1.0X
// RunLengthEncoding 1301 / 1311 51.6 19.4 0.7X
// DictionaryEncoding 887 / 904 75.7 13.2 1.0X
// LongDelta 693 / 735 96.8 10.3 1.3X
runDecodeBenchmark("LONG Decode (Lower Skew)", iters, count, LONG, testData)
val g2 = genHigherSkewData()
for (i <- 0 until count) {
testData.putLong(i * LONG.defaultSize, g2().toLong)
}
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// LONG Encode (Higher Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough(1.000) 34 / 35 1963.9 0.5 1.0X
// RunLengthEncoding(0.999) 2260 / 3021 29.7 33.7 0.0X
// DictionaryEncoding(0.251) 1270 / 1438 52.8 18.9 0.0X
// LongDelta(0.125) 496 / 509 135.3 7.4 0.1X
runEncodeBenchmark("LONG Encode (Higher Skew)", iters, count, LONG, testData)
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// LONG Decode (Higher Skew): Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough 965 / 1494 69.5 14.4 1.0X
// RunLengthEncoding 1350 / 1378 49.7 20.1 0.7X
// DictionaryEncoding 892 / 924 75.2 13.3 1.1X
// LongDelta 817 / 847 82.2 12.2 1.2X
runDecodeBenchmark("LONG Decode (Higher Skew)", iters, count, LONG, testData)
}
def stringEncodingBenchmark(iters: Int): Unit = {
val count = 65536
val strLen = 8
val tableSize = 16
val testData = allocateLocal(count * (4 + strLen))
val g = {
val dataTable = (0 until tableSize).map(_ => RandomStringUtils.randomAlphabetic(strLen))
val rng = genHigherSkewData()
() => dataTable(rng().toInt % tableSize)
}
for (i <- 0 until count) {
testData.putInt(strLen)
testData.put(g().getBytes(StandardCharsets.UTF_8))
}
testData.rewind()
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// STRING Encode: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough(1.000) 56 / 57 1197.9 0.8 1.0X
// RunLengthEncoding(0.893) 4892 / 4937 13.7 72.9 0.0X
// DictionaryEncoding(0.167) 2968 / 2992 22.6 44.2 0.0X
runEncodeBenchmark("STRING Encode", iters, count, STRING, testData)
// Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
// STRING Decode: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
// -------------------------------------------------------------------------------------------
// PassThrough 2422 / 2449 27.7 36.1 1.0X
// RunLengthEncoding 2885 / 3018 23.3 43.0 0.8X
// DictionaryEncoding 2716 / 2752 24.7 40.5 0.9X
runDecodeBenchmark("STRING Decode", iters, count, STRING, testData)
}
def main(args: Array[String]): Unit = {
bitEncodingBenchmark(1024)
shortEncodingBenchmark(1024)
intEncodingBenchmark(1024)
longEncodingBenchmark(1024)
stringEncodingBenchmark(1024)
}
}
| gioenn/xSpark | sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala | Scala | apache-2.0 | 16,714 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.cogmath.collection
/** A set which uses object identity to determine equality of members, not
* the "equals" and "hashCode" methods.
*
* We incur a slight overhead by introducing determinism in all cases. This
* would become important if the user invokes an iterator, foreach, toSeq, etc.
*
* @author Greg Snider and Dick Carter
*/
private [cogx] class IdentityHashSet[A <: AnyRef] extends IdentityHashSetDeterministic[A] {
/** Copies all the elements from the specified set to this set. */
def putAll(s: IdentityHashSet[A]) {
this ++= s
}
/** Create a copy of this set. */
def copy: IdentityHashSet[A] = {
val newSet = new IdentityHashSet[A]()
newSet.putAll(this)
newSet
}
}
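// Hedged usage sketch (the case class below is an assumption): two value-equal
// but distinct instances are both kept, because membership is decided by
// reference identity rather than equals/hashCode.
//
//   case class Point(x: Int)
//   val set = new IdentityHashSet[Point]
//   val a = Point(1); val b = Point(1)
//   set += a; set += b        // both retained: a and b are equal but not identical
//   val snapshot = set.copy   // independent set holding the same element references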
| hpe-cct/cct-core | src/main/scala/cogx/cogmath/collection/IdentityHashSet.scala | Scala | apache-2.0 | 1,381 |
package datacollector
import java.io.File
import java.util.concurrent.TimeUnit
import akka.actor.{ Props, ActorRef, ActorPath, Actor }
import akka.actor.Actor.Receive
import akka.event.Logging
import datacollector.HeartBeatActor.{ Start, HeartBeatRequest, HeartBeat }
import scala.concurrent.duration.FiniteDuration
/**
 * Logs download statistics for watched actors at every configured heartbeat interval.
*
* @author Emre Çelikten
* @date 30/06/2015-14:24
*/
class HeartBeatActor(
watchedActors: Map[ActorPath, String],
configuration: ConfigurationModule,
logger: LoggerModule
) extends Actor {
implicit val loggingAdapter = Logging(context.system, this)
implicit def ec = context.system.dispatcher
// TODO: Point of failure: What happens if resolve fails?
def scheduleHeartBeat(actor: ActorPath) = context.actorSelection(actor).resolveOne(FiniteDuration(30, TimeUnit.SECONDS)).map(ref => context.system.scheduler.scheduleOnce(configuration.heartBeatDuration, ref, HeartBeatRequest))
override def receive: Receive = {
case HeartBeat(numDownloads) =>
watchedActors.get(sender().path) match {
case Some(actorName) =>
logger.info(s"$actorName reports $numDownloads downloads.", sendEmail = configuration.emailOnHeartBeat)
case None => logger.warn(s"${sender()} could not be matched to a name, but reports $numDownloads downloads.")
}
scheduleHeartBeat(sender().path)
case Start =>
// Schedule heart beat messages for all actors that we watch
watchedActors.foreach { case (path, _) => scheduleHeartBeat(path) }
logger.info("Heart beat initialized.")
}
}
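// Hedged usage sketch (actor names and references below are assumptions): the
// actor is created from the companion's props with a map from watched actor
// paths to display names, then kicked off with a Start message.
//
//   val watched = Map(downloaderRef.path -> "TweetDownloader")
//   val heartBeat = system.actorOf(HeartBeatActor.props(watched), "heart-beat")
//   heartBeat ! HeartBeatActor.Start   // schedules periodic HeartBeatRequest messages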
object HeartBeatActor {
def props(watchedActors: Map[ActorPath, String]): Props = Props(new HeartBeatActor(watchedActors, Configuration.configuration, Logger))
sealed trait HeartBeatActorMessage
case class HeartBeat(numDownloads: Long) extends HeartBeatActorMessage
case object Start extends HeartBeatActorMessage
case object HeartBeatRequest extends HeartBeatActorMessage
} | emrecelikten/foursquare-data-collector | src/main/scala/datacollector/HeartBeatActor.scala | Scala | gpl-3.0 | 2,028 |
/*
* Shadowsocks - A shadowsocks client for Android
* Copyright (C) 2014 <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* ___====-_ _-====___
* _--^^^#####// \\\\#####^^^--_
* _-^##########// ( ) \\\\##########^-_
* -############// |\\^^/| \\\\############-
* _/############// (@::@) \\\\############\\_
* /#############(( \\\\// ))#############\\
* -###############\\\\ (oo) //###############-
* -#################\\\\ / VV \\ //#################-
* -###################\\\\/ \\//###################-
* _#/|##########/\\######( /\\ )######/\\##########|\\#_
* |/ |#/\\#/\\#/\\/ \\#/\\##\\ | | /##/\\#/ \\/\\#/\\#/\\#| \\|
* ` |/ V V ` V \\#\\| | | |/#/ V ' V V \\| '
* ` ` ` ` / | | | | \\ ' ' ' '
* ( | | | | )
* __\\ | | | | /__
* (vvv(VVV)(VVV)vvv)
*
* HERE BE DRAGONS
*
*/
package com.github.shadowsocks
import java.util
import java.util.concurrent.TimeUnit
import android.app.Application
import android.content.pm.PackageManager
import android.preference.PreferenceManager
import com.github.shadowsocks.database.{DBHelper, ProfileManager}
import com.github.shadowsocks.utils.{Console, Key, Utils}
import com.google.android.gms.analytics.{GoogleAnalytics, HitBuilders}
import com.google.android.gms.common.api.ResultCallback
import com.google.android.gms.tagmanager.{ContainerHolder, TagManager}
object ShadowsocksApplication {
var instance: ShadowsocksApplication = _
lazy val dbHelper = new DBHelper(instance)
final val SIG_FUNC = "getSignature"
var containerHolder: ContainerHolder = _
lazy val tracker = GoogleAnalytics.getInstance(instance).newTracker(R.xml.tracker)
lazy val settings = PreferenceManager.getDefaultSharedPreferences(instance)
lazy val profileManager = new ProfileManager(settings, dbHelper)
val isRoot = Console.isRoot
def isVpnEnabled = !(isRoot && settings.getBoolean(Key.isNAT, !Utils.isLollipopOrAbove))
def getVersionName = try {
instance.getPackageManager.getPackageInfo(instance.getPackageName, 0).versionName
} catch {
case _: PackageManager.NameNotFoundException => "Package name not found"
case _: Throwable => null
}
// send event
  def track(category: String, action: String) = tracker.send(new HitBuilders.EventBuilder()
    .setCategory(category)
    .setAction(action)
    .setLabel(getVersionName)
    .build())
def profileId = settings.getInt(Key.profileId, -1)
def profileId(i: Int) = settings.edit.putInt(Key.profileId, i).apply
def proxy = settings.getString(Key.proxy, "")
def currentProfile = profileManager.getProfile(profileId)
def switchProfile(id: Int) = {
profileId(id)
profileManager.load(id)
}
}
class ShadowsocksApplication extends Application {
import ShadowsocksApplication._
override def onCreate() {
ShadowsocksApplication.instance = this
val tm = TagManager.getInstance(this)
val pending = tm.loadContainerPreferNonDefault("GTM-NT8WS8", R.raw.gtm_default_container)
val callback = new ResultCallback[ContainerHolder] {
override def onResult(holder: ContainerHolder): Unit = {
if (!holder.getStatus.isSuccess) {
return
}
containerHolder = holder
val container = holder.getContainer
container.registerFunctionCallMacroCallback(SIG_FUNC,
(functionName: String, parameters: util.Map[String, AnyRef]) => {
if (functionName == SIG_FUNC) {
Utils.getSignature(getApplicationContext)
}
null
})
}
}
pending.setResultCallback(callback, 2, TimeUnit.SECONDS)
}
}
| tenwx/shadowsocks-android | src/main/scala/com/github/shadowsocks/ShadowsocksApplication.scala | Scala | gpl-3.0 | 4,517 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.coap.connection
import com.datamountaineer.streamreactor.connect.coap.configs.CoapSetting
import com.typesafe.scalalogging.StrictLogging
import org.eclipse.californium.core.{CoapClient, CoapResponse}
/**
* Created by [email protected] on 29/12/2016.
* stream-reactor
*/
abstract class CoapManager(setting: CoapSetting) extends StrictLogging {
val client: CoapClient = buildClient
def buildClient(): CoapClient = {
val client = DTLSConnectionFn(setting)
import scala.collection.JavaConverters._
//discover and check the requested resources
Option(client.discover())
.map(_.asScala)
.getOrElse(Set.empty).map(r => {
logger.info(s"Discovered resources ${r.getURI}")
r.getURI
})
client.setURI(s"${setting.uri}/${setting.target}")
client
}
def delete(): CoapResponse = client.delete()
}
| datamountaineer/stream-reactor | kafka-connect-coap/src/main/scala/com/datamountaineer/streamreactor/connect/coap/connection/CoapManager.scala | Scala | apache-2.0 | 1,514 |
package spark.deploy.master
private[spark] object WorkerState extends Enumeration("ALIVE", "DEAD", "DECOMMISSIONED") {
type WorkerState = Value
val ALIVE, DEAD, DECOMMISSIONED = Value
}
| koeninger/spark | core/src/main/scala/spark/deploy/master/WorkerState.scala | Scala | bsd-3-clause | 192 |
class Hamming(strand1: String, strand2: String) {
require(strand1.length == strand2.length)
def distance = commonPairs.count {
case (a, b) => a != b
}
private def commonPairs = strand1.zip(strand2)
}
object Hamming {
def compute(strand1: String, strand2: String) =
new Hamming(strand1, strand2).distance
}
| nlochschmidt/xscala | hamming/example.scala | Scala | mit | 329 |
package shapeless.datatype.mappable
import shapeless._
import shapeless.labelled.FieldType
import scala.language.higherKinds
trait ToMappable[L <: HList, M] extends Serializable {
def apply(l: L): M
}
trait LowPriorityToMappable1 {
implicit def hconsToMappable1[K <: Symbol, V, T <: HList, M](implicit
wit: Witness.Aux[K],
mt: MappableType[M, V],
toT: Lazy[ToMappable[T, M]]
): ToMappable[FieldType[K, V] :: T, M] = new ToMappable[FieldType[K, V] :: T, M] {
override def apply(l: FieldType[K, V] :: T): M =
mt.put(wit.value.name, l.head, toT.value(l.tail))
}
}
trait LowPriorityToMappableOption1 extends LowPriorityToMappable1 {
implicit def hconsToMappableOption1[K <: Symbol, V, T <: HList, M](implicit
wit: Witness.Aux[K],
mt: MappableType[M, V],
toT: Lazy[ToMappable[T, M]]
): ToMappable[FieldType[K, Option[V]] :: T, M] = new ToMappable[FieldType[K, Option[V]] :: T, M] {
override def apply(l: FieldType[K, Option[V]] :: T): M =
mt.put(wit.value.name, l.head, toT.value(l.tail))
}
}
trait LowPriorityToMappableSeq1 extends LowPriorityToMappableOption1 {
implicit def hconsToMappableSeq1[K <: Symbol, V, T <: HList, M, S[_]](implicit
wit: Witness.Aux[K],
mt: MappableType[M, V],
toT: Lazy[ToMappable[T, M]],
toSeq: S[V] => Seq[V]
): ToMappable[FieldType[K, S[V]] :: T, M] = new ToMappable[FieldType[K, S[V]] :: T, M] {
override def apply(l: FieldType[K, S[V]] :: T): M =
mt.put(wit.value.name, toSeq(l.head), toT.value(l.tail))
}
}
trait LowPriorityToMappable0 extends LowPriorityToMappableSeq1 {
implicit def hconsToMappable0[K <: Symbol, V, H <: HList, T <: HList, M: CanNest](implicit
wit: Witness.Aux[K],
gen: LabelledGeneric.Aux[V, H],
mbt: BaseMappableType[M],
toH: Lazy[ToMappable[H, M]],
toT: Lazy[ToMappable[T, M]]
): ToMappable[FieldType[K, V] :: T, M] = new ToMappable[FieldType[K, V] :: T, M] {
override def apply(l: FieldType[K, V] :: T): M =
mbt.put(wit.value.name, toH.value(gen.to(l.head)), toT.value(l.tail))
}
}
trait LowPriorityToMappableOption0 extends LowPriorityToMappable0 {
implicit def hconsToMappableOption0[K <: Symbol, V, H <: HList, T <: HList, M: CanNest](implicit
wit: Witness.Aux[K],
gen: LabelledGeneric.Aux[V, H],
mbt: BaseMappableType[M],
toH: Lazy[ToMappable[H, M]],
toT: Lazy[ToMappable[T, M]]
): ToMappable[FieldType[K, Option[V]] :: T, M] = new ToMappable[FieldType[K, Option[V]] :: T, M] {
override def apply(l: FieldType[K, Option[V]] :: T): M =
mbt.put(wit.value.name, l.head.map(h => toH.value(gen.to(h))), toT.value(l.tail))
}
}
trait LowPriorityToMappableSeq0 extends LowPriorityToMappableOption0 {
implicit def hconsToMappableSeq0[K <: Symbol, V, H <: HList, T <: HList, M: CanNest, S[_]](
implicit
wit: Witness.Aux[K],
gen: LabelledGeneric.Aux[V, H],
mbt: BaseMappableType[M],
toH: Lazy[ToMappable[H, M]],
toT: Lazy[ToMappable[T, M]],
toSeq: S[V] => Seq[V]
): ToMappable[FieldType[K, S[V]] :: T, M] = new ToMappable[FieldType[K, S[V]] :: T, M] {
override def apply(l: FieldType[K, S[V]] :: T): M =
mbt.put(wit.value.name, toSeq(l.head).map(h => toH.value(gen.to(h))), toT.value(l.tail))
}
}
object ToMappable extends LowPriorityToMappableSeq0 {
implicit def hnilToMappable[M](implicit mbt: BaseMappableType[M]): ToMappable[HNil, M] =
new ToMappable[HNil, M] {
override def apply(l: HNil): M = mbt.base
}
}
| nevillelyh/shapeless-datatype | core/src/main/scala/shapeless/datatype/mappable/ToMappable.scala | Scala | apache-2.0 | 3,489 |
package org.jetbrains.plugins.hocon.lexer
import com.intellij.psi.TokenType
import com.intellij.psi.tree.IElementType
import org.jetbrains.plugins.hocon.lang.HoconLanguage
sealed class HoconTokenType(debugString: String) extends IElementType(debugString, HoconLanguage)
object HoconTokenType extends TokenType {
val InlineWhitespace = new HoconTokenType("INLINE_WHITESPACE")
val LineBreakingWhitespace = new HoconTokenType("LINE_BREAKING_WHITESPACE")
val BadCharacter = new HoconTokenType("BAD_CHARACTER")
val LBrace = new HoconTokenType("LBRACE")
val RBrace = new HoconTokenType("RBRACE")
val LBracket = new HoconTokenType("LBRACKET")
val RBracket = new HoconTokenType("RBRACKET")
val Colon = new HoconTokenType("COLON")
val Comma = new HoconTokenType("COMMA")
val Equals = new HoconTokenType("EQUALS")
val PlusEquals = new HoconTokenType("PLUS_EQUALS")
val Period = new HoconTokenType("PERIOD")
val Dollar = new HoconTokenType("DOLLAR")
val SubLBrace = new HoconTokenType("SUB_LBRACE")
val QMark = new HoconTokenType("QMARK")
val SubRBrace = new HoconTokenType("SUB_RBRACE")
val HashComment = new HoconTokenType("HASH_COMMENT")
val DoubleSlashComment = new HoconTokenType("DOUBLE_SLASH_COMMENT")
val UnquotedChars = new HoconTokenType("UNQUOTED_CHARS")
val QuotedString = new HoconTokenType("QUOTED_STRING")
val MultilineString = new HoconTokenType("MULTILINE_STRING")
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/hocon/lexer/HoconTokenType.scala | Scala | apache-2.0 | 1,420 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.ems.core.user.event
import org.beangle.commons.event.Event
import org.beangle.ems.core.user.model.Role
import org.beangle.ems.core.user.model.User
class RoleEvent(r: Role) extends Event(r) {
def role = getSource.asInstanceOf[Role]
}
class RolePermissionEvent(role: Role) extends RoleEvent(role)
class RoleCreationEvent(role: Role) extends RoleEvent(role)
class RoleRemoveEvent(role: Role) extends RoleEvent(role)
class UserEvent(r: User) extends Event(r) {
def user = getSource.asInstanceOf[User]
}
class UserAlterationEvent(user: User) extends UserEvent(user)
class UserCreationEvent(user: User) extends UserEvent(user)
class UserRemoveEvent(user: User) extends UserEvent(user)
class UserStatusEvent(user: User) extends UserEvent(user)
| beangle/ems | core/src/main/scala/org/beangle/ems/core/user/event/event.scala | Scala | lgpl-3.0 | 1,492 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator
import java.io.PrintStream
import java.nio.ByteBuffer
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.locks.ReentrantLock
import com.yammer.metrics.core.Gauge
import kafka.api.{ApiVersion, KAFKA_0_10_1_IV0}
import kafka.common.{MessageFormatter, _}
import kafka.metrics.KafkaMetricsGroup
import kafka.server.ReplicaManager
import kafka.utils.CoreUtils.inLock
import kafka.utils._
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.protocol.types.Type._
import org.apache.kafka.common.protocol.types.{ArrayOf, Field, Schema, Struct}
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.OffsetFetchResponse
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.utils.{Time, Utils}
import scala.collection.JavaConverters._
import scala.collection._
class GroupMetadataManager(val brokerId: Int,
val interBrokerProtocolVersion: ApiVersion,
val config: OffsetConfig,
replicaManager: ReplicaManager,
zkUtils: ZkUtils,
time: Time) extends Logging with KafkaMetricsGroup {
private val compressionType: CompressionType = CompressionType.forId(config.offsetsTopicCompressionCodec.codec)
private val groupMetadataCache = new Pool[String, GroupMetadata]
/* lock protecting access to loading and owned partition sets */
private val partitionLock = new ReentrantLock()
  /* partitions of consumer groups that are being loaded; this lock should always be acquired BEFORE the group lock if both are needed */
private val loadingPartitions: mutable.Set[Int] = mutable.Set()
/* partitions of consumer groups that are assigned, using the same loading partition lock */
private val ownedPartitions: mutable.Set[Int] = mutable.Set()
/* shutting down flag */
private val shuttingDown = new AtomicBoolean(false)
/* number of partitions for the consumer metadata topic */
private val groupMetadataTopicPartitionCount = getOffsetsTopicPartitionCount
/* single-thread scheduler to handle offset/group metadata cache loading and unloading */
private val scheduler = new KafkaScheduler(threads = 1, threadNamePrefix = "group-metadata-manager-")
this.logIdent = "[Group Metadata Manager on Broker " + brokerId + "]: "
newGauge("NumOffsets",
new Gauge[Int] {
def value = groupMetadataCache.values.map(group => {
group synchronized { group.numOffsets }
}).sum
}
)
newGauge("NumGroups",
new Gauge[Int] {
def value = groupMetadataCache.size
}
)
def enableMetadataExpiration() {
scheduler.startup()
scheduler.schedule(name = "delete-expired-group-metadata",
fun = cleanupGroupMetadata,
period = config.offsetsRetentionCheckIntervalMs,
unit = TimeUnit.MILLISECONDS)
}
def currentGroups(): Iterable[GroupMetadata] = groupMetadataCache.values
def isPartitionOwned(partition: Int) = inLock(partitionLock) { ownedPartitions.contains(partition) }
def isPartitionLoading(partition: Int) = inLock(partitionLock) { loadingPartitions.contains(partition) }
def partitionFor(groupId: String): Int = Utils.abs(groupId.hashCode) % groupMetadataTopicPartitionCount
def isGroupLocal(groupId: String): Boolean = isPartitionOwned(partitionFor(groupId))
def isGroupLoading(groupId: String): Boolean = isPartitionLoading(partitionFor(groupId))
def isLoading(): Boolean = inLock(partitionLock) { loadingPartitions.nonEmpty }
/**
   * Get the group associated with the given groupId, or None if not found
*/
def getGroup(groupId: String): Option[GroupMetadata] = {
Option(groupMetadataCache.get(groupId))
}
/**
* Add a group or get the group associated with the given groupId if it already exists
*/
def addGroup(group: GroupMetadata): GroupMetadata = {
val currentGroup = groupMetadataCache.putIfNotExists(group.groupId, group)
if (currentGroup != null) {
currentGroup
} else {
group
}
}
def prepareStoreGroup(group: GroupMetadata,
groupAssignment: Map[String, Array[Byte]],
responseCallback: Errors => Unit): Option[DelayedStore] = {
getMagicAndTimestamp(partitionFor(group.groupId)) match {
case Some((magicValue, timestampType, timestamp)) =>
val groupMetadataValueVersion = {
if (interBrokerProtocolVersion < KAFKA_0_10_1_IV0)
0.toShort
else
GroupMetadataManager.CURRENT_GROUP_VALUE_SCHEMA_VERSION
}
val record = Record.create(magicValue, timestampType, timestamp,
GroupMetadataManager.groupMetadataKey(group.groupId),
GroupMetadataManager.groupMetadataValue(group, groupAssignment, version = groupMetadataValueVersion))
val groupMetadataPartition = new TopicPartition(Topic.GroupMetadataTopicName, partitionFor(group.groupId))
val groupMetadataRecords = Map(groupMetadataPartition -> MemoryRecords.withRecords(timestampType, compressionType, record))
val generationId = group.generationId
// set the callback function to insert the created group into cache after log append completed
def putCacheCallback(responseStatus: Map[TopicPartition, PartitionResponse]) {
        // the append response should only contain this topic partition
if (responseStatus.size != 1 || !responseStatus.contains(groupMetadataPartition))
throw new IllegalStateException("Append status %s should only have one partition %s"
.format(responseStatus, groupMetadataPartition))
        // construct the error status to propagate back in the assignment response
val status = responseStatus(groupMetadataPartition)
val statusError = Errors.forCode(status.errorCode)
val responseError = if (statusError == Errors.NONE) {
Errors.NONE
} else {
debug(s"Metadata from group ${group.groupId} with generation $generationId failed when appending to log " +
s"due to ${statusError.exceptionName}")
          // transform the log append error code into the corresponding commit status error code
statusError match {
case Errors.UNKNOWN_TOPIC_OR_PARTITION
| Errors.NOT_ENOUGH_REPLICAS
| Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND =>
Errors.GROUP_COORDINATOR_NOT_AVAILABLE
case Errors.NOT_LEADER_FOR_PARTITION =>
Errors.NOT_COORDINATOR_FOR_GROUP
case Errors.REQUEST_TIMED_OUT =>
Errors.REBALANCE_IN_PROGRESS
case Errors.MESSAGE_TOO_LARGE
| Errors.RECORD_LIST_TOO_LARGE
| Errors.INVALID_FETCH_SIZE =>
error(s"Appending metadata message for group ${group.groupId} generation $generationId failed due to " +
s"${statusError.exceptionName}, returning UNKNOWN error code to the client")
Errors.UNKNOWN
case other =>
error(s"Appending metadata message for group ${group.groupId} generation $generationId failed " +
s"due to unexpected error: ${statusError.exceptionName}")
other
}
}
responseCallback(responseError)
}
Some(DelayedStore(groupMetadataRecords, putCacheCallback))
case None =>
responseCallback(Errors.NOT_COORDINATOR_FOR_GROUP)
None
}
}
def store(delayedStore: DelayedStore) {
// call replica manager to append the group message
replicaManager.appendRecords(
config.offsetCommitTimeoutMs.toLong,
config.offsetCommitRequiredAcks,
true, // allow appending to internal offset topic
delayedStore.partitionRecords,
delayedStore.callback)
}
/**
   * Store offsets by appending them to the replicated log and then inserting them into the cache
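   *
   * Usage sketch (caller-side; names such as groupManager are illustrative): first prepare the
   * append, then hand the returned DelayedStore to store():
   * {{{
   *   groupManager.prepareStoreOffsets(group, memberId, generationId, offsets, callback)
   *     .foreach(groupManager.store)
   * }}}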
*/
def prepareStoreOffsets(group: GroupMetadata,
consumerId: String,
generationId: Int,
offsetMetadata: immutable.Map[TopicPartition, OffsetAndMetadata],
responseCallback: immutable.Map[TopicPartition, Short] => Unit): Option[DelayedStore] = {
// first filter out partitions with offset metadata size exceeding limit
val filteredOffsetMetadata = offsetMetadata.filter { case (_, offsetAndMetadata) =>
validateOffsetMetadataLength(offsetAndMetadata.metadata)
}
// construct the message set to append
getMagicAndTimestamp(partitionFor(group.groupId)) match {
case Some((magicValue, timestampType, timestamp)) =>
val records = filteredOffsetMetadata.map { case (topicPartition, offsetAndMetadata) =>
Record.create(magicValue, timestampType, timestamp,
GroupMetadataManager.offsetCommitKey(group.groupId, topicPartition.topic, topicPartition.partition),
GroupMetadataManager.offsetCommitValue(offsetAndMetadata))
}.toSeq
val offsetTopicPartition = new TopicPartition(Topic.GroupMetadataTopicName, partitionFor(group.groupId))
val entries = Map(offsetTopicPartition -> MemoryRecords.withRecords(timestampType, compressionType, records:_*))
// set the callback function to insert offsets into cache after log append completed
def putCacheCallback(responseStatus: Map[TopicPartition, PartitionResponse]) {
        // the append response should only contain this topic partition
if (responseStatus.size != 1 || ! responseStatus.contains(offsetTopicPartition))
throw new IllegalStateException("Append status %s should only have one partition %s"
.format(responseStatus, offsetTopicPartition))
// construct the commit response status and insert
// the offset and metadata to cache if the append status has no error
val status = responseStatus(offsetTopicPartition)
val statusError = Errors.forCode(status.errorCode)
val responseCode =
group synchronized {
if (statusError == Errors.NONE) {
if (!group.is(Dead)) {
filteredOffsetMetadata.foreach { case (topicPartition, offsetAndMetadata) =>
group.completePendingOffsetWrite(topicPartition, offsetAndMetadata)
}
}
Errors.NONE.code
} else {
if (!group.is(Dead)) {
filteredOffsetMetadata.foreach { case (topicPartition, offsetAndMetadata) =>
group.failPendingOffsetWrite(topicPartition, offsetAndMetadata)
}
}
debug(s"Offset commit $filteredOffsetMetadata from group ${group.groupId}, consumer $consumerId " +
s"with generation $generationId failed when appending to log due to ${statusError.exceptionName}")
            // transform the log append error code into the corresponding commit status error code
val responseError = statusError match {
case Errors.UNKNOWN_TOPIC_OR_PARTITION
| Errors.NOT_ENOUGH_REPLICAS
| Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND =>
Errors.GROUP_COORDINATOR_NOT_AVAILABLE
case Errors.NOT_LEADER_FOR_PARTITION =>
Errors.NOT_COORDINATOR_FOR_GROUP
case Errors.MESSAGE_TOO_LARGE
| Errors.RECORD_LIST_TOO_LARGE
| Errors.INVALID_FETCH_SIZE =>
Errors.INVALID_COMMIT_OFFSET_SIZE
case other => other
}
responseError.code
}
}
// compute the final error codes for the commit response
val commitStatus = offsetMetadata.map { case (topicPartition, offsetAndMetadata) =>
if (validateOffsetMetadataLength(offsetAndMetadata.metadata))
(topicPartition, responseCode)
else
(topicPartition, Errors.OFFSET_METADATA_TOO_LARGE.code)
}
// finally trigger the callback logic passed from the API layer
responseCallback(commitStatus)
}
group synchronized {
group.prepareOffsetCommit(offsetMetadata)
}
Some(DelayedStore(entries, putCacheCallback))
case None =>
val commitStatus = offsetMetadata.map { case (topicPartition, offsetAndMetadata) =>
(topicPartition, Errors.NOT_COORDINATOR_FOR_GROUP.code)
}
responseCallback(commitStatus)
None
}
}
/**
* The most important guarantee that this API provides is that it should never return a stale offset. i.e., it either
* returns the current offset or it begins to sync the cache from the log (and returns an error code).
*/
def getOffsets(groupId: String, topicPartitions: Seq[TopicPartition]): Map[TopicPartition, OffsetFetchResponse.PartitionData] = {
trace("Getting offsets %s for group %s.".format(topicPartitions, groupId))
val group = groupMetadataCache.get(groupId)
if (group == null) {
topicPartitions.map { topicPartition =>
(topicPartition, new OffsetFetchResponse.PartitionData(OffsetFetchResponse.INVALID_OFFSET, "", Errors.NONE.code))
}.toMap
} else {
group synchronized {
if (group.is(Dead)) {
topicPartitions.map { topicPartition =>
(topicPartition, new OffsetFetchResponse.PartitionData(OffsetFetchResponse.INVALID_OFFSET, "", Errors.NONE.code))
}.toMap
} else {
if (topicPartitions.isEmpty) {
// Return offsets for all partitions owned by this consumer group. (this only applies to consumers that commit offsets to Kafka.)
group.allOffsets.map { case (topicPartition, offsetAndMetadata) =>
(topicPartition, new OffsetFetchResponse.PartitionData(offsetAndMetadata.offset, offsetAndMetadata.metadata, Errors.NONE.code))
}
} else {
topicPartitions.map { topicPartition =>
group.offset(topicPartition) match {
case None => (topicPartition, new OffsetFetchResponse.PartitionData(OffsetFetchResponse.INVALID_OFFSET, "", Errors.NONE.code))
case Some(offsetAndMetadata) =>
(topicPartition, new OffsetFetchResponse.PartitionData(offsetAndMetadata.offset, offsetAndMetadata.metadata, Errors.NONE.code))
}
}.toMap
}
}
}
}
}
/**
* Asynchronously read the partition from the offsets topic and populate the cache
*/
def loadGroupsForPartition(offsetsPartition: Int,
onGroupLoaded: GroupMetadata => Unit) {
val topicPartition = new TopicPartition(Topic.GroupMetadataTopicName, offsetsPartition)
scheduler.schedule(topicPartition.toString, loadGroupsAndOffsets)
def loadGroupsAndOffsets() {
info("Loading offsets and group metadata from " + topicPartition)
inLock(partitionLock) {
if (loadingPartitions.contains(offsetsPartition)) {
info("Offset load from %s already in progress.".format(topicPartition))
return
} else {
loadingPartitions.add(offsetsPartition)
}
}
val startMs = time.milliseconds()
try {
replicaManager.logManager.getLog(topicPartition) match {
case Some(log) =>
var currOffset = log.logSegments.head.baseOffset
val buffer = ByteBuffer.allocate(config.loadBufferSize)
// loop breaks if leader changes at any time during the load, since getHighWatermark is -1
val loadedOffsets = mutable.Map[GroupTopicPartition, OffsetAndMetadata]()
val removedOffsets = mutable.Set[GroupTopicPartition]()
val loadedGroups = mutable.Map[String, GroupMetadata]()
val removedGroups = mutable.Set[String]()
while (currOffset < getHighWatermark(offsetsPartition) && !shuttingDown.get()) {
buffer.clear()
val fileRecords = log.read(currOffset, config.loadBufferSize, minOneMessage = true).records.asInstanceOf[FileRecords]
fileRecords.readInto(buffer, 0)
MemoryRecords.readableRecords(buffer).deepEntries.asScala.foreach { entry =>
val record = entry.record
require(record.hasKey, "Offset entry key should not be null")
val baseKey = GroupMetadataManager.readMessageKey(record.key)
if (baseKey.isInstanceOf[OffsetKey]) {
// load offset
val key = baseKey.key.asInstanceOf[GroupTopicPartition]
if (record.hasNullValue) {
loadedOffsets.remove(key)
removedOffsets.add(key)
} else {
val value = GroupMetadataManager.readOffsetMessageValue(record.value)
loadedOffsets.put(key, value)
removedOffsets.remove(key)
}
} else {
// load group metadata
val groupId = baseKey.key.asInstanceOf[String]
val groupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, record.value)
if (groupMetadata != null) {
trace(s"Loaded group metadata for group ${groupMetadata.groupId} with generation ${groupMetadata.generationId}")
removedGroups.remove(groupId)
loadedGroups.put(groupId, groupMetadata)
} else {
loadedGroups.remove(groupId)
removedGroups.add(groupId)
}
}
currOffset = entry.nextOffset
}
}
val (groupOffsets, noGroupOffsets) = loadedOffsets
.groupBy(_._1.group)
.mapValues(_.map{ case (groupTopicPartition, offsetAndMetadata) => (groupTopicPartition.topicPartition, offsetAndMetadata)})
.partition(value => loadedGroups.contains(value._1))
loadedGroups.values.foreach { group =>
val offsets = groupOffsets.getOrElse(group.groupId, Map.empty)
loadGroup(group, offsets)
onGroupLoaded(group)
}
noGroupOffsets.foreach { case (groupId, offsets) =>
val group = new GroupMetadata(groupId)
loadGroup(group, offsets)
onGroupLoaded(group)
}
removedGroups.foreach { groupId =>
if (groupMetadataCache.contains(groupId))
throw new IllegalStateException(s"Unexpected unload of active group $groupId while " +
s"loading partition $topicPartition")
}
if (!shuttingDown.get())
info("Finished loading offsets from %s in %d milliseconds."
.format(topicPartition, time.milliseconds() - startMs))
case None =>
warn("No log found for " + topicPartition)
}
}
catch {
case t: Throwable =>
error("Error in loading offsets from " + topicPartition, t)
}
finally {
inLock(partitionLock) {
ownedPartitions.add(offsetsPartition)
loadingPartitions.remove(offsetsPartition)
}
}
}
}
private def loadGroup(group: GroupMetadata, offsets: Iterable[(TopicPartition, OffsetAndMetadata)]): Unit = {
val currentGroup = addGroup(group)
if (group != currentGroup) {
debug(s"Attempt to load group ${group.groupId} from log with generation ${group.generationId} failed " +
s"because there is already a cached group with generation ${currentGroup.generationId}")
} else {
offsets.foreach {
case (topicPartition, offsetAndMetadata) => {
val offset = offsetAndMetadata.copy (
expireTimestamp = {
// special handling for version 0:
// set the expiration time stamp as commit time stamp + server default retention time
if (offsetAndMetadata.expireTimestamp == org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_TIMESTAMP)
offsetAndMetadata.commitTimestamp + config.offsetsRetentionMs
else
offsetAndMetadata.expireTimestamp
}
)
trace("Loaded offset %s for %s.".format(offset, topicPartition))
group.completePendingOffsetWrite(topicPartition, offset)
}
}
}
}
/**
   * When this broker becomes a follower for an offsets topic partition, clear out the cache for groups that belong
   * to that partition.
*
* @param offsetsPartition Groups belonging to this partition of the offsets topic will be deleted from the cache.
*/
def removeGroupsForPartition(offsetsPartition: Int,
onGroupUnloaded: GroupMetadata => Unit) {
val topicPartition = new TopicPartition(Topic.GroupMetadataTopicName, offsetsPartition)
scheduler.schedule(topicPartition.toString, removeGroupsAndOffsets)
def removeGroupsAndOffsets() {
var numOffsetsRemoved = 0
var numGroupsRemoved = 0
inLock(partitionLock) {
// we need to guard the group removal in cache in the loading partition lock
// to prevent coordinator's check-and-get-group race condition
ownedPartitions.remove(offsetsPartition)
for (group <- groupMetadataCache.values) {
if (partitionFor(group.groupId) == offsetsPartition) {
onGroupUnloaded(group)
groupMetadataCache.remove(group.groupId, group)
numGroupsRemoved += 1
numOffsetsRemoved += group.numOffsets
}
}
}
if (numOffsetsRemoved > 0)
info(s"Removed $numOffsetsRemoved cached offsets for $topicPartition on follower transition.")
if (numGroupsRemoved > 0)
info(s"Removed $numGroupsRemoved cached groups for $topicPartition on follower transition.")
}
}
// visible for testing
private[coordinator] def cleanupGroupMetadata() {
val startMs = time.milliseconds()
var offsetsRemoved = 0
groupMetadataCache.foreach { case (groupId, group) =>
val (expiredOffsets, groupIsDead, generation) = group synchronized {
// remove expired offsets from the cache
val expiredOffsets = group.removeExpiredOffsets(startMs)
if (group.is(Empty) && !group.hasOffsets) {
info(s"Group $groupId transitioned to Dead in generation ${group.generationId}")
group.transitionTo(Dead)
}
(expiredOffsets, group.is(Dead), group.generationId)
}
val offsetsPartition = partitionFor(groupId)
val appendPartition = new TopicPartition(Topic.GroupMetadataTopicName, offsetsPartition)
getMagicAndTimestamp(offsetsPartition) match {
case Some((magicValue, timestampType, timestamp)) =>
val partitionOpt = replicaManager.getPartition(appendPartition)
partitionOpt.foreach { partition =>
val tombstones = expiredOffsets.map { case (topicPartition, offsetAndMetadata) =>
trace(s"Removing expired offset and metadata for $groupId, $topicPartition: $offsetAndMetadata")
val commitKey = GroupMetadataManager.offsetCommitKey(groupId, topicPartition.topic, topicPartition.partition)
Record.create(magicValue, timestampType, timestamp, commitKey, null)
}.toBuffer
trace(s"Marked ${expiredOffsets.size} offsets in $appendPartition for deletion.")
// We avoid writing the tombstone when the generationId is 0, since this group is only using
// Kafka for offset storage.
if (groupIsDead && groupMetadataCache.remove(groupId, group) && generation > 0) {
// Append the tombstone messages to the partition. It is okay if the replicas don't receive these (say,
// if we crash or leaders move) since the new leaders will still expire the consumers with heartbeat and
// retry removing this group.
tombstones += Record.create(magicValue, timestampType, timestamp, GroupMetadataManager.groupMetadataKey(group.groupId), null)
trace(s"Group $groupId removed from the metadata cache and marked for deletion in $appendPartition.")
}
if (tombstones.nonEmpty) {
try {
// do not need to require acks since even if the tombstone is lost,
// it will be appended again in the next purge cycle
partition.appendRecordsToLeader(MemoryRecords.withRecords(timestampType, compressionType, tombstones: _*))
offsetsRemoved += expiredOffsets.size
trace(s"Successfully appended ${tombstones.size} tombstones to $appendPartition for expired offsets and/or metadata for group $groupId")
} catch {
case t: Throwable =>
error(s"Failed to append ${tombstones.size} tombstones to $appendPartition for expired offsets and/or metadata for group $groupId.", t)
// ignore and continue
}
}
}
case None =>
info(s"BrokerId $brokerId is no longer a coordinator for the group $groupId. Proceeding cleanup for other alive groups")
}
}
info(s"Removed $offsetsRemoved expired offsets in ${time.milliseconds() - startMs} milliseconds.")
}
private def getHighWatermark(partitionId: Int): Long = {
val partitionOpt = replicaManager.getPartition(new TopicPartition(Topic.GroupMetadataTopicName, partitionId))
val hw = partitionOpt.map { partition =>
partition.leaderReplicaIfLocal().map(_.highWatermark.messageOffset).getOrElse(-1L)
}.getOrElse(-1L)
hw
}
/*
* Check if the offset metadata length is valid
*/
private def validateOffsetMetadataLength(metadata: String) : Boolean = {
metadata == null || metadata.length() <= config.maxMetadataSize
}
def shutdown() {
shuttingDown.set(true)
if (scheduler.isStarted)
scheduler.shutdown()
// TODO: clear the caches
}
/**
* Gets the partition count of the offsets topic from ZooKeeper.
* If the topic does not exist, the configured partition count is returned.
*/
private def getOffsetsTopicPartitionCount = {
val topic = Topic.GroupMetadataTopicName
val topicData = zkUtils.getPartitionAssignmentForTopics(Seq(topic))
if (topicData(topic).nonEmpty)
topicData(topic).size
else
config.offsetsTopicNumPartitions
}
/**
* Check if the replica is local and return the message format version and timestamp
*
* @param partition Partition of GroupMetadataTopic
   * @return Some((messageFormatVersion, timestampType, timestamp)) if the replica is local, None otherwise
*/
private def getMagicAndTimestamp(partition: Int): Option[(Byte, TimestampType, Long)] = {
val groupMetadataTopicPartition = new TopicPartition(Topic.GroupMetadataTopicName, partition)
replicaManager.getMagicAndTimestampType(groupMetadataTopicPartition).map { case (messageFormatVersion, timestampType) =>
val timestamp = if (messageFormatVersion == Record.MAGIC_VALUE_V0) Record.NO_TIMESTAMP else time.milliseconds()
(messageFormatVersion, timestampType, timestamp)
}
}
/**
* Add the partition into the owned list
*
* NOTE: this is for test only
*/
def addPartitionOwnership(partition: Int) {
inLock(partitionLock) {
ownedPartitions.add(partition)
}
}
}
/**
 * Messages stored for the group topic have versions for both the key and value fields. Key
* version is used to indicate the type of the message (also to differentiate different types
* of messages from being compacted together if they have the same field values); and value
* version is used to evolve the messages within their data types:
*
* key version 0: group consumption offset
* -> value version 0: [offset, metadata, timestamp]
*
* key version 1: group consumption offset
* -> value version 1: [offset, metadata, commit_timestamp, expire_timestamp]
*
 * key version 2: group metadata
 * -> value version 0: [protocol_type, generation, protocol, leader, members]
 * -> value version 1: [protocol_type, generation, protocol, leader, members] (member metadata also carries rebalance_timeout)
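 *
 * For illustration (hypothetical input buffers), one record of each kind can be decoded with
 * the helpers defined below:
 * {{{
 *   GroupMetadataManager.readMessageKey(keyBuffer) match {
 *     case OffsetKey(_, groupTopicPartition) => // offset commit; value decodes via readOffsetMessageValue
 *     case GroupMetadataKey(_, groupId)      => // group metadata; value decodes via readGroupMessageValue
 *   }
 * }}}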
*/
object GroupMetadataManager {
private val CURRENT_OFFSET_KEY_SCHEMA_VERSION = 1.toShort
private val CURRENT_GROUP_KEY_SCHEMA_VERSION = 2.toShort
private val OFFSET_COMMIT_KEY_SCHEMA = new Schema(new Field("group", STRING),
new Field("topic", STRING),
new Field("partition", INT32))
private val OFFSET_KEY_GROUP_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("group")
private val OFFSET_KEY_TOPIC_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("topic")
private val OFFSET_KEY_PARTITION_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("partition")
private val OFFSET_COMMIT_VALUE_SCHEMA_V0 = new Schema(new Field("offset", INT64),
new Field("metadata", STRING, "Associated metadata.", ""),
new Field("timestamp", INT64))
private val OFFSET_VALUE_OFFSET_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("offset")
private val OFFSET_VALUE_METADATA_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("metadata")
private val OFFSET_VALUE_TIMESTAMP_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("timestamp")
private val OFFSET_COMMIT_VALUE_SCHEMA_V1 = new Schema(new Field("offset", INT64),
new Field("metadata", STRING, "Associated metadata.", ""),
new Field("commit_timestamp", INT64),
new Field("expire_timestamp", INT64))
private val OFFSET_VALUE_OFFSET_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("offset")
private val OFFSET_VALUE_METADATA_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("metadata")
private val OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("commit_timestamp")
private val OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("expire_timestamp")
private val GROUP_METADATA_KEY_SCHEMA = new Schema(new Field("group", STRING))
private val GROUP_KEY_GROUP_FIELD = GROUP_METADATA_KEY_SCHEMA.get("group")
private val MEMBER_ID_KEY = "member_id"
private val CLIENT_ID_KEY = "client_id"
private val CLIENT_HOST_KEY = "client_host"
private val REBALANCE_TIMEOUT_KEY = "rebalance_timeout"
private val SESSION_TIMEOUT_KEY = "session_timeout"
private val SUBSCRIPTION_KEY = "subscription"
private val ASSIGNMENT_KEY = "assignment"
private val MEMBER_METADATA_V0 = new Schema(
new Field(MEMBER_ID_KEY, STRING),
new Field(CLIENT_ID_KEY, STRING),
new Field(CLIENT_HOST_KEY, STRING),
new Field(SESSION_TIMEOUT_KEY, INT32),
new Field(SUBSCRIPTION_KEY, BYTES),
new Field(ASSIGNMENT_KEY, BYTES))
private val MEMBER_METADATA_V1 = new Schema(
new Field(MEMBER_ID_KEY, STRING),
new Field(CLIENT_ID_KEY, STRING),
new Field(CLIENT_HOST_KEY, STRING),
new Field(REBALANCE_TIMEOUT_KEY, INT32),
new Field(SESSION_TIMEOUT_KEY, INT32),
new Field(SUBSCRIPTION_KEY, BYTES),
new Field(ASSIGNMENT_KEY, BYTES))
private val PROTOCOL_TYPE_KEY = "protocol_type"
private val GENERATION_KEY = "generation"
private val PROTOCOL_KEY = "protocol"
private val LEADER_KEY = "leader"
private val MEMBERS_KEY = "members"
private val GROUP_METADATA_VALUE_SCHEMA_V0 = new Schema(
new Field(PROTOCOL_TYPE_KEY, STRING),
new Field(GENERATION_KEY, INT32),
new Field(PROTOCOL_KEY, NULLABLE_STRING),
new Field(LEADER_KEY, NULLABLE_STRING),
new Field(MEMBERS_KEY, new ArrayOf(MEMBER_METADATA_V0)))
private val GROUP_METADATA_VALUE_SCHEMA_V1 = new Schema(
new Field(PROTOCOL_TYPE_KEY, STRING),
new Field(GENERATION_KEY, INT32),
new Field(PROTOCOL_KEY, NULLABLE_STRING),
new Field(LEADER_KEY, NULLABLE_STRING),
new Field(MEMBERS_KEY, new ArrayOf(MEMBER_METADATA_V1)))
// map of versions to key schemas as data types
private val MESSAGE_TYPE_SCHEMAS = Map(
0 -> OFFSET_COMMIT_KEY_SCHEMA,
1 -> OFFSET_COMMIT_KEY_SCHEMA,
2 -> GROUP_METADATA_KEY_SCHEMA)
// map of version of offset value schemas
private val OFFSET_VALUE_SCHEMAS = Map(
0 -> OFFSET_COMMIT_VALUE_SCHEMA_V0,
1 -> OFFSET_COMMIT_VALUE_SCHEMA_V1)
private val CURRENT_OFFSET_VALUE_SCHEMA_VERSION = 1.toShort
// map of version of group metadata value schemas
private val GROUP_VALUE_SCHEMAS = Map(
0 -> GROUP_METADATA_VALUE_SCHEMA_V0,
1 -> GROUP_METADATA_VALUE_SCHEMA_V1)
private val CURRENT_GROUP_VALUE_SCHEMA_VERSION = 1.toShort
private val CURRENT_OFFSET_KEY_SCHEMA = schemaForKey(CURRENT_OFFSET_KEY_SCHEMA_VERSION)
private val CURRENT_GROUP_KEY_SCHEMA = schemaForKey(CURRENT_GROUP_KEY_SCHEMA_VERSION)
private val CURRENT_OFFSET_VALUE_SCHEMA = schemaForOffset(CURRENT_OFFSET_VALUE_SCHEMA_VERSION)
private val CURRENT_GROUP_VALUE_SCHEMA = schemaForGroup(CURRENT_GROUP_VALUE_SCHEMA_VERSION)
private def schemaForKey(version: Int) = {
val schemaOpt = MESSAGE_TYPE_SCHEMAS.get(version)
schemaOpt match {
case Some(schema) => schema
case _ => throw new KafkaException("Unknown offset schema version " + version)
}
}
private def schemaForOffset(version: Int) = {
val schemaOpt = OFFSET_VALUE_SCHEMAS.get(version)
schemaOpt match {
case Some(schema) => schema
case _ => throw new KafkaException("Unknown offset schema version " + version)
}
}
private def schemaForGroup(version: Int) = {
val schemaOpt = GROUP_VALUE_SCHEMAS.get(version)
schemaOpt match {
case Some(schema) => schema
case _ => throw new KafkaException("Unknown group metadata version " + version)
}
}
/**
* Generates the key for offset commit message for given (group, topic, partition)
*
* @return key for offset commit message
*/
private def offsetCommitKey(group: String, topic: String, partition: Int, versionId: Short = 0): Array[Byte] = {
val key = new Struct(CURRENT_OFFSET_KEY_SCHEMA)
key.set(OFFSET_KEY_GROUP_FIELD, group)
key.set(OFFSET_KEY_TOPIC_FIELD, topic)
key.set(OFFSET_KEY_PARTITION_FIELD, partition)
val byteBuffer = ByteBuffer.allocate(2 /* version */ + key.sizeOf)
byteBuffer.putShort(CURRENT_OFFSET_KEY_SCHEMA_VERSION)
key.writeTo(byteBuffer)
byteBuffer.array()
}
/**
* Generates the key for group metadata message for given group
*
* @return key bytes for group metadata message
*/
def groupMetadataKey(group: String): Array[Byte] = {
val key = new Struct(CURRENT_GROUP_KEY_SCHEMA)
key.set(GROUP_KEY_GROUP_FIELD, group)
val byteBuffer = ByteBuffer.allocate(2 /* version */ + key.sizeOf)
byteBuffer.putShort(CURRENT_GROUP_KEY_SCHEMA_VERSION)
key.writeTo(byteBuffer)
byteBuffer.array()
}
/**
* Generates the payload for offset commit message from given offset and metadata
*
* @param offsetAndMetadata consumer's current offset and metadata
* @return payload for offset commit message
*/
private def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata): Array[Byte] = {
// generate commit value with schema version 1
val value = new Struct(CURRENT_OFFSET_VALUE_SCHEMA)
value.set(OFFSET_VALUE_OFFSET_FIELD_V1, offsetAndMetadata.offset)
value.set(OFFSET_VALUE_METADATA_FIELD_V1, offsetAndMetadata.metadata)
value.set(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1, offsetAndMetadata.commitTimestamp)
value.set(OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1, offsetAndMetadata.expireTimestamp)
val byteBuffer = ByteBuffer.allocate(2 /* version */ + value.sizeOf)
byteBuffer.putShort(CURRENT_OFFSET_VALUE_SCHEMA_VERSION)
value.writeTo(byteBuffer)
byteBuffer.array()
}
/**
* Generates the payload for group metadata message from given offset and metadata
* assuming the generation id, selected protocol, leader and member assignment are all available
*
* @param groupMetadata current group metadata
* @param assignment the assignment for the rebalancing generation
* @param version the version of the value message to use
   * @return payload for group metadata message
*/
def groupMetadataValue(groupMetadata: GroupMetadata,
assignment: Map[String, Array[Byte]],
version: Short = 0): Array[Byte] = {
val value = if (version == 0) new Struct(GROUP_METADATA_VALUE_SCHEMA_V0) else new Struct(CURRENT_GROUP_VALUE_SCHEMA)
value.set(PROTOCOL_TYPE_KEY, groupMetadata.protocolType.getOrElse(""))
value.set(GENERATION_KEY, groupMetadata.generationId)
value.set(PROTOCOL_KEY, groupMetadata.protocol)
value.set(LEADER_KEY, groupMetadata.leaderId)
val memberArray = groupMetadata.allMemberMetadata.map {
case memberMetadata =>
val memberStruct = value.instance(MEMBERS_KEY)
memberStruct.set(MEMBER_ID_KEY, memberMetadata.memberId)
memberStruct.set(CLIENT_ID_KEY, memberMetadata.clientId)
memberStruct.set(CLIENT_HOST_KEY, memberMetadata.clientHost)
memberStruct.set(SESSION_TIMEOUT_KEY, memberMetadata.sessionTimeoutMs)
if (version > 0)
memberStruct.set(REBALANCE_TIMEOUT_KEY, memberMetadata.rebalanceTimeoutMs)
val metadata = memberMetadata.metadata(groupMetadata.protocol)
memberStruct.set(SUBSCRIPTION_KEY, ByteBuffer.wrap(metadata))
val memberAssignment = assignment(memberMetadata.memberId)
assert(memberAssignment != null)
memberStruct.set(ASSIGNMENT_KEY, ByteBuffer.wrap(memberAssignment))
memberStruct
}
value.set(MEMBERS_KEY, memberArray.toArray)
val byteBuffer = ByteBuffer.allocate(2 /* version */ + value.sizeOf)
byteBuffer.putShort(version)
value.writeTo(byteBuffer)
byteBuffer.array()
}
/**
* Decodes the offset messages' key
*
* @param buffer input byte-buffer
   * @return the decoded key as a BaseKey (either an OffsetKey or a GroupMetadataKey)
*/
def readMessageKey(buffer: ByteBuffer): BaseKey = {
val version = buffer.getShort
val keySchema = schemaForKey(version)
val key = keySchema.read(buffer)
if (version <= CURRENT_OFFSET_KEY_SCHEMA_VERSION) {
      // versions 0 and 1 refer to offset commits
val group = key.get(OFFSET_KEY_GROUP_FIELD).asInstanceOf[String]
val topic = key.get(OFFSET_KEY_TOPIC_FIELD).asInstanceOf[String]
val partition = key.get(OFFSET_KEY_PARTITION_FIELD).asInstanceOf[Int]
OffsetKey(version, GroupTopicPartition(group, new TopicPartition(topic, partition)))
} else if (version == CURRENT_GROUP_KEY_SCHEMA_VERSION) {
      // version 2 refers to group metadata
val group = key.get(GROUP_KEY_GROUP_FIELD).asInstanceOf[String]
GroupMetadataKey(version, group)
} else {
throw new IllegalStateException("Unknown version " + version + " for group metadata message")
}
}
/**
* Decodes the offset messages' payload and retrieves offset and metadata from it
*
* @param buffer input byte-buffer
* @return an offset-metadata object from the message
*/
def readOffsetMessageValue(buffer: ByteBuffer): OffsetAndMetadata = {
if (buffer == null) { // tombstone
null
} else {
val version = buffer.getShort
val valueSchema = schemaForOffset(version)
val value = valueSchema.read(buffer)
if (version == 0) {
val offset = value.get(OFFSET_VALUE_OFFSET_FIELD_V0).asInstanceOf[Long]
val metadata = value.get(OFFSET_VALUE_METADATA_FIELD_V0).asInstanceOf[String]
val timestamp = value.get(OFFSET_VALUE_TIMESTAMP_FIELD_V0).asInstanceOf[Long]
OffsetAndMetadata(offset, metadata, timestamp)
} else if (version == 1) {
val offset = value.get(OFFSET_VALUE_OFFSET_FIELD_V1).asInstanceOf[Long]
val metadata = value.get(OFFSET_VALUE_METADATA_FIELD_V1).asInstanceOf[String]
val commitTimestamp = value.get(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1).asInstanceOf[Long]
val expireTimestamp = value.get(OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1).asInstanceOf[Long]
OffsetAndMetadata(offset, metadata, commitTimestamp, expireTimestamp)
} else {
throw new IllegalStateException("Unknown offset message version")
}
}
}
/**
   * Decodes the group metadata messages' payload and retrieves its member metadata from it
*
* @param buffer input byte-buffer
* @return a group metadata object from the message
*/
def readGroupMessageValue(groupId: String, buffer: ByteBuffer): GroupMetadata = {
if (buffer == null) { // tombstone
null
} else {
val version = buffer.getShort
val valueSchema = schemaForGroup(version)
val value = valueSchema.read(buffer)
if (version == 0 || version == 1) {
val protocolType = value.get(PROTOCOL_TYPE_KEY).asInstanceOf[String]
val memberMetadataArray = value.getArray(MEMBERS_KEY)
val initialState = if (memberMetadataArray.isEmpty) Empty else Stable
val group = new GroupMetadata(groupId, initialState)
group.generationId = value.get(GENERATION_KEY).asInstanceOf[Int]
group.leaderId = value.get(LEADER_KEY).asInstanceOf[String]
group.protocol = value.get(PROTOCOL_KEY).asInstanceOf[String]
memberMetadataArray.foreach { memberMetadataObj =>
val memberMetadata = memberMetadataObj.asInstanceOf[Struct]
val memberId = memberMetadata.get(MEMBER_ID_KEY).asInstanceOf[String]
val clientId = memberMetadata.get(CLIENT_ID_KEY).asInstanceOf[String]
val clientHost = memberMetadata.get(CLIENT_HOST_KEY).asInstanceOf[String]
val sessionTimeout = memberMetadata.get(SESSION_TIMEOUT_KEY).asInstanceOf[Int]
val rebalanceTimeout = if (version == 0) sessionTimeout else memberMetadata.get(REBALANCE_TIMEOUT_KEY).asInstanceOf[Int]
val subscription = Utils.toArray(memberMetadata.get(SUBSCRIPTION_KEY).asInstanceOf[ByteBuffer])
val member = new MemberMetadata(memberId, groupId, clientId, clientHost, rebalanceTimeout, sessionTimeout,
protocolType, List((group.protocol, subscription)))
member.assignment = Utils.toArray(memberMetadata.get(ASSIGNMENT_KEY).asInstanceOf[ByteBuffer])
group.add(memberId, member)
}
group
} else {
throw new IllegalStateException("Unknown group metadata message version")
}
}
}
// Formatter for use with tools such as console consumer: Consumer should also set exclude.internal.topics to false.
  // (specify --formatter "kafka.coordinator.GroupMetadataManager\$OffsetsMessageFormatter" when consuming __consumer_offsets)
class OffsetsMessageFormatter extends MessageFormatter {
def writeTo(consumerRecord: ConsumerRecord[Array[Byte], Array[Byte]], output: PrintStream) {
Option(consumerRecord.key).map(key => GroupMetadataManager.readMessageKey(ByteBuffer.wrap(key))).foreach {
// Only print if the message is an offset record.
// We ignore the timestamp of the message because GroupMetadataMessage has its own timestamp.
case offsetKey: OffsetKey =>
val groupTopicPartition = offsetKey.key
val value = consumerRecord.value
val formattedValue =
if (value == null) "NULL"
else GroupMetadataManager.readOffsetMessageValue(ByteBuffer.wrap(value)).toString
output.write(groupTopicPartition.toString.getBytes)
output.write("::".getBytes)
output.write(formattedValue.getBytes)
output.write("\\n".getBytes)
case _ => // no-op
}
}
}
// Formatter for use with tools to read group metadata history
class GroupMetadataMessageFormatter extends MessageFormatter {
def writeTo(consumerRecord: ConsumerRecord[Array[Byte], Array[Byte]], output: PrintStream) {
Option(consumerRecord.key).map(key => GroupMetadataManager.readMessageKey(ByteBuffer.wrap(key))).foreach {
// Only print if the message is a group metadata record.
// We ignore the timestamp of the message because GroupMetadataMessage has its own timestamp.
case groupMetadataKey: GroupMetadataKey =>
val groupId = groupMetadataKey.key
val value = consumerRecord.value
val formattedValue =
if (value == null) "NULL"
else GroupMetadataManager.readGroupMessageValue(groupId, ByteBuffer.wrap(value)).toString
output.write(groupId.getBytes)
output.write("::".getBytes)
output.write(formattedValue.getBytes)
output.write("\\n".getBytes)
case _ => // no-op
}
}
}
}
case class DelayedStore(partitionRecords: Map[TopicPartition, MemoryRecords],
callback: Map[TopicPartition, PartitionResponse] => Unit)
case class GroupTopicPartition(group: String, topicPartition: TopicPartition) {
def this(group: String, topic: String, partition: Int) =
this(group, new TopicPartition(topic, partition))
override def toString =
"[%s,%s,%d]".format(group, topicPartition.topic, topicPartition.partition)
}
trait BaseKey{
def version: Short
def key: Object
}
case class OffsetKey(version: Short, key: GroupTopicPartition) extends BaseKey {
override def toString = key.toString
}
case class GroupMetadataKey(version: Short, key: String) extends BaseKey {
override def toString = key
}
| eribeiro/kafka | core/src/main/scala/kafka/coordinator/GroupMetadataManager.scala | Scala | apache-2.0 | 46,660 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller, Vishnu Gowda Harish
* @version 1.3
* @date Tue Mar 8 18:30:36 EST 2016
* @see LICENSE (MIT style license file).
*/
package testing.linalgebra
import org.junit.Test
import scala.math.{abs, max, min, sqrt}
import scalation.linalgebra.{VectorD, VectorI, VectorL,RleVectorD}
import scalation.math.double_exp
import scalation.random.{Randi0, RandomVecD, Uniform}
import testing.Tester
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `RleVectorD_T` driver class conducts unit testing on the `RleVectorD` class
* by invoking the RleVectorD_T testing object. Run 'test-only' to test `RleVectorD`
* or 'test' to run all unit tests.
*------------------------------------------------------------------------------
* > test-only testing.linalgebra.RleVectorD_T
* > test
*/
class RleVectorD_T { @Test def testAll () { RleVectorD_T } }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `RleVectorD_T` object conducts unit testing on the `RleVectorD` class using the
* `Tester` trait. It compares correctness/performance of a method/operator 'call'
* to an 'oracle' and optionally a 'contender'.
*------------------------------------------------------------------------------
* All methods except 'this', 'apply', 'update', 'foreach' and 'hashCode' should be tested.
* May skip '=op' if 'op' is tested, e.g., skip '+=' if '+' is tested.
* Also the 'equals' and 'toString' are tested implicitly.
* Depending on the 'CORRECT' flag, it will either test correctness or performance.
* Note, if the code for the 'contender' or even the 'oracle' is significantly faster,
 * the method/operator may need to be re-coded.
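 *
 * For example, a correctness check below has the shape test (name, call, oracle):
 * {{{
 *   test ("sum", x.sum, x().sum)
 * }}}
 * where the second argument is the call under test and the third is the oracle it must match.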
*------------------------------------------------------------------------------
* To run directly, uncomment "// with App" and run 'test:runMain'.
* > test:runMain testing.linalgebra.RleVectorD_T
*/
object RleVectorD_T extends Tester //with App
{
// Reassign parameters from `Tester` trait as needed
DEBUG = false // debug flag
CORRECT = true // test correctness/performance
FOCUS = "" // method/operator to focus on, "" => all
KLASS = "RleVectorD" // the class under test
ITER = 10 // number of test iterations
// Size parameter(s) used for variables in 'test' (customize per class)
private val dim = 100 // vector size
// Random variate generators (customize per class)
private val rv = RandomVecD (dim = dim, density = 1.0) // random vector generator
private val rn = Uniform (0.0, 100.0) // random double generator
private val rj = Randi0 (0, dim) // random integer/index generator
// Variables used in 'test' (customize per class)
private val x = rv.repgen // first rle vector
private val y = rv.repgen // second rle vector
private var t = rn.gen // random value
private var z = x.toDense // dense version of first rle vector
private var s = 0.0 // scalar value
private var j = 0 // first integer/index value
private var k = 0 // second integer/index value
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
  /** Randomize all variables used in `Tester`'s 'test' method.
*/
def randomize ()
{
x set rv.repgen ()
y set rv.repgen ()
s = rn.gen
j = rj.igen
k = rj.igen
t = rn.gen
z = x.toDense
} // randomize
testClass ()
println ("\\nTest no argument methods/unary operators")
test ("unary-", -x,
{ val c = for (i <- x.indices) yield -x(i); RleVectorD (c) })
test ("abs", x.abs,
RleVectorD (for (i <- x.indices) yield abs (x(i))))
test ("argmax", x.argmax (),
x().indexOf (x().max))
test ("argmaxPos", x.argmaxPos (),
x().filter (_ >= 0.0).indexOf (x().max))
test ("argmin", x.argmin (),
x().indexOf (x().min))
test ("argminNeg", x.argminNeg (),
x().filter (_ <= 0.0).indexOf (x().min))
test ("countNeg", x.countNeg,
x().filter (_ < 0.0).size)
test ("countPos", x.countPos,
x().filter (_ > 0.0).size)
test ("cumulate", x.cumulate,
{ var sum = 0.0; VectorD (for (i <- x.indices) yield { sum += x(i); sum }) })
test ("distinct", x.distinct.toDense,
z.distinct)
test ("countinct", x.countinct,
x().distinct.length)
test ("expand", x.expand (),
x ++ new VectorD (x.dim))
test ("firstNeg", x.firstNeg (),
x().indexWhere (_ < 0.0))
test ("firstPos", x.firstPos (),
x().indexWhere (_ > 0.0))
test ("isNonnegative", x.isNonnegative,
! x().exists (_ < 0.0))
test ("isSorted", x.isSorted,
{ def isSo: Boolean = { for (i <- 1 until x.dim if x(i) < x(i-1)) return false; true }; isSo })
test ("max", x.max (),
x().max)
test ("min", x.min (),
x().min)
test ("normSq", x.normSq,
x dot x)
test ("norm", x.norm,
sqrt (x.normSq))
test ("norm1", x.norm1,
x.abs.sum)
test ("normalize", x.normalize,
x * (1.0 / x().sum))
test ("normalizeU", x.normalizeU,
x * (1.0 / x.norm))
test ("normalize1", x.normalize1,
x * (1.0 / x().max))
// test ("rank", x.rank,
// null)
test ("recip", x.recip,
new RleVectorD (x.one (x.dim) / x))
test ("reverse", x.reverse.reverse,
x)
test ("size", x.size,
x().size)
test ("sum", x.sum,
x().sum)
test ("sumAbs", x.sumAbs,
(for (i <- x.indices) yield abs (x(i))).sum)
test ("sumPos", x.sumPos,
(for (i <- x.indices) yield max (x(i), 0.0)).sum)
test ("swap", { x.swap (j, k); x },
{ val t = x(k); x(k) = x(j); x(j) = t; x })
test ("toInt", x.toInt,
VectorI (x ().map (_.toInt)))
test ("toLong", x.toLong,
VectorL (x ().map (_.toLong)))
test ("toDouble", x.toDouble,
VectorD (x ().map (_.toDouble)))
println ("\\nTest one argument methods/binary operators")
test ("++", x ++ y,
VectorD (x() ++ y()))
test ("++", x ++ s,
RleVectorD (x() :+ s))
test ("+", x + y,
RleVectorD (for (i <- x.indices) yield x(i) + y(i)))
test ("+", x + s,
RleVectorD (for (i <- x.indices) yield x(i) + s))
test ("+", x + (1, s),
{ x(1) += s; x.toDense })
test ("-", x - y,
RleVectorD (for (i <- x.indices) yield x(i) - y(i)))
test ("-", x - s,
RleVectorD (for (i <- x.indices) yield x(i) - s))
test ("-", x - (1, s),
{ x(1) -= s; x.toDense })
test ("*", x * y,
RleVectorD (for (i <- x.indices) yield x(i) * y(i)))
test ("*", x * s,
RleVectorD (for (i <- x.indices) yield x(i) * s))
test ("/", x / y,
RleVectorD (for (i <- x.indices) yield x(i) / y(i)))
test ("/", x / s,
RleVectorD (for (i <- x.indices) yield x(i) / s))
test ("~^", x ~^ s,
RleVectorD (for (i <- x.indices) yield x(i) ~^ s))
test ("contains", x contains s,
x() contains s)
test ("dot", x dot y,
(x * y).sum)
test ("exists", x.exists (_ > s),
x().exists (_ > s))
test ("filter", x.filter (_ > s),
RleVectorD(z.filter (_ > s)))
test ("filterPos", VectorI (x.filterPos (_ > s)),
VectorI (z.filterPos (_ > s)))
test ("indexOf", x indexOf j,
x() indexOf j)
test ("indexWhere", x.indexWhere (_ > j),
x().indexWhere (_ > j))
test ("map", x.map ((z: Double) => z * s),
RleVectorD (x().map (_ * s)))
test ("max", x max y,
VectorD (for (i <- x.indices) yield x(i) max y(i)))
test ("min", x min y,
VectorD (for (i <- x.indices) yield x(i) min y(i)))
test ("oneAt", x oneAt j,
{ val z = new VectorD (x.dim); z(j) = 1.0; RleVectorD (z) })
test ("_oneAt", x _oneAt j,
{ val z = new VectorD (x.dim); z(j) = -1.0; RleVectorD (z) })
test ("sameDimensions", x sameDimensions y,
x().length <= y().length)
test ("set", { x set s; x },
RleVectorD (Array.fill (x.dim)(s)))
test ("set", { x set y(); x },
RleVectorD (y()))
test ("sumNE", x sumNE 1,
x().sum - x(1))
test ("slice", x.slice (j, k),
VectorD (x().slice (j, k)))
test ("select", x.select (Array (j, k)),
{ val idx = Array (j, k); VectorD (for (i <- idx) yield x(i)) })
} // RleVectorD_T object
| NBKlepp/fda | scalation_1.3/scalation_mathstat/src/test/scala/testing/linalgebra/RleVectorD_T.scala | Scala | mit | 11,083 |
def foo[A <% String : Manifest](x: Int = 45) = x
foo[Int]()(<caret>)
// implicit ev$1: Int => String, ev$2: Manifest[Int] | triplequote/intellij-scala | scala/scala-impl/testdata/parameterInfo/functionParameterInfo/simple/SyntheticParameter.scala | Scala | apache-2.0 | 122 |
/*
* Copyright 2016 Guy Van den Broeck and Wannes Meert (UCLA and KU Leuven)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.ucla.cs.starai.forclift.languages.focnf
import scala.io._
import scala.math._
import edu.ucla.cs.starai.forclift._
import edu.ucla.cs.starai.forclift.util._
import edu.ucla.cs.starai.forclift.inference._
import edu.ucla.cs.starai.forclift.constraints._
import edu.ucla.cs.starai.forclift.formulas.visitor.Skolemization
import edu.ucla.cs.starai.forclift.formulas.visitor.Skolemizer
import edu.ucla.cs.starai.forclift.languages.StatRelModel
/** Default namespace for FOCNF predicates. */
class FOCNFNameSpace extends NameSpace[Any, String] {
var lastUsedVar = -1;
override protected def createName(obj: Any) = {
obj match {
case variable: Var => {
lastUsedVar += 1
val n = variable.asInstanceOf[Var].toString(lastUsedVar)
n(0).toLower + n.substring(1, n.length)
}
case constant: Constant => {
val n = constant.toString
n.capitalize
}
case pred: Predicate => {
val n = pred.toString
n(0).toLower + n.substring(1, n.length)
}
case _ => obj.toString
}
}
}
class FOCNFSkolemNameSpace extends EFNameSpace {
override def createName(f: Formula): String = {
nbPredicates += 1
"ef_" + nbPredicates + ""
}
}
object FOCNFSyntax extends FormulaSyntax {
override val kwName = "FO-CNF"
override val kwNegation = "-"
override val kwConjunction = "*"
override val kwDisjunction = " "
override val kwImplication = "*"
override val kwImplicationRev = "*"
override val kwEquivalence = "*"
override val kwWildcardNegation = "*"
override val kwExistential = "?"
override val kwUniversal = "!"
override val kwQuantSep = " "
override val kwComment = "c "
override val kwParentheses = "" :: "" :: Nil
}
//TODO Can be framed as a more general data structure for arbitrary formulas
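/**
 * In-memory representation of an FO-CNF model (formulas, predicates, domain sizes and weights).
 * For illustration only, toString below serializes a model line by line roughly as:
 * {{{
 *   p fo-cnf
 *   d <domain> <size> <known constants...>
 *   r <predicate>
 *   w <predicate or atom> <positive weight> <negative weight>
 *   <one formula per line, "-" for negation and " " between disjuncts>
 * }}}
 */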
case class FOCNF(
val formulas: List[Formula] = List(),
val predicates: Set[Predicate] = Set(),
val domainSizes: DomainSizes = DomainSizes.empty,
val predicateWeights: PredicateWeights = PredicateWeights.empty,
val atomWeights: List[(Atom,Weights)] = List()
) extends StatRelModel {
lazy val domains = predicates.flatMap{_.domains}.toSet
def weightedCNF(skolemize:Boolean=true):WeightedCNF = {
val focnf_sk = (if (skolemize) {
this.skolemize
} else {
val formulas2 = formulas.map{formula =>
// assumes NNF
formula.existToDisj
}
copy(formulas=formulas2)
})
val focnf_aw = focnf_sk.atomWeightsToFormula
val disjuncts = focnf_aw.formulas.flatMap{formula =>
formula.implFree.nnf().cnf match {
case cf:ConjFormula => cf.conjunctsToList
case df:DisjFormula => List(df)
case lf:LiteralFormula => List(lf)
case f => throw new IllegalStateException(s"ERROR: CNF is not a conjunction of disjuncts: $f")
}
}
val clauses = disjuncts.map{formula =>
val (pos_lits, neg_lits) = formula.cnf match {
case df:DisjFormula => df.disjunctsToList.foldLeft((Nil,Nil):(List[Atom],List[Atom])){(r,c) =>
c match {
case lit: LiteralFormula if lit.sign => (lit.atom :: r._1, r._2)
case lit: LiteralFormula if !lit.sign => (r._1, lit.atom :: r._2)
case f => throw new IllegalStateException(s"ERROR: Clause in CNF is not a disjunction: $f")
}
}
case lit: LiteralFormula if lit.sign => (lit.atom :: Nil, Nil)
case lit: LiteralFormula if !lit.sign => (Nil, lit.atom :: Nil)
case f => throw new IllegalStateException(s"ERROR: Clause in CNF is not a disjunction: $f")
}
Clause(pos_lits, neg_lits)
}
val cnf = new CNF(clauses)
val all_predicateWeights = focnf_aw.predicateWeights ++ predicates.filterNot(focnf_aw.predicateWeights.contains(_)).map { (_ -> Weights(1, 1)) }
WeightedCNF(cnf,
domainSizes,
all_predicateWeights)
}
override def toString: String = {
val domain_strings = domains.map{d=>
val domainSize = domainSizes.getOrElse(d,
throw new IllegalStateException(s"No domain size for domain $d")
)
s"d ${d.toString} ${domainSize.size} "+d.knownConstants.mkString(" ")
}.toList
val relation_strings = predicates.map("r "+_.toStringFull).toList
val pred_weight_strings = predicateWeights.map{case (pred,ws) =>
s"w $pred ${ws.posWDouble} ${ws.negWDouble}"
}.toList
val atom_weight_strings = atomWeights.map{case(atom,ws) =>
s"w $atom ${ws.posWDouble} ${ws.negWDouble}"
}.toList
val formula_strings = formulas.map{formula =>
val ns = new FOCNFNameSpace
formula.toString(ns, FOCNFSyntax)
}
("p fo-cnf" ::
domain_strings :::
relation_strings :::
pred_weight_strings :::
atom_weight_strings :::
formula_strings :::
Nil).mkString("\\n")
}
def skolemize: FOCNF = {
val exist_ns = new FOCNFSkolemNameSpace
val skolemizer = Skolemizer()
val (sk_formulas, sk_weights) = formulas.foldLeft(List[Formula](),PredicateWeights.empty){(r,formula) =>
val sk_result = skolemizer.skolemize(formula, exist_ns)
val (sk_formula, sk_formulas, sk_preds, sk_weights) = sk_result
// Constraints can be ignored, not supported in FO-CNF
(sk_formula :: sk_formulas.map{_._1} ::: r._1,
sk_weights ++ r._2)
}
copy(formulas=sk_formulas,
predicateWeights=predicateWeights++sk_weights)
}
def groundAtom(atom:Atom): List[Atom] = {
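    // Ground one variable at a time: substitute each constant of the variable's domain and
    // recurse until no variables remain, yielding every ground instance of the atom.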
if (atom.variables.isEmpty) {
List(atom)
} else {
val v = atom.variables.head
val domain = atom.domain(v)
val constants = domain.constants(domainSizes)
val groundedOnce = constants.map{c => atom.substitute{vv: Var => if (vv == v) c else vv}}
groundedOnce.flatMap{groundAtom(_)}
}
}
/**
* Translate atom weights to weighted predicates and formulas.
*/
def atomWeightsToFormula: FOCNF = {
// Exhaustively add all groundings if one of the predicate weights is zero
val atoms = atomWeights.map{case (atom,ws) => atom}.toSet
val zero_pred = predicateWeights.filter{case (pred,ws) => ws.posWDouble == 0 || ws.negWDouble == 0}.toList
val new_atomWeights = zero_pred.flatMap{case (pred,ws) =>
val atom = pred((0 until pred.arity).map(i => new Var):_*)
val ground_atoms = groundAtom(atom).toSet
val new_atoms = ground_atoms -- atoms
new_atoms.map((_,ws))
}
// Add all the atom weights as f <=> atom.
val (new_formulas, new_weights, new_preds) = (atomWeights++new_atomWeights).foldLeft(List[Formula](),PredicateWeights.empty,List[Predicate]()){(r,c) =>
val (atom,ws) = c
val default_ws = predicateWeights.get(atom.predicate) match {
case Some(w) => w
case None => Weights(1,1)
}
val pos_w = (if (default_ws.posWDouble == 0) ws.posWDouble else ws.posWDouble/default_ws.posWDouble)
val neg_w = (if (default_ws.negWDouble == 0) ws.negWDouble else ws.negWDouble/default_ws.negWDouble)
if (pos_w == 1 && neg_w == 1) {
r
} else {
val new_pred = new Predicate(Symbol(s"f_${atom.predicate.toString}_"+atom.args.mkString("_")), 0, Nil)
val new_formula = EqFormula(LiteralFormula(new_pred()), LiteralFormula(atom))
val new_ws = Weights(pos_w,neg_w)
(new_formula :: r._1,
r._2 + (new_pred,new_ws),
new_pred :: r._3)
}
}
val new_pred_ws = (predicateWeights -- zero_pred.map(_._1)) ++ new_weights
copy(formulas=formulas ::: new_formulas,
predicates=predicates ++ new_preds,
predicateWeights=new_pred_ws,
atomWeights=Nil)
}
}
| UCLA-StarAI/Forclift | src/main/scala/edu/ucla/cs/starai/forclift/languages/focnf/FOCNF.scala | Scala | apache-2.0 | 8,387 |
package sp.domain.logic
import play.api.libs.json._
import scala.util.Try
import java.time._
import sp.domain._
/**
 * To use the attributes, you also need to include the JSON formatters:
 * import sp.domain.Logic._ to get it all.
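 *
 * A minimal sketch of the helpers defined below (values arbitrary, formatters assumed in scope):
 * {{{
 *   val v: SPValue = 42        // via the implicit intToSPValue conversion
 *   v.getAs[Int]()             // Some(42)
 *   v === SPValue("42")        // true: string-wrapped numbers are unwrapped before comparison
 * }}}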
*/
object AttributeLogic extends AttributeLogics
trait AttributeLogics {
// Attribute logic
implicit def stringToSPValue(x: String): SPValue = SPValue(x)
implicit def intToSPValue(x: Int): SPValue = SPValue(x)
implicit def boolToSPValue(x: Boolean): SPValue = SPValue(x)
implicit class SPValueLogic(value: SPValue) {
def to[T](implicit fjs: JSReads[T]): Try[T] = {
Try{ value.as[T] }
}
def pretty: String = Json.prettyPrint(value)
def toJson: String = Json.stringify(value)
/**
     * Special equality check that also handles numbers and booleans that are wrapped in strings
     * @param obj the value to compare against
     * @return true if the values are equal once string-wrapped primitives are unwrapped
*/
def ===(obj: scala.Any): Boolean = {
super.equals(obj) ||
(obj.isInstanceOf[SPValue] &&
(value.fixStringedTypes == obj.asInstanceOf[SPValue].fixStringedTypes))
}
def fixStringedTypes: SPValue = {
value match {
case JsString(str) if str.nonEmpty =>
Try{SPValue(str.toInt)}
.orElse(Try{SPValue(str.toBoolean)})
.orElse(Try{SPValue(str.toDouble)})
.getOrElse(value)
case _ => value
}
}
def getAs[T](key: String = "")(implicit fjs: JSReads[T]): Option[T] = {
value match {
case x: SPAttributes => x.getAs[T](key)
case x if key.isEmpty => value.to[T].toOption
case x => None
}
}
}
def timeStamp: SPValue = {
import JsonLogic._
Json.toJson(ZonedDateTime.now)
}
implicit class SPAttributesLogic(x: SPAttributes) {
def addTimeStamp(): SPAttributes = {
x + ("time" -> timeStamp)
}
def merge(xs: SPAttributes): SPAttributes = x.deepMerge(xs)
def get(key: String): Option[SPValue] = {
      x \ key match {
case JsDefined(res) => Some(res)
case e: JsUndefined if key.isEmpty => Some(x)
case e: JsUndefined => None
}
}
def getAs[T](key: String = "")(implicit fjs: JSReads[T]): Option[T] = {
for {
x <- get(key)
t <- x.asOpt[T]
} yield t
}
def to[T](implicit fjs: JSReads[T]): Try[T] = Try{x.as[T]}
    def find(key: String): List[SPValue] = x \\ key toList
def findAs[T](key: String)(implicit fjs: JSReads[T]): List[T] = {
find(key).flatMap(_.asOpt[T])
}
def findType[T](implicit fjs: JSReads[T]): List[T] = {
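      // Recursively walk nested objects and arrays, collecting every value that deserializes
      // as T; objects that do not themselves parse as T are searched for nested matches.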
def extrType(xs: List[JsValue]): List[T] = {
xs.collect {
case l: JsObject =>
l.asOpt[T] match {
case Some(found) => List(found)
case None => l.findType[T]
}
case l: JsArray =>
extrType(l.value.toList)
}.flatten
}
extrType(x.values.toList)
}
def pretty: String = Json.prettyPrint(x)
def toJson: String = Json.stringify(x)
}
} | sequenceplanner/sp-domain | src/main/scala/sp/domain/logic/AttributeLogic.scala | Scala | mit | 3,044 |
package it.polimi.genomics.core.DataStructures.MetaJoinCondition
/**
* generic trait for [[MetaJoinCondition]] attributes
*/
sealed trait AttributeEvaluationStrategy
/**
* Default evaluation, two attributes match if both end
* with [[attribute]] .
*
* @param attribute the attribute name [prefix.]* name
*/
case class Default(attribute : String) extends AttributeEvaluationStrategy {
override def toString() : String = attribute
}
/**
* Only attributes exactly as [[attribute]] will match;
* no further prefixes are allowed.
*
* @param attribute the attribute name [prefix.]* name
*/
case class Exact(attribute : String) extends AttributeEvaluationStrategy {
override def toString() : String = "Exact ( " + attribute + " ) "
}
/**
* Two attributes match if they both end with [[attribute]]
 * and, if they have further prefixes, the two prefix
 * sequences are identical
*
* @param attribute the attribute name [prefix.]* name
*/
case class FullName(attribute : String) extends AttributeEvaluationStrategy{
override def toString() : String = "FullName ( " + attribute + " ) "
}
/**
 * Represents a metadata join condition between two datasets. The condition must be in the form:
* left->attr1==right->attr1 AND left->attr2==right->attr2 AND ....
* and it is stored as: List(attr1, attr2, ...)
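 *
 * For example (attribute names are illustrative only):
 * {{{
 *   // left->cell==right->cell AND left->antibody==right->antibody, where "antibody" must
 *   // match exactly while "cell" also matches prefixed variants such as "a.cell"
 *   MetaJoinCondition(List(Default("cell"), Exact("antibody")))
 * }}}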
 * @param attributes the list of attribute names involved in the condition
*/
case class MetaJoinCondition(attributes : List[AttributeEvaluationStrategy], negation:Boolean = false) {
}
| DEIB-GECO/GMQL | GMQL-Core/src/main/scala/it/polimi/genomics/core/DataStructures/MetaJoinCondition/MetaJoinCondition.scala | Scala | apache-2.0 | 1,531 |
package chapter19
/**
 * 19.5 Lower bounds
 *
 * Queue[T] cannot simply be made into Queue[+T], because T is the parameter type of the
 * enqueue method, and that is a negative position.
 *
 * The solution is to generalize enqueue polymorphically and give its type parameter a lower bound.
 *
 * U >: T
 * designates T as the lower bound of U, so U must be a supertype of T.
 * In other words, when U is Any, T can be String, Int, and so on.
 *
 * This is a good example of type-driven design.
 *
 * What has been described so far is the main reason for preferring declaration-site variance
 * over the use-site variance seen in Java's wildcards. Handling variance is hard and users
 * easily make mistakes, so they come away with the impression that wildcards and generics
 * are too complicated, and often give up on them.
 *
 * The Scala compiler double-checks that the methods you intend to provide are actually usable.
*/
/*
trait Queue[+T] {
def head: T
def tail: Queue[T]
def enqueue[U >: T](x: U): Queue[U]
}
object Queue {
def apply[T](xs: T*): Queue[T] = new QueueImpl[T](xs.toList, Nil)
private class QueueImpl[T] (
private val leading: List[T],
private val trailing: List[T]
) extends Queue[T] {
def mirror = {
if (leading.isEmpty)
new QueueImpl(trailing.reverse, Nil)
else
this
}
def head: T = mirror.leading.head
def tail: QueueImpl[T] = {
val q = mirror
new QueueImpl(q.leading.tail, q.trailing)
}
def enqueue[U >: T](x: U) = new QueueImpl[U](leading, x :: trailing)
}
}
*/
object c19_i05 extends App {
/*
val x: Queue[Any] = Queue(1,2,3)
val x2 = x.enqueue("Abc")
println(x2.tail.tail.tail.head)
*/
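  // A minimal, self-contained sketch of the same lower-bound mechanism as enqueue[U >: T] in
  // the commented-out Queue above: prepending a value of a supertype U of T widens the
  // result type to List[U].
  def prepend[T, U >: T](xs: List[T], x: U): List[U] = x :: xs
  println(prepend(List("abc", "def"), 42)) // prints List(42, abc, def), statically a List[Any]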
} | seraekim/srkim-lang-scala | src/main/java/chapter19/c19_i05.scala | Scala | bsd-3-clause | 1,945 |
/*
* Copyright (c) 2011, Daniel Spiewak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
* - Neither the name of "Anti-XML" nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.codecommit.antixml.util
import scala.collection.IndexedSeqLike
import scala.collection.generic.CanBuildFrom
import scala.collection.immutable.{IndexedSeq, VectorBuilder}
import scala.collection.mutable.{ArrayBuffer, Builder}
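/**
 * A small-collection specialization of Vector: sequences of length 0 to 4 are stored
 * directly in the dedicated case classes Vector0 through Vector4 (no underlying Vector
 * is allocated), while longer sequences are wrapped in VectorN, which delegates to a
 * plain Vector.
 */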
private[antixml] sealed trait VectorCase[+A] extends IndexedSeq[A] with IndexedSeqLike[A, VectorCase[A]] {
override protected[this] def newBuilder: Builder[A, VectorCase[A]] = VectorCase.newBuilder[A]
def +:[B >: A](b: B): VectorCase[B]
def :+[B >: A](b: B): VectorCase[B]
def apply(index: Int): A
def updated[B >: A](index: Int, b: B): VectorCase[B]
def ++[B >: A](that: VectorCase[B]): VectorCase[B]
def toVector: Vector[A]
}
private[antixml] object VectorCase {
implicit def canBuildFrom[A]: CanBuildFrom[Traversable[_], A, VectorCase[A]] = new CanBuildFrom[Traversable[_], A, VectorCase[A]] {
def apply() = newBuilder[A]
def apply(from: Traversable[_]) = newBuilder[A]
}
def empty[A] = VectorN[A](Vector.empty)
def newBuilder[A]: Builder[A, VectorCase[A]] = new Builder[A, VectorCase[A]] { this: Builder[A, VectorCase[A]] =>
val small = new ArrayBuffer[A](4)
var builder: VectorBuilder[A] = _
def +=(a: A) = {
if (builder == null) {
small += a
if (small.length > 4) {
builder = new VectorBuilder[A]
builder ++= small
}
} else {
builder += a
}
this
}
override def ++=(seq: TraversableOnce[A]) = {
if (builder == null) {
small ++= seq
if (small.length > 4) {
builder = new VectorBuilder[A]
builder ++= small
}
} else {
builder ++= seq
}
this
}
def result() = {
if (builder == null) {
small.length match {
case 0 => Vector0
case 1 => Vector1(small(0))
case 2 => Vector2(small(0), small(1))
case 3 => Vector3(small(0), small(1), small(2))
case 4 => Vector4(small(0), small(1), small(2), small(3))
}
} else {
VectorN(builder.result())
}
}
def clear() = this
}
def apply[A](as: A*) = fromSeq(as)
def fromSeq[A](seq: Seq[A]) = seq match {
case c: VectorCase[A] => c
case _ if seq.lengthCompare(0) <= 0 => Vector0
case _ if seq.lengthCompare(1) <= 0 => Vector1(seq(0))
case _ if seq.lengthCompare(2) <= 0 => Vector2(seq(0), seq(1))
case _ if seq.lengthCompare(3) <= 0 => Vector3(seq(0), seq(1), seq(2))
case _ if seq.lengthCompare(4) <= 0 => Vector4(seq(0), seq(1), seq(2), seq(3))
case vec: Vector[A] => VectorN(vec)
case _ => VectorN(Vector(seq: _*))
}
}
private[antixml] case object Vector0 extends VectorCase[Nothing] {
def length = 0
def +:[B](b: B) = Vector1(b)
def :+[B](b: B) = Vector1(b)
def apply(index: Int) = sys.error("Apply on empty vector")
def updated[B](index: Int, b: B) = sys.error("Updated on empty vector")
def ++[B](that: VectorCase[B]) = that
override def iterator = Iterator.empty
override def foreach[U](f: Nothing => U) {}
def toVector = Vector()
}
private[antixml] case class Vector1[+A](_1: A) extends VectorCase[A] {
def length = 1
def +:[B >: A](b: B) = Vector2(b, _1)
def :+[B >: A](b: B) = Vector2(_1, b)
def apply(index: Int) = {
if (index == 0)
_1
else
throw new IndexOutOfBoundsException(index.toString)
}
def updated[B >: A](index: Int, b: B) = {
if (index == 0)
Vector1(b)
else
throw new IndexOutOfBoundsException(index.toString)
}
def ++[B >: A](that: VectorCase[B]) = that match {
case Vector0 => this
case Vector1(_2) => Vector2(_1, _2)
case Vector2(_2, _3) => Vector3(_1, _2, _3)
case Vector3(_2, _3, _4) => Vector4(_1, _2, _3, _4)
case _: Vector4[B] | _: VectorN[B] =>
VectorN(_1 +: that.toVector)
}
override def foreach[U](f: A => U) {
f(_1)
}
// TODO more methods
def toVector = Vector(_1)
}
private[antixml] case class Vector2[+A](_1: A, _2: A) extends VectorCase[A] {
def length = 2
def +:[B >: A](b: B) = Vector3(b, _1, _2)
def :+[B >: A](b: B) = Vector3(_1, _2, b)
def apply(index: Int) = index match {
case 0 => _1
case 1 => _2
case _ => throw new IndexOutOfBoundsException(index.toString)
}
def updated[B >: A](index: Int, b: B) = index match {
case 0 => Vector2(b, _2)
case 1 => Vector2(_1, b)
case _ => throw new IndexOutOfBoundsException(index.toString)
}
def ++[B >: A](that: VectorCase[B]) = that match {
case Vector0 => this
case Vector1(_3) => Vector3(_1, _2, _3)
case Vector2(_3, _4) => Vector4(_1, _2, _3, _4)
case _: Vector3[B] | _: Vector4[B] | _: VectorN[B] =>
VectorN(Vector(_1, _2) ++ that.toVector)
}
override def foreach[U](f: A => U) {
f(_1)
f(_2)
}
// TODO more methods
def toVector = Vector(_1, _2)
}
private[antixml] case class Vector3[+A](_1: A, _2: A, _3: A) extends VectorCase[A] {
def length = 3
def +:[B >: A](b: B) = Vector4(b, _1, _2, _3)
def :+[B >: A](b: B) = Vector4(_1, _2, _3, b)
def apply(index: Int) = index match {
case 0 => _1
case 1 => _2
case 2 => _3
case _ => throw new IndexOutOfBoundsException(index.toString)
}
def updated[B >: A](index: Int, b: B) = index match {
case 0 => Vector3(b, _2, _3)
case 1 => Vector3(_1, b, _3)
case 2 => Vector3(_1, _2, b)
case _ => throw new IndexOutOfBoundsException(index.toString)
}
def ++[B >: A](that: VectorCase[B]) = that match {
case Vector0 => this
case Vector1(_4) => Vector4(_1, _2, _3, _4)
case _: Vector2[B] | _: Vector3[B] | _: Vector4[B] | _: VectorN[B] =>
VectorN(Vector(_1, _2, _3) ++ that.toVector)
}
override def foreach[U](f: A => U) {
f(_1)
f(_2)
f(_3)
}
// TODO more methods
def toVector = Vector(_1, _2, _3)
}
private[antixml] case class Vector4[+A](_1: A, _2: A, _3: A, _4: A) extends VectorCase[A] {
def length = 4
def +:[B >: A](b: B) = VectorN(Vector(b, _1, _2, _3, _4))
def :+[B >: A](b: B) = VectorN(Vector(_1, _2, _3, _4, b))
def apply(index: Int) = index match {
case 0 => _1
case 1 => _2
case 2 => _3
case 3 => _4
case _ => throw new IndexOutOfBoundsException(index.toString)
}
def updated[B >: A](index: Int, b: B) = index match {
case 0 => Vector4(b, _2, _3, _4)
case 1 => Vector4(_1, b, _3, _4)
case 2 => Vector4(_1, _2, b, _4)
case 3 => Vector4(_1, _2, _3, b)
case _ => throw new IndexOutOfBoundsException(index.toString)
}
def ++[B >: A](that: VectorCase[B]) = that match {
case Vector0 => this
case _: Vector1[B] | _: Vector2[B] | _: Vector3[B] | _: Vector4[B] | _: VectorN[B] =>
VectorN(Vector(_1, _2, _3, _4) ++ that.toVector)
}
override def foreach[U](f: A => U) {
f(_1)
f(_2)
f(_3)
f(_4)
}
// TODO more methods
def toVector = Vector(_1, _2, _3, _4)
}
private[antixml] case class VectorN[+A](vector: Vector[A]) extends VectorCase[A] {
def length = vector.length
def +:[B >:A](b: B) = VectorN(b +: vector)
def :+[B >:A](b: B) = VectorN(vector :+ b)
def apply(index: Int) = vector(index)
def updated[B >: A](index: Int, b: B) = VectorN(vector.updated(index, b))
def ++[B >: A](that: VectorCase[B]) = VectorN(vector ++ that.toVector)
override def drop(n: Int) = {
if (n <= 0) {
this
} else {
(vector.length - n) match {
case x if x <= 0 => Vector0
case 1 => Vector1(vector(vector.length - 1))
case 2 => Vector2(vector(vector.length - 2), vector(vector.length - 1))
case 3 => Vector3(vector(vector.length - 3), vector(vector.length - 2), vector(vector.length - 1))
case 4 => Vector4(vector(vector.length - 4), vector(vector.length - 3), vector(vector.length - 2), vector(vector.length - 1))
case _ => VectorN(vector drop n)
}
}
}
override def dropRight(n: Int) = {
if (n <= 0) {
this
} else {
(vector.length - n) match {
case x if x <= 0 => Vector0
case 1 => Vector1(vector(0))
case 2 => Vector2(vector(0), vector(1))
case 3 => Vector3(vector(0), vector(1), vector(2))
case 4 => Vector4(vector(0), vector(1), vector(2), vector(3))
case _ => VectorN(vector dropRight n)
}
}
}
override def init = (vector.length - 1) match {
case x if x <= 0 => Vector0
case 1 => Vector1(vector(0))
case 2 => Vector2(vector(0), vector(1))
case 3 => Vector3(vector(0), vector(1), vector(2))
case 4 => Vector4(vector(0), vector(1), vector(2), vector(3))
case _ => VectorN(vector.init)
}
override def slice(from: Int, until: Int) = take(until).drop(from)
override def splitAt(n: Int) = (take(n), drop(n))
override def tail = (vector.length - 1) match {
case x if x <= 0 => Vector0
case 1 => Vector1(vector(1))
case 2 => Vector2(vector(1), vector(2))
case 3 => Vector3(vector(1), vector(2), vector(3))
case 4 => Vector4(vector(1), vector(2), vector(3), vector(4))
case _ => VectorN(vector.tail)
}
override def take(n: Int) = {
if (n >= length) {
this
} else {
n match {
case x if x <= 0 => Vector0
case 1 => Vector1(vector(0))
case 2 => Vector2(vector(0), vector(1))
case 3 => Vector3(vector(0), vector(1), vector(2))
case 4 => Vector4(vector(0), vector(1), vector(2), vector(3))
case _ => VectorN(vector take n)
}
}
}
override def takeRight(n: Int) = {
if (n >= length) {
this
} else {
n match {
case x if x <= 0 => Vector0
case 1 => Vector1(vector(vector.length - 1))
case 2 => Vector2(vector(vector.length - 2), vector(vector.length - 1))
case 3 => Vector3(vector(vector.length - 3), vector(vector.length - 2), vector(vector.length - 1))
case 4 => Vector4(vector(vector.length - 4), vector(vector.length - 3), vector(vector.length - 2), vector(vector.length - 1))
case _ => VectorN(vector takeRight n)
}
}
}
// note: this actually defeats a HotSpot optimization in trivial micro-benchmarks
override def iterator = vector.iterator
override def foreach[U](f: A => U) {vector.foreach(f)}
def toVector = vector
}
| djspiewak/anti-xml | src/main/scala/com/codecommit/antixml/util/vectorCases.scala | Scala | bsd-3-clause | 11,992 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.ui
import java.util.Properties
import scala.collection.mutable.ListBuffer
import org.json4s.jackson.JsonMethods._
import org.scalatest.BeforeAndAfter
import org.apache.spark._
import org.apache.spark.LocalSparkContext._
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.internal.config
import org.apache.spark.internal.config.Status._
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler._
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
import org.apache.spark.sql.catalyst.util.quietly
import org.apache.spark.sql.execution.{LeafExecNode, QueryExecution, SparkPlanInfo, SQLExecution}
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import org.apache.spark.sql.internal.StaticSQLConf.UI_RETAINED_EXECUTIONS
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.status.ElementTrackingStore
import org.apache.spark.util.{AccumulatorMetadata, JsonProtocol, LongAccumulator}
import org.apache.spark.util.kvstore.InMemoryStore
class SQLAppStatusListenerSuite extends SharedSparkSession with JsonTestUtils
with BeforeAndAfter {
import testImplicits._
override protected def sparkConf = {
super.sparkConf.set(LIVE_ENTITY_UPDATE_PERIOD, 0L).set(ASYNC_TRACKING_ENABLED, false)
}
private var kvstore: ElementTrackingStore = _
after {
if (kvstore != null) {
kvstore.close()
kvstore = null
}
}
private def createTestDataFrame: DataFrame = {
Seq(
(1, 1),
(2, 2)
).toDF().filter("_1 > 1")
}
private def createProperties(executionId: Long): Properties = {
val properties = new Properties()
properties.setProperty(SQLExecution.EXECUTION_ID_KEY, executionId.toString)
properties
}
private def createStageInfo(stageId: Int, attemptId: Int): StageInfo = {
new StageInfo(stageId = stageId,
attemptId = attemptId,
// The following fields are not used in tests
name = "",
numTasks = 0,
rddInfos = Nil,
parentIds = Nil,
details = "")
}
private def createTaskInfo(
taskId: Int,
attemptNumber: Int,
accums: Map[Long, Long] = Map.empty): TaskInfo = {
val info = new TaskInfo(
taskId = taskId,
attemptNumber = attemptNumber,
// The following fields are not used in tests
index = 0,
launchTime = 0,
executorId = "",
host = "",
taskLocality = null,
speculative = false)
info.markFinished(TaskState.FINISHED, 1L)
info.setAccumulables(createAccumulatorInfos(accums))
info
}
private def createAccumulatorInfos(accumulatorUpdates: Map[Long, Long]): Seq[AccumulableInfo] = {
accumulatorUpdates.map { case (id, value) =>
val acc = new LongAccumulator
acc.metadata = AccumulatorMetadata(id, None, false)
acc.toInfo(Some(value), None)
}.toSeq
}
private def assertJobs(
exec: Option[SQLExecutionUIData],
running: Seq[Int] = Nil,
completed: Seq[Int] = Nil,
failed: Seq[Int] = Nil): Unit = {
val actualRunning = new ListBuffer[Int]()
val actualCompleted = new ListBuffer[Int]()
val actualFailed = new ListBuffer[Int]()
exec.get.jobs.foreach { case (jobId, jobStatus) =>
jobStatus match {
case JobExecutionStatus.RUNNING => actualRunning += jobId
case JobExecutionStatus.SUCCEEDED => actualCompleted += jobId
case JobExecutionStatus.FAILED => actualFailed += jobId
case _ => fail(s"Unexpected status $jobStatus")
}
}
assert(actualRunning.sorted === running)
assert(actualCompleted.sorted === completed)
assert(actualFailed.sorted === failed)
}
private def createStatusStore(): SQLAppStatusStore = {
val conf = sparkContext.conf
kvstore = new ElementTrackingStore(new InMemoryStore, conf)
val listener = new SQLAppStatusListener(conf, kvstore, live = true)
new SQLAppStatusStore(kvstore, Some(listener))
}
test("basic") {
def checkAnswer(actual: Map[Long, String], expected: Map[Long, Long]): Unit = {
assert(actual.size == expected.size)
expected.foreach { case (id, value) =>
      // The values in actual can be SQL metrics, meaning that they contain additional formatting
      // when converted to strings. Verify that they start with the expected value.
// TODO: this is brittle. There is no requirement that the actual string needs to start
// with the accumulator value.
assert(actual.contains(id))
val v = actual(id).trim
assert(v.startsWith(value.toString), s"Wrong value for accumulator $id")
}
}
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
val accumulatorIds =
SparkPlanGraph(SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan))
.allNodes.flatMap(_.metrics.map(_.accumulatorId))
// Assume all accumulators are long
var accumulatorValue = 0L
val accumulatorUpdates = accumulatorIds.map { id =>
accumulatorValue += 1L
(id, accumulatorValue)
}.toMap
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
listener.onJobStart(SparkListenerJobStart(
jobId = 0,
time = System.currentTimeMillis(),
stageInfos = Seq(
createStageInfo(0, 0),
createStageInfo(1, 0)
),
createProperties(executionId)))
listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(0, 0)))
assert(statusStore.executionMetrics(executionId).isEmpty)
listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
// (task id, stage id, stage attempt, accum updates)
(0L, 0, 0, createAccumulatorInfos(accumulatorUpdates)),
(1L, 0, 0, createAccumulatorInfos(accumulatorUpdates))
)))
checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 2))
    // Driver accumulator updates that don't belong to this execution should be filtered out,
    // and no exception should be thrown.
listener.onOtherEvent(SparkListenerDriverAccumUpdates(0, Seq((999L, 2L))))
checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 2))
listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
// (task id, stage id, stage attempt, accum updates)
(0L, 0, 0, createAccumulatorInfos(accumulatorUpdates)),
(1L, 0, 0, createAccumulatorInfos(accumulatorUpdates.mapValues(_ * 2)))
)))
checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 3))
// Retrying a stage should reset the metrics
listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(0, 1)))
listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
// (task id, stage id, stage attempt, accum updates)
(0L, 0, 1, createAccumulatorInfos(accumulatorUpdates)),
(1L, 0, 1, createAccumulatorInfos(accumulatorUpdates))
)))
checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 2))
// Ignore the task end for the first attempt
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 0,
stageAttemptId = 0,
taskType = "",
reason = null,
createTaskInfo(0, 0, accums = accumulatorUpdates.mapValues(_ * 100)),
new ExecutorMetrics,
null))
checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 2))
// Finish two tasks
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 0,
stageAttemptId = 1,
taskType = "",
reason = null,
createTaskInfo(0, 0, accums = accumulatorUpdates.mapValues(_ * 2)),
new ExecutorMetrics,
null))
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 0,
stageAttemptId = 1,
taskType = "",
reason = null,
createTaskInfo(1, 0, accums = accumulatorUpdates.mapValues(_ * 3)),
new ExecutorMetrics,
null))
checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 5))
    // Submit a new stage
listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(1, 0)))
listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
// (task id, stage id, stage attempt, accum updates)
(0L, 1, 0, createAccumulatorInfos(accumulatorUpdates)),
(1L, 1, 0, createAccumulatorInfos(accumulatorUpdates))
)))
checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 7))
// Finish two tasks
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 1,
stageAttemptId = 0,
taskType = "",
reason = null,
createTaskInfo(0, 0, accums = accumulatorUpdates.mapValues(_ * 3)),
new ExecutorMetrics,
null))
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 1,
stageAttemptId = 0,
taskType = "",
reason = null,
createTaskInfo(1, 0, accums = accumulatorUpdates.mapValues(_ * 3)),
new ExecutorMetrics,
null))
checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 11))
assertJobs(statusStore.execution(executionId), running = Seq(0))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 0,
time = System.currentTimeMillis(),
JobSucceeded
))
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
assertJobs(statusStore.execution(executionId), completed = Seq(0))
checkAnswer(statusStore.executionMetrics(executionId), accumulatorUpdates.mapValues(_ * 11))
}
test("onExecutionEnd happens before onJobEnd(JobSucceeded)") {
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
listener.onJobStart(SparkListenerJobStart(
jobId = 0,
time = System.currentTimeMillis(),
stageInfos = Nil,
createProperties(executionId)))
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 0,
time = System.currentTimeMillis(),
JobSucceeded
))
assertJobs(statusStore.execution(executionId), completed = Seq(0))
}
test("onExecutionEnd happens before multiple onJobEnd(JobSucceeded)s") {
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
listener.onJobStart(SparkListenerJobStart(
jobId = 0,
time = System.currentTimeMillis(),
stageInfos = Nil,
createProperties(executionId)))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 0,
time = System.currentTimeMillis(),
JobSucceeded
))
listener.onJobStart(SparkListenerJobStart(
jobId = 1,
time = System.currentTimeMillis(),
stageInfos = Nil,
createProperties(executionId)))
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 1,
time = System.currentTimeMillis(),
JobSucceeded
))
assertJobs(statusStore.execution(executionId), completed = Seq(0, 1))
}
test("onExecutionEnd happens before onJobEnd(JobFailed)") {
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
listener.onJobStart(SparkListenerJobStart(
jobId = 0,
time = System.currentTimeMillis(),
stageInfos = Seq.empty,
createProperties(executionId)))
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 0,
time = System.currentTimeMillis(),
JobFailed(new RuntimeException("Oops"))
))
assertJobs(statusStore.execution(executionId), failed = Seq(0))
}
test("onJobStart happens after onExecutionEnd shouldn't overwrite kvstore") {
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
listener.onJobStart(SparkListenerJobStart(
jobId = 0,
time = System.currentTimeMillis(),
stageInfos = Seq(createStageInfo(0, 0)),
createProperties(executionId)))
listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(0, 0)))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 0,
time = System.currentTimeMillis(),
JobFailed(new RuntimeException("Oops"))))
assert(listener.noLiveData())
assert(statusStore.execution(executionId).get.completionTime.nonEmpty)
}
test("handle one execution with multiple jobs") {
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
var stageId = 0
def twoStageJob(jobId: Int): Unit = {
val stages = Seq(stageId, stageId + 1).map { id => createStageInfo(id, 0)}
stageId += 2
listener.onJobStart(SparkListenerJobStart(
jobId = jobId,
time = System.currentTimeMillis(),
stageInfos = stages,
createProperties(executionId)))
stages.foreach { s =>
listener.onStageSubmitted(SparkListenerStageSubmitted(s))
listener.onStageCompleted(SparkListenerStageCompleted(s))
}
listener.onJobEnd(SparkListenerJobEnd(
jobId = jobId,
time = System.currentTimeMillis(),
JobSucceeded
))
}
// submit two jobs with the same executionId
twoStageJob(0)
twoStageJob(1)
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
assertJobs(statusStore.execution(0), completed = 0 to 1)
assert(statusStore.execution(0).get.stages === (0 to 3).toSet)
}
test("SPARK-11126: no memory leak when running non SQL jobs") {
val listener = spark.sharedState.statusStore.listener.get
// At the beginning of this test case, there should be no live data in the listener.
assert(listener.noLiveData())
spark.sparkContext.parallelize(1 to 10).foreach(i => ())
spark.sparkContext.listenerBus.waitUntilEmpty()
// Listener should ignore the non-SQL stages, as the stage data are only removed when SQL
// execution ends, which will not be triggered for non-SQL jobs.
assert(listener.noLiveData())
}
test("driver side SQL metrics") {
val statusStore = spark.sharedState.statusStore
val oldCount = statusStore.executionsList().size
val expectedAccumValue = 12345
val expectedAccumValue2 = 54321
val physicalPlan = MyPlan(sqlContext.sparkContext, expectedAccumValue, expectedAccumValue2)
val dummyQueryExecution = new QueryExecution(spark, LocalRelation()) {
override lazy val sparkPlan = physicalPlan
override lazy val executedPlan = physicalPlan
}
SQLExecution.withNewExecutionId(spark, dummyQueryExecution) {
physicalPlan.execute().collect()
}
// Wait until the new execution is started and being tracked.
while (statusStore.executionsCount() < oldCount) {
Thread.sleep(100)
}
// Wait for listener to finish computing the metrics for the execution.
while (statusStore.executionsList().isEmpty ||
statusStore.executionsList().last.metricValues == null) {
Thread.sleep(100)
}
val execId = statusStore.executionsList().last.executionId
val metrics = statusStore.executionMetrics(execId)
val driverMetric = physicalPlan.metrics("dummy")
val driverMetric2 = physicalPlan.metrics("dummy2")
val expectedValue = SQLMetrics.stringValue(driverMetric.metricType, Seq(expectedAccumValue))
val expectedValue2 = SQLMetrics.stringValue(driverMetric2.metricType, Seq(expectedAccumValue2))
assert(metrics.contains(driverMetric.id))
assert(metrics(driverMetric.id) === expectedValue)
assert(metrics.contains(driverMetric2.id))
assert(metrics(driverMetric2.id) === expectedValue2)
}
test("roundtripping SparkListenerDriverAccumUpdates through JsonProtocol (SPARK-18462)") {
val event = SparkListenerDriverAccumUpdates(1L, Seq((2L, 3L)))
val json = JsonProtocol.sparkEventToJson(event)
assertValidDataInJson(json,
parse("""
|{
| "Event": "org.apache.spark.sql.execution.ui.SparkListenerDriverAccumUpdates",
| "executionId": 1,
| "accumUpdates": [[2,3]]
|}
""".stripMargin))
JsonProtocol.sparkEventFromJson(json) match {
case SparkListenerDriverAccumUpdates(executionId, accums) =>
assert(executionId == 1L)
accums.foreach { case (a, b) =>
assert(a == 2L)
assert(b == 3L)
}
}
// Test a case where the numbers in the JSON can only fit in longs:
val longJson = parse(
"""
|{
| "Event": "org.apache.spark.sql.execution.ui.SparkListenerDriverAccumUpdates",
| "executionId": 4294967294,
| "accumUpdates": [[4294967294,3]]
|}
""".stripMargin)
JsonProtocol.sparkEventFromJson(longJson) match {
case SparkListenerDriverAccumUpdates(executionId, accums) =>
assert(executionId == 4294967294L)
accums.foreach { case (a, b) =>
assert(a == 4294967294L)
assert(b == 3L)
}
}
}
test("eviction should respect execution completion time") {
val conf = sparkContext.conf.clone().set(UI_RETAINED_EXECUTIONS.key, "2")
kvstore = new ElementTrackingStore(new InMemoryStore, conf)
val listener = new SQLAppStatusListener(conf, kvstore, live = true)
val statusStore = new SQLAppStatusStore(kvstore, Some(listener))
var time = 0
val df = createTestDataFrame
// Start execution 1 and execution 2
time += 1
listener.onOtherEvent(SparkListenerSQLExecutionStart(
1,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
time))
time += 1
listener.onOtherEvent(SparkListenerSQLExecutionStart(
2,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
time))
// Stop execution 2 before execution 1
time += 1
listener.onOtherEvent(SparkListenerSQLExecutionEnd(2, time))
time += 1
listener.onOtherEvent(SparkListenerSQLExecutionEnd(1, time))
// Start execution 3 and execution 2 should be evicted.
time += 1
listener.onOtherEvent(SparkListenerSQLExecutionStart(
3,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
time))
assert(statusStore.executionsCount === 2)
assert(statusStore.execution(2) === None)
}
}
/**
* A dummy [[org.apache.spark.sql.execution.SparkPlan]] that updates a [[SQLMetrics]]
* on the driver.
*/
private case class MyPlan(sc: SparkContext, expectedValue: Long, expectedValue2: Long)
extends LeafExecNode {
override def sparkContext: SparkContext = sc
override def output: Seq[Attribute] = Seq()
override val metrics: Map[String, SQLMetric] = Map(
"dummy" -> SQLMetrics.createMetric(sc, "dummy"),
"dummy2" -> SQLMetrics.createMetric(sc, "dummy2"))
override def doExecute(): RDD[InternalRow] = {
longMetric("dummy") += expectedValue
longMetric("dummy2") += expectedValue2
    // postDriverMetricUpdates may happen multiple times in a query.
// (normally from different operators, but for the sake of testing, from one operator)
SQLMetrics.postDriverMetricUpdates(
sc,
sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY),
Seq(metrics("dummy")))
SQLMetrics.postDriverMetricUpdates(
sc,
sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY),
Seq(metrics("dummy2")))
sc.emptyRDD
}
}
class SQLAppStatusListenerMemoryLeakSuite extends SparkFunSuite {
test("no memory leak") {
val conf = new SparkConf()
.setMaster("local")
.setAppName("test")
.set(config.TASK_MAX_FAILURES, 1) // Don't retry the tasks to run this test quickly
.set(UI_RETAINED_EXECUTIONS.key, "50") // Set it to 50 to run this test quickly
.set(ASYNC_TRACKING_ENABLED, false)
withSpark(new SparkContext(conf)) { sc =>
quietly {
val spark = new SparkSession(sc)
import spark.implicits._
// Run 100 successful executions and 100 failed executions.
// Each execution only has one job and one stage.
for (i <- 0 until 100) {
val df = Seq(
(1, 1),
(2, 2)
).toDF()
df.collect()
try {
df.foreach(_ => throw new RuntimeException("Oops"))
} catch {
case e: SparkException => // This is expected for a failed job
}
}
sc.listenerBus.waitUntilEmpty()
val statusStore = spark.sharedState.statusStore
assert(statusStore.executionsCount() <= 50)
assert(statusStore.planGraphCount() <= 50)
// No live data should be left behind after all executions end.
assert(statusStore.listener.get.noLiveData())
}
}
}
}
| bdrillard/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala | Scala | apache-2.0 | 23,932 |
import annotation.Annotation
object Test3 {
class E[T >: Nothing <: String](s: T) extends Annotation
class B
// val a = new E[B](new B)
@E[B](new B) val b = "hi"
}
object Test4 {
class E[T <: String](s: T) extends Annotation
class B
val b: String @E[B](new B) = "hi"
}
| yusuke2255/dotty | tests/untried/neg/t935.scala | Scala | bsd-3-clause | 285 |
package scala.slick.compiler
import scala.slick.ast._
import Util.nodeToNodeOps
import TypeUtil._
/** For SQL back-ends which do not support real boolean types for fields and
* general expressions but which do have special boolean expressions and
* operators, this phase injects conversions between fake and real boolean
* values.
*
* The default for booleans in the AST is to use the fake type. There are
* specific places where a real boolean is required or produced, so we
* inject a call to ToRealBoolean or ToFakeBoolean as needed. */
class RewriteBooleans extends Phase {
import RewriteBooleans._
val name = "rewriteBooleans"
def apply(state: CompilerState) =
state.map { n => ClientSideOp.mapServerSide(n)(rewriteRec) }
def rewriteRec(n: Node): Node = {
val n2 = n.nodeMapChildren(rewriteRec, true)
val n3 = rewrite(n2)
if(n3 ne n2) logger.debug(s"Rewriting $n2 to $n3")
n3
}
/** Rewrite a single Node. This method can be overridden in subclasses to
* change the situations in which conversions are applied. */
def rewrite(n: Node): Node = n match {
// These boolean operators accept and produce real booleans
case Apply(sym @ (Library.And | Library.Or | Library.Not), ch) =>
toFake(Apply(sym, ch.map(n => toReal(n)))(n.nodeType))
// All other boolean-typed operators produce real booleans but accept fake ones
case Apply(sym, ch) :@ tpe if isBooleanLike(tpe) =>
toFake(Apply(sym, ch)(n.nodeType))
// Where clauses, join conditions and case clauses need real boolean predicates
case n @ Comprehension(_, where, _, _, _, _, _) =>
n.copy(where = where.map(toReal)).nodeTyped(n.nodeType)
case n @ Join(_, _, _, _, _, on) =>
n.copy(on = toReal(on)).nodeTyped(n.nodeType)
case cond @ IfThenElse(_) =>
cond.mapConditionClauses(toReal)
case n => n
}
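  // Illustrative sketch of the rewrite above: an expression such as
  //   Apply(Library.And, Seq(a, b))
  // becomes
  //   ToFakeBoolean(Apply(Library.And, Seq(ToRealBoolean(a), ToRealBoolean(b))))
  // (modulo cancellation of adjacent conversions in toFake/toReal), so the boolean
  // operator sees real booleans while the overall expression keeps the default fake
  // representation expected by the rest of the tree.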
/** Create a conversion to a fake boolean, cancelling out an existing
* conversion to a real boolean. */
def toFake(n: Node) = n match {
case ToRealBoolean(ch) => ch
case _ => ToFakeBoolean.typed(n.nodeType, n)
}
/** Create a conversion to a real boolean, cancelling out an existing
* conversion to a fake boolean. */
def toReal(n: Node) = n match {
case ToFakeBoolean(ch) => ch
case _ => ToRealBoolean.typed(n.nodeType, n)
}
/** Check if a type is equivalent to the Scala Boolean type or a (possibly
* nested) Option of that type. */
def isBooleanLike(t: Type): Boolean = t match {
case t: TypedType[_] if t.scalaType == ScalaBaseType.booleanType => true
case t: OptionType => isBooleanLike(t.elementType)
case _ => false
}
}
object RewriteBooleans {
val ToFakeBoolean = new FunctionSymbol("RewriteBooleans.ToFakeBoolean")
val ToRealBoolean = new FunctionSymbol("RewriteBooleans.ToRealBoolean")
}
| nuodb/slick | src/main/scala/scala/slick/compiler/RewriteBooleans.scala | Scala | bsd-2-clause | 2,839 |
package smtlib
package theories
import trees.Terms._
import Operations._
object Ints {
object IntSort {
def apply(): Sort = {
Sort(Identifier(SSymbol("Int")))
}
def unapply(sort: Sort): Boolean = sort match {
case Sort(Identifier(SSymbol("Int"), Seq()), Seq()) => true
case _ => false
}
}
object NumeralLit {
def apply(value: BigInt): Term = SNumeral(value)
def unapply(term: Term): Option[BigInt] = term match {
case SNumeral(value) => Some(value)
case _ => None
}
}
object Divisible {
def apply(n: BigInt, t: Term): Term =
FunctionApplication(
QualifiedIdentifier(Identifier(SSymbol("divisible"), Seq(SNumeral(n)))),
Seq(t)
)
def unapply(term: Term): Option[(BigInt, Term)] = term match {
case FunctionApplication(
QualifiedIdentifier(
Identifier(SSymbol("divisible"), Seq(SNumeral(n))),
None
), Seq(t)) => Some((n, t))
case _ => None
}
}
object Neg extends Operation1 { override val name = "-" }
object Add extends Operation2 { override val name = "+" }
object Sub extends Operation2 { override val name = "-" }
object Mul extends Operation2 { override val name = "*" }
object Div extends Operation2 { override val name = "div" }
object Mod extends Operation2 { override val name = "mod" }
object Abs extends Operation1 { override val name = "abs" }
object LessThan extends Operation2 { override val name = "<" }
object LessEquals extends Operation2 { override val name = "<=" }
object GreaterThan extends Operation2 { override val name = ">" }
object GreaterEquals extends Operation2 { override val name = ">=" }
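  // Illustrative usage: GreaterEquals(Add(NumeralLit(1), NumeralLit(2)), NumeralLit(3))
  // builds the AST corresponding to the SMT-LIB term (>= (+ 1 2) 3).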
}
| regb/scala-smtlib | src/main/scala/smtlib/theories/Ints.scala | Scala | mit | 1,708 |
package io.continuum.bokeh
trait FillProps { self: HasFields with Vectorization =>
object fill_color extends Vectorized[Color](Color.Gray)
object fill_alpha extends Vectorized[Percent](1.0)
}
trait LineProps { self: HasFields with Vectorization =>
object line_color extends Vectorized[Color](Color.Black)
object line_width extends Vectorized[Double](1.0)
object line_alpha extends Vectorized[Percent]
object line_join extends Field[LineJoin]
object line_cap extends Field[LineCap]
object line_dash extends Field[List[Int]]
object line_dash_offset extends Field[Int]
}
trait TextProps { self: HasFields with Vectorization =>
object text_font extends Field[String]
object text_font_size extends Vectorized[FontSize](12 pt)
object text_font_style extends Field[FontStyle]
object text_color extends Vectorized[Color]("#444444")
object text_alpha extends Vectorized[Percent]
object text_align extends Field[TextAlign]
object text_baseline extends Field[TextBaseline]
}
| bokeh/bokeh-scala | bokeh/src/main/scala/Mixins.scala | Scala | mit | 1,031 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.gatling.core.Predef._
import io.gatling.jdbc.Predef._
import io.gatling.http.Predef._
class Feeders {
{
//#random-mail-generator
import scala.util.Random
val feeder = Iterator.continually(Map("email" -> (Random.alphanumeric.take(20).mkString + "@foo.com")))
//#random-mail-generator
//#feed
feed(feeder)
//#feed
//#feed-multiple
feed(feeder, 2)
//#feed-multiple
csv("foo")
//#strategies
.queue // default behavior: use an Iterator on the underlying sequence
.random // randomly pick an entry in the sequence
.shuffle // shuffle entries, then behave like queue
.circular // go back to the top of the sequence once the end is reached
//#strategies
}
{
//#feeder-from-array-with-random
val feeder = Array(
Map("foo" -> "foo1", "bar" -> "bar1"),
Map("foo" -> "foo2", "bar" -> "bar2"),
Map("foo" -> "foo3", "bar" -> "bar3")).random
//#feeder-from-array-with-random
}
{
//#sep-values-feeders
val csvFeeder = csv("foo.csv") // use a comma separator
val tsvFeeder = tsv("foo.tsv") // use a tabulation separator
val ssvFeeder = ssv("foo.ssv") // use a semicolon separator
val customSeparatorFeeder = separatedValues("foo.txt", '#') // use your own separator
//#sep-values-feeders
}
{
//#escape-char
val csvFeeder = csv("foo.csv", escapeChar = '\\\\')
//#escape-char
}
{
//#json-feeders
val jsonFileFeeder = jsonFile("foo.json")
val jsonUrlFeeder = jsonUrl("http://me.com/foo.json")
//#json-feeders
}
{
//#jdbc-feeder
jdbcFeeder("databaseUrl", "username", "password", "SELECT * FROM users")
//#jdbc-feeder
}
{
//#sitemap-feeder
val feeder = sitemap("/path/to/sitemap/file")
//#sitemap-feeder
}
{
//#redis-LPOP
import com.redis._
import io.gatling.redis.feeder.RedisFeeder
val redisPool = new RedisClientPool("localhost", 6379)
// use a list, so there's one single value per record, which is here named "foo"
val feeder = RedisFeeder(redisPool, "foo")
//#redis-LPOP
}
{
import com.redis._
import io.gatling.redis.feeder.RedisFeeder
val clientPool = new RedisClientPool("localhost", 6379)
//#redis-SPOP
// read data using SPOP command from a set named "foo"
val feeder = RedisFeeder(clientPool, "foo", RedisFeeder.SPOP)
//#redis-SPOP
}
{
//#redis-1million
import java.io.{ File, PrintWriter }
import io.gatling.redis.util.RedisHelper._
def generateOneMillionUrls(): Unit = {
val writer = new PrintWriter(new File("/tmp/loadtest.txt"))
try {
for (i <- 0 to 1000000) {
val url = "test?id=" + i
// note the list name "URLS" here
writer.write(generateRedisProtocol("LPUSH", "URLS", url))
}
} finally {
writer.close()
}
}
//#redis-1million
generateOneMillionUrls()
}
{
//#convert
csv("myFile.csv").convert {
case ("attributeThatShouldBeAnInt", string) => string.toInt
}
//#convert
}
{
//#non-shared
val records = csv("foo.csv").records
foreach(records, "record") {
exec(flattenMapIntoAttributes("${record}"))
}
//#non-shared
}
{
//#user-dependent-data
import io.gatling.core.feeder._
import java.util.concurrent.ThreadLocalRandom
// index records by project
val recordsByProject: Map[String, IndexedSeq[Record[String]]] =
csv("projectIssue.csv").records.groupBy{ record => record("project") }
// convert the Map values to get only the issues instead of the full records
val issuesByProject: Map[String, IndexedSeq[String]] =
recordsByProject.mapValues{ records => records.map {record => record("issue")} }
// inject project
feed(csv("userProject.csv"))
.exec { session =>
// fetch project from session
session("project").validate[String].map { project =>
// fetch project's issues
val issues = issuesByProject(project)
// randomly select an issue
val selectedIssue = issues(ThreadLocalRandom.current.nextInt(issues.length))
// inject the issue in the session
session.set("issue", selectedIssue)
}
}
//#user-dependent-data
}
}
| MykolaB/gatling | src/sphinx/session/code/Feeders.scala | Scala | apache-2.0 | 4,949 |
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.authenticator.format
import silhouette.authenticator.format.SatReads._
import silhouette.authenticator.{ Authenticator, AuthenticatorException, StatefulReads }
import scala.concurrent.{ ExecutionContext, Future }
/**
* A reads which transforms a SAT (simple authentication token) into an authenticator.
*
 * A simple authentication token is a string that cannot itself store authenticator-related data. Instead,
 * it needs a mapping between this string and the authenticator-related data, which is commonly handled
 * through a backing store.
 *
 * @param reader The reader that retrieves the [[Authenticator]] for the given token from the persistence layer.
 * @param ex     The execution context.
*/
final case class SatReads(reader: String => Future[Option[Authenticator]])(
implicit
ex: ExecutionContext
) extends StatefulReads[String] {
/**
* Transforms a simple authentication token into an [[Authenticator]].
*
* @param token The simple authentication token to transform.
* @return An authenticator on success, an error on failure.
*/
override def read(token: String): Future[Authenticator] = reader(token).map(_.getOrElse(
throw new AuthenticatorException(MissingAuthenticator.format(token))
))
}
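// Minimal usage sketch (hypothetical backing store): given a persistence-layer lookup such as
// `def find(token: String): Future[Option[Authenticator]]`, the transformer can be wired as
// `SatReads(find)`, and `read(token)` then either yields the stored authenticator or fails
// with an AuthenticatorException.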
/**
* The companion object.
*/
object SatReads {
val MissingAuthenticator: String = "Cannot get authenticator for token `%s` from given reader"
}
| mohiva/silhouette | modules/authenticator/src/main/scala/silhouette/authenticator/format/SatReads.scala | Scala | apache-2.0 | 2,176 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.testng.example
import org.scalatest._
import org.scalatest.testng._
import org.testng.annotations.Test
import org.testng.annotations.BeforeMethod
import org.testng.annotations.BeforeClass
import org.testng.annotations.BeforeSuite
import org.testng.annotations.AfterMethod
import org.testng.annotations.AfterClass
import org.testng.annotations.AfterSuite
import org.testng.annotations.DataProvider
class ExampleTestNGSuite extends TestNGSuite {
@AfterSuite
def failAfterSuite(){ throw new Exception("fail in before method") }
@BeforeMethod def passBeforeMethod(){}
@BeforeClass def passBeforeClass(){}
@BeforeSuite def passBeforeSuite(){}
@AfterMethod def passAfterMethod(){}
@AfterClass def passAfterClass(){}
@AfterSuite def passAfterSuite(){}
@Test(invocationCount = 10) def thisTestRunsTenTimes = {}
@Test(groups = Array("runMe"))
def testWithException(){
throw new Exception("exception!!!")
}
@Test(groups = Array("runMe")) def testWithAssertFail = assert( 1 === 2, "assert fail!!!" )
@Test(dependsOnMethods = Array("testWithException")) def testToGetSkipped = {}
@DataProvider(name = "andValues")
def andValues = {
val and = Array("0", "1")
for( x <- and; y <- and ) yield Array(x,y)
}
@Test(dataProvider = "andValues")
def testAndStates(a: String, b: String){
println("a=" + a + ", b=" + b)
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/testng/example/ExampleTestNGSuite.scala | Scala | apache-2.0 | 1,998 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd
import java.io.{ File, FileNotFoundException, InputStream }
import java.util.regex.Pattern
import htsjdk.samtools.{ IndexedBamInputFormat, SAMFileHeader, ValidationStringency }
import org.apache.avro.Schema
import org.apache.avro.file.DataFileStream
import org.apache.avro.generic.IndexedRecord
import org.apache.avro.specific.{ SpecificDatumReader, SpecificRecord, SpecificRecordBase }
import org.apache.hadoop.fs.{ FileStatus, FileSystem, Path }
import org.apache.hadoop.io.{ LongWritable, Text }
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.parquet.avro.{ AvroParquetInputFormat, AvroReadSupport }
import org.apache.parquet.filter2.predicate.FilterPredicate
import org.apache.parquet.hadoop.ParquetInputFormat
import org.apache.parquet.hadoop.util.ContextUtil
import org.apache.spark.rdd.MetricsContext._
import org.apache.spark.rdd.RDD
import org.apache.spark.{ Logging, SparkContext }
import org.bdgenomics.adam.converters._
import org.bdgenomics.adam.instrumentation.Timers._
import org.bdgenomics.adam.io._
import org.bdgenomics.adam.models._
import org.bdgenomics.adam.projections.{ AlignmentRecordField, NucleotideContigFragmentField, Projection }
import org.bdgenomics.adam.rdd.contig.NucleotideContigFragmentRDDFunctions
import org.bdgenomics.adam.rdd.features._
import org.bdgenomics.adam.rdd.fragment.FragmentRDDFunctions
import org.bdgenomics.adam.rdd.read.{
AlignedReadRDD,
AlignmentRecordRDD,
AlignmentRecordRDDFunctions,
UnalignedReadRDD
}
import org.bdgenomics.adam.rdd.variation._
import org.bdgenomics.adam.rich.RichAlignmentRecord
import org.bdgenomics.adam.util.{ TwoBitFile, ReferenceContigMap, ReferenceFile }
import org.bdgenomics.formats.avro._
import org.bdgenomics.utils.instrumentation.Metrics
import org.bdgenomics.utils.io.LocalFileByteAccess
import org.bdgenomics.utils.misc.HadoopUtil
import org.seqdoop.hadoop_bam._
import org.seqdoop.hadoop_bam.util.SAMHeaderReader
import scala.collection.JavaConversions._
import scala.collection.Map
import scala.reflect.ClassTag
object ADAMContext {
// Add ADAM Spark context methods
implicit def sparkContextToADAMContext(sc: SparkContext): ADAMContext = new ADAMContext(sc)
// Add generic RDD methods for all types of ADAM RDDs
implicit def rddToADAMRDD[T](rdd: RDD[T])(implicit ev1: T => IndexedRecord, ev2: Manifest[T]): ADAMRDDFunctions[T] = new ADAMRDDFunctions(rdd)
// Add methods specific to Read RDDs
implicit def rddToADAMRecordRDD(rdd: RDD[AlignmentRecord]) = new AlignmentRecordRDDFunctions(rdd)
implicit def rddToFragmentRDD(rdd: RDD[Fragment]) = new FragmentRDDFunctions(rdd)
// Add methods specific to the ADAMNucleotideContig RDDs
implicit def rddToContigFragmentRDD(rdd: RDD[NucleotideContigFragment]) = new NucleotideContigFragmentRDDFunctions(rdd)
// implicit conversions for variant related rdds
implicit def rddToVariantContextRDD(rdd: RDD[VariantContext]) = new VariantContextRDDFunctions(rdd)
implicit def rddToADAMGenotypeRDD(rdd: RDD[Genotype]) = new GenotypeRDDFunctions(rdd)
// add gene feature rdd functions
implicit def convertBaseFeatureRDDToFeatureRDD(rdd: RDD[Feature]) = new FeatureRDDFunctions(rdd)
// Add implicits for the rich adam objects
implicit def recordToRichRecord(record: AlignmentRecord): RichAlignmentRecord = new RichAlignmentRecord(record)
// implicit java to scala type conversions
implicit def listToJavaList[A](list: List[A]): java.util.List[A] = seqAsJavaList(list)
implicit def javaListToList[A](list: java.util.List[A]): List[A] = asScalaBuffer(list).toList
implicit def javaSetToSet[A](set: java.util.Set[A]): Set[A] = {
// toSet is necessary to make set immutable
asScalaSet(set).toSet
}
implicit def intListToJavaIntegerList(list: List[Int]): java.util.List[java.lang.Integer] = {
seqAsJavaList(list.map(i => i: java.lang.Integer))
}
// implicit def charSequenceToString(cs: CharSequence): String = cs.toString
// implicit def charSequenceToList(cs: CharSequence): List[Char] = cs.toCharArray.toList
implicit def mapToJavaMap[A, B](map: Map[A, B]): java.util.Map[A, B] = mapAsJavaMap(map)
implicit def javaMapToMap[A, B](map: java.util.Map[A, B]): Map[A, B] = mapAsScalaMap(map).toMap
implicit def iterableToJavaCollection[A](i: Iterable[A]): java.util.Collection[A] = asJavaCollection(i)
implicit def setToJavaSet[A](set: Set[A]): java.util.Set[A] = setAsJavaSet(set)
implicit def genomicRDDToRDD[T](gRdd: GenomicRDD[T]): RDD[T] = gRdd.rdd
}
import org.bdgenomics.adam.rdd.ADAMContext._
class ADAMContext(@transient val sc: SparkContext) extends Serializable with Logging {
private[rdd] def adamBamDictionaryLoad(filePath: String): SequenceDictionary = {
val samHeader = SAMHeaderReader.readSAMHeaderFrom(new Path(filePath), sc.hadoopConfiguration)
adamBamDictionaryLoad(samHeader)
}
private[rdd] def adamBamDictionaryLoad(samHeader: SAMFileHeader): SequenceDictionary = {
SequenceDictionary(samHeader)
}
private[rdd] def adamBamLoadReadGroups(samHeader: SAMFileHeader): RecordGroupDictionary = {
RecordGroupDictionary.fromSAMHeader(samHeader)
}
/**
* This method will create a new RDD.
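   *
   * An illustrative call (assuming AlignmentRecord data written by ADAM and the implicit
   * ADAMContext conversions in scope):
   * {{{
   * val reads: RDD[AlignmentRecord] = sc.loadParquet[AlignmentRecord]("/data/reads.adam")
   * }}}
   *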
* @param filePath The path to the input data
* @param predicate An optional pushdown predicate to use when reading the data
   * @param projection An optional projection schema to use when reading the data
* @tparam T The type of records to return
* @return An RDD with records of the specified type
*/
def loadParquet[T](
filePath: String,
predicate: Option[FilterPredicate] = None,
projection: Option[Schema] = None)(implicit ev1: T => SpecificRecord, ev2: Manifest[T]): RDD[T] = {
    // make sure a type was specified
    // not using require, to make the failure message clearer
if (manifest[T] == manifest[scala.Nothing])
throw new IllegalArgumentException("Type inference failed; when loading please specify a specific type. " +
"e.g.:\nval reads: RDD[AlignmentRecord] = ...\nbut not\nval reads = ...\nwithout a return type")
log.info("Reading the ADAM file at %s to create RDD".format(filePath))
val job = HadoopUtil.newJob(sc)
ParquetInputFormat.setReadSupportClass(job, classOf[AvroReadSupport[T]])
predicate.foreach { (pred) =>
log.info("Using the specified push-down predicate")
ParquetInputFormat.setFilterPredicate(job.getConfiguration, pred)
}
if (projection.isDefined) {
log.info("Using the specified projection schema")
AvroParquetInputFormat.setRequestedProjection(job, projection.get)
}
val records = sc.newAPIHadoopFile(
filePath,
classOf[ParquetInputFormat[T]],
classOf[Void],
manifest[T].runtimeClass.asInstanceOf[Class[T]],
ContextUtil.getConfiguration(job)
)
val instrumented = if (Metrics.isRecording) records.instrument() else records
val mapped = instrumented.map(p => p._2)
if (predicate.isDefined) {
// Strip the nulls that the predicate returns
mapped.filter(p => p != null.asInstanceOf[T])
} else {
mapped
}
}
/**
* This method should create a new SequenceDictionary from any parquet file which contains
* records that have the requisite reference{Name,Id,Length,Url} fields.
*
* (If the path is a BAM or SAM file, and the implicit type is an Read, then it just defaults
* to reading the SequenceDictionary out of the BAM header in the normal way.)
*
* @param filePath The path to the input data
* @tparam T The type of records to return
* @return A sequenceDictionary containing the names and indices of all the sequences to which the records
* in the corresponding file are aligned.
*/
def adamDictionaryLoad[T](filePath: String)(implicit ev1: T => SpecificRecord, ev2: Manifest[T]): SequenceDictionary = {
// This funkiness is required because (a) ADAMRecords require a different projection from any
// other flattened schema, and (b) because the SequenceRecord.fromADAMRecord, below, is going
    // to be called through a flatMap rather than through a map transformation on the underlying record RDD.
val isADAMRecord = classOf[AlignmentRecord].isAssignableFrom(manifest[T].runtimeClass)
val isADAMContig = classOf[NucleotideContigFragment].isAssignableFrom(manifest[T].runtimeClass)
val projection =
if (isADAMRecord) {
Projection(
AlignmentRecordField.contig,
AlignmentRecordField.mateContig,
AlignmentRecordField.readPaired,
AlignmentRecordField.firstOfPair,
AlignmentRecordField.readMapped,
AlignmentRecordField.mateMapped
)
} else if (isADAMContig) {
Projection(NucleotideContigFragmentField.contig)
} else {
Projection(AlignmentRecordField.contig)
}
if (filePath.endsWith(".bam") || filePath.endsWith(".sam")) {
if (isADAMRecord)
adamBamDictionaryLoad(filePath)
else
throw new IllegalArgumentException("If you're reading a BAM/SAM file, the record type must be Read")
} else {
val projected: RDD[T] = loadParquet[T](filePath, None, projection = Some(projection))
val recs: RDD[SequenceRecord] =
if (isADAMRecord) {
projected.asInstanceOf[RDD[AlignmentRecord]].distinct().flatMap(rec => SequenceRecord.fromADAMRecord(rec))
} else if (isADAMContig) {
projected.asInstanceOf[RDD[NucleotideContigFragment]].distinct().map(ctg => SequenceRecord.fromADAMContigFragment(ctg))
} else {
projected.distinct().map(SequenceRecord.fromSpecificRecord(_))
}
val dict = recs.aggregate(SequenceDictionary())(
(dict: SequenceDictionary, rec: SequenceRecord) => dict + rec,
(dict1: SequenceDictionary, dict2: SequenceDictionary) => dict1 ++ dict2
)
dict
}
}
/**
* Loads a SAM/BAM file.
*
* This reads the sequence and record group dictionaries from the SAM/BAM file
* header. SAMRecords are read from the file and converted to the
* AlignmentRecord schema.
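   *
   * Illustrative call (assuming the implicit ADAMContext conversions are in scope):
   * {{{
   * val reads: AlignmentRecordRDD = sc.loadBam("/data/sample.bam")
   * }}}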
*
* @param filePath Path to the file on disk.
*
* @return Returns an AlignmentRecordRDD which wraps the RDD of reads,
* sequence dictionary representing the contigs these reads are aligned to
* if the reads are aligned, and the record group dictionary for the reads
* if one is available.
*
* @see loadAlignments
*/
def loadBam(filePath: String): AlignmentRecordRDD = {
val path = new Path(filePath)
val fs =
Option(
FileSystem.get(path.toUri, sc.hadoopConfiguration)
).getOrElse(
throw new FileNotFoundException(
s"Couldn't find filesystem for ${path.toUri} with Hadoop configuration ${sc.hadoopConfiguration}"
)
)
val bamFiles =
Option(
if (fs.isDirectory(path)) fs.listStatus(path) else fs.globStatus(path)
).getOrElse(
throw new FileNotFoundException(
s"Couldn't find any files matching ${path.toUri}"
)
)
val (seqDict, readGroups) =
bamFiles
.map(fs => fs.getPath)
.flatMap(fp => {
try {
// We need to separately read the header, so that we can inject the sequence dictionary
// data into each individual Read (see the argument to samRecordConverter.convert,
// below).
val samHeader = SAMHeaderReader.readSAMHeaderFrom(fp, sc.hadoopConfiguration)
log.info("Loaded header from " + fp)
val sd = adamBamDictionaryLoad(samHeader)
val rg = adamBamLoadReadGroups(samHeader)
Some((sd, rg))
} catch {
case e: Throwable => {
log.error(
s"Loading failed for $fp:n${e.getMessage}\n\t${e.getStackTrace.take(25).map(_.toString).mkString("\n\t")}"
)
None
}
}
}).reduce((kv1, kv2) => {
(kv1._1 ++ kv2._1, kv1._2 ++ kv2._2)
})
val job = HadoopUtil.newJob(sc)
val records = sc.newAPIHadoopFile(filePath, classOf[AnySAMInputFormat], classOf[LongWritable],
classOf[SAMRecordWritable], ContextUtil.getConfiguration(job))
if (Metrics.isRecording) records.instrument() else records
val samRecordConverter = new SAMRecordConverter
AlignedReadRDD(records.map(p => samRecordConverter.convert(p._2.get, seqDict, readGroups)),
seqDict,
readGroups)
}
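  // Illustrative usage sketch (added): loading a hypothetical BAM path (a single file, a glob, or a
  // directory of BAM parts) and logging how many contigs the merged header dictionary describes.
  private def exampleLoadBamUsage(): AlignmentRecordRDD = {
    val reads = loadBam("/data/sample.bam")
    log.info("BAM header(s) describe " + reads.sequences.records.size + " contigs")
    reads
  }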
/**
   * Functions like loadBam, but uses the BAM index to read fewer blocks, returning only the
   * records that fall within a specified ReferenceRegion. A BAM index file is required.
   *
   * @param filePath The path to the input data. Currently this path must correspond to
   *        a single BAM file. The associated index must have the same name with a ".bai" suffix.
   * @param viewRegion The ReferenceRegion we are filtering on.
*/
def loadIndexedBam(
filePath: String, viewRegion: ReferenceRegion): RDD[AlignmentRecord] = {
val path = new Path(filePath)
val fs = FileSystem.get(path.toUri, sc.hadoopConfiguration)
assert(!fs.isDirectory(path))
val bamfile: Array[FileStatus] = fs.globStatus(path)
require(bamfile.size == 1)
val (seqDict, readGroups) = bamfile
.map(fs => fs.getPath)
.flatMap(fp => {
try {
// We need to separately read the header, so that we can inject the sequence dictionary
// data into each individual Read (see the argument to samRecordConverter.convert,
// below).
val samHeader = SAMHeaderReader.readSAMHeaderFrom(fp, sc.hadoopConfiguration)
log.info("Loaded header from " + fp)
val sd = adamBamDictionaryLoad(samHeader)
val rg = adamBamLoadReadGroups(samHeader)
Some((sd, rg))
} catch {
case _: Throwable => {
log.error("Loading failed for " + fp)
None
}
}
}).reduce((kv1, kv2) => {
(kv1._1 ++ kv2._1, kv1._2 ++ kv2._2)
})
val samDict = SAMHeaderReader.readSAMHeaderFrom(path, sc.hadoopConfiguration).getSequenceDictionary
IndexedBamInputFormat.setVars(
new Path(filePath),
new Path(filePath + ".bai"),
viewRegion,
samDict
)
val job = HadoopUtil.newJob(sc)
val records = sc.newAPIHadoopFile(filePath, classOf[IndexedBamInputFormat], classOf[LongWritable],
classOf[SAMRecordWritable], ContextUtil.getConfiguration(job))
if (Metrics.isRecording) records.instrument() else records
val samRecordConverter = new SAMRecordConverter
records.map(p => samRecordConverter.convert(p._2.get, seqDict, readGroups))
}
/**
* Loads Avro data from a Hadoop File System.
*
* This method uses the SparkContext wrapped by this class to identify our
   * underlying file system. We then use the underlying FileSystem implementation to
   * open the Avro file and read its records into a Seq.
*
* Frustratingly enough, although all records generated by the Avro IDL
* compiler have a static SCHEMA$ field, this field does not belong to
* the SpecificRecordBase abstract class, or the SpecificRecord interface.
* As such, we must force the user to pass in the schema.
*
* @tparam T The type of the specific record we are loading.
*
* @param filename Path to load file from.
* @param schema Schema of records we are loading.
*
* @return Returns a Seq containing the avro records.
*/
private def loadAvro[T <: SpecificRecordBase](filename: String,
schema: Schema)(
implicit tTag: ClassTag[T]): Seq[T] = {
// get our current file system
val fs = FileSystem.get(sc.hadoopConfiguration)
// get an input stream
val is = fs.open(new Path(filename))
.asInstanceOf[InputStream]
// set up avro for reading
val dr = new SpecificDatumReader[T](schema)
val fr = new DataFileStream[T](is, dr)
// get iterator and create an empty list
val iter = fr.iterator
var list = List.empty[T]
// !!!!!
// important implementation note:
// !!!!!
//
// in theory, we should be able to call iter.toSeq to get a Seq of the
// specific records we are reading. this would allow us to avoid needing
// to manually pop things into a list.
//
// however! this causes odd problems that seem to be related to some sort of
// lazy execution inside of scala. specifically, if you go
// iter.toSeq.map(fn) in scala, this seems to be compiled into a lazy data
// structure where the map call is only executed when the Seq itself is
// actually accessed (e.g., via seq.apply(i), seq.head, etc.). typically,
// this would be OK, but if the Seq[T] goes into a spark closure, the closure
    // cleaner will fail with a NotSerializableException, since SpecificRecords
    // are not Java serializable. specifically, we see this happen when using
// this function to load RecordGroupMetadata when creating a
// RecordGroupDictionary.
//
// good news is, you can work around this by explicitly walking the iterator
// and building a collection, which is what we do here. this would not be
// efficient if we were loading a large amount of avro data (since we're
// loading all the data into memory), but currently, we are just using this
// code for building sequence/record group dictionaries, which are fairly
// small (seq dict is O(30) entries, rgd is O(20n) entries, where n is the
// number of samples).
while (iter.hasNext) {
list = iter.next :: list
}
// close file
fr.close()
is.close()
// reverse list and return as seq
list.reverse
.toSeq
}
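  // Illustrative sketch (added): how the avro sidecar dictionaries are materialized with loadAvro,
  // mirroring the calls in loadParquetAlignments below; filePath is a hypothetical Parquet path.
  private def exampleLoadAvroDictionaries(filePath: String): (SequenceDictionary, RecordGroupDictionary) = {
    val contigs = loadAvro[Contig]("%s.seqdict".format(filePath), Contig.SCHEMA$)
    val recordGroups = loadAvro[RecordGroupMetadata]("%s.rgdict".format(filePath), RecordGroupMetadata.SCHEMA$)
    (new SequenceDictionary(contigs.map(SequenceRecord.fromADAMContig).toVector),
      new RecordGroupDictionary(recordGroups.map(RecordGroup.fromAvro)))
  }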
/**
* Loads alignment data from a Parquet file.
*
* @param filePath The path of the file to load.
* @param predicate An optional predicate to push down into the file.
* @param projection An optional schema designating the fields to project.
*
* @return Returns an AlignmentRecordRDD which wraps the RDD of reads,
* sequence dictionary representing the contigs these reads are aligned to
* if the reads are aligned, and the record group dictionary for the reads
* if one is available.
*
* @note The sequence dictionary is read from an avro file stored at
* filePath.seqdict and the record group dictionary is read from an
* avro file stored at filePath.rgdict. These files are pure avro,
* not Parquet.
*
* @see loadAlignments
*/
def loadParquetAlignments(
filePath: String,
predicate: Option[FilterPredicate] = None,
projection: Option[Schema] = None): AlignmentRecordRDD = {
// load from disk
val rdd = loadParquet[AlignmentRecord](filePath, predicate, projection)
val avroSd = loadAvro[Contig]("%s.seqdict".format(filePath),
Contig.SCHEMA$)
val avroRgd = loadAvro[RecordGroupMetadata]("%s.rgdict".format(filePath),
RecordGroupMetadata.SCHEMA$)
// convert avro to sequence dictionary
val sd = new SequenceDictionary(avroSd.map(SequenceRecord.fromADAMContig)
.toVector)
// convert avro to record group dictionary
val rgd = new RecordGroupDictionary(avroRgd.map(RecordGroup.fromAvro))
AlignedReadRDD(rdd, sd, rgd)
}
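  // Illustrative sketch (added): projecting a subset of AlignmentRecord fields when loading reads
  // from Parquet; the path and the chosen fields are hypothetical.
  private def exampleProjectedAlignmentLoad(): AlignmentRecordRDD = {
    val readFlagsProjection = Projection(AlignmentRecordField.contig, AlignmentRecordField.readMapped)
    loadParquetAlignments("/data/sample.alignments.adam", projection = Some(readFlagsProjection))
  }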
def loadInterleavedFastq(
filePath: String): AlignmentRecordRDD = {
val job = HadoopUtil.newJob(sc)
val records = sc.newAPIHadoopFile(
filePath,
classOf[InterleavedFastqInputFormat],
classOf[Void],
classOf[Text],
ContextUtil.getConfiguration(job)
)
if (Metrics.isRecording) records.instrument() else records
// convert records
val fastqRecordConverter = new FastqRecordConverter
UnalignedReadRDD.fromRdd(records.flatMap(fastqRecordConverter.convertPair))
}
def loadFastq(
filePath1: String,
filePath2Opt: Option[String],
recordGroupOpt: Option[String] = None,
stringency: ValidationStringency = ValidationStringency.STRICT): AlignmentRecordRDD = {
filePath2Opt match {
case Some(filePath2) => loadPairedFastq(filePath1, filePath2, recordGroupOpt, stringency)
case None => loadUnpairedFastq(filePath1, stringency = stringency)
}
}
def loadPairedFastq(
filePath1: String,
filePath2: String,
recordGroupOpt: Option[String],
stringency: ValidationStringency): AlignmentRecordRDD = {
val reads1 = loadUnpairedFastq(filePath1, setFirstOfPair = true, stringency = stringency)
val reads2 = loadUnpairedFastq(filePath2, setSecondOfPair = true, stringency = stringency)
stringency match {
case ValidationStringency.STRICT | ValidationStringency.LENIENT =>
val count1 = reads1.cache.count
val count2 = reads2.cache.count
if (count1 != count2) {
val msg = s"Fastq 1 ($filePath1) has $count1 reads, fastq 2 ($filePath2) has $count2 reads"
if (stringency == ValidationStringency.STRICT)
throw new IllegalArgumentException(msg)
else {
// ValidationStringency.LENIENT
logError(msg)
}
}
case ValidationStringency.SILENT =>
}
UnalignedReadRDD.fromRdd(reads1 ++ reads2)
}
def loadUnpairedFastq(
filePath: String,
recordGroupOpt: Option[String] = None,
setFirstOfPair: Boolean = false,
setSecondOfPair: Boolean = false,
stringency: ValidationStringency = ValidationStringency.STRICT): AlignmentRecordRDD = {
val job = HadoopUtil.newJob(sc)
val records = sc.newAPIHadoopFile(
filePath,
classOf[SingleFastqInputFormat],
classOf[Void],
classOf[Text],
ContextUtil.getConfiguration(job)
)
if (Metrics.isRecording) records.instrument() else records
// convert records
val fastqRecordConverter = new FastqRecordConverter
UnalignedReadRDD.fromRdd(records.map(
fastqRecordConverter.convertRead(
_,
recordGroupOpt.map(recordGroup =>
if (recordGroup.isEmpty)
filePath.substring(filePath.lastIndexOf("/") + 1)
else
recordGroup),
setFirstOfPair,
setSecondOfPair,
stringency
)
))
}
def loadVcf(filePath: String, sd: Option[SequenceDictionary]): RDD[VariantContext] = {
val job = HadoopUtil.newJob(sc)
val vcc = new VariantContextConverter(sd)
val records = sc.newAPIHadoopFile(
filePath,
classOf[VCFInputFormat], classOf[LongWritable], classOf[VariantContextWritable],
ContextUtil.getConfiguration(job)
)
if (Metrics.isRecording) records.instrument() else records
records.flatMap(p => vcc.convert(p._2.get))
}
def loadParquetGenotypes(
filePath: String,
predicate: Option[FilterPredicate] = None,
projection: Option[Schema] = None): RDD[Genotype] = {
loadParquet[Genotype](filePath, predicate, projection)
}
def loadParquetVariants(
filePath: String,
predicate: Option[FilterPredicate] = None,
projection: Option[Schema] = None): RDD[Variant] = {
loadParquet[Variant](filePath, predicate, projection)
}
def loadFasta(
filePath: String,
fragmentLength: Long): RDD[NucleotideContigFragment] = {
val fastaData: RDD[(LongWritable, Text)] = sc.newAPIHadoopFile(
filePath,
classOf[TextInputFormat],
classOf[LongWritable],
classOf[Text]
)
if (Metrics.isRecording) fastaData.instrument() else fastaData
val remapData = fastaData.map(kv => (kv._1.get, kv._2.toString))
FastaConverter(remapData, fragmentLength)
}
def loadInterleavedFastqAsFragments(
filePath: String): RDD[Fragment] = {
val job = HadoopUtil.newJob(sc)
val records = sc.newAPIHadoopFile(
filePath,
classOf[InterleavedFastqInputFormat],
classOf[Void],
classOf[Text],
ContextUtil.getConfiguration(job)
)
if (Metrics.isRecording) records.instrument() else records
// convert records
val fastqRecordConverter = new FastqRecordConverter
records.map(fastqRecordConverter.convertFragment)
}
def loadGTF(filePath: String): RDD[Feature] = {
val records = sc.textFile(filePath).flatMap(new GTFParser().parse)
if (Metrics.isRecording) records.instrument() else records
}
def loadBED(filePath: String): RDD[Feature] = {
val records = sc.textFile(filePath).flatMap(new BEDParser().parse)
if (Metrics.isRecording) records.instrument() else records
}
def loadNarrowPeak(filePath: String): RDD[Feature] = {
val records = sc.textFile(filePath).flatMap(new NarrowPeakParser().parse)
if (Metrics.isRecording) records.instrument() else records
}
def loadIntervalList(filePath: String): RDD[Feature] = {
val parsedLines = sc.textFile(filePath).map(new IntervalListParser().parse)
val (seqDict, records) = (SequenceDictionary(parsedLines.flatMap(_._1).collect(): _*), parsedLines.flatMap(_._2))
val seqDictMap = seqDict.records.map(sr => sr.name -> sr).toMap
val recordsWithContigs = for {
record <- records
seqRecord <- seqDictMap.get(record.getContig.getContigName)
} yield Feature.newBuilder(record)
.setContig(
Contig.newBuilder()
.setContigName(seqRecord.name)
.setReferenceURL(seqRecord.url.orNull)
.setContigMD5(seqRecord.md5.orNull)
.setContigLength(seqRecord.length)
.build()
)
.build()
if (Metrics.isRecording) recordsWithContigs.instrument() else recordsWithContigs
}
def loadParquetFeatures(
filePath: String,
predicate: Option[FilterPredicate] = None,
projection: Option[Schema] = None): RDD[Feature] = {
loadParquet[Feature](filePath, predicate, projection)
}
def loadParquetContigFragments(
filePath: String,
predicate: Option[FilterPredicate] = None,
projection: Option[Schema] = None): RDD[NucleotideContigFragment] = {
loadParquet[NucleotideContigFragment](filePath, predicate, projection)
}
def loadParquetFragments(
filePath: String,
predicate: Option[FilterPredicate] = None,
projection: Option[Schema] = None): RDD[Fragment] = {
loadParquet[Fragment](filePath, predicate, projection)
}
def loadVcfAnnotations(
filePath: String,
sd: Option[SequenceDictionary] = None): RDD[DatabaseVariantAnnotation] = {
val job = HadoopUtil.newJob(sc)
val vcc = new VariantContextConverter(sd)
val records = sc.newAPIHadoopFile(
filePath,
classOf[VCFInputFormat], classOf[LongWritable], classOf[VariantContextWritable],
ContextUtil.getConfiguration(job)
)
if (Metrics.isRecording) records.instrument() else records
records.map(p => vcc.convertToAnnotation(p._2.get))
}
def loadParquetVariantAnnotations(
filePath: String,
predicate: Option[FilterPredicate] = None,
projection: Option[Schema] = None): RDD[DatabaseVariantAnnotation] = {
loadParquet[DatabaseVariantAnnotation](filePath, predicate, projection)
}
def loadVariantAnnotations(
filePath: String,
projection: Option[Schema] = None,
sd: Option[SequenceDictionary] = None): RDD[DatabaseVariantAnnotation] = {
if (filePath.endsWith(".vcf")) {
log.info("Loading " + filePath + " as VCF, and converting to variant annotations. Projection is ignored.")
loadVcfAnnotations(filePath, sd)
} else {
log.info("Loading " + filePath + " as Parquet containing DatabaseVariantAnnotations.")
sd.foreach(sd => log.warn("Sequence dictionary for translation ignored if loading ADAM from Parquet."))
loadParquetVariantAnnotations(filePath, None, projection)
}
}
def loadFeatures(
filePath: String,
projection: Option[Schema] = None): RDD[Feature] = {
if (filePath.endsWith(".bed")) {
log.info(s"Loading $filePath as BED and converting to features. Projection is ignored.")
loadBED(filePath)
} else if (filePath.endsWith(".gtf") ||
filePath.endsWith(".gff")) {
log.info(s"Loading $filePath as GTF/GFF and converting to features. Projection is ignored.")
loadGTF(filePath)
} else if (filePath.endsWith(".narrowPeak") ||
filePath.endsWith(".narrowpeak")) {
log.info(s"Loading $filePath as NarrowPeak and converting to features. Projection is ignored.")
loadNarrowPeak(filePath)
} else if (filePath.endsWith(".interval_list")) {
log.info(s"Loading $filePath as IntervalList and converting to features. Projection is ignored.")
loadIntervalList(filePath)
} else {
log.info(s"Loading $filePath as Parquet containing Features.")
loadParquetFeatures(filePath, None, projection)
}
}
def loadGenes(
filePath: String,
projection: Option[Schema] = None): RDD[Gene] = {
import ADAMContext._
loadFeatures(filePath, projection).asGenes()
}
def loadReferenceFile(filePath: String, fragmentLength: Long): ReferenceFile = {
if (filePath.endsWith(".2bit")) {
//TODO(ryan): S3ByteAccess
new TwoBitFile(new LocalFileByteAccess(new File(filePath)))
} else {
ReferenceContigMap(loadSequence(filePath, fragmentLength = fragmentLength))
}
}
def loadSequence(
filePath: String,
projection: Option[Schema] = None,
fragmentLength: Long = 10000): RDD[NucleotideContigFragment] = {
if (filePath.endsWith(".fa") ||
filePath.endsWith(".fasta")) {
log.info("Loading " + filePath + " as FASTA and converting to NucleotideContigFragment. Projection is ignored.")
loadFasta(
filePath,
fragmentLength
)
} else {
log.info("Loading " + filePath + " as Parquet containing NucleotideContigFragments.")
loadParquetContigFragments(filePath, None, projection)
}
}
def loadGenotypes(
filePath: String,
projection: Option[Schema] = None,
sd: Option[SequenceDictionary] = None): RDD[Genotype] = {
if (filePath.endsWith(".vcf")) {
log.info("Loading " + filePath + " as VCF, and converting to Genotypes. Projection is ignored.")
loadVcf(filePath, sd).flatMap(_.genotypes)
} else {
log.info("Loading " + filePath + " as Parquet containing Genotypes. Sequence dictionary for translation is ignored.")
loadParquetGenotypes(filePath, None, projection)
}
}
def loadVariants(
filePath: String,
projection: Option[Schema] = None,
sd: Option[SequenceDictionary] = None): RDD[Variant] = {
if (filePath.endsWith(".vcf")) {
log.info("Loading " + filePath + " as VCF, and converting to Variants. Projection is ignored.")
loadVcf(filePath, sd).map(_.variant.variant)
} else {
log.info("Loading " + filePath + " as Parquet containing Variants. Sequence dictionary for translation is ignored.")
loadParquetVariants(filePath, None, projection)
}
}
/**
* Loads alignments from a given path, and infers the input type.
*
* This method can load:
*
* * AlignmentRecords via Parquet (default)
* * SAM/BAM (.sam, .bam)
* * FASTQ (interleaved, single end, paired end) (.ifq, .fq/.fastq)
* * FASTA (.fa, .fasta)
* * NucleotideContigFragments via Parquet (.contig.adam)
*
* As hinted above, the input type is inferred from the file path extension.
*
* @param filePath Path to load data from.
* @param projection The fields to project; ignored if not Parquet.
* @param filePath2Opt The path to load a second end of FASTQ data from.
* Ignored if not FASTQ.
* @param recordGroupOpt Optional record group name to set if loading FASTQ.
* @param stringency Validation stringency used on FASTQ import/merging.
*
* @return Returns an AlignmentRecordRDD which wraps the RDD of reads,
* sequence dictionary representing the contigs these reads are aligned to
* if the reads are aligned, and the record group dictionary for the reads
* if one is available.
*
* @see loadBam
* @see loadParquetAlignments
* @see loadInterleavedFastq
* @see loadFastq
* @see loadFasta
*/
def loadAlignments(
filePath: String,
projection: Option[Schema] = None,
filePath2Opt: Option[String] = None,
recordGroupOpt: Option[String] = None,
stringency: ValidationStringency = ValidationStringency.STRICT): AlignmentRecordRDD = LoadAlignmentRecords.time {
if (filePath.endsWith(".sam") ||
filePath.endsWith(".bam")) {
log.info("Loading " + filePath + " as SAM/BAM and converting to AlignmentRecords. Projection is ignored.")
loadBam(filePath)
} else if (filePath.endsWith(".ifq")) {
log.info("Loading " + filePath + " as interleaved FASTQ and converting to AlignmentRecords. Projection is ignored.")
loadInterleavedFastq(filePath)
} else if (filePath.endsWith(".fq") ||
filePath.endsWith(".fastq")) {
log.info("Loading " + filePath + " as unpaired FASTQ and converting to AlignmentRecords. Projection is ignored.")
loadFastq(filePath, filePath2Opt, recordGroupOpt, stringency)
} else if (filePath.endsWith(".fa") ||
filePath.endsWith(".fasta")) {
log.info("Loading " + filePath + " as FASTA and converting to AlignmentRecords. Projection is ignored.")
import ADAMContext._
UnalignedReadRDD(loadFasta(filePath, fragmentLength = 10000).toReads,
RecordGroupDictionary.empty)
} else if (filePath.endsWith("contig.adam")) {
log.info("Loading " + filePath + " as Parquet of NucleotideContigFragment and converting to AlignmentRecords. Projection is ignored.")
UnalignedReadRDD(loadParquet[NucleotideContigFragment](filePath).toReads,
RecordGroupDictionary.empty)
} else {
log.info("Loading " + filePath + " as Parquet of AlignmentRecords.")
loadParquetAlignments(filePath, None, projection)
}
}
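  // Illustrative sketch (added): how the extension-based dispatch above plays out for a few
  // hypothetical paths; the .fq call also supplies the second read file for a paired-end dataset.
  private def exampleLoadAlignmentsDispatch(): Seq[AlignmentRecordRDD] = {
    Seq(
      loadAlignments("/data/sample.bam"), // SAM/BAM branch
      loadAlignments("/data/reads_1.fq", filePath2Opt = Some("/data/reads_2.fq")), // paired FASTQ branch
      loadAlignments("/data/sample.alignments.adam") // Parquet default branch
    )
  }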
def loadFragments(filePath: String): RDD[Fragment] = LoadFragments.time {
if (filePath.endsWith(".sam") ||
filePath.endsWith(".bam")) {
log.info("Loading " + filePath + " as SAM/BAM and converting to Fragments.")
loadBam(filePath).rdd.toFragments
} else if (filePath.endsWith(".reads.adam")) {
log.info("Loading " + filePath + " as ADAM AlignmentRecords and converting to Fragments.")
loadAlignments(filePath).rdd.toFragments
} else if (filePath.endsWith(".ifq")) {
log.info("Loading interleaved FASTQ " + filePath + " and converting to Fragments.")
loadInterleavedFastqAsFragments(filePath)
} else {
loadParquetFragments(filePath)
}
}
/**
* Takes a sequence of Path objects and loads alignments using that path.
*
* This infers the type of each path, and thus can be used to load a mixture
* of different files from disk. I.e., if you want to load 2 BAM files and
* 3 Parquet files, this is the method you are looking for!
*
* The RDDs obtained from loading each file are simply unioned together,
* while the record group dictionaries are naively merged. The sequence
* dictionaries are merged in a way that dedupes the sequence records in
* each dictionary.
*
* @param paths The locations of the files to load.
* @return Returns an AlignmentRecordRDD which wraps the RDD of reads,
* sequence dictionary representing the contigs these reads are aligned to
* if the reads are aligned, and the record group dictionary for the reads
* if one is available.
*
* @see loadAlignments
*/
def loadAlignmentsFromPaths(paths: Seq[Path]): AlignmentRecordRDD = {
val alignmentData = paths.map(p => loadAlignments(p.toString))
val rdd = sc.union(alignmentData.map(_.rdd))
val sd = alignmentData.map(_.sequences).reduce(_ ++ _)
val rgd = alignmentData.map(_.recordGroups).reduce(_ ++ _)
AlignedReadRDD(rdd, sd, rgd)
}
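  // Illustrative sketch (added): unioning a BAM file and a Parquet file of reads in one call;
  // both paths are hypothetical.
  private def exampleMixedFormatLoad(): AlignmentRecordRDD = {
    loadAlignmentsFromPaths(Seq(
      new Path("/data/lane1.bam"),
      new Path("/data/lane2.alignments.adam")
    ))
  }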
/**
* Searches a path recursively, returning the names of all directories in the tree whose
* name matches the given regex.
*
* @param path The path to begin the search at
* @param regex A regular expression
* @return A sequence of Path objects corresponding to the identified directories.
*/
def findFiles(path: Path, regex: String): Seq[Path] = {
if (regex == null) {
Seq(path)
} else {
val statuses = FileSystem.get(sc.hadoopConfiguration).listStatus(path)
val r = Pattern.compile(regex)
val (matches, recurse) = statuses.filter(HadoopUtil.isDirectory).map(s => s.getPath).partition(p => r.matcher(p.getName).matches())
matches.toSeq ++ recurse.flatMap(p => findFiles(p, regex))
}
}
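  // Illustrative sketch (added): combining findFiles and loadAlignmentsFromPaths to load every
  // directory under a hypothetical output root whose name matches a regex.
  private def exampleFindAndLoad(): AlignmentRecordRDD = {
    loadAlignmentsFromPaths(findFiles(new Path("/data/out"), "sample_.*"))
  }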
}
| rnpandya/adam | adam-core/src/main/scala/org/bdgenomics/adam/rdd/ADAMContext.scala | Scala | apache-2.0 | 37,421 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
class FlatSpecMixedInMatchersSpec extends FlatSpec with Matchers {
"This spec" should "work OK" in {
"hello" should startWith ("he")
"hello" should endWith ("lo")
"hello" should include ("el")
"hello" should startWith regex ("h*")
"hello" should endWith regex (".*o")
"hello" should include regex ("l*")
}
it should "still work OK" in {
"dude" should not startWith ("he")
"dude" should not endWith ("lo")
"dude" should not include ("el")
"dude" should not startWith regex ("h*l")
"dude" should not endWith regex ("e*o")
"dude" should not include regex ("e*l")
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/FlatSpecMixedInMatchersSpec.scala | Scala | apache-2.0 | 1,245 |
package phoenix.collection.immutable.trees
import phoenix.collection.immutable.graphs.Graph
/**
* Created by Satya Prakash on 16/08/14.
*/
trait BinaryTree[+T] extends Tree[T] | Satyapr/data-structures-and-algorithms | Scala/src/phoenix/collection/immutable/trees/BinaryTree.scala | Scala | bsd-2-clause | 179 |
/*
* Copyright (c) 2011-2017 Interfaculty Department of Geoinformatics, University of
* Salzburg (Z_GIS) & Institute of Geological and Nuclear Sciences Limited (GNS Science)
* in the SMART Aquifer Characterisation (SAC) programme funded by the New Zealand
* Ministry of Business, Innovation and Employment (MBIE)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.owc
import java.net.URL
import java.sql.Connection
import java.util.UUID
import anorm.SqlParser._
import anorm._
import info.smart.models.owc100._
import utils.ClassnameLogger
import utils.StringUtils.OptionUuidConverters
/** ************
* OwcStyleSet
* ************/
object OwcStyleSetDAO extends ClassnameLogger {
/**
    * Row parser for OwcStyleSet rows. The optional content_uuid column is resolved to an
    * OwcContent via OwcContentDAO.findOwcContentByUuid; a missing or unknown uuid yields None.
    *
    * @return a RowParser producing OwcStyleSet instances
*/
private def owcStyleSetParser(implicit connection: Connection): RowParser[OwcStyleSet] = {
str("owc_stylesets.name") ~
str("title") ~
get[Option[String]]("owc_stylesets.abstrakt") ~
get[Option[Boolean]]("owc_stylesets.is_default") ~
get[Option[String]]("owc_stylesets.legend_url") ~
get[Option[String]]("owc_stylesets.content_uuid") ~
str("owc_stylesets.uuid") map {
case name ~ title ~ abstrakt ~ isDefault ~ legendUrl ~ content_uuid ~ uuidstring =>
OwcStyleSet(name = name,
legendUrl = legendUrl.map(new URL(_)),
title = title,
abstrakt = abstrakt,
default = isDefault,
content = content_uuid.map(u => u.toUuidOption.map(OwcContentDAO.findOwcContentByUuid(_)).getOrElse(None)).getOrElse(None),
uuid = UUID.fromString(uuidstring))
}
}
/**
* Retrieve all OwcStyleSet.
*
* @param connection implicit connection
* @return
*/
def getAllOwcStyleSets(implicit connection: Connection): Seq[OwcStyleSet] = {
SQL(s"select owc_stylesets.* from $tableOwcStyleSets").as(owcStyleSetParser *)
}
/**
* Find OwcStyleSets by uuid
*
* @param uuid
* @param connection implicit connection
* @return
*/
def findOwcStyleSetByUuid(uuid: UUID)(implicit connection: Connection): Option[OwcStyleSet] = {
SQL(s"""select owc_stylesets.* from $tableOwcStyleSets where uuid = '${uuid.toString}'""").as(owcStyleSetParser.singleOpt)
}
/**
* Create an OwcStyleSet.
*
* @param owcStyleSet
* @param connection implicit connection should be managed via transaction from calling entity
* @return
*/
def createOwcStyleSet(owcStyleSet: OwcStyleSet)(implicit connection: Connection): Option[OwcStyleSet] = {
val pre: Boolean = if (owcStyleSet.content.isDefined) {
val exists = OwcContentDAO.findOwcContentByUuid(owcStyleSet.content.get.uuid).isDefined
if (exists) {
logger.error(s"(createOwcStyleSet) OwcContent with UUID: ${owcStyleSet.content.get.uuid} exists already, won't create OwcStyleSet")
false
} else {
val insert = OwcContentDAO.createOwcContent(owcStyleSet.content.get)
insert.isDefined
}
} else {
true
}
if (pre) {
val rowCount = SQL(
s"""insert into $tableOwcStyleSets values (
{uuid}, {name}, {title}, {abstrakt}, {isDefault}, {legendUrl}, {content}
)""").on(
'uuid -> owcStyleSet.uuid.toString,
'name -> owcStyleSet.name,
'title -> owcStyleSet.title,
'abstrakt -> owcStyleSet.abstrakt,
'isDefault -> owcStyleSet.default,
'legendUrl -> owcStyleSet.legendUrl.map(_.toString),
'content -> owcStyleSet.content.map(_.uuid.toString)
).executeUpdate()
rowCount match {
case 1 => Some(owcStyleSet)
case _ => {
logger.error("OwcStyleSet couldn't be created")
None
}
}
} else {
logger.error("Precondition failed, won't create OwcStyleSet")
None
}
}
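  /**
    * Illustrative sketch (added, not part of the original DAO): a create-or-update helper showing
    * how the calling entity is expected to supply a single transactional Connection around the
    * find/create/update calls. The helper name is hypothetical.
    */
  private def exampleUpsertOwcStyleSet(owcStyleSet: OwcStyleSet)(implicit connection: Connection): Option[OwcStyleSet] = {
    findOwcStyleSetByUuid(owcStyleSet.uuid) match {
      case Some(_) => updateOwcStyleSet(owcStyleSet)
      case None => createOwcStyleSet(owcStyleSet)
    }
  }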
/**
* Update single OwcStyleSet
*
* @param owcStyleSet
* @param connection implicit connection should be managed via transaction from calling entity
* @return
*/
def updateOwcStyleSet(owcStyleSet: OwcStyleSet)(implicit connection: Connection): Option[OwcStyleSet] = {
val pre: Boolean = if (owcStyleSet.content.isDefined) {
val exists = OwcContentDAO.findOwcContentByUuid(owcStyleSet.content.get.uuid).isDefined
if (exists) {
val update = OwcContentDAO.updateOwcContent(owcStyleSet.content.get)
update.isDefined
} else {
val insert = OwcContentDAO.createOwcContent(owcStyleSet.content.get)
insert.isDefined
}
} else {
val toBeDeleted = findOwcStyleSetByUuid(owcStyleSet.uuid).map(_.content).getOrElse(None)
if (toBeDeleted.isDefined) {
OwcContentDAO.deleteOwcContent(toBeDeleted.get)
} else {
true
}
}
if (pre) {
val rowCount1 = SQL(
s"""update $tableOwcStyleSets set
name = {name},
title = {title},
abstrakt = {abstrakt},
is_default = {isDefault},
legend_url = {legendUrl},
content_uuid = {content} where uuid = {uuid}""").on(
'name -> owcStyleSet.name,
'title -> owcStyleSet.title,
'abstrakt -> owcStyleSet.abstrakt,
'isDefault -> owcStyleSet.default,
'legendUrl -> owcStyleSet.legendUrl.map(_.toString),
'content -> owcStyleSet.content.map(_.uuid.toString),
'uuid -> owcStyleSet.uuid.toString
).executeUpdate()
rowCount1 match {
case 1 => Some(owcStyleSet)
case _ => logger.error("OwcStyleSet couldn't be updated")
None
}
} else {
logger.error("Precondition failed, won't update OwcStyleSet")
None
}
}
/**
* delete an OwcStyleSet by uuid
*
* @param owcStyleSet
* @param connection implicit connection should be managed via transaction from calling entity
* @return
*/
def deleteOwcStyleSet(owcStyleSet: OwcStyleSet)(implicit connection: Connection): Boolean = {
val pre: Boolean = if (owcStyleSet.content.isDefined) {
val exists = OwcContentDAO.findOwcContentByUuid(owcStyleSet.content.get.uuid).isDefined
if (exists) {
val delete = OwcContentDAO.deleteOwcContent(owcStyleSet.content.get)
delete
} else {
true
}
} else {
true
}
if (pre) {
val rowCount = SQL(s"delete from $tableOwcStyleSets where uuid = {uuid}").on(
'uuid -> owcStyleSet.uuid.toString
).executeUpdate()
rowCount match {
case 1 => true
case _ => logger.error("OwcStyleSet couldn't be deleted")
false
}
} else {
logger.error("Precondition failed, won't delete OwcStyleSet")
false
}
}
}
| ZGIS/smart-portal-backend | app/models/owc/OwcStyleSetDAO.scala | Scala | apache-2.0 | 7,560 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.frontend.v2_3.parser
import org.neo4j.cypher.internal.frontend.v2_3.InputPosition
import org.parboiled.Context
import org.parboiled.buffers.InputBuffer
object BufferPosition {
def apply(buffer: InputBuffer, offset: Int): InputPosition = {
val position = buffer.getPosition(offset)
InputPosition(offset, position.line, position.column)
}
}
object ContextPosition {
def apply(ctx: Context[Any]): InputPosition =
BufferPosition(ctx.getInputBuffer, ctx.getMatchRange.start)
}
| HuangLS/neo4j | community/cypher/frontend-2.3/src/main/scala/org/neo4j/cypher/internal/frontend/v2_3/parser/BufferPosition.scala | Scala | apache-2.0 | 1,323 |
// See LICENSE.txt for license details.
package templates
import chisel3.iotesters.{PeekPokeTester, Driver, ChiselFlatSpec}
/**
* SingleCounter test harness
*/
class SingleCounterTests(c: SingleCounter) extends PeekPokeTester(c) {
// def check(wire: Any, value: Any, printon: Bool) {
// val a = peek(wire)
// if (printon) println("Expect " + a + " to be " + value)
// expect(wire, value)
// }
var numEnabledCycles = 0
var expectedCount = 0
var expectedDone = 0
val stops = List(64)
val strides = List(1, 6, 7)
val starts = List(0, 5)
step(1)
reset(1)
stops.foreach { stop =>
strides.foreach { stride =>
starts.foreach { start =>
numEnabledCycles = 0
var saturate = 1
val gap = 0
var enable = 1
def testOneStep() = {
step(1)
numEnabledCycles += enable
val count = saturate match {
case 1 =>
val count = if (start + numEnabledCycles * (gap + stride*c.par) < stop) {
(start + numEnabledCycles * (gap + stride*(c.par)))
} else {
if ((stop-start) % (gap + stride*(c.par)) == 0) (stop - (gap + stride*(c.par))) else stop - (stop-start) % (gap + stride*(c.par))
}
count
case 0 =>
val numSteps = ( (stop-start) / (gap + stride*c.par)) // integer type
val numUntilWrap = if (numSteps * (gap + stride*c.par) == (stop-start)) numSteps else numSteps+1
val numWrappedEnabledCycles = numEnabledCycles % numUntilWrap
val count = if (start + numWrappedEnabledCycles * (gap + stride*c.par) < stop) (start + numWrappedEnabledCycles * (gap + stride*(c.par))) else (stop - stop % (gap + stride*(c.par)))
count
}
val done = if ( (count + c.par*stride + gap >= stop) & (enable == 1) ) 1 else 0
// val a = peek(c.io.output.count(0))
// val b = peek(c.io.output.count(1))
// val d = peek(c.io.output.count(2))
// val cc = peek(c.io.output.done)
// println(s"SingleCounters at $a, $b, (want $count), stop $stop done? $cc expected? $done because ${(count + c.par*stride + gap)} satmode $saturate")
// if (cc != done | a != count | b != {count + stride} | d != {count + 2*stride}) println(" ERROR!!!!!!!!!!!!!! \\n\\n")
// Check signal values
(0 until c.par).foreach { i => expect(c.io.output.count(i), count + (i * stride)) }
expect(c.io.output.done, done)
expectedCount = count
expectedDone = done
}
poke(c.io.input.enable, 0)
poke(c.io.input.start, start)
step(5)
poke(c.io.input.stride, stride)
poke(c.io.input.reset, 1)
step(1)
poke(c.io.input.stop, stop)
poke(c.io.input.gap, gap)
poke(c.io.input.enable, enable)
poke(c.io.input.saturate, saturate)
poke(c.io.input.reset, 0)
for (i <- 1 until 5) {
testOneStep()
}
// Test stall
enable = 0
poke(c.io.input.enable, enable)
for (i <- 1 until 5) {
testOneStep()
}
// Continue
enable = 1
poke(c.io.input.enable, enable)
for (i <- 1 until stop) {
testOneStep()
}
// Reset and go again
numEnabledCycles = 0
poke(c.io.input.reset, 1)
step(1)
poke(c.io.input.reset, 0)
for (i <- 1 until stop+2) {
testOneStep()
}
// Reset and test non-saturating mode
saturate = 0
poke(c.io.input.saturate, saturate)
numEnabledCycles = 0
poke(c.io.input.reset, 1)
step(1)
poke(c.io.input.reset, 0)
for (i <- 1 until stop+2) {
testOneStep()
}
poke(c.io.input.enable, 0)
poke(c.io.input.reset, 1)
step(1)
reset(1)
poke(c.io.input.reset, 0)
}
}
}
}
class CompactingCounterTests(c: CompactingCounter) extends PeekPokeTester(c) {
// def check(wire: Any, value: Any, printon: Bool) {
// val a = peek(wire)
// if (printon) println("Expect " + a + " to be " + value)
// expect(wire, value)
// }
var numEnabledCycles = 0
var expectedCount = 0
var expectedDone = 0
step(1)
reset(1)
var enable = 0
def testOneStep(ens: Seq[Int]) = {
step(1)
val num_enabled = ens.reduce{_+_}
numEnabledCycles += num_enabled
(0 until c.lanes).foreach{i => poke(c.io.input.enables(i), ens(i))}
step(1)
(0 until c.lanes).foreach{i => poke(c.io.input.enables(i), 0)}
val done = if ( ((numEnabledCycles % c.depth) + num_enabled >= c.depth) & (enable == 1) ) 1 else 0
// val a = peek(c.io.output.count(0))
// val b = peek(c.io.output.count(1))
// val cc = peek(c.io.output.done)
// println(s"SingleCounters at $a, $b, (want $count), stop $stop done? $cc expected? $done because ${(count + c.par*stride + gap)} satmode $saturate")
// if (cc != done) println(" ERROR!!!!!!!!!!!!!! \\n\\n")
// Check signal values
expect(c.io.output.done, done)
// expect(c.io.output.count, numEnabledCycles % c.depth)
}
poke(c.io.input.dir, 1)
(0 until c.lanes).foreach{i => poke(c.io.input.enables(i), 0)}
step(5)
poke(c.io.input.reset, 1)
step(1)
poke(c.io.input.reset, 0)
step(1)
for (i <- 1 until 20) {
// Generate enable vector
val ens = (0 until c.lanes).map{i => rnd.nextInt(2)}
testOneStep(ens)
}
// Test stall
for (i <- 1 until 5) {
val ens = (0 until c.lanes).map{i => 0}
testOneStep(ens)
}
// Continue
for (i <- 1 until c.depth) {
// Generate enable vector
val ens = (0 until c.lanes).map{i => rnd.nextInt(2)}
testOneStep(ens)
}
// Reset and go again
numEnabledCycles = 0
poke(c.io.input.reset, 1)
step(1)
poke(c.io.input.reset, 0)
for (i <- 1 until c.depth) {
// Generate enable vector
val ens = (0 until c.lanes).map{i => rnd.nextInt(2)}
testOneStep(ens)
}
}
class CounterTests(c: Counter) extends PeekPokeTester(c) {
// Test triple nested counter
val depth = 3
var numEnabledCycles = 0
// var expectedCounts = List(0,0,0)
// var expectedDones = List(0,0,0)
val gap = List(0,0,0)
val stops = List(List(10,12,15), List(11,13,16), List(12,14,50))
val strides = List(List(3,3,3),List(1,1,1), List(3, 4, 5))
val start = List(0,0,0) // TODO: Test new starts
var enable = 1
var saturate = 1
step(1)
reset(1)
stops.foreach { stop =>
strides.foreach { stride =>
// println("------" + stop + "--------" + stride)
val alignedMax = stop.zip(stride).zip(c.par.reverse).map {case ((m,s),p) =>
if (m % (s*p) == 0) m else m - (m % (s*p)) + (s*p)
}
numEnabledCycles = 0
val stepSizes = c.par.reverse.zipWithIndex.map{case (p,i) => p*stride(i) + gap(i)}
val totalTicks = alignedMax.reduce{_*_} / stepSizes.reduce{_*_}
def testOneStep() = {
step(1)
if (enable == 1) numEnabledCycles += 1
val expectedCksum = numEnabledCycles
val done = if (numEnabledCycles == stop.reduce{_*_}) 1 else 0
c.par.reverse.zipWithIndex.foreach{ case (p,ii) =>
val i = c.par.length - ii - 1
val ticksToInc = (alignedMax.take(i+1).reduce{_*_} * stepSizes(i)) / (alignedMax(i) * stepSizes.take(i+1).reduce{_*_})
val period = ticksToInc*alignedMax(i) / stepSizes(i)
val increments = (numEnabledCycles) / ticksToInc
val base = if (saturate == 1) {
if (numEnabledCycles >= totalTicks) {
alignedMax(i) - c.par(ii)*stride(i)
} else {
(increments * stepSizes(i)) % alignedMax(i)
}
} else {
increments % alignedMax(i) // TODO: Not sure if this is correct, only testing saturating ctrs now
}
val ctrAddr = c.par.take(ii+1).reduce{_+_} - c.par(ii)
(0 until c.par(ii)).foreach{ k =>
val test = peek(c.io.output.counts(ctrAddr+k))
val expected = base + k*stride(i)
// if (test != base + k*stride(i)) {
// println(s"""Step ${numEnabledCycles}: (checking ctr${i}.${k} @ ${ctrAddr+k} (hw: ${test} =? ${base+k*stride(i)})
// tic each ${ticksToInc} from ${alignedMax.take(i+1).reduce{_*_}} / ${alignedMax(i)}),
// increments = ${increments}
// base = ${base} (incs % ${alignedMax(i)})
// """)
// }
// if (test != expected) println("WRONG!")
// println("[stat] counter " + {ctrAddr + k} + " is " + test + " but expected " + expected + "(" + base + "+" + k + "*" + stride(i) + ")")
expect(c.io.output.counts(ctrAddr+k), expected)
}
}
// println("")
}
enable = 0
poke(c.io.input.enable, enable)
c.io.input.starts.zipWithIndex.foreach{ case (wire, i) => poke(wire,start(start.length - 1 -i)) }
step(5)
c.io.input.stops.zipWithIndex.foreach{ case (wire, i) => poke(wire,stop(stop.length - 1 - i)) }
c.io.input.strides.zipWithIndex.foreach{ case (wire, i) => poke(wire,stride(stride.length - 1 - i)) }
poke(c.io.input.reset, 1)
step(1)
enable = 1
poke(c.io.input.enable, enable)
poke(c.io.input.saturate, saturate)
poke(c.io.input.reset, 0)
// for (i <- 0 until (totalTicks*1.1).toInt) {
for (i <- 0 until (20).toInt) {
testOneStep
}
enable = 0
poke(c.io.input.enable, enable)
poke(c.io.input.reset, 1)
step(1)
reset(1)
poke(c.io.input.reset, 0)
}
}
}
// class SingleCounterTester extends ChiselFlatSpec {
// behavior of "SingleCounter"
// backends foreach {backend =>
// it should s"correctly add randomly generated numbers $backend" in {
// Driver(() => new SingleCounter(3))(c => new SingleCounterTests(c)) should be (true)
// }
// }
// }
// class CounterTester extends ChiselFlatSpec {
// behavior of "Counter"
// backends foreach {backend =>
// it should s"correctly add randomly generated numbers $backend" in {
// Driver(() => new Counter(List(2,2,2)))(c => new CounterTests(c)) should be (true)
// }
// }
// }
| stanford-ppl/spatial-lang | spatial/core/resources/chiselgen/template-level/tests/templates/Counter.scala | Scala | mit | 10,321 |
object T1 {
trait T[A]
class C extends T[String]
object Test {
def main(args: Array[String]): Unit = {
classOf[C].getTypeParameters
}
}
}
object T2 {
trait T[A]
class C extends T[String]
object Test {
def main(args: Array[String]): Unit = {
val x = classOf[C]
x.getTypeParameters
}
}
}
object T3 {
trait T[A]
class C extends T[String]
object Test {
def main(args: Array[String]): Unit = {
val x: Class[C] = classOf[C]
x.getTypeParameters
}
}
} | loskutov/intellij-scala | testdata/scalacTests/pos/t4305.scala | Scala | apache-2.0 | 523 |
package progscala2.traits.ui
/**
* Created by younggi on 11/11/16.
*/
abstract class Widget
| younggi/books | programming_scala/progscala2/src/main/scala/progscala2/traits/ui/Widget.scala | Scala | mit | 95 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.zookeeper
import java.util.Locale
import java.util.concurrent.locks.{ReentrantLock, ReentrantReadWriteLock}
import java.util.concurrent.{ArrayBlockingQueue, ConcurrentHashMap, CountDownLatch, Semaphore, TimeUnit}
import com.yammer.metrics.core.{Gauge, MetricName}
import kafka.metrics.KafkaMetricsGroup
import kafka.utils.CoreUtils.{inLock, inReadLock, inWriteLock}
import kafka.utils.{KafkaScheduler, Logging}
import org.apache.kafka.common.utils.Time
import org.apache.zookeeper.AsyncCallback.{ACLCallback, Children2Callback, DataCallback, StatCallback, StringCallback, VoidCallback}
import org.apache.zookeeper.KeeperException.Code
import org.apache.zookeeper.Watcher.Event.{EventType, KeeperState}
import org.apache.zookeeper.ZooKeeper.States
import org.apache.zookeeper.data.{ACL, Stat}
import org.apache.zookeeper.{CreateMode, KeeperException, WatchedEvent, Watcher, ZooKeeper}
import scala.collection.JavaConverters._
import scala.collection.mutable.Set
/**
* A ZooKeeper client that encourages pipelined requests.
*
* @param connectString comma separated host:port pairs, each corresponding to a zk server
* @param sessionTimeoutMs session timeout in milliseconds
* @param connectionTimeoutMs connection timeout in milliseconds
* @param maxInFlightRequests maximum number of unacknowledged requests the client will send before blocking.
*/
class ZooKeeperClient(connectString: String,
sessionTimeoutMs: Int,
connectionTimeoutMs: Int,
maxInFlightRequests: Int,
time: Time,
metricGroup: String,
metricType: String) extends Logging with KafkaMetricsGroup {
this.logIdent = "[ZooKeeperClient] "
private val initializationLock = new ReentrantReadWriteLock()
private val isConnectedOrExpiredLock = new ReentrantLock()
private val isConnectedOrExpiredCondition = isConnectedOrExpiredLock.newCondition()
private val zNodeChangeHandlers = new ConcurrentHashMap[String, ZNodeChangeHandler]().asScala
private val zNodeChildChangeHandlers = new ConcurrentHashMap[String, ZNodeChildChangeHandler]().asScala
private val inFlightRequests = new Semaphore(maxInFlightRequests)
private val stateChangeHandlers = new ConcurrentHashMap[String, StateChangeHandler]().asScala
private[zookeeper] val expiryScheduler = new KafkaScheduler(threads = 1, "zk-session-expiry-handler")
private val metricNames = Set[String]()
// The state map has to be created before creating ZooKeeper since it's needed in the ZooKeeper callback.
private val stateToMeterMap = {
import KeeperState._
val stateToEventTypeMap = Map(
Disconnected -> "Disconnects",
SyncConnected -> "SyncConnects",
AuthFailed -> "AuthFailures",
ConnectedReadOnly -> "ReadOnlyConnects",
SaslAuthenticated -> "SaslAuthentications",
Expired -> "Expires"
)
stateToEventTypeMap.map { case (state, eventType) =>
val name = s"ZooKeeper${eventType}PerSec"
metricNames += name
state -> newMeter(name, eventType.toLowerCase(Locale.ROOT), TimeUnit.SECONDS)
}
}
info(s"Initializing a new session to $connectString.")
// Fail-fast if there's an error during construction (so don't call initialize, which retries forever)
@volatile private var zooKeeper = new ZooKeeper(connectString, sessionTimeoutMs, ZooKeeperClientWatcher)
newGauge("SessionState", new Gauge[String] {
override def value: String = Option(connectionState.toString).getOrElse("DISCONNECTED")
})
metricNames += "SessionState"
expiryScheduler.startup()
waitUntilConnected(connectionTimeoutMs, TimeUnit.MILLISECONDS)
override def metricName(name: String, metricTags: scala.collection.Map[String, String]): MetricName = {
explicitMetricName(metricGroup, metricType, name, metricTags)
}
/**
* Return the state of the ZooKeeper connection.
*/
def connectionState: States = zooKeeper.getState
/**
* Send a request and wait for its response. See handle(Seq[AsyncRequest]) for details.
*
* @param request a single request to send and wait on.
* @return an instance of the response with the specific type (e.g. CreateRequest -> CreateResponse).
*/
def handleRequest[Req <: AsyncRequest](request: Req): Req#Response = {
handleRequests(Seq(request)).head
}
/**
* Send a pipelined sequence of requests and wait for all of their responses.
*
* The watch flag on each outgoing request will be set if we've already registered a handler for the
* path associated with the request.
*
* @param requests a sequence of requests to send and wait on.
* @return the responses for the requests. If all requests have the same type, the responses will have the respective
* response type (e.g. Seq[CreateRequest] -> Seq[CreateResponse]). Otherwise, the most specific common supertype
* will be used (e.g. Seq[AsyncRequest] -> Seq[AsyncResponse]).
*/
def handleRequests[Req <: AsyncRequest](requests: Seq[Req]): Seq[Req#Response] = {
if (requests.isEmpty)
Seq.empty
else {
val countDownLatch = new CountDownLatch(requests.size)
val responseQueue = new ArrayBlockingQueue[Req#Response](requests.size)
requests.foreach { request =>
inFlightRequests.acquire()
try {
inReadLock(initializationLock) {
send(request) { response =>
responseQueue.add(response)
inFlightRequests.release()
countDownLatch.countDown()
}
}
} catch {
case e: Throwable =>
inFlightRequests.release()
throw e
}
}
countDownLatch.await()
responseQueue.asScala.toBuffer
}
}
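  // Illustrative sketch (added, not part of the original client): batching several znode reads in
  // one pipelined call; `paths` is a hypothetical list of znode paths. The Req#Response type member
  // means the result is already typed as Seq[GetDataResponse].
  private def exampleBatchedGetData(paths: Seq[String]): Seq[GetDataResponse] = {
    val responses = handleRequests(paths.map(path => GetDataRequest(path)))
    responses.foreach(_.maybeThrow())
    responses
  }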
// Visibility to override for testing
private[zookeeper] def send[Req <: AsyncRequest](request: Req)(processResponse: Req#Response => Unit): Unit = {
// Safe to cast as we always create a response of the right type
def callback(response: AsyncResponse): Unit = processResponse(response.asInstanceOf[Req#Response])
def responseMetadata(sendTimeMs: Long) = new ResponseMetadata(sendTimeMs, receivedTimeMs = time.hiResClockMs())
val sendTimeMs = time.hiResClockMs()
request match {
case ExistsRequest(path, ctx) =>
zooKeeper.exists(path, shouldWatch(request), new StatCallback {
override def processResult(rc: Int, path: String, ctx: Any, stat: Stat): Unit =
callback(ExistsResponse(Code.get(rc), path, Option(ctx), stat, responseMetadata(sendTimeMs)))
}, ctx.orNull)
case GetDataRequest(path, ctx) =>
zooKeeper.getData(path, shouldWatch(request), new DataCallback {
override def processResult(rc: Int, path: String, ctx: Any, data: Array[Byte], stat: Stat): Unit =
callback(GetDataResponse(Code.get(rc), path, Option(ctx), data, stat, responseMetadata(sendTimeMs)))
}, ctx.orNull)
case GetChildrenRequest(path, ctx) =>
zooKeeper.getChildren(path, shouldWatch(request), new Children2Callback {
override def processResult(rc: Int, path: String, ctx: Any, children: java.util.List[String], stat: Stat): Unit =
callback(GetChildrenResponse(Code.get(rc), path, Option(ctx),
Option(children).map(_.asScala).getOrElse(Seq.empty), stat, responseMetadata(sendTimeMs)))
}, ctx.orNull)
case CreateRequest(path, data, acl, createMode, ctx) =>
zooKeeper.create(path, data, acl.asJava, createMode, new StringCallback {
override def processResult(rc: Int, path: String, ctx: Any, name: String): Unit =
callback(CreateResponse(Code.get(rc), path, Option(ctx), name, responseMetadata(sendTimeMs)))
}, ctx.orNull)
case SetDataRequest(path, data, version, ctx) =>
zooKeeper.setData(path, data, version, new StatCallback {
override def processResult(rc: Int, path: String, ctx: Any, stat: Stat): Unit =
callback(SetDataResponse(Code.get(rc), path, Option(ctx), stat, responseMetadata(sendTimeMs)))
}, ctx.orNull)
case DeleteRequest(path, version, ctx) =>
zooKeeper.delete(path, version, new VoidCallback {
override def processResult(rc: Int, path: String, ctx: Any): Unit =
callback(DeleteResponse(Code.get(rc), path, Option(ctx), responseMetadata(sendTimeMs)))
}, ctx.orNull)
case GetAclRequest(path, ctx) =>
zooKeeper.getACL(path, null, new ACLCallback {
override def processResult(rc: Int, path: String, ctx: Any, acl: java.util.List[ACL], stat: Stat): Unit = {
callback(GetAclResponse(Code.get(rc), path, Option(ctx), Option(acl).map(_.asScala).getOrElse(Seq.empty),
stat, responseMetadata(sendTimeMs)))
}}, ctx.orNull)
case SetAclRequest(path, acl, version, ctx) =>
zooKeeper.setACL(path, acl.asJava, version, new StatCallback {
override def processResult(rc: Int, path: String, ctx: Any, stat: Stat): Unit =
callback(SetAclResponse(Code.get(rc), path, Option(ctx), stat, responseMetadata(sendTimeMs)))
}, ctx.orNull)
}
}
/**
   * Wait indefinitely until the underlying ZooKeeper client reaches the CONNECTED state.
* @throws ZooKeeperClientAuthFailedException if the authentication failed either before or while waiting for connection.
* @throws ZooKeeperClientExpiredException if the session expired either before or while waiting for connection.
*/
def waitUntilConnected(): Unit = inLock(isConnectedOrExpiredLock) {
waitUntilConnected(Long.MaxValue, TimeUnit.MILLISECONDS)
}
private def waitUntilConnected(timeout: Long, timeUnit: TimeUnit): Unit = {
info("Waiting until connected.")
var nanos = timeUnit.toNanos(timeout)
inLock(isConnectedOrExpiredLock) {
var state = connectionState
while (!state.isConnected && state.isAlive) {
if (nanos <= 0) {
throw new ZooKeeperClientTimeoutException(s"Timed out waiting for connection while in state: $state")
}
nanos = isConnectedOrExpiredCondition.awaitNanos(nanos)
state = connectionState
}
if (state == States.AUTH_FAILED) {
throw new ZooKeeperClientAuthFailedException("Auth failed either before or while waiting for connection")
} else if (state == States.CLOSED) {
throw new ZooKeeperClientExpiredException("Session expired either before or while waiting for connection")
}
}
info("Connected.")
}
// If this method is changed, the documentation for registerZNodeChangeHandler and/or registerZNodeChildChangeHandler
// may need to be updated.
private def shouldWatch(request: AsyncRequest): Boolean = request match {
case _: GetChildrenRequest => zNodeChildChangeHandlers.contains(request.path)
case _: ExistsRequest | _: GetDataRequest => zNodeChangeHandlers.contains(request.path)
case _ => throw new IllegalArgumentException(s"Request $request is not watchable")
}
/**
* Register the handler to ZooKeeperClient. This is just a local operation. This does not actually register a watcher.
*
* The watcher is only registered once the user calls handle(AsyncRequest) or handle(Seq[AsyncRequest])
* with either a GetDataRequest or ExistsRequest.
*
* NOTE: zookeeper only allows registration to a nonexistent znode with ExistsRequest.
*
* @param zNodeChangeHandler the handler to register
*/
def registerZNodeChangeHandler(zNodeChangeHandler: ZNodeChangeHandler): Unit = {
zNodeChangeHandlers.put(zNodeChangeHandler.path, zNodeChangeHandler)
}
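  // Illustrative sketch (added): registering a data-change handler is purely local; the ZooKeeper
  // watch is only armed by the ExistsRequest that follows (which also works for a znode that does
  // not exist yet). `znodePath` and `onChange` are assumptions of this sketch.
  private def exampleWatchZNode(znodePath: String)(onChange: () => Unit): ExistsResponse = {
    registerZNodeChangeHandler(new ZNodeChangeHandler {
      override val path: String = znodePath
      override def handleDataChange(): Unit = onChange()
    })
    handleRequest(ExistsRequest(znodePath))
  }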
/**
* Unregister the handler from ZooKeeperClient. This is just a local operation.
* @param path the path of the handler to unregister
*/
def unregisterZNodeChangeHandler(path: String): Unit = {
zNodeChangeHandlers.remove(path)
}
/**
* Register the handler to ZooKeeperClient. This is just a local operation. This does not actually register a watcher.
*
* The watcher is only registered once the user calls handle(AsyncRequest) or handle(Seq[AsyncRequest]) with a GetChildrenRequest.
*
* @param zNodeChildChangeHandler the handler to register
*/
def registerZNodeChildChangeHandler(zNodeChildChangeHandler: ZNodeChildChangeHandler): Unit = {
zNodeChildChangeHandlers.put(zNodeChildChangeHandler.path, zNodeChildChangeHandler)
}
/**
* Unregister the handler from ZooKeeperClient. This is just a local operation.
* @param path the path of the handler to unregister
*/
def unregisterZNodeChildChangeHandler(path: String): Unit = {
zNodeChildChangeHandlers.remove(path)
}
/**
   * Register a handler that is notified before and after session re-initialization and on
   * authentication failure. This is a local operation.
   *
   * @param stateChangeHandler the handler to register; ignored if null
*/
def registerStateChangeHandler(stateChangeHandler: StateChangeHandler): Unit = inReadLock(initializationLock) {
if (stateChangeHandler != null)
stateChangeHandlers.put(stateChangeHandler.name, stateChangeHandler)
}
/**
   * Unregister the state change handler with the given name. This is a local operation.
   *
   * @param name the name of the handler to unregister
*/
def unregisterStateChangeHandler(name: String): Unit = inReadLock(initializationLock) {
stateChangeHandlers.remove(name)
}
def close(): Unit = {
info("Closing.")
inWriteLock(initializationLock) {
zNodeChangeHandlers.clear()
zNodeChildChangeHandlers.clear()
stateChangeHandlers.clear()
zooKeeper.close()
metricNames.foreach(removeMetric(_))
}
// Shutdown scheduler outside of lock to avoid deadlock if scheduler
// is waiting for lock to process session expiry
expiryScheduler.shutdown()
info("Closed.")
}
def sessionId: Long = inReadLock(initializationLock) {
zooKeeper.getSessionId
}
// Only for testing
private[kafka] def currentZooKeeper: ZooKeeper = inReadLock(initializationLock) {
zooKeeper
}
private def reinitialize(): Unit = {
// Initialization callbacks are invoked outside of the lock to avoid deadlock potential since their completion
// may require additional Zookeeper requests, which will block to acquire the initialization lock
stateChangeHandlers.values.foreach(callBeforeInitializingSession _)
inWriteLock(initializationLock) {
if (!connectionState.isAlive) {
zooKeeper.close()
info(s"Initializing a new session to $connectString.")
// retry forever until ZooKeeper can be instantiated
var connected = false
while (!connected) {
try {
zooKeeper = new ZooKeeper(connectString, sessionTimeoutMs, ZooKeeperClientWatcher)
connected = true
} catch {
case e: Exception =>
info("Error when recreating ZooKeeper, retrying after a short sleep", e)
Thread.sleep(1000)
}
}
}
}
stateChangeHandlers.values.foreach(callAfterInitializingSession _)
}
/**
* Close the zookeeper client to force session reinitialization. This is visible for testing only.
*/
private[zookeeper] def forceReinitialize(): Unit = {
zooKeeper.close()
reinitialize()
}
private def callBeforeInitializingSession(handler: StateChangeHandler): Unit = {
try {
handler.beforeInitializingSession()
} catch {
case t: Throwable =>
error(s"Uncaught error in handler ${handler.name}", t)
}
}
private def callAfterInitializingSession(handler: StateChangeHandler): Unit = {
try {
handler.afterInitializingSession()
} catch {
case t: Throwable =>
error(s"Uncaught error in handler ${handler.name}", t)
}
}
// Visibility for testing
private[zookeeper] def scheduleSessionExpiryHandler(): Unit = {
expiryScheduler.scheduleOnce("zk-session-expired", () => {
info("Session expired.")
reinitialize()
})
}
// package level visibility for testing only
private[zookeeper] object ZooKeeperClientWatcher extends Watcher {
override def process(event: WatchedEvent): Unit = {
debug(s"Received event: $event")
Option(event.getPath) match {
case None =>
val state = event.getState
stateToMeterMap.get(state).foreach(_.mark())
inLock(isConnectedOrExpiredLock) {
isConnectedOrExpiredCondition.signalAll()
}
if (state == KeeperState.AuthFailed) {
error("Auth failed.")
stateChangeHandlers.values.foreach(_.onAuthFailure())
} else if (state == KeeperState.Expired) {
scheduleSessionExpiryHandler()
}
case Some(path) =>
(event.getType: @unchecked) match {
case EventType.NodeChildrenChanged => zNodeChildChangeHandlers.get(path).foreach(_.handleChildChange())
case EventType.NodeCreated => zNodeChangeHandlers.get(path).foreach(_.handleCreation())
case EventType.NodeDeleted => zNodeChangeHandlers.get(path).foreach(_.handleDeletion())
case EventType.NodeDataChanged => zNodeChangeHandlers.get(path).foreach(_.handleDataChange())
}
}
}
}
}
trait StateChangeHandler {
val name: String
def beforeInitializingSession(): Unit = {}
def afterInitializingSession(): Unit = {}
def onAuthFailure(): Unit = {}
}
trait ZNodeChangeHandler {
val path: String
def handleCreation(): Unit = {}
def handleDeletion(): Unit = {}
def handleDataChange(): Unit = {}
}
trait ZNodeChildChangeHandler {
val path: String
def handleChildChange(): Unit = {}
}
sealed trait AsyncRequest {
/**
* This type member allows us to define methods that take requests and return responses with the correct types.
* See ``ZooKeeperClient.handleRequests`` for example.
*/
type Response <: AsyncResponse
def path: String
def ctx: Option[Any]
}
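// Illustrative sketch (hypothetical helper, not part of this client): the `Response` type
// member above lets a caller recover the concrete response type for the request it sent, e.g.
//   def send[Req <: AsyncRequest](request: Req): Req#Response
// would return a CreateResponse for a CreateRequest and a GetDataResponse for a GetDataRequest
// without casting; the real entry point for this pattern is ZooKeeperClient.handleRequests.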
case class CreateRequest(path: String, data: Array[Byte], acl: Seq[ACL], createMode: CreateMode,
ctx: Option[Any] = None) extends AsyncRequest {
type Response = CreateResponse
}
case class DeleteRequest(path: String, version: Int, ctx: Option[Any] = None) extends AsyncRequest {
type Response = DeleteResponse
}
case class ExistsRequest(path: String, ctx: Option[Any] = None) extends AsyncRequest {
type Response = ExistsResponse
}
case class GetDataRequest(path: String, ctx: Option[Any] = None) extends AsyncRequest {
type Response = GetDataResponse
}
case class SetDataRequest(path: String, data: Array[Byte], version: Int, ctx: Option[Any] = None) extends AsyncRequest {
type Response = SetDataResponse
}
case class GetAclRequest(path: String, ctx: Option[Any] = None) extends AsyncRequest {
type Response = GetAclResponse
}
case class SetAclRequest(path: String, acl: Seq[ACL], version: Int, ctx: Option[Any] = None) extends AsyncRequest {
type Response = SetAclResponse
}
case class GetChildrenRequest(path: String, ctx: Option[Any] = None) extends AsyncRequest {
type Response = GetChildrenResponse
}
sealed abstract class AsyncResponse {
def resultCode: Code
def path: String
def ctx: Option[Any]
  /** Return None if the result code is OK and Some of the corresponding KeeperException otherwise. */
def resultException: Option[KeeperException] =
if (resultCode == Code.OK) None else Some(KeeperException.create(resultCode, path))
/**
* Throw KeeperException if the result code is not OK.
*/
def maybeThrow(): Unit = {
if (resultCode != Code.OK)
throw KeeperException.create(resultCode, path)
}
def metadata: ResponseMetadata
}
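// Usage sketch (hypothetical response value): for a request that failed with Code.NONODE,
//   response.resultException  // Some(KeeperException.NoNodeException) built for the request path
//   response.maybeThrow()     // throws that same exception
// while a Code.OK response yields None and maybeThrow() returns normally.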
case class ResponseMetadata(sendTimeMs: Long, receivedTimeMs: Long) {
def responseTimeMs: Long = receivedTimeMs - sendTimeMs
}
case class CreateResponse(resultCode: Code, path: String, ctx: Option[Any], name: String, metadata: ResponseMetadata) extends AsyncResponse
case class DeleteResponse(resultCode: Code, path: String, ctx: Option[Any], metadata: ResponseMetadata) extends AsyncResponse
case class ExistsResponse(resultCode: Code, path: String, ctx: Option[Any], stat: Stat, metadata: ResponseMetadata) extends AsyncResponse
case class GetDataResponse(resultCode: Code, path: String, ctx: Option[Any], data: Array[Byte], stat: Stat,
metadata: ResponseMetadata) extends AsyncResponse
case class SetDataResponse(resultCode: Code, path: String, ctx: Option[Any], stat: Stat, metadata: ResponseMetadata) extends AsyncResponse
case class GetAclResponse(resultCode: Code, path: String, ctx: Option[Any], acl: Seq[ACL], stat: Stat,
metadata: ResponseMetadata) extends AsyncResponse
case class SetAclResponse(resultCode: Code, path: String, ctx: Option[Any], stat: Stat, metadata: ResponseMetadata) extends AsyncResponse
case class GetChildrenResponse(resultCode: Code, path: String, ctx: Option[Any], children: Seq[String], stat: Stat,
metadata: ResponseMetadata) extends AsyncResponse
class ZooKeeperClientException(message: String) extends RuntimeException(message)
class ZooKeeperClientExpiredException(message: String) extends ZooKeeperClientException(message)
class ZooKeeperClientAuthFailedException(message: String) extends ZooKeeperClientException(message)
class ZooKeeperClientTimeoutException(message: String) extends ZooKeeperClientException(message)
| sebadiaz/kafka | core/src/main/scala/kafka/zookeeper/ZooKeeperClient.scala | Scala | apache-2.0 | 21,988 |
package models.util
import org.squeryl.dsl.GroupWithMeasures
import play.api.libs.json.{Json, JsValue}
trait GroupMeasureConversion {
def toMap[A](groupMeasure:List[GroupWithMeasures[A,A]]):Map[A,A] = {
groupMeasure.foldLeft(Map[A,A]()) { (prev,curr) =>
prev + (curr.key -> curr.measures)
}
}
def toJson(map:Map[Long,Long]):JsValue = {
val stringMap = map.foldLeft(Map[String, Long]()) { (prev, curr) =>
prev + (curr._1.toString -> curr._2)
}
Json.toJson(stringMap)
}
def mergeMaps(listOfMaps:List[Map[Long,Long]]):Map[Long,Long] = {
(Map[Long,Long]() /: (for (m <- listOfMaps; kv <- m) yield kv)) { (a, kv) =>
a + ( if(a.contains(kv._1)) kv._1 -> (a(kv._1) + kv._2) else kv)
}
}
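  // Worked example (hypothetical input): mergeMaps(List(Map(1L -> 2L), Map(1L -> 3L, 4L -> 5L)))
  // sums the values of colliding keys and returns Map(1L -> 5L, 4L -> 5L).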
}
| haffla/stream-compare | app/models/util/GroupMeasureConversion.scala | Scala | gpl-3.0 | 745 |
/*
Copyright 2016 ScalABM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.economicsl.agora.markets
import org.apache.commons.math3.analysis.solvers.AllowedSolution
/** Classes for modeling auction mechanisms.
*
* Key high-level abstraction: Auction mechanisms combine a matching rule with a pricing rule. Auction mechanisms can
* be either continuous or periodic.
*/
package object auctions {
  /** Class implementing a simple configuration object for a non-linear equation solver. */
case class BrentSolverConfig(maxEvaluations: Int,
min: Double,
max: Double,
startValue: Double,
allowedSolution: AllowedSolution,
relativeAccuracy: Double,
absoluteAccuracy: Double,
functionValueAccuracy: Double,
maximalOrder: Int)
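  // Illustrative configuration (all values are placeholders, not project defaults):
  //   BrentSolverConfig(maxEvaluations = 1000, min = 0.0, max = 1.0, startValue = 0.5,
  //                     allowedSolution = AllowedSolution.ANY_SIDE, relativeAccuracy = 1e-9,
  //                     absoluteAccuracy = 1e-12, functionValueAccuracy = 1e-9, maximalOrder = 5)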
}
| EconomicSL/agora | src/main/scala/org/economicsl/agora/markets/auctions/package.scala | Scala | apache-2.0 | 1,473 |
import akka.actor.{Props, ActorSystem}
import akka.testkit.{ImplicitSender, TestKit}
import com.frenchcoder.scalamones.elastic.ElasticJsonProtocol
import com.frenchcoder.scalamones.elastic.Stat.NodesStat
import com.frenchcoder.scalamones.service.KpiProvider
import com.frenchcoder.scalamones.service.KpiProvider.{KpiNotify, KpiMonitor}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import spray.http._
import spray.httpx.SprayJsonSupport
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
class KpiProviderSpec(_system: ActorSystem) extends TestKit(_system) with ImplicitSender
with WordSpecLike with Matchers with BeforeAndAfterAll {
def this() = this(ActorSystem("ManagerSpec"))
val emptyNodeStatJson = "{\\"cluster_name\\":\\"elasticsearch\\",\\"nodes\\":{\\"5thWy2-dRCynqF7J4_Cpqw\\":{\\"timestamp\\":1439738312505,\\"name\\":\\"Harmonica\\",\\"transport_address\\":\\"inet[/172.17.0.1:9300]\\",\\"host\\":\\"1bf4f8e0cc6c\\",\\"ip\\":[\\"inet[/172.17.0.1:9300]\\",\\"NONE\\"]}}}"
val otherNodeStatJson = "{\\"cluster_name\\":\\"other\\",\\"nodes\\":{\\"5thWy2-dRCynqF7J4_Cpqw\\":{\\"timestamp\\":1439738312505,\\"name\\":\\"Harmonica\\",\\"transport_address\\":\\"inet[/172.17.0.1:9300]\\",\\"host\\":\\"1bf4f8e0cc6c\\",\\"ip\\":[\\"inet[/172.17.0.1:9300]\\",\\"NONE\\"]}}}"
  var requestCount = 0
override def afterAll {
TestKit.shutdownActorSystem(system)
}
def sendAndReceive(response:String)(request: HttpRequest) : Future[HttpResponse] = {
Future(HttpResponse(StatusCodes.OK, HttpEntity(ContentTypes.`application/json`, response.getBytes())))
}
def blockingSendAndReceive(response:String)(request: HttpRequest): Future[HttpResponse] = {
requestCount = requestCount + 1
akka.pattern.after[HttpResponse](1.seconds, using = system.scheduler)(
Future(HttpResponse(StatusCodes.OK, HttpEntity(ContentTypes.`application/json`, response.getBytes())))
)
}
"A KpiProvider" must {
"respond to monitor request" in {
import SprayJsonSupport._
import ElasticJsonProtocol._
val provider = system.actorOf(Props(new KpiProvider[NodesStat, NodesStat](sendAndReceive(emptyNodeStatJson), "dummyUrl", (n => n))))
provider ! KpiMonitor(self)
val nodeValue = expectMsgType[KpiNotify[NodesStat]]
}
"not send a request while there is another one pending" in {
import SprayJsonSupport._
import ElasticJsonProtocol._
// Count request
val provider = system.actorOf(Props(new KpiProvider[NodesStat, NodesStat](blockingSendAndReceive(otherNodeStatJson), "dummyUrl", (n => n))))
provider ! KpiMonitor(self)
provider ! KpiMonitor(self)
val nodeValue = expectMsgType[KpiNotify[NodesStat]]
requestCount should be (1)
}
}
}
| jpthomasset/scalamones | src/test/scala-2.11/KpiProviderSpec.scala | Scala | mit | 2,788 |
package org.vitrivr.adampro.data.index.structures.ecp
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, Dataset}
import org.vitrivr.adampro.config.AttributeNames
import org.vitrivr.adampro.data.datatypes.TupleID.TupleID
import org.vitrivr.adampro.data.datatypes.vector.Vector._
import org.vitrivr.adampro.data.index.Index.IndexTypeName
import org.vitrivr.adampro.data.index._
import org.vitrivr.adampro.data.index.structures.IndexTypes
import org.vitrivr.adampro.query.distance.DistanceFunction
import org.vitrivr.adampro.data.datatypes.vector.Vector
import org.vitrivr.adampro.process.SharedComponentContext
import org.vitrivr.adampro.query.tracker.QueryTracker
import scala.util.Random
/**
* adamtwo
*
* Ivan Giangreco
* October 2015
*/
class ECPIndexGenerator(centroidBasedLeaders: Boolean, distance: DistanceFunction, nrefs: Option[Int])(@transient implicit val ac: SharedComponentContext) extends IndexGenerator {
override val indextypename: IndexTypeName = IndexTypes.ECPINDEX
private val MAX_NUM_OF_LEADERS = 200
  /**
    * @param data      raw data to index
    * @param attribute attribute of the data to index
    * @param tracker   query tracker used to register broadcast variables
    * @return the indexed data frame together with the index meta data
    */
override def index(data: DataFrame, attribute : String)(tracker : QueryTracker): (DataFrame, Serializable) = {
log.trace("eCP index started indexing")
val nleaders = math.min(math.max(nrefs.getOrElse(math.sqrt(data.count()).toInt), MINIMUM_NUMBER_OF_TUPLE), MAX_NUM_OF_LEADERS)
val sample = getSample(nleaders, attribute)(data)
val leadersBc = ac.sc.broadcast(sample.zipWithIndex.map { case (vector, idx) => IndexingTaskTuple(idx, vector.ap_indexable) }) //use own ids, not id of data
tracker.addBroadcast(leadersBc)
log.trace("eCP index chosen " + sample.length + " leaders")
val minIdUDF = udf((c: DenseSparkVector) => {
leadersBc.value.map({ l =>
(l.ap_id.toByte, distance.apply(Vector.conv_dspark2vec(c), l.ap_indexable))
}).minBy(_._2)._1
})
val indexed = data.withColumn(AttributeNames.featureIndexColumnName, minIdUDF(data(attribute))).persist()
import ac.spark.implicits._
val leaders = if (centroidBasedLeaders) {
log.trace("eCP index updating leaders, make centroid-based")
indexed.map(r => (r.getAs[Int](AttributeNames.internalIdColumnName), r.getAs[DenseSparkVector](attribute)))
.groupByKey(_._1)
.mapGroups {
case (key, values) => {
val tmp = values.toArray.map(x => (x._2, 1))
.reduce[(DenseSparkVector, Int)] { case (x1, x2) => (x1._1.zip(x2._1).map { case (xx1, xx2) => xx1 + xx2 }, x1._2 + x2._2) }
val count = tmp._2
val centroid = tmp._1.map(x => x / tmp._2.toFloat)
key ->(centroid, count)
}
}
.map(x => ECPLeader(x._1, Vector.conv_draw2vec(x._2._1), x._2._2))
.collect.toSeq
} else {
val counts = indexed.stat.countMinSketch(col(AttributeNames.featureIndexColumnName), nleaders, nleaders, Random.nextInt)
leadersBc.value.map(x => ECPLeader(x.ap_id, x.ap_indexable, counts.estimateCount(x.ap_id.toInt)))
}
val meta = ECPIndexMetaData(leaders, distance)
(indexed, meta)
}
}
class ECPIndexGeneratorFactory extends IndexGeneratorFactory {
/**
* @param distance distance function
* @param properties indexing properties
*/
def getIndexGenerator(distance: DistanceFunction, properties: Map[String, String] = Map[String, String]())(implicit ac: SharedComponentContext): IndexGenerator = {
val trainingSize = properties.get("ntraining").map(_.toInt)
val nrefs = if (properties.contains("ntraining")) {
Some(properties.get("ntraining").get.toInt)
} else if (properties.contains("n")) {
Some(math.sqrt(properties.get("n").get.toInt).toInt)
} else {
None
}
val leaderTypeDescription = properties.getOrElse("leadertype", "simple")
val leaderType = leaderTypeDescription.toLowerCase match {
//possibly extend with other types and introduce enum
case "centroid" => true
case "simple" => false
}
new ECPIndexGenerator(leaderType, distance, nrefs)
}
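  // Example (assumed property values): Map("ntraining" -> "100", "leadertype" -> "centroid")
  // requests roughly 100 leaders (the generator still clamps this to MAX_NUM_OF_LEADERS) and
  // recomputes each leader as the centroid of its cluster; an empty map falls back to
  // sqrt(n) leaders with existing tuples kept as leaders.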
/**
*
* @return
*/
override def parametersInfo: Seq[ParameterInfo] = Seq(
new ParameterInfo("ntraining", "number of training tuples", Seq[String]()),
new ParameterInfo("leadertype", "choosing existing leader or recomputing centroid of cluster", Seq("simple", "centroid"))
)
} | dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/data/index/structures/ecp/ECPIndexGenerator.scala | Scala | mit | 4,434 |
object Test extends dotty.runtime.LegacyApp {
println(raw"[\\n\\t'${'"'}$$\\n]")
}
| folone/dotty | tests/run/rawstrings.scala | Scala | bsd-3-clause | 82 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.batchpools
import edu.latrobe._
import edu.latrobe.blaze._
import edu.latrobe.io.graph._
import scala.Seq
import scala.collection._
import scala.util.hashing._
/**
* Wraps a try-catch block around the source and tries to step over issues.
*
* Use this for data from unstable sources.
*/
final class RetryOnException(override val builder: RetryOnExceptionBuilder,
override val seed: InstanceSeed,
override val source: BatchPool)
extends Prefetcher[RetryOnExceptionBuilder] {
val noRetries
: Int = builder.noRetries
override val outputHints
: BuildHints = inputHints
override def draw()
: BatchPoolDrawContext = {
var i = 0
do {
try {
return source.draw()
}
catch {
case e: Exception =>
logger.error(s"FailSafeWrapper.current exception caught -> $e")
System.gc()
System.runFinalization()
i += 1
}
} while(i <= noRetries)
    throw new UnknownError(s"Drawing from the source batch pool still failed after $noRetries retries.")
}
}
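// Hypothetical usage sketch (the source builder name is assumed): wrapping an unstable batch
// pool so that a failed draw() is retried, with a GC pass in between, before giving up:
//   val pool = RetryOnExceptionBuilder(unstableSourceBuilder).setNoRetries(10)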
final class RetryOnExceptionBuilder
extends PrefetcherBuilder[RetryOnExceptionBuilder] {
override def repr
: RetryOnExceptionBuilder = this
private var _noRetries
: Int = 10
def noRetries
: Int = _noRetries
def noRetries_=(value: Int): Unit = {
require(value >= 0)
_noRetries = value
}
def setNoRetries(value: Int)
: RetryOnExceptionBuilder = {
noRetries_=(value)
this
}
override protected def doToString()
: List[Any] = _noRetries :: super.doToString()
override def hashCode()
: Int = MurmurHash3.mix(super.hashCode(), _noRetries.hashCode())
override def canEqual(that: Any)
: Boolean = that.isInstanceOf[RetryOnExceptionBuilder]
override protected def doEquals(other: Equatable)
: Boolean = super.doEquals(other) && (other match {
case other: RetryOnExceptionBuilder =>
_noRetries == other._noRetries
case _ =>
false
})
override protected def doCopy()
: RetryOnExceptionBuilder = RetryOnExceptionBuilder()
override def copyTo(other: InstanceBuilder)
: Unit = {
super.copyTo(other)
other match {
case other: RetryOnExceptionBuilder =>
other._noRetries = _noRetries
case _ =>
}
}
// ---------------------------------------------------------------------------
// Record set construction
// --------------------------------------------------------------------------
override def doBuild(source: BatchPool,
seed: InstanceSeed)
: RetryOnException = new RetryOnException(this, seed, source)
}
object RetryOnExceptionBuilder {
final def apply()
: RetryOnExceptionBuilder = new RetryOnExceptionBuilder
final def apply(source: BatchPoolBuilder)
: RetryOnExceptionBuilder = apply().setSource(source)
} | bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/batchpools/RetryOnException.scala | Scala | apache-2.0 | 3,518 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.table.functions.BuiltInFunctionDefinitions
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.functions.bridging.BridgingSqlFunction
import org.apache.flink.table.planner.utils.ShortcutUtils
import org.apache.flink.table.runtime.functions.table.UnnestRowsFunction
import org.apache.flink.table.types.logical.utils.LogicalTypeUtils.toRowType
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan.RelOptRule._
import org.apache.calcite.plan.hep.HepRelVertex
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelOptRuleOperand}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.core.Uncollect
import org.apache.calcite.rel.logical._
import java.util.Collections
/**
* Planner rule that rewrites UNNEST to explode function.
*
* Note: This class can only be used in HepPlanner.
*/
class LogicalUnnestRule(
operand: RelOptRuleOperand,
description: String)
extends RelOptRule(operand, description) {
override def matches(call: RelOptRuleCall): Boolean = {
val join: LogicalCorrelate = call.rel(0)
val right = getRel(join.getRight)
right match {
// a filter is pushed above the table function
case filter: LogicalFilter =>
getRel(filter.getInput) match {
case u: Uncollect => !u.withOrdinality
case p: LogicalProject => getRel(p.getInput) match {
case u: Uncollect => !u.withOrdinality
case _ => false
}
case _ => false
}
case project: LogicalProject =>
getRel(project.getInput) match {
case u: Uncollect => !u.withOrdinality
case _ => false
}
case u: Uncollect => !u.withOrdinality
case _ => false
}
}
override def onMatch(call: RelOptRuleCall): Unit = {
val correlate: LogicalCorrelate = call.rel(0)
val outer = getRel(correlate.getLeft)
val array = getRel(correlate.getRight)
def convert(relNode: RelNode): RelNode = {
relNode match {
case rs: HepRelVertex =>
convert(getRel(rs))
case f: LogicalProject =>
f.copy(f.getTraitSet, ImmutableList.of(convert(getRel(f.getInput))))
case f: LogicalFilter =>
f.copy(f.getTraitSet, ImmutableList.of(convert(getRel(f.getInput))))
case uc: Uncollect =>
// convert Uncollect into TableFunctionScan
val cluster = correlate.getCluster
val typeFactory = ShortcutUtils.unwrapTypeFactory(cluster)
val relDataType = uc.getInput.getRowType.getFieldList.get(0).getValue
val logicalType = FlinkTypeFactory.toLogicalType(relDataType)
val sqlFunction = BridgingSqlFunction.of(
cluster,
BuiltInFunctionDefinitions.INTERNAL_UNNEST_ROWS)
val rexCall = cluster.getRexBuilder.makeCall(
typeFactory.createFieldTypeFromLogicalType(
toRowType(UnnestRowsFunction.getUnnestedType(logicalType))),
sqlFunction,
getRel(uc.getInput).asInstanceOf[LogicalProject].getProjects)
new LogicalTableFunctionScan(
cluster,
correlate.getTraitSet,
Collections.emptyList(),
rexCall,
null,
rexCall.getType,
null)
}
}
// convert unnest into table function scan
val tableFunctionScan = convert(array)
// create correlate with table function scan as input
val newCorrelate =
correlate.copy(correlate.getTraitSet, ImmutableList.of(outer, tableFunctionScan))
call.transformTo(newCorrelate)
}
private def getRel(rel: RelNode): RelNode = {
rel match {
case vertex: HepRelVertex => vertex.getCurrentRel
case _ => rel
}
}
}
object LogicalUnnestRule {
val INSTANCE = new LogicalUnnestRule(
operand(classOf[LogicalCorrelate], any),
"LogicalUnnestRule")
}
| godfreyhe/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/LogicalUnnestRule.scala | Scala | apache-2.0 | 4,826 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.rx
import wvlet.airspec.AirSpec
/**
  * Tests for RxOption operators such as map, filter, flatMap, getOrElse, orElse and transform.
  */
class RxOptionTest extends AirSpec {
test("eval") {
val opt = Rx.option(Some("world"))
val v = opt.map(x => s"hello ${x}")
v.run(x => x shouldBe Some("hello world"))
}
test("none") {
val opt = Rx.none
val v = opt.map(x => s"hello ${x}")
v.run(x => x shouldBe empty)
}
test("filter true") {
val opt = Rx.option(Some("world"))
val v = opt.filter(_.startsWith("world")).map(x => s"hello ${x}")
v.run(x => x shouldBe Some("hello world"))
}
test("filter false") {
val opt = Rx.option(Some("world"))
val v = opt.filter(_.startsWith("xxx")).map(x => s"hello ${x}")
v.run(_ shouldBe empty)
}
test("flatMap") {
val opt = Rx.option(Some("hello"))
val v = opt.flatMap(x => Rx.const(s"hello ${x}"))
v.run(_ shouldBe Some("hello hello"))
}
test("for-comprehension") {
val a = for (x <- Rx.option(Some("hello"))) yield {
x + " world"
}
a.run(_ shouldBe Some("hello world"))
}
test("toOption") {
val opt = Rx.const(Some("hello")).toOption
val a = opt.map(x => s"${x} option")
a.run(_ shouldBe Some("hello option"))
}
test("option variable") {
val v = Rx.optionVariable(Some("hello"))
val o = v.map { x =>
s"${x} world"
}
o.run(_ shouldBe Some("hello world"))
}
test("eval option variable") {
val v = Rx.optionVariable(Some("hello"))
v.run(_ shouldBe Some("hello"))
}
test("set option variable") {
val v = Rx.optionVariable(Some("hello"))
val o = v.map { x =>
s"${x} world"
}
v.set(Some("good morning"))
o.run(_ shouldBe Some("good morning world"))
// We need to cancel the run to unregister the subscription
.cancel
v.set(None)
o.run(_ shouldBe empty)
}
test("convert RxVar to RxOptionVar") {
val v = Rx.variable(Some("hello")).toOption
val o = v.map { x =>
s"${x} world"
}
o.run(_ shouldBe Some("hello world")).cancel
v := None
o.run(_ shouldBe empty).cancel
v := Some("good morning")
o.run(_ shouldBe Some("good morning world")).cancel
}
test("getOrElse") {
val opt = Rx.option(Some("hello"))
opt.getOrElse("world").run(_ shouldBe "hello")
}
test("getOrElse None") {
val opt = Rx.none
opt.getOrElse("world").run(_ shouldBe "world")
}
test("orElse") {
val opt = Rx.option(Some("hello"))
opt.orElse(Some("world")).run(_ shouldBe Some("hello"))
}
test("orElse None") {
val opt = Rx.none
opt.orElse(Some("world")).run(_ shouldBe Some("world"))
}
test("transform") {
val opt = Rx.option(Some("hello"))
opt
.transform {
case Some(x) => x
case None => "world"
}.run(_ shouldBe "hello")
}
test("transform None") {
val opt = Rx.none
opt
.transform {
case Some(x) => x
case None => "world"
}.run(_ shouldBe "world")
}
test("transformOption") {
val opt = Rx.option(Some("hello"))
opt
.transformOption {
case Some(x) => Some(x)
case None => Some("world")
}
.run(_ shouldBe Some("hello"))
}
test("transformOption None") {
val opt = Rx.none
opt
.transformOption {
case Some(x) => Some(x)
case None => Some("world")
}
.run(_ shouldBe Some("world"))
}
}
| wvlet/airframe | airframe-rx/src/test/scala/wvlet/airframe/rx/RxOptionTest.scala | Scala | apache-2.0 | 3,967 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package toplevel
package typedef
import com.intellij.lang.ASTNode
import com.intellij.lang.java.lexer.JavaLexer
import com.intellij.openapi.project.{DumbService, DumbServiceImpl, Project}
import com.intellij.openapi.util.TextRange
import com.intellij.psi._
import com.intellij.psi.impl.light.LightField
import com.intellij.psi.scope.PsiScopeProcessor
import com.intellij.psi.stubs.StubElement
import com.intellij.psi.tree.IElementType
import com.intellij.psi.util.{PsiModificationTracker, PsiUtil}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.typedef.TypeDefinitionMembers.SignatureNodes
import org.jetbrains.plugins.scala.lang.psi.light.{EmptyPrivateConstructor, PsiClassWrapper}
import org.jetbrains.plugins.scala.lang.psi.stubs.ScTemplateDefinitionStub
import org.jetbrains.plugins.scala.lang.psi.types.{ScSubstitutor, PhysicalSignature}
import org.jetbrains.plugins.scala.lang.resolve.ResolveUtils
import org.jetbrains.plugins.scala.lang.resolve.processor.BaseProcessor
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
/**
* @author Alexander Podkhalyuzin
* Date: 20.02.2008
*/
class ScObjectImpl protected (stub: StubElement[ScTemplateDefinition], nodeType: IElementType, node: ASTNode)
extends ScTypeDefinitionImpl(stub, nodeType, node) with ScObject with ScTemplateDefinition {
override def additionalJavaNames: Array[String] = {
fakeCompanionClass match {
case Some(c) => Array(c.getName)
case _ => Array.empty
}
}
override def getNavigationElement: PsiElement = {
if (isSyntheticObject) {
ScalaPsiUtil.getCompanionModule(this) match {
case Some(clazz) => return clazz.getNavigationElement
case _ =>
}
}
super.getNavigationElement
}
override def getContainingFile: PsiFile = {
if (isSyntheticObject) {
ScalaPsiUtil.getCompanionModule(this) match {
case Some(clazz) => return clazz.getContainingFile
case _ =>
}
}
super.getContainingFile
}
override def accept(visitor: PsiElementVisitor) {
visitor match {
case visitor: ScalaElementVisitor => super.accept(visitor)
case _ => super.accept(visitor)
}
}
def this(node: ASTNode) = {this(null, null, node)}
def this(stub: ScTemplateDefinitionStub) = {this(stub, ScalaElementTypes.OBJECT_DEF, null)}
override def toString: String = (if (isPackageObject) "ScPackageObject: " else "ScObject: ") + name
override def getIconInner = if (isPackageObject) Icons.PACKAGE_OBJECT else Icons.OBJECT
override def getName: String = {
if (isPackageObject) return "package$"
super.getName + "$"
}
override def hasModifierProperty(name: String): Boolean = {
if (name == "final") return true
super[ScTypeDefinitionImpl].hasModifierProperty(name)
}
override def isObject : Boolean = true
override def isPackageObject: Boolean = {
val stub = getStub
if (stub != null) {
stub.asInstanceOf[ScTemplateDefinitionStub].isPackageObject
} else findChildByType[PsiElement](ScalaTokenTypes.kPACKAGE) != null || name == "`package`"
}
def hasPackageKeyword: Boolean = findChildByType[PsiElement](ScalaTokenTypes.kPACKAGE) != null
override def isCase = hasModifierProperty("case")
override def processDeclarationsForTemplateBody(processor: PsiScopeProcessor,
state: ResolveState,
lastParent: PsiElement,
place: PsiElement): Boolean = {
if (DumbService.getInstance(getProject).isDumb) return true
if (!super[ScTemplateDefinition].processDeclarationsForTemplateBody(processor, state, lastParent, place)) return false
if (isPackageObject && name != "`package`") {
val newState = state.put(BaseProcessor.FROM_TYPE_KEY, null)
val qual = qualifiedName
val facade = JavaPsiFacade.getInstance(getProject)
val pack = facade.findPackage(qual) //do not wrap into ScPackage to avoid SOE
if (pack != null && !ResolveUtils.packageProcessDeclarations(pack, processor, newState, lastParent, place))
return false
}
true
}
override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState, lastParent: PsiElement,
place: PsiElement): Boolean = {
if (isPackageObject) {
import org.jetbrains.plugins.scala.lang.psi.impl.ScPackageImpl._
startPackageObjectProcessing()
try {
super[ScTemplateDefinition].processDeclarations(processor, state, lastParent, place)
} catch {
case ignore: DoNotProcessPackageObjectException => true //do nothing, just let's move on
} finally {
stopPackageObjectProcessing()
}
} else {
super[ScTemplateDefinition].processDeclarations(processor, state, lastParent, place)
}
}
override protected def syntheticMethodsWithOverrideImpl: Seq[PsiMethod] = {
if (isSyntheticObject) Seq.empty
else ScalaPsiUtil.getCompanionModule(this) match {
case Some(c: ScClass) if c.isCase =>
val res = new ArrayBuffer[PsiMethod]
c.getSyntheticMethodsText.foreach(s => {
try {
val method = ScalaPsiElementFactory.createMethodWithContext(s, c.getContext, c)
method.setSynthetic(this)
method.syntheticCaseClass = Some(c)
res += method
}
catch {
case e: Exception => //do not add methods with wrong signature
}
})
res.toSeq
case _ => Seq.empty
}
}
override protected def syntheticMethodsNoOverrideImpl: Seq[PsiMethod] = SyntheticMembersInjector.inject(this)
def fakeCompanionClass: Option[PsiClass] = {
ScalaPsiUtil.getCompanionModule(this) match {
case Some(module) => None
case None => Some(new PsiClassWrapper(this, getQualifiedName.substring(0, getQualifiedName.length() - 1),
getName.substring(0, getName.length() - 1)))
}
}
def fakeCompanionClassOrCompanionClass: PsiClass = {
fakeCompanionClass match {
case Some(clazz) => clazz
case _ =>
ScalaPsiUtil.getCompanionModule(this).get
}
}
@volatile
private var moduleField: Option[PsiField] = null
@volatile
private var moduleFieldModCount: Long = 0L
private def getModuleField: Option[PsiField] = {
val count = getManager.getModificationTracker.getOutOfCodeBlockModificationCount
if (moduleField != null && moduleFieldModCount == count) return moduleField
val fieldOption =
if (getQualifiedName.split('.').exists(JavaLexer.isKeyword(_, PsiUtil.getLanguageLevel(this)))) None else {
val field: LightField = new LightField(getManager, JavaPsiFacade.getInstance(getProject).getElementFactory.createFieldFromText(
"public final static " + getQualifiedName + " MODULE$", this
), this)
field.setNavigationElement(this)
Some(field)
}
moduleField = fieldOption
moduleFieldModCount = count
fieldOption
}
override def getFields: Array[PsiField] = {
getModuleField.toArray
}
override def findFieldByName(name: String, checkBases: Boolean): PsiField = {
name match {
case "MODULE$" => getModuleField.orNull
case _ => null
}
}
override def getInnerClasses: Array[PsiClass] = Array.empty
override def getMethods: Array[PsiMethod] = {
getAllMethods.filter(_.containingClass == this)
}
override def getAllMethods: Array[PsiMethod] = {
val res = new ArrayBuffer[PsiMethod]()
res ++= getConstructors
TypeDefinitionMembers.SignatureNodes.forAllSignatureNodes(this) { node =>
val isInterface = node.info.namedElement match {
case t: ScTypedDefinition if t.isAbstractMember => true
case _ => false
}
this.processPsiMethodsForNode(node, isStatic = false, isInterface = isInterface)(res += _)
}
for (synthetic <- syntheticMethodsNoOverride) {
this.processPsiMethodsForNode(new SignatureNodes.Node(new PhysicalSignature(synthetic, ScSubstitutor.empty),
ScSubstitutor.empty),
isStatic = false, isInterface = isInterface)(res += _)
}
res.toArray
}
@volatile
private var emptyObjectConstructor: EmptyPrivateConstructor = null
@volatile
private var emptyObjectConstructorModCount: Long = 0L
override def getConstructors: Array[PsiMethod] = {
val curModCount = getManager.getModificationTracker.getOutOfCodeBlockModificationCount
if (emptyObjectConstructor != null && emptyObjectConstructorModCount == curModCount) {
return Array(emptyObjectConstructor)
}
val res = new EmptyPrivateConstructor(this)
emptyObjectConstructorModCount = curModCount
emptyObjectConstructor = res
Array(res)
}
override def isPhysical: Boolean = {
if (isSyntheticObject) false
else super.isPhysical
}
override def getTextRange: TextRange = {
if (isSyntheticObject) null
else super.getTextRange
}
override def getInterfaces: Array[PsiClass] = {
getSupers.filter(_.isInterface)
}
private val hardParameterlessSignatures: mutable.WeakHashMap[Project, TypeDefinitionMembers.ParameterlessNodes.Map] =
new mutable.WeakHashMap[Project, TypeDefinitionMembers.ParameterlessNodes.Map]
def getHardParameterlessSignatures: TypeDefinitionMembers.ParameterlessNodes.Map = {
hardParameterlessSignatures.getOrElseUpdate(getProject, TypeDefinitionMembers.ParameterlessNodes.build(this))
}
private val hardTypes: mutable.WeakHashMap[Project, TypeDefinitionMembers.TypeNodes.Map] =
new mutable.WeakHashMap[Project, TypeDefinitionMembers.TypeNodes.Map]
def getHardTypes: TypeDefinitionMembers.TypeNodes.Map = {
hardTypes.getOrElseUpdate(getProject, TypeDefinitionMembers.TypeNodes.build(this))
}
private val hardSignatures: mutable.WeakHashMap[Project, TypeDefinitionMembers.SignatureNodes.Map] =
new mutable.WeakHashMap[Project, TypeDefinitionMembers.SignatureNodes.Map]
def getHardSignatures: TypeDefinitionMembers.SignatureNodes.Map = {
hardSignatures.getOrElseUpdate(getProject, TypeDefinitionMembers.SignatureNodes.build(this))
}
}
| SergeevPavel/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/toplevel/typedef/ScObjectImpl.scala | Scala | apache-2.0 | 10,669 |
package edu.duke.oit.vw.models
import edu.duke.oit.vw.utils._
import edu.duke.oit.vw.solr.Vivo
import java.util.Date
import java.text.SimpleDateFormat
case class Gift(uri:String,
vivoType: String,
label: String,
attributes:Option[Map[String, String]])
extends VivoAttributes(uri, vivoType, label, attributes) with AddToJson
{
override def uris():List[String] = {
uri :: super.uris
}
}
object Gift extends AttributeParams {
def fromUri(vivo: Vivo, uriContext:Map[String, Any]) = {
val data = vivo.selectFromTemplate("sparql/gifts.ssp", uriContext)
val existingData = data.filter(datum => !datum.isEmpty)
existingData.map(build(_)).asInstanceOf[List[Gift]]
}
def build(gift:Map[Symbol,String]) = {
new Gift(uri = gift('gift).stripBrackets(),
vivoType = gift('type).stripBrackets(),
label = gift('label),
attributes = parseAttributes(gift, List('gift,'type,'label)))
}
}
| OIT-ADS-Web/vivo_widgets | src/main/scala/models/Gift.scala | Scala | bsd-3-clause | 1,038 |
package org.refptr.iscala
package tests
import org.specs2.mutable.Specification
class InterpreterSpec extends Specification with InterpreterUtil {
sequential
"Interpreter" should {
import Results._
val Obj = "^([^@]+)@([0-9a-fA-F]+)$".r
"support primitive values" in {
interpret("1") must beLike { case NoOutput(Value(1, "Int", Plain("1"))) => ok }
interpret("1.0") must beLike { case NoOutput(Value(1.0, "Double", Plain("1.0"))) => ok }
interpret("true") must beLike { case NoOutput(Value(true, "Boolean", Plain("true"))) => ok }
interpret("false") must beLike { case NoOutput(Value(false, "Boolean", Plain("false"))) => ok }
interpret("\"XXX\"") must beLike { case NoOutput(Value("XXX", "String", Plain("XXX"))) => ok }
}
"support function values" in {
interpret("() => 1") must beLike { case NoOutput(Value(_, "() => Int", Plain("<function0>"))) => ok }
interpret("(x: Int) => x + 1") must beLike { case NoOutput(Value(_, "Int => Int", Plain("<function1>"))) => ok }
interpret("(x: Int, y: Int) => x*y + 1") must beLike { case NoOutput(Value(_, "(Int, Int) => Int", Plain("<function2>"))) => ok }
}
"support container values" in {
interpret("List(1, 2, 3)") must beLike { case NoOutput(Value(_, "List[Int]", Plain("List(1, 2, 3)"))) => ok }
interpret("Array(1, 2, 3)") must beLike { case NoOutput(Value(_, "Array[Int]", Plain("Array(1, 2, 3)"))) => ok }
}
"support null values" in {
interpret("null") must beLike { case NoOutput(Value(null, "Null", Plain("null"))) => ok }
interpret("null: String") must beLike { case NoOutput(Value(null, "String", Plain("null"))) => ok }
}
"support unit value" in {
interpret("()") must beLike { case NoOutput(NoValue) => ok }
}
"support imports" in {
interpret("import scala.xml") must beLike { case NoOutput(NoValue) => ok }
}
"support printing" in {
interpret("println(\"XXX\")") must beLike {
case Output(NoValue, _, "") => ok // TODO: "XXX\n"
}
interpret("print(\"XXX\")") must beLike {
case Output(NoValue, _, "") => ok // TODO: "XXX"
}
interpret("System.out.println(\"XXX\")") must beLike {
case Output(NoValue, _, "") => ok // TODO: "XXX\n"
}
interpret("System.out.print(\"XXX\")") must beLike {
case Output(NoValue, _, "") => ok // TODO: "XXX"
}
}
"support long running code" in {
interpret("(1 to 5).foreach { i => println(i); Thread.sleep(1000) }") must beLike {
case Output(NoValue, _, "") => ok // TODO: "1\n2\n3\n4\n5\n"
}
}
"support arithmetics" in {
interpret("1 + 2 + 3") must beLike { case NoOutput(Value(_, "Int", Plain("6"))) => ok }
}
"support defining values" in {
interpret("val x = 1") must beLike { case NoOutput(Value(1, "Int", Plain("1"))) => ok }
interpret("x") must beLike { case NoOutput(Value(1, "Int", Plain("1"))) => ok }
interpret("100*x + 17") must beLike { case NoOutput(Value(117, "Int", Plain("117"))) => ok }
}
"support defining variables" in {
interpret("var y = 1") must beLike { case NoOutput(Value(1, "Int", Plain("1"))) => ok }
interpret("y") must beLike { case NoOutput(Value(1, "Int", Plain("1"))) => ok }
interpret("100*y + 17") must beLike { case NoOutput(Value(117, "Int", Plain("117"))) => ok }
interpret("y = 2") must beLike { case NoOutput(Value(2, "Int", Plain("2"))) => ok }
interpret("100*y + 17") must beLike { case NoOutput(Value(217, "Int", Plain("217"))) => ok }
}
"support defining lazy values" in {
interpret("var initialized = false") must beLike { case NoOutput(Value(false, "Boolean", _)) => ok }
interpret("lazy val z = { initialized = true; 1 }") must beLike { case NoOutput(NoValue) => ok }
interpret("initialized") must beLike { case NoOutput(Value(false, "Boolean", _)) => ok }
interpret("z + 1") must beLike { case NoOutput(Value(_, "Int", _)) => ok }
interpret("initialized") must beLike { case NoOutput(Value(true, "Boolean", _)) => ok }
}
"support defining classes" in {
interpret("class Foo(a: Int) { def bar(b: String) = b*a }") must beLike {
case NoOutput(NoValue) => ok
}
interpret("val foo = new Foo(5)") must beLike {
case NoOutput(Value(_, "Foo", _)) => ok
}
interpret("foo.bar(\"xyz\")") must beLike {
case NoOutput(Value(_, "String", Plain("xyzxyzxyzxyzxyz"))) => ok
}
}
"support exceptions" in {
interpret("1/0") must beLike {
case NoOutput(Exception("java.lang.ArithmeticException", "/ by zero", _, exc: ArithmeticException)) => ok
}
interpret("java.util.UUID.fromString(\"xyz\")") must beLike {
case NoOutput(Exception("java.lang.IllegalArgumentException", "Invalid UUID string: xyz", _, exc: IllegalArgumentException)) => ok
}
interpret("throw new java.lang.IllegalArgumentException") must beLike {
case NoOutput(Exception("java.lang.IllegalArgumentException", "", _, exc: IllegalArgumentException)) => ok
}
interpret("throw new java.lang.IllegalArgumentException(\"custom message\")") must beLike {
case NoOutput(Exception("java.lang.IllegalArgumentException", "custom message", _, exc: IllegalArgumentException)) => ok
}
interpret("throw new java.lang.IllegalArgumentException(foo.getClass.getName)") must beLike {
case NoOutput(Exception("java.lang.IllegalArgumentException", "Foo", _, exc: IllegalArgumentException)) => ok
}
}
"support custom exceptions" in {
interpret("class MyException(x: Int) extends Exception(s\"failed with $x\")") must beLike {
case NoOutput(NoValue) => ok
}
interpret("throw new MyException(117)") must beLike {
case NoOutput(Exception("MyException", "failed with 117", _, _)) => ok
}
}
"support value patterns" in {
interpret("""val obj = "^([^@]+)@([0-9a-fA-F]+)$".r""") must beLike {
case NoOutput(Value(_, "scala.util.matching.Regex", Plain("^([^@]+)@([0-9a-fA-F]+)$"))) => ok
}
interpret("""val obj(name, hash) = "Macros$@88a4ee1"""") must beLike {
case NoOutput(Value(_, "String", Plain("88a4ee1"))) => ok
}
interpret("name") must beLike {
case NoOutput(Value(_, "String", Plain("Macros$"))) => ok
}
interpret("hash") must beLike {
case NoOutput(Value(_, "String", Plain("88a4ee1"))) => ok
}
interpret("""val obj(name, hash) = "Macros$@88a4ee1x"""") must beLike {
case NoOutput(Exception("scala.MatchError", "Macros$@88a4ee1x (of class java.lang.String)", _, exc: scala.MatchError)) => ok
}
}
"support macros" in {
interpret("""
import scala.language.experimental.macros
import scala.reflect.macros.Context
object Macros {
def membersImpl[A: c.WeakTypeTag](c: Context): c.Expr[List[String]] = {
import c.universe._
val tpe = weakTypeOf[A]
val members = tpe.declarations.map(_.name.decoded).toList.distinct
val literals = members.map(member => Literal(Constant(member)))
c.Expr[List[String]](Apply(reify(List).tree, literals))
}
def members[A] = macro membersImpl[A]
}
""") must beLike {
case NoOutput(Value(_, "Macros.type", Plain(Obj("Macros$", _)))) => ok
}
val plain = "List(<init>, toByte, toShort, toChar, toInt, toLong, toFloat, toDouble, unary_~, unary_+, unary_-, +, <<, >>>, >>, ==, !=, <, <=, >, >=, |, &, ^, -, *, /, %, getClass)"
interpret("Macros.members[Int]") must beLike {
case NoOutput(Value(_, "List[String]", Plain(plain))) => ok
}
}
"support display framework" in {
interpret("Nil") must beLike {
case NoOutput(Value(_, "scala.collection.immutable.Nil.type", Plain("List()"))) => ok
}
interpret("implicit val PlainNil = org.refptr.iscala.display.PlainDisplay[Nil.type](_ => \"Nil\")") must beLike {
case NoOutput(Value(_, "org.refptr.iscala.display.PlainDisplay[scala.collection.immutable.Nil.type]", _)) => ok
}
interpret("Nil") must beLike {
case NoOutput(Value(_, "scala.collection.immutable.Nil.type", Plain("Nil"))) => ok
}
interpret("implicit val PlainNil = org.refptr.iscala.display.PlainDisplay[Nil.type](_ => \"NIL\")") must beLike {
case NoOutput(Value(_, "org.refptr.iscala.display.PlainDisplay[scala.collection.immutable.Nil.type]", _)) => ok
}
interpret("Nil") must beLike {
case NoOutput(Value(_, "scala.collection.immutable.Nil.type", Plain("NIL"))) => ok
}
interpret("implicit val PlainNil = org.refptr.iscala.display.PlainDisplay[Nothing](obj => ???)") must beLike {
case NoOutput(Value(_, "org.refptr.iscala.display.PlainDisplay[Nothing]", _)) => ok
}
interpret("Nil") must beLike {
case NoOutput(Value(_, "scala.collection.immutable.Nil.type", Plain("List()"))) => ok
}
}
"support empty input" in {
interpret("") must beLike {
case NoOutput(NoValue) => ok
}
interpret(" ") must beLike {
case NoOutput(Incomplete) => ok
}
}
"support typeInfo()" in {
intp.typeInfo("val a =") === None
intp.typeInfo("val a = 5") === Some("Int")
intp.typeInfo("val a = 5; val b = \"foo\"") === Some("Int <and> String")
}
"support completions()" in {
intp.completions("1.") === List(
"%", "&", "*", "+", "-", "/", ">", ">=", ">>", ">>>", "^",
"asInstanceOf", "isInstanceOf",
"toByte", "toChar", "toDouble", "toFloat", "toInt", "toLong", "toShort", "toString",
"unary_+", "unary_-", "unary_~",
"|")
intp.completions("1.to") === List("toByte", "toChar", "toDouble", "toFloat", "toInt", "toLong", "toShort", "toString")
intp.completions("1.toC") === List("toChar")
intp.completions("1.toCx") === Nil
intp.completions("List(1).") === Nil
}
}
}
| nkhuyu/IScala | src/test/scala/Interpreter.scala | Scala | mit | 11,398 |
/*
* Copyright (c) 2013 Scott Abernethy.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package controllers
import play.api.mvc.{Action, Controller}
import play.api.libs.json.{JsBoolean, JsString, JsValue, Json}
import util.{DatePresentation, Permission}
import model.{PresenceState, Environment, Gateway}
import concurrent.Future
import util.Context.playDefault
import gate.{T, Lock, Unlock, RetryAsap}
import play.api.Logger
import state.ChangedGateway
object Gateways extends Controller with Permission {
lazy val stream = Environment.actorSystem.actorFor("/user/StateStream")
lazy val watcher = Environment.actorSystem.actorFor("/user/Watcher")
def list = PermittedAction { request =>
val futureGs = Future( Gateway.forCultist(request.cultistId) )(util.Context.dbOperations)
import util.Context.playDefault
Async {
futureGs.map(gs => Ok(Json.toJson(gs.map(_.toJson))))
}
}
def add = PermittedAction(parse.json) { request =>
val json: JsValue = request.body
(json \\ "uri", json \\ "path", json \\ "password", json \\ "mode" \\ "source", json \\ "mode" \\ "sink") match {
case (JsString(uri), JsString(path), JsString(password), JsBoolean(source), JsBoolean(sink)) => {
val gateway = new Gateway
gateway.cultistId = request.cultistId
gateway.location = uri.trim
gateway.path = path.trim
gateway.password = password.trim
gateway.source = source
gateway.sink = sink
Gateway.save(gateway)
stream ! ChangedGateway(gateway.id, request.cultistId)
Ok("Ok")
}
case _ => {
BadRequest
}
}
}
def get(id: Long) = PermittedAction { request =>
val fGateway = Future( Gateway.find(id) )(util.Context.dbOperations)
import util.Context.playDefault
Async {
fGateway.map{
case Some(g) if (g.cultistId == request.cultistId) => Ok(g.toJson)
case _ => BadRequest
}
}
}
def update(id: Long) = PermittedAction(parse.json) { request =>
val json: JsValue = request.body
(Gateway.find(id), json \\ "uri", json \\ "path", json \\ "password", json \\ "mode" \\ "source", json \\ "mode" \\ "sink") match {
case (Some(g), JsString(uri), JsString(path), passwordJs, JsBoolean(source), JsBoolean(sink)) if (g.cultistId == request.cultistId) => {
g.location = uri.trim
g.path = path.trim
g.source = source
g.sink = sink
passwordJs match {
case JsString(password) if (password.trim.length > 0) => g.password = password.trim
case _ =>
}
Gateway.save(g)
stream ! ChangedGateway(g.id, request.cultistId)
Ok("Ok")
}
case _ => BadRequest
}
}
def lock = PermittedAction(parse.json) { request =>
val msg = request.body \\ "enable" match {
case JsBoolean(false) => Unlock(request.cultistId)
case _ => Lock(request.cultistId)
}
watcher ! msg
Ok("Ok")
}
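  // Request body sketch for the lock endpoint above (illustrative JSON only):
  //   {"enable": true} locks the cultist's gateways, {"enable": false} unlocks them.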
def retry = PermittedAction { request =>
watcher ! RetryAsap(request.cultistId)
Ok("Ok")
}
def sources = InsaneAction { request =>
val report = Gateway.sourceReport
val at = T.now.getTime
import util.Context.playDefault
Async {
report.map(lines =>
Ok(views.html.report.sources(at, lines))
)
}
}
} | scott-abernethy/opener-of-the-way | app/controllers/Gateways.scala | Scala | gpl-3.0 | 3,935 |
package com.sksamuel.elastic4s.http.index
import com.sksamuel.elastic4s.JsonFormat
import com.sksamuel.elastic4s.http.HttpExecutable
import com.sksamuel.elastic4s.http.search.queries.QueryBuilderFn
import com.sksamuel.elastic4s.indexes.{CreateIndexTemplateDefinition, DeleteIndexTemplateDefinition, GetIndexTemplateDefinition}
import com.sksamuel.elastic4s.mappings.MappingContentBuilder
import org.apache.http.entity.{ContentType, StringEntity}
import org.elasticsearch.client.{ResponseListener, RestClient}
import org.elasticsearch.common.xcontent.{XContentBuilder, XContentFactory}
import scala.collection.JavaConverters._
import scala.concurrent.Future
case class CreateIndexTemplateResponse()
case class DeleteIndexTemplateResponse()
case class GetIndexTemplateResponse()
trait IndexTemplateImplicits {
implicit object CreateIndexTemplateHttpExecutable extends HttpExecutable[CreateIndexTemplateDefinition, CreateIndexTemplateResponse] {
override def execute(client: RestClient,
request: CreateIndexTemplateDefinition,
format: JsonFormat[CreateIndexTemplateResponse]): Future[CreateIndexTemplateResponse] = {
val endpoint = s"/_template/" + request.name
val body = CreateIndexTemplateBodyFn(request)
val entity = new StringEntity(body.string, ContentType.APPLICATION_JSON)
val fn = client.performRequestAsync("PUT", endpoint, Map.empty[String, String].asJava, entity, _: ResponseListener)
executeAsyncAndMapResponse(fn, format)
}
}
implicit object DeleteIndexTemplateHttpExecutable extends HttpExecutable[DeleteIndexTemplateDefinition, DeleteIndexTemplateResponse] {
override def execute(client: RestClient,
request: DeleteIndexTemplateDefinition,
format: JsonFormat[DeleteIndexTemplateResponse]): Future[DeleteIndexTemplateResponse] = {
val endpoint = s"/_template/" + request.name
val fn = client.performRequestAsync("DELETE", endpoint, _: ResponseListener)
executeAsyncAndMapResponse(fn, format)
}
}
implicit object GetIndexTemplateHttpExecutable extends HttpExecutable[GetIndexTemplateDefinition, GetIndexTemplateResponse] {
override def execute(client: RestClient,
request: GetIndexTemplateDefinition,
format: JsonFormat[GetIndexTemplateResponse]): Future[GetIndexTemplateResponse] = {
val endpoint = s"/_template/" + request.name
val fn = client.performRequestAsync("GET", endpoint, _: ResponseListener)
executeAsyncAndMapResponse(fn, format)
}
}
}
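// Sketch of the body produced by CreateIndexTemplateBodyFn below for a template with settings,
// one mapping and one filtered alias (all field values are illustrative only):
//   { "template": "logs-*", "order": 1,
//     "settings": { "index.number_of_shards": "2" },
//     "mappings": { "log": { ... } },
//     "aliases": { "recent": { "routing": "1", "filter": { ... } } } }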
object CreateIndexTemplateBodyFn {
def apply(create: CreateIndexTemplateDefinition): XContentBuilder = {
val builder = XContentFactory.jsonBuilder()
builder.startObject()
builder.field("template", create.pattern)
create.order.foreach(builder.field("order", _))
create.version.foreach(builder.field("version", _))
if (create.settings.getAsMap.size > 0) {
builder.startObject("settings")
create.settings.getAsMap.asScala.foreach {
case (key, value) => builder.field(key, value)
}
builder.endObject()
}
if (create.mappings.nonEmpty) {
builder.startObject("mappings")
create.mappings.foreach { mapping =>
builder rawValue MappingContentBuilder.buildWithName(mapping, mapping.`type`).bytes()
}
builder.endObject()
}
if (create.alias.nonEmpty) {
builder.startObject("aliases")
create.alias.foreach { a =>
builder.startObject(a.name)
a.routing.foreach(builder.field("routing", _))
a.filter.foreach { filter =>
builder.rawField("filter", QueryBuilderFn(filter).bytes)
}
builder.endObject()
}
builder.endObject()
}
builder.endObject()
builder
}
}
| FabienPennequin/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/index/IndexTemplateImplicits.scala | Scala | apache-2.0 | 3,857 |
import sbt._
import Keys._
import PlayProject._
object ApplicationBuild extends Build {
val appName = "submission"
val appVersion = "1.0-SNAPSHOT"
val appDependencies = Seq(
"journalio" % "journalio" % "1.2"
)
val main = PlayProject(appName, appVersion, appDependencies, mainLang = SCALA).settings(
resolvers += "Journal.IO" at "https://raw.github.com/sbtourist/Journal.IO/master/m2/repo"
)
}
| lunatech-labs/lunatech-submission | project/Build.scala | Scala | apache-2.0 | 433 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.inject
import java.util.concurrent.atomic.AtomicInteger
import org.specs2.mutable.Specification
import scala.collection.mutable
import scala.concurrent.duration._
import scala.concurrent.{ Await, Future }
class DefaultApplicationLifecycleSpec extends Specification {
import scala.concurrent.ExecutionContext.Implicits.global
"DefaultApplicationLifecycle" should {
    // This test ensures two things:
    // 1. Stop hooks will be called in LIFO order
    // 2. Stop hooks won't data race; they will never run in parallel
"stop all the hooks in the correct order" in {
val lifecycle = new DefaultApplicationLifecycle()
val buffer = mutable.ListBuffer[Int]()
lifecycle.addStopHook(() => Future(buffer.append(1)))
lifecycle.addStopHook(() => Future(buffer.append(2)))
lifecycle.addStopHook(() => Future(buffer.append(3)))
Await.result(lifecycle.stop(), 10.seconds)
Await.result(lifecycle.stop(), 10.seconds)
buffer.toList must beEqualTo(List(3, 2, 1))
}
"continue when a hook returns a failed future" in {
val lifecycle = new DefaultApplicationLifecycle()
val buffer = mutable.ListBuffer[Int]()
lifecycle.addStopHook(() => Future(buffer.append(1)))
lifecycle.addStopHook(() => Future.failed(new RuntimeException("Failed stop hook")))
lifecycle.addStopHook(() => Future(buffer.append(3)))
Await.result(lifecycle.stop(), 10.seconds)
Await.result(lifecycle.stop(), 10.seconds)
buffer.toList must beEqualTo(List(3, 1))
}
"continue when a hook throws an exception" in {
val lifecycle = new DefaultApplicationLifecycle()
val buffer = mutable.ListBuffer[Int]()
lifecycle.addStopHook(() => Future(buffer.append(1)))
lifecycle.addStopHook(() => throw new RuntimeException("Failed stop hook"))
lifecycle.addStopHook(() => Future(buffer.append(3)))
Await.result(lifecycle.stop(), 10.seconds)
Await.result(lifecycle.stop(), 10.seconds)
buffer.toList must beEqualTo(List(3, 1))
}
"runs stop() only once" in {
val counter = new AtomicInteger(0)
val lifecycle = new DefaultApplicationLifecycle()
lifecycle.addStopHook{
() =>
counter.incrementAndGet()
Future.successful(())
}
val f1 = lifecycle.stop()
val f2 = lifecycle.stop()
val f3 = lifecycle.stop()
val f4 = lifecycle.stop()
Await.result(Future.sequence(Seq(f1, f2, f3, f4)), 10.seconds)
counter.get() must beEqualTo(1)
}
}
}
| Shenker93/playframework | framework/src/play/src/test/scala/play/api/inject/DefaultApplicationLifecycleSpec.scala | Scala | apache-2.0 | 2,638 |
package exsplay.examples.user.rest.sync
import exsplay.api.action.Action
import spray.http.HttpMethods._
import exsplay.api.routing.Route
/**
* User: wert
* Date: 21.07.13
* Time: 2:13
*/
object Router {
val routes: PartialFunction[Route, Action] = {
case Route(GET, "v1" :: "users" :: id :: Nil) => RestUserController.get(id)
case Route(PUT, "v1" :: "users" :: id :: Nil) => RestUserController.update(id)
case Route(POST, "v1" :: "users" :: Nil) => RestUserController.create
case Route(GET, "v1" :: "users" :: Nil) => RestUserController.find
}
}
| wertlex/exsplay | examples/src/main/scala/exsplay/examples/user/rest/sync/Router.scala | Scala | apache-2.0 | 612 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.collection.mutable
import org.apache.spark.annotation.{DeveloperApi, Experimental, Unstable}
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.analysis.{FunctionRegistry, TableFunctionRegistry}
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry.FunctionBuilder
import org.apache.spark.sql.catalyst.analysis.TableFunctionRegistry.TableFunctionBuilder
import org.apache.spark.sql.catalyst.expressions.ExpressionInfo
import org.apache.spark.sql.catalyst.parser.ParserInterface
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.{ColumnarRule, SparkPlan}
/**
* :: Experimental ::
 * Holder for injection points to the [[SparkSession]]. We make NO guarantee about the stability,
 * binary compatibility, or source compatibility of the methods here.
*
 * This currently provides the following extension points:
*
* <ul>
* <li>Analyzer Rules.</li>
* <li>Check Analysis Rules.</li>
* <li>Optimizer Rules.</li>
* <li>Pre CBO Rules.</li>
* <li>Planning Strategies.</li>
* <li>Customized Parser.</li>
* <li>(External) Catalog listeners.</li>
* <li>Columnar Rules.</li>
* <li>Adaptive Query Stage Preparation Rules.</li>
* </ul>
*
* The extensions can be used by calling `withExtensions` on the [[SparkSession.Builder]], for
* example:
* {{{
* SparkSession.builder()
* .master("...")
* .config("...", true)
* .withExtensions { extensions =>
* extensions.injectResolutionRule { session =>
* ...
* }
* extensions.injectParser { (session, parser) =>
* ...
* }
* }
* .getOrCreate()
* }}}
*
* The extensions can also be used by setting the Spark SQL configuration property
* `spark.sql.extensions`. Multiple extensions can be set using a comma-separated list. For example:
* {{{
* SparkSession.builder()
* .master("...")
* .config("spark.sql.extensions", "org.example.MyExtensions,org.example.YourExtensions")
* .getOrCreate()
*
* class MyExtensions extends Function1[SparkSessionExtensions, Unit] {
* override def apply(extensions: SparkSessionExtensions): Unit = {
* extensions.injectResolutionRule { session =>
* ...
* }
* extensions.injectParser { (session, parser) =>
* ...
* }
* }
* }
*
* class YourExtensions extends SparkSessionExtensionsProvider {
* override def apply(extensions: SparkSessionExtensions): Unit = {
* extensions.injectResolutionRule { session =>
* ...
* }
* extensions.injectFunction(...)
* }
* }
* }}}
*
 * Note that the injected builders should not assume that the [[SparkSession]] is fully
 * initialized, nor should they touch the session's internals (e.g. the SessionState).
*/
@DeveloperApi
@Experimental
@Unstable
class SparkSessionExtensions {
type RuleBuilder = SparkSession => Rule[LogicalPlan]
type CheckRuleBuilder = SparkSession => LogicalPlan => Unit
type StrategyBuilder = SparkSession => Strategy
type ParserBuilder = (SparkSession, ParserInterface) => ParserInterface
type FunctionDescription = (FunctionIdentifier, ExpressionInfo, FunctionBuilder)
type TableFunctionDescription = (FunctionIdentifier, ExpressionInfo, TableFunctionBuilder)
type ColumnarRuleBuilder = SparkSession => ColumnarRule
type QueryStagePrepRuleBuilder = SparkSession => Rule[SparkPlan]
private[this] val columnarRuleBuilders = mutable.Buffer.empty[ColumnarRuleBuilder]
private[this] val queryStagePrepRuleBuilders = mutable.Buffer.empty[QueryStagePrepRuleBuilder]
/**
* Build the override rules for columnar execution.
*/
private[sql] def buildColumnarRules(session: SparkSession): Seq[ColumnarRule] = {
columnarRuleBuilders.map(_.apply(session)).toSeq
}
/**
* Build the override rules for the query stage preparation phase of adaptive query execution.
*/
private[sql] def buildQueryStagePrepRules(session: SparkSession): Seq[Rule[SparkPlan]] = {
queryStagePrepRuleBuilders.map(_.apply(session)).toSeq
}
/**
* Inject a rule that can override the columnar execution of an executor.
*/
def injectColumnar(builder: ColumnarRuleBuilder): Unit = {
columnarRuleBuilders += builder
}
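  // Illustrative sketch: a columnar rule builder maps a session to a ColumnarRule; the
  // commented-out overrides are placeholders for real pre/post columnar transition rules.
  //
  //   extensions.injectColumnar { session =>
  //     new ColumnarRule {
  //       // override def preColumnarTransitions: Rule[SparkPlan] = ...
  //       // override def postColumnarTransitions: Rule[SparkPlan] = ...
  //     }
  //   }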
/**
* Inject a rule that can override the query stage preparation phase of adaptive query
* execution.
*/
def injectQueryStagePrepRule(builder: QueryStagePrepRuleBuilder): Unit = {
queryStagePrepRuleBuilders += builder
}
private[this] val resolutionRuleBuilders = mutable.Buffer.empty[RuleBuilder]
/**
* Build the analyzer resolution `Rule`s using the given [[SparkSession]].
*/
private[sql] def buildResolutionRules(session: SparkSession): Seq[Rule[LogicalPlan]] = {
resolutionRuleBuilders.map(_.apply(session)).toSeq
}
/**
* Inject an analyzer resolution `Rule` builder into the [[SparkSession]]. These analyzer
* rules will be executed as part of the resolution phase of analysis.
*/
def injectResolutionRule(builder: RuleBuilder): Unit = {
resolutionRuleBuilders += builder
}
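  // Illustrative sketch: a resolution rule builder maps a session to a Rule[LogicalPlan].
  // The identity rule below is a placeholder; a real rule would rewrite unresolved plan nodes.
  //
  //   extensions.injectResolutionRule { session =>
  //     new Rule[LogicalPlan] {
  //       override def apply(plan: LogicalPlan): LogicalPlan = plan
  //     }
  //   }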
private[this] val postHocResolutionRuleBuilders = mutable.Buffer.empty[RuleBuilder]
/**
* Build the analyzer post-hoc resolution `Rule`s using the given [[SparkSession]].
*/
private[sql] def buildPostHocResolutionRules(session: SparkSession): Seq[Rule[LogicalPlan]] = {
postHocResolutionRuleBuilders.map(_.apply(session)).toSeq
}
/**
* Inject an analyzer `Rule` builder into the [[SparkSession]]. These analyzer
* rules will be executed after resolution.
*/
def injectPostHocResolutionRule(builder: RuleBuilder): Unit = {
postHocResolutionRuleBuilders += builder
}
private[this] val checkRuleBuilders = mutable.Buffer.empty[CheckRuleBuilder]
/**
* Build the check analysis `Rule`s using the given [[SparkSession]].
*/
private[sql] def buildCheckRules(session: SparkSession): Seq[LogicalPlan => Unit] = {
checkRuleBuilders.map(_.apply(session)).toSeq
}
/**
   * Inject a check analysis `Rule` builder into the [[SparkSession]]. The injected rules will
* be executed after the analysis phase. A check analysis rule is used to detect problems with a
* LogicalPlan and should throw an exception when a problem is found.
*/
def injectCheckRule(builder: CheckRuleBuilder): Unit = {
checkRuleBuilders += builder
}
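  // Illustrative sketch: a check rule is a LogicalPlan => Unit function that throws when it
  // detects a problem and returns normally otherwise (the condition below is a placeholder).
  //
  //   extensions.injectCheckRule { session =>
  //     plan =>
  //       if (false /* plan violates some invariant */) {
  //         throw new IllegalStateException("invalid plan")
  //       }
  //   }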
private[this] val optimizerRules = mutable.Buffer.empty[RuleBuilder]
private[sql] def buildOptimizerRules(session: SparkSession): Seq[Rule[LogicalPlan]] = {
optimizerRules.map(_.apply(session)).toSeq
}
/**
* Inject an optimizer `Rule` builder into the [[SparkSession]]. The injected rules will be
* executed during the operator optimization batch. An optimizer rule is used to improve the
* quality of an analyzed logical plan; these rules should never modify the result of the
* LogicalPlan.
*/
def injectOptimizerRule(builder: RuleBuilder): Unit = {
optimizerRules += builder
}
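  // Illustrative sketch: an optimizer rule builder has the same shape as a resolution rule
  // builder; the rule may rewrite the plan but must not change the result it computes.
  //
  //   extensions.injectOptimizerRule { session =>
  //     new Rule[LogicalPlan] {
  //       override def apply(plan: LogicalPlan): LogicalPlan = plan  // rewrite, preserve semantics
  //     }
  //   }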
private[this] val preCBORules = mutable.Buffer.empty[RuleBuilder]
private[sql] def buildPreCBORules(session: SparkSession): Seq[Rule[LogicalPlan]] = {
preCBORules.map(_.apply(session)).toSeq
}
/**
* Inject an optimizer `Rule` builder that rewrites logical plans into the [[SparkSession]].
* The injected rules will be executed once after the operator optimization batch and
* before any cost-based optimization rules that depend on stats.
*/
def injectPreCBORule(builder: RuleBuilder): Unit = {
preCBORules += builder
}
private[this] val plannerStrategyBuilders = mutable.Buffer.empty[StrategyBuilder]
private[sql] def buildPlannerStrategies(session: SparkSession): Seq[Strategy] = {
plannerStrategyBuilders.map(_.apply(session)).toSeq
}
/**
* Inject a planner `Strategy` builder into the [[SparkSession]]. The injected strategy will
   * be used to convert a `LogicalPlan` into an executable
* [[org.apache.spark.sql.execution.SparkPlan]].
*/
def injectPlannerStrategy(builder: StrategyBuilder): Unit = {
plannerStrategyBuilders += builder
}
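  // Illustrative sketch: a strategy receives a LogicalPlan and returns candidate SparkPlans,
  // or Nil when it does not apply to the given plan.
  //
  //   extensions.injectPlannerStrategy { session =>
  //     new Strategy {
  //       override def apply(plan: LogicalPlan): Seq[SparkPlan] = Nil
  //     }
  //   }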
private[this] val parserBuilders = mutable.Buffer.empty[ParserBuilder]
private[sql] def buildParser(
session: SparkSession,
initial: ParserInterface): ParserInterface = {
parserBuilders.foldLeft(initial) { (parser, builder) =>
builder(session, parser)
}
}
/**
* Inject a custom parser into the [[SparkSession]]. Note that the builder is passed a session
* and an initial parser. The latter allows for a user to create a partial parser and to delegate
* to the underlying parser for completeness. If a user injects more parsers, then the parsers
* are stacked on top of each other.
*/
def injectParser(builder: ParserBuilder): Unit = {
parserBuilders += builder
}
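  // Illustrative sketch: a parser builder usually wraps the parser it is handed and delegates
  // whatever it does not parse itself. `MyParser` is a hypothetical ParserInterface
  // implementation taking the delegate as a constructor argument.
  //
  //   extensions.injectParser { (session, delegate) =>
  //     new MyParser(session, delegate)
  //   }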
private[this] val injectedFunctions = mutable.Buffer.empty[FunctionDescription]
private[this] val injectedTableFunctions = mutable.Buffer.empty[TableFunctionDescription]
private[sql] def registerFunctions(functionRegistry: FunctionRegistry) = {
for ((name, expressionInfo, function) <- injectedFunctions) {
functionRegistry.registerFunction(name, expressionInfo, function)
}
functionRegistry
}
private[sql] def registerTableFunctions(tableFunctionRegistry: TableFunctionRegistry) = {
for ((name, expressionInfo, function) <- injectedTableFunctions) {
tableFunctionRegistry.registerFunction(name, expressionInfo, function)
}
tableFunctionRegistry
}
/**
* Injects a custom function into the [[org.apache.spark.sql.catalyst.analysis.FunctionRegistry]]
* at runtime for all sessions.
*/
def injectFunction(functionDescription: FunctionDescription): Unit = {
injectedFunctions += functionDescription
}
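  // Illustrative sketch: a FunctionDescription is a triple of identifier, ExpressionInfo and
  // builder. `MyExpr` is a hypothetical catalyst Expression with a single-child constructor.
  //
  //   extensions.injectFunction((
  //     FunctionIdentifier("my_func"),
  //     new ExpressionInfo(classOf[MyExpr].getName, "my_func"),
  //     (children: Seq[Expression]) => MyExpr(children.head)))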
/**
* Injects a custom function into the
* [[org.apache.spark.sql.catalyst.analysis.TableFunctionRegistry]] at runtime for all sessions.
*/
def injectTableFunction(functionDescription: TableFunctionDescription): Unit = {
injectedTableFunctions += functionDescription
}
}
| ueshin/apache-spark | sql/core/src/main/scala/org/apache/spark/sql/SparkSessionExtensions.scala | Scala | apache-2.0 | 11,017 |
package example
import common._
object Lists {
/**
* This method computes the sum of all elements in the list xs. There are
* multiple techniques that can be used for implementing this method, and
* you will learn during the class.
*
* For this example assignment you can use the following methods in class
* `List`:
*
* - `xs.isEmpty: Boolean` returns `true` if the list `xs` is empty
* - `xs.head: Int` returns the head element of the list `xs`. If the list
* is empty an exception is thrown
 *   - `xs.tail: List[Int]` returns the tail of the list `xs`, i.e. the
* list `xs` without its `head` element
*
* ''Hint:'' instead of writing a `for` or `while` loop, think of a recursive
* solution.
*
* @param xs A list of natural numbers
* @return The sum of all elements in `xs`
*/
def sum(xs: List[Int]): Int = if (xs.isEmpty) 0 else xs.head + sum(xs.tail)
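  // For instance (illustrative): sum(List(1, 2, 3)) == 6 and sum(Nil) == 0.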
/**
* This method returns the largest element in a list of integers. If the
* list `xs` is empty it throws a `java.util.NoSuchElementException`.
*
* You can use the same methods of the class `List` as mentioned above.
*
* ''Hint:'' Again, think of a recursive solution instead of using looping
* constructs. You might need to define an auxiliary method.
*
* @param xs A list of natural numbers
* @return The largest element in `xs`
* @throws java.util.NoSuchElementException if `xs` is an empty list
*/
  def max(xs: List[Int]): Int =
    if (xs.isEmpty) throw new java.util.NoSuchElementException else max(xs.head, xs.tail)

  /** Auxiliary method: returns the larger of `mx` and the maximum of `xs`. */
  def max(mx: Int, xs: List[Int]): Int =
    if (xs.isEmpty) mx else if (mx > xs.head) max(mx, xs.tail) else max(xs.head, xs.tail)
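  // For instance (illustrative): max(List(3, 7, 2)) == 7, while max(Nil) throws
  // a java.util.NoSuchElementException.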
}
| relyah/CourseraFunctionalProgramming | assignments/00gettingstarted/example/src/main/scala/example/Lists.scala | Scala | gpl-2.0 | 1,723 |
package com.fuscus.seien.appli.controller
import com.fuscus.seien.appli.cont.{ FormCont, ActionCont }
import com.fuscus.seien.infra.core.AppLogger
import play.api.Logger
import play.api.data.Form
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.json.JsValue
import play.api.mvc._
import scala.concurrent.Future
/**
* Created by watawuwu on 15/07/02.
*/
trait AppController extends Controller with AppLogger {
val securityResponseHeaderSeq = List(
"Content-Type-Options" -> "nosniff",
"X-XSS-Protection" -> "1; mode=block",
"X-Frame-Options" -> "deny",
"Content-Security-Policy" -> "default-src 'none'")
val corsResponseHeaderSeq = Seq(
"Access-Control-Allow-Origin" -> "*",
"Access-Control-Allow-Methods" -> "GET, POST, DELETE, PUT")
def corsCont(request: Request[AnyContent]): ActionCont[Request[AnyContent]] =
ActionCont { (f: Request[AnyContent] => Future[Result]) =>
f(request).map(_.withHeaders(corsResponseHeaderSeq: _*))
}
def securityHeaderCont(request: Request[AnyContent]): ActionCont[Request[AnyContent]] =
ActionCont { (f: Request[AnyContent] => Future[Result]) =>
f(request).map(_.withHeaders(securityResponseHeaderSeq: _*))
}
def responseHeaderCont(request: Request[AnyContent]): ActionCont[Request[AnyContent]] = {
for {
_ <- corsCont(request)
cont <- securityHeaderCont(request)
} yield cont
}
def simpleFormValidate[A](form: Form[A], request: Request[_]): ActionCont[A] =
FormCont.hasErrors(form, request)(_ => Future.successful(BadRequest))
def simpleFormValidate[A](form: Form[A], request: Map[String, String]): ActionCont[A] =
FormCont.hasErrors(form, request)(_ => Future.successful(BadRequest))
def simpleFormValidate[A](form: Form[A], request: JsValue): ActionCont[A] =
FormCont.hasErrors(form, request)(_ => Future.successful(BadRequest))
}
| watawuwu/seien-backend | modules/appli/app/com/fuscus/seien/appli/controller/AppController.scala | Scala | mit | 1,919 |
package com.github.maiflai
import java.nio.file._
import org.sonar.api.batch.fs.InputFile
package object sonar {
type DiscoverXmlFiles = Path => Seq[Path]
type ParseXmlFile = Path => TestSuiteResult
type DiscoverScalaFile = TestClass => Option[InputFile]
val discoverXmlFiles: DiscoverXmlFiles = path => {
import scala.collection.JavaConversions._
Files.newDirectoryStream(path).iterator().
filter(scalatest.isTestSuite).
toSeq
}
def source(fs: org.sonar.api.batch.fs.FileSystem, roots: Seq[Path]): DiscoverScalaFile = testClass => {
import scala.collection.JavaConversions._
val outerClass = testClass.value.takeWhile(_ != '!')
val bestGuessFile = outerClass.replace('.', '/').+(".scala")
val searchLocations = roots.map(_.resolve(bestGuessFile))
searchLocations.flatMap { location =>
val predicate = fs.predicates().hasRelativePath(location.toString)
fs.inputFiles(predicate).headOption
}.headOption
}
}
| maiflai/sonar-scalatest | plugin/src/main/scala/com/github/maiflai/sonar/package.scala | Scala | unlicense | 986 |
package mesosphere.marathon
package core.task.tracker.impl
import akka.Done
import akka.actor.{ ActorRef, Status }
import akka.event.EventStream
import akka.testkit.TestProbe
import ch.qos.logback.classic.Level
import com.google.inject.Provider
import mesosphere.AkkaUnitTest
import mesosphere.marathon.core.CoreGuiceModule
import mesosphere.marathon.core.base.ConstantClock
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.core.health.HealthCheckManager
import mesosphere.marathon.core.instance.TestInstanceBuilder
import mesosphere.marathon.core.instance.update.{ InstanceUpdateEffect, InstanceUpdateOpResolver, InstanceUpdateOperation, InstanceUpdated }
import mesosphere.marathon.core.launchqueue.LaunchQueue
import mesosphere.marathon.core.pod.PodDefinition
import mesosphere.marathon.core.task.bus.{ MesosTaskStatusTestHelper, TaskStatusEmitter }
import mesosphere.marathon.core.task.update.impl.steps._
import mesosphere.marathon.state.{ AppDefinition, PathId, Timestamp }
import mesosphere.marathon.storage.repository.InstanceRepository
import mesosphere.marathon.test.{ CaptureLogEvents, _ }
import org.apache.mesos.SchedulerDriver
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{ Failure, Success, Try }
class InstanceOpProcessorImplTest extends AkkaUnitTest {
// ignored by the TaskOpProcessorImpl
val deadline = Timestamp.zero
class Fixture {
lazy val config = MarathonTestHelper.defaultConfig()
lazy val instanceTrackerProbe = TestProbe()
lazy val opSender = TestProbe()
lazy val instanceRepository = mock[InstanceRepository]
lazy val stateOpResolver = mock[InstanceUpdateOpResolver]
lazy val clock = ConstantClock()
lazy val now = clock.now()
lazy val healthCheckManager: HealthCheckManager = mock[HealthCheckManager]
lazy val healthCheckManagerProvider: Provider[HealthCheckManager] = new Provider[HealthCheckManager] {
override def get(): HealthCheckManager = healthCheckManager
}
lazy val schedulerActor: TestProbe = TestProbe()
lazy val schedulerActorProvider = new Provider[ActorRef] {
override def get(): ActorRef = schedulerActor.ref
}
lazy val groupManager: GroupManager = mock[GroupManager]
lazy val groupManagerProvider: Provider[GroupManager] = new Provider[GroupManager] {
override def get(): GroupManager = groupManager
}
lazy val launchQueue: LaunchQueue = mock[LaunchQueue]
lazy val launchQueueProvider: Provider[LaunchQueue] = new Provider[LaunchQueue] {
override def get(): LaunchQueue = launchQueue
}
lazy val schedulerDriver: SchedulerDriver = mock[SchedulerDriver]
lazy val eventBus: EventStream = mock[EventStream]
lazy val taskStatusEmitter: TaskStatusEmitter = mock[TaskStatusEmitter]
lazy val taskStatusEmitterProvider: Provider[TaskStatusEmitter] = new Provider[TaskStatusEmitter] {
override def get(): TaskStatusEmitter = taskStatusEmitter
}
lazy val guiceModule = new CoreGuiceModule(system.settings.config)
// Use module method to ensure that we keep the list of steps in sync with the test.
lazy val statusUpdateSteps = guiceModule.taskStatusUpdateSteps(
notifyHealthCheckManager,
notifyRateLimiter,
notifyLaunchQueue,
emitUpdate,
postToEventStream,
scaleApp
)
// task status update steps
lazy val notifyHealthCheckManager = new NotifyHealthCheckManagerStepImpl(healthCheckManagerProvider)
lazy val notifyRateLimiter = new NotifyRateLimiterStepImpl(launchQueueProvider, groupManagerProvider)
lazy val postToEventStream = new PostToEventStreamStepImpl(eventBus)
lazy val notifyLaunchQueue = new NotifyLaunchQueueStepImpl(launchQueueProvider)
lazy val emitUpdate = new TaskStatusEmitterPublishStepImpl(taskStatusEmitterProvider)
lazy val scaleApp = new ScaleAppUpdateStepImpl(schedulerActorProvider)
lazy val processor = new InstanceOpProcessorImpl(instanceTrackerProbe.ref, instanceRepository, stateOpResolver, config)
def verifyNoMoreInteractions(): Unit = {
instanceTrackerProbe.expectNoMsg(0.seconds)
noMoreInteractions(instanceRepository)
noMoreInteractions(stateOpResolver)
}
}
"InstanceOpProcessorImpl" should {
"process update with success" in {
val f = new Fixture
val appId = PathId("/app")
Given("a taskRepository")
val builder = TestInstanceBuilder.newBuilderWithLaunchedTask(appId)
val instance = builder.getInstance()
val stateOp = builder.stateOpUpdate(MesosTaskStatusTestHelper.runningHealthy())
val mesosStatus = stateOp.mesosStatus
val expectedEffect = InstanceUpdateEffect.Update(instance, Some(instance), events = Nil)
val ack = InstanceTrackerActor.Ack(f.opSender.ref, expectedEffect)
f.stateOpResolver.resolve(stateOp) returns Future.successful(expectedEffect)
f.instanceRepository.get(instance.instanceId) returns Future.successful(Some(instance))
f.instanceRepository.store(instance) returns Future.successful(Done)
When("the processor processes an update")
val result = f.processor.process(
InstanceOpProcessor.Operation(deadline, f.opSender.ref, instance.instanceId, stateOp)
)
And("the taskTracker replies immediately")
f.instanceTrackerProbe.expectMsg(InstanceTrackerActor.StateChanged(ack))
f.instanceTrackerProbe.reply(())
And("the processor replies with unit accordingly")
result.futureValue should be(()) // first wait for the call to complete
Then("The StateOpResolver is called")
verify(f.stateOpResolver).resolve(stateOp)
And("it calls store")
verify(f.instanceRepository).store(instance)
And("no more interactions")
f.verifyNoMoreInteractions()
}
"process update with failing taskRepository.store but successful load of existing task" in {
val f = new Fixture
val appId = PathId("/app")
Given("a taskRepository and existing task")
val builder = TestInstanceBuilder.newBuilderWithLaunchedTask(appId)
val instance = builder.getInstance()
val stateOp = builder.stateOpUpdate(MesosTaskStatusTestHelper.running())
val expectedEffect = InstanceUpdateEffect.Update(instance, Some(instance), events = Nil)
val ack = InstanceTrackerActor.Ack(f.opSender.ref, expectedEffect)
f.stateOpResolver.resolve(stateOp) returns Future.successful(expectedEffect)
f.instanceRepository.store(instance) returns Future.failed(new RuntimeException("fail"))
f.instanceRepository.get(instance.instanceId) returns Future.successful(Some(instance))
When("the processor processes an update")
var result: Try[Unit] = Failure(new RuntimeException("test executing failed"))
val logs = CaptureLogEvents.forBlock {
val resultF = f.processor.process(
InstanceOpProcessor.Operation(deadline, f.opSender.ref, instance.instanceId, stateOp)
)
f.instanceTrackerProbe.expectMsg(InstanceTrackerActor.StateChanged(ack))
f.instanceTrackerProbe.reply(())
result = Try(resultF.futureValue) // linter:ignore:VariableAssignedUnusedValue // we need to complete the future here to get all the logs
}
Then("The StateOpResolver is called")
verify(f.stateOpResolver).resolve(stateOp)
Then("it calls store")
verify(f.instanceRepository).store(instance)
And("logs a warning after detecting the error")
logs.filter(l => l.getLevel == Level.WARN && l.getMessage.contains(s"[${instance.instanceId.idString}]")) should have size 1
And("loads the task")
verify(f.instanceRepository).get(instance.instanceId)
And("it replies with unit immediately because the task is as expected")
result should be(Success(()))
And("no more interactions")
f.verifyNoMoreInteractions()
}
"process update with failing taskRepository.store and successful load of non-existing task" in {
val f = new Fixture
val appId = PathId("/app")
Given("a taskRepository and no task")
val builder = TestInstanceBuilder.newBuilderWithLaunchedTask(appId)
val instance = builder.getInstance()
val stateOp = builder.stateOpUpdate(MesosTaskStatusTestHelper.running())
val expectedEffect = InstanceUpdateEffect.Update(instance, Some(instance), events = Nil)
val storeException: RuntimeException = new scala.RuntimeException("fail")
val ack = InstanceTrackerActor.Ack(f.opSender.ref, InstanceUpdateEffect.Failure(storeException))
f.stateOpResolver.resolve(stateOp) returns Future.successful(expectedEffect)
f.instanceRepository.store(instance) returns Future.failed(storeException)
f.instanceRepository.get(instance.instanceId) returns Future.successful(None)
When("the processor processes an update")
var result: Try[Unit] = Failure(new RuntimeException("test executing failed"))
val logs = CaptureLogEvents.forBlock {
val resultF = f.processor.process(
InstanceOpProcessor.Operation(deadline, f.opSender.ref, instance.instanceId, stateOp)
)
f.instanceTrackerProbe.expectMsg(InstanceTrackerActor.StateChanged(ack))
f.instanceTrackerProbe.reply(())
result = Try(resultF.futureValue) // linter:ignore:VariableAssignedUnusedValue // we need to complete the future here to get all the logs
}
Then("The StateOpResolver is called")
verify(f.stateOpResolver).resolve(stateOp)
Then("it calls store")
verify(f.instanceRepository).store(instance)
And("logs a warning after detecting the error")
logs.filter(l => l.getLevel == Level.WARN && l.getMessage.contains(s"[${instance.instanceId.idString}]")) should have size 1
And("loads the task")
verify(f.instanceRepository).get(instance.instanceId)
And("it replies with unit immediately because the task is as expected")
result should be(Success(()))
And("no more interactions")
f.verifyNoMoreInteractions()
}
"process update with failing taskRepository.store and load also fails" in {
val f = new Fixture
val appId = PathId("/app")
Given("a taskRepository and existing task")
val builder = TestInstanceBuilder.newBuilderWithLaunchedTask(appId)
val instance = builder.getInstance()
val storeFailed: RuntimeException = new scala.RuntimeException("store failed")
val stateOp = builder.stateOpUpdate(MesosTaskStatusTestHelper.running())
val expectedEffect = InstanceUpdateEffect.Update(instance, Some(instance), events = Nil)
f.stateOpResolver.resolve(stateOp) returns Future.successful(expectedEffect)
f.instanceRepository.store(instance) returns Future.failed(storeFailed)
f.instanceRepository.get(instance.instanceId) returns Future.failed(new RuntimeException("task failed"))
When("the processor processes an update")
var result: Try[Unit] = Failure(new RuntimeException("test executing failed"))
val logs = CaptureLogEvents.forBlock {
result = Try(f.processor.process( // linter:ignore:VariableAssignedUnusedValue
InstanceOpProcessor.Operation(deadline, f.opSender.ref, instance.instanceId, stateOp)
).futureValue) // we need to complete the future here to get all the logs
}
Then("The StateOpResolver is called")
verify(f.stateOpResolver).resolve(stateOp)
Then("it calls store")
verify(f.instanceRepository).store(instance)
And("loads the task")
verify(f.instanceRepository).get(instance.instanceId)
And("it replies with the original error")
result.isFailure shouldBe true
result.failed.get.getCause.getMessage should be(storeFailed.getMessage)
And("logs a two warnings, for store and for task")
logs.filter(l => l.getLevel == Level.WARN && l.getMessage.contains(s"[${instance.instanceId.idString}]")) should have size 2
And("no more interactions")
f.verifyNoMoreInteractions()
}
"process expunge with success" in {
val f = new Fixture
val appId = PathId("/app")
Given("a taskRepository")
val builder = TestInstanceBuilder.newBuilderWithLaunchedTask(appId)
val instance = builder.getInstance()
val stateOp = builder.stateOpExpunge()
val expectedEffect = InstanceUpdateEffect.Expunge(instance, events = Nil)
val ack = InstanceTrackerActor.Ack(f.opSender.ref, expectedEffect)
f.stateOpResolver.resolve(stateOp) returns Future.successful(expectedEffect)
f.instanceRepository.delete(instance.instanceId) returns Future.successful(Done)
When("the processor processes an update")
val result = f.processor.process(
InstanceOpProcessor.Operation(deadline, f.opSender.ref, instance.instanceId, InstanceUpdateOperation.ForceExpunge(instance.instanceId))
)
f.instanceTrackerProbe.expectMsg(InstanceTrackerActor.StateChanged(ack))
f.instanceTrackerProbe.reply(())
Then("it replies with unit immediately")
result.futureValue should be(())
Then("The StateOpResolver is called")
verify(f.stateOpResolver).resolve(stateOp)
And("it calls expunge")
verify(f.instanceRepository).delete(instance.instanceId)
And("no more interactions")
f.verifyNoMoreInteractions()
}
"process expunge, expunge fails but task reload confirms that task is gone" in {
val f = new Fixture
val appId = PathId("/app")
Given("a taskRepository")
val builder = TestInstanceBuilder.newBuilderWithLaunchedTask(appId)
val instance = builder.getInstance()
val stateOp = builder.stateOpExpunge()
val expectedEffect = InstanceUpdateEffect.Expunge(instance, events = Nil)
val ack = InstanceTrackerActor.Ack(f.opSender.ref, expectedEffect)
f.stateOpResolver.resolve(stateOp) returns Future.successful(expectedEffect)
f.instanceRepository.delete(instance.instanceId) returns Future.failed(new RuntimeException("expunge fails"))
f.instanceRepository.get(instance.instanceId) returns Future.successful(None)
When("the processor processes an update")
val result = f.processor.process(
InstanceOpProcessor.Operation(deadline, f.opSender.ref, instance.instanceId, InstanceUpdateOperation.ForceExpunge(instance.instanceId))
)
f.instanceTrackerProbe.expectMsg(InstanceTrackerActor.StateChanged(ack))
f.instanceTrackerProbe.reply(())
Then("it replies with unit immediately")
result.futureValue should be(())
Then("The StateOpResolver is called")
verify(f.stateOpResolver).resolve(stateOp)
And("it calls expunge")
verify(f.instanceRepository).delete(instance.instanceId)
And("it reloads the task")
verify(f.instanceRepository).get(instance.instanceId)
And("the taskTracker gets the update")
And("no more interactions")
f.verifyNoMoreInteractions()
}
"process expunge, expunge fails and task reload suggests that task is still there" in {
val f = new Fixture
val appId = PathId("/app")
Given("a taskRepository")
val builder = TestInstanceBuilder.newBuilderWithLaunchedTask(appId)
val instance = builder.getInstance()
val expungeException: RuntimeException = new scala.RuntimeException("expunge fails")
val stateOp = builder.stateOpExpunge()
val resolvedEffect = InstanceUpdateEffect.Expunge(instance, events = Nil)
val ack = InstanceTrackerActor.Ack(f.opSender.ref, InstanceUpdateEffect.Failure(expungeException))
f.stateOpResolver.resolve(stateOp) returns Future.successful(resolvedEffect)
f.instanceRepository.delete(instance.instanceId) returns Future.failed(expungeException)
f.instanceRepository.get(instance.instanceId) returns Future.successful(Some(instance))
When("the processor processes an update")
val result = f.processor.process(
InstanceOpProcessor.Operation(deadline, f.opSender.ref, instance.instanceId, InstanceUpdateOperation.ForceExpunge(instance.instanceId))
)
f.instanceTrackerProbe.expectMsg(InstanceTrackerActor.StateChanged(ack))
f.instanceTrackerProbe.reply(())
Then("it replies with unit immediately")
result.futureValue should be(()) // first we make sure that the call completes
Then("The StateOpResolver is called")
verify(f.stateOpResolver).resolve(stateOp)
And("it calls expunge")
verify(f.instanceRepository).delete(instance.instanceId)
And("it reloads the task")
verify(f.instanceRepository).get(instance.instanceId)
And("no more interactions")
f.verifyNoMoreInteractions()
}
"process statusUpdate with NoChange" in {
val f = new Fixture
val appId = PathId("/app")
Given("a statusUpdateResolver and an update")
val builder = TestInstanceBuilder.newBuilderWithLaunchedTask(appId)
val instance = builder.getInstance()
val stateOp = builder.stateOpUpdate(MesosTaskStatusTestHelper.running())
val expectedEffect = InstanceUpdateEffect.Noop(instance.instanceId)
f.stateOpResolver.resolve(stateOp) returns Future.successful(expectedEffect)
f.instanceRepository.get(instance.instanceId) returns Future.successful(Some(instance))
When("the processor processes an update")
val result = f.processor.process(
InstanceOpProcessor.Operation(deadline, testActor, instance.instanceId, stateOp)
)
Then("it replies with unit immediately")
result.futureValue should be(())
Then("The StateOpResolver is called")
verify(f.stateOpResolver).resolve(stateOp)
And("the initiator gets its ack")
expectMsg(expectedEffect)
And("no more interactions")
f.verifyNoMoreInteractions()
}
"process statusUpdate with Failure" in {
val f = new Fixture
val appId = PathId("/app")
Given("a statusUpdateResolver and an update")
val builder = TestInstanceBuilder.newBuilderWithLaunchedTask(appId)
val instance = builder.getInstance()
val stateOp = builder.stateOpReservationTimeout()
val exception = new RuntimeException("ReservationTimeout on LaunchedEphemeral is unexpected")
val expectedEffect = InstanceUpdateEffect.Failure(exception)
f.stateOpResolver.resolve(stateOp) returns Future.successful(expectedEffect)
f.instanceRepository.get(instance.instanceId) returns Future.successful(Some(instance))
When("the processor processes an update")
val result = f.processor.process(
InstanceOpProcessor.Operation(deadline, testActor, instance.instanceId, stateOp)
)
Then("it replies with unit immediately")
result.futureValue should be(())
Then("The StateOpResolver is called")
verify(f.stateOpResolver).resolve(stateOp)
And("the initiator gets its ack")
expectMsg(Status.Failure(exception))
And("no more interactions")
f.verifyNoMoreInteractions()
}
"the rate limiter will inform the launch queue of apps" in {
val f = new Fixture
val appId = PathId("/pod")
val app = AppDefinition(id = appId)
val version = Timestamp.now()
val builder = TestInstanceBuilder.newBuilder(appId, version, version).addTaskDropped()
f.groupManager.appVersion(appId, version.toOffsetDateTime) returns Future.successful(Some(app))
f.groupManager.podVersion(appId, version.toOffsetDateTime) returns Future.successful(None)
f.notifyRateLimiter.process(InstanceUpdated(builder.instance, None, Nil)).futureValue
verify(f.groupManager).appVersion(appId, version.toOffsetDateTime)
verify(f.groupManager).podVersion(appId, version.toOffsetDateTime)
verify(f.launchQueue).addDelay(app)
}
"the rate limiter will inform the launch queue of pods" in {
val f = new Fixture
val podId = PathId("/pod")
val pod = PodDefinition(id = podId)
val version = Timestamp.now()
val builder = TestInstanceBuilder.newBuilder(podId, version, version).addTaskDropped()
f.groupManager.appVersion(podId, version.toOffsetDateTime) returns Future.successful(None)
f.groupManager.podVersion(podId, version.toOffsetDateTime) returns Future.successful(Some(pod))
f.notifyRateLimiter.process(InstanceUpdated(builder.instance, None, Nil)).futureValue
verify(f.groupManager).appVersion(podId, version.toOffsetDateTime)
verify(f.groupManager).podVersion(podId, version.toOffsetDateTime)
verify(f.launchQueue).addDelay(pod)
}
}
}
| natemurthy/marathon | src/test/scala/mesosphere/marathon/core/task/tracker/impl/InstanceOpProcessorImplTest.scala | Scala | apache-2.0 | 20,664 |
/*
* @author Philip Stutz
* @author Mihaela Verman
*
* Copyright 2012 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.worker
import java.io.DataInputStream
import java.io.DataOutputStream
import java.io.File
import java.io.FileInputStream
import java.io.FileOutputStream
import java.lang.management.ManagementFactory
import scala.annotation.elidable
import scala.annotation.elidable.ASSERTION
import scala.annotation.tailrec
import scala.util.Random
import scala.collection.mutable.Queue
import com.signalcollect.Edge
import com.signalcollect.GraphEditor
import com.signalcollect.Vertex
import com.signalcollect.interfaces.ComplexAggregation
import com.signalcollect.interfaces.EdgeAddedToNonExistentVertexHandler
import com.signalcollect.interfaces.EdgeAddedToNonExistentVertexHandlerFactory
import com.signalcollect.interfaces.EdgeId
import com.signalcollect.interfaces.ExistingVertexHandler
import com.signalcollect.interfaces.ExistingVertexHandlerFactory
import com.signalcollect.interfaces.MessageBus
import com.signalcollect.interfaces.NodeStatistics
import com.signalcollect.interfaces.Scheduler
import com.signalcollect.interfaces.SchedulerFactory
import com.signalcollect.interfaces.SentMessagesStats
import com.signalcollect.interfaces.Storage
import com.signalcollect.interfaces.StorageFactory
import com.signalcollect.interfaces.UndeliverableSignalHandler
import com.signalcollect.interfaces.UndeliverableSignalHandlerFactory
import com.signalcollect.interfaces.Worker
import com.signalcollect.interfaces.WorkerApi
import com.signalcollect.interfaces.WorkerStatistics
import com.signalcollect.interfaces.WorkerStatus
import com.signalcollect.serialization.DefaultSerializer
import com.sun.management.OperatingSystemMXBean
import akka.actor.ActorRef
import akka.event.LoggingAdapter
import com.signalcollect.util.IteratorConcatenator
import scala.util.control.NonFatal
/**
* Main implementation of the WorkerApi interface.
*/
class WorkerImplementation[@specialized(Int, Long) Id, Signal](
val workerId: Int,
val numberOfWorkers: Int,
val numberOfNodes: Int,
val isEagerIdleDetectionEnabled: Boolean,
val isThrottlingEnabled: Boolean,
val supportBlockingGraphModificationsInVertex: Boolean,
val messageBus: MessageBus[Id, Signal],
val log: LoggingAdapter,
val storageFactory: StorageFactory[Id, Signal],
val schedulerFactory: SchedulerFactory[Id, Signal],
val existingVertexHandlerFactory: ExistingVertexHandlerFactory[Id, Signal],
val undeliverableSignalHandlerFactory: UndeliverableSignalHandlerFactory[Id, Signal],
val edgeAddedToNonExistentVertexHandlerFactory: EdgeAddedToNonExistentVertexHandlerFactory[Id, Signal],
var signalThreshold: Double,
var collectThreshold: Double)
extends Worker[Id, Signal] {
val pendingModifications = new IteratorConcatenator[GraphEditor[Id, Signal] => Unit]()
val workersPerNode = numberOfWorkers / numberOfNodes // Assumes that there is the same number of workers on all nodes.
val nodeId = getNodeId(workerId)
  val pingPongSchedulingIntervalInMilliseconds = 4 // interval in milliseconds between ping-pong exchanges
val maxPongDelay = 4e+6 // pong is considered delayed after waiting for 4ms
var scheduler: Scheduler[Id, Signal] = _
var graphEditor: GraphEditor[Id, Signal] = _
initialize
var messageBusFlushed: Boolean = _
var isIdleDetectionEnabled: Boolean = _
var slowPongDetected: Boolean = _ // If the worker had to wait too long for the last pong reply to its ping request.
var operationsScheduled: Boolean = _ // If executing operations has been scheduled.
var isIdle: Boolean = _ // Idle status that was last reported to the coordinator.
var isPaused: Boolean = _
var allWorkDoneWhenContinueSent: Boolean = _
var lastStatusUpdate: Long = _
var vertexStore: Storage[Id, Signal] = _
var pingSentTimestamp: Long = _
var pingPongScheduled: Boolean = _
var waitingForPong: Boolean = _
var existingVertexHandler: ExistingVertexHandler[Id, Signal] = _
var undeliverableSignalHandler: UndeliverableSignalHandler[Id, Signal] = _
var edgeAddedToNonExistentVertexHandler: EdgeAddedToNonExistentVertexHandler[Id, Signal] = _
isIdleDetectionEnabled = false // This one should not be reset.
operationsScheduled = false // This one should not be reset.
val counters: WorkerOperationCounters = new WorkerOperationCounters()
def initialize(): Unit = {
messageBusFlushed = true
slowPongDetected = false
isIdle = true
isPaused = true
allWorkDoneWhenContinueSent = false
lastStatusUpdate = System.currentTimeMillis
vertexStore = storageFactory.createInstance
pendingModifications.clear
pingSentTimestamp = 0
pingPongScheduled = false
waitingForPong = false
scheduler = schedulerFactory.createInstance(this)
graphEditor = if (supportBlockingGraphModificationsInVertex) {
new WorkerGraphEditor[Id, Signal](workerId, this, messageBus)
} else {
messageBus.getGraphEditor
}
existingVertexHandler = existingVertexHandlerFactory.createInstance
undeliverableSignalHandler = undeliverableSignalHandlerFactory.createInstance
edgeAddedToNonExistentVertexHandler = edgeAddedToNonExistentVertexHandlerFactory.createInstance
}
def getNodeId(workerId: Int): Int = workerId / workersPerNode
def getRandomPingPongPartner(): Int = Random.nextInt(numberOfWorkers)
def sendPing(partner: Int)(): Unit = {
if (messageBus.isInitialized) {
pingPongScheduled = true
waitingForPong = true
pingSentTimestamp = System.nanoTime
messageBus.sendToWorkerUncounted(partner, Ping(workerId))
}
}
/**
* Resets all state apart from that which is part of the constructor.
* Also does not reset the part of the counters which is part of
* termination detection.
*/
override def reset(): Unit = {
initialize
counters.resetOperationCounters
messageBus.reset
}
override def shutdown(): Unit = {
vertexStore.close()
}
def isAllWorkDone(): Boolean = {
if (isPaused) {
pendingModifications.isEmpty
} else {
isConverged
}
}
override def initializeIdleDetection(): Unit = {
isIdleDetectionEnabled = true
// Ensure that the current status is immediately reported.
if (isEagerIdleDetectionEnabled) {
messageBus.sendToNodeUncounted(nodeId, getWorkerStatusForNode)
} else {
sendStatusToCoordinator
}
// Initiate PingPong throttling.
if (isThrottlingEnabled) {
if (numberOfNodes > 1) {
// Sent to a random worker on the next node initially.
        val partnerNodeId = (nodeId + 1) % numberOfNodes // wrap around to node 0 after the last node
val workerOnNode = Random.nextInt(workersPerNode)
val workerId = partnerNodeId * workersPerNode + workerOnNode
sendPing(workerId)
} else {
sendPing(getRandomPingPongPartner)
}
}
}
def sendStatusToCoordinator(): Unit = {
if (messageBus.isInitialized) {
val status = getWorkerStatusForCoordinator
messageBus.sendToCoordinatorUncounted(status)
}
}
def isConverged: Boolean = {
vertexStore.toCollect.isEmpty &&
vertexStore.toSignal.isEmpty &&
messageBusFlushed &&
!pendingModifications.hasNext
}
def executeCollectOperationOfVertex(vertex: Vertex[Id, _, Id, Signal], addToSignal: Boolean = true) {
counters.collectOperationsExecuted += 1
vertex.executeCollectOperation(graphEditor)
if (addToSignal && vertex.scoreSignal > signalThreshold) {
vertexStore.toSignal.put(vertex)
}
vertexStore.updateStateOfVertex(vertex)
}
def executeSignalOperationOfVertex(vertex: Vertex[Id, _, Id, Signal]) {
counters.signalOperationsExecuted += 1
vertex.executeSignalOperation(graphEditor)
vertexStore.updateStateOfVertex(vertex)
}
def processBulkSignalWithoutIds(signals: Array[Signal], targetIds: Array[Id]) {
val size = signals.length
var i = 0
while (i < size) {
processSignalWithoutSourceId(signals(i), targetIds(i))
i += 1
}
}
override def processSignalWithSourceId(signal: Signal, targetId: Id, sourceId: Id) {
val vertex = vertexStore.vertices.get(targetId)
if (vertex != null) {
if (vertex.deliverSignalWithSourceId(signal, sourceId, graphEditor)) {
counters.collectOperationsExecuted += 1
if (vertex.scoreSignal > signalThreshold) {
scheduler.handleCollectOnDelivery(vertex)
}
} else {
if (vertex.scoreCollect > collectThreshold) {
vertexStore.toCollect.put(vertex)
}
}
vertexStore.updateStateOfVertex(vertex)
} else {
undeliverableSignalHandler.vertexForSignalNotFound(signal, targetId, Some(sourceId), graphEditor)
}
messageBusFlushed = false
}
override def processSignalWithoutSourceId(signal: Signal, targetId: Id) {
val vertex = vertexStore.vertices.get(targetId)
if (vertex != null) {
if (vertex.deliverSignalWithoutSourceId(signal, graphEditor)) {
counters.collectOperationsExecuted += 1
if (vertex.scoreSignal > signalThreshold) {
scheduler.handleCollectOnDelivery(vertex)
}
} else {
if (vertex.scoreCollect > collectThreshold) {
vertexStore.toCollect.put(vertex)
}
}
vertexStore.updateStateOfVertex(vertex)
} else {
undeliverableSignalHandler.vertexForSignalNotFound(signal, targetId, None, graphEditor)
}
messageBusFlushed = false
}
override def startComputation {
if (pendingModifications.hasNext) {
log.warning("Need to call `awaitIdle` after executiong `loadGraph` or pending operations will interfere with the computation.")
}
isPaused = false
}
override def pauseComputation {
isPaused = true
}
override def signalStep: Boolean = {
counters.signalSteps += 1
vertexStore.toSignal.process(executeSignalOperationOfVertex(_))
messageBus.flush
messageBusFlushed = true
true // always returns true, just to make it blocking.
}
override def collectStep: Boolean = {
counters.collectSteps += 1
vertexStore.toCollect.process(executeCollectOperationOfVertex(_))
vertexStore.toSignal.isEmpty
}
override def addVertex(vertex: Vertex[Id, _, Id, Signal]) {
if (vertexStore.vertices.put(vertex)) {
counters.verticesAdded += 1
counters.outgoingEdgesAdded += vertex.edgeCount
try {
vertex.afterInitialization(graphEditor)
} catch {
case NonFatal(e) => log.error(e, s"Error in `afterInitialization` method of vertex with ID ${vertex.id}: ${e.getMessage}")
}
if (vertex.scoreSignal > signalThreshold) {
vertexStore.toSignal.put(vertex)
}
vertexStore.updateStateOfVertex(vertex)
} else {
val existing = vertexStore.vertices.get(vertex.id)
existingVertexHandler.mergeVertices(existing, vertex, graphEditor)
vertexStore.updateStateOfVertex(existing)
}
messageBusFlushed = false
}
override def addEdge(sourceId: Id, edge: Edge[Id]) {
def addEdgeToVertex(vertex: Vertex[Id, _, Id, Signal]) {
if (vertex.addEdge(edge, graphEditor)) {
counters.outgoingEdgesAdded += 1
if (vertex.scoreSignal > signalThreshold) {
vertexStore.toSignal.put(vertex)
}
vertexStore.updateStateOfVertex(vertex)
}
}
val v = vertexStore.vertices.get(sourceId)
if (v == null) {
val vertexOption = edgeAddedToNonExistentVertexHandler.handleImpossibleEdgeAddition(edge, sourceId, graphEditor)
vertexOption.foreach { vertex =>
addVertex(vertex)
addEdgeToVertex(vertex)
}
} else {
addEdgeToVertex(v)
}
messageBusFlushed = false
}
override def removeEdge(edgeId: EdgeId[Id]) {
val vertex = vertexStore.vertices.get(edgeId.sourceId)
if (vertex != null) {
if (vertex.removeEdge(edgeId.targetId, graphEditor)) {
counters.outgoingEdgesRemoved += 1
if (vertex.scoreSignal > signalThreshold) {
vertexStore.toSignal.put(vertex)
}
vertexStore.updateStateOfVertex(vertex)
} else {
log.warning("Outgoing edge not found when trying to remove edge with id " + edgeId)
}
} else {
log.warning("Source vertex not found found when trying to remove outgoing edge with id " + edgeId)
}
}
override def removeVertex(vertexId: Id) {
val vertex = vertexStore.vertices.get(vertexId)
if (vertex != null) {
processRemoveVertex(vertex)
} else {
log.warning("Should remove vertex with id " + vertexId + ": could not find this vertex.")
}
}
protected def processRemoveVertex(vertex: Vertex[Id, _, Id, Signal]) {
val edgesRemoved = vertex.removeAllEdges(graphEditor)
counters.outgoingEdgesRemoved += edgesRemoved
counters.verticesRemoved += 1
vertex.beforeRemoval(graphEditor)
vertexStore.vertices.remove(vertex.id)
vertexStore.toCollect.remove(vertex.id)
vertexStore.toSignal.remove(vertex.id)
}
override def modifyGraph(graphModification: GraphEditor[Id, Signal] => Unit, vertexIdHint: Option[Id]) {
graphModification(graphEditor)
messageBusFlushed = false
}
override def loadGraph(graphModifications: Iterator[GraphEditor[Id, Signal] => Unit], vertexIdHint: Option[Id]) {
pendingModifications.appendIterator(graphModifications) // To avoid https://issues.scala-lang.org/browse/SI-8428, which is not really fixed.
}
override def setSignalThreshold(st: Double) {
signalThreshold = st
}
override def setCollectThreshold(ct: Double) {
collectThreshold = ct
}
override def recalculateScores {
vertexStore.vertices.foreach(recalculateVertexScores(_))
}
override def recalculateScoresForVertexWithId(vertexId: Id) {
val vertex = vertexStore.vertices.get(vertexId)
if (vertex != null) {
recalculateVertexScores(vertex)
}
}
protected def recalculateVertexScores(vertex: Vertex[Id, _, Id, Signal]) {
if (vertex.scoreCollect > collectThreshold) {
vertexStore.toCollect.put(vertex)
}
if (vertex.scoreSignal > signalThreshold) {
vertexStore.toSignal.put(vertex)
}
}
override def forVertexWithId[VertexType <: Vertex[Id, _, Id, Signal], ResultType](vertexId: Id, f: VertexType => ResultType): ResultType = {
val vertex = vertexStore.vertices.get(vertexId)
if (vertex != null) {
val result = f(vertex.asInstanceOf[VertexType])
vertexStore.updateStateOfVertex(vertex)
result
} else {
throw new Exception("Vertex with id " + vertexId + " not found.")
}
}
override def foreachVertex(f: Vertex[Id, _, Id, Signal] => Unit) {
vertexStore.vertices.foreach { vertex =>
f(vertex)
vertexStore.updateStateOfVertex(vertex)
}
}
override def foreachVertexWithGraphEditor(f: GraphEditor[Id, Signal] => Vertex[Id, _, Id, Signal] => Unit) {
val function = f(graphEditor)
vertexStore.vertices.foreach { vertex =>
function(vertex)
vertexStore.updateStateOfVertex(vertex)
}
messageBusFlushed = false
}
override def aggregateOnWorker[WorkerResult](aggregationOperation: ComplexAggregation[WorkerResult, _]): WorkerResult = {
aggregationOperation.aggregationOnWorker(vertexStore.vertices.stream)
}
override def aggregateAll[WorkerResult, EndResult](aggregationOperation: ComplexAggregation[WorkerResult, EndResult]): EndResult = {
throw new UnsupportedOperationException("AkkaWorker does not support this operation.")
}
/**
* Creates a snapshot of all the vertices in all workers.
* Does not store the toSignal/toCollect collections or pending messages.
* Should only be used when the workers are idle.
* Overwrites any previous snapshot that might exist.
*/
override def snapshot {
// Overwrites previous file if it should exist.
val snapshotFileOutput = new DataOutputStream(new FileOutputStream(s"$workerId.snapshot"))
try {
vertexStore.vertices.foreach { vertex =>
val bytes = DefaultSerializer.write(vertex)
snapshotFileOutput.writeInt(bytes.length)
snapshotFileOutput.write(bytes)
}
} catch {
case t: Throwable =>
val msg = s"Problem while serializing a vertex, this will prevent 'restore' from working correctly: ${t.getMessage}"
println(msg)
t.printStackTrace
log.error(t, msg)
} finally {
snapshotFileOutput.close
}
}
/**
* Restores the last snapshot of all the vertices in all workers.
* Does not store the toSignal/toCollect collections or pending messages.
* Should only be used when the workers are idle.
*/
override def restore {
reset
val maxSerializedSize = 64768
val snapshotFile = new File(s"$workerId.snapshot")
if (snapshotFile.exists) {
val snapshotFileInput = new DataInputStream(new FileInputStream(snapshotFile))
val buffer = new Array[Byte](maxSerializedSize)
while (snapshotFileInput.available > 0) {
val serializedLength = snapshotFileInput.readInt
assert(serializedLength <= maxSerializedSize)
val bytesRead = snapshotFileInput.read(buffer, 0, serializedLength)
assert(bytesRead == serializedLength)
val vertex = DefaultSerializer.read[Vertex[Id, _, Id, Signal]](buffer)
addVertex(vertex)
}
snapshotFileInput.close
}
}
/**
* Deletes the worker snapshots if they exist.
*/
override def deleteSnapshot {
val snapshotFile = new File(s"$workerId.snapshot")
if (snapshotFile.exists) {
snapshotFile.delete
}
}
def getWorkerStatusForCoordinator: WorkerStatus = {
WorkerStatus(
workerId = workerId,
timeStamp = System.nanoTime,
isIdle = isIdle,
isPaused = isPaused,
messagesSent = SentMessagesStats(
messageBus.messagesSentToWorkers,
messageBus.messagesSentToNodes,
messageBus.messagesSentToCoordinator,
messageBus.messagesSentToOthers),
messagesReceived = counters.messagesReceived)
}
def getWorkerStatusForNode: WorkerStatus = {
val ws = WorkerStatus(
workerId = workerId,
timeStamp = System.nanoTime,
isIdle = isIdle,
isPaused = isPaused,
messagesSent = SentMessagesStats(
messageBus.messagesSentToWorkers,
messageBus.messagesSentToNodes,
messageBus.messagesSentToCoordinator,
messageBus.messagesSentToOthers),
messagesReceived = counters.messagesReceived)
ws.messagesSent.nodes(nodeId) = ws.messagesSent.nodes(nodeId)
ws
}
override def getIndividualWorkerStatistics: List[WorkerStatistics] = List(getWorkerStatistics)
override def getWorkerStatistics: WorkerStatistics = {
WorkerStatistics(
workerId = Some(workerId),
toSignalSize = vertexStore.toSignal.size,
toCollectSize = vertexStore.toCollect.size,
collectOperationsExecuted = counters.collectOperationsExecuted,
signalOperationsExecuted = counters.signalOperationsExecuted,
numberOfVertices = vertexStore.vertices.size,
verticesAdded = counters.verticesAdded,
verticesRemoved = counters.verticesRemoved,
numberOfOutgoingEdges = counters.outgoingEdgesAdded - counters.outgoingEdgesRemoved, //only valid if no edges are removed during execution
outgoingEdgesAdded = counters.outgoingEdgesAdded,
outgoingEdgesRemoved = counters.outgoingEdgesRemoved,
signalMessagesReceived = counters.signalMessagesReceived,
bulkSignalMessagesReceived = counters.bulkSignalMessagesReceived,
continueMessagesReceived = counters.continueMessagesReceived,
requestMessagesReceived = counters.requestMessagesReceived,
otherMessagesReceived = counters.otherMessagesReceived,
messagesSentToWorkers = messageBus.messagesSentToWorkers.sum,
messagesSentToNodes = messageBus.messagesSentToNodes.sum,
messagesSentToCoordinator = messageBus.messagesSentToCoordinator,
messagesSentToOthers = messageBus.messagesSentToOthers)
}
// TODO: Move this method to Node and use proper node id.
override def getIndividualNodeStatistics: List[NodeStatistics] = List(getNodeStatistics)
// TODO: Move this method to Node and use proper node id.
override def getNodeStatistics: NodeStatistics = {
val runtime: Runtime = Runtime.getRuntime
try {
val osBean: OperatingSystemMXBean = ManagementFactory.getPlatformMXBean(classOf[OperatingSystemMXBean])
NodeStatistics(
nodeId = Some(workerId),
os = System.getProperty("os.name"),
runtime_mem_total = runtime.totalMemory,
runtime_mem_max = runtime.maxMemory,
runtime_mem_free = runtime.freeMemory,
runtime_cores = runtime.availableProcessors,
jmx_committed_vms = osBean.getCommittedVirtualMemorySize,
jmx_mem_free = osBean.getFreePhysicalMemorySize,
jmx_mem_total = osBean.getTotalPhysicalMemorySize,
jmx_swap_free = osBean.getFreeSwapSpaceSize,
jmx_swap_total = osBean.getTotalSwapSpaceSize,
jmx_process_load = osBean.getProcessCpuLoad,
jmx_process_time = osBean.getProcessCpuTime,
jmx_system_load = osBean.getSystemCpuLoad)
} catch {
case notSupported: NoSuchMethodError =>
NodeStatistics(
nodeId = Some(workerId),
os = System.getProperty("os.name"),
runtime_mem_total = runtime.totalMemory,
runtime_mem_max = runtime.maxMemory,
runtime_mem_free = runtime.freeMemory,
runtime_cores = runtime.availableProcessors)
}
}
  protected def logInitialization(): Unit = {
if (messageBus.isInitialized) {
val msg = s"Worker $workerId has a fully initialized message bus."
log.debug(msg)
}
}
def registerWorker(otherWorkerId: Int, worker: ActorRef) {
counters.requestMessagesReceived -= 1 // Registration messages are not counted.
messageBus.registerWorker(otherWorkerId, worker)
    logInitialization
}
def registerNode(nodeId: Int, node: ActorRef) {
counters.requestMessagesReceived -= 1 // Registration messages are not counted.
messageBus.registerNode(nodeId, node)
    logInitialization
}
def registerCoordinator(coordinator: ActorRef) {
counters.requestMessagesReceived -= 1 // Registration messages are not counted.
messageBus.registerCoordinator(coordinator)
    logInitialization
}
}
trait WorkerInterceptor[Id, Signal] extends WorkerApi[Id, Signal] {
abstract override def addVertex(vertex: Vertex[Id, _, Id, Signal]) = {
println("addVertex")
super.addVertex(vertex)
}
abstract override def addEdge(sourceId: Id, edge: Edge[Id]) = {
println("addEdge")
super.addEdge(sourceId, edge)
}
abstract override def removeVertex(vertexId: Id) = {
println("removeVertex")
super.removeVertex(vertexId)
}
abstract override def removeEdge(edgeId: EdgeId[Id]) = {
println("removeEdge")
super.removeEdge(edgeId)
}
abstract override def processSignalWithSourceId(signal: Signal, targetId: Id, sourceId: Id) = {
println("processSignalWithSourceId")
super.processSignalWithSourceId(signal, targetId, sourceId)
}
abstract override def processSignalWithoutSourceId(signal: Signal, targetId: Id) = {
println("processSignalWithoutSourceId")
super.processSignalWithoutSourceId(signal, targetId)
}
abstract override def modifyGraph(graphModification: GraphEditor[Id, Signal] => Unit, vertexIdHint: Option[Id] = None) = {
println("modifyGraph")
super.modifyGraph(graphModification, vertexIdHint)
}
abstract override def loadGraph(graphModifications: Iterator[GraphEditor[Id, Signal] => Unit], vertexIdHint: Option[Id] = None) = {
println("loadGraph")
super.loadGraph(graphModifications, vertexIdHint)
}
abstract override def setSignalThreshold(signalThreshold: Double) = {
println("setSignalThreshold")
super.setSignalThreshold(signalThreshold)
}
abstract override def setCollectThreshold(collectThreshold: Double) = {
println("setCollectThreshold")
super.setCollectThreshold(collectThreshold)
}
abstract override def recalculateScores = {
println("recalculateScores")
super.recalculateScores
}
abstract override def recalculateScoresForVertexWithId(vertexId: Id) = {
println("recalculateScoresForVertexWithId")
super.recalculateScoresForVertexWithId(vertexId)
}
abstract override def forVertexWithId[VertexType <: Vertex[Id, _, Id, Signal], ResultType](vertexId: Id, f: VertexType => ResultType): ResultType = {
println("forVertexWithId")
super.forVertexWithId(vertexId, f)
}
abstract override def foreachVertex(f: Vertex[Id, _, Id, Signal] => Unit) = {
println("foreachVertex")
super.foreachVertex(f)
}
abstract override def foreachVertexWithGraphEditor(f: GraphEditor[Id, Signal] => Vertex[Id, _, Id, Signal] => Unit) {
println("foreachVertexWithGraphEditor")
super.foreachVertexWithGraphEditor(f)
}
abstract override def aggregateOnWorker[WorkerResult](aggregationOperation: ComplexAggregation[WorkerResult, _]): WorkerResult = {
println("aggregateOnWorker")
super.aggregateOnWorker(aggregationOperation)
}
abstract override def aggregateAll[WorkerResult, EndResult](aggregationOperation: ComplexAggregation[WorkerResult, EndResult]): EndResult = {
println("aggregateAll")
super.aggregateAll(aggregationOperation)
}
abstract override def pauseComputation() = {
println("pauseComputation")
super.pauseComputation
}
abstract override def startComputation() = {
println("startComputation")
super.startComputation
}
abstract override def signalStep(): Boolean = {
println("signalStep")
super.signalStep
}
abstract override def collectStep(): Boolean = {
println("collectStep")
super.collectStep
}
abstract override def getWorkerStatistics(): WorkerStatistics = {
println("getWorkerStatistics")
super.getWorkerStatistics
}
abstract override def getIndividualWorkerStatistics: List[WorkerStatistics] = {
println("getIndividualWorkerStatistics")
super.getIndividualWorkerStatistics
}
abstract override def reset(): Unit = {
println("reset")
super.reset
}
abstract override def shutdown(): Unit = {
println("shutdown")
super.shutdown
}
abstract override def initializeIdleDetection(): Unit = {
println("initializeIdleDetection")
super.initializeIdleDetection
}
abstract override def getNodeStatistics(): NodeStatistics = {
println("getNodeStatistics")
super.getNodeStatistics
}
abstract override def getIndividualNodeStatistics(): List[NodeStatistics] = {
println("getIndividualNodeStatistics")
super.getIndividualNodeStatistics
}
abstract override def snapshot(): Unit = {
println("snapshot")
super.snapshot
}
abstract override def restore(): Unit = {
println("restore")
super.restore
}
abstract override def deleteSnapshot(): Unit = {
println("deleteSnapshot")
super.deleteSnapshot
}
}
| uzh/signal-collect | src/main/scala/com/signalcollect/worker/WorkerImplementation.scala | Scala | apache-2.0 | 27,721 |
package com.criteo.cuttle
import java.sql.{Connection, PreparedStatement, ResultSet}
import scala.concurrent.Future
import cats.effect.IO
import doobie.util.transactor.Transactor
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.FunSuite
import com.criteo.cuttle.ThreadPools.Implicits.sideEffectThreadPool
import com.criteo.cuttle.ThreadPools.Implicits.sideEffectContextShift
import com.criteo.cuttle.ThreadPools._
import com.criteo.cuttle.Utils.logger
import com.criteo.cuttle.Metrics.Prometheus
class ExecutorSpec extends FunSuite with TestScheduling {
test("Executor should return metrics aggregated by job and tag") {
val connection: Connection = {
val mockConnection = mock(classOf[Connection])
val statement = mock(classOf[PreparedStatement])
val resultSet = mock(classOf[ResultSet])
when(mockConnection.prepareStatement(any(classOf[String]))).thenReturn(statement)
when(statement.executeQuery()).thenReturn(resultSet)
mockConnection
}
val testExecutor = new Executor[TestScheduling](
Seq.empty,
xa = Transactor
.fromConnection[IO](connection, sideEffectThreadPool)
.copy(strategy0 = doobie.util.transactor.Strategy.void),
logger,
"project_name",
"test_version"
)(RetryStrategy.ExponentialBackoffRetryStrategy)
testExecutor.updateFinishedExecutionCounters(buildExecutionForJob(fooJob), "success")
testExecutor.updateFinishedExecutionCounters(buildExecutionForJob(fooJob), "success")
testExecutor.updateFinishedExecutionCounters(buildExecutionForJob(untaggedJob), "success")
testExecutor.updateFinishedExecutionCounters(buildExecutionForJob(fooBarJob), "success")
testExecutor.updateFinishedExecutionCounters(buildExecutionForJob(untaggedJob), "failure")
testExecutor.updateFinishedExecutionCounters(buildExecutionForJob(fooBarJob), "failure")
val metrics = Prometheus.serialize(
testExecutor.getMetrics(Set(fooJob))(
getStateAtomic = _ => {
((5, 1), 2)
},
runningExecutions = Seq(
buildExecutionForJob(fooJob) -> ExecutionStatus.ExecutionRunning,
buildExecutionForJob(fooJob) -> ExecutionStatus.ExecutionRunning,
buildExecutionForJob(fooJob) -> ExecutionStatus.ExecutionWaiting,
buildExecutionForJob(fooBarJob) -> ExecutionStatus.ExecutionRunning,
buildExecutionForJob(untaggedJob) -> ExecutionStatus.ExecutionRunning,
buildExecutionForJob(untaggedJob) -> ExecutionStatus.ExecutionWaiting
),
failingExecutions = Seq(
buildExecutionForJob(fooBarJob),
buildExecutionForJob(fooBarJob),
buildExecutionForJob(untaggedJob)
)
)
)
println(metrics)
val expectedMetrics =
"""# HELP cuttle_scheduler_stat_count The number of jobs that we have in concrete states
|# TYPE cuttle_scheduler_stat_count gauge
|cuttle_scheduler_stat_count {type="running"} 5
|cuttle_scheduler_stat_count {type="waiting"} 1
|cuttle_scheduler_stat_count {type="failing"} 2
|# HELP cuttle_scheduler_stat_count_by_tag The number of executions that we have in concrete states by tag
|# TYPE cuttle_scheduler_stat_count_by_tag gauge
|cuttle_scheduler_stat_count_by_tag {tag="foo", type="waiting"} 1
|cuttle_scheduler_stat_count_by_tag {tag="foo", type="running"} 3
|cuttle_scheduler_stat_count_by_tag {tag="bar", type="failing"} 2
|cuttle_scheduler_stat_count_by_tag {tag="foo", type="failing"} 2
|cuttle_scheduler_stat_count_by_tag {tag="bar", type="running"} 1
|# HELP cuttle_scheduler_stat_count_by_job The number of executions that we have in concrete states by job
|# TYPE cuttle_scheduler_stat_count_by_job gauge
|cuttle_scheduler_stat_count_by_job {job="untagged_job", type="waiting"} 1
|cuttle_scheduler_stat_count_by_job {job="foo_bar_job", type="running"} 1
|cuttle_scheduler_stat_count_by_job {job="untagged_job", type="running"} 1
|cuttle_scheduler_stat_count_by_job {job="foo_job", type="waiting"} 1
|cuttle_scheduler_stat_count_by_job {job="untagged_job", type="failing"} 1
|cuttle_scheduler_stat_count_by_job {job="foo_job", type="running"} 2
|cuttle_scheduler_stat_count_by_job {job="foo_bar_job", type="failing"} 2
|# HELP cuttle_executions_total The number of finished executions that we have in concrete states by job and by tag
|# TYPE cuttle_executions_total counter
|cuttle_executions_total {job_id="foo_job", tags="foo", type="failure"} 0
|cuttle_executions_total {job_id="foo_bar_job", tags="foo,bar", type="success"} 1
|cuttle_executions_total {job_id="foo_job", tags="foo", type="success"} 2
|cuttle_executions_total {job_id="foo_bar_job", tags="foo,bar", type="failure"} 1
|cuttle_executions_total {job_id="untagged_job", type="success"} 1
|cuttle_executions_total {job_id="untagged_job", type="failure"} 1
|""".stripMargin
assert(metrics == expectedMetrics)
}
private def buildJob(jobId: String, tags: Set[Tag] = Set.empty): Job[TestScheduling] =
Job(jobId, TestScheduling(), jobId, tags = tags) { implicit execution =>
Future { Completed }(execution.executionContext)
}
private def buildExecutionForJob(job: Job[TestScheduling]): Execution[TestScheduling] =
Execution[TestScheduling](
id = java.util.UUID.randomUUID.toString,
job = job,
context = TestContext(),
streams = new ExecutionStreams {
override private[cuttle] def writeln(str: CharSequence): Unit = ???
},
platforms = Seq.empty,
"project_name",
"test_version",
List.empty
)
private val fooTag = Tag("foo")
private val barTag = Tag("bar")
private val fooJob: Job[TestScheduling] = buildJob("foo_job", Set(fooTag))
private val fooBarJob: Job[TestScheduling] = buildJob("foo_bar_job", Set(fooTag, barTag))
private val untaggedJob: Job[TestScheduling] = buildJob("untagged_job")
}
| criteo/cuttle | core/src/test/scala/com/criteo/cuttle/ExecutorSpec.scala | Scala | apache-2.0 | 6,150 |
package breeze.linalg
import breeze.linalg.support._
import breeze.generic.UFunc.{InPlaceImpl, UImpl, InPlaceImpl2, UImpl2}
/**
 * Class for wrappers that broadcast an operation over the columns of an underlying tensor.
 * That is, denseMatrix(::, *) /= denseVector
* @param underlying the tensor (or equivalent) being broadcasted
* @tparam T the type of the tensor
*/
case class BroadcastedColumns[T, B](underlying: T) extends BroadcastedLike[T, B, BroadcastedColumns[T, B]] {
def repr = this
}
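// Illustrative usage sketch (not part of the original source; exact operator support depends on
// the Breeze version in use). Broadcasting over columns lets a per-column operation be written
// without an explicit loop, mirroring the scaladoc above:
//
//   val m = DenseMatrix((1.0, 2.0), (3.0, 4.0))
//   val v = DenseVector(1.0, 2.0)
//   m(::, *) /= v               // divides every column of `m` element-wise by `v`, in place
//   val colSums = sum(m(::, *)) // applies a UFunc column-wise via the broadcastOp implicits below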
object BroadcastedColumns {
implicit def canMapValues[T, ColumnType, ResultColumn, Result]
(implicit cc: CanCollapseAxis[T, Axis._0.type, ColumnType, ResultColumn, Result])
:CanMapValues[BroadcastedColumns[T, ColumnType], ColumnType, ResultColumn, Result] = {
new CanMapValues[BroadcastedColumns[T, ColumnType], ColumnType, ResultColumn, Result] {
def map(from: BroadcastedColumns[T, ColumnType], fn: (ColumnType) => ResultColumn): Result = {
cc(from.underlying, Axis._0){fn}
}
/** Maps all active key-value pairs from the given collection. */
def mapActive(from: BroadcastedColumns[T, ColumnType], fn: (ColumnType) => ResultColumn): Result = {
cc(from.underlying, Axis._0){fn}
}
}
}
implicit def handholdCMV[T, ColumnType] = new CanMapValues.HandHold[BroadcastedColumns[T, ColumnType], ColumnType]
implicit def broadcastOp[Op, T, ColumnType, OpResult, Result](implicit handhold: CanCollapseAxis.HandHold[T, Axis._0.type, ColumnType],
op: UImpl[Op, ColumnType, OpResult],
cc: CanCollapseAxis[T, Axis._0.type, ColumnType, OpResult, Result]):UImpl[Op, BroadcastedColumns[T, ColumnType], Result] = {
new UImpl[Op, BroadcastedColumns[T, ColumnType], Result] {
def apply(v: BroadcastedColumns[T, ColumnType]): Result = {
cc(v.underlying, Axis._0){op(_)}
}
}
}
implicit def broadcastInplaceOp[Op, T, ColumnType, RHS, OpResult](implicit handhold: CanCollapseAxis.HandHold[T, Axis._0.type, ColumnType],
op: InPlaceImpl[Op, ColumnType],
cc: CanIterateAxis[T, Axis._0.type, ColumnType]):InPlaceImpl[Op, BroadcastedColumns[T, ColumnType]] = {
new InPlaceImpl[Op, BroadcastedColumns[T, ColumnType]] {
def apply(v: BroadcastedColumns[T, ColumnType]) {
cc(v.underlying, Axis._0){op(_)}
}
}
}
implicit def broadcastOp2[Op, T, ColumnType, RHS, OpResult, Result](implicit handhold: CanCollapseAxis.HandHold[T, Axis._0.type, ColumnType],
op: UImpl2[Op, ColumnType, RHS, OpResult],
cc: CanCollapseAxis[T, Axis._0.type, ColumnType, OpResult, Result]):UImpl2[Op, BroadcastedColumns[T, ColumnType], RHS, Result] = {
new UImpl2[Op, BroadcastedColumns[T, ColumnType], RHS, Result] {
def apply(v: BroadcastedColumns[T, ColumnType], v2: RHS): Result = {
cc(v.underlying, Axis._0){op(_, v2)}
}
}
}
implicit def broadcastInplaceOp2[Op, T, ColumnType, RHS, OpResult](implicit handhold: CanCollapseAxis.HandHold[T, Axis._0.type, ColumnType],
op: InPlaceImpl2[Op, ColumnType, RHS],
cc: CanIterateAxis[T, Axis._0.type, ColumnType]):InPlaceImpl2[Op, BroadcastedColumns[T, ColumnType], RHS] = {
new InPlaceImpl2[Op, BroadcastedColumns[T, ColumnType], RHS] {
def apply(v: BroadcastedColumns[T, ColumnType], v2: RHS) {
cc(v.underlying, Axis._0){op(_, v2)}
}
}
}
}
| wavelets/breeze | src/main/scala/breeze/linalg/BroadcastedColumns.scala | Scala | apache-2.0 | 3,905 |
package com.peterpotts.mobius
import scala.math.BigDecimal.RoundingMode
case class Matrix(left: Vector, right: Vector) {
lazy val transpose = Matrix(Vector(left.top, right.top), Vector(left.bottom, right.bottom))
lazy val determinant = left.top * right.bottom - left.bottom * right.top
lazy val spin = determinant.signum
lazy val inverse = Matrix(Vector(right.bottom, -left.bottom), Vector(-right.top, left.top))
lazy val complement = Matrix(Vector(right.top, right.bottom), Vector(-left.top, -left.bottom))
lazy val gcd = left.gcd gcd right.gcd
lazy val normal = gcd == BigInt(1)
lazy val normalize = if (normal) this else this / gcd
lazy val magnitude = math.abs(left.magnitude - right.magnitude)
lazy val (min, max) = if (spin < 0) (left, right) else (right, left)
lazy val valid = min.signum >= 0 && max.signum >= 0
lazy val isPositive = left.signum == right.signum && right.signum != 0
lazy val info = Matrix(max, min)
lazy val empty = info.isPositive == info.inverse.isPositive
lazy val decimal = {
val lower = min.decimal
val upper = max.decimal
val diff = upper - lower
lower.setScale(math.min(lower.precision, upper.precision) - diff.precision, RoundingMode.HALF_UP)
}
def isSubsetOf(that: Matrix): Boolean = (that.inverse * this).isPositive
def *(that: BigInt): Matrix = Matrix(left * that, right * that)
def /(that: BigInt): Matrix = Matrix(left / that, right / that)
def *(that: Vector): Vector = Vector(
left.top * that.top + right.top * that.bottom,
left.bottom * that.top + right.bottom * that.bottom)
def *(that: Matrix): Matrix = Matrix(this * that.left, this * that.right)
def *(that: Tensor): Tensor = Tensor(this * that.left, this * that.right)
}
object Matrix {
val identity = Matrix(Vector(1, 0), Vector(0, 1))
val negation = Matrix(Vector(-1, 0), Vector(0, 1))
val reciprocal = Matrix(Vector(0, 1), Vector(1, 0))
}
| peterpotts/mobius | src/main/scala/com/peterpotts/mobius/Matrix.scala | Scala | mit | 1,923 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.io.IOException
import java.util.{List => JList}
import javax.security.auth.login.LoginException
import scala.collection.JavaConverters._
import org.apache.commons.logging.Log
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hadoop.hive.shims.Utils
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hive.service.{AbstractService, Service, ServiceException}
import org.apache.hive.service.Service.STATE
import org.apache.hive.service.auth.HiveAuthFactory
import org.apache.hive.service.cli._
import org.apache.hive.service.server.HiveServer2
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
private[hive] class SparkSQLCLIService(hiveServer: HiveServer2, sqlContext: SQLContext)
extends CLIService(hiveServer)
with ReflectedCompositeService {
override def init(hiveConf: HiveConf) {
setSuperField(this, "hiveConf", hiveConf)
val sparkSqlSessionManager = new SparkSQLSessionManager(hiveServer, sqlContext)
setSuperField(this, "sessionManager", sparkSqlSessionManager)
addService(sparkSqlSessionManager)
var sparkServiceUGI: UserGroupInformation = null
var httpUGI: UserGroupInformation = null
if (UserGroupInformation.isSecurityEnabled) {
try {
HiveAuthFactory.loginFromKeytab(hiveConf)
sparkServiceUGI = Utils.getUGI()
setSuperField(this, "serviceUGI", sparkServiceUGI)
} catch {
case e @ (_: IOException | _: LoginException) =>
throw new ServiceException("Unable to login to kerberos with given principal/keytab", e)
}
// Try creating spnego UGI if it is configured.
val principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_PRINCIPAL).trim
val keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_KEYTAB).trim
if (principal.nonEmpty && keyTabFile.nonEmpty) {
try {
httpUGI = HiveAuthFactory.loginFromSpnegoKeytabAndReturnUGI(hiveConf)
setSuperField(this, "httpUGI", httpUGI)
} catch {
case e: IOException =>
throw new ServiceException("Unable to login to spnego with given principal " +
s"$principal and keytab $keyTabFile: $e", e)
}
}
}
initCompositeService(hiveConf)
}
override def getInfo(sessionHandle: SessionHandle, getInfoType: GetInfoType): GetInfoValue = {
getInfoType match {
case GetInfoType.CLI_SERVER_NAME => new GetInfoValue("Spark SQL")
case GetInfoType.CLI_DBMS_NAME => new GetInfoValue("Spark SQL")
case GetInfoType.CLI_DBMS_VER => new GetInfoValue(sqlContext.sparkContext.version)
case _ => super.getInfo(sessionHandle, getInfoType)
}
}
}
private[thriftserver] trait ReflectedCompositeService { this: AbstractService =>
def initCompositeService(hiveConf: HiveConf) {
// Emulating `CompositeService.init(hiveConf)`
val serviceList = getAncestorField[JList[Service]](this, 2, "serviceList")
serviceList.asScala.foreach(_.init(hiveConf))
// Emulating `AbstractService.init(hiveConf)`
invoke(classOf[AbstractService], this, "ensureCurrentState", classOf[STATE] -> STATE.NOTINITED)
setAncestorField(this, 3, "hiveConf", hiveConf)
invoke(classOf[AbstractService], this, "changeState", classOf[STATE] -> STATE.INITED)
getAncestorField[Log](this, 3, "LOG").info(s"Service: $getName is inited.")
}
}
| minixalpha/spark | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIService.scala | Scala | apache-2.0 | 4,320 |
class C1 {
def a {}
case class A
}
class C2 extends C1 {
def b {}
case class B
}
class C3 extends C2 {
def c {}
case class C
println(/* line: 2 */a)
println(/* line: 7 */b)
println(/* line: 12 */c)
println(super./* line: 2 */a)
println(super./* line: 7 */b)
println(super./* resolved: false */c)
println(/* */A.getClass)
println(classOf[/* line: 3 */A])
println(/* */B.getClass)
println(classOf[/* line: 8 */B])
println(/* */C.getClass)
println(classOf[/* line: 13 */C])
println(super./* */A.getClass)
println(classOf[super./* line: 3 */A])
println(super./* */B.getClass)
println(classOf[super./* line: 8 */B])
println(super./* resolved: false */C.getClass)
println(classOf[super./* resolved: false */C])
}
| LPTK/intellij-scala | testdata/resolve2/inheritance/super/multiple/Class.scala | Scala | apache-2.0 | 764 |
package org.bitcoins.script.crypto
import org.scalatest.{MustMatchers, FlatSpec}
/**
* Created by chris on 1/8/16.
*/
class CryptoOperationsFactoryTest extends FlatSpec with MustMatchers {
"CryptoOperationsFactory" must "match strings with crypto operations" in {
CryptoOperation.fromString("OP_CHECKSIG") must be (Some(OP_CHECKSIG))
CryptoOperation.fromString("OP_HASH160") must be (Some(OP_HASH160))
CryptoOperation.fromString("OP_SHA256") must be (Some(OP_SHA256))
CryptoOperation.fromString("RANDOM") must be (None)
}
}
| Christewart/scalacoin | src/test/scala/org/bitcoins/script/crypto/CryptoOperationsFactoryTest.scala | Scala | mit | 549 |
package com.tdl.study.scala
/**
 * Traits
*/
object Func003 {
trait Ord {
def < (that: Any): Boolean
def <= (that: Any): Boolean = (this < that) || (this == that)
def > (that: Any): Boolean = !(this <= that)
def >= (that: Any): Boolean = !(this < that)
}
class Date(y: Int, m: Int, d: Int) extends Ord {
def year = y
def month = m
def day = d
override def toString: String = s"$year-$month-$day"
override def equals(that: Any): Boolean = {
that.isInstanceOf[Date] && {
val o = that.asInstanceOf[Date]
o.day == day && o.month == month && o.year == year
}
}
def <(that: Any): Boolean = {
if (!that.isInstanceOf[Date])
sys.error("cannot compare " + that + " and a Date")
val o = that.asInstanceOf[Date]
(year < o.year) || (year == o.year && (month < o.month || (month == o.month && (day < o.day))))
}
}
}
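// Illustrative usage sketch (dates are arbitrary): Date only defines `<` and `equals`, while
// `<=`, `>` and `>=` come for free from the Ord trait's default implementations.
//
//   val d1 = new Func003.Date(2015, 1, 1)
//   val d2 = new Func003.Date(2016, 6, 30)
//   println(d1 < d2)   // true  -- defined directly in Date
//   println(d1 >= d2)  // false -- derived from Ord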
| taideli/study-parent | study-scala/src/test/scala/com/tdl/study/scala/Func003.scala | Scala | gpl-3.0 | 921 |
package rsce.entity
import java.util.concurrent.ConcurrentHashMap
import com.google.inject.Guice
import rsce.entity.traits.Networked
class World {
val networkedEntities = new ConcurrentHashMap[Int, Entity with Networked]
def getNetworkedEntity(id : Int) = networkedEntities.get(id)
def addNetworkedEntity(entity : Entity with Networked) = networkedEntities.put(entity.getChannelId, entity)
def removeNetworkedEntity(id : Int) = networkedEntities.remove(id)
}
| Joe0/RSC-Emulator | src/main/scala/rsce/entity/World.scala | Scala | mit | 471 |
package skinny.controller
import org.scalatra.test.scalatest._
import org.scalatra.{ FutureSupport, AsyncResult }
import scala.concurrent.{ Await, Future, ExecutionContext }
import scala.concurrent.duration._
import scala.language.postfixOps
// on Scala 2.10.0 the ScalaTest #equal matcher works with inner case classes, but it fails on higher versions
case class Sample(id: Long, firstName: String)
case class Person(name: Option[String] = None, parent: Person, children: Seq[Person] = Nil)
class JSONFeatureSpec extends ScalatraFlatSpec {
behavior of "JSONFeature"
object SampleController extends SkinnyServlet with FutureSupport {
implicit val executor = ExecutionContext.Implicits.global
get("/sync") {
responseAsJSON(Sample(1, "Alice"))
}
get("/async") {
val fSample = Future { Sample(1, "Alice") }
new AsyncResult() {
override val is: Future[_] = fSample.map(responseAsJSON(_))
}
}
def toJSONString1 = toJSONString(Sample(1, "Alice"))
def toJSONString2 = toJSONString(List(Sample(1, "Alice"), Sample(2, "Bob")))
def toJSONString3 = toPrettyJSONString(List(Sample(1, "Alice"), Sample(2, "Bob")))
def toJSONString4 = toJSONString(Sample(1, "Alice"), false)
def toJSONString5 = toJSONString(List(Sample(1, "Alice"), Sample(2, "Bob")), false)
def toJSONString6 = toPrettyJSONString(List(Sample(1, "Alice"), Sample(2, "Bob")), false)
val alice = Person(Some("Alice"), null)
val bob = Person(Some("Bob"), alice, Nil)
val chris = Person(Some("Chris"), alice, Seq(bob))
val dennis = Person(Some("Dennis"), alice, Seq(bob, chris))
def toJSONString7 = toJSONString(dennis)
def fromJSON1: Option[Sample] = fromJSONString[Sample]("""{"id":1,"first_name":"Alice"}""")
def fromJSON2: Option[List[Sample]] = fromJSONString[List[Sample]]("""[{"id":1,"first_name":"Alice"},{"id":2,"first_name":"Bob"}]""")
def fromJSON3: Option[Sample] = fromJSONString[Sample]("""{"id":1,"firstName":"Alice"}""")
def fromJSON4: Option[List[Sample]] = fromJSONString[List[Sample]]("""[{"id":1,"firstName":"Alice"},{"id":2,"firstName":"Bob"}]""")
def fromJSON5: Option[Person] = fromJSONString[Person](
"""{"name":"Dennis","parent":{"name":"Alice","parent":null,"children":[]},"children":[{"name":"Bob","parent":{"name":"Alice","parent":null,"children":[]},"children":[]},{"name":"Chris","parent":{"name":"Alice","parent":null,"children":[]},"children":[{"name":"Bob","parent":{"name":"Alice","parent":null,"children":[]},"children":[]}]}]}""")
}
object Sample2Controller extends SkinnyController {
override def useUnderscoreKeysForJSON = false
override def useJSONVulnerabilityProtection = true
def toJSONString1 = toJSONString(Sample(1, "Alice"))
def toJSONString2 = toJSONString(List(Sample(1, "Alice"), Sample(2, "Bob")))
}
it should "have toJSON" in {
SampleController.toJSONString1 should equal("""{"id":1,"first_name":"Alice"}""")
SampleController.toJSONString2 should equal("""[{"id":1,"first_name":"Alice"},{"id":2,"first_name":"Bob"}]""")
SampleController.toJSONString3 should equal(
"""[ {
| "id" : 1,
| "first_name" : "Alice"
|}, {
| "id" : 2,
| "first_name" : "Bob"
|} ]""".stripMargin)
SampleController.toJSONString4 should equal("""{"id":1,"firstName":"Alice"}""")
SampleController.toJSONString5 should equal("""[{"id":1,"firstName":"Alice"},{"id":2,"firstName":"Bob"}]""")
SampleController.toJSONString6 should equal(
"""[ {
| "id" : 1,
| "firstName" : "Alice"
|}, {
| "id" : 2,
| "firstName" : "Bob"
|} ]""".stripMargin)
SampleController.toJSONString7 should equal(
"""{"name":"Dennis","parent":{"name":"Alice","parent":null,"children":[]},"children":[{"name":"Bob","parent":{"name":"Alice","parent":null,"children":[]},"children":[]},{"name":"Chris","parent":{"name":"Alice","parent":null,"children":[]},"children":[{"name":"Bob","parent":{"name":"Alice","parent":null,"children":[]},"children":[]}]}]}""")
// Normal synced responseAsJson should work
addServlet(SampleController, "/*")
get("/sync") {
body should equal("""{"id":1,"first_name":"Alice"}""")
}
// Test the async version
implicit val ec = ExecutionContext.Implicits.global
val listOfFutureBodies = (1 to 5).map(_ => Future { get("/async") { body } })
val fListOfBodies = Future.sequence(listOfFutureBodies)
Await.result(fListOfBodies, atMost = Duration.Inf).foreach(_ should equal("""{"id":1,"first_name":"Alice"}"""))
}
it should "have fromJSON" in {
SampleController.fromJSON1.get should equal(Sample(1, "Alice"))
SampleController.fromJSON2.get should equal(List(Sample(1, "Alice"), Sample(2, "Bob")))
SampleController.fromJSON3.get should equal(Sample(1, "Alice"))
SampleController.fromJSON4.get should equal(List(Sample(1, "Alice"), Sample(2, "Bob")))
SampleController.fromJSON5.get should equal(SampleController.dennis)
}
it should "have toJSON for camelCase" in {
Sample2Controller.toJSONString1 should equal(
""")]}',
|{"id":1,"firstName":"Alice"}""".stripMargin)
Sample2Controller.toJSONString2 should equal(
""")]}',
|[{"id":1,"firstName":"Alice"},{"id":2,"firstName":"Bob"}]""".stripMargin)
}
}
| BlackPrincess/skinny-framework | framework/src/test/scala/skinny/controller/JSONFeatureSpec.scala | Scala | mit | 5,386 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import com.twitter.zipkin.builder.Scribe
import com.twitter.zipkin.cassandra
import com.twitter.zipkin.collector.builder.CollectorServiceBuilder
import com.twitter.zipkin.storage.Store
val keyspaceBuilder = cassandra.Keyspace.static(nodes = Set("dev-cassandra1.finntech.no"), port = 7613)
val cassandraBuilder = Store.Builder(
cassandra.StorageBuilder(keyspaceBuilder),
cassandra.IndexBuilder(keyspaceBuilder),
cassandra.AggregatesBuilder(keyspaceBuilder)
)
CollectorServiceBuilder(Scribe.Interface(categories = Set("zipkin")))
.writeTo(cassandraBuilder)
| eirslett/zipkin | zipkin-collector-service/config/collector-cassandra.scala | Scala | apache-2.0 | 1,165 |
/**
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* OpenAPI spec version:
*
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*/
package io.swagger.client.model
case class CreateCombatParameters (
actorIds: List[String]
)
| CucumisSativus/rpgRollerBackend | functionalTest/src/main/scala/io/swagger/client/model/CreateCombatParameters.scala | Scala | mit | 418 |
package com.datawizards.sparklocal.rdd
import com.datawizards.sparklocal.SparkLocalBaseTest
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class CountTest extends SparkLocalBaseTest {
test("Count result") {
assert(RDDAPI(Seq(1,2,3)).count() == 3)
}
test("Count equal") {
assertRDDOperationReturnsSameResult(Seq(1,2,3)) {
ds => ds.count()
}
}
}
| piotr-kalanski/spark-local | src/test/scala/com/datawizards/sparklocal/rdd/CountTest.scala | Scala | apache-2.0 | 430 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
/**
* A <code>Reporter</code> that prints test status information to
* the standard output stream.
*
* @author Bill Venners
*/
private[scalatest] class StandardOutReporter(presentAllDurations: Boolean,
presentInColor: Boolean, presentShortStackTraces: Boolean, presentFullStackTraces: Boolean)
extends PrintReporter(Console.out, presentAllDurations, presentInColor,
presentShortStackTraces, presentFullStackTraces) {
def this() = this(false, false, false, false)
/**
* Does nothing, because don't want to dispose the standard output stream.
*/
override def dispose() {
}
}
| yyuu/scalatest | src/main/scala/org/scalatest/tools/StandardOutReporter.scala | Scala | apache-2.0 | 1,260 |
/*
* Copyright © 2014 TU Berlin ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package compiler.opt
import api._
import api.flink.FlinkNtv
import compiler.BaseCompilerSpec
import compiler.FlinkCompiler
import compiler.RuntimeCompiler
import compiler.ir.ComprehensionSyntax._
import compiler.ir.DSCFAnnotations._
import test.schema.Graphs._
import java.util.UUID
class FlinkSpecializeLoopsSpec extends BaseCompilerSpec with FlinkAware {
override val compiler = new RuntimeCompiler with FlinkCompiler
import compiler._
import u.reify
lazy val testPipeline: u.Expr[Any] => u.Tree =
pipeline(true)(
Core.lift,
FlinkSpecializeLoops.specializeLoops.timed
).compose(_.tree)
lazy val dscfPipeline: u.Expr[Any] => u.Tree =
pipeline(true)(
Core.lift
).compose(_.tree)
protected override def wrapInClass(tree: u.Tree): u.Tree = {
import u.Quasiquote
val Cls = api.TypeName(UUID.randomUUID().toString)
val run = api.TermName(RuntimeCompiler.default.runMethod)
val prs = api.Tree.closure(tree).map { sym =>
val x = sym.name
val T = sym.info
q"val $x: $T"
}
q"""
class $Cls {
def $run(..$prs)(implicit flink: ${FlinkAPI.ExecutionEnvironment}) = $tree
}
"""
}
type Seq3 = Edge[Int]
val edges = for (c <- 'a' to 'y') yield Edge(c.toInt, c.toInt + 1)
val N = 5
"desugared for-loop #1" in withDefaultFlinkEnv(implicit flink => {
val act = testPipeline(reify {
val N = this.N
val edges = this.edges
val paths = FlinkDataSet(edges)
val from = Predef.intWrapper(0)
val to = from.until(N)
val it = to.toIterator
val i$1 = null.asInstanceOf[Int]
@whileLoop def while$1(i: Int, paths: DataBag[Edge[Int]]): DataBag[Edge[Int]] = {
val hasNext = it.hasNext
@loopBody def body$1(): DataBag[Edge[Int]] = {
val next = it.next()
val delta = comprehension[Edge[Int], DataBag]({
val e1 = generator[Edge[Int], DataBag]({
paths
})
val e2 = generator[Edge[Int], DataBag]({
paths
})
guard({
val dst = e1.dst
val src = e2.src
val eqs = dst == src
eqs
})
head[Edge[Int]]({
val src = e1.src
val dst = e2.dst
val edg = Edge(src, dst)
edg
})
})
val union = paths union delta
val distinct = union.distinct
while$1(next, distinct)
}
@suffix def suffix$1(): DataBag[Edge[Int]] = {
paths
}
if (hasNext) body$1()
else suffix$1()
}
while$1(i$1, paths)
})
val exp = dscfPipeline(reify {
val N = this.N
val edges = this.edges
val paths = FlinkDataSet(edges)
val reslt = FlinkNtv.iterate(paths)(N, paths => {
val delta = comprehension[Edge[Int], DataBag]({
val e1 = generator[Edge[Int], DataBag]({
paths
})
val e2 = generator[Edge[Int], DataBag]({
paths
})
guard({
val dst = e1.dst
val src = e2.src
val eqs = dst == src
eqs
})
head[Edge[Int]]({
val src = e1.src
val dst = e2.dst
val edg = Edge(src, dst)
edg
})
})
val union = paths union delta
val distinct = union.distinct
distinct
})
reslt
})
act shouldBe alphaEqTo(exp)
})
"while loop #1" in withDefaultFlinkEnv(implicit flink => {
val act = testPipeline(reify {
val N = this.N
val edges = this.edges
val paths = FlinkDataSet(edges)
val i$1 = 0
@whileLoop def while$1(i: Int, paths: DataBag[Edge[Int]]): DataBag[Edge[Int]] = {
val hasNext = i < N
@loopBody def body$1(): DataBag[Edge[Int]] = {
val i$2 = i + 1
val delta = comprehension[Edge[Int], DataBag]({
val e1 = generator[Edge[Int], DataBag]({
paths
})
val e2 = generator[Edge[Int], DataBag]({
paths
})
guard({
val dst = e1.dst
val src = e2.src
val eqs = dst == src
eqs
})
head[Edge[Int]]({
val src = e1.src
val dst = e2.dst
val edg = Edge(src, dst)
edg
})
})
val union = paths union delta
val distinct = union.distinct
while$1(i$2, distinct)
}
@suffix def suffix$1(): DataBag[Edge[Int]] = {
paths
}
if (hasNext) body$1()
else suffix$1()
}
while$1(i$1, paths)
})
val exp = dscfPipeline(reify {
val N = this.N
val edges = this.edges
val paths = FlinkDataSet(edges)
val reslt = FlinkNtv.iterate(paths)(N, paths => {
val delta = comprehension[Edge[Int], DataBag]({
val e1 = generator[Edge[Int], DataBag]({
paths
})
val e2 = generator[Edge[Int], DataBag]({
paths
})
guard({
val dst = e1.dst
val src = e2.src
val eqs = dst == src
eqs
})
head[Edge[Int]]({
val src = e1.src
val dst = e2.dst
val edg = Edge(src, dst)
edg
})
})
val union = paths union delta
val distinct = union.distinct
distinct
})
reslt
})
act shouldBe alphaEqTo(exp)
})
"while loop #2" in withDefaultFlinkEnv(implicit flink => {
val act = testPipeline(reify {
val N = this.N
val edges = this.edges
val paths = FlinkDataSet(edges)
val i$1 = 0
@whileLoop def while$1(i: Int, paths: DataBag[Edge[Int]]): DataBag[Edge[Int]] = {
val hasNext = i < N
@loopBody def body$1(): DataBag[Edge[Int]] = {
val i$2 = i + 1
val delta = comprehension[Edge[Int], DataBag]({
val e1 = generator[Edge[Int], DataBag]({
paths
})
val e2 = generator[Edge[Int], DataBag]({
paths
})
guard({
val dst = e1.dst
val src = e2.src
val eqs = dst == src
eqs
})
head[Edge[Int]]({
val src = e1.src
val dst = e2.dst
val edg = Edge(src, dst)
edg
})
})
val grpKey = (e: Edge[Int]) => {
val src = e.src
src
}
val groups = delta.groupBy(grpKey)
val result = comprehension[Edge[Int], DataBag]({
val group = generator[Group[Int, DataBag[Edge[Int]]], DataBag]({
groups
})
head[Edge[Int]]({
val src = group.key
val xs1 = group.values
val rs1 = xs1.size
val dst = rs1.toInt
val edg = Edge(src, dst)
edg
})
})
while$1(i$2, result)
}
@suffix def suffix$1(): DataBag[Edge[Int]] = {
paths
}
if (hasNext) body$1()
else suffix$1()
}
while$1(i$1, paths)
})
val exp = dscfPipeline(reify {
val N = this.N
val edges = this.edges
val paths = FlinkDataSet(edges)
val reslt = FlinkNtv.iterate(paths)(N, paths => {
val delta = comprehension[Edge[Int], DataBag]({
val e1 = generator[Edge[Int], DataBag]({
paths
})
val e2 = generator[Edge[Int], DataBag]({
paths
})
guard({
val dst = e1.dst
val src = e2.src
val eqs = dst == src
eqs
})
head[Edge[Int]]({
val src = e1.src
val dst = e2.dst
val edg = Edge(src, dst)
edg
})
})
val grpKey = (e: Edge[Int]) => {
val src = e.src
src
}
val groups = delta.groupBy(grpKey)
val result = comprehension[Edge[Int], DataBag]({
val group = generator[Group[Int, DataBag[Edge[Int]]], DataBag]({
groups
})
head[Edge[Int]]({
val src = group.key
val xs1 = group.values
val rs1 = xs1.size
val dst = rs1.toInt
val edg = Edge(src, dst)
edg
})
})
result
})
reslt
})
act shouldBe alphaEqTo(exp)
})
}
| emmalanguage/emma | emma-flink/src/test/scala/org/emmalanguage/compiler/opt/FlinkSpecializeLoopsSpec.scala | Scala | apache-2.0 | 9,399 |
package im.tox.antox.utils
import java.util
import android.content.{BroadcastReceiver, Context, Intent}
import android.net.{NetworkInfo, ConnectivityManager}
import android.preference.PreferenceManager
import im.tox.antox.tox.ToxSingleton
import scala.collection.JavaConversions._
trait ConnectionTypeChangeListener {
  // only called when the network is connected
def connectionTypeChange(connectionType: Int)
}
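// Illustrative registration sketch (the listener body is hypothetical): callers react to network
// type changes without polling ConnectivityManager themselves.
//
//   ConnectionManager.addConnectionTypeChangeListener(new ConnectionTypeChangeListener {
//     override def connectionTypeChange(connectionType: Int): Unit =
//       println(s"network type is now $connectionType") // e.g. ConnectivityManager.TYPE_WIFI
//   })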
object ConnectionManager {
private val listenerList = new util.ArrayList[ConnectionTypeChangeListener]()
private var lastConnectionType: Option[Int] = None
def addConnectionTypeChangeListener(listener: ConnectionTypeChangeListener): Unit = {
listenerList.add(listener)
}
def getConnctionType(context: Context): Int = {
val connectivityManager = context.getSystemService(Context.CONNECTIVITY_SERVICE)
.asInstanceOf[ConnectivityManager]
connectivityManager.getActiveNetworkInfo.getType
}
}
class ConnectionManager extends BroadcastReceiver {
override def onReceive(context: Context, intent: Intent) {
val connectivityManager = context.getSystemService(Context.CONNECTIVITY_SERVICE).asInstanceOf[ConnectivityManager]
val networkInfo = connectivityManager.getActiveNetworkInfo
if (networkInfo != null && networkInfo.isConnected) {
val connectionType = ConnectionManager.getConnctionType(context)
if (ConnectionManager.lastConnectionType.isEmpty || connectionType != ConnectionManager.lastConnectionType.get) {
for (listener <- ConnectionManager.listenerList) {
listener.connectionTypeChange(connectionType)
}
ConnectionManager.lastConnectionType = Some(connectionType)
}
if (ToxSingleton.dhtNodes.length == 0) {
ToxSingleton.updateDhtNodes(context)
}
}
}
}
| Ansa89/Antox | app/src/main/scala/im/tox/antox/utils/ConnectionManager.scala | Scala | gpl-3.0 | 1,828 |
package org.mitre.mandolin.mlp.spark
import org.mitre.mandolin.mlp._
import org.mitre.mandolin.optimize.spark.DistributedOnlineOptimizer
import org.mitre.mandolin.optimize.Updater
import org.apache.spark.SparkContext
import org.apache.spark.AccumulatorParam
object DistributedMMLPOptimizer {
def getDistributedOptimizer(sc: SparkContext, appSettings: MandolinMLPSettings, network: ANNetwork) = {
val weights = network.generateRandomWeights
val composeStrategy = appSettings.composeStrategy match {
case "maximum" => Maximum
case "average" => Average
case _ => Minimum
}
val l1ArrayVals = network.layers map {l => l.ltype.l1}
val l2ArrayVals = network.layers map {l => l.ltype.l2}
val mnArrayVals = network.layers map {l => l.ltype.maxNorm}
val l1Array = if (l1ArrayVals.max > 0.0) Some(l1ArrayVals.toArray) else None
val l2Array = if (l2ArrayVals.max > 0.0) Some(l2ArrayVals.toArray) else None
val maxNormArray = if (mnArrayVals.max > 0.0) Some(mnArrayVals.toArray) else None
def getAccumulator[U <: Updater[MMLPWeights, MMLPLossGradient, U]] = new AccumulatorParam[U] {
def zero(v: U) = {
v.resetLearningRates(0.0f)
v
}
def addInPlace(v1: U, v2: U) = v1 compose v2
}
appSettings.method match {
case "adagrad" =>
val sumSquared = network.generateZeroedLayout
sumSquared set appSettings.initialLearnRate // set to the initial learning rate
val updater = new MMLPAdaGradUpdater(sumSquared, appSettings.initialLearnRate, maxNormArray=maxNormArray, l1Array=l1Array, l2Array=l2Array,
compose = composeStrategy)
val evaluator = new MMLPInstanceEvaluator[MMLPAdaGradUpdater](network)
new DistributedOnlineOptimizer[MMLPFactor, MMLPWeights, MMLPLossGradient, MMLPAdaGradUpdater](sc, weights, evaluator, updater, appSettings)
case "adadelta" =>
val sumSquared = network.generateZeroedLayout
val prevUpdates = network.generateZeroedLayout
val up = new MMLPAdaDeltaUpdater(sumSquared, prevUpdates, appSettings.epsilon, appSettings.rho, compose = composeStrategy, maxNorm=appSettings.maxNorm)
val evaluator = new MMLPInstanceEvaluator[MMLPAdaDeltaUpdater](network)
new DistributedOnlineOptimizer[MMLPFactor, MMLPWeights, MMLPLossGradient, MMLPAdaDeltaUpdater](sc, weights, evaluator, up, appSettings)
case "rmsprop" =>
val sumSquared = network.generateZeroedLayout
sumSquared set appSettings.initialLearnRate // set to the initial learning rate
val updater = new MMLPRMSPropUpdater(sumSquared, appSettings.initialLearnRate, maxNormArray=maxNormArray, l1Array=l1Array, l2Array=l2Array,
compose = composeStrategy)
val evaluator = new MMLPInstanceEvaluator[MMLPRMSPropUpdater](network)
new DistributedOnlineOptimizer[MMLPFactor, MMLPWeights, MMLPLossGradient, MMLPRMSPropUpdater](sc, weights, evaluator, updater, appSettings)
case "nasgd" => // Nesterov accelerated
val momentum = network.generateZeroedLayout
val uu = new MMLPSgdUpdater(momentum, true, appSettings.initialLearnRate, maxNormArray=maxNormArray, l1Array=l1Array, l2Array=l2Array,
compose = composeStrategy)
val evaluator = new MMLPInstanceEvaluator[MMLPSgdUpdater](network)
new DistributedOnlineOptimizer[MMLPFactor, MMLPWeights, MMLPLossGradient, MMLPSgdUpdater](sc, weights, evaluator, uu, appSettings)
case "adam" =>
val mom1 = network.generateZeroedLayout
val mom2 = network.generateZeroedLayout
val uu = new MMLPAdamUpdater(0.001f, 0.9f, 0.999f, mom1, mom2, maxNormArray=maxNormArray, l1Array = l1Array, l2Array = l2Array,
composeSt = composeStrategy)
val evaluator = new MMLPInstanceEvaluator[MMLPAdamUpdater](network)
new DistributedOnlineOptimizer[MMLPFactor, MMLPWeights, MMLPLossGradient, MMLPAdamUpdater](sc, weights, evaluator, uu, appSettings)
case "sgd" =>
val up = new BasicMMLPSgdUpdater(appSettings.initialLearnRate)
val evaluator = new MMLPInstanceEvaluator[BasicMMLPSgdUpdater](network)
new DistributedOnlineOptimizer[MMLPFactor, MMLPWeights, MMLPLossGradient, BasicMMLPSgdUpdater](sc, weights, evaluator, up, appSettings)
case a => throw new RuntimeException("Unrecognized online training method: " + a)
}
}
}
| project-mandolin/mandolin | mandolin-spark/src/main/scala/org/mitre/mandolin/mlp/spark/DistributedMMLPOptimizer.scala | Scala | apache-2.0 | 4,419 |
package org.retistruen.view
import org.joda.time.Seconds._
import org.joda.time.Minutes._
import org.retistruen.jmx.JMX
import org.retistruen._
import org.retistruen.instrument.reduce.Max
import akka.actor.ActorSystem
class MyModel(override val actorSystem: ActorSystem) extends Model("mymodel")(actorSystem) with JMX {
val s1 = source[Double]("s1")
s1 --> mean
s1 --> max --> rec
s1 --> max(50) --> rec
s1 --> mean(50) --> rec
s1 --> mean(200) --> rec
val min1 = s1 --> collect(seconds(15))
min1 --> reduce.max --> rec
min1 --> reduce.min --> rec
min1 --> reduce.mean --> rec
min1 --> reduce.stddev --> rec
min1 --> reduce("parity", { seq: Seq[Double] ⇒
if (seq.isEmpty) None
else Some(seq.map(d ⇒ 1 - (d % 2)).sum)
  }) // computes the "parity" of the block
s1 --> collect(minutes(5)) --> reduce.mean
val s2 = source[Double]("s2")
s2 --> rec
s2 --> max(10) --> rec
s2 --> mean --> rec
val s3 = osource[Double]("s3")
s3 --> collect(seconds(10)) --> reduce.mode --> rec(1000)
val s3c = s3 --> collect(seconds(15))
s3c --> reduce.open
s3c --> reduce.close
s3c --> reduce.max
s3c --> reduce.min
s3c --> reduce.median
s3c --> reduce.mean
s3c --> reduce.mode
s3c --> reduce.percentile(90)
s3c --> reduce.percentile(10)
s3c --> reduce.range
}
object ShowMyModel {
def main(args: Array[String]): Unit = {
val as = ActorSystem()
val model = new MyModel(as)
model.registerMBeans
new ModelViewer(model).show
model.s1 << 0
model.s2 << 0
(1 to 1000000) foreach { v ⇒
model.s1 << math.random * (math.random * v)
Thread.sleep((math.random * 5).toInt)
model.s3 << (math.random * 100).toInt
}
Thread.sleep(10000)
as.shutdown
}
}
| plalloni/retistruen | src/test/scala/org/retistruen/view/ShowModel.scala | Scala | mit | 1,775 |
package com.plasmaconduit.framework
import com.plasmaconduit.edge.http._
import io.netty.buffer.{Unpooled, ByteBuf}
import io.netty.handler.codec.http.Cookie
trait HttpRequest[R <: HttpRequest[R]] { self =>
val method: HttpMethod
val version: HttpVersion
val uri: String
val path: String
val queryStringParameters: Map[String, String]
val headers: HttpHeaders
val cookies: Option[Set[Cookie]]
val body: ByteBuf
val request: Option[HttpServerRequest]
val pathVars: Map[String, String]
val flash: HttpSession
val session: HttpSession
val cookieMap: Map[String, Set[Cookie]]
def withPathVars(vars: Map[String, String]): R
def withFlash(newFlash: HttpSession): R
def withSession(newSession: HttpSession): R
}
final case class SimpleHttpRequest(method: HttpMethod = HttpGet,
version: HttpVersion = HttpVersion11,
uri: String = "",
path: String = "/",
queryStringParameters: Map[String, String] = Map(),
headers: HttpHeaders = MapHttpHeadersWrapper(Map()),
cookies: Option[Set[Cookie]] = None,
body: ByteBuf = Unpooled.wrappedBuffer("".getBytes("UTF-8")),
request: Option[HttpServerRequest] = None,
pathVars: Map[String, String] = Map(),
flash: HttpSession = HttpSession(),
session: HttpSession = HttpSession()) extends HttpRequest[SimpleHttpRequest]
{
lazy val cookieMap: Map[String, Set[Cookie]] = cookies match {
case None => Map()
case Some(c) => c.foldLeft(Map[String, Set[Cookie]]()) { (map, cookie) =>
map.get(cookie.getName) match {
case Some(existing) => map + (cookie.getName -> (existing + cookie))
case None => map + (cookie.getName -> Set(cookie))
}
}
}
def withPathVars(vars: Map[String, String]): SimpleHttpRequest = {
copy(pathVars = vars)
}
def withFlash(newFlash: HttpSession): SimpleHttpRequest = {
copy(flash = newFlash)
}
def withSession(newSession: HttpSession): SimpleHttpRequest = {
copy(session = newSession)
}
}
object SimpleHttpRequest {
def fromHttpServerRequest(request: HttpServerRequest): SimpleHttpRequest = SimpleHttpRequest(
method = request.method,
version = request.version,
uri = request.uri,
path = request.path,
queryStringParameters = request.queryParams,
headers = request.headers,
cookies = request.cookies,
body = request.body,
request = Some(request)
)
}
| plasmaconduit/plasmaconduit-framework | src/main/scala/com/plasmaconduit/framework/HttpRequest.scala | Scala | mit | 2,881 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.gridfs.helpers
import java.nio.channels.{AsynchronousByteChannel, AsynchronousFileChannel}
import com.mongodb.async.client.gridfs.helpers.{AsynchronousChannelHelper => JAsynchronousChannelHelper}
import org.mongodb.scala.gridfs.{AsyncInputStream, AsyncOutputStream}
/**
 * A helper class to convert AsynchronousByteChannel or AsynchronousFileChannel instances into
* [[org.mongodb.scala.gridfs.AsyncInputStream]] or [[org.mongodb.scala.gridfs.AsyncOutputStream]] instances.
*
* @note Requires Java 7 or greater.
* @since 1.2
*/
@deprecated("Use `Observable[ByteBuffer]` instead", "2.8.0")
object AsynchronousChannelHelper {
/**
   * Converts an AsynchronousByteChannel into an AsyncInputStream
*
* @param asynchronousByteChannel the AsynchronousByteChannel
* @return the AsyncInputStream
*/
def channelToInputStream(asynchronousByteChannel: AsynchronousByteChannel): AsyncInputStream =
JAsynchronousChannelHelper.channelToInputStream(asynchronousByteChannel)
/**
   * Converts an AsynchronousFileChannel into an AsyncInputStream
*
* @param asynchronousFileChannel the AsynchronousFileChannel
* @return the AsyncInputStream
*/
def channelToInputStream(asynchronousFileChannel: AsynchronousFileChannel): AsyncInputStream =
JAsynchronousChannelHelper.channelToInputStream(asynchronousFileChannel)
/**
   * Converts an AsynchronousByteChannel into an AsyncOutputStream
*
* @param asynchronousByteChannel the AsynchronousByteChannel
* @return the AsyncOutputStream
*/
def channelToOutputStream(asynchronousByteChannel: AsynchronousByteChannel): AsyncOutputStream =
JAsynchronousChannelHelper.channelToOutputStream(asynchronousByteChannel)
/**
   * Converts an AsynchronousFileChannel into an AsyncOutputStream
*
* @param asynchronousFileChannel the AsynchronousFileChannel
* @return the AsyncOutputStream
*/
def channelToOutputStream(asynchronousFileChannel: AsynchronousFileChannel): AsyncOutputStream =
JAsynchronousChannelHelper.channelToOutputStream(asynchronousFileChannel)
}
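// Illustrative usage sketch (the file path and the GridFS upload call are hypothetical; only the
// helper methods above are taken from this object):
//
//   val channel = AsynchronousFileChannel.open(
//     java.nio.file.Paths.get("/tmp/data.bin"), java.nio.file.StandardOpenOption.READ)
//   val in: AsyncInputStream = AsynchronousChannelHelper.channelToInputStream(channel)
//   gridFSBucket.uploadFromStream("data.bin", in)   // assumed GridFSBucket API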
| rozza/mongo-scala-driver | driver/src/main/scala/org/mongodb/scala/gridfs/helpers/AsynchronousChannelHelper.scala | Scala | apache-2.0 | 2,694 |
package org.psliwa.idea.composerJson.intellij.codeAssist
import javax.swing.Icon
import com.intellij.codeInsight.completion.{InsertHandler, InsertionContext}
import com.intellij.codeInsight.lookup.{LookupElement, LookupElementPresentation, LookupValueWithPriority}
import com.intellij.psi.PsiElement
final private[codeAssist] class BaseLookupElement(
val name: String,
val icon: Option[Icon] = None,
val quoted: Boolean = true,
val insertHandler: Option[InsertHandler[LookupElement]] = None,
val psiElement: Option[PsiElement] = None,
val description: String = "",
val priority: Option[Int] = None
) extends LookupElement {
private val presentation = new LookupElementPresentation
presentation.setIcon(icon.orNull)
presentation.setItemText(name)
presentation.setTypeGrayed(true)
presentation.setTypeText(if (description == "") null else description)
presentation.setStrikeout(description.startsWith("DEPRECATED"))
override def getLookupString: String = name
override def renderElement(presentation: LookupElementPresentation): Unit = presentation.copyFrom(this.presentation)
override def handleInsert(context: InsertionContext): Unit = insertHandler.foreach(_.handleInsert(context, this))
def withInsertHandler(insertHandler: InsertHandler[LookupElement]): BaseLookupElement = {
new BaseLookupElement(name, icon, quoted, Some(insertHandler), psiElement, description, priority)
}
def withPsiElement(psiElement: PsiElement): BaseLookupElement = {
new BaseLookupElement(name, icon, quoted, insertHandler, Some(psiElement), description, priority)
}
override def getObject: AnyRef = psiElement.getOrElse(this)
override def equals(other: Any): Boolean = other match {
case that: BaseLookupElement =>
name == that.name &&
icon == that.icon &&
quoted == that.quoted &&
insertHandler == that.insertHandler &&
psiElement == that.psiElement &&
description == that.description
case _ => false
}
override def hashCode(): Int = {
val state = Seq(name, icon, quoted, insertHandler, psiElement, description)
state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
}
| psliwa/idea-composer-plugin | src/main/scala/org/psliwa/idea/composerJson/intellij/codeAssist/BaseLookupElement.scala | Scala | mit | 2,198 |
package gsd.linux.cnf
import org.sat4j.tools.xplain.Xplain
import org.sat4j.minisat.SolverFactory
import collection.JavaConversions
import org.sat4j.specs.{IConstr, ISolver}
import collection.mutable.HashMap
trait XplainSupport extends SATSolver[Xplain[ISolver]] {
this: ConstraintMap =>
import JavaConversions._
val xplain = new Xplain[ISolver](super.newSolver)
abstract override def newSolver = xplain
/**
* Returns a list of clauses that causes an unsatisfiable result.
* Can only be called after the solver returns !isSatisfiable.
*/
def explain: List[Clause] =
xplain.explain.toList map constraints.apply
}
| leutheus/linux-variability-analysis-tools.fm-translation | src/main/scala/gsd/linux/cnf/XplainSupport.scala | Scala | lgpl-3.0 | 645 |
package playground.spray
import akka.actor.{ActorSystem, Actor, Props}
import akka.pattern.ask
import akka.util.Timeout
import spray.routing.SimpleRoutingApp
import scala.concurrent.duration._
import scala.util._
object PersonRepository {
case class Person(name: String, age: Int)
/* ==== Messages ==== */
case class SavePerson(name: String, age: Int)
case class GetPerson(name: String)
case class DeletePerson(name: String, age: Int)
case object Done
}
class PersonRepository extends Actor {
import PersonRepository._
var persons = List.empty[Person]
def receive = {
case SavePerson(name: String, age: Int) => {
persons :+= Person(name, age)
sender ! Done
}
case GetPerson(name: String) => {
sender ! persons.filter( _.name == name ).toString
}
case DeletePerson(name: String, age: Int) => {
persons = persons filter { _ != Person(name, age) }
sender ! Done
}
}
}
/**
* Simple REST API using Spray.
*/
object REST extends App with SimpleRoutingApp {
import PersonRepository._
implicit val system = ActorSystem("rest-system")
implicit val timeout = Timeout(5.seconds)
implicit val ex = system.dispatcher
val personRepo = system.actorOf(Props[PersonRepository])
startServer(interface = "localhost", port = 8998) {
path("person") {
put {
parameter('name, 'age.as[Int]) { (name, age) =>
onComplete(personRepo ? SavePerson(name, age)) {
case Success(_) => complete { "Done" }
case Failure(e) => complete { e }
}
}
} ~
get {
parameter('name) { name =>
onComplete(personRepo ? GetPerson(name)) {
case Success(p) => complete { p.toString }
case Failure(e) => complete { e }
}
}
} ~
delete {
parameter('name, 'age.as[Int]) { (name, age) =>
onComplete(personRepo ? DeletePerson(name, age)) {
case Success(_) => complete { "Done" }
case Failure(e) => complete { e }
}
}
}
}
}
}
// vim: set ts=2 sw=2 et:
| ataraxer/spray-playground | src/main/scala/REST.scala | Scala | mit | 2,128 |
/*******************************************************************************
* Copyright 2017 Capital One Services, LLC and Bitwise, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package hydrograph.engine.spark.components.adapter
import hydrograph.engine.core.component.generator.S3FileTransferEntityGenerator
import hydrograph.engine.jaxb.commontypes.TypeBaseComponent
import hydrograph.engine.spark.components.AWSS3FileTransferComponent
import hydrograph.engine.spark.components.adapter.base.RunProgramAdapterBase
import hydrograph.engine.spark.components.base.CommandComponentSparkFlow
import hydrograph.engine.spark.components.platform.BaseComponentParams
/**
* Created for S3FileTransferAdapter on 9/7/2017.
*/
class S3FileTransferAdapter(typeBaseComponent: TypeBaseComponent) extends RunProgramAdapterBase{
  private var s3FileTransferEntityGenerator: S3FileTransferEntityGenerator = null
  private var awsS3FileTransferComponent: AWSS3FileTransferComponent = null
override def createGenerator(): Unit = {
s3FileTransferEntityGenerator = new S3FileTransferEntityGenerator(typeBaseComponent)
}
override def createComponent(baseComponentParams: BaseComponentParams): Unit = {
    awsS3FileTransferComponent = new AWSS3FileTransferComponent(s3FileTransferEntityGenerator.getEntity)
}
override def getComponent(): CommandComponentSparkFlow = awsS3FileTransferComponent
}
| capitalone/Hydrograph | hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/components/adapter/S3FileTransferAdapter.scala | Scala | apache-2.0 | 1,995 |
package bifrost
import akka.actor.ActorRef
import bifrost.blocks.BifrostBlock
import bifrost.forging.{Forger, ForgingSettings}
import bifrost.scorexMod.GenericNodeViewHolder
import bifrost.LocalInterface
import bifrost.transaction.bifrostTransaction.BifrostTransaction
import bifrost.transaction.box.proposition.ProofOfKnowledgeProposition
import bifrost.transaction.state.PrivateKey25519
class BifrostLocalInterface(override val viewHolderRef: ActorRef, forgerRef: ActorRef, forgingSettings: ForgingSettings)
extends LocalInterface[ProofOfKnowledgeProposition[PrivateKey25519], BifrostTransaction, BifrostBlock] {
import LocalInterface._
type P = ProofOfKnowledgeProposition[PrivateKey25519]
type TX = BifrostTransaction
type PMOD = BifrostBlock
override def preStart(): Unit = {
val events = Seq(
GenericNodeViewHolder.EventType.StartingPersistentModifierApplication,
GenericNodeViewHolder.EventType.FailedTransaction,
GenericNodeViewHolder.EventType.FailedPersistentModifier,
GenericNodeViewHolder.EventType.SuccessfulTransaction,
GenericNodeViewHolder.EventType.SuccessfulPersistentModifier
)
viewHolderRef ! GenericNodeViewHolder.Subscribe(events)
}
private def viewHolderEvents: Receive = {
case stm: GenericNodeViewHolder.StartingPersistentModifierApplication[P, TX, PMOD] =>
onStartingPersistentModifierApplication(stm.modifier)
case ft: GenericNodeViewHolder.FailedTransaction[P, TX] =>
onFailedTransaction(ft.transaction)
case fm: GenericNodeViewHolder.FailedModification[P, TX, PMOD] =>
onFailedModification(fm.modifier)
case st: GenericNodeViewHolder.SuccessfulTransaction[P, TX] =>
onSuccessfulTransaction(st.transaction)
case sm: GenericNodeViewHolder.SuccessfulModification[P, TX, PMOD] =>
onSuccessfulModification(sm.modifier)
}
override protected def onStartingPersistentModifierApplication(pmod: BifrostBlock): Unit = {}
override protected def onFailedTransaction(tx: BifrostTransaction): Unit = {}
override protected def onFailedModification(mod: BifrostBlock): Unit = {}
override protected def onSuccessfulTransaction(tx: BifrostTransaction): Unit = {}
override protected def onSuccessfulModification(mod: BifrostBlock): Unit = {}
override protected def onNoBetterNeighbour(): Unit = forgerRef ! Forger.StartForging
override protected def onBetterNeighbourAppeared(): Unit = forgerRef ! Forger.StopForging
override def receive: Receive = viewHolderEvents orElse {
case NoBetterNeighbour => onNoBetterNeighbour()
case BetterNeighbourAppeared => onBetterNeighbourAppeared()
case lt: LocallyGeneratedTransaction[P, TX] => viewHolderRef ! lt
case lm: LocallyGeneratedModifier[P, TX, PMOD] => viewHolderRef ! lm
case a: Any => log.error("Strange input: " + a)
}
}
| Topl/Project-Bifrost | src/main/scala/bifrost/BifrostLocalInterface.scala | Scala | mpl-2.0 | 2,844 |
package filodb.memory.format.vectors
import java.nio.ByteBuffer
import debox.Buffer
import spire.syntax.cfor._
import filodb.memory.{BinaryRegion, MemFactory}
import filodb.memory.format._
import filodb.memory.format.BinaryVector.BinaryVectorPtr
import filodb.memory.format.Encodings._
import filodb.memory.format.MemoryReader._
object DoubleVector {
/**
* Creates a new MaskedDoubleAppendingVector, allocating a byte array of the right size for the max #
* of elements plus a bit mask.
* @param maxElements initial maximum number of elements this vector will hold. Will automatically grow.
*/
def appendingVector(memFactory: MemFactory, maxElements: Int): BinaryAppendableVector[Double] = {
val bytesRequired = 12 + BitmapMask.numBytesRequired(maxElements) + 8 + 8 * maxElements
val addr = memFactory.allocateOffheap(bytesRequired)
val dispose = () => memFactory.freeMemory(addr)
GrowableVector(memFactory, new MaskedDoubleAppendingVector(addr, bytesRequired, maxElements, dispose))
}
/**
* Creates a DoubleAppendingVector - does not grow and does not have bit mask. All values are marked
* as available.
* @param maxElements the max number of elements the vector will hold. Not expandable
* @param detectDrops if true, then use a DoubleCounterAppender instead to detect drops
*/
def appendingVectorNoNA(memFactory: MemFactory, maxElements: Int, detectDrops: Boolean = false):
BinaryAppendableVector[Double] = {
val bytesRequired = 8 + 8 * maxElements
val addr = memFactory.allocateOffheap(bytesRequired)
val dispose = () => memFactory.freeMemory(addr)
if (detectDrops) new DoubleCounterAppender(addr, bytesRequired, dispose)
else new DoubleAppendingVector(addr, bytesRequired, dispose)
}
/**
* Quickly create a DoubleVector from a sequence of Doubles which can be optimized.
*/
def apply(memFactory: MemFactory, data: Seq[Double]): BinaryAppendableVector[Double] = {
val vect = appendingVectorNoNA(memFactory, data.length)
data.foreach(vect.addData)
vect
}
def apply(buffer: ByteBuffer): DoubleVectorDataReader = {
require(buffer.isDirect)
apply(MemoryReader.fromByteBuffer(buffer), 0)
}
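  // Reader-side sketch (illustrative): wrapping an encoded Double vector held in a direct ByteBuffer.
  // The buffer is assumed to contain a valid frozen BinaryVector starting at position 0.
  //
  //   val acc    = MemoryReader.fromByteBuffer(buf)           // buf: a direct java.nio.ByteBuffer
  //   val reader = DoubleVector(acc, 0)
  //   val first  = reader(acc, 0, 0)                          // element at row 0
  //   val total  = reader.sum(acc, 0, 0, reader.length(acc, 0) - 1)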
import WireFormat._
/**
* Parses the type of vector from the WireFormat word at address+4 and returns the appropriate
* DoubleVectorDataReader object for parsing it
*/
def apply(acc: MemoryReader, vector: BinaryVectorPtr): DoubleVectorDataReader = {
val reader = BinaryVector.vectorType(acc, vector) match {
case x if x == WireFormat(VECTORTYPE_DELTA2, SUBTYPE_INT_NOMASK) => DoubleLongWrapDataReader
case x if x == WireFormat(VECTORTYPE_DELTA2, SUBTYPE_REPEATED) => DoubleLongWrapDataReader
case x if x == WireFormat(VECTORTYPE_BINSIMPLE, SUBTYPE_PRIMITIVE) => MaskedDoubleDataReader
case x if x == WireFormat(VECTORTYPE_BINSIMPLE, SUBTYPE_PRIMITIVE_NOMASK) => DoubleVectorDataReader64
}
if (PrimitiveVectorReader.dropped(acc, vector)) new CorrectingDoubleVectorReader(reader, acc, vector)
else reader
}
/**
* For now, since most Prometheus double data is in fact integral, we take a really simple approach:
* 1. First if all doubles are integral, use DeltaDeltaVector to encode
* 2. If not, don't compress
*
* In future, try some schemes that work for doubles:
* - XOR of initial value, bitshift, store using less bits (similar to Gorilla but not differential)
* - Delta2/slope double diff first, shift doubles by offset, then use XOR technique
* (XOR works better when numbers are same sign plus smaller exponent range)
* - slightly lossy version of any of above
* - http://vis.cs.ucdavis.edu/vis2014papers/TVCG/papers/2674_20tvcg12-lindstrom-2346458.pdf
* - (slightly lossy) normalize exponents and convert to fixed point, then compress using int/long techniques
*/
def optimize(memFactory: MemFactory, vector: OptimizingPrimitiveAppender[Double]): BinaryVectorPtr = {
val longWrapper = new LongDoubleWrapper(vector)
if (longWrapper.allIntegrals) {
DeltaDeltaVector.fromLongVector(memFactory, longWrapper)
.getOrElse {
if (vector.noNAs) vector.dataVect(memFactory) else vector.getVect(memFactory)
}
} else {
if (vector.noNAs) vector.dataVect(memFactory) else vector.getVect(memFactory)
}
}
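  // Illustration of the decision above (not from the original source):
  //   Seq(1.0, 2.0, 3.0)    -> all integral, so DeltaDeltaVector encoding is attempted first
  //   Seq(1.5, 2.25, 3.125) -> contains non-integrals, so the plain 64-bit layout is kept
  //                            (dataVect when there are no NAs, getVect otherwise)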
}
final case class DoubleCorrection(lastValue: Double, correction: Double = 0.0) extends CorrectionMeta
/**
* An iterator optimized for speed and type-specific to avoid boxing.
 * It has no hasNext() method: the caller is expected to know how many elements to read, so every
 * element is visited and the extra per-element method call is avoided for performance.
*/
trait DoubleIterator extends TypedIterator {
def next: Double
}
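// Iteration sketch (illustrative): with no hasNext, the caller drives the loop from length(),
// exactly as toBuffer below does.
//
//   val n  = reader.length(acc, vector)
//   val it = reader.iterate(acc, vector)
//   cforRange { 0 until n } { i => val v = it.next /* ... use v ... */ }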
/**
 * A VectorDataReader object that supports fast extraction of Double data from BinaryVectors
* +0000 4-byte length word
* +0004 2-byte WireFormat
* +0006 2-byte nbits / signed / bitshift
* +0008 start of packed Double data
*/
trait DoubleVectorDataReader extends CounterVectorReader {
/**
* Retrieves the element at position/row n, where n=0 is the first element of the vector.
*/
def apply(acc: MemoryReader, vector: BinaryVectorPtr, n: Int): Double
  // This length method assumes fixed 64-bit (8-byte) elements: e.g. 100 doubles occupy
  // HeaderLen header bytes plus 800 payload bytes, so length = (numBytes - HeaderLen) / 8 = 100
def length(acc: MemoryReader, vector: BinaryVectorPtr): Int =
(numBytes(acc, vector) - PrimitiveVector.HeaderLen) / 8
/**
* Returns a DoubleIterator to efficiently go through the elements of the vector. The user is responsible for
* knowing how many elements to process. There is no hasNext.
* All elements are iterated through, even those designated as "not available".
* Costs an allocation for the iterator but allows potential performance gains too.
* @param vector the BinaryVectorPtr native address of the BinaryVector
* @param startElement the starting element # in the vector, by default 0 (the first one)
*/
def iterate(acc: MemoryReader, vector: BinaryVectorPtr, startElement: Int = 0): DoubleIterator
def debugString(acc: MemoryReader, vector: BinaryVectorPtr, sep: String = ","): String = {
val it = iterate(acc, vector)
val size = length(acc, vector)
    (0 until size).map(_ => it.next).mkString(sep)
}
/**
* Sums up the Double values in the vector from position start to position end.
* @param vector the BinaryVectorPtr native address of the BinaryVector
* @param start the starting element # in the vector to sum, 0 == first element
* @param end the ending element # in the vector to sum, inclusive
* @param ignoreNaN if true, ignore samples which have NaN value (sometimes used for special purposes)
*/
def sum(acc: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int, ignoreNaN: Boolean = true): Double
/**
* Counts the values excluding NaN / not available bits
*/
def count(acc: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int): Int
def changes(acc: MemoryReader, vector: BinaryVectorPtr,
start: Int, end: Int, prev: Double, ignorePrev: Boolean = false): (Double, Double)
/**
* Converts the BinaryVector to an unboxed Buffer.
* Only returns elements that are "available".
*/
// NOTE: I know this code is repeated but I don't want to have to debug specialization/unboxing/traits right now
def toBuffer(acc: MemoryReader, vector: BinaryVectorPtr, startElement: Int = 0): Buffer[Double] = {
val newBuf = Buffer.empty[Double]
val dataIt = iterate(acc, vector, startElement)
val availIt = iterateAvailable(acc, vector, startElement)
val len = length(acc, vector)
cforRange { startElement until len } { n =>
val item = dataIt.next
if (availIt.next) newBuf += item
}
newBuf
}
def detectDropAndCorrection(acc: MemoryReader,
vector: BinaryVectorPtr,
meta: CorrectionMeta): CorrectionMeta = meta match {
case NoCorrection => meta // No last value, cannot compare. Just pass it on.
case DoubleCorrection(lastValue, correction) =>
val firstValue = apply(acc, vector, 0)
// Last value is the new delta correction
if (firstValue < lastValue) DoubleCorrection(lastValue, correction + lastValue)
else meta
}
// Default implementation for vectors with no correction
def updateCorrection(acc: MemoryReader, vector: BinaryVectorPtr, meta: CorrectionMeta): CorrectionMeta =
meta match {
// Return the last value and simply pass on the previous correction value
case DoubleCorrection(_, corr) => DoubleCorrection(apply(acc, vector, length(acc, vector) - 1), corr)
case NoCorrection => DoubleCorrection(apply(acc, vector, length(acc, vector) - 1), 0.0)
}
/**
* Retrieves the element at position/row n, with counter correction, taking into account a previous
* correction factor. Calling this method with increasing n should result in nondecreasing
* values starting no lower than the initial correction factor in correctionMeta.
* NOTE: this is a default implementation for vectors having no correction
*/
def correctedValue(acc: MemoryReader, vector: BinaryVectorPtr, n: Int, meta: CorrectionMeta): Double = meta match {
// Since this is a vector that needs no correction, simply add the correction amount to the original value
case DoubleCorrection(_, corr) => apply(acc, vector, n) + corr
case NoCorrection => apply(acc, vector, n)
}
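  // Worked example (illustrative): with meta = DoubleCorrection(lastValue = 9.0, correction = 5.0)
  // and a plain no-drop vector holding [10.0, 12.0], correctedValue(n = 1) returns 12.0 + 5.0 = 17.0,
  // and updateCorrection returns DoubleCorrection(12.0, 5.0) to carry into the next chunk.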
// Default implementation with no drops detected
def dropPositions(acc2: MemoryReader, vector: BinaryVectorPtr): debox.Buffer[Int] = debox.Buffer.empty[Int]
}
/**
* VectorDataReader for a Double BinaryVector using full 64-bits for a Double value
* Right now this is for non-corrected ingested vectors.
*/
object DoubleVectorDataReader64 extends DoubleVectorDataReader {
import PrimitiveVector.OffsetData
class Double64Iterator(acc: MemoryReader, var addr: BinaryRegion.NativePointer) extends DoubleIterator {
final def next: Double = {
val data = acc.getDouble(addr)
addr += 8
data
}
}
final def apply(acc: MemoryReader, vector: BinaryVectorPtr, n: Int): Double =
acc.getDouble(vector + OffsetData + n * 8)
def iterate(acc: MemoryReader, vector: BinaryVectorPtr, startElement: Int = 0): DoubleIterator =
new Double64Iterator(acc, vector + OffsetData + startElement * 8)
// end is inclusive
final def sum(acc: MemoryReader, vector: BinaryVectorPtr,
start: Int, end: Int, ignoreNaN: Boolean = true): Double = {
require(start >= 0 && end < length(acc, vector), s"($start, $end) is out of bounds, " +
s"length=${length(acc, vector)}")
var addr = vector + OffsetData + start * 8
val untilAddr = vector + OffsetData + end * 8 + 8 // one past the end
var sum: Double = Double.NaN
if (ignoreNaN) {
while (addr < untilAddr) {
val nextDbl = acc.getDouble(addr)
// There are many possible values of NaN. Use a function to ignore them reliably.
if (!java.lang.Double.isNaN(nextDbl)) {
if (sum.isNaN) sum = 0d
sum += nextDbl
}
addr += 8
}
} else {
sum = 0d
while (addr < untilAddr) {
sum += acc.getDouble(addr)
addr += 8
}
}
sum
}
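  // Example (illustrative): for elements [1.0, NaN, 2.5] with start = 0, end = 2,
  //   sum(..., ignoreNaN = true)  == 3.5
  //   sum(..., ignoreNaN = false) == NaN   (the NaN propagates through the addition)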
final def count(acc: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int): Int = {
require(start >= 0 && end < length(acc, vector), s"($start, $end) is out of bounds, " +
s"length=${length(acc, vector)}")
var addr = vector + OffsetData + start * 8
val untilAddr = vector + OffsetData + end * 8 + 8 // one past the end
var count = 0
while (addr < untilAddr) {
val nextDbl = acc.getDouble(addr)
// There are many possible values of NaN. Use a function to ignore them reliably.
if (!java.lang.Double.isNaN(nextDbl)) count += 1
addr += 8
}
count
}
final def changes(acc: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int,
prev: Double, ignorePrev: Boolean = false):
(Double, Double) = {
require(start >= 0 && end < length(acc, vector), s"($start, $end) is out of bounds, " +
s"length=${length(acc, vector)}")
var addr = vector + OffsetData + start * 8
val untilAddr = vector + OffsetData + end * 8 + 8 // one past the end
var changes = 0d
var prevVector : Double = prev
while (addr < untilAddr) {
val nextDbl = acc.getDouble(addr)
// There are many possible values of NaN. Use a function to ignore them reliably.
if (!java.lang.Double.isNaN(nextDbl) && prevVector != nextDbl && !java.lang.Double.isNaN(prevVector)) {
changes += 1
}
addr += 8
prevVector = nextDbl
}
(changes, prevVector)
}
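  // Example (illustrative): elements [1.0, 1.0, 2.0, 3.0] with prev = 1.0 yield (2.0, 3.0):
  // two transitions (1.0 -> 2.0 and 2.0 -> 3.0), with 3.0 returned as the new "prev" value.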
}
// Corrects and caches ONE underlying chunk.
// The algorithm is naive - just walk through and correct all values; the total correction for the whole
// vector is then passed on via updateCorrection.
// Works fine for randomly accessible vectors.
class CorrectingDoubleVectorReader(inner: DoubleVectorDataReader, acc: MemoryReader, vect: BinaryVectorPtr)
extends DoubleVectorDataReader {
override def length(acc2: MemoryReader, vector: BinaryVectorPtr): Int = inner.length(acc2, vector)
def apply(acc2: MemoryReader, vector: BinaryVectorPtr, n: Int): Double = inner(acc2, vector, n)
def iterate(acc2: MemoryReader, vector: BinaryVectorPtr, startElement: Int = 0): DoubleIterator =
inner.iterate(acc2, vector, startElement)
def sum(acc2: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int, ignoreNaN: Boolean = true): Double =
inner.sum(acc2, vector, start, end, ignoreNaN)
def count(acc2: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int): Int =
inner.count(acc2, vector, start, end)
def changes(acc2: MemoryReader, vector: BinaryVectorPtr,
start: Int, end: Int, prev: Double, ignorePrev: Boolean = false): (Double, Double) =
inner.changes(acc2, vector, start, end, prev)
private var _correction = 0.0
private val _drops = debox.Buffer.empty[Int] // to track counter drop positions
// Lazily correct - not all queries want corrected data
lazy val corrected = {
// if asked, lazily create corrected values and resets list
val _corrected = new Array[Double](length(acc, vect))
val it = iterate(acc, vect, 0)
var last = Double.MinValue
cforRange { 0 until length(acc, vect) } { pos =>
val nextVal = it.next
if (nextVal < last) { // reset!
_correction += last
_drops += pos
}
_corrected(pos) = nextVal + _correction
last = nextVal
}
_corrected
}
override def dropPositions(acc2: MemoryReader, vector: BinaryVectorPtr): debox.Buffer[Int] = {
assert(vector == vect && acc == acc2)
corrected // access it since it is lazy
_drops
}
override def correctedValue(acc2: MemoryReader, vector: BinaryVectorPtr,
n: Int, correctionMeta: CorrectionMeta): Double = {
assert(vector == vect && acc == acc2)
correctionMeta match {
// corrected value + any carryover correction
case DoubleCorrection(_, corr) => corrected(n) + corr
case NoCorrection => corrected(n)
}
}
override def updateCorrection(acc2: MemoryReader, vector: BinaryVectorPtr, meta: CorrectionMeta): CorrectionMeta = {
assert(vector == vect && acc == acc2)
val lastValue = apply(acc2, vector, length(acc2, vector) - 1)
// Return the last (original) value and all corrections onward
meta match {
case DoubleCorrection(_, corr) => DoubleCorrection(lastValue, corr + _correction)
case NoCorrection => DoubleCorrection(lastValue, _correction)
}
}
}
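// Worked example (illustrative): for an underlying chunk [10.0, 12.0, 3.0, 5.0] (a counter reset after 12.0),
// the lazy `corrected` array becomes [10.0, 12.0, 15.0, 17.0], _correction ends up as 12.0, and
// dropPositions reports position 2; updateCorrection then adds 12.0 to any carried-over correction.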
/**
* VectorDataReader for a masked (NA bit) Double BinaryVector, uses underlying DataReader for subvector
*/
object MaskedDoubleDataReader extends DoubleVectorDataReader with BitmapMaskVector {
final def apply(acc: MemoryReader, vector: BinaryVectorPtr, n: Int): Double = {
val subvect = subvectAddr(acc, vector)
DoubleVector(acc, subvect).apply(acc, subvect, n)
}
override def length(acc: MemoryReader, vector: BinaryVectorPtr): Int = super.length(acc, subvectAddr(acc, vector))
final def sum(acc: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int, ignoreNaN: Boolean = true): Double =
DoubleVector(acc, subvectAddr(acc, vector)).sum(acc, subvectAddr(acc, vector), start, end, ignoreNaN)
final def count(acc: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int): Int =
DoubleVector(acc, subvectAddr(acc, vector)).count(acc, subvectAddr(acc, vector), start, end)
override def iterate(acc: MemoryReader, vector: BinaryVectorPtr, startElement: Int = 0): DoubleIterator =
DoubleVector(acc, subvectAddr(acc, vector)).iterate(acc, subvectAddr(acc, vector), startElement)
override def changes(acc: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int,
prev: Double, ignorePrev: Boolean = false): (Double, Double) =
DoubleVector(acc, subvectAddr(acc, vector)).changes(acc, subvectAddr(acc, vector), start, end, prev)
}
class DoubleAppendingVector(addr: BinaryRegion.NativePointer, maxBytes: Int, val dispose: () => Unit)
extends PrimitiveAppendableVector[Double](addr, maxBytes, 64, true) {
final def addNA(): AddResponse = addData(0.0)
def addData(data: Double): AddResponse = checkOffset() match {
case Ack =>
UnsafeUtils.setDouble(writeOffset, data)
incWriteOffset(8)
Ack
case other: AddResponse => other
}
final def addFromReaderNoNA(reader: RowReader, col: Int): AddResponse = addData(reader.getDouble(col))
final def minMax: (Double, Double) = {
var min = Double.MaxValue
var max = Double.MinValue
cforRange { 0 until length } { index =>
val data = apply(index)
if (data < min) min = data
if (data > max) max = data
}
(min, max)
}
private final val readVect = DoubleVector(nativePtrReader, addr)
final def apply(index: Int): Double = readVect.apply(nativePtrReader, addr, index)
def reader: VectorDataReader = readVect
final def copyToBuffer: Buffer[Double] = DoubleVectorDataReader64.toBuffer(nativePtrReader, addr)
override def optimize(memFactory: MemFactory, hint: EncodingHint = AutoDetect): BinaryVectorPtr =
DoubleVector.optimize(memFactory, this)
}
/**
* A Double appender for incrementing counters that detects if there is a drop in ingested value,
 * and if so, marks the Reset/Drop bit (bit 15 of the NBits/Offset u16). The same bit is also
 * propagated to chunks produced by optimize().
*/
class DoubleCounterAppender(addr: BinaryRegion.NativePointer, maxBytes: Int, dispose: () => Unit)
extends DoubleAppendingVector(addr, maxBytes, dispose) {
private var last = Double.MinValue
override final def addData(data: Double): AddResponse = {
if (data < last) PrimitiveVectorReader.markDrop(MemoryAccessor.nativePtrAccessor, addr)
last = data
super.addData(data)
}
override def optimize(memFactory: MemFactory, hint: EncodingHint = AutoDetect): BinaryVectorPtr = {
val newChunk = DoubleVector.optimize(memFactory, this)
if (PrimitiveVectorReader.dropped(nativePtrReader, addr))
PrimitiveVectorReader.markDrop(MemoryAccessor.nativePtrAccessor, newChunk)
newChunk
}
override def reader: VectorDataReader = DoubleVector(nativePtrReader, addr)
}
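// Usage sketch (illustrative): appending 5.0, 7.0, then 2.0 through a DoubleCounterAppender marks the
// Reset/Drop bit (2.0 < 7.0), and optimize() carries the bit into the encoded chunk, so DoubleVector.apply
// will wrap its reader in CorrectingDoubleVectorReader.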
class MaskedDoubleAppendingVector(addr: BinaryRegion.NativePointer,
val maxBytes: Int,
val maxElements: Int,
val dispose: () => Unit) extends
// First four bytes: offset to DoubleBinaryVector
BitmapMaskAppendableVector[Double](addr, maxElements) with OptimizingPrimitiveAppender[Double] {
def vectMajorType: Int = WireFormat.VECTORTYPE_BINSIMPLE
def vectSubType: Int = WireFormat.SUBTYPE_PRIMITIVE
def nbits: Short = 64
val subVect = new DoubleAppendingVector(addr + subVectOffset, maxBytes - subVectOffset, dispose)
def copyToBuffer: Buffer[Double] = MaskedDoubleDataReader.toBuffer(nativePtrReader, addr)
final def minMax: (Double, Double) = {
var min = Double.MaxValue
var max = Double.MinValue
cforRange { 0 until length } { index =>
if (isAvailable(index)) {
val data = subVect.apply(index)
if (data < min) min = data
if (data > max) max = data
}
}
(min, max)
}
final def dataVect(memFactory: MemFactory): BinaryVectorPtr = subVect.freeze(memFactory)
override def optimize(memFactory: MemFactory, hint: EncodingHint = AutoDetect): BinaryVectorPtr =
DoubleVector.optimize(memFactory, this)
override def newInstance(memFactory: MemFactory, growFactor: Int = 2): BinaryAppendableVector[Double] = {
val newAddr = memFactory.allocateOffheap(maxBytes * growFactor)
val dispose = () => memFactory.freeMemory(newAddr)
new MaskedDoubleAppendingVector(newAddr, maxBytes * growFactor, maxElements * growFactor, dispose)
}
}
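// Growth note (illustrative, assuming the usual GrowableVector behaviour): when the wrapper created by
// DoubleVector.appendingVector runs out of space, newInstance above is expected to be called, doubling
// both maxBytes and maxElements with a fresh off-heap allocation.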
/**
* A wrapper around Double appenders that returns Longs. Designed to feed into the Long/DeltaDelta optimizers
 * so that an optimized integer representation of a double vector can be produced in one pass without
* appending to another Long based AppendingVector first.
*/
private[vectors] class LongDoubleWrapper(val inner: OptimizingPrimitiveAppender[Double])
extends AppendableVectorWrapper[Long, Double] {
val MaxLongDouble = Long.MaxValue.toDouble
final def nonIntegrals: Int = {
var nonInts = 0
cforRange { 0 until length } { index =>
if (inner.isAvailable(index)) {
val data = inner.apply(index)
if (data > MaxLongDouble || (Math.rint(data) != data)) nonInts += 1
}
}
nonInts
}
val allIntegrals: Boolean = (nonIntegrals == 0)
final def addData(value: Long): AddResponse = inner.addData(value.toDouble)
final def apply(index: Int): Long = inner(index).toLong
}
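// Example (illustrative): [1.0, 2.0, 42.0] has no non-integrals (Math.rint(x) == x for each value), so
// allIntegrals is true and DeltaDeltaVector encoding is attempted; [1.5, 2.0] has one non-integral,
// so DoubleVector.optimize falls back to the plain 64-bit representation.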
/**
 * A wrapper to return Doubles from a Long vector... for when Double vectors can be compressed as
 * DDVs (DeltaDeltaVectors)
*/
object DoubleLongWrapDataReader extends DoubleVectorDataReader {
class DoubleLongWrapIterator(innerIt: LongIterator) extends DoubleIterator {
final def next: Double = innerIt.next.toDouble
}
override def length(acc: MemoryReader, vector: BinaryVectorPtr): Int =
LongBinaryVector(acc, vector).length(acc, vector)
final def apply(acc: MemoryReader, vector: BinaryVectorPtr, n: Int): Double =
LongBinaryVector(acc, vector)(acc, vector, n).toDouble
final def sum(acc: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int, ignoreNaN: Boolean = true): Double =
LongBinaryVector(acc, vector).sum(acc, vector, start, end) // Long vectors cannot contain NaN, ignore it
final def count(acc: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int): Int = end - start + 1
final def iterate(acc: MemoryReader, vector: BinaryVectorPtr, startElement: Int = 0): DoubleIterator =
new DoubleLongWrapIterator(LongBinaryVector(acc, vector).iterate(acc, vector, startElement))
final def changes(acc: MemoryReader, vector: BinaryVectorPtr, start: Int, end: Int,
prev: Double, ignorePrev: Boolean = false):
(Double, Double) = {
    // Recompute the flag locally: a NaN previous value means there is nothing to compare against
    val ignorePrevLocal = prev.isNaN
    val changes = LongBinaryVector(acc, vector).changes(acc, vector, start, end, prev.toLong, ignorePrevLocal)
(changes._1.toDouble, changes._2.toDouble)
}
}
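// Reader sketch (illustrative): when a Double vector was stored via DeltaDeltaVector (all-integral data),
// DoubleVector.apply matches the DELTA2 wire formats above and returns this object, so callers still
// read Doubles:
//
//   val reader = DoubleVector(acc, vectorPtr)   // DoubleLongWrapDataReader
//   val v      = reader(acc, vectorPtr, 0)      // underlying Long converted back to Double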
| tuplejump/FiloDB | memory/src/main/scala/filodb.memory/format/vectors/DoubleVector.scala | Scala | apache-2.0 | 23,580 |