code (string, length 5-1M) | repo_name (string, length 5-109) | path (string, length 6-208) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5-1M)
---|---|---|---|---|---
package com.webtrends.harness.component.zookeeper
import java.util
import java.util.UUID
import org.apache.curator.x.discovery.details.InstanceProvider
import org.apache.curator.x.discovery.{ServiceInstance, UriSpec}
import org.specs2.mutable.SpecificationWithJUnit
import scala.collection.JavaConverters._
class WookieeWeightedStrategySpec extends SpecificationWithJUnit {
class MockInstanceProvider(instances: Seq[ServiceInstance[WookieeServiceDetails]]) extends InstanceProvider[WookieeServiceDetails] {
override def getInstances: util.List[ServiceInstance[WookieeServiceDetails]] = instances.toList.asJava
}
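// Builds a test ServiceInstance whose weight is carried in the WookieeServiceDetails payload.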
def builderInstance(id: Int, weight: Int) = ServiceInstance.builder[WookieeServiceDetails]()
.uriSpec(new UriSpec(s"akka.tcp://server@localhost:8080/"))
.id(id.toString)
.name(UUID.randomUUID().toString)
.payload(new WookieeServiceDetails(weight))
.port(8080)
.build()
"WookieeWeightedStrategy" should {
"returns null when no instances" in {
val instances = Seq.empty[ServiceInstance[WookieeServiceDetails]]
val instanceProvider = new MockInstanceProvider(instances)
val strategy = new WookieeWeightedStrategy()
strategy.getInstance(instanceProvider) mustEqual null
}
"default to round-robin when weights are all the same" in {
val instances = (0 to 10).map(i => builderInstance(i, 0))
val instanceProvider = new MockInstanceProvider(instances)
val strategy = new WookieeWeightedStrategy()
(0 to 10).map(i => strategy.getInstance(instanceProvider).getId == i.toString).reduce(_ && _) mustEqual true
}
"pick the lowest weighted instance" in {
val instances = (1 to 10).map(i => builderInstance(i,i)) ++ Seq(builderInstance(0,0))
val instanceProvider = new MockInstanceProvider(instances)
val strategy = new WookieeWeightedStrategy()
strategy.getInstance(instanceProvider).getId mustEqual "0"
}
"pick the lowest as weight changes" in {
val instances = (10 to 20).map(i => builderInstance(i,i)) ++ Seq( builderInstance(5,5))
val instanceProvider = new MockInstanceProvider(instances)
val strategy = new WookieeWeightedStrategy()
// first check: before the weight update, instance 5 has the lowest weight
strategy.getInstance(instanceProvider).getId mustEqual "5"
// second check: after instance 5's weight has increased, instance 10 is now the lowest
val updatedInstances = (10 to 20).map(i => builderInstance(i,i)) ++ Seq( builderInstance(5, 15))
val updatedProvider = new MockInstanceProvider(updatedInstances)
strategy.getInstance(updatedProvider).getId mustEqual "10"
}
}
}
| Webtrends/wookiee-zookeeper | src/test/scala/com/webtrends/harness/component/zookeeper/WookieeWeightedStrategySpec.scala | Scala | apache-2.0 | 2,688 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api
import _root_.java.util.concurrent.atomic.AtomicInteger
import org.apache.calcite.plan.RelOptUtil
import org.apache.calcite.plan.hep.HepMatchOrder
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.sql2rel.RelDecorrelator
import org.apache.calcite.tools.RuleSet
import org.apache.flink.api.common.functions.MapFunction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.io.DiscardingOutputFormat
import org.apache.flink.api.java.typeutils.GenericTypeInfo
import org.apache.flink.api.java.{DataSet, ExecutionEnvironment}
import org.apache.flink.table.descriptors.{BatchTableDescriptor, ConnectorDescriptor}
import org.apache.flink.table.explain.PlanJsonParser
import org.apache.flink.table.expressions.{Expression, TimeAttribute}
import org.apache.flink.table.plan.nodes.FlinkConventions
import org.apache.flink.table.plan.nodes.dataset.DataSetRel
import org.apache.flink.table.plan.rules.FlinkRuleSets
import org.apache.flink.table.plan.schema._
import org.apache.flink.table.runtime.MapRunner
import org.apache.flink.table.sinks._
import org.apache.flink.table.sources.{BatchTableSource, TableSource}
import org.apache.flink.types.Row
/**
* The abstract base class for batch TableEnvironments.
*
* A TableEnvironment can be used to:
* - convert a [[DataSet]] to a [[Table]]
* - register a [[DataSet]] in the [[TableEnvironment]]'s catalog
* - register a [[Table]] in the [[TableEnvironment]]'s catalog
* - scan a registered table to obtain a [[Table]]
* - specify a SQL query on registered tables to obtain a [[Table]]
* - convert a [[Table]] into a [[DataSet]]
* - explain the AST and execution plan of a [[Table]]
*
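* For example (an illustrative sketch only; "Orders" is a hypothetical
* registered table name, not something provided by this class):
*
* {{{
*   val orders: Table = tableEnv.scan("Orders")
*   println(tableEnv.explain(orders))
* }}}
*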
* @param execEnv The [[ExecutionEnvironment]] which is wrapped in this [[BatchTableEnvironment]].
* @param config The [[TableConfig]] of this [[BatchTableEnvironment]].
*/
abstract class BatchTableEnvironment(
private[flink] val execEnv: ExecutionEnvironment,
config: TableConfig)
extends TableEnvironment(config) {
// a counter for unique table names.
private val nameCntr: AtomicInteger = new AtomicInteger(0)
// the naming pattern for internally registered tables.
private val internalNamePattern = "^_DataSetTable_[0-9]+$".r
override def queryConfig: BatchQueryConfig = new BatchQueryConfig
/**
* Checks if the chosen table name is valid.
*
* @param name The table name to check.
*/
override protected def checkValidTableName(name: String): Unit = {
val m = internalNamePattern.findFirstIn(name)
m match {
case Some(_) =>
throw new TableException(s"Illegal Table name. " +
s"Please choose a name that does not contain the pattern $internalNamePattern")
case None =>
}
}
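// For illustration: checkValidTableName rejects a name such as "_DataSetTable_42" because it
// matches the internal naming pattern, while a user-chosen name such as "MyTable" passes.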
/** Returns a unique table name according to the internal naming pattern. */
override protected def createUniqueTableName(): String =
"_DataSetTable_" + nameCntr.getAndIncrement()
/**
* Registers an internal [[BatchTableSource]] in this [[TableEnvironment]]'s catalog without
* name checking. Registered tables can be referenced in SQL queries.
*
* @param name The name under which the [[TableSource]] is registered.
* @param tableSource The [[TableSource]] to register.
*/
override protected def registerTableSourceInternal(
name: String,
tableSource: TableSource[_])
: Unit = {
tableSource match {
// check for proper batch table source
case batchTableSource: BatchTableSource[_] =>
// check if a table (source or sink) is registered
getTable(name) match {
// table source and/or sink is registered
case Some(table: TableSourceSinkTable[_, _]) => table.tableSourceTable match {
// wrapper contains source
case Some(_: TableSourceTable[_]) =>
throw new TableException(s"Table '$name' already exists. " +
s"Please choose a different name.")
// wrapper contains only sink (not source)
case _ =>
val enrichedTable = new TableSourceSinkTable(
Some(new BatchTableSourceTable(batchTableSource)),
table.tableSinkTable)
replaceRegisteredTable(name, enrichedTable)
}
// no table is registered
case _ =>
val newTable = new TableSourceSinkTable(
Some(new BatchTableSourceTable(batchTableSource)),
None)
registerTableInternal(name, newTable)
}
// not a batch table source
case _ =>
throw new TableException("Only BatchTableSource can be registered in " +
"BatchTableEnvironment.")
}
}
/**
* Creates a table source and/or table sink from a descriptor.
*
* Descriptors allow for declaring the communication to external systems in an
* implementation-agnostic way. The classpath is scanned for suitable table factories that match
* the desired configuration.
*
* The following example shows how to read from a connector using a JSON format and
* registering a table source as "MyTable":
*
* {{{
*
* tableEnv
* .connect(
* new ExternalSystemXYZ()
* .version("0.11"))
* .withFormat(
* new Json()
* .jsonSchema("{...}")
* .failOnMissingField(false))
* .withSchema(
* new Schema()
* .field("user-name", "VARCHAR").from("u_name")
*       .field("count", "DECIMAL"))
* .registerSource("MyTable")
* }}}
*
* @param connectorDescriptor connector descriptor describing the external system
*/
def connect(connectorDescriptor: ConnectorDescriptor): BatchTableDescriptor = {
new BatchTableDescriptor(this, connectorDescriptor)
}
/**
* Registers an external [[TableSink]] with given field names and types in this
* [[TableEnvironment]]'s catalog.
* Registered sink tables can be referenced in SQL DML statements.
*
* Example:
*
* {{{
* // create a table sink and its field names and types
* val fieldNames: Array[String] = Array("a", "b", "c")
* val fieldTypes: Array[TypeInformation[_]] = Array(Types.STRING, Types.INT, Types.LONG)
* val tableSink: BatchTableSink = new YourTableSinkImpl(...)
*
* // register the table sink in the catalog
* tableEnv.registerTableSink("output_table", fieldNames, fieldsTypes, tableSink)
*
* // use the registered sink
* tableEnv.sqlUpdate("INSERT INTO output_table SELECT a, b, c FROM sourceTable")
* }}}
*
* @param name The name under which the [[TableSink]] is registered.
* @param fieldNames The field names to register with the [[TableSink]].
* @param fieldTypes The field types to register with the [[TableSink]].
* @param tableSink The [[TableSink]] to register.
*/
def registerTableSink(
name: String,
fieldNames: Array[String],
fieldTypes: Array[TypeInformation[_]],
tableSink: TableSink[_]): Unit = {
// validate
checkValidTableName(name)
if (fieldNames == null) throw TableException("fieldNames must not be null.")
if (fieldTypes == null) throw TableException("fieldTypes must not be null.")
if (fieldNames.length == 0) throw new TableException("fieldNames must not be empty.")
if (fieldNames.length != fieldTypes.length) {
throw new TableException("Same number of field names and types required.")
}
// configure and register
val configuredSink = tableSink.configure(fieldNames, fieldTypes)
registerTableSinkInternal(name, configuredSink)
}
/**
* Registers an external [[TableSink]] with already configured field names and field types in
* this [[TableEnvironment]]'s catalog.
* Registered sink tables can be referenced in SQL DML statements.
*
* @param name The name under which the [[TableSink]] is registered.
* @param configuredSink The configured [[TableSink]] to register.
*/
def registerTableSink(name: String, configuredSink: TableSink[_]): Unit = {
registerTableSinkInternal(name, configuredSink)
}
private def registerTableSinkInternal(name: String, configuredSink: TableSink[_]): Unit = {
// validate
checkValidTableName(name)
if (configuredSink.getFieldNames == null || configuredSink.getFieldTypes == null) {
throw new TableException("Table sink is not configured.")
}
if (configuredSink.getFieldNames.length == 0) {
throw new TableException("Field names must not be empty.")
}
if (configuredSink.getFieldNames.length != configuredSink.getFieldTypes.length) {
throw new TableException("Same number of field names and types required.")
}
// register
configuredSink match {
// check for proper batch table sink
case _: BatchTableSink[_] =>
// check if a table (source or sink) is registered
getTable(name) match {
// table source and/or sink is registered
case Some(table: TableSourceSinkTable[_, _]) => table.tableSinkTable match {
// wrapper contains sink
case Some(_: TableSinkTable[_]) =>
throw new TableException(s"Table '$name' already exists. " +
s"Please choose a different name.")
// wrapper contains only source (not sink)
case _ =>
val enrichedTable = new TableSourceSinkTable(
table.tableSourceTable,
Some(new TableSinkTable(configuredSink)))
replaceRegisteredTable(name, enrichedTable)
}
// no table is registered
case _ =>
val newTable = new TableSourceSinkTable(
None,
Some(new TableSinkTable(configuredSink)))
registerTableInternal(name, newTable)
}
// not a batch table sink
case _ =>
throw new TableException("Only BatchTableSink can be registered in BatchTableEnvironment.")
}
}
/**
* Writes a [[Table]] to a [[TableSink]].
*
* Internally, the [[Table]] is translated into a [[DataSet]] and handed over to the
* [[TableSink]] to write it.
*
* @param table The [[Table]] to write.
* @param sink The [[TableSink]] to write the [[Table]] to.
* @param queryConfig The configuration for the query to generate.
* @tparam T The expected type of the [[DataSet]] which represents the [[Table]].
*/
override private[flink] def writeToSink[T](
table: Table,
sink: TableSink[T],
queryConfig: QueryConfig): Unit = {
// Check the query configuration to be a batch one.
val batchQueryConfig = queryConfig match {
case batchConfig: BatchQueryConfig => batchConfig
case _ =>
throw new TableException("BatchQueryConfig required to configure batch query.")
}
sink match {
case batchSink: BatchTableSink[T] =>
val outputType = sink.getOutputType
// translate the Table into a DataSet and provide the type that the TableSink expects.
val result: DataSet[T] = translate(table, batchQueryConfig)(outputType)
// Give the DataSet to the TableSink to emit it.
batchSink.emitDataSet(result)
case _ =>
throw new TableException("BatchTableSink required to emit batch Table.")
}
}
/**
* Creates a final converter that maps the internal row type to external type.
*
* @param physicalTypeInfo the input of the sink
* @param schema the input schema with correct field names (esp. for POJO field mapping)
* @param requestedTypeInfo the output type of the sink
* @param functionName name of the map function. Does not need to be unique but has to be a
* valid Java class identifier.
*/
protected def getConversionMapper[IN, OUT](
physicalTypeInfo: TypeInformation[IN],
schema: RowSchema,
requestedTypeInfo: TypeInformation[OUT],
functionName: String)
: Option[MapFunction[IN, OUT]] = {
val converterFunction = generateRowConverterFunction[OUT](
physicalTypeInfo.asInstanceOf[TypeInformation[Row]],
schema,
requestedTypeInfo,
functionName
)
// add a runner if we need conversion
converterFunction.map { func =>
new MapRunner[IN, OUT](
func.name,
func.code,
func.returnType)
}
}
/**
* Returns the AST of the specified Table API and SQL queries and the execution plan to compute
* the result of the given [[Table]].
*
* @param table The table for which the AST and execution plan will be returned.
* @param extended Flag to include detailed optimizer estimates.
*/
private[flink] def explain(table: Table, extended: Boolean): String = {
val ast = table.getRelNode
val optimizedPlan = optimize(ast)
val dataSet = translate[Row](optimizedPlan, ast.getRowType, queryConfig)(
new GenericTypeInfo(classOf[Row]))
dataSet.output(new DiscardingOutputFormat[Row])
val env = dataSet.getExecutionEnvironment
val jsonSqlPlan = env.getExecutionPlan
val sqlPlan = PlanJsonParser.getSqlExecutionPlan(jsonSqlPlan, extended)
s"== Abstract Syntax Tree ==" +
System.lineSeparator +
s"${RelOptUtil.toString(ast)}" +
System.lineSeparator +
s"== Optimized Logical Plan ==" +
System.lineSeparator +
s"${RelOptUtil.toString(optimizedPlan)}" +
System.lineSeparator +
s"== Physical Execution Plan ==" +
System.lineSeparator +
s"$sqlPlan"
}
/**
* Returns the AST of the specified Table API and SQL queries and the execution plan to compute
* the result of the given [[Table]].
*
* @param table The table for which the AST and execution plan will be returned.
*/
def explain(table: Table): String = explain(table: Table, extended = false)
/**
* Registers a [[DataSet]] as a table under a given name in the [[TableEnvironment]]'s catalog.
*
* @param name The name under which the table is registered in the catalog.
* @param dataSet The [[DataSet]] to register as table in the catalog.
* @tparam T the type of the [[DataSet]].
*/
protected def registerDataSetInternal[T](name: String, dataSet: DataSet[T]): Unit = {
val (fieldNames, fieldIndexes) = getFieldInfo[T](dataSet.getType)
val dataSetTable = new DataSetTable[T](
dataSet,
fieldIndexes,
fieldNames
)
registerTableInternal(name, dataSetTable)
}
/**
* Registers a [[DataSet]] as a table under a given name with field names as specified by
* field expressions in the [[TableEnvironment]]'s catalog.
*
* @param name The name under which the table is registered in the catalog.
* @param dataSet The [[DataSet]] to register as table in the catalog.
* @param fields The field expressions to define the field names of the table.
* @tparam T The type of the [[DataSet]].
*/
protected def registerDataSetInternal[T](
name: String, dataSet: DataSet[T], fields: Array[Expression]): Unit = {
val inputType = dataSet.getType
val (fieldNames, fieldIndexes) = getFieldInfo[T](
inputType,
fields)
if (fields.exists(_.isInstanceOf[TimeAttribute])) {
throw new ValidationException(
".rowtime and .proctime time indicators are not allowed in a batch environment.")
}
val dataSetTable = new DataSetTable[T](dataSet, fieldIndexes, fieldNames)
registerTableInternal(name, dataSetTable)
}
/**
* Returns the built-in normalization rules that are defined by the environment.
*/
protected def getBuiltInNormRuleSet: RuleSet = FlinkRuleSets.DATASET_NORM_RULES
/**
* Returns the built-in optimization rules that are defined by the environment.
*/
protected def getBuiltInPhysicalOptRuleSet: RuleSet = FlinkRuleSets.DATASET_OPT_RULES
/**
* Generates the optimized [[RelNode]] tree from the original relational node tree.
*
* @param relNode The original [[RelNode]] tree
* @return The optimized [[RelNode]] tree
*/
private[flink] def optimize(relNode: RelNode): RelNode = {
// 0. convert sub-queries before query decorrelation
val convSubQueryPlan = runHepPlanner(
HepMatchOrder.BOTTOM_UP, FlinkRuleSets.TABLE_SUBQUERY_RULES, relNode, relNode.getTraitSet)
// 0. convert table references
val fullRelNode = runHepPlanner(
HepMatchOrder.BOTTOM_UP,
FlinkRuleSets.TABLE_REF_RULES,
convSubQueryPlan,
relNode.getTraitSet)
// 1. decorrelate
val decorPlan = RelDecorrelator.decorrelateQuery(fullRelNode)
// 2. normalize the logical plan
val normRuleSet = getNormRuleSet
val normalizedPlan = if (normRuleSet.iterator().hasNext) {
runHepPlanner(HepMatchOrder.BOTTOM_UP, normRuleSet, decorPlan, decorPlan.getTraitSet)
} else {
decorPlan
}
// 3. optimize the logical Flink plan
val logicalOptRuleSet = getLogicalOptRuleSet
val logicalOutputProps = relNode.getTraitSet.replace(FlinkConventions.LOGICAL).simplify()
val logicalPlan = if (logicalOptRuleSet.iterator().hasNext) {
runVolcanoPlanner(logicalOptRuleSet, normalizedPlan, logicalOutputProps)
} else {
normalizedPlan
}
// 4. optimize the physical Flink plan
val physicalOptRuleSet = getPhysicalOptRuleSet
val physicalOutputProps = relNode.getTraitSet.replace(FlinkConventions.DATASET).simplify()
val physicalPlan = if (physicalOptRuleSet.iterator().hasNext) {
runVolcanoPlanner(physicalOptRuleSet, logicalPlan, physicalOutputProps)
} else {
logicalPlan
}
physicalPlan
}
/**
* Translates a [[Table]] into a [[DataSet]].
*
* The transformation involves optimizing the relational expression tree as defined by
* Table API calls and / or SQL queries and generating corresponding [[DataSet]] operators.
*
* @param table The root node of the relational expression tree.
* @param queryConfig The configuration for the query to generate.
* @param tpe The [[TypeInformation]] of the resulting [[DataSet]].
* @tparam A The type of the resulting [[DataSet]].
* @return The [[DataSet]] that corresponds to the translated [[Table]].
*/
protected def translate[A](
table: Table,
queryConfig: BatchQueryConfig)(implicit tpe: TypeInformation[A]): DataSet[A] = {
val relNode = table.getRelNode
val dataSetPlan = optimize(relNode)
translate(dataSetPlan, relNode.getRowType, queryConfig)
}
/**
* Translates a logical [[RelNode]] into a [[DataSet]]. Converts to target type if necessary.
*
* @param logicalPlan The root node of the relational expression tree.
* @param logicalType The row type of the result. Since the logicalPlan can lose the
* field naming during optimization we pass the row type separately.
* @param queryConfig The configuration for the query to generate.
* @param tpe The [[TypeInformation]] of the resulting [[DataSet]].
* @tparam A The type of the resulting [[DataSet]].
* @return The [[DataSet]] that corresponds to the translated [[Table]].
*/
protected def translate[A](
logicalPlan: RelNode,
logicalType: RelDataType,
queryConfig: BatchQueryConfig)(implicit tpe: TypeInformation[A]): DataSet[A] = {
TableEnvironment.validateType(tpe)
logicalPlan match {
case node: DataSetRel =>
val plan = node.translateToPlan(this, queryConfig)
val conversion =
getConversionMapper(
plan.getType,
new RowSchema(logicalType),
tpe,
"DataSetSinkConversion")
conversion match {
case None => plan.asInstanceOf[DataSet[A]] // no conversion necessary
case Some(mapFunction: MapFunction[Row, A]) =>
plan.map(mapFunction)
.returns(tpe)
.name(s"to: ${tpe.getTypeClass.getSimpleName}")
.asInstanceOf[DataSet[A]]
}
case _ =>
throw TableException("Cannot generate DataSet due to an invalid logical plan. " +
"This is a bug and should not happen. Please file an issue.")
}
}
}
| yew1eb/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/BatchTableEnvironment.scala | Scala | apache-2.0 | 21,194 |
/***
* Copyright 2017 Andrea Lorenzani
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
***/
package name.lorenzani.andrea.homeaway.services
import com.twitter.finagle.http._
import com.twitter.util.Await
import name.lorenzani.andrea.homeaway.datastore._
import name.lorenzani.andrea.homeaway.json.JsonUtil
import org.scalatest.{FlatSpec, Matchers}
import scala.util.Try
class GetRequestHandlerTest extends FlatSpec with Matchers {
private val ds = new SimpleMapStore
private val listing = Listing(None, None, Address("addr", "post", None, None, None, "UK"), None)
val key1 = ds.add(ListingWrapper(listing))
val key2 = ds.add(ListingWrapper(listing))
val rh = new GetRequestHandler(ds)
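// Two listings are stored up-front, so /listings is expected to return two entries and /size to report a size of 2.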
behavior of "GetRequestHandlerTest"
it should "serve requests for /listing/<someid>" in{
val result = rh.handle(Request(Method.Get, s"/listing/$key1"))
val content = for { res <- result
if res.status == Status.Ok
content = res.contentString }
yield content
val res = Await.result(content)
res should be ("{\\"listing\\":{\\"id\\":\\""+key1+"\\",\\"contact\\":null,\\"address\\":{\\"address\\":\\"addr\\",\\"postalCode\\":\\"post\\",\\"countryCode\\":null,\\"city\\":null,\\"state\\":null,\\"country\\":\\"UK\\"},\\"location\\":null}}")
val notfound = rh.handle(Request(Method.Get, "/listing/noid"))
val contentFail = for { res <- notfound
if res.status == Status.BadRequest
content = res.contentString }
yield content
val newres = Await.result(contentFail)
newres should be ("Not found")
}
it should "eventually serve requests for /listings" in{
val result = rh.handle(Request(Method.Get, "/listings"))
val content = for { res <- result
if res.status == Status.Ok
content = res.contentString }
yield content
val res = Await.result(content)
JsonUtil.fromJson[List[ListingWrapper]](res).size should be (2)
}
it should "eventually serve requests for /size" in{
val result = rh.handle(Request(Method.Get, "/size"))
val content = for { res <- result
if res.status == Status.Ok
content = res.contentString }
yield content
val res = Await.result(content)
res should be ("{\\"size\\":2}")
}
it should "send BadRequest othrwise" in{
val result = Try{ rh.handle(Request(Method.Get, "/")) }
result.isFailure should be (true)
}
}
| andrealorenzani/HAVacationRental | src/test/scala/name/lorenzani/andrea/homeaway/services/GetRequestHandlerTest.scala | Scala | apache-2.0 | 3,032 |
/**
* Majyyka
*
* MajyykLimitAlgo.scala
*
* @author Myo-kun
* @license Lesser GNU Public License v3 (http://www.gnu.org/licenses/lgpl.html)
*/
package myokun.mods.majyyka.core.player
object MajyykLimitAlgo {
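// The limit grows with the square root of the level, scaled by 100:
// e.g. level 1 -> 100.0, level 4 -> 200.0, level 9 -> 300.0.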
def getMajyykLimitForLevel(level:Int):Double = {
Math.sqrt(level) * 100
}
} | myoKun345/Majyyka | majyyka_common/myokun/mods/majyyka/core/player/MajyykLimitAlgo.scala | Scala | lgpl-3.0 | 326 |
package pl.msitko.xml.parsing
import pl.msitko.xml.BasicJvmSpec
import pl.msitko.xml.entities._
class XmlParserJvmSpec extends XmlParserSpec with BasicJvmSpec {
// tests written here specifically (as opposed to `XmlParserSpec`) are ones
// which document behavior that differs between the JVM and JS implementations
"parse complete EntityReference" in {
val res = parse(xmlWithEntityJsVsJvm)
val expectedRoot = labeledElement("html",
labeledElement("body",
labeledElement("p",
EntityReference("simple", "replacement"),
Text(" abc "),
EntityReference("test-entity", "This <em>is</em> an entity."),
Text(" def"),
Text("<")
)
)
)
res.root should === (expectedRoot)
}
"respect ParserConfig.replaceEntityReferences == true" in {
implicit val cfg = ParserConfig.Default.copy(replaceEntityReferences = true)
val res = parseEitherWithConfig(xmlWithEntityJsVsJvm).right.get
val expectedRoot = labeledElement("html",
labeledElement("body",
labeledElement("p",
Text("replacement abc "),
Text("This "),
labeledElement("em", Text("is")),
Text(" an entity. def"),
Text("<")
)
)
)
res.root should === (expectedRoot)
}
"parse basic XML entities as text for replaceEntityReferences == true" in {
val input = "<a>&><"'</a>"
implicit val cfg = ParserConfig.Default.copy(replaceEntityReferences = true)
val res = parseEitherWithConfig(input).right.get
res should === (XmlDocument.noProlog(labeledElement("a",
Text("&"), Text(">"), Text("<"), Text("\\""), Text("'")
)))
}
}
| note/xml-lens | io/jvm/src/test/scala/pl/msitko/xml/parsing/XmlParserJvmSpec.scala | Scala | mit | 1,713 |
package test
class A {
sealed class B
sealed def f = 0
sealed val v = 0
}
| folone/dotty | tests/untried/neg/t1838.scala | Scala | bsd-3-clause | 81 |
package com.insweat.hssd.lib.essence.thypes
import com.insweat.hssd.lib.essence.SchemaLike
import com.insweat.hssd.lib.essence.SimpleThypeLike
import com.insweat.hssd.lib.essence.Element
import scala.collection.immutable.HashMap
import com.insweat.hssd.lib.essence.Thype
import scala.xml.XML
import scala.xml.Utility
import com.insweat.hssd.lib.constraints.Constraint
import com.insweat.hssd.lib.essence.ValueData
class TupleThype(
sch: SchemaLike,
override val name: String,
override val description: String,
val attributes: HashMap[String, String],
elems: Element*
) extends Thype(sch) with SimpleThypeLike {
val elements = HashMap(elems map { e => e.name -> e}: _*)
override val constraint: Option[Constraint] = Some(TupleConstraint)
/**
* Deserializes s into a value, from a string in the format:
*
* 'key1="val1" key2="val2" ...'
*
* where val1, val2, ... are XML-escaped.
*
* If enclosed with any XML tag, this string makes a valid XML element.
* For example, <Tuple key1="val1" key2="val2" ... /> is a valid XML
* element.
*/
override def parse(s: String): Any = parseNullOrValue(s) { trimmed =>
val root = XML.loadString(s"<Tuple $trimmed />")
elements.map { e =>
val (name, elem) = e
val valueStr = root.attribute(name) match {
case Some(seq) => seq.headOption match {
case Some(attr) => attr.text
case None => null
}
case None => null
}
var value = elem.thype.parse(valueStr)
if(value == null) {
value = elem.defaultValue.getOrElse(null)
}
name -> value
}
}
/**
* Serializes o, the value, to a string in the format:
*
* 'key1="val1" key2="val2" ...'
*
* where val1, val2, ... are XML-escaped.
*
* This string is finally persisted.
*
* @See parse for why val1, val2, ... need to be escaped
*/
override def repr(o: Any): String = if(o == null) "" else {
val values = o.asInstanceOf[HashMap[String, Any]]
val keys = elements.keySet.toArray.sorted
val pairs = keys.map { key =>
val elem = elements.get(key).get
val thype = elem.thype
val valueRepr = thype.repr(values.get(key).getOrElse(null))
s"""$key="${Utility.escape(valueRepr)}""""
}
pairs.mkString(" ")
}
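// Round-trip sketch (the element names "x" and "y" are hypothetical):
//   parse("""x="1" y="2" """) yields a HashMap keyed by element name, and
//   repr(...) serializes such a map back to 'key="value"' pairs with XML-escaped
//   values and keys in sorted order.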
override def fixed(o: Any): Any = if(o == null) null else {
val values = o.asInstanceOf[HashMap[String, Any]]
elements.map { e =>
val (name, elem) = e
val thype = elem.thype
name -> thype.fixed(values.get(name).getOrElse(null))
}
}
override def compile {
if(!compiled) {
elements.foreach{ e =>
val (name, elem) = e
elem.compile(this)
if(!elem.thype.isInstanceOf[SimpleThypeLike]) {
throw new IllegalArgumentException(
"A tuple can only contain simple thypes, " +
s"got ${elem.thype} in $this.")
}
if(elem.thype.isInstanceOf[TupleThype]) {
throw new IllegalArgumentException(
"A tuple cannot contain other tuples, " +
s"got ${elem.thype} in $this.")
}
}
super.compile
}
}
}
private object TupleConstraint extends Constraint {
override val name = "com.insweat.hssd.constraints.tupleConstraint"
override def apply(vd: ValueData, value: Any): Option[String] = {
if(value == null) None else {
val thype = vd.element.thype.asInstanceOf[TupleThype]
val values = value.asInstanceOf[HashMap[String, Any]]
var errors: List[String] = Nil
thype.elements.foreach { e =>
val (name, elem) = e
val value = values.get(name).getOrElse(null)
elem.thype.constraint match {
case Some(c) => c.apply(vd, value) match {
case Some(error) => errors ::= error
case None => // pass
}
case None => // pass
}
elem.constraints.foreach{ c =>
c.apply(vd, value) match {
case Some(error) => errors ::= error
case None => // pass
}
}
}
if(!errors.isEmpty) {
Some(errors.mkString(","))
}
else {
None
}
}
}
}
| insweat/hssd | com.insweat.hssd.lib/src/com/insweat/hssd/lib/essence/thypes/TupleThype.scala | Scala | lgpl-3.0 | 4,909 |
package io.eels.component.orc
import java.util.concurrent.atomic.AtomicInteger
import java.util.function.IntUnaryOperator
import com.sksamuel.exts.Logging
import com.typesafe.config.ConfigFactory
import io.eels.Row
import io.eels.schema.StructType
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.ql.exec.vector.ColumnVector
import org.apache.orc.OrcFile.CompressionStrategy
import org.apache.orc.OrcProto.CompressionKind
import org.apache.orc.{OrcConf, OrcFile, TypeDescription}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
class OrcWriter(path: Path,
structType: StructType,
bloomFilterColumns: Seq[String],
rowIndexStride: Option[Int])(implicit conf: Configuration) extends Logging {
private val schema: TypeDescription = OrcSchemaFns.toOrcSchema(structType)
logger.debug(s"Creating orc writer for schema $schema")
private val config = ConfigFactory.load()
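// The configured batch size is clamped to the range [1, 1024] below.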
private val batchSize = {
val size = config.getInt("eel.orc.sink.batchSize")
Math.max(Math.min(1024, size), 1)
}
logger.info(s"Orc writer will use batchsize=$batchSize")
private val buffer = new ArrayBuffer[Row](batchSize)
private val serializers = schema.getChildren.asScala.map(OrcSerializer.forType).toArray
private val batch = schema.createRowBatch(batchSize)
OrcConf.COMPRESSION_STRATEGY.setString(conf, CompressionStrategy.COMPRESSION.name)
OrcConf.COMPRESS.setString(conf, CompressionKind.SNAPPY.name)
private val options = OrcFile.writerOptions(conf).setSchema(schema)
rowIndexStride.foreach { size =>
options.rowIndexStride(size)
logger.info(s"Using stripe size = $size")
}
if (bloomFilterColumns.nonEmpty) {
options.bloomFilterColumns(bloomFilterColumns.mkString(","))
logger.info(s"Using bloomFilterColumns = $bloomFilterColumns")
}
private lazy val writer = OrcFile.createWriter(path, options)
private val _records = new AtomicInteger(0)
def write(row: Row): Unit = {
buffer.append(row)
if (buffer.size == batchSize)
flush()
}
def records = _records.get()
def flush(): Unit = {
def writecol[T <: ColumnVector](rowIndex: Int, colIndex: Int, row: Row): Unit = {
val value = row.values(colIndex)
val vector = batch.cols(colIndex).asInstanceOf[T]
val serializer = serializers(colIndex).asInstanceOf[OrcSerializer[T]]
serializer.writeToVector(rowIndex, vector, value)
}
// don't use foreach here, using old school for loops for perf
for (rowIndex <- buffer.indices) {
val row = buffer(rowIndex)
for (colIndex <- batch.cols.indices) {
writecol(rowIndex, colIndex, row)
}
}
batch.size = buffer.size
writer.addRowBatch(batch)
_records.updateAndGet(new IntUnaryOperator {
override def applyAsInt(operand: Int): Int = operand + batch.size
})
buffer.clear()
batch.reset()
}
def close(): Long = {
if (buffer.nonEmpty)
flush()
writer.close()
val count = writer.getNumberOfRows
logger.info(s"Orc writer wrote $count rows")
count
}
}
| stheppi/eel | eel-orc/src/main/scala/io/eels/component/orc/OrcWriter.scala | Scala | apache-2.0 | 3,171 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.tools
import spark._
import java.lang.reflect.Method
import scala.collection.mutable.ArrayBuffer
import spark.api.java._
import spark.streaming.{PairDStreamFunctions, DStream, StreamingContext}
import spark.streaming.api.java.{JavaPairDStream, JavaDStream, JavaStreamingContext}
import scala.Tuple2
private[spark] abstract class SparkType(val name: String)
private[spark] case class BaseType(override val name: String) extends SparkType(name) {
override def toString: String = {
name
}
}
private[spark]
case class ParameterizedType(override val name: String,
parameters: Seq[SparkType],
typebounds: String = "") extends SparkType(name) {
override def toString: String = {
if (typebounds != "") {
typebounds + " " + name + "<" + parameters.mkString(", ") + ">"
} else {
name + "<" + parameters.mkString(", ") + ">"
}
}
}
private[spark]
case class SparkMethod(name: String, returnType: SparkType, parameters: Seq[SparkType]) {
override def toString: String = {
returnType + " " + name + "(" + parameters.mkString(", ") + ")"
}
}
/**
* A tool for identifying methods that need to be ported from Scala to the Java API.
*
* It uses reflection to find methods in the Scala API and rewrites those methods' signatures
* into appropriate Java equivalents. If those equivalent methods have not been implemented in
* the Java API, they are printed.
*/
object JavaAPICompletenessChecker {
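// Illustrative example of the rewriting performed by toJavaType below: the Scala signature type
// "spark.RDD<scala.Tuple2<K, V>>" is translated to "spark.api.java.JavaPairRDD<K, V>".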
private def parseType(typeStr: String): SparkType = {
if (!typeStr.contains("<")) {
// Base types might begin with "class" or "interface", so we have to strip that off:
BaseType(typeStr.trim.split(" ").last)
} else if (typeStr.endsWith("[]")) {
ParameterizedType("Array", Seq(parseType(typeStr.stripSuffix("[]"))))
} else {
val parts = typeStr.split("<", 2)
val name = parts(0).trim
assert (parts(1).last == '>')
val parameters = parts(1).dropRight(1)
ParameterizedType(name, parseTypeList(parameters))
}
}
private def parseTypeList(typeStr: String): Seq[SparkType] = {
val types: ArrayBuffer[SparkType] = new ArrayBuffer[SparkType]
var stack = 0
var token: StringBuffer = new StringBuffer()
for (c <- typeStr.trim) {
if (c == ',' && stack == 0) {
types += parseType(token.toString)
token = new StringBuffer()
} else if (c == ' ' && stack != 0) {
// continue
} else {
if (c == '<') {
stack += 1
} else if (c == '>') {
stack -= 1
}
token.append(c)
}
}
assert (stack == 0)
if (token.toString != "") {
types += parseType(token.toString)
}
types.toSeq
}
private def parseReturnType(typeStr: String): SparkType = {
if (typeStr(0) == '<') {
val parts = typeStr.drop(1).split(">", 2)
val parsed = parseType(parts(1)).asInstanceOf[ParameterizedType]
ParameterizedType(parsed.name, parsed.parameters, parts(0))
} else {
parseType(typeStr)
}
}
private def toSparkMethod(method: Method): SparkMethod = {
val returnType = parseReturnType(method.getGenericReturnType.toString)
val name = method.getName
val parameters = method.getGenericParameterTypes.map(t => parseType(t.toString))
SparkMethod(name, returnType, parameters)
}
private def toJavaType(scalaType: SparkType): SparkType = {
val renameSubstitutions = Map(
"scala.collection.Map" -> "java.util.Map",
// TODO: the JavaStreamingContext API accepts Array arguments
// instead of Lists, so this isn't a trivial translation / sub:
"scala.collection.Seq" -> "java.util.List",
"scala.Function2" -> "spark.api.java.function.Function2",
"scala.collection.Iterator" -> "java.util.Iterator",
"scala.collection.mutable.Queue" -> "java.util.Queue",
"double" -> "java.lang.Double"
)
// Keep applying the substitutions until we've reached a fixed point.
def applySubs(scalaType: SparkType): SparkType = {
scalaType match {
case ParameterizedType(name, parameters, typebounds) =>
name match {
case "spark.RDD" =>
if (parameters(0).name == classOf[Tuple2[_, _]].getName) {
val tupleParams =
parameters(0).asInstanceOf[ParameterizedType].parameters.map(toJavaType)
ParameterizedType(classOf[JavaPairRDD[_, _]].getName, tupleParams)
} else {
ParameterizedType(classOf[JavaRDD[_]].getName, parameters.map(toJavaType))
}
case "spark.streaming.DStream" =>
if (parameters(0).name == classOf[Tuple2[_, _]].getName) {
val tupleParams =
parameters(0).asInstanceOf[ParameterizedType].parameters.map(toJavaType)
ParameterizedType("spark.streaming.api.java.JavaPairDStream", tupleParams)
} else {
ParameterizedType("spark.streaming.api.java.JavaDStream",
parameters.map(toJavaType))
}
// TODO: Spark Streaming uses Guava's Optional in place of Option, leading to some
// false-positives here:
case "scala.Option" =>
toJavaType(parameters(0))
case "scala.Function1" =>
val firstParamName = parameters.last.name
if (firstParamName.startsWith("scala.collection.Traversable") ||
firstParamName.startsWith("scala.collection.Iterator")) {
ParameterizedType("spark.api.java.function.FlatMapFunction",
Seq(parameters(0),
parameters.last.asInstanceOf[ParameterizedType].parameters(0)).map(toJavaType))
} else if (firstParamName == "scala.runtime.BoxedUnit") {
ParameterizedType("spark.api.java.function.VoidFunction",
parameters.dropRight(1).map(toJavaType))
} else {
ParameterizedType("spark.api.java.function.Function", parameters.map(toJavaType))
}
case _ =>
ParameterizedType(renameSubstitutions.getOrElse(name, name),
parameters.map(toJavaType))
}
case BaseType(name) =>
if (renameSubstitutions.contains(name)) {
BaseType(renameSubstitutions(name))
} else {
scalaType
}
}
}
var oldType = scalaType
var newType = applySubs(scalaType)
while (oldType != newType) {
oldType = newType
newType = applySubs(scalaType)
}
newType
}
private def toJavaMethod(method: SparkMethod): SparkMethod = {
val params = method.parameters
.filterNot(_.name == "scala.reflect.ClassManifest").map(toJavaType)
SparkMethod(method.name, toJavaType(method.returnType), params)
}
private def isExcludedByName(method: Method): Boolean = {
val name = method.getDeclaringClass.getName + "." + method.getName
// Scala methods that are declared as private[mypackage] become public in the resulting
// Java bytecode. As a result, we need to manually exclude those methods here.
// This list also includes a few methods that are only used by the web UI or other
// internal Spark components.
val excludedNames = Seq(
"spark.RDD.origin",
"spark.RDD.elementClassManifest",
"spark.RDD.checkpointData",
"spark.RDD.partitioner",
"spark.RDD.partitions",
"spark.RDD.firstParent",
"spark.RDD.doCheckpoint",
"spark.RDD.markCheckpointed",
"spark.RDD.clearDependencies",
"spark.RDD.getDependencies",
"spark.RDD.getPartitions",
"spark.RDD.dependencies",
"spark.RDD.getPreferredLocations",
"spark.RDD.collectPartitions",
"spark.RDD.computeOrReadCheckpoint",
"spark.PairRDDFunctions.getKeyClass",
"spark.PairRDDFunctions.getValueClass",
"spark.SparkContext.stringToText",
"spark.SparkContext.makeRDD",
"spark.SparkContext.runJob",
"spark.SparkContext.runApproximateJob",
"spark.SparkContext.clean",
"spark.SparkContext.metadataCleaner",
"spark.SparkContext.ui",
"spark.SparkContext.newShuffleId",
"spark.SparkContext.newRddId",
"spark.SparkContext.cleanup",
"spark.SparkContext.receiverJobThread",
"spark.SparkContext.getRDDStorageInfo",
"spark.SparkContext.addedFiles",
"spark.SparkContext.addedJars",
"spark.SparkContext.persistentRdds",
"spark.SparkContext.executorEnvs",
"spark.SparkContext.checkpointDir",
"spark.SparkContext.getSparkHome",
"spark.SparkContext.executorMemoryRequested",
"spark.SparkContext.getExecutorStorageStatus",
"spark.streaming.DStream.generatedRDDs",
"spark.streaming.DStream.zeroTime",
"spark.streaming.DStream.rememberDuration",
"spark.streaming.DStream.storageLevel",
"spark.streaming.DStream.mustCheckpoint",
"spark.streaming.DStream.checkpointDuration",
"spark.streaming.DStream.checkpointData",
"spark.streaming.DStream.graph",
"spark.streaming.DStream.isInitialized",
"spark.streaming.DStream.parentRememberDuration",
"spark.streaming.DStream.initialize",
"spark.streaming.DStream.validate",
"spark.streaming.DStream.setContext",
"spark.streaming.DStream.setGraph",
"spark.streaming.DStream.remember",
"spark.streaming.DStream.getOrCompute",
"spark.streaming.DStream.generateJob",
"spark.streaming.DStream.clearOldMetadata",
"spark.streaming.DStream.addMetadata",
"spark.streaming.DStream.updateCheckpointData",
"spark.streaming.DStream.restoreCheckpointData",
"spark.streaming.DStream.isTimeValid",
"spark.streaming.StreamingContext.nextNetworkInputStreamId",
"spark.streaming.StreamingContext.networkInputTracker",
"spark.streaming.StreamingContext.checkpointDir",
"spark.streaming.StreamingContext.checkpointDuration",
"spark.streaming.StreamingContext.receiverJobThread",
"spark.streaming.StreamingContext.scheduler",
"spark.streaming.StreamingContext.initialCheckpoint",
"spark.streaming.StreamingContext.getNewNetworkStreamId",
"spark.streaming.StreamingContext.validate",
"spark.streaming.StreamingContext.createNewSparkContext",
"spark.streaming.StreamingContext.rddToFileName",
"spark.streaming.StreamingContext.getSparkCheckpointDir",
"spark.streaming.StreamingContext.env",
"spark.streaming.StreamingContext.graph",
"spark.streaming.StreamingContext.isCheckpointPresent"
)
val excludedPatterns = Seq(
"""^spark\\.SparkContext\\..*To.*Functions""",
"""^spark\\.SparkContext\\..*WritableConverter""",
"""^spark\\.SparkContext\\..*To.*Writable"""
).map(_.r)
lazy val excludedByPattern =
!excludedPatterns.map(_.findFirstIn(name)).filter(_.isDefined).isEmpty
name.contains("$") || excludedNames.contains(name) || excludedByPattern
}
private def isExcludedByInterface(method: Method): Boolean = {
val excludedInterfaces =
Set("spark.Logging", "org.apache.hadoop.mapreduce.HadoopMapReduceUtil")
def toComparisonKey(method: Method) =
(method.getReturnType, method.getName, method.getGenericReturnType)
val interfaces = method.getDeclaringClass.getInterfaces.filter { i =>
excludedInterfaces.contains(i.getName)
}
val excludedMethods = interfaces.flatMap(_.getMethods.map(toComparisonKey))
excludedMethods.contains(toComparisonKey(method))
}
private def printMissingMethods(scalaClass: Class[_], javaClass: Class[_]) {
val methods = scalaClass.getMethods
.filterNot(_.isAccessible)
.filterNot(isExcludedByName)
.filterNot(isExcludedByInterface)
val javaEquivalents = methods.map(m => toJavaMethod(toSparkMethod(m))).toSet
val javaMethods = javaClass.getMethods.map(toSparkMethod).toSet
val missingMethods = javaEquivalents -- javaMethods
for (method <- missingMethods) {
println(method)
}
}
def main(args: Array[String]) {
println("Missing RDD methods")
printMissingMethods(classOf[RDD[_]], classOf[JavaRDD[_]])
println()
println("Missing PairRDD methods")
printMissingMethods(classOf[PairRDDFunctions[_, _]], classOf[JavaPairRDD[_, _]])
println()
println("Missing DoubleRDD methods")
printMissingMethods(classOf[DoubleRDDFunctions], classOf[JavaDoubleRDD])
println()
println("Missing OrderedRDD methods")
printMissingMethods(classOf[OrderedRDDFunctions[_, _]], classOf[JavaPairRDD[_, _]])
println()
println("Missing SparkContext methods")
printMissingMethods(classOf[SparkContext], classOf[JavaSparkContext])
println()
println("Missing StreamingContext methods")
printMissingMethods(classOf[StreamingContext], classOf[JavaStreamingContext])
println()
println("Missing DStream methods")
printMissingMethods(classOf[DStream[_]], classOf[JavaDStream[_]])
println()
println("Missing PairDStream methods")
printMissingMethods(classOf[PairDStreamFunctions[_, _]], classOf[JavaPairDStream[_, _]])
println()
}
}
| wgpshashank/spark | tools/src/main/scala/spark/tools/JavaAPICompletenessChecker.scala | Scala | apache-2.0 | 14,100 |
import sbt._
import sbt.Keys._
import sbtassembly.Plugin._
import sbtassembly.Plugin.AssemblyKeys._
object ProjectBuild extends Build {
lazy val project = "encog"
lazy val root = Project(id = project,
base = file("."),
settings = Project.defaultSettings ++ assemblySettings)
.settings(
organization := "eu.shiftforward",
version := "0.1-SNAPSHOT",
scalaVersion := "2.10.0",
resolvers ++= Seq(
"Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/",
"Typesafe Snapshots Repository" at "http://repo.typesafe.com/typesafe/snapshots/",
"Sonatype Repository" at "http://oss.sonatype.org/content/repositories/releases",
"Sonatype Snapshots Repository" at "http://oss.sonatype.org/content/repositories/snapshots",
"BerkeleyDB JE Repository" at "http://download.oracle.com/maven/"
),
libraryDependencies ++= Seq(
"org.encog" % "encog-core" % "3.1.+",
"org.specs2" %% "specs2" % "1.13" % "test",
"junit" % "junit" % "4.11" % "test"
),
testOptions in Test += Tests.Argument(TestFrameworks.Specs2, "junitxml", "console"),
scalacOptions ++= Seq("-deprecation", "-unchecked")
)
}
| hugoferreira/chessocr | project/Build.scala | Scala | mit | 1,386 |
/*
* Copyright 2007-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package util {
import _root_.java.net.InetAddress
import _root_.java.util.Properties
import Helpers._
import common._
/**
* Configuration management utilities.
*
* If you want to provide a configuration file for a subset of your application
* or for a specific environment, Lift expects configuration files to be named
* in a manner relating to the context in which they are being used. The standard
* name format is:
*
* <pre>
* modeName.userName.hostName.props
*
* examples:
* dpp.yak.props
* test.dpp.yak.props
* production.moose.props
* staging.dpp.props
* test.default.props
* default.props
* </pre>
*
* with hostName and userName being optional, and modeName being one of
* "test", "staging", "production", "pilot", "profile", or "default".
* The standard Lift properties file extension is "props".
*/
object Props {
/**
* Get the configuration property value for the specified key.
* @param name key for the property to get
* @return the value of the property if defined
*/
def get(name: String): Box[String] = Box(props.get(name))
// def apply(name: String): String = props(name)
def getInt(name: String): Box[Int] = get(name).map(toInt) // toInt(props.get(name))
def getInt(name: String, defVal: Int): Int = getInt(name) openOr defVal // props.get(name).map(toInt(_)) getOrElse defVal
def getLong(name: String): Box[Long] = props.get(name).flatMap(asLong)
def getLong(name: String, defVal: Long): Long = getLong(name) openOr defVal // props.get(name).map(toLong(_)) getOrElse defVal
def getBool(name: String): Box[Boolean] = props.get(name).map(toBoolean) // (props.get(name))
def getBool(name: String, defVal: Boolean): Boolean = getBool(name) openOr defVal // props.get(name).map(toBoolean(_)) getOrElse defVal
def get(name: String, defVal: String) = props.get(name) getOrElse defVal
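// Illustrative usage (the property keys shown here are hypothetical):
//   val dbUrl = Props.get("db.url") openOr "jdbc:h2:mem:test"
//   val poolSize = Props.getInt("db.pool.size", 10)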
/**
* Determine whether the specified properties exist.
* @param what the properties to test
* @return the subset of strings in 'what' that do not correspond to
* keys for available properties.
*/
def require(what: String*) = what.filter(!props.contains(_))
/**
* Ensure that all of the specified properties exist; throw an exception if
* any of the specified values are not keys for available properties.
*/
def requireOrDie(what: String*) {
require(what :_*).toList match {
case Nil =>
case bad => throw new Exception("The following required properties are not defined: "+bad.mkString(","))
}
}
/**
* Enumeration of available run modes.
*/
object RunModes extends Enumeration {
val Development = Value(1, "Development")
val Test = Value(2, "Test")
val Staging = Value(3, "Staging")
val Production = Value(4, "Production")
val Pilot = Value(5, "Pilot")
val Profile = Value(6, "Profile")
}
import RunModes._
val propFileName = "lift.props"
val fileName = "lift.props"
/**
* The mode for which to retrieve properties, retrieved by System.getProperty("run.mode").
* Recognized modes are "development", "test", "profile", "pilot", "staging" and "production"
* with the default run mode being development.
*/
lazy val mode = {
Box.legacyNullTest((System.getProperty("run.mode"))).map(_.toLowerCase) match {
case Full("test") => Test
case Full("production") => Production
case Full("staging") => Staging
case Full("pilot") => Pilot
case Full("profile") => Profile
case Full("development") => Development
case _ =>
val exp = new Exception
exp.fillInStackTrace
if (exp.getStackTrace.find(st => st.getClassName.indexOf("SurefireBooter") >= 0).isDefined) Test
else Development
}
}
/**
* Is the system in production mode (apply full optimizations)
*/
lazy val productionMode: Boolean = mode == RunModes.Production ||
mode == RunModes.Pilot || mode == RunModes.Staging
/**
* Is the system in development mode
*/
lazy val devMode: Boolean = mode == RunModes.Development
/**
* Is the system running in test mode
*/
lazy val testMode: Boolean = mode == RunModes.Test
/**
* The resource path segment corresponding to the current mode.
*/
lazy val modeName = mode match {
case Test => "test"
case Staging => "staging"
case Production => "production"
case Pilot => "pilot"
case Profile => "profile"
case _ => ""
}
private lazy val _modeName = dotLen(modeName)
private def dotLen(in: String): String = in match {
case null | "" => in
case x => x+"."
}
/**
* The resource path segment corresponding to the current system user
* (from System.getProperty("user.name"))
*/
lazy val userName = System.getProperty("user.name")
private lazy val _userName = dotLen(userName)
/**
* Is the app running in the Google App engine (the System property in.gae.j is set)
*/
lazy val inGAE: Boolean = System.getProperty("in.gae.j") != null
/**
* The resource path segment corresponding to the system hostname.
*/
lazy val hostName: String = (if (inGAE) "GAE" else InetAddress.getLocalHost.getHostName)
private lazy val _hostName = dotLen(hostName)
/**
* The list of paths to search for property file resources.
* Properties files may be found at either the classpath root or
* in /props
*/
lazy val toTry: List[() => String] = List(
() => "/props/" + _modeName + _userName + _hostName,
() => "/props/" + _modeName + _userName,
() => "/props/" + _modeName + _hostName,
() => "/props/" + _modeName + "default.",
() => "/" + _modeName + _userName + _hostName,
() => "/" + _modeName + _userName,
() => "/" + _modeName + _hostName,
() => "/" + _modeName + "default.")
/**
* The map of key/value pairs retrieved from the property file.
*/
lazy val props: Map[String, String] = {
import _root_.java.io.{ByteArrayInputStream}
import _root_.java.util.InvalidPropertiesFormatException
import _root_.java.util.{Map => JMap}
// find the first property file that is available
first(toTry){f => tryo(getClass.getResourceAsStream(f()+"props")).filter(_ ne null).
map{s => val ret = new Properties;
val ba = Helpers.readWholeStream(s)
try {
ret.loadFromXML(new ByteArrayInputStream(ba))
} catch {
case _: InvalidPropertiesFormatException =>
ret.load(new ByteArrayInputStream(ba))
}
ret
}} match {
// if we've got a property file, create name/value pairs and turn them into a Map
case Full(prop) =>
Map(prop.entrySet.toArray.flatMap{
case s: JMap.Entry[_, _] => List((s.getKey.toString, s.getValue.toString))
case _ => Nil
} :_*)
case _ => Map()
}
}
}
}
}
| jeppenejsum/liftweb | framework/lift-base/lift-util/src/main/scala/net/liftweb/util/Props.scala | Scala | apache-2.0 | 7,568 |
package com.sdc.play.module.plausc.providers.oauth2
import org.codehaus.jackson.JsonNode
import com.sdc.play.module.plausc.user._
/**
* @author Dr. Erich W. Schreiner - Software Design & Consulting GmbH
* @version 0.1.0.0
* @since 0.1.0.0
*/
class FoursquareAuthInfo(token: String) extends OAuth2AuthInfo(token)
import FoursquareConstants._
import OAuth2Constants._
import scala.collection.JavaConverters._
import com.sdc.play.module.plausc.providers.util.JsonHelpers._
class FoursquareAuthUser(node: JsonNode, info: OAuth2AuthInfo, state: String)
extends BasicOAuth2AuthUser(asText(node,ID),info,state)
with ExtendedIdentity with PicturedIdentity {
override def getFirstName = asText(node, FIRST_NAME)
override def getLastName = asText(node, LAST_NAME)
val homeCity = asText(node, HOME_CITY)
override def getPicture = if (node.has(PHOTO)) {
val sb = new StringBuilder
sb.append(node.get(PHOTO).get(PREFIX).asText)
sb.append(ORIGINAL)
sb.append(node.get(PHOTO).get(SUFFIX).asText)
sb.toString
} else null
def getGender = asText(node, GENDER)
val mtype = asText(node, TYPE)
val bio = asText(node, BIO)
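// The "contact" JSON object (email, twitter, facebook, ...) is flattened into a plain Map of contact details.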
val contactNode = node.get(CONTACT)
val contact = if (contactNode != null)
contactNode.getFields.asScala map {e => (e.getKey, e.getValue.asText)} toMap
else Map[String,String]()
override def getProvider = PROVIDER_KEY
/**
* It is not guaranteed that an email is present for foursquare
*/
override def getEmail = getContactDetail(CONTACT_DETAIL_EMAIL)
def getContactDetail(key: String) = contact.get(key) orNull
override def getName: String = {
val sb = new StringBuilder
val hasFirstName = getFirstName != null && !getFirstName.isEmpty
val hasLastName = getLastName != null && !getLastName.isEmpty
if (hasFirstName) {
sb.append(getFirstName)
if (hasLastName) sb.append(" ")
}
if (hasLastName) {
sb.append(getLastName)
}
sb.toString
}
}
import play.api._
import play.libs.WS
import play.libs.WS.Response
import com.sdc.play.module.plausc._
class FoursquareAuthProvider(app: Application)
extends OAuth2AuthProvider[FoursquareAuthUser, FoursquareAuthInfo](app) {
override protected def buildInfo(r: Response): FoursquareAuthInfo = {
if (r.getStatus >= 400) {
throw new AccessTokenException(r.toString)
} else {
val result = r.asJson
Logger.debug(result.asText)
new FoursquareAuthInfo(result.get(ACCESS_TOKEN).asText)
}
}
override protected def transform(info: FoursquareAuthInfo, state: String): FoursquareAuthUser = {
val url = configuration.get.getString(USER_INFO_URL_SETTING_KEY).get
val r = WS.url(url).
setQueryParameter(OAUTH_TOKEN, info.token).
setQueryParameter("v", VERSION).
get.get(PlayAuthenticate.TIMEOUT)
val result = r.asJson
if (r.getStatus >= 400) {
throw new AuthException(result.get("meta").get("errorDetail").asText)
} else {
Logger.debug(result.toString)
new FoursquareAuthUser(result.get("response").get("user"), info, state)
}
}
override def getKey = PROVIDER_KEY
}
object FoursquareConstants {
val PROVIDER_KEY = "foursquare"
val USER_INFO_URL_SETTING_KEY = "userInfoUrl"
val OAUTH_TOKEN = "oauth_token"
val VERSION = "20120617"
val CONTACT_DETAIL_EMAIL = "email"
val CONTACT_DETAIL_TWITTER = "contact"
val CONTACT_DETAIL_FACEBOOK = "contact"
/**
* From:
* https://developer.foursquare.com/docs/responses/user
*/
val ID = "id" // "1188384"
val FIRST_NAME = "firstName" // "Joscha"
val LAST_NAME = "lastName" // "Feth"
val HOME_CITY = "homeCity" // "Metzingen, Baden-Württemberg"
val PHOTO = "photo" // "<prefix>/original/<suffix>"
val PREFIX = "prefix" // "https://is0.4sqi.net/img/user/"
val ORIGINAL = "original" // "original"
val SUFFIX = "suffix" // "/HZGTZQNRLA21ZIAD.jpg"
val GENDER = "gender" // "male"
val TYPE = "type" // "user"
val CONTACT = "contact" // {"email":
// "[email protected]",
// "twitter":
// "joschafeth",
// "facebook":
// "616473731"}
val BIO = "bio" // "lalala"
} | eschreiner/play2-scala-auth | code/app/com/sdc/play/module/plausc/providers/oauth2/Foursquare.scala | Scala | apache-2.0 | 4,134 |
package io.digitalmagic.akka.dsl
import java.time.Instant
import akka.actor.Props
import io.digitalmagic.coproduct.TListK.:::
import io.digitalmagic.coproduct.{Cop, CopK, TNilK}
import io.digitalmagic.akka.dsl.API._
import io.digitalmagic.akka.dsl.EventSourcedActorWithInterpreter.IndexFuture
import io.digitalmagic.akka.dsl.context.ProgramContextOps
import scalaz._
import scalaz.Scalaz._
import scala.reflect.ClassTag
object Adder {
trait MyEventType extends Event
case object MyEvent extends MyEventType {
override type TimestampType = Instant
override var timestamp: Instant = Instant.now
}
case class MyState(n: Int = 0) extends PersistentState {
override type EventType = MyEventType
}
val myStateProcessor: PersistentStateProcessor[MyState] = new PersistentStateProcessor[MyState] {
override def empty: MyState = MyState()
override def process(state: MyState, event: MyEventType): MyState = event match {
case MyEvent => state.copy(n = state.n + 1)
}
}
def props(implicit api1: Actor1.Query ~> LazyFuture, api2: Actor2.Query ~> LazyFuture): Props = Props(new Adder())
case object QueryAndAdd extends Command[Int]
}
trait AdderPrograms extends EventSourcedPrograms {
import Adder._
override type Environment = Unit
override val contextOps: ProgramContextOps = new ProgramContextOps
override type EntityIdType = Unit
override type EventType = MyEventType
override lazy val eventTypeTag: ClassTag[MyEventType] = implicitly
override type State = MyState
override lazy val stateTag: ClassTag[MyState] = implicitly
override lazy val persistentState: PersistentStateProcessor[State] = myStateProcessor
override type TransientState = Unit
override lazy val initialTransientState: TransientState = ()
override type QueryList = Actor1.Query ::: Actor2.Query ::: TNilK
override type QueryAlgebra[A] = CopK[QueryList, A]
override val algebraIsQuery: IsQuery[QueryAlgebra] = implicitly
override type Index = EmptyIndexList
override val clientRuntime: ClientRuntime[Index#List, Index] = implicitly
val a1 = new Actor1.Api[Program]
val a2 = new Actor2.Api[Program]
def queryAndAdd: Program[Int] = for {
v1 <- a1.getValue
v2 <- a2.getValue
_ <- emit(MyEvent)
} yield v1 + v2
override def getEnvironment(r: Request[_]): Unit = ()
override def processSnapshot(s: Any): Option[State] = s match {
case x: State => Some(x)
case _ => None
}
override def getProgram: Request ~> MaybeProgram = Lambda[Request ~> MaybeProgram] {
case QueryAndAdd => Some(queryAndAdd)
case _ => None
}
}
class Adder()(implicit val api1: Actor1.Query ~> LazyFuture, val api2: Actor2.Query ~> LazyFuture) extends AdderPrograms with EventSourcedActorWithInterpreter {
override def entityId: Unit = ()
override val persistenceId: String = s"${context.system.name}.MyExampleActor"
override def interpreter: QueryAlgebra ~> LazyFuture = CopK.NaturalTransformation.summon
override def indexInterpreter: Index#Algebra ~> IndexFuture = CopK.NaturalTransformation.summon
override def clientApiInterpreter: Index#ClientAlgebra ~> Const[Unit, *] = CopK.NaturalTransformation.summon
override def localApiInterpreter: Index#LocalAlgebra ~> Id = CopK.NaturalTransformation.summon
override def clientEventInterpreter: ClientEventInterpreter = Cop.Function.summon
} | digital-magic-io/akka-cqrs-dsl | akka-cqrs-dsl-core/src/test/scala/io/digitalmagic/akka/dsl/Adder.scala | Scala | apache-2.0 | 3,376 |
/*
* Copyright 2012-2013 Stephane Godbillon (@sgodbillon) and Zenexity
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactivemongo.api.collections
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try
import scala.util.control.NonFatal
import org.jboss.netty.buffer.ChannelBuffer
import reactivemongo.api._
import reactivemongo.api.commands.{ CountCommand, LastError, WriteConcern }
import reactivemongo.bson.buffer.{ ReadableBuffer, WritableBuffer }
import reactivemongo.core.nodeset.ProtocolMetadata
import reactivemongo.core.protocol._
import reactivemongo.core.netty.{
BufferSequence,
ChannelBufferReadableBuffer,
ChannelBufferWritableBuffer
}
import reactivemongo.core.errors.ConnectionNotInitialized
trait GenericCollectionProducer[P <: SerializationPack with Singleton, +C <: GenericCollection[P]] extends CollectionProducer[C]
trait GenericCollectionWithCommands[P <: SerializationPack with Singleton] { self: GenericCollection[P] =>
val pack: P
import reactivemongo.api.commands._
def runner = Command.run(pack)
def runCommand[R, C <: CollectionCommand with CommandWithResult[R]](command: C with CommandWithResult[R])(implicit writer: pack.Writer[ResolvedCollectionCommand[C]], reader: pack.Reader[R], ec: ExecutionContext): Future[R] =
runner(self, command)
def runCommand[C <: CollectionCommand](command: C)(implicit writer: pack.Writer[ResolvedCollectionCommand[C]]): CursorFetcher[pack.type, Cursor] =
runner(self, command)
def runValueCommand[A <: AnyVal, R <: BoxedAnyVal[A], C <: CollectionCommand with CommandWithResult[R]](command: C with CommandWithResult[R with BoxedAnyVal[A]])(implicit writer: pack.Writer[ResolvedCollectionCommand[C]], reader: pack.Reader[R], ec: ExecutionContext): Future[A] =
runner.unboxed(self, command)
}
trait BatchCommands[P <: SerializationPack] {
import reactivemongo.api.commands.{ AggregationFramework => AC, CountCommand => CC, InsertCommand => IC, UpdateCommand => UC, DeleteCommand => DC, DefaultWriteResult, LastError, ResolvedCollectionCommand, FindAndModifyCommand => FMC }
val pack: P
val CountCommand: CC[pack.type]
implicit def CountWriter: pack.Writer[ResolvedCollectionCommand[CountCommand.Count]]
implicit def CountResultReader: pack.Reader[CountCommand.CountResult]
val InsertCommand: IC[pack.type]
implicit def InsertWriter: pack.Writer[ResolvedCollectionCommand[InsertCommand.Insert]]
val UpdateCommand: UC[pack.type]
implicit def UpdateWriter: pack.Writer[ResolvedCollectionCommand[UpdateCommand.Update]]
implicit def UpdateReader: pack.Reader[UpdateCommand.UpdateResult]
val DeleteCommand: DC[pack.type]
implicit def DeleteWriter: pack.Writer[ResolvedCollectionCommand[DeleteCommand.Delete]]
val FindAndModifyCommand: FMC[pack.type]
implicit def FindAndModifyWriter: pack.Writer[ResolvedCollectionCommand[FindAndModifyCommand.FindAndModify]]
implicit def FindAndModifyReader: pack.Reader[FindAndModifyCommand.FindAndModifyResult]
val AggregationFramework: AC[pack.type]
implicit def AggregateWriter: pack.Writer[ResolvedCollectionCommand[AggregationFramework.Aggregate]]
implicit def AggregateReader: pack.Reader[AggregationFramework.AggregationResult]
implicit def DefaultWriteResultReader: pack.Reader[DefaultWriteResult]
implicit def LastErrorReader: pack.Reader[LastError]
}
/**
* A Collection that provides default methods using a `SerializationPack`
* (e.g. the default [[reactivemongo.api.BSONSerializationPack]]).
*
* Some methods of this collection accept instances of `Reader[T]` and `Writer[T]`, that transform any `T` instance into a document, compatible with the selected serialization pack, and vice-versa.
*
* @tparam P the serialization pack
*/
trait GenericCollection[P <: SerializationPack with Singleton] extends Collection with GenericCollectionWithCommands[P] with CollectionMetaCommands with reactivemongo.api.commands.ImplicitCommandHelpers[P] { self =>
val pack: P
protected val BatchCommands: BatchCommands[pack.type]
/** Alias for [[BatchCommands.AggregationFramework.PipelineOperator]] */
type PipelineOperator = BatchCommands.AggregationFramework.PipelineOperator
implicit def PackIdentityReader: pack.Reader[pack.Document] = pack.IdentityReader
implicit def PackIdentityWriter: pack.Writer[pack.Document] = pack.IdentityWriter
def failoverStrategy: FailoverStrategy
def genericQueryBuilder: GenericQueryBuilder[pack.type]
import BatchCommands._
import reactivemongo.api.commands.{
MultiBulkWriteResult,
UpdateWriteResult,
Upserted,
WriteResult
}
private def writeDoc(doc: pack.Document): ChannelBuffer = {
val buffer = ChannelBufferWritableBuffer()
pack.writeToBuffer(buffer, doc)
buffer.buffer
}
private def writeDoc[T](doc: T, writer: pack.Writer[T]) = {
val buffer = ChannelBufferWritableBuffer()
pack.serializeAndWrite(buffer, doc, writer)
buffer.buffer
}
protected def watchFailure[T](future: => Future[T]): Future[T] =
Try(future).recover { case NonFatal(e) => Future.failed(e) }.get
/**
* Find the documents matching the given criteria.
*
* This method accepts any query and projection object, provided that there is an implicit `Writer[S]` typeclass for handling them in the scope.
*
   * Please take a look at the [[http://www.mongodb.org/display/DOCS/Querying mongodb documentation]] to know how querying works.
*
* @tparam S the type of the selector (the query). An implicit `Writer[S]` typeclass for handling it has to be in the scope.
*
* @param query The selector query.
*
   * @return a [[GenericQueryBuilder]] that you can use to customize the query. You can obtain a cursor by calling the method [[reactivemongo.api.Cursor]] on this query builder.
*/
def find[S](selector: S)(implicit swriter: pack.Writer[S]): GenericQueryBuilder[pack.type] = genericQueryBuilder.query(selector)
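  // Usage sketch (not part of the original documentation): assuming a BSON-based
  // collection value named `people`, `reactivemongo.bson.BSONDocument` imported and
  // an implicit `ExecutionContext` in scope, a query and its results could look like:
  //
  //   val adults: Future[List[BSONDocument]] =
  //     people.find(BSONDocument("age" -> BSONDocument("$gt" -> 18))).
  //       cursor[BSONDocument]().collect[List]()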
/**
* Find the documents matching the given criteria.
*
* This method accepts any selector and projection object, provided that there is an implicit `Writer[S]` typeclass for handling them in the scope.
*
   * Please take a look at the [[http://www.mongodb.org/display/DOCS/Querying mongodb documentation]] to know how querying works.
*
* @tparam S the type of the selector (the query). An implicit `Writer[S]` typeclass for handling it has to be in the scope.
* @tparam P the type of the projection object. An implicit `Writer[P]` typeclass for handling it has to be in the scope.
*
* @param selector the query selector.
* @param projection Get only a subset of each matched documents. Defaults to None.
*
   * @return a [[GenericQueryBuilder]] that you can use to customize the query. You can obtain a cursor by calling the method [[reactivemongo.api.Cursor]] on this query builder.
*/
def find[S, P](selector: S, projection: P)(implicit swriter: pack.Writer[S], pwriter: pack.Writer[P]): GenericQueryBuilder[pack.type] =
genericQueryBuilder.query(selector).projection(projection)
/**
* Count the documents matching the given criteria.
*
   * This method accepts any selector and hint, provided that the scope provides instances of the appropriate typeclasses.
*
   * Please take a look at the [[http://www.mongodb.org/display/DOCS/Querying mongodb documentation]] to know how querying works.
*
* @tparam H the type of hint. An implicit `H => Hint` conversion has to be in the scope.
*
* @param selector the query selector
* @param limit the maximum number of matching documents to count
* @param skip the number of matching documents to skip before counting
* @param hint the index to use (either the index name or the index document)
*/
def count[H](selector: Option[pack.Document] = None, limit: Int = 0, skip: Int = 0, hint: Option[H] = None)(implicit h: H => CountCommand.Hint, ec: ExecutionContext): Future[Int] = runValueCommand(CountCommand.Count(query = selector, limit, skip, hint.map(h)))
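  // Usage sketch (hypothetical `people` collection, implicit `ExecutionContext` assumed):
  //
  //   val activeCount: Future[Int] =
  //     people.count(Some(BSONDocument("active" -> true)))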
@inline private def defaultWriteConcern = db.connection.options.writeConcern
def bulkInsert(ordered: Boolean)(documents: ImplicitlyDocumentProducer*)(implicit ec: ExecutionContext): Future[MultiBulkWriteResult] =
db.connection.metadata.map { metadata =>
bulkInsert(documents.toStream.map(_.produce), ordered, defaultWriteConcern, metadata.maxBulkSize, metadata.maxBsonSize)
}.getOrElse(Future.failed(ConnectionNotInitialized.MissingMetadata))
def bulkInsert(ordered: Boolean, writeConcern: WriteConcern)(documents: ImplicitlyDocumentProducer*)(implicit ec: ExecutionContext): Future[MultiBulkWriteResult] =
db.connection.metadata.map { metadata =>
bulkInsert(documents.toStream.map(_.produce), ordered, writeConcern, metadata.maxBulkSize, metadata.maxBsonSize)
}.getOrElse(Future.failed(ConnectionNotInitialized.MissingMetadata))
def bulkInsert(ordered: Boolean, writeConcern: WriteConcern, bulkSize: Int, bulkByteSize: Int)(documents: ImplicitlyDocumentProducer*)(implicit ec: ExecutionContext): Future[MultiBulkWriteResult] =
bulkInsert(documents.toStream.map(_.produce), ordered, writeConcern, bulkSize, bulkByteSize)
def bulkInsert(documents: Stream[pack.Document], ordered: Boolean)(implicit ec: ExecutionContext): Future[MultiBulkWriteResult] =
bulkInsert(documents, ordered, defaultWriteConcern)
def bulkInsert(documents: Stream[pack.Document], ordered: Boolean, writeConcern: WriteConcern)(implicit ec: ExecutionContext): Future[MultiBulkWriteResult] =
db.connection.metadata.map { metadata =>
bulkInsert(documents, ordered, writeConcern, metadata.maxBulkSize, metadata.maxBsonSize)
}.getOrElse(Future.failed(ConnectionNotInitialized.MissingMetadata))
def bulkInsert(documents: Stream[pack.Document], ordered: Boolean, writeConcern: WriteConcern = defaultWriteConcern, bulkSize: Int, bulkByteSize: Int)(implicit ec: ExecutionContext): Future[MultiBulkWriteResult] = watchFailure {
def createBulk[R, A <: BulkMaker[R, A]](docs: Stream[pack.Document], command: A with BulkMaker[R, A]): Future[List[R]] = {
val (tail, nc) = command.fill(docs)
command.send().flatMap { wr =>
if (nc.isDefined) createBulk(tail, nc.get).map(wr2 => wr :: wr2)
else Future.successful(List(wr)) // done
}
}
val metadata = db.connection.metadata
if (!documents.isEmpty) {
val havingMetadata = Failover2(db.connection, failoverStrategy) { () =>
metadata.map(Future.successful).getOrElse(Future.failed(ConnectionNotInitialized.MissingMetadata))
}.future
havingMetadata.flatMap { metadata =>
if (metadata.maxWireVersion >= MongoWireVersion.V26) {
createBulk(documents, Mongo26WriteCommand.insert(ordered, writeConcern, metadata)).map { list =>
list.foldLeft(MultiBulkWriteResult())((r, w) => r.merge(w))
}
} else {
// Mongo 2.4 // TODO: Deprecate/remove
createBulk(documents, new Mongo24BulkInsert(Insert(0, fullCollectionName), writeConcern, metadata)).map { list =>
list.foldLeft(MultiBulkWriteResult())((r, w) => r.merge(w))
}
}
}
} else Future.successful(MultiBulkWriteResult(
ok = true,
n = 0,
nModified = 0,
upserted = Seq.empty,
writeErrors = Seq.empty,
writeConcernError = None,
code = None,
errmsg = None,
totalN = 0))
}
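  // Usage sketch (hypothetical `people` collection, implicit `ExecutionContext` assumed);
  // documents are split into batches according to the connection metadata used above:
  //
  //   val bulkResult: Future[MultiBulkWriteResult] =
  //     people.bulkInsert(ordered = false)(
  //       BSONDocument("name" -> "Jane"),
  //       BSONDocument("name" -> "Joe"))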
/**
* Inserts a document into the collection and wait for the [[reactivemongo.api.commands.WriteResult]].
*
* Please read the documentation about [[reactivemongo.core.commands.GetLastError]] to know how to use it properly.
*
* @tparam T the type of the document to insert. An implicit `Writer[T]` typeclass for handling it has to be in the scope.
*
* @param document the document to insert.
* @param writeConcern the [[reactivemongo.core.commands.GetLastError]] command message to send in order to control how the document is inserted. Defaults to GetLastError().
*
* @return a future [[reactivemongo.api.commands.WriteResult]] that can be used to check whether the insertion was successful.
*/
def insert[T](document: T, writeConcern: WriteConcern = defaultWriteConcern)(implicit writer: pack.Writer[T], ec: ExecutionContext): Future[WriteResult] = {
Failover2(db.connection, failoverStrategy) { () =>
db.connection.metadata match {
case Some(metadata) if metadata.maxWireVersion >= MongoWireVersion.V26 =>
runCommand(BatchCommands.InsertCommand.Insert(
writeConcern = writeConcern)(document)).flatMap { wr =>
val flattened = wr.flatten
if (!flattened.ok) {
// was ordered, with one doc => fail if has an error
Future.failed(flattened)
} else Future.successful(wr)
}
        case Some(_) => // Mongo < 2.6 // TODO: Deprecate/remove
val op = Insert(0, fullCollectionName)
val bson = writeDoc(document, writer)
val checkedWriteRequest = CheckedWriteRequest(op, BufferSequence(bson), writeConcern)
db.connection.sendExpectingResponse(checkedWriteRequest).map(pack.readAndDeserialize(_, LastErrorReader))
case None =>
Future.failed(ConnectionNotInitialized.MissingMetadata)
}
}.future
}
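  // Usage sketch (hypothetical `people` collection, implicit `ExecutionContext` assumed):
  //
  //   val written: Future[WriteResult] =
  //     people.insert(BSONDocument("name" -> "Jane", "age" -> 25))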
/**
* Updates one or more documents matching the given selector with the given modifier or update object.
*
* @tparam S the type of the selector object. An implicit `Writer[S]` typeclass for handling it has to be in the scope.
* @tparam U the type of the modifier or update object. An implicit `Writer[U]` typeclass for handling it has to be in the scope.
*
* @param selector the selector object, for finding the documents to update.
* @param update the modifier object (with special keys like \\$set) or replacement object.
* @param writeConcern the [[reactivemongo.core.commands.GetLastError]] command message to send in order to control how the documents are updated. Defaults to GetLastError().
   * @param upsert states whether the update object should be inserted if no match is found. Defaults to false.
* @param multi states whether the update may be done on all the matching documents.
*
* @return a future [[reactivemongo.api.commands.WriteResult]] that can be used to check whether the update was successful.
*/
def update[S, U](selector: S, update: U, writeConcern: WriteConcern = defaultWriteConcern, upsert: Boolean = false, multi: Boolean = false)(implicit selectorWriter: pack.Writer[S], updateWriter: pack.Writer[U], ec: ExecutionContext): Future[UpdateWriteResult] = Failover2(db.connection, failoverStrategy) { () =>
db.connection.metadata match {
case Some(metadata) if (
metadata.maxWireVersion >= MongoWireVersion.V26) => {
import BatchCommands.UpdateCommand.{ Update, UpdateElement }
runCommand(Update(writeConcern = writeConcern)(
UpdateElement(selector, update, upsert, multi))).flatMap { wr =>
val flattened = wr.flatten
if (!flattened.ok) {
// was ordered, with one doc => fail if has an error
Future.failed(flattened)
} else Future.successful(wr)
}
}
case Some(_) => { // Mongo < 2.6 // TODO: Deprecate/remove
val flags = 0 | (if (upsert) UpdateFlags.Upsert else 0) | (if (multi) UpdateFlags.MultiUpdate else 0)
val op = Update(fullCollectionName, flags)
val bson = writeDoc(selector, selectorWriter)
bson.writeBytes(writeDoc(update, updateWriter))
val checkedWriteRequest = CheckedWriteRequest(op, BufferSequence(bson), writeConcern)
db.connection.sendExpectingResponse(checkedWriteRequest).map { r =>
val res = pack.readAndDeserialize(r, LastErrorReader)
UpdateWriteResult(res.ok, res.n, res.n,
res.upserted.map(Upserted(-1, _)).toSeq,
Nil, None, res.code, res.err)
}
}
case None => Future.failed(ConnectionNotInitialized.MissingMetadata)
}
}.future
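  // Usage sketch (hypothetical `people` collection, implicit `ExecutionContext` assumed):
  //
  //   val updated: Future[UpdateWriteResult] =
  //     people.update(
  //       BSONDocument("name" -> "Jane"),
  //       BSONDocument("$set" -> BSONDocument("age" -> 26)))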
/**
* Returns an update modifier, to be used with [[findAndModify]].
*
* @param update the update to be applied
* @param fetchNewObject the command result must be the new object instead of the old one.
   * @param upsert if true, creates a new document when no document matches the query; otherwise findAndModify performs an update on the matching document
*/
def updateModifier[U](update: U, fetchNewObject: Boolean = false, upsert: Boolean = false)(implicit updateWriter: pack.Writer[U]): BatchCommands.FindAndModifyCommand.Update = BatchCommands.FindAndModifyCommand.Update(update, fetchNewObject, upsert)
/** Returns a removal modifier, to be used with [[findAndModify]]. */
lazy val removeModifier = BatchCommands.FindAndModifyCommand.Remove
/**
* Applies a [[http://docs.mongodb.org/manual/reference/command/findAndModify/ findAndModify]] operation. See [[findAndUpdate]] and [[findAndRemove]] convenient functions.
*
* {{{
* val updateOp = collection.updateModifier(
* BSONDocument("\\$set" -> BSONDocument("age" -> 35)))
*
* val personBeforeUpdate: Future[Person] =
* collection.findAndModify(BSONDocument("name" -> "Joline"), updateOp).
* map(_.result[Person])
*
* val removedPerson: Future[Person] = collection.findAndModify(
* BSONDocument("name" -> "Jack"), collection.removeModifier)
* }}}
*
* @param selector the query selector
* @param modifier the modify operator to be applied
   * @param sort the optional document possibly indicating the sort criteria
* @param fields the field [[http://docs.mongodb.org/manual/tutorial/project-fields-from-query-results/#read-operations-projection projection]]
*/
def findAndModify[Q](selector: Q, modifier: BatchCommands.FindAndModifyCommand.Modify, sort: Option[pack.Document] = None, fields: Option[pack.Document] = None)(implicit selectorWriter: pack.Writer[Q], ec: ExecutionContext): Future[BatchCommands.FindAndModifyCommand.FindAndModifyResult] = {
import FindAndModifyCommand.{ ImplicitlyDocumentProducer => DP }
val command = BatchCommands.FindAndModifyCommand.FindAndModify(
query = selector,
modify = modifier,
sort = sort.map(implicitly[DP](_)),
fields = fields.map(implicitly[DP](_)))
runCommand(command)
}
/**
* Finds some matching document, and updates it (using [[findAndModify]]).
*
* {{{
* val person: Future[BSONDocument] = collection.findAndUpdate(
* BSONDocument("name" -> "James"),
* BSONDocument("\\$set" -> BSONDocument("age" -> 17)),
   *   fetchNewObject = true) // on success, return the updated document:
* // { "age": 17 }
* }}}
*
* @param selector the query selector
* @param update the update to be applied
* @param fetchNewObject the command result must be the new object instead of the old one.
   * @param upsert if true, creates a new document when no document matches the query; otherwise findAndModify performs an update on the matching document
   * @param sort the optional document possibly indicating the sort criteria
* @param fields the field [[http://docs.mongodb.org/manual/tutorial/project-fields-from-query-results/#read-operations-projection projection]]
*/
def findAndUpdate[Q, U](selector: Q, update: U, fetchNewObject: Boolean = false, upsert: Boolean = false, sort: Option[pack.Document] = None, fields: Option[pack.Document] = None)(implicit selectorWriter: pack.Writer[Q], updateWriter: pack.Writer[U], ec: ExecutionContext): Future[BatchCommands.FindAndModifyCommand.FindAndModifyResult] = {
val updateOp = updateModifier(update, fetchNewObject, upsert)
findAndModify(selector, updateOp, sort, fields)
}
/**
* Finds some matching document, and removes it (using [[findAndModify]]).
*
* {{{
* val removed: Future[Person] = collection.findAndRemove(
* BSONDocument("name" -> "Foo")).map(_.result[Person])
* }}}
*
* @param selector the query selector
   * @param sort the optional document possibly indicating the sort criteria
* @param fields the field [[http://docs.mongodb.org/manual/tutorial/project-fields-from-query-results/#read-operations-projection projection]]
*/
def findAndRemove[Q](selector: Q, sort: Option[pack.Document] = None, fields: Option[pack.Document] = None)(implicit selectorWriter: pack.Writer[Q], ec: ExecutionContext): Future[BatchCommands.FindAndModifyCommand.FindAndModifyResult] = findAndModify[Q](selector, removeModifier, sort, fields)
/**
* [[http://docs.mongodb.org/manual/reference/command/aggregate/ Aggregates]] the matching documents.
*
* {{{
* import scala.concurrent.Future
* import scala.concurrent.ExecutionContext.Implicits.global
*
* import reactivemongo.bson._
* import reactivemongo.api.collections.bson.BSONCollection
*
* def populatedStates(cities: BSONCollection): Future[List[BSONDocument]] = {
* import cities.BatchCommands.AggregationFramework
* import AggregationFramework.{ Group, Match, SumField }
*
* cities.aggregate(Group(BSONString("$state"))(
* "totalPop" -> SumField("population")), List(
* Match(document("totalPop" ->
* document("$gte" -> 10000000L))))).map(_.documents)
* }
* }}}
*
* @param firstOperator the first operator of the pipeline
* @param otherOperators the sequence of MongoDB aggregation operations
* @param explain specifies to return the information on the processing of the pipeline
* @param allowDiskUse enables writing to temporary files
* @param cursor the cursor object for aggregation
*/
def aggregate(firstOperator: PipelineOperator, otherOperators: List[PipelineOperator] = Nil, explain: Boolean = false, allowDiskUse: Boolean = false, cursor: Option[BatchCommands.AggregationFramework.Cursor] = None)(implicit ec: ExecutionContext): Future[BatchCommands.AggregationFramework.AggregationResult] = {
import BatchCommands.AggregationFramework.Aggregate
import BatchCommands.{ AggregateWriter, AggregateReader }
runCommand(Aggregate(
firstOperator :: otherOperators, explain, allowDiskUse, cursor))
}
/**
* Remove the matched document(s) from the collection and wait for the [[reactivemongo.api.commands.WriteResult]] result.
*
* Please read the documentation about [[reactivemongo.core.commands.GetLastError]] to know how to use it properly.
*
* @tparam T the type of the selector of documents to remove. An implicit `Writer[T]` typeclass for handling it has to be in the scope.
*
* @param query the selector of documents to remove.
* @param writeConcern the [[reactivemongo.core.commands.GetLastError]] command message to send in order to control how the documents are removed. Defaults to GetLastError().
   * @param firstMatchOnly states whether only the first matched document has to be removed from this collection.
*
* @return a future [[reactivemongo.api.commands.WriteResult]] that can be used to check whether the removal was successful.
*/
def remove[T](query: T, writeConcern: WriteConcern = defaultWriteConcern, firstMatchOnly: Boolean = false)(implicit writer: pack.Writer[T], ec: ExecutionContext): Future[WriteResult] =
Failover2(db.connection, failoverStrategy) { () =>
db.connection.metadata match {
case Some(metadata) if metadata.maxWireVersion >= MongoWireVersion.V26 =>
import BatchCommands.DeleteCommand.{ Delete, DeleteElement }
val limit = if (firstMatchOnly) 1 else 0
runCommand(Delete(writeConcern = writeConcern)(
DeleteElement(query, limit))).flatMap { wr =>
val flattened = wr.flatten
if (!flattened.ok) {
// was ordered, with one doc => fail if has an error
Future.failed(flattened)
} else Future.successful(wr)
}
case Some(_) => // Mongo < 2.6 // TODO: Deprecate/remove
val op = Delete(fullCollectionName, if (firstMatchOnly) 1 else 0)
val bson = writeDoc(query, writer)
val checkedWriteRequest = CheckedWriteRequest(op, BufferSequence(bson), writeConcern)
db.connection.sendExpectingResponse(checkedWriteRequest).map(pack.readAndDeserialize(_, LastErrorReader))
case None =>
Future.failed(ConnectionNotInitialized.MissingMetadata)
}
}.future
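  // Usage sketch (hypothetical `people` collection, implicit `ExecutionContext` assumed):
  //
  //   val removed: Future[WriteResult] =
  //     people.remove(BSONDocument("name" -> "Jane"), firstMatchOnly = true)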
/**
* Remove the matched document(s) from the collection without writeConcern.
*
   * Please note that you cannot be sure that the matched documents have been effectively removed, nor when (hence the `Unit` return type).
*
* @tparam T the type of the selector of documents to remove. An implicit `Writer[T]` typeclass for handling it has to be in the scope.
*
* @param query the selector of documents to remove.
   * @param firstMatchOnly states whether only the first matched document has to be removed from this collection.
*/
def uncheckedRemove[T](query: T, firstMatchOnly: Boolean = false)(implicit writer: pack.Writer[T], ec: ExecutionContext): Unit = {
val op = Delete(fullCollectionName, if (firstMatchOnly) 1 else 0)
val bson = writeDoc(query, writer)
val message = RequestMaker(op, BufferSequence(bson))
db.connection.send(message)
}
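  // Usage sketch (hypothetical `people` collection); fire-and-forget, no result to inspect:
  //
  //   people.uncheckedRemove(BSONDocument("expired" -> true))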
/**
* Updates one or more documents matching the given selector with the given modifier or update object.
*
   * Please note that you cannot be sure that the matched documents have been effectively updated, nor when (hence the `Unit` return type).
*
* @tparam S the type of the selector object. An implicit `Writer[S]` typeclass for handling it has to be in the scope.
* @tparam U the type of the modifier or update object. An implicit `Writer[U]` typeclass for handling it has to be in the scope.
*
* @param selector the selector object, for finding the documents to update.
* @param update the modifier object (with special keys like \\$set) or replacement object.
   * @param upsert states whether the update object should be inserted if no match is found. Defaults to false.
* @param multi states whether the update may be done on all the matching documents.
*/
def uncheckedUpdate[S, U](selector: S, update: U, upsert: Boolean = false, multi: Boolean = false)(implicit selectorWriter: pack.Writer[S], updateWriter: pack.Writer[U]): Unit = {
val flags = 0 | (if (upsert) UpdateFlags.Upsert else 0) | (if (multi) UpdateFlags.MultiUpdate else 0)
val op = Update(fullCollectionName, flags)
val bson = writeDoc(selector, selectorWriter)
bson.writeBytes(writeDoc(update, updateWriter))
val message = RequestMaker(op, BufferSequence(bson))
db.connection.send(message)
}
/**
* Inserts a document into the collection without writeConcern.
*
   * Please note that you cannot be sure that the document has been effectively written, nor when (hence the `Unit` return type).
*
* @tparam T the type of the document to insert. An implicit `Writer[T]` typeclass for handling it has to be in the scope.
*
* @param document the document to insert.
*/
def uncheckedInsert[T](document: T)(implicit writer: pack.Writer[T]): Unit = {
val op = Insert(0, fullCollectionName)
val bson = writeDoc(document, writer)
val message = RequestMaker(op, BufferSequence(bson))
db.connection.send(message)
}
protected object Mongo26WriteCommand {
def insert(ordered: Boolean, writeConcern: WriteConcern, metadata: ProtocolMetadata): Mongo26WriteCommand = new Mongo26WriteCommand("insert", ordered, writeConcern, metadata)
}
protected sealed trait BulkMaker[R, S <: BulkMaker[R, S]] {
def fill(docs: Stream[pack.Document]): (Stream[pack.Document], Option[S]) = {
@annotation.tailrec
def loop(docs: Stream[pack.Document]): (Stream[pack.Document], Option[S]) = {
if (docs.isEmpty) Stream.empty -> None
else {
val res = putOrIssueNewCommand(docs.head)
if (res.isDefined) docs.tail -> res
else loop(docs.tail)
}
}
loop(docs)
}
def putOrIssueNewCommand(doc: pack.Document): Option[S]
def result(): ChannelBuffer
def send()(implicit ec: ExecutionContext): Future[R]
}
protected class Mongo26WriteCommand private (tpe: String, ordered: Boolean, writeConcern: WriteConcern, metadata: ProtocolMetadata) extends BulkMaker[WriteResult, Mongo26WriteCommand] {
import reactivemongo.bson.lowlevel.LowLevelBsonDocWriter
private var done = false
private var docsN = 0
private val buf = ChannelBufferWritableBuffer()
private val writer = new LowLevelBsonDocWriter(buf)
val thresholdDocs = metadata.maxBulkSize
// minus 2 for the trailing '\\0'
val thresholdBytes = metadata.maxBsonSize - 2
init()
def putOrIssueNewCommand(doc: pack.Document): Option[Mongo26WriteCommand] = {
if (done)
throw new RuntimeException("violated assertion: Mongo26WriteCommand should not be used again after it is done")
if (docsN >= thresholdDocs) {
closeIfNecessary()
val nextCommand = new Mongo26WriteCommand(tpe, ordered, writeConcern, metadata)
nextCommand.putOrIssueNewCommand(doc)
Some(nextCommand)
} else {
val start = buf.index
buf.writeByte(0x03)
buf.writeCString(docsN.toString)
val start2 = buf.index
pack.writeToBuffer(buf, doc)
val result =
        if (buf.index > thresholdBytes && docsN == 0) // first and already out of bounds
throw new RuntimeException(s"Mongo26WriteCommand could not accept doc of size = ${buf.index - start} bytes")
else if (buf.index > thresholdBytes) {
val nextCommand = new Mongo26WriteCommand(tpe, ordered, writeConcern, metadata)
nextCommand.buf.writeByte(0x03)
nextCommand.buf.writeCString("0")
nextCommand.buf.buffer.writeBytes(buf.buffer, start2, buf.index - start2)
nextCommand.docsN = 1
buf.buffer.readerIndex(0)
buf.buffer.writerIndex(start)
closeIfNecessary()
Some(nextCommand)
} else None
docsN += 1
result
}
}
// TODO remove
def _debug(): Unit = {
import reactivemongo.bson.{ BSONDocument, buffer }, buffer.DefaultBufferHandler
val rix = buf.buffer.readerIndex
val wix = buf.buffer.writerIndex
val doc = DefaultBufferHandler.BSONDocumentBufferHandler.read(new ChannelBufferReadableBuffer(buf.buffer))
println(doc)
println(BSONDocument.pretty(doc))
buf.buffer.readerIndex(rix)
buf.buffer.writerIndex(wix)
}
def result(): ChannelBuffer = {
closeIfNecessary()
buf.buffer
}
def send()(implicit ec: ExecutionContext): Future[WriteResult] = {
val documents = BufferSequence(result())
val op = Query(0, db.name + ".$cmd", 0, 1)
val cursor = DefaultCursor(pack, op, documents, ReadPreference.primary, db.connection, failoverStrategy, true)(BatchCommands.DefaultWriteResultReader) //(Mongo26WriteCommand.DefaultWriteResultBufferReader)
cursor.headOption.flatMap {
case Some(wr) if wr.inError => Future.failed(wr)
case Some(wr) if wr.hasErrors && ordered => Future.failed(wr)
case Some(wr) => Future.successful(wr)
case None => Future.failed(new RuntimeException("no write result ?"))
}
}
private def closeIfNecessary(): Unit =
if (!done) {
done = true
writer.close // array
writer.close // doc
}
private def init(): Unit = {
writer.
putString(tpe, name).
putBoolean("ordered", ordered)
putWriteConcern()
writer.openArray("documents")
}
private def putWriteConcern(): Unit = {
import reactivemongo.api.commands.GetLastError
writer.openDocument("writeConcern")
writeConcern.w match {
case GetLastError.Majority => writer.putString("w", "majority")
case GetLastError.TagSet(tagSet) => writer.putString("w", tagSet)
case GetLastError.WaitForAknowledgments(n) => writer.putInt("w", n)
}
if (writeConcern.j) writer.putBoolean("j", true)
writeConcern.wtimeout foreach { writer.putInt("wtimeout", _) }
writer.close
}
}
protected class Mongo24BulkInsert(op: Insert, writeConcern: WriteConcern, metadata: ProtocolMetadata) extends BulkMaker[LastError, Mongo24BulkInsert] {
private var done = false
private var docsN = 0
private val buf = ChannelBufferWritableBuffer()
val thresholdDocs = metadata.maxBulkSize
// max size for docs is max bson size minus 16 bytes for header, 4 bytes for flags, and 124 bytes max for fullCollectionName
val thresholdBytes = metadata.maxBsonSize - (4 * 4 + 4 + 124)
def putOrIssueNewCommand(doc: pack.Document): Option[Mongo24BulkInsert] = {
if (done)
throw new RuntimeException("violated assertion: Mongo24BulkInsert should not be used again after it is done")
if (docsN >= thresholdDocs) {
val nextBulk = new Mongo24BulkInsert(op, writeConcern, metadata)
nextBulk.putOrIssueNewCommand(doc)
Some(nextBulk)
} else {
val start = buf.index
pack.writeToBuffer(buf, doc)
if (buf.index > thresholdBytes) {
        if (docsN == 0) // first and already out of bounds
          throw new RuntimeException(s"Mongo24BulkInsert could not accept doc of size = ${buf.index - start} bytes")
val nextBulk = new Mongo24BulkInsert(op, writeConcern, metadata)
nextBulk.buf.buffer.writeBytes(buf.buffer, start, buf.index - start)
nextBulk.docsN = 1
buf.buffer.readerIndex(0)
buf.buffer.writerIndex(start)
done = true
Some(nextBulk)
} else {
docsN += 1
None
}
}
}
def result(): ChannelBuffer = {
done = true
buf.buffer
}
def resultAsCheckedWriteRequest(op: Insert, writeConcern: WriteConcern) = {
CheckedWriteRequest(op, BufferSequence(result()), writeConcern)
}
def send()(implicit ec: ExecutionContext): Future[LastError] = {
val f = () => db.connection.sendExpectingResponse(resultAsCheckedWriteRequest(op, writeConcern))
Failover2(db.connection, failoverStrategy)(f).future.map(pack.readAndDeserialize(_, LastErrorReader))
}
}
}
| avdv/ReactiveMongo | driver/src/main/scala/api/collections/genericcollection.scala | Scala | apache-2.0 | 35,160 |
/*
* Copyright 2015 Ringo Wathelet
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.kodekutters
import java.math.BigInteger
import java.security.SecureRandom
import java.util.Base64
import javax.security.cert.X509Certificate
/**
* utilities supporting the ACME protocol
*
* Reference: Let's Encrypt project at: https://letsencrypt.org/
*/
package object Util {
/**
* test if the current running java is at least version n.
* @param n the string representing the java version number to test (e.g. "1.8")
   * @return true if the current running java version is at least n, false otherwise (including when parsing fails).
*/
def isJavaAtLeast(n: String): Boolean = {
try {
      System.getProperty("java.version").substring(0, 3).toFloat >= n.toFloat
}
catch {
case e: Exception => false
}
}
/**
   * pad the input string with "=" or "==" as needed for base64 decoding
   * @param x the input string to pad
   * @return the input string padded with "=" or "==" when its length is not a multiple of 4
*/
def pad(x: String): String = {
x.length % 4 match {
case 2 => x + "=="
case 3 => x + "="
case _ => x
}
}
/**
* remove any "=" from the input string
   * @param x the input string
* @return the input string with all "=" removed
*/
def unpad(x: String): String = x.replace("=", "")
/**
* create a base64 encoded string from the input bytes
* @param x input byte array to encode
* @return a base64 encoded string
*/
def base64Encode(x: Array[Byte]): String = unpad(Base64.getUrlEncoder.encodeToString(x))
/**
* decode a base64 encoded string into a byte array
* @param x the input string
* @return the decoded string as a byte array
*/
def base64Decode(x: String): Array[Byte] = Base64.getUrlDecoder.decode(pad(x))
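  // Round-trip sketch (not from the original source): base64Encode produces an
  // unpadded URL-safe string and base64Decode restores the padding via pad() before decoding:
  //
  //   val bytes = "hello".getBytes("UTF-8")
  //   val encoded = base64Encode(bytes) // "aGVsbG8" (no '=' padding)
  //   val decoded = new String(base64Decode(encoded), "UTF-8") // "hello"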
/**
   * create an n-byte random value encoded as a base64 string
   * @param n number of bytes, must be > 0
   * @return an n-byte random value encoded as a base64 string
*/
def randomString(n: Int): String = {
require(n > 0, "Util package, randomString(n) should have n > 0")
val b = new Array[Byte](n)
SecureRandom.getInstanceStrong.nextBytes(b)
Base64.getEncoder.encodeToString(b)
}
/**
* create a new 64 bit random number
* @return a BigInteger, a 64 bit random number
*/
def new64BitRandom: BigInteger = new BigInteger(64, SecureRandom.getInstanceStrong)
/**
* create a nonce as a 16 bytes random number base 64 encoded string
* @return 16 bytes random number base 64 encoded string
*/
def newNonce: String = randomString(16)
/**
   * convenience method, creates an optional nonce as a 16-byte random value encoded as a base64 string
   * @return an optional 16-byte random value encoded as a base64 string
*/
def newNonceOpt: Option[String] = Some(newNonce)
/**
   * create a new random 32-byte value encoded as a base64 string
   * @return a 32-byte random value encoded as a base64 string
*/
def newToken: String = randomString(32)
/**
* create a PEM representation of the input X509Certificate
* @param certificate the input X509Certificate
* @return a PEM string of the input X509Certificate
*/
def toPEM(certificate: X509Certificate): String = {
val derCert = certificate.getEncoded()
val pemCertPre = new String(Base64.getEncoder.encode(derCert), "UTF-8")
"-----BEGIN CERTIFICATE-----\\n" + pemCertPre + "-----END CERTIFICATE-----"
}
}
| workingDog/acme-protocol | src/main/scala/com/kodekutters/acme/Util.scala | Scala | apache-2.0 | 3,828 |
package com.datastax.spark.connector.util
import org.scalatest.{Matchers, FlatSpec}
import scala.collection.mutable.ArrayBuffer
class BufferedIterator2Spec extends FlatSpec with Matchers {
"BufferedIterator" should "return the same items as the standard Iterator" in {
val iterator = new BufferedIterator2(Seq(1, 2, 3, 4, 5).iterator)
iterator.hasNext shouldBe true
iterator.next() shouldBe 1
iterator.hasNext shouldBe true
iterator.next() shouldBe 2
iterator.hasNext shouldBe true
iterator.next() shouldBe 3
iterator.hasNext shouldBe true
iterator.next() shouldBe 4
iterator.hasNext shouldBe true
iterator.next() shouldBe 5
iterator.hasNext shouldBe false
}
it should "be convertible to a Seq" in {
val iterator = new BufferedIterator2(Seq(1, 2, 3, 4, 5).iterator)
iterator.toSeq should contain inOrder(1, 2, 3, 4, 5)
}
it should "wrap an empty iterator" in {
val iterator = new BufferedIterator2(Iterator.empty)
iterator.isEmpty shouldBe true
iterator.hasNext shouldBe false
}
it should "offer the head element without consuming the underlying iterator" in {
val iterator = new BufferedIterator2(Seq(1, 2, 3, 4, 5).iterator)
iterator.head shouldBe 1
iterator.next() shouldBe 1
}
it should "offer takeWhile that consumes only the elements matching the predicate" in {
val iterator = new BufferedIterator2(Seq(1, 2, 3, 4, 5).iterator)
val firstThree = iterator.takeWhile(_ <= 3).toList
firstThree should contain inOrder (1, 2, 3)
iterator.head shouldBe 4
iterator.next() shouldBe 4
}
it should "offer appendWhile that copies elements to ArrayBuffer and consumes only the elements matching the predicate" in {
val iterator = new BufferedIterator2(Seq(1, 2, 3, 4, 5).iterator)
val buffer = new ArrayBuffer[Int]
iterator.appendWhile(_ <= 3, buffer)
buffer should contain inOrder (1, 2, 3)
iterator.head shouldBe 4
iterator.next() shouldBe 4
}
it should "throw NoSuchElementException if trying to get next() element that doesn't exist" in {
val iterator = new BufferedIterator2(Seq(1, 2).iterator)
iterator.next()
iterator.next()
a [NoSuchElementException] should be thrownBy iterator.next()
}
}
| viirya/spark-cassandra-connector | spark-cassandra-connector/src/test/scala/com/datastax/spark/connector/util/BufferedIterator2Spec.scala | Scala | apache-2.0 | 2,272 |
package notebook.server
import java.io._
import java.net.URLDecoder
import java.text.SimpleDateFormat
import java.util.Date
import notebook.NBSerializer
import notebook.NBSerializer._
import org.apache.commons.io.FileUtils
import play.api.Logger
import play.api.libs.json._
import utils.Const.UTF_8
class NotebookManager(val name: String, val notebookDir: File) {
Logger.info("Notebook directory is: " + notebookDir.getCanonicalPath)
val extension = ".snb"
def getName(path: String) = path.split("/").filter(!_.isEmpty).last.dropRight(extension.length)
def notebookFile(path: String): File = {
val basePath = notebookDir.getCanonicalPath
val decodedPath = URLDecoder.decode(path, UTF_8)
val nbFile = new File(basePath, decodedPath)
// This check is probably not strictly necessary due to URL encoding of name
// (should escape any path traversal components), but let's be safe
require(nbFile.getCanonicalPath.startsWith(basePath),
"Unable to access notebook outside of notebooks path.")
nbFile
}
def incrementFileName(base: String) = {
Logger.info("Incremented Notebook at " + base)
val newPath: String = Stream.from(1).map(base + _ + extension).dropWhile { fn =>
val snb = notebookFile(fn)
val r = snb.exists()
Logger.info(s"SNB ${snb.getAbsolutePath} exists: $r")
r
}.head
Logger.info("Incremented Notebook is " + newPath)
newPath
}
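  // Behaviour sketch (illustrative, not from the original source):
  //   // with "/Untitled1.snb" and "/Untitled2.snb" already on disk:
  //   incrementFileName("/Untitled") // == "/Untitled3.snb"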
def newNotebook(
path: String = "/",
customLocalRepo: Option[String] = None,
customRepos: Option[List[String]] = None,
customDeps: Option[List[String]] = None,
customImports: Option[List[String]] = None,
customSparkConf: Option[JsObject] = None) = {
val sep = if (path.last == '/') "" else "/"
val fpath = incrementFileName(path + sep + "Untitled")
val nb = Notebook(
Some(new Metadata(getName(fpath),
customLocalRepo = customLocalRepo,
customRepos = customRepos,
customDeps = customDeps,
customImports = customImports,
customSparkConf = customSparkConf)),
Some(Nil),
None,
None,
None
)
save(fpath, nb, overwrite = false)
fpath
}
def copyNotebook(nbPath: String) = {
val nbData = getNotebook(nbPath)
nbData.map { nb =>
val newPath = incrementFileName(nb._4.dropRight(extension.length))
val newName = getName(newPath)
val oldNB = NBSerializer.read(nb._3)
save(newPath, Notebook(oldNB.metadata.map(_.copy(name = newName)), oldNB.cells, oldNB.worksheets, oldNB.autosaved, None), false)
newPath
} getOrElse newNotebook()
}
def getNotebook(path: String) = {
Logger.info(s"getNotebook at path $path")
for (notebook <- load(path)) yield {
val data = FileUtils.readFileToString(notebookFile(path))
val df = new SimpleDateFormat("dd-MM-yyyy HH:mm:ss z'('Z')'")
val last_mtime = df.format(new Date(notebookFile(path).lastModified()))
(last_mtime, notebook.name, data, path)
}
}
def deleteNotebook(path: String) = {
Logger.info(s"deleteNotebook at path $path")
val file = notebookFile(path)
if (file.exists()) {
file.delete()
}
}
def rename(path: String, newpath: String) = {
Logger.info(s"rename from path $path to $newpath")
val newname = getName(newpath)
val oldfile = notebookFile(path)
Logger.debug(s"rename from path $path to $newpath: old file is ${oldfile.getAbsolutePath}")
load(path).foreach { notebook =>
val nb = if (notebook.name != newname) {
val meta = notebook.metadata.map(_.copy(name = newname)).orElse(Some(new Metadata(newname)))
notebook.copy(metadata = meta)
} else {
notebook
}
val newfile = notebookFile(newpath)
Logger.debug(s"rename from path $path to $newpath: new file is ${newfile.getAbsolutePath}")
oldfile.renameTo(newfile)
FileUtils.writeStringToFile(newfile, NBSerializer.write(nb))
}
(newname, newpath)
}
def save(path: String, notebook: Notebook, overwrite: Boolean) = {
Logger.info(s"save at path $path")
val file = notebookFile(path)
if (!overwrite && file.exists()) {
throw new NotebookExistsException("Notebook " + path + " already exists.")
}
FileUtils.writeStringToFile(file, NBSerializer.write(notebook))
val nb = load(path)
(nb.get.metadata.get.name, path)
}
def load(path: String): Option[Notebook] = {
Logger.info(s"Loading notebook at path $path")
val file = notebookFile(path)
if (file.exists())
Some(NBSerializer.read(FileUtils.readFileToString(file)))
else
None
}
}
class NotebookExistsException(message: String) extends IOException(message) | pb-pravin/spark-notebook | app/notebook/server/NotebookManager.scala | Scala | apache-2.0 | 4,730 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.analysis
import org.orbeon.datatypes.LocationData
import org.orbeon.dom.Element
import org.orbeon.oxf.common.{OXFException, OrbeonLocationException}
import org.orbeon.oxf.util.StaticXPath.CompiledExpression
import org.orbeon.oxf.util.{IndentedLogger, XPath}
import org.orbeon.oxf.xforms._
import org.orbeon.oxf.xforms.analysis.controls.VariableTrait
import org.orbeon.oxf.xforms.function.{InstanceTrait, XXFormsInstanceTrait}
import org.orbeon.oxf.xml.dom.XmlExtendedLocationData
import org.orbeon.saxon.expr.PathMap.PathMapArc
import org.orbeon.saxon.expr._
import org.orbeon.saxon.om.Axis
import org.orbeon.xforms.Constants.ComponentSeparator
import org.orbeon.xforms.XFormsId
import org.orbeon.xforms.xbl.Scope
import org.orbeon.xml.NamespaceMapping
import scala.collection.mutable.LinkedHashSet
import scala.util.control.NonFatal
object PathMapXPathAnalysisBuilder {
/**
* Create a new XPathAnalysis based on an initial XPath expression.
*/
def apply(
partAnalysisCtx : PartAnalysisContextAfterTree,
xpathString : String,
namespaceMapping : NamespaceMapping,
baseAnalysis : Option[XPathAnalysis],
inScopeVariables : Map[String, VariableTrait],
pathMapContext : AnyRef,
scope : Scope,
defaultInstancePrefixedId : Option[String],
locationData : LocationData,
element : Element,
avt : Boolean)(implicit
logger : IndentedLogger
): XPathAnalysis = {
val compiledExpression =
XPath.compileExpression(
xpathString = xpathString,
namespaceMapping = namespaceMapping,
locationData = locationData,
functionLibrary = partAnalysisCtx.functionLibrary,
avt = avt
)
apply(
partAnalysisCtx,
compiledExpression,
baseAnalysis,
inScopeVariables,
pathMapContext,
scope,
defaultInstancePrefixedId,
element
)
}
/**
* Create a new XPathAnalysis based on an initial XPath expression.
*/
def apply(
partAnalysisCtx : PartAnalysisContextAfterTree,
compiledExpression : CompiledExpression,
baseAnalysis : Option[XPathAnalysis],
inScopeVariables : Map[String, VariableTrait],
pathMapContext : AnyRef,
scope : Scope,
defaultInstancePrefixedId : Option[String],
element : Element
): XPathAnalysis = {
val xpathString = compiledExpression.string
try {
// Create expression
val expression = compiledExpression.expression.getInternalExpression
val stringPathmap = new PathMap(new StringLiteral(""))
// In-scope variables
val variablePathMaps = {
val it =
for {
(name, variable) <- inScopeVariables.iterator
valueAnalysis = variable.variableAnalysis
if valueAnalysis.isDefined && valueAnalysis.get.figuredOutDependencies
} yield
(
name,
valueAnalysis match {
// Valid PathMap
case Some(analysis: PathMapXPathAnalysis) => analysis.pathmap.get
// Constant string
case _ => stringPathmap
}
)
it.toMap
}
def dependsOnFocus = (expression.getDependencies & StaticProperty.DEPENDS_ON_FOCUS) != 0
val pathmap: Option[PathMap] =
baseAnalysis match {
case Some(baseAnalysis: PathMapXPathAnalysis) if dependsOnFocus =>
// Expression depends on the context and has a context which has a pathmap
// We clone the base analysis and add to an existing PathMap
val clonedPathmap = baseAnalysis.pathmap.get.clone
clonedPathmap.setInScopeVariables(variablePathMaps)
clonedPathmap.setPathMapContext(pathMapContext)
val newNodeset = expression.addToPathMap(clonedPathmap, clonedPathmap.findFinalNodes)
if (! clonedPathmap.isInvalidated)
clonedPathmap.updateFinalNodes(newNodeset)
Some(clonedPathmap)
case Some(baseAnalysis) if dependsOnFocus =>
// Expression depends on the context but the context doesn't have a pathmap
//
          // - if the context analysis is positive (dependencies figured out), the context behaves like a constant string
          // - if the context analysis is negative, we can't handle this
if (baseAnalysis.figuredOutDependencies) Some(stringPathmap) else None
case _ =>
// Start with a new PathMap if:
// - we are at the top (i.e. does not have a context)
// - or the expression does not depend on the focus
// NOTE: We used to test on DEPENDS_ON_CONTEXT_ITEM above, but any use of the focus would otherwise create
// a root context expression in PathMap, which is not right.
Some(new PathMap(expression, variablePathMaps, pathMapContext))
}
pathmap match {
case Some(pathmap) if ! pathmap.isInvalidated =>
// Try to reduce ancestor axis before anything else
reduceAncestorAxis(pathmap)
// DEBUG
// dumpPathMap(XPath.GlobalConfiguration, xpathString, pathmap)
// We use LinkedHashMap/LinkedHashSet in part to keep unit tests reproducible
val valueDependentPaths = new MapSet[String, String]
val returnablePaths = new MapSet[String, String]
val dependentModels = new LinkedHashSet[String]
val dependentInstances = new LinkedHashSet[String]
case class InstancePath(instancePrefixedId: String, path: String)
// Process the pathmap to extract paths and other information useful for handling dependencies.
def processPaths(): Boolean = {
var stack = List[Expression]()
def createInstancePath(node: PathMap.PathMapNode): String Either Option[InstancePath] = {
// Expressions from root to leaf
val expressions = stack.reverse
// Start with first expression
extractInstancePrefixedId(partAnalysisCtx, scope, expressions.head, defaultInstancePrefixedId) match {
// First expression is instance() expression we can handle
case Right(Some(instancePrefixedId)) =>
// Continue with rest of expressions
buildPath(expressions.tail) match {
case Right(path) => Right(Some(InstancePath(instancePrefixedId, path)))
case Left(reason) => Left(reason)
}
// First expression is instance() but there is no default instance so we don't add the path
// (This can happen because we translate everything to start with instance() even if there is actually no default instance.)
case Right(None) => Right(None)
// Unable to handle first expression
case Left(reason) => Left(reason)
}
}
def processNode(node: PathMap.PathMapNode, ancestorAtomized: Boolean = false): Boolean = {
if (node.getArcs.isEmpty || node.isReturnable || node.isAtomized || ancestorAtomized)
createInstancePath(node) match {
case Right(Some(instancePath)) =>
// An instance path was created
// Remember dependencies for this path
val instancePrefixedId = instancePath.instancePrefixedId
val model = partAnalysisCtx.getModelByInstancePrefixedId(instancePrefixedId)
if (model eq null)
throw new OXFException("Reference to invalid instance: " + instancePrefixedId)
dependentModels.add(model.prefixedId)
dependentInstances.add(instancePrefixedId)
// NOTE: A same node can be both returnable AND atomized in a given expression
if (node.isReturnable)
returnablePaths.put(instancePath.instancePrefixedId, instancePath.path)
if (node.isAtomized)
valueDependentPaths.put(instancePath.instancePrefixedId, instancePath.path)
case Right(None) => // NOP: don't add the path as this is not considered a dependency
case Left(_) => return false // we can't deal with this path so stop here
}
// Process children nodes if any
for (arc <- node.getArcs) {
stack ::= arc.getStep
if (! processNode(arc.getTarget, node.isAtomized))
return false // we can't deal with this path so stop here
stack = stack.tail
}
// We managed to deal with this path
true
}
for (root <- pathmap.getPathMapRoots) {
stack ::= root.getRootExpression
if (! processNode(root))
return false
stack = stack.tail
}
true
}
if (processPaths())
// Success
new PathMapXPathAnalysis(
xpathString,
true,
valueDependentPaths,
returnablePaths,
dependentModels,
dependentInstances)(
Some(pathmap),
)
else
// Failure
new NegativeAnalysis(xpathString)
case _ =>
// Failure
new NegativeAnalysis(xpathString)
}
} catch {
case NonFatal(t) =>
throw OrbeonLocationException.wrapException(t,
XmlExtendedLocationData(
compiledExpression.locationData,
Some("analysing XPath expression"),
List("expression" -> xpathString),
Option(element)
)
)
}
}
private def extractInstancePrefixedId(
partAnalysisCtx : PartAnalysisContextAfterTree,
scope : Scope,
expression : Expression,
defaultInstancePrefixedId : Option[String]
): String Either Option[String] = {
// Local class used as marker for a rewritten StringLiteral in an expression
class PrefixedIdStringLiteral(value: CharSequence, val prefixedValue: String) extends StringLiteral(value)
expression match {
case instanceExpression: FunctionCall
if instanceExpression.isInstanceOf[InstanceTrait] || instanceExpression.isInstanceOf[XXFormsInstanceTrait] =>
val hasParameter = instanceExpression.getArguments.nonEmpty
if (! hasParameter) {
// instance() resolves to default instance for scope
defaultInstancePrefixedId match {
case Some(defaultInstancePrefixedId) =>
// Rewrite expression to add/replace its argument with a prefixed instance id
instanceExpression.setArguments(
Array(
new PrefixedIdStringLiteral(XFormsId.getStaticIdFromId(defaultInstancePrefixedId), defaultInstancePrefixedId)
)
)
Right(Some(defaultInstancePrefixedId))
case None =>
// Model does not have a default instance
// This is successful, but the path must not be added
Right(None)
}
} else {
val instanceNameExpression = instanceExpression.getArguments()(0)
instanceNameExpression match {
case stringLiteral: StringLiteral =>
val originalInstanceId = stringLiteral.getStringValue
val searchAncestors = expression.isInstanceOf[XXFormsInstanceTrait]
            // This is a trick: we use PrefixedIdStringLiteral as a marker so we don't rewrite an
// instance() StringLiteral parameter twice
val alreadyRewritten = instanceNameExpression.isInstanceOf[PrefixedIdStringLiteral]
val prefixedInstanceId =
if (alreadyRewritten)
// Parameter associates a prefixed id
stringLiteral.asInstanceOf[PrefixedIdStringLiteral].prefixedValue
else if (searchAncestors)
// xxf:instance()
// NOTE: Absolute ids should also be supported. Right now search will fail with an
// absolute id. However, it is unlikely that literal absolute ids will be passed, so
// this is probably not a big deal.
partAnalysisCtx.findInstancePrefixedId(scope, originalInstanceId).orNull // can return `None`
else if (originalInstanceId.indexOf(ComponentSeparator) != -1)
// HACK: datatable e.g. uses instance(prefixedId)!
originalInstanceId // TODO: warn: could be a non-existing instance id
else
// Normal use of instance()
scope.prefixedIdForStaticId(originalInstanceId) // TODO: warn: could be a non-existing instance id
if (prefixedInstanceId ne null) {
// Instance found
// If needed, rewrite expression to replace its argument with a prefixed instance id
if (! alreadyRewritten)
instanceExpression.setArguments(Array(new PrefixedIdStringLiteral(originalInstanceId, prefixedInstanceId)))
Right(Some(prefixedInstanceId))
} else {
// Instance not found (could be reference to unknown instance e.g. author typo!)
// TODO: must also catch case where id is found but does not correspond to instance
// TODO: warn in log
// This is successful, but the path must not be added
Right(None)
}
case _ => Left("Can't handle non-literal instance name")
}
}
case _ => Left("Can't handle expression not starting with instance()")
}
}
private def buildPath(expressions: Seq[Expression]): String Either String = {
val sb = new StringBuilder
for (expression <- expressions) expression match {
case axisExpression: AxisExpression => axisExpression.getAxis match {
case Axis.SELF => // NOP
case axis @ (Axis.CHILD | Axis.ATTRIBUTE) =>
// Child or attribute axis
if (sb.nonEmpty)
sb.append('/')
val fingerprint = axisExpression.getNodeTest.getFingerprint
if (fingerprint != -1) {
if (axis == Axis.ATTRIBUTE)
sb.append("@")
sb.append(fingerprint)
} else
return Left("Can't handle path because of unnamed node: *")
case axis => return Left("Can't handle path because of unhandled axis: " + Axis.axisName(axis))
}
case expression: Expression => return Left("Can't handle path because of unhandled expression: " + expression.getClass.getName)
}
Right(sb.toString)
}
/**
* Given a raw PathMap, try to reduce ancestor and other axes.
*/
private def reduceAncestorAxis(pathmap: PathMap): Boolean = {
    // Utility class to hold a node and an arc, as otherwise we can't go back to the node from an arc
class NodeArc(val node: PathMap.PathMapNode, val arc: PathMap.PathMapArc)
var stack = List[NodeArc]()
// Return true if we moved an arc
def reduceAncestorAxis(node: PathMap.PathMapNode): Boolean = {
def moveArc(nodeArc: NodeArc, ancestorNode: PathMap.PathMapNode): Unit = {
if (ancestorNode ne null) {
// Move arcs
ancestorNode.addArcs(nodeArc.arc.getTarget.getArcs)
// Remove current arc from its node as it's been moved
nodeArc.node.removeArc(nodeArc.arc)
if (nodeArc.arc.getTarget.isReturnable)
ancestorNode.setReturnable(true)
if (nodeArc.arc.getTarget.isAtomized)
ancestorNode.setAtomized()
} else {
// Ignore for now
}
}
def ancestorsWithFingerprint(nodeName: Int): Seq[PathMapArc] = {
for {
nodeArc <- stack.tail // go from parent to root
e = nodeArc.arc.getStep
if e.getAxis == Axis.CHILD && e.getNodeTest.getFingerprint == nodeName
} yield nodeArc.arc
}
// Process children nodes
for (arc <- node.getArcs) {
val newNodeArc = new NodeArc(node, arc)
val step = arc.getStep
// TODO: handle ANCESTOR_OR_SELF axis
if (stack.nonEmpty) // all tests below assume at least a parent
step.getAxis match {
case Axis.ANCESTOR if step.getNodeTest.getFingerprint != -1 =>
// Found ancestor::foobar
val nodeName = step.getNodeTest.getFingerprint
val ancestorArcs = ancestorsWithFingerprint(nodeName)
if (ancestorArcs.nonEmpty) {
// There can be more than one ancestor with that fingerprint
for (ancestorArc <- ancestorArcs)
moveArc(newNodeArc, ancestorArc.getTarget)
return true
} else {
// E.g.: /a/b/ancestor::c
// TODO
}
case Axis.PARENT => // don't test fingerprint as we could handle /a/*/..
// Parent axis
if (stack.nonEmpty) {
val parentNodeArc = stack.head
moveArc(newNodeArc, parentNodeArc.node)
return true
} else {
// TODO: is this possible?
}
case Axis.FOLLOWING_SIBLING | Axis.PRECEDING_SIBLING =>
// Simplify preceding-sibling::foobar / following-sibling::foobar
val parentNodeArc = stack.head
if (stack.size > 2) {
val grandparentNodeArc = stack.tail.head
val newStep = new AxisExpression(parentNodeArc.arc.getStep.getAxis, step.getNodeTest)
newStep.setContainer(step.getContainer)
grandparentNodeArc.node.createArc(newStep, arc.getTarget)
node.removeArc(arc)
} else {
val newStep = new AxisExpression(Axis.CHILD, step.getNodeTest)
newStep.setContainer(step.getContainer)
parentNodeArc.node.createArc(newStep, arc.getTarget)
node.removeArc(arc)
}
case _ => // NOP
}
stack ::= newNodeArc
if (reduceAncestorAxis(arc.getTarget))
return true
stack = stack.tail
}
// We did not find a match
false
}
for (root <- pathmap.getPathMapRoots) {
// Apply as long as we find matches
while (reduceAncestorAxis(root))
stack = Nil
stack = Nil
}
true
}
// import java.io.ByteArrayOutputStream
// import org.orbeon.io.CharsetNames
// import org.orbeon.oxf.xml.dom.IOSupport
// import org.orbeon.saxon.Configuration
// import org.orbeon.saxon.expr.PathMap.PathMapNode
// import org.orbeon.saxon.trace.ExpressionPresenter
// import scala.xml._
//
// /**
// * Output the structure of the given pathmap to the standard output.
// */
// def dumpPathMap(configuration: Configuration, xpathString: String, pathmap: PathMap): Unit = {
//
// val result =
// <pathmap>
// <xpath>{xpathString}</xpath>
// {
// def explainAsXML(expression: Expression) = {
// // Use Saxon explain() and convert back to native XML
// val out = new ByteArrayOutputStream
// val presenter = new ExpressionPresenter(configuration, out)
// expression.explain(presenter)
// presenter.close()
// XML.loadString(out.toString(CharsetNames.Utf8))
// }
//
// def getStep(step: Expression, targetNode: PathMapNode) =
// <step atomized={targetNode.isAtomized.toString}
// returnable={targetNode.isReturnable.toString}
// unknown-dependencies={targetNode.hasUnknownDependencies.toString}>{step.toString}</step>
//
// def getArcs(node: PathMapNode): Node =
// <arcs>{ Seq(node.getArcs: _*) map (arc => <arc>{ Seq(getStep(arc.getStep, arc.getTarget), getArcs(arc.getTarget)) }</arc>) }</arcs>
//
// for (root <- pathmap.getPathMapRoots) yield
// <root>{ Seq(explainAsXML(root.getRootExpression), getStep(root.getRootExpression, root), getArcs(root)) }</root>
// }
// </pathmap>
//
// // Pretty print
// println(IOSupport.prettyfy(result.toString))
// }
// def externalAnalysisExperiment(expression: Expression, pathMap: PathMap, pathMapNodeSet: PathMap.PathMapNodeSet): PathMap.PathMapNodeSet = {
//
// expression match {
//
// case other =>
// val dependsOnFocus = (other.getDependencies & StaticProperty.DEPENDS_ON_FOCUS) != 0
// val attachmentPoint = pathMapNodeSet match {
// case null if dependsOnFocus =>
// // Result is new ContextItemExpression
// val contextItemExpression = new ContextItemExpression
// contextItemExpression.setContainer(expression.getContainer)
// new PathMap.PathMapNodeSet(pathMap.makeNewRoot(contextItemExpression))
// case _ =>
// // All other cases
// if (dependsOnFocus) pathMapNodeSet else null
// }
//
// val resultNodeSet = new PathMap.PathMapNodeSet
// for (child <- other.iterateSubExpressions)
// resultNodeSet.addNodeSet(externalAnalysisExperiment(child.asInstanceOf[Expression], pathMap, attachmentPoint))
//
// // Handle result differently if result type is atomic or not
// other.getItemType(other.getExecutable.getConfiguration.getTypeHierarchy) match {
// case atomicType: AtomicType =>
// // NOTE: Thought it would be right to call setAtomized(), but it isn't! E.g. count() returns an atomic type,
// // but it doesn't mean the result of its argument expression is atomized. sum() does, but that's handled by
// // the atomization of the argument to sum().
// // resultNodeSet.setAtomized()
// // If expression returns an atomic value then any nodes accessed don't contribute to the result
// null
// case _ => resultNodeSet
// }
// }
// }
}
| orbeon/orbeon-forms | xforms-compiler/jvm/src/main/scala/org/orbeon/oxf/xforms/analysis/PathMapXPathAnalysisBuilder.scala | Scala | lgpl-2.1 | 23,481 |
/*
* OpenURP, Agile University Resource Planning Solution
*
* Copyright (c) 2014-2015, OpenURP Software.
*
* OpenURP is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* OpenURP is distributed in the hope that it will be useful.
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenURP. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.edu.grade.app.model
import java.sql.Date
import org.beangle.data.model.LongId
import org.openurp.edu.base.model.Student
import org.openurp.edu.grade.audit.model.PlanAuditResult
// FIXME: do not map this class
class PlanAuditLog extends LongId {
var standardUsed: String = _
var std: Student = _
var auditBy: String = _
var ip: String = _
var operateAt: Date = _
var passed: Boolean = _
var detail: String = _
def this(result: PlanAuditResult) {
    this()
std = result.std
passed = result.passed
}
}
| openurp/edu-core | grade/core/src/main/scala/org/openurp/edu/grade/app/model/PlanAuditLog.scala | Scala | gpl-3.0 | 1,311 |
package io.apibuilder.validation.helpers
trait PerformanceHelpers {
def time(numberIterations: Int)(f: Long => Any): Long = {
val start = System.currentTimeMillis()
0.to(numberIterations).foreach { i =>
f(i.toLong)
}
System.currentTimeMillis() - start
}
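  // Illustrative usage only (the `validator` and `doc` names below are hypothetical, not part of
  // this helper). Note that `0.to(numberIterations)` runs the body numberIterations + 1 times and
  // the result is the elapsed wall-clock time in milliseconds:
  //
  //   val elapsedMs = time(1000) { i => validator.validate(doc(i)) }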
}
| flowcommerce/lib-apidoc-json-validation | src/test/scala/io/apibuilder/validation/helpers/PerformanceHelpers.scala | Scala | mit | 286 |
/*
*
* * Copyright 2014 websudos ltd.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.websudos.phantom.zookeeper
object TestTable extends DefaultZookeeperConnector {
val keySpace = "phantom"
}
object TestTable2 extends DefaultZookeeperConnector {
val keySpace = "phantom"
}
| nosheenzaza/phantom-data-centric | phantom-zookeeper/src/test/scala/com/websudos/phantom/zookeeper/TestTable.scala | Scala | gpl-2.0 | 850 |
package edu.gemini.spModel.core
import org.scalacheck.Prop.forAll
import org.specs2.ScalaCheck
import org.specs2.mutable.Specification
import AlmostEqual.AlmostEqualOps
object WavelengthSpec extends Specification with ScalaCheck with Arbitraries with Helpers {
"Wavelength Conversions" should {
"support nanometers" !
forAll { (w: Wavelength) =>
Wavelength.fromNanometers(w.toNanometers) ~= w
}
"support microns" !
forAll { (w: Wavelength) =>
Wavelength.fromMicrons(w.toMicrons) ~= w
}
}
"Wavelength Serialization" should {
"Support Java Binary" !
forAll { (w: Wavelength) =>
canSerialize(w)
}
}
}
| arturog8m/ocs | bundle/edu.gemini.spModel.core/src/test/scala/edu/gemini/spModel/core/WavelengthSpec.scala | Scala | bsd-3-clause | 693 |
package cd
class Motion(val callsign: CallSign,
val posOne: Vector3D,
val posTwo: Vector3D) {
  def delta(): Vector3D =
    posTwo.minus(this.posOne)
def findIntersection(other: Motion): Vector3D = {
    val init1 = this.posOne
    val init2 = other.posOne
    val vec1 = delta()
    val vec2 = other.delta()
val radius = Constants.PROXIMITY_RADIUS
    // This is not a purely geometrical 3-D intersection test: it takes the motion of the aircraft
    // into account, so it is more like a 4-D test
    // (it assumes that both aircraft keep a constant velocity over the tested interval).
    // We thus have two points, each moving along its line segment at constant speed, and we are
    // looking for the times at which the distance between these two points is smaller than r.
// vec1 is vector of aircraft 1
// vec2 is vector of aircraft 2
// a = (V2 - V1)^T * (V2 - V1)
    val a = vec2.minus(vec1).squaredMagnitude()
if (a != 0.0) {
// we are first looking for instances of time when the planes are exactly r from each other
      // at least one plane is moving; if the planes are moving in parallel, they do not have the same speed
// if the planes are moving in parallel, then
// if the faster starts behind the slower, we can have 2, 1, or 0 solutions
// if the faster plane starts in front of the slower, we can have 0 or 1 solutions
// if the planes are not moving in parallel, then
// point P1 = I1 + vV1
// point P2 = I2 + vV2
// - looking for v, such that dist(P1,P2) = || P1 - P2 || = r
// it follows that || P1 - P2 || = sqrt( < P1-P2, P1-P2 > )
// 0 = -r^2 + < P1 - P2, P1 - P2 >
// from properties of dot product
// 0 = -r^2 + <I1-I2,I1-I2> + v * 2<I1-I2, V1-V2> + v^2 *<V1-V2,V1-V2>
// so we calculate a, b, c - and solve the quadratic equation
// 0 = c + bv + av^2
// b = 2 * <I1-I2, V1-V2>
      val b = 2.0 * init1.minus(init2).dot(vec1.minus(vec2))
      // c = -r^2 + (I2 - I1)^T * (I2 - I1)
      val c = -radius * radius + init2.minus(init1).squaredMagnitude()
      val discr = b * b - 4.0 * a * c
if (discr < 0.0) {
return null
}
      val v1 = (-b - Math.sqrt(discr)) / (2.0 * a)
      val v2 = (-b + Math.sqrt(discr)) / (2.0 * a)
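      // Worked example with illustrative numbers (not taken from the benchmark data): head-on
      // traffic along x with I1 = (0,0,0), V1 = (10,0,0), I2 = (20,0,0), V2 = (-10,0,0) and r = 1
      // gives a = 400, b = -800, c = 399, so discr = 1600, v1 = 0.95 and v2 = 1.05; since
      // v1 <= 1.0 <= v2 the collision is reported, starting inside the frame at v = 0.95.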
if (v1 <= v2 && ((v1 <= 1.0 && 1.0 <= v2) ||
(v1 <= 0.0 && 0.0 <= v2) ||
(0.0 <= v1 && v2 <= 1.0))) {
// Pick a good "time" at which to report the collision.
var v = 0.0d
if (v1 <= 0.0) {
// The collision started before this frame. Report it at the start of the frame.
v = 0.0
} else {
// The collision started during this frame. Report it at that moment.
v = v1
}
        val result1 = init1.plus(vec1.times(v))
        val result2 = init2.plus(vec2.times(v))
        val result = result1.plus(result2).times(0.5)
if (result.x >= Constants.MIN_X &&
result.x <= Constants.MAX_X &&
result.y >= Constants.MIN_Y &&
result.y <= Constants.MAX_Y &&
result.z >= Constants.MIN_Z &&
result.z <= Constants.MAX_Z) {
          return result
}
}
return null
}
// the planes have the same speeds and are moving in parallel (or they are not moving at all)
// they thus have the same distance all the time ; we calculate it from the initial point
// dist = || i2 - i1 || = sqrt( ( i2 - i1 )^T * ( i2 - i1 ) )
    val dist = init2.minus(init1).magnitude()
if (dist <= radius) {
return init1.plus(init2).times(0.5)
}
    return null
}
}
| cedricviaccoz/scala-native | benchmarks/src/main/scala/cd/Motion.scala | Scala | bsd-3-clause | 3,711 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.util
import java.io._
import java.nio.ByteBuffer
import java.util.{Iterator => JIterator}
import java.util.concurrent.{CountDownLatch, RejectedExecutionException, ThreadPoolExecutor, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.concurrent._
import scala.concurrent.duration._
import scala.language.implicitConversions
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.mockito.ArgumentCaptor
import org.mockito.ArgumentMatchers.{any, anyLong, eq => meq}
import org.mockito.Mockito.{times, verify, when}
import org.scalatest.{BeforeAndAfter, BeforeAndAfterEach, PrivateMethodTester}
import org.scalatest.Assertions._
import org.scalatest.concurrent.Eventually
import org.scalatest.concurrent.Eventually._
import org.scalatestplus.mockito.MockitoSugar
import org.apache.spark.{SparkConf, SparkException, SparkFunSuite}
import org.apache.spark.streaming.scheduler._
import org.apache.spark.util.{CompletionIterator, ManualClock, ThreadUtils, Utils}
/** Common tests for WriteAheadLogs that we would like to test with different configurations. */
abstract class CommonWriteAheadLogTests(
allowBatching: Boolean,
closeFileAfterWrite: Boolean,
testTag: String = "")
extends SparkFunSuite with BeforeAndAfter {
import WriteAheadLogSuite._
protected val hadoopConf = new Configuration()
protected var tempDir: File = null
protected var testDir: String = null
protected var testFile: String = null
protected var writeAheadLog: WriteAheadLog = null
protected def testPrefix = if (testTag != "") testTag + " - " else testTag
before {
tempDir = Utils.createTempDir()
testDir = tempDir.toString
testFile = new File(tempDir, "testFile").toString
if (writeAheadLog != null) {
writeAheadLog.close()
writeAheadLog = null
}
}
after {
Utils.deleteRecursively(tempDir)
}
test(testPrefix + "read all logs") {
// Write data manually for testing reading through WriteAheadLog
val writtenData = (1 to 10).flatMap { i =>
val data = generateRandomData()
val file = testDir + s"/log-$i-$i"
writeDataManually(data, file, allowBatching)
data
}
val logDirectoryPath = new Path(testDir)
val fileSystem = HdfsUtils.getFileSystemForPath(logDirectoryPath, hadoopConf)
assert(fileSystem.exists(logDirectoryPath))
// Read data using manager and verify
val readData = readDataUsingWriteAheadLog(testDir, closeFileAfterWrite, allowBatching)
assert(readData === writtenData)
}
test(testPrefix + "write logs") {
// Write data with rotation using WriteAheadLog class
val dataToWrite = generateRandomData()
writeDataUsingWriteAheadLog(testDir, dataToWrite, closeFileAfterWrite = closeFileAfterWrite,
allowBatching = allowBatching)
// Read data manually to verify the written data
val logFiles = getLogFilesInDirectory(testDir)
assert(logFiles.size > 1)
val writtenData = readAndDeserializeDataManually(logFiles, allowBatching)
assert(writtenData === dataToWrite)
}
test(testPrefix + "read all logs after write") {
// Write data with manager, recover with new manager and verify
val dataToWrite = generateRandomData()
writeDataUsingWriteAheadLog(testDir, dataToWrite, closeFileAfterWrite, allowBatching)
val logFiles = getLogFilesInDirectory(testDir)
assert(logFiles.size > 1)
val readData = readDataUsingWriteAheadLog(testDir, closeFileAfterWrite, allowBatching)
assert(dataToWrite === readData)
}
test(testPrefix + "clean old logs") {
logCleanUpTest(waitForCompletion = false)
}
test(testPrefix + "clean old logs synchronously") {
logCleanUpTest(waitForCompletion = true)
}
private def logCleanUpTest(waitForCompletion: Boolean): Unit = {
// Write data with manager, recover with new manager and verify
val manualClock = new ManualClock
val dataToWrite = generateRandomData()
writeAheadLog = writeDataUsingWriteAheadLog(testDir, dataToWrite, closeFileAfterWrite,
allowBatching, manualClock, closeLog = false)
val logFiles = getLogFilesInDirectory(testDir)
assert(logFiles.size > 1)
writeAheadLog.clean(manualClock.getTimeMillis() / 2, waitForCompletion)
if (waitForCompletion) {
assert(getLogFilesInDirectory(testDir).size < logFiles.size)
} else {
eventually(Eventually.timeout(1.second), interval(10.milliseconds)) {
assert(getLogFilesInDirectory(testDir).size < logFiles.size)
}
}
writeAheadLog.close()
// Make sure it is idempotent.
writeAheadLog.close()
}
test(testPrefix + "handling file errors while reading rotating logs") {
// Generate a set of log files
val manualClock = new ManualClock
val dataToWrite1 = generateRandomData()
writeDataUsingWriteAheadLog(testDir, dataToWrite1, closeFileAfterWrite, allowBatching,
manualClock)
val logFiles1 = getLogFilesInDirectory(testDir)
assert(logFiles1.size > 1)
// Recover old files and generate a second set of log files
val dataToWrite2 = generateRandomData()
manualClock.advance(100000)
writeDataUsingWriteAheadLog(testDir, dataToWrite2, closeFileAfterWrite, allowBatching,
manualClock)
val logFiles2 = getLogFilesInDirectory(testDir)
assert(logFiles2.size > logFiles1.size)
// Read the files and verify that all the written data can be read
val readData1 = readDataUsingWriteAheadLog(testDir, closeFileAfterWrite, allowBatching)
assert(readData1 === (dataToWrite1 ++ dataToWrite2))
// Corrupt the first set of files so that they are basically unreadable
logFiles1.foreach { f =>
val raf = new FileOutputStream(f, true).getChannel()
raf.truncate(1)
raf.close()
}
// Verify that the corrupted files do not prevent reading of the second set of data
val readData = readDataUsingWriteAheadLog(testDir, closeFileAfterWrite, allowBatching)
assert(readData === dataToWrite2)
}
test(testPrefix + "do not create directories or files unless write") {
val nonexistentTempPath = File.createTempFile("test", "")
nonexistentTempPath.delete()
assert(!nonexistentTempPath.exists())
val writtenSegment = writeDataManually(generateRandomData(), testFile, allowBatching)
val wal = createWriteAheadLog(testDir, closeFileAfterWrite, allowBatching)
assert(!nonexistentTempPath.exists(), "Directory created just by creating log object")
if (allowBatching) {
intercept[UnsupportedOperationException](wal.read(writtenSegment.head))
} else {
wal.read(writtenSegment.head)
}
assert(!nonexistentTempPath.exists(), "Directory created just by attempting to read segment")
}
test(testPrefix + "parallel recovery not enabled if closeFileAfterWrite = false") {
// write some data
val writtenData = (1 to 10).flatMap { i =>
val data = generateRandomData()
val file = testDir + s"/log-$i-$i"
writeDataManually(data, file, allowBatching)
data
}
val wal = createWriteAheadLog(testDir, closeFileAfterWrite, allowBatching)
// create iterator but don't materialize it
val readData = wal.readAll().asScala.map(byteBufferToString)
wal.close()
if (closeFileAfterWrite) {
// the threadpool is shutdown by the wal.close call above, therefore we shouldn't be able
// to materialize the iterator with parallel recovery
intercept[RejectedExecutionException](readData.toArray)
} else {
assert(readData.toSeq === writtenData)
}
}
}
class FileBasedWriteAheadLogSuite
extends CommonWriteAheadLogTests(false, false, "FileBasedWriteAheadLog") {
import WriteAheadLogSuite._
test("FileBasedWriteAheadLog - seqToParIterator") {
/*
If the setting `closeFileAfterWrite` is enabled, we start generating a very large number of
files. This causes recovery to take a very long time. In order to make it quicker, we
      parallelized the reading of these files. This test makes sure that we limit the number of
      open files to the number of threads in our thread pool rather than the size of
the list of files.
*/
val numThreads = 8
val fpool = ThreadUtils.newForkJoinPool("wal-test-thread-pool", numThreads)
val executionContext = ExecutionContext.fromExecutorService(fpool)
class GetMaxCounter {
private val value = new AtomicInteger()
@volatile private var max: Int = 0
def increment(): Unit = synchronized {
val atInstant = value.incrementAndGet()
if (atInstant > max) max = atInstant
}
def decrement(): Unit = synchronized { value.decrementAndGet() }
def get(): Int = synchronized { value.get() }
def getMax(): Int = synchronized { max }
}
try {
      // If Jenkins is slow, we may not have a chance to run many threads simultaneously. Having
      // a latch makes sure that all the threads can start together.
val latch = new CountDownLatch(1)
val testSeq = 1 to 1000
val counter = new GetMaxCounter()
def handle(value: Int): Iterator[Int] = {
new CompletionIterator[Int, Iterator[Int]](Iterator(value)) {
counter.increment()
// block so that other threads also launch
latch.await(10, TimeUnit.SECONDS)
override def completion(): Unit = { counter.decrement() }
}
}
@volatile var collected: Seq[Int] = Nil
val t = new Thread() {
override def run(): Unit = {
// run the calculation on a separate thread so that we can release the latch
val iterator = FileBasedWriteAheadLog.seqToParIterator[Int, Int](executionContext,
testSeq, handle)
collected = iterator.toSeq
}
}
t.start()
eventually(Eventually.timeout(10.seconds)) {
// make sure we are doing a parallel computation!
assert(counter.getMax() > 1)
}
latch.countDown()
t.join(10000)
assert(collected === testSeq)
// make sure we didn't open too many Iterators
assert(counter.getMax() <= numThreads)
} finally {
fpool.shutdownNow()
}
}
test("FileBasedWriteAheadLogWriter - writing data") {
val dataToWrite = generateRandomData()
val segments = writeDataUsingWriter(testFile, dataToWrite)
val writtenData = readDataManually(segments)
assert(writtenData === dataToWrite)
}
test("FileBasedWriteAheadLogWriter - syncing of data by writing and reading immediately") {
val dataToWrite = generateRandomData()
val writer = new FileBasedWriteAheadLogWriter(testFile, hadoopConf)
dataToWrite.foreach { data =>
val segment = writer.write(stringToByteBuffer(data))
val dataRead = readDataManually(Seq(segment)).head
assert(data === dataRead)
}
writer.close()
}
test("FileBasedWriteAheadLogReader - sequentially reading data") {
val writtenData = generateRandomData()
writeDataManually(writtenData, testFile, allowBatching = false)
val reader = new FileBasedWriteAheadLogReader(testFile, hadoopConf)
val readData = reader.toSeq.map(byteBufferToString)
assert(readData === writtenData)
assert(reader.hasNext === false)
intercept[Exception] {
reader.next()
}
reader.close()
}
test("FileBasedWriteAheadLogReader - sequentially reading data written with writer") {
val dataToWrite = generateRandomData()
writeDataUsingWriter(testFile, dataToWrite)
val readData = readDataUsingReader(testFile)
assert(readData === dataToWrite)
}
test("FileBasedWriteAheadLogReader - reading data written with writer after corrupted write") {
// Write data manually for testing the sequential reader
val dataToWrite = generateRandomData()
writeDataUsingWriter(testFile, dataToWrite)
val fileLength = new File(testFile).length()
// Append some garbage data to get the effect of a corrupted write
val fw = new FileWriter(testFile, true)
fw.append("This line appended to file!")
fw.close()
// Verify the data can be read and is same as the one correctly written
assert(readDataUsingReader(testFile) === dataToWrite)
// Corrupt the last correctly written file
val raf = new FileOutputStream(testFile, true).getChannel()
raf.truncate(fileLength - 1)
raf.close()
// Verify all the data except the last can be read
assert(readDataUsingReader(testFile) === (dataToWrite.dropRight(1)))
}
test("FileBasedWriteAheadLogReader - handles errors when file doesn't exist") {
// Write data manually for testing the sequential reader
val dataToWrite = generateRandomData()
writeDataUsingWriter(testFile, dataToWrite)
val tFile = new File(testFile)
assert(tFile.exists())
// Verify the data can be read and is same as the one correctly written
assert(readDataUsingReader(testFile) === dataToWrite)
tFile.delete()
assert(!tFile.exists())
val reader = new FileBasedWriteAheadLogReader(testFile, hadoopConf)
assert(!reader.hasNext)
reader.close()
// Verify that no exception is thrown if file doesn't exist
assert(readDataUsingReader(testFile) === Nil)
}
test("FileBasedWriteAheadLogRandomReader - reading data using random reader") {
// Write data manually for testing the random reader
val writtenData = generateRandomData()
val segments = writeDataManually(writtenData, testFile, allowBatching = false)
// Get a random order of these segments and read them back
val writtenDataAndSegments = writtenData.zip(segments).toSeq.permutations.take(10).flatten
val reader = new FileBasedWriteAheadLogRandomReader(testFile, hadoopConf)
writtenDataAndSegments.foreach { case (data, segment) =>
assert(data === byteBufferToString(reader.read(segment)))
}
reader.close()
}
test("FileBasedWriteAheadLogRandomReader- reading data using random reader written with writer") {
// Write data using writer for testing the random reader
val data = generateRandomData()
val segments = writeDataUsingWriter(testFile, data)
// Read a random sequence of segments and verify read data
val dataAndSegments = data.zip(segments).toSeq.permutations.take(10).flatten
val reader = new FileBasedWriteAheadLogRandomReader(testFile, hadoopConf)
dataAndSegments.foreach { case (data, segment) =>
assert(data === byteBufferToString(reader.read(segment)))
}
reader.close()
}
}
abstract class CloseFileAfterWriteTests(allowBatching: Boolean, testTag: String)
extends CommonWriteAheadLogTests(allowBatching, closeFileAfterWrite = true, testTag) {
import WriteAheadLogSuite._
test(testPrefix + "close after write flag") {
// Write data with rotation using WriteAheadLog class
val numFiles = 3
val dataToWrite = Seq.tabulate(numFiles)(_.toString)
// total advance time is less than 1000, therefore log shouldn't be rolled, but manually closed
writeDataUsingWriteAheadLog(testDir, dataToWrite, closeLog = false, clockAdvanceTime = 100,
closeFileAfterWrite = true, allowBatching = allowBatching)
// Read data manually to verify the written data
val logFiles = getLogFilesInDirectory(testDir)
assert(logFiles.size === numFiles)
val writtenData: Seq[String] = readAndDeserializeDataManually(logFiles, allowBatching)
assert(writtenData === dataToWrite)
}
}
class FileBasedWriteAheadLogWithFileCloseAfterWriteSuite
extends CloseFileAfterWriteTests(allowBatching = false, "FileBasedWriteAheadLog")
class BatchedWriteAheadLogSuite extends CommonWriteAheadLogTests(
allowBatching = true,
closeFileAfterWrite = false,
"BatchedWriteAheadLog")
with MockitoSugar
with BeforeAndAfterEach
with Eventually
with PrivateMethodTester {
import BatchedWriteAheadLog._
import WriteAheadLogSuite._
private var wal: WriteAheadLog = _
private var walHandle: WriteAheadLogRecordHandle = _
private var walBatchingThreadPool: ThreadPoolExecutor = _
private var walBatchingExecutionContext: ExecutionContextExecutorService = _
private val sparkConf = new SparkConf()
private val queueLength = PrivateMethod[Int](Symbol("getQueueLength"))
override def beforeEach(): Unit = {
super.beforeEach()
wal = mock[WriteAheadLog]
walHandle = mock[WriteAheadLogRecordHandle]
walBatchingThreadPool = ThreadUtils.newDaemonFixedThreadPool(8, "wal-test-thread-pool")
walBatchingExecutionContext = ExecutionContext.fromExecutorService(walBatchingThreadPool)
}
override def afterEach(): Unit = {
try {
if (walBatchingExecutionContext != null) {
walBatchingExecutionContext.shutdownNow()
}
} finally {
super.afterEach()
}
}
test("BatchedWriteAheadLog - serializing and deserializing batched records") {
val events = Seq(
BlockAdditionEvent(ReceivedBlockInfo(0, None, None, null)),
BatchAllocationEvent(null, null),
BatchCleanupEvent(Nil)
)
val buffers = events.map(e => Record(ByteBuffer.wrap(Utils.serialize(e)), 0L, null))
val batched = BatchedWriteAheadLog.aggregate(buffers)
val deaggregate = BatchedWriteAheadLog.deaggregate(batched).map(buffer =>
Utils.deserialize[ReceivedBlockTrackerLogEvent](buffer.array()))
assert(deaggregate.toSeq === events)
}
test("BatchedWriteAheadLog - failures in wrappedLog get bubbled up") {
when(wal.write(any[ByteBuffer], anyLong)).thenThrow(new RuntimeException("Hello!"))
// the BatchedWriteAheadLog should bubble up any exceptions that may have happened during writes
val batchedWal = new BatchedWriteAheadLog(wal, sparkConf)
val e = intercept[SparkException] {
val buffer = mock[ByteBuffer]
batchedWal.write(buffer, 2L)
}
assert(e.getCause.getMessage === "Hello!")
}
// we make the write requests in separate threads so that we don't block the test thread
private def writeAsync(wal: WriteAheadLog, event: String, time: Long): Promise[Unit] = {
val p = Promise[Unit]()
p.completeWith(Future[Unit] {
val v = wal.write(event, time)
assert(v === walHandle)
}(walBatchingExecutionContext))
p
}
test("BatchedWriteAheadLog - name log with the highest timestamp of aggregated entries") {
val blockingWal = new BlockingWriteAheadLog(wal, walHandle)
val batchedWal = new BatchedWriteAheadLog(blockingWal, sparkConf)
val event1 = "hello"
val event2 = "world"
val event3 = "this"
val event4 = "is"
val event5 = "doge"
    // The queue.take() immediately takes the record at time 3, and there is nothing left in the
    // queue at that moment. Then the promise blocks the writing of that record. The rest get queued.
writeAsync(batchedWal, event1, 3L)
eventually(timeout(1.second)) {
assert(blockingWal.isBlocked)
assert(batchedWal.invokePrivate(queueLength()) === 0)
}
// rest of the records will be batched while it takes time for 3 to get written
writeAsync(batchedWal, event2, 5L)
writeAsync(batchedWal, event3, 8L)
// we would like event 5 to be written before event 4 in order to test that they get
// sorted before being aggregated
writeAsync(batchedWal, event5, 12L)
eventually(timeout(1.second)) {
assert(blockingWal.isBlocked)
assert(batchedWal.invokePrivate(queueLength()) === 3)
}
writeAsync(batchedWal, event4, 10L)
eventually(timeout(1.second)) {
assert(walBatchingThreadPool.getActiveCount === 5)
assert(batchedWal.invokePrivate(queueLength()) === 4)
}
blockingWal.allowWrite()
val buffer = wrapArrayArrayByte(Array(event1))
val queuedEvents = Set(event2, event3, event4, event5)
eventually(timeout(1.second)) {
assert(batchedWal.invokePrivate(queueLength()) === 0)
verify(wal, times(1)).write(meq(buffer), meq(3L))
// the file name should be the timestamp of the last record, as events should be naturally
// in order of timestamp, and we need the last element.
val bufferCaptor = ArgumentCaptor.forClass(classOf[ByteBuffer])
verify(wal, times(1)).write(bufferCaptor.capture(), meq(12L))
val records = BatchedWriteAheadLog.deaggregate(bufferCaptor.getValue).map(byteBufferToString)
assert(records.toSet === queuedEvents)
}
}
test("BatchedWriteAheadLog - shutdown properly") {
val batchedWal = new BatchedWriteAheadLog(wal, sparkConf)
batchedWal.close()
verify(wal, times(1)).close()
intercept[IllegalStateException](batchedWal.write(mock[ByteBuffer], 12L))
}
test("BatchedWriteAheadLog - fail everything in queue during shutdown") {
val blockingWal = new BlockingWriteAheadLog(wal, walHandle)
val batchedWal = new BatchedWriteAheadLog(blockingWal, sparkConf)
val event1 = "hello"
val event2 = "world"
val event3 = "this"
    // The queue.take() immediately takes the record at time 3, and there is nothing left in the
    // queue at that moment. Then the promise blocks the writing of that record. The rest get queued.
val promise1 = writeAsync(batchedWal, event1, 3L)
eventually(timeout(1.second)) {
assert(blockingWal.isBlocked)
assert(batchedWal.invokePrivate(queueLength()) === 0)
}
// rest of the records will be batched while it takes time for 3 to get written
val promise2 = writeAsync(batchedWal, event2, 5L)
val promise3 = writeAsync(batchedWal, event3, 8L)
eventually(timeout(1.second)) {
assert(walBatchingThreadPool.getActiveCount === 3)
assert(blockingWal.isBlocked)
assert(batchedWal.invokePrivate(queueLength()) === 2) // event1 is being written
}
val writePromises = Seq(promise1, promise2, promise3)
batchedWal.close()
eventually(timeout(1.second)) {
assert(writePromises.forall(_.isCompleted))
assert(writePromises.forall(_.future.value.get.isFailure)) // all should have failed
}
}
}
class BatchedWriteAheadLogWithCloseFileAfterWriteSuite
extends CloseFileAfterWriteTests(allowBatching = true, "BatchedWriteAheadLog")
object WriteAheadLogSuite {
private val hadoopConf = new Configuration()
/** Write data to a file directly and return an array of the file segments written. */
def writeDataManually(
data: Seq[String],
file: String,
allowBatching: Boolean): Seq[FileBasedWriteAheadLogSegment] = {
val segments = new ArrayBuffer[FileBasedWriteAheadLogSegment]()
val writer = HdfsUtils.getOutputStream(file, hadoopConf)
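    // Each record is framed as a 4-byte length followed by the serialized payload, which is the
    // same framing that readDataManually and FileBasedWriteAheadLogReader expect when reading back.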
def writeToStream(bytes: Array[Byte]): Unit = {
val offset = writer.getPos
writer.writeInt(bytes.size)
writer.write(bytes)
segments += FileBasedWriteAheadLogSegment(file, offset, bytes.size)
}
if (allowBatching) {
writeToStream(wrapArrayArrayByte(data.toArray[String]).array())
} else {
data.foreach { item =>
writeToStream(Utils.serialize(item))
}
}
writer.close()
segments.toSeq
}
/**
* Write data to a file using the writer class and return an array of the file segments written.
*/
def writeDataUsingWriter(
filePath: String,
data: Seq[String]): Seq[FileBasedWriteAheadLogSegment] = {
val writer = new FileBasedWriteAheadLogWriter(filePath, hadoopConf)
val segments = data.map {
item => writer.write(item)
}
writer.close()
segments
}
/** Write data to rotating files in log directory using the WriteAheadLog class. */
def writeDataUsingWriteAheadLog(
logDirectory: String,
data: Seq[String],
closeFileAfterWrite: Boolean,
allowBatching: Boolean,
manualClock: ManualClock = new ManualClock,
closeLog: Boolean = true,
clockAdvanceTime: Int = 500): WriteAheadLog = {
    // Ensure that 500 does not get sorted after 2000, so put a high base value.
    if (manualClock.getTimeMillis() < 100000) manualClock.setTime(10000)
    val wal = createWriteAheadLog(logDirectory, closeFileAfterWrite, allowBatching)
data.foreach { item =>
manualClock.advance(clockAdvanceTime)
wal.write(item, manualClock.getTimeMillis())
}
if (closeLog) wal.close()
wal
}
  /** Read data directly from segments of a log file and return the deserialized records. */
def readDataManually(segments: Seq[FileBasedWriteAheadLogSegment]): Seq[String] = {
segments.map { segment =>
val reader = HdfsUtils.getInputStream(segment.path, hadoopConf)
try {
reader.seek(segment.offset)
val bytes = new Array[Byte](segment.length)
reader.readInt()
reader.readFully(bytes)
val data = Utils.deserialize[String](bytes)
reader.close()
data
} finally {
reader.close()
}
}
}
  /** Read all the data from a log file directly and return the deserialized records. */
def readDataManually[T](file: String): Seq[T] = {
val reader = HdfsUtils.getInputStream(file, hadoopConf)
val buffer = new ArrayBuffer[T]
try {
while (true) {
// Read till EOF is thrown
val length = reader.readInt()
val bytes = new Array[Byte](length)
        reader.readFully(bytes)
buffer += Utils.deserialize[T](bytes)
}
} catch {
case ex: EOFException =>
} finally {
reader.close()
}
buffer.toSeq
}
  /** Read all the data from a log file using the reader class and return the records as strings. */
def readDataUsingReader(file: String): Seq[String] = {
val reader = new FileBasedWriteAheadLogReader(file, hadoopConf)
val readData = reader.toList.map(byteBufferToString)
reader.close()
readData
}
/** Read all the data in the log file in a directory using the WriteAheadLog class. */
def readDataUsingWriteAheadLog(
logDirectory: String,
closeFileAfterWrite: Boolean,
allowBatching: Boolean): Seq[String] = {
val wal = createWriteAheadLog(logDirectory, closeFileAfterWrite, allowBatching)
val data = wal.readAll().asScala.map(byteBufferToString).toArray
wal.close()
data
}
/** Get the log files in a directory. */
def getLogFilesInDirectory(directory: String): Seq[String] = {
val logDirectoryPath = new Path(directory)
val fileSystem = HdfsUtils.getFileSystemForPath(logDirectoryPath, hadoopConf)
if (fileSystem.exists(logDirectoryPath) &&
fileSystem.getFileStatus(logDirectoryPath).isDirectory) {
fileSystem.listStatus(logDirectoryPath).map { _.getPath() }.sortBy {
_.getName().split("-")(1).toLong
}.map {
_.toString.stripPrefix("file:")
}
} else {
Seq.empty
}
}
def createWriteAheadLog(
logDirectory: String,
closeFileAfterWrite: Boolean,
allowBatching: Boolean): WriteAheadLog = {
val sparkConf = new SparkConf
val wal = new FileBasedWriteAheadLog(sparkConf, logDirectory, hadoopConf, 1, 1,
closeFileAfterWrite)
if (allowBatching) new BatchedWriteAheadLog(wal, sparkConf) else wal
}
def generateRandomData(): Seq[String] = {
(1 to 100).map { _.toString }
}
def readAndDeserializeDataManually(logFiles: Seq[String], allowBatching: Boolean): Seq[String] = {
if (allowBatching) {
logFiles.flatMap { file =>
val data = readDataManually[Array[Array[Byte]]](file)
data.flatMap(byteArray => byteArray.map(Utils.deserialize[String]))
}
} else {
logFiles.flatMap { file => readDataManually[String](file)}
}
}
implicit def stringToByteBuffer(str: String): ByteBuffer = {
ByteBuffer.wrap(Utils.serialize(str))
}
implicit def byteBufferToString(byteBuffer: ByteBuffer): String = {
Utils.deserialize[String](byteBuffer.array)
}
def wrapArrayArrayByte[T](records: Array[T]): ByteBuffer = {
ByteBuffer.wrap(Utils.serialize[Array[Array[Byte]]](records.map(Utils.serialize[T])))
}
/**
* A wrapper WriteAheadLog that blocks the write function to allow batching with the
* BatchedWriteAheadLog.
*/
class BlockingWriteAheadLog(
wal: WriteAheadLog,
handle: WriteAheadLogRecordHandle) extends WriteAheadLog {
@volatile private var isWriteCalled: Boolean = false
@volatile private var blockWrite: Boolean = true
override def write(record: ByteBuffer, time: Long): WriteAheadLogRecordHandle = {
isWriteCalled = true
eventually(Eventually.timeout(2.second)) {
assert(!blockWrite)
}
wal.write(record, time)
isWriteCalled = false
handle
}
override def read(segment: WriteAheadLogRecordHandle): ByteBuffer = wal.read(segment)
override def readAll(): JIterator[ByteBuffer] = wal.readAll()
override def clean(threshTime: Long, waitForCompletion: Boolean): Unit = {
wal.clean(threshTime, waitForCompletion)
}
override def close(): Unit = wal.close()
def allowWrite(): Unit = {
blockWrite = false
}
def isBlocked: Boolean = isWriteCalled
}
}
| maropu/spark | streaming/src/test/scala/org/apache/spark/streaming/util/WriteAheadLogSuite.scala | Scala | apache-2.0 | 29,852 |
package uk.gov.gds.ier.transaction.overseas.openRegister
import uk.gov.gds.ier.test._
import uk.gov.gds.ier.transaction.overseas.InprogressOverseas
class OpenRegisterMustacheTests
extends MustacheTestSuite
with OpenRegisterForms
with OpenRegisterMustache {
it should "empty progress form should produce empty Model" in {
val emptyApplicationForm = openRegisterForm
val openRegisterModel = mustache.data(
emptyApplicationForm,
Call("POST", "/register-to-vote/overseas/ways-to-vote"),
InprogressOverseas()
).asInstanceOf[OpenRegisterModel]
openRegisterModel.question.title should be("Do you want to include your name and address on the open register?")
openRegisterModel.question.postUrl should be("/register-to-vote/overseas/ways-to-vote")
openRegisterModel.openRegister.value should be("false")
    // value=false is part of a selectable UI widget, the 'empty model' is a bit of a misnomer
}
it should "progress form with open register marked should produce Mustache Model with open register value present (true)" in {
val partiallyFilledApplicationForm = openRegisterForm.fill(
InprogressOverseas(
openRegisterOptin = Some(true)
)
)
val openRegisterModel = mustache.data(
partiallyFilledApplicationForm,
Call("POST", "/register-to-vote/overseas/ways-to-vote"),
InprogressOverseas()
).asInstanceOf[OpenRegisterModel]
openRegisterModel.question.title should be("Do you want to include your name and address on the open register?")
openRegisterModel.question.postUrl should be("/register-to-vote/overseas/ways-to-vote")
openRegisterModel.openRegister.attributes should be("")
}
it should "progress form with open register marked should produce Mustache Model with open register value present (false)" in {
val partiallyFilledApplicationForm = openRegisterForm.fill(
InprogressOverseas(
openRegisterOptin = Some(false)
)
)
val openRegisterModel = mustache.data(
partiallyFilledApplicationForm,
Call("POST", "/register-to-vote/overseas/ways-to-vote"),
InprogressOverseas()
).asInstanceOf[OpenRegisterModel]
openRegisterModel.question.title should be("Do you want to include your name and address on the open register?")
openRegisterModel.question.postUrl should be("/register-to-vote/overseas/ways-to-vote")
openRegisterModel.openRegister.attributes should be("checked=\\"checked\\"")
}
}
| michaeldfallen/ier-frontend | test/uk/gov/gds/ier/transaction/overseas/openRegister/OpenRegisterMustacheTests.scala | Scala | mit | 2,483 |
//@
package xyz.hyperreal.energize
import java.sql.{Connection, Statement, DriverManager}
import collection.mutable.{ArrayBuffer, HashMap, ListBuffer}
import collection.JavaConverters._
import org.mindrot.jbcrypt.BCrypt
import xyz.hyperreal.bvm._
import xyz.hyperreal.json.{DefaultJSONReader, JSON}
object Definition {
def dbconnect: (Connection, Statement, Database) = {
val url = DATABASE.getString( "url" )
val user = DATABASE.getString( "user" )
val password = DATABASE.getString( "password" )
dbconnect( url, user, password )
}
def dbconnect( url: String, user: String, password: String ): (Connection, Statement, Database) = {
val name = DATABASE.getString( "name" )
val driver = DATABASE.getString( "driver" )
dbconnect( name, driver, url, user, password )
}
def dbconnect( name: String, driver: String, url: String, user: String, password: String ) = {
Class.forName( driver )
val connection =
// JdbcConnectionPool.create( url, user, password ).getConnection
if (user eq null)
DriverManager.getConnection( url )
else
DriverManager.getConnection( url, user, password )
(connection, connection.createStatement, Database( name ))
}
def define( src: String, connection: Connection, statement: Statement, database: Database, key: String ): Processor =
define( io.Source.fromString(src), connection, statement, database, key )
def define( src: io.Source, connection: Connection, statement: Statement, database: Database, key: String ): Processor = {
val p = new EnergizeParser
define( p.parseFromSource(src, p.source), connection, statement, database, key )
}
def parsePath( path: String ): Option[PathSegment] =
if (path endsWith "/")
None
else
path split "/" toList match {
case Nil|List( "" ) => None
case a =>
if (a.head != "")
None
else {
val tail = a.tail
if (tail contains "")
None
else
Some( if (tail.length == 1) LiteralPathSegment(tail.head) else ConcatenationPathSegment(tail map LiteralPathSegment) )
}
}
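  // For illustration, derived from the rules above: parsePath("/a") yields Some(LiteralPathSegment("a")),
  // parsePath("/a/b") yields Some(ConcatenationPathSegment(List(LiteralPathSegment("a"), LiteralPathSegment("b")))),
  // while "", "/", "a/b", "/a//b" and any path ending in "/" all yield None.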
def defineFromJSON( src: io.Source, connection: Connection, statement: Statement, database: Database, key: String ) = {
val s = src mkString
val json = DefaultJSONReader.fromString( s )
val decl = new ListBuffer[StatementAST]
for ((k, v) <- json)
k match {
case "tables" =>
for (tab <- v.asInstanceOf[List[JSON]]) {
val pro =
tab get "protection" match {
case None => None
case Some( groups: List[_] ) => Some( groups.asInstanceOf[List[String]].headOption )
case Some( _ ) => sys.error( "protection expected to be a list" )
}
// val priv = //todo: implement 'private' in defineFromJSON()
// tab get "private" match {
// case None => false
// case Some( _ ) => true
// }
val base =
if (tab contains "base")
parsePath( tab getString "base" )
else
None
val cols = new ListBuffer[ResourceField]
for (c <- tab.getList[JSON]( "fields" )) {
val typ = c getMap "type"
val cat = typ getString "category"
val array =
cat match {
case "primitive" => false
case "array" => true
case "one-to-many" => false
case "many-to-many" => true
}
val modifiers =
if (c contains "modifiers")
c getList[String] "modifiers" map (m => (null, m))
else
Nil
// val validators = //todo: validators in defineFromJSON()
// if (c contains "validators")
// c getList[String] "validators"
// else
// Nil
val args =
if (typ contains "parameters")
typ getList[AnyRef] "parameters"
else
Nil
cols += ResourceField( null, c getString "name", null, typ getString "type", args, modifiers, array )
}
decl += ResourceDefinition( pro, null, tab getString "name", base, cols toList, tab getBoolean "resource" )
}
case "routes" =>
for (routes <- v.asInstanceOf[List[JSON]]) {
val base =
if (routes contains "base")
parsePath( routes getString "base" )
else
None
val protection =
if (routes contains "protection") {
val p = routes getString "protection"
if (p eq null)
Some( None )
else
Some( Some(p) )
} else
None
val mappings =
for (m <- routes.getList[JSON]( "mappings" ))
yield {
val method = m getString "method"
val path = parsePath( m getString "path" ).get
val action =
m getString "language" match {
case "ESL" => parseExpression( m getString "action" )
//case "ECMAScript" => JavaScriptExpression( m getString "action" )
}
RouteMapping( null, method, path, None, action ) //todo: add route guards to JSON config
}
decl += RoutesDefinition( null, base, protection, mappings )
}
}
define( SourceAST(decl toList), connection, statement, database, key )
}
def parse( src: String ): SourceAST = parse( io.Source.fromString(src) )
def parse( src: io.Source ) = {
val p = new EnergizeParser
p.parseFromSource( src, p.source )
}
def compile( src: String, connection: Connection, db: Database, stat: Statement, internal: Boolean ): Definition = compile( parse(src), connection, db, stat, internal )
def compile( ast: SourceAST, connection: Connection, db: Database, stat: Statement, internal: Boolean ) = {
val compiler = new EnergizeCompiler
val code = compiler.compile( ast, connection, db, stat, internal )
Definition( code, compiler.resources, compiler.routes, compiler.conds )
}
def define( ast: SourceAST, connection: Connection, statement: Statement, db: Database, key: String ): Processor = {
val Definition( code, resources, routes, _ ) = compile( ast, connection, db, statement, false )
resources.values foreach {
case Resource( _, _, fields, _, _, _, _ ) =>
fields foreach {
case Field( _, t@SingleReferenceType(pos, table, _), _, _, _, _, _ ) =>
t.ref = resources.getOrElse( db.desensitize(table), problem(pos, s"'$table' not found") )
case Field( _, t@ManyReferenceType(pos, table, _), _, _, _, _, _ ) =>
t.ref = resources.getOrElse( db.desensitize(table), problem(pos, s"'$table' not found") )
case _ =>
}
}
val sorted = ResourceSorter.sort( resources.values ) getOrElse sys.error( "resources cannot be topologically ordered" )
val created = db.created( connection, resources )
if (resources.nonEmpty && !created) {
// print( xyz.hyperreal.table.TextTable(connection.getMetaData.getTables( null, null, tables.head._1, null )) )
//println( db.create(sorted) )
statement.execute( db.create(sorted) )
}
val media = resources( db.desensitize("_media_") )
val users = resources( db.desensitize("users") )
val proc = new Processor( code, connection, statement, db, resources.toMap, routes.toList, key, users )
resources.values foreach {
case r@Resource( name, _, fields, _, _, _, _ ) =>
val cnames1 = fields filterNot (_.typ.isInstanceOf[ManyReferenceType]) map (_.name)
val fieldstr = cnames1 map nameIn mkString ","
val values = Seq.fill( cnames1.length )( "?" ) mkString ","
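        // PostgreSQL can hand back the generated id through a RETURNING clause; the other supported
        // databases rely on JDBC generated keys (the Array(idIn) column list) instead.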
if (db == PostgresDatabase) {
r.preparedInsert = connection.prepareStatement( s"INSERT INTO $name ($fieldstr) VALUES ($values) RETURNING $idIn" )
r.preparedFullInsert = connection.prepareStatement( s"INSERT INTO $name ($idIn, $fieldstr) VALUES (?, $values)" )
} else {
r.preparedInsert = connection.prepareStatement( s"INSERT INTO $name ($fieldstr) VALUES ($values)", Array(idIn) )
r.preparedFullInsert = connection.prepareStatement( s"INSERT INTO $name ($idIn, $fieldstr) VALUES (?, $values)" )
}
r.media = media
r.processor = proc
}
proc.vm.execute
if (!created) {
val admin =
Map( ADMIN.entrySet.asScala.toList.map( e =>
(e.getKey, ADMIN.getValue(e.getKey).unwrapped) match {
case (k, o: java.util.List[_]) => (k, o.asScala)
case ("password", p: String) => ("password", BCrypt.hashpw( p, BCrypt.gensalt ))
case (k, o) => (k, o)
}
) :+ "createdTime" -> now: _* )
users.insert( admin )
}
proc
}
}
case class Definition( code: Compilation, resources: HashMap[String, Resource], routes: ArrayBuffer[Route],
conds: ArrayBuffer[(ExpressionAST, ExpressionAST)] ) | vinctustech/energize | src/main/scala/Definition.scala | Scala | isc | 8,548 |
package com.databricks.spark.sql.perf.mllib.regression
import org.apache.spark.ml.PipelineStage
import org.apache.spark.ml.regression.DecisionTreeRegressor
import com.databricks.spark.sql.perf.mllib.OptionImplicits._
import com.databricks.spark.sql.perf.mllib._
object DecisionTreeRegression extends BenchmarkAlgorithm with TreeOrForestRegressor {
override def getPipelineStage(ctx: MLBenchContext): PipelineStage = {
import ctx.params._
new DecisionTreeRegressor()
.setMaxDepth(depth)
.setSeed(ctx.seed())
}
}
| databricks/spark-sql-perf | src/main/scala/com/databricks/spark/sql/perf/mllib/regression/DecisionTreeRegression.scala | Scala | apache-2.0 | 540 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.time
import munit.FunSuite
import java.util.Date
import scala.scalajs.js
class PlatformDateFormatSpec extends FunSuite {
private def getDate(dateString: String): Date = new Date(js.Date.parse(dateString).toLong)
test("parse a date without time") {
assertEquals(PlatformDateFormat.parse("2011-10-10"), Right(getDate("2011-10-10T00:00:00Z")))
}
test("parse a local date time") {
assertEquals(PlatformDateFormat.parse("2011-10-10T14:48:00"), Right(getDate("2011-10-10T14:48:00")))
}
test("parse a UTC date time") {
assertEquals(PlatformDateFormat.parse("2011-10-10T14:48:00Z"), Right(getDate("2011-10-10T14:48:00Z")))
}
test("parse a date time with an offset") {
assertEquals(PlatformDateFormat.parse("2011-10-10T14:48:00+0100"), Right(getDate("2011-10-10T13:48:00Z")))
}
test("fail in case of invalid date format") {
assertEquals(PlatformDateFormat.parse("2011-10-10XX14:48:00+0100").isLeft, true)
}
}
| planet42/Laika | core/js/src/test/scala/laika/time/PlatformDateFormatSpec.scala | Scala | apache-2.0 | 1,600 |
package scala.collection.immutable
import scala.language.higherKinds
import org.scalacheck.{Arbitrary, Gen, Properties, Shrink}
import org.scalacheck.commands.Commands
import scala.collection.mutable
import scala.util.{Success, Try}
object SetProperties extends Properties("immutable.Set builder implementations"){
type A = Int
property("Set builder stateful testing") = new SetBuilderStateProperties(Set.newBuilder[A]).property()
// TODO: If and when https://github.com/scala/bug/issues/11160 is fixed, uncomment this
// property("BitSet builder stateful testing") =
// new SetBuilderStateProperties(BitSet.newBuilder)(arbA = Arbitrary(Gen.choose(0, 10000))).property()
property("HashSet builder stateful testing") = new SetBuilderStateProperties(HashSet.newBuilder[A]).property()
property("ListSet builder stateful testing") = new SetBuilderStateProperties(ListSet.newBuilder[A]).property()
property("SortedSet builder stateful testing") = new SetBuilderStateProperties(SortedSet.newBuilder[A]).property()
property("TreeSet builder stateful testing") = new SetBuilderStateProperties(TreeSet.newBuilder[A]).property()
}
/** Generic stateful property testing for Set builders
*
* Usage: {{{
* class MyCollectionProperties extends Properties("my.Collection") {
* property("MyCollection builder stateful testing") =
* new SetBuilderStateProperties(MySet.newBuilder[A]).property() &&
* }
* }}}
* @param arbA gen for the elements of the Set
* @tparam To the type of Set under test
*/
class SetBuilderStateProperties[A, To <: Set[A]](newBuilder: => mutable.Builder[A, To])(implicit arbA: Arbitrary[A]) extends Commands {
override type State = Set[A]
override type Sut = mutable.Builder[A, To]
override def genInitialState: Gen[State] = Set.empty[A]
override def canCreateNewSut(newState: State, initSuts: scala.Iterable[State], runningSuts: scala.Iterable[Sut]) = true
override def newSut(state: State): mutable.Builder[A, To] = newBuilder.addAll(state)
override def destroySut(sut: Sut): Unit = ()
override def initialPreCondition(state: State) = state.isEmpty
import Gen._
lazy val _genCommand = Gen.oneOf(
const(Clear),
const(Result),
choose(0, 10000).map(SizeHint(_)),
arbA.arbitrary.map(a => AddOne(a)),
listOf(arbA.arbitrary).map(a => AddAll(a))
)
override def genCommand(state: State): Gen[Command] = _genCommand
override def shrinkState = Shrink.apply[State]( set => set.to(Stream).map(set - _) )
case object Clear extends UnitCommand {
override def postCondition(state: State, success: Boolean) = success
override def run(sut: Sut) = sut.clear()
override def nextState(state: State) = Set.empty
override def preCondition(state: State) = true
}
case object Result extends Command {
override type Result = State
override def postCondition(state: State, result: Try[Result]) = result == Success(state)
override def run(sut: Sut) = sut.result()
override def nextState(state: State) = state
override def preCondition(state: State) = true
}
case class SizeHint(size: Int) extends UnitCommand {
override def postCondition(state: State, success: Boolean) = success
override def run(sut: Sut) = sut.sizeHint(size)
override def nextState(state: State) = state
override def preCondition(state: State) = true
}
case class AddOne(elem: A) extends UnitCommand {
override def postCondition(state: State, success: Boolean) = success
override def run(sut: Sut) = sut.addOne(elem)
override def nextState(state: State) = state + elem
override def preCondition(state: State) = true
}
case class AddAll(elems: scala.collection.immutable.Seq[A]) extends UnitCommand {
override def postCondition(state: State, success: Boolean) = success
override def run(sut: Sut) = sut.addAll(elems)
override def nextState(state: State) = state ++ elems
override def preCondition(state: State) = true
}
}
| martijnhoekstra/scala | test/scalacheck/scala/collection/immutable/SetProperties.scala | Scala | apache-2.0 | 3,996 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.streams
import java.util.Optional
import java.util.concurrent.TimeUnit.NANOSECONDS
import java.util.concurrent.TimeoutException
import java.util.function.Consumer
import akka.NotUsed
import akka.http.org.squbs.util.JavaConverters._
import akka.japi.Pair
import akka.stream._
import akka.stream.scaladsl.{BidiFlow, Flow}
import akka.stream.stage.{GraphStage, _}
import com.typesafe.scalalogging.LazyLogging
import org.squbs.streams.TimeoutBidi._
import scala.collection.mutable
import scala.concurrent.duration._
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
/**
* A bidi [[GraphStageLogic]] that is used by [[TimeoutBidiOrdered]] and [[TimeoutBidiUnordered]] to wrap flows to add
* timeout functionality.
*
* Once an element is pushed from the wrapped flow (from fromWrapped), it first checks if the element is already
* timed out. If a timeout message has already been sent for that element to downstream, then the element from
* the wrapped flow is dropped.
*
* A timer gets scheduled when there is a downstream demand that's not immediately addressed. This is to make sure
* that a timeout response is sent to the downstream when upstream cannot address the demand on time.
*
* Timer precision is at best 10ms to avoid unnecessary timer scheduling cycles
*
* {{{
* +------+
* In ~> | | ~> In
* | bidi |
* Out <~ | | <~ FromWrapped
* +------+
* }}}
*
* @param shape the [[BidiShape]] that the timeout logic is applied on
* @tparam In the type of the elements that gets forwarded to the wrapped flow
* @tparam FromWrapped the type of the elements that the wrapped flow sends back
* @tparam Out the type of the elements that are pushed to downstream
*/
abstract class TimeoutGraphStageLogic[In, FromWrapped, Out](shape: BidiShape[In, In, FromWrapped, Out])
extends TimerGraphStageLogic(shape) {
private val in = shape.in1
private val fromWrapped = shape.in2
private val toWrapped = shape.out1
private val out = shape.out2
  private[this] def timerName = "TimeoutGraphStageLogic"
private val timeoutAsNanos = timeoutDuration.toNanos
private val timeoutAsMillis = timeoutDuration.toMillis
private val precision = 10.milliseconds.toNanos
private var downstreamDemand = 0
private var upstreamFinished = false
protected def timeoutDuration: FiniteDuration
protected def enqueueInTimeoutQueue (elem: In): Unit
protected def onPushFromWrapped(elem: FromWrapped, isOutAvailable: Boolean): Option[Out]
protected def onScheduledTimeout(): Option[Out]
protected def onPullOut(): Option[Out]
protected def isBuffersEmpty: Boolean
protected def timeLeftForNextElemToTimeout: Long = {
timeoutAsMillis - NANOSECONDS.toMillis(System.nanoTime() - firstElemStartTime)
}
protected def expirationTime(): Long = System.nanoTime() - timeoutAsNanos - precision
protected def firstElemStartTime: Long
setHandler(in, new InHandler {
override def onPush(): Unit = {
val elem = grab(in)
enqueueInTimeoutQueue(elem)
push(toWrapped, elem)
}
override def onUpstreamFinish(): Unit = complete(toWrapped)
override def onUpstreamFailure(ex: Throwable): Unit = fail(toWrapped, ex)
})
setHandler(toWrapped, new OutHandler {
override def onPull(): Unit = {
pull(in)
}
override def onDownstreamFinish(): Unit = completeStage()
})
setHandler(fromWrapped, new InHandler {
override def onPush(): Unit = {
onPushFromWrapped(grab(fromWrapped), isAvailable(out)) foreach { elem =>
push(out, elem)
}
if(downstreamDemand > 0) {
pull(fromWrapped)
downstreamDemand -= 1
}
}
override def onUpstreamFinish(): Unit = {
if(isBuffersEmpty) completeStage()
else upstreamFinished = true
}
override def onUpstreamFailure(ex: Throwable): Unit = fail(out, ex)
})
setHandler(out, new OutHandler {
override def onPull(): Unit = {
if(!upstreamFinished || !isBuffersEmpty) {
onPullOut() match {
case Some(elem) => push(out, elem)
case None => if (!isTimerActive(timerName)) scheduleOnce(timerName, timeLeftForNextElemToTimeout.millis)
}
if (!isClosed(fromWrapped) && !hasBeenPulled(fromWrapped)) pull(fromWrapped)
else downstreamDemand += 1
} else complete(out)
}
override def onDownstreamFinish(): Unit = cancel(fromWrapped)
})
final override def onTimer(key: Any): Unit = {
if(!upstreamFinished || !isBuffersEmpty) {
if (isAvailable(out)) {
onScheduledTimeout() match {
case Some(elem) => push(out, elem)
case None => scheduleOnce(timerName, timeLeftForNextElemToTimeout.millis)
}
}
} else complete(out)
}
}
object TimeoutBidiFlowUnordered {
/**
* Creates a [[BidiFlow]] that can be joined with a [[Flow]] to add timeout functionality.
* This API is specifically for the flows that do not guarantee message ordering. For flows that guarantee message
* ordering, please use [[TimeoutBidiOrdered]].
*
* Timeout functionality requires each element to be uniquely identified, so it requires a [[Context]], of any type
* defined by the application, to be carried along with the flow's input and output as a [[Tuple2]] (Scala) or
* [[Pair]] (Java). The requirement is that either the [[Context]] itself or a mapping from [[Context]] should be
   * able to uniquely identify an element. Here are the ways a unique id can be retrieved:
*
* - [[Context]] itself is a type that can be used as a unique id, e.g., [[Int]], [[Long]], [[java.util.UUID]]
* - [[Context]] extends [[UniqueId.Provider]] and implements [[UniqueId.Provider.uniqueId]] method
* - [[Context]] is of type [[UniqueId.Envelope]]
* - [[Context]] can be mapped to a unique id by calling {{{uniqueIdMapper}}}
*
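   * A minimal usage sketch (the element, output and context types below, and the wrapped flow,
   * are arbitrary choices for illustration, not part of this API):
   *
   * {{{
   *   val wrapped = Flow[(String, Long)].map(identity)  // stand-in for the flow to be wrapped
   *   val withTimeout: Flow[(String, Long), (Try[String], Long), NotUsed] =
   *     TimeoutBidiFlowUnordered[String, String, Long](1.second).join(wrapped)
   * }}}
   *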
* @param timeout the duration after which the processing of an element would be considered timed out
* @param uniqueIdMapper the function that maps [[Context]] to a unique id
* @param cleanUp an optional clean up function to be applied on timed out elements when pushed
* @tparam In the type of the elements pulled from the upstream along with the [[Context]]
* @tparam Out the type of the elements that are pushed to downstream along with the [[Context]]
* @tparam Context the type of the context that is carried around along with the elements.
* @return a [[BidiFlow]] with timeout functionality
*/
def apply[In, Out, Context](timeout: FiniteDuration,
uniqueIdMapper: Context => Option[Any] = (_: Any) => None,
cleanUp: Out => Unit = (_: Out) => ()):
BidiFlow[(In, Context), (In, Context), (Out, Context), (Try[Out], Context), NotUsed] =
BidiFlow.fromGraph(TimeoutBidiUnordered(timeout, uniqueIdMapper, cleanUp))
/**
* Java API
*/
def create[In, Out, Context](timeout: FiniteDuration,
uniqueIdMapper: java.util.function.Function[Context, Optional[Any]],
cleanUp: Consumer[Out]):
akka.stream.javadsl.BidiFlow[Pair[In, Context], Pair[In, Context], Pair[Out, Context], Pair[Try[Out], Context], NotUsed] = {
toJava(apply(timeout, UniqueId.javaUniqueIdMapperAsScala(uniqueIdMapper), toScala(cleanUp)))
}
/**
* Java API
*/
def create[In, Out, Context](timeout: FiniteDuration,
uniqueIdMapper: java.util.function.Function[Context, Optional[Any]]):
akka.stream.javadsl.BidiFlow[Pair[In, Context], Pair[In, Context], Pair[Out, Context], Pair[Try[Out], Context], NotUsed] = {
toJava(apply[In, Out, Context](timeout, UniqueId.javaUniqueIdMapperAsScala(uniqueIdMapper)))
}
/**
* Java API
*/
def create[In, Out, Context](timeout: FiniteDuration,
cleanUp: Consumer[Out]):
akka.stream.javadsl.BidiFlow[Pair[In, Context], Pair[In, Context], Pair[Out, Context], Pair[Try[Out], Context], NotUsed] = {
toJava(apply(timeout = timeout, cleanUp = toScala(cleanUp)))
}
/**
* Java API
*/
def create[In, Out, Context](timeout: FiniteDuration):
akka.stream.javadsl.BidiFlow[Pair[In, Context], Pair[In, Context], Pair[Out, Context], Pair[Try[Out], Context], NotUsed] = {
toJava(apply[In, Out, Context](timeout))
}
}
object TimeoutBidiUnordered {
/**
   * Creates a bidi [[GraphStage]] that is joined with a [[Flow]] that does not guarantee message
* timeout functionality.
*
* @param timeout the duration after which the processing of an element would be considered timed out.
* @param uniqueIdMapper the function that maps [[Context]] to a unique id
* @param cleanUp an optional clean up function to be applied on timed out elements when pushed
* @tparam In the type of the elements pulled from the upstream along with the [[Context]]
* @tparam Out the type of the elements that are pushed to downstream along with the [[Context]]
* @tparam Context the type of the context that is carried around along with the elements. The context may be of any
* type that can be used to uniquely identify each element.
* @return a [[TimeoutBidiUnordered]] that can be joined with a [[Flow]] with corresponding types to add timeout
* functionality.
*/
def apply[In, Out, Context](timeout: FiniteDuration,
uniqueIdMapper: Context => Option[Any] = (_: Any) => None,
cleanUp: Out => Unit = (_: Out) => ()):
TimeoutBidiUnordered[In, Out, Context] =
new TimeoutBidiUnordered(timeout, uniqueIdMapper, cleanUp)
}
/**
* A bidi [[GraphStage]] that is joined with flows to add timeout functionality. This bidi stage is used with flows
 * that do not guarantee message ordering, so it requires a context to be carried along with the elements to
* uniquely identify each element.
*
*
* '''Emits when''' an element is available from the joined [[Flow]] or an element has already timed out
*
* '''Backpressures when''' the downstream backpressures
*
* '''Completes when''' upstream completes
*
* '''Cancels when''' downstream cancels
*
*
* {{{
* +------+
* (In, Context) ~> | | ~> (In, Context)
* | bidi |
* (Try[Out], Context) <~ | | <~ (Out, Context)
* +------+
* }}}
*
* @param timeout the duration after which the processing of an element would be considered timed out.
* @param uniqueIdMapper the function that maps a [[Context]] to a unique id
* @param cleanUp an optional clean up function to be applied on timed out elements when pushed
* @tparam In the type of the elements pulled from the upstream along with the [[Context]]
* @tparam Out the type of the elements that are pushed by the joined [[Flow]] along with the [[Context]].
* This then gets wrapped with a [[Try]] and pushed downstream with a [[Context]]
* @tparam Context the type of the context that is carried around along with the elements.
*/
final class TimeoutBidiUnordered[In, Out, Context](timeout: FiniteDuration,
uniqueIdMapper: Context => Option[Any],
cleanUp: Out => Unit)
extends GraphStage[BidiShape[(In, Context), (In, Context), (Out, Context), (Try[Out], Context)]] with LazyLogging {
private val in = Inlet[(In, Context)]("TimeoutBidiUnordered.in")
private val fromWrapped = Inlet[(Out, Context)]("TimeoutBidiUnordered.fromWrapped")
private val toWrapped = Outlet[(In, Context)]("TimeoutBidiUnordered.toWrapped")
private val out = Outlet[(Try[Out], Context)]("TimeoutBidiUnordered.out")
val shape = BidiShape(in, toWrapped, fromWrapped, out)
private[streams] def uniqueId(context: Context) =
uniqueIdMapper(context).getOrElse {
context match {
case uniqueIdProvider: UniqueId.Provider ⇒ uniqueIdProvider.uniqueId
case context ⇒ context
}
}
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimeoutGraphStageLogic(shape) {
val timeouts = mutable.LinkedHashMap.empty[Any, (Context, Long)]
val readyToPush = mutable.Queue[((Try[Out], Context), Long)]()
override protected def timeoutDuration: FiniteDuration = timeout
override protected def enqueueInTimeoutQueue(elemWithContext: (In, Context)): Unit = {
val (_, context) = elemWithContext
timeouts.put(uniqueId(context), (context, System.nanoTime()))
}
override protected def onPushFromWrapped(fromWrapped: (Out, Context), isOutAvailable: Boolean): Option[(Try[Out], Context)] = {
val (elem, context) = fromWrapped
timeouts.remove(uniqueId(context)).fold(tryCleanUp(elem, cleanUp)) { case (_, startTime) =>
readyToPush.enqueue(((Success(elem), context), startTime))
}
if(isOutAvailable) pickNextElemToPush()
else None
}
override protected def firstElemStartTime = timeouts.headOption map { case (_, (_, startTime)) => startTime } getOrElse 0
private def pickNextElemToPush(): Option[(Try[Out], Context)] = {
timeouts.headOption.filter { case(_, (_, firstElemStartTime)) =>
firstElemStartTime < expirationTime &&
!readyToPush.headOption.exists { case(_, readyToPushStartTime) =>
readyToPushStartTime <= firstElemStartTime
}
} map { case(id, (context, _)) =>
timeouts.remove(id)
(Failure(FlowTimeoutException()), context)
} orElse Try(readyToPush.dequeue()).toOption.map { case(elem, _) => elem }
}
override protected def onPullOut() = pickNextElemToPush()
override protected def onScheduledTimeout() = pickNextElemToPush()
override protected def isBuffersEmpty = timeouts.isEmpty && readyToPush.isEmpty
}
override def initialAttributes = Attributes.name("TimeoutBidiUnordered")
override def toString = "TimeoutBidiUnordered"
}
object TimeoutBidiFlowOrdered {
/**
* Creates a [[BidiFlow]] that can be joined with a [[Flow]] to add timeout functionality.
* This API is specifically for the flows that guarantee message ordering. For flows that do not guarantee message
* ordering, please use [[TimeoutBidiUnordered]].
*
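   * A minimal usage sketch (the element types and the wrapped flow are arbitrary choices for
   * illustration, not part of this API):
   *
   * {{{
   *   val wrapped = Flow[String].map(identity)  // stand-in for an order-preserving flow
   *   val withTimeout: Flow[String, Try[String], NotUsed] =
   *     TimeoutBidiFlowOrdered[String, String](1.second).join(wrapped)
   * }}}
   *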
* @param timeout the duration after which the processing of an element would be considered timed out
* @param cleanUp an optional clean up function to be applied on timed out elements when pushed
* @tparam In the type of the elements pulled from the upstream
* @tparam Out the type of the elements that are pushed to downstream
* @return a [[BidiFlow]] with timeout functionality
*/
def apply[In, Out](timeout: FiniteDuration, cleanUp: Out => Unit = (_: Out) => ()):
BidiFlow[In, In, Out, Try[Out], NotUsed] =
BidiFlow.fromGraph(TimeoutBidiOrdered(timeout, cleanUp))
/**
* Java API
*/
def create[In, Out](timeout: FiniteDuration,
cleanUp: Consumer[Out]):
akka.stream.javadsl.BidiFlow[In, In, Out, Try[Out], NotUsed] = {
apply(timeout, toScala(cleanUp)).asJava
}
def create[In, Out](timeout: FiniteDuration):
akka.stream.javadsl.BidiFlow[In, In, Out, Try[Out], NotUsed] = {
apply(timeout, (_: Out) => ()).asJava
}
}
object TimeoutBidiOrdered {
/**
   * Creates a bidi [[GraphStage]] that is joined with flows that guarantee message ordering, to add timeout
* functionality.
*
* @param timeout the duration after which the processing of an element would be considered timed out
* @param cleanUp an optional clean up function to be applied on timed out elements when pushed
* @tparam In the type of the elements pulled from the upstream
* @tparam Out the type of the elements that are pushed to downstream
* @return a [[TimeoutBidiOrdered]] that can be joined with a [[Flow]] with corresponding types to add timeout
* functionality.
*/
  def apply[In, Out](timeout: FiniteDuration, cleanUp: Out => Unit = (_: Out) => ()): TimeoutBidiOrdered[In, Out] =
new TimeoutBidiOrdered(timeout, cleanUp)
}
/**
* A bidi [[GraphStage]] that is joined with flows to add timeout functionality. This bidi stage is used with flows
* that guarantee the message ordering.
*
* '''Emits when''' an element is available from the wrapped flow or an element has already timed out
*
* '''Backpressures when''' the downstream backpressures
*
* '''Completes when''' upstream completes
*
* '''Cancels when''' downstream cancels
*
* {{{
* +------+
* In ~> | | ~> In
* | bidi |
* Try[Out] <~ | | <~ Out
* +------+
* }}}
*
* @param timeout the duration after which the processing of an element would be considered timed out.
* @param cleanUp an optional clean up function to be applied on timed out elements when pushed
* @tparam In the type of the elements pulled from the upstream and pushed down to joined flow
* @tparam Out the type of the elements that are pushed to downstream
*/
final class TimeoutBidiOrdered[In, Out](timeout: FiniteDuration, cleanUp: Out => Unit) extends GraphStage[BidiShape[In, In, Out, Try[Out]]] {
val in = Inlet[In]("TimeoutBidiOrdered.in")
val fromWrapped = Inlet[Out]("TimeoutBidiOrdered.fromWrapped")
val toWrapped = Outlet[In]("TimeoutBidiOrdered.toWrapped")
val out = Outlet[Try[Out]]("TimeoutBidiOrdered.out")
val shape = BidiShape(in, toWrapped, fromWrapped, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimeoutGraphStageLogic(shape) {
val timeouts = mutable.Queue[TimeoutTracker]()
override def timeoutDuration: FiniteDuration = timeout
override def enqueueInTimeoutQueue(elem: In): Unit = timeouts.enqueue(TimeoutTracker(System.nanoTime(), false))
override def onPushFromWrapped(elem: Out, isOutAvailable: Boolean): Option[Try[Out]] = {
if (isOutAvailable) {
if (timeouts.dequeue().isTimedOut) {
tryCleanUp(elem, cleanUp)
None
}
else Some(Success(elem))
} else None
}
override def firstElemStartTime: Long = timeouts.find(!_.isTimedOut).map(_.startTime).getOrElse(0)
override def onPullOut() = None
override def onScheduledTimeout() = {
timeouts.find(!_.isTimedOut).filter(_.startTime < expirationTime).map { elem =>
elem.isTimedOut = true
Failure(FlowTimeoutException())
}
}
    override def isBuffersEmpty = timeouts.isEmpty || timeouts.forall(_.isTimedOut)
}
override def initialAttributes = Attributes.name("TimeoutBidiOrdered")
override def toString = "TimeoutBidiOrdered"
case class TimeoutTracker(startTime: Long, var isTimedOut: Boolean)
}
/**
* Exception thrown when an element times out.
*
* @param msg Defaults to "Flow timed out!"
*/
case class FlowTimeoutException(msg: String = "Flow timed out!") extends TimeoutException(msg)
object TimeoutBidi {
private[streams] def tryCleanUp[Out](elem: Out, cleanUp: Out => Unit): Unit = {
Try(cleanUp(elem)).recover {
case NonFatal(_) => ()
case ex => throw ex
}
}
private[streams] def toScala[T](consumer: Consumer[T]) = (t: T) => consumer.accept(t)
} | mzeltser69/squbs | squbs-ext/src/main/scala/org/squbs/streams/TimeoutBidi.scala | Scala | apache-2.0 | 20,080 |
package org.scala_tools.maven.mojo.extractor
import scala.tools.nsc._
import scala.tools.nsc.reporters._
import org.apache.maven.project.MavenProject
import org.apache.maven.plugin.descriptor.MojoDescriptor;
import scala.collection.JavaConversions._
import org.scala_tools.maven.mojo.util.MavenProjectTools
class MojoExtractorCompiler(project: MavenProject) extends MavenProjectTools with MojoExtractionInfo {
//Method to extract mojo description from a source file.
def extract(sourceFiles: String*): Seq[MojoDescriptor] = {
//helper method to initialize settings
def initialize: (Settings, Reporter) = {
val settings = new Settings();
//TODO - Set settings
settings.classpath.value = getCompileClasspathString(project)
settings.stop.tryToSetColon(List("constructors"))
settings.sourcepath.tryToSet(project.getCompileSourceRoots().asInstanceOf[java.util.List[String]].toList)
val reporter = new ConsoleReporter(settings);
(settings, reporter)
}
//helper method to execute presentation compiler
def execute(settings: Settings, reporter: Reporter) = {
val compiler = new Global(settings, reporter) with MojoAnnotationExtractor {
//override def onlyPresentation = true
}
//Extract mojo description
def extractMojos(unit: compiler.CompilationUnit) = {
for (info <- compiler.parseCompilationUnitBody(unit.body)) yield {
extractMojoDescriptor(info)
}
}
val run = new compiler.Run
run.compile(sourceFiles.toList)
for (unit <- run.units if !unit.isJava) yield {
extractMojos(unit)
}
}
val (settings, reporter) = initialize
execute(settings, reporter).toList.flatMap(x => x)
}
}
import org.scala_tools.maven.mojo.annotations._
trait MojoAnnotationExtractor extends CompilationUnits {
self: Global =>
/**Pulls all mojo classes out of the body of source code. */
def parseCompilationUnitBody(body: Tree) = {
/**Slow method to go look for the definition of a parent class */
def pullParentClass(symbol: Symbol) = {
currentRun.units.toList.flatMap {
unit =>
val x = unit.body.filter(_.isInstanceOf[ClassDef]).find(_.symbol == symbol).map(_.asInstanceOf[ClassDef])
x
}
}
/**Pulls the name of the parent class */
def pullParentClassSymbol(parent: Tree) = {
parent match {
case t@TypeTree() => Some(t.symbol)
case _ => None
}
}
object string {
def unapply(tree: Tree) = {
tree match {
case Literal(constant) => constant.tag match {
case StringTag => Some(constant.stringValue)
case _ => None
}
case _ => None
}
}
}
object boolean {
def unapply(tree: Tree) = {
tree match {
case Literal(constant) => constant.tag match {
case BooleanTag => Some(constant.booleanValue)
case _ => None
}
// In case a default value was used, this will ignore the value and use true.
case sel@Select(_, _) =>
sel.tpe.toString match {
case "Boolean" => Some(true)
case _ => None
}
case _ => None
}
}
}
/**Parses a list of annotations into a list of MojoAnnotationInfo classes */
def parseAnnotations(annotations: List[AnnotationInfo]) = {
for(annotation <- annotations) yield {
annotation.args match {
case Nil => MavenAnnotation(annotation.atp.safeToString)
case string(value) :: Nil => MavenAnnotation(annotation.atp.safeToString, value)
case boolean(value) :: Nil => MavenAnnotation(annotation.atp.safeToString, value)
case string(value1) :: string(value2) :: Nil => MavenAnnotation(annotation.atp.safeToString, value1, value2)
case x => throw new IllegalArgumentException("Annotation (%s) is not supported".format(annotation))
}
}
}
    /**Pulls out information about all injectable variables based on mojo annotations. */
def parseMojoInjectedVars(classImpl: Tree) = {
for{node@ValDef(_, name, tpt, _) <- classImpl.children
annotation <- node.symbol.annotations
if annotation.atp.safeToString == classOf[parameter].getName
} yield {
val varInfo = new MojoInjectedVarInfo(name.toString, tpt.toString, parseAnnotations(node.symbol.annotations))
varInfo
}
}
object isGoal {
def unapply(classDef: ClassDef) = classDef.symbol.annotations.exists(_.toString.contains("org.scala_tools.maven.mojo.annotations.goal"))
}
object Goal {
def unapply(classDef: ClassDef): Option[(String, List[MavenAnnotation], List[MojoInjectedVarInfo])] = Some((classDef.symbol.tpe.safeToString, parseAnnotations(classDef.symbol.annotations), parseMojoInjectedVars(classDef.impl)))
}
    /**Pulls out mojo information from a mojo class */
def parseMojoClass(mojoClass: ClassDef): MojoClassInfo = mojoClass match {
case Goal(name, annotations, args) =>
val parentSymbols = mojoClass.impl.parents.toList.flatMap(pullParentClassSymbol)
val parentClasses = parentSymbols.flatMap(pullParentClass)
val parentArgs = parentClasses.flatMap(x => parseMojoInjectedVars(x.impl))
//Combine all mojo injectable variables...
val finalArgs = args ++ parentArgs
val mojoInfo = new MojoClassInfo(name, annotations, finalArgs)
mojoInfo
}
var mojoInfos: List[MojoClassInfo] = List()
body.foreach {
x =>
x match {
case c@isGoal() =>
mojoInfos = parseMojoClass(c) :: mojoInfos
case c: Tree => Unit
}
}
mojoInfos
}
/**Attempts to pull a static value from the given tree.
*
   * @return
   *    Some(value) if a value is found, None otherwise
*/
def extractStaticValue(tree: Tree) = tree match {
case Literal(c) => Some(extractConstantValue(c))
case _ => None
}
/**
* Extracts a constant variable's runtime value
*/
def extractConstantValue(c: Constant) = {
//TODO - handle other values
c.stringValue
}
}
| jacekszymanski/mvnplugins | scala-mojo-support/src/main/java/org/scala_tools/maven/mojo/extractor/MojoExtractorCompiler.scala | Scala | apache-2.0 | 6,239 |
package org.sat4j.scala
import org.sat4j.specs.ContradictionException
object Logic {
/** A pretty printer for logic syntax trees. */
object PrettyPrint {
def apply(e: Exp): String = e match {
case True => "True"
case False => "False"
case Not(True) => "~True"
case Not(False) => "~False"
case v: AnonymousVariable => v.toString
case Not(v: AnonymousVariable) => "~" + v.toString
case Ident(s) => s.toString
case IndexedIdent(s, is) => s.toString + is.mkString("(", ",", ")")
case Not(Ident(s)) => "~" + s
case Not(IndexedIdent(s, is)) => "~" + s.toString + is.mkString("(", ",", ")")
case Not(b) => "~(" + apply(b) + ")"
case And(b1, b2) => "(" + apply(b1) + " & " + apply(b2) + ")"
case Or(b1, b2) => "(" + apply(b1) + " | " + apply(b2) + ")"
case Implies(b1, b2) => "(" + apply(b1) + " -> " + apply(b2) + ")"
case Iff(b1, b2) => "(" + apply(b1) + " <-> " + apply(b2) + ")"
case CardEQ(bs, k) => "(" + bs.map(apply).mkString(" + ") + " === " + k + ")"
case CardLE(bs, k) => "(" + bs.map(apply).mkString(" + ") + " <= " + k + ")"
case CardLT(bs, k) => "(" + bs.map(apply).mkString(" + ") + " < " + k + ")"
case CardGE(bs, k) => "(" + bs.map(apply).mkString(" + ") + " >= " + k + ")"
case CardGT(bs, k) => "(" + bs.map(apply).mkString(" + ") + " > " + k + ")"
}
def apply(cnfList: List[List[BoolExp]]): String =
cnfList match {
case Nil => "";
case c :: t =>
val line =
for (l <- c) yield apply(l)
"\n" + (line mkString " ") + apply(t)
}
}
/** Abstract base class of all DSL expressions. */
abstract class Exp
/** Abstract base class of all boolean valued expressions. */
abstract class BoolExp extends Exp {
def &(b: BoolExp) = And(this, b)
def |(b: BoolExp) = Or(this, b)
def implies(b: BoolExp) = Implies(this, b)
def iff(b: BoolExp) = Iff(this, b)
def unary_~() = Not(this)
def ? = Or(this, Not(this))
def +(b: BoolExp) = Card(List(b, this))
def ===(k: Int) = CardEQ(List(this), k)
def <=(k: Int) = CardLE(List(this), k)
def <(k: Int) = CardLT(List(this), k)
def >=(k: Int) = CardGE(List(this), k)
def >(k: Int) = CardGT(List(this), k)
def toCnfList(context: Context) = {
isAlreadyInCnf(this) match {
case (true, Some(x)) => x
case _ =>
val next = context.nextAnonymousVar
val translated = tseitinListSimple(this, List(), context)._2
assert(!(context._createdVars isEmpty))
List(next) :: translated
}
}
    /* If the BoolExp is a literal, then the following function returns the name of the literal and its sign. */
def isALiteral: Option[(String, Boolean)] = this match {
case Ident(s) => Some(s.toString, true)
case Not(Ident(s)) => Some(s.toString, false)
case AnonymousVariable(c) => Some(this.toString, true)
case Not(AnonymousVariable(c)) => Some(this.toString, false)
case _ => None
}
override def toString: String = PrettyPrint(this)
}
/** Base class for boolean constants True and False. */
abstract class BoolValue extends BoolExp
/** Truth. */
case object True extends BoolValue
/** Falsity. */
case object False extends BoolValue
/** Logical conjunction operator. */
private[Logic] case class And(b1: BoolExp, b2: BoolExp) extends BoolExp
/** Logical disjunction operator. */
private[Logic] case class Or(b1: BoolExp, b2: BoolExp) extends BoolExp
/** Logical implication operator. */
private[Logic] case class Implies(b1: BoolExp, b2: BoolExp) extends BoolExp
/** Logical equivalence operator. */
private[Logic] case class Iff(b1: BoolExp, b2: BoolExp) extends BoolExp
/** Base class for cardinality operators. */
abstract class CardExp extends BoolExp
/** Cardinality equals k operator. */
private[Logic] case class CardEQ(bs: List[BoolExp], k: Int) extends CardExp
/** Cardinality less than or equals k operator. */
private[Logic] case class CardLE(bs: List[BoolExp], k: Int) extends CardExp
/** Cardinality less than k operator. */
private[Logic] case class CardLT(bs: List[BoolExp], k: Int) extends CardExp
/** Cardinality greater than or equals k operator. */
private[Logic] case class CardGE(bs: List[BoolExp], k: Int) extends CardExp
/** Cardinality greater than k operator. */
private[Logic] case class CardGT(bs: List[BoolExp], k: Int) extends CardExp
/** Abstract base class of all integer valued expressions. */
protected abstract class IntExp extends Exp
/** Cardinality operator. */
case class Card(bs: List[BoolExp]) extends IntExp {
def +(b: BoolExp) = Card(b :: bs)
def ===(k: Int) = CardEQ(bs.reverse, k)
def <=(k: Int) = CardLE(bs.reverse, k)
def <(k: Int) = CardLT(bs.reverse, k)
def >=(k: Int) = CardGE(bs.reverse, k)
def >(k: Int) = CardGT(bs.reverse, k)
}
/** Logical negation operator. */
private[Logic] case class Not(b: BoolExp) extends BoolExp
abstract class Identifier extends BoolExp
/** Logical proposition identifier. */
private[Logic] case class Ident[U](name: U) extends Identifier {
def apply(indices: Int*) = IndexedIdent(name, indices.toList)
}
/** Logical proposition identifier. */
private[Logic] case class IndexedIdent[U](name: U, indices: List[Int] = Nil) extends Identifier {
}
/** Anonymous logical proposition. */
private[Logic] case class AnonymousVariable(context: Context) extends BoolExp {
private val id = context.nextVarId
override def toString = "_nv#" + id
override def equals(o: Any) = o match {
case x: AnonymousVariable => id == x.id
case _ => false
}
override def hashCode() = id
}
class Context {
private var _varId = 0
def nextVarId = {
_varId += 1
_varId
}
var _createdVars = List[AnonymousVariable]()
/** create new propositional variables for translation into CNF */
def newVar = if (_cachedVar != null) {
val tmp = _cachedVar
_cachedVar = null
tmp
} else uncachedNewVar
def uncachedNewVar = {
val v = new AnonymousVariable(this)
_createdVars = v :: _createdVars
v
}
def nextAnonymousVar = if (_cachedVar == null) {
_cachedVar = uncachedNewVar
_cachedVar
} else _cachedVar
private var _cachedVar = uncachedNewVar
def init() {
_varId = 0
_createdVars = List()
_cachedVar = uncachedNewVar
}
}
/** n-ary conjunction. */
def and(l: BoolExp*): BoolExp = and(l.toList)
/** n-ary conjunction. */
def and(l: List[BoolExp]): BoolExp = l match {
case Nil => True
case b :: Nil => b
case b :: t => l.reduceLeft {
(b1, b2) => And(b1, b2)
}
}
/** n-ary disjunction. */
def or(l: BoolExp*): BoolExp = or(l.toList)
/** n-ary disjunction. */
def or(l: List[BoolExp]): BoolExp = l match {
case Nil => False
case b :: Nil => b
case b :: t => l.reduceLeft {
(b1, b2) => Or(b1, b2)
}
}
/** Implicit conversion from string to logical identifier */
implicit def identFromString(s: String): Ident[String] = Ident(s)
/** Implicit conversion from string to logical identifier */
implicit def identFromSymbol(i: Symbol): Ident[Symbol] = Ident(i)
/** Convert any Scala object into a propositional variable */
def toProp[U](u: U): Ident[U] = Ident(u)
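
  // A small illustrative sketch of the DSL (the variable names are arbitrary): build a formula
  // with the implicit conversions above and hand it to isSat, defined later in this object.
  //
  //   val f = ("a" & "b") | ~"c"
  //   val (satisfiable, model) = isSat[String](f)
  //   // satisfiable is true and model binds "a", "b" and "c" when f has a satisfying assignment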
/** Returns true iff the given expression is already in conjunctive normal form. */
private def isAlreadyInCnf(f: BoolExp): (Boolean, Option[List[List[BoolExp]]]) = f match {
case And(b1, b2) =>
val (r1, l1) = isAlreadyInCnf(b1)
if (r1) {
val (r2, l2) = isAlreadyInCnf(b2)
if (r2) (true, Some(l1.get ++ l2.get))
else (false, None)
} else (false, None)
case Or(b1, b2) => isDisjunction(f)
case _ => isLiteral(f)
}
private def isDisjunction(f: BoolExp): (Boolean, Option[List[List[BoolExp]]]) = f match {
case Or(b1, b2) =>
val (r1, l1) = isDisjunction(b1)
if (r1) {
val (r2, l2) = isDisjunction(b2)
if (r2) (true, Some(List(l1.get(0) ++ l2.get(0))))
else (false, None)
} else (false, None)
case _ => isLiteral(f)
}
private def isLiteral(f: BoolExp): (Boolean, Option[List[List[BoolExp]]]) = f match {
case True => (true, Some(List(List(True))))
case False => (true, Some(List(List(False))))
case Ident(_) => (true, Some(List(List(f))))
case Not(Ident(_)) => (true, Some(List(List(f))))
case _ => (false, None)
}
private def tseitinListSimple(b: BoolExp, l: List[List[BoolExp]], context: Context): (BoolExp, List[List[BoolExp]]) = {
b match {
case True => (True, List())
case Not(False) => (True, List())
case False => (False, List())
case Not(True) => (False, List())
case Ident(s) => (Ident(s), List())
case IndexedIdent(s, i) => (IndexedIdent(s, i), List())
case Not(b1) =>
val v = context.newVar
val t1 = tseitinListSimple(b1, List(), context)
(v, List(~t1._1, ~v) :: List(t1._1, v) :: t1._2)
case And(b1, b2) =>
val v = context.newVar
val t1 = tseitinListSimple(b1, List(), context)
val t2 = tseitinListSimple(b2, List(), context)
(v, List(~t1._1, ~t2._1, v) :: List(t1._1, ~v) :: List(t2._1, ~v) :: t1._2 ++ t2._2)
case Or(b1, b2) =>
val v = context.newVar
val t1 = tseitinListSimple(b1, List(), context)
val t2 = tseitinListSimple(b2, List(), context)
(v, List(t1._1, t2._1, ~v) :: List(~t1._1, v) :: List(~t2._1, v) :: t1._2 ++ t2._2)
case Implies(b1, b2) =>
val v = context.newVar
val t1 = tseitinListSimple(b1, List(), context)
val t2 = tseitinListSimple(b2, List(), context)
(v, List(~t1._1, t2._1, ~v) :: List(t1._1, v) :: List(~t2._1, v) :: t1._2 ++ t2._2)
case Iff(b1, b2) =>
val v = context.newVar
val t1 = tseitinListSimple(b1, List(), context)
val t2 = tseitinListSimple(b2, List(), context)
(v, List(~t1._1, t2._1, ~v) :: List(t1._1, ~t2._1, ~v) :: List(t1._1, t2._1, v) :: List(~t1._1, ~t2._1, v) :: t1._2 ++ t2._2)
}
}
private def simplifyClause(c: List[BoolExp]): List[BoolExp] = c match {
case Nil => List()
case True :: t => List(True)
case Not(False) :: t => List(True)
case False :: t => simplifyClause(t)
case Not(True) :: t => simplifyClause(t)
case h :: t => h :: simplifyClause(t)
}
def simplifyCnf(l: List[List[BoolExp]]): List[List[BoolExp]] = l match {
case Nil => List()
case h :: t =>
val s = simplifyClause(h)
s match {
case List() => List(List())
case List(True) => simplifyCnf(t)
case _ => s :: simplifyCnf(t)
}
}
def encode(cnf: BoolExp, context: Context): (List[List[Int]], Map[BoolExp, Int]) = {
context.init()
encode(simplifyCnf(cnf.toCnfList(context)))
}
def encode(cnf: List[List[BoolExp]]): (List[List[Int]], Map[BoolExp, Int]) = {
encodeCnf0(cnf, Map[BoolExp, Int]())
}
// FIXME: Type check doesn't work as U is erased at compile time
def encode[U](exp: BoolExp): (List[List[Int]], Map[U, Int]) = {
val (cnfList, identMap) = encode(exp.toCnfList(new Context))
val outMap: Map[U, Int] = identMap map (p => (p._1 match {
case Ident(n) => n match {
case a: U => a
case _ => throw new IllegalArgumentException
}
case _ => throw new IllegalArgumentException
}, p._2))
(cnfList, outMap)
}
def decode[U](cnf: List[List[Int]], indexes: Map[U, Int]): BoolExp = {
val flat = cnf.flatten
val idents = indexes map {
case (x, y) => (y, if (cnf contains y) Ident(x) else ~Ident(x))
}
def disj(d: List[Int]): BoolExp = (False.asInstanceOf[BoolExp] /: d)((b, i) => b | (if (i > 0) idents(i) else Not(idents(-i))))
(True.asInstanceOf[BoolExp] /: cnf)(_ & disj(_))
}
private def encodeCnf0(cnf: List[List[BoolExp]], m: Map[BoolExp, Int]): (List[List[Int]], Map[BoolExp, Int]) = cnf match {
case Nil => (List(), m)
case h :: t =>
val p = encodeClause0(h, m)
p match {
case (Nil, _) => (List(List()), m)
case (l, mUpdated) =>
val cnfT = encodeCnf0(t, mUpdated)
(l :: cnfT._1, cnfT._2)
}
}
private def inv(x: Int): Int = -x
private def encodeClause0(c: List[BoolExp], m: Map[BoolExp, Int]): (List[Int], Map[BoolExp, Int]) = c match {
case Nil => (List(), m)
case (s: AnonymousVariable) :: q => encodeClause1(s, q, m, x => x)
case (s: Ident[_]) :: q => encodeClause1(s, q, m, x => x)
case (s: IndexedIdent[_]) :: q => encodeClause1(s, q, m, x => x)
case (Not(s: AnonymousVariable)) :: q => encodeClause1(s, q, m, inv)
case (Not(s: Ident[_])) :: q => encodeClause1(s, q, m, inv)
case (Not(s: IndexedIdent[_])) :: q => encodeClause1(s, q, m, inv)
    case _ => throw new Exception("There is something that is not a literal in the clause " + PrettyPrint(List(c)))
}
def encodeClause1(c: BoolExp, q: List[BoolExp], m: Map[BoolExp, Int], f: Int => Int): (List[Int], Map[BoolExp, Int]) = m.get(c) match {
case Some(i) =>
val p = encodeClause0(q, m)
(f(i) :: p._1, p._2)
case None =>
val n = m.size + 1
val p = encodeClause0(q, m.updated(c, n))
(f(n) :: p._1, p._2)
}
def isSat[U](f: BoolExp): (Boolean, Option[Map[U, Boolean]]) = {
val (cnf, m) = encode(f, new Context)
val mapRev = m map {
case (x, y) => (y, x)
}
val problem = new Problem
try {
cnf.foldLeft(problem) {
(p, c) => p += Clause(c)
}
val res = problem.solve
res match {
case Satisfiable =>
(true, Some(modelToMap(problem.model, mapRev)))
case Unsatisfiable => (false, None)
case _ => throw new IllegalStateException("Got a time out")
}
} catch {
case e: ContradictionException => (false, None)
}
}
def modelToMap[U](model: Array[Int], mapRev: Map[Int, BoolExp]): Map[U, Boolean] = {
val listeBoolExp = model.toList map {
x => if (x > 0) mapRev(x) -> true else mapRev(-x) -> false
}
val mapIdentBool = listeBoolExp filter {
case (s: AnonymousVariable, _) => false
case (Ident(s), _) => true
case _ => false
}
val mapUBool = mapIdentBool map {
case (Ident(s: U), b) => (s, b)
case _ => throw new IllegalStateException
}
mapUBool.toMap
}
def isValid[U](f: BoolExp): (Boolean, Option[Map[U, Boolean]]) = {
val (b, m) = isSat[U](~f)
(!b, m)
}
def allSat[U](f: BoolExp): (Boolean, Option[List[Map[U, Boolean]]]) = {
val (cnf, m) = encode(f, new Context)
val mapRev = m map {
case (x, y) => (y, x)
}
val problem = new Problem
try {
cnf.foldLeft(problem) {
(p, c) => p += Clause(c)
}
val res = problem.enumerate
res match {
case (Satisfiable, list) =>
val resList: List[Map[U, Boolean]] = list map {
a => modelToMap[U](a, mapRev)
}
(true, Some(resList))
case (Unsatisfiable, list) => (false, None)
case (Unknown, list) =>
val resList: List[Map[U, Boolean]] = list map {
a => modelToMap[U](a, mapRev)
}
(false, Some(resList))
}
} catch {
case e: ContradictionException => (false, None)
}
}
}
| dreef3/glowing-avenger | src/main/scala/org/sat4j/scala/Logic.scala | Scala | mit | 15,571 |
// package benchmarks
// package cec
// package cec2005
// import benchmarks.cec.Helper
// import benchmarks.dimension._
// import benchmarks.dimension.implicits._
// import benchmarks.matrix._
// import benchmarks.matrix.implicits._
// import shapeless._
// import shapeless.ops.nat.ToInt
// import spire.implicits._
// import spire.math.{ ceil, floor }
// import cilib.{ Dist, RVar }
// trait F1Params[N <: Nat, A] { val params: (Dimension[N, A], A) }
// trait F2Params[N <: Nat, A] { val params: (Dimension[N, A], A) }
// trait F3Params[N <: Nat, A] { val params: (Dimension[N, A], Matrix[N, N, A], A) }
// trait F4Params[N <: Nat, A] { val params: (Dimension[N, A], A, RVar[A]) }
// trait F5Params[N <: Nat, A] { val params: (Dimension[N, A], Matrix[N, N, A], Double) }
// trait F6Params[N <: Nat, A] { val params: (Dimension[N, A], Double) }
// trait F7Params[N <: Nat, A] { val params: (Dimension[N, A], Matrix[N, N, A], A) }
// trait F8Params[N <: Nat, A] { val params: (Dimension[N, A], Matrix[N, N, A], A) }
// trait F9Params[N <: Nat, A] { val params: (Dimension[N, A], Double) }
// trait F10Params[N <: Nat, A] { val params: (Dimension[N, A], Matrix[N, N, A], A) }
// trait F11Params[N <: Nat, A] { val params: (Dimension[N, A], Matrix[N, N, A], A) }
// trait F12Params[N <: Nat, A] {
// val params: (Dimension[N, A], Matrix[N, N, A], Matrix[N, N, A], A)
// }
// trait F13Params[N <: Nat, A] { val params: (Dimension[N, A], Double) }
// trait F14Params[N <: Nat, A] { val params: (Dimension[N, A], Matrix[N, N, A], A) }
// trait F15Params[N <: Nat, A] {
// val params: (Dimension10[Dimension[N, A]], Dimension10[Matrix[N, N, A]], A)
// }
// trait F16Params[N <: Nat, A] {
// val params: (Dimension10[Dimension[N, A]], Dimension10[Matrix[N, N, A]], A)
// }
// trait F17Params[A] { val params: (A, RVar[A]) }
// trait F18Params[N <: Nat, A] {
// val params: (Dimension10[Dimension[N, A]], Dimension10[Matrix[N, N, A]], A)
// }
// trait F19Params[A] { val params: A }
// trait F20Params[N <: Nat, A] { val params: (Dimension10[Dimension[N, A]], A) }
// trait F21Params[N <: Nat, A] {
// val params: (Dimension10[Dimension[N, A]], Dimension10[Matrix[N, N, A]], A)
// }
// trait F22Params[N <: Nat, A] {
// val params: (Dimension10[Dimension[N, A]], Dimension10[Matrix[N, N, A]], A)
// }
// trait F23Params[A] { val params: A }
// trait F24Params[N <: Nat, A] {
// val params: (Dimension10[Dimension[N, A]], Dimension10[Matrix[N, N, A]], A, RVar[A])
// }
// trait F25Params[A] { val params: A }
// sealed trait CECSized[N <: Nat]
// trait Params {
// implicit object cecSized2 extends CECSized[_2]
// implicit object cecSized10 extends CECSized[_10]
// implicit object cecSized30 extends CECSized[_30]
// implicit object cecSized50 extends CECSized[_50]
// val helper = Helper("cec2005")
// implicit def f1Params[N <: Nat: ToInt: CECSized] =
// new F1Params[N, Double] {
// val params = (
// helper.shiftFromResource("sphere_func_data.txt"),
// helper.fbiasFromResource(1)
// )
// }
// implicit def f2Params[N <: Nat: ToInt: CECSized] =
// new F2Params[N, Double] {
// val params = (
// helper.shiftFromResource("schwefel_102_data.txt"),
// helper.fbiasFromResource(2)
// )
// }
// implicit def f3Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F3Params[N, Double] {
// val params = (
// helper.shiftFromResource("high_cond_elliptic_rot_data.txt"),
// helper.matrixFromResource(s"elliptic_M_D${ev.apply}.txt"),
// helper.fbiasFromResource(3)
// )
// }
// implicit def f4Params[N <: Nat: ToInt: CECSized] =
// new F4Params[N, Double] {
// val params = (
// helper.shiftFromResource("schwefel_102_data.txt"),
// helper.fbiasFromResource(4),
// Dist.stdNormal
// )
// }
// implicit def f5Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F5Params[N, Double] {
// val params = {
// val dim = ev.apply
// val shift = helper.shiftFromResource("schwefel_206_data.txt")
// val o = shift.zipWithIndex map {
// case (oi, i) =>
// if ((i + 1) <= ceil(dim / 4.0)) -100.0
// else if ((i + 1) >= floor((3.0 * dim) / 4.0)) 100.0
// else oi
// }
// (
// o,
// helper.matrixFromResourceTail(s"schwefel_206_data.txt").t,
// helper.fbiasFromResource(5)
// )
// }
// }
// implicit def f6Params[N <: Nat: ToInt: CECSized] =
// new F6Params[N, Double] {
// val params = (
// helper.shiftFromResource("rosenbrock_func_data.txt"),
// helper.fbiasFromResource(6)
// )
// }
// implicit def f7Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F7Params[N, Double] {
// val params = (
// helper.shiftFromResource("griewank_func_data.txt"),
// helper.matrixFromResource(s"griewank_M_D${ev.apply}.txt"),
// helper.fbiasFromResource(7)
// )
// }
// implicit def f8Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F8Params[N, Double] {
// val params = {
// val o = helper.shiftFromResource("ackley_func_data.txt").zipWithIndex.map {
// case (oi, i) => if (i % 2 == 0) -32.0 else oi
// }
// (
// o,
// helper.matrixFromResource(s"ackley_M_D${ev.apply}.txt"),
// helper.fbiasFromResource(8)
// )
// }
// }
// implicit def f9Params[N <: Nat: ToInt: CECSized] =
// new F9Params[N, Double] {
// val params = (
// helper.shiftFromResource("rastrigin_func_data.txt"),
// helper.fbiasFromResource(9)
// )
// }
// implicit def f10Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F10Params[N, Double] {
// val params = (
// helper.shiftFromResource("rastrigin_func_data.txt"),
// helper.matrixFromResource(s"rastrigin_M_D${ev.apply}.txt"),
// helper.fbiasFromResource(10)
// )
// }
// implicit def f11Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F11Params[N, Double] {
// val params = (
// helper.shiftFromResource("weierstrass_data.txt"),
// helper.matrixFromResource(s"weierstrass_M_D${ev.apply}.txt"),
// helper.fbiasFromResource(11)
// )
// }
// implicit def f12Params[N <: Nat: CECSized: ToInt] =
// new F12Params[N, Double] {
// val params = (
// helper.shiftFromResourceF("schwefel_213_data.txt", _.last),
// helper.matrixFromResourceF("schwefel_213_data.txt", _.take(100)),
// helper.matrixFromResourceF("schwefel_213_data.txt", _.drop(100).take(100)),
// helper.fbiasFromResource(12)
// )
// }
// implicit def f13Params[N <: Nat: CECSized: ToInt] =
// new F13Params[N, Double] {
// val params = (
// helper.shiftFromResource("EF8F2_func_data.txt"),
// helper.fbiasFromResource(13)
// )
// }
// implicit def f14Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F14Params[N, Double] {
// val params = (
// helper.shiftFromResource("E_ScafferF6_func_data.txt"),
// helper.matrixFromResource(s"E_ScafferF6_M_D${ev.apply}.txt"),
// helper.fbiasFromResource(14)
// )
// }
// implicit def f15Params[N <: Nat: CECSized: GTEq1](implicit ev: ToInt[N]) =
// new F15Params[N, Double] {
// val params = (
// Sized.wrap(helper.shiftsFromResource("hybrid_func1_data.txt").toVector),
// Sized.wrap(Vector.fill(10)(Matrix.eye[N, Double])),
// helper.fbiasFromResource(15)
// )
// }
// implicit def f16Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F16Params[N, Double] {
// val params = (
// Sized.wrap(helper.shiftsFromResource("hybrid_func1_data.txt").toVector),
// helper.matrix10FromResource(s"hybrid_func1_M_D${ev.apply}.txt"),
// helper.fbiasFromResource(16)
// )
// }
// implicit val f17Params =
// new F17Params[Double] {
// val params = (helper.fbiasFromResource(17), Dist.stdNormal)
// }
// implicit def f18Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F18Params[N, Double] {
// val params = {
// val o: List[Dimension[N, Double]] =
// helper.shiftsFromResource("hybrid_func2_data.txt")
// val shift: Dimension10[Dimension[N, Double]] =
// Sized.wrap((o.init :+ o.last.map(_ => 0.0)).toVector)
// val m = helper.matrix10FromResource(s"hybrid_func2_M_D${ev.apply}.txt")
// (shift, m, helper.fbiasFromResource(18))
// }
// }
// implicit val f19Params =
// new F19Params[Double] {
// val params = helper.fbiasFromResource(19)
// }
// implicit def f20Params[N <: Nat: CECSized: ToInt] =
// new F20Params[N, Double] {
// val params = {
// val o: List[Dimension[N, Double]] =
// helper.shiftsFromResource("hybrid_func2_data.txt")
// val head = o.head.zipWithIndex map {
// case (oi, i) => if (i % 2 == 1) 5.0 else oi
// }
// val middle = o.tail.init
// val last = o.last.map(_ => 0.0)
// val shift: Dimension10[Dimension[N, Double]] =
// Sized.wrap(((head :: middle) :+ last).toVector)
// (shift, helper.fbiasFromResource(20))
// }
// }
// implicit def f21Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F21Params[N, Double] {
// val params = (
// Sized.wrap(helper.shiftsFromResource("hybrid_func3_data.txt").toVector),
// helper.matrix10FromResource(s"hybrid_func3_M_D${ev.apply}.txt"),
// helper.fbiasFromResource(21)
// )
// }
// implicit def f22Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F22Params[N, Double] {
// val params = (
// Sized.wrap(helper.shiftsFromResource("hybrid_func3_data.txt").toVector),
// helper.matrix10FromResource(s"hybrid_func3_HM_D${ev.apply}.txt"),
// helper.fbiasFromResource(22)
// )
// }
// implicit val f23Params =
// new F23Params[Double] {
// val params = helper.fbiasFromResource(23)
// }
// implicit def f24Params[N <: Nat: CECSized](implicit ev: ToInt[N]) =
// new F24Params[N, Double] {
// val params = (
// Sized.wrap(helper.shiftsFromResource("hybrid_func4_data.txt").toVector),
// helper.matrix10FromResource(s"hybrid_func4_M_D${ev.apply}.txt"),
// helper.fbiasFromResource(24),
// Dist.stdNormal
// )
// }
// implicit val f25Params =
// new F25Params[Double] {
// val params = helper.fbiasFromResource(25)
// }
// }
| cirg-up/benchmarks | src/main/scala/benchmarks/cec/cec2005/Params.scala | Scala | apache-2.0 | 11,003 |
package sparkapps
import org.apache.spark.mllib.classification._
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.{SparkContext, SparkConf}
/**
* Created by ludwineprobst on 14/10/2014.
*/
object ClassifyMailWithSVMWithSGD {
def sparkJob() = {
// Specify the path to your data file
val conf = new SparkConf()
      .setAppName("Spark classify mail as spam or non-spam with SVMWithSGD")
//Add more config if needed
.setMaster("local")
val sc = new SparkContext(conf)
// load the data
val data = sc.textFile("spambase.csv")
val parsedData = data.map { line =>
val parts = line.split(',').map(_.toDouble)
// prepare data into RDD[LabeledPoint]
// LabeledPoint is a couple (label, features)
LabeledPoint(parts(0), Vectors.dense(parts.tail))
}
    // Split data into 2 sets: training (60%) and test (40%).
val splits = parsedData.randomSplit(Array(0.6, 0.4), seed = 11L)
// training split
val training = splits(0)
// test split
val test = splits(1)
    // train the model on the training set with 100 iterations
    val model = SVMWithSGD.train(training, 100)
val predictionAndLabel = test.map(p => (model.predict(p.features), p.label))
val metrics = new BinaryClassificationMetrics(predictionAndLabel)
val accuracy = 1.0 * predictionAndLabel.filter(x => x._1 == x._2).count() / test.count()
println("accuracy " + accuracy)
println("metrics " + metrics.areaUnderPR())
println("metrics " + metrics.areaUnderROC())
}
def main(args: Array[String])= sparkJob()
}
| Ludwsam/SparkTemplate | src/main/scala/sparkapps/ClassifyMailWithSVMWithSGD.scala | Scala | mit | 1,727 |
package spray.can.websocket
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import spray.can.Http
import spray.can.server.UHttp
import spray.can.websocket
import spray.can.websocket.frame.Frame
import spray.can.websocket.frame.FrameStream
trait WebSocketServerWorker extends Actor with ActorLogging {
/**
* The HttpServerConnection actor, which holds the pipelines
*/
def serverConnection: ActorRef
def receive = handshaking orElse closeLogic
def closeLogic: Receive = {
case ev: Http.ConnectionClosed =>
context.stop(self)
log.debug("Connection closed on event: {}", ev)
}
def handshaking: Receive = {
// when a client request for upgrading to websocket comes in, we send
// UHttp.Upgrade to upgrade to websocket pipelines with an accepting response.
case websocket.HandshakeRequest(state) =>
state match {
case wsFailure: websocket.HandshakeFailure => sender() ! wsFailure.response
case wsContext: websocket.HandshakeContext => sender() ! UHttp.UpgradeServer(websocket.pipelineStage(self, wsContext), wsContext.response)
}
// upgraded successfully
case UHttp.Upgraded =>
context.become(businessLogic orElse closeLogic)
self ! websocket.UpgradedToWebSocket // notify Upgraded to WebSocket protocol
}
def businessLogic: Receive
def send(frame: Frame) {
serverConnection ! FrameCommand(frame)
}
def send(frame: FrameStream) {
serverConnection ! FrameStreamCommand(frame)
}
}
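
// A minimal example worker (an illustrative sketch, not part of the original library): it simply
// echoes every incoming frame back to the client once the connection has been upgraded.
class EchoWorker(val serverConnection: ActorRef) extends WebSocketServerWorker {
  def businessLogic: Receive = {
    case frame: Frame =>
      // echo the frame back over the upgraded connection
      send(frame)
    case websocket.UpgradedToWebSocket =>
      log.debug("Connection upgraded to WebSocket")
  }
}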
| smootoo/simple-spray-websockets | src/main/scala/spray/can/websocket/WebSocketServerWorker.scala | Scala | unlicense | 1,533 |
/**
DrawPanel
*/
package org.loom.scaffold
import javax.swing._
import java.awt._
import java.awt.event._
import org.loom.interaction._
class DrawPanel() extends JPanel {
println("DrawPanel loaded")
var paused: Boolean = false
val drawManager: DrawManager = new DrawManager()
val interactionManager: InteractionManager = new InteractionManager(drawManager)
val keyL: KeyListener = new KeyPressListener(interactionManager)
addKeyListener(keyL)
val mC: MouseAdapter = new MouseClick(interactionManager).asInstanceOf[MouseAdapter]
addMouseListener(mC)
val mML: MouseMotionListener = new MouseMotion(interactionManager).asInstanceOf[MouseMotionListener]
addMouseMotionListener(mML)
setFocusable(true)
var dBuffer: Image = null
AnimationActor.setDrawPanel(this)
AnimationActor.start()
def animationUpdate(): Unit = {
//println("updating animation");
if (!paused) {
drawManager.update()
}
}
def animationRender(): Unit = {
//println("rendering animation");
if (dBuffer == null) {//only runs first time through
dBuffer = createImage(Config.width, Config.height);
} else {
var dBufferGraphics = dBuffer.getGraphics();
drawManager.draw(dBufferGraphics);
var g: Graphics = this.getGraphics();
paintScreen(g);
}
}
def paintScreen(g: Graphics): Unit = {
var g2D: Graphics2D = g.asInstanceOf[Graphics2D]
if (dBuffer != null) {
g2D.drawImage(dBuffer, 0, 0, null)
} else {
System.out.println("unable to create double buffer")
}
}
}
| brogan/Loom | src/org/loom/scaffold/DrawPanel.scala | Scala | gpl-3.0 | 1,698 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.process.temporalDensity
import java.util.Date
import com.typesafe.scalalogging.slf4j.Logging
import org.geotools.data.Query
import org.geotools.data.simple.{SimpleFeatureCollection, SimpleFeatureSource}
import org.geotools.data.store.ReTypingFeatureCollection
import org.geotools.feature.DefaultFeatureCollection
import org.geotools.feature.visitor.{AbstractCalcResult, CalcResult, FeatureCalc}
import org.geotools.process.factory.{DescribeParameter, DescribeProcess, DescribeResult}
import org.geotools.util.NullProgressListener
import org.joda.time.Interval
import org.locationtech.geomesa.accumulo.index.QueryHints
import org.locationtech.geomesa.accumulo.iterators.TemporalDensityIterator.createFeatureType
import org.opengis.feature.Feature
import org.opengis.feature.simple.SimpleFeature
@DescribeProcess(
title = "Temporal Density Process",
description = "Returns a histogram of how many data points fall in different time buckets within an interval."
)
class TemporalDensityProcess extends Logging {
@DescribeResult(description = "Output feature collection")
def execute(
@DescribeParameter(
name = "features",
description = "The feature set on which to query")
features: SimpleFeatureCollection,
@DescribeParameter(
name = "startDate",
description = "The start of the time interval")
startDate: Date,
@DescribeParameter(
name = "endDate",
description = "The end of the time interval")
endDate: Date,
@DescribeParameter(
name = "buckets",
min = 1,
description = "How many buckets we want to divide our time interval into.")
buckets: Int
): SimpleFeatureCollection = {
logger.debug("Attempting Geomesa temporal density on type " + features.getClass.getName)
if (features.isInstanceOf[ReTypingFeatureCollection]) {
logger.warn("WARNING: layer name in geoserver must match feature type name in geomesa")
}
val interval = new Interval(startDate.getTime, endDate.getTime)
val visitor = new TemporalDensityVisitor(features, interval, buckets)
features.accepts(visitor, new NullProgressListener)
visitor.getResult.asInstanceOf[TDResult].results
}
}
class TemporalDensityVisitor(features: SimpleFeatureCollection, interval: Interval, buckets: Int)
extends FeatureCalc with Logging {
val retType = createFeatureType(features.getSchema())
val manualVisitResults = new DefaultFeatureCollection(null, retType)
  // Called for non-Accumulo feature collections
def visit(feature: Feature): Unit = {
val sf = feature.asInstanceOf[SimpleFeature]
manualVisitResults.add(sf)
}
var resultCalc: TDResult = new TDResult(manualVisitResults)
override def getResult: CalcResult = resultCalc
def setValue(r: SimpleFeatureCollection) = resultCalc = TDResult(r)
def query(source: SimpleFeatureSource, query: Query) = {
logger.debug("Running Geomesa temporal density process on source type " + source.getClass.getName)
query.getHints.put(QueryHints.TEMPORAL_DENSITY_KEY, java.lang.Boolean.TRUE)
query.getHints.put(QueryHints.TIME_INTERVAL_KEY, interval)
query.getHints.put(QueryHints.TIME_BUCKETS_KEY, buckets)
source.getFeatures(query)
}
}
case class TDResult(results: SimpleFeatureCollection) extends AbstractCalcResult
| drackaer/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/process/temporalDensity/TemporalDensityProcess.scala | Scala | apache-2.0 | 4,004 |
package lila.user
import play.api.mvc.{ Request, RequestHeader }
sealed trait UserContext {
val req: RequestHeader
val me: Option[User]
def isAuth = me.isDefined
def isAnon = !isAuth
def is(user: User): Boolean = me ?? (user ==)
def userId = me map (_.id)
def username = me map (_.username)
def troll = me.??(_.troll)
def ip = req.remoteAddress
}
sealed abstract class BaseUserContext(val req: RequestHeader, val me: Option[User]) extends UserContext {
override def toString = "%s %s %s".format(
me.fold("Anonymous")(_.username),
req.remoteAddress,
req.headers.get("User-Agent") | "?"
)
}
final class BodyUserContext[A](val body: Request[A], m: Option[User])
extends BaseUserContext(body, m)
final class HeaderUserContext(r: RequestHeader, m: Option[User])
extends BaseUserContext(r, m)
trait UserContextWrapper extends UserContext {
val userContext: UserContext
val req = userContext.req
val me = userContext.me
val kid = me.??(_.kid)
val noKid = !kid
}
object UserContext {
def apply(req: RequestHeader, me: Option[User]): HeaderUserContext =
new HeaderUserContext(req, me)
def apply[A](req: Request[A], me: Option[User]): BodyUserContext[A] =
new BodyUserContext(req, me)
}
| r0k3/lila | modules/user/src/main/UserContext.scala | Scala | mit | 1,256 |
package io.youi.drawable.stats
import io.youi.drawable.{Context, Drawable}
import reactify._
class RenderStats {
private val samples = 1000
private var firstRender = 0L
private var lastRender = 0L
private var renderStart = 0L
private var renderFinish = 0L
private val count: Var[Long] = Var(0L)
private var lastElapsed = 0.0
private val elapsed: Array[Double] = new Array[Double](samples)
private var position: Int = 0
(0 until samples).foreach { index =>
elapsed(index) = -1.0
}
def draw(drawable: Drawable, context: Context, x: Double, y: Double): Unit = {
val start = System.nanoTime()
drawable.draw(context, x, y)
val finished = System.nanoTime()
val elapsed = (finished - start) / 1000000000.0
this.elapsed(position) = elapsed
position += 1
if (position == samples) {
position = 0
}
if (firstRender == 0L) {
firstRender = finished
}
lastRender = finished
renderStart = start
renderFinish = finished
count.static(count + 1)
lastElapsed = elapsed
}
def average: Double = {
var count = 0.0
var sum = 0.0
elapsed.foreach { e =>
if (e != -1.0) {
sum += e
count += 1.0
}
}
sum / count
}
def min: Double = {
elapsed.foldLeft(1000.0)((min, current) => if (current != -1.0) math.min(min, current) else min)
}
def max: Double = {
elapsed.max match {
case -1.0 => 0.0
case d => d
}
}
def renders: Val[Long] = count
def current: Double = lastElapsed
def fps: Int = math.round(1.0 / current).toInt
def averageFPS: Int = math.round(1.0 / average).toInt
override def toString: String = {
f"Current: $fps fps ($current%2.2f), Average: $averageFPS ($average%2.2f), Min: $min%2.2f, Max: $max%2.2f, Renders: ${renders()}"
}
}
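// Illustrative usage sketch, assuming a Drawable `drawable` and a Context `context`
// are available; the stats wrapper is called in place of drawable.draw directly:
//
//   val stats = new RenderStats
//   stats.draw(drawable, context, 0.0, 0.0)
//   println(stats)   // prints the formatted summary produced by toString above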
| outr/youi | ui/js/src/main/scala/io/youi/drawable/stats/RenderStats.scala | Scala | mit | 1,767 |
package vep.app.user
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import vep.app.user.adhesion.AdhesionIntegrationModule
import vep.app.user.profile.ProfileIntegrationModule
import vep.app.user.registration.RegistrationIntegrationModule
import vep.app.user.session.SessionIntegrationModule
trait UserIntegrationModule
extends RegistrationIntegrationModule
with SessionIntegrationModule
with ProfileIntegrationModule
with AdhesionIntegrationModule {
lazy val userService = new UserService()
lazy val userRoute: Route = registrationRouter.route ~ sessionRouter.route ~ profileRouter.route ~ adhesionRouter.route
}
| kneelnrise/vep | src/main/scala/vep/app/user/UserIntegrationModule.scala | Scala | mit | 673 |
package com.dslplatform.api.client
import scala.reflect.ClassTag
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.Duration
import scala.concurrent.Await
import scala.collection.mutable.Buffer
import com.dslplatform.api.patterns.Cube
import com.dslplatform.api.patterns.Searchable
import com.dslplatform.api.patterns.ServiceLocator
import com.dslplatform.api.patterns.Specification
/** Utility class for building an OLAP cube analysis.
*/
class CubeBuilder[TCube <: Cube[TSource]: ClassTag, TSource <: Searchable: ClassTag](
cube: Cube[TSource]) {
private var specification: Option[Specification[TSource]] = None
private val dimensions: Buffer[String] = Buffer.empty
private val facts: Buffer[String] = Buffer.empty
private var limit: Option[Int] = None
private var offset: Option[Int] = None
private val order: Buffer[(String, Boolean)] = Buffer.empty
/** Restrict analysis on data subset
*
* @param specification use provided specification to filter data used for analysis
* @return self
*/
def where(specification: Specification[TSource]) = filter(specification)
/** Restrict analysis on data subset
*
* @param specification use provided specification to filter data used for analysis
* @return self
*/
def filter(specification: Specification[TSource]) = {
this.specification = Option(specification)
this
}
/** Add dimension or fact to the result
*
* @param dimensionOrFact dimension or fact which will be shown in result
* @return self
*/
def use(dimensionOrFact: String) = {
require(dimensionOrFact ne null, "null value provided for dimension or fact")
require(dimensionOrFact.length != 0, "empty value provided for dimension or fact")
if (cube.dimensions.contains(dimensionOrFact)) {
dimensions += dimensionOrFact
} else if (cube.facts.contains(dimensionOrFact)) {
facts += dimensionOrFact
} else {
throw new IllegalArgumentException("Unknown dimension or fact: " + dimensionOrFact)
}
this
}
private def orderBy(property: String, ascending: Boolean) = {
if (property == null || property == "")
throw new IllegalArgumentException("property can't be empty");
order += property -> ascending
this
}
/** Order result ascending using a provided property or path
*
* @param property name of domain objects property or path
* @return self
*/
def ascending(property: String) = orderBy(property, true)
/** Order result descending using a provided property or path
*
* @param property name of domain objects property or path
* @return self
*/
def descending(property: String) = orderBy(property, false)
/** Limit total number of results to provided value
*
* @param limit maximum number of results
* @return self
*/
def limit(limit: Int) = take(limit)
/** Limit total number of results to provided value
*
* @param limit maximum number of results
* @return self
*/
def take(limit: Int): this.type = {
this.limit = Some(limit)
this
}
/** Skip specified number of initial results
*
* @param offset number of results to skip
* @return self
*/
def offset(offset: Int) = skip(offset)
/** Skip specified number of initial results
*
* @param offset number of results to skip
* @return self
*/
def skip(offset: Int): this.type = {
this.offset = Some(offset)
this
}
/** Runs the analysis using provided configuration.
* Result will be deserialized to TResult
*
* @return sequence of specified data types
*/
def analyze[TResult: ClassTag](
implicit locator: ServiceLocator,
ec: ExecutionContext,
duration: Duration): Seq[TResult] = {
require(locator ne null, "locator not provided")
require(ec ne null, "execution context not provided")
require(duration ne null, "duration not provided")
val proxy = locator.resolve[StandardProxy]
Await.result(
proxy.olapCube[TCube, TSource, TResult](
specification,
dimensions,
facts,
limit,
offset,
order.toMap),
duration)
}
/** Runs the analysis using provided configuration.
* Result will be deserialized into sequence of Map[String, Any]
*
* @return analysis result
*/
def analyzeMap(
implicit locator: ServiceLocator,
ec: ExecutionContext,
duration: Duration): Seq[Map[String, Any]] = {
require(locator ne null, "locator not provided")
require(ec ne null, "execution context not provided")
require(duration ne null, "duration not provided")
val proxy = locator.resolve[StandardProxy]
Await.result(
proxy.olapCube[TCube, TSource, Map[String, Any]](
specification,
dimensions,
facts,
limit,
offset,
order.toMap),
duration)
}
}
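// Illustrative usage sketch; `SalesCube`, `Sale` and `cube` are hypothetical names, and
// an implicit ServiceLocator, ExecutionContext and Duration are assumed to be in scope:
//
//   val rows: Seq[Map[String, Any]] =
//     new CubeBuilder[SalesCube, Sale](cube)
//       .use("region")
//       .use("total")
//       .descending("total")
//       .take(10)
//       .analyzeMap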
| ngs-doo/dsl-client-scala | http/src/main/scala/com/dslplatform/api/client/CubeBuilder.scala | Scala | bsd-3-clause | 5,006 |
/*
* Copyright (c) 2014 the original author or authors.
*
* Licensed under the MIT License;
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://opensource.org/licenses/MIT
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package git
package util
import java.io.{BufferedInputStream, File, FileInputStream, FileOutputStream}
import java.nio.file.Files
import java.nio.file.attribute.FileTime
case class FileStat(
ctime: Int,
ctimeFractions: Int,
mtime: Int,
mtimeFractions: Int,
device: Int,
inode: Int,
mode: Int,
uid: Int,
gid: Int,
size: Int
)
object FileUtil {
/** Returns the file contents as a String. */
def readString(file: File): String = new String(readContents(file).toArray)
/** Returns the file contents as a byte sequence. */
def readContents(file: File): Seq[Byte] = {
val bis = new BufferedInputStream(new FileInputStream(file))
val bytes = try {
Stream.continually(bis.read).takeWhile(-1 !=).map(_.toByte).toList
} finally {
bis.close()
}
bytes
}
/** Writes the given data to the given file. */
def writeToFile(file: File, data: Seq[Byte]): Unit = {
val fos = new FileOutputStream(file)
try {
fos.write(data.toArray)
} finally {
fos.close()
}
}
/** Creates a file with the given contents. */
def createFileWithContents(path: String, data: String): Unit = {
val f = new File(path)
f.createNewFile()
FileUtil.writeToFile(f, data.getBytes.toList)
}
/** Returns every file under the given folder recursively. */
def recursiveListFiles(file: File, ignoreDirectories: Seq[File] = Seq()): Array[File] = {
val these = file.listFiles.filterNot((f) => ignoreDirectories.contains(f))
these ++ these.filter(_.isDirectory).flatMap((f: File) => recursiveListFiles(f, ignoreDirectories = ignoreDirectories))
}
/** Implementation of Unix stat(2). */
def stat(file: File): FileStat = {
val isUnix = Files.getFileStore(file.toPath).supportsFileAttributeView("unix")
val (ctimeSeconds, ctimeFractions) = {
if (isUnix) {
val ctime = Files.getAttribute(file.toPath, "unix:ctime").asInstanceOf[FileTime].toMillis
val ctimeSeconds = math.floor(ctime / 1000).toInt
val ctimeFractions = (ctime - ctimeSeconds).toInt
(ctimeSeconds, ctimeFractions)
} else {
val ctime = Files.getAttribute(file.toPath, "basic:creationTime").asInstanceOf[FileTime].toMillis
val ctimeSeconds = math.floor(ctime / 1000).toInt
val ctimeFractions = (ctime - ctimeSeconds).toInt
(ctimeSeconds, 0/*ctimeFractions*/)
}
}
val mtime = Files.getAttribute(file.toPath, "basic:lastModifiedTime").asInstanceOf[FileTime].toMillis
val mtimeSeconds = math.floor(mtime / 1000).toInt
val mtimeFractions = 0//(mtime - mtimeSeconds).toInt
val device = if (isUnix) Files.getAttribute(file.toPath, "unix:dev").asInstanceOf[Int] else 0
val inode = if (isUnix) Files.getAttribute(file.toPath, "unix:ino").asInstanceOf[Int] else 0
val mode = if (isUnix) Files.getAttribute(file.toPath, "unix:mode").asInstanceOf[Int] else 0 // 100644, 1000 0001 1010 0100 for regular file and readable
val uid = if (isUnix) Files.getAttribute(file.toPath, "unix:uid").asInstanceOf[Int] else 0
val gid = if (isUnix) Files.getAttribute(file.toPath, "unix:gid").asInstanceOf[Int] else 0
val size = Files.getAttribute(file.toPath, "basic:size").asInstanceOf[Long].toInt
FileStat(ctimeSeconds, ctimeFractions, mtimeSeconds, mtimeFractions, device, inode, mode, uid, gid, size)
}
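  // Illustrative usage sketch; the path is hypothetical:
  //
  //   FileUtil.createFileWithContents("/tmp/example.txt", "hello")
  //   val text = FileUtil.readString(new File("/tmp/example.txt"))
  //   val info = FileUtil.stat(new File("/tmp/example.txt"))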
} | kaisellgren/ScalaGit | src/main/scala/git/util/FileUtil.scala | Scala | mit | 4,055 |
package io.coppermine
case class Position(commit: Long, prepare: Long)
| YoEight/scala-eventstore-lean | src/main/scala/io/coppermine/Position.scala | Scala | bsd-3-clause | 72 |
package com.twitter.finagle.ssl
/**
* Protocols represent the versions of the TLS protocol which should
* be enabled with a given TLS [[Engine]].
*
* @note Java users: See [[ProtocolsConfig]].
*/
sealed trait Protocols
object Protocols {
/**
* Indicates that the determination for which TLS protocols are supported
* should be delegated to the engine factory.
*/
case object Unspecified extends Protocols
/**
* Indicates that only these specific protocols should be enabled for
* a particular engine.
*
* @param protocols A list of protocols which should be enabled for a
* particular engine. The set of protocols in this list must be a subset
* of the set of protocols supported by the underlying engine.
*
* {{{
* val protocols = Protocols.Enabled(Seq("TLSv1.2"))
* }}}
*/
case class Enabled(protocols: Seq[String]) extends Protocols
}
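// Illustrative usage sketch showing how the two cases are typically distinguished:
//
//   def describe(protocols: Protocols): String = protocols match {
//     case Protocols.Unspecified => "engine default protocols"
//     case Protocols.Enabled(ps) => ps.mkString(", ")
//   }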
| luciferous/finagle | finagle-core/src/main/scala/com/twitter/finagle/ssl/Protocols.scala | Scala | apache-2.0 | 899 |
package com.karasiq.nanoboard
import java.io.File
/**
* Official implementation compatibility util
* @see [[https://github.com/nanoboard/nanoboard]]
*/
object NanoboardLegacy {
/**
* Reads places.txt file in `nanoboard/1.*` client format
* @param file File path
*/
def placesFromTxt(file: String): Vector[String] = {
if (new File(file).isFile) {
val source = io.Source.fromFile(file, "UTF-8")
try {
source.getLines()
.filter(_.nonEmpty)
.toVector
} finally source.close()
} else {
Vector.empty
}
}
/**
* Reads categories.txt file in `nanoboard/1.*` client format
* @param file File path
*/
def categoriesFromTxt(file: String): Vector[NanoboardCategory] = {
if (new File(file).isFile) {
val source = io.Source.fromFile(file, "UTF-8")
try {
source
.getLines()
.filter(_.nonEmpty)
.grouped(2)
.collect {
case Seq(hash, name) ⇒
NanoboardCategory(hash, name)
}
.toVector
} finally source.close()
} else {
Vector.empty
}
}
}
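// Illustrative usage sketch; the file names are hypothetical, and both calls return
// Vector.empty when the given file does not exist:
//
//   val places     = NanoboardLegacy.placesFromTxt("places.txt")
//   val categories = NanoboardLegacy.categoriesFromTxt("categories.txt")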
| Karasiq/nanoboard | library/src/main/scala/com/karasiq/nanoboard/NanoboardLegacy.scala | Scala | apache-2.0 | 1,160 |
package scala.meta
package tests.prettyprinters
import scala.meta.internal.trees.Origin
import scala.meta.testkit.StructurallyEqual
import org.scalameta.logger
import org.scalatest.FunSuite
class PrettyPrinterSuite extends FunSuite {
implicit class XtensionResetOrigin[T <: Tree](tree: T) {
def resetAllOrigins: T = {
tree
.transform {
case tree: Tree => tree.withOrigin(Origin.None)
}
.asInstanceOf[T]
}
}
def checkOk(code: String): Unit = {
test(logger.revealWhitespace(code)) {
val before: Stat = code.parse[Stat].get.resetAllOrigins
val after: Stat = before.syntax.parse[Stat].get
StructurallyEqual(before, after) match {
case Left(err) =>
fail(
s"""Not Structurally equal: ${err.toString}:
|before: ${before.structure}
|after : ${after.structure}
""".stripMargin)
case _ => Nil
}
}
}
checkOk("val x = 1")
checkOk("""(_: Throwable) ⇒ 1""")
checkOk("""1 join (())""")
checkOk("""foo match{ case _ => _ => false}""")
}
| MasseGuillaume/scalameta | scalameta/testkit/src/test/scala/scala/meta/tests/prettyprinters/PrettyPrinterSuite.scala | Scala | bsd-3-clause | 1,112 |
/*
* InputAttrFolder.scala
* (Mellite)
*
* Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.mellite.impl.proc
import de.sciss.lucre.{Disposable, Folder, IdentMap, Obj}
import de.sciss.lucre.synth.Txn
import de.sciss.mellite.impl.proc.ProcObjView.LinkTarget
import de.sciss.span.Span
final class InputAttrFolder[T <: Txn[T]](val parent: ProcObjView.Timeline[T], val key: String,
f: Folder[T], tx0: T)
extends InputAttrImpl[T] {
override def toString: String = s"InputAttrFolder(parent = $parent, key = $key)"
type Entry = Obj[T]
protected def mkTarget(entry: Obj[T])(implicit tx: T): LinkTarget[T] =
new LinkTargetFolder[T](this, tx.newHandle(entry))
private[this] val fH = tx0.newHandle(f)
def folder(implicit tx: T): Folder[T] = fH()
protected val viewMap: IdentMap[T, Elem] = tx0.newIdentMap
// EDT
private[this] var edtSet = Set.empty[Elem] // XXX TODO --- do we need a multi-set in theory?
protected def elemOverlappingEDT(start: Long, stop: Long): Iterator[Elem] = edtSet.iterator
protected def elemAddedEDT (elem: Elem): Unit = edtSet += elem
protected def elemRemovedEDT(elem: Elem): Unit = edtSet -= elem
private[this] val observer: Disposable[T] =
f.changed.react { implicit tx => upd => upd.changes.foreach {
case Folder.Added (_ /* index */, child) =>
addAttrIn(Span.From(0L), entry = child, value = child, fire = true)
case Folder.Removed(_ /* index */, child) => removeAttrIn(entryId = child.id)
}} (tx0)
override def dispose()(implicit tx: T): Unit = {
super.dispose()
observer.dispose()
}
// ---- init ----
f.iterator(tx0).foreach { child =>
addAttrIn(Span.From(0L), entry = child, value = child, fire = false)(tx0)
}
} | Sciss/Mellite | app/src/main/scala/de/sciss/mellite/impl/proc/InputAttrFolder.scala | Scala | agpl-3.0 | 1,990 |
package blended.jms.bridge.internal
import blended.jms.utils.ProviderAware
import blended.testsupport.scalatest.LoggingFreeSpec
class ProviderFilterSpec extends LoggingFreeSpec {
case class Dummy(
vendor : String,
provider: String
) extends ProviderAware
"The Providerfilter should" - {
"match if filter conditions are correct" in {
val p1 = Dummy("TestVendor", "TestProvider")
assert(ProviderFilter("TestVendor", "TestProvider").matches(p1))
assert(ProviderFilter("TestVendor").matches(p1))
assert(ProviderFilter("TestVendor", "Test.*").matches(p1))
assert(ProviderFilter("TestVendor", ".*Provider").matches(p1))
}
"not match if filter conditions are not correct" in {
val p1 = Dummy("TestVendor", "TestProvider")
assert(!ProviderFilter("foo", "TestProvider").matches(p1))
assert(!ProviderFilter("foo").matches(p1))
assert(!ProviderFilter("TestVendor", "blah.*").matches(p1))
}
}
}
| lefou/blended | blended.jms.bridge/src/test/scala/blended/jms/bridge/internal/ProviderFilterSpec.scala | Scala | apache-2.0 | 980 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.logical
import org.apache.calcite.plan.RelOptRule.{any, operand}
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall}
import org.apache.calcite.rex.RexOver
import org.apache.flink.table.plan.nodes.logical.{FlinkLogicalCalc, FlinkLogicalSnapshot}
/**
* Transpose [[FlinkLogicalCalc]] past into [[FlinkLogicalSnapshot]].
*/
class CalcSnapshotTransposeRule extends RelOptRule(
operand(classOf[FlinkLogicalCalc],
operand(classOf[FlinkLogicalSnapshot], any())),
"CalcSnapshotTransposeRule") {
override def matches(call: RelOptRuleCall): Boolean = {
val calc = call.rel[FlinkLogicalCalc](0)
// Don't push a calc which contains windowed aggregates into a snapshot for now.
!RexOver.containsOver(calc.getProgram)
}
override def onMatch(call: RelOptRuleCall): Unit = {
val calc = call.rel[FlinkLogicalCalc](0)
val snapshot = call.rel[FlinkLogicalSnapshot](1)
    val newCalc = calc.copy(calc.getTraitSet, snapshot.getInputs)
    val newSnapshot = snapshot.copy(snapshot.getTraitSet, newCalc, snapshot.getPeriod)
call.transformTo(newSnapshot)
}
}
object CalcSnapshotTransposeRule {
val INSTANCE = new CalcSnapshotTransposeRule
}
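// Illustrative usage sketch: as with any Calcite rule, the singleton instance would be
// contributed to a planner or rule set, e.g. (assuming a RelOptPlanner `planner`;
// in Flink the rule is normally registered through the planner's logical rule sets):
//
//   planner.addRule(CalcSnapshotTransposeRule.INSTANCE)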
| shaoxuan-wang/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/rules/logical/CalcSnapshotTransposeRule.scala | Scala | apache-2.0 | 2,025 |
/*******************************************************************************
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
*
* Copyright (c) 2013,2014 by Peter Pilgrim, Addiscombe, Surrey, XeNoNiQUe UK
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU GPL v3.0
* which accompanies this distribution, and is available at:
* http://www.gnu.org/licenses/gpl-3.0.txt
*
* Developers:
* Peter Pilgrim -- design, development and implementation
* -- Blog: http://www.xenonique.co.uk/blog/
* -- Twitter: @peter_pilgrim
*
* Contributors:
*
*******************************************************************************/
package uk.co.xenonique.digitalone.servlet
import java.util.Date
import javax.servlet.annotation.WebServlet
import javax.servlet.http.{HttpServlet, HttpServletResponse, HttpServletRequest}
/**
* The type SimpleServlet
*
* @author Peter Pilgrim
*/
@WebServlet(Array("/simple"))
class SimpleServlet extends HttpServlet {
override def doGet(req: HttpServletRequest,
resp: HttpServletResponse): Unit = {
resp.setContentType("application/json");
val writer = resp.getWriter();
writer.println(
s"""
|{
| "name": "Two Tribes",
| "class": "${this.getClass().getName}",
| "date": "${new Date()}"
|}
""".stripMargin )
}
}
| peterpilgrim/digital-scala-javaone-2014 | src/main/scala/uk/co/xenonique/digitalone/servlet/SimpleServlet.scala | Scala | gpl-3.0 | 1,476 |
package org.elasticmq
sealed abstract class VisibilityTimeout
case class MillisVisibilityTimeout(millis: Long) extends VisibilityTimeout {
val seconds = millis / 1000
}
object MillisVisibilityTimeout {
def fromSeconds(seconds: Long) = MillisVisibilityTimeout(seconds * 1000)
}
object DefaultVisibilityTimeout extends VisibilityTimeout
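// Illustrative usage sketch:
//
//   val timeout = MillisVisibilityTimeout.fromSeconds(30)   // timeout.millis == 30000, timeout.seconds == 30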
| adamw/elasticmq | core/src/main/scala/org/elasticmq/MillisVisibilityTimeout.scala | Scala | apache-2.0 | 343 |
/*
* # Trove
*
* This file is part of Trove - A FREE desktop budgeting application that
* helps you track your finances, FREES you from complex budgeting, and
* enables you to build your TROVE of savings!
*
* Copyright © 2016-2019 Eric John Fredericks.
*
* Trove is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Trove is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Trove. If not, see <http://www.gnu.org/licenses/>.
*/
package trove.core
import trove.services.AccountsService
trait Project {
def name: String
def accountsService: AccountsService
}
| emanchgo/budgetfree | src/main/scala/trove/core/Project.scala | Scala | gpl-3.0 | 1,046 |
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.dispatch
import java.util.concurrent._
import akka.event.Logging.{ Debug, Error, LogEventException }
import akka.actor._
import akka.dispatch.sysmsg._
import akka.event.{ BusLogging, EventStream }
import com.typesafe.config.{ ConfigFactory, Config }
import akka.util.{ Unsafe, Index }
import scala.annotation.tailrec
import scala.concurrent.forkjoin.{ ForkJoinTask, ForkJoinPool }
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext
import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal
import scala.util.Try
import java.{ util ⇒ ju }
final case class Envelope private (val message: Any, val sender: ActorRef)
object Envelope {
def apply(message: Any, sender: ActorRef, system: ActorSystem): Envelope = {
if (message == null) throw new InvalidMessageException("Message is null")
new Envelope(message, if (sender ne Actor.noSender) sender else system.deadLetters)
}
}
final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cleanup: () ⇒ Unit) extends Batchable {
final override def isBatchable: Boolean = runnable match {
case b: Batchable ⇒ b.isBatchable
case _: scala.concurrent.OnCompleteRunnable ⇒ true
case _ ⇒ false
}
def run(): Unit =
try runnable.run() catch {
case NonFatal(e) ⇒ eventStream.publish(Error(e, "TaskInvocation", this.getClass, e.getMessage))
} finally cleanup()
}
/**
* INTERNAL API
*/
private[akka] trait LoadMetrics { self: Executor ⇒
def atFullThrottle(): Boolean
}
/**
* INTERNAL API
*/
private[akka] object MessageDispatcher {
val UNSCHEDULED = 0 //WARNING DO NOT CHANGE THE VALUE OF THIS: It relies on the faster init of 0 in AbstractMessageDispatcher
val SCHEDULED = 1
val RESCHEDULED = 2
// dispatcher debugging helper using println (see below)
// since this is a compile-time constant, scalac will elide code behind if (MessageDispatcher.debug) (RK checked with 2.9.1)
final val debug = false // Deliberately without type ascription to make it a compile-time constant
lazy val actors = new Index[MessageDispatcher, ActorRef](16, _ compareTo _)
def printActors(): Unit =
if (debug) {
for {
d ← actors.keys
a ← { println(d + " inhabitants: " + d.inhabitants); actors.valueIterator(d) }
} {
val status = if (a.isTerminated) " (terminated)" else " (alive)"
val messages = a match {
case r: ActorRefWithCell ⇒ " " + r.underlying.numberOfMessages + " messages"
case _ ⇒ " " + a.getClass
}
val parent = a match {
case i: InternalActorRef ⇒ ", parent: " + i.getParent
case _ ⇒ ""
}
println(" -> " + a + status + messages + parent)
}
}
}
abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator) extends AbstractMessageDispatcher with BatchingExecutor with ExecutionContextExecutor {
import MessageDispatcher._
import AbstractMessageDispatcher.{ inhabitantsOffset, shutdownScheduleOffset }
import configurator.prerequisites
val mailboxes = prerequisites.mailboxes
val eventStream = prerequisites.eventStream
@volatile private[this] var _inhabitantsDoNotCallMeDirectly: Long = _ // DO NOT TOUCH!
@volatile private[this] var _shutdownScheduleDoNotCallMeDirectly: Int = _ // DO NOT TOUCH!
@tailrec private final def addInhabitants(add: Long): Long = {
val c = inhabitants
val r = c + add
if (r < 0) {
// We haven't succeeded in decreasing the inhabitants yet but the simple fact that we're trying to
// go below zero means that there is an imbalance and we might as well throw the exception
val e = new IllegalStateException("ACTOR SYSTEM CORRUPTED!!! A dispatcher can't have less than 0 inhabitants!")
reportFailure(e)
throw e
}
if (Unsafe.instance.compareAndSwapLong(this, inhabitantsOffset, c, r)) r else addInhabitants(add)
}
final def inhabitants: Long = Unsafe.instance.getLongVolatile(this, inhabitantsOffset)
private final def shutdownSchedule: Int = Unsafe.instance.getIntVolatile(this, shutdownScheduleOffset)
private final def updateShutdownSchedule(expect: Int, update: Int): Boolean = Unsafe.instance.compareAndSwapInt(this, shutdownScheduleOffset, expect, update)
/**
* Creates and returns a mailbox for the given actor.
*/
protected[akka] def createMailbox(actor: Cell, mailboxType: MailboxType): Mailbox
/**
* Identifier of this dispatcher, corresponds to the full key
* of the dispatcher configuration.
*/
def id: String
/**
* Attaches the specified actor instance to this dispatcher, which includes
* scheduling it to run for the first time (Create() is expected to have
* been enqueued by the ActorCell upon mailbox creation).
*/
final def attach(actor: ActorCell): Unit = {
register(actor)
registerForExecution(actor.mailbox, false, true)
}
/**
* Detaches the specified actor instance from this dispatcher
*/
final def detach(actor: ActorCell): Unit = try unregister(actor) finally ifSensibleToDoSoThenScheduleShutdown()
final override protected def unbatchedExecute(r: Runnable): Unit = {
val invocation = TaskInvocation(eventStream, r, taskCleanup)
addInhabitants(+1)
try {
executeTask(invocation)
} catch {
case t: Throwable ⇒
addInhabitants(-1)
throw t
}
}
override def reportFailure(t: Throwable): Unit = t match {
case e: LogEventException ⇒ eventStream.publish(e.event)
case _ ⇒ eventStream.publish(Error(t, getClass.getName, getClass, t.getMessage))
}
@tailrec
private final def ifSensibleToDoSoThenScheduleShutdown(): Unit = {
if (inhabitants <= 0) shutdownSchedule match {
case UNSCHEDULED ⇒
if (updateShutdownSchedule(UNSCHEDULED, SCHEDULED)) scheduleShutdownAction()
else ifSensibleToDoSoThenScheduleShutdown()
case SCHEDULED ⇒
if (updateShutdownSchedule(SCHEDULED, RESCHEDULED)) ()
else ifSensibleToDoSoThenScheduleShutdown()
case RESCHEDULED ⇒
}
}
private def scheduleShutdownAction(): Unit = {
// IllegalStateException is thrown if scheduler has been shutdown
try prerequisites.scheduler.scheduleOnce(shutdownTimeout, shutdownAction)(new ExecutionContext {
override def execute(runnable: Runnable): Unit = runnable.run()
override def reportFailure(t: Throwable): Unit = MessageDispatcher.this.reportFailure(t)
}) catch {
case _: IllegalStateException ⇒ shutdown()
}
}
private final val taskCleanup: () ⇒ Unit = () ⇒ if (addInhabitants(-1) == 0) ifSensibleToDoSoThenScheduleShutdown()
/**
* If you override it, you must call it. But only ever once. See "attach" for only invocation.
*
* INTERNAL API
*/
protected[akka] def register(actor: ActorCell) {
if (debug) actors.put(this, actor.self)
addInhabitants(+1)
}
/**
* If you override it, you must call it. But only ever once. See "detach" for the only invocation
*
* INTERNAL API
*/
protected[akka] def unregister(actor: ActorCell) {
if (debug) actors.remove(this, actor.self)
addInhabitants(-1)
val mailBox = actor.swapMailbox(mailboxes.deadLetterMailbox)
mailBox.becomeClosed()
mailBox.cleanUp()
}
private val shutdownAction = new Runnable {
@tailrec
final def run() {
shutdownSchedule match {
case SCHEDULED ⇒
try {
if (inhabitants == 0) shutdown() //Warning, racy
} finally {
while (!updateShutdownSchedule(shutdownSchedule, UNSCHEDULED)) {}
}
case RESCHEDULED ⇒
if (updateShutdownSchedule(RESCHEDULED, SCHEDULED)) scheduleShutdownAction()
else run()
case UNSCHEDULED ⇒
}
}
}
/**
* When the dispatcher no longer has any actors registered, how long will it wait until it shuts itself down,
* defaulting to your akka configs "akka.actor.default-dispatcher.shutdown-timeout" or default specified in
* reference.conf
*
* INTERNAL API
*/
protected[akka] def shutdownTimeout: FiniteDuration
/**
* After the call to this method, the dispatcher mustn't begin any new message processing for the specified reference
*/
protected[akka] def suspend(actor: ActorCell): Unit = {
val mbox = actor.mailbox
if ((mbox.actor eq actor) && (mbox.dispatcher eq this))
mbox.suspend()
}
/*
* After the call to this method, the dispatcher must begin any new message processing for the specified reference
*/
protected[akka] def resume(actor: ActorCell): Unit = {
val mbox = actor.mailbox
if ((mbox.actor eq actor) && (mbox.dispatcher eq this) && mbox.resume())
registerForExecution(mbox, false, false)
}
/**
* Will be called when the dispatcher is to queue an invocation for execution
*
* INTERNAL API
*/
protected[akka] def systemDispatch(receiver: ActorCell, invocation: SystemMessage)
/**
* Will be called when the dispatcher is to queue an invocation for execution
*
* INTERNAL API
*/
protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope)
/**
* Suggest to register the provided mailbox for execution
*
* INTERNAL API
*/
protected[akka] def registerForExecution(mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean
// TODO check whether this should not actually be a property of the mailbox
/**
* INTERNAL API
*/
protected[akka] def throughput: Int
/**
* INTERNAL API
*/
protected[akka] def throughputDeadlineTime: Duration
/**
* INTERNAL API
*/
@inline protected[akka] final val isThroughputDeadlineTimeDefined = throughputDeadlineTime.toMillis > 0
/**
* INTERNAL API
*/
protected[akka] def executeTask(invocation: TaskInvocation)
/**
   * Called once each time an actor is detached from this dispatcher and this dispatcher has no actors left attached
* Must be idempotent
*
* INTERNAL API
*/
protected[akka] def shutdown(): Unit
}
/**
* An ExecutorServiceConfigurator is a class that given some prerequisites and a configuration can create instances of ExecutorService
*/
abstract class ExecutorServiceConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceFactoryProvider
/**
* Base class to be used for hooking in new dispatchers into Dispatchers.
*/
abstract class MessageDispatcherConfigurator(_config: Config, val prerequisites: DispatcherPrerequisites) {
val config: Config = new CachingConfig(_config)
/**
* Returns an instance of MessageDispatcher given the configuration.
* Depending on the needs the implementation may return a new instance for
* each invocation or return the same instance every time.
*/
def dispatcher(): MessageDispatcher
def configureExecutor(): ExecutorServiceConfigurator = {
def configurator(executor: String): ExecutorServiceConfigurator = executor match {
case null | "" | "fork-join-executor" ⇒ new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites)
case "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites)
case fqcn ⇒
val args = List(
classOf[Config] -> config,
classOf[DispatcherPrerequisites] -> prerequisites)
prerequisites.dynamicAccess.createInstanceFor[ExecutorServiceConfigurator](fqcn, args).recover({
case exception ⇒ throw new IllegalArgumentException(
("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s],
make sure it has an accessible constructor with a [%s,%s] signature""")
.format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), exception)
}).get
}
config.getString("executor") match {
case "default-executor" ⇒ new DefaultExecutorServiceConfigurator(config.getConfig("default-executor"), prerequisites, configurator(config.getString("default-executor.fallback")))
case other ⇒ configurator(other)
}
}
}
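// Illustrative configuration sketch for the executor selection above; the dispatcher
// name and the specific values are assumptions, while the keys follow the ones read by
// the configurators below:
//
//   my-dispatcher {
//     type = Dispatcher
//     executor = "fork-join-executor"
//     fork-join-executor {
//       parallelism-min = 2
//       parallelism-factor = 2.0
//       parallelism-max = 8
//     }
//     throughput = 5
//   }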
class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) {
val threadPoolConfig: ThreadPoolConfig = createThreadPoolConfigBuilder(config, prerequisites).config
protected def createThreadPoolConfigBuilder(config: Config, prerequisites: DispatcherPrerequisites): ThreadPoolConfigBuilder = {
import akka.util.Helpers.ConfigOps
ThreadPoolConfigBuilder(ThreadPoolConfig())
.setKeepAliveTime(config.getMillisDuration("keep-alive-time"))
.setAllowCoreThreadTimeout(config getBoolean "allow-core-timeout")
.setCorePoolSizeFromFactor(config getInt "core-pool-size-min", config getDouble "core-pool-size-factor", config getInt "core-pool-size-max")
.setMaxPoolSizeFromFactor(config getInt "max-pool-size-min", config getDouble "max-pool-size-factor", config getInt "max-pool-size-max")
.configure(
Some(config getInt "task-queue-size") flatMap {
case size if size > 0 ⇒
Some(config getString "task-queue-type") map {
case "array" ⇒ ThreadPoolConfig.arrayBlockingQueue(size, false) //TODO config fairness?
case "" | "linked" ⇒ ThreadPoolConfig.linkedBlockingQueue(size)
case x ⇒ throw new IllegalArgumentException("[%s] is not a valid task-queue-type [array|linked]!" format x)
} map { qf ⇒ (q: ThreadPoolConfigBuilder) ⇒ q.setQueueFactory(qf) }
case _ ⇒ None
})
}
def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory =
threadPoolConfig.createExecutorServiceFactory(id, threadFactory)
}
object ForkJoinExecutorConfigurator {
/**
* INTERNAL AKKA USAGE ONLY
*/
final class AkkaForkJoinPool(parallelism: Int,
threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
unhandledExceptionHandler: Thread.UncaughtExceptionHandler)
extends ForkJoinPool(parallelism, threadFactory, unhandledExceptionHandler, true) with LoadMetrics {
override def execute(r: Runnable): Unit =
if (r eq null) throw new NullPointerException else super.execute(new AkkaForkJoinTask(r))
def atFullThrottle(): Boolean = this.getActiveThreadCount() >= this.getParallelism()
}
/**
* INTERNAL AKKA USAGE ONLY
*/
@SerialVersionUID(1L)
final class AkkaForkJoinTask(runnable: Runnable) extends ForkJoinTask[Unit] {
override def getRawResult(): Unit = ()
override def setRawResult(unit: Unit): Unit = ()
final override def exec(): Boolean = try { runnable.run(); true } catch {
case anything: Throwable ⇒
val t = Thread.currentThread
t.getUncaughtExceptionHandler match {
case null ⇒
case some ⇒ some.uncaughtException(t, anything)
}
throw anything
}
}
}
class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) {
import ForkJoinExecutorConfigurator._
def validate(t: ThreadFactory): ForkJoinPool.ForkJoinWorkerThreadFactory = t match {
case correct: ForkJoinPool.ForkJoinWorkerThreadFactory ⇒ correct
    case x ⇒ throw new IllegalStateException("The prerequisite for the ForkJoinExecutorConfigurator is a ForkJoinPool.ForkJoinWorkerThreadFactory!")
}
class ForkJoinExecutorServiceFactory(val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
val parallelism: Int) extends ExecutorServiceFactory {
def createExecutorService: ExecutorService = new AkkaForkJoinPool(parallelism, threadFactory, MonitorableThreadFactory.doNothing)
}
final def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory = {
val tf = threadFactory match {
case m: MonitorableThreadFactory ⇒
// add the dispatcher id to the thread names
m.withName(m.name + "-" + id)
case other ⇒ other
}
new ForkJoinExecutorServiceFactory(
validate(tf),
ThreadPoolConfig.scaledPoolSize(
config.getInt("parallelism-min"),
config.getDouble("parallelism-factor"),
config.getInt("parallelism-max")))
}
}
class DefaultExecutorServiceConfigurator(config: Config, prerequisites: DispatcherPrerequisites, fallback: ExecutorServiceConfigurator) extends ExecutorServiceConfigurator(config, prerequisites) {
val provider: ExecutorServiceFactoryProvider =
prerequisites.defaultExecutionContext match {
case Some(ec) ⇒
prerequisites.eventStream.publish(Debug("DefaultExecutorServiceConfigurator", this.getClass, s"Using passed in ExecutionContext as default executor for this ActorSystem. If you want to use a different executor, please specify one in akka.actor.default-dispatcher.default-executor."))
new AbstractExecutorService with ExecutorServiceFactory with ExecutorServiceFactoryProvider {
def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory = this
def createExecutorService: ExecutorService = this
def shutdown(): Unit = ()
def isTerminated: Boolean = false
def awaitTermination(timeout: Long, unit: TimeUnit): Boolean = false
def shutdownNow(): ju.List[Runnable] = ju.Collections.emptyList()
def execute(command: Runnable): Unit = ec.execute(command)
def isShutdown: Boolean = false
}
case None ⇒ fallback
}
def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory =
provider.createExecutorServiceFactory(id, threadFactory)
}
| Fincore/org.spark-project.akka | actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala | Scala | mit | 18,294 |
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Paul Phillips
*/
package scala
package reflect
package internal
import scala.collection.mutable
import util.HashSet
import scala.annotation.tailrec
/** An abstraction for considering symbol pairs.
* One of the greatest sources of compiler bugs is that symbols can
* trivially lose their prefixes and turn into some completely different
* type with the smallest of errors. It is the exception not the rule
* that type comparisons are done correctly.
*
* This offers a small step toward coherence with two abstractions
* which come up over and over again:
*
* RelativeTo: operations relative to a prefix
* SymbolPair: two symbols being related somehow, plus the class
* in which the relation is being performed
*
* This is only a start, but it is a start.
*/
abstract class SymbolPairs {
val global: SymbolTable
import global._
/** Are types tp1 and tp2 equivalent seen from the perspective
* of `baseClass`? For instance List[Int] and Seq[Int] are =:=
* when viewed from IterableClass.
*/
def sameInBaseClass(baseClass: Symbol)(tp1: Type, tp2: Type) =
(tp1 baseType baseClass) =:= (tp2 baseType baseClass)
final case class SymbolPair(base: Symbol, low: Symbol, high: Symbol) {
private[this] val self = base.thisType
def pos = if (low.owner == base) low.pos else if (high.owner == base) high.pos else base.pos
def rootType: Type = self
def lowType: Type = self memberType low
def lowErased: Type = erasure.specialErasure(base)(low.tpe)
def lowClassBound: Type = classBoundAsSeen(low.tpe.typeSymbol)
def highType: Type = self memberType high
def highInfo: Type = self memberInfo high
def highErased: Type = erasure.specialErasure(base)(high.tpe)
def highClassBound: Type = classBoundAsSeen(high.tpe.typeSymbol)
def isErroneous = low.tpe.isErroneous || high.tpe.isErroneous
def sameKind = sameLength(low.typeParams, high.typeParams)
private def classBoundAsSeen(tsym: Symbol) =
tsym.classBound.asSeenFrom(rootType, tsym.owner)
private def memberDefString(sym: Symbol, where: Boolean) = {
val def_s = (
if (sym.isConstructor) s"$sym: ${self memberType sym}"
else sym defStringSeenAs (self memberType sym)
)
def_s + whereString(sym)
}
/** A string like ' at line 55' if the symbol is defined in the class
* under consideration, or ' in trait Foo' if defined elsewhere.
*/
private def whereString(sym: Symbol) =
if (sym.owner == base) " at line " + sym.pos.line else sym.locationString
def lowString = memberDefString(low, where = true)
def highString = memberDefString(high, where = true)
override def toString = sm"""
|Cursor(in $base) {
| high $highString
| erased $highErased
| infos ${high.infosString}
| low $lowString
| erased $lowErased
| infos ${low.infosString}
|}""".trim
}
/** The cursor class
* @param base the base class containing the participating symbols
*/
abstract class Cursor(val base: Symbol) {
cursor =>
final val self = base.thisType // The type relative to which symbols are seen.
private val decls = newScope // all the symbols which can take part in a pair.
private val size = bases.length
/** A symbol for which exclude returns true will not appear as
* either end of a pair.
*/
protected def exclude(sym: Symbol): Boolean
/** Does `sym1` match `sym2` such that (sym1, sym2) should be
* considered as a (lo, high) pair? Types always match. Term symbols
* match if their member types relative to `self` match.
*/
protected def matches(lo: Symbol, high: Symbol): Boolean
/** The parents and base classes of `base`. Can be refined in subclasses.
*/
protected def parents: List[Type] = base.info.parents
protected def bases: List[Symbol] = base.info.baseClasses
/** An implementation of BitSets as arrays (maybe consider collection.BitSet
* for that?) The main purpose of this is to implement
* intersectionContainsElement efficiently.
*/
private type BitSet = Array[Int]
/** A mapping from all base class indices to a bitset
* which indicates whether parents are subclasses.
*
* i \in subParents(j) iff
* exists p \in parents, b \in baseClasses:
* i = index(p)
* j = index(b)
* p isSubClass b
* p.baseType(b) == self.baseType(b)
*/
private val subParents = new Array[BitSet](size)
/** A map from baseclasses of <base> to ints, with smaller ints meaning lower in
* linearization order. Symbols that are not baseclasses map to -1.
*/
private val index = new mutable.HashMap[Symbol, Int] { override def default(key: Symbol) = -1 }
/** The scope entries that have already been visited as highSymbol
* (but may have been excluded via hasCommonParentAsSubclass.)
* These will not appear as lowSymbol.
*/
private val visited = HashSet[ScopeEntry]("visited", 64)
/** Initialization has to run now so decls is populated before
* the declaration of curEntry.
*/
init()
// The current low and high symbols; the high may be null.
private[this] var lowSymbol: Symbol = _
private[this] var highSymbol: Symbol = _
// The current entry candidates for low and high symbol.
private[this] var curEntry = decls.elems
private[this] var nextEntry = curEntry
// These fields are initially populated with a call to next().
next()
// populate the above data structures
private def init() {
// Fill `decls` with lower symbols shadowing higher ones
def fillDecls(bcs: List[Symbol], deferred: Boolean) {
if (!bcs.isEmpty) {
fillDecls(bcs.tail, deferred)
var e = bcs.head.info.decls.elems
while (e ne null) {
if (e.sym.initialize.isDeferred == deferred && !exclude(e.sym))
decls enter e.sym
e = e.next
}
}
}
var i = 0
for (bc <- bases) {
index(bc) = i
subParents(i) = new BitSet(size)
i += 1
}
for (p <- parents) {
val pIndex = index(p.typeSymbol)
if (pIndex >= 0)
for (bc <- p.baseClasses ; if sameInBaseClass(bc)(p, self)) {
val bcIndex = index(bc)
if (bcIndex >= 0)
include(subParents(bcIndex), pIndex)
}
}
// first, deferred (this will need to change if we change lookup rules!)
fillDecls(bases, deferred = true)
// then, concrete.
fillDecls(bases, deferred = false)
}
private def include(bs: BitSet, n: Int) {
val nshifted = n >> 5
val nmask = 1 << (n & 31)
bs(nshifted) |= nmask
}
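    // Worked example of the bit arithmetic above: include(bs, 37) computes
    // nshifted = 37 >> 5 = 1 and nmask = 1 << (37 & 31) = 1 << 5 = 32, so bs(1) |= 32,
    // i.e. bit 5 of word 1 is set.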
/** Implements `bs1 * bs2 * {0..n} != 0`.
* Used in hasCommonParentAsSubclass */
private def intersectionContainsElementLeq(bs1: BitSet, bs2: BitSet, n: Int): Boolean = {
val nshifted = n >> 5
val nmask = 1 << (n & 31)
var i = 0
while (i < nshifted) {
if ((bs1(i) & bs2(i)) != 0) return true
i += 1
}
(bs1(nshifted) & bs2(nshifted) & (nmask | nmask - 1)) != 0
}
/** Do `sym1` and `sym2` have a common subclass in `parents`?
* In that case we do not follow their pairs.
*/
private def hasCommonParentAsSubclass(sym1: Symbol, sym2: Symbol) = {
val index1 = index(sym1.owner)
(index1 >= 0) && {
val index2 = index(sym2.owner)
(index2 >= 0) && {
intersectionContainsElementLeq(
subParents(index1), subParents(index2), index1 min index2)
}
}
}
@tailrec private def advanceNextEntry() {
if (nextEntry ne null) {
nextEntry = decls lookupNextEntry nextEntry
if (nextEntry ne null) {
val high = nextEntry.sym
val isMatch = matches(lowSymbol, high) && { visited addEntry nextEntry ; true } // side-effect visited on all matches
// skip nextEntry if a class in `parents` is a subclass of the
// owners of both low and high.
if (isMatch && !hasCommonParentAsSubclass(lowSymbol, high))
highSymbol = high
else
advanceNextEntry()
}
}
}
@tailrec private def advanceCurEntry() {
if (curEntry ne null) {
curEntry = curEntry.next
if (curEntry ne null) {
if (visited(curEntry) || exclude(curEntry.sym))
advanceCurEntry()
else
nextEntry = curEntry
}
}
}
/** The `low` and `high` symbol. In the context of overriding pairs,
* low == overriding and high == overridden.
*/
def low = lowSymbol
def high = highSymbol
def hasNext = curEntry ne null
def currentPair = new SymbolPair(base, low, high)
def iterator = new Iterator[SymbolPair] {
def hasNext = cursor.hasNext
def next() = try cursor.currentPair finally cursor.next()
}
// Note that next is called once during object initialization to
// populate the fields tracking the current symbol pair.
def next() {
if (curEntry ne null) {
lowSymbol = curEntry.sym
advanceNextEntry() // sets highSymbol
if (nextEntry eq null) {
advanceCurEntry()
next()
}
}
}
}
}
| felixmulder/scala | src/reflect/scala/reflect/internal/SymbolPairs.scala | Scala | bsd-3-clause | 9,613 |
package akka.contrib.persistence.mongodb
import akka.actor.ActorSystem
import akka.persistence.{SelectedSnapshot, SnapshotMetadata}
import akka.serialization.SerializationExtension
import akka.testkit.TestKit
import com.mongodb.casbah.Imports._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class CasbahPersistenceSnapshotterSpec extends TestKit(ActorSystem("unit-test")) with CasbahPersistenceSpec {
import akka.contrib.persistence.mongodb.CasbahPersistenceSnapshotter._
import akka.contrib.persistence.mongodb.SnapshottingFieldNames._
implicit val serialization = SerializationExtension(system)
trait Fixture {
val underTest = new CasbahPersistenceSnapshotter(driver)
val records = List(10, 20, 30).map { sq =>
SelectedSnapshot(SnapshotMetadata("unit-test", sq, 10 * sq), "snapshot-data")
} :+ SelectedSnapshot(SnapshotMetadata("unit-test", 30, 10000), "snapshot-data")
}
"A mongo snapshot implementation" should "serialize and deserialize snapshots" in new Fixture {
val snapshot = records.head
val serialized = serializeSnapshot(snapshot)
serialized(PROCESSOR_ID) should be("unit-test")
serialized(SEQUENCE_NUMBER) should be(10)
serialized(TIMESTAMP) should be(100)
val deserialized = deserializeSnapshot(serialized)
deserialized.metadata.persistenceId should be("unit-test")
deserialized.metadata.sequenceNr should be(10)
deserialized.metadata.timestamp should be(100)
deserialized.snapshot should be("snapshot-data")
}
it should "create an appropriate index" in new Fixture {
withSnapshot { snapshot =>
driver.snaps
val idx = snapshot.getIndexInfo.filter(obj => obj("name").equals(driver.snapsIndexName)).head
idx("unique") should ===(true)
idx("key") should be(MongoDBObject(PROCESSOR_ID -> 1, SEQUENCE_NUMBER -> -1, TIMESTAMP -> -1))
}
}
it should "find nothing by sequence where time is earlier than first snapshot" in new Fixture {
withSnapshot { snapshot =>
snapshot.insert(records: _*)
underTest.findYoungestSnapshotByMaxSequence("unit-test", 10, 10).value.get.get shouldBe None
}
}
it should "find a prior sequence where time is earlier than first snapshot for the max sequence" in new Fixture {
withSnapshot { snapshot =>
snapshot.insert(records: _*)
underTest.findYoungestSnapshotByMaxSequence("unit-test", 30, 250).value.get.get shouldBe
Some(SelectedSnapshot(SnapshotMetadata("unit-test", 20, 200), "snapshot-data"))
}
}
it should "find the first snapshot by sequence where time is between the first and second snapshot" in new Fixture {
withSnapshot { snapshot =>
snapshot.insert(records: _*)
underTest.findYoungestSnapshotByMaxSequence("unit-test", 30, 350).value.get.get shouldBe
Some(SelectedSnapshot(SnapshotMetadata("unit-test", 30, 300), "snapshot-data"))
}
}
it should "find the last snapshot by sequence where time is after the second snapshot" in new Fixture {
withSnapshot { snapshot =>
snapshot.insert(records: _*)
underTest.findYoungestSnapshotByMaxSequence("unit-test", 30, 25000).value.get.get shouldBe
Some(SelectedSnapshot(SnapshotMetadata("unit-test", 30, 10000), "snapshot-data"))
}
}
it should "save a snapshot" in new Fixture {
withSnapshot { snapshot =>
snapshot.insert(records: _*)
underTest.saveSnapshot(SelectedSnapshot(SnapshotMetadata("unit-test", 4, 1000), "snapshot-payload"))
val saved = snapshot.findOne(MongoDBObject(SEQUENCE_NUMBER -> 4)).get
saved(PROCESSOR_ID) should be("unit-test")
saved(SEQUENCE_NUMBER) should be(4)
saved(TIMESTAMP) should be(1000)
}
}
it should "not delete non-existent snapshots" in new Fixture {
withSnapshot { snapshot =>
snapshot.insert(records: _*)
snapshot.size should be(4)
underTest.deleteSnapshot("unit-test", 3, 0)
snapshot.size should be(4)
}
}
it should "only delete the specified snapshot" in new Fixture {
withSnapshot { snapshot =>
snapshot.insert(records: _*)
snapshot.size should be(4)
underTest.deleteSnapshot("unit-test", 30, 300)
snapshot.size should be(3)
val result = snapshot.findOne($and(SEQUENCE_NUMBER $eq 30, TIMESTAMP $eq 10000))
result should be('defined)
}
}
it should "delete nothing if nothing matches the criteria" in new Fixture {
withSnapshot { snapshot =>
snapshot.insert(records: _*)
snapshot.size should be(4)
underTest.deleteMatchingSnapshots("unit-test", 10, 50)
snapshot.size should be(4)
}
}
it should "delete only what matches the criteria" in new Fixture {
withSnapshot { snapshot =>
snapshot.insert(records: _*)
snapshot.size should be(4)
underTest.deleteMatchingSnapshots("unit-test", 30, 350)
snapshot.size should be(1)
snapshot.findOne($and(PROCESSOR_ID $eq "unit-test", SEQUENCE_NUMBER $eq 30, TIMESTAMP $eq 10000)) shouldBe defined
}
}
it should "read legacy snapshot formats" in new Fixture {
withSnapshot { snapshot =>
val legacies = records.map(CasbahPersistenceSnapshotter.legacySerializeSnapshot)
snapshot.insert(legacies: _*)
snapshot.size should be(4)
snapshot.foreach { dbo =>
deserializeSnapshot(dbo).metadata.persistenceId should be ("unit-test")
}
}
}
it should "read mixed snapshot formats" in new Fixture {
withSnapshot { snapshot =>
val legacies = records.take(2).map(CasbahPersistenceSnapshotter.legacySerializeSnapshot)
val newVersions = records.drop(2).map(CasbahPersistenceSnapshotter.serializeSnapshot)
snapshot.insert(legacies ++ newVersions : _*)
snapshot.size should be(4)
snapshot.foreach { dbo =>
deserializeSnapshot(dbo).metadata.persistenceId should be ("unit-test")
}
}
}
} | twillouer/akka-persistence-mongo | casbah/src/test/scala/akka/contrib/persistence/mongodb/CasbahPersistenceSnapshotterSpec.scala | Scala | apache-2.0 | 5,964 |
package io.buoyant.namerd
package iface
import com.twitter.finagle.netty4.ssl.server.Netty4ServerEngineFactory
import com.twitter.finagle.ssl.server.SslServerEngineFactory
import io.buoyant.config.Parser
import org.scalatest.FunSuite
class MeshInterpreterInitializerTest extends FunSuite {
test("address") {
val yaml = """
|kind: io.l5d.mesh
|ip: 1.2.3.4
|port: 1234
""".stripMargin
val config = Parser
.objectMapper(
yaml,
Iterable(Seq(new MeshIfaceInitializer))
).readValue[MeshIfaceConfig](yaml)
assert(config.addr.getHostString == "1.2.3.4")
assert(config.addr.getPort == 1234)
}
test("tls") {
val yaml = """
|kind: io.l5d.mesh
|tls:
| certPath: cert.pem
| keyPath: key.pem
| caCertPath: cacert.pem
| ciphers:
| - "foo"
| - "bar"
| requireClientAuth: true
""".stripMargin
val config = Parser
.objectMapper(
yaml,
Iterable(Seq(new MeshIfaceInitializer))
).readValue[MeshIfaceConfig](yaml)
val tls = config.tls.get
assert(tls.certPath == "cert.pem")
assert(tls.keyPath == "key.pem")
assert(tls.caCertPath == Some("cacert.pem"))
assert(tls.ciphers == Some(List("foo", "bar")))
assert(tls.requireClientAuth == Some(true))
assert(config.tlsParams[SslServerEngineFactory.Param].factory.isInstanceOf[Netty4ServerEngineFactory])
}
}
| denverwilliams/linkerd | namerd/iface/mesh/src/test/scala/io/buoyant/namerd/iface/MeshIfaceInitializerTest.scala | Scala | apache-2.0 | 1,441 |
/*
* Copyright 2015 - 2016 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rbmhtechnology.eventuate
import java.io.File
import akka.actor._
import com.rbmhtechnology.eventuate.log._
import com.rbmhtechnology.eventuate.log.leveldb._
import com.rbmhtechnology.eventuate.utilities.RestarterActor
import com.typesafe.config.ConfigFactory
trait LocationCleanupLeveldb extends LocationCleanup {
override def storageLocations: List[File] =
List("eventuate.log.leveldb.dir", "eventuate.snapshot.filesystem.dir").map(s => new File(config.getString(s)))
}
object SingleLocationSpecLeveldb {
object TestEventLog {
def props(logId: String, batching: Boolean): Props = {
val logProps = Props(new TestEventLog(logId)).withDispatcher("eventuate.log.dispatchers.write-dispatcher")
if (batching) Props(new BatchingLayer(logProps)) else logProps
}
}
class TestEventLog(id: String) extends LeveldbEventLog(id, "log-test") with SingleLocationSpec.TestEventLog[LeveldbEventLogState] {
override def unhandled(message: Any): Unit = message match {
case "boom" => throw IntegrationTestException
case "dir" => sender() ! logDir
case _ => super.unhandled(message)
}
}
}
trait SingleLocationSpecLeveldb extends SingleLocationSpec with LocationCleanupLeveldb {
import SingleLocationSpecLeveldb._
private var _log: ActorRef = _
override def beforeEach(): Unit = {
super.beforeEach()
_log = system.actorOf(logProps(logId))
}
def log: ActorRef =
_log
def logProps(logId: String): Props =
RestarterActor.props(TestEventLog.props(logId, batching))
}
trait MultiLocationSpecLeveldb extends MultiLocationSpec with LocationCleanupLeveldb {
override val logFactory: String => Props = id => LeveldbEventLog.props(id)
override val providerConfig = ConfigFactory.parseString(
s"""
|eventuate.log.leveldb.dir = target/test-log
|eventuate.log.leveldb.index-update-limit = 3
|eventuate.log.leveldb.deletion-retry-delay = 1 ms
""".stripMargin)
}
| ianclegg/eventuate | eventuate-log-leveldb/src/it/scala/com/rbmhtechnology/eventuate/LocationSpecLeveldb.scala | Scala | apache-2.0 | 2,655 |
/*
biojava-adam BioJava and ADAM integration.
Copyright (c) 2017-2022 held jointly by the individual authors.
This library is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation; either version 3 of the License, or (at
your option) any later version.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this library; if not, write to the Free Software Foundation,
Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
> http://www.fsf.org/licensing/licenses/lgpl.html
> http://www.opensource.org/licenses/lgpl-license.php
*/
import org.slf4j.LoggerFactory
val logger = LoggerFactory.getLogger("loadFastaProtein")
import org.apache.log4j.{ Level, Logger }
Logger.getLogger("loadFastaProtein").setLevel(Level.INFO)
Logger.getLogger("org.biojava").setLevel(Level.INFO)
import org.biojava.nbio.adam.BiojavaAdamContext
val bac = BiojavaAdamContext(sc)
val inputPath = Option(System.getenv("INPUT"))
val outputPath = Option(System.getenv("OUTPUT"))
if (inputPath.isEmpty || outputPath.isEmpty) {
logger.error("INPUT and OUTPUT environment variables are required")
System.exit(1)
}
val sequences = bac.loadBiojavaFastaProtein(inputPath.get)
logger.info("Saving protein sequences to output path %s ...".format(outputPath.get))
sequences.save(outputPath.get, asSingleFile = true, disableFastConcat = false)
logger.info("Done")
System.exit(0)
| heuermh/biojava-adam | scripts/loadFastaProtein.scala | Scala | lgpl-3.0 | 1,800 |
/* Generated File */
package models.table.customer
import com.kyleu.projectile.services.database.slick.SlickQueryService.imports._
import java.time.ZonedDateTime
import models.customer.RentalRow
import models.table.store.{InventoryRowTable, StaffRowTable}
import scala.language.higherKinds
object RentalRowTable {
val query = TableQuery[RentalRowTable]
def getByPrimaryKey(rentalId: Long) = query.filter(_.rentalId === rentalId).result.headOption
def getByPrimaryKeySeq(rentalIdSeq: Seq[Long]) = query.filter(_.rentalId.inSet(rentalIdSeq)).result
def getByStaffId(staffId: Int) = query.filter(_.staffId === staffId).result
def getByStaffIdSeq(staffIdSeq: Seq[Int]) = query.filter(_.staffId.inSet(staffIdSeq)).result
def getByInventoryId(inventoryId: Long) = query.filter(_.inventoryId === inventoryId).result
def getByInventoryIdSeq(inventoryIdSeq: Seq[Long]) = query.filter(_.inventoryId.inSet(inventoryIdSeq)).result
def getByCustomerId(customerId: Int) = query.filter(_.customerId === customerId).result
def getByCustomerIdSeq(customerIdSeq: Seq[Int]) = query.filter(_.customerId.inSet(customerIdSeq)).result
implicit class RentalRowTableExtensions[C[_]](q: Query[RentalRowTable, RentalRow, C]) {
def withStaffRow = q.join(StaffRowTable.query).on(_.staffId === _.staffId)
def withStaffRowOpt = q.joinLeft(StaffRowTable.query).on(_.staffId === _.staffId)
def withInventoryRow = q.join(InventoryRowTable.query).on(_.inventoryId === _.inventoryId)
def withInventoryRowOpt = q.joinLeft(InventoryRowTable.query).on(_.inventoryId === _.inventoryId)
def withCustomerRow = q.join(CustomerRowTable.query).on(_.customerId === _.customerId)
def withCustomerRowOpt = q.joinLeft(CustomerRowTable.query).on(_.customerId === _.customerId)
}
}
class RentalRowTable(tag: slick.lifted.Tag) extends Table[RentalRow](tag, "rental") {
val rentalId = column[Long]("rental_id", O.PrimaryKey, O.AutoInc)
val rentalDate = column[ZonedDateTime]("rental_date")
val inventoryId = column[Long]("inventory_id")
val customerId = column[Int]("customer_id")
val returnDate = column[Option[ZonedDateTime]]("return_date")
val staffId = column[Int]("staff_id")
val lastUpdate = column[ZonedDateTime]("last_update")
override val * = (rentalId, rentalDate, inventoryId, customerId, returnDate, staffId, lastUpdate) <> (
(RentalRow.apply _).tupled,
RentalRow.unapply
)
}
| KyleU/boilerplay | app/models/table/customer/RentalRowTable.scala | Scala | cc0-1.0 | 2,417 |
package com.chainstaysoftware.unitofmeasure
import EqualsHelper.DoubleWithEpsilonEquals
/**
* Holds a speed value like 100kph.
* example - 100kph would be SpeedQuantity(Quantity[LengthUnit](100,
* LengthUnit.Meter, EngineeringScale.KILO), TimeUnit.Hour, EngineeringScale.NONE))
*/
case class SpeedQuantity(length: Quantity[LengthUnit], timeUnit: TimeUnit, timeScale: EngineeringScale) {
/**
* Compares this {@link SpeedQuantity} with the specified {@link SpeedQuantity}.
   * Two {@link SpeedQuantity} objects that are equal when converted to a common
   * {@link EngineeringScale} and {@link MeasurementUnit} are considered equal by this method.
* This method is provided in preference to individual methods for each of the
* six boolean comparison operators (<, ==, >, >=, !=, <=). The suggested idiom
* for performing these comparisons is:
* (x.compareTo(y) <op> 0), where <op> is one of the six comparison operators.
* @return -1, 0, or 1 as this Quantity is numerically less than, equal to,
* or greater than val.
*/
def compareTo(other: SpeedQuantity, epsilon: Double = 0.001): Int = {
val otherMatchedTime = other.convertTimeTo(timeUnit, timeScale)
val otherMatched = otherMatchedTime.convertUnitsTo(length.measurementUnit).convertScaleTo(length.scale)
val otherMatchedValue = otherMatched.length.value
implicit val precision = Precision(epsilon)
if (length.value ~= otherMatchedValue)
0
else if (length.value < otherMatchedValue)
-1
else
1
}
def convertTimeTo(desiredTimeUnit: TimeUnit, desiredTimeScale: EngineeringScale) = {
val divisor = Quantity[TimeUnit](1, timeUnit, timeScale).convertScaleTo(desiredTimeScale).convertUnitsTo(desiredTimeUnit)
SpeedQuantity(Quantity[LengthUnit](length.value / divisor.value,
length.measurementUnit, length.scale),
desiredTimeUnit, desiredTimeScale)
}
def convertScaleTo(desiredScale: EngineeringScale) =
SpeedQuantity(length.convertScaleTo(desiredScale),
timeUnit, timeScale)
def convertUnitsTo(desiredUnit: LengthUnit) =
SpeedQuantity(length.convertUnitsTo(desiredUnit), timeUnit, timeScale)
private def epsilonEquals(value: Double, y: Double, epsilon: Double) =
{
assert(epsilon >= 0.0, "epsilon must be greater than or equal to zero")
value == y || Math.abs(value - y) <= epsilon
}
}
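/**
 * Illustrative usage sketch, not part of the original source: it reuses the
 * constructor shape shown in the class documentation above (LengthUnit.Meter,
 * EngineeringScale.KILO/NONE, TimeUnit.Hour); the value names are made up.
 */
object SpeedQuantityUsageSketch {
  val kmh100 = SpeedQuantity(Quantity[LengthUnit](100, LengthUnit.Meter, EngineeringScale.KILO), TimeUnit.Hour, EngineeringScale.NONE)
  val kmh50 = SpeedQuantity(Quantity[LengthUnit](50, LengthUnit.Meter, EngineeringScale.KILO), TimeUnit.Hour, EngineeringScale.NONE)
  // The compareTo idiom from the scaladoc: (x.compareTo(y) <op> 0)
  val isFaster: Boolean = kmh100.compareTo(kmh50) > 0
}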
| ricemery/unitofmeasure | src/main/scala/com/chainstaysoftware/unitofmeasure/SpeedQuantity.scala | Scala | apache-2.0 | 2,388 |
package test;
object Test {
class Editor {
private object extraListener {
def h : AnyRef = extraListener
}
def f = extraListener.h
}
def main(args : Array[String]) : Unit = (new Editor).f
}
| loskutov/intellij-scala | testdata/scalacTests/pos/t1123.scala | Scala | apache-2.0 | 215 |
/*
* Copyright 2016-2020 Daniel Urban and contributors listed in AUTHORS
* Copyright 2020 Nokia
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.lib
import java.net.{ InetAddress, InetSocketAddress }
import scala.concurrent.Future
import scala.concurrent.Await
import scala.concurrent.duration._
import cats.effect.{ IO, ContextShift }
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl._
import akka.util.{ ByteString }
import scodec.bits.BitVector
import scodec.stream.{ StreamEncoder, StreamDecoder }
import fs2.interop.reactivestreams._
import dev.tauri.seals.scodec.StreamCodecs._
import dev.tauri.seals.scodec.StreamCodecs.{ pipe => decPipe }
import Protocol.v1.{ Request, Response, Seed, Random }
object Client {
val reqCodec: StreamEncoder[Request] = streamEncoderFromReified[Request]
val resCodec: StreamDecoder[Response] = streamDecoderFromReified[Response]
def main(args: Array[String]): Unit = {
implicit val sys: ActorSystem = ActorSystem("ClientSystem")
try {
val resp = Await.result(client(1234), 10.seconds)
println(resp)
} finally {
sys.terminate()
}
}
def client(port: Int)(implicit sys: ActorSystem, mat: Materializer): Future[Vector[Response]] = {
val addr = new InetSocketAddress(InetAddress.getLoopbackAddress, port)
Tcp().outgoingConnection(addr).joinMat(logic)(Keep.right).run()
}
def logic(implicit sys: ActorSystem): Flow[ByteString, ByteString, Future[Vector[Response]]] = {
implicit val cs: ContextShift[IO] = IO.contextShift(sys.dispatcher)
val requests = fs2.Stream(Seed(0xabcdL), Random(1, 100)).covary[IO]
val source = Source
.fromPublisher(reqCodec.encode[IO](requests).toUnicastPublisher)
.map(bv => ByteString.fromArrayUnsafe(bv.toByteArray))
// TODO: this would be much less ugly, if we had a decoder `Flow`
val buffer = fs2.concurrent.Queue.unbounded[IO, Option[BitVector]].unsafeRunSync()
val decode: Flow[ByteString, Response, NotUsed] = Flow.fromSinkAndSource(
Sink.onComplete { _ =>
buffer.enqueue1(None).unsafeRunSync()
}.contramap[ByteString] { x =>
buffer.enqueue1(Some(BitVector.view(x.toArray))).unsafeRunSync()
},
Source.fromPublisher(buffer
.dequeue
.unNoneTerminate
.through(decPipe[IO, Response])
.toUnicastPublisher
)
)
val sink: Sink[ByteString, Future[Vector[Response]]] = decode.toMat(
Sink.fold(Vector.empty[Response])(_ :+ _)
)(Keep.right)
Flow.fromSinkAndSourceMat(sink, source)(Keep.left)
}
}
| durban/seals | examples/lib/client/src/main/scala/com/example/lib/Client.scala | Scala | apache-2.0 | 3,185 |
package com.sksamuel.scapegoat.inspections.math
import com.sksamuel.scapegoat._
/**
* @author Stephen Samuel
*/
class BigDecimalDoubleConstructor
extends Inspection(
text = "Big decimal double constructor",
defaultLevel = Levels.Warning,
description = "Checks for use of BigDecimal(double) which can be unsafe.",
explanation =
"The results of this constructor can be somewhat unpredictable. E.g. writing new BigDecimal(0.1) in Java creates a BigDecimal which is actually equal to 0.1000000000000000055511151231257827021181583404541015625. This is because 0.1 cannot be represented exactly as a double."
) {
def inspector(context: InspectionContext): Inspector =
new Inspector(context) {
override def postTyperTraverser =
new context.Traverser {
import context.global._
import definitions.{DoubleClass, FloatClass}
private def isBigDecimal(pack: Tree) =
pack.toString == "scala.`package`.BigDecimal" || pack.toString == "java.math.BigDecimal"
private def isFloatingPointType(tree: Tree) =
tree.tpe <:< FloatClass.tpe || tree.tpe <:< DoubleClass.tpe
override def inspect(tree: Tree): Unit = {
tree match {
case Apply(Select(pack, TermName("apply")), arg :: _)
if isBigDecimal(pack) && isFloatingPointType(arg) =>
context.warn(tree.pos, self, tree.toString.take(100))
case Apply(Select(New(pack), nme.CONSTRUCTOR), arg :: _)
if isBigDecimal(pack) && isFloatingPointType(arg) =>
context.warn(tree.pos, self, tree.toString.take(100))
case _ => continue(tree)
}
}
}
}
}
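/**
 * Illustrative sketch, not part of the plugin itself: the kind of call this
 * inspection warns about, next to the string-based constructor that yields the
 * exact decimal value.
 */
object BigDecimalDoubleConstructorExample {
  val flagged = new java.math.BigDecimal(0.1) // ~0.1000000000000000055511151231257827...
  val exact = new java.math.BigDecimal("0.1") // exactly 0.1
}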
| sksamuel/scalac-scapegoat-plugin | src/main/scala/com/sksamuel/scapegoat/inspections/math/BigDecimalDoubleConstructor.scala | Scala | apache-2.0 | 1,763 |
package plantae.citrus.mqtt.actors.connection
import java.net.InetSocketAddress
import akka.actor.{Actor, ActorLogging, Props}
import akka.io.{IO, Tcp}
import plantae.citrus.mqtt.actors.SystemRoot
class Server extends Actor with ActorLogging {
import Tcp._
import context.system
IO(Tcp) ! Bind(self, new InetSocketAddress(
SystemRoot.config.getString("mqtt.broker.hostname"),
SystemRoot.config.getInt("mqtt.broker.port"))
, backlog = 1023)
def receive = {
case Bound(localAddress) =>
case CommandFailed(_: Bind) =>
log.error("bind failure")
context stop self
case Connected(remote, local) =>
log.info("new connection" + remote)
sender ! Register(context.actorOf(Props(classOf[PacketBridge], sender)))
}
}
| sureddy/mqttd | src/main/scala-2.11/plantae/citrus/mqtt/actors/connection/Server.scala | Scala | mit | 771 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.tools
import java.io.File
import java.net.{MalformedURLException, URL}
import java.util
import com.beust.jcommander.{IValueValidator, Parameter, ParameterException}
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory
import org.locationtech.geomesa.fs.FileSystemDataStore
import org.locationtech.geomesa.fs.FileSystemDataStoreFactory.FileSystemDataStoreParams
import org.locationtech.geomesa.fs.storage.common.FileSystemStorageFactory
import org.locationtech.geomesa.fs.tools.FsDataStoreCommand.FsParams
import org.locationtech.geomesa.tools.DataStoreCommand
import org.locationtech.geomesa.tools.utils.ParameterConverters.KeyValueConverter
/**
  * Base trait for FSDS (FileSystem DataStore) commands
*/
trait FsDataStoreCommand extends DataStoreCommand[FileSystemDataStore] {
import scala.collection.JavaConverters._
override def params: FsParams
override def connection: Map[String, String] = {
FsDataStoreCommand.configureURLFactory()
val url = try {
      if (params.path.matches("""\w+://.*""")) {
new URL(params.path)
} else {
new File(params.path).toURI.toURL
}
} catch {
case e: MalformedURLException => throw new ParameterException(s"Invalid URL ${params.path}: ", e)
}
val builder = Map.newBuilder[String, String]
builder += (FileSystemDataStoreParams.PathParam.getName -> url.toString)
if (params.configuration != null && !params.configuration.isEmpty) {
      builder += (FileSystemDataStoreParams.ConfParam.getName -> params.configuration.asScala.mkString("\n"))
}
builder.result()
}
}
object FsDataStoreCommand {
private var urlStreamHandlerSet = false
def configureURLFactory(): Unit = synchronized {
if (!urlStreamHandlerSet) {
URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory())
urlStreamHandlerSet = true
}
}
trait FsParams {
@Parameter(names = Array("--path", "-p"), description = "Path to root of filesystem datastore", required = true)
var path: String = _
@Parameter(names = Array("--config"), description = "Configuration properties, in the form k=v", required = false, variableArity = true)
var configuration: java.util.List[String] = _
}
trait EncodingParam {
@Parameter(names = Array("--encoding", "-e"), description = "Encoding (parquet, orc, converter, etc)", validateValueWith = classOf[EncodingValidator], required = true)
var encoding: String = _
}
trait PartitionParam {
@Parameter(names = Array("--partitions"), description = "Partitions (if empty all partitions will be used)", required = false, variableArity = true)
var partitions: java.util.List[String] = new util.ArrayList[String]()
}
trait SchemeParams {
@Parameter(names = Array("--partition-scheme"), description = "PartitionScheme typesafe config string or file", required = true)
var scheme: java.lang.String = _
@Parameter(names = Array("--leaf-storage"), description = "Use Leaf Storage for Partition Scheme", required = false, arity = 1)
var leafStorage: java.lang.Boolean = true
@Parameter(names = Array("--storage-opt"), variableArity = true, description = "Additional storage opts (k=v)", required = false, converter = classOf[KeyValueConverter])
var storageOpts: java.util.List[(String, String)] = new java.util.ArrayList[(String, String)]()
}
class EncodingValidator extends IValueValidator[String] {
override def validate(name: String, value: String): Unit = {
try {
FileSystemStorageFactory.factory(value)
} catch {
case _: IllegalArgumentException =>
throw new ParameterException(s"$value is not a valid encoding for parameter $name." +
s"Available encodings are: ${FileSystemStorageFactory.factories().map(_.getEncoding).mkString(", ")}")
}
}
}
}
| jahhulbert-ccri/geomesa | geomesa-fs/geomesa-fs-tools/src/main/scala/org/locationtech/geomesa/fs/tools/FsDataStoreCommand.scala | Scala | apache-2.0 | 4,312 |
package xyz.seto.obscene
import xyz.seto.obscene.utils.Rectangle
import java.io.DataInput
import java.io.DataOutput
import scala.collection.mutable.ListBuffer
/** A single stroke of a gesture,
  * comprising a list of gesture points and
  * their rectangular bounding box
*/
class GestureStroke(val points: List[GesturePoint]) {
private def createBoundingBox(cBox: Rectangle, cPoints: List[GesturePoint]): Rectangle = cPoints match {
case cP :: cPoints => createBoundingBox(cBox.union(cP), cPoints)
case Nil => cBox
}
  /** The rectangle bounding the stroke, dictated by its lowest and highest GesturePoints */
lazy val boundingBox: Rectangle = createBoundingBox(new Rectangle(0, 0), points)
  /** The diagonal of the stroke's bounding box */
lazy val length: Float = this.boundingBox.diagonal
  /** Flattens all the points in a gesture stroke
* @return A list of Floats formatted as X1, Y1, ... Xn, Yn
*/
def flatPoints: List[Float] = {
def loop(points: List[GesturePoint], flattened: ListBuffer[Float]): List[Float] = points match {
case p1 :: ps => {
flattened.append(p1.x)
flattened.append(p1.y)
loop(ps, flattened)
}
case _ => flattened.toList
}
loop(points, ListBuffer())
}
/** Serialize the stroke to the given data stream
* writes the number of points as an integer and then
* serializes all contained points
* @param stream The stream to write data to
*/
def serialize(stream: DataOutput) {
stream.writeInt(this.points.length)
this.points.map(x => x.serialize(stream))
}
}
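/*
 * Worked example of the flattened layout, sketch only and not original code:
 * a stroke whose points are (1, 2) and (3, 4) flattens to List(1f, 2f, 3f, 4f),
 * i.e. X1, Y1, X2, Y2. The GesturePoint construction below is an assumption and
 * may not match the real constructor in this codebase.
 *
 *   new GestureStroke(List(new GesturePoint(1f, 2f), new GesturePoint(3f, 4f))).flatPoints
 *     == List(1f, 2f, 3f, 4f)
 */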
object GestureStroke {
/** Deserialize a stroke from the given DataInput
* Reads the number of points, an integer, and then that
* many points
* @param stream The DataInput to read from
*/
def deserialize(stream: DataInput): GestureStroke =
new GestureStroke((for(_ <- 1 to stream.readInt())
yield GesturePoint.deserialize(stream)).to[List])
}
| chrisseto/obscene | core/src/main/scala/GestureStroke.scala | Scala | apache-2.0 | 1,980 |
/*
* Copyright (C) 2010 Lalit Pant <[email protected]>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo
package figure
import edu.umd.cs.piccolo.PCanvas
import kgeom.PArc
import core.Ellipse
class FigArc(val canvas: PCanvas, onEll: Ellipse, start: Double, extent: Double) extends core.Arc(onEll, start, extent) with FigShape {
val pArc = new PArc(onEll.center.x, onEll.center.y, onEll.w, onEll.h, -start, -extent)
protected val piccoloNode = pArc
} | richardfontana/fontana2007-t | KojoEnv/src/net/kogics/kojo/figure/FigArc.scala | Scala | gpl-3.0 | 940 |
package fr.renoux.gaston.model
import fr.renoux.gaston.util.{BitSet, testOnly}
import scala.annotation.tailrec
/** A Record is a triplet of slot, topic and assigned persons */
final case class Record(slot: Slot, topic: Topic, persons: Set[Person])(implicit val problem: Problem) extends Ordered[Record] {
import problem.counts
lazy val personsList: List[Person] = persons.toList
lazy val countPersons: Int = persons.size
lazy val personsBitSet: BitSet[Person] = persons.toBitSet
  /* No need to compare persons: on a given schedule there are no two records with the same slot and topic */
override def compare(that: Record): Int = {
val compareSlots = slot.name.compareTo(that.slot.name)
if (compareSlots != 0) compareSlots
else topic.name.compareTo(that.topic.name)
}
lazy val optionalPersons: Set[Person] = persons -- topic.mandatory
lazy val canRemovePersons: Boolean = countPersons > topic.min && optionalPersons.nonEmpty
lazy val canAddPersons: Boolean = countPersons < topic.max
/** Clear all non-mandatory persons. */
lazy val cleared: Record = copy(persons = topic.mandatory)
/** Adds a person to the record. */
def addPerson(person: Person): Record = copy(persons = persons + person)
/** Removes a person from the record. */
def removePerson(person: Person): Record = copy(persons = persons - person)
/** Replace a person by another on the record. */
  def replacePerson(oldP: Person, newP: Person): Record = copy(persons = persons - oldP + newP)
/** Score for each person, regardless of its weight. */
lazy val unweightedScoresByPerson: Map[Person, Score] =
persons.view.map { person =>
val prefs = problem.personalPreferencesListByPerson(person)
val score = if (prefs.isEmpty) Score.Zero else prefs.view.map(_.scoreRecord(this)).sum
// TODO sum is a major (22%) hot-spot
person -> score
}.toMap
lazy val impersonalScore: Score = preferencesScoreRec(problem.impersonalRecordLevelPreferencesList)
@tailrec
private def preferencesScoreRec(prefs: List[Preference.RecordLevel], sum: Double = 0): Score = prefs match {
case Nil => Score(sum)
case p :: ps =>
val s = p.scoreRecord(this)
if (s.value == Double.NegativeInfinity) s else preferencesScoreRec(ps, sum + s.value)
}
/**
* Partial Schedules are schedule where topics are matched, but not all persons are assigned yet.
* @return true if this respects all constraints applicable to partial schedules
*/
lazy val isPartialSolution: Boolean = {
topic.max >= countPersons && // topic.min <= pCount &&
!topic.forbidden.exists(persons.contains) && topic.mandatory.forall(persons.contains) &&
topic.slots.forall(_.contains(slot)) &&
persons.forall(slot.personsPresent.contains)
}
/** @return true if this respects all constraints */
lazy val isSolution: Boolean =
isPartialSolution && topic.min <= countPersons
/** Merge with another slot schedule's content. Used only in tests. */
@testOnly def ++(that: Record): Record = {
if (slot != that.slot || topic != that.topic) throw new IllegalArgumentException(s"$this ++ $that")
copy(persons = persons ++ that.persons)
}
/** Produces a clear, single-line version of this record, with no indentation. */
lazy val toFormattedString: String =
s"${topic.name}: ${persons.map(_.name).mkString(", ")}"
}
object Record {
def fromTuple(tuple: (Slot, Topic, Set[Person]))(implicit problem: Problem): Record = Record(tuple._1, tuple._2, tuple._3)
def fromTuple2(tuple: ((Slot, Topic), Set[Person]))(implicit problem: Problem): Record = Record(tuple._1._1, tuple._1._2, tuple._2)
def apply(slot: Slot, topic: Topic, persons: Person*)(implicit problem: Problem): Record = apply(slot, topic, persons.toSet)
}
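/*
 * Usage sketch, not part of the original source: building a record for one slot
 * and topic with two assigned persons through the varargs factory above. The
 * `morning`, `chess`, `alice` and `bob` values are hypothetical, and an implicit
 * Problem must be in scope.
 *
 *   Record(morning, chess, alice, bob).toFormattedString  // e.g. "chess: alice, bob"
 */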
| gaelrenoux/gaston | src/main/scala/fr/renoux/gaston/model/Record.scala | Scala | apache-2.0 | 3,804 |
package co.rc.smserviceclient.exceptions
import spray.http.HttpResponse
/**
 * Exception raised when the external service returns an unexpected HTTP response
*/
class UnhandledResponseException( response: HttpResponse ) extends SessionServiceClientException( s"The external service returned an unexpected response: $response" )
| rodricifuentes1/session-manager-service-client | src/main/scala/co/rc/smserviceclient/exceptions/UnhandledResponseException.scala | Scala | mit | 301 |
package controllers.blog
import java.util.Calendar
import models.blog.{ Article, CommentsShow }
import models.goods.Category
import play.api.data.Form
import play.api.data.Forms.{ mapping, text, _ }
/**
* Created by stanikol on 2/1/17.
*/
object FormsData {
val timeFormatForSortOrder = new java.text.SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX")
val now = Calendar.getInstance().getTime()
val articleForm = Form(mapping(
"id" -> optional(longNumber),
"sort-order" -> default(text, timeFormatForSortOrder.format(now)),
"keywords" -> nonEmptyText,
"title" -> nonEmptyText,
"blog-text" -> nonEmptyText,
"description" -> nonEmptyText,
"blog-short-text" -> nonEmptyText
)(Article.apply)(Article.unapply))
val actionForm = Form(single("action" -> optional(text)))
case class AddComment(articleID: Long, comment: Option[String])
val addCommentForm = Form(mapping(
"article-id" -> longNumber,
"article-comment" -> optional(text)
)(AddComment.apply)(AddComment.unapply))
case class EditComment(commentID: Long, commentText: String, action: String)
val editCommentForm = Form(mapping(
"comment-id" -> longNumber,
"comment-text" -> text,
"action" -> text
)(EditComment.apply)(EditComment.unapply))
val commentsShowForm = Form(mapping(
"comments-order" -> optional(text),
"article-id" -> optional(text)
)(CommentsShow.apply)(CommentsShow.unapply))
case class CategoryEdit(category: Category, action: String)
val categoryForm: Form[CategoryEdit] = Form(mapping(
"category" -> mapping(
"id" -> optional(number),
"name" -> nonEmptyText,
"sort-order" -> nonEmptyText
)(Category.apply)(Category.unapply),
"action" -> nonEmptyText
)(CategoryEdit.apply)(CategoryEdit.unapply))
}
| stanikol/walnuts | server/app/controllers/blog/FormsData.scala | Scala | apache-2.0 | 1,796 |
/*
* MIT License
*
* Copyright (c) 2016 Gonçalo Marques
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.byteslounge.slickrepo.meta
/**
* Business entity that is mapped to a database record.
*/
trait Entity[T <: Entity[T, ID], ID] {
val id: Option[ID]
/**
* Sets the identifier for this entity instance.
*/
def withId(id: ID): T
}
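/**
 * Illustrative sketch only, not part of the library: a minimal immutable entity
 * with an Int identifier. The `Person` name and its field are made up for the
 * example.
 */
case class Person(override val id: Option[Int], name: String) extends Entity[Person, Int] {
  def withId(id: Int): Person = copy(id = Some(id))
}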
| gonmarques/slick-repo | src/main/scala/com/byteslounge/slickrepo/meta/Entity.scala | Scala | mit | 1,394 |
package models
import scalikejdbc._
import skinny.orm.{Alias, SkinnyCRUDMapperWithId}
case class Station(id: Long, name: String, no: Int, lineId: Long)
object Station extends SkinnyCRUDMapperWithId[Long, Station] {
override def defaultAlias: Alias[Station] = createAlias("st")
val st = defaultAlias
override def extract(rs: WrappedResultSet, n: ResultName[Station]): Station = autoConstruct(rs, n)
override def idToRawValue(id: Long): Any = id
override def rawValueToId(value: Any): Long = value.toString.toLong
def findByNo(lineId: Long, no: Int)(implicit session: DBSession): Option[Station] = {
findBy(sqls.eq(st.lineId, lineId).and.eq(st.no, no))
}
}
case class StationBuilder(name: String, no: Int, lineId: Long) {
def save()(implicit session: DBSession): Long = {
Station.createWithAttributes(
'name -> name,
'no -> no,
'lineID -> lineId
)
}
}
| ponkotuy/train-analyzer | app/models/Station.scala | Scala | apache-2.0 | 907 |
/*
* Copyright 2011-2013 The myBatis Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mybatis.scala.config
object DefaultScriptingDriver extends org.apache.ibatis.scripting.xmltags.XMLLanguageDriver | tempbottle/scala-1 | mybatis-scala-core/src/main/scala/org/mybatis/scala/config/DefaultScriptingDriver.scala | Scala | apache-2.0 | 732 |
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
package gigahorse
final class FileBody private (
val file: java.io.File) extends gigahorse.Body() with Serializable {
override def equals(o: Any): Boolean = o match {
case x: FileBody => (this.file == x.file)
case _ => false
}
override def hashCode: Int = {
37 * (37 * (17 + "gigahorse.FileBody".##) + file.##)
}
override def toString: String = {
"FileBody(" + file + ")"
}
private[this] def copy(file: java.io.File = file): FileBody = {
new FileBody(file)
}
def withFile(file: java.io.File): FileBody = {
copy(file = file)
}
}
object FileBody {
def apply(file: java.io.File): FileBody = new FileBody(file)
}
| eed3si9n/gigahorse | core/src/main/contraband-scala/gigahorse/FileBody.scala | Scala | apache-2.0 | 792 |
package com.seanshubin.utility.string
import com.seanshubin.utility.collection.SeqDifference
import com.seanshubin.utility.string.TableFormat.{LeftJustify, RightJustify}
import org.scalatest.FunSuite
class TableFormatTest extends FunSuite {
test("box drawing characters") {
val tableFormat = TableFormat.BoxDrawingCharacters
val input = Seq(
Seq("Alice", "Bob", "Carol"),
Seq("Dave", "Eve", "Mallory"),
Seq("Peggy", "Trent", "Wendy"))
val expected = Seq(
"╔═════╤═════╤═══════╗",
"║Alice│Bob │Carol ║",
"╟─────┼─────┼───────╢",
"║Dave │Eve │Mallory║",
"╟─────┼─────┼───────╢",
"║Peggy│Trent│Wendy ║",
"╚═════╧═════╧═══════╝"
)
val actual = tableFormat.format(input)
assertLinesEqual(actual, expected)
}
test("plain text characters") {
val tableFormat = TableFormat.AsciiDrawingCharacters
val input = Seq(
Seq("Alice", "Bob", "Carol"),
Seq("Dave", "Eve", "Mallory"),
Seq("Peggy", "Trent", "Wendy"))
val expected = Seq(
"/-----+-----+-------\\\\",
"|Alice|Bob |Carol |",
"+-----+-----+-------+",
"|Dave |Eve |Mallory|",
"+-----+-----+-------+",
"|Peggy|Trent|Wendy |",
"\\\\-----+-----+-------/"
)
val actual = tableFormat.format(input)
assertLinesEqual(actual, expected)
}
test("compact") {
val tableFormat = TableFormat.CompactDrawingCharacters
val input = Seq(
Seq("Alice", "Bob", "Carol"),
Seq("Dave", "Eve", "Mallory"),
Seq("Peggy", "Trent", "Wendy"))
val expected = Seq(
"Alice Bob Carol ",
"Dave Eve Mallory",
"Peggy Trent Wendy "
)
val actual = tableFormat.format(input)
assertLinesEqual(actual, expected)
}
test("left and right justify") {
import TableFormat.{LeftJustify, RightJustify}
val tableFormat = TableFormat.BoxDrawingCharacters
val bigInt = BigInt(2)
val bigDec = BigDecimal(3)
val input = Seq(
Seq("left justify column name", "default justification column name", "right justify column name"),
Seq(LeftJustify("left"), "default", RightJustify("right")),
Seq(LeftJustify(null), null, RightJustify(null)),
Seq(LeftJustify(1), 1, RightJustify(1)),
Seq(LeftJustify(bigInt), bigInt, RightJustify(bigInt)),
Seq(LeftJustify(bigDec), bigDec, RightJustify(bigDec)))
val expected = Seq(
"╔════════════════════════╤═════════════════════════════════╤═════════════════════════╗",
"║left justify column name│default justification column name│right justify column name║",
"╟────────────────────────┼─────────────────────────────────┼─────────────────────────╢",
"║left │default │ right║",
"╟────────────────────────┼─────────────────────────────────┼─────────────────────────╢",
"║null │ null│ null║",
"╟────────────────────────┼─────────────────────────────────┼─────────────────────────╢",
"║1 │ 1│ 1║",
"╟────────────────────────┼─────────────────────────────────┼─────────────────────────╢",
"║2 │ 2│ 2║",
"╟────────────────────────┼─────────────────────────────────┼─────────────────────────╢",
"║3 │ 3│ 3║",
"╚════════════════════════╧═════════════════════════════════╧═════════════════════════╝"
)
val actual = tableFormat.format(input)
assertLinesEqual(actual, expected)
}
test("left and right justify something small") {
val tableFormat = TableFormat.BoxDrawingCharacters
assert(tableFormat.format(Seq(Seq("a"))) === Seq("╔═╗", "║a║", "╚═╝"))
assert(tableFormat.format(Seq(Seq(LeftJustify("a")))) === Seq("╔═╗", "║a║", "╚═╝"))
assert(tableFormat.format(Seq(Seq(RightJustify("a")))) === Seq("╔═╗", "║a║", "╚═╝"))
}
test("no columns") {
val tableFormat = TableFormat.BoxDrawingCharacters
assert(tableFormat.format(Seq(Seq())) === Seq("╔╗", "║║", "╚╝"))
}
test("no rows") {
val tableFormat = TableFormat.BoxDrawingCharacters
assert(tableFormat.format(Seq()) === Seq("╔╗", "╚╝"))
}
test("replace empty cells with blank cells") {
val tableFormat = TableFormat.BoxDrawingCharacters
val input = Seq(
Seq("Alice", "Bob", "Carol"),
Seq("Dave", "Eve"),
Seq("Peggy", "Trent", "Wendy"))
val expected = Seq(
"╔═════╤═════╤═════╗",
"║Alice│Bob │Carol║",
"╟─────┼─────┼─────╢",
"║Dave │Eve │ ║",
"╟─────┼─────┼─────╢",
"║Peggy│Trent│Wendy║",
"╚═════╧═════╧═════╝"
)
val actual = tableFormat.format(input)
assertLinesEqual(actual, expected)
}
def assertLinesEqual(actual: Seq[Any], expected: Seq[Any]): Unit = {
val seqDifference = SeqDifference.diff(actual, expected)
    assert(seqDifference.isSame, seqDifference.messageLines.mkString("\n"))
}
}
| SeanShubin/utility | string/src/test/scala/com/seanshubin/utility/string/TableFormatTest.scala | Scala | unlicense | 6,851 |
package com.temportalist.weepingangels.common.network
import com.temportalist.origin.foundation.common.network.IPacket
import com.temportalist.weepingangels.common.tile.TEStatue
import cpw.mods.fml.common.network.simpleimpl.{IMessage, IMessageHandler, MessageContext}
import cpw.mods.fml.relauncher.Side
import net.minecraft.tileentity.TileEntity
/**
* Used to update different states of a Statue Tile Entity
* States: Face, Arms, Rotation
*
* @author TheTemportalist
*/
class PacketModifyStatue extends IPacket {
/**
* @param tile The tile entity
* @param state 1 = Face, 2 = Arms, 3 = Rotation
* @param value Face (1 = Calm, 2 = Angry), Arms (1 = Hiding, 2 = Peaking, 3 = Confident), Rotation (in degrees)
*/
def this(tile: TileEntity, state: Int, value: Float) {
this()
this.add(tile)
this.add(state)
this.add(value)
}
override def getReceivableSide: Side = Side.SERVER
}
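/*
 * Usage sketch, not part of the original mod code, following the state table in
 * the constructor doc above. `statueTile` stands for some TEStatue tile entity
 * reference and is hypothetical.
 *
 *   new PacketModifyStatue(statueTile, 3, 90F) // rotation -> 90 degrees
 *   new PacketModifyStatue(statueTile, 1, 2F)  // face -> angry
 */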
object PacketModifyStatue {
class Handler extends IMessageHandler[PacketModifyStatue, IMessage] {
override def onMessage(message: PacketModifyStatue, ctx: MessageContext): IMessage = {
message.getTile(ctx.getServerHandler.playerEntity.worldObj) match {
case statue: TEStatue =>
message.get[Int] match {
case 1 => // Face
statue.setFacialState(Math.floor(message.get[Float]).toInt)
case 2 => // Arms
statue.setArmState(Math.floor(message.get[Float]).toInt)
println("Set arm state to " + statue.getArmState)
case 3 => // Rotation
statue.setRotation(message.get[Float])
case 4 => // Corruption
statue.setCorruption(message.get[Float].toInt)
case _ =>
}
case _ =>
}
null
}
}
}
| TheTemportalist/WeepingAngels | src/main/scala/com/temportalist/weepingangels/common/network/PacketModifyStatue.scala | Scala | apache-2.0 | 1,676 |
package com.wavesplatform.lang.v1.parser
import cats.instances.either._
import cats.instances.list._
import cats.instances.option._
import cats.syntax.either._
import cats.syntax.traverse._
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.lang.v1.ContractLimits
import com.wavesplatform.lang.v1.evaluator.ctx.impl.PureContext.MaxListLengthV4
import com.wavesplatform.lang.v1.parser.BinaryOperation._
import com.wavesplatform.lang.v1.parser.Expressions.PART.VALID
import com.wavesplatform.lang.v1.parser.Expressions._
import com.wavesplatform.lang.v1.parser.UnaryOperation._
import fastparse.MultiLineWhitespace._
import fastparse._
object Parser {
private val Global = com.wavesplatform.lang.hacks.Global // Hack for IDEA
implicit def hack(p: fastparse.P[Any]): fastparse.P[Unit] = p.map(_ => ())
def keywords = Set("let", "strict", "base58", "base64", "true", "false", "if", "then", "else", "match", "case", "func")
def lowerChar[_: P] = CharIn("a-z")
def upperChar[_: P] = CharIn("A-Z")
def char[_: P] = lowerChar | upperChar
def digit[_: P] = CharIn("0-9")
def unicodeSymbolP[_: P] = P("\\u" ~/ Pass ~~ (char | digit).repX(0, "", 4))
def notEndOfString[_: P] = CharPred(_ != '\"')
def specialSymbols[_: P] = P("\\" ~~ AnyChar)
def comment[_: P]: P[Unit] = P("#" ~~ CharPred(_ != '\n').repX).rep.map(_ => ())
def directive[_: P]: P[Unit] = P("{-#" ~ CharPred(el => el != '\n' && el != '#').rep ~ "#-}").rep(0, comment).map(_ => ())
def unusedText[_: P] = comment ~ directive ~ comment
def escapedUnicodeSymbolP[_: P]: P[(Int, String, Int)] = P(Index ~~ (NoCut(unicodeSymbolP) | specialSymbols).! ~~ Index)
def stringP[_: P]: P[EXPR] =
P(Index ~~ "\"" ~/ Pass ~~ (escapedUnicodeSymbolP | notEndOfString).!.repX ~~ "\"" ~~ Index)
.map {
case (start, xs, end) =>
var errors = Vector.empty[String]
val consumedString = new StringBuilder
xs.foreach { x =>
if (x.startsWith("\\u")) {
if (x.length == 6) {
val hexCode = x.drop(2)
try {
val int = Integer.parseInt(hexCode, 16)
val unicodeSymbol = new String(Character.toChars(int))
consumedString.append(unicodeSymbol)
} catch {
case _: NumberFormatException =>
consumedString.append(x)
errors :+= s"can't parse '$hexCode' as HEX string in '$x'"
case _: IllegalArgumentException =>
consumedString.append(x)
errors :+= s"invalid UTF-8 symbol: '$x'"
}
} else {
consumedString.append(x)
errors :+= s"incomplete UTF-8 symbol definition: '$x'"
}
} else if (x.startsWith("\\")) {
if (x.length == 2) {
consumedString.append(x(1) match {
case 'b' => "\b"
case 'f' => "\f"
case 'n' => "\n"
case 'r' => "\r"
case 't' => "\t"
case '\\' => "\\"
case '"' => "\""
case _ =>
errors :+= s"""unknown escaped symbol: '$x'. The valid are \b, \f, \n, \r, \t"""
x
})
} else {
consumedString.append(x)
errors :+= s"""invalid escaped symbol: '$x'. The valid are \b, \f, \n, \r, \t"""
}
} else {
consumedString.append(x)
}
}
val r =
if (errors.isEmpty) PART.VALID(Pos(start + 1, end - 1), consumedString.toString)
else PART.INVALID(Pos(start + 1, end - 1), errors.mkString(";"))
(Pos(start, end), r)
}
.map(posAndVal => CONST_STRING(posAndVal._1, posAndVal._2))
def correctVarName[_: P]: P[PART[String]] =
(Index ~~ (char ~~ (digit | char).repX()).! ~~ Index)
.filter { case (_, x, _) => !keywords.contains(x) }
.map { case (start, x, end) => PART.VALID(Pos(start, end), x) }
def correctLFunName[_: P]: P[PART[String]] =
(Index ~~ (char ~~ ("_".? ~~ (digit | char)).repX()).! ~~ Index)
.filter { case (_, x, _) => !keywords.contains(x) }
.map { case (start, x, end) => PART.VALID(Pos(start, end), x) }
def genericVarName(namePx: fastparse.P[Any] => P[Unit])(implicit c: fastparse.P[Any]): P[PART[String]] = {
def nameP(implicit c: fastparse.P[Any]) = namePx(c)
(Index ~~ nameP.! ~~ Index).map {
case (start, x, end) =>
if (keywords.contains(x)) PART.INVALID(Pos(start, end), s"keywords are restricted: $x")
else PART.VALID(Pos(start, end), x)
}
}
def anyVarName(implicit c: fastparse.P[Any]): P[PART[String]] = {
def nameP(implicit c: fastparse.P[Any]): P[Unit] = char ~~ (digit | char).repX()
genericVarName(nameP(_))
}
def invalid[_: P]: P[INVALID] = {
import fastparse.NoWhitespace._
P(Index ~~ CharPred(_ != '\n').rep(1) ~~ Index)
.map {
case (start, end) => INVALID(Pos(start, end), "can't parse the expression")
}
}
def border[_: P]: P[Unit] = CharIn(" \t\n\r({")
def numberP[_: P]: P[CONST_LONG] =
P(Index ~~ (CharIn("+\\-").? ~~ digit.repX(1)).! ~~ ("_" ~~ digit.repX(1).!).repX(0) ~~ Index)
.map({ case (start, x1, x2, end) => CONST_LONG(Pos(start, end), x2.foldLeft(x1)(_ ++ _).toLong) })
def trueP[_: P]: P[TRUE] = P(Index ~~ "true".! ~~ !(char | digit) ~~ Index).map { case (start, _, end) => TRUE(Pos(start, end)) }
def falseP[_: P]: P[FALSE] = P(Index ~~ "false".! ~~ !(char | digit) ~~ Index).map { case (start, _, end) => FALSE(Pos(start, end)) }
def curlyBracesP[_: P]: P[EXPR] = P("{" ~ baseExpr ~ "}")
def refP[_: P]: P[REF] = P(correctVarName).map { x =>
REF(Pos(x.position.start, x.position.end), x)
}
def lfunP[_: P]: P[REF] = P(correctLFunName).map { x =>
REF(Pos(x.position.start, x.position.end), x)
}
def ifP[_: P]: P[IF] = {
def optionalPart(keyword: String, branch: String): P[EXPR] = (Index ~ (keyword ~/ Index ~ baseExpr.?).?).map {
case (ifTruePos, ifTrueRaw) =>
ifTrueRaw
.map { case (pos, expr) => expr.getOrElse(INVALID(Pos(pos, pos), s"expected a $branch branch's expression")) }
.getOrElse(INVALID(Pos(ifTruePos, ifTruePos), s"expected a $branch branch"))
}
def thenPart[_: P] = optionalPart("then", "true")
def elsePart[_: P] = optionalPart("else", "false")
P(Index ~~ "if" ~~ &(border) ~/ Index ~ baseExpr.? ~ thenPart ~ elsePart ~~ Index).map {
case (start, condPos, condRaw, ifTrue, ifFalse, end) =>
val cond = condRaw.getOrElse(INVALID(Pos(condPos, condPos), "expected a condition"))
IF(Pos(start, end), cond, ifTrue, ifFalse)
} |
P(Index ~~ "then" ~~ &(border) ~/ Index ~ baseExpr.? ~ elsePart ~~ Index).map {
case (start, ifTrueExprPos, ifTrueRaw, ifFalse, end) =>
val ifTrue = ifTrueRaw.getOrElse(INVALID(Pos(ifTrueExprPos, ifTrueExprPos), "expected a true branch's expression"))
IF(Pos(start, end), INVALID(Pos(start, start), "expected a condition"), ifTrue, ifFalse)
} |
P(Index ~~ "else" ~~ &(border) ~/ Index ~ baseExpr.? ~~ Index).map {
case (start, ifFalseExprPos, ifFalseRaw, end) =>
val ifFalse = ifFalseRaw.getOrElse(INVALID(Pos(ifFalseExprPos, ifFalseExprPos), "expected a false branch's expression"))
IF(Pos(start, end), INVALID(Pos(start, start), "expected a condition"), INVALID(Pos(start, start), "expected a true branch"), ifFalse)
}
}
def functionCallArgs[_: P]: P[Seq[EXPR]] = comment ~ baseExpr.rep(0, comment ~ "," ~ comment) ~ comment
def maybeFunctionCallP[_: P]: P[EXPR] = (Index ~~ lfunP ~~ P("(" ~/ functionCallArgs ~ ")").? ~~ Index).map {
case (start, REF(_, functionName, _, _), Some(args), accessEnd) => FUNCTION_CALL(Pos(start, accessEnd), functionName, args.toList)
case (_, id, None, _) => id
}
def foldP[_: P]: P[EXPR] =
(Index ~~ P("FOLD<") ~~ Index ~~ digit.repX(1).! ~~ Index ~~ ">(" ~/ baseExpr ~ "," ~ baseExpr ~ "," ~ refP ~ ")" ~~ Index)
.map {
case (start, limStart, limit, limEnd, list, acc, f, end) =>
val lim = limit.toInt
if (lim < 1)
INVALID(Pos(limStart, limEnd), "FOLD limit should be natural")
else if (lim > MaxListLengthV4)
INVALID(Pos(limStart, limEnd), s"List size limit in FOLD is too big, $lim must be less or equal $MaxListLengthV4")
else
FOLD(Pos(start, end), lim, list, acc, f)
}
def list[_: P]: P[EXPR] = (Index ~~ P("[") ~ functionCallArgs ~ P("]") ~~ Index).map {
case (s, e, f) =>
val pos = Pos(s, f)
e.foldRight(REF(pos, PART.VALID(pos, "nil")): EXPR) { (v, l) =>
FUNCTION_CALL(pos, PART.VALID(pos, "cons"), List(v, l))
}
}
def bracedArgs[_: P]: P[Seq[EXPR]] =
comment ~ baseExpr.rep(
sep = comment ~ "," ~ comment,
max = ContractLimits.MaxTupleSize
) ~ comment
def bracesOrTuple[_: P]: P[EXPR] = (Index ~~ P("(") ~ bracedArgs ~ P(")") ~~ Index).map {
case (_, Seq(expr), _) => expr
case (s, elements, f) =>
FUNCTION_CALL(
Pos(s, f),
PART.VALID(Pos(s, f), s"_Tuple${elements.length}"),
elements.toList
)
}
def extractableAtom[_: P]: P[EXPR] = P(
curlyBracesP | bracesOrTuple |
byteVectorP | stringP | numberP | trueP | falseP | list |
maybeFunctionCallP
)
sealed trait Accessor
case class Method(name: PART[String], args: Seq[EXPR]) extends Accessor
case class Getter(name: PART[String]) extends Accessor
case class ListIndex(index: EXPR) extends Accessor
case class GenericMethod(name: PART[String], `type`: Type) extends Accessor
def singleTypeP[_: P]: P[Single] = (anyVarName ~~ ("[" ~~ Index ~ unionTypeP ~ Index ~~ "]").?).map {
case (t, param) => Single(t, param.map { case (start, param, end) => VALID(Pos(start, end), param) })
}
def unionTypeP[_: P]: P[Type] =
(Index ~ P("Any") ~ Index).map { case (start, end) => AnyType(Pos(start, end)) } | P(singleTypeP | tupleTypeP)
.rep(1, comment ~ "|" ~ comment)
.map(Union.apply)
def tupleTypeP[_: P]: P[Tuple] =
("(" ~
P(unionTypeP).rep(
ContractLimits.MinTupleSize,
comment ~ "," ~ comment,
ContractLimits.MaxTupleSize,
)
~ ")")
.map(Tuple)
def funcP(implicit c: fastparse.P[Any]): P[FUNC] = {
def funcname(implicit c: fastparse.P[Any]) = anyVarName
def argWithType(implicit c: fastparse.P[Any]) = anyVarName ~ ":" ~ unionTypeP ~ comment
def args(implicit c: fastparse.P[Any]) = "(" ~ comment ~ argWithType.rep(0, "," ~ comment) ~ ")" ~ comment
def funcHeader(implicit c: fastparse.P[Any]) =
Index ~~ "func" ~ funcname ~ comment ~ args ~ "=" ~ P(singleBaseExpr | ("{" ~ baseExpr ~ "}")) ~~ Index
funcHeader.map {
case (start, name, args, expr, end) => FUNC(Pos(start, end), expr, name, args)
}
}
def annotationP[_: P]: P[ANNOTATION] = (Index ~~ "@" ~ anyVarName ~ comment ~ "(" ~ comment ~ anyVarName.rep(0, ",") ~ comment ~ ")" ~~ Index).map {
case (start, name: PART[String], args: Seq[PART[String]], end) => ANNOTATION(Pos(start, end), name, args)
}
def annotatedFunc[_: P]: P[ANNOTATEDFUNC] = (Index ~~ annotationP.rep(1) ~ comment ~ funcP ~~ Index).map {
case (start, as, f, end) => ANNOTATEDFUNC(Pos(start, end), as, f)
}
def matchCaseP(implicit c: fastparse.P[Any]): P[MATCH_CASE] = {
def checkForGenericAndGetLastPos(t: Type): Either[INVALID, Option[Pos]] =
t match {
case Single(VALID(position, "List"), Some(VALID(_, AnyType(_)))) => Right(Some(position))
case Single(name, parameter) =>
parameter
.toLeft(Some(name.position))
.leftMap {
case VALID(position, v) => INVALID(position, s"Unexpected generic match type $t")
case PART.INVALID(position, message) => INVALID(position, message)
}
case Union(types) =>
types.lastOption.flatTraverse(checkForGenericAndGetLastPos)
case Tuple(types) =>
types.lastOption.flatTraverse(checkForGenericAndGetLastPos)
case AnyType(pos) => Right(Some(pos))
}
def restMatchCaseInvalidP(implicit c: fastparse.P[Any]): P[String] = P((!P("=>") ~~ AnyChar.!).repX.map(_.mkString))
def varDefP(implicit c: fastparse.P[Any]): P[Option[PART[String]]] = (anyVarName ~~ !("'" | "(")).map(Some(_)) | P("_").!.map(_ => None)
def typesDefP(implicit c: fastparse.P[Any]) =
(
":" ~ comment ~
(unionTypeP | (Index ~~ restMatchCaseInvalidP ~~ Index).map {
case (start, _, end) =>
Single(PART.INVALID(Pos(start, end), "the type for variable should be specified: `case varName: Type => expr`"), None)
})
).?.map(_.getOrElse(Union(Seq())))
def pattern(implicit c: fastparse.P[Any]): P[Pattern] =
(varDefP ~ comment ~ typesDefP).map { case (v, t) => TypedVar(v, t) } |
(Index ~ "(" ~ pattern.rep(min = 2, sep = ",") ~ ")" ~ Index).map(p => TuplePat(p._2, Pos(p._1, p._3))) |
(Index ~ anyVarName ~ "(" ~ (anyVarName ~ "=" ~ pattern).rep(sep = ",") ~ ")" ~ Index).map(p =>
ObjPat(p._3.map(kp => (PART.toOption(kp._1).get, kp._2)).toMap, Single(p._2, None), Pos(p._1, p._4))) |
(Index ~ baseExpr.rep(min = 1, sep = "|") ~ Index).map(p => ConstsPat(p._2, Pos(p._1, p._3)))
def checkPattern(p: Pattern): Either[INVALID, Option[Pos]] = p match {
case TypedVar(_, t) => checkForGenericAndGetLastPos(t)
case ConstsPat(_, pos) => Right(Some(pos))
case TuplePat(ps, pos) =>
ps.toList traverse checkPattern map { _ =>
Some(pos)
}
case ObjPat(ps, _, pos) =>
ps.values.toList traverse checkPattern map { _ =>
Some(pos)
}
}
P(
Index ~~ "case" ~~ &(border) ~ comment ~/ (
pattern |
(Index ~~ restMatchCaseInvalidP ~~ Index).map {
case (start, _, end) =>
TypedVar(
Some(PART.INVALID(Pos(start, end), "invalid syntax, should be: `case varName: Type => expr` or `case _ => expr`")),
Union(Seq())
)
}
) ~ comment ~ "=>" ~/ baseExpr.? ~~ Index
).map {
case (caseStart, p, e, end) =>
checkPattern(p)
.fold(
error => MATCH_CASE(error.position, pattern = p, expr = error), { pos =>
val cPos = Pos(caseStart, end)
val exprStart = pos.fold(caseStart)(_.end)
MATCH_CASE(
cPos,
pattern = p,
expr = e.getOrElse(INVALID(Pos(exprStart, end), "expected expression"))
)
}
)
}
}
def matchP[_: P]: P[EXPR] =
P(Index ~~ "match" ~~ &(border) ~/ baseExpr ~ "{" ~ comment ~ matchCaseP.rep(0, comment) ~ comment ~ "}" ~~ Index)
.map {
case (start, _, Nil, end) => INVALID(Pos(start, end), "pattern matching requires case branches")
case (start, e, cases, end) => MATCH(Pos(start, end), e, cases.toList)
}
def accessorName(implicit c: fastparse.P[Any]): P[PART[String]] = {
def nameP(implicit c: fastparse.P[Any]) = (char | "_") ~~ (digit | char).repX()
genericVarName(nameP(_))
}
def accessP[_: P]: P[(Int, Accessor, Int)] = P(
(("" ~ comment ~ Index ~ "." ~/ comment ~ functionCallOrGetter) ~~ Index) | (Index ~~ "[" ~/ baseExpr.map(ListIndex) ~ "]" ~~ Index)
)
def functionCallOrGetter[_: P]: P[Accessor] = {
sealed trait ArgsOrType
case class Args(args: Seq[EXPR]) extends ArgsOrType
case class Type(t: Expressions.Type) extends ArgsOrType
(accessorName.map(Getter) ~/ comment ~~ (("(" ~/ comment ~ functionCallArgs ~/ comment ~ ")").map(Args) | ("[" ~ unionTypeP ~ "]").map(Type)).?).map {
case (g @ Getter(name), args) =>
args.fold(g: Accessor) {
case Args(a) => Method(name, a)
case Type(t) => GenericMethod(name, t)
}
}
}
def maybeAccessP[_: P]: P[EXPR] =
P(Index ~~ extractableAtom ~~ Index ~~ NoCut(accessP).repX)
.map {
case (start, obj, objEnd, accessors) =>
accessors.foldLeft(obj) {
case (e, (accessStart, a, accessEnd)) =>
a match {
case Getter(n) => GETTER(Pos(start, accessEnd), e, n)
case Method(n, args) => FUNCTION_CALL(Pos(start, accessEnd), n, e :: args.toList)
case ListIndex(index) => FUNCTION_CALL(Pos(start, accessEnd), PART.VALID(Pos(accessStart, accessEnd), "getElement"), List(e, index))
case GenericMethod(n, t) => GENERIC_FUNCTION_CALL(Pos(start, accessEnd), e, n, t)
}
}
}
def byteVectorP[_: P]: P[EXPR] =
P(Index ~~ "base" ~~ ("58" | "64" | "16").! ~~ "'" ~/ Pass ~~ CharPred(_ != '\'').repX.! ~~ "'" ~~ Index)
.map {
case (start, base, xs, end) =>
val innerStart = start + 8
val innerEnd = end - 1
val decoded = base match {
case "16" => Global.base16Decode(xs, checkLength = false)
case "58" => Global.base58Decode(xs)
case "64" => Global.base64Decode(xs)
}
decoded match {
case Left(err) => CONST_BYTESTR(Pos(start, end), PART.INVALID(Pos(innerStart, innerEnd), err))
case Right(r) => CONST_BYTESTR(Pos(start, end), PART.VALID(Pos(innerStart, innerEnd), ByteStr(r)))
}
}
private def destructuredTupleValuesP[_: P]: P[Seq[(Int, Option[PART[String]])]] =
P("(") ~
(Index ~ anyVarName.?).rep(
ContractLimits.MinTupleSize,
comment ~ "," ~ comment,
ContractLimits.MaxTupleSize
) ~
P(")")
private def letNameP[_: P]: P[Seq[(Int, Option[PART[String]])]] =
(Index ~ anyVarName.?).map(Seq(_))
def variableDefP[_: P](key: String): P[Seq[LET]] =
P(Index ~~ key ~~ &(CharIn(" \t\n\r")) ~/ comment ~ (destructuredTupleValuesP | letNameP) ~ comment ~ Index ~ ("=" ~/ Index ~ baseExpr.?).? ~~ Index)
.map {
case (start, names, valuePos, valueRaw, end) =>
val value = extractValue(valuePos, valueRaw)
val pos = Pos(start, end)
if (names.length == 1)
names.map {
case (nameStart, nameRaw) =>
val name = extractName(Pos(nameStart, nameStart), nameRaw)
LET(pos, name, value)
} else {
val exprRefName = "$t0" + s"${pos.start}${pos.end}"
val exprRef = LET(pos, VALID(pos, exprRefName), value)
val tupleValues =
names.zipWithIndex
.map {
case ((nameStart, nameRaw), i) =>
val namePos = Pos(nameStart, nameStart)
val name = extractName(namePos, nameRaw)
val getter = GETTER(
namePos,
REF(namePos, VALID(namePos, exprRefName)),
VALID(namePos, s"_${i + 1}")
)
LET(pos, name, getter)
}
exprRef +: tupleValues
}
}
// Hack to force parse of "\n". Otherwise it is treated as a separator
def newLineSep(implicit c: fastparse.P[Any]) = {
P(CharsWhileIn(" \t\r").repX ~~ "\n").repX(1)
}
def strictLetBlockP[_: P]: P[EXPR] = {
P(
Index ~~
variableDefP("strict") ~/
Pass ~~
(
("" ~ ";") ~/ (baseExpr | invalid).? |
newLineSep ~/ (baseExpr | invalid).? |
(Index ~~ CharPred(_ != '\n').repX).map(pos => Some(INVALID(Pos(pos, pos), "expected ';'")))
) ~~
Index
).map {
case (start, varNames, body, end) =>
val blockPos = Pos(start, end)
Macro.unwrapStrict(blockPos, varNames, body.getOrElse(INVALID(Pos(end, end), "expected a body")))
}
}
private def extractName(
namePos: Pos,
nameRaw: Option[PART[String]]
): PART[String] =
nameRaw.getOrElse(PART.INVALID(namePos, "expected a variable's name"))
private def extractValue(
valuePos: Int,
valueRaw: Option[(Int, Option[EXPR])]
): EXPR =
valueRaw
.map { case (pos, expr) => expr.getOrElse(INVALID(Pos(pos, pos), "expected a value's expression")) }
.getOrElse(INVALID(Pos(valuePos, valuePos), "expected a value"))
def block[_: P]: P[EXPR] = blockOr(INVALID(_, "expected ';'"))
private def blockOr(otherExpr: Pos => EXPR)(implicit c: fastparse.P[Any]): P[EXPR] = {
def declaration(implicit c: fastparse.P[Any]) = variableDefP("let") | funcP.map(Seq(_))
P(
Index ~~
declaration.rep(1) ~/
Pass ~~
(
("" ~ ";") ~/ (baseExpr | invalid).? |
newLineSep ~/ (baseExpr | invalid).? |
(Index ~~ CharPred(_ != '\n').repX).map(pos => Some(otherExpr(Pos(pos, pos))))
) ~~
Index
).map {
case (start, declarations, body, end) => {
declarations.flatten.reverse
.foldLeft(body.getOrElse(INVALID(Pos(end, end), "expected a body"))) { (acc, l) =>
BLOCK(Pos(start, end), l, acc)
}
}
}
}
def baseAtom[_: P](epn: fastparse.P[Any] => P[EXPR]) = {
def ep[_: P](implicit c: fastparse.P[Any]) = epn(c)
comment ~ P(foldP | ifP | matchP | ep | maybeAccessP) ~ comment
}
def baseExpr[_: P] = P(strictLetBlockP | binaryOp(baseAtom(block(_))(_), opsByPriority))
def blockOrDecl[_: P] = baseAtom(blockOr(p => REF(p, VALID(p, "unit")))(_))
def baseExprOrDecl[_: P] = binaryOp(baseAtom(blockOrDecl(_))(_), opsByPriority)
def singleBaseAtom[_: P] =
comment ~
P(foldP | ifP | matchP | maybeAccessP) ~
comment
def singleBaseExpr[_: P] = P(binaryOp(singleBaseAtom(_), opsByPriority))
def declaration[_: P] = P(variableDefP("let") | funcP.map(Seq(_)))
def revp[A, B](l: A, s: Seq[(B, A)], o: Seq[(A, B)] = Seq.empty): (Seq[(A, B)], A) = {
s.foldLeft((o, l)) { (acc, op) =>
(acc, op) match { case ((o, l), (b, a)) => ((l, b) +: o) -> a }
}
}
def binaryOp(atomA: fastparse.P[Any] => P[EXPR], rest: List[Either[List[BinaryOperation], List[BinaryOperation]]])(
implicit c: fastparse.P[Any]): P[EXPR] = {
def atom(implicit c: fastparse.P[Any]) = atomA(c)
rest match {
case Nil => unaryOp(atom(_), unaryOps)
case Left(kinds) :: restOps =>
def operand(implicit c: fastparse.P[Any]) = binaryOp(atom(_), restOps)
val kindc = kinds
.map(o => { implicit c: fastparse.P[Any] =>
o.parser
})
.reduce((plc, prc) => {
def pl(implicit c: fastparse.P[Any]) = plc(c)
def pr(implicit c: fastparse.P[Any]) = prc(c);
{ implicit c: fastparse.P[Any] =>
P(pl | pr)
}
})
def kind(implicit c: fastparse.P[Any]) = kindc(c)
P(Index ~~ operand ~ P(kind ~ (NoCut(operand) | Index.map(i => INVALID(Pos(i, i), "expected a second operator")))).rep).map {
case (start, left: EXPR, r: Seq[(BinaryOperation, EXPR)]) =>
r.foldLeft(left) { case (acc, (currKind, currOperand)) => currKind.expr(start, currOperand.position.end, acc, currOperand) }
}
case Right(kinds) :: restOps =>
def operand(implicit c: fastparse.P[Any]) = binaryOp(atom(_), restOps)
val kindc = kinds
.map(o => { implicit c: fastparse.P[Any] =>
o.parser
})
.reduce((plc, prc) => {
def pl(implicit c: fastparse.P[Any]) = plc(c)
def pr(implicit c: fastparse.P[Any]) = prc(c);
{ implicit c: fastparse.P[Any] =>
P(pl | pr)
}
})
def kind(implicit c: fastparse.P[Any]) = kindc(c)
P(Index ~~ operand ~ P(kind ~ (NoCut(operand) | Index.map(i => INVALID(Pos(i, i), "expected a second operator")))).rep).map {
case (start, left: EXPR, r: Seq[(BinaryOperation, EXPR)]) =>
val (ops, s) = revp(left, r)
ops.foldLeft(s) { case (acc, (currOperand, currKind)) => currKind.expr(start, currOperand.position.end, currOperand, acc) }
}
}
}
def unaryOp(atom: fastparse.P[Any] => P[EXPR], ops: List[UnaryOperation])(implicit c: fastparse.P[Any]): P[EXPR] =
(ops.foldRight(atom) {
case (op, accc) =>
def acc(implicit c: fastparse.P[Any]) = accc(c);
{ implicit c: fastparse.P[Any] =>
(Index ~~ op.parser.map(_ => ()) ~ P(unaryOp(atom, ops)) ~~ Index).map {
case (start, expr, end) => op.expr(start, end, expr)
} | acc
}
})(c)
def parseExpr(str: String): Parsed[EXPR] = {
def expr[_: P] = P(Start ~ unusedText ~ (baseExpr | invalid) ~ End)
parse(str, expr(_))
}
def parseExprOrDecl(str: String): Parsed[EXPR] = {
def e[_: P] = P(Start ~ unusedText ~ (baseExprOrDecl | invalid) ~ End)
parse(str, e(_))
}
def parseContract(str: String): Parsed[DAPP] = {
def contract[_: P] =
P(Start ~ unusedText ~ (declaration.rep) ~ comment ~ (annotatedFunc.rep) ~ declaration.rep ~ End ~~ Index)
.map {
case (ds, fs, t, end) => (DAPP(Pos(0, end), ds.flatten.toList, fs.toList), t)
}
parse(str, contract(_)) match {
case Parsed.Success((s, t), _) if (t.nonEmpty) =>
def contract[_: P] = P(Start ~ unusedText ~ (declaration.rep) ~ comment ~ (annotatedFunc.rep) ~ !declaration.rep(1) ~ End ~~ Index)
parse(str, contract(_)) match {
case Parsed.Failure(m, o, e) =>
Parsed.Failure(s"Local functions should be defined before @Callable one: ${str.substring(o)}", o, e)
case _ => throw new Exception("Parser error")
}
case Parsed.Success((s, _), v) => Parsed.Success(s, v)
case f: Parsed.Failure => Parsed.Failure(f.label, f.index, f.extra)
}
}
type RemovedCharPos = Pos
def parseExpressionWithErrorRecovery(scriptStr: String): Either[Throwable, (SCRIPT, Option[RemovedCharPos])] = {
def parse(str: String): Either[Parsed.Failure, SCRIPT] =
parseExpr(str) match {
case Parsed.Success(resExpr, _) => Right(SCRIPT(resExpr.position, resExpr))
case f: Parsed.Failure => Left(f)
}
parseWithError[SCRIPT](
new StringBuilder(scriptStr),
parse,
SCRIPT(Pos(0, scriptStr.length - 1), INVALID(Pos(0, scriptStr.length - 1), "Parsing failed. Unknown error."))
).map { exprAndErrorIndexes =>
val removedCharPosOpt = if (exprAndErrorIndexes._2.isEmpty) None else Some(Pos(exprAndErrorIndexes._2.min, exprAndErrorIndexes._2.max))
(exprAndErrorIndexes._1, removedCharPosOpt)
}
}
def parseDAPPWithErrorRecovery(scriptStr: String): Either[Throwable, (DAPP, Option[RemovedCharPos])] = {
def parse(str: String): Either[Parsed.Failure, DAPP] =
parseContract(str) match {
case Parsed.Success(resDAPP, _) => Right(resDAPP)
case f: Parsed.Failure => Left(f)
}
parseWithError[DAPP](
new StringBuilder(scriptStr),
parse,
DAPP(Pos(0, scriptStr.length - 1), List.empty, List.empty)
).map { dAppAndErrorIndexes =>
val removedCharPosOpt = if (dAppAndErrorIndexes._2.isEmpty) None else Some(Pos(dAppAndErrorIndexes._2.min, dAppAndErrorIndexes._2.max))
(dAppAndErrorIndexes._1, removedCharPosOpt)
}
}
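  // Error-recovery helper: starting at `pos`, walk left over whitespace and blank out the
  // first non-whitespace character, returning its position (or 0 if the start is reached).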
private def clearChar(source: StringBuilder, pos: Int): Int = {
if (pos >= 0) {
if (" \n\r".contains(source.charAt(pos))) {
clearChar(source, pos - 1)
} else {
source.setCharAt(pos, ' ')
pos
}
} else {
0
}
}
private def parseWithError[T](
source: StringBuilder,
parse: String => Either[Parsed.Failure, T],
defaultResult: T
): Either[Throwable, (T, Iterable[Int])] = {
parse(source.toString())
.map(dApp => (dApp, Nil))
.left
.flatMap {
case ex: Parsed.Failure => {
val errorLastPos = ex.index
val lastRemovedCharPos = clearChar(source, errorLastPos - 1)
val posList = Set(errorLastPos, lastRemovedCharPos)
if (lastRemovedCharPos > 0) {
parseWithError(source, parse, defaultResult)
.map(dAppAndErrorIndexes => (dAppAndErrorIndexes._1, posList ++ dAppAndErrorIndexes._2.toList))
} else {
Right((defaultResult, posList))
}
}
case _ => Left(new Exception("Unknown parsing error."))
}
}
}
| wavesplatform/Waves | lang/shared/src/main/scala/com/wavesplatform/lang/v1/parser/Parser.scala | Scala | mit | 29,101 |
package lila.tournament
import org.joda.time.DateTime
import scala.concurrent.duration.FiniteDuration
import lila.db.BSON._
import lila.user.{ User, UserRepo }
final class Winners(
mongoCache: lila.memo.MongoCache.Builder,
ttl: FiniteDuration) {
private implicit val WinnerBSONHandler =
reactivemongo.bson.Macros.handler[Winner]
private val scheduledCache = mongoCache[Int, List[Winner]](
prefix = "tournament:winner",
f = fetchScheduled,
timeToLive = ttl)
import Schedule.Freq
private def fetchScheduled(nb: Int): Fu[List[Winner]] = {
val since = DateTime.now minusMonths 1
List(Freq.Marathon, Freq.Monthly, Freq.Weekly, Freq.Daily).map { freq =>
TournamentRepo.lastFinishedScheduledByFreq(freq, since, 4) flatMap toursToWinners
}.sequenceFu.map(_.flatten)
}
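  // For each tournament look up its winner, dropping players flagged as engines;
  // at most 10 winners are kept overall.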
private def toursToWinners(tours: List[Tournament]): Fu[List[Winner]] =
tours.sortBy(_.minutes).map { tour =>
PlayerRepo winner tour.id flatMap {
case Some(player) => UserRepo isEngine player.userId map { engine =>
!engine option Winner(tour.id, tour.name, player.userId)
}
case _ => fuccess(none)
}
}.sequenceFu map (_.flatten take 10)
def scheduled(nb: Int): Fu[List[Winner]] = scheduledCache apply nb
}
| terokinnunen/lila | modules/tournament/src/main/Winners.scala | Scala | mit | 1,284 |
package io.buoyant.namerd
import com.twitter.util.{Await, Closable}
import io.buoyant.admin.{App, Build}
import java.io.File
import scala.io.Source
object Main extends App {
def main(): Unit = {
val build = Build.load("/io/buoyant/namerd/build.properties")
log.info("namerd %s (rev=%s) built at %s", build.version, build.revision, build.name)
args match {
case Array(path) =>
val config = loadNamerd(path)
val namerd = config.mk()
val admin = namerd.admin.serve(this, NamerdAdmin(config, namerd))
log.info(s"serving http admin on ${admin.boundAddress}")
val servers = namerd.interfaces.map { iface =>
val server = iface.serve()
log.info(s"serving ${iface.kind} interface on ${server.boundAddress}")
server
}
val telemeters = namerd.telemeters.map(_.run())
closeOnExit(Closable.sequence(
Closable.all(servers: _*),
admin
))
Await.all(servers: _*)
Await.all(telemeters: _*)
Await.result(admin)
case _ => exitOnError("usage: namerd path/to/config")
}
}
private def loadNamerd(path: String): NamerdConfig = {
val configText = path match {
case "-" =>
Source.fromInputStream(System.in).mkString
case path =>
val f = new File(path)
if (!f.isFile) throw new IllegalArgumentException(s"config is not a file: $path")
Source.fromFile(f).mkString
}
NamerdConfig.loadNamerd(configText)
}
}
| hhtpcd/linkerd | namerd/main/src/main/scala/io/buoyant/namerd/Main.scala | Scala | apache-2.0 | 1,527 |
package org.jetbrains.plugins.scala
package testingSupport
package specs2
/**
* @author Roman.Shein
* @since 16.10.2014.
*/
abstract class SCL7228Test extends Specs2TestCase {
addSourceFile("SCL7228Test.scala",
"""
|import org.specs2.mutable.Specification
|
|class SCL7228Test extends Specification {
| override def is = "foo (bar)" ! (true == true)
|}
""".stripMargin
)
def testScl7228(): Unit =
runTestByLocation(loc("SCL7228Test.scala", 3, 1),
assertConfigAndSettings(_, "SCL7228Test"),
assertResultTreeHasExactNamedPath(_, TestNodePath("[root]", "SCL7228Test", "foo (bar)"))
)
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/specs2/SCL7228Test.scala | Scala | apache-2.0 | 654 |
/*
* Copyright 2014 Frédéric Cabestre
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sigusr.mqtt.impl.protocol
import net.sigusr.mqtt.api.APIResponse
import net.sigusr.mqtt.impl.frames.Frame
private[protocol] sealed trait Action
private[protocol] case class Sequence(actions: Seq[Action] = Nil) extends Action
private[protocol] case class SendToClient(message: APIResponse) extends Action
private[protocol] case class SendToNetwork(frame: Frame) extends Action
private[protocol] case object ForciblyCloseTransport extends Action
private[protocol] case class SetKeepAlive(keepAlive: Long) extends Action
private[protocol] case class StartPingRespTimer(timeout: Long) extends Action
private[protocol] case class SetPendingPingResponse(isPending: Boolean) extends Action
private[protocol] case class StoreSentInFlightFrame(id: Int, frame: Frame) extends Action
private[protocol] case class RemoveSentInFlightFrame(id: Int) extends Action
private[protocol] case class StoreRecvInFlightFrameId(id: Int) extends Action
private[protocol] case class RemoveRecvInFlightFrameId(id: Int) extends Action
| fcabestre/Scala-MQTT-client | core/src/main/scala/net/sigusr/mqtt/impl/protocol/Action.scala | Scala | apache-2.0 | 1,627 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactiveTests
import monix.execution.Ack
import monix.execution.Scheduler.Implicits.global
import monix.execution.Ack.Continue
import monix.reactive.Observer
import monix.reactiveTests.SubscriberWhiteBoxAsyncTest.Value
import org.reactivestreams.tck.SubscriberWhiteboxVerification.WhiteboxSubscriberProbe
import org.reactivestreams.tck.SubscriberWhiteboxVerification
import org.reactivestreams.{Subscriber, Subscription}
import org.scalatestplus.testng.TestNGSuiteLike
import scala.concurrent.Future
import scala.util.Random
class SubscriberWhiteBoxAsyncTest extends SubscriberWhiteboxVerification[Value](env())
with TestNGSuiteLike {
def createSubscriber(probe: WhiteboxSubscriberProbe[Value]): Subscriber[Value] = {
val underlying = Observer.toReactiveSubscriber(new Observer[Value] {
def onNext(elem: Value): Future[Ack] = {
probe.registerOnNext(elem)
if (Random.nextInt() % 4 == 0)
Future(Continue)
else
Continue
}
def onError(ex: Throwable): Unit = {
probe.registerOnError(ex)
}
def onComplete(): Unit = {
probe.registerOnComplete()
}
})
new Subscriber[Value] {
def onError(t: Throwable): Unit =
underlying.onError(t)
def onSubscribe(s: Subscription): Unit = {
underlying.onSubscribe(s)
probe.registerOnSubscribe(new SubscriberWhiteboxVerification.SubscriberPuppet {
def triggerRequest(elements: Long): Unit = s.request(elements)
def signalCancel(): Unit = s.cancel()
})
}
def onComplete(): Unit =
underlying.onComplete()
def onNext(t: Value): Unit =
underlying.onNext(t)
}
}
def createElement(element: Int): Value = {
Value(element)
}
}
object SubscriberWhiteBoxAsyncTest {
case class Value(nr: Int)
}
| alexandru/monifu | reactiveTests/src/test/scala/monix/reactiveTests/SubscriberWhiteBoxAsyncTest.scala | Scala | apache-2.0 | 2,535 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jp.gihyo.spark
import org.scalatest.{BeforeAndAfterEach, Suite}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{StreamingContext, Seconds}
import jp.gihyo.spark.ch06.UserDic
private[spark]
trait TestStreamingContext extends BeforeAndAfterEach { self: Suite =>
@transient var ssc: StreamingContext = _
@transient var sc: SparkContext = _
val master = "local[2]"
val appN = "StreamingUnitTest"
val bd = Seconds(1)
override def beforeEach() {
super.beforeEach()
val conf = new SparkConf().setMaster(master)
.setAppName(appN)
.set("spark.streaming.clock", "org.apache.spark.util.ManualClock")
.registerKryoClasses(Array(classOf[UserDic]))
ssc = new StreamingContext(conf, bd)
sc = ssc.sparkContext
}
override def afterEach() {
try {
if (ssc != null) {
// stop with sc
ssc.stop(true)
}
ssc = null;
} finally {
super.afterEach()
}
}
}
| yu-iskw/gihyo-spark-book-example | src/test/scala/jp/gihyo/spark/TestStreamingContext.scala | Scala | apache-2.0 | 1,785 |
/**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.appjet.common.sars;
import java.io.UnsupportedEncodingException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
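// Hashes a UTF-8 string with SHA-1 and returns the digest as lowercase hex; for example,
// SimpleSHA1("abc") yields the standard test vector
// "a9993e364706816aba3e25717850c26c9cd0d89d".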
object SimpleSHA1 {
private val chars = Map(0 -> '0', 1 -> '1', 2 -> '2', 3 -> '3', 4 -> '4', 5 -> '5', 6 -> '6', 7 -> '7',
8 -> '8', 9 -> '9', 10 -> 'a', 11 -> 'b', 12 -> 'c', 13 -> 'd', 14 -> 'e', 15 -> 'f');
private def convertToHex(data: Array[Byte]): String = {
val buf = new StringBuilder();
for (b <- data) {
buf.append(chars(b >>> 4 & 0x0F));
buf.append(chars(b & 0x0F));
}
buf.toString();
}
def apply(text: String): String = {
val md = MessageDigest.getInstance("SHA-1");
    val bytes = text.getBytes("UTF-8");
    md.update(bytes, 0, bytes.length);
convertToHex(md.digest());
}
}
| jds2001/Etherpad-development | infrastructure/net.appjet.common.sars/sha1.scala | Scala | apache-2.0 | 1,386 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package viper.silicon.supporters
import org.slf4s.Logging
import viper.silver.ast
import viper.silver.verifier.PartialVerificationError
import viper.silver.verifier.reasons.InsufficientPermission
import viper.silicon.{Config, Stack}
import viper.silicon.interfaces._
import viper.silicon.interfaces.decider.Decider
import viper.silicon.interfaces.state._
import viper.silicon.state._
import viper.silicon.state.terms._
import viper.silicon.state.terms.perms.IsNoAccess
trait ChunkSupporter[ST <: Store[ST],
H <: Heap[H],
S <: State[ST, H, S],
C <: Context[C]] {
def consume(σ: S,
h: H,
name: String,
args: Seq[Term],
perms: Term,
pve: PartialVerificationError,
c: C,
locacc: ast.LocationAccess,
optNode: Option[ast.Node with ast.Positioned] = None)
(Q: (H, Term, C) => VerificationResult)
: VerificationResult
//def produce(σ: S, h: H, ch: BasicChunk, c: C): (H, C)
def withChunk(σ: S,
h: H,
name: String,
args: Seq[Term],
locacc: ast.LocationAccess,
pve: PartialVerificationError,
c: C)
(Q: (BasicChunk, C) => VerificationResult)
: VerificationResult
def withChunk(σ: S,
h: H,
name: String,
args: Seq[Term],
optPerms: Option[Term],
locacc: ast.LocationAccess,
pve: PartialVerificationError,
c: C)
(Q: (BasicChunk, C) => VerificationResult)
: VerificationResult
def getChunk(σ: S, h: H, name: String, args: Seq[Term], c: C): Option[BasicChunk]
def getChunk(σ: S, chunks: Iterable[Chunk], name: String, args: Seq[Term], c: C): Option[BasicChunk]
}
trait ChunkSupporterProvider[ST <: Store[ST],
H <: Heap[H],
S <: State[ST, H, S]]
{ this: Logging
with Evaluator[ST, H, S, DefaultContext[H]]
with Producer[ST, H, S, DefaultContext[H]]
with Consumer[ST, H, S, DefaultContext[H]]
with Brancher[ST, H, S, DefaultContext[H]]
with MagicWandSupporter[ST, H, S]
with HeuristicsSupporter[ST, H, S] =>
private[this] type C = DefaultContext[H]
protected val decider: Decider[ST, H, S, C]
protected val heapCompressor: HeapCompressor[ST, H, S, C]
protected val stateFactory: StateFactory[ST, H, S]
protected val config: Config
object chunkSupporter extends ChunkSupporter[ST, H, S, C] {
private case class PermissionsConsumptionResult(consumedCompletely: Boolean)
def consume(σ: S,
h: H,
name: String,
args: Seq[Term],
perms: Term,
pve: PartialVerificationError,
c: C,
locacc: ast.LocationAccess,
optNode: Option[ast.Node with ast.Positioned] = None)
(Q: (H, Term, C) => VerificationResult)
: VerificationResult = {
val description = optNode.orElse(Some(locacc)).map(node => s"consume ${node.pos}: $node").get
// val description = optNode match {
// case Some(node) => s"consume ${node.pos}: $node"
// case None => s"consume $id"
// }
heuristicsSupporter.tryOperation[H, Term](description)(σ, h, c)((σ1, h1, c1, QS) => {
consume(σ, h1, name, args, perms, locacc, pve, c1)((h2, optCh, c2) =>
optCh match {
case Some(ch) =>
QS(h2, ch.snap.convert(sorts.Snap), c2)
case None =>
/* Not having consumed anything could mean that we are in an infeasible
* branch, or that the permission amount to consume was zero.
* Returning a fresh snapshot in these cases should not lose any information.
*/
QS(h2, decider.fresh(sorts.Snap), c2)
})
})(Q)
}
private def consume(σ: S,
h: H,
name: String,
args: Seq[Term],
perms: Term,
locacc: ast.LocationAccess,
pve: PartialVerificationError,
c: C)
(Q: (H, Option[BasicChunk], C) => VerificationResult)
: VerificationResult = {
/* [2016-05-27 Malte] Performing this check slows down the verification quite
       * a bit (from 4 minutes to 5 minutes, for the whole test suite). Only
* checking the property on-failure (within decider.withChunk) is likely to
* perform better.
*/
// if (decider.check(σ, perms === NoPerm(), config.checkTimeout())) {
// /* Don't try looking for a chunk (which might fail) if zero permissions are
// * to be consumed.
// */
// Q(h, None, c)
// } else {
if (c.exhaleExt) {
/* TODO: Integrate magic wand's transferring consumption into the regular,
* (non-)exact consumption (the code following this if-branch)
*/
magicWandSupporter.transfer(σ, name, args, perms, locacc, pve, c)((optCh, c1) =>
Q(h, optCh, c1))
} else {
if (terms.utils.consumeExactRead(perms, c.constrainableARPs)) {
withChunk(σ, h, name, args, Some(perms), locacc, pve, c)((ch, c1) => {
if (decider.check(σ, IsNoAccess(PermMinus(ch.perm, perms)), config.checkTimeout())) {
Q(h - ch, Some(ch), c1)}
else
Q(h - ch + (ch - perms), Some(ch), c1)})
} else {
withChunk(σ, h, name, args, None, locacc, pve, c)((ch, c1) => {
decider.assume(PermLess(perms, ch.perm))
Q(h - ch + (ch - perms), Some(ch), c1)})
}
}
// }
}
def produce(σ: S, h: H, ch: BasicChunk, c: C): (H, C) = {
val (h1, matchedChunk) = heapCompressor.merge(σ, h, ch, c)
val c1 = c//recordSnapshot(c, matchedChunk, ch)
(h1, c1)
}
/*
* Looking up basic chunks
*/
def withChunk(σ: S,
h: H,
name: String,
args: Seq[Term],
locacc: ast.LocationAccess,
pve: PartialVerificationError,
c: C)
(Q: (BasicChunk, C) => VerificationResult)
: VerificationResult = {
decider.tryOrFail[BasicChunk](σ \\ h, c)((σ1, c1, QS, QF) =>
getChunk(σ1, σ1.h, name, args, c1) match {
case Some(chunk) =>
QS(chunk, c1)
case None =>
if (decider.checkSmoke())
Success() /* TODO: Mark branch as dead? */
else
QF(Failure(pve dueTo InsufficientPermission(locacc)).withLoad(args))}
)(Q)
}
def withChunk(σ: S,
h: H,
name: String,
args: Seq[Term],
optPerms: Option[Term],
locacc: ast.LocationAccess,
pve: PartialVerificationError,
c: C)
(Q: (BasicChunk, C) => VerificationResult)
: VerificationResult =
decider.tryOrFail[BasicChunk](σ \\ h, c)((σ1, c1, QS, QF) =>
withChunk(σ1, σ1.h, name, args, locacc, pve, c1)((ch, c2) => {
val permCheck = optPerms match {
case Some(p) => PermAtMost(p, ch.perm)
case None => ch.perm !== NoPerm()
}
// if (!isKnownToBeTrue(permCheck)) {
// val writer = bookkeeper.logfiles("withChunk")
// writer.println(permCheck)
// }
decider.assert(σ1, permCheck) {
case true =>
decider.assume(permCheck)
QS(ch, c2)
case false =>
QF(Failure(pve dueTo InsufficientPermission(locacc)).withLoad(args))}})
)(Q)
def getChunk(σ: S, h: H, name: String, args: Seq[Term], c: C): Option[BasicChunk] =
getChunk(σ, h.values, name, args, c)
def getChunk(σ: S, chunks: Iterable[Chunk], name: String, args: Seq[Term], c: C): Option[BasicChunk] = {
val relevantChunks = chunks collect { case ch: BasicChunk if ch.name == name => ch }
findChunk(σ, relevantChunks, args)
}
private final def findChunk(σ: S, chunks: Iterable[BasicChunk], args: Seq[Term]) = (
findChunkLiterally(chunks, args)
orElse findChunkWithProver(σ, chunks, args))
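    // Chunk lookup strategy: first try a purely syntactic match on the argument terms and
    // only fall back to asking the prover for term equality when that fails, since a
    // prover query is far more expensive than a syntactic comparison.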
private def findChunkLiterally(chunks: Iterable[BasicChunk], args: Seq[Term]) =
chunks find (ch => ch.args == args)
private def findChunkWithProver(σ: S, chunks: Iterable[BasicChunk], args: Seq[Term]) = {
// fcwpLog.println(id)
val chunk =
chunks find (ch =>
decider.check(σ, And(ch.args zip args map (x => x._1 === x._2): _*), config.checkTimeout()))
chunk
}
}
}
| sccblom/vercors | viper/silicon/src/main/scala/supporters/ChunkSupporter.scala | Scala | mpl-2.0 | 9,586 |
package artisanal.pickle.maker
import models._
import parser._
import org.specs2._
import mutable._
import specification._
import scala.reflect.internal.pickling.ByteCodecs
import scala.tools.scalap.scalax.rules.scalasig._
import com.novus.salat.annotations.util._
import scala.reflect.ScalaSignature
class ShortSpec extends mutable.Specification {
"a ScalaSig for case class MyRecord_Short(b: Short)" should {
"have the correct string" in {
val mySig = new ScalaSig(List("case class"), List("models", "MyRecord_Short"), List(("b", "Short")))
val correctParsedSig = SigParserHelper.parseByteCodeFromAnnotation(classOf[MyRecord_Short]).map(ScalaSigAttributeParsers.parse(_)).get
val myParsedSig = SigParserHelper.parseByteCodeFromMySig(mySig).map(ScalaSigAttributeParsers.parse(_)).get
correctParsedSig.toString === myParsedSig.toString
}
}
}
| julianpeeters/artisanal-pickle-maker | src/test/scala/singleValueMember/ShortSpec.scala | Scala | apache-2.0 | 882 |
package com.acework.js.components.bootstrap
import com.acework.js.components.bootstrap.Utils._
import japgolly.scalajs.react.Addons.ReactCloneWithProps
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import scala.scalajs.js._
/**
* Created by weiyin on 10/03/15.
*/
object ListGroup extends BootstrapComponent {
override type P = ListGroup
override type S = Unit
override type B = Unit
override type N = TopNode
override def defaultProps = ListGroup()
case class ListGroup(fill: Boolean = false, onClick: UndefOr[() => Unit] = undefined) {
def apply(children: ReactNode*) = component(this, children)
def apply() = component(this)
}
override val component = ReactComponentB[ListGroup]("ListGroup")
.render { (P, C) =>
def renderListItem(child: ReactNode, index: Int) = {
ReactCloneWithProps(child, getChildKeyAndRef(child, index))
}
<.div(^.className := "list-group")(
ValidComponentChildren.map(C, renderListItem)
)
}.build
}
| lvitaly/scalajs-react-bootstrap | core/src/main/scala/com/acework/js/components/bootstrap/ListGroup.scala | Scala | mit | 1,027 |
package models
import java.sql.Timestamp
//import scala.slick.driver.PostgresDriver.simple._
//import scala.slick.driver.MySQLDriver.simple._
import scala.slick.driver.H2Driver.simple._
import models.DTO.{Comment, Customer, User, Ticket}
/**
* Created by pnagarjuna on 22/05/15.
*/
object Tables {
val usersTable = "users"
class Users(tag: Tag) extends Table[User](tag, usersTable) {
def email = column[String]("email", O.NotNull)
def password = column[String]("password", O.NotNull)
def timestamp = column[Timestamp]("timestamp", O.NotNull)
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def * = (email, password, timestamp, id.?) <>(User.tupled, User.unapply)
}
val customerTable = "customers"
class Customers(tag: Tag) extends Table[Customer](tag, customerTable) {
def email = column[String]("email", O.NotNull)
def timestamp = column[Timestamp]("timestamp", O.NotNull)
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def * = (email, timestamp, id.?) <> (Customer.tupled, Customer.unapply)
}
val ticketTable = "tickets"
class Tickets(tag: Tag) extends Table[Ticket](tag, ticketTable) {
def authorId = column[Long]("authorId", O.NotNull)
def customerId = column[Long]("customerId", O.NotNull)
def assignedToId = column[Long]("assignedId", O.Nullable)
def name = column[String]("name", O.NotNull)
def desc = column[String]("desc", O.NotNull)
def status = column[Int]("status", O.NotNull)
def timestamp = column[Timestamp]("timestamp", O.NotNull)
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def * = (authorId, customerId, assignedToId.?, name, desc, status, timestamp, id.?) <> ((Ticket.apply _).tupled, Ticket.unapply)
def authorIdFK = foreignKey("tickets_author_id_fk", authorId, TableQuery[Users])(_.id, ForeignKeyAction.Cascade)
def customerIdFK = foreignKey("tickets_customer_id_fk", customerId, TableQuery[Customers])(_.id, ForeignKeyAction.Cascade, ForeignKeyAction.Cascade)
def assignedToIdFK = foreignKey("tickets_assigned_to_id_fk", assignedToId, TableQuery[Users])(_.id, ForeignKeyAction.Cascade, ForeignKeyAction.Cascade)
}
val commentTable = "comments"
class Comments(tag: Tag) extends Table[Comment](tag, commentTable) {
def commenterId = column[Long]("commenterId", O.NotNull)
def ticketId = column[Long]("ticketId", O.NotNull)
def comment = column[String]("comment", O.NotNull)
def timestamp = column[Timestamp]("timestamp", O.NotNull)
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def * = (commenterId, ticketId, comment, timestamp, id.?) <> (Comment.tupled, Comment.unapply)
def commenterIdFK = foreignKey("comments_commenter_id_fk", commenterId, TableQuery[Users])(_.id, ForeignKeyAction.Cascade, ForeignKeyAction.Cascade)
def ticketIdFK = foreignKey("comments_ticket_id_fk", ticketId, TableQuery[Tickets])(_.id, ForeignKeyAction.Cascade, ForeignKeyAction.Cascade)
}
val users = TableQuery[Users]
val customers = TableQuery[Customers]
val tickets = TableQuery[Tickets]
val comments = TableQuery[Comments]
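  // Illustrative query sketch (not part of the original schema definition), assuming a
  // Slick 2.x Database instance `db`:
  //   db.withSession { implicit session =>
  //     users.filter(_.email === "user@example.com").firstOption
  //   }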
} | pamu/ticketing-system | app/models/Tables.scala | Scala | apache-2.0 | 3,151 |
package com.featurefm.metrics
import akka.actor._
import com.codahale.metrics.health.HealthCheckRegistry
import nl.grons.metrics.scala.{CheckedBuilder, MetricName}
import scala.collection.concurrent.TrieMap
class HealthCheckExtension(system: ExtendedActorSystem) extends Extension {
/** The application wide registry. */
private val registry: collection.concurrent.Map[String,HealthCheck] = TrieMap()
val codaRegistry = new com.codahale.metrics.health.HealthCheckRegistry()
lazy val codaBridge = new CheckedBuilder {
override lazy val metricBaseName: MetricName = MetricName(system.name)
override val registry: HealthCheckRegistry = codaRegistry
}
/**
* Get a copy of the registered `HealthCheck` definitions
* @return
*/
def getChecks: Seq[HealthCheck] = registry.values.toList
/**
* Add a health check to the registry
* @param check
*/
def addCheck(check: HealthCheck): Unit = {
registry.putIfAbsent(check.healthCheckName ,check)
import system.dispatcher
codaBridge.healthCheck(check.healthCheckName) {
check.getHealth map { h =>
if (h.state != HealthState.GOOD) throw new RuntimeException(h.details)
}
}
}
def removeCheck(check: HealthCheck) = {
registry.remove(check.healthCheckName, check)
}
}
object Health extends ExtensionId[HealthCheckExtension] with ExtensionIdProvider {
//The lookup method is required by ExtensionIdProvider,
  // so we return ourselves here; this allows us
// to configure our extension to be loaded when
// the ActorSystem starts up
override def lookup() = Health
//This method will be called by Akka
// to instantiate our Extension
override def createExtension(system: ExtendedActorSystem) = new HealthCheckExtension(system)
def apply()(implicit system: ActorSystem): HealthCheckExtension =
system.registerExtension(this)
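  // Illustrative usage (the concrete check instance is hypothetical):
  //   val health = Health()(system)
  //   health.addCheck(myCheck) // myCheck supplies healthCheckName and getHealth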
}
| ListnPlay/Kastomer | src/main/scala/com/featurefm/metrics/HealthCheckExtension.scala | Scala | mit | 1,881 |
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend
import akka.actor.{ ActorSystem, PoisonPill, Props }
import akka.event.LoggingReceive
import akka.persistence.{ Persistence, PersistentActor }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._
import scala.concurrent.{ Await, ExecutionContext }
import pprint._, Config.Colors.PPrintConfig
object WrapperTest extends App {
class Persister(val persistenceId: String = "foo") extends PersistentActor {
val done = (_: Any) ⇒ sender() ! akka.actor.Status.Success("done")
override def receiveRecover: Receive = akka.actor.Actor.ignoringBehavior
override def receiveCommand: Receive = LoggingReceive {
case xs: List[_] ⇒
log2(xs, "persisting")
persistAll(xs)(done)
case "ping" ⇒
log2("ping => pong", "ping")
sender() ! "pong"
case msg: String ⇒
log2(msg, "persisting")
persist(msg)(done)
}
}
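  // Scenario below: persist a batch via persistAll, then individual events via persist,
  // restart the persister, and check that the new incarnation still responds to "ping".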
val configName = "wrapper-application.conf"
lazy val configuration = ConfigFactory.load(configName)
implicit val system: ActorSystem = ActorSystem("wrapper", configuration)
implicit val mat: Materializer = ActorMaterializer()
sys.addShutdownHook(system.terminate())
implicit val ec: ExecutionContext = system.dispatcher
val extension = Persistence(system)
var p = system.actorOf(Props(new Persister()))
val tp = TestProbe()
tp.send(p, (1 to 3).map("a-" + _).toList)
tp.expectMsg(akka.actor.Status.Success("done"))
(1 to 3).map("b-" + _).foreach { msg ⇒
tp.send(p, msg)
tp.expectMsg(akka.actor.Status.Success("done"))
}
tp watch p
tp.send(p, PoisonPill)
tp.expectTerminated(p)
p = system.actorOf(Props(new Persister()))
tp.send(p, "ping")
tp.expectMsg("pong")
Await.ready(system.terminate(), 10.seconds)
}
| dnvriend/demo-akka-persistence-jdbc | src/main/scala/com/github/dnvriend/WrapperTest.scala | Scala | apache-2.0 | 2,476 |
package com.pygmalios.reactiveinflux.jawa
import java.util
import java.util.Date
import com.pygmalios.reactiveinflux._
import com.pygmalios.reactiveinflux.jawa.Conversions._
import com.pygmalios.{reactiveinflux => sc}
import org.joda.time.{DateTime, Instant}
class JavaPoint(val underlyingPoint: sc.Point) extends JavaPointNoTime(underlyingPoint) with Point {
def this(time: PointTime,
measurement: String,
tags: util.Map[String, String],
fields: util.Map[String, Object]) {
this(sc.Point(sc.PointTime.ofEpochSecond(time.getSeconds, time.getNano), measurement, tagsToScala(tags), fieldsToScala(fields)))
}
def this(dateTime: DateTime,
measurement: String,
tags: util.Map[String, String],
fields: util.Map[String, Object]) {
this(sc.Point(sc.PointTime(dateTime), measurement, tagsToScala(tags), fieldsToScala(fields)))
}
def this(instant: Instant,
measurement: String,
tags: util.Map[String, String],
fields: util.Map[String, Object]) {
this(sc.Point(sc.PointTime(instant), measurement, tagsToScala(tags), fieldsToScala(fields)))
}
def this(date: Date,
measurement: String,
tags: util.Map[String, String],
fields: util.Map[String, Object]) {
this(sc.Point(sc.PointTime(date), measurement, tagsToScala(tags), fieldsToScala(fields)))
}
override lazy val getTime: PointTime = new JavaPointTime(underlyingPoint.time)
}
| pygmalios/reactiveinflux | src/main/scala/com/pygmalios/reactiveinflux/jawa/JavaPoint.scala | Scala | apache-2.0 | 1,487 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.partial
import java.util.{HashMap => JHashMap}
import scala.collection.JavaConversions.mapAsScalaMap
import scala.collection.Map
import scala.collection.mutable.HashMap
import scala.reflect.ClassTag
import cern.jet.stat.Probability
import org.apache.spark.util.collection.OpenHashMap
/**
* An ApproximateEvaluator for counts by key. Returns a map of key to confidence interval.
*/
private[spark] class GroupedCountEvaluator[T : ClassTag](totalOutputs: Int, confidence: Double)
extends ApproximateEvaluator[OpenHashMap[T,Long], Map[T, BoundedDouble]] {
var outputsMerged = 0
var sums = new OpenHashMap[T,Long]() // Sum of counts for each key
override def merge(outputId: Int, taskResult: OpenHashMap[T,Long]) {
outputsMerged += 1
taskResult.foreach { case (key, value) =>
sums.changeValue(key, value, _ + value)
}
}
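  // When only a fraction p of the outputs has been merged, each observed count `sum` is
  // scaled up to an estimated mean of (sum + 1 - p) / p with variance (sum + 1)(1 - p) / p^2,
  // and a symmetric normal confidence interval is placed around that mean.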
override def currentResult(): Map[T, BoundedDouble] = {
if (outputsMerged == totalOutputs) {
val result = new JHashMap[T, BoundedDouble](sums.size)
sums.foreach { case (key, sum) =>
result(key) = new BoundedDouble(sum, 1.0, sum, sum)
}
result
} else if (outputsMerged == 0) {
new HashMap[T, BoundedDouble]
} else {
val p = outputsMerged.toDouble / totalOutputs
val confFactor = Probability.normalInverse(1 - (1 - confidence) / 2)
val result = new JHashMap[T, BoundedDouble](sums.size)
sums.foreach { case (key, sum) =>
val mean = (sum + 1 - p) / p
val variance = (sum + 1) * (1 - p) / (p * p)
val stdev = math.sqrt(variance)
val low = mean - confFactor * stdev
val high = mean + confFactor * stdev
result(key) = new BoundedDouble(mean, confidence, low, high)
}
result
}
}
}
| yelshater/hadoop-2.3.0 | spark-core_2.10-1.0.0-cdh5.1.0/src/main/scala/org/apache/spark/partial/GroupedCountEvaluator.scala | Scala | apache-2.0 | 2,598 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.util.UUID
import java.util.concurrent.atomic.AtomicLong
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.{CarbonDatasourceHadoopRelation, RuntimeConfig, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, SubqueryAlias}
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.cache.dictionary.ManageDictionaryAndBTree
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.fileoperations.FileWriteOperation
import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonMetadata, CarbonTableIdentifier}
import org.apache.carbondata.core.metadata.converter.ThriftWrapperSchemaConverterImpl
import org.apache.carbondata.core.metadata.schema
import org.apache.carbondata.core.metadata.schema.table
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.util.CarbonUtil
import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}
import org.apache.carbondata.core.writer.ThriftWriter
import org.apache.carbondata.format.{SchemaEvolutionEntry, TableInfo}
import org.apache.carbondata.processing.merger.TableMeta
import org.apache.carbondata.spark.util.CarbonSparkUtil
case class MetaData(var tablesMeta: ArrayBuffer[TableMeta]) {
// clear the metadata
def clear(): Unit = {
tablesMeta.clear()
}
}
case class CarbonMetaData(dims: Seq[String],
msrs: Seq[String],
carbonTable: CarbonTable,
dictionaryMap: DictionaryMap)
case class DictionaryMap(dictionaryMap: Map[String, Boolean]) {
def get(name: String): Option[Boolean] = {
dictionaryMap.get(name.toLowerCase)
}
}
class CarbonFileMetastore(conf: RuntimeConfig) extends CarbonMetaStore {
@transient
val LOGGER = LogServiceFactory.getLogService("org.apache.spark.sql.CarbonMetastoreCatalog")
val tableModifiedTimeStore = new java.util.HashMap[String, Long]()
tableModifiedTimeStore
.put(CarbonCommonConstants.DATABASE_DEFAULT_NAME, System.currentTimeMillis())
private val nextId = new AtomicLong(0)
def nextQueryId: String = {
System.nanoTime() + ""
}
val metadata = MetaData(new ArrayBuffer[TableMeta]())
/**
   * Creates a CarbonRelation for the given table identifier from the passed parameters.
*
* @param parameters
* @param absIdentifier
* @param sparkSession
*/
override def createCarbonRelation(parameters: Map[String, String],
absIdentifier: AbsoluteTableIdentifier,
sparkSession: SparkSession): CarbonRelation = {
val database = absIdentifier.getCarbonTableIdentifier.getDatabaseName
val tableName = absIdentifier.getCarbonTableIdentifier.getTableName
val tables = getTableFromMetadataCache(database, tableName)
tables match {
case Some(t) =>
CarbonRelation(database, tableName,
CarbonSparkUtil.createSparkMeta(t.carbonTable), t)
case None =>
readCarbonSchema(absIdentifier) match {
case Some(meta) =>
CarbonRelation(database, tableName,
CarbonSparkUtil.createSparkMeta(meta.carbonTable), meta)
case None =>
throw new NoSuchTableException(database, tableName)
}
}
}
def lookupRelation(dbName: Option[String], tableName: String)
(sparkSession: SparkSession): LogicalPlan = {
lookupRelation(TableIdentifier(tableName, dbName))(sparkSession)
}
override def lookupRelation(tableIdentifier: TableIdentifier)
(sparkSession: SparkSession): LogicalPlan = {
val database = tableIdentifier.database.getOrElse(
sparkSession.catalog.currentDatabase)
val relation = sparkSession.sessionState.catalog.lookupRelation(tableIdentifier) match {
case SubqueryAlias(_,
LogicalRelation(carbonDatasourceHadoopRelation: CarbonDatasourceHadoopRelation, _, _),
_) =>
carbonDatasourceHadoopRelation.carbonRelation
case LogicalRelation(
carbonDatasourceHadoopRelation: CarbonDatasourceHadoopRelation, _, _) =>
carbonDatasourceHadoopRelation.carbonRelation
case _ => throw new NoSuchTableException(database, tableIdentifier.table)
}
relation
}
/**
* This method will search for a table in the catalog metadata
*
* @param database
* @param tableName
* @return
*/
def getTableFromMetadataCache(database: String, tableName: String): Option[TableMeta] = {
metadata.tablesMeta
.find(c => c.carbonTableIdentifier.getDatabaseName.equalsIgnoreCase(database) &&
c.carbonTableIdentifier.getTableName.equalsIgnoreCase(tableName))
}
def tableExists(
table: String,
databaseOp: Option[String] = None)(sparkSession: SparkSession): Boolean = {
tableExists(TableIdentifier(table, databaseOp))(sparkSession)
}
override def tableExists(tableIdentifier: TableIdentifier)
(sparkSession: SparkSession): Boolean = {
try {
lookupRelation(tableIdentifier)(sparkSession)
} catch {
case e: Exception =>
return false
}
true
}
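  // Reads the schema thrift file of the given table from the store (if present), converts
  // it to the wrapper TableInfo, registers it with CarbonMetadata and caches a TableMeta
  // entry; returns None when no schema file exists for the table.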
private def readCarbonSchema(identifier: AbsoluteTableIdentifier): Option[TableMeta] = {
val dbName = identifier.getCarbonTableIdentifier.getDatabaseName
val tableName = identifier.getCarbonTableIdentifier.getTableName
val storePath = identifier.getStorePath
val carbonTableIdentifier = new CarbonTableIdentifier(dbName.toLowerCase(),
tableName.toLowerCase(), UUID.randomUUID().toString)
val carbonTablePath =
CarbonStorePath.getCarbonTablePath(storePath, carbonTableIdentifier)
val tableMetadataFile = carbonTablePath.getSchemaFilePath
val fileType = FileFactory.getFileType(tableMetadataFile)
if (FileFactory.isFileExist(tableMetadataFile, fileType)) {
val tableUniqueName = dbName + "_" + tableName
val tableInfo: TableInfo = CarbonUtil.readSchemaFile(tableMetadataFile)
val schemaConverter = new ThriftWrapperSchemaConverterImpl
val wrapperTableInfo = schemaConverter
.fromExternalToWrapperTableInfo(tableInfo, dbName, tableName, storePath)
val schemaFilePath = CarbonStorePath
.getCarbonTablePath(storePath, carbonTableIdentifier).getSchemaFilePath
wrapperTableInfo.setStorePath(storePath)
wrapperTableInfo
.setMetaDataFilepath(CarbonTablePath.getFolderContainingFile(schemaFilePath))
CarbonMetadata.getInstance().loadTableMetadata(wrapperTableInfo)
val carbonTable = CarbonMetadata.getInstance().getCarbonTable(tableUniqueName)
val tableMeta = new TableMeta(carbonTable.getCarbonTableIdentifier,
identifier.getStorePath,
identifier.getTablePath,
carbonTable)
metadata.tablesMeta += tableMeta
Some(tableMeta)
} else {
None
}
}
/**
* This method will overwrite the existing schema and update it with the given details
*
   * @param newTableIdentifier
   * @param oldTableIdentifier
* @param thriftTableInfo
* @param schemaEvolutionEntry
* @param tablePath
* @param sparkSession
*/
def updateTableSchema(newTableIdentifier: CarbonTableIdentifier,
oldTableIdentifier: CarbonTableIdentifier,
thriftTableInfo: org.apache.carbondata.format.TableInfo,
schemaEvolutionEntry: SchemaEvolutionEntry,
tablePath: String) (sparkSession: SparkSession): String = {
val absoluteTableIdentifier = AbsoluteTableIdentifier.fromTablePath(tablePath)
val schemaConverter = new ThriftWrapperSchemaConverterImpl
if (schemaEvolutionEntry != null) {
thriftTableInfo.fact_table.schema_evolution.schema_evolution_history.add(schemaEvolutionEntry)
}
val wrapperTableInfo = schemaConverter
.fromExternalToWrapperTableInfo(thriftTableInfo,
newTableIdentifier.getDatabaseName,
newTableIdentifier.getTableName,
absoluteTableIdentifier.getStorePath)
val identifier =
new CarbonTableIdentifier(newTableIdentifier.getDatabaseName,
newTableIdentifier.getTableName,
wrapperTableInfo.getFactTable.getTableId)
val path = createSchemaThriftFile(wrapperTableInfo,
thriftTableInfo,
identifier)
addTableCache(wrapperTableInfo,
AbsoluteTableIdentifier.from(absoluteTableIdentifier.getStorePath,
newTableIdentifier.getDatabaseName,
newTableIdentifier.getTableName))
path
}
/**
   * This method is used to remove the schema evolution entry in case of failure.
*
* @param carbonTableIdentifier
* @param thriftTableInfo
* @param tablePath
* @param sparkSession
*/
def revertTableSchema(carbonTableIdentifier: CarbonTableIdentifier,
thriftTableInfo: org.apache.carbondata.format.TableInfo,
tablePath: String)(sparkSession: SparkSession): String = {
val tableIdentifier = AbsoluteTableIdentifier.fromTablePath(tablePath)
val schemaConverter = new ThriftWrapperSchemaConverterImpl
val wrapperTableInfo = schemaConverter
.fromExternalToWrapperTableInfo(thriftTableInfo,
carbonTableIdentifier.getDatabaseName,
carbonTableIdentifier.getTableName,
tableIdentifier.getStorePath)
val evolutionEntries = thriftTableInfo.fact_table.schema_evolution.schema_evolution_history
evolutionEntries.remove(evolutionEntries.size() - 1)
wrapperTableInfo.setStorePath(tableIdentifier.getStorePath)
val path = createSchemaThriftFile(wrapperTableInfo,
thriftTableInfo,
tableIdentifier.getCarbonTableIdentifier)
addTableCache(wrapperTableInfo, tableIdentifier)
path
}
/**
*
* Prepare Thrift Schema from wrapper TableInfo and write to Schema file.
* Load CarbonTable from wrapper tableInfo
*
*/
def saveToDisk(tableInfo: schema.table.TableInfo, tablePath: String) {
val schemaConverter = new ThriftWrapperSchemaConverterImpl
val dbName = tableInfo.getDatabaseName
val tableName = tableInfo.getFactTable.getTableName
val thriftTableInfo = schemaConverter
.fromWrapperToExternalTableInfo(tableInfo, dbName, tableName)
val identifier = AbsoluteTableIdentifier.fromTablePath(tablePath)
tableInfo.setStorePath(identifier.getStorePath)
createSchemaThriftFile(tableInfo,
thriftTableInfo,
identifier.getCarbonTableIdentifier)
LOGGER.info(s"Table $tableName for Database $dbName created successfully.")
}
/**
* Generates schema string from TableInfo
*/
override def generateTableSchemaString(tableInfo: schema.table.TableInfo,
tablePath: String): String = {
val tableIdentifier = AbsoluteTableIdentifier.fromTablePath(tablePath)
val carbonTablePath = CarbonStorePath.getCarbonTablePath(tableIdentifier)
val schemaMetadataPath =
CarbonTablePath.getFolderContainingFile(carbonTablePath.getSchemaFilePath)
tableInfo.setMetaDataFilepath(schemaMetadataPath)
tableInfo.setStorePath(tableIdentifier.getStorePath)
val schemaEvolutionEntry = new schema.SchemaEvolutionEntry
schemaEvolutionEntry.setTimeStamp(tableInfo.getLastUpdatedTime)
tableInfo.getFactTable.getSchemaEvalution.getSchemaEvolutionEntryList.add(schemaEvolutionEntry)
removeTableFromMetadata(tableInfo.getDatabaseName, tableInfo.getFactTable.getTableName)
CarbonMetadata.getInstance().loadTableMetadata(tableInfo)
addTableCache(tableInfo, tableIdentifier)
CarbonUtil.convertToMultiGsonStrings(tableInfo, " ", "", ",")
}
/**
* This method will write the schema thrift file in carbon store and load table metadata
*
* @param tableInfo
* @param thriftTableInfo
* @return
*/
private def createSchemaThriftFile(tableInfo: schema.table.TableInfo,
thriftTableInfo: TableInfo,
carbonTableIdentifier: CarbonTableIdentifier): String = {
val carbonTablePath = CarbonStorePath.
getCarbonTablePath(tableInfo.getStorePath, carbonTableIdentifier)
val schemaFilePath = carbonTablePath.getSchemaFilePath
val schemaMetadataPath = CarbonTablePath.getFolderContainingFile(schemaFilePath)
tableInfo.setMetaDataFilepath(schemaMetadataPath)
val fileType = FileFactory.getFileType(schemaMetadataPath)
if (!FileFactory.isFileExist(schemaMetadataPath, fileType)) {
FileFactory.mkdirs(schemaMetadataPath, fileType)
}
val thriftWriter = new ThriftWriter(schemaFilePath, false)
thriftWriter.open(FileWriteOperation.OVERWRITE)
thriftWriter.write(thriftTableInfo)
thriftWriter.close()
updateSchemasUpdatedTime(touchSchemaFileSystemTime(tableInfo.getStorePath))
carbonTablePath.getPath
}
protected def addTableCache(tableInfo: table.TableInfo,
absoluteTableIdentifier: AbsoluteTableIdentifier) = {
val identifier = absoluteTableIdentifier.getCarbonTableIdentifier
CarbonMetadata.getInstance.removeTable(tableInfo.getTableUniqueName)
removeTableFromMetadata(identifier.getDatabaseName, identifier.getTableName)
CarbonMetadata.getInstance().loadTableMetadata(tableInfo)
val tableMeta = new TableMeta(identifier, absoluteTableIdentifier.getStorePath,
absoluteTableIdentifier.getTablePath,
CarbonMetadata.getInstance().getCarbonTable(identifier.getTableUniqueName))
metadata.tablesMeta += tableMeta
}
/**
* This method will remove the table meta from catalog metadata array
*
* @param dbName
* @param tableName
*/
def removeTableFromMetadata(dbName: String, tableName: String): Unit = {
val metadataToBeRemoved: Option[TableMeta] = getTableFromMetadataCache(dbName, tableName)
metadataToBeRemoved match {
case Some(tableMeta) =>
metadata.tablesMeta -= tableMeta
CarbonMetadata.getInstance.removeTable(dbName + "_" + tableName)
case None =>
if (LOGGER.isDebugEnabled) {
LOGGER.debug(s"No entry for table $tableName in database $dbName")
}
}
}
private def updateMetadataByWrapperTable(
wrapperTableInfo: org.apache.carbondata.core.metadata.schema.table.TableInfo): Unit = {
CarbonMetadata.getInstance().loadTableMetadata(wrapperTableInfo)
val carbonTable = CarbonMetadata.getInstance().getCarbonTable(
wrapperTableInfo.getTableUniqueName)
for (i <- metadata.tablesMeta.indices) {
if (wrapperTableInfo.getTableUniqueName.equals(
metadata.tablesMeta(i).carbonTableIdentifier.getTableUniqueName)) {
metadata.tablesMeta(i).carbonTable = carbonTable
}
}
}
def updateMetadataByThriftTable(schemaFilePath: String,
tableInfo: TableInfo, dbName: String, tableName: String, storePath: String): Unit = {
tableInfo.getFact_table.getSchema_evolution.getSchema_evolution_history.get(0)
.setTime_stamp(System.currentTimeMillis())
val schemaConverter = new ThriftWrapperSchemaConverterImpl
val wrapperTableInfo = schemaConverter
.fromExternalToWrapperTableInfo(tableInfo, dbName, tableName, storePath)
wrapperTableInfo
.setMetaDataFilepath(CarbonTablePath.getFolderContainingFile(schemaFilePath))
wrapperTableInfo.setStorePath(storePath)
updateMetadataByWrapperTable(wrapperTableInfo)
}
def isTablePathExists(tableIdentifier: TableIdentifier)(sparkSession: SparkSession): Boolean = {
try {
val tablePath = lookupRelation(tableIdentifier)(sparkSession).
asInstanceOf[CarbonRelation].tableMeta.tablePath
val fileType = FileFactory.getFileType(tablePath)
FileFactory.isFileExist(tablePath, fileType)
} catch {
case e: Exception =>
false
}
}
def dropTable(tablePath: String, tableIdentifier: TableIdentifier)
(sparkSession: SparkSession) {
val dbName = tableIdentifier.database.get
val tableName = tableIdentifier.table
val identifier = AbsoluteTableIdentifier.fromTablePath(tablePath)
val metadataFilePath = CarbonStorePath.getCarbonTablePath(identifier).getMetadataDirectoryPath
val carbonTable = CarbonMetadata.getInstance.getCarbonTable(dbName + "_" + tableName)
if (null != carbonTable) {
// clear driver B-tree and dictionary cache
ManageDictionaryAndBTree.clearBTreeAndDictionaryLRUCache(carbonTable)
}
val fileType = FileFactory.getFileType(metadataFilePath)
if (FileFactory.isFileExist(metadataFilePath, fileType)) {
      // While dropping the table we refresh the schema modified time so that, if anything
      // has changed from another beeline session, it gets reloaded here.
checkSchemasModifiedTimeAndReloadTables(identifier.getStorePath)
removeTableFromMetadata(dbName, tableName)
updateSchemasUpdatedTime(touchSchemaFileSystemTime(identifier.getStorePath))
CarbonHiveMetadataUtil.invalidateAndDropTable(dbName, tableName, sparkSession)
// discard cached table info in cachedDataSourceTables
sparkSession.sessionState.catalog.refreshTable(tableIdentifier)
}
}
private def getTimestampFileAndType(basePath: String) = {
val timestampFile = basePath + "/" + CarbonCommonConstants.SCHEMAS_MODIFIED_TIME_FILE
val timestampFileType = FileFactory.getFileType(timestampFile)
(timestampFile, timestampFileType)
}
/**
* This method will put the updated timestamp of schema file in the table modified time store map
*
* @param timeStamp
*/
private def updateSchemasUpdatedTime(timeStamp: Long) {
tableModifiedTimeStore.put("default", timeStamp)
}
def updateAndTouchSchemasUpdatedTime(basePath: String) {
updateSchemasUpdatedTime(touchSchemaFileSystemTime(basePath))
}
/**
   * This method will create the schema timestamp file if it does not exist and refresh its last-modified time
*
* @return
*/
private def touchSchemaFileSystemTime(basePath: String): Long = {
val (timestampFile, timestampFileType) = getTimestampFileAndType(basePath)
if (!FileFactory.isFileExist(timestampFile, timestampFileType)) {
LOGGER.audit(s"Creating timestamp file for $basePath")
FileFactory.createNewFile(timestampFile, timestampFileType)
}
val systemTime = System.currentTimeMillis()
FileFactory.getCarbonFile(timestampFile, timestampFileType)
.setLastModifiedTime(systemTime)
systemTime
}
def checkSchemasModifiedTimeAndReloadTables(storePath: String) {
val (timestampFile, timestampFileType) =
getTimestampFileAndType(storePath)
if (FileFactory.isFileExist(timestampFile, timestampFileType)) {
if (!(FileFactory.getCarbonFile(timestampFile, timestampFileType).
getLastModifiedTime ==
tableModifiedTimeStore.get(CarbonCommonConstants.DATABASE_DEFAULT_NAME))) {
refreshCache()
}
}
}
private def refreshCache() {
metadata.tablesMeta.clear()
}
override def isReadFromHiveMetaStore: Boolean = false
override def listAllTables(sparkSession: SparkSession): Seq[CarbonTable] =
metadata.tablesMeta.map(_.carbonTable)
override def getThriftTableInfo(tablePath: CarbonTablePath)
(sparkSession: SparkSession): TableInfo = {
val tableMetadataFile = tablePath.getSchemaFilePath
CarbonUtil.readSchemaFile(tableMetadataFile)
}
}
| shivangi1015/incubator-carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala | Scala | apache-2.0 | 19,992 |
import collection.mutable._
class TestSet(s0: Set[Int], s1: Set[Int]) {
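  // Differential test: drive both set implementations with the same pseudo-random sequence
  // of operations and require every result to agree; with testEachStep enabled the contents
  // are additionally compared after each step.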
val Iterations = 10
val Range = 100000
val testEachStep = false
val Threshold = 20000
val r = new java.util.Random(12345)
def test(s: Set[Int], n: Int): Any = {
val v = n >> 3
n & 7 match {
case 0 | 1 | 2 => s contains v
case 3 => s += v
case 4 => s -= v
case 5 => if (s.size > Threshold) s -= v else s += v
case 6 => s += v
case 7 => s.size
}
}
def explain(n: Int, s: Set[Int]): String = n & 7 match {
case 0 | 1 | 2 => "contains"
case 3 => "add"
case 4 => "remove"
case 5 => if (s.size > Threshold) "remove" else "add"
case 6 => "add"
case 7 => "size"
}
def checkSubSet(pre: String, s0: Set[Int], s1: Set[Int]): Unit = {
for (e <- s0.iterator)
if (!(s1 contains e)) {
assert(false, pre+" element: "+e+"\n S0 = "+s0+"\n S1 = "+s1)
}
}
for (i <- 0 until Iterations) {
val n = r.nextInt(Range)
val res0 = test(s0, n)
val res1 = test(s1, n)
//Console.println("operation = "+explain(n, s0)+", value ="+(n >> 3)+", result0 = "+res0)
if (testEachStep) {
checkSubSet("superfluous", s0, s1)
checkSubSet("missing", s1, s0)
}
if (res0 != res1)
assert(false, "DIFFERENCE , operation = "+explain(n, s0)+", value ="+(n >> 3)+
", result0 = "+res0+", result1 = "+res1)
}
Console.println("succeeded for "+Iterations+" iterations.")
}
object Test extends App {
def t3954: Unit = {
import scala.collection.mutable
val result2 = new mutable.HashSet[Int]
println(result2.add(1))
println(result2.add(1))
val result3 = new java.util.HashSet[Int]()
println(result3.add(1))
println(result3.add(1))
}
t3954
new TestSet(HashSet.empty, new LinkedHashSet)
}
| som-snytt/dotty | tests/run/colltest.scala | Scala | apache-2.0 | 1,903 |
package org.jetbrains.plugins.scala.lang.types.existentialSimplification
package generated
class ExistentialSimplificationSecondRuleTest extends ExistentialSimplificationTestBase {
//This class was generated by build script, please don't change this
override def folderPath: String = super.folderPath + "secondRule/"
def testFewNames() = doTest()
def testFewNamesHarder() = doTest()
def testUnusedWildcard() = doTest()
def testUnusedWildcardJoinedFirstRule() = doTest()
} | ilinum/intellij-scala | test/org/jetbrains/plugins/scala/lang/types/existentialSimplification/generated/ExistentialSimplificationSecondRuleTest.scala | Scala | apache-2.0 | 488 |
/*
* scala-bcp
* Copyright 2014 深圳岂凡网络有限公司 (Shenzhen QiFun Network Corp., LTD)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.qifun.bcp
import java.io.EOFException
import java.io.IOException
import java.nio.ByteBuffer
import scala.annotation.tailrec
import com.qifun.bcp.Bcp._
import com.qifun.statelessFuture.Future
import com.qifun.statelessFuture.util.io.SocketInputStream
import com.qifun.statelessFuture.util.io.SocketWritingQueue
import java.io.EOFException
import java.io.IOException
import scala.collection.mutable.ArrayBuffer
private[bcp] object BcpIo {
private final def receiveUnsignedVarint(stream: SocketInputStream): Future[Int] = {
def receiveRestBytes(result: Int, i: Int): Future[Int] = Future[Int] {
(stream.available_=(1)).await
stream.read() match {
case -1 => throw new EOFException
case b => {
if (i < 32) {
if (b >= 0x80) {
receiveRestBytes(
result | ((b & 0x7f) << i),
i + 7).await
} else {
result | (b << i)
}
} else {
throw new BcpException.VarintTooBig
}
}
}
}
receiveRestBytes(0, 0)
}
@tailrec
private final def writeUnsignedVarint(buffer: ByteBuffer, value: Int) {
if ((value & 0xFFFFFF80) == 0) {
buffer.put(value.toByte)
return ;
} else {
buffer.put(((value & 0x7F) | 0x80).toByte)
writeUnsignedVarint(buffer, value >>> 7)
}
}
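  // Both varint helpers use the common LEB128-style layout: 7 payload bits per byte,
  // least-significant group first, with the high bit set on every byte except the last.
  // For example, 300 (binary 1_0010_1100) is written as the two bytes 0xAC 0x02.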
final def enqueue(queue: SocketWritingQueue, pack: Acknowledge.type) {
queue.enqueue(ByteBuffer.wrap(Array[Byte](Acknowledge.HeadByte)))
}
final def enqueue(queue: SocketWritingQueue, pack: Finish.type) {
queue.enqueue(ByteBuffer.wrap(Array[Byte](Finish.HeadByte)))
}
final def enqueue(queue: SocketWritingQueue, pack: RetransmissionFinish) {
val headBuffer = ByteBuffer.allocate(20)
headBuffer.put(RetransmissionFinish.HeadByte)
writeUnsignedVarint(headBuffer, pack.connectionId)
writeUnsignedVarint(headBuffer, pack.packId)
headBuffer.flip()
queue.enqueue(headBuffer)
}
final def enqueue(queue: SocketWritingQueue, pack: RetransmissionData) {
val headBuffer = ByteBuffer.allocate(20)
headBuffer.put(RetransmissionData.HeadByte)
writeUnsignedVarint(headBuffer, pack.connectionId)
writeUnsignedVarint(headBuffer, pack.packId)
writeUnsignedVarint(headBuffer, pack.buffers.view.map(_.remaining).sum)
headBuffer.flip()
val newBuffer = for (buffer <- pack.buffers) yield buffer.duplicate()
queue.enqueue((headBuffer +: newBuffer.view): _*)
}
final def enqueue(queue: SocketWritingQueue, pack: Data) {
val headBuffer = ByteBuffer.allocate(20)
headBuffer.put(Data.HeadByte)
writeUnsignedVarint(headBuffer, pack.buffers.view.map(_.remaining).sum)
headBuffer.flip()
val newBuffer = for (buffer <- pack.buffers) yield buffer.duplicate()
queue.enqueue((headBuffer +: newBuffer.view): _*)
}
final def enqueue(queue: SocketWritingQueue, pack: ShutDown.type) {
queue.enqueue(ByteBuffer.wrap(Array[Byte](ShutDown.HeadByte)))
}
final def enqueue(queue: SocketWritingQueue, pack: HeartBeat.type) {
queue.enqueue(ByteBuffer.wrap(Array[Byte](HeartBeat.HeadByte)))
}
final def enqueue(queue: SocketWritingQueue, pack: Packet) {
pack match {
case pack @ Acknowledge => {
enqueue(queue, pack)
}
case pack: Data => {
enqueue(queue, pack)
}
case pack @ Finish => {
enqueue(queue, pack)
}
case pack: RetransmissionData => {
enqueue(queue, pack)
}
case pack: RetransmissionFinish => {
enqueue(queue, pack)
}
case pack @ ShutDown => {
enqueue(queue, pack)
}
case pack @ HeartBeat => {
enqueue(queue, pack)
}
}
}
@throws(classOf[IOException])
final def receive(stream: SocketInputStream) = Future[ClientToServer] {
stream.available_=(1).await
stream.read() match {
case Data.HeadByte => {
val length = receiveUnsignedVarint(stream).await
if (length > MaxDataSize) {
throw new BcpException.DataTooBig
}
stream.available_=(length).await
val buffer = new ArrayBuffer[ByteBuffer]
stream.move(buffer, length)
Data(buffer)
}
case RetransmissionData.HeadByte => {
val connectionId = receiveUnsignedVarint(stream).await
val packId = receiveUnsignedVarint(stream).await
val length = receiveUnsignedVarint(stream).await
if (length > MaxDataSize) {
throw new BcpException.DataTooBig
}
stream.available_=(length).await
val buffer = new ArrayBuffer[ByteBuffer]
stream.move(buffer, length)
RetransmissionData(connectionId, packId, buffer)
}
case RetransmissionFinish.HeadByte => {
val connectionId = receiveUnsignedVarint(stream).await
val packId = receiveUnsignedVarint(stream).await
RetransmissionFinish(connectionId, packId)
}
case Acknowledge.HeadByte => {
Acknowledge
}
case Finish.HeadByte => {
Finish
}
case ShutDown.HeadByte => {
ShutDown
}
case HeartBeat.HeadByte => {
HeartBeat
}
case _ => throw new BcpException.UnknownHeadByte
}
}
private def boolToInt(bool: Boolean) = if (bool) 1 else 0
  private def intToBool(int: Int) = int != 0
final def receiveHead(stream: SocketInputStream) = Future {
val sessionId = receiveSessionId(stream).await
val isRenew = receiveUnsignedVarint(stream).await
val connectionId = receiveUnsignedVarint(stream).await
ConnectionHead(sessionId, intToBool(isRenew), connectionId)
}
private def receiveSessionId(stream: SocketInputStream) = Future[Array[Byte]] {
stream.available_=(NumBytesSessionId).await
    val sessionId = Array.ofDim[Byte](NumBytesSessionId)
stream.read(sessionId)
sessionId
}
final def enqueueHead(stream: SocketWritingQueue, head: ConnectionHead) = {
val ConnectionHead(sessionId, isRenew, connectionId) = head
val headBuffer = ByteBuffer.allocate(NumBytesSessionId + 1 + 5)
headBuffer.put(sessionId)
writeUnsignedVarint(headBuffer, boolToInt(isRenew))
writeUnsignedVarint(headBuffer, connectionId)
headBuffer.flip()
stream.enqueue(headBuffer)
}
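  // Editor's note (hedged): receiveHead and enqueueHead above agree on the connection
  // handshake layout, which under the definitions in this file is
  //
  //   [ sessionId: NumBytesSessionId raw bytes ][ isRenew: unsigned varint (0 or 1) ][ connectionId: unsigned varint ]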
} | qifun/scala-bcp | src/main/scala/com/qifun/bcp/BcpIo.scala | Scala | apache-2.0 | 7,014 |
package epic.framework
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import epic.inference.{ExpectationPropagation, Factor}
import collection.mutable.ArrayBuffer
import epic.util.SafeLogging
import java.util.concurrent.atomic.AtomicLong
import epic.parser.ParseMarginal
class EPInference[Datum, Augment <: AnyRef](val inferences: IndexedSeq[ProjectableInference[Datum, Augment]],
val maxEPIter: Int,
val epInGold: Boolean = false)(implicit aIsFactor: Augment <:< Factor[Augment]) extends ProjectableInference[Datum, Augment] with SafeLogging with Serializable {
type Marginal = EPMarginal[Augment, ProjectableInference[Datum, Augment]#Marginal]
type ExpectedCounts = EPExpectedCounts
type Scorer = EPScorer[ProjectableInference[Datum, Augment]#Scorer]
def baseAugment(v: Datum) = inferences.filter(_ ne null).head.baseAugment(v)
def project(v: Datum, s: Scorer, m: Marginal, oldAugment: Augment): Augment = m.q
def scorer(v: Datum): Scorer = EPScorer(inferences.map(_.scorer(v)))
override def forTesting = new EPInference(inferences.map(_.forTesting), maxEPIter, epInGold)
// ugh code duplication...
def goldMarginal(scorer: Scorer, datum: Datum, augment: Augment): Marginal = {
if (!epInGold) {
val marginals = inferences.indices.map { i =>
val inf = inferences(i)
if (inf eq null)
null.asInstanceOf[ProjectableInference[Datum, Augment]#Marginal]
else
inf.goldMarginal(scorer.scorers(i).asInstanceOf[inf.Scorer], datum)
}
val ((inf, m), iScorer) = (inferences zip marginals zip scorer.scorers).filter(_._1._2 != null).head
EPMarginal(marginals.filter(_ ne null).map(_.logPartition).sum, inf.project(datum, iScorer.asInstanceOf[inf.Scorer], m.asInstanceOf[inf.Marginal], augment), marginals)
} else {
EPInference.doInference(datum, augment, inferences, scorer, (inf:ProjectableInference[Datum, Augment], scorer: ProjectableInference[Datum, Augment]#Scorer, q: Augment) => inf.goldMarginal(scorer.asInstanceOf[inf.Scorer], datum, q), maxEPIter)
}
}
def marginal(scorer: Scorer, datum: Datum, augment: Augment): Marginal = {
EPInference.doInference(datum, augment, inferences, scorer, (inf:ProjectableInference[Datum, Augment], scorer: ProjectableInference[Datum, Augment]#Scorer, q: Augment) => inf.marginal(scorer.asInstanceOf[inf.Scorer], datum, q), maxEPIter)
}
}
case class EPMarginal[Augment, Marginal](logPartition: Double, q: Augment, marginals: IndexedSeq[Marginal]) extends epic.framework.Marginal
object EPInference extends SafeLogging {
val iters, calls = new AtomicLong(0)
def doInference[Datum, Augment <: AnyRef,
Marginal <: ProjectableInference[Datum, Augment]#Marginal,
Scorer](datum: Datum,
augment: Augment, inferences: IndexedSeq[ProjectableInference[Datum, Augment]],
scorer: EPScorer[Scorer],
infType: (ProjectableInference[Datum, Augment],Scorer, Augment)=>Marginal,
maxEPIter: Int = 5,
convergenceThreshold: Double = 1E-4)
(implicit aIsFactor: Augment <:< Factor[Augment]):EPMarginal[Augment, Marginal] = {
var iter = 0
val marginals = ArrayBuffer.fill(inferences.length)(null.asInstanceOf[Marginal])
def project(q: Augment, i: Int) = {
val inf = inferences(i)
marginals(i) = null.asInstanceOf[Marginal]
val iScorer = scorer.scorers(i)
var marg = infType(inf, iScorer, q)
var contributionToLikelihood = marg.logPartition
if (contributionToLikelihood.isInfinite || contributionToLikelihood.isNaN) {
logger.error(s"Model $i is misbehaving ($contributionToLikelihood) on iter $iter! Datum: $datum" )
throw new RuntimeException("EP is being sad!")
/*
marg = inf.marginal(datum)
contributionToLikelihood = marg.logPartition
if (contributionToLikelihood.isInfinite || contributionToLikelihood.isNaN) {
throw new RuntimeException(s"Model $i is misbehaving ($contributionToLikelihood) on iter $iter! Datum: " + datum )
}
*/
}
val newAugment = inf.project(datum, iScorer.asInstanceOf[inf.Scorer], marg.asInstanceOf[inf.Marginal], q)
marginals(i) = marg
// println("Leaving " + i)
newAugment -> contributionToLikelihood
}
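    // Editor's note (hedged): `project` above is the per-factor EP step. Factor i computes
    // its marginal under the current approximation q, records that marginal, reports its
    // log-partition contribution, and returns a refreshed Augment message; the
    // ExpectationPropagation driver below combines those messages and iterates them to a
    // fixed point (or until maxEPIter is reached).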
val ep = new ExpectationPropagation(project _, convergenceThreshold)
val inferencesToUse = inferences.indices.filter(inferences(_) ne null)
var state: ep.State = null
val iterates = ep.inference(augment, inferencesToUse, inferencesToUse.map(i => inferences(i).baseAugment(datum)))
while (iter < maxEPIter && iterates.hasNext) {
val s = iterates.next()
iter += 1
state = s
}
EPInference.iters.addAndGet(iter)
if (EPInference.calls.incrementAndGet % 1000 == 0) {
val calls = EPInference.calls.get()
val iters = EPInference.iters.get()
logger.info(s"EP Stats $iters $calls ${iters * 1.0 / calls} $maxEPIter")
EPInference.calls.set(0)
EPInference.iters.set(0)
}
logger.debug(f"guess($iter%d:${state.logPartition}%.1f)")
EPMarginal(state.logPartition, state.q, marginals)
}
}
| jovilius/epic | src/main/scala/epic/framework/EPInference.scala | Scala | apache-2.0 | 5,868 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.tree.impl
import scala.collection.mutable
import scala.util.Try
import org.apache.spark.internal.Logging
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.tree.TreeEnsembleParams
import org.apache.spark.mllib.tree.configuration.Algo._
import org.apache.spark.mllib.tree.configuration.QuantileStrategy._
import org.apache.spark.mllib.tree.configuration.Strategy
import org.apache.spark.mllib.tree.impurity.Impurity
import org.apache.spark.rdd.RDD
/**
* Learning and dataset metadata for DecisionTree.
*
* @param numClasses For classification: labels can take values {0, ..., numClasses - 1}.
* For regression: fixed at 0 (no meaning).
* @param maxBins Maximum number of bins, for all features.
* @param featureArity Map: categorical feature index to arity.
* I.e., the feature takes values in {0, ..., arity - 1}.
* @param numBins Number of bins for each feature.
*/
private[spark] class DecisionTreeMetadata(
val numFeatures: Int,
val numExamples: Long,
val numClasses: Int,
val maxBins: Int,
val featureArity: Map[Int, Int],
val unorderedFeatures: Set[Int],
val numBins: Array[Int],
val impurity: Impurity,
val quantileStrategy: QuantileStrategy,
val maxDepth: Int,
val minInstancesPerNode: Int,
val minInfoGain: Double,
val numTrees: Int,
val numFeaturesPerNode: Int) extends Serializable {
def isUnordered(featureIndex: Int): Boolean = unorderedFeatures.contains(featureIndex)
def isClassification: Boolean = numClasses >= 2
def isMulticlass: Boolean = numClasses > 2
def isMulticlassWithCategoricalFeatures: Boolean = isMulticlass && (featureArity.size > 0)
def isCategorical(featureIndex: Int): Boolean = featureArity.contains(featureIndex)
def isContinuous(featureIndex: Int): Boolean = !featureArity.contains(featureIndex)
/**
* Number of splits for the given feature.
* For unordered features, there is 1 bin per split.
* For ordered features, there is 1 more bin than split.
*/
def numSplits(featureIndex: Int): Int = if (isUnordered(featureIndex)) {
numBins(featureIndex)
} else {
numBins(featureIndex) - 1
}
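  // Editor's example (hedged): a continuous (ordered) feature with 32 bins therefore has
  // 31 candidate split thresholds, while an unordered categorical feature with 7 bins has
  // 7 candidate splits.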
/**
* Set number of splits for a continuous feature.
* For a continuous feature, number of bins is number of splits plus 1.
*/
def setNumSplits(featureIndex: Int, numSplits: Int) {
require(isContinuous(featureIndex),
s"Only number of bin for a continuous feature can be set.")
numBins(featureIndex) = numSplits + 1
}
/**
* Indicates if feature subsampling is being used.
*/
def subsamplingFeatures: Boolean = numFeatures != numFeaturesPerNode
}
private[spark] object DecisionTreeMetadata extends Logging {
/**
* Construct a [[DecisionTreeMetadata]] instance for this dataset and parameters.
* This computes which categorical features will be ordered vs. unordered,
* as well as the number of splits and bins for each feature.
*/
def buildMetadata(
input: RDD[LabeledPoint],
strategy: Strategy,
numTrees: Int,
featureSubsetStrategy: String): DecisionTreeMetadata = {
val numFeatures = input.map(_.features.size).take(1).headOption.getOrElse {
throw new IllegalArgumentException(s"DecisionTree requires size of input RDD > 0, " +
s"but was given by empty one.")
}
require(numFeatures > 0, s"DecisionTree requires number of features > 0, " +
s"but was given an empty features vector")
val numExamples = input.count()
val numClasses = strategy.algo match {
case Classification => strategy.numClasses
case Regression => 0
}
val maxPossibleBins = math.min(strategy.maxBins, numExamples).toInt
if (maxPossibleBins < strategy.maxBins) {
logWarning(s"DecisionTree reducing maxBins from ${strategy.maxBins} to $maxPossibleBins" +
s" (= number of training instances)")
}
// We check the number of bins here against maxPossibleBins.
// This needs to be checked here instead of in Strategy since maxPossibleBins can be modified
// based on the number of training examples.
if (strategy.categoricalFeaturesInfo.nonEmpty) {
val maxCategoriesPerFeature = strategy.categoricalFeaturesInfo.values.max
val maxCategory =
strategy.categoricalFeaturesInfo.find(_._2 == maxCategoriesPerFeature).get._1
require(maxCategoriesPerFeature <= maxPossibleBins,
s"DecisionTree requires maxBins (= $maxPossibleBins) to be at least as large as the " +
s"number of values in each categorical feature, but categorical feature $maxCategory " +
s"has $maxCategoriesPerFeature values. Considering remove this and other categorical " +
"features with a large number of values, or add more training examples.")
}
val unorderedFeatures = new mutable.HashSet[Int]()
val numBins = Array.fill[Int](numFeatures)(maxPossibleBins)
if (numClasses > 2) {
// Multiclass classification
val maxCategoriesForUnorderedFeature =
((math.log(maxPossibleBins / 2 + 1) / math.log(2.0)) + 1).floor.toInt
strategy.categoricalFeaturesInfo.foreach { case (featureIndex, numCategories) =>
// Hack: If a categorical feature has only 1 category, we treat it as continuous.
// TODO(SPARK-9957): Handle this properly by filtering out those features.
if (numCategories > 1) {
// Decide if some categorical features should be treated as unordered features,
// which require 2 * ((1 << numCategories - 1) - 1) bins.
// We do this check with log values to prevent overflows in case numCategories is large.
// The next check is equivalent to: 2 * ((1 << numCategories - 1) - 1) <= maxBins
if (numCategories <= maxCategoriesForUnorderedFeature) {
unorderedFeatures.add(featureIndex)
numBins(featureIndex) = numUnorderedBins(numCategories)
} else {
numBins(featureIndex) = numCategories
}
}
}
} else {
// Binary classification or regression
strategy.categoricalFeaturesInfo.foreach { case (featureIndex, numCategories) =>
// If a categorical feature has only 1 category, we treat it as continuous: SPARK-9957
if (numCategories > 1) {
numBins(featureIndex) = numCategories
}
}
}
// Set number of features to use per node (for random forests).
val _featureSubsetStrategy = featureSubsetStrategy match {
case "auto" =>
if (numTrees == 1) {
"all"
} else {
if (strategy.algo == Classification) {
"sqrt"
} else {
"onethird"
}
}
case _ => featureSubsetStrategy
}
val numFeaturesPerNode: Int = _featureSubsetStrategy match {
case "all" => numFeatures
case "sqrt" => math.sqrt(numFeatures).ceil.toInt
case "log2" => math.max(1, (math.log(numFeatures) / math.log(2)).ceil.toInt)
case "onethird" => (numFeatures / 3.0).ceil.toInt
case _ =>
Try(_featureSubsetStrategy.toInt).filter(_ > 0).toOption match {
case Some(value) => math.min(value, numFeatures)
case None =>
Try(_featureSubsetStrategy.toDouble).filter(_ > 0).filter(_ <= 1.0).toOption match {
case Some(value) => math.ceil(value * numFeatures).toInt
case _ => throw new IllegalArgumentException(s"Supported values:" +
s" ${TreeEnsembleParams.supportedFeatureSubsetStrategies.mkString(", ")}," +
s" (0.0-1.0], [1-n].")
}
}
}
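    // Editor's worked example (hedged): with numFeatures = 100, "all" -> 100, "sqrt" -> 10,
    // "log2" -> 7, "onethird" -> 34, the integer string "20" -> 20, and the fraction
    // string "0.3" -> 30.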
new DecisionTreeMetadata(numFeatures, numExamples, numClasses, numBins.max,
strategy.categoricalFeaturesInfo, unorderedFeatures.toSet, numBins,
strategy.impurity, strategy.quantileCalculationStrategy, strategy.maxDepth,
strategy.minInstancesPerNode, strategy.minInfoGain, numTrees, numFeaturesPerNode)
}
/**
* Version of [[DecisionTreeMetadata#buildMetadata]] for DecisionTree.
*/
def buildMetadata(
input: RDD[LabeledPoint],
strategy: Strategy): DecisionTreeMetadata = {
buildMetadata(input, strategy, numTrees = 1, featureSubsetStrategy = "all")
}
/**
* Given the arity of a categorical feature (arity = number of categories),
* return the number of bins for the feature if it is to be treated as an unordered feature.
* There is 1 split for every partitioning of categories into 2 disjoint, non-empty sets;
* there are math.pow(2, arity - 1) - 1 such splits.
* Each split has 2 corresponding bins.
*/
def numUnorderedBins(arity: Int): Int = (1 << arity - 1) - 1
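  // Editor's note (hedged): for arity = 4 this evaluates to (1 << 3) - 1 = 7, i.e. one
  // candidate split (and, per the class doc, one bin) for each way of partitioning the
  // four categories into two non-empty unordered subsets. Note that `1 << arity - 1`
  // parses as `1 << (arity - 1)` because `-` binds more tightly than `<<` in Scala.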
}
| brad-kaiser/spark | mllib/src/main/scala/org/apache/spark/ml/tree/impl/DecisionTreeMetadata.scala | Scala | apache-2.0 | 9,545 |
package chandu0101.scalajs.react.components
package elementalui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import japgolly.scalajs.react.raw.React
import scala.scalajs.js
import scala.scalajs.js.`|`
case class CheckBox(className: js.UndefOr[String] = js.undefined,
disabled: js.UndefOr[Boolean] = js.undefined,
autofocus: js.UndefOr[Boolean] = js.undefined,
indeterminate: js.UndefOr[Boolean] = js.undefined,
`inline`: js.UndefOr[Boolean] = js.undefined,
label: js.UndefOr[String] = js.undefined,
style: js.UndefOr[String] = js.undefined,
title: js.UndefOr[String] = js.undefined,
onclick: js.UndefOr[ReactEventFromHtml => Callback] = js.undefined,
ondblclick: js.UndefOr[ReactEventFromHtml => Callback] = js.undefined) {
def apply() = {
val props = JSMacro[CheckBox](this)
val component = JsComponent[js.Object, Children.None, Null](Eui.Checkbox)
component(props)
}
}
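// Editor's usage sketch (hedged; the label text and handler are assumptions): rendering a
// labelled inline checkbox with a click handler.
//
//   CheckBox(label = "Remember me", `inline` = true,
//            onclick = js.defined((_: ReactEventFromHtml) => Callback.log("toggled")))()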
| rleibman/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/elementalui/Checkbox.scala | Scala | apache-2.0 | 1,101 |
package se.gigurra.wallace.renderer
import com.badlogic.gdx.graphics.g2d.SpriteBatch
import se.gigurra.wallace.util.Decorated
class RichSpriteBatch(_base: SpriteBatch) extends Decorated[SpriteBatch](_base) {
def active(f: => Unit): Unit = {
this.begin()
try {
f
} finally {
this.end()
}
}
def apply(f: => Unit): Unit = {
active(f)
}
}
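// Editor's usage sketch (hedged): the loan pattern above guarantees that begin() and end()
// are paired around the drawing code, even if it throws.
//
//   val batch = new RichSpriteBatch(new SpriteBatch())
//   batch {
//     // draw calls go here; end() runs in the finally block
//   }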
| GiGurra/Wall-Ace | lib_render/src/main/scala/se/gigurra/wallace/renderer/RichSpriteBatch.scala | Scala | gpl-2.0 | 378 |