code | repo_name | path | language | license | size |
stringlengths 5-1M | stringlengths 5-109 | stringlengths 6-208 | stringclasses 1 value | stringclasses 15 values | int64 5-1M |
---|---|---|---|---|---|
package com.github.tanacasino.sample2
// Marking the constructor private means it cannot be called from other classes. You do the same in Java, e.g. for the Singleton pattern.
class CompanionA private (name: String, age: Int) {
// The class and its companion object can access each other's private members
def say = CompanionA.PrivateValue
def fire = {
CompanionA.staticMethod
}
}
object CompanionA {
private val PrivateValue = "PrivateValue"
// Define a factory method
def apply(name: String, age: Int): CompanionA = {
new CompanionA(name, age)
}
private def staticMethod = "static!!!"
}
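// A minimal usage sketch (not part of the original file): because the constructor is private,
// callers must go through the companion's apply factory; `new CompanionA(...)` would not
// compile outside the companion. The object below is purely illustrative.
object CompanionAUsageSketch {
  val a = CompanionA("alice", 30) // routed through CompanionA.apply
  // val b = new CompanionA("bob", 40) // does not compile: constructor is private
  val greeting = a.say // the class can still expose the companion's private PrivateValue
}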
| tanacasino/learning-scala | src/main/scala/com/github/tanacasino/sample2/CompanionA.scala | Scala | mit | 648 |
package io.citrine.lolo.validation
import io.citrine.lolo.{Learner, PredictionResult}
import scala.util.Random
/**
* Methods that use cross-validation to calculate predicted-vs-actual data and metric estimates
*/
case object CrossValidation {
/**
* Driver to apply named metrics to k-fold cross-validated predicted-vs-actual
*
* @param trainingData to cross-validate with
* @param learner to cross-validate
* @param metrics apply to the predicted-vs-actual data
* @param k number of folds
* @param nTrial number of times to refold the data to improve statistics
* @param rng random number generator to use in choosing folds
* @tparam T type of the response, e.g. Double for scalar regression
* @return a Map from the name of the metric to its mean value and the error in that mean
*/
def kFoldCrossvalidation[T](
trainingData: Seq[(Vector[Any], T)],
learner: Learner,
metrics: Map[String, Merit[T]],
k: Int = 8,
nTrial: Int = 1,
rng: Random = Random
): Map[String, (Double, Double)] = {
Merit.estimateMerits(
kFoldPvA(trainingData, learner, k, nTrial, rng).iterator,
metrics,
rng
)
}
/**
* Use k-fold cross-validation to create predicted vs actual results
*
* @param trainingData to cross-validate with
* @param learner to cross-validate
* @param k number of folds
* @param nTrial number of times to re-fold the data to improve statistics
* @tparam T type of the response, e.g. Double for scalar regression
* @return an iterable over predicted-vs-actual for each fold
*/
def kFoldPvA[T](
trainingData: Seq[(Vector[Any], T)],
learner: Learner,
k: Int = 8,
nTrial: Int = 1,
rng: Random = Random
): Iterable[(PredictionResult[T], Seq[T])] = {
val nTest: Int = Math.ceil(trainingData.size.toDouble / k).toInt
(0 until nTrial).flatMap { _ =>
val folds: Seq[Seq[(Vector[Any], T)]] = rng.shuffle(trainingData).grouped(nTest).toSeq
folds.indices.map { idx =>
val (testFolds, trainFolds) = folds.zipWithIndex.partition(_._2 == idx)
val testData = testFolds.flatMap(_._1)
val trainData = trainFolds.flatMap(_._1)
val model = learner.train(trainData).getModel()
val predictions: PredictionResult[T] = model.transform(testData.map(_._1)).asInstanceOf[PredictionResult[T]]
(predictions, testData.map(_._2))
}
}
}
}
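// A minimal usage sketch (not part of the original file). `someLearner` and `someMerit` are
// placeholders for whatever Learner and Merit[Double] implementations you have in scope; no
// concrete lolo class names are assumed here.
object CrossValidationUsageSketch {
  def run(someLearner: Learner,
          someMerit: Merit[Double],
          data: Seq[(Vector[Any], Double)]): Map[String, (Double, Double)] =
    CrossValidation.kFoldCrossvalidation(
      trainingData = data,
      learner = someLearner,
      metrics = Map("merit" -> someMerit),
      k = 4,
      nTrial = 2
    )
}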
| CitrineInformatics/lolo | src/main/scala/io/citrine/lolo/validation/CrossValidation.scala | Scala | apache-2.0 | 2,536 |
package at.ac.tuwien.ifs.ir.evaluation.pool
import at.ac.tuwien.ifs.ir.model._
import org.apache.commons.math3.distribution.BetaDistribution
import scala.annotation.tailrec
import scala.util.Random
/**
* Multi-Armed Bandits Based Pool
* Created by aldo on 25/07/16.
*/
class MABBasedPool(m: String, c1: Double, c2: Double, sizePool: Int, lRuns: List[Runs], gT: QRels, restore: Boolean, nDs: Map[Int, Int]) extends FixedSizePool(sizePool, lRuns, gT, nDs) {
override lazy val qRels: QRels = PoolConverter.repoolToMABBased(m, c1, c2, sizePool, lRuns, gT, nDs, restore)
override def getName: String = MABBasedPool.getName(m, c1, c2, sizePool)
override def getNewInstance(lRuns: List[Runs]): Pool = MABBasedPool(m, c1, c2, sizePool, lRuns, gT, restore,
FixedSizePool.findTopicSizes(nDs.values.sum, lRuns, qRels))
}
object MABBasedPool {
val rnd = new Random(1234)
def getName(m: String, c1: Double, c2: Double, sizePool: Int): String = "MABBased_" + m + ":" + c1 + ":" + c2 + ":" + sizePool
def apply(m: String, c1: Double, c2: Double, sizePool: Int, lRuns: List[Runs], gT: QRels, restore: Boolean, nDs: Map[Int, Int]) = new MABBasedPool(m, c1, c2, sizePool, lRuns, gT, restore, nDs)
def getPooledDocuments(m: String, c1: Double, c2: Double, nDs: Map[Int, Int], pRuns: List[Runs], qRels: QRels, restore: Boolean)(topicId: Int): Set[Document] = {
val oRs: Map[Int, List[Document]] = FixedSizePool.getSimplifiedLRuns(topicId, pRuns)
def random(): Set[Document] = {
@tailrec def getDocuments(rs: Map[Int, List[Document]], acc: Set[Document] = Set()): Set[Document] = {
if (acc.size == nDs(topicId) || rs.isEmpty)
acc
else {
// select arm
val nr = rnd.shuffle(rs.keys.toList).head
val doc = rs(nr).head
getDocuments(
FixedSizePool.updateSimplifiedLRuns(rs, nr),
acc + doc)
}
}
getDocuments(oRs)
}
def p(r: List[Document], qRel: QRel): Double = {
if (r.isEmpty)
0.5d
else
r.count(d => qRel.getRel(d) >= 1).toDouble / r.size // toDouble avoids integer division
}
def greedy(c1: Double, c2: Double): Set[Document] = {
@tailrec def getDocuments(rs: Map[Int, List[Document]], cQRel: QRel = new QRel(topicId, Nil), acc: Set[Document] = Set(), n: Int = 1): Set[Document] = {
if (acc.size == nDs(topicId) || rs.isEmpty)
acc
else {
// select arm
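// Explanatory note (not in the original): this is a decaying epsilon-greedy schedule. The
// arm is chosen uniformly at random with probability min(1, c1 * K / (c2^2 * n)), where K is
// the number of runs and n counts the judgments made so far; otherwise the run with the best
// observed precision so far is exploited.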
val nr =
if (rnd.nextDouble() < Math.min(1d, (c1 * oRs.size) / (c2 * c2 * n)))
rnd.shuffle(rs.keys.toList).head
else
FixedSizePool.getNonDeterministicMaxObject(
oRs.filter(r => rs.contains(r._1)).map(r => (r._1, {
val ds = r._2.take(oRs(r._1).size - rs(r._1).size)
p(ds, cQRel)
})).toList)
// judge doc
val doc = rs(nr).head
val rel = if (cQRel.getRel(doc) < 0) qRels.getRel(topicId, doc) else cQRel.getRel(doc)
getDocuments(
FixedSizePool.updateSimplifiedLRuns(rs, nr),
if (cQRel.getRel(doc) < 0)
new QRel(cQRel.id, cQRel.qrelRecords :+ QRelRecord("Q0", doc, rel))
else
cQRel,
acc + doc,
n + 1)
}
}
getDocuments(oRs)
}
def ucb1Tuned(): Set[Document] = {
@tailrec def getDocuments(rs: Map[Int, List[Document]], cQRel: QRel = new QRel(topicId, Nil), acc: Set[Document] = Set(), n: Int = 0): Set[Document] = {
if (acc.size == nDs(topicId) || rs.isEmpty)
acc
else {
// select arm
val nr =
if (!rs.forall(r => oRs(r._1).size - r._2.size > 0))
rs.filter(r => oRs(r._1).size - r._2.size == 0).head._1
else
FixedSizePool.getNonDeterministicMaxObject(
oRs.filter(r => rs.contains(r._1)).map(r => (r._1, {
// get played documents
val ds = r._2.take(oRs(r._1).size - rs(r._1).size)
// get mean and variance
val mu = p(ds, cQRel)
val va = mu * (1d - mu)
// compute and return ucb1-tuned weight
mu + Math.sqrt(Math.log(n) / ds.size * Math.min(0.25d, va + Math.sqrt(2d * Math.log(n) / ds.size)))
})).toList)
val doc = rs(nr).head
val rel = if (cQRel.getRel(doc) < 0) qRels.getRel(topicId, doc) else cQRel.getRel(doc)
getDocuments(
FixedSizePool.updateSimplifiedLRuns(rs, nr),
if (cQRel.getRel(doc) < 0)
new QRel(cQRel.id, cQRel.qrelRecords :+ QRelRecord("Q0", doc, rel))
else
cQRel,
acc + doc,
n + 1)
}
}
getDocuments(oRs)
}
def beta(): Set[Document] = {
@tailrec def getDocuments(rs: Map[Int, List[Document]], cQRel: QRel = new QRel(topicId, Nil), acc: Set[Document] = Set()): Set[Document] = {
if (acc.size == nDs(topicId) || rs.isEmpty)
acc
else {
// select arm
val nr = FixedSizePool.getNonDeterministicMaxObject(
oRs.filter(r => rs.contains(r._1)).map(r => (r._1, {
val ds = r._2
new BetaDistribution(
1d + ds.count(d => cQRel.getRel(d) > 0),
1d + ds.count(d => cQRel.getRel(d) == 0)).sample()
})).toList)
// judge doc
val doc = rs(nr).head
val rel = if (cQRel.getRel(doc) < 0) qRels.getRel(topicId, doc) else cQRel.getRel(doc)
getDocuments(
FixedSizePool.updateSimplifiedLRuns(rs, nr),
if (cQRel.getRel(doc) < 0)
new QRel(cQRel.id, cQRel.qrelRecords :+ QRelRecord("Q0", doc, rel))
else
cQRel,
acc + doc)
}
}
getDocuments(oRs)
}
def maxMean(): Set[Document] = {
@tailrec def getDocuments(rs: Map[Int, List[Document]], cQRel: QRel = new QRel(topicId, Nil), acc: Set[Document] = Set()): Set[Document] = {
if (acc.size >= nDs(topicId) || rs.isEmpty) {
acc
} else {
// select arm
val nr = FixedSizePool.getNonDeterministicMaxObject(
oRs.filter(r => rs.contains(r._1)).map(r => (r._1, {
val ds = r._2
(1d + ds.count(d => cQRel.getRel(d) > 0)) / (2d + ds.count(d => cQRel.getRel(d) >= 0))
})).toList)
// judge doc
val doc = rs(nr).head
val rel = if (cQRel.getRel(doc) < 0) qRels.getRel(topicId, doc) else cQRel.getRel(doc)
getDocuments(
FixedSizePool.updateSimplifiedLRuns(rs, nr),
if (cQRel.getRel(doc) < 0)
new QRel(cQRel.id, cQRel.qrelRecords :+ QRelRecord("Q0", doc, rel))
else
cQRel,
acc + doc)
}
}
if (restore) {
val nQRel = qRels.getTopicQRels(topicId).qRels.head
val docs = oRs.flatMap(e => oRs(e._1).takeWhile(doc => nQRel.getRel(doc) >= 0)).toSet
val nORs = oRs.map(e => e._1 -> oRs(e._1).dropWhile(doc => nQRel.getRel(doc) >= 0)).filter(_._2.nonEmpty)
getDocuments(nORs, nQRel, docs)
} else {
getDocuments(oRs)
}
}
if (m == "random")
random()
else if (m == "greedy")
greedy(c1, c2)
else if (m == "ucb1-tuned")
ucb1Tuned()
else if (m == "beta")
beta()
else if (m == "maxmean")
maxMean()
else
throw new Exception("Method not found")
}
}
| aldolipani/PoolBiasEstimators | src/main/scala/at/ac/tuwien/ifs/ir/evaluation/pool/MABBasedPool.scala | Scala | apache-2.0 | 7,626 |
package org.jetbrains.plugins.scala.codeInsight.intention.comprehension
import com.intellij.codeInsight.intention.PsiElementBaseIntentionAction
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi.{PsiElement, TokenType}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScForStatement
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.util.IntentionAvailabilityChecker
import ConvertToParenthesesIntention._
import org.jetbrains.plugins.scala.ScalaBundle
/**
* Pavel Fatin
*/
object ConvertToParenthesesIntention {
val FamilyName: String = ScalaBundle.message("intention.for.comprehension.convert.to.parentheses")
}
class ConvertToParenthesesIntention extends PsiElementBaseIntentionAction {
def getFamilyName = FamilyName
override def getText = getFamilyName
def isAvailable(project: Project, editor: Editor, element: PsiElement) = {
element match {
case e @ Parent(_: ScForStatement) =>
List(ScalaTokenTypes.tLBRACE, ScalaTokenTypes.tRBRACE).contains(e.getNode.getElementType) &&
IntentionAvailabilityChecker.checkIntention(this, element)
case _ => false
}
}
override def invoke(project: Project, editor: Editor, element: PsiElement) {
val statement = element.getParent.asInstanceOf[ScForStatement]
ScalaPsiUtil.replaceBracesWithParentheses(statement)
val manager = statement.getManager
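// Explanatory note (not in the original): once the braces become parentheses, enumerators can
// no longer be separated by line breaks alone, so newline-bearing whitespace between them is
// replaced with explicit semicolons below, and the remaining newline whitespace is dropped.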
for (enumerators <- statement.enumerators;
cr <- enumerators.findChildrenByType(TokenType.WHITE_SPACE) if cr.getText.contains('\n')) {
cr.replace(ScalaPsiElementFactory.createSemicolon(manager))
}
for (cr <- statement.findChildrenByType(TokenType.WHITE_SPACE) if cr.getText.contains('\n')) {
cr.delete()
}
}
} | whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/codeInsight/intention/comprehension/ConvertToParenthesesIntention.scala | Scala | apache-2.0 | 1,996 |
package org.openapitools.client.model
case class ComputerSet (
_class: Option[String],
_busyExecutors: Option[Integer],
_computer: Option[List[HudsonMasterComputer]],
_displayName: Option[String],
_totalExecutors: Option[Integer]
)
object ComputerSet {
def toStringBody(var_class: Object, var_busyExecutors: Object, var_computer: Object, var_displayName: Object, var_totalExecutors: Object) =
s"""
| {
| "class":$var_class,"busyExecutors":$var_busyExecutors,"computer":$var_computer,"displayName":$var_displayName,"totalExecutors":$var_totalExecutors
| }
""".stripMargin
}
| cliffano/swaggy-jenkins | clients/scala-gatling/generated/src/gatling/scala/org/openapitools/client/model/ComputerSet.scala | Scala | mit | 640 |
/*
* Copyright 2022 Typelevel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.typelevel.sbt.mergify
import sbt._, Keys._
import org.typelevel.sbt.gha._
import java.nio.file.Path
object MergifyPlugin extends AutoPlugin {
object autoImport {
lazy val mergifyGenerate = taskKey[Unit](
"Generates (and overwrites if extant) a .mergify.yml according to configuration")
lazy val mergifyCheck = taskKey[Unit](
"Checks to see if the .mergify.yml files are equivalent to what would be generated and errors if otherwise")
lazy val mergifyPrRules = settingKey[Seq[MergifyPrRule]]("The mergify pull request rules")
lazy val mergifyStewardConfig = settingKey[Option[MergifyStewardConfig]](
"Config for the automerge rule for Scala Steward PRs, set to None to disable.")
lazy val mergifyRequiredJobs =
settingKey[Seq[String]]("Ids for jobs that must succeed for merging (default: [build])")
lazy val mergifySuccessConditions = settingKey[Seq[MergifyCondition]](
"Success conditions for merging (default: auto-generated from `mergifyRequiredJobs` setting)")
lazy val mergifyLabelPaths = settingKey[Map[String, File]](
"A map from label to file path (default: auto-populated for every subproject in your build)")
type MergifyAction = org.typelevel.sbt.mergify.MergifyAction
val MergifyAction = org.typelevel.sbt.mergify.MergifyAction
type MergifyCondition = org.typelevel.sbt.mergify.MergifyCondition
val MergifyCondition = org.typelevel.sbt.mergify.MergifyCondition
type MergifyPrRule = org.typelevel.sbt.mergify.MergifyPrRule
val MergifyPrRule = org.typelevel.sbt.mergify.MergifyPrRule
type MergifyStewardConfig = org.typelevel.sbt.mergify.MergifyStewardConfig
val MergifyStewardConfig = org.typelevel.sbt.mergify.MergifyStewardConfig
}
override def requires = GenerativePlugin
override def trigger: PluginTrigger = allRequirements
import autoImport._
import GenerativePlugin.autoImport._
override def buildSettings: Seq[Setting[_]] = Seq(
mergifyStewardConfig := Some(MergifyStewardConfig()),
mergifyRequiredJobs := Seq("build"),
mergifyLabelPaths := Map.empty,
mergifySuccessConditions := jobSuccessConditions.value,
mergifyPrRules := {
val baseDir = (LocalRootProject / baseDirectory).value.toPath
val stewardRule =
mergifyStewardConfig.value.map(_.toPrRule(mergifySuccessConditions.value.toList)).toList
val labelRules =
mergifyLabelPaths.value.toList.sorted.map {
case (label, file) =>
val relPath = baseDir.relativize(file.toPath.toAbsolutePath.normalize)
val suffix = if (file.isDirectory) "/" else ""
MergifyPrRule(
s"Label ${label} PRs",
List(MergifyCondition.Custom(s"files~=^${relPath}${suffix}")),
List(MergifyAction.Label(add = List(label)))
)
}
stewardRule ++ labelRules
},
mergifyGenerate := {
IO.write(mergifyYaml.value, generateMergifyContents.value)
},
mergifyCheck := {
val log = state.value.log
def reportMismatch(file: File, expected: String, actual: String): Unit = {
log.error(s"Expected:\n$expected")
log.error(s"Actual:\n${GenerativePlugin.diff(expected, actual)}")
sys.error(
s"${file.getName} does not contain contents that would have been generated by sbt-typelevel-mergify; try running mergifyGenerate")
}
def compare(file: File, expected: String): Unit = {
val actual = IO.read(file)
if (expected != actual) {
reportMismatch(file, expected, actual)
}
}
compare(mergifyYaml.value, generateMergifyContents.value)
}
)
override def projectSettings: Seq[Setting[_]] = Seq(
mergifyGenerate / aggregate := false,
mergifyCheck / aggregate := false,
githubWorkflowGenerate := githubWorkflowGenerate
.dependsOn((ThisBuild / mergifyGenerate))
.value,
githubWorkflowCheck := githubWorkflowCheck.dependsOn((ThisBuild / mergifyCheck)).value,
ThisBuild / mergifyLabelPaths := {
val labelPaths = (ThisBuild / mergifyLabelPaths).value
projectLabel.value.fold(labelPaths) {
case (label, path) =>
val add = labelPaths.get(label) match {
case Some(f) => label -> commonAncestor(f.toPath, path)
case None => label -> path
}
labelPaths + (add._1 -> add._2.toFile)
}
}
)
private lazy val jobSuccessConditions = Def.setting {
githubWorkflowGeneratedCI.value.flatMap {
case job if mergifyRequiredJobs.value.contains(job.id) =>
GenerativePlugin
.expandMatrix(
job.oses,
job.scalas,
job.javas,
job.matrixAdds,
job.matrixIncs,
job.matrixExcs
)
.map { cell =>
MergifyCondition.Custom(s"status-success=${job.name} (${cell.mkString(", ")})")
}
case _ => Nil
}
}
private lazy val projectLabel = Def.setting {
val path = (Compile / sourceDirectories)
.?
.value
.getOrElse(Seq.empty)
.map(_.toPath)
.foldLeft(baseDirectory.value.toPath)(commonAncestor(_, _))
val label = path.getFileName.toString
def isRoot = path == (LocalRootProject / baseDirectory).value.toPath
if (label.startsWith(".") || isRoot) // don't label this project
None
else Some(label -> path)
}
// x and y should be absolute/normalized
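// For example (illustration only): commonAncestor(Paths.get("/a/b/c"), Paths.get("/a/b/d/e"))
// yields /a/b, and two paths sharing no prefix reduce to the filesystem root "/".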
private def commonAncestor(x: Path, y: Path): Path = {
val n = math.min(x.getNameCount, y.getNameCount)
(0 until n)
.takeWhile(i => x.getName(i) == y.getName(i))
.map(x.getName(_))
.foldLeft(java.nio.file.Paths.get("/"))(_.resolve(_))
}
private lazy val mergifyYaml = Def.setting {
(ThisBuild / baseDirectory).value / ".mergify.yml"
}
private lazy val generateMergifyContents = Def.task {
import _root_.io.circe.syntax._
import _root_.io.circe.yaml.Printer
val contents = Map("pull_request_rules" -> mergifyPrRules.value.toList)
val printer = Printer.spaces2.copy(dropNullKeys = true)
s"""|# This file was automatically generated by sbt-typelevel-mergify using the
|# mergifyGenerate task. You should add and commit this file to
|# your git repository. It goes without saying that you shouldn't edit
|# this file by hand! Instead, if you wish to make changes, you should
|# change your sbt build configuration to revise the mergify configuration
|# to meet your needs, then regenerate this file.
|
|${printer.pretty(contents.asJson)}""".stripMargin
}
}
| typelevel/sbt-typelevel | mergify/src/main/scala/org/typelevel/sbt/mergify/MergifyPlugin.scala | Scala | apache-2.0 | 7,260 |
package org.joda.time.chrono
import java.util.HashMap
import java.util.Locale
import org.joda.time.Chronology
import org.joda.time.DateTimeConstants
import org.joda.time.DateTimeField
import org.joda.time.DateTimeZone
import org.joda.time.DurationField
import org.joda.time.IllegalFieldValueException
import org.joda.time.IllegalInstantException
import org.joda.time.ReadablePartial
import org.joda.time.chrono.AssembledChronology.Fields
import org.joda.time.field.BaseDateTimeField
import org.joda.time.field.BaseDurationField
import ZonedChronology._
object ZonedChronology {
def getInstance(base: Chronology, zone: DateTimeZone): ZonedChronology = {
var _base: Chronology = base
if (_base == null) {
throw new IllegalArgumentException("Must supply a chronology")
}
_base = _base.withUTC()
if (_base == null) {
throw new IllegalArgumentException("UTC chronology must not be null")
}
if (zone == null) {
throw new IllegalArgumentException("DateTimeZone must not be null")
}
new ZonedChronology(_base, zone)
}
def useTimeArithmetic(field: DurationField): Boolean = {
field != null &&
field.getUnitMillis < DateTimeConstants.MILLIS_PER_HOUR * 12
}
@SerialVersionUID(-485345310999208286L)
class ZonedDurationField(val field: DurationField, val zone: DateTimeZone)
extends BaseDurationField(field.getType) {
var iField: DurationField = null
var iZone: DateTimeZone = null
val iTimeField = useTimeArithmetic(field)
if (!field.isSupported) {
throw new IllegalArgumentException()
}
iField = field
iZone = zone
def isPrecise(): Boolean = {
if (iTimeField) iField.isPrecise
else iField.isPrecise && this.iZone.isFixed
}
def getUnitMillis(): Long = iField.getUnitMillis
override def getValue(duration: Long, instant: Long): Int = {
iField.getValue(duration, addOffset(instant))
}
def getValueAsLong(duration: Long, instant: Long): Long = {
iField.getValueAsLong(duration, addOffset(instant))
}
def getMillis(value: Int, instant: Long): Long = {
iField.getMillis(value, addOffset(instant))
}
def getMillis(value: Long, instant: Long): Long = {
iField.getMillis(value, addOffset(instant))
}
def add(instant: Long, value: Int): Long = {
var _instant: Long = instant
val offset = getOffsetToAdd(_instant)
_instant = iField.add(_instant + offset, value)
_instant -
(if (iTimeField) offset else getOffsetFromLocalToSubtract(_instant))
}
def add(instant: Long, value: Long): Long = {
var _instant: Long = instant
val offset = getOffsetToAdd(_instant)
_instant = iField.add(_instant + offset, value)
_instant -
(if (iTimeField) offset else getOffsetFromLocalToSubtract(_instant))
}
override def getDifference(minuendInstant: Long,
subtrahendInstant: Long): Int = {
val offset = getOffsetToAdd(subtrahendInstant)
iField.getDifference(
minuendInstant +
(if (iTimeField) offset else getOffsetToAdd(minuendInstant)),
subtrahendInstant + offset)
}
def getDifferenceAsLong(minuendInstant: Long,
subtrahendInstant: Long): Long = {
val offset = getOffsetToAdd(subtrahendInstant)
iField.getDifferenceAsLong(
minuendInstant +
(if (iTimeField) offset else getOffsetToAdd(minuendInstant)),
subtrahendInstant + offset)
}
private def getOffsetToAdd(instant: Long): Int = {
val offset = this.iZone.getOffset(instant)
val sum = instant + offset
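// Explanatory note (not in the original): the check below detects Long overflow via sign
// bits: if instant and offset share a sign but sum's sign differs from instant's, the
// addition overflowed.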
if ((instant ^ sum) < 0 && (instant ^ offset) >= 0) {
throw new ArithmeticException(
"Adding time zone offset caused overflow")
}
offset
}
private def getOffsetFromLocalToSubtract(instant: Long): Int = {
val offset = this.iZone.getOffsetFromLocal(instant)
val diff = instant - offset
if ((instant ^ diff) < 0 && (instant ^ offset) < 0) {
throw new ArithmeticException(
"Subtracting time zone offset caused overflow")
}
offset
}
private def addOffset(instant: Long): Long =
iZone.convertUTCToLocal(instant)
override def equals(obj: Any): Boolean = {
if (super.equals(obj)) {
return true
} else if (obj.isInstanceOf[ZonedDurationField]) {
val other = obj.asInstanceOf[ZonedDurationField]
return iField == other.iField && iZone == other.iZone
}
false
}
override def hashCode(): Int = iField.hashCode ^ iZone.hashCode
}
@SerialVersionUID(-3968986277775529794L)
class ZonedDateTimeField(val field: DateTimeField,
val zone: DateTimeZone,
val durationField: DurationField,
val rangeDurationField: DurationField,
val leapDurationField: DurationField)
extends BaseDateTimeField(field.getType) {
var iTimeField = useTimeArithmetic(durationField)
var iField: DateTimeField = null
var iZone: DateTimeZone = null
var iDurationField: DurationField = null
var iRangeDurationField: DurationField = null
var iLeapDurationField: DurationField = null
if (!field.isSupported) {
throw new IllegalArgumentException()
}
iField = field
iZone = zone
iDurationField = durationField
iTimeField = useTimeArithmetic(durationField)
iRangeDurationField = rangeDurationField
iLeapDurationField = leapDurationField
def isLenient(): Boolean = iField.isLenient
def get(instant: Long): Int = {
val localInstant = iZone.convertUTCToLocal(instant)
iField.get(localInstant)
}
override def getAsText(instant: Long, locale: Locale): String = {
val localInstant = iZone.convertUTCToLocal(instant)
iField.getAsText(localInstant, locale)
}
override def getAsShortText(instant: Long, locale: Locale): String = {
val localInstant = iZone.convertUTCToLocal(instant)
iField.getAsShortText(localInstant, locale)
}
override def getAsText(fieldValue: Int, locale: Locale): String =
iField.getAsText(fieldValue, locale)
override def getAsShortText(fieldValue: Int, locale: Locale): String = {
iField.getAsShortText(fieldValue, locale)
}
override def add(instant: Long, value: Int): Long = {
if (iTimeField) {
val offset = getOffsetToAdd(instant)
val localInstant = iField.add(instant + offset, value)
localInstant - offset
} else {
var localInstant = iZone.convertUTCToLocal(instant)
localInstant = iField.add(localInstant, value)
iZone.convertLocalToUTC(localInstant, false, instant)
}
}
override def add(instant: Long, value: Long): Long = {
if (iTimeField) {
val offset = getOffsetToAdd(instant)
val localInstant = iField.add(instant + offset, value)
localInstant - offset
} else {
var localInstant = iZone.convertUTCToLocal(instant)
localInstant = iField.add(localInstant, value)
iZone.convertLocalToUTC(localInstant, false, instant)
}
}
override def addWrapField(instant: Long, value: Int): Long = {
if (iTimeField) {
val offset = getOffsetToAdd(instant)
val localInstant = iField.addWrapField(instant + offset, value)
localInstant - offset
} else {
var localInstant = iZone.convertUTCToLocal(instant)
localInstant = iField.addWrapField(localInstant, value)
iZone.convertLocalToUTC(localInstant, false, instant)
}
}
override def set(instant: Long, value: Int): Long = {
var localInstant = iZone.convertUTCToLocal(instant)
localInstant = iField.set(localInstant, value)
val result = iZone.convertLocalToUTC(localInstant, false, instant)
if (get(result) != value) {
val cause = IllegalInstantException.create(localInstant, iZone.getID)
val ex = IllegalFieldValueException
.create(iField.getType, Integer.valueOf(value), cause.getMessage)
ex.initCause(cause)
throw ex
}
result
}
override def set(instant: Long, text: String, locale: Locale): Long = {
var localInstant = iZone.convertUTCToLocal(instant)
localInstant = iField.set(localInstant, text, locale)
iZone.convertLocalToUTC(localInstant, false, instant)
}
override def getDifference(minuendInstant: Long,
subtrahendInstant: Long): Int = {
val offset = getOffsetToAdd(subtrahendInstant)
iField.getDifference(
minuendInstant +
(if (iTimeField) offset else getOffsetToAdd(minuendInstant)),
subtrahendInstant + offset)
}
override def getDifferenceAsLong(minuendInstant: Long,
subtrahendInstant: Long): Long = {
val offset = getOffsetToAdd(subtrahendInstant)
iField.getDifferenceAsLong(
minuendInstant +
(if (iTimeField) offset else getOffsetToAdd(minuendInstant)),
subtrahendInstant + offset)
}
def getDurationField(): DurationField = iDurationField
def getRangeDurationField(): DurationField = iRangeDurationField
override def isLeap(instant: Long): Boolean = {
val localInstant = iZone.convertUTCToLocal(instant)
iField.isLeap(localInstant)
}
override def getLeapAmount(instant: Long): Int = {
val localInstant = iZone.convertUTCToLocal(instant)
iField.getLeapAmount(localInstant)
}
override def getLeapDurationField(): DurationField = iLeapDurationField
def roundFloor(instant: Long): Long = {
var _instant: Long = instant
if (iTimeField) {
val offset = getOffsetToAdd(_instant)
_instant = iField.roundFloor(_instant + offset)
_instant - offset
} else {
var localInstant = iZone.convertUTCToLocal(_instant)
localInstant = iField.roundFloor(localInstant)
iZone.convertLocalToUTC(localInstant, false, _instant)
}
}
override def roundCeiling(instant: Long): Long = {
var _instant: Long = instant
if (iTimeField) {
val offset = getOffsetToAdd(_instant)
_instant = iField.roundCeiling(_instant + offset)
_instant - offset
} else {
var localInstant = iZone.convertUTCToLocal(_instant)
localInstant = iField.roundCeiling(localInstant)
iZone.convertLocalToUTC(localInstant, false, _instant)
}
}
override def remainder(instant: Long): Long = {
val localInstant = iZone.convertUTCToLocal(instant)
iField.remainder(localInstant)
}
def getMinimumValue(): Int = iField.getMinimumValue
override def getMinimumValue(instant: Long): Int = {
val localInstant = iZone.convertUTCToLocal(instant)
iField.getMinimumValue(localInstant)
}
override def getMinimumValue(instant: ReadablePartial): Int =
iField.getMinimumValue(instant)
override def getMinimumValue(instant: ReadablePartial,
values: Array[Int]): Int =
iField.getMinimumValue(instant, values)
def getMaximumValue(): Int = iField.getMaximumValue
override def getMaximumValue(instant: Long): Int = {
val localInstant = iZone.convertUTCToLocal(instant)
iField.getMaximumValue(localInstant)
}
override def getMaximumValue(instant: ReadablePartial): Int =
iField.getMaximumValue(instant)
override def getMaximumValue(instant: ReadablePartial,
values: Array[Int]): Int =
iField.getMaximumValue(instant, values)
override def getMaximumTextLength(locale: Locale): Int =
iField.getMaximumTextLength(locale)
override def getMaximumShortTextLength(locale: Locale): Int = {
iField.getMaximumShortTextLength(locale)
}
private def getOffsetToAdd(instant: Long): Int = {
val offset = this.iZone.getOffset(instant)
val sum = instant + offset
if ((instant ^ sum) < 0 && (instant ^ offset) >= 0) {
throw new ArithmeticException(
"Adding time zone offset caused overflow")
}
offset
}
override def equals(obj: Any): Boolean = {
if (super.equals(obj)) {
return true
} else if (obj.isInstanceOf[ZonedDateTimeField]) {
val other = obj.asInstanceOf[ZonedDateTimeField]
return iField == other.iField && iZone == other.iZone && iDurationField == other.iDurationField &&
iRangeDurationField == other.iRangeDurationField
}
false
}
override def hashCode(): Int = iField.hashCode ^ iZone.hashCode
}
}
@SerialVersionUID(-1079258847191166848L)
class ZonedChronology private (base: Chronology, zone: DateTimeZone)
extends AssembledChronology(base, zone) {
override def getZone(): DateTimeZone = getParam.asInstanceOf[DateTimeZone]
def withUTC(): Chronology = getBase
def withZone(zone: DateTimeZone): Chronology = {
var _zone = zone
if (_zone == null) {
_zone = DateTimeZone.getDefault
}
if (_zone == getParam) {
return this
}
if (_zone == DateTimeZone.UTC) {
return getBase
}
new ZonedChronology(getBase, _zone)
}
override def getDateTimeMillis(year: Int,
monthOfYear: Int,
dayOfMonth: Int,
millisOfDay: Int): Long = {
localToUTC(
getBase.getDateTimeMillis(year, monthOfYear, dayOfMonth, millisOfDay))
}
override def getDateTimeMillis(year: Int,
monthOfYear: Int,
dayOfMonth: Int,
hourOfDay: Int,
minuteOfHour: Int,
secondOfMinute: Int,
millisOfSecond: Int): Long = {
localToUTC(
getBase.getDateTimeMillis(year,
monthOfYear,
dayOfMonth,
hourOfDay,
minuteOfHour,
secondOfMinute,
millisOfSecond))
}
override def getDateTimeMillis(instant: Long,
hourOfDay: Int,
minuteOfHour: Int,
secondOfMinute: Int,
millisOfSecond: Int): Long = {
localToUTC(
getBase.getDateTimeMillis(instant + getZone.getOffset(instant),
hourOfDay,
minuteOfHour,
secondOfMinute,
millisOfSecond))
}
private def localToUTC(localInstant: Long): Long = {
val zone = getZone
val offset = zone.getOffsetFromLocal(localInstant)
val utcInstant = localInstant - offset
val offsetBasedOnUtc = zone.getOffset(utcInstant)
if (offset != offsetBasedOnUtc) {
throw IllegalInstantException.create(localInstant, zone.getID)
}
utcInstant
}
protected def assemble(fields: Fields) {
val converted = new HashMap[Any, Any]()
fields.eras = convertField(fields.eras, converted)
fields.centuries = convertField(fields.centuries, converted)
fields.years = convertField(fields.years, converted)
fields.months = convertField(fields.months, converted)
fields.weekyears = convertField(fields.weekyears, converted)
fields.weeks = convertField(fields.weeks, converted)
fields.days = convertField(fields.days, converted)
fields.halfdays = convertField(fields.halfdays, converted)
fields.hours = convertField(fields.hours, converted)
fields.minutes = convertField(fields.minutes, converted)
fields.seconds = convertField(fields.seconds, converted)
fields.millis = convertField(fields.millis, converted)
fields.year = convertField(fields.year, converted)
fields.yearOfEra = convertField(fields.yearOfEra, converted)
fields.yearOfCentury = convertField(fields.yearOfCentury, converted)
fields.centuryOfEra = convertField(fields.centuryOfEra, converted)
fields.era = convertField(fields.era, converted)
fields.dayOfWeek = convertField(fields.dayOfWeek, converted)
fields.dayOfMonth = convertField(fields.dayOfMonth, converted)
fields.dayOfYear = convertField(fields.dayOfYear, converted)
fields.monthOfYear = convertField(fields.monthOfYear, converted)
fields.weekOfWeekyear = convertField(fields.weekOfWeekyear, converted)
fields.weekyear = convertField(fields.weekyear, converted)
fields.weekyearOfCentury =
convertField(fields.weekyearOfCentury, converted)
fields.millisOfSecond = convertField(fields.millisOfSecond, converted)
fields.millisOfDay = convertField(fields.millisOfDay, converted)
fields.secondOfMinute = convertField(fields.secondOfMinute, converted)
fields.secondOfDay = convertField(fields.secondOfDay, converted)
fields.minuteOfHour = convertField(fields.minuteOfHour, converted)
fields.minuteOfDay = convertField(fields.minuteOfDay, converted)
fields.hourOfDay = convertField(fields.hourOfDay, converted)
fields.hourOfHalfday = convertField(fields.hourOfHalfday, converted)
fields.clockhourOfDay = convertField(fields.clockhourOfDay, converted)
fields.clockhourOfHalfday =
convertField(fields.clockhourOfHalfday, converted)
fields.halfdayOfDay = convertField(fields.halfdayOfDay, converted)
}
private def convertField(field: DurationField,
converted: HashMap[Any, Any]): DurationField = {
if (field == null || !field.isSupported) {
return field
}
if (converted.containsKey(field)) {
return converted.get(field).asInstanceOf[DurationField]
}
val zonedField = new ZonedDurationField(field, getZone)
converted.put(field, zonedField)
zonedField
}
private def convertField(field: DateTimeField,
converted: HashMap[Any, Any]): DateTimeField = {
if (field == null || !field.isSupported) {
return field
}
if (converted.containsKey(field)) {
return converted.get(field).asInstanceOf[DateTimeField]
}
val zonedField = new ZonedDateTimeField(
field,
getZone,
convertField(field.getDurationField, converted),
convertField(field.getRangeDurationField, converted),
convertField(field.getLeapDurationField, converted))
converted.put(field, zonedField)
zonedField
}
override def equals(obj: Any): Boolean = {
if (super.equals(obj)) {
return true
}
if (obj.isInstanceOf[ZonedChronology] == false) {
return false
}
val chrono = obj.asInstanceOf[ZonedChronology]
getBase == chrono.getBase && getZone == chrono.getZone
}
override def hashCode(): Int = {
326565 + getZone.hashCode * 11 + getBase.hashCode * 7
}
override def toString(): String = {
"ZonedChronology[" + getBase + ", " + getZone.getID +
']'
}
}
| mdedetrich/soda-time | jvm/src/main/scala/org/joda/time/chrono/ZonedChronology.scala | Scala | bsd-2-clause | 19,234 |
package laws
import java.time.OffsetDateTime
import Money._
import cats._
import cats.data._
import cats.implicits._
object Payments extends Utils {
case class Account(no: String, name: String, openDate: OffsetDateTime, closeDate: Option[OffsetDateTime] = None)
case class Payment(account: Account, amount: Money, dateOfPayment: OffsetDateTime)
def creditsOnly(p: Payment): Money = if (p.amount.isDebit) zeroMoney else p.amount
// concrete implementation
def valuationConcrete(payments: List[Payment]) = payments.foldLeft(zeroMoney) { (a, e) =>
add(a, creditsOnly(e))
}
// generic implementation
def valuation(payments: List[Payment]): Money = {
implicit val m: Monoid[Money] = Money.MoneyAddMonoid
mapReduce(payments)(creditsOnly)
}
def maxPayment(payments: List[Payment]): Money = {
implicit val m: Monoid[Money] = Money.MoneyOrderMonoid
mapReduce(payments)(creditsOnly)
}
}
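// A minimal sketch (not part of the original file) of the same idea with plain Ints: the fold
// is fixed and only the Monoid instance changes, switching the result from a sum to a max,
// exactly as MoneyAddMonoid and MoneyOrderMonoid do for Money above.
object MonoidChoiceSketch {
  def mapReduceInts(xs: List[Int])(f: Int => Int)(implicit m: Monoid[Int]): Int =
    xs.foldLeft(m.empty)((acc, x) => m.combine(acc, f(x)))
  private val intMaxMonoid: Monoid[Int] = new Monoid[Int] {
    def empty: Int = 0
    def combine(x: Int, y: Int): Int = x max y
  }
  val total = mapReduceInts(List(1, 5, 3))(identity) // 9, using cats' additive Monoid[Int]
  val highest = mapReduceInts(List(1, 5, 3))(identity)(intMaxMonoid) // 5
}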
| debasishg/pigeon | laws-tango/src/main/scala/laws/Payments.scala | Scala | apache-2.0 | 926 |
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.driver.querybuilder.dslentities
import com.stratio.crossdata.driver.querybuilder.{Expression, Predicate, Relation}
object JoinType extends Enumeration {
type JoinType = Value
val Inner = Value(join())
val LeftOuter = Value(outer("LEFT"))
val RightOuter = Value(outer("RIGHT"))
val FullOuter = Value(outer("FULL"))
val LeftSemi = Value(join("LEFT SEMI"))
private def outer(tpStr: String): String = join(s" $tpStr OUTER ")
private def join(tpStr: String = ""): String = s" ${tpStr} JOIN"
}
import JoinType._
case class Join(private val left: Relation,
private val right: Relation,
private val joinType: JoinType,
private val condition: Option[Expression] = None) extends Relation {
def on(condition: String): Relation = on(XDQLStatement(condition))
def on(condition: Predicate): Relation =
Join(left, right, joinType, Some(condition))
override private[querybuilder] def toXDQL: String =
s"${left.toXDQL} $joinType ${right.toXDQL}" + condition.map(c => s" ON ${c.toXDQL}").getOrElse("")
} | luismcl/crossdata | driver/src/main/scala/com/stratio/crossdata/driver/querybuilder/dslentities/joins.scala | Scala | apache-2.0 | 1,723 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.json
import java.util.Comparator
import scala.util.control.Exception.allCatch
import com.fasterxml.jackson.core._
import org.apache.spark.SparkException
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.analysis.TypeCoercion
import org.apache.spark.sql.catalyst.expressions.ExprUtils
import org.apache.spark.sql.catalyst.json.JacksonUtils.nextUntil
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.catalyst.util.LegacyDateFormats.FAST_DATE_FORMAT
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
private[sql] class JsonInferSchema(options: JSONOptions) extends Serializable {
private val decimalParser = ExprUtils.getDecimalParser(options.locale)
private val timestampFormatter = TimestampFormatter(
options.timestampFormat,
options.zoneId,
options.locale,
legacyFormat = FAST_DATE_FORMAT)
/**
* Infer the type of a collection of json records in three stages:
* 1. Infer the type of each record
* 2. Merge types by choosing the lowest type necessary to cover equal keys
* 3. Replace any remaining null fields with string, the top type
*/
def infer[T](
json: RDD[T],
createParser: (JsonFactory, T) => JsonParser): StructType = {
val parseMode = options.parseMode
val columnNameOfCorruptRecord = options.columnNameOfCorruptRecord
// In each RDD partition, perform schema inference on each row and merge afterwards.
val typeMerger = JsonInferSchema.compatibleRootType(columnNameOfCorruptRecord, parseMode)
val mergedTypesFromPartitions = json.mapPartitions { iter =>
val factory = options.buildJsonFactory()
iter.flatMap { row =>
try {
Utils.tryWithResource(createParser(factory, row)) { parser =>
parser.nextToken()
Some(inferField(parser))
}
} catch {
case e @ (_: RuntimeException | _: JsonProcessingException) => parseMode match {
case PermissiveMode =>
Some(StructType(Seq(StructField(columnNameOfCorruptRecord, StringType))))
case DropMalformedMode =>
None
case FailFastMode =>
throw new SparkException("Malformed records are detected in schema inference. " +
s"Parse Mode: ${FailFastMode.name}.", e)
}
}
}.reduceOption(typeMerger).toIterator
}
// Here we manually submit a fold-like Spark job, so that we can set the SQLConf when running
// the fold functions in the scheduler event loop thread.
val existingConf = SQLConf.get
var rootType: DataType = StructType(Nil)
val foldPartition = (iter: Iterator[DataType]) => iter.fold(StructType(Nil))(typeMerger)
val mergeResult = (index: Int, taskResult: DataType) => {
rootType = SQLConf.withExistingConf(existingConf) {
typeMerger(rootType, taskResult)
}
}
json.sparkContext.runJob(mergedTypesFromPartitions, foldPartition, mergeResult)
canonicalizeType(rootType, options)
.find(_.isInstanceOf[StructType])
// canonicalizeType erases all empty structs, including the only one we want to keep
.getOrElse(StructType(Nil)).asInstanceOf[StructType]
}
/**
* Infer the type of a json document from the parser's token stream
*/
def inferField(parser: JsonParser): DataType = {
import com.fasterxml.jackson.core.JsonToken._
parser.getCurrentToken match {
case null | VALUE_NULL => NullType
case FIELD_NAME =>
parser.nextToken()
inferField(parser)
case VALUE_STRING if parser.getTextLength < 1 =>
// Zero length strings and nulls have special handling to deal
// with JSON generators that do not distinguish between the two.
// To accurately infer types for empty strings that are really
// meant to represent nulls we assume that the two are isomorphic
// but will defer treating null fields as strings until all the
// record fields' types have been combined.
NullType
case VALUE_STRING =>
val field = parser.getText
lazy val decimalTry = allCatch opt {
val bigDecimal = decimalParser(field)
DecimalType(bigDecimal.precision, bigDecimal.scale)
}
if (options.prefersDecimal && decimalTry.isDefined) {
decimalTry.get
} else if (options.inferTimestamp &&
(allCatch opt timestampFormatter.parse(field)).isDefined) {
TimestampType
} else {
StringType
}
case START_OBJECT =>
val builder = Array.newBuilder[StructField]
while (nextUntil(parser, END_OBJECT)) {
builder += StructField(
parser.getCurrentName,
inferField(parser),
nullable = true)
}
val fields: Array[StructField] = builder.result()
// Note: other code relies on this sorting for correctness, so don't remove it!
java.util.Arrays.sort(fields, JsonInferSchema.structFieldComparator)
StructType(fields)
case START_ARRAY =>
// If this JSON array is empty, we use NullType as a placeholder.
// If this array is not empty in other JSON objects, we can resolve
// the type as we pass through all JSON objects.
var elementType: DataType = NullType
while (nextUntil(parser, END_ARRAY)) {
elementType = JsonInferSchema.compatibleType(
elementType, inferField(parser))
}
ArrayType(elementType)
case (VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT) if options.primitivesAsString => StringType
case (VALUE_TRUE | VALUE_FALSE) if options.primitivesAsString => StringType
case VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT =>
import JsonParser.NumberType._
parser.getNumberType match {
// For Integer values, use LongType by default.
case INT | LONG => LongType
// Since we do not have a data type backed by BigInteger,
// when we see a Java BigInteger, we use DecimalType.
case BIG_INTEGER | BIG_DECIMAL =>
val v = parser.getDecimalValue
if (Math.max(v.precision(), v.scale()) <= DecimalType.MAX_PRECISION) {
DecimalType(Math.max(v.precision(), v.scale()), v.scale())
} else {
DoubleType
}
case FLOAT | DOUBLE if options.prefersDecimal =>
val v = parser.getDecimalValue
if (Math.max(v.precision(), v.scale()) <= DecimalType.MAX_PRECISION) {
DecimalType(Math.max(v.precision(), v.scale()), v.scale())
} else {
DoubleType
}
case FLOAT | DOUBLE =>
DoubleType
}
case VALUE_TRUE | VALUE_FALSE => BooleanType
}
}
/**
* Recursively canonicalizes inferred types, e.g., removes StructTypes with no fields,
* drops NullTypes or converts them to StringType based on provided options.
*/
private[catalyst] def canonicalizeType(
tpe: DataType, options: JSONOptions): Option[DataType] = tpe match {
case at: ArrayType =>
canonicalizeType(at.elementType, options)
.map(t => at.copy(elementType = t))
case StructType(fields) =>
val canonicalFields = fields.filter(_.name.nonEmpty).flatMap { f =>
canonicalizeType(f.dataType, options)
.map(t => f.copy(dataType = t))
}
// SPARK-8093: empty structs should be deleted
if (canonicalFields.isEmpty) {
None
} else {
Some(StructType(canonicalFields))
}
case NullType =>
if (options.dropFieldIfAllNull) {
None
} else {
Some(StringType)
}
case other => Some(other)
}
}
object JsonInferSchema {
val structFieldComparator = new Comparator[StructField] {
override def compare(o1: StructField, o2: StructField): Int = {
o1.name.compareTo(o2.name)
}
}
def isSorted(arr: Array[StructField]): Boolean = {
var i: Int = 0
while (i < arr.length - 1) {
if (structFieldComparator.compare(arr(i), arr(i + 1)) > 0) {
return false
}
i += 1
}
true
}
def withCorruptField(
struct: StructType,
other: DataType,
columnNameOfCorruptRecords: String,
parseMode: ParseMode): StructType = parseMode match {
case PermissiveMode =>
// If we see any other data type at the root level, we get records that cannot be
// parsed. So, we use the struct as the data type and add the corrupt field to the schema.
if (!struct.fieldNames.contains(columnNameOfCorruptRecords)) {
// If this given struct does not have a column used for corrupt records,
// add this field.
val newFields: Array[StructField] =
StructField(columnNameOfCorruptRecords, StringType, nullable = true) +: struct.fields
// Note: other code relies on this sorting for correctness, so don't remove it!
java.util.Arrays.sort(newFields, structFieldComparator)
StructType(newFields)
} else {
// Otherwise, just return this struct.
struct
}
case DropMalformedMode =>
// If corrupt record handling is disabled we retain the valid schema and discard the other.
struct
case FailFastMode =>
// If `other` is not struct type, consider it as malformed one and throws an exception.
throw new SparkException("Malformed records are detected in schema inference. " +
s"Parse Mode: ${FailFastMode.name}. Reasons: Failed to infer a common schema. " +
s"Struct types are expected, but `${other.catalogString}` was found.")
}
/**
* Remove top-level ArrayType wrappers and merge the remaining schemas
*/
def compatibleRootType(
columnNameOfCorruptRecords: String,
parseMode: ParseMode): (DataType, DataType) => DataType = {
// Since we support array of json objects at the top level,
// we need to check the element type and find the root level data type.
case (ArrayType(ty1, _), ty2) =>
compatibleRootType(columnNameOfCorruptRecords, parseMode)(ty1, ty2)
case (ty1, ArrayType(ty2, _)) =>
compatibleRootType(columnNameOfCorruptRecords, parseMode)(ty1, ty2)
// Discard null/empty documents
case (struct: StructType, NullType) => struct
case (NullType, struct: StructType) => struct
case (struct: StructType, o) if !o.isInstanceOf[StructType] =>
withCorruptField(struct, o, columnNameOfCorruptRecords, parseMode)
case (o, struct: StructType) if !o.isInstanceOf[StructType] =>
withCorruptField(struct, o, columnNameOfCorruptRecords, parseMode)
// If we get anything else, we call compatibleType.
// Usually, when we reach here, ty1 and ty2 are two StructTypes.
case (ty1, ty2) => compatibleType(ty1, ty2)
}
private[this] val emptyStructFieldArray = Array.empty[StructField]
/**
* Returns the most general data type for two given data types.
*/
def compatibleType(t1: DataType, t2: DataType): DataType = {
TypeCoercion.findTightestCommonType(t1, t2).getOrElse {
// t1 or t2 is a StructType, ArrayType, or an unexpected type.
(t1, t2) match {
// Double support larger range than fixed decimal, DecimalType.Maximum should be enough
// in most case, also have better precision.
case (DoubleType, _: DecimalType) | (_: DecimalType, DoubleType) =>
DoubleType
case (t1: DecimalType, t2: DecimalType) =>
val scale = math.max(t1.scale, t2.scale)
val range = math.max(t1.precision - t1.scale, t2.precision - t2.scale)
if (range + scale > 38) {
// DecimalType can't support precision > 38
DoubleType
} else {
DecimalType(range + scale, scale)
}
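// Worked example (illustration only): merging DecimalType(5, 2) with DecimalType(10, 0)
// gives scale = 2 and range = max(5 - 2, 10 - 0) = 10, i.e. DecimalType(12, 2); only when
// range + scale would exceed 38 does the merge fall back to DoubleType.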
case (StructType(fields1), StructType(fields2)) =>
// Both fields1 and fields2 should be sorted by name, since inferField performs sorting.
// Therefore, we can take advantage of the fact that we're merging sorted lists and skip
// building a hash map or performing additional sorting.
assert(isSorted(fields1),
s"${StructType.simpleString}'s fields were not sorted: ${fields1.toSeq}")
assert(isSorted(fields2),
s"${StructType.simpleString}'s fields were not sorted: ${fields2.toSeq}")
val newFields = new java.util.ArrayList[StructField]()
var f1Idx = 0
var f2Idx = 0
while (f1Idx < fields1.length && f2Idx < fields2.length) {
val f1Name = fields1(f1Idx).name
val f2Name = fields2(f2Idx).name
val comp = f1Name.compareTo(f2Name)
if (comp == 0) {
val dataType = compatibleType(fields1(f1Idx).dataType, fields2(f2Idx).dataType)
newFields.add(StructField(f1Name, dataType, nullable = true))
f1Idx += 1
f2Idx += 1
} else if (comp < 0) { // f1Name < f2Name
newFields.add(fields1(f1Idx))
f1Idx += 1
} else { // f1Name > f2Name
newFields.add(fields2(f2Idx))
f2Idx += 1
}
}
while (f1Idx < fields1.length) {
newFields.add(fields1(f1Idx))
f1Idx += 1
}
while (f2Idx < fields2.length) {
newFields.add(fields2(f2Idx))
f2Idx += 1
}
StructType(newFields.toArray(emptyStructFieldArray))
case (ArrayType(elementType1, containsNull1), ArrayType(elementType2, containsNull2)) =>
ArrayType(compatibleType(elementType1, elementType2), containsNull1 || containsNull2)
// The case that given `DecimalType` is capable of given `IntegralType` is handled in
// `findTightestCommonType`. Both cases below will be executed only when the given
// `DecimalType` is not capable of the given `IntegralType`.
case (t1: IntegralType, t2: DecimalType) =>
compatibleType(DecimalType.forType(t1), t2)
case (t1: DecimalType, t2: IntegralType) =>
compatibleType(t1, DecimalType.forType(t2))
// strings and every string is a Json object.
case (_, _) => StringType
}
}
}
}
}
| goldmedal/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JsonInferSchema.scala | Scala | apache-2.0 | 15,223 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.continuous
import org.apache.spark.{Partition, SparkEnv, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.sources.v2.writer.DataWriter
import org.apache.spark.sql.sources.v2.writer.streaming.StreamingDataWriterFactory
import org.apache.spark.util.Utils
/**
* The RDD writing to a sink in continuous processing.
*
* Within each task, we repeatedly call prev.compute(). Each resulting iterator contains the data
* to be written for one epoch, which we commit and forward to the driver.
*
* We keep repeating prev.compute() and writing new epochs until the query is shut down.
*/
class ContinuousWriteRDD(var prev: RDD[InternalRow], writerFactory: StreamingDataWriterFactory)
extends RDD[Unit](prev) {
override val partitioner = prev.partitioner
override def getPartitions: Array[Partition] = prev.partitions
override def compute(split: Partition, context: TaskContext): Iterator[Unit] = {
val epochCoordinator = EpochCoordinatorRef.get(
context.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY),
SparkEnv.get)
EpochTracker.initializeCurrentEpoch(
context.getLocalProperty(ContinuousExecution.START_EPOCH_KEY).toLong)
while (!context.isInterrupted() && !context.isCompleted()) {
var dataWriter: DataWriter[InternalRow] = null
// write the data and commit this writer.
Utils.tryWithSafeFinallyAndFailureCallbacks(block = {
try {
val dataIterator = prev.compute(split, context)
dataWriter = writerFactory.createWriter(
context.partitionId(),
context.taskAttemptId(),
EpochTracker.getCurrentEpoch.get)
while (dataIterator.hasNext) {
dataWriter.write(dataIterator.next())
}
logInfo(s"Writer for partition ${context.partitionId()} " +
s"in epoch ${EpochTracker.getCurrentEpoch.get} is committing.")
val msg = dataWriter.commit()
epochCoordinator.send(
CommitPartitionEpoch(
context.partitionId(),
EpochTracker.getCurrentEpoch.get,
msg)
)
logInfo(s"Writer for partition ${context.partitionId()} " +
s"in epoch ${EpochTracker.getCurrentEpoch.get} committed.")
EpochTracker.incrementCurrentEpoch()
} catch {
case _: InterruptedException =>
// Continuous shutdown always involves an interrupt. Just finish the task.
}
})(catchBlock = {
// If there is an error, abort this writer. We enter this callback in the middle of
// rethrowing an exception, so compute() will stop executing at this point.
logError(s"Writer for partition ${context.partitionId()} is aborting.")
if (dataWriter != null) dataWriter.abort()
logError(s"Writer for partition ${context.partitionId()} aborted.")
})
}
Iterator()
}
override def clearDependencies() {
super.clearDependencies()
prev = null
}
}
| WindCanDie/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousWriteRDD.scala | Scala | apache-2.0 | 3,915 |
package scala.slick.lifted
import annotation.implicitNotFound
import scala.slick.ast.BaseTypedType
@implicitNotFound("Cannot perform option-mapped operation\\n with type: (${P1}, ${P2}) => ${R}\\n for base type: (${B1}, ${B2}) => ${BR}")
sealed trait OptionMapper2[B1, B2, BR, P1, P2, R] extends (Column[BR] => Column[R])
object OptionMapper2 {
val plain = new OptionMapper2[Any,Any,Any,Any,Any,Any] { def apply(n: Column[Any]): Column[Any] = n }
val option = new OptionMapper2[Any,Any,Any,Any,Any,Option[Any]] { def apply(n: Column[Any]): Column[Option[Any]] = n.? }
@inline implicit def getOptionMapper2TT[B1, B2 : BaseTypedType, BR] = OptionMapper2.plain .asInstanceOf[OptionMapper2[B1, B2, BR, B1, B2, BR]]
@inline implicit def getOptionMapper2TO[B1, B2 : BaseTypedType, BR] = OptionMapper2.option.asInstanceOf[OptionMapper2[B1, B2, BR, B1, Option[B2], Option[BR]]]
@inline implicit def getOptionMapper2OT[B1, B2 : BaseTypedType, BR] = OptionMapper2.option.asInstanceOf[OptionMapper2[B1, B2, BR, Option[B1], B2, Option[BR]]]
@inline implicit def getOptionMapper2OO[B1, B2 : BaseTypedType, BR] = OptionMapper2.option.asInstanceOf[OptionMapper2[B1, B2, BR, Option[B1], Option[B2], Option[BR]]]
}
@implicitNotFound("Cannot perform option-mapped operation\\n with type: (${P1}, ${P2}, ${P3}) => ${R}\\n for base type: (${B1}, ${B2}, ${B3}) => ${BR}")
sealed trait OptionMapper3[B1, B2, B3, BR, P1, P2, P3, R] extends (Column[BR] => Column[R])
object OptionMapper3 {
val plain = new OptionMapper3[Any,Any,Any,Any,Any,Any,Any,Any] { def apply(n: Column[Any]): Column[Any] = n }
val option = new OptionMapper3[Any,Any,Any,Any,Any,Any,Any,Option[Any]] { def apply(n: Column[Any]): Column[Option[Any]] = n.? }
@inline implicit def getOptionMapper3TTT[B1, B2 : BaseTypedType, B3 : BaseTypedType, BR] = OptionMapper3.plain .asInstanceOf[OptionMapper3[B1, B2, B3, BR, B1, B2, B3, BR]]
@inline implicit def getOptionMapper3TTO[B1, B2 : BaseTypedType, B3 : BaseTypedType, BR] = OptionMapper3.option.asInstanceOf[OptionMapper3[B1, B2, B3, BR, B1, B2, Option[B3], Option[BR]]]
@inline implicit def getOptionMapper3TOT[B1, B2 : BaseTypedType, B3 : BaseTypedType, BR] = OptionMapper3.option.asInstanceOf[OptionMapper3[B1, B2, B3, BR, B1, Option[B2], B3, Option[BR]]]
@inline implicit def getOptionMapper3TOO[B1, B2 : BaseTypedType, B3 : BaseTypedType, BR] = OptionMapper3.option.asInstanceOf[OptionMapper3[B1, B2, B3, BR, B1, Option[B2], Option[B3], Option[BR]]]
@inline implicit def getOptionMapper3OTT[B1, B2 : BaseTypedType, B3 : BaseTypedType, BR] = OptionMapper3.option.asInstanceOf[OptionMapper3[B1, B2, B3, BR, Option[B1], B2, B3, Option[BR]]]
@inline implicit def getOptionMapper3OTO[B1, B2 : BaseTypedType, B3 : BaseTypedType, BR] = OptionMapper3.option.asInstanceOf[OptionMapper3[B1, B2, B3, BR, Option[B1], B2, Option[B3], Option[BR]]]
@inline implicit def getOptionMapper3OOT[B1, B2 : BaseTypedType, B3 : BaseTypedType, BR] = OptionMapper3.option.asInstanceOf[OptionMapper3[B1, B2, B3, BR, Option[B1], Option[B2], B3, Option[BR]]]
@inline implicit def getOptionMapper3OOO[B1, B2 : BaseTypedType, B3 : BaseTypedType, BR] = OptionMapper3.option.asInstanceOf[OptionMapper3[B1, B2, B3, BR, Option[B1], Option[B2], Option[B3], Option[BR]]]
}
object OptionMapperDSL {
type arg[B1, P1] = {
type to[BR, PR] = OptionMapper2[B1, B1, BR, P1, P1, PR]
type toSame = OptionMapper2[B1, B1, B1, P1, P1, P1]
type arg[B2, P2] = {
type to[BR, PR] = OptionMapper2[B1, B2, BR, P1, P2, PR]
type arg[B3, P3] = {
type to[BR, PR] = OptionMapper3[B1, B2, B3, BR, P1, P2, P3, PR]
}
}
}
}
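// Illustrative note (not from the upstream Slick sources): these implicits let the compiler
// pick the correct option lifting for a binary operator. For example, comparing a Column[Int]
// with a Column[Option[Int]] resolves getOptionMapper2TO, i.e. an
// OptionMapper2[Int, Int, Boolean, Int, Option[Int], Option[Boolean]], so the comparison yields
// a Column[Option[Boolean]]; only when both operands are non-optional does getOptionMapper2TT
// apply and keep a plain Column[Boolean].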
| boldradius/slick | src/main/scala/scala/slick/lifted/OptionMapper.scala | Scala | bsd-2-clause | 3,781 |
package com.weez.mercury
object App {
import akka.actor._
import common._
def main(args: Array[String]): Unit = {
start(args)
}
def start(args: Array[String]) = {
val system = ActorSystem("mercury")
// use 'kill -15' (SIGTERM) or 'kill -2' (SIGINT) to terminate this application.
// do NOT use 'kill -9' (SIGKILL), otherwise the shutdown hook will not work.
// http://stackoverflow.com/questions/2541597/how-to-gracefully-handle-the-sigkill-signal-in-java
Runtime.getRuntime.addShutdownHook(new Thread {
override def run(): Unit = {
system.shutdown()
}
})
val config = system.settings.config.getConfig("weez-mercury")
val app = new ServiceManager(system, config)
if (app.config.getBoolean("http.enable")) {
system.actorOf(Props(classOf[HttpServer.ServerActor], app, config.getConfig("http")), "http")
}
if (config.getBoolean("akka.enable")) {
system.actorOf(Props(classOf[AkkaServer.ServerActor], app), "akka")
}
app.start()
app
}
}
| weeztech/weez-mercury | main/src/main/scala/com/weez/mercury/App.scala | Scala | apache-2.0 | 1,040 |
package lila.pref
import play.api.mvc.Request
// because the form structure has changed
// and the mobile app keeps sending the old format
object FormCompatLayer {
private type FormData = Map[String, Seq[String]]
def apply(req: Request[_]): FormData =
moveTo("display", List(
"animation",
"captured",
"highlight",
"destination",
"coords",
"replay",
"pieceNotation",
"blindfold")) {
moveTo("behavior", List(
"moveEvent",
"premove",
"takeback",
"autoQueen",
"autoThreefold",
"submitMove",
"confirmResign",
"keyboardMove")) {
reqToFormData(req)
}
}
private def moveTo(prefix: String, fields: List[String])(data: FormData): FormData =
fields.foldLeft(data) {
case (d, field) =>
val newField = s"$prefix.$field"
d + (newField -> ~d.get(newField).orElse(d.get(field)))
}
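  // Illustrative example (not from the original code): with prefix "behavior" and an old-style
  // body Map("premove" -> Seq("1")), moveTo adds the prefixed key while keeping the old one,
  // producing Map("premove" -> Seq("1"), "behavior.premove" -> Seq("1")); the `~` above falls
  // back to an empty Seq when neither key is present.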
private def reqToFormData(req: Request[_]): FormData = {
(req.body match {
case body: play.api.mvc.AnyContent if body.asFormUrlEncoded.isDefined => body.asFormUrlEncoded.get
case body: play.api.mvc.AnyContent if body.asMultipartFormData.isDefined => body.asMultipartFormData.get.asFormUrlEncoded
case _ => Map.empty[String, Seq[String]]
}) ++ req.queryString
}
}
| clarkerubber/lila | modules/pref/src/main/FormCompatLayer.scala | Scala | agpl-3.0 | 1,340 |
@main def Test = {
println(test(identity))
println(test(x => x))
println(test(x => { println(x); x }))
}
| som-snytt/dotty | tests/run-macros/lambda-extractor-1/Test_2.scala | Scala | apache-2.0 | 112 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.unsafe
import java.io.File
import scala.collection.JavaConverters._
import org.apache.commons.io.FileUtils
import org.apache.commons.io.filefilter.TrueFileFilter
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.{HashPartitioner, ShuffleDependency, SparkContext, ShuffleSuite}
import org.apache.spark.rdd.ShuffledRDD
import org.apache.spark.serializer.{JavaSerializer, KryoSerializer}
import org.apache.spark.util.Utils
class UnsafeShuffleSuite extends ShuffleSuite with BeforeAndAfterAll {
// This test suite should run all tests in ShuffleSuite with unsafe-based shuffle.
override def beforeAll() {
conf.set("spark.shuffle.manager", "tungsten-sort")
// UnsafeShuffleManager requires at least 128 MB of memory per task in order to be able to sort
// shuffle records.
conf.set("spark.shuffle.memoryFraction", "0.5")
}
test("UnsafeShuffleManager properly cleans up files for shuffles that use the new shuffle path") {
val tmpDir = Utils.createTempDir()
try {
val myConf = conf.clone()
.set("spark.local.dir", tmpDir.getAbsolutePath)
sc = new SparkContext("local", "test", myConf)
// Create a shuffled RDD and verify that it will actually use the new UnsafeShuffle path
val rdd = sc.parallelize(1 to 10, 1).map(x => (x, x))
val shuffledRdd = new ShuffledRDD[Int, Int, Int](rdd, new HashPartitioner(4))
.setSerializer(new KryoSerializer(myConf))
val shuffleDep = shuffledRdd.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]]
assert(UnsafeShuffleManager.canUseUnsafeShuffle(shuffleDep))
def getAllFiles: Set[File] =
FileUtils.listFiles(tmpDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE).asScala.toSet
val filesBeforeShuffle = getAllFiles
// Force the shuffle to be performed
shuffledRdd.count()
// Ensure that the shuffle actually created files that will need to be cleaned up
val filesCreatedByShuffle = getAllFiles -- filesBeforeShuffle
      val shuffleFileNames = filesCreatedByShuffle.map(_.getName)
      shuffleFileNames should be (Set("shuffle_0_0_0.data", "shuffle_0_0_0.index"))
// Check that the cleanup actually removes the files
sc.env.blockManager.master.removeShuffle(0, blocking = true)
for (file <- filesCreatedByShuffle) {
assert (!file.exists(), s"Shuffle file $file was not cleaned up")
}
} finally {
Utils.deleteRecursively(tmpDir)
}
}
test("UnsafeShuffleManager properly cleans up files for shuffles that use the old shuffle path") {
val tmpDir = Utils.createTempDir()
try {
val myConf = conf.clone()
.set("spark.local.dir", tmpDir.getAbsolutePath)
sc = new SparkContext("local", "test", myConf)
// Create a shuffled RDD and verify that it will actually use the old SortShuffle path
val rdd = sc.parallelize(1 to 10, 1).map(x => (x, x))
val shuffledRdd = new ShuffledRDD[Int, Int, Int](rdd, new HashPartitioner(4))
.setSerializer(new JavaSerializer(myConf))
val shuffleDep = shuffledRdd.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]]
assert(!UnsafeShuffleManager.canUseUnsafeShuffle(shuffleDep))
def getAllFiles: Set[File] =
FileUtils.listFiles(tmpDir, TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE).asScala.toSet
val filesBeforeShuffle = getAllFiles
// Force the shuffle to be performed
shuffledRdd.count()
// Ensure that the shuffle actually created files that will need to be cleaned up
val filesCreatedByShuffle = getAllFiles -- filesBeforeShuffle
      val shuffleFileNames = filesCreatedByShuffle.map(_.getName)
      shuffleFileNames should be (Set("shuffle_0_0_0.data", "shuffle_0_0_0.index"))
// Check that the cleanup actually removes the files
sc.env.blockManager.master.removeShuffle(0, blocking = true)
for (file <- filesCreatedByShuffle) {
assert (!file.exists(), s"Shuffle file $file was not cleaned up")
}
} finally {
Utils.deleteRecursively(tmpDir)
}
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/test/scala/org/apache/spark/shuffle/unsafe/UnsafeShuffleSuite.scala | Scala | apache-2.0 | 4,840 |
package com.kakao.cuesheet
import java.text.SimpleDateFormat
import java.util.Date
import com.kakao.cuesheet.launcher.YarnConnector
import org.apache.spark.SparkContext
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.yarn.CueSheetYarnClient
import org.apache.spark.launcher.SparkLauncherHook
import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions
/** The base type to extend from, to build a CueSheet application.
* The values of sc, ssc, sqlContext, and spark are accessible in the derived object's body as if
* it is inside a Spark shell. A typical example would be:
*
* {{{
* object Example extends CueSheet {
* sc.parallelize(1 to 100).sum()
* }
* }}}
*
* This class contains the main method, which is the application's entry point,
* to perform tasks like running in client/cluster mode, or making an assembly for deployment.
* This class uses Scala's DelayedInit mechanism, so it contains only methods, no variables.
* The required fields are defined in the superclass.
*/
abstract class CueSheet(additionalSettings: (String, String)*) extends CueSheetBase(additionalSettings: _*) with App {
import com.kakao.cuesheet.ExecutionConfig.{config, manager}
/** Overrides App.main to implement the entry point, instead of executing the main body.
* The main class body is still accessible via super.main()
*/
final override def main(args: Array[String]) {
init()
if (config.contains("cuesheet.install") && !isOnCluster) {
installApplication(config("cuesheet.install"), args)
} else {
if (ExecutionConfig.mode == CLIENT || isOnCluster) {
// launch the main class, if it is configured for client mode or if this JVM is inside cluster already.
runDriver(args)
} else {
// otherwise, deploy this application to the cluster
runDeploy(args)
}
}
}
/** prints runtime information */
private def init(): Unit = {
logger.info(s"Running CueSheet ${CueSheetVersion.version}")
val fields = getClass.getDeclaredFields.filterNot(_.getName.contains("$"))
fields.foreach { field =>
logger.warn(s"""field "${field.getName}" of type ${field.getType.getSimpleName} might not get serialized correctly""")
}
if (fields.nonEmpty) {
logger.warn(
s"""consider using the double-brace trick like:
|
|object ${getClass.getSimpleName.stripSuffix("$")} extends CueSheet {{
| // your main logic here ...
|}}
""".stripMargin)
}
}
  /** This method can be overridden to run custom logic once the SparkContext is available, before the application body executes.
    * When loaded from a checkpoint, the CueSheet class body does not get called, but this method does.
    * A possible example is to send a notification about the application launch, including e.g. sc.uiWebUrl, applicationId
    */
def sparkContextAvailable(sc: SparkContext): Unit = {
logger.info(s"Spark Context is now available; web UI: ${sc.uiWebUrl.getOrElse("none")}")
logger.info(s"Application ID: ${sc.applicationId}")
}
/** Executes the driver. In client mode, this function is called in the local JVM,
* and in cluster mode, this function is called inside a remote node,
* while communicating with this JVM which is running [[runDeploy]].
*/
private def runDriver(args: Array[String]): Unit = {
// maybe acquire streaming context, either from the checkpoint or a fresh one.
val maybeStreaming = sparkConf.getOption("spark.streaming.checkpoint.path") match {
case Some(checkpoint) =>
try {
// calling this lazy val 'ssc', will try to load checkpoint,
// and throws 'ReadFromCheckpoint' exception, if the loading is succesful.
val state = ssc.getState()
// so at this point, a new streaming context has been made.
logger.info(s"Starting a fresh streaming application. ssc.state=$state")
//
sparkContextAvailable(sc)
super.main(args)
Some(ssc)
} catch {
case ReadFromCheckpoint(streamingContext) =>
logger.info(s"successfully read checkpoint from $checkpoint")
sparkContextAvailable(sc)
Some(streamingContext)
}
case None =>
sparkContextAvailable(sc)
super.main(args)
if (streaming) Some(ssc) else None
}
maybeStreaming.collect {
case context =>
context.start()
context.awaitTermination()
}
if (contextAvailable) {
sc.stop()
}
}
/** deploy the application to a remote cluster */
private def runDeploy(args: Array[String]): Unit = {
manager match {
case YARN =>
val assembly = buildAssembly()
// skip launch when installing the application
if (!config.contains("cuesheet.install")) {
val arguments = ArrayBuffer("--jar", assembly, "--class", className) ++ args.flatMap { arg => Seq("--arg", arg) }
logger.info(s"spark-submit arguments: ${arguments.mkString(" ")}")
val hadoopConf = SparkHadoopUtil.get.newConfiguration(sparkConf)
CueSheetYarnClient.run(hadoopConf, sparkConf, arguments.toArray, saveApplicationId)
}
case SPARK =>
throw new NotImplementedError("Spark Standalone mode not implemented yet")
case MESOS =>
throw new NotImplementedError("Mesos mode not implemented yet")
case LOCAL =>
logger.error("local mode does not support running on cluster")
throw new RuntimeException("local mode does not support running on cluster")
}
}
private def installApplication(tag: String, args: Array[String]): Unit = {
if (ExecutionConfig.manager != YARN) {
throw new RuntimeException("Installing is supported only in YARN for now")
}
val assembly = buildAssembly()
val hadoopConf = SparkHadoopUtil.get.newConfiguration(sparkConf)
val uploadedAssembly = YarnConnector.uploadAssembly(hadoopConf, assembly, className, tag)
val jarName = uploadedAssembly.split('/').last
val sparkJars = sparkConf.get("spark.hdfs.jars")
val defaultFS = hadoopConf.get("fs.defaultFS")
val shortJar = uploadedAssembly.stripPrefix(defaultFS)
val shortSparkJars = sparkJars.stripPrefix(defaultFS)
val hadoopXML = HadoopUtils.getHadoopXML(hadoopConf)
val suffix = if (tag.nonEmpty) tag else new SimpleDateFormat("yyyyMMdd-HHmmss").format(new Date())
val dir = s"${jarName.stripSuffix(".jar")}-$tag"
val arguments = args.map(SparkLauncherHook.quoteForCommandString).mkString(" ")
System.err.println(
s"""
|rm -rf $dir && mkdir $dir && cd $dir &&
|echo $hadoopXML > core-site.xml &&
|hdfs --config . dfs -get hdfs://$shortJar \\\\!$jarName &&
|hdfs --config . dfs -get hdfs://$shortSparkJars &&
|java -classpath "*" $className $arguments && cd .. && rm -rf $dir
|
|""".stripMargin)
System.out.flush()
}
}
| kakao/cuesheet | src/main/scala/com/kakao/cuesheet/CueSheet.scala | Scala | apache-2.0 | 7,064 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.sdk.pipeline.aggregation.cube
case class ExpiringData(timeDimension: String, granularity: String, timeAvailability: String) | fjsc/sparta | sdk/src/main/scala/com/stratio/sparta/sdk/pipeline/aggregation/cube/ExpiringData.scala | Scala | apache-2.0 | 771 |
package blog.debug
import java.util.Properties
import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
import blog.Main
import blog.common.Util
import blog.io.TableWriter
import blog.model.Evidence
import blog.model.Model
import blog.model.Queries
import blog.sample.LWSampler
class LWDebugger(model: Model, evidence: Evidence, queries: Queries)
extends SamplerDebugger[LWSample](model, evidence, queries) {
def makeSampler = {
val sampler = new LWSampler(model, new Properties())
sampler.initialize(evidence, queries)
sampler
}
def nextSample = {
sampler.nextSample()
new LWSample(model, sampler.getLatestWorld(), sampler.getLatestLogWeight())
}
}
object LWDebugger {
/**
* Create a LWDebugger for the given model.
*
* Example usage from iblog:
* <code>
* scala> val d = LWDebugger.make("tmp/burglary.all")
* scala> import d._
* scala> n
* scala> s.eval("Earthquake | JohnCalls")
* </code>
*/
def make(path: String): LWDebugger = {
Util.initRandom(false)
val model = new Model()
val evidence = new Evidence(model)
val queries = new Queries(model)
Main.simpleSetupFromFiles(model, evidence, queries, path :: Nil)
new LWDebugger(model, evidence, queries)
}
}
| BayesianLogic/blog | src/main/scala/blog/debug/LWDebugger.scala | Scala | bsd-3-clause | 1,285 |
package fpinscala.errorhandling
sealed trait Option[+A] {
def map[B](f: A => B): Option[B] = this match {
case None => None
case Some(a) => Some(f(a))
}
def getOrElse[B>:A](default: => B): B = this match {
case None => default
case Some(a) => a
}
def flatMap[B](f: A => Option[B]): Option[B] =
map(f) getOrElse None
/*
Of course, we can also implement `flatMap` with explicit pattern matching.
*/
def flatMap_1[B](f: A => Option[B]): Option[B] = this match {
case None => None
case Some(a) => f(a)
}
  def orElse[B>:A](ob: Option[B]): Option[B] =
    this map (Some(_)) getOrElse ob
/*
Again, we can implement this with explicit pattern matching.
*/
def orElse_1[B>:A](ob: => Option[B]): Option[B] = this match {
case None => ob
case _ => this
}
def filter(f: A => Boolean): Option[A] = this match {
case Some(a) if f(a) => this
case _ => None
}
/*
This can also be defined in terms of `flatMap`.
*/
  def filter_1(f: A => Boolean): Option[A] =
    flatMap(a => if (f(a)) Some(a) else None)
}
case class Some[+A](get: A) extends Option[A]
case object None extends Option[Nothing]
object Option {
case class MyException(msg: String) extends RuntimeException
def failingFn(i: Int): Int =
try {
if (i > 42) throw MyException("fail!")
else i + 42
} catch {
case MyException(msg) => 42
}
def mean(xs: Seq[Double]): Option[Double] =
if (xs.isEmpty) None
else Some(xs.sum / xs.length)
def variance(xs: Seq[Double]): Option[Double] =
mean(xs) flatMap (m => mean(xs.map(x => math.pow(x - m, 2))))
import java.util.regex._
def pattern(s: String): Option[Pattern] =
try {
Some(Pattern.compile(s))
} catch {
case e: PatternSyntaxException => None
}
/*
The for-comprehension syntax is somewhat clearer. Here are both versions:
*/
def map2[A,B,C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C] =
a flatMap (aa =>
b map (bb =>
f(aa, bb)))
def map2_1[A,B,C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C] =
for {
a1 <- a
b1 <- b
} yield f(a1,b1)
def bothMatch(pat1: String, pat2: String, s: String): Option[Boolean] =
map2(mkMatcher(pat1), mkMatcher(pat2))((f,g) => f(s) && g(s))
def mkMatcher(pat: String): Option[String => Boolean] =
pattern(pat) map (p => (s: String) => p.matcher(s).matches) // The details of this API don't matter too much, but `p.matcher(s).matches` will check if the string `s` matches the pattern `p`.
/*
Here is an explicit recursive version:
*/
def sequence[A](a: List[Option[A]]): Option[List[A]] =
a match {
case Nil => Some(Nil)
case h::t => h flatMap (hh => sequence(t) map (hh :: _))
}
/*
It can also be implemented using `foldRight` and `map2`. The type annotation on `foldRight` is needed here, otherwise Scala wrongly infers the result type of the fold as `Some[Nil.type]` and reports a type error (try it!). This is an unfortunate consequence of Scala using subtyping to encode algebraic data types.
*/
def sequence_1[A](a: List[Option[A]]): Option[List[A]] =
a.foldRight[Option[List[A]]](Some(Nil))((x,y) => map2(x,y)(_ :: _))
def traverse[A, B](a: List[A])(f: A => Option[B]): Option[List[B]] =
a match {
case Nil => Some(Nil)
case h::t => map2(f(h), traverse(t)(f))(_ :: _)
}
def traverse_1[A, B](a: List[A])(f: A => Option[B]): Option[List[B]] =
a.foldRight[Option[List[B]]](Some(Nil))((h,t) => map2(f(h),t)(_ :: _))
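  // Illustrative sketch, not part of the original answer file: a tiny smoke test for the
  // combinators above. `sequence` fails as soon as any element is `None`, while `traverse`
  // maps and sequences the list in a single pass.
  def combinatorExamples(): Unit = {
    assert(map2(Some(1), Some(2))(_ + _) == Some(3))
    assert(sequence(List(Some(1), Some(2), Some(3))) == Some(List(1, 2, 3)))
    assert(sequence(List(Some(1), None, Some(3))) == None)
    val parsed = traverse(List("1", "2", "3")) { s =>
      if (s.forall(_.isDigit)) Some(s.toInt) else None
    }
    assert(parsed == Some(List(1, 2, 3)))
  }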
} | ryo-murai/fpinscala-exercises | answers/src/main/scala/fpinscala/errorhandling/Option.scala | Scala | mit | 3,590 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.aliyun.emr.examples
import org.apache.spark.{SparkConf, SparkContext}
object TestOss {
def main(args: Array[String]): Unit = {
if (args.length < 2) {
System.err.println(
"""Usage: TestOss <inputPath> <numPartition>
|
|Arguments:
|
| inputPath Aliyun OSS object path, like: oss://accessKeyId:[email protected]/path
| numPartitions the number of RDD partitions.
|
""".stripMargin)
      System.exit(1)
    }
    val inputPath = args(0)
    val numPartitions = args(1).toInt
val conf = new SparkConf().setAppName("Test OSS Read")
val sc = new SparkContext(conf)
val ossData = sc.textFile(inputPath, numPartitions)
println("The top 10 lines are:")
ossData.top(10).foreach(println)
}
}
| uncleGen/aliyun-emapreduce-sdk | examples/src/main/scala/com/aliyun/emr/examples/TestOss.scala | Scala | artistic-2.0 | 1,668 |
package reductions
import scala.annotation._
import org.scalameter._
import common._
object ParallelParenthesesBalancingRunner {
@volatile var seqResult = false
@volatile var parResult = false
val standardConfig = config(
Key.exec.minWarmupRuns -> 40,
Key.exec.maxWarmupRuns -> 80,
Key.exec.benchRuns -> 120,
Key.verbose -> true
) withWarmer(new Warmer.Default)
def main(args: Array[String]): Unit = {
val length = 100000000
val chars = new Array[Char](length)
val threshold = 10000
val seqtime = standardConfig measure {
seqResult = ParallelParenthesesBalancing.balance(chars)
}
println(s"sequential result = $seqResult")
println(s"sequential balancing time: $seqtime ms")
val fjtime = standardConfig measure {
parResult = ParallelParenthesesBalancing.parBalance(chars, threshold)
}
println(s"parallel result = $parResult")
println(s"parallel balancing time: $fjtime ms")
println(s"speedup: ${seqtime / fjtime}")
}
}
object ParallelParenthesesBalancing {
/** Returns `true` iff the parentheses in the input `chars` are balanced.
*
* @param chars An array representing a string, usually created with "my string".toArray .
* @return `true` if `chars` has balanced parentheses. "a" is balanced. "()" is balanced.
* ")(" is not balanced even though the left and right paren count is the same.
**/
def balance(chars: Array[Char]): Boolean = balanceWorker(0, 0, chars, 0, chars.length)
/** My Boolean.toInt conversion. Returns 1 if `e` is `true` and 0 if `e` is `false`.
*/
private def toInt(e: Boolean): Int = if (e) 1 else 0
// balanceWorker is the guts of the balance check. It steps through the array looking
// for parens. Left parens increase `left`, right parens increase `right`. If `right` > `left`
// we know we aren't balanced so answer `false`. If we get to the end of the array
// and `left` == `right`, we are balanced and return `true`; if `left` and `right` differ
// then we're unbalanced so return `false`.
private def balanceWorker(left: Int, right: Int, chars: Array[Char], cur: Int, end: Int): Boolean = {
if (right > left) false
else if (cur >= end) (left == right) // notice that is a Boolean expression!
else
balanceWorker(left + toInt( chars(cur) == '(' ), right + toInt( chars(cur) == ')' ), chars, cur + 1, end)
}
/** Returns `true` iff the parentheses in the input `chars` are balanced.
*
* @param chars An array representing a string, usually created with "my string".toArray .
* @param threshold String segments shorter than `threshold` are processed sequentially instead of in parallel.
* @return `true` if `chars` has balanced parentheses. "a" is balanced. "()" is balanced.
* ")(" is not balanced even though the left and right paren count is the same.
**/
def parBalance(chars: Array[Char], threshold: Int): Boolean = {
/** Walk the `chars` array from `idx` to `until` (exclusive) sequentially (without spawning threads).
* Bump the open (left) paren count for every '(' seen. When ')' seen, if we have an unbalanced '(' then
* lower the open (left) paren count. Otherwise, bump the close (right) paren count.
*
* @param idx The starting point of the string segment. Inclusive.
* @param until The ending point. Exclusive.
* @param open The unbalanced '(' count.
* @param close The unbalanced ')' count.
* @return A pair (`open`, `close`) representing the unbalanced counts for this segment.
**/
def traverse(idx: Int, until: Int, open: Int, close: Int): (Int, Int) = {
if (idx >= until) (open, close)
else
chars(idx) match {
case '(' => traverse(idx+1, until, open+1, close)
case ')' =>
if (open > 0) traverse(idx+1, until, open-1, close) // we have a paren we can "balance" away
else traverse(idx+1, until, open, close+1)
case _ => traverse(idx+1, until, open, close)
}
}
/** Process the `chars` array starting at `from` up to `until`. If the segment length is longer than
* `threshold`, split the segment into 2 roughly equal pieces and process each in parallel. If they are
* shorter than `threshold`, or if `threshold` is stupid (zero or negative), process sequentially.
*
* @param from Starting point in `chars`, inclusive.
* @param until Ending point of the segment, exclusive.
* @return A pair (`open`, `close`) representing the unbalanced counts for this segment.
**/
def reduce(from: Int, until: Int): (Int, Int) = {
if ( (until - from) <= threshold || threshold <= 0 ) traverse(from, until, 0, 0)
else
{
val mid = from + ( (until - from)/2 )
val ( (open_l, close_l), (open_r, close_r) ) = parallel(reduce(from, mid), reduce(mid, until))
// The "tricky" part is how to combine the 4 returned values together into a pair of values.
// Took some whiteboard work to figure out. How you combine depends on if there are more open
// parens than close parens. If open is bigger, use `rr` to "balance" away what we can. Then
// include the remaining open parens in `rl`. Similarly, if there are more close parens, use `ll` to
// "balance" away what we can then include the remaining close parens in `lr`.
if (open_l > close_r)
( open_l - close_r + open_r, close_l ) // left result is the master.
else
( open_r, close_r - open_l + close_l ) // right result is the master.
}
}
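    // Worked example (illustrative, not from the assignment): for input "())((" split into
    // "())" and "((", the left half traverses to (open, close) = (0, 1) and the right half
    // to (2, 0). Since open_l (0) is not greater than close_r (0), the combined pair is
    // (open_r, close_r - open_l + close_l) = (2, 1), i.e. two unmatched '(' and one
    // unmatched ')', matching what a sequential traversal of the whole string would produce.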
// (`open`, `close`) of (0,0) means balanced parentheses. Anything else is unbalanced.
reduce(0, chars.length) == (0, 0)
}
// For those who want more:
// Prove that your reduction operator is associative!
}
| jeffreylloydbrown/classwork | ParallelProgrammingInScala/reductions/src/main/scala/reductions/ParallelParenthesesBalancing.scala | Scala | unlicense | 6,029 |
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.kudu.spark.tools
import org.apache.kudu.Type
import org.apache.kudu.client.KuduPartitioner
import org.apache.kudu.spark.kudu.KuduTestSuite
import org.apache.kudu.test.RandomUtils
import org.apache.kudu.util.DecimalUtil
import org.apache.kudu.util.SchemaGenerator
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler.SparkListener
import org.apache.spark.scheduler.SparkListenerTaskEnd
import org.apache.spark.sql.Row
import org.junit.Test
import org.junit.Assert.assertEquals
import org.slf4j.Logger
import org.slf4j.LoggerFactory
class DistributedDataGeneratorTest extends KuduTestSuite {
val log: Logger = LoggerFactory.getLogger(getClass)
private val generator = new SchemaGenerator.SchemaGeneratorBuilder()
.random(RandomUtils.getRandom)
// These types don't have enough values to prevent collisions.
.excludeTypes(Type.BOOL, Type.INT8)
// Ensure decimals have enough values to prevent collisions.
.precisionRange(DecimalUtil.MAX_DECIMAL32_PRECISION, DecimalUtil.MAX_DECIMAL_PRECISION)
.build()
private val randomTableName: String = "random-table"
@Test
def testGenerateRandomData() {
val numRows = 100
val args = Array(
s"--num-rows=$numRows",
"--num-tasks=10",
"--type=random",
randomTableName,
harness.getMasterAddressesAsString)
val rdd = runGeneratorTest(args)
val collisions = ss.sparkContext.longAccumulator("row_collisions").value
    // Collisions could cause the number of rows to be less than the number requested.
assertEquals(numRows - collisions, rdd.collect.length)
}
@Test
def testGenerateSequentialData() {
val numRows = 100
val args = Array(
s"--num-rows=$numRows",
"--num-tasks=10",
"--type=sequential",
randomTableName,
harness.getMasterAddressesAsString)
val rdd = runGeneratorTest(args)
assertEquals(numRows, rdd.collect.length)
}
@Test
def testRepartitionData() {
val numRows = 100
val args = Array(
s"--num-rows=$numRows",
"--num-tasks=10",
"--type=sequential",
"--repartition=true",
randomTableName,
harness.getMasterAddressesAsString)
val rdd = runGeneratorTest(args)
assertEquals(numRows, rdd.collect.length)
}
@Test
def testNumTasks() {
// Add a SparkListener to count the number of tasks that end.
var actualNumTasks = 0
val listener = new SparkListener {
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
actualNumTasks += 1
}
}
ss.sparkContext.addSparkListener(listener)
val numTasks = 8
val numRows = 100
val args = Array(
s"--num-rows=$numRows",
s"--num-tasks=$numTasks",
randomTableName,
harness.getMasterAddressesAsString)
runGeneratorTest(args)
assertEquals(numTasks, actualNumTasks)
}
@Test
def testNumTasksRepartition(): Unit = {
// Add a SparkListener to count the number of tasks that end.
var actualNumTasks = 0
val listener = new SparkListener {
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
actualNumTasks += 1
}
}
ss.sparkContext.addSparkListener(listener)
val numTasks = 8
val numRows = 100
val args = Array(
s"--num-rows=$numRows",
s"--num-tasks=$numTasks",
"--repartition=true",
randomTableName,
harness.getMasterAddressesAsString)
runGeneratorTest(args)
val table = kuduContext.syncClient.openTable(randomTableName)
val numPartitions = new KuduPartitioner.KuduPartitionerBuilder(table).build().numPartitions()
// We expect the number of tasks to be equal to numTasks + numPartitions because numTasks tasks
// are run to generate the data then we repartition the data to match the table partitioning
// and numPartitions tasks load the data.
assertEquals(numTasks + numPartitions, actualNumTasks)
}
def runGeneratorTest(args: Array[String]): RDD[Row] = {
val schema = generator.randomSchema()
val options = generator.randomCreateTableOptions(schema)
kuduClient.createTable(randomTableName, schema, options)
DistributedDataGenerator.testMain(args, ss)
kuduContext.kuduRDD(ss.sparkContext, randomTableName)
}
}
| InspurUSA/kudu | java/kudu-spark-tools/src/test/scala/org/apache/kudu/spark/tools/DistributedDataGeneratorTest.scala | Scala | apache-2.0 | 5,059 |
package edu.cmu.lti.oaqa.bagpipes.executor.uima
import org.apache.uima.UIMAFramework
import org.apache.uima.jcas.JCas
import org.apache.uima.util.CasCopier
import org.apache.uima.fit.factory.AnalysisEngineFactory
import org.apache.uima.fit.factory.JCasFactory
import edu.cmu.lti.oaqa.bagpipes.configuration.AbstractDescriptors._
import UimaAnnotator._
import edu.cmu.lti.oaqa.bagpipes.executor.Annotator
import edu.cmu.lti.oaqa.bagpipes.executor.uima._
import org.apache.uima.analysis_component.AnalysisComponent
import edu.cmu.lti.oaqa.bagpipes.executor.Result
/**
* Instantiate and use any UIMA AnalysisEngine in a pipeline.
*
* @param compDesc
* A ComponentDescriptor with the class and parameters of the UIMA
* AnalysisEngine to use.
* @author Collin McCormack, and Avner Maiberg ([email protected])
*/
final class UimaAnnotator(compDesc: ComponentDescriptor) extends UimaComponent(compDesc) with Annotator[JCas] {
// Create AnalysisEngine internally
val componentClass = createAnalysisComponentClass(className)
println("annotator params: " + params)
val aeDescriptor = AnalysisEngineFactory.createPrimitiveDescription(componentClass, typeSysDesc, params: _*)
val ae = UIMAFramework.produceAnalysisEngine(aeDescriptor)
/**
   * Process the input JCas with this AnalysisEngine.
   *
   * @param input
   *          The JCas to process (it is modified in place by the engine)
   * @return The processed JCas, wrapped in the same Result that was passed in
*/
override def executeComponent(input: Result[JCas]): Result[JCas] = {
val result @ Result(cas) = input
ae.process(cas.getCas())
result
}
/**
* Clean-up and release resources.
*/
override def destroy() = {
ae.collectionProcessComplete()
ae.destroy()
}
}
object UimaAnnotator {
private def createAnalysisComponentClass(className: String) = Class.forName(className).asInstanceOf[Class[_ <: AnalysisComponent]]
} | oaqa/bagpipes | src/main/scala/edu/cmu/lti/oaqa/bagpipes/executor/uima/UimaAnnotator.scala | Scala | apache-2.0 | 1,870 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io.File
import java.net.Socket
import scala.collection.JavaConversions._
import scala.collection.mutable
import scala.util.Properties
import akka.actor._
import com.google.common.collect.MapMaker
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.api.python.PythonWorkerFactory
import org.apache.spark.broadcast.BroadcastManager
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.network.BlockTransferService
import org.apache.spark.network.netty.NettyBlockTransferService
import org.apache.spark.network.nio.NioBlockTransferService
import org.apache.spark.scheduler.{OutputCommitCoordinator, LiveListenerBus}
import org.apache.spark.scheduler.OutputCommitCoordinator.OutputCommitCoordinatorActor
import org.apache.spark.serializer.Serializer
import org.apache.spark.shuffle.{ShuffleMemoryManager, ShuffleManager}
import org.apache.spark.storage._
import org.apache.spark.util.{AkkaUtils, Utils}
/**
* :: DeveloperApi ::
* Holds all the runtime environment objects for a running Spark instance (either master or worker),
* including the serializer, Akka actor system, block manager, map output tracker, etc. Currently
* Spark code finds the SparkEnv through a global variable, so all the threads can access the same
* SparkEnv. It can be accessed by SparkEnv.get (e.g. after creating a SparkContext).
*
* NOTE: This is not intended for external use. This is exposed for Shark and may be made private
* in a future release.
*/
@DeveloperApi
class SparkEnv (
val executorId: String,
val actorSystem: ActorSystem,
val serializer: Serializer,
val closureSerializer: Serializer,
val cacheManager: CacheManager,
val mapOutputTracker: MapOutputTracker,
val shuffleManager: ShuffleManager,
val broadcastManager: BroadcastManager,
val blockTransferService: BlockTransferService,
val blockManager: BlockManager,
val securityManager: SecurityManager,
val httpFileServer: HttpFileServer,
val sparkFilesDir: String,
val metricsSystem: MetricsSystem,
val shuffleMemoryManager: ShuffleMemoryManager,
val outputCommitCoordinator: OutputCommitCoordinator,
val conf: SparkConf) extends Logging {
private[spark] var isStopped = false
private val pythonWorkers = mutable.HashMap[(String, Map[String, String]), PythonWorkerFactory]()
// A general, soft-reference map for metadata needed during HadoopRDD split computation
// (e.g., HadoopFileRDD uses this to cache JobConfs and InputFormats).
private[spark] val hadoopJobMetadata = new MapMaker().softValues().makeMap[String, Any]()
private[spark] def stop() {
isStopped = true
pythonWorkers.foreach { case(key, worker) => worker.stop() }
Option(httpFileServer).foreach(_.stop())
mapOutputTracker.stop()
shuffleManager.stop()
broadcastManager.stop()
blockManager.stop()
blockManager.master.stop()
metricsSystem.stop()
outputCommitCoordinator.stop()
actorSystem.shutdown()
// Unfortunately Akka's awaitTermination doesn't actually wait for the Netty server to shut
// down, but let's call it anyway in case it gets fixed in a later release
// UPDATE: In Akka 2.1.x, this hangs if there are remote actors, so we can't call it.
// actorSystem.awaitTermination()
// Note that blockTransferService is stopped by BlockManager since it is started by it.
}
private[spark]
def createPythonWorker(pythonExec: String, envVars: Map[String, String]): java.net.Socket = {
synchronized {
val key = (pythonExec, envVars)
pythonWorkers.getOrElseUpdate(key, new PythonWorkerFactory(pythonExec, envVars)).create()
}
}
private[spark]
def destroyPythonWorker(pythonExec: String, envVars: Map[String, String], worker: Socket) {
synchronized {
val key = (pythonExec, envVars)
pythonWorkers.get(key).foreach(_.stopWorker(worker))
}
}
private[spark]
def releasePythonWorker(pythonExec: String, envVars: Map[String, String], worker: Socket) {
synchronized {
val key = (pythonExec, envVars)
pythonWorkers.get(key).foreach(_.releaseWorker(worker))
}
}
}
object SparkEnv extends Logging {
@volatile private var env: SparkEnv = _
private[spark] val driverActorSystemName = "sparkDriver"
private[spark] val executorActorSystemName = "sparkExecutor"
def set(e: SparkEnv) {
env = e
}
/**
* Returns the SparkEnv.
*/
def get: SparkEnv = {
env
}
/**
* Returns the ThreadLocal SparkEnv.
*/
@deprecated("Use SparkEnv.get instead", "1.2")
def getThreadLocal: SparkEnv = {
env
}
/**
* Create a SparkEnv for the driver.
*/
private[spark] def createDriverEnv(
conf: SparkConf,
isLocal: Boolean,
listenerBus: LiveListenerBus,
mockOutputCommitCoordinator: Option[OutputCommitCoordinator] = None): SparkEnv = {
assert(conf.contains("spark.driver.host"), "spark.driver.host is not set on the driver!")
assert(conf.contains("spark.driver.port"), "spark.driver.port is not set on the driver!")
val hostname = conf.get("spark.driver.host")
val port = conf.get("spark.driver.port").toInt
create(
conf,
SparkContext.DRIVER_IDENTIFIER,
hostname,
port,
isDriver = true,
isLocal = isLocal,
listenerBus = listenerBus,
mockOutputCommitCoordinator = mockOutputCommitCoordinator
)
}
/**
* Create a SparkEnv for an executor.
* In coarse-grained mode, the executor provides an actor system that is already instantiated.
*/
private[spark] def createExecutorEnv(
conf: SparkConf,
executorId: String,
hostname: String,
port: Int,
numCores: Int,
isLocal: Boolean): SparkEnv = {
val env = create(
conf,
executorId,
hostname,
port,
isDriver = false,
isLocal = isLocal,
numUsableCores = numCores
)
SparkEnv.set(env)
env
}
/**
* Helper method to create a SparkEnv for a driver or an executor.
*/
private def create(
conf: SparkConf,
executorId: String,
hostname: String,
port: Int,
isDriver: Boolean,
isLocal: Boolean,
listenerBus: LiveListenerBus = null,
numUsableCores: Int = 0,
mockOutputCommitCoordinator: Option[OutputCommitCoordinator] = None): SparkEnv = {
// Listener bus is only used on the driver
if (isDriver) {
assert(listenerBus != null, "Attempted to create driver SparkEnv with null listener bus!")
}
val securityManager = new SecurityManager(conf)
// Create the ActorSystem for Akka and get the port it binds to.
val (actorSystem, boundPort) = {
val actorSystemName = if (isDriver) driverActorSystemName else executorActorSystemName
AkkaUtils.createActorSystem(actorSystemName, hostname, port, conf, securityManager)
}
// Figure out which port Akka actually bound to in case the original port is 0 or occupied.
if (isDriver) {
conf.set("spark.driver.port", boundPort.toString)
} else {
conf.set("spark.executor.port", boundPort.toString)
}
// Create an instance of the class with the given name, possibly initializing it with our conf
def instantiateClass[T](className: String): T = {
val cls = Class.forName(className, true, Utils.getContextOrSparkClassLoader)
// Look for a constructor taking a SparkConf and a boolean isDriver, then one taking just
// SparkConf, then one taking no arguments
try {
cls.getConstructor(classOf[SparkConf], java.lang.Boolean.TYPE)
.newInstance(conf, new java.lang.Boolean(isDriver))
.asInstanceOf[T]
} catch {
case _: NoSuchMethodException =>
try {
cls.getConstructor(classOf[SparkConf]).newInstance(conf).asInstanceOf[T]
} catch {
case _: NoSuchMethodException =>
cls.getConstructor().newInstance().asInstanceOf[T]
}
}
}
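    // Illustrative note (not from the original sources): instantiateClass[Serializer](
    // "org.apache.spark.serializer.KryoSerializer") would first look for a
    // (SparkConf, Boolean) constructor, then (SparkConf), then a no-arg constructor, so a
    // pluggable component only needs to expose whichever of those signatures it supports.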
// Create an instance of the class named by the given SparkConf property, or defaultClassName
// if the property is not set, possibly initializing it with our conf
def instantiateClassFromConf[T](propertyName: String, defaultClassName: String): T = {
instantiateClass[T](conf.get(propertyName, defaultClassName))
}
val serializer = instantiateClassFromConf[Serializer](
"spark.serializer", "org.apache.spark.serializer.JavaSerializer")
logDebug(s"Using serializer: ${serializer.getClass}")
val closureSerializer = instantiateClassFromConf[Serializer](
"spark.closure.serializer", "org.apache.spark.serializer.JavaSerializer")
def registerOrLookup(name: String, newActor: => Actor): ActorRef = {
if (isDriver) {
logInfo("Registering " + name)
actorSystem.actorOf(Props(newActor), name = name)
} else {
AkkaUtils.makeDriverRef(name, conf, actorSystem)
}
}
val mapOutputTracker = if (isDriver) {
new MapOutputTrackerMaster(conf)
} else {
new MapOutputTrackerWorker(conf)
}
// Have to assign trackerActor after initialization as MapOutputTrackerActor
// requires the MapOutputTracker itself
mapOutputTracker.trackerActor = registerOrLookup(
"MapOutputTracker",
new MapOutputTrackerMasterActor(mapOutputTracker.asInstanceOf[MapOutputTrackerMaster], conf))
// Let the user specify short names for shuffle managers
val shortShuffleMgrNames = Map(
"hash" -> "org.apache.spark.shuffle.hash.HashShuffleManager",
"sort" -> "org.apache.spark.shuffle.sort.SortShuffleManager")
val shuffleMgrName = conf.get("spark.shuffle.manager", "sort")
val shuffleMgrClass = shortShuffleMgrNames.getOrElse(shuffleMgrName.toLowerCase, shuffleMgrName)
val shuffleManager = instantiateClass[ShuffleManager](shuffleMgrClass)
val shuffleMemoryManager = new ShuffleMemoryManager(conf)
val blockTransferService =
conf.get("spark.shuffle.blockTransferService", "netty").toLowerCase match {
case "netty" =>
new NettyBlockTransferService(conf, securityManager, numUsableCores)
case "nio" =>
new NioBlockTransferService(conf, securityManager)
}
val blockManagerMaster = new BlockManagerMaster(registerOrLookup(
"BlockManagerMaster",
new BlockManagerMasterActor(isLocal, conf, listenerBus)), conf, isDriver)
// NB: blockManager is not valid until initialize() is called later.
val blockManager = new BlockManager(executorId, actorSystem, blockManagerMaster,
serializer, conf, mapOutputTracker, shuffleManager, blockTransferService, securityManager,
numUsableCores)
val broadcastManager = new BroadcastManager(isDriver, conf, securityManager)
val cacheManager = new CacheManager(blockManager)
val httpFileServer =
if (isDriver) {
val fileServerPort = conf.getInt("spark.fileserver.port", 0)
val server = new HttpFileServer(conf, securityManager, fileServerPort)
server.initialize()
conf.set("spark.fileserver.uri", server.serverUri)
server
} else {
null
}
val metricsSystem = if (isDriver) {
// Don't start metrics system right now for Driver.
// We need to wait for the task scheduler to give us an app ID.
// Then we can start the metrics system.
MetricsSystem.createMetricsSystem("driver", conf, securityManager)
} else {
// We need to set the executor ID before the MetricsSystem is created because sources and
// sinks specified in the metrics configuration file will want to incorporate this executor's
// ID into the metrics they report.
conf.set("spark.executor.id", executorId)
val ms = MetricsSystem.createMetricsSystem("executor", conf, securityManager)
ms.start()
ms
}
// Set the sparkFiles directory, used when downloading dependencies. In local mode,
// this is a temporary directory; in distributed mode, this is the executor's current working
// directory.
val sparkFilesDir: String = if (isDriver) {
Utils.createTempDir(Utils.getLocalDir(conf), "userFiles").getAbsolutePath
} else {
"."
}
// Warn about deprecated spark.cache.class property
if (conf.contains("spark.cache.class")) {
logWarning("The spark.cache.class property is no longer being used! Specify storage " +
"levels using the RDD.persist() method instead.")
}
val outputCommitCoordinator = mockOutputCommitCoordinator.getOrElse {
new OutputCommitCoordinator(conf)
}
val outputCommitCoordinatorActor = registerOrLookup("OutputCommitCoordinator",
new OutputCommitCoordinatorActor(outputCommitCoordinator))
outputCommitCoordinator.coordinatorActor = Some(outputCommitCoordinatorActor)
new SparkEnv(
executorId,
actorSystem,
serializer,
closureSerializer,
cacheManager,
mapOutputTracker,
shuffleManager,
broadcastManager,
blockTransferService,
blockManager,
securityManager,
httpFileServer,
sparkFilesDir,
metricsSystem,
shuffleMemoryManager,
outputCommitCoordinator,
conf)
}
/**
* Return a map representation of jvm information, Spark properties, system properties, and
* class paths. Map keys define the category, and map values represent the corresponding
* attributes as a sequence of KV pairs. This is used mainly for SparkListenerEnvironmentUpdate.
*/
private[spark]
def environmentDetails(
conf: SparkConf,
schedulingMode: String,
addedJars: Seq[String],
addedFiles: Seq[String]): Map[String, Seq[(String, String)]] = {
import Properties._
val jvmInformation = Seq(
("Java Version", s"$javaVersion ($javaVendor)"),
("Java Home", javaHome),
("Scala Version", versionString)
).sorted
// Spark properties
// This includes the scheduling mode whether or not it is configured (used by SparkUI)
val schedulerMode =
if (!conf.contains("spark.scheduler.mode")) {
Seq(("spark.scheduler.mode", schedulingMode))
} else {
Seq[(String, String)]()
}
val sparkProperties = (conf.getAll ++ schedulerMode).sorted
// System properties that are not java classpaths
val systemProperties = Utils.getSystemProperties.toSeq
val otherProperties = systemProperties.filter { case (k, _) =>
k != "java.class.path" && !k.startsWith("spark.")
}.sorted
// Class paths including all added jars and files
val classPathEntries = javaClassPath
.split(File.pathSeparator)
.filterNot(_.isEmpty)
.map((_, "System Classpath"))
val addedJarsAndFiles = (addedJars ++ addedFiles).map((_, "Added By User"))
val classPaths = (addedJarsAndFiles ++ classPathEntries).sorted
Map[String, Seq[(String, String)]](
"JVM Information" -> jvmInformation,
"Spark Properties" -> sparkProperties,
"System Properties" -> otherProperties,
"Classpath Entries" -> classPaths)
}
}
| hengyicai/OnlineAggregationUCAS | core/src/main/scala/org/apache/spark/SparkEnv.scala | Scala | apache-2.0 | 15,952 |
package de.fosd.typechef.lexer
import java.io._
import de.fosd.typechef.conditional.{Conditional, One}
import de.fosd.typechef.featureexpr.{FeatureExprFactory, SingleFeatureExpr}
import de.fosd.typechef.lexer.LexerFrontend.{LexerError, LexerResult, LexerSuccess}
import de.fosd.typechef.{LexerToken, VALexer}
/**
* differential testing compares the output of the jcpp/xtc preprocessor with
* the output of an external preprocessor, brute force over all configurations
*
* That is, we execute the following:
* * jcpp/xtc over all configurations producing a conditional token stream
* * jcpp/xtc over each configuration separately, producing a token stream without conditions
* * cpp (needs to be available in the system's path) producing a preprocessed file, that is then lexed by jcpp/xtc
*
* Features can be defined in the first line of the test file with "// features: A B C D" or are extracted
* from whatever features jcpp/xtc find
*/
trait DifferentialTestingFramework extends LexerHelper {
import scala.collection.JavaConverters._
def analyzeFile(file: File, inclDirectory: File, debug: Boolean = false, ignoreWarnings: Boolean = false): Unit = {
assert(file != null && file.exists(), s"file not found: $file")
val fileContent = getFileContent(file)
val initFeatures: Set[SingleFeatureExpr] = getInitFeatures(fileContent)
status(s"lexing all configurations for $file")
val vresult = lex(file, inclDirectory, debug, ignoreWarnings)
assert(expectTotalFailure(fileContent) || hasSuccess(vresult), "parsing failed in all configurations: " + vresult)
val features = getFeatures(initFeatures, vresult)
val maxFeatures = 8
if (features.size > maxFeatures)
System.err.println("Warning: too many features (%s; 2^%d configurations)\\n using random instead of exaustive strategy beyond %d features".format(features, features.size, maxFeatures))
for (config <- genAllConfigurations(features)) {
//run same lexer on a single configuration
status(s"comparing against single config, configuration $config")
if (debug)
println(s"### configuration $config")
val configuredvtokens = extractTokensForConfig(vresult, config)
if (debug)
println(s"expecting $configuredvtokens")
val result = lex(file, inclDirectory, debug, ignoreWarnings, config.map(f => (f.feature -> "1")).toMap, (features -- config).map(_.feature))
assert(result.isInstanceOf[One[_]], "received conditional result when executing a single configuration??")
val tokens = getTokensFromResult(result.asInstanceOf[One[LexerResult]].value)
compareTokenLists(configuredvtokens, tokens, config, false)
//compare against CPP
status(s"comparing against cpp, configuration $config")
val cppresult: Conditional[LexerFrontend.LexerResult] = tryAgainIfEmpty(() => lexcpp(file, inclDirectory, debug, ignoreWarnings, config.map(f => (f.feature -> "1")).toMap, (features -- config).map(_.feature)), 3)
assert(cppresult.isInstanceOf[One[_]], "received conditional result when executing a single configuration??")
val cpptokens = getTokensFromResult(cppresult.asInstanceOf[One[LexerResult]].value)
compareTokenLists(configuredvtokens, cpptokens, config, true)
}
}
def genAllConfigurations(exprs: Set[SingleFeatureExpr]): List[Set[SingleFeatureExpr]] =
if (exprs.isEmpty) List(Set())
else {
val configs = genAllConfigurations(exprs.tail)
val head = exprs.head
//if too many features, just select random values after the first 10 instead of exploring all
if (exprs.size > 10) {
if (Math.random() > 0.5) configs else configs.map(_ + head)
}
else configs ++ configs.map(_ + head)
}
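  // Illustrative example (not from the original sources): for two features A and B this yields
  // the four configurations Set(), Set(B), Set(A), Set(A, B) (assuming A is the head of the
  // set); with more than 10 remaining features a single coin flip fixes each extra feature for
  // all configurations, so at most 2^10 configurations are produced rather than the full
  // power set.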
def getInitFeatures(filecontent: String): Set[SingleFeatureExpr] =
filecontent.split("\\n").filter(_ startsWith "// features:").flatMap(
_.drop(12).split(" ").map(_.trim).filterNot(_.isEmpty)).map(FeatureExprFactory.createDefinedExternal).toSet
def expectTotalFailure(filecontent: String): Boolean =
filecontent.split("\\n").exists(_ startsWith "// expect cpp failure")
def extractTokensForConfig(vresult: Conditional[LexerResult], config: Set[SingleFeatureExpr]): List[LexerToken] = {
val c = config.map(_.feature)
val configuredvresult = vresult.select(c)
val configuredvtokens = getTokensFromResult(configuredvresult)
configuredvtokens.filter(_.getFeature.evaluate(c))
}
def getTokensFromResult(result: LexerResult): List[LexerToken] =
if (result.isInstanceOf[LexerSuccess])
result.asInstanceOf[LexerSuccess].getTokens.asScala.toList.filter(_.isLanguageToken)
else {
List()
}
def compareTokenLists(vlist: List[LexerToken], alist: List[LexerToken], config: Set[SingleFeatureExpr], withCPP: Boolean): Unit = {
val msgWithCPP = if (withCPP) "(cpp)" else "(typechef)"
lazy val msg = s" in config $config.\\n" +
s"variability-aware lexing: $vlist\\n" +
s"lexing specific config $msgWithCPP: $alist"
assert(vlist.length == alist.length, "preprocessor produces output of different length" + msg)
(vlist zip alist).foreach(
x => assert(x._1.getText == x._2.getText, s"mismatch on token $x" + msg)
)
}
def hasSuccess(conditional: Conditional[LexerResult]): Boolean = conditional.exists({
case a: LexerSuccess => true
case _ => false
})
protected def status(s: String) = {}
protected def tryAgainIfEmpty(cmd: () => Conditional[LexerFrontend.LexerResult], nrTries: Int): Conditional[LexerFrontend.LexerResult] = {
val result = cmd()
if (nrTries > 1) {
val r = result.asInstanceOf[One[LexerResult]].value
var failed = false
if (r.isInstanceOf[LexerSuccess])
if (r.asInstanceOf[LexerSuccess].getTokens.asScala.toList.filter(_.isLanguageToken).isEmpty)
failed = true
if (failed)
return tryAgainIfEmpty(cmd, nrTries-1)
}
return result
}
protected def lexcpp(file: File,
folder: File,
debug: Boolean = false,
ignoreWarnings: Boolean = true,
definedMacros: Map[String, String] = Map(),
undefMacros: Set[String] = Set()
): Conditional[LexerFrontend.LexerResult] = {
val output = new ByteArrayOutputStream()
import scala.sys.process._
val cppcmd = "cpp"
val cmd = cppcmd + " -I " + folder.getAbsolutePath + " " + file.getAbsolutePath + " " + definedMacros.map(v => "-D" + v._1 + "=" + v._2).mkString(" ") + " " + undefMacros.map("-U" + _).mkString(" ")
var msg = ""
val isSuccess = cmd #> output ! ProcessLogger(l => msg = msg + "\\n" + l)
if (isSuccess != 0) {
// System.err.println(msg)
return One(new LexerError(s"cpp execution failed with value $isSuccess: $msg", "", 0, 0))
}
val jcppinput = new ByteArrayInputStream(output.toByteArray)
lex(new VALexer.StreamSource(jcppinput, "nofile"), false, new File("."), false, Map[String, String](), Set[String]())
}
private def getFeatures(initSet: Set[SingleFeatureExpr], conditional: Conditional[LexerResult]): Set[SingleFeatureExpr] = {
var foundFeatures: Set[SingleFeatureExpr] = Set()
conditional.foreach(x =>
if (x.isInstanceOf[LexerSuccess]) x.asInstanceOf[LexerSuccess].getTokens.asScala.foreach(t =>
foundFeatures ++= t.getFeature.collectDistinctFeatureObjects
))
if (initSet.nonEmpty) {
assert((foundFeatures subsetOf initSet) || (foundFeatures equals initSet), "features declared in test file, but additional features found; check test file")
initSet
} else
foundFeatures
}
private def getFileContent(file: File): String = {
val source = io.Source.fromFile(file)
val r = source.mkString
source.close()
r
}
}
| mbeddr/TypeChef | PartialPreprocessor/src/test/scala/de/fosd/typechef/lexer/DifferentialTestingFramework.scala | Scala | lgpl-3.0 | 8,452 |
package nodes.util
import breeze.linalg.{DenseVector, argmax}
import workflow.Transformer
/**
* Transformer that returns the index of the largest value in the vector
*/
object MaxClassifier extends Transformer[DenseVector[Double], Int] {
override def apply(in: DenseVector[Double]): Int = argmax(in)
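  // Illustrative example (not from the original file): MaxClassifier(DenseVector(0.1, 0.7, 0.2))
  // returns 1, the index of the largest score.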
}
| o0neup/keystone | src/main/scala/nodes/util/MaxClassifier.scala | Scala | apache-2.0 | 308 |
package org.scalawiki.bots
import java.net.URLDecoder
import org.scalawiki.MwBot
import org.scalawiki.dto.cmd.Action
import org.scalawiki.dto.cmd.query.prop.rvprop.{Content, RvProp}
import org.scalawiki.dto.cmd.query.prop.{Prop, Revisions}
import org.scalawiki.dto.cmd.query.{Query, TitlesParam}
import org.scalawiki.dto.{Image, Page}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import spray.util.pimpFuture
import scala.io.Source
/**
* Converts links to Wikimedia Commons files to short links (with file id) and tries to add image author and license
*/
object ShortLinksBot {
val commons = MwBot.fromHost(MwBot.commons)
val ukWiki = MwBot.fromHost(MwBot.ukWiki)
def getPage(title: String): Future[Page] = {
val action = Action(Query(
TitlesParam(Seq(title)),
Prop(
Revisions(RvProp(Content))
)
))
commons.run(action).flatMap { commonsPages =>
val commonsPage = commonsPages.head
if (commonsPage.missing) {
ukWiki.run(action).map(_.head)
} else Future.successful(commonsPage)
}
.recoverWith { case e =>
Future.successful(Page(title = "Error! " + e))
}
}
def getPageLicense(page: Page): Option[String] = {
for (id <- page.id;
text <- page.revisions.headOption.flatMap(_.content)) yield {
val author = Image.getAuthorFromPage(text)
val license = text
.split("\\\\s|\\\\||\\\\{|\\\\}")
.map(_.toLowerCase)
.find { s =>
s.startsWith("cc-") ||
s.startsWith("gfdl") ||
s.startsWith("wikimapia")
}
.getOrElse("???")
val readableLicense = license
.replace("cc-by-sa-", "CC BY-SA ")
.replace("cc-zero", "CC0 1.0")
.replace("gfdl-self", "GFDL")
.replace("wikimapia", "CC BY-SA 3.0")
s"https://commons.wikimedia.org/?curid=$id © $author, $readableLicense"
}
}
def getLineInfo(line: String): Future[String] = {
val s = line.indexOf("File:")
val title = line.substring(s).trim
getPage(title).map { page =>
getPageLicense(page).getOrElse("Error with " + title)
}
}
def getFileSubstring(line: String): Future[String] = {
val replaced = line.replace("%D0%A4%D0%B0%D0%B9%D0%BB:", "File:")
val start = replaced.indexOf("File:")
if (start >= 0) {
val decoded = URLDecoder.decode(replaced.substring(start), "UTF-8")
getPage(decoded.trim).map(page => getPageLicense(page).getOrElse(line)).recoverWith { case e =>
Future.successful("Error! " + e)
}
}
else Future.successful(line)
}
def main(args: Array[String]) {
val lines = Source.fromFile("arch.txt").getLines().toSeq
val parallel = false
val updatedLines = if (parallel) {
Future.sequence(lines.map(getFileSubstring)).await
} else {
lines.map(line => getFileSubstring(line).await)
}
    println(updatedLines.mkString("\n"))
}
}
| intracer/scalawiki | scalawiki-bots/src/main/scala/org/scalawiki/bots/ShortLinksBot.scala | Scala | apache-2.0 | 2,977 |
package org.elasticmq.persistence.sql
import org.elasticmq.persistence.CreateQueueMetadata
import org.elasticmq.util.Logging
class QueueRepository(db: DB) extends Logging {
import scalikejdbc._
implicit val session: AutoSession = AutoSession
private val tableName = SQLSyntax.createUnsafely("queue")
if (db.persistenceConfig.pruneDataOnInit) {
logger.debug(s"Deleting stored queues")
sql"drop table if exists $tableName".execute.apply()
}
sql"""
create table if not exists $tableName (
name varchar unique,
data blob
)""".execute.apply()
def drop(): Unit = {
sql"drop table if exists $tableName".execute.apply()
}
def findAll(): List[CreateQueueMetadata] = {
DB localTx { implicit session =>
sql"select * from $tableName"
.map(rs => DBQueue(rs))
.list
.apply()
.map(_.toCreateQueue)
}
}
def add(createQueue: CreateQueueMetadata): Int = {
val db = DBQueue.from(createQueue)
sql"""insert into $tableName (name, data)
values (${db.name},
${db.data})""".update.apply
}
def update(createQueue: CreateQueueMetadata): Int = {
val db = DBQueue.from(createQueue)
sql"""update $tableName set data = ${db.data} where name = ${db.name}""".update.apply
}
def remove(queueName: String): Int = {
sql"delete from $tableName where name = $queueName".update.apply
}
}
| adamw/elasticmq | persistence/persistence-sql/src/main/scala/org/elasticmq/persistence/sql/QueueRepository.scala | Scala | apache-2.0 | 1,421 |
object Test extends App {
val result = "börk börk" flatMap (ch ⇒ if (ch > 127) f"&#x${ch}%04x;" else "" + ch)
println(result)
}
| som-snytt/dotty | tests/pending/run/t8091.scala | Scala | apache-2.0 | 136 |
package io.buoyant.router
import com.twitter.finagle.{Path, Service, ServiceFactory, SimpleFilter, Stack}
import com.twitter.finagle.buoyant.{Dst, EncodeResidual}
import com.twitter.finagle.mux.{Request, Response}
import com.twitter.util._
object MuxEncodeResidual extends Stack.Module1[Dst.Bound, ServiceFactory[Request, Response]] {
val role = EncodeResidual.role
val description = EncodeResidual.description
def make(bound: Dst.Bound, factory: ServiceFactory[Request, Response]) =
new ResidualFilter(bound.path) andThen factory
class ResidualFilter(path: Path) extends SimpleFilter[Request, Response] {
def apply(req: Request, service: Service[Request, Response]) =
service(Request(path, req.body))
}
}
| denverwilliams/linkerd | router/mux/src/main/scala/io/buoyant/router/MuxEncodeResidual.scala | Scala | apache-2.0 | 733 |
package scalariform.parser
import scalariform.utils.CaseClassReflector
import scalariform.utils.Range
import scalariform.lexer.Token
sealed trait AstNode extends CaseClassReflector {
def tokens: List[Token]
def firstTokenOption: Option[Token] = tokens.headOption
lazy val lastTokenOption: Option[Token] = tokens.lastOption
def firstToken = firstTokenOption.get
lazy val lastToken = lastTokenOption.get
protected trait Flattenable {
def tokens: List[Token]
}
def isEmpty = tokens.isEmpty
protected implicit def astNodeToFlattenable(node: AstNode): Flattenable = new Flattenable { val tokens = node.tokens }
protected implicit def listToFlattenable[T <% Flattenable](list: List[T]): Flattenable = new Flattenable { val tokens = list flatMap { _.tokens } }
protected implicit def optionToFlattenable[T <% Flattenable](option: Option[T]): Flattenable = new Flattenable { val tokens = option.toList flatMap { _.tokens } }
protected implicit def pairToFlattenable[T1 <% Flattenable, T2 <% Flattenable](pair: (T1, T2)): Flattenable = new Flattenable { val tokens = pair._1.tokens ::: pair._2.tokens }
protected implicit def tripleToFlattenable[T1 <% Flattenable, T2 <% Flattenable, T3 <% Flattenable](triple: (T1, T2, T3)): Flattenable = new Flattenable { val tokens = triple._1.tokens ++ triple._2.tokens ++ triple._3.tokens }
protected implicit def eitherToFlattenable[T1 <% Flattenable, T2 <% Flattenable](either: T1 Either T2): Flattenable = new Flattenable {
val tokens = either match {
case Left(f) ⇒ f.tokens
case Right(f) ⇒ f.tokens
}
}
protected implicit def tokenToFlattenable(token: Token): Flattenable = new Flattenable { val tokens = List(token) }
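  // Concatenates the tokens of any combination of tokens, AST nodes, options, lists, pairs,
  // triples and eithers, via the implicit Flattenable conversions defined above.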
protected def flatten(flattenables: Flattenable*): List[Token] = flattenables.toList flatMap { _.tokens }
def immediateChildren: List[AstNode] = productIterator.toList flatten immediateAstNodes
private def immediateAstNodes(n: Any): List[AstNode] = n match {
case a: AstNode ⇒ List(a)
case t: Token ⇒ Nil
case Some(x) ⇒ immediateAstNodes(x)
case xs @ (_ :: _) ⇒ xs flatMap { immediateAstNodes(_) }
case Left(x) ⇒ immediateAstNodes(x)
case Right(x) ⇒ immediateAstNodes(x)
case (l, r) ⇒ immediateAstNodes(l) ++ immediateAstNodes(r)
case (x, y, z) ⇒ immediateAstNodes(x) ++ immediateAstNodes(y) ++ immediateAstNodes(z)
case true | false | Nil | None ⇒ Nil
}
/**
* Returns range of tokens in the node, or None if there are no tokens in the node
*/
def rangeOpt: Option[Range] =
if (tokens.isEmpty)
None
else {
val firstIndex = tokens.head.offset
val lastIndex = tokens.last.lastCharacterOffset
Some(Range(firstIndex, lastIndex - firstIndex + 1))
}
}
case class GeneralTokens(val toks: List[Token]) extends AstNode with TypeElement with ExprElement {
lazy val tokens = flatten(toks)
}
case class Refinement(lbrace: Token, refineStatSeq: StatSeq, rbrace: Token) extends AstNode with TypeElement {
lazy val tokens = flatten(lbrace, refineStatSeq, rbrace)
}
case class TypeParam(contents: List[TypeElement]) extends AstNode with TypeElement {
lazy val tokens = flatten(contents)
}
case class TypeParamClause(contents: List[TypeElement]) extends AstNode with TypeElement {
//require(!contents.isEmpty)
lazy val tokens = flatten(contents)
}
case class Annotation(at: Token, annotationType: Type, argumentExprss: List[ArgumentExprs], newlineOption: Option[Token]) extends TypeElement with ExprElement {
lazy val tokens = flatten(at, annotationType, argumentExprss, newlineOption)
}
case class InfixTypeConstructor(id: Token) extends AstNode with TypeElement {
lazy val tokens = flatten(id)
}
sealed trait TypeElement extends AstNode
case class Type(contents: List[TypeElement]) extends AstNode with TypeElement {
//require(!contents.isEmpty)
lazy val tokens = flatten(contents)
}
case class VarianceTypeElement(id: Token) extends AstNode with TypeElement {
lazy val tokens = flatten(id)
}
case class VarargsTypeElement(star: Token) extends AstNode with TypeElement {
lazy val tokens = flatten(star)
}
case class CallByNameTypeElement(arrow: Token) extends AstNode with TypeElement {
lazy val tokens = flatten(arrow)
}
sealed trait ExprElement extends AstNode
case class Expr(contents: List[ExprElement]) extends AstNode with ExprElement with Stat with Enumerator with XmlContents with ImportExpr {
lazy val tokens = flatten(contents)
}
case class ParenExpr(lparen: Token, contents: List[ExprElement], rparen: Token) extends ExprElement {
lazy val tokens = flatten(lparen, contents, rparen)
}
case class PrefixExprElement(id: Token) extends ExprElement {
lazy val tokens = flatten(id)
}
case class PostfixExpr(first: List[ExprElement], postfixId: Token) extends ExprElement {
lazy val tokens = flatten(first, postfixId)
}
case class InfixExpr(left: List[ExprElement], infixId: Token, newlineOption: Option[Token], right: List[ExprElement]) extends ExprElement {
lazy val tokens = flatten(left, infixId, newlineOption, right)
}
case class CallExpr(
exprDotOpt: Option[(List[ExprElement], Token)],
id: Token,
typeArgsOpt: Option[TypeExprElement] = None,
newLineOptsAndArgumentExprss: List[(Option[Token], ArgumentExprs)] = Nil,
uscoreOpt: Option[Token] = None
) extends ExprElement {
lazy val tokens = flatten(exprDotOpt, id, typeArgsOpt, newLineOptsAndArgumentExprss, uscoreOpt)
}
case class TypeExprElement(contents: List[TypeElement]) extends AstNode with ExprElement {
//require(!contents.isEmpty)
lazy val tokens = flatten(contents)
}
trait ArgumentExprs extends ExprElement
case class BlockArgumentExprs(contents: List[ExprElement]) extends ArgumentExprs {
lazy val tokens = flatten(contents)
}
case class ParenArgumentExprs(lparen: Token, contents: List[ExprElement], rparen: Token) extends ArgumentExprs {
lazy val tokens = flatten(lparen, contents, rparen)
}
case class Argument(expr: Expr) extends AstNode with ExprElement {
lazy val tokens = flatten(expr)
}
case class New(newToken: Token, template: Template) extends ExprElement {
lazy val tokens = flatten(newToken, template)
}
case class IfExpr(
ifToken: Token,
condExpr: CondExpr,
newlinesOpt: Option[Token],
body: Expr,
elseClause: Option[ElseClause]
) extends AstNode with ExprElement {
lazy val tokens = flatten(ifToken, condExpr, newlinesOpt, body, elseClause)
}
case class ElseClause(semiOpt: Option[Token], elseToken: Token, elseBody: Expr) extends AstNode {
lazy val tokens = flatten(semiOpt, elseToken, elseBody)
}
case class BlockExpr(lbrace: Token, caseClausesOrStatSeq: Either[CaseClauses, StatSeq], rbrace: Token) extends AstNode with ExprElement {
lazy val tokens = flatten(lbrace, caseClausesOrStatSeq, rbrace)
}
case class CondExpr(lparen: Token, condition: Expr, rparen: Token) extends AstNode {
lazy val tokens = flatten(lparen, condition, rparen)
}
case class WhileExpr(whileToken: Token, condExpr: CondExpr, newlinesOpt: Option[Token], body: Expr) extends AstNode with ExprElement {
lazy val tokens = flatten(whileToken, condExpr, newlinesOpt, body)
}
case class DoExpr(doToken: Token, body: Expr, statSepOpt: Option[Token], whileToken: Token, condExpr: CondExpr) extends AstNode with ExprElement {
lazy val tokens = flatten(doToken, body, statSepOpt, whileToken, condExpr)
}
case class ForExpr(
forToken: Token,
lParenOrBrace: Token,
enumerators: Enumerators,
rParenOrBrace: Token,
newlinesOption: Option[Token],
yieldOption: Option[Token],
body: Expr
) extends AstNode with ExprElement {
lazy val tokens = flatten(forToken, lParenOrBrace, enumerators, rParenOrBrace, newlinesOption, yieldOption, body)
}
sealed trait Enumerator extends AstNode
case class Enumerators(initialGenerator: Generator, rest: List[(Token, Enumerator)]) extends AstNode {
lazy val tokens = flatten(initialGenerator, rest)
}
case class Generator(
valOption: Option[Token],
pattern: Expr,
equalsOrArrowToken: Token,
expr: Expr,
guards: List[Guard]
) extends AstNode with Enumerator {
lazy val tokens = flatten(valOption, pattern, equalsOrArrowToken, expr, guards)
}
case class Guard(ifToken: Token, expr: Expr) extends AstNode with Enumerator {
lazy val tokens = flatten(ifToken, expr)
}
case class CatchClause(catchToken: Token, catchBlockOrExpr: Either[BlockExpr, Expr]) extends AstNode {
lazy val tokens = flatten(catchToken, catchBlockOrExpr)
}
case class TryExpr(tryToken: Token, body: Expr, catchClauseOption: Option[CatchClause], finallyClauseOption: Option[(Token, Expr)]) extends AstNode with ExprElement {
lazy val tokens = flatten(tryToken, body, catchClauseOption, finallyClauseOption)
}
case class FullDefOrDcl(annotations: List[Annotation], modifiers: List[Modifier], defOrDcl: DefOrDcl) extends Stat {
lazy val tokens = flatten(annotations, modifiers, defOrDcl)
}
case class MatchExpr(left: List[ExprElement], matchToken: Token, block: BlockExpr) extends ExprElement {
lazy val tokens = flatten(left, matchToken, block)
}
case class AscriptionExpr(left: List[ExprElement], colon: Token, right: List[ExprElement]) extends ExprElement {
lazy val tokens = flatten(left, colon, right)
}
case class EqualsExpr(lhs: List[ExprElement], equals: Token, rhs: Expr) extends ExprElement {
lazy val tokens = flatten(lhs, equals, rhs)
}
case class CaseClause(casePattern: CasePattern, statSeq: StatSeq) extends AstNode {
lazy val tokens = flatten(casePattern, statSeq)
}
case class CasePattern(caseToken: Token, pattern: Expr, guardOption: Option[Guard], arrow: Token) extends AstNode {
lazy val tokens = flatten(caseToken, pattern, guardOption, arrow)
}
case class CaseClauses(caseClauses: List[CaseClause]) extends AstNode {
//require(!caseClauses.isEmpty)
lazy val tokens = flatten(caseClauses)
}
sealed trait DefOrDcl extends AstNode
case class TypeDefOrDcl(contents: List[TypeElement]) extends DefOrDcl {
//require(!contents.isEmpty)
lazy val tokens = flatten(contents)
}
case class PatDefOrDcl(
valOrVarToken: Token,
pattern: Expr,
otherPatterns: List[(Token, Expr)],
typedOpt: Option[(Token, Type)],
equalsClauseOption: Option[(Token, Expr)]
) extends DefOrDcl {
lazy val tokens = flatten(valOrVarToken, pattern, otherPatterns, typedOpt, equalsClauseOption)
}
sealed trait FunBody extends AstNode
case class ProcFunBody(newlineOpt: Option[Token], bodyBlock: BlockExpr) extends FunBody {
lazy val tokens = flatten(newlineOpt, bodyBlock)
}
case class ExprFunBody(equals: Token, macroOpt: Option[Token], body: Expr) extends FunBody {
lazy val tokens = flatten(equals, macroOpt, body)
}
case class ParamClauses(newlineOpt: Option[Token], paramClausesAndNewlines: List[(ParamClause, Option[Token])]) extends AstNode {
lazy val tokens = flatten(newlineOpt, paramClausesAndNewlines)
}
case class ParamClause(lparen: Token, implicitOption: Option[Token], firstParamOption: Option[Param], otherParams: List[(Token, Param)], rparen: Token) extends AstNode {
lazy val tokens = flatten(lparen, implicitOption, firstParamOption, otherParams, rparen)
}
case class Param(annotations: List[Annotation], modifiers: List[Modifier], valOrVarOpt: Option[Token], id: Token, paramTypeOpt: Option[(Token, Type)], defaultValueOpt: Option[(Token, Expr)]) extends AstNode {
lazy val tokens = flatten(annotations, modifiers, valOrVarOpt, id, paramTypeOpt, defaultValueOpt)
}
case class FunDefOrDcl(
defToken: Token,
nameToken: Token, // id or THIS
typeParamClauseOpt: Option[TypeParamClause],
paramClauses: ParamClauses,
returnTypeOpt: Option[(Token, Type)],
funBodyOpt: Option[FunBody],
localDef: Boolean
) extends DefOrDcl {
lazy val tokens = flatten(defToken, nameToken, typeParamClauseOpt, paramClauses, returnTypeOpt, funBodyOpt)
}
case class TmplDef(
markerTokens: List[Token],
name: Token,
typeParamClauseOpt: Option[TypeParamClause],
annotations: List[Annotation],
accessModifierOpt: Option[AccessModifier],
paramClausesOpt: Option[ParamClauses],
templateInheritanceSectionOpt: Option[TemplateInheritanceSection],
templateBodyOption: Option[TemplateBody]
) extends DefOrDcl {
//require(markerTokens.size <= 2)
lazy val tokens = flatten(markerTokens, name, typeParamClauseOpt, annotations, accessModifierOpt, paramClausesOpt, templateInheritanceSectionOpt, templateBodyOption)
}
case class TemplateInheritanceSection(
extendsOrSubtype: Token,
earlyDefsOpt: Option[EarlyDefs],
templateParentsOpt: Option[TemplateParents]
) extends AstNode {
lazy val tokens = flatten(extendsOrSubtype, earlyDefsOpt, templateParentsOpt)
}
case class EarlyDefs(earlyBody: TemplateBody, withOpt: Option[Token]) extends AstNode {
lazy val tokens = flatten(earlyBody, withOpt)
}
case class Template(earlyDefsOpt: Option[EarlyDefs], templateParentsOpt: Option[TemplateParents], templateBodyOpt: Option[TemplateBody]) extends ExprElement {
lazy val tokens = flatten(earlyDefsOpt, templateParentsOpt, templateBodyOpt)
}
case class TemplateBody(newlineOpt: Option[Token], lbrace: Token, statSeq: StatSeq, rbrace: Token) extends AstNode {
lazy val tokens = flatten(newlineOpt, lbrace, statSeq, rbrace)
}
sealed trait Stat extends AstNode
case class StatSeq(
selfReferenceOpt: Option[(Expr, Token)],
firstStatOpt: Option[Stat],
otherStats: List[(Token, Option[Stat])]
) extends AstNode with ExprElement {
lazy val tokens = flatten(selfReferenceOpt, firstStatOpt, otherStats)
}
case class TemplateParents(typeAndArgs: (Type, List[ArgumentExprs]), withTypesAndArgs: List[(Token, Type, List[ArgumentExprs])]) extends AstNode {
lazy val tokens = flatten(typeAndArgs, withTypesAndArgs)
}
case class ImportClause(importToken: Token, importExpr: ImportExpr, otherImportExprs: List[(Token, ImportExpr)]) extends AstNode with Stat {
lazy val tokens = flatten(importToken, importExpr, otherImportExprs)
}
sealed trait ImportExpr extends AstNode
case class BlockImportExpr(prefixExpr: Expr, importSelectors: ImportSelectors) extends ImportExpr {
lazy val tokens = flatten(prefixExpr, importSelectors)
}
case class ImportSelectors(lbrace: Token, firstImportSelector: Expr, otherImportSelectors: List[(Token, Expr)], rbrace: Token) extends AstNode {
lazy val tokens = flatten(lbrace, firstImportSelector, otherImportSelectors, rbrace)
}
case class PackageBlock(packageToken: Token, name: CallExpr, newlineOpt: Option[Token], lbrace: Token, topStats: StatSeq, rbrace: Token) extends Stat {
lazy val tokens = flatten(packageToken, name, newlineOpt, lbrace, topStats, rbrace)
}
case class PackageStat(packageToken: Token, name: CallExpr) extends Stat {
lazy val tokens = flatten(packageToken, name)
}
sealed trait Modifier extends AstNode
case class SimpleModifier(token: Token) extends Modifier {
lazy val tokens = flatten(token)
}
case class AccessModifier(privateOrProtected: Token, accessQualifierOpt: Option[AccessQualifier]) extends Modifier {
lazy val tokens = flatten(privateOrProtected, accessQualifierOpt)
}
case class AccessQualifier(lbracket: Token, thisOrId: Token, rbracket: Token) extends AstNode {
lazy val tokens = flatten(lbracket, thisOrId, rbracket)
}
case class CompilationUnit(topStats: StatSeq, eofToken: Token) extends AstNode {
lazy val tokens = flatten(topStats, eofToken)
}
case class AnonymousFunctionStart(parameters: List[ExprElement], arrow: Token) extends ExprElement {
lazy val tokens = flatten(parameters, arrow)
}
case class AnonymousFunction(parameters: List[ExprElement], arrow: Token, body: StatSeq) extends ExprElement {
lazy val tokens = flatten(parameters, arrow, body)
}
case class StringInterpolation(interpolationId: Token, stringPartsAndScala: List[(Token, Expr)], terminalString: Token) extends ExprElement {
lazy val tokens = flatten(interpolationId, stringPartsAndScala, terminalString)
}
sealed trait XmlExprElement extends ExprElement
case class XmlStartTag(startOpen: Token, name: Token, attributes: List[(Option[Token], XmlAttribute)], whitespaceOption: Option[Token], tagClose: Token) extends XmlExprElement {
lazy val tokens = flatten(startOpen, name, attributes, whitespaceOption, tagClose)
}
case class XmlAttribute(name: Token, whitespaceOption: Option[Token], equals: Token, whitespaceOption2: Option[Token], valueOrEmbeddedScala: Either[Token, Expr]) extends XmlExprElement {
lazy val tokens = flatten(name, whitespaceOption, equals, whitespaceOption2, valueOrEmbeddedScala)
}
case class XmlEmptyElement(startOpen: Token, name: Token, attributes: List[(Option[Token], XmlAttribute)], whitespaceOption: Option[Token], emptyClose: Token) extends XmlElement {
lazy val tokens = flatten(startOpen, name, attributes, whitespaceOption, emptyClose)
}
case class XmlEndTag(endOpen: Token, name: Token, whitespaceOption: Option[Token], tagClose: Token) extends XmlExprElement {
lazy val tokens = flatten(endOpen, name, whitespaceOption, tagClose)
}
sealed trait XmlElement extends XmlContents
case class XmlNonEmptyElement(startTag: XmlStartTag, contents: List[XmlContents], endTag: XmlEndTag) extends XmlElement {
lazy val tokens = flatten(startTag, contents, endTag)
}
sealed trait XmlContents extends XmlExprElement
case class XmlPCDATA(token: Token) extends XmlContents { lazy val tokens = flatten(token) }
case class XmlCDATA(token: Token) extends XmlContents { lazy val tokens = flatten(token) }
case class XmlComment(token: Token) extends XmlContents { lazy val tokens = flatten(token) }
case class XmlUnparsed(token: Token) extends XmlContents { lazy val tokens = flatten(token) }
case class XmlProcessingInstruction(token: Token) extends XmlContents { lazy val tokens = flatten(token) }
case class XmlExpr(first: XmlContents, otherElements: List[XmlContents]) extends ExprElement {
lazy val tokens = flatten(first, otherElements)
}
| triggerNZ/scalariform | scalariform/src/main/scala/com/danieltrinh/scalariform/parser/AstNodes.scala | Scala | mit | 18,408 |
package at.ac.tuwien.ifs.utils
/**
* Created by aldo on 31/08/16.
*/
object Profiler {
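  // Usage sketch (the computation is illustrative): Profiler.time(() => expensiveComputation())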
  def time[T](a: () => T): T = {
print("Profiler: ")
val t0 = System.currentTimeMillis()
val r = a()
val t1 = System.currentTimeMillis()
println("completed in " + (t1 - t0) / 1000f + "s")
r
}
}
| aldolipani/PoolBiasEstimators | src/main/scala/at/ac/tuwien/ifs/utils/Profiler.scala | Scala | apache-2.0 | 315 |
package scorex.account
import scorex.crypto.SigningFunctionsImpl
case class PrivateKeyAccount(seed: Array[Byte], privateKey: Array[Byte], override val publicKey: Array[Byte])
extends PublicKeyAccount(publicKey) {
require(seed != null)
require(privateKey != null)
require(publicKey != null)
override val address = Account.fromPubkey(publicKey)
def this(seed: Array[Byte], keyPair: (Array[Byte], Array[Byte])) = this(seed, keyPair._1, keyPair._2)
def this(seed: Array[Byte]) = this(seed, SigningFunctionsImpl.createKeyPair(seed))
} | beni55/Scorex-Lagonaki | scorex-basics/src/main/scala/scorex/account/PrivateKeyAccount.scala | Scala | cc0-1.0 | 549 |
import lolchat._
import lolchat.data._
import lolchat.model._
import org.scalatest.concurrent.AsyncAssertions.{Dismissals, Waiter}
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import cats.syntax.all._
import scala.collection.mutable
import scala.concurrent.duration._
import scala.util.Random
class FriendMgmtSpec extends BaseSpec {
override def beforeAll(): Unit = {
val prg = for {
_ <- login
f <- friends
} yield f
// tests assume alice and bob are already friends
whenReady(LoLChat.run(prg(bobSess))) { res =>
res.fold(err => false, friends => friends.exists(_.id == alice.summId)) should be(true)
}
whenReady(LoLChat.run(prg(aliceSess))) { res =>
res.fold(err => false, friends => friends.exists(_.id == bob.summId)) should be(true)
}
}
it should "be able to create a friend group" in {
val testGroup = s"testGroup${Random.nextInt(1000)}"
val prg = for {
_ <- createGroup(testGroup)
names <- groupNames
} yield names
whenReady(LoLChat.run(prg(bobSess))) {
case Right(groupNames) => groupNames should contain(testGroup)
case Left(error) => fail(error.msg)
}
}
it should "be able to move a friend between groups" in {
val testGroup = s"testGroup${Random.nextInt(1000)}"
val prg = for {
_ <- moveFriendToGroup(alice.inGameName, testGroup)
f <- friendById(alice.summId)
} yield f
whenReady(LoLChat.run(prg(bobSess))) {
case Right(Some(f)) => f.groupName should contain(testGroup)
case Right(None) => fail("fail to find alice in friend list")
case Left(err) => fail(err.msg)
}
}
"LoLChat" should "be able to add and remove friends" in {
val waiter = new Waiter
val events = mutable.Queue[FriendListEvent]()
bobSess.friendListStream.map {
case e: FriendAdded => events.enqueue(e); waiter.dismiss()
case e: FriendRemoved => events.enqueue(e); waiter.dismiss()
case _ =>
}
val id = alice.summId
whenReady(LoLChat.run(removeFriend(id)(bobSess)))(identity)
whenReady(LoLChat.run(sendFriendRequest(id)(bobSess)))(identity)
waiter.await(Timeout(10.seconds), Dismissals(2))
events should be(mutable.Queue[FriendListEvent](FriendRemoved(id), FriendAdded(id)))
}
it should "be able to get online friends" in {
whenReady(LoLChat.run(onlineFriends(aliceSess))) { res =>
val onFriends: Vector[Friend] = res.getOrElse(fail("Fail to get online friends"))
onFriends.find(_.id == bob.summId) shouldBe defined
}
}
}
| Thangiee/League-of-Legend-Chat-Lib-Scala | lib/src/it/scala/FriendMgmtSpec.scala | Scala | mit | 2,584 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs105.boxes
import uk.gov.hmrc.ct.accounts.frs105.retriever.Frs105AccountsBoxRetriever
import uk.gov.hmrc.ct.accounts.{AccountsMoneyValidationFixture, MockFrs105AccountsRetriever}
class AC58Spec extends AccountsMoneyValidationFixture[Frs105AccountsBoxRetriever] with MockFrs105AccountsRetriever {
testAccountsMoneyValidationWithMin("AC58", minValue = 0, AC58)
}
| liquidarmour/ct-calculations | src/test/scala/uk/gov/hmrc/ct/accounts/frs105/boxes/AC58Spec.scala | Scala | apache-2.0 | 1,006 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.sse
import io.gatling.commons.util.Clock
import io.gatling.commons.validation.{ Failure, Validation }
import io.gatling.core.CoreComponents
import io.gatling.core.action.{ Action, RequestAction }
import io.gatling.core.session.{ Expression, Session }
import io.gatling.core.stats.StatsEngine
import io.gatling.core.util.NameGen
import io.gatling.http.action.sse.fsm.SseFsm
import io.gatling.http.client.Request
import io.gatling.http.protocol.HttpComponents
class SseConnect(
override val requestName: Expression[String],
sseName: Expression[String],
request: Expression[Request],
connectCheckSequences: List[SseMessageCheckSequenceBuilder],
coreComponents: CoreComponents,
httpComponents: HttpComponents,
val next: Action
) extends RequestAction
with SseAction
with NameGen {
override val name: String = genName("sseConnect")
override def clock: Clock = coreComponents.clock
override def statsEngine: StatsEngine = coreComponents.statsEngine
override def sendRequest(session: Session): Validation[Unit] =
for {
reqName <- requestName(session)
fsmName <- sseName(session)
_ <- fetchFsm(fsmName, session) match {
case _: Failure =>
for {
request <- request(session)
resolvedCheckSequences <- SseMessageCheckSequenceBuilder.resolve(connectCheckSequences, session)
} yield {
logger.debug(s"Opening sse '$fsmName': Scenario '${session.scenario}', UserId #${session.userId}")
val fsm = SseFsm(
session,
fsmName,
reqName,
request,
resolvedCheckSequences,
statsEngine,
httpComponents.httpEngine,
httpComponents.httpProtocol,
clock
)
fsm.onPerformInitialConnect(session.set(fsmName, fsm), next)
}
case _ =>
Failure(s"Unable to create a new SSE stream with name $sseName: already exists")
}
} yield ()
}
| gatling/gatling | gatling-http/src/main/scala/io/gatling/http/action/sse/SseConnect.scala | Scala | apache-2.0 | 2,684 |
import sbt._
object Dependencies {
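  // Referenced from the build definition, e.g. (illustrative):
  //   libraryDependencies ++= Seq(Libraries.akkaActor, Libraries.sprayCan, Libraries.scalaTest)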
val resolutionRepos = Seq(
"Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/",
"spray" at "http://repo.spray.io",
"spray nightly" at "http://nightlies.spray.io/"
)
val akkaVersion = "2.3.6"
val sprayVersion = "1.3.1"
val scalaVersion = "2.10.4"
object Libraries {
val scalaActors = "org.scala-lang" % "scala-actors" % scalaVersion
val scalaTest = "org.scalatest" % "scalatest_2.10" % "1.9.1" % "test"
val junit = "com.novocode" % "junit-interface" % "0.10-M2" % "test"
val scallop = "org.rogach" %% "scallop" % "0.8.1"
val akkaActor = "com.typesafe.akka" %% "akka-actor" % akkaVersion
val akkaRemote = "com.typesafe.akka" %% "akka-remote" % akkaVersion
val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % akkaVersion
val akkaTestkit = "com.typesafe.akka" %% "akka-testkit" % akkaVersion
val logback = "ch.qos.logback" % "logback-classic" % "1.0.9"
val spray = "io.spray" %% "spray-can" % sprayVersion
val sprayRouting = "io.spray" %% "spray-routing" % sprayVersion
val sprayJson = "io.spray" %% "spray-json" % "1.2.6"
val sprayHttpx = "io.spray" %% "spray-httpx" % sprayVersion
val sprayClient = "io.spray" %% "spray-client" % sprayVersion
val sprayCan = "io.spray" %% "spray-can" % sprayVersion
val sprayWebsocket = "com.wandoulabs.akka" %% "spray-websocket" % "0.1.3"
}
}
| hamiltont/clasp | project/Dependencies.scala | Scala | mit | 1,539 |
package com.naokia.groonga4s
package object request {
trait Request{
def toQuery: String
}
trait RequestWithBody extends Request{
def getBody: String
}
}
| naokia/groonga4s | src/main/scala/com/naokia/groonga4s/request/package.scala | Scala | apache-2.0 | 172 |
package almhirt.akkax
import akka.actor.{ Props, ActorRef, ActorPath, ActorSelection }
import almhirt.common._
import almhirt.tracking.CorrelationId
object ActorMessages {
final case class CreateChildActors(factories: Seq[ComponentFactory], returnActorRefs: Boolean, correlationId: Option[CorrelationId])
final case class CreateChildActor(factory: ComponentFactory, returnActorRef: Boolean, correlationId: Option[CorrelationId])
sealed trait CreateChildActorRsp
final case class ChildActorCreated(actorRef: ActorRef, correlationId: Option[CorrelationId]) extends CreateChildActorRsp
final case class CreateChildActorFailed(cause: Problem, correlationId: Option[CorrelationId]) extends CreateChildActorRsp
sealed trait ResovleResponse
sealed trait ResolveSingleResponse extends ResovleResponse
final case class ResolvedSingle(resolved: ActorRef, correlationId: Option[CorrelationId]) extends ResolveSingleResponse
final case class SingleNotResolved(problem: Problem, correlationId: Option[CorrelationId]) extends ResolveSingleResponse
sealed trait ResolveManyResponse extends ResovleResponse
final case class ManyResolved(resolved: Map[String, ActorRef], correlationId: Option[CorrelationId]) extends ResolveManyResponse
final case class ManyNotResolved(problem: Problem, correlationId: Option[CorrelationId]) extends ResolveManyResponse
final case class UnfoldComponents(factories: Seq[ComponentFactory])
sealed trait CircuitStateChangedMessage
sealed trait CircuitNotAllWillFail extends CircuitStateChangedMessage
sealed trait CircuitAllWillFail extends CircuitStateChangedMessage
case object CircuitClosed extends CircuitNotAllWillFail
case object CircuitHalfOpened extends CircuitNotAllWillFail
case object CircuitOpened extends CircuitAllWillFail
case object CircuitFuseRemoved extends CircuitAllWillFail
case object CircuitDestroyed extends CircuitAllWillFail
case object CircuitCircumvented extends CircuitNotAllWillFail
sealed trait HerderAppStartupMessage
case object HerderServiceAppStarted extends HerderAppStartupMessage
final case class HerderServiceAppFailedToStart(problem: Problem) extends HerderAppStartupMessage
sealed trait ComponentControlMessage
sealed trait ComponentControlCommand extends ComponentControlMessage { def action: ComponentControlAction }
final case class Pause(token: Option[PauseToken]) extends ComponentControlCommand { override val action = ComponentControlAction.Pause }
final case class Resume(token: Option[PauseToken]) extends ComponentControlCommand { override val action = ComponentControlAction.Resume }
case object Restart extends ComponentControlCommand { override val action = ComponentControlAction.Restart }
case object PrepareForShutdown extends ComponentControlCommand { override val action = ComponentControlAction.PrepareForShutdown }
case object ReportComponentState extends ComponentControlMessage
sealed trait StatusReportMessage
final case class SendStatusReport(options: ezreps.EzOptions) extends StatusReportMessage
sealed trait SendStatusReportRsp extends StatusReportMessage
final case class CurrentStatusReport(status: ezreps.EzReport) extends SendStatusReportRsp
final case class ReportStatusFailed(cause: almhirt.problem.ProblemCause) extends SendStatusReportRsp
case object ConsiderMeForReporting
case object ForgetMeForReporting
}
object CreateChildActorHelper {
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration
import akka.pattern._
import almhirt.almfuture.all._
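  /** Asks `parent` to create a child actor from `factory` and maps the typed response back onto
    * an AlmFuture[ActorRef], failing with the reported Problem when creation fails. */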
def createChildActor(parent: ActorRef, factory: ComponentFactory, correlationId: Option[CorrelationId])(maxDur: FiniteDuration)(implicit execCtx: ExecutionContext): AlmFuture[ActorRef] = {
parent.ask(ActorMessages.CreateChildActor(factory, true, correlationId))(maxDur).mapCastTo[ActorMessages.CreateChildActorRsp].mapV {
case ActorMessages.ChildActorCreated(newChild, correlationId) ⇒ scalaz.Success(newChild)
case ActorMessages.CreateChildActorFailed(problem, correlationId) ⇒ scalaz.Failure(problem)
}
}
} | chridou/almhirt | almhirt-core/src/main/scala/almhirt/akkax/ActorMessages.scala | Scala | apache-2.0 | 4,110 |
package com.ornithoptergames.psav
import java.io.File
import scala.util.Failure
import scala.util.Try
import scala.util.matching.Regex
import FrameInfoLoader._
import akka.actor.Actor
trait FileLoader {
def load(file: File): Try[FrameInfo]
}
object FrameInfoLoader {
val actorName = "frame-info-loader"
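  // Message protocol: send Load(file, filters) to this actor; it replies with a LoadResult
  // (Try[FrameInfo]) in which frames whose names match any filter regex have been removed.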
case class Load(file: File, layerNameFilters: List[Regex])
type LoadResult = Try[FrameInfo]
class LoadException(message: String = null, cause: Throwable = null) extends Exception(message, cause)
}
class FrameInfoLoader extends Actor {
def receive = {
case Load(file, filters) =>
val result = loadByExtension(file).map { info => applyFilter(info, filters) }
sender ! result
}
private[this] def extension(file: File) = file.getName().takeRight(3).toLowerCase()
private[this] def loadByExtension(file: File): Try[FrameInfo] =
extension(file) match {
case "psd" => PsdLoader.load(file)
case "svg" => SvgLoader.load(file)
case _ => Failure(new LoadException("Unsupported file extension."))
}
private[this] def applyFilter(frameInfo: FrameInfo, filters: List[Regex]) = {
lazy val exclude = (f: Frame) => filters.exists { _.pattern.matcher(f.name).matches() }
val frames = frameInfo.frames.filterNot(exclude)
FrameInfo(frameInfo.size, frames)
}
} | JavadocMD/anim-view | src/main/scala/com/ornithoptergames/psav/FrameInfoLoader.scala | Scala | apache-2.0 | 1,392 |
package org.fusesource.cloudmix.controller.resources
import org.fusesource.scalate.servlet.ServletRenderContext
import org.fusesource.cloudmix.common.dto.{AgentDetails, DependencyStatus, FeatureDetails, ProfileDetails}
import org.fusesource.cloudmix.common.URIs
class ViewHelper(implicit context: ServletRenderContext) {
def uri(text: String) = context.uri(text)
// Profiles
def profileLink(resource: ProfileResource): String = {
profileLink(resource.getProfileDetails)
}
def profileLink(profile: ProfileDetails): String = {
uri("/profiles/" + profile.getId)
}
def propertiesLink(profile: ProfileDetails): String = {
profileLink(profile) + "/properties"
}
def propertiesLink(resource: ProfileResource): String = {
profileLink(resource) + "/properties"
}
// Features....
def featureLink(feature: FeatureDetails): String = {
featureLink(feature.getId)
}
def featureLink(feature: DependencyStatus): String = {
featureLink(feature.getFeatureId)
}
def featureLink(featureId: String): String = {
uri("/features/" + featureId)
}
def agentFeatureLink(agent: AgentDetails, featureId: String): String = {
URIs.appendPaths(agent.getHref, "features", featureId)
}
// Agents
def agentLink(agent: AgentDetails): String = {
agentLink(agent.getId)
}
def agentLink(agentId: String): String = {
uri("/agents/" + agentId)
}
/*
def siteLink(agent: AgentDetails): NodeSeq = {
val href = agent.getHref
if (href != null)
<a href={href} class='site'>
{agent.getId}
</a>
else
Text("")
}
*/
} | chirino/cloudmix | org.fusesource.cloudmix.controller.webapp/src/main/scala/org/fusesource/cloudmix/controller/resources/ViewHelper.scala | Scala | agpl-3.0 | 1,614 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
import scala.util.parsing.input._
import org.junit.Test
import org.junit.Assert.fail
class t8879 {
@Test
def test: Unit = {
val testPagedSeq = {
var nbpage = 0
def more(data: Array[Char], start: Int, len: Int): Int = {
if (nbpage < 1) {
var i = 0
while (i < len && nbpage < 3) {
if (i % 100 != 0) {
data(start + i) = 'a'
} else {
data(start + i) = '\n'
}
i += 1
}
if (i == 0) -1 else {
nbpage += 1
i
}
} else {
fail("Should not read more than 1 page!")
0
}
}
new PagedSeq(more(_: Array[Char], _: Int, _: Int))
}
val s = new StreamReader(testPagedSeq, 0, 1)
// should not trigger reading of the second page
s.drop(20)
}
}
| scala/scala-parser-combinators | shared/src/test/scala/scala/util/parsing/combinator/t8879.scala | Scala | apache-2.0 | 1,188 |
package scalan.compilation.lms.cxx.sharedptr
import java.io.File
import scalan.compilation.lms.linalgebra.{LinAlgLmsBridge, LinAlgCxxShptrLmsBackend}
import scalan.{JNIExtractorOps, JNIExtractorOpsExp}
import scalan.compilation.GraphVizConfig
import scalan.compilation.lms.JNILmsBridge
import scalan.compilation.lms.cxx.LmsCompilerCxx
import scalan.it.BaseItTests
import scalan.linalgebra._
trait JNILinAlgProg extends LinearAlgebraExamples with JNIExtractorOps {
lazy val ddmvm_jni = JNI_Wrap(ddmvm)
lazy val dsmvm_jni = JNI_Wrap(dsmvm)
lazy val sdmvm_jni = JNI_Wrap(sdmvm)
lazy val ssmvm_jni = JNI_Wrap(ssmvm)
lazy val fdmvm_jni = JNI_Wrap(fdmvm)
lazy val fsmvm_jni = JNI_Wrap(fsmvm)
}
class JNILinAlgItTests extends BaseItTests[JNILinAlgProg](???) {
class ProgExp extends LADslExp with JNILinAlgProg with JNIExtractorOpsExp
val prog = new LmsCompilerCxx(new ProgExp) with JNILmsBridge with LinAlgLmsBridge {
override val lms = new LinAlgCxxShptrLmsBackend
}
implicit val cfg = prog.defaultCompilerConfig
val defaultCompilers = compilers(prog)
val dir = new File(prefix, "mvm-cxx")
test("ddmvm_jni") {
// doesn't compile yet (similar below)
// compileSource(_.ddmvm_jni)
prog.buildExecutable(dir, dir, "ddmvm", prog.scalan.ddmvm_jni, GraphVizConfig.default)
}
test("dsmvm_jni") {
prog.buildExecutable(dir, dir, "dsmvm", prog.scalan.dsmvm_jni, GraphVizConfig.default)
}
test("sdmvm_jni") {
prog.buildExecutable(dir, dir, "sdmvm", prog.scalan.sdmvm_jni, GraphVizConfig.default)
}
test("ssmvm_jni") {
prog.buildExecutable(dir, dir, "ssmvm", prog.scalan.ssmvm_jni, GraphVizConfig.default)
}
test("fdmvm_jni") {
prog.buildExecutable(dir, dir, "fdmvm", prog.scalan.fdmvm_jni, GraphVizConfig.default)
}
test("fsmvm_jni") {
prog.buildExecutable(dir, dir, "fsmvm", prog.scalan.fsmvm_jni, GraphVizConfig.default)
}
}
| scalan/scalan | lms-backend/linear-algebra/src/it/scala/scalan/compilation/lms/cxx/sharedptr/JNILinAlgItTests.scala | Scala | apache-2.0 | 1,907 |
/**
* Copyright 2012 Foursquare Labs, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.foursquare.lint
import scala.reflect.generic.Flags
import scala.tools.nsc.{Global, Phase}
import scala.tools.nsc.plugins.{Plugin, PluginComponent}
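// Enabled in a build by passing the plugin jar to scalac, e.g. (path is illustrative):
//   scalacOptions += "-Xplugin:/path/to/linter.jar"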
class LinterPlugin(val global: Global) extends Plugin {
import global._
val name = "linter"
val description = ""
val components = List[PluginComponent](LinterComponent)
private object LinterComponent extends PluginComponent {
import global._
val global = LinterPlugin.this.global
override val runsAfter = List("typer")
val phaseName = "linter"
override def newPhase(prev: Phase): StdPhase = new StdPhase(prev) {
override def apply(unit: global.CompilationUnit): Unit = {
new LinterTraverser(unit).traverse(unit.body)
}
}
class LinterTraverser(unit: CompilationUnit) extends Traverser {
import definitions.{AnyClass, ObjectClass, Object_==, OptionClass, SeqClass}
val JavaConversionsModule: Symbol = definitions.getModule("scala.collection.JavaConversions")
val SeqLikeClass: Symbol = definitions.getClass("scala.collection.SeqLike")
val SeqLikeContains: Symbol = SeqLikeClass.info.member(newTermName("contains"))
val OptionGet: Symbol = OptionClass.info.member(nme.get)
def SeqMemberType(seenFrom: Type): Type = {
SeqLikeClass.tpe.typeArgs.head.asSeenFrom(seenFrom, SeqLikeClass)
}
def isSubtype(x: Tree, y: Tree): Boolean = {
x.tpe.widen <:< y.tpe.widen
}
def methodImplements(method: Symbol, target: Symbol): Boolean = {
method == target || method.allOverriddenSymbols.contains(target)
}
def isGlobalImport(selector: ImportSelector): Boolean = {
selector.name == nme.WILDCARD && selector.renamePos == -1
}
override def traverse(tree: Tree): Unit = tree match {
case Apply(eqeq @ Select(lhs, nme.EQ), List(rhs))
if methodImplements(eqeq.symbol, Object_==) && !(isSubtype(lhs, rhs) || isSubtype(rhs, lhs)) =>
val warnMsg = "Comparing with == on instances of different types (%s, %s) will probably return false."
unit.warning(eqeq.pos, warnMsg.format(lhs.tpe.widen, rhs.tpe.widen))
case Import(pkg, selectors)
if pkg.symbol == JavaConversionsModule && selectors.exists(isGlobalImport) =>
unit.warning(pkg.pos, "Conversions in scala.collection.JavaConversions._ are dangerous.")
case Import(pkg, selectors)
if selectors.exists(isGlobalImport) =>
unit.warning(pkg.pos, "Wildcard imports should be avoided. Favor import selector clauses.")
case Apply(contains @ Select(seq, _), List(target))
if methodImplements(contains.symbol, SeqLikeContains) && !(target.tpe <:< SeqMemberType(seq.tpe)) =>
val warnMsg = "SeqLike[%s].contains(%s) will probably return false."
unit.warning(contains.pos, warnMsg.format(SeqMemberType(seq.tpe), target.tpe.widen))
case get @ Select(_, nme.get) if methodImplements(get.symbol, OptionGet) =>
if (!get.pos.source.path.contains("src/test")) {
unit.warning(get.pos, "Calling .get on Option will throw an exception if the Option is None.")
}
case _ =>
super.traverse(tree)
}
}
}
}
| non/linter | src/main/scala/LinterPlugin.scala | Scala | apache-2.0 | 3,909 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.data.jdbc.query
import java.sql.Types.{BLOB, CLOB, DATE, TIMESTAMP}
import java.sql.{BatchUpdateException, Connection, PreparedStatement, ResultSet, SQLException}
import javax.sql.DataSource
import org.beangle.commons.collection.page.PageLimit
import org.beangle.commons.lang.Strings
import org.beangle.commons.logging.Logging
import org.beangle.data.jdbc.DefaultSqlTypeMapping
import org.beangle.data.jdbc.engine.Engines
import scala.collection.immutable.ArraySeq
object JdbcExecutor {
def convert(rs: ResultSet, types: Array[Int]): Array[Any] = {
val objs = Array.ofDim[Any](types.length)
types.indices foreach { i =>
var v = rs.getObject(i + 1)
if (null != v) {
types(i) match {
            // TIMESTAMP: the JDBC driver may return a vendor-specific type instead of java.sql.Timestamp, so re-read it explicitly
case TIMESTAMP => v = rs.getTimestamp(i + 1)
case DATE => v = rs.getDate(i + 1)
case BLOB =>
val blob = rs.getBlob(i + 1)
v = blob.getBytes(1, blob.length.toInt)
case CLOB =>
val clob = rs.getClob(i + 1)
v = clob.getSubString(1, clob.length.toInt)
case _ =>
}
}
objs(i) = v
}
objs
}
}
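/**
 * Thin JDBC helper around a [[javax.sql.DataSource]].
 *
 * A minimal usage sketch (table, SQL and parameters are illustrative):
 * {{{
 *   val executor = new JdbcExecutor(dataSource)
 *   val rows = executor.query("select id, name from users where age > ?", 18)
 *   executor.update("update users set name = ? where id = ?", "alice", 1L)
 * }}}
 */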
class JdbcExecutor(dataSource: DataSource) extends Logging {
private val engine = Engines.forDataSource(dataSource)
val sqlTypeMapping = new DefaultSqlTypeMapping(engine)
var showSql = false
var fetchSize = 1000
def unique[T](sql: String, params: Any*): Option[T] = {
val rs = query(sql, params: _*)
if (rs.isEmpty) {
None
} else {
val o = rs.head
Some(o.head.asInstanceOf[T])
}
}
def queryForInt(sql: String): Option[Int] = {
val num: Option[Number] = unique(sql)
num match {
case Some(n) => Some(n.intValue)
case None => None
}
}
def queryForLong(sql: String): Option[Long] = {
val num: Option[Number] = unique(sql)
num match {
case Some(n) => Some(n.longValue)
case None => None
}
}
def useConnection[T](f: Connection => T): T = {
val conn = dataSource.getConnection()
try {
f(conn)
} finally {
if (null != conn) {
conn.close()
}
}
}
def statement(sql: String): Statement = {
new Statement(sql, this)
}
def iterate(sql: String, params: Any*): ResultSetIterator = {
if (showSql) println("JdbcExecutor:" + sql)
val conn = dataSource.getConnection()
conn.setAutoCommit(false)
val stmt = conn.prepareStatement(sql)
stmt.setFetchSize(fetchSize)
TypeParamSetter(sqlTypeMapping, params)(stmt)
val rs = stmt.executeQuery()
new ResultSetIterator(rs)
}
def query(sql: String, params: Any*): collection.Seq[Array[Any]] = {
query(sql, TypeParamSetter(sqlTypeMapping, params))
}
def query(sql: String, setter: PreparedStatement => Unit): collection.Seq[Array[Any]] = {
if (showSql) println("JdbcExecutor:" + sql)
useConnection { conn =>
val stmt = conn.prepareStatement(sql)
setter(stmt)
new ResultSetIterator(stmt.executeQuery()).listAll()
}
}
def fetch(sql: String, limit: PageLimit, params: Any*): collection.Seq[Array[Any]] = {
fetch(sql, limit, TypeParamSetter(sqlTypeMapping, params))
}
def fetch(sql: String, limit: PageLimit, setter: PreparedStatement => Unit): collection.Seq[Array[Any]] = {
val rs = engine.limit(sql, limit.pageSize * (limit.pageIndex - 1), limit.pageSize)
if (showSql) println("JdbcExecutor:" + rs._1)
useConnection { conn =>
val stmt = conn.prepareStatement(rs._1)
setter(stmt)
var start = stmt.getParameterMetaData.getParameterCount - rs._2.size
rs._2 foreach { i =>
stmt.setInt(start + 1, i)
start += 1
}
new ResultSetIterator(stmt.executeQuery()).listAll()
}
}
def update(sql: String, params: Any*): Int = {
update(sql, TypeParamSetter(sqlTypeMapping, params))
}
def update(sql: String, setter: PreparedStatement => Unit): Int = {
if (showSql) println("JdbcExecutor:" + sql)
var stmt: PreparedStatement = null
val conn = dataSource.getConnection
if (conn.getAutoCommit) conn.setAutoCommit(false)
var rows = 0
try {
stmt = conn.prepareStatement(sql)
setter(stmt)
rows = stmt.executeUpdate()
stmt.close()
stmt = null
conn.commit()
} catch {
case e: SQLException =>
conn.rollback()
rethrow(e, sql)
} finally {
if (null != stmt) stmt.close()
conn.close()
}
rows
}
def batch(sql: String, datas: collection.Seq[Array[_]], types: collection.Seq[Int]): Seq[Int] = {
if (showSql) println("JdbcExecutor:" + sql)
var stmt: PreparedStatement = null
val conn = dataSource.getConnection
if (conn.getAutoCommit) conn.setAutoCommit(false)
val rows = new collection.mutable.ListBuffer[Int]
var curParam: Seq[_] = null
try {
stmt = conn.prepareStatement(sql)
for (param <- datas) {
curParam = ArraySeq.unsafeWrapArray(param)
ParamSetter.setParams(stmt, param, types)
stmt.addBatch()
}
rows ++= stmt.executeBatch()
conn.commit()
} catch {
case be: BatchUpdateException =>
conn.rollback()
rethrow(be.getNextException, sql, curParam)
case e: SQLException =>
conn.rollback()
rethrow(e, sql, curParam)
} finally {
stmt.close()
conn.close()
}
rows.toList
}
protected def rethrow(cause: SQLException, sql: String, params: Any*): Unit = {
var causeMessage = cause.getMessage
if (causeMessage == null) causeMessage = ""
val msg = new StringBuffer(causeMessage)
msg.append(" Query: ").append(sql).append(" Parameters: ")
if (params == null) msg.append("[]")
else msg.append(Strings.join(params, ","))
val e = new SQLException(msg.toString, cause.getSQLState, cause.getErrorCode)
e.setNextException(cause)
throw e
}
}
| beangle/data | jdbc/src/main/scala/org/beangle/data/jdbc/query/JdbcExecutor.scala | Scala | lgpl-3.0 | 6,704 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.{Locale, Properties}
import scala.collection.JavaConverters._
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.Partition
import org.apache.spark.annotation.InterfaceStability
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.json.{CreateJacksonParser, JacksonParser, JSONOptions}
import org.apache.spark.sql.execution.LogicalRDD
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.{DataSource, FailureSafeParser}
import org.apache.spark.sql.execution.datasources.csv._
import org.apache.spark.sql.execution.datasources.jdbc._
import org.apache.spark.sql.execution.datasources.json.TextInputJsonDataSource
import org.apache.spark.sql.types.{StringType, StructType}
import org.apache.spark.unsafe.types.UTF8String
/**
* Interface used to load a [[Dataset]] from external storage systems (e.g. file systems,
* key-value stores, etc). Use `SparkSession.read` to access this.
*
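 * A minimal usage sketch (the format and path are illustrative):
 * {{{
 *   val df = spark.read.format("json").load("path/to/data.json")
 * }}}
 *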
* @since 1.4.0
*/
@InterfaceStability.Stable
class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
/**
* Specifies the input data source format.
*
* @since 1.4.0
*/
def format(source: String): DataFrameReader = {
this.source = source
this
}
/**
* Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema
* automatically from data. By specifying the schema here, the underlying data source can
* skip the schema inference step, and thus speed up data loading.
*
* @since 1.4.0
*/
def schema(schema: StructType): DataFrameReader = {
this.userSpecifiedSchema = Option(schema)
this
}
/**
* Adds an input option for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 1.4.0
*/
def option(key: String, value: String): DataFrameReader = {
this.extraOptions += (key -> value)
this
}
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Boolean): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Long): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Double): DataFrameReader = option(key, value.toString)
/**
* (Scala-specific) Adds input options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 1.4.0
*/
def options(options: scala.collection.Map[String, String]): DataFrameReader = {
this.extraOptions ++= options
this
}
/**
* Adds input options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* </ul>
*
* @since 1.4.0
*/
def options(options: java.util.Map[String, String]): DataFrameReader = {
this.options(options.asScala)
this
}
/**
* Loads input in as a `DataFrame`, for data sources that don't require a path (e.g. external
* key-value stores).
*
* @since 1.4.0
*/
def load(): DataFrame = {
load(Seq.empty: _*) // force invocation of `load(...varargs...)`
}
/**
* Loads input in as a `DataFrame`, for data sources that require a path (e.g. data backed by
* a local or distributed file system).
*
* @since 1.4.0
*/
def load(path: String): DataFrame = {
option("path", path).load(Seq.empty: _*) // force invocation of `load(...varargs...)`
}
/**
* Loads input in as a `DataFrame`, for data sources that support multiple paths.
* Only works if the source is a HadoopFsRelationProvider.
*
* @since 1.6.0
*/
@scala.annotation.varargs
def load(paths: String*): DataFrame = {
if (source.toLowerCase(Locale.ROOT) == DDLUtils.HIVE_PROVIDER) {
throw new AnalysisException("Hive data source can only be used with tables, you can not " +
"read files of Hive data source directly.")
}
sparkSession.baseRelationToDataFrame(
DataSource.apply(
sparkSession,
paths = paths,
userSpecifiedSchema = userSpecifiedSchema,
className = source,
options = extraOptions.toMap).resolveRelation())
}
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table and connection properties.
*
* @since 1.4.0
*/
def jdbc(url: String, table: String, properties: Properties): DataFrame = {
assertNoSpecifiedSchema("jdbc")
// properties should override settings in extraOptions.
this.extraOptions ++= properties.asScala
// explicit url and dbtable should override all
this.extraOptions += (JDBCOptions.JDBC_URL -> url, JDBCOptions.JDBC_TABLE_NAME -> table)
format("jdbc").load()
}
/**
* Construct a `DataFrame` representing the database table accessible via JDBC URL
* url named table. Partitions of the table will be retrieved in parallel based on the parameters
* passed to this function.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
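   * A sketch of a partitioned read (connection details are illustrative):
   * {{{
   *   val props = new java.util.Properties()
   *   props.put("user", "sa")
   *   val df = spark.read.jdbc(
   *     "jdbc:postgresql:dbserver", "schema.tablename", "id", 0L, 10000L, 10, props)
   * }}}
   *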
* @param url JDBC database url of the form `jdbc:subprotocol:subname`.
* @param table Name of the table in the external database.
* @param columnName the name of a column of integral type that will be used for partitioning.
* @param lowerBound the minimum value of `columnName` used to decide partition stride.
* @param upperBound the maximum value of `columnName` used to decide partition stride.
* @param numPartitions the number of partitions. This, along with `lowerBound` (inclusive),
* `upperBound` (exclusive), form partition strides for generated WHERE
* clause expressions used to split the column `columnName` evenly. When
* the input is less than 1, the number is set to 1.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
* tag/value. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch.
* @since 1.4.0
*/
def jdbc(
url: String,
table: String,
columnName: String,
lowerBound: Long,
upperBound: Long,
numPartitions: Int,
connectionProperties: Properties): DataFrame = {
// columnName, lowerBound, upperBound and numPartitions override settings in extraOptions.
this.extraOptions ++= Map(
JDBCOptions.JDBC_PARTITION_COLUMN -> columnName,
JDBCOptions.JDBC_LOWER_BOUND -> lowerBound.toString,
JDBCOptions.JDBC_UPPER_BOUND -> upperBound.toString,
JDBCOptions.JDBC_NUM_PARTITIONS -> numPartitions.toString)
jdbc(url, table, connectionProperties)
}
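  // Illustrative sketch (not part of the original source; URL, table, column, bounds and
  // credentials are placeholders): reading a table in ten parallel partitions over an
  // integral column.
  //   val props = new java.util.Properties()
  //   props.setProperty("user", "reader")
  //   props.setProperty("password", "secret")
  //   val df = spark.read.jdbc(
  //     "jdbc:postgresql://dbhost:5432/sales", "orders", "order_id",
  //     0L, 1000000L, 10, props)
  // Spark generates ten WHERE-clause strides over `order_id`, one per partition.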
/**
   * Construct a `DataFrame` representing the database table named `table`, accessible via
   * the JDBC URL `url`, using the given connection properties. The `predicates` parameter
   * gives a list of expressions suitable for inclusion in WHERE clauses; each one defines
   * one partition of the `DataFrame`.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
* @param url JDBC database url of the form `jdbc:subprotocol:subname`
* @param table Name of the table in the external database.
* @param predicates Condition in the where clause for each partition.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
* tag/value. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch.
* @since 1.4.0
*/
def jdbc(
url: String,
table: String,
predicates: Array[String],
connectionProperties: Properties): DataFrame = {
assertNoSpecifiedSchema("jdbc")
// connectionProperties should override settings in extraOptions.
val params = extraOptions.toMap ++ connectionProperties.asScala.toMap
val options = new JDBCOptions(url, table, params)
val parts: Array[Partition] = predicates.zipWithIndex.map { case (part, i) =>
JDBCPartition(part, i) : Partition
}
val relation = JDBCRelation(parts, options)(sparkSession)
sparkSession.baseRelationToDataFrame(relation)
}
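  // Illustrative sketch (URL, table and predicate values are placeholders): one partition is
  // created per predicate in the array.
  //   val predicates = Array("region = 'EU'", "region = 'US'", "region = 'APAC'")
  //   val df = spark.read.jdbc(
  //     "jdbc:postgresql://dbhost:5432/sales", "orders", predicates, connectionProperties)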
/**
* Loads a JSON file and returns the results as a `DataFrame`.
*
* See the documentation on the overloaded `json()` method with varargs for more details.
*
* @since 1.4.0
*/
def json(path: String): DataFrame = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
json(Seq(path): _*)
}
/**
* Loads JSON files and returns the results as a `DataFrame`.
*
* <a href="http://jsonlines.org/">JSON Lines</a> (newline-delimited JSON) is supported by
* default. For JSON (one record per file), set the `wholeFile` option to true.
*
* This function goes through the input once to determine the input schema. If you know the
* schema in advance, use the version that specifies the schema to avoid the extra scan.
*
* You can set the following JSON-specific options to deal with non-standard JSON files:
* <ul>
* <li>`primitivesAsString` (default `false`): infers all primitive values as a string type</li>
* <li>`prefersDecimal` (default `false`): infers all floating-point values as a decimal
* type. If the values do not fit in decimal, then it infers them as doubles.</li>
* <li>`allowComments` (default `false`): ignores Java/C++ style comment in JSON records</li>
* <li>`allowUnquotedFieldNames` (default `false`): allows unquoted JSON field names</li>
* <li>`allowSingleQuotes` (default `true`): allows single quotes in addition to double quotes
* </li>
* <li>`allowNumericLeadingZeros` (default `false`): allows leading zeros in numbers
* (e.g. 00012)</li>
* <li>`allowBackslashEscapingAnyCharacter` (default `false`): allows accepting quoting of all
* character using backslash quoting mechanism</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing.
* <ul>
* <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record, and puts
* the malformed string into a field configured by `columnNameOfCorruptRecord`. To keep
   *   corrupt records, a user can set a string type field named `columnNameOfCorruptRecord`
   *   in a user-defined schema. If a schema does not have the field, it drops corrupt records
   *   during parsing. When inferring a schema, it implicitly adds a `columnNameOfCorruptRecord`
   *   field in the output schema.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* <li>`columnNameOfCorruptRecord` (default is the value specified in
* `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
* created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to
* date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
* <li>`wholeFile` (default `false`): parse one record, which may span multiple lines,
* per file</li>
* </ul>
*
* @since 2.0.0
*/
@scala.annotation.varargs
def json(paths: String*): DataFrame = format("json").load(paths : _*)
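  // Illustrative sketch (path and option values are placeholders): reading JSON records that
  // may span multiple lines, keeping malformed records in the corrupt-record column.
  //   val df = spark.read
  //     .option("wholeFile", "true")
  //     .option("mode", "PERMISSIVE")
  //     .json("/data/events/*.json")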
/**
* Loads a `JavaRDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON
* Lines text format or newline-delimited JSON</a>) and returns the result as
* a `DataFrame`.
*
* Unless the schema is specified using `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
@deprecated("Use json(Dataset[String]) instead.", "2.2.0")
def json(jsonRDD: JavaRDD[String]): DataFrame = json(jsonRDD.rdd)
/**
* Loads an `RDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON Lines
* text format or newline-delimited JSON</a>) and returns the result as a `DataFrame`.
*
* Unless the schema is specified using `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
@deprecated("Use json(Dataset[String]) instead.", "2.2.0")
def json(jsonRDD: RDD[String]): DataFrame = {
json(sparkSession.createDataset(jsonRDD)(Encoders.STRING))
}
/**
* Loads a `Dataset[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON Lines
* text format or newline-delimited JSON</a>) and returns the result as a `DataFrame`.
*
* Unless the schema is specified using `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonDataset input Dataset with one JSON object per record
* @since 2.2.0
*/
def json(jsonDataset: Dataset[String]): DataFrame = {
val parsedOptions = new JSONOptions(
extraOptions.toMap,
sparkSession.sessionState.conf.sessionLocalTimeZone,
sparkSession.sessionState.conf.columnNameOfCorruptRecord)
val schema = userSpecifiedSchema.getOrElse {
TextInputJsonDataSource.inferFromDataset(jsonDataset, parsedOptions)
}
verifyColumnNameOfCorruptRecord(schema, parsedOptions.columnNameOfCorruptRecord)
val actualSchema =
StructType(schema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
val createParser = CreateJacksonParser.string _
val parsed = jsonDataset.rdd.mapPartitions { iter =>
val rawParser = new JacksonParser(actualSchema, parsedOptions)
val parser = new FailureSafeParser[String](
input => rawParser.parse(input, createParser, UTF8String.fromString),
parsedOptions.parseMode,
schema,
parsedOptions.columnNameOfCorruptRecord)
iter.flatMap(parser.parse)
}
Dataset.ofRows(
sparkSession,
LogicalRDD(schema.toAttributes, parsed)(sparkSession))
}
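  // Illustrative sketch (records are placeholders): building a DataFrame from an in-memory
  // Dataset[String] of JSON Lines; the schema is inferred from the records.
  //   import spark.implicits._
  //   val jsonDS = Seq("""{"id":1,"name":"a"}""", """{"id":2,"name":"b"}""").toDS()
  //   val df = spark.read.json(jsonDS)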
/**
* Loads a CSV file and returns the result as a `DataFrame`. See the documentation on the
* other overloaded `csv()` method for more details.
*
* @since 2.0.0
*/
def csv(path: String): DataFrame = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
csv(Seq(path): _*)
}
/**
   * Loads a `Dataset[String]` storing CSV rows and returns the result as a `DataFrame`.
*
* If the schema is not specified using `schema` function and `inferSchema` option is enabled,
* this function goes through the input once to determine the input schema.
*
* If the schema is not specified using `schema` function and `inferSchema` option is disabled,
* it determines the columns as string types and it reads only the first line to determine the
* names and the number of fields.
*
* @param csvDataset input Dataset with one CSV row per record
* @since 2.2.0
*/
def csv(csvDataset: Dataset[String]): DataFrame = {
val parsedOptions: CSVOptions = new CSVOptions(
extraOptions.toMap,
sparkSession.sessionState.conf.sessionLocalTimeZone)
val filteredLines: Dataset[String] =
CSVUtils.filterCommentAndEmpty(csvDataset, parsedOptions)
val maybeFirstLine: Option[String] = filteredLines.take(1).headOption
val schema = userSpecifiedSchema.getOrElse {
TextInputCSVDataSource.inferFromDataset(
sparkSession,
csvDataset,
maybeFirstLine,
parsedOptions)
}
verifyColumnNameOfCorruptRecord(schema, parsedOptions.columnNameOfCorruptRecord)
val actualSchema =
StructType(schema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
val linesWithoutHeader: RDD[String] = maybeFirstLine.map { firstLine =>
filteredLines.rdd.mapPartitions(CSVUtils.filterHeaderLine(_, firstLine, parsedOptions))
}.getOrElse(filteredLines.rdd)
val parsed = linesWithoutHeader.mapPartitions { iter =>
val rawParser = new UnivocityParser(actualSchema, parsedOptions)
val parser = new FailureSafeParser[String](
input => Seq(rawParser.parse(input)),
parsedOptions.parseMode,
schema,
parsedOptions.columnNameOfCorruptRecord)
iter.flatMap(parser.parse)
}
Dataset.ofRows(
sparkSession,
LogicalRDD(schema.toAttributes, parsed)(sparkSession))
}
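  // Illustrative sketch (rows are placeholders): parsing CSV held in a Dataset[String]; with
  // the `header` option set, the first row supplies the column names.
  //   import spark.implicits._
  //   val csvDS = Seq("id,name", "1,a", "2,b").toDS()
  //   val df = spark.read.option("header", "true").csv(csvDS)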
/**
* Loads CSV files and returns the result as a `DataFrame`.
*
* This function will go through the input once to determine the input schema if `inferSchema`
   * is enabled. To avoid that extra pass over the data, disable the `inferSchema` option or
* specify the schema explicitly using `schema`.
*
* You can set the following CSV-specific options to deal with CSV files:
* <ul>
* <li>`sep` (default `,`): sets the single character as a separator for each
* field and value.</li>
* <li>`encoding` (default `UTF-8`): decodes the CSV files by the given encoding
* type.</li>
* <li>`quote` (default `"`): sets the single character used for escaping quoted values where
   * the separator can be part of the value. If you would like to turn off quotations, set an
   * empty string rather than `null`. This behaviour is different from
* `com.databricks.spark.csv`.</li>
* <li>`escape` (default `\\`): sets the single character used for escaping quotes inside
* an already quoted value.</li>
* <li>`comment` (default empty string): sets the single character used for skipping lines
* beginning with this character. By default, it is disabled.</li>
* <li>`header` (default `false`): uses the first line as names of columns.</li>
* <li>`inferSchema` (default `false`): infers the input schema automatically from data. It
* requires one extra pass over the data.</li>
* <li>`ignoreLeadingWhiteSpace` (default `false`): a flag indicating whether or not leading
* whitespaces from values being read should be skipped.</li>
* <li>`ignoreTrailingWhiteSpace` (default `false`): a flag indicating whether or not trailing
* whitespaces from values being read should be skipped.</li>
* <li>`nullValue` (default empty string): sets the string representation of a null value. Since
* 2.0.1, this applies to all supported types including the string type.</li>
   * <li>`nanValue` (default `NaN`): sets the string representation of a non-number value.</li>
* <li>`positiveInf` (default `Inf`): sets the string representation of a positive infinity
* value.</li>
* <li>`negativeInf` (default `-Inf`): sets the string representation of a negative infinity
* value.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to
* date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
* <li>`maxColumns` (default `20480`): defines a hard limit of how many columns
* a record can have.</li>
* <li>`maxCharsPerColumn` (default `-1`): defines the maximum number of characters allowed
   * for any given value being read. By default it is -1, meaning unlimited length.</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing. It supports the following case-insensitive modes.
* <ul>
* <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record, and puts
* the malformed string into a field configured by `columnNameOfCorruptRecord`. To keep
   *   corrupt records, a user can set a string type field named `columnNameOfCorruptRecord`
   *   in a user-defined schema. If a schema does not have the field, it drops corrupt records
   *   during parsing. When the number of parsed CSV tokens is smaller than the expected length
   *   of the schema, it sets `null` for the extra fields.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* <li>`columnNameOfCorruptRecord` (default is the value specified in
* `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
* created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
* <li>`wholeFile` (default `false`): parse one record, which may span multiple lines.</li>
* </ul>
* @since 2.0.0
*/
@scala.annotation.varargs
def csv(paths: String*): DataFrame = format("csv").load(paths : _*)
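  // Illustrative sketch (path is a placeholder): reading CSV files with a header row and
  // schema inference, which costs one extra pass over the data.
  //   val df = spark.read
  //     .option("header", "true")
  //     .option("inferSchema", "true")
  //     .csv("/data/reports/*.csv")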
/**
* Loads a Parquet file, returning the result as a `DataFrame`. See the documentation
* on the other overloaded `parquet()` method for more details.
*
* @since 2.0.0
*/
def parquet(path: String): DataFrame = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
parquet(Seq(path): _*)
}
/**
* Loads a Parquet file, returning the result as a `DataFrame`.
*
* You can set the following Parquet-specific option(s) for reading Parquet files:
* <ul>
* <li>`mergeSchema` (default is the value specified in `spark.sql.parquet.mergeSchema`): sets
* whether we should merge schemas collected from all Parquet part-files. This will override
* `spark.sql.parquet.mergeSchema`.</li>
* </ul>
* @since 1.4.0
*/
@scala.annotation.varargs
def parquet(paths: String*): DataFrame = {
format("parquet").load(paths: _*)
}
/**
* Loads an ORC file and returns the result as a `DataFrame`.
*
* @param path input path
* @since 1.5.0
* @note Currently, this method can only be used after enabling Hive support.
*/
def orc(path: String): DataFrame = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
orc(Seq(path): _*)
}
/**
* Loads ORC files and returns the result as a `DataFrame`.
*
* @param paths input paths
* @since 2.0.0
* @note Currently, this method can only be used after enabling Hive support.
*/
@scala.annotation.varargs
def orc(paths: String*): DataFrame = format("orc").load(paths: _*)
/**
* Returns the specified table as a `DataFrame`.
*
* @since 1.4.0
*/
def table(tableName: String): DataFrame = {
assertNoSpecifiedSchema("table")
sparkSession.table(tableName)
}
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any. See the documentation on
* the other overloaded `text()` method for more details.
*
* @since 2.0.0
*/
def text(path: String): DataFrame = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
text(Seq(path): _*)
}
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any.
*
* Each line in the text files is a new row in the resulting DataFrame. For example:
* {{{
* // Scala:
* spark.read.text("/path/to/spark/README.md")
*
* // Java:
* spark.read().text("/path/to/spark/README.md")
* }}}
*
* @param paths input paths
* @since 1.6.0
*/
@scala.annotation.varargs
def text(paths: String*): DataFrame = format("text").load(paths : _*)
/**
* Loads text files and returns a [[Dataset]] of String. See the documentation on the
* other overloaded `textFile()` method for more details.
* @since 2.0.0
*/
def textFile(path: String): Dataset[String] = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
textFile(Seq(path): _*)
}
/**
* Loads text files and returns a [[Dataset]] of String. The underlying schema of the Dataset
* contains a single string column named "value".
*
* If the directory structure of the text files contains partitioning information, those are
* ignored in the resulting Dataset. To include partitioning information as columns, use `text`.
*
* Each line in the text files is a new element in the resulting Dataset. For example:
* {{{
* // Scala:
* spark.read.textFile("/path/to/spark/README.md")
*
* // Java:
* spark.read().textFile("/path/to/spark/README.md")
* }}}
*
* @param paths input path
* @since 2.0.0
*/
@scala.annotation.varargs
def textFile(paths: String*): Dataset[String] = {
assertNoSpecifiedSchema("textFile")
text(paths : _*).select("value").as[String](sparkSession.implicits.newStringEncoder)
}
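  // Illustrative sketch (path is a placeholder): counting lines that contain "ERROR".
  //   val logLines = spark.read.textFile("/data/logs/*.log")
  //   val errorCount = logLines.filter(_.contains("ERROR")).count()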
/**
* A convenient function for schema validation in APIs.
*/
private def assertNoSpecifiedSchema(operation: String): Unit = {
if (userSpecifiedSchema.nonEmpty) {
throw new AnalysisException(s"User specified schema not supported with `$operation`")
}
}
/**
* A convenient function for schema validation in datasources supporting
* `columnNameOfCorruptRecord` as an option.
*/
private def verifyColumnNameOfCorruptRecord(
schema: StructType,
columnNameOfCorruptRecord: String): Unit = {
schema.getFieldIndex(columnNameOfCorruptRecord).foreach { corruptFieldIndex =>
val f = schema(corruptFieldIndex)
if (f.dataType != StringType || !f.nullable) {
throw new AnalysisException(
"The field for corrupt records must be string type and nullable")
}
}
}
///////////////////////////////////////////////////////////////////////////////////////
// Builder pattern config options
///////////////////////////////////////////////////////////////////////////////////////
private var source: String = sparkSession.sessionState.conf.defaultDataSourceName
private var userSpecifiedSchema: Option[StructType] = None
private val extraOptions = new scala.collection.mutable.HashMap[String, String]
}
| MLnick/spark | sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala | Scala | apache-2.0 | 28,505 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.collection.mutable
import scala.collection.{IterableFactoryDefaults, SeqFactory}
trait Seq[A]
extends Iterable[A]
with collection.Seq[A]
with SeqOps[A, Seq, Seq[A]]
with IterableFactoryDefaults[A, Seq] {
override def iterableFactory: SeqFactory[Seq] = Seq
}
/**
* $factoryInfo
* @define coll mutable sequence
* @define Coll `mutable.Seq`
*/
@SerialVersionUID(3L)
object Seq extends SeqFactory.Delegate[Seq](ArrayBuffer)
/**
* @define coll mutable sequence
* @define Coll `mutable.Seq`
*/
trait SeqOps[A, +CC[_], +C <: AnyRef]
extends collection.SeqOps[A, CC, C]
with Cloneable[C] {
override def clone(): C = {
val b = newSpecificBuilder
b ++= this
b.result()
}
/** Replaces element at given index with a new value.
*
* @param idx the index of the element to replace.
* @param elem the new value.
* @throws IndexOutOfBoundsException if the index is not valid.
*/
@throws[IndexOutOfBoundsException]
def update(idx: Int, elem: A): Unit
@deprecated("Use `mapInPlace` on an `IndexedSeq` instead", "2.13.0")
  @`inline` final def transform(f: A => A): this.type = {
var i = 0
val siz = size
while (i < siz) { this(i) = f(this(i)); i += 1 }
this
}
}
/** Explicit instantiation of the `Seq` trait to reduce class file size in subclasses. */
abstract class AbstractSeq[A] extends scala.collection.AbstractSeq[A] with Seq[A]
| scala/scala | src/library/scala/collection/mutable/Seq.scala | Scala | apache-2.0 | 1,739 |
object Test {
def main(args: Array[String]): Unit = {
class Foo(val a: Int, val b: Int, val c: Int)
import scala.reflect.runtime.{currentMirror => cm}
val decls = cm.classSymbol(classOf[Foo]).info.decls
decls.sorted.toList.filter(!_.isMethod) foreach System.out.println
}
}
| folone/dotty | tests/pending/run/reflection-sorted-decls.scala | Scala | bsd-3-clause | 295 |
package com.blinkbox.books.purchasetransformer
import com.blinkbox.books.messaging.Xml._
import com.blinkbox.books.messaging.EventHeader
import java.io.ByteArrayInputStream
import scala.util.{ Try, Success, Failure }
import scala.xml.{ XML, Node }
import scala.xml.NodeSeq
// Code to convert incoming message to case classes.
// This kind of code should perhaps live in a separate library that can then be
// used by both the publisher(s) and consumers of the messages. Ideally, alongside a schema
// for the message.
/**
* A Purchase Complete message as published by the payment service after customer has bought something.
*/
case class Purchase(
userId: String,
basketId: String,
firstName: String,
lastName: String,
email: String,
clubcardNumber: Option[String],
clubcardPointsAward: Option[Int],
totalPrice: Price,
basketItems: Seq[BasketItem]) {
require(basketItems.size > 0, "No books given")
}
case class BasketItem(
isbn: String,
salePrice: Price,
listPrice: Price)
case class Price(amount: BigDecimal, currency: String)
object Purchase {
/**
* Convert input message to Purchase object.
*/
def fromXml(xml: Array[Byte]): Purchase = {
val purchase = XML.load(new ByteArrayInputStream(xml))
val basketId = purchase.stringValue("basketId")
val basketItems = for (basketItem <- purchase \\ "basketItems" \\ "basketItem")
yield BasketItem(basketItem.stringValue("isbn"),
price(basketItem \\ "salePrice"), price(basketItem \\ "listPrice"))
Purchase(purchase.stringValue("userId"), basketId, purchase.stringValue("firstName"),
purchase.stringValue("lastName"), purchase.stringValue("email"),
purchase.stringValueOptional("clubcardNumber"), purchase.stringValueOptional("clubcardPointsAward").map(_.toInt),
price(purchase \\ "totalPrice"), basketItems)
}
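  // Illustrative input shape, reconstructed from the fields read above (element order and any
  // attributes/namespaces are assumptions):
  //   <purchase>
  //     <userId>123</userId>
  //     <basketId>456</basketId>
  //     <firstName>Jane</firstName>
  //     <lastName>Doe</lastName>
  //     <email>jane@example.org</email>
  //     <clubcardNumber>1234567890</clubcardNumber>
  //     <clubcardPointsAward>10</clubcardPointsAward>
  //     <totalPrice><amount>12.99</amount><currency>GBP</currency></totalPrice>
  //     <basketItems>
  //       <basketItem>
  //         <isbn>9780000000001</isbn>
  //         <salePrice><amount>12.99</amount><currency>GBP</currency></salePrice>
  //         <listPrice><amount>14.99</amount><currency>GBP</currency></listPrice>
  //       </basketItem>
  //     </basketItems>
  //   </purchase>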
private def price(priceNode: NodeSeq) =
Price(BigDecimal(priceNode.stringValue("amount")), priceNode.stringValue("currency"))
/** Get Event Context from fields of purchase message. */
def context(purchase: Purchase) =
EventHeader(originator = PurchaseTransformerService.Originator,
userId = Some(purchase.userId), transactionId = Some(purchase.basketId))
}
| blinkboxbooks/purchase-transformer.scala | src/main/scala/com/blinkbox/books/purchasetransformer/Purchase.scala | Scala | mit | 2,224 |
/*
* Copyright (c) 2016, Innoave.com
* All rights reserved.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL INNOAVE.COM OR ITS CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package io.scalatestfx.framework.scalatest
import javafx.stage.Stage
class ApplicationAdapter(
val applicationFixture: ApplicationFixture
) extends javafx.application.Application {
override def init() {
applicationFixture.init()
}
override def start(stage: Stage) {
applicationFixture.start(stage)
}
override def stop() {
applicationFixture.stop()
}
}
| haraldmaida/ScalaTestFX | scalatestfx/src/main/scala/io/scalatestfx/framework/scalatest/ApplicationAdapter.scala | Scala | apache-2.0 | 1,240 |
package cogdebugger.coggui3ports
import org.interactivemesh.scala.swing.InternalFrame
/**
* This event is used to signal that a ProbeFrame has sized itself and is
* ready for display. This is important for the probe desktop's window tiler,
* as it can't know where to place a ProbeFrame until the frame's final size
* is determined.
*/
case class FramePackedEvent(source: InternalFrame) extends scala.swing.event.UIEvent
| hpe-cct/cct-core | src/main/scala/cogdebugger/coggui3ports/FramePackedEvent.scala | Scala | apache-2.0 | 430 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.sbt.run
import com.lightbend.lagom.dev.Reloader
import com.lightbend.lagom.dev.Reloader.{ CompileFailure, CompileResult, CompileSuccess, Source }
import com.lightbend.lagom.sbt.Internal
import com.lightbend.lagom.sbt.LagomPlugin.autoImport._
import com.lightbend.lagom.sbt.LagomReloadableService.autoImport._
import sbt._
import sbt.Keys._
import play.api.PlayException
import play.sbt.PlayExceptions._
private[sbt] object RunSupport {
def reloadRunTask(
extraConfigs: Map[String, String]
): Def.Initialize[Task[Reloader.DevServer]] = Def.task {
val state = Keys.state.value
val scope = resolvedScoped.value.scope
val reloadCompile = () => RunSupport.compile(
() => Project.runTask(lagomReload in scope, state).map(_._2).get,
() => Project.runTask(lagomReloaderClasspath in scope, state).map(_._2).get,
() => Project.runTask(streamsManager in scope, state).map(_._2).get.toEither.right.toOption
)
val classpath = (devModeDependencies.value ++ (externalDependencyClasspath in Runtime).value).distinct.files
Reloader.startDevMode(
scalaInstance.value.loader,
classpath,
reloadCompile,
lagomClassLoaderDecorator.value,
lagomWatchDirectories.value,
lagomFileWatchService.value,
baseDirectory.value,
extraConfigs.toSeq ++ lagomDevSettings.value,
lagomServicePort.value,
RunSupport
)
}
def nonReloadRunTask(
extraConfigs: Map[String, String]
): Def.Initialize[Task[Reloader.DevServer]] = Def.task {
val classpath = (devModeDependencies.value ++ (fullClasspath in Runtime).value).distinct
val buildLinkSettings = extraConfigs.toSeq ++ lagomDevSettings.value
Reloader.startNoReload(scalaInstance.value.loader, classpath.map(_.data), baseDirectory.value, buildLinkSettings,
lagomServicePort.value)
}
private def devModeDependencies = Def.task {
(managedClasspath in Internal.Configs.DevRuntime).value
}
def compile(reloadCompile: () => Result[sbt.inc.Analysis], classpath: () => Result[Classpath], streams: () => Option[Streams]): CompileResult = {
reloadCompile().toEither
.left.map(compileFailure(streams()))
.right.map { analysis =>
classpath().toEither
.left.map(compileFailure(streams()))
.right.map { classpath =>
CompileSuccess(sourceMap(analysis), classpath.files)
}.fold(identity, identity)
}.fold(identity, identity)
}
def sourceMap(analysis: sbt.inc.Analysis): Map[String, Source] = {
analysis.apis.internal.foldLeft(Map.empty[String, Source]) {
case (sourceMap, (file, source)) => sourceMap ++ {
source.api.definitions map { d => d.name -> Source(file, originalSource(file)) }
}
}
}
def originalSource(file: File): Option[File] = {
play.twirl.compiler.MaybeGeneratedSource.unapply(file).map(_.file)
}
def compileFailure(streams: Option[Streams])(incomplete: Incomplete): CompileResult = {
CompileFailure(taskFailureHandler(incomplete, streams))
}
def taskFailureHandler(incomplete: Incomplete, streams: Option[Streams]): PlayException = {
Incomplete.allExceptions(incomplete).headOption.map {
case e: PlayException => e
case e: xsbti.CompileFailed =>
getProblems(incomplete, streams)
.find(_.severity == xsbti.Severity.Error)
.map(CompilationException)
.getOrElse(UnexpectedException(Some("The compilation failed without reporting any problem!"), Some(e)))
case e: Exception => UnexpectedException(unexpected = Some(e))
}.getOrElse {
UnexpectedException(Some("The compilation task failed without any exception!"))
}
}
def getScopedKey(incomplete: Incomplete): Option[ScopedKey[_]] = incomplete.node flatMap {
case key: ScopedKey[_] => Option(key)
case task: Task[_] => task.info.attributes get taskDefinitionKey
}
def getProblems(incomplete: Incomplete, streams: Option[Streams]): Seq[xsbti.Problem] = {
allProblems(incomplete) ++ {
Incomplete.linearize(incomplete).flatMap(getScopedKey).flatMap { scopedKey =>
val JavacError = """\\[error\\]\\s*(.*[.]java):(\\d+):\\s*(.*)""".r
val JavacErrorInfo = """\\[error\\]\\s*([a-z ]+):(.*)""".r
val JavacErrorPosition = """\\[error\\](\\s*)\\^\\s*""".r
streams.map { streamsManager =>
var first: (Option[(String, String, String)], Option[Int]) = (None, None)
var parsed: (Option[(String, String, String)], Option[Int]) = (None, None)
Output.lastLines(scopedKey, streamsManager, None).map(_.replace(scala.Console.RESET, "")).map(_.replace(scala.Console.RED, "")).collect {
case JavacError(file, line, message) => parsed = Some((file, line, message)) -> None
case JavacErrorInfo(key, message) => parsed._1.foreach { o =>
parsed = Some((parsed._1.get._1, parsed._1.get._2, parsed._1.get._3 + " [" + key.trim + ": " + message.trim + "]")) -> None
}
case JavacErrorPosition(pos) =>
parsed = parsed._1 -> Some(pos.size)
if (first == ((None, None))) {
first = parsed
}
}
first
}.collect {
case (Some(error), maybePosition) => new xsbti.Problem {
def message = error._3
def category = ""
def position = new xsbti.Position {
def line = xsbti.Maybe.just(error._2.toInt)
def lineContent = ""
def offset = xsbti.Maybe.nothing[java.lang.Integer]
def pointer = maybePosition.map(pos => xsbti.Maybe.just((pos - 1).asInstanceOf[java.lang.Integer])).getOrElse(xsbti.Maybe.nothing[java.lang.Integer])
def pointerSpace = xsbti.Maybe.nothing[String]
def sourceFile = xsbti.Maybe.just(file(error._1))
def sourcePath = xsbti.Maybe.just(error._1)
}
def severity = xsbti.Severity.Error
}
}
}
}
}
def allProblems(inc: Incomplete): Seq[xsbti.Problem] = {
allProblems(inc :: Nil)
}
def allProblems(incs: Seq[Incomplete]): Seq[xsbti.Problem] = {
problems(Incomplete.allExceptions(incs).toSeq)
}
def problems(es: Seq[Throwable]): Seq[xsbti.Problem] = {
es flatMap {
case cf: xsbti.CompileFailed => cf.problems
case _ => Nil
}
}
}
| edouardKaiser/lagom | dev/sbt-plugin/src/main/scala/com/lightbend/lagom/sbt/run/RunSupport.scala | Scala | apache-2.0 | 6,516 |
case class HasSingleField(f: HasSingleField)
object Test {
def main(args: Array[String]) = {
val s: Object = HasSingleField(null)
s match {
case Matcher(self) =>
assert(self ne null)
}
}
}
object Matcher {
def unapply(x: Object): Option[HasSingleField] = {
if (x.isInstanceOf[HasSingleField])
Some(x.asInstanceOf[HasSingleField])
else
None
}
}
| densh/dotty | tests/run/patmat-option-named.scala | Scala | bsd-3-clause | 401 |
package pkg
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
object Test {
// imports need special handling
val lb1: ListBuffer[Int] = null
val lb2: mutable.ListBuffer[Int] = null
def main(args: Array[String]): Unit = {
val world = Hello("World")
println(s"Hello ${world.name}")
}
// Char.toString is Any.toString and therefore
// needs special handling
def f(c: Char) = c.toString
// Nothing is a special case
val nothingCase = classOf[Nothing]
}
case class Hello(name: String)
| sschaef/tooling-research | sbt-plugin/src/sbt-test/sbt-amora/scala_2_11_8/src/main/scala/pkg/Test.scala | Scala | mit | 539 |
package gitbucket.core.view
import gitbucket.core.controller.Context
import gitbucket.core.service.RepositoryService.RepositoryInfo
import org.scalatest.FunSpec
import org.scalatest.mockito.MockitoSugar
class HelpersSpec extends FunSpec with MockitoSugar {
private implicit val context = mock[Context]
private val repository = mock[RepositoryInfo]
import helpers._
describe("detect and render links") {
it("should pass identical string when no link is present") {
val before = "Description"
val after = detectAndRenderLinks(before, repository)
assert(after == before)
}
it("should convert a single link") {
val before = "http://example.com"
val after = detectAndRenderLinks(before, repository)
assert(after == """<a href="http://example.com">http://example.com</a>""")
}
it("should convert a single link within trailing text") {
val before = "Example Project. http://example.com"
val after = detectAndRenderLinks(before, repository)
assert(after == """Example Project. <a href="http://example.com">http://example.com</a>""")
}
it("should convert a mulitple links within text") {
val before = "Example Project. http://example.com. (See also https://github.com/)"
val after = detectAndRenderLinks(before, repository)
assert(after == """Example Project. <a href="http://example.com">http://example.com</a>. (See also <a href="https://github.com/">https://github.com/</a>)""")
}
it("should properly escape html metacharacters") {
val before = "<>&"
val after = detectAndRenderLinks(before, repository)
assert(after == """<>&""")
}
it("should escape html metacharacters adjacent to a link") {
val before = "<http://example.com>"
val after = detectAndRenderLinks(before, repository)
assert(after == """<<a href="http://example.com">http://example.com</a>>""")
}
it("should stop link recognition at a metacharacter") {
val before = "http://exa<mple.com"
val after = detectAndRenderLinks(before, repository)
assert(after == """<a href="http://exa">http://exa</a><mple.com""")
}
it("should make sure there are no double quotes in the href attribute") {
val before = "http://exa\\"mple.com"
val after = detectAndRenderLinks(before, repository)
assert(after == """<a href="http://exa"mple.com">http://exa"mple.com</a>""")
}
}
}
| nobusugi246/gitbucket | src/test/scala/gitbucket/core/view/HelpersSpec.scala | Scala | apache-2.0 | 2,471 |
package com.shekhargulati.medium
import com.shekhargulati.medium.MediumApiProtocol._
import com.shekhargulati.medium.domainObjects._
import okhttp3.FormBody.Builder
import okhttp3._
import spray.json._
class MediumClient(clientId: String, clientSecret: String, var accessToken: Option[String] = None) {
val client = new OkHttpClient()
val baseApiUrl = new HttpUrl.Builder()
.scheme("https")
.host("api.medium.com")
.build()
/**
* Get a URL for users to authorize the application
*
   * @param state A string that will be passed back to the redirectUrl
   * @param redirectUrl The URL to redirect to after authorization
   * @param requestScope The scopes to grant the application
* @return authorization URL
*/
def getAuthorizationUrl(state: String, redirectUrl: String, requestScope: Array[String]): String = {
val httpUrl = baseApiUrl.resolve("/m/oauth/authorize").newBuilder()
.addQueryParameter("client_id", clientId)
.addQueryParameter("scope", requestScope.mkString(","))
.addQueryParameter("state", state)
.addQueryParameter("response_type", "code")
.addQueryParameter("redirect_uri", redirectUrl)
.build()
httpUrl.toString
}
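  // Illustrative OAuth flow sketch (client credentials, state, redirect URL and scope names
  // are placeholders):
  //   val client = MediumClient("clientId", "clientSecret")
  //   val url = client.getAuthorizationUrl("someState", "https://example.org/callback",
  //     Array("basicProfile", "publishPost"))
  //   // Send the user to `url`; after they authorize and return with a `code` parameter:
  //   // val token = client.exchangeAuthorizationCode(code, "https://example.org/callback")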
/**
* Exchange authorization code for a long-lived access token. This allows you to make authenticated requests on behalf of the user.
*
* @param code authorization code
* @return Access token
*/
def exchangeAuthorizationCode(code: String, redirectUrl: String): AccessToken = {
val httpUrl = baseApiUrl.resolve("/v1/tokens")
val body = new Builder()
.add("code", code)
.add("client_id", clientId)
.add("client_secret", clientSecret)
.add("grant_type", "authorization_code")
.add("redirect_uri", redirectUrl)
.build()
val request = new Request.Builder()
.header("Content-Type", "application/x-www-form-urlencoded")
.url(httpUrl)
.post(body)
.build()
val accessTokenObject = makeRequest(request, data => data.convertTo[AccessToken])
accessToken = Some(accessTokenObject.accessToken)
accessTokenObject
}
/**
* Get details of the authenticated user
*
* @return Returns details of the user who has granted permission to the application.
*/
def getUser: User = accessToken match {
case Some(at) => makeRequest(baseApiUrl.resolve("/v1/me"), at, data => data.convertTo[User])
case _ => mediumError("Please set access token")
}
/**
   * Returns a full list of publications that the user is related to in some way: This includes
   * all publications the user is subscribed to, writes to, or edits. This endpoint offers a set
   * of data similar to what you’ll see at https://medium.com/me/publications when logged in.
* @param userId id of a user
* @return a sequence of Publication
*/
def getPublicationsForUser(userId: String): Seq[Publication] = accessToken match {
case Some(at) => makeRequest(baseApiUrl.resolve(s"/v1/users/$userId/publications"), at, data => data.convertTo[Seq[Publication]])
case _ => mediumError("Please set access token")
}
/**
* Lists all contributors for a given publication
*
* @param publicationId id of the publication.
* @return a Sequence of contributors
*/
def getContributorsForPublication(publicationId: String): Seq[Contributor] = accessToken match {
case Some(at) => makeRequest(baseApiUrl.resolve(s"/v1/publications/$publicationId/contributors"), at, data => data.convertTo[Seq[Contributor]])
case _ => mediumError("Please set access token")
}
/**
* Creates a post on the authenticated user’s profile
* @param postRequest post request with data
* @return created Post
*/
def createPost(authorId: String, postRequest: PostRequest): Post = accessToken match {
case Some(at) =>
val httpUrl = baseApiUrl.resolve(s"/v1/users/$authorId/posts")
val request = new Request.Builder()
.header("Content-Type", "application/json")
.header("Accept", "application/json")
.header("Accept-Charset", "utf-8")
.header("Authorization", s"Bearer $at")
.url(httpUrl)
.post(RequestBody.create(MediaType.parse("application/json"), postRequest.toJson.prettyPrint))
.build()
makeRequest(request, data => data.convertTo[Post])
case _ => mediumError("Please set access token")
}
/**
* Creates a post on Medium and places it under specified publication.
* Please refer to the API documentation for rules around publishing in
* a publication: https://github.com/Medium/medium-api-docs
*
   * @param publicationId id of the publication to publish the post under
   * @param postRequest post request with data
   * @return created Post
*/
def createPostInPublication(publicationId: String, postRequest: PostRequest): Post = accessToken match {
case Some(at) =>
val httpUrl = baseApiUrl.resolve(s"/v1/publications/$publicationId/posts")
val request = new Request.Builder()
.header("Content-Type", "application/json")
.header("Accept", "application/json")
.header("Accept-Charset", "utf-8")
.header("Authorization", s"Bearer $at")
.url(httpUrl)
.post(RequestBody.create(MediaType.parse("application/json"), postRequest.toJson.prettyPrint))
.build()
makeRequest(request, data => data.convertTo[Post])
case _ => mediumError("Please set access token")
}
private def makeRequest[T](httpUrl: HttpUrl, at: String, f: (JsValue) => T)(implicit p: JsonReader[T]): T = {
val request = new Request.Builder()
.header("Content-Type", "application/json")
.header("Accept", "application/json")
.header("Accept-Charset", "utf-8")
.header("Authorization", s"Bearer $at")
.url(httpUrl)
.get()
.build()
makeRequest(request, f)
}
private def makeRequest[T](request: Request, f: (JsValue) => T)(implicit p: JsonReader[T]): T = {
val response = client.newCall(request).execute()
val responseJson: String = response.body().string()
response match {
case r if r.isSuccessful =>
val jsValue: JsValue = responseJson.parseJson
jsValue.asJsObject.getFields("data").headOption match {
case Some(data) => f(data)
case _ => jsValue.convertTo[T]
}
case _ => mediumError(s"Received HTTP error response code ${response.code()}" + responseJson.parseJson.convertTo[ErrorResponse].toString)
}
}
}
object MediumClient {
def apply(clientId: String, clientSecret: String): MediumClient = new MediumClient(clientId, clientSecret)
def apply(clientId: String, clientSecret: String, accessToken: String): MediumClient = new MediumClient(clientId, clientSecret, Some(accessToken))
}
case class MediumException(message: String, cause: Throwable = null) extends RuntimeException(message, cause)
| shekhargulati/medium-scala-sdk | src/main/scala/com/shekhargulati/medium/MediumClient.scala | Scala | apache-2.0 | 6,846 |
import scala.math.{ pow, sqrt, log10 }
val phi: Double = 1.61803398874989484820458683436563811772030917980576
def digitsInFib(n: Int): Int = {
(n * log10(phi) - log10(sqrt(5)) + 1.0).toInt
}
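// Why this works (Binet's formula): F(n) ~ phi^n / sqrt(5) for large n, so
// log10(F(n)) ~ n * log10(phi) - log10(sqrt(5)); flooring that and adding 1 gives the
// number of decimal digits, which is exactly what digitsInFib computes via .toInt.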
val q = Iterator.from(1).collectFirst { case n if digitsInFib(n) >= 1000 => n }
println(q.get)
| natw/project-euler | 25.scala | Scala | apache-2.0 | 292 |
package org.opencommercesearch.api.common
/*
* Licensed to OpenCommerceSearch under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. OpenCommerceSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import play.api.mvc.{AnyContent, Controller, Request}
import org.apache.solr.client.solrj.SolrQuery
import org.apache.commons.lang3.StringUtils
/**
* This trait provides subclasses with functionality to parse the list
* of fields that should be return in the response
*
* @author rmerizalde
*/
trait FieldList {
/**
* @deprecated
*/
def withFields(query: SolrQuery, fields: Option[String]) : SolrQuery = {
for (f <- fields) {
if (f.size > 0) { query.setFields(fields.get.split(','): _*) }
}
query
}
/**
   * Return a sequence with the list of fields to return from storage.
* @param request is the implicit request
   * @param allowStar Whether or not star ("*") should be allowed. If true then star ("*") is kept on the field list, otherwise it is removed. By default it is false.
* @param fieldsFieldName The fields field name to look for on the request. By default is "fields"
* @tparam R type of the request
* @return a sequence with the field names
*/
def fieldList[R](allowStar : Boolean = false, fieldsFieldName: String = "fields")(implicit request: Request[R]) : Seq[String] = {
val fields = request.getQueryString(fieldsFieldName)
var fieldsStr = fields.getOrElse(StringUtils.EMPTY)
var fieldSeparators = ","
if(!allowStar) {
fieldSeparators += "*"
} else {
// support field format like skus.* or skus.availability.*
fieldsStr = StringUtils.remove(fieldsStr, ".*")
}
StringUtils.split(fieldsStr, fieldSeparators)
}
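  // Illustrative behaviour sketch (hypothetical request values):
  //   ?fields=id,title with allowStar = false -> Seq("id", "title")
  //   ?fields=* with allowStar = false -> Seq() (the star is treated as a separator)
  //   ?fields=skus.*,id with allowStar = true -> Seq("skus", "id") (the ".*" suffix is removed)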
}
| madickson/opencommercesearch | opencommercesearch-api/app/org/opencommercesearch/api/common/FieldList.scala | Scala | apache-2.0 | 2,346 |
/******************************************************************************
Copyright (c) 2012-2014, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.bug_detector
import kr.ac.kaist.jsaf.analysis.cfg._
import kr.ac.kaist.jsaf.analysis.typing._
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.{SemanticsExpr => SE}
class CommonDetect(bugDetector: BugDetector) {
val cfg = bugDetector.cfg
val typing = bugDetector.typing
val bugStorage = bugDetector.bugStorage
val bugOption = bugDetector.bugOption
val varManager = bugDetector.varManager
val stateManager = bugDetector.stateManager
////////////////////////////////////////////////////////////////
// ConvertToNumber Check
////////////////////////////////////////////////////////////////
def convertToNumberCheck(node: Node, inst: CFGInst, expr1: CFGExpr, expr2: CFGExpr, doToPrimitive: Boolean, conditionFunction: (PValue, PValue) => Boolean): Unit = {
if(!bugOption.ConvertUndefToNum_Check) return
// Get spans
val expr1Span = expr1.getInfo match {
case Some(info) => info.getSpan
case None => inst.getInfo.get.getSpan
}
val expr2Span = if (expr2 == null) null else expr2.getInfo match {
case Some(info) => info.getSpan
case None => inst.getInfo.get.getSpan
}
// Get variable names
val varId1: String = varManager.getUserVarAssign(expr1) match {
case bv: BugVar0 if (bv.toString != "undefined") => " '" + bv.toString + "' can be undefined."
case _ => ""
}
val varId2: String = {
if (expr2 != null) {
varManager.getUserVarAssign(expr2) match {
case bv: BugVar0 if (bv.toString != "undefined") => " '" + bv.toString + "' can be undefined."
case _ => ""
}
}
else null
}
// Check for each CState
val bugCheckInstance = new BugCheckInstance()
val mergedCState = stateManager.getInputCState(node, inst.getInstId, bugOption.contextSensitive(ConvertUndefToNum))
for ((callContext, state) <- mergedCState) {
// For expr1
val value1: Value = SE.V(expr1, state.heap, state.context)._1
val pvalue1: PValue = if (doToPrimitive) Helper.toPrimitive_better(state.heap, value1) else value1.pvalue
// For expr2 (this can be null)
val value2: Value = if (expr2 != null) SE.V(expr2, state.heap, state.context)._1 else null
val pvalue2: PValue = if (expr2 != null) {if (doToPrimitive) Helper.toPrimitive_better(state.heap, value2) else value2.pvalue} else null
if (conditionFunction == null || conditionFunction(pvalue1, pvalue2)) {
if (!bugOption.ConvertUndefToNum_VariableMustHaveUndefinedOnly || pvalue1.typeCount == 1 && value1.locset.isEmpty) {
// Check possibility of being undefined
val checkInstance = bugCheckInstance.insert(pvalue1.undefval == UndefTop, expr1Span, callContext, state)
checkInstance.pValue = pvalue1
checkInstance.string1 = varId1
}
if (expr2 != null) {
if (!bugOption.ConvertUndefToNum_VariableMustHaveUndefinedOnly || pvalue2.typeCount == 1 && value2.locset.isEmpty) {
// Check possibility of being undefined
val checkInstance = bugCheckInstance.insert(pvalue2.undefval == UndefTop, expr2Span, callContext, state)
checkInstance.pValue = pvalue2
checkInstance.string1 = varId2
}
}
}
}
// Filter out bugs depending on options
if (!bugOption.ConvertUndefToNum_UndefMustBeConvertedInEveryState) {
bugCheckInstance.filter((bug, notBug) => (bug.pValue == notBug.pValue))
}
// Report bugs
for (checkInstance <- bugCheckInstance.bugList) bugStorage.addMessage(checkInstance.span, ConvertUndefToNum, inst, checkInstance.callContext, checkInstance.string1)
}
////////////////////////////////////////////////////////////////
// DefaultValue + ImplicitTypeConversion Check
////////////////////////////////////////////////////////////////
/*
def defaultValueCheck(inst: CFGInst, expr: CFGExpr, hint: String): Unit = {
val node = cfg.findEnclosingNode(inst)
val bugCheckInstance = new BugCheckInstance()
val mergedCState = stateManager.getInputCState(node, inst.getInstId, bugOption.contextSensitive(DefaultValueTypeError))
for ((callContext, state) <- mergedCState) {
val heap = state.heap
val context = state.context
val exprVal = SE.V(expr, heap, context)._1
val exprPVal = exprVal.pvalue
val exprLoc = exprVal.locset
val exprInfo = expr.getInfo
val name = varManager.getUserVarAssign(expr) match {
case name: BugVar0 => "'" + name.toString + "'"
case _ => "an object"
}
var isBuiltin = false
val checkOrder = hint match {
case "String" => Array("toString", "valueOf")
case "Number" => Array("valueOf", "toString")
}
// To check ImplicitTypeConversion in built-in functions
for(loc <- exprLoc) {
if(!isBuiltin) isBuiltin = heap(loc)("@function").funid.exists(fid => typing.builtinFset contains fid)
}
// Bug Detect
if(exprLoc.exists(loc => {
checkOrder.exists(funName => {
val funValue = heap(loc)(funName).objval.value
//println("Loc = " + loc + ", funName = " + funName + ", funValue = " + funValue)
if(funValue.locset.isEmpty) false
else {
if (isBuiltin && implicitTypeConversionCheck(funValue, funName)) true
else if (defaultValueTypeErrorCheck(funValue)) true
else false
}
})
})) infoCheck(DefaultValueTypeError)
////////////////////////////////////////////////////////////////
// DefaultValueTypeError Check
////////////////////////////////////////////////////////////////
def defaultValueTypeErrorCheck(value: Value): Boolean = {
/*for(loc <- value.locset) {
println(" loc = " + loc + ", ObjectName = " + kr.ac.kaist.jsaf.analysis.typing.domain.DomainPrinter.printLoc(loc) + ", isCallable = " + Helper.IsCallable(heap, loc))
for(fid <- heap(loc)("@function").funid) {
println(" fid = " + fid + ", function name = " + ModelManager.getFuncName(fid))
}
}*/
value.locset.exists(loc =>
Helper.IsCallable(heap, loc).getPair match {
case (AbsSingle, Some(b)) => b
case _ => false // Maybe
}
)
}
////////////////////////////////////////////////////////////////
// ImplicitTypeConversion Check
////////////////////////////////////////////////////////////////
def implicitTypeConversionCheck(value: Value, hint: String): Boolean = {
value.locset.exists(loc =>
heap(loc)("@function").funid.exists(fid =>
typing.builtinFset.get(fid) match {
case Some(builtinName) => !internalMethodMap(hint).contains(builtinName) && infoCheck(ImplicitCallToString)
case None => infoCheck(ImplicitCallValueOf)
}
)
)
}
////////////////////////////////////////////////////////////////
// Report bug if info exists
////////////////////////////////////////////////////////////////
def infoCheck(flag: BugKind): Boolean = {
exprInfo match {
case Some(info) => bugStorage.addMessage(info.getSpan, flag, null, null, name)
case None => System.out.println("bugDetector, Bug '%d'. Expression has no info.".format(flag))
}
exprInfo.isDefined
}
}
}
*/
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/bug_detector/CommonDetect.scala | Scala | bsd-3-clause | 7,917 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData}
// Wrapped in an object to check Scala compatibility. See SPARK-13929
object TestUDT {
@SQLUserDefinedType(udt = classOf[MyDenseVectorUDT])
private[sql] class MyDenseVector(val data: Array[Double]) extends Serializable {
override def hashCode(): Int = java.util.Arrays.hashCode(data)
override def equals(other: Any): Boolean = other match {
case v: MyDenseVector => java.util.Arrays.equals(this.data, v.data)
case _ => false
}
override def toString: String = data.mkString("(", ", ", ")")
}
private[sql] class MyDenseVectorUDT extends UserDefinedType[MyDenseVector] {
override def sqlType: DataType = ArrayType(DoubleType, containsNull = false)
override def serialize(features: MyDenseVector): ArrayData = {
new GenericArrayData(features.data.map(_.asInstanceOf[Any]))
}
override def deserialize(datum: Any): MyDenseVector = {
datum match {
case data: ArrayData =>
new MyDenseVector(data.toDoubleArray())
}
}
override def userClass: Class[MyDenseVector] = classOf[MyDenseVector]
private[spark] override def asNullable: MyDenseVectorUDT = this
override def hashCode(): Int = getClass.hashCode()
override def equals(other: Any): Boolean = other.isInstanceOf[MyDenseVectorUDT]
}
}
| pgandhi999/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/types/TestUDT.scala | Scala | apache-2.0 | 2,211 |
package parsec
trait HListParsers
extends HListParsable
with HListProjectable
with HListBoolean
with Parsers
with RepetitionParsers
| manojo/parsequery | macros/src/main/scala/parsec/HListParsers.scala | Scala | mit | 156 |
package org.jetbrains.plugins.scala
import com.intellij.openapi.application.WriteAction
import com.intellij.openapi.module.Module
import com.intellij.openapi.project.Project
import com.intellij.testFramework.LightProjectDescriptor
import com.intellij.util.ThrowableRunnable
/**
* Nikolay.Tropin
* 22-Sep-17
*/
abstract class DelegatingProjectDescriptor(val delegate: LightProjectDescriptor) extends LightProjectDescriptor {
override def setUpProject(project: Project, handler: LightProjectDescriptor.SetupHandler) =
delegate.setUpProject(project, handler)
override def createSourcesRoot(module: Module) =
delegate.createSourcesRoot(module)
override def getModuleType =
delegate.getModuleType
override def createMainModule(project: Project) =
delegate.createMainModule(project)
override def getSdk =
delegate.getSdk
}
object DelegatingProjectDescriptor {
def withAfterSetupProject(delegate: LightProjectDescriptor)(work: ThrowableRunnable[Nothing]): LightProjectDescriptor = {
new DelegatingProjectDescriptor(delegate) {
override def setUpProject(project: Project, handler: LightProjectDescriptor.SetupHandler): Unit = {
super.setUpProject(project, handler)
WriteAction.run(work)
}
}
}
}
| jastice/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/DelegatingProjectDescriptor.scala | Scala | apache-2.0 | 1,269
/*
* Copyright 2015 Marconi Lanna
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
case class REPLesent(
width: Int = 0
, height: Int = 0
, input: String = "REPLesent.txt"
, slideCounter: Boolean = false
, slideTotal: Boolean = false
, intp: scala.tools.nsc.interpreter.IMain = null
) {
import scala.util.Try
private case class Config(
top: String = "*"
, bottom: String = "*"
, sinistral: String = "* "
, dextral: String = " *"
, newline: String = System.lineSeparator
, private val width: Int
, private val height: Int
) {
val (screenWidth, screenHeight): (Int, Int) = {
val defaultWidth = 80
val defaultHeight = 25
if (width > 0 && height > 0) (width, height) else {
// Experimental support for screen size auto-detection.
// Supports only Unix-like systems, including Mac OS X and Linux.
// Does not work with Microsoft Windows.
val Array(h, w) = Try {
import scala.sys.process._
val stty = Seq("sh", "-c", "stty size < /dev/tty").!!
stty.trim.split(' ') map { _.toInt }
} getOrElse Array(0, 0)
val screenWidth = Seq(width, w) find { _ > 0 } getOrElse defaultWidth
val screenHeight = Seq(height, h) find { _ > 0 } getOrElse defaultHeight
(screenWidth, screenHeight)
}
}
private def fill(s: String): String = if (s.isEmpty) s else {
val t = s * (screenWidth / s.length)
t + s.take(screenWidth - t.length)
}
val topRow = fill(top) + newline
val bottomRow = fill(bottom)
val verticalSpace = screenHeight - 3 // accounts for header, footer, and REPL prompt
val horizontalSpace = screenWidth - sinistral.length - dextral.length
val whiteSpace = " "
val blankLine = sinistral + {
if (dextral.isEmpty) "" else whiteSpace * horizontalSpace + dextral
} + newline
}
private val config = Config(width = width, height = height)
private case class Line(content: String, length: Int, private val style: Line.Style) {
override def toString: String = content
def isEmpty: Boolean = content.isEmpty
def render(margin: Int): String = style(this, margin)
}
private object Line {
import scala.io.AnsiColor._
protected sealed trait Style {
import config.whiteSpace
protected def horizontalSpace = config.horizontalSpace
protected def fill(line: Line, left: Int, right: Int): String = {
whiteSpace * left + line + whiteSpace * right
}
def apply(line: Line, margin: Int): String
}
private object HorizontalRuler extends Style {
private val ansiBegin = RESET.head
private val ansiEnd = RESET.last
private val defaultPattern = Line("-")
def apply(line: Line, margin: Int): String = {
// Provides a default pattern if none was specified
val pattern = if (line.isEmpty) defaultPattern else line
val width = horizontalSpace - margin
val repeats = width / pattern.length
val content = pattern.toString * repeats
var remaining = width - repeats * pattern.length
var ansi = false
var reset = ""
val padding = pattern.toString takeWhile { c =>
val continue = remaining > 0
if (continue) c match {
case `ansiEnd` if ansi => ansi = false
case _ if ansi => // no-op
case `ansiBegin` => ansi = true; reset = RESET
case c if Character.isHighSurrogate(c) => // no-op
case _ => remaining -= 1
}
continue
}
val left = margin / 2
val right = margin - left
val l = Line(content + padding + reset, width, LeftAligned)
fill(l, left, right)
}
}
private object FullScreenHorizontalRuler extends Style {
def apply(line: Line, ignored: Int): String = HorizontalRuler(line, 0)
}
private object LeftFlushed extends Style {
def apply(line: Line, ignored: Int): String = {
val left = 0
val right = horizontalSpace - line.length
fill(line, left, right)
}
}
private object LeftAligned extends Style {
def apply(line: Line, margin: Int): String = {
val left = margin / 2
val right = horizontalSpace - left - line.length
fill(line, left, right)
}
}
private object Centered extends Style {
def apply(line: Line, ignored: Int): String = {
val margin = horizontalSpace - line.length
val left = margin / 2
val right = margin - left
fill(line, left, right)
}
}
private object RightAligned extends Style {
def apply(line: Line, margin: Int): String = {
val right = (margin + 1) / 2
val left = horizontalSpace - right - line.length
fill(line, left, right)
}
}
private object RightFlushed extends Style {
def apply(line: Line, ignored: Int): String = {
val left = horizontalSpace - line.length
val right = 0
fill(line, left, right)
}
}
private def style(line: String): (String, Style) = line match {
case s if s startsWith "<< " => (s.drop(3), LeftFlushed)
case s if s startsWith "< " => (s.drop(2), LeftAligned)
case s if s startsWith "| " => (s.drop(2), Centered)
case s if s startsWith "> " => (s.drop(2), RightAligned)
case s if s startsWith ">> " => (s.drop(3), RightFlushed)
case s if s startsWith "//" => (s.drop(2), FullScreenHorizontalRuler)
case s if s startsWith "/" => (s.drop(1), HorizontalRuler)
case s: String => (s, LeftAligned)
}
    private val ansiEscape = """\\.""".r
private val ansiColor = Map(
'b' -> BLUE,
'c' -> CYAN,
'g' -> GREEN,
'k' -> BLACK,
'm' -> MAGENTA,
'r' -> RED,
'w' -> WHITE,
'y' -> YELLOW,
'B' -> BLUE_B,
'C' -> CYAN_B,
'G' -> GREEN_B,
'K' -> BLACK_B,
'M' -> MAGENTA_B,
'R' -> RED_B,
'W' -> WHITE_B,
'Y' -> YELLOW_B,
'!' -> REVERSED,
'*' -> BOLD,
'_' -> UNDERLINED
)
private def ansi(line: String): (String, Int) = {
var drop = 0
var reset = ""
val content: String = ansiEscape replaceAllIn (line, m =>
m.matched(1) match {
case c if ansiColor.contains(c) => drop += 2; reset = RESET; ansiColor(c)
case 's' => drop += 2; RESET
          case '\\' => drop += 1; "\\\\"
          case c: Char => "\\\\" + c
}
)
(content + reset, drop)
}
    private val emojiEscape = """:([\w+\-]+):""".r
private lazy val emojis: Map[String, String] = {
Try {
val input = io.Source.fromFile("emoji.txt").getLines
input.map { l =>
val a = l.split(' ')
(a(1), a(0))
}.toMap
} getOrElse Map.empty
}
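    // Note (added comment): judging from the parser above, emoji.txt is expected to contain one
    // mapping per line in the form "<emoji character> <name>", so that slides can use the
    // :name: shorthand. The exact file format is inferred from this code, not from upstream docs.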
private def emoji(line: String): (String, Int) = {
var drop = 0
val content: String = emojiEscape replaceAllIn (line, m => {
m.group(1) match {
case e if emojis.contains(e) => drop += m.matched.length - 1; emojis(e)
case _ => m.matched
}
})
(content, drop)
}
def apply(line: String): Line = {
val (l1, lineStyle) = style(line)
val (l2, ansiDrop) = ansi(l1)
val (content, emojiDrop) = emoji(l2)
val length = l1.codePointCount(0, l1.length) - ansiDrop - emojiDrop
Line(content = content, length = length, style = lineStyle)
}
}
// `size` and `maxLength` refer to the dimensions of the slide's last build
private case class Build(content: IndexedSeq[Line], size: Int, maxLength: Int, footer: Line)
private case class Slide(content: IndexedSeq[Line], builds: IndexedSeq[Int], code: IndexedSeq[String]) {
private val maxLength = content.maxBy(_.length).length
def lastBuild: Int = builds.size - 1
def hasBuild(n: Int): Boolean = builds.isDefinedAt(n)
def build(n: Int, footer: Line): Build = Build(content.take(builds(n)), content.size, maxLength, footer)
}
private case class Deck(slides: IndexedSeq[Slide]) {
private var slideCursor = -1
private var buildCursor = 0
private def currentSlideIsDefined: Boolean = slides.isDefinedAt(slideCursor)
private def currentSlide: Slide = slides(slideCursor)
private def footer: Line = {
val sb = StringBuilder.newBuilder
if (slideCounter) {
sb ++= ">> " + (slideCursor + 1)
if (slideTotal) sb ++= "/" + slides.size
sb ++= " "
}
Line(sb.mkString)
}
private def select(slide: Int = slideCursor, build: Int = 0): Option[Build] = {
import math.{max, min}
// "Stops" the cursor one position after/before the last/first slide to avoid
// multiple next/previous calls taking it indefinitely away from the deck
slideCursor = max(-1, min(slides.size, slide))
buildCursor = build
if (currentSlideIsDefined && currentSlide.hasBuild(buildCursor)) {
Some(currentSlide.build(buildCursor, footer))
} else {
None
}
}
def jumpTo(n: Int): Option[Build] = select(slide = n)
def jump(n: Int): Option[Build] = jumpTo(slideCursor + n)
def nextBuild: Option[Build] = select(build = buildCursor + 1) orElse jump(1)
def redrawBuild: Option[Build] = select(build = buildCursor)
def previousBuild: Option[Build] = select(build = buildCursor - 1) orElse {
jump(-1) flatMap { _ =>
select(build = currentSlide.lastBuild)
}
}
def lastSlide: Option[Build] = jumpTo(slides.size - 1)
def lastBuild: Option[Build] = jumpTo(slides.size) orElse previousBuild
def runCode: Unit = {
val code = currentSlide.code(buildCursor)
if (repl.isEmpty) {
Console.err.print(s"No reference to REPL found. Please call with parameter intp=$$intp")
} else if (code.isEmpty) {
Console.err.print("No code for you")
} else {
repl foreach { _ interpret code }
}
}
}
private val helpMessage = """Usage:
| next n > go to next build/slide
| previous p < go back to previous build/slide
| redraw z redraw the current build/slide
| Next N >> go to next slide
| Previous P << go back to previous slide
| i next i n advance i slides
| i previous i p go back i slides
| i go i g go to slide i
| first f |< go to first slide
| last l >| go to last slide
| Last L >>| go to last build of last slide
| run r !! execute code that appears on slide
| blank b blank screen
| help h ? print this help message""".stripMargin
private val repl = Option(intp)
private val deck = Deck(parseFile(input))
private def parseFile(file: String): IndexedSeq[Slide] = {
Try {
val input = io.Source.fromFile(file).getLines
parse(input)
} getOrElse {
Console.err.print(s"Sorry, could not parse file $file. Quick, say something funny before anyone notices!")
IndexedSeq.empty
}
}
private def parse(input: Iterator[String]): IndexedSeq[Slide] = {
sealed trait Parser {
def switch: Parser
def apply(line: String): (Line, Option[String])
}
object LineParser extends Parser {
def switch: Parser = CodeParser
def apply(line: String): (Line, Option[String]) = (Line(line), None)
}
object CodeParser extends Parser {
      private val regex = Seq(
        "\\m" -> ("""\b(?:abstract|case|catch|class|def|do|else|extends|final|finally|for|""" +
          """forSome|if|implicit|import|lazy|match|new|object|override|package|private|""" +
          """protected|return|sealed|super|throw|trait|try|type|val|var|while|with|yield)\b""").r
        , "\\g" -> """\b(?:true|false|null|this)\b""".r
        , "\\b" -> ("""(?i)\b(?:(?:0(?:[0-7]+|X[0-9A-F]+))L?|(?:(?:0|[1-9][0-9]*)""" +
          """(?:(?:\.[0-9]+)?(?:E[+\-]?[0-9]+)?F?|L?))|\\.[0-9]+(?:E[+\-]?[0-9]+)?F?)\b""").r
        , "\\*" -> """\b[$_]*[A-Z][_$A-Z0-9]*[\w$]*\b""".r
)
def switch: Parser = LineParser
def apply(line: String): (Line, Option[String]) = {
val l = Line("< " + (line /: regex) { case (line, (color, regex)) =>
regex replaceAllIn (line, m =>
            color + m + "\\\\s"
)
})
(l, Option(line))
}
}
case class Acc(
content: IndexedSeq[Line] = IndexedSeq.empty
, builds: IndexedSeq[Int] = IndexedSeq.empty
, deck: IndexedSeq[Slide] = IndexedSeq.empty
, code: IndexedSeq[String] = IndexedSeq.empty
, codeAcc: IndexedSeq[String] = IndexedSeq.empty
, parser: Parser = LineParser
) {
import config.newline
def switchParser: Acc = copy(parser = parser.switch)
def append(line: String): Acc = {
val (l, c) = parser(line)
copy(content = content :+ l, codeAcc = c.fold(codeAcc)(codeAcc :+ _))
}
def pushBuild: Acc = copy(
builds = builds :+ content.size
, code = code :+ (codeAcc mkString newline)
, codeAcc = IndexedSeq.empty
)
def pushSlide: Acc = {
if (content.isEmpty) {
append("").pushSlide
} else {
val finalBuild = pushBuild
val slide = Slide(content, finalBuild.builds, finalBuild.code)
Acc(deck = deck :+ slide)
}
}
}
val slideSeparator = "---"
val buildSeparator = "--"
val codeDelimiter = "```"
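    // Illustrative slide source (added comment, not from the original project): `---` separates
    // slides, `--` separates incremental builds, and ``` toggles code highlighting, e.g.
    //
    //   | Centered title
    //   --
    //   < left-aligned bullet
    //   ```
    //   val answer = 42
    //   ```
    //   ---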
val acc = (Acc() /: input) { (acc, line) =>
line match {
case `slideSeparator` => acc.pushSlide
case `buildSeparator` => acc.pushBuild
case `codeDelimiter` => acc.switchParser
case _ => acc.append(line)
}
}.pushSlide
acc.deck
}
private def render(build: Build): String = {
import config._
val topPadding = (verticalSpace - build.size) / 2
val bottomPadding = verticalSpace - topPadding - build.content.size
val margin = horizontalSpace - build.maxLength
val sb = StringBuilder.newBuilder
def render(line: Line): StringBuilder = {
sb ++= sinistral
sb ++= line.render(margin)
sb ++= dextral
sb ++= newline
}
sb ++= topRow
sb ++= blankLine * topPadding
build.content foreach render
if (slideCounter && bottomPadding > 0) {
sb ++= blankLine * (bottomPadding - 1)
render(build.footer)
} else {
sb ++= blankLine * bottomPadding
}
sb ++= bottomRow
sb.mkString
}
private def show(build: Option[Build]): Unit = {
if (build.isEmpty) Console.err.print("No slide for you")
build foreach { b => print(render(b)) }
}
implicit class Ops(val i: Int) {
def next: Unit = show(deck.jump(i))
def n: Unit = next
def previous: Unit = show(deck.jump(-i))
def p: Unit = previous
def go: Unit = show(deck.jumpTo(i - 1))
def g: Unit = go
}
def next: Unit = show(deck.nextBuild)
def n: Unit = next
def > : Unit = next
def previous: Unit = show(deck.previousBuild)
def p: Unit = previous
def < : Unit = previous
def redraw: Unit = show(deck.redrawBuild)
def z: Unit = redraw
def Next: Unit = 1.next
def N: Unit = Next
def >> : Unit = Next
def Previous: Unit = 1.previous
def P: Unit = Previous
def << : Unit = Previous
def first: Unit = 1.go
def f: Unit = first
def |< : Unit = first
def last: Unit = show(deck.lastSlide)
def l: Unit = last
def >| : Unit = last
def Last: Unit = show(deck.lastBuild)
def L: Unit = Last
def >>| : Unit = Last
def run: Unit = deck.runCode
def r: Unit = run
def !! : Unit = run
def blank: Unit = print(config.newline * config.screenHeight)
def b: Unit = blank
def help: Unit = print(helpMessage)
def h: Unit = help
def ? : Unit = help
}
| MaxWorgan/talk | src/main/scala/REPLesent.scala | Scala | apache-2.0 | 16,552 |
package org.scalaide.extensions
package autoedits
import org.eclipse.jface.text.IRegion
import org.scalaide.core.text.Add
import org.scalaide.core.text.Replace
import org.scalaide.util.eclipse.RegionUtils._
import scalariform.lexer._
object SurroundBlockSetting extends AutoEditSetting(
id = ExtensionSetting.fullyQualifiedName[SurroundBlock],
name = "Surround a block with curly braces",
description = ExtensionSetting.formatDescription(
"""|In Scala, it happens very often that users write a definition that \\
|contains only a single expression. In these cases one can leave out \\
|curly braces:
|
| def id(i: Int) =
| i
|
|Often, it happens that such a single expression needs to be expanded \\
|into multiple expressions, where braces need to be added. This auto \\
|edit helps in such cases and automatically adds the curly brace \\
|whenever the opening brace is inserted at the beginning of the block, \\
|which is in this case after the equal sign:
|
| def id(i: Int) = ^
| i
|
|Here, ^ denotes the position of the cursor. Inserting `{` into the \\
|document results in:
|
| def id(i: Int) = {^
| i
| }
|
|Note: The opening curly brace needs to be the last character of the \\
|line (excluding whitespace), otherwise no ending curly brace is added.
|""".stripMargin)
)
trait SurroundBlock extends AutoEdit {
override def setting = SurroundBlockSetting
override def perform() = {
val elseLikeTokens = Set(Tokens.ELSE, Tokens.CATCH, Tokens.FINALLY)
check(textChange) {
case Add(start, "{") =>
surroundLocation(start) map {
case (pos, indentLen, token) =>
val sep = System.getProperty("line.separator")
val indent = " " * indentLen
val change = if (elseLikeTokens(token.tokenType))
Replace(start, pos + indentLen, s"{${document.textRange(start, pos)}$indent} ")
else
Replace(start, pos, s"{${document.textRange(start, pos)}$indent}$sep")
change.withCursorPos(start+1)
}
}
}
/**
* Returns a triple with the position where the closing curly brace should be inserted,
* the indentation of the line where the opening curly brace is inserted into the document
* and the first token after the insertion point.
*
* In case no insertion position could be found, `None` is returned.
*/
private def surroundLocation(offset: Int): Option[(Int, Int, Token)] = {
def indentLenOfLine(line: IRegion) = {
val text = document.textRange(line.start, line.end)
text.takeWhile(Character.isWhitespace).length()
}
val firstLine = document.lineInformationOfOffset(offset)
val firstIndent = indentLenOfLine(firstLine)
val lexer = ScalaLexer.createRawLexer(document.textRange(offset, document.length-1), forgiveErrors = true)
def loop(): Option[(Int, Token)] =
if (!lexer.hasNext)
None
else {
import Tokens._
val t = lexer.next()
if (t.tokenType == RBRACE
|| t.tokenType == VARID
|| COMMENTS.contains(t.tokenType)
|| (Tokens.KEYWORDS contains t.tokenType)) {
val line = document.lineInformationOfOffset(t.offset+offset)
val indent = indentLenOfLine(line)
if (t.tokenType == Tokens.RBRACE && indent == firstIndent)
None
else if (indent <= firstIndent) {
var prevLine = document.lineInformationOfOffset(line.start-1)
while (prevLine.trim(document).length == 0)
prevLine = document.lineInformationOfOffset(prevLine.start-1)
if (prevLine.start == firstLine.start)
None
else
Some((prevLine.end+1, t))
}
else
loop()
}
else
loop()
}
if (offset == firstLine.trimRight(document).end+1)
loop() map { case (line, token) => (line, firstIndent, token) }
else
None
}
}
| aleksi-lukkarinen/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/extensions/autoedits/SurroundBlock.scala | Scala | bsd-3-clause | 4,213 |
package com.twitter.finagle.netty4.channel
import com.twitter.finagle.stats.InMemoryStatsReceiver
import io.netty.channel._
import io.netty.util.{AttributeKey, Attribute}
import java.util.concurrent.atomic.AtomicInteger
import org.junit.runner.RunWith
import org.mockito.Mockito.when
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
class ChannelRequestStatsHandlerTest extends FunSuite with MockitoSugar {
def mkAttr(ai: AtomicInteger): Attribute[AtomicInteger] = new Attribute[AtomicInteger] {
def set(value: AtomicInteger): Unit = ai.set(value.get())
def get(): AtomicInteger = ai
def key(): AttributeKey[AtomicInteger] = ???
def getAndRemove(): AtomicInteger = ???
def remove(): Unit = ???
def compareAndSet(oldValue: AtomicInteger, newValue: AtomicInteger): Boolean = ???
def setIfAbsent(value: AtomicInteger): AtomicInteger = ???
def getAndSet(value: AtomicInteger): AtomicInteger = ???
}
test("ChannelRequestStatsHandler counts messages") {
val sr = new InMemoryStatsReceiver()
def requestsEqual(requests: Seq[Float]) =
assert(
sr.stat("connection_requests")() == requests
)
val handler = new ChannelRequestStatsHandler(sr)
requestsEqual(Seq.empty[Float])
val ctx = mock[ChannelHandlerContext]
val chan = mock[Channel]
val reqAttr = mkAttr(new AtomicInteger(0))
when(ctx.channel).thenReturn(chan)
when(chan.attr(ChannelRequestStatsHandler.ConnectionRequestsKey)).thenReturn(reqAttr)
val msg = new Object
// first connection sends two messages
handler.handlerAdded(ctx)
handler.channelRead(ctx, msg)
handler.channelRead(ctx, msg)
handler.channelInactive(ctx)
// second connection sends zero
handler.handlerAdded(ctx)
handler.channelInactive(ctx)
// third connection sends one
handler.handlerAdded(ctx)
handler.channelRead(ctx, msg)
handler.channelInactive(ctx)
requestsEqual(Seq(2.0f, 0.0f, 1.0f))
}
}
| adriancole/finagle | finagle-netty4/src/test/scala/com/twitter/finagle/netty4/channel/ChannelRequestStatsHandlerTest.scala | Scala | apache-2.0 | 2,064 |
package controllers
import akka.actor.ActorSystem
import akka.stream.Materializer
import com.google.inject.Inject
import play.api.mvc._
import shared.SharedMessages
import play.api.libs.streams.ActorFlow
import sparkConn.{SparkCommon, TwitterStreamer}
import sparkStream._
class Application @Inject() (implicit system: ActorSystem,
materializer: Materializer/*,
twitterStreamer: TwitterStreamer*/) extends Controller {
def index = Action {
Ok(views.html.index(SharedMessages.itWorks))
}
def sparkSocket = WebSocket.accept[String,String] { request =>
ActorFlow.actorRef(out => WebSocketActor.props(out))
}
def setupLogging() = {
import org.apache.log4j.{Level,Logger}
val rootLogger = Logger.getRootLogger
rootLogger.setLevel(Level.ERROR)
}
def fontDir(file: String) = Action.async {
implicit request => {
controllers.Assets.at("/../client/src/main/resources/", file, false).apply(request)
}
}
def sparkBookData = Action {
val sc = SparkCommon.sc
//System.setProperty("twitter4j.oauth")
val inputList = sc.parallelize(List(1,2,3,4))
val text = "Test123" + inputList.collect().foldLeft(" ")((a,b) => a + b)
val input = sc.textFile("book.txt")
var words = input.flatMap(line => line.split(' '))
val lowerCaseWords = words.map(_.toLowerCase())
val wordCounts = lowerCaseWords.countByValue()
val sample = wordCounts.take(20)
for ((word, count) <- sample) {
println(word + " " + count)
}
//val ssc = SparkCommon.ssc
Ok(text)
}
}
| gvatn/play-scalajs-webgl-spark | server/app/controllers/Application.scala | Scala | mit | 1,605 |
package com.yuzhouwan.bigdata.spark.streaming
import breeze.linalg.DenseVector
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.{LabeledPoint, StreamingLinearRegressionWithSGD}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming._
/**
* Copyright @ 2019 yuzhouwan.com
* All right reserved.
* Function: ML Analysis
*
* @author Benedict Jin
* @since 2015/8/13
*/
object MLAnalysis {
def main(args: Array[String]) {
if (args.length < 4) {
      System.err.println("Usage: WindowCounter <master> <hostname> <port> <interval> \n" +
"In local mode, <master> should be 'local[n]' with n > 1")
System.exit(1)
}
val ssc = new StreamingContext(args(0), "ML Analysis", Seconds(args(3).toInt))
val stream = ssc.socketTextStream(args(1), args(2).toInt, StorageLevel.MEMORY_ONLY_SER)
val NumFeatures = 100
val zeroVector = DenseVector.zeros[Double](NumFeatures)
val model = new StreamingLinearRegressionWithSGD()
.setInitialWeights(Vectors.dense(zeroVector.data))
.setNumIterations(1)
.setStepSize(0.01)
val labeledStream = stream.map { event =>
      val split = event.split("\t")
val y = split(0).toDouble
val features = split(1).split(",").map(_.toDouble)
LabeledPoint(label = y, features = Vectors.dense(features))
}
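    // Expected input format (added comment, inferred from the parsing above rather than from the
    // original producer): each line is "<label>\t<f1>,<f2>,...", e.g.
    //   "0.5\t0.1,0.2,0.3,..."
    // with NumFeatures (100) comma-separated feature values after the tab.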
model.trainOn(labeledStream)
val predictAndTrue = labeledStream.transform { rdd =>
val latest = model.latestModel()
rdd.map { point =>
val predict = latest.predict(point.features)
predict - point.label
}
}
predictAndTrue.foreachRDD { (rdd, time) =>
val mse = rdd.map { case (err) => math.pow(err, 2.0) }.mean()
val rmse = math.sqrt(mse)
println(
s"""
|-------------------------------------------
|Time: $time
|-------------------------------------------
""".stripMargin)
println(s"MSE current batch: Model : $mse")
println(s"RMSE current batch: Model : $rmse")
      println("...\n")
}
ssc.start()
ssc.awaitTermination()
}
}
| asdf2014/yuzhouwan | yuzhouwan-bigdata/yuzhouwan-bigdata-spark/src/main/scala/com/yuzhouwan/bigdata/spark/streaming/MLAnalysis.scala | Scala | apache-2.0 | 2,166 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.linalg.distributed
import breeze.linalg.{DenseMatrix => BDM}
import org.apache.spark.annotation.Since
import org.apache.spark.mllib.linalg.{Matrix, SparseMatrix, Vectors}
import org.apache.spark.rdd.RDD
/**
 * Represents an entry in a distributed matrix.
* @param i row index
* @param j column index
* @param value value of the entry
*/
@Since("1.0.0")
case class MatrixEntry(i: Long, j: Long, value: Double)
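/*
 * Usage sketch (added for illustration; `sc` is assumed to be an existing SparkContext):
 *
 *   val entries = sc.parallelize(Seq(MatrixEntry(0, 0, 1.2), MatrixEntry(1, 2, 3.4)))
 *   val mat = new CoordinateMatrix(entries)    // dimensions inferred from the max indices
 *   val blocks = mat.toBlockMatrix(1024, 1024) // regroup into 1024 x 1024 SparseMatrix blocks
 */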
/**
* Represents a matrix in coordinate format.
*
* @param entries matrix entries
* @param nRows number of rows. A non-positive value means unknown, and then the number of rows will
* be determined by the max row index plus one.
* @param nCols number of columns. A non-positive value means unknown, and then the number of
* columns will be determined by the max column index plus one.
*/
@Since("1.0.0")
class CoordinateMatrix @Since("1.0.0") (
@Since("1.0.0") val entries: RDD[MatrixEntry],
private var nRows: Long,
private var nCols: Long) extends DistributedMatrix {
/** Alternative constructor leaving matrix dimensions to be determined automatically. */
@Since("1.0.0")
def this(entries: RDD[MatrixEntry]) = this(entries, 0L, 0L)
/** Gets or computes the number of columns. */
@Since("1.0.0")
override def numCols(): Long = {
if (nCols <= 0L) {
computeSize()
}
nCols
}
/** Gets or computes the number of rows. */
@Since("1.0.0")
override def numRows(): Long = {
if (nRows <= 0L) {
computeSize()
}
nRows
}
/** Transposes this CoordinateMatrix. */
@Since("1.3.0")
def transpose(): CoordinateMatrix = {
new CoordinateMatrix(entries.map(x => MatrixEntry(x.j, x.i, x.value)), numCols(), numRows())
}
/** Converts to IndexedRowMatrix. The number of columns must be within the integer range. */
@Since("1.0.0")
def toIndexedRowMatrix(): IndexedRowMatrix = {
val nl = numCols()
if (nl > Int.MaxValue) {
sys.error(s"Cannot convert to a row-oriented format because the number of columns $nl is " +
"too large.")
}
val n = nl.toInt
val indexedRows = entries.map(entry => (entry.i, (entry.j.toInt, entry.value)))
.groupByKey()
.map { case (i, vectorEntries) =>
IndexedRow(i, Vectors.sparse(n, vectorEntries.toSeq))
}
new IndexedRowMatrix(indexedRows, numRows(), n)
}
/**
* Converts to RowMatrix, dropping row indices after grouping by row index.
* The number of columns must be within the integer range.
*/
@Since("1.0.0")
def toRowMatrix(): RowMatrix = {
toIndexedRowMatrix().toRowMatrix()
}
/** Converts to BlockMatrix. Creates blocks of [[SparseMatrix]] with size 1024 x 1024. */
@Since("1.3.0")
def toBlockMatrix(): BlockMatrix = {
toBlockMatrix(1024, 1024)
}
/**
* Converts to BlockMatrix. Creates blocks of [[SparseMatrix]].
* @param rowsPerBlock The number of rows of each block. The blocks at the bottom edge may have
* a smaller value. Must be an integer value greater than 0.
* @param colsPerBlock The number of columns of each block. The blocks at the right edge may have
* a smaller value. Must be an integer value greater than 0.
* @return a [[BlockMatrix]]
*/
@Since("1.3.0")
def toBlockMatrix(rowsPerBlock: Int, colsPerBlock: Int): BlockMatrix = {
require(rowsPerBlock > 0,
s"rowsPerBlock needs to be greater than 0. rowsPerBlock: $rowsPerBlock")
require(colsPerBlock > 0,
s"colsPerBlock needs to be greater than 0. colsPerBlock: $colsPerBlock")
val m = numRows()
val n = numCols()
val numRowBlocks = math.ceil(m.toDouble / rowsPerBlock).toInt
val numColBlocks = math.ceil(n.toDouble / colsPerBlock).toInt
val partitioner = GridPartitioner(numRowBlocks, numColBlocks, entries.partitions.length)
val blocks: RDD[((Int, Int), Matrix)] = entries.map { entry =>
val blockRowIndex = (entry.i / rowsPerBlock).toInt
val blockColIndex = (entry.j / colsPerBlock).toInt
val rowId = entry.i % rowsPerBlock
val colId = entry.j % colsPerBlock
((blockRowIndex, blockColIndex), (rowId.toInt, colId.toInt, entry.value))
}.groupByKey(partitioner).map { case ((blockRowIndex, blockColIndex), entry) =>
val effRows = math.min(m - blockRowIndex.toLong * rowsPerBlock, rowsPerBlock).toInt
val effCols = math.min(n - blockColIndex.toLong * colsPerBlock, colsPerBlock).toInt
((blockRowIndex, blockColIndex), SparseMatrix.fromCOO(effRows, effCols, entry))
}
new BlockMatrix(blocks, rowsPerBlock, colsPerBlock, m, n)
}
/** Determines the size by computing the max row/column index. */
private def computeSize() {
// Reduce will throw an exception if `entries` is empty.
val (m1, n1) = entries.map(entry => (entry.i, entry.j)).reduce { case ((i1, j1), (i2, j2)) =>
(math.max(i1, i2), math.max(j1, j2))
}
// There may be empty columns at the very right and empty rows at the very bottom.
nRows = math.max(nRows, m1 + 1L)
nCols = math.max(nCols, n1 + 1L)
}
/** Collects data and assembles a local matrix. */
private[mllib] override def toBreeze(): BDM[Double] = {
val m = numRows().toInt
val n = numCols().toInt
val mat = BDM.zeros[Double](m, n)
entries.collect().foreach { case MatrixEntry(i, j, value) =>
mat(i.toInt, j.toInt) = value
}
mat
}
}
| xieguobin/Spark_2.0.0_cn1 | mllib/linalg/distributed/CoordinateMatrix.scala | Scala | apache-2.0 | 6,257 |
import java.io.File;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashSet;
import java.util.Set;
/*
*/
abstract class Node
case class LeafNode(data: String, counter: Int) extends Node;
case class FullNode(data: String, left: Node, right: Node) extends Node
case class LeftNode(data: String, left: Node) extends Node
case class RightNode(data: String, right: Node) extends Node
object trieLZ78 extends App {
/* val alphabet:Seq[String]
val firstSymbol:String
val rootHeadName:String // Name of root level's head*/
System.out.println("This is the lz78")
// val words = List( "B","E","O","R","N","O","T","T","O","B","E" );
// val words = List( "A","B","R","A","C","A","D","A","B","R","A" );
val alphabet = List( "A","B","C","R");
val words = List( "A","B","R");
def construct(A: List[String]): Node = {
def insert(tree: Node, value: String): Node = {
tree match {
case null => LeafNode(value,0)
case LeafNode(data,1) => if (value > data) {
System.out.println("1) Nodo Hoja \t (value > data) "+value+" > "+data )
LeftNode(data, LeafNode(value,1))
}else{
System.out.println("2) Nodo Hoja\t (value < = data) "+value+" > "+data )
RightNode(data, LeafNode(value,1))
}
case LeftNode(data, left) => if (value > data) {
System.out.println("3) Nodo Izq \t (value > data) "+value+" > "+data )
LeftNode(value, LeftNode(data, left))
}else{
System.out.println("4) Nodo Izq\t (value <= data) "+value+" > "+data )
FullNode(data, left, LeafNode(value,1))
}
/*
case RightNode(data, right) => if (value > data) {
System.out.println("5) Nodo Dcha\t (value > data) "+value+" > "+data )
FullNode(data, LeafNode(value,1), right)
}else{
System.out.println("6) Nodo Dcha\t (value <= data) "+value+" > "+data )
RightNode(value, RightNode(data, right))
}
case FullNode(data, left, right) => if (value > data) {
System.out.println("7) FullNode\t (value > data) "+value+" > "+data )
FullNode(data, insert(left, value), right)
}else {
System.out.println("8) FullNode\t (value <= data) "+value+" > "+data )
FullNode(data, left, insert(right, value))
}*/
}
}
    // Start with an empty tree
var tree: Node = null;
tree = insert(tree, "")
return tree
  }; // end of the construct method
//=> System.out.println(" Valor List "+A);
val f = (A: String) => System.out.println(A)
words.map(f);
var x = construct(words);
def recurseNode(A: Node, depth: Int) {
def display(data: String, depth: Int) {
for (i <- 1 to depth * 2) { System.out.print("*") }
// System.out.println(data);
}
A match {
case null => {
display("[]", depth)
}
case LeafNode(data,0) => {
display(data, depth)
recurseNode(null, depth + 1)
recurseNode(null, depth + 1)
}
/* case FullNode(data, left, right) => {
display(data, depth)
recurseNode(left, depth + 1)
recurseNode(right, depth + 1)
}
case RightNode(data, right) => {
display(data, depth)
recurseNode(null, depth + 1)
recurseNode(right, depth + 1)
}*/
case LeftNode(data, left) => {
// System.out.println("izq")
display(data, depth)
recurseNode(left, depth + 1)
recurseNode(null, depth + 1)
}
}
} //recurseNode
def output(A: Node, recurse: (Node, Int) => Unit) = { recurse(A, 0) }
def renderTree(A: Node) = { output(x, recurseNode); }
renderTree(x);
}
| jaimeguzman/learning | lala.scala | Scala | apache-2.0 | 3,948 |
package controllers.authentication
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration._
import org.scalatest._
import org.scalatest.mock.MockitoSugar
import org.specs2.time.DurationConversions
import play.api.Logger
import play.api.libs.json._
import play.api.test._
import play.api.test.Helpers._
import play.api.libs.ws._
import play.api.mvc._
import models.authentication.RegisteredUserJsonFormats.registeredUserFormat
class RegisteredUserControllerSpec extends FlatSpec with
MockitoSugar with Matchers with BeforeAndAfter with DurationConversions {
//implicit val Timeout = 10 seconds
//override implicit def defaultAwaitTimeout: Timeout = 20.seconds
println("Starting tests")
println()
"A registered user " should "be returned " in {
val registeredUser = RegisteredUserController.setUpRegisteredUser
registeredUser.authorityLevel should be ("99")
//registeredUser.crDate should be ("dave")
registeredUser.email should be ("[email protected]")
registeredUser.firstName should be ("dave")
registeredUser.lastName should be ("alan")
registeredUser.password should be ("password8")
registeredUser.telephone should be ("01388 898989")
//registeredUser.updDate should be ("dave")
registeredUser.userId should be ("userid8")
}
it should "Try and call the method" in new WithApplication(new FakeApplication()) {
val result = Await.result(RegisteredUserController.listRegisteredUsers(FakeRequest()), 10 seconds)
result.toString
println("** " + result)
}
} | Braffa/sellem-mongodb | test/controllers/authentication/RegisteredUserControllerSpec.scala | Scala | mit | 1,580 |
package org.jetbrains.plugins.scala.refactoring.extractMethod
package generated
class ScalaExtractMethodSimpleTest extends ScalaExtractMethodTestBase {
//This class was generated by build script, please don't change this
override def folderPath: String = super.folderPath + "simple/"
def testElementCreatedInside() = doTest()
def testOneInput() = doTest()
def testSCL1868() = doTest()
def testSCL4576() = doTest()
def testSimple() = doTest()
def testChangeLocalVar() = doTest()
def testUnitReturn() = doTest()
} | igrocki/intellij-scala | test/org/jetbrains/plugins/scala/refactoring/extractMethod/generated/ScalaExtractMethodSimpleTest.scala | Scala | apache-2.0 | 537 |
package org.jetbrains.plugins.scala.debugger.evaluation.evaluator
import com.intellij.debugger.JavaDebuggerBundle
import com.intellij.debugger.engine.evaluation.EvaluationContextImpl
import com.intellij.debugger.engine.evaluation.expression.Evaluator
import com.intellij.debugger.impl.DebuggerUtilsImpl
import com.sun.jdi._
import org.jetbrains.plugins.scala.ScalaBundle
import org.jetbrains.plugins.scala.debugger.evaluation.EvaluationException
import org.jetbrains.plugins.scala.debugger.evaluation.util.DebuggerUtil
import org.jetbrains.plugins.scala.extensions.inReadAction
import org.jetbrains.plugins.scala.lang.psi.types.{ScCompoundType, ScType}
class AsInstanceOfEvaluator(operandEvaluator: Evaluator, rawType: ScType) extends Evaluator {
override def evaluate(context: EvaluationContextImpl): Value = {
val proxy = context.getDebugProcess.getVirtualMachineProxy
object NumericType {
def unapply(tpe: ScType): Option[PrimitiveValue => Value] = {
val stdTypes = tpe.projectContext.stdTypes
import stdTypes._
tpe match {
case Byte => Some(pv => proxy.mirrorOf(pv.byteValue))
case Char => Some(pv => proxy.mirrorOf(pv.charValue()))
case Double => Some(pv => proxy.mirrorOf(pv.doubleValue()))
case Float => Some(pv => proxy.mirrorOf(pv.floatValue()))
case Int => Some(pv => proxy.mirrorOf(pv.intValue()))
case Long => Some(pv => proxy.mirrorOf(pv.longValue()))
case Short => Some(pv => proxy.mirrorOf(pv.shortValue()))
case _ => None
}
}
}
val tpe = inReadAction(rawType.removeAliasDefinitions().widenIfLiteral)
val stdTypes = tpe.projectContext.stdTypes
import stdTypes._
val value = operandEvaluator.evaluate(context).asInstanceOf[Value]
def message: String = {
val valueType = value.`type`().name() match {
case "boolean" => "Boolean"
case "byte" => "Byte"
case "char" => "Char"
case "double" => "Double"
case "float" => "Float"
case "int" => "Int"
case "long" => "Long"
case "short" => "Short"
case other => other
}
val castType = tpe match {
case Boolean => "Boolean"
case Byte => "Byte"
case Char => "Char"
case Double => "Double"
case Float => "Float"
case Int => "Int"
case Long => "Long"
case Short => "Short"
case _ => inReadAction(DebuggerUtil.getJVMQualifiedName(tpe).getDisplayName(context.getDebugProcess))
}
ScalaBundle.message("error.cannot.cast.value.to.type", valueType, castType)
}
(value, tpe) match {
case (_, _: ScCompoundType) => value
case (null, _) if tpe.isPrimitive =>
throw EvaluationException(JavaDebuggerBundle.message("evaluation.error.cannot.cast.null", tpe.canonicalText))
case (null, _) => null
case (b: BooleanValue, Boolean) => b
case (b: ByteValue, NumericType(fn)) => fn(b)
case (c: CharValue, NumericType(fn)) => fn(c)
case (d: DoubleValue, NumericType(fn)) => fn(d)
case (f: FloatValue, NumericType(fn)) => fn(f)
case (i: IntegerValue, NumericType(fn)) => fn(i)
case (l: LongValue, NumericType(fn)) => fn(l)
case (s: ShortValue, NumericType(fn)) => fn(s)
case (_: PrimitiveValue, _) =>
throw EvaluationException(message)
case (o: ObjectReference, _) =>
val valueType = o.referenceType()
val castType = new ClassOfEvaluator(tpe).evaluate(context).reflectedType()
if (DebuggerUtilsImpl.instanceOf(valueType, castType)) o
else throw EvaluationException(message)
}
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/debugger/evaluation/evaluator/AsInstanceOfEvaluator.scala | Scala | apache-2.0 | 3,690 |
package org.scaladebugger.test.invalid
/**
* Represents a scenario where the package and class names do not match the
* source path.
*/
object InvalidSourcePath {
def main(args: Array[String]): Unit = {
val c = new InvalidSourcePathClass
val x = 1 + 1
val y = c.getClass.getName
x + y
}
}
class InvalidSourcePathClass
| ensime/scala-debugger | scala-debugger-test/src/main/scala/org/scaladebugger/test/misc/InvalidSourcePath.scala | Scala | apache-2.0 | 345 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
import org.joda.time.LocalDate
import play.api.libs.json.JodaWrites._
import play.api.libs.json.JodaReads._
import play.api.libs.json.Json
case class CalculationRequest(scon: String, nino: String, surname: String, firstForename: String,
calctype: Int,
revaluationDate: Option[LocalDate] = None,
revaluationRate: Option[Int] = None,
requestEarnings: Option[Int] = None,
dualCalc: Option[Int] = None,
terminationDate: Option[LocalDate] = None ) {
}
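/*
 * Example (added for illustration; the scon/nino values below are made up):
 *
 *   val request = CalculationRequest("S1301234T", "AB123456C", "Smith", "Jane", calctype = 1)
 *   val json = Json.toJson(request) // uses the implicit format from the companion object below
 */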
object CalculationRequest {
implicit val formats = Json.format[CalculationRequest]
}
| hmrc/gmp-frontend | app/models/CalculationRequest.scala | Scala | apache-2.0 | 1,337 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.broadcast
import java.io._
import scala.math
import scala.util.Random
import org.apache.spark._
import org.apache.spark.storage.{BroadcastBlockId, BroadcastHelperBlockId, StorageLevel}
import org.apache.spark.util.Utils
private[spark] class TorrentBroadcast[T](@transient var value_ : T, isLocal: Boolean, id: Long)
extends Broadcast[T](id) with Logging with Serializable {
def value = value_
def broadcastId = BroadcastBlockId(id)
TorrentBroadcast.synchronized {
SparkEnv.get.blockManager.putSingle(broadcastId, value_, StorageLevel.MEMORY_AND_DISK, false)
}
@transient var arrayOfBlocks: Array[TorrentBlock] = null
@transient var totalBlocks = -1
@transient var totalBytes = -1
@transient var hasBlocks = 0
if (!isLocal) {
sendBroadcast()
}
def sendBroadcast() {
var tInfo = TorrentBroadcast.blockifyObject(value_)
totalBlocks = tInfo.totalBlocks
totalBytes = tInfo.totalBytes
hasBlocks = tInfo.totalBlocks
// Store meta-info
val metaId = BroadcastHelperBlockId(broadcastId, "meta")
val metaInfo = TorrentInfo(null, totalBlocks, totalBytes)
TorrentBroadcast.synchronized {
SparkEnv.get.blockManager.putSingle(
metaId, metaInfo, StorageLevel.MEMORY_AND_DISK, true)
}
// Store individual pieces
for (i <- 0 until totalBlocks) {
val pieceId = BroadcastHelperBlockId(broadcastId, "piece" + i)
TorrentBroadcast.synchronized {
SparkEnv.get.blockManager.putSingle(
pieceId, tInfo.arrayOfBlocks(i), StorageLevel.MEMORY_AND_DISK, true)
}
}
}
// Called by JVM when deserializing an object
private def readObject(in: ObjectInputStream) {
in.defaultReadObject()
TorrentBroadcast.synchronized {
SparkEnv.get.blockManager.getSingle(broadcastId) match {
case Some(x) =>
value_ = x.asInstanceOf[T]
case None =>
val start = System.nanoTime
logInfo("Started reading broadcast variable " + id)
// Initialize @transient variables that will receive garbage values from the master.
resetWorkerVariables()
if (receiveBroadcast(id)) {
value_ = TorrentBroadcast.unBlockifyObject[T](arrayOfBlocks, totalBytes, totalBlocks)
// Store the merged copy in cache so that the next worker doesn't need to rebuild it.
// This creates a tradeoff between memory usage and latency.
// Storing copy doubles the memory footprint; not storing doubles deserialization cost.
SparkEnv.get.blockManager.putSingle(
broadcastId, value_, StorageLevel.MEMORY_AND_DISK, false)
// Remove arrayOfBlocks from memory once value_ is on local cache
resetWorkerVariables()
} else {
logError("Reading broadcast variable " + id + " failed")
}
val time = (System.nanoTime - start) / 1e9
logInfo("Reading broadcast variable " + id + " took " + time + " s")
}
}
}
private def resetWorkerVariables() {
arrayOfBlocks = null
totalBytes = -1
totalBlocks = -1
hasBlocks = 0
}
def receiveBroadcast(variableID: Long): Boolean = {
// Receive meta-info
val metaId = BroadcastHelperBlockId(broadcastId, "meta")
var attemptId = 10
while (attemptId > 0 && totalBlocks == -1) {
TorrentBroadcast.synchronized {
SparkEnv.get.blockManager.getSingle(metaId) match {
case Some(x) =>
val tInfo = x.asInstanceOf[TorrentInfo]
totalBlocks = tInfo.totalBlocks
totalBytes = tInfo.totalBytes
arrayOfBlocks = new Array[TorrentBlock](totalBlocks)
hasBlocks = 0
case None =>
Thread.sleep(500)
}
}
attemptId -= 1
}
if (totalBlocks == -1) {
return false
}
// Receive actual blocks
val recvOrder = new Random().shuffle(Array.iterate(0, totalBlocks)(_ + 1).toList)
for (pid <- recvOrder) {
val pieceId = BroadcastHelperBlockId(broadcastId, "piece" + pid)
TorrentBroadcast.synchronized {
SparkEnv.get.blockManager.getSingle(pieceId) match {
case Some(x) =>
arrayOfBlocks(pid) = x.asInstanceOf[TorrentBlock]
hasBlocks += 1
SparkEnv.get.blockManager.putSingle(
pieceId, arrayOfBlocks(pid), StorageLevel.MEMORY_AND_DISK, true)
case None =>
throw new SparkException("Failed to get " + pieceId + " of " + broadcastId)
}
}
}
(hasBlocks == totalBlocks)
}
}
private object TorrentBroadcast
extends Logging {
private var initialized = false
private var conf: SparkConf = null
def initialize(_isDriver: Boolean, conf: SparkConf) {
TorrentBroadcast.conf = conf //TODO: we might have to fix it in tests
synchronized {
if (!initialized) {
initialized = true
}
}
}
def stop() {
initialized = false
}
lazy val BLOCK_SIZE = conf.getInt("spark.broadcast.blockSize", 4096) * 1024
def blockifyObject[T](obj: T): TorrentInfo = {
val byteArray = Utils.serialize[T](obj)
val bais = new ByteArrayInputStream(byteArray)
var blockNum = (byteArray.length / BLOCK_SIZE)
if (byteArray.length % BLOCK_SIZE != 0)
blockNum += 1
var retVal = new Array[TorrentBlock](blockNum)
var blockID = 0
for (i <- 0 until (byteArray.length, BLOCK_SIZE)) {
val thisBlockSize = math.min(BLOCK_SIZE, byteArray.length - i)
var tempByteArray = new Array[Byte](thisBlockSize)
val hasRead = bais.read(tempByteArray, 0, thisBlockSize)
retVal(blockID) = new TorrentBlock(blockID, tempByteArray)
blockID += 1
}
bais.close()
val tInfo = TorrentInfo(retVal, blockNum, byteArray.length)
tInfo.hasBlocks = blockNum
tInfo
}
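  // Round-trip sketch (added comment, not in the original Spark sources): unBlockifyObject is the
  // inverse of blockifyObject for any serializable value, e.g.
  //   val info = blockifyObject(someValue)
  //   val back = unBlockifyObject[SomeType](info.arrayOfBlocks, info.totalBytes, info.totalBlocks)
  // should yield a value equal to someValue (here someValue/SomeType are placeholders).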
def unBlockifyObject[T](arrayOfBlocks: Array[TorrentBlock],
totalBytes: Int,
totalBlocks: Int): T = {
val retByteArray = new Array[Byte](totalBytes)
for (i <- 0 until totalBlocks) {
System.arraycopy(arrayOfBlocks(i).byteArray, 0, retByteArray,
i * BLOCK_SIZE, arrayOfBlocks(i).byteArray.length)
}
Utils.deserialize[T](retByteArray, Thread.currentThread.getContextClassLoader)
}
}
private[spark] case class TorrentBlock(
blockID: Int,
byteArray: Array[Byte])
extends Serializable
private[spark] case class TorrentInfo(
@transient arrayOfBlocks : Array[TorrentBlock],
totalBlocks: Int,
totalBytes: Int)
extends Serializable {
@transient var hasBlocks = 0
}
/**
* A [[BroadcastFactory]] that creates a torrent-based implementation of broadcast.
*/
class TorrentBroadcastFactory extends BroadcastFactory {
def initialize(isDriver: Boolean, conf: SparkConf) { TorrentBroadcast.initialize(isDriver, conf) }
def newBroadcast[T](value_ : T, isLocal: Boolean, id: Long) =
new TorrentBroadcast[T](value_, isLocal, id)
def stop() { TorrentBroadcast.stop() }
}
| dotunolafunmiloye/spark | core/src/main/scala/org/apache/spark/broadcast/TorrentBroadcast.scala | Scala | apache-2.0 | 7,869 |
package com.rasterfoundry.datamodel
import io.circe.generic.JsonCodec
@JsonCodec
final case class ActiveStatus(isActive: Boolean)
| azavea/raster-foundry | app-backend/datamodel/src/main/scala/ActiveStatus.scala | Scala | apache-2.0 | 132 |
package com.yukihirai0505.sInstagram.responses.auth
import com.yukihirai0505.sInstagram.utils.Configurations.clientSecret
sealed trait Auth
case class AccessToken(token: String) extends Auth
case class SignedAccessToken(token: String, clientSecret: String = clientSecret) extends Auth
| yukihirai0505/sInstagram | src/main/scala/com/yukihirai0505/sInstagram/responses/auth/Auth.scala | Scala | mit | 289 |
object Foo {
object Values {
implicit def fromInt(x: Int): Values = ???
}
trait Values
}
final class Foo(name: String) {
def bar(values: Foo.Values): Bar = ???
}
trait Bar
| yusuke2255/dotty | tests/untried/pos/t7264/A_1.scala | Scala | bsd-3-clause | 185 |
/*
* Copyright (c) 2015-2017 Toby Weston
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s4j.scala.chapter17
import s4j.scala.chapter12.{Customer, ShoppingBasket}
import s4j.scala.chapter17.ThreadExample._
object ThreadExample {
def runInThread(function: () => Unit) {
new Thread() {
override def run(): Unit = function() // aka function.apply()
}.start()
}
def runInThread2(function: => Unit) { // call-by-name
new Thread() {
override def run(): Unit = function // not function()
}.start()
}
def runInThread3(group: String, function: => Unit) {
new Thread(new ThreadGroup(group), () => function).start()
}
def runInThread4(group: String)(function: => Unit) {
new Thread(new ThreadGroup(group), () => function).start()
}
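  // Note (added): runInThread takes an explicit function value, while runInThread2/3/4 take the
  // block by name so callers can drop the `() =>`. The curried runInThread4 reads like a control
  // structure, e.g.
  //   runInThread4("workers") {
  //     println("Hello from a named thread group")
  //   }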
def main(args: Array[String]) {
runInThread(() => {
// some long running task
println("Hello function")
})
runInThread2 {
println("Hello Lazy val")
}
}
}
/*
a class representing some kind of UI with 'update' methods
to update itself
*/
class UI {
def updateUiElements() {
new Thread() {
override def run(): Unit = updateCustomerBasket(basket)
}.start()
new Thread() {
override def run(): Unit = updateOffersFor(customer)
}.start()
// more updates, all done in their own threads
}
def updateUiElements2() {
runInThread(() => updateCustomerBasket(basket))
runInThread(() => updateOffersFor(customer))
// more updates, all done in their own threads
}
def updateUiElements3() {
runInThread { () =>
updateCustomerBasket(basket)
}
runInThread { () =>
updateOffersFor(customer)
}
// more updates, all done in their own threads
}
def updateUiElementsX() {
runInThread(() => {
applyDiscountToBasket(basket)
updateCustomerBasket(basket)
})
runInThread(() => updateOffersFor(customer))
// more updates, all done in their own threads
}
def updateUiElements4() {
runInThread2 {
applyDiscountToBasket(basket)
updateCustomerBasket(basket)
}
runInThread2 {
updateOffersFor(customer)
}
// more updates, all done in their own threads
}
def updateUiElements5() {
runInThread3("basket", {
applyDiscountToBasket(basket)
updateCustomerBasket(basket)
})
runInThread3("customer",
updateOffersFor(customer)
)
// more updates, all done in their own threads
}
def updateUiElements6() {
runInThread4("basket") {
applyDiscountToBasket(basket)
updateCustomerBasket(basket)
}
runInThread3("customer",
updateOffersFor(customer)
)
// more updates, all done in their own threads
}
def applyDiscountToBasket(basket: ShoppingBasket) {}
def updateCustomerBasket(basket: ShoppingBasket) {}
def updateOffersFor(customer: Customer) {}
private val basket = new ShoppingBasket()
private val customer = new Customer("", "")
}
| tobyweston/learn-scala-java-devs | src/main/scala/s4j/scala/chapter17/ThreadExample.scala | Scala | apache-2.0 | 3,502 |
package org.akka.essentials.dispatcher
import akka.actor.Actor
class MsgEchoActor extends Actor {
var messageProcessed:Int = 0
def receive: Receive = {
case message =>
messageProcessed = messageProcessed + 1
println(
"Received Message %s in Actor %s using Thread %s, total message processed %s".format( message,
self.path.name, Thread.currentThread().getName(), messageProcessed))
}
} | rokumar7/trial | AkkaDispatcherExample/src/main/scala/org/akka/essentials/dispatcher/MsgEchoActor.scala | Scala | unlicense | 405 |
package services
import java.nio.charset.StandardCharsets
import java.util.Base64
object StringService {
def base64Encode(string: String): String = {
Base64.getEncoder.encodeToString(string.getBytes(StandardCharsets.UTF_8))
}
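  // Example (added comment): base64Encode("hello") returns "aGVsbG8=". Decoding is not wrapped
  // here; it would go through java.util.Base64.getDecoder.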
}
| PanzerKunst/redesigned-cruited.com-frontend | document-web-service/app/services/StringService.scala | Scala | gpl-3.0 | 242 |
package controllers
/**
* Created by Ravis on 06/04/15.
*/
import play.api.mvc._
import models.db.{SearchQueries, SearchResult}
object SearchController extends Controller {
def search(title: Option[String], disc: Option[String], fio: Option[String]) = Action {
val result: List[SearchResult] = SearchQueries.queryWithParams(title, disc, fio)
Ok(views.html.Search.search(result, title.getOrElse(""), disc.getOrElse(""), fio.getOrElse("")))
}
}
| RavisMsk/Design-Tech-Home-Task | app/controllers/SearchController.scala | Scala | mit | 460 |
package com.arcusys.valamis.lesson.model
object PackageActivityType extends Enumeration{
val Published, Shared, Completed = Value
}
| igor-borisov/valamis | valamis-lesson/src/main/scala/com/arcusys/valamis/lesson/model/PackageActivityType.scala | Scala | gpl-3.0 | 135 |
package com.github.chawasit.smc.simulator
import java.io.{File, PrintWriter}
import scala.io.Source
object Main extends App {
override def main(args: Array[String]): Unit = {
val arguments = new ArgumentConfiguration(args)
try {
val instructions = readFile(arguments.input())
val turingMachine = TuringMachine(instructions)
val haltedTuringMachine = turingMachine run
haltedTuringMachine printState()
} catch {
case e: SimulatorException =>
println(s"[${Console.BLUE}Simulator${Console.RESET}] ${e.getMessage}")
case e: Exception =>
println(s"[${Console.RED}RunTime${Console.RESET}] $e")
}
}
private def writeFile(output: String, data: String): Unit = {
val writer = new PrintWriter(new File(output))
writer.write(data)
writer.close()
}
private def readFile(path: String): List[Int] =
Source.fromFile(path)
.getLines()
.toList
.map { _.toInt }
}
| chawasit/Scala-SMC-Simulator | src/main/scala/com/github/chawasit/smc/simulator/Main.scala | Scala | unlicense | 1,009 |
def upCase: String => Writer[String, String] =
s => Writer(s.toUpperCase, "upCase ") | hmemcpy/milewski-ctfp-pdf | src/content/3.4/code/scala/snippet19.scala | Scala | gpl-3.0 | 86 |
package cgta.oscala
package util
import scala.collection.mutable.ListBuffer
import scala.annotation.tailrec
import scala.util.control.NonFatal
//////////////////////////////////////////////////////////////
// Copyright (c) 2014 Ben Jackman
// All Rights Reserved
// please contact [email protected]
// for licensing inquiries
// Created by bjackman @ 6/17/14 11:24 AM
//////////////////////////////////////////////////////////////
object StackTracer {
def trace(e: Throwable): List[String] = {
val lb = new ListBuffer[Either[String, StackTraceElement]]
@tailrec
def loop(e: Throwable) {
val msg = try {e.getMessage} catch {case NonFatal(_) => "UNABLE TO DISPLAY EXCEPTION MESSAGE"}
val name = e.getClass.getName
lb += Left(name + ": " + msg)
lb ++= e.getStackTrace.map(Right(_))
val cause = e.getCause
if (cause != null) loop(cause)
}
loop(e)
trace(lb.toList)
}
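  // Usage sketch (added; riskyCall is a placeholder):
  //   try riskyCall() catch {
  //     case NonFatal(e) => StackTracer.trace(e) foreach println
  //   }
  // Each chained cause becomes a "Caused by: ..." line followed by its "  at ..." frames.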
def trace(t: Seq[Either[String, StackTraceElement]]): List[String] = {
val lb = new ListBuffer[String]
var first = true
t.foreach {
case Left(msg) =>
lb += (if (first) "Exception " else "Caused by: ") + msg
first = false
case Right(ste) =>
lb += " at " + ste.toString
}
lb.toList
}
} | cgta/open | oscala/shared/src/main/scala/cgta/oscala/util/StackTracer.scala | Scala | mit | 1,275 |
package algebra
package ring
import scala.{specialized => sp}
/**
* Rig consists of:
*
* - a commutative monoid for addition (+)
* - a monoid for multiplication (*)
*
* Alternately, a Rig can be thought of as a ring without
* multiplicative or additive inverses (or as a semiring with a
* multiplicative identity).
* Mnemonic: "Rig is a Ring without 'N'egation."
*/
trait Rig[@sp(Int, Long, Float, Double) A] extends Any with Semiring[A] with MultiplicativeMonoid[A]
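// Example (added, illustrative only): the natural numbers form a Rig. An instance for BigInt
// restricted to non-negative values would define zero = 0, one = 1, plus as + and times as *;
// no additive or multiplicative inverses are required, which is what separates a Rig from a Ring.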
object Rig extends AdditiveMonoidFunctions[Rig] with MultiplicativeMonoidFunctions[Rig] {
@inline final def apply[A](implicit ev: Rig[A]): Rig[A] = ev
}
| tixxit/algebra | core/src/main/scala/algebra/ring/Rig.scala | Scala | mit | 638 |
import scala.collection.mutable.WeakHashMap
import scala.collection.JavaConversions.*
class bar { }
class foo {
val map = WeakHashMap[AnyRef, collection.mutable.Map[bar, collection.mutable.Set[bar]]]()
def test={
val tmp:bar=null
if (map.get(tmp).isEmpty) map.put(tmp,collection.mutable.Set())
}
}
| dotty-staging/dotty | tests/untried/neg/t5580b.scala | Scala | apache-2.0 | 315 |
package com.sorrentocorp.akka.stream
import akka.util.ByteString
import scala.util._
/** Tries to match a ByteString literal. */
class Expect(val prefix: ByteString) {
require(!prefix.isEmpty)
val prefixLen = prefix.size
private var buffer: ByteString = ByteString.empty
def offer(input: ByteString): Unit = buffer ++= input
/** Before prefix is matched, returns None; after matching, returns rest of the input */
def poll: Option[Either[ByteString, ByteString]] =
if (buffer.size < prefixLen)
None
else if (buffer.take(prefixLen) == prefix)
Some(Right(buffer.drop(prefixLen)))
else
Some(Left(buffer))
}
object Expect {
def apply(str: String) = new Expect(ByteString(str))
}
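// Usage sketch (added; `chunk` stands for any incoming ByteString):
//   val expect = Expect("""{"rows":[""")
//   expect.offer(chunk)          // feed chunks as they arrive
//   expect.poll match {
//     case None               => // not enough input buffered yet
//     case Some(Right(rest))  => // prefix matched, `rest` is everything after it
//     case Some(Left(buffer)) => // prefix did not match, `buffer` holds all input seen so far
//   }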
| Kai-Chen/streaming-json-parser | src/main/scala/com/sorrentocorp/akka/stream/Expect.scala | Scala | mit | 724 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import cascading.flow.FlowDef
import cascading.pipe.Pipe
import java.util.{ Map => JMap, List => JList }
/**
* This is an enrichment-pattern class for cascading.flow.FlowDef.
* The rule is to never use this class directly in input or return types, but
* only to add methods to FlowDef.
*/
class RichFlowDef(val fd: FlowDef) {
// allow .asScala conversions
import collection.JavaConverters._
// RichPipe and RichFlowDef implicits
import Dsl._
def copy: FlowDef = {
val newFd = new FlowDef
newFd.mergeFrom(fd)
newFd
}
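  // Usage note (added): call sites normally reach this class through the enrichment implicit in
  // Dsl (imported above), so they can write e.g. `flowDef.copy` or `flowDef.mergeFrom(other)`
  // directly on a plain cascading FlowDef.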
/**
* Merge state from FlowDef excluding Sources/Sinks/Tails (sometimes we don't want both)
*/
private[scalding] def mergeMiscFrom(o: FlowDef): Unit = {
// See the cascading code that this string is a "," separated set.
o.getTags.split(",").foreach(fd.addTag)
mergeLeft(fd.getTraps, o.getTraps)
mergeLeft(fd.getCheckpoints, o.getCheckpoints)
appendLeft(fd.getClassPath, o.getClassPath)
fd.setAssertionLevel(preferLeft(fd.getAssertionLevel, o.getAssertionLevel))
fd.setName(preferLeft(fd.getName, o.getName))
}
private[this] def preferLeft[T](left: T, right: T): T =
Option(left).getOrElse(right)
private[this] def mergeLeft[K, V](left: JMap[K, V], right: JMap[K, V]) {
right.asScala.foreach {
case (k, v) =>
if (!left.containsKey(k)) left.put(k, v)
}
}
private[this] def appendLeft[T](left: JList[T], right: JList[T]) {
val existing = left.asScala.toSet
right.asScala
.filterNot(existing)
.foreach(left.add)
}
/**
* Mutate current flow def to add all sources/sinks/etc from given FlowDef
*/
def mergeFrom(o: FlowDef): Unit = {
mergeLeft(fd.getSources, o.getSources)
mergeLeft(fd.getSinks, o.getSinks)
appendLeft(fd.getTails, o.getTails)
fd.mergeMiscFrom(o)
// Merge the FlowState
FlowStateMap.get(o)
.foreach { oFS =>
FlowStateMap.mutate(fd) { current =>
// overwrite the items from o with current
(FlowState(oFS.sourceMap ++ current.sourceMap), ())
}
}
}
/**
* find all heads reachable from the tails (as a set of names)
*/
def heads: Set[Pipe] = fd.getTails.asScala.flatMap(_.getHeads).toSet
/**
* New flow def with only sources upstream from tails.
*/
def withoutUnusedSources: FlowDef = {
// add taps associated with heads to localFlow
val filteredSources = fd.getSources.asScala.filterKeys(heads.map(p => p.getName)).asJava
val newFd = fd.copy
newFd.getSources.clear()
newFd.addSources(filteredSources)
newFd
}
/**
* FlowDef that only includes things upstream from the given Pipe
*/
def onlyUpstreamFrom(pipe: Pipe): FlowDef = {
val newFd = new FlowDef
// don't copy any sources/sinks
newFd.mergeMiscFrom(fd)
val sourceTaps = fd.getSources
val newSrcs = newFd.getSources
val upipes = pipe.upstreamPipes
val headNames: Set[String] = upipes
.filter(_.getPrevious.length == 0) // implies _ is a head
.map(_.getName)
.toSet
headNames
.foreach { head =>
// TODO: make sure we handle checkpoints correctly
if (!newSrcs.containsKey(head)) {
newFd.addSource(head, sourceTaps.get(head))
}
}
val sinks = fd.getSinks
if (sinks.containsKey(pipe.getName)) {
newFd.addTailSink(pipe, sinks.get(pipe.getName))
}
// Update the FlowState:
FlowStateMap.get(fd)
.foreach { thisFS =>
val subFlowState = thisFS.sourceMap
.foldLeft(Map[String, Source]()) {
case (newfs, kv @ (name, source)) =>
if (headNames(name)) newfs + kv
else newfs
}
FlowStateMap.mutate(newFd) { _ => (FlowState(subFlowState), ()) }
}
newFd
}
}
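/*
 * Hypothetical usage sketch (illustration only): `fd` and `pipe` are assumed to be a
 * FlowDef and a Pipe built elsewhere with the scalding Dsl; only the enrichment methods
 * defined above are exercised.
 *
 *   import Dsl._                                    // brings the FlowDef enrichment into scope
 *   val pruned: FlowDef = fd.onlyUpstreamFrom(pipe) // keep only sources/sinks feeding `pipe`
 *   val slim: FlowDef   = fd.withoutUnusedSources   // drop sources unreachable from the tails
 *   val merged: FlowDef = fd.copy                   // independent FlowDef populated via mergeFrom
 */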
| oeddyo/scalding | scalding-core/src/main/scala/com/twitter/scalding/RichFlowDef.scala | Scala | apache-2.0 | 4,403 |
package net.sansa_stack.rdf.common.partition.core
import net.sansa_stack.rdf.common.partition.layout.{TripleLayout, TripleLayoutDouble, TripleLayoutLong, TripleLayoutString, TripleLayoutStringDate, TripleLayoutStringLang}
import org.apache.jena.datatypes.TypeMapper
import org.apache.jena.datatypes.xsd.XSDDatatype
import org.apache.jena.graph.{Node, Triple}
import org.apache.jena.vocabulary.{RDF, XSD}
object RdfPartitionerDefault
extends RdfPartitioner[RdfPartitionDefault] with Serializable {
def getUriOrBNodeString(node: Node): String = {
val termType = getRdfTermType(node)
termType match {
case 0 => node.getBlankNodeId.getLabelString
case 1 => node.getURI
case _ => throw new RuntimeException("Neither Uri nor blank node: " + node)
}
}
def getRdfTermType(node: Node): Byte = {
val result =
if (node.isURI()) 1.toByte else if (node.isLiteral()) 2.toByte else if (node.isBlank()) 0.toByte else {
throw new RuntimeException("Unknown RDF term type: " + node)
} // -1
result
}
def isPlainLiteralDatatype(dtypeIri: String): Boolean = {
val result = dtypeIri == null || dtypeIri == "" || dtypeIri == XSD.xstring.getURI || dtypeIri == RDF.langString.getURI
result
}
def isPlainLiteral(node: Node): Boolean = {
val result = node.isLiteral() && isPlainLiteralDatatype(node.getLiteralDatatypeURI) // NodeUtils.isSimpleString(node) || NodeUtils.isLangString(node))
result
}
def isTypedLiteral(node: Node): Boolean = {
val result = node.isLiteral() && !isPlainLiteral(node)
result
}
def fromTriple(t: Triple): RdfPartitionDefault = {
val s = t.getSubject
val o = t.getObject
val subjectType = getRdfTermType(s)
val objectType = getRdfTermType(o)
// val predicateType =
val predicate = t.getPredicate.getURI
// In the case of plain literals, we replace the datatype langString with string
// in order to group them all into the same partition
val datatype = if (o.isLiteral()) (if (isPlainLiteral(o)) XSD.xstring.getURI else o.getLiteralDatatypeURI) else ""
val langTagPresent = isPlainLiteral(o)
RdfPartitionDefault(subjectType, predicate, objectType, datatype, langTagPresent)
}
/**
* Lay a triple out based on the partition
* Does not (re-)check the matches condition
*/
def determineLayout(t: RdfPartitionDefault): TripleLayout = {
val oType = t.objectType
val layout = oType match {
case 0 => TripleLayoutString
case 1 => TripleLayoutString
case 2 => if (isPlainLiteralDatatype(t.datatype)) TripleLayoutStringLang else determineLayoutDatatype(t.datatype)
// if(!t.langTagPresent)
// TripleLayoutString else TripleLayoutStringLang
case _ => throw new RuntimeException("Unsupported object type: " + t)
}
layout
}
private val intDTypeURIs = Set(XSDDatatype.XSDnegativeInteger, XSDDatatype.XSDpositiveInteger,
XSDDatatype.XSDnonNegativeInteger, XSDDatatype.XSDnonPositiveInteger,
XSDDatatype.XSDinteger, XSDDatatype.XSDint)
.map(_.getURI)
def determineLayoutDatatype(dtypeIri: String): TripleLayout = {
val dti = if (dtypeIri == "http://www.w3.org/1999/02/22-rdf-syntax-ns#langString") {
XSD.xstring.getURI
} else dtypeIri
var v = TypeMapper.getInstance.getSafeTypeByName(dti).getJavaClass
// type mapper returns null for some integer types
if (v == null && intDTypeURIs.contains(dtypeIri)) v = classOf[Integer]
// val v = node.getLiteralValue
v match {
case w if (w == classOf[java.lang.Byte] || w == classOf[java.lang.Short] || w == classOf[java.lang.Integer] || w == classOf[java.lang.Long]) => TripleLayoutLong
case w if (w == classOf[java.lang.Float] || w == classOf[java.lang.Double]) => TripleLayoutDouble
case w if dtypeIri == XSD.date.getURI => TripleLayoutStringDate
// case w if(w == classOf[String]) => TripleLayoutString
case w => TripleLayoutString
// case _ => TripleLayoutStringDatatype
// case _ => throw new RuntimeException("Unsupported object type: " + dtypeIri)
}
}
}
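/*
 * Hypothetical usage sketch (illustration only): the example.org IRIs are made up and
 * NodeFactory is assumed from org.apache.jena.graph; the partitioner calls themselves
 * are the ones defined above.
 *
 *   val t = Triple.create(
 *     NodeFactory.createURI("http://example.org/s"),
 *     NodeFactory.createURI("http://example.org/p"),
 *     NodeFactory.createLiteral("42", XSDDatatype.XSDint))
 *   val partition = RdfPartitionerDefault.fromTriple(t)               // typed-literal partition
 *   val layout    = RdfPartitionerDefault.determineLayout(partition)  // expected: TripleLayoutLong
 */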
| SANSA-Stack/Spark-RDF | sansa-rdf-common/src/main/scala/net/sansa_stack/rdf/common/partition/core/RdfPartitionerDefault.scala | Scala | gpl-3.0 | 4,179 |
package relationshipextractor
import org.easyrules.api._
import org.easyrules.core._
class MyListener extends RuleListener {
override def beforeExecute(rule: Rule) {
println("beforeExecute")
}
override def onSuccess(rule: Rule) {
println("onSuccess")
}
override def onFailure(rule: Rule, exception: Exception) {
println("onFailure")
}
}
object Launcher extends App {
val theSon = Person("the son", "the sentence")
val theFather = Person("the father", "the sentence")
val relation = Relation(theSon, "son", RelationDefinition.getRelations.get(0), theFather, "the sentence")
theSon.relations.add(relation)
theSon.relations.foreach(Graph.addRelation(_))
val inferFather: InferFather = new InferFather
inferFather.input(theSon)
val rulesEngine = RulesEngineBuilder.aNewRulesEngine().build()
rulesEngine.registerRule(inferFather)
rulesEngine.fireRules
} | ErikGartner/relationship-extractor | src/main/scala/relationshipextractor/RuleLauncher.scala | Scala | apache-2.0 | 901 |
package org.apache.spark.ml.mleap.feature
import ml.combust.mleap.core.feature.MultinomialLabelerModel
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.ml.Transformer
import org.apache.spark.ml.linalg.{Vector, VectorUDT}
import org.apache.spark.ml.mleap.param.{HasLabelsCol, HasProbabilitiesCol}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.param.shared.HasFeaturesCol
import org.apache.spark.ml.util.Identifiable
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions.{udf, col}
import ml.combust.mleap.core.util.VectorConverters._
/**
* Created by hollinwilkins on 1/18/17.
*/
class MultinomialLabeler(override val uid: String = Identifiable.randomUID("math_unary"),
val model: MultinomialLabelerModel) extends Transformer
with HasFeaturesCol
with HasProbabilitiesCol
with HasLabelsCol {
def setFeaturesCol(value: String): this.type = set(featuresCol, value)
def setProbabilitiesCol(value: String): this.type = set(probabilitiesCol, value)
def setLabelsCol(value: String): this.type = set(labelsCol, value)
@org.apache.spark.annotation.Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
val probabilitiesUdf = udf {
(vector: Vector) => model.top(vector).map(_._1).toArray
}
val labelsUdf = udf {
(vector: Vector) => model.topLabels(vector).toArray
}
dataset.withColumn($(probabilitiesCol), probabilitiesUdf(col($(featuresCol)))).
withColumn($(labelsCol), labelsUdf(col($(featuresCol))))
}
override def copy(extra: ParamMap): Transformer =
copyValues(new MultinomialLabeler(uid, model), extra)
@DeveloperApi
override def transformSchema(schema: StructType): StructType = {
require(schema($(featuresCol)).dataType.isInstanceOf[VectorUDT],
s"Features column must be of type NumericType but got ${schema($(featuresCol)).dataType}")
val inputFields = schema.fields
require(!inputFields.exists(_.name == $(probabilitiesCol)),
s"Output column ${$(probabilitiesCol)} already exists.")
require(!inputFields.exists(_.name == $(labelsCol)),
s"Output column ${$(labelsCol)} already exists.")
StructType(schema.fields ++ Seq(StructField($(probabilitiesCol), ArrayType(DoubleType)),
StructField($(labelsCol), ArrayType(StringType))))
}
}
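/*
 * Hypothetical usage sketch (illustration only): `labelerModel` (a MultinomialLabelerModel)
 * and `df` (a DataFrame with a Vector column named "features") are assumed to exist.
 *
 *   val labeler = new MultinomialLabeler(model = labelerModel)
 *     .setFeaturesCol("features")
 *     .setProbabilitiesCol("probabilities")
 *     .setLabelsCol("labels")
 *   val labeled = labeler.transform(df)  // appends array<double> probabilities and array<string> labels
 */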
| combust/mleap | mleap-spark-extension/src/main/scala/org/apache/spark/ml/mleap/feature/MultinomialLabeler.scala | Scala | apache-2.0 | 2,414 |
package org.scurator
import java.util.concurrent.TimeUnit
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.imps.CuratorFrameworkState
import org.apache.zookeeper.KeeperException.NoNodeException
import org.apache.zookeeper.data.Stat
import org.scurator.components._
import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration
import scala.concurrent.{ExecutionContext, Future, TimeoutException, blocking}
class SCuratorClient(val underlying: CuratorFramework) {
/**
* Start the client
*
* This should not be called if autoStart is set to true
*/
def start(): Unit = underlying.start()
/**
* Wait until the client connects
*
* @param maxWait the maximum amount of time to wait for a connection
* @return Future[Unit]: Success if connection has been established, Failure otherwise
*/
def connect(maxWait: Duration)(implicit executor: ExecutionContext): Future[Unit] = {
Future {
      val maxWaitMillis = if (maxWait.isFinite() && maxWait >= Duration.Zero) maxWait.toMillis.toInt else -1 // -1 means wait indefinitely
val connected = blocking { underlying.blockUntilConnected(maxWaitMillis, TimeUnit.MILLISECONDS) }
if (!connected) throw new TimeoutException(s"Could not connect within the maximum duration of $maxWait")
}
}
/**
* Stop the client
*/
def close(): Unit = underlying.close()
/**
* Return the current namespace or "" if none
*
* @return namespace
*/
def getNamespace: String = underlying.getNamespace
/**
* Returns the state of this instance
*
* @return state
*/
def state: CuratorFrameworkState = underlying.getState
/**
* Returns a facade of the current instance that uses the specified namespace
* or no namespace if <code>newNamespace</code> is <code>null</code>.
*
* @param newNamespace the new namespace or null for none
* @return facade
*/
def usingNamespace(newNamespace: String): SCuratorClient = {
new SCuratorClient(underlying.usingNamespace(newNamespace))
}
/**
* Create a node with the given CreateRequest
*
* @param request the CreateRequest to use
* @param createParents specify if parent nodes should be created
* @return Future[CreateResponse]
*/
def create(request: CreateRequest, createParents: Boolean = false)(implicit executor: ExecutionContext): Future[CreateResponse] = {
Future {
val builder = underlying.create()
// This is done in one step due to Curator fluent api type issues. TODO: Create Curator Jira
// val compressedOpt = request.compressed match {
// case false => builder
// case true => builder.compressed
// }
// val parentOpt = createParents match {
// case false => compressedOpt
// case true => compressedOpt.creatingParentsIfNeeded
// }
val compressedAndParentOpt = (request.compressed, createParents) match {
case (false, false) => builder
case (true, false) => builder.compressed
case (false, true) => builder.creatingParentsIfNeeded
case (true, true) => builder.compressed.creatingParentsIfNeeded
}
val modeOpt = compressedAndParentOpt.withMode(request.mode)
val aclOpt = request.acl match {
case None => modeOpt
case Some(a) => modeOpt.withACL(a.asJava)
}
val path = request.data match {
case None => blocking { aclOpt.forPath(request.path, null) } // scalastyle:ignore
case Some(d) => blocking { aclOpt.forPath(request.path, d) }
}
CreateResponse(path)
}
}
/**
* Delete a node with the given DeleteRequest
*
* @param request the DeleteRequest to use
* @param deleteChildren specify if children nodes should be deleted
* @return Future[DeleteResponse]
*/
def delete(request: DeleteRequest, deleteChildren: Boolean = false)(implicit executor: ExecutionContext): Future[DeleteResponse] = {
Future {
val builder = underlying.delete()
val deleteChildrenOpt = deleteChildren match {
case false => builder
case true => builder.deletingChildrenIfNeeded
}
val versionOpt = request.version match {
case None => deleteChildrenOpt
case Some(v) => deleteChildrenOpt.withVersion(v)
}
blocking { versionOpt.forPath(request.path) }
DeleteResponse(request.path)
}
}
/**
* Check if a node exists with the given ExistsRequest
*
* @param request the ExistsRequest to use
* @return Future[ExistsResponse]
*/
def exists(request: ExistsRequest)(implicit executor: ExecutionContext): Future[ExistsResponse] = {
Future {
val builder = underlying.checkExists
val (watchOpt, watcher) = request.watch match {
case false => (builder, None)
case true =>
val w = Some(new Watcher())
(builder.usingWatcher(w.get), w)
}
val result = blocking { watchOpt.forPath(request.path) }
val statOpt = Option(result)
ExistsResponse(request.path, statOpt.isDefined, statOpt, watcher)
}
}
/**
* Get a node's ACL with the given GetACLRequest
*
* @param request the GetACLRequest to use
* @return Future[GetACLResponse]
*/
def getAcl(request: GetACLRequest)(implicit executor: ExecutionContext): Future[GetACLResponse] = {
Future {
val builder = underlying.getACL
val (statOpt, stat) = {
val s = new Stat()
(builder.storingStatIn(s), s)
}
val acl = blocking { statOpt.forPath(request.path) }
GetACLResponse(request.path, acl.asScala, stat)
}
}
/**
* Get a node's child nodes with the given GetChildrenRequest
*
* The list of children returned is not sorted and no guarantee is provided
* as to its natural or lexical order.
*
* @param request the GetChildrenRequest to use
* @return Future[GetChildrenResponse]
*/
def getChildren(request: GetChildrenRequest)(implicit executor: ExecutionContext): Future[GetChildrenResponse] = {
Future {
val builder = underlying.getChildren
val (statOpt, stat) = {
val s = new Stat()
(builder.storingStatIn(s), s)
}
val (watchOpt, watcher) = request.watch match {
case false => (statOpt, None)
case true =>
val w = Some(new Watcher())
(statOpt.usingWatcher(w.get), w)
}
val children = blocking { watchOpt.forPath(request.path) }
GetChildrenResponse(request.path, children.asScala, stat, watcher)
}
}
/**
* Get a node's data with the given GetDataRequest
*
* @param request the GetDataRequest to use
* @return Future[GetDataResponse]
*/
def getData[T](request: GetDataRequest)(implicit executor: ExecutionContext): Future[GetDataResponse] = {
Future {
val builder = underlying.getData
val decompressedOpt = request.decompressed match {
case false => builder
case true => builder.decompressed
}
val (statOpt, stat) = {
val s = new Stat()
(decompressedOpt.storingStatIn(s), s)
}
val (watchOpt, watcher) = request.watch match {
case false => (statOpt, None)
case true =>
val w = Some(new Watcher())
(statOpt.usingWatcher(w.get), w)
}
val data = blocking { watchOpt.forPath(request.path) }
GetDataResponse(request.path, Option(data), stat, watcher)
}
}
/**
* Set a node's ACL with the given SetACLRequest
*
* @param request the SetACLRequest to use
* @return Future[SetACLResponse]
*/
def setAcl(request: SetACLRequest)(implicit executor: ExecutionContext): Future[SetACLResponse] = {
Future {
val builder = underlying.setACL()
val versionOpt = request.version match {
case None => builder
case Some(v) => builder.withVersion(v)
}
val stat = blocking { versionOpt.withACL(request.acl.asJava).forPath(request.path) }
SetACLResponse(request.path, stat)
}
}
/**
* Set a node's data with the given SetDataRequest
*
* @param request the SetDataRequest to use
* @return Future[SetDataResponse]
*/
def setData(request: SetDataRequest)(implicit executor: ExecutionContext): Future[SetDataResponse] = {
Future {
val builder = underlying.setData()
val compressedOpt = request.compressed match {
case false => builder
case true => builder.compressed()
}
val versionOpt = request.version match {
case None => compressedOpt
case Some(v) => compressedOpt.withVersion(v)
}
val stat = request.data match {
case None => blocking { versionOpt.forPath(request.path, null) } // scalastyle:ignore
case Some(d) => blocking { versionOpt.forPath(request.path, d) }
}
SetDataResponse(request.path, stat)
}
}
/**
* Get a node's stat object with the given StatRequest
*
* @param request the StatRequest to use
* @return Future[StatResponse]
*/
def stat(request: StatRequest)(implicit executor: ExecutionContext): Future[StatResponse] = {
exists(ExistsRequest(request.path)).map { response =>
response.stat.map { s =>
StatResponse(request.path, stat = s)
}.getOrElse(throw new NoNodeException(request.path))
}
}
/**
* Sets a watch on a node
*
* @param request the WatchRequest to use
* @return Future[WatchResponse]
*/
def watch(request: WatchRequest)(implicit executor: ExecutionContext): Future[WatchResponse] = {
exists(ExistsRequest(request.path, watch = true)).map { response =>
response.watch.map { w =>
WatchResponse(request.path, watch = w)
}.getOrElse(throw new NoNodeException(request.path))
}
}
/**
* Flushes channel between process and leader
*
* @param request the SyncRequest to use
* @return Future[SyncResponse]
*/
def sync(request: SyncRequest)(implicit executor: ExecutionContext): Future[SyncResponse] = {
Future {
blocking { underlying.sync().forPath(request.path) }
SyncResponse(request.path)
}
}
/**
* Executes multiple ZooKeeper operations as a single TransactionRequest
*
* On success, a list of OpResponse is returned
* On failure, none of the ops are completed, and the Future is failed with the exception that caused the failure
*
* @param ops a Sequence of OpRequests
* @return `Future[Seq[OpResponse]]`
*/
def transaction(ops: Seq[OpRequest])(implicit executor: ExecutionContext): Future[Seq[OpResponse]] = {
Future {
val request = TransactionRequest(ops)
val transaction = Transactions.toCuratorTransaction(underlying.inTransaction(), request)
val results = blocking { transaction.commit() }
val response = Transactions.fromCuratorTransactionResults(results.asScala.toSeq)
response.ops
}
}
/**
* Clear internal references to watchers that may inhibit garbage collection.
* Call this method on watchers you are no longer interested in.
*
* @param watch the watcher reference to clear
*/
def clearWatch(watch: Watcher)(implicit executor: ExecutionContext): Unit = {
watch.close()
underlying.clearWatcherReferences(watch)
}
}
object SCuratorClient {
/**
* Creates a SCuratorClient by wrapping the passed CuratorFramework.
*
* If autoStart is true. The client will be started if it has not been started already.
*
* @param underlying the CuratorFramework to wrap
* @param autoStart true if client should be started automatically
* @return
*/
def apply(underlying: CuratorFramework, autoStart: Boolean = false): SCuratorClient = {
val client = new SCuratorClient(underlying)
if (client.state == CuratorFrameworkState.LATENT && autoStart) {
client.start()
}
client
}
object Implicits {
import scala.language.implicitConversions
implicit def curatorToSCurator(underlying: CuratorFramework): SCuratorClient = {
new SCuratorClient(underlying)
}
}
}
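/*
 * Hypothetical usage sketch (illustration only): the connect string and znode path are
 * made-up examples, and CuratorFrameworkFactory/ExponentialBackoffRetry are assumed from
 * Apache Curator; the SCuratorClient calls are the ones defined above.
 *
 *   import scala.concurrent.ExecutionContext.Implicits.global
 *   import scala.concurrent.duration._
 *   import org.apache.curator.framework.CuratorFrameworkFactory
 *   import org.apache.curator.retry.ExponentialBackoffRetry
 *
 *   val curator = CuratorFrameworkFactory.newClient("localhost:2181", new ExponentialBackoffRetry(1000, 3))
 *   val client  = SCuratorClient(curator, autoStart = true)
 *   val resultF = for {
 *     _     <- client.connect(maxWait = 10.seconds)
 *     found <- client.exists(ExistsRequest("/example"))
 *     stat  <- client.stat(StatRequest("/example"))
 *   } yield (found.stat.isDefined, stat.stat.getVersion)
 */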
| granthenke/scurator | src/main/scala/org/scurator/SCuratorClient.scala | Scala | apache-2.0 | 12,074 |
package model.dtos
case class ConsultationsPerMonth(date: String, numberOfConsultations: Int, cons_ids: String)
| scify/DemocracIT-Web | app/model/dtos/ConsultationsPerMonth.scala | Scala | apache-2.0 | 112 |
package dit4c.scheduler.domain
import akka.actor._
object Cluster {
def props(clusterInfo: Option[ClusterInfo], defaultConfigProvider: ConfigProvider): Props =
Props(classOf[Cluster], clusterInfo, defaultConfigProvider)
trait Command extends BaseCommand
case object GetState extends Command
trait Response extends BaseResponse
trait GetStateResponse extends Response
case object Uninitialized extends GetStateResponse
case class Inactive(
id: String,
displayName: String) extends GetStateResponse
case class Active(
id: String,
displayName: String,
supportsSave: Boolean) extends GetStateResponse
}
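/*
 * Hypothetical usage sketch (illustration only): `system` (an ActorSystem), `info`
 * (a ClusterInfo) and `config` (a ConfigProvider) are assumed to exist.
 *
 *   val cluster = system.actorOf(Cluster.props(Some(info), config), "default")
 *   cluster ! Cluster.GetState  // replies with Uninitialized, Inactive(...) or Active(...)
 */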
class Cluster(
clusterInfo: Option[ClusterInfo],
configProvider: ConfigProvider)
extends Actor
with ActorLogging {
import Cluster._
lazy val clusterId = self.path.name
override def receive: Receive = clusterInfo match {
case None =>
{
case GetState => sender ! Uninitialized
}
case Some(info) if !info.active =>
{
case GetState =>
sender ! Inactive(
clusterId,
info.displayName)
}
case Some(info) =>
{
case GetState =>
sender ! Active(
clusterId,
info.displayName,
info.supportsSave)
case msg =>
clusterManager(info) forward msg
}
}
protected def clusterManager(clusterInfo: ClusterInfo) =
context.child(managerActorId) match {
case None =>
val manager: ActorRef =
context.actorOf(
managerProps,
managerActorId)
context.watch(manager)
manager
case Some(ref) => ref
}
private val managerActorId = "manager"
private val managerProps =
RktClusterManager.props(
clusterId, configProvider)(context.dispatcher)
} | dit4c/dit4c | dit4c-scheduler/src/main/scala/dit4c/scheduler/domain/Cluster.scala | Scala | mit | 1,862 |
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
import std._
import xsbt.api.{Discovered,Discovery}
import inc.Analysis
import TaskExtra._
import Types._
import xsbti.api.Definition
import ConcurrentRestrictions.Tag
import org.scalatools.testing.{AnnotatedFingerprint, Fingerprint, Framework, SubclassFingerprint}
import collection.mutable
import java.io.File
sealed trait TestOption
object Tests
{
// (overall result, individual results)
type Output = (TestResult.Value, Map[String,TestResult.Value])
final case class Setup(setup: ClassLoader => Unit) extends TestOption
def Setup(setup: () => Unit) = new Setup(_ => setup())
final case class Cleanup(cleanup: ClassLoader => Unit) extends TestOption
def Cleanup(setup: () => Unit) = new Cleanup(_ => setup())
final case class Exclude(tests: Iterable[String]) extends TestOption
final case class Listeners(listeners: Iterable[TestReportListener]) extends TestOption
final case class Filter(filterTest: String => Boolean) extends TestOption
// args for all frameworks
def Argument(args: String*): Argument = Argument(None, args.toList)
// args for a particular test framework
def Argument(tf: TestFramework, args: String*): Argument = Argument(Some(tf), args.toList)
// None means apply to all, Some(tf) means apply to a particular framework only.
final case class Argument(framework: Option[TestFramework], args: List[String]) extends TestOption
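	/*
	 * Hypothetical usage sketch (illustration only): the filter pattern, excluded class name
	 * and framework arguments below are made-up examples of combining TestOptions.
	 *
	 *   val options: Seq[TestOption] = Seq(
	 *     Tests.Filter(name => name.endsWith("Spec")),
	 *     Tests.Exclude(Seq("org.example.SlowSpec")),
	 *     Tests.Argument("-verbosity", "1"))
	 */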
final class Execution(val options: Seq[TestOption], val parallel: Boolean, val tags: Seq[(Tag, Int)])
def apply(frameworks: Map[TestFramework, Framework], testLoader: ClassLoader, discovered: Seq[TestDefinition], options: Seq[TestOption], parallel: Boolean, noTestsMessage: => String, log: Logger): Task[Output] =
apply(frameworks, testLoader, discovered, new Execution(options, parallel, Nil), noTestsMessage, log)
def apply(frameworks: Map[TestFramework, Framework], testLoader: ClassLoader, discovered: Seq[TestDefinition], config: Execution, noTestsMessage: => String, log: Logger): Task[Output] =
{
import mutable.{HashSet, ListBuffer, Map, Set}
val testFilters = new ListBuffer[String => Boolean]
val excludeTestsSet = new HashSet[String]
val setup, cleanup = new ListBuffer[ClassLoader => Unit]
val testListeners = new ListBuffer[TestReportListener]
val testArgsByFramework = Map[Framework, ListBuffer[String]]()
val undefinedFrameworks = new ListBuffer[String]
def frameworkArgs(framework: Framework, args: Seq[String]): Unit =
testArgsByFramework.getOrElseUpdate(framework, new ListBuffer[String]) ++= args
def frameworkArguments(framework: TestFramework, args: Seq[String]): Unit =
(frameworks get framework) match {
case Some(f) => frameworkArgs(f, args)
case None => undefinedFrameworks += framework.implClassName
}
for(option <- config.options)
{
option match
{
case Filter(include) => testFilters += include
case Exclude(exclude) => excludeTestsSet ++= exclude
case Listeners(listeners) => testListeners ++= listeners
case Setup(setupFunction) => setup += setupFunction
case Cleanup(cleanupFunction) => cleanup += cleanupFunction
/**
* There are two cases here.
* The first handles TestArguments in the project file, which
* might have a TestFramework specified.
* The second handles arguments to be applied to all test frameworks.
				 * -- arguments from the project file that didn't have a framework specified
* -- command line arguments (ex: test-only someClass -- someArg)
* (currently, command line args must be passed to all frameworks)
*/
case Argument(Some(framework), args) => frameworkArguments(framework, args)
case Argument(None, args) => frameworks.values.foreach { f => frameworkArgs(f, args) }
}
}
if(excludeTestsSet.size > 0)
log.debug(excludeTestsSet.mkString("Excluding tests: \n\t", "\n\t", ""))
if(undefinedFrameworks.size > 0)
log.warn("Arguments defined for test frameworks that are not present:\n\t" + undefinedFrameworks.mkString("\n\t"))
def includeTest(test: TestDefinition) = !excludeTestsSet.contains(test.name) && testFilters.forall(filter => filter(test.name))
val tests = discovered.filter(includeTest).toSet.toSeq
val arguments = testArgsByFramework.map { case (k,v) => (k, v.toList) } toMap;
testTask(frameworks.values.toSeq, testLoader, tests, noTestsMessage, setup.readOnly, cleanup.readOnly, log, testListeners.readOnly, arguments, config)
}
def testTask(frameworks: Seq[Framework], loader: ClassLoader, tests: Seq[TestDefinition], noTestsMessage: => String,
userSetup: Iterable[ClassLoader => Unit], userCleanup: Iterable[ClassLoader => Unit],
log: Logger, testListeners: Seq[TestReportListener], arguments: Map[Framework, Seq[String]], config: Execution): Task[Output] =
{
def fj(actions: Iterable[() => Unit]): Task[Unit] = nop.dependsOn( actions.toSeq.fork( _() ) : _*)
def partApp(actions: Iterable[ClassLoader => Unit]) = actions.toSeq map {a => () => a(loader) }
val (frameworkSetup, runnables, frameworkCleanup) =
TestFramework.testTasks(frameworks, loader, tests, noTestsMessage, log, testListeners, arguments)
val setupTasks = fj(partApp(userSetup) :+ frameworkSetup)
val mainTasks =
if(config.parallel)
makeParallel(runnables, setupTasks, config.tags).toSeq.join
else
makeSerial(runnables, setupTasks, config.tags)
val taggedMainTasks = mainTasks.tagw(config.tags : _*)
taggedMainTasks map processResults flatMap { results =>
val cleanupTasks = fj(partApp(userCleanup) :+ frameworkCleanup(results._1))
cleanupTasks map { _ => results }
}
}
type TestRunnable = (String, () => TestResult.Value)
def makeParallel(runnables: Iterable[TestRunnable], setupTasks: Task[Unit], tags: Seq[(Tag,Int)]) =
runnables map { case (name, test) => task { (name, test()) } dependsOn setupTasks named name tagw(tags : _*) }
def makeSerial(runnables: Iterable[TestRunnable], setupTasks: Task[Unit], tags: Seq[(Tag,Int)]) =
task { runnables map { case (name, test) => (name, test()) } } dependsOn(setupTasks)
def processResults(results: Iterable[(String, TestResult.Value)]): (TestResult.Value, Map[String, TestResult.Value]) =
(overall(results.map(_._2)), results.toMap)
def overall(results: Iterable[TestResult.Value]): TestResult.Value =
(TestResult.Passed /: results) { (acc, result) => if(acc.id < result.id) result else acc }
def discover(frameworks: Seq[Framework], analysis: Analysis, log: Logger): (Seq[TestDefinition], Set[String]) =
discover(frameworks flatMap TestFramework.getTests, allDefs(analysis), log)
def allDefs(analysis: Analysis) = analysis.apis.internal.values.flatMap(_.api.definitions).toSeq
def discover(fingerprints: Seq[Fingerprint], definitions: Seq[Definition], log: Logger): (Seq[TestDefinition], Set[String]) =
{
val subclasses = fingerprints collect { case sub: SubclassFingerprint => (sub.superClassName, sub.isModule, sub) };
val annotations = fingerprints collect { case ann: AnnotatedFingerprint => (ann.annotationName, ann.isModule, ann) };
log.debug("Subclass fingerprints: " + subclasses)
log.debug("Annotation fingerprints: " + annotations)
def firsts[A,B,C](s: Seq[(A,B,C)]): Set[A] = s.map(_._1).toSet
def defined(in: Seq[(String,Boolean,Fingerprint)], names: Set[String], IsModule: Boolean): Seq[Fingerprint] =
in collect { case (name, IsModule, print) if names(name) => print }
def toFingerprints(d: Discovered): Seq[Fingerprint] =
defined(subclasses, d.baseClasses, d.isModule) ++
defined(annotations, d.annotations, d.isModule)
val discovered = Discovery(firsts(subclasses), firsts(annotations))(definitions)
val tests = for( (df, di) <- discovered; fingerprint <- toFingerprints(di) ) yield new TestDefinition(df.name, fingerprint)
val mains = discovered collect { case (df, di) if di.hasMain => df.name }
(tests, mains.toSet)
}
def showResults(log: Logger, results: (TestResult.Value, Map[String, TestResult.Value])): Unit =
{
import TestResult.{Error, Failed, Passed}
def select(Tpe: TestResult.Value) = results._2 collect { case (name, Tpe) => name }
val failures = select(Failed)
val errors = select(Error)
val passed = select(Passed)
def show(label: String, level: Level.Value, tests: Iterable[String]): Unit =
if(!tests.isEmpty)
{
log.log(level, label)
log.log(level, tests.mkString("\t", "\n\t", ""))
}
show("Passed tests:", Level.Debug, passed )
show("Failed tests:", Level.Error, failures)
show("Error during tests:", Level.Error, errors)
if(!failures.isEmpty || !errors.isEmpty)
error("Tests unsuccessful")
}
} | kuochaoyi/xsbt | main/actions/Tests.scala | Scala | bsd-3-clause | 8,679 |
package scala.tools.nsc
package interactive
package tests.core
import scala.reflect.internal.util.Position
/** Set of core test definitions that are executed for each test run. */
private[tests] trait CoreTestDefs
extends PresentationCompilerRequestsWorkingMode {
import scala.tools.nsc.interactive.Global
/** Ask the presentation compiler for completion at all locations
* (in all sources) where the defined `marker` is found. */
class TypeCompletionAction(override val compiler: Global)
extends PresentationCompilerTestDef
with AskTypeCompletionAt {
override def runTest() {
askAllSources(TypeCompletionMarker) { pos =>
askTypeCompletionAt(pos)
} { (pos, members) =>
withResponseDelimiter {
reporter.println("[response] askTypeCompletion at " + format(pos))
// we skip getClass because it changed signature between 1.5 and 1.6, so there is no
// universal check file that we can provide for this to work
reporter.println("retrieved %d members".format(members.size))
compiler ask { () =>
val filtered = members.filterNot(member => (member.sym.name string_== "getClass") || member.sym.isConstructor)
            reporter println (filtered.map(_.forceInfoString).sorted mkString "\n")
}
}
}
}
}
/** Ask the presentation compiler for completion at all locations
* (in all sources) where the defined `marker` is found. */
class ScopeCompletionAction(override val compiler: Global)
extends PresentationCompilerTestDef
with AskScopeCompletionAt {
override def runTest() {
askAllSources(ScopeCompletionMarker) { pos =>
askScopeCompletionAt(pos)
} { (pos, members) =>
withResponseDelimiter {
reporter.println("[response] askScopeCompletion at " + format(pos))
try {
// exclude members not from source (don't have position), for more focused and self contained tests.
def eligible(sym: compiler.Symbol) = sym.pos != compiler.NoPosition
val filtered = members.filter(member => eligible(member.sym))
reporter.println("retrieved %d members".format(filtered.size))
compiler ask { () =>
              reporter.println(filtered.map(_.forceInfoString).sorted mkString "\n")
}
} catch {
case t: Throwable =>
t.printStackTrace()
}
}
}
}
}
/** Ask the presentation compiler for type info at all locations
* (in all sources) where the defined `marker` is found. */
class TypeAction(override val compiler: Global)
extends PresentationCompilerTestDef
with AskTypeAt {
override def runTest() {
askAllSources(TypeMarker) { pos =>
askTypeAt(pos)
} { (pos, tree) =>
withResponseDelimiter {
reporter.println("[response] askTypeAt " + format(pos))
compiler.ask(() => reporter.println(tree))
}
}
}
}
/** Ask the presentation compiler for hyperlink at all locations
* (in all sources) where the defined `marker` is found. */
class HyperlinkAction(override val compiler: Global)
extends PresentationCompilerTestDef
with AskTypeAt
with AskTypeCompletionAt {
override def runTest() {
askAllSources(HyperlinkMarker) { pos =>
askTypeAt(pos)(NullReporter)
} { (pos, tree) =>
if(tree.symbol == compiler.NoSymbol || tree.symbol == null) {
reporter.println("\\nNo symbol is associated with tree: "+tree)
}
else {
reporter.println("\\naskHyperlinkPos for `" + tree.symbol.name + "` at " + format(pos) + " " + pos.source.file.name)
val r = new Response[Position]
// `tree.symbol.sourceFile` was discovered to be null when testing using virtpatmat on the akka presentation test, where a position had shifted to point to `Int`
// askHyperlinkPos for `Int` at (73,19) pi.scala --> class Int in package scala has null sourceFile!
val treePath = if (tree.symbol.sourceFile ne null) tree.symbol.sourceFile.path else null
val treeName = if (tree.symbol.sourceFile ne null) tree.symbol.sourceFile.name else null
sourceFiles.find(_.path == treePath) match {
case Some(source) =>
compiler.askLinkPos(tree.symbol, source, r)
r.get match {
case Left(pos) =>
val resolvedPos = if (tree.symbol.pos.isDefined) tree.symbol.pos else pos
withResponseDelimiter {
reporter.println("[response] found askHyperlinkPos for `" + tree.symbol.name + "` at " + format(resolvedPos) + " " + tree.symbol.sourceFile.name)
}
case Right(ex) =>
ex.printStackTrace()
}
case None =>
reporter.println("[error] could not locate sourcefile `" + treeName + "`." +
"Hint: Does the looked up definition come form a binary?")
}
}
}
}
}
}
| felixmulder/scala | src/interactive/scala/tools/nsc/interactive/tests/core/CoreTestDefs.scala | Scala | bsd-3-clause | 5,109 |
package napplelabs.swish.example
import napplelabs.swish._
object Simple {
def main(args: Array[String]) = {
val sc = new ServerConfig(user = "zkim")
Swish.withServer(sc) {
conn =>
val commandResponse = conn.exec("ls -aul")
println("Exit Value: " + commandResponse.exitValue)
println("Output: " + commandResponse.output)
}
}
}
| zk/swish-scala | src/test/scala/napplelabs/swish/example/Simple.scala | Scala | mit | 412 |