/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql

import scala.collection.JavaConverters._
import scala.reflect.runtime.universe.TypeTag
import scala.util.Try

import org.apache.spark.annotation.Stable
import org.apache.spark.sql.api.java._
import org.apache.spark.sql.catalyst.ScalaReflection
import org.apache.spark.sql.catalyst.analysis.{Star, UnresolvedFunction}
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.logical.{BROADCAST, HintInfo, ResolvedHint}
import org.apache.spark.sql.catalyst.util.{CharVarcharUtils, TimestampFormatter}
import org.apache.spark.sql.execution.SparkSqlParser
import org.apache.spark.sql.expressions.{Aggregator, SparkUserDefinedFunction, UserDefinedAggregator, UserDefinedFunction}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.sql.types.DataType.parseTypeWithFallback
import org.apache.spark.util.Utils

/**
 * Commonly used functions available for DataFrame operations. Using functions defined here
 * provides a little bit more compile-time safety to make sure the function exists.
 *
 * Spark also includes more built-in functions that are less common and are not defined here.
 * You can still access them (and all the functions defined here) using the `functions.expr()` API
 * and calling them through a SQL expression string. You can find the entire list of functions
 * at SQL API documentation.
 *
 * As an example, `isnan` is a function that is defined here. You can use `isnan(col("myCol"))`
 * to invoke the `isnan` function. This way the programming language's compiler ensures `isnan`
 * exists and is of the proper form. You can also use `expr("isnan(myCol)")` to invoke the
 * same function. In this case, Spark itself will ensure `isnan` exists when it analyzes the query.
 *
 * `regr_count` is an example of a function that is built-in but not defined here, because it is
 * less commonly used. To invoke it, use `expr("regr_count(yCol, xCol)")`.
 *
 * These function APIs usually have methods with a `Column` signature only, because a
 * `Column`-based method can support not only `Column` arguments but also other types such as a
 * native string. The other variants currently exist for historical reasons.
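// Illustrative sketch (not part of the original source): the two invocation styles described
// above, assuming an existing SparkSession named `spark` and a hypothetical column "myCol".
import org.apache.spark.sql.functions._
import spark.implicits._

val df = Seq(1.0, Double.NaN).toDF("myCol")
df.select(isnan(col("myCol"))).show()   // reference checked by the Scala compiler
df.select(expr("isnan(myCol)")).show()  // same function, resolved by Spark's analyzer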
* * @groupname udf_funcs UDF functions * @groupname agg_funcs Aggregate functions * @groupname datetime_funcs Date time functions * @groupname sort_funcs Sorting functions * @groupname normal_funcs Non-aggregate functions * @groupname math_funcs Math functions * @groupname misc_funcs Misc functions * @groupname window_funcs Window functions * @groupname string_funcs String functions * @groupname collection_funcs Collection functions * @groupname partition_transforms Partition transform functions * @groupname Ungrouped Support functions for DataFrames * @since 1.3.0 */ @Stable // scalastyle:off object functions { // scalastyle:on private def withExpr(expr: Expression): Column = Column(expr) private def withAggregateFunction( func: AggregateFunction, isDistinct: Boolean = false): Column = { Column(func.toAggregateExpression(isDistinct)) } /** * Returns a [[Column]] based on the given column name. * * @group normal_funcs * @since 1.3.0 */ def col(colName: String): Column = Column(colName) /** * Returns a [[Column]] based on the given column name. Alias of [[col]]. * * @group normal_funcs * @since 1.3.0 */ def column(colName: String): Column = Column(colName) /** * Creates a [[Column]] of literal value. * * The passed in object is returned directly if it is already a [[Column]]. * If the object is a Scala Symbol, it is converted into a [[Column]] also. * Otherwise, a new [[Column]] is created to represent the literal value. * * @group normal_funcs * @since 1.3.0 */ def lit(literal: Any): Column = typedLit(literal) /** * Creates a [[Column]] of literal value. * * An alias of `typedlit`, and it is encouraged to use `typedlit` directly. * * @group normal_funcs * @since 2.2.0 */ def typedLit[T : TypeTag](literal: T): Column = typedlit(literal) /** * Creates a [[Column]] of literal value. * * The passed in object is returned directly if it is already a [[Column]]. * If the object is a Scala Symbol, it is converted into a [[Column]] also. * Otherwise, a new [[Column]] is created to represent the literal value. * The difference between this function and [[lit]] is that this function * can handle parameterized scala types e.g.: List, Seq and Map. * * @group normal_funcs * @since 3.2.0 */ def typedlit[T : TypeTag](literal: T): Column = literal match { case c: Column => c case s: Symbol => new ColumnName(s.name) case _ => Column(Literal.create(literal)) } ////////////////////////////////////////////////////////////////////////////////////////////// // Sort functions ////////////////////////////////////////////////////////////////////////////////////////////// /** * Returns a sort expression based on ascending order of the column. * {{{ * df.sort(asc("dept"), desc("age")) * }}} * * @group sort_funcs * @since 1.3.0 */ def asc(columnName: String): Column = Column(columnName).asc /** * Returns a sort expression based on ascending order of the column, * and null values return before non-null values. * {{{ * df.sort(asc_nulls_first("dept"), desc("age")) * }}} * * @group sort_funcs * @since 2.1.0 */ def asc_nulls_first(columnName: String): Column = Column(columnName).asc_nulls_first /** * Returns a sort expression based on ascending order of the column, * and null values appear after non-null values. * {{{ * df.sort(asc_nulls_last("dept"), desc("age")) * }}} * * @group sort_funcs * @since 2.1.0 */ def asc_nulls_last(columnName: String): Column = Column(columnName).asc_nulls_last /** * Returns a sort expression based on the descending order of the column. 
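// Illustrative sketch (not in the original file): sort helpers and literal columns, assuming an
// existing SparkSession `spark` and hypothetical columns "dept" and "age".
import org.apache.spark.sql.functions._
import spark.implicits._

val people = Seq(("eng", 30), ("sales", 25)).toDF("dept", "age")
people.sort(asc_nulls_first("dept"), desc("age")).show()
// lit/typedlit wrap Scala values as Columns; typedlit also handles parameterized types.
people.select($"dept", lit(1).as("one"), typedlit(Seq(1, 2, 3)).as("xs")).show()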
* {{{ * df.sort(asc("dept"), desc("age")) * }}} * * @group sort_funcs * @since 1.3.0 */ def desc(columnName: String): Column = Column(columnName).desc /** * Returns a sort expression based on the descending order of the column, * and null values appear before non-null values. * {{{ * df.sort(asc("dept"), desc_nulls_first("age")) * }}} * * @group sort_funcs * @since 2.1.0 */ def desc_nulls_first(columnName: String): Column = Column(columnName).desc_nulls_first /** * Returns a sort expression based on the descending order of the column, * and null values appear after non-null values. * {{{ * df.sort(asc("dept"), desc_nulls_last("age")) * }}} * * @group sort_funcs * @since 2.1.0 */ def desc_nulls_last(columnName: String): Column = Column(columnName).desc_nulls_last ////////////////////////////////////////////////////////////////////////////////////////////// // Aggregate functions ////////////////////////////////////////////////////////////////////////////////////////////// /** * @group agg_funcs * @since 1.3.0 */ @deprecated("Use approx_count_distinct", "2.1.0") def approxCountDistinct(e: Column): Column = approx_count_distinct(e) /** * @group agg_funcs * @since 1.3.0 */ @deprecated("Use approx_count_distinct", "2.1.0") def approxCountDistinct(columnName: String): Column = approx_count_distinct(columnName) /** * @group agg_funcs * @since 1.3.0 */ @deprecated("Use approx_count_distinct", "2.1.0") def approxCountDistinct(e: Column, rsd: Double): Column = approx_count_distinct(e, rsd) /** * @group agg_funcs * @since 1.3.0 */ @deprecated("Use approx_count_distinct", "2.1.0") def approxCountDistinct(columnName: String, rsd: Double): Column = { approx_count_distinct(Column(columnName), rsd) } /** * Aggregate function: returns the approximate number of distinct items in a group. * * @group agg_funcs * @since 2.1.0 */ def approx_count_distinct(e: Column): Column = withAggregateFunction { HyperLogLogPlusPlus(e.expr) } /** * Aggregate function: returns the approximate number of distinct items in a group. * * @group agg_funcs * @since 2.1.0 */ def approx_count_distinct(columnName: String): Column = approx_count_distinct(column(columnName)) /** * Aggregate function: returns the approximate number of distinct items in a group. * * @param rsd maximum relative standard deviation allowed (default = 0.05) * * @group agg_funcs * @since 2.1.0 */ def approx_count_distinct(e: Column, rsd: Double): Column = withAggregateFunction { HyperLogLogPlusPlus(e.expr, rsd, 0, 0) } /** * Aggregate function: returns the approximate number of distinct items in a group. * * @param rsd maximum relative standard deviation allowed (default = 0.05) * * @group agg_funcs * @since 2.1.0 */ def approx_count_distinct(columnName: String, rsd: Double): Column = { approx_count_distinct(Column(columnName), rsd) } /** * Aggregate function: returns the average of the values in a group. * * @group agg_funcs * @since 1.3.0 */ def avg(e: Column): Column = withAggregateFunction { Average(e.expr) } /** * Aggregate function: returns the average of the values in a group. * * @group agg_funcs * @since 1.3.0 */ def avg(columnName: String): Column = avg(Column(columnName)) /** * Aggregate function: returns a list of objects with duplicates. * * @note The function is non-deterministic because the order of collected results depends * on the order of the rows which may be non-deterministic after a shuffle. 
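// Illustrative sketch (not in the original file): approximate distinct counts and averages over a
// hypothetical "events" DataFrame; `spark` is assumed to be an existing SparkSession.
import org.apache.spark.sql.functions._
import spark.implicits._

val events = Seq(("u1", 10), ("u2", 20), ("u1", 30)).toDF("user", "value")
events.agg(
  approx_count_distinct($"user").as("approx_users"),        // default rsd = 0.05
  approx_count_distinct($"user", 0.01).as("tighter_bound"), // smaller relative error, more memory
  avg($"value").as("avg_value")
).show()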
* * @group agg_funcs * @since 1.6.0 */ def collect_list(e: Column): Column = withAggregateFunction { CollectList(e.expr) } /** * Aggregate function: returns a list of objects with duplicates. * * @note The function is non-deterministic because the order of collected results depends * on the order of the rows which may be non-deterministic after a shuffle. * * @group agg_funcs * @since 1.6.0 */ def collect_list(columnName: String): Column = collect_list(Column(columnName)) /** * Aggregate function: returns a set of objects with duplicate elements eliminated. * * @note The function is non-deterministic because the order of collected results depends * on the order of the rows which may be non-deterministic after a shuffle. * * @group agg_funcs * @since 1.6.0 */ def collect_set(e: Column): Column = withAggregateFunction { CollectSet(e.expr) } /** * Aggregate function: returns a set of objects with duplicate elements eliminated. * * @note The function is non-deterministic because the order of collected results depends * on the order of the rows which may be non-deterministic after a shuffle. * * @group agg_funcs * @since 1.6.0 */ def collect_set(columnName: String): Column = collect_set(Column(columnName)) /** * Aggregate function: returns the Pearson Correlation Coefficient for two columns. * * @group agg_funcs * @since 1.6.0 */ def corr(column1: Column, column2: Column): Column = withAggregateFunction { Corr(column1.expr, column2.expr) } /** * Aggregate function: returns the Pearson Correlation Coefficient for two columns. * * @group agg_funcs * @since 1.6.0 */ def corr(columnName1: String, columnName2: String): Column = { corr(Column(columnName1), Column(columnName2)) } /** * Aggregate function: returns the number of items in a group. * * @group agg_funcs * @since 1.3.0 */ def count(e: Column): Column = withAggregateFunction { e.expr match { // Turn count(*) into count(1) case s: Star => Count(Literal(1)) case _ => Count(e.expr) } } /** * Aggregate function: returns the number of items in a group. * * @group agg_funcs * @since 1.3.0 */ def count(columnName: String): TypedColumn[Any, Long] = count(Column(columnName)).as(ExpressionEncoder[Long]()) /** * Aggregate function: returns the number of distinct items in a group. * * An alias of `count_distinct`, and it is encouraged to use `count_distinct` directly. * * @group agg_funcs * @since 1.3.0 */ @scala.annotation.varargs def countDistinct(expr: Column, exprs: Column*): Column = count_distinct(expr, exprs: _*) /** * Aggregate function: returns the number of distinct items in a group. * * An alias of `count_distinct`, and it is encouraged to use `count_distinct` directly. * * @group agg_funcs * @since 1.3.0 */ @scala.annotation.varargs def countDistinct(columnName: String, columnNames: String*): Column = count_distinct(Column(columnName), columnNames.map(Column.apply) : _*) /** * Aggregate function: returns the number of distinct items in a group. * * @group agg_funcs * @since 3.2.0 */ @scala.annotation.varargs def count_distinct(expr: Column, exprs: Column*): Column = // For usage like countDistinct("*"), we should let analyzer expand star and // resolve function. Column(UnresolvedFunction("count", (expr +: exprs).map(_.expr), isDistinct = true)) /** * Aggregate function: returns the population covariance for two columns. 
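// Illustrative sketch (not in the original file): collection aggregates and distinct counts,
// assuming `spark` exists; column names are hypothetical.
import org.apache.spark.sql.functions._
import spark.implicits._

val orders = Seq(("u1", "a"), ("u1", "a"), ("u2", "b")).toDF("user", "item")
orders.groupBy($"user").agg(
  collect_list($"item").as("items_with_dups"),  // keeps duplicates, order not guaranteed
  collect_set($"item").as("distinct_items"),    // duplicates removed
  count_distinct($"item").as("n_distinct")
).show()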
* * @group agg_funcs * @since 2.0.0 */ def covar_pop(column1: Column, column2: Column): Column = withAggregateFunction { CovPopulation(column1.expr, column2.expr) } /** * Aggregate function: returns the population covariance for two columns. * * @group agg_funcs * @since 2.0.0 */ def covar_pop(columnName1: String, columnName2: String): Column = { covar_pop(Column(columnName1), Column(columnName2)) } /** * Aggregate function: returns the sample covariance for two columns. * * @group agg_funcs * @since 2.0.0 */ def covar_samp(column1: Column, column2: Column): Column = withAggregateFunction { CovSample(column1.expr, column2.expr) } /** * Aggregate function: returns the sample covariance for two columns. * * @group agg_funcs * @since 2.0.0 */ def covar_samp(columnName1: String, columnName2: String): Column = { covar_samp(Column(columnName1), Column(columnName2)) } /** * Aggregate function: returns the first value in a group. * * The function by default returns the first values it sees. It will return the first non-null * value it sees when ignoreNulls is set to true. If all values are null, then null is returned. * * @note The function is non-deterministic because its results depends on the order of the rows * which may be non-deterministic after a shuffle. * * @group agg_funcs * @since 2.0.0 */ def first(e: Column, ignoreNulls: Boolean): Column = withAggregateFunction { First(e.expr, ignoreNulls) } /** * Aggregate function: returns the first value of a column in a group. * * The function by default returns the first values it sees. It will return the first non-null * value it sees when ignoreNulls is set to true. If all values are null, then null is returned. * * @note The function is non-deterministic because its results depends on the order of the rows * which may be non-deterministic after a shuffle. * * @group agg_funcs * @since 2.0.0 */ def first(columnName: String, ignoreNulls: Boolean): Column = { first(Column(columnName), ignoreNulls) } /** * Aggregate function: returns the first value in a group. * * The function by default returns the first values it sees. It will return the first non-null * value it sees when ignoreNulls is set to true. If all values are null, then null is returned. * * @note The function is non-deterministic because its results depends on the order of the rows * which may be non-deterministic after a shuffle. * * @group agg_funcs * @since 1.3.0 */ def first(e: Column): Column = first(e, ignoreNulls = false) /** * Aggregate function: returns the first value of a column in a group. * * The function by default returns the first values it sees. It will return the first non-null * value it sees when ignoreNulls is set to true. If all values are null, then null is returned. * * @note The function is non-deterministic because its results depends on the order of the rows * which may be non-deterministic after a shuffle. * * @group agg_funcs * @since 1.3.0 */ def first(columnName: String): Column = first(Column(columnName)) /** * Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated * or not, returns 1 for aggregated or 0 for not aggregated in the result set. * * @group agg_funcs * @since 2.0.0 */ def grouping(e: Column): Column = Column(Grouping(e.expr)) /** * Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated * or not, returns 1 for aggregated or 0 for not aggregated in the result set. 
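// Illustrative sketch (not in the original file): covariance and first/ignoreNulls behaviour on
// hypothetical DataFrames; `spark` is assumed to be in scope.
import org.apache.spark.sql.functions._
import spark.implicits._

val xy = Seq((1.0, 2.0), (2.0, 4.0), (3.0, 6.5)).toDF("x", "y")
xy.agg(covar_pop($"x", $"y"), covar_samp($"x", $"y")).show()

val vals = Seq[(String, Option[Int])](("a", None), ("a", Some(1))).toDF("k", "v")
vals.groupBy($"k").agg(first($"v", ignoreNulls = true).as("first_non_null")).show()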
* * @group agg_funcs * @since 2.0.0 */ def grouping(columnName: String): Column = grouping(Column(columnName)) /** * Aggregate function: returns the level of grouping, equals to * * {{{ * (grouping(c1) <<; (n-1)) + (grouping(c2) <<; (n-2)) + ... + grouping(cn) * }}} * * @note The list of columns should match with grouping columns exactly, or empty (means all the * grouping columns). * * @group agg_funcs * @since 2.0.0 */ def grouping_id(cols: Column*): Column = Column(GroupingID(cols.map(_.expr))) /** * Aggregate function: returns the level of grouping, equals to * * {{{ * (grouping(c1) <<; (n-1)) + (grouping(c2) <<; (n-2)) + ... + grouping(cn) * }}} * * @note The list of columns should match with grouping columns exactly. * * @group agg_funcs * @since 2.0.0 */ def grouping_id(colName: String, colNames: String*): Column = { grouping_id((Seq(colName) ++ colNames).map(n => Column(n)) : _*) } /** * Aggregate function: returns the kurtosis of the values in a group. * * @group agg_funcs * @since 1.6.0 */ def kurtosis(e: Column): Column = withAggregateFunction { Kurtosis(e.expr) } /** * Aggregate function: returns the kurtosis of the values in a group. * * @group agg_funcs * @since 1.6.0 */ def kurtosis(columnName: String): Column = kurtosis(Column(columnName)) /** * Aggregate function: returns the last value in a group. * * The function by default returns the last values it sees. It will return the last non-null * value it sees when ignoreNulls is set to true. If all values are null, then null is returned. * * @note The function is non-deterministic because its results depends on the order of the rows * which may be non-deterministic after a shuffle. * * @group agg_funcs * @since 2.0.0 */ def last(e: Column, ignoreNulls: Boolean): Column = withAggregateFunction { new Last(e.expr, ignoreNulls) } /** * Aggregate function: returns the last value of the column in a group. * * The function by default returns the last values it sees. It will return the last non-null * value it sees when ignoreNulls is set to true. If all values are null, then null is returned. * * @note The function is non-deterministic because its results depends on the order of the rows * which may be non-deterministic after a shuffle. * * @group agg_funcs * @since 2.0.0 */ def last(columnName: String, ignoreNulls: Boolean): Column = { last(Column(columnName), ignoreNulls) } /** * Aggregate function: returns the last value in a group. * * The function by default returns the last values it sees. It will return the last non-null * value it sees when ignoreNulls is set to true. If all values are null, then null is returned. * * @note The function is non-deterministic because its results depends on the order of the rows * which may be non-deterministic after a shuffle. * * @group agg_funcs * @since 1.3.0 */ def last(e: Column): Column = last(e, ignoreNulls = false) /** * Aggregate function: returns the last value of the column in a group. * * The function by default returns the last values it sees. It will return the last non-null * value it sees when ignoreNulls is set to true. If all values are null, then null is returned. * * @note The function is non-deterministic because its results depends on the order of the rows * which may be non-deterministic after a shuffle. * * @group agg_funcs * @since 1.3.0 */ def last(columnName: String): Column = last(Column(columnName), ignoreNulls = false) /** * Aggregate function: returns the maximum value of the expression in a group. 
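// Illustrative sketch (not in the original file): grouping and grouping_id with a cube, assuming
// `spark` and hypothetical columns "city", "channel", "amount".
import org.apache.spark.sql.functions._
import spark.implicits._

val sales = Seq(("NY", "web", 10), ("NY", "store", 5)).toDF("city", "channel", "amount")
sales.cube($"city", $"channel")
  .agg(
    grouping_id().as("gid"),                  // level of grouping across all grouping columns
    grouping($"city").as("city_rolled_up"),   // 1 when the city column is aggregated away
    sum($"amount").as("total"))
  .orderBy($"gid")
  .show()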
* * @group agg_funcs * @since 1.3.0 */ def max(e: Column): Column = withAggregateFunction { Max(e.expr) } /** * Aggregate function: returns the maximum value of the column in a group. * * @group agg_funcs * @since 1.3.0 */ def max(columnName: String): Column = max(Column(columnName)) /** * Aggregate function: returns the average of the values in a group. * Alias for avg. * * @group agg_funcs * @since 1.4.0 */ def mean(e: Column): Column = avg(e) /** * Aggregate function: returns the average of the values in a group. * Alias for avg. * * @group agg_funcs * @since 1.4.0 */ def mean(columnName: String): Column = avg(columnName) /** * Aggregate function: returns the minimum value of the expression in a group. * * @group agg_funcs * @since 1.3.0 */ def min(e: Column): Column = withAggregateFunction { Min(e.expr) } /** * Aggregate function: returns the minimum value of the column in a group. * * @group agg_funcs * @since 1.3.0 */ def min(columnName: String): Column = min(Column(columnName)) /** * Aggregate function: returns the approximate `percentile` of the numeric column `col` which * is the smallest value in the ordered `col` values (sorted from least to greatest) such that * no more than `percentage` of `col` values is less than the value or equal to that value. * * If percentage is an array, each value must be between 0.0 and 1.0. * If it is a single floating point value, it must be between 0.0 and 1.0. * * The accuracy parameter is a positive numeric literal * which controls approximation accuracy at the cost of memory. * Higher value of accuracy yields better accuracy, 1.0/accuracy * is the relative error of the approximation. * * @group agg_funcs * @since 3.1.0 */ def percentile_approx(e: Column, percentage: Column, accuracy: Column): Column = { withAggregateFunction { new ApproximatePercentile( e.expr, percentage.expr, accuracy.expr ) } } /** * Aggregate function: returns the product of all numerical elements in a group. * * @group agg_funcs * @since 3.2.0 */ def product(e: Column): Column = withAggregateFunction { new Product(e.expr) } /** * Aggregate function: returns the skewness of the values in a group. * * @group agg_funcs * @since 1.6.0 */ def skewness(e: Column): Column = withAggregateFunction { Skewness(e.expr) } /** * Aggregate function: returns the skewness of the values in a group. * * @group agg_funcs * @since 1.6.0 */ def skewness(columnName: String): Column = skewness(Column(columnName)) /** * Aggregate function: alias for `stddev_samp`. * * @group agg_funcs * @since 1.6.0 */ def stddev(e: Column): Column = withAggregateFunction { StddevSamp(e.expr) } /** * Aggregate function: alias for `stddev_samp`. * * @group agg_funcs * @since 1.6.0 */ def stddev(columnName: String): Column = stddev(Column(columnName)) /** * Aggregate function: returns the sample standard deviation of * the expression in a group. * * @group agg_funcs * @since 1.6.0 */ def stddev_samp(e: Column): Column = withAggregateFunction { StddevSamp(e.expr) } /** * Aggregate function: returns the sample standard deviation of * the expression in a group. * * @group agg_funcs * @since 1.6.0 */ def stddev_samp(columnName: String): Column = stddev_samp(Column(columnName)) /** * Aggregate function: returns the population standard deviation of * the expression in a group. * * @group agg_funcs * @since 1.6.0 */ def stddev_pop(e: Column): Column = withAggregateFunction { StddevPop(e.expr) } /** * Aggregate function: returns the population standard deviation of * the expression in a group. 
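// Illustrative sketch (not in the original file): percentile_approx and spread statistics over a
// hypothetical latency column; `spark` is assumed to exist.
import org.apache.spark.sql.functions._
import spark.implicits._

val latencies = Seq(12.0, 15.0, 14.0, 120.0, 13.0).toDF("ms")
latencies.agg(
  percentile_approx($"ms", lit(0.95), lit(10000)).as("p95"), // higher accuracy costs more memory
  min($"ms"), max($"ms"), stddev($"ms"), skewness($"ms")
).show()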
* * @group agg_funcs * @since 1.6.0 */ def stddev_pop(columnName: String): Column = stddev_pop(Column(columnName)) /** * Aggregate function: returns the sum of all values in the expression. * * @group agg_funcs * @since 1.3.0 */ def sum(e: Column): Column = withAggregateFunction { Sum(e.expr) } /** * Aggregate function: returns the sum of all values in the given column. * * @group agg_funcs * @since 1.3.0 */ def sum(columnName: String): Column = sum(Column(columnName)) /** * Aggregate function: returns the sum of distinct values in the expression. * * @group agg_funcs * @since 1.3.0 */ @deprecated("Use sum_distinct", "3.2.0") def sumDistinct(e: Column): Column = withAggregateFunction(Sum(e.expr), isDistinct = true) /** * Aggregate function: returns the sum of distinct values in the expression. * * @group agg_funcs * @since 1.3.0 */ @deprecated("Use sum_distinct", "3.2.0") def sumDistinct(columnName: String): Column = sum_distinct(Column(columnName)) /** * Aggregate function: returns the sum of distinct values in the expression. * * @group agg_funcs * @since 3.2.0 */ def sum_distinct(e: Column): Column = withAggregateFunction(Sum(e.expr), isDistinct = true) /** * Aggregate function: alias for `var_samp`. * * @group agg_funcs * @since 1.6.0 */ def variance(e: Column): Column = withAggregateFunction { VarianceSamp(e.expr) } /** * Aggregate function: alias for `var_samp`. * * @group agg_funcs * @since 1.6.0 */ def variance(columnName: String): Column = variance(Column(columnName)) /** * Aggregate function: returns the unbiased variance of the values in a group. * * @group agg_funcs * @since 1.6.0 */ def var_samp(e: Column): Column = withAggregateFunction { VarianceSamp(e.expr) } /** * Aggregate function: returns the unbiased variance of the values in a group. * * @group agg_funcs * @since 1.6.0 */ def var_samp(columnName: String): Column = var_samp(Column(columnName)) /** * Aggregate function: returns the population variance of the values in a group. * * @group agg_funcs * @since 1.6.0 */ def var_pop(e: Column): Column = withAggregateFunction { VariancePop(e.expr) } /** * Aggregate function: returns the population variance of the values in a group. * * @group agg_funcs * @since 1.6.0 */ def var_pop(columnName: String): Column = var_pop(Column(columnName)) ////////////////////////////////////////////////////////////////////////////////////////////// // Window functions ////////////////////////////////////////////////////////////////////////////////////////////// /** * Window function: returns the cumulative distribution of values within a window partition, * i.e. the fraction of rows that are below the current row. * * {{{ * N = total number of rows in the partition * cumeDist(x) = number of values before (and including) x / N * }}} * * @group window_funcs * @since 1.6.0 */ def cume_dist(): Column = withExpr { new CumeDist } /** * Window function: returns the rank of rows within a window partition, without any gaps. * * The difference between rank and dense_rank is that denseRank leaves no gaps in ranking * sequence when there are ties. That is, if you were ranking a competition using dense_rank * and had three people tie for second place, you would say that all three were in second * place and that the next person came in third. Rank would give me sequential numbers, making * the person that came in third place (after the ties) would register as coming in fifth. * * This is equivalent to the DENSE_RANK function in SQL. 
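// Illustrative sketch (not in the original file): sum / sum_distinct and the cume_dist and
// dense_rank window functions; `spark` and the Window spec below are assumptions.
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import spark.implicits._

val scores = Seq(("a", 1), ("a", 1), ("a", 2), ("b", 3)).toDF("grp", "score")
scores.agg(sum($"score"), sum_distinct($"score"), var_samp($"score")).show()

val w = Window.partitionBy($"grp").orderBy($"score")
scores.withColumn("cume", cume_dist().over(w))
  .withColumn("dense", dense_rank().over(w))
  .show()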
* * @group window_funcs * @since 1.6.0 */ def dense_rank(): Column = withExpr { new DenseRank } /** * Window function: returns the value that is `offset` rows before the current row, and * `null` if there is less than `offset` rows before the current row. For example, * an `offset` of one will return the previous row at any given point in the window partition. * * This is equivalent to the LAG function in SQL. * * @group window_funcs * @since 1.4.0 */ def lag(e: Column, offset: Int): Column = lag(e, offset, null) /** * Window function: returns the value that is `offset` rows before the current row, and * `null` if there is less than `offset` rows before the current row. For example, * an `offset` of one will return the previous row at any given point in the window partition. * * This is equivalent to the LAG function in SQL. * * @group window_funcs * @since 1.4.0 */ def lag(columnName: String, offset: Int): Column = lag(columnName, offset, null) /** * Window function: returns the value that is `offset` rows before the current row, and * `defaultValue` if there is less than `offset` rows before the current row. For example, * an `offset` of one will return the previous row at any given point in the window partition. * * This is equivalent to the LAG function in SQL. * * @group window_funcs * @since 1.4.0 */ def lag(columnName: String, offset: Int, defaultValue: Any): Column = { lag(Column(columnName), offset, defaultValue) } /** * Window function: returns the value that is `offset` rows before the current row, and * `defaultValue` if there is less than `offset` rows before the current row. For example, * an `offset` of one will return the previous row at any given point in the window partition. * * This is equivalent to the LAG function in SQL. * * @group window_funcs * @since 1.4.0 */ def lag(e: Column, offset: Int, defaultValue: Any): Column = { lag(e, offset, defaultValue, false) } /** * Window function: returns the value that is `offset` rows before the current row, and * `defaultValue` if there is less than `offset` rows before the current row. `ignoreNulls` * determines whether null values of row are included in or eliminated from the calculation. * For example, an `offset` of one will return the previous row at any given point in the * window partition. * * This is equivalent to the LAG function in SQL. * * @group window_funcs * @since 3.2.0 */ def lag(e: Column, offset: Int, defaultValue: Any, ignoreNulls: Boolean): Column = withExpr { Lag(e.expr, Literal(offset), Literal(defaultValue), ignoreNulls) } /** * Window function: returns the value that is `offset` rows after the current row, and * `null` if there is less than `offset` rows after the current row. For example, * an `offset` of one will return the next row at any given point in the window partition. * * This is equivalent to the LEAD function in SQL. * * @group window_funcs * @since 1.4.0 */ def lead(columnName: String, offset: Int): Column = { lead(columnName, offset, null) } /** * Window function: returns the value that is `offset` rows after the current row, and * `null` if there is less than `offset` rows after the current row. For example, * an `offset` of one will return the next row at any given point in the window partition. * * This is equivalent to the LEAD function in SQL. 
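// Illustrative sketch (not in the original file): lag over an ordered window, e.g. computing
// deltas between consecutive readings; `spark` and the column names are assumptions.
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import spark.implicits._

val ts = Seq(("s1", 1, 10.0), ("s1", 2, 12.5), ("s1", 3, 11.0)).toDF("sensor", "t", "reading")
val w = Window.partitionBy($"sensor").orderBy($"t")
ts.withColumn("prev", lag($"reading", 1).over(w))                    // null for the first row
  .withColumn("delta", $"reading" - lag($"reading", 1, 0.0).over(w)) // default instead of null
  .show()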
* * @group window_funcs * @since 1.4.0 */ def lead(e: Column, offset: Int): Column = { lead(e, offset, null) } /** * Window function: returns the value that is `offset` rows after the current row, and * `defaultValue` if there is less than `offset` rows after the current row. For example, * an `offset` of one will return the next row at any given point in the window partition. * * This is equivalent to the LEAD function in SQL. * * @group window_funcs * @since 1.4.0 */ def lead(columnName: String, offset: Int, defaultValue: Any): Column = { lead(Column(columnName), offset, defaultValue) } /** * Window function: returns the value that is `offset` rows after the current row, and * `defaultValue` if there is less than `offset` rows after the current row. For example, * an `offset` of one will return the next row at any given point in the window partition. * * This is equivalent to the LEAD function in SQL. * * @group window_funcs * @since 1.4.0 */ def lead(e: Column, offset: Int, defaultValue: Any): Column = { lead(e, offset, defaultValue, false) } /** * Window function: returns the value that is `offset` rows after the current row, and * `defaultValue` if there is less than `offset` rows after the current row. `ignoreNulls` * determines whether null values of row are included in or eliminated from the calculation. * The default value of `ignoreNulls` is false. For example, an `offset` of one will return * the next row at any given point in the window partition. * * This is equivalent to the LEAD function in SQL. * * @group window_funcs * @since 3.2.0 */ def lead(e: Column, offset: Int, defaultValue: Any, ignoreNulls: Boolean): Column = withExpr { Lead(e.expr, Literal(offset), Literal(defaultValue), ignoreNulls) } /** * Window function: returns the value that is the `offset`th row of the window frame * (counting from 1), and `null` if the size of window frame is less than `offset` rows. * * It will return the `offset`th non-null value it sees when ignoreNulls is set to true. * If all values are null, then null is returned. * * This is equivalent to the nth_value function in SQL. * * @group window_funcs * @since 3.1.0 */ def nth_value(e: Column, offset: Int, ignoreNulls: Boolean): Column = withExpr { NthValue(e.expr, Literal(offset), ignoreNulls) } /** * Window function: returns the value that is the `offset`th row of the window frame * (counting from 1), and `null` if the size of window frame is less than `offset` rows. * * This is equivalent to the nth_value function in SQL. * * @group window_funcs * @since 3.1.0 */ def nth_value(e: Column, offset: Int): Column = withExpr { NthValue(e.expr, Literal(offset), false) } /** * Window function: returns the ntile group id (from 1 to `n` inclusive) in an ordered window * partition. For example, if `n` is 4, the first quarter of the rows will get value 1, the second * quarter will get 2, the third quarter will get 3, and the last quarter will get 4. * * This is equivalent to the NTILE function in SQL. * * @group window_funcs * @since 1.4.0 */ def ntile(n: Int): Column = withExpr { new NTile(Literal(n)) } /** * Window function: returns the relative rank (i.e. percentile) of rows within a window partition. * * This is computed by: * {{{ * (rank of row in its partition - 1) / (number of rows in the partition - 1) * }}} * * This is equivalent to the PERCENT_RANK function in SQL. * * @group window_funcs * @since 1.6.0 */ def percent_rank(): Column = withExpr { new PercentRank } /** * Window function: returns the rank of rows within a window partition. 
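// Illustrative sketch (not in the original file): lead, ntile and percent_rank over an ordered
// window; `spark` and the column names are assumptions.
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import spark.implicits._

val exams = Seq(("a", 50), ("a", 70), ("a", 90), ("b", 60)).toDF("class", "score")
val w = Window.partitionBy($"class").orderBy($"score")
exams.withColumn("next_score", lead($"score", 1).over(w))
  .withColumn("quartile", ntile(4).over(w))
  .withColumn("pct_rank", percent_rank().over(w))
  .show()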
* * The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking * sequence when there are ties. That is, if you were ranking a competition using dense_rank * and had three people tie for second place, you would say that all three were in second * place and that the next person came in third. Rank would give me sequential numbers, making * the person that came in third place (after the ties) would register as coming in fifth. * * This is equivalent to the RANK function in SQL. * * @group window_funcs * @since 1.4.0 */ def rank(): Column = withExpr { new Rank } /** * Window function: returns a sequential number starting at 1 within a window partition. * * @group window_funcs * @since 1.6.0 */ def row_number(): Column = withExpr { RowNumber() } ////////////////////////////////////////////////////////////////////////////////////////////// // Non-aggregate functions ////////////////////////////////////////////////////////////////////////////////////////////// /** * Creates a new array column. The input columns must all have the same data type. * * @group normal_funcs * @since 1.4.0 */ @scala.annotation.varargs def array(cols: Column*): Column = withExpr { CreateArray(cols.map(_.expr)) } /** * Creates a new array column. The input columns must all have the same data type. * * @group normal_funcs * @since 1.4.0 */ @scala.annotation.varargs def array(colName: String, colNames: String*): Column = { array((colName +: colNames).map(col) : _*) } /** * Creates a new map column. The input columns must be grouped as key-value pairs, e.g. * (key1, value1, key2, value2, ...). The key columns must all have the same data type, and can't * be null. The value columns must all have the same data type. * * @group normal_funcs * @since 2.0 */ @scala.annotation.varargs def map(cols: Column*): Column = withExpr { CreateMap(cols.map(_.expr)) } /** * Creates a new map column. The array in the first column is used for keys. The array in the * second column is used for values. All elements in the array for key should not be null. * * @group normal_funcs * @since 2.4 */ def map_from_arrays(keys: Column, values: Column): Column = withExpr { MapFromArrays(keys.expr, values.expr) } /** * Marks a DataFrame as small enough for use in broadcast joins. * * The following example marks the right DataFrame for broadcast hash join using `joinKey`. * {{{ * // left and right are DataFrames * left.join(broadcast(right), "joinKey") * }}} * * @group normal_funcs * @since 1.5.0 */ def broadcast[T](df: Dataset[T]): Dataset[T] = { Dataset[T](df.sparkSession, ResolvedHint(df.logicalPlan, HintInfo(strategy = Some(BROADCAST))))(df.exprEnc) } /** * Returns the first column that is not null, or null if all inputs are null. * * For example, `coalesce(a, b, c)` will return a if a is not null, * or b if a is null and b is not null, or c if both a and b are null but c is not null. * * @group normal_funcs * @since 1.3.0 */ @scala.annotation.varargs def coalesce(e: Column*): Column = withExpr { Coalesce(e.map(_.expr)) } /** * Creates a string column for the file name of the current Spark task. * * @group normal_funcs * @since 1.6.0 */ def input_file_name(): Column = withExpr { InputFileName() } /** * Return true iff the column is NaN. * * @group normal_funcs * @since 1.6.0 */ def isnan(e: Column): Column = withExpr { IsNaN(e.expr) } /** * Return true iff the column is null. 
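// Illustrative sketch (not in the original file): the broadcast hint and coalesce, joining a
// small hypothetical dimension table; `spark` is assumed to be in scope.
import org.apache.spark.sql.functions._
import spark.implicits._

val facts = Seq((1, "x"), (2, "y"), (3, null)).toDF("id", "code")
val dim   = Seq(("x", "Alpha"), ("y", "Beta")).toDF("code", "label")

facts.join(broadcast(dim), Seq("code"), "left")   // hint: ship `dim` to every executor
  .select($"id", coalesce($"label", lit("unknown")).as("label"))
  .show()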
* * @group normal_funcs * @since 1.6.0 */ def isnull(e: Column): Column = withExpr { IsNull(e.expr) } /** * A column expression that generates monotonically increasing 64-bit integers. * * The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive. * The current implementation puts the partition ID in the upper 31 bits, and the record number * within each partition in the lower 33 bits. The assumption is that the data frame has * less than 1 billion partitions, and each partition has less than 8 billion records. * * As an example, consider a `DataFrame` with two partitions, each with 3 records. * This expression would return the following IDs: * * {{{ * 0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594. * }}} * * @group normal_funcs * @since 1.4.0 */ @deprecated("Use monotonically_increasing_id()", "2.0.0") def monotonicallyIncreasingId(): Column = monotonically_increasing_id() /** * A column expression that generates monotonically increasing 64-bit integers. * * The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive. * The current implementation puts the partition ID in the upper 31 bits, and the record number * within each partition in the lower 33 bits. The assumption is that the data frame has * less than 1 billion partitions, and each partition has less than 8 billion records. * * As an example, consider a `DataFrame` with two partitions, each with 3 records. * This expression would return the following IDs: * * {{{ * 0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594. * }}} * * @group normal_funcs * @since 1.6.0 */ def monotonically_increasing_id(): Column = withExpr { MonotonicallyIncreasingID() } /** * Returns col1 if it is not NaN, or col2 if col1 is NaN. * * Both inputs should be floating point columns (DoubleType or FloatType). * * @group normal_funcs * @since 1.5.0 */ def nanvl(col1: Column, col2: Column): Column = withExpr { NaNvl(col1.expr, col2.expr) } /** * Unary minus, i.e. negate the expression. * {{{ * // Select the amount column and negates all values. * // Scala: * df.select( -df("amount") ) * * // Java: * df.select( negate(df.col("amount")) ); * }}} * * @group normal_funcs * @since 1.3.0 */ def negate(e: Column): Column = -e /** * Inversion of boolean expression, i.e. NOT. * {{{ * // Scala: select rows that are not active (isActive === false) * df.filter( !df("isActive") ) * * // Java: * df.filter( not(df.col("isActive")) ); * }}} * * @group normal_funcs * @since 1.3.0 */ def not(e: Column): Column = !e /** * Generate a random column with independent and identically distributed (i.i.d.) samples * uniformly distributed in [0.0, 1.0). * * @note The function is non-deterministic in general case. * * @group normal_funcs * @since 1.4.0 */ def rand(seed: Long): Column = withExpr { Rand(seed) } /** * Generate a random column with independent and identically distributed (i.i.d.) samples * uniformly distributed in [0.0, 1.0). * * @note The function is non-deterministic in general case. * * @group normal_funcs * @since 1.4.0 */ def rand(): Column = rand(Utils.random.nextLong) /** * Generate a column with independent and identically distributed (i.i.d.) samples from * the standard normal distribution. * * @note The function is non-deterministic in general case. * * @group normal_funcs * @since 1.4.0 */ def randn(seed: Long): Column = withExpr { Randn(seed) } /** * Generate a column with independent and identically distributed (i.i.d.) samples from * the standard normal distribution. 
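// Illustrative sketch (not in the original file): surrogate row ids, NaN handling, and seeded
// random columns; `spark` is assumed to be an existing SparkSession.
import org.apache.spark.sql.functions._
import spark.implicits._

val readings = Seq(1.0, Double.NaN, 3.0).toDF("x")
readings
  .withColumn("row_id", monotonically_increasing_id()) // unique and increasing, not consecutive
  .withColumn("x_or_zero", nanvl($"x", lit(0.0)))      // replaces NaN (not null) with 0.0
  .withColumn("noise", rand(42L))                      // seeded, but still partition-dependent
  .show()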
* * @note The function is non-deterministic in general case. * * @group normal_funcs * @since 1.4.0 */ def randn(): Column = randn(Utils.random.nextLong) /** * Partition ID. * * @note This is non-deterministic because it depends on data partitioning and task scheduling. * * @group normal_funcs * @since 1.6.0 */ def spark_partition_id(): Column = withExpr { SparkPartitionID() } /** * Computes the square root of the specified float value. * * @group math_funcs * @since 1.3.0 */ def sqrt(e: Column): Column = withExpr { Sqrt(e.expr) } /** * Computes the square root of the specified float value. * * @group math_funcs * @since 1.5.0 */ def sqrt(colName: String): Column = sqrt(Column(colName)) /** * Creates a new struct column. * If the input column is a column in a `DataFrame`, or a derived column expression * that is named (i.e. aliased), its name would be retained as the StructField's name, * otherwise, the newly generated StructField's name would be auto generated as * `col` with a suffix `index + 1`, i.e. col1, col2, col3, ... * * @group normal_funcs * @since 1.4.0 */ @scala.annotation.varargs def struct(cols: Column*): Column = withExpr { CreateStruct.create(cols.map(_.expr)) } /** * Creates a new struct column that composes multiple input columns. * * @group normal_funcs * @since 1.4.0 */ @scala.annotation.varargs def struct(colName: String, colNames: String*): Column = { struct((colName +: colNames).map(col) : _*) } /** * Evaluates a list of conditions and returns one of multiple possible result expressions. * If otherwise is not defined at the end, null is returned for unmatched conditions. * * {{{ * // Example: encoding gender string column into integer. * * // Scala: * people.select(when(people("gender") === "male", 0) * .when(people("gender") === "female", 1) * .otherwise(2)) * * // Java: * people.select(when(col("gender").equalTo("male"), 0) * .when(col("gender").equalTo("female"), 1) * .otherwise(2)) * }}} * * @group normal_funcs * @since 1.4.0 */ def when(condition: Column, value: Any): Column = withExpr { CaseWhen(Seq((condition.expr, lit(value).expr))) } /** * Computes bitwise NOT (~) of a number. * * @group normal_funcs * @since 1.4.0 */ @deprecated("Use bitwise_not", "3.2.0") def bitwiseNOT(e: Column): Column = bitwise_not(e) /** * Computes bitwise NOT (~) of a number. * * @group normal_funcs * @since 3.2.0 */ def bitwise_not(e: Column): Column = withExpr { BitwiseNot(e.expr) } /** * Parses the expression string into the column that it represents, similar to * [[Dataset#selectExpr]]. * {{{ * // get the number of words of each length * df.groupBy(expr("length(word)")).count() * }}} * * @group normal_funcs */ def expr(expr: String): Column = { val parser = SparkSession.getActiveSession.map(_.sessionState.sqlParser).getOrElse { new SparkSqlParser() } Column(parser.parseExpression(expr)) } ////////////////////////////////////////////////////////////////////////////////////////////// // Math Functions ////////////////////////////////////////////////////////////////////////////////////////////// /** * Computes the absolute value of a numeric value. 
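// Illustrative sketch (not in the original file): when/otherwise and expr on a hypothetical
// "gender" column; `spark` is assumed to be an existing SparkSession.
import org.apache.spark.sql.functions._
import spark.implicits._

val people = Seq("male", "female", "other").toDF("gender")
people.select(
  when($"gender" === "male", 0)
    .when($"gender" === "female", 1)
    .otherwise(2)
    .as("gender_code"),
  expr("upper(gender)").as("gender_upper")  // arbitrary SQL expression parsed at analysis time
).show()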
* * @group math_funcs * @since 1.3.0 */ def abs(e: Column): Column = withExpr { Abs(e.expr) } /** * @return inverse cosine of `e` in radians, as if computed by `java.lang.Math.acos` * * @group math_funcs * @since 1.4.0 */ def acos(e: Column): Column = withExpr { Acos(e.expr) } /** * @return inverse cosine of `columnName`, as if computed by `java.lang.Math.acos` * * @group math_funcs * @since 1.4.0 */ def acos(columnName: String): Column = acos(Column(columnName)) /** * @return inverse hyperbolic cosine of `e` * * @group math_funcs * @since 3.1.0 */ def acosh(e: Column): Column = withExpr { Acosh(e.expr) } /** * @return inverse hyperbolic cosine of `columnName` * * @group math_funcs * @since 3.1.0 */ def acosh(columnName: String): Column = acosh(Column(columnName)) /** * @return inverse sine of `e` in radians, as if computed by `java.lang.Math.asin` * * @group math_funcs * @since 1.4.0 */ def asin(e: Column): Column = withExpr { Asin(e.expr) } /** * @return inverse sine of `columnName`, as if computed by `java.lang.Math.asin` * * @group math_funcs * @since 1.4.0 */ def asin(columnName: String): Column = asin(Column(columnName)) /** * @return inverse hyperbolic sine of `e` * * @group math_funcs * @since 3.1.0 */ def asinh(e: Column): Column = withExpr { Asinh(e.expr) } /** * @return inverse hyperbolic sine of `columnName` * * @group math_funcs * @since 3.1.0 */ def asinh(columnName: String): Column = asinh(Column(columnName)) /** * @return inverse tangent of `e` as if computed by `java.lang.Math.atan` * * @group math_funcs * @since 1.4.0 */ def atan(e: Column): Column = withExpr { Atan(e.expr) } /** * @return inverse tangent of `columnName`, as if computed by `java.lang.Math.atan` * * @group math_funcs * @since 1.4.0 */ def atan(columnName: String): Column = atan(Column(columnName)) /** * @param y coordinate on y-axis * @param x coordinate on x-axis * @return the <i>theta</i> component of the point * (<i>r</i>, <i>theta</i>) * in polar coordinates that corresponds to the point * (<i>x</i>, <i>y</i>) in Cartesian coordinates, * as if computed by `java.lang.Math.atan2` * * @group math_funcs * @since 1.4.0 */ def atan2(y: Column, x: Column): Column = withExpr { Atan2(y.expr, x.expr) } /** * @param y coordinate on y-axis * @param xName coordinate on x-axis * @return the <i>theta</i> component of the point * (<i>r</i>, <i>theta</i>) * in polar coordinates that corresponds to the point * (<i>x</i>, <i>y</i>) in Cartesian coordinates, * as if computed by `java.lang.Math.atan2` * * @group math_funcs * @since 1.4.0 */ def atan2(y: Column, xName: String): Column = atan2(y, Column(xName)) /** * @param yName coordinate on y-axis * @param x coordinate on x-axis * @return the <i>theta</i> component of the point * (<i>r</i>, <i>theta</i>) * in polar coordinates that corresponds to the point * (<i>x</i>, <i>y</i>) in Cartesian coordinates, * as if computed by `java.lang.Math.atan2` * * @group math_funcs * @since 1.4.0 */ def atan2(yName: String, x: Column): Column = atan2(Column(yName), x) /** * @param yName coordinate on y-axis * @param xName coordinate on x-axis * @return the <i>theta</i> component of the point * (<i>r</i>, <i>theta</i>) * in polar coordinates that corresponds to the point * (<i>x</i>, <i>y</i>) in Cartesian coordinates, * as if computed by `java.lang.Math.atan2` * * @group math_funcs * @since 1.4.0 */ def atan2(yName: String, xName: String): Column = atan2(Column(yName), Column(xName)) /** * @param y coordinate on y-axis * @param xValue coordinate on x-axis * @return the <i>theta</i> 
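// Illustrative sketch (not in the original file): atan2 for converting Cartesian coordinates to a
// polar angle; `spark` and the column names are assumptions.
import org.apache.spark.sql.functions._
import spark.implicits._

val points = Seq((0.0, 1.0), (1.0, 1.0), (-1.0, 0.0)).toDF("x", "y")
points.select(
  atan2($"y", $"x").as("theta_rad"),          // angle of (x, y) in radians
  degrees(atan2($"y", $"x")).as("theta_deg")
).show()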
component of the point * (<i>r</i>, <i>theta</i>) * in polar coordinates that corresponds to the point * (<i>x</i>, <i>y</i>) in Cartesian coordinates, * as if computed by `java.lang.Math.atan2` * * @group math_funcs * @since 1.4.0 */ def atan2(y: Column, xValue: Double): Column = atan2(y, lit(xValue)) /** * @param yName coordinate on y-axis * @param xValue coordinate on x-axis * @return the <i>theta</i> component of the point * (<i>r</i>, <i>theta</i>) * in polar coordinates that corresponds to the point * (<i>x</i>, <i>y</i>) in Cartesian coordinates, * as if computed by `java.lang.Math.atan2` * * @group math_funcs * @since 1.4.0 */ def atan2(yName: String, xValue: Double): Column = atan2(Column(yName), xValue) /** * @param yValue coordinate on y-axis * @param x coordinate on x-axis * @return the <i>theta</i> component of the point * (<i>r</i>, <i>theta</i>) * in polar coordinates that corresponds to the point * (<i>x</i>, <i>y</i>) in Cartesian coordinates, * as if computed by `java.lang.Math.atan2` * * @group math_funcs * @since 1.4.0 */ def atan2(yValue: Double, x: Column): Column = atan2(lit(yValue), x) /** * @param yValue coordinate on y-axis * @param xName coordinate on x-axis * @return the <i>theta</i> component of the point * (<i>r</i>, <i>theta</i>) * in polar coordinates that corresponds to the point * (<i>x</i>, <i>y</i>) in Cartesian coordinates, * as if computed by `java.lang.Math.atan2` * * @group math_funcs * @since 1.4.0 */ def atan2(yValue: Double, xName: String): Column = atan2(yValue, Column(xName)) /** * @return inverse hyperbolic tangent of `e` * * @group math_funcs * @since 3.1.0 */ def atanh(e: Column): Column = withExpr { Atanh(e.expr) } /** * @return inverse hyperbolic tangent of `columnName` * * @group math_funcs * @since 3.1.0 */ def atanh(columnName: String): Column = atanh(Column(columnName)) /** * An expression that returns the string representation of the binary value of the given long * column. For example, bin("12") returns "1100". * * @group math_funcs * @since 1.5.0 */ def bin(e: Column): Column = withExpr { Bin(e.expr) } /** * An expression that returns the string representation of the binary value of the given long * column. For example, bin("12") returns "1100". * * @group math_funcs * @since 1.5.0 */ def bin(columnName: String): Column = bin(Column(columnName)) /** * Computes the cube-root of the given value. * * @group math_funcs * @since 1.4.0 */ def cbrt(e: Column): Column = withExpr { Cbrt(e.expr) } /** * Computes the cube-root of the given column. * * @group math_funcs * @since 1.4.0 */ def cbrt(columnName: String): Column = cbrt(Column(columnName)) /** * Computes the ceiling of the given value. * * @group math_funcs * @since 1.4.0 */ def ceil(e: Column): Column = withExpr { Ceil(e.expr) } /** * Computes the ceiling of the given column. * * @group math_funcs * @since 1.4.0 */ def ceil(columnName: String): Column = ceil(Column(columnName)) /** * Convert a number in a string column from one base to another. 
* * @group math_funcs * @since 1.5.0 */ def conv(num: Column, fromBase: Int, toBase: Int): Column = withExpr { Conv(num.expr, lit(fromBase).expr, lit(toBase).expr) } /** * @param e angle in radians * @return cosine of the angle, as if computed by `java.lang.Math.cos` * * @group math_funcs * @since 1.4.0 */ def cos(e: Column): Column = withExpr { Cos(e.expr) } /** * @param columnName angle in radians * @return cosine of the angle, as if computed by `java.lang.Math.cos` * * @group math_funcs * @since 1.4.0 */ def cos(columnName: String): Column = cos(Column(columnName)) /** * @param e hyperbolic angle * @return hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh` * * @group math_funcs * @since 1.4.0 */ def cosh(e: Column): Column = withExpr { Cosh(e.expr) } /** * @param columnName hyperbolic angle * @return hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh` * * @group math_funcs * @since 1.4.0 */ def cosh(columnName: String): Column = cosh(Column(columnName)) /** * Computes the exponential of the given value. * * @group math_funcs * @since 1.4.0 */ def exp(e: Column): Column = withExpr { Exp(e.expr) } /** * Computes the exponential of the given column. * * @group math_funcs * @since 1.4.0 */ def exp(columnName: String): Column = exp(Column(columnName)) /** * Computes the exponential of the given value minus one. * * @group math_funcs * @since 1.4.0 */ def expm1(e: Column): Column = withExpr { Expm1(e.expr) } /** * Computes the exponential of the given column minus one. * * @group math_funcs * @since 1.4.0 */ def expm1(columnName: String): Column = expm1(Column(columnName)) /** * Computes the factorial of the given value. * * @group math_funcs * @since 1.5.0 */ def factorial(e: Column): Column = withExpr { Factorial(e.expr) } /** * Computes the floor of the given value. * * @group math_funcs * @since 1.4.0 */ def floor(e: Column): Column = withExpr { Floor(e.expr) } /** * Computes the floor of the given column. * * @group math_funcs * @since 1.4.0 */ def floor(columnName: String): Column = floor(Column(columnName)) /** * Returns the greatest value of the list of values, skipping null values. * This function takes at least 2 parameters. It will return null iff all parameters are null. * * @group normal_funcs * @since 1.5.0 */ @scala.annotation.varargs def greatest(exprs: Column*): Column = withExpr { Greatest(exprs.map(_.expr)) } /** * Returns the greatest value of the list of column names, skipping null values. * This function takes at least 2 parameters. It will return null iff all parameters are null. * * @group normal_funcs * @since 1.5.0 */ @scala.annotation.varargs def greatest(columnName: String, columnNames: String*): Column = { greatest((columnName +: columnNames).map(Column.apply): _*) } /** * Computes hex value of the given column. * * @group math_funcs * @since 1.5.0 */ def hex(column: Column): Column = withExpr { Hex(column.expr) } /** * Inverse of hex. Interprets each pair of characters as a hexadecimal number * and converts to the byte representation of number. * * @group math_funcs * @since 1.5.0 */ def unhex(column: Column): Column = withExpr { Unhex(column.expr) } /** * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow. * * @group math_funcs * @since 1.4.0 */ def hypot(l: Column, r: Column): Column = withExpr { Hypot(l.expr, r.expr) } /** * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow. 
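// Illustrative sketch (not in the original file): base conversion, hex, and greatest on
// hypothetical columns; `spark` is assumed.
import org.apache.spark.sql.functions._
import spark.implicits._

val nums = Seq(("12", 7, 3), ("ff", 2, 9)).toDF("s", "a", "b")
nums.select(
  conv($"s", 16, 2).as("hex_to_binary"),      // e.g. "12" (hex) -> "10010"
  hex($"a").as("a_hex"),
  greatest($"a", $"b", lit(5)).as("max_of_three")
).show()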
* * @group math_funcs * @since 1.4.0 */ def hypot(l: Column, rightName: String): Column = hypot(l, Column(rightName)) /** * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow. * * @group math_funcs * @since 1.4.0 */ def hypot(leftName: String, r: Column): Column = hypot(Column(leftName), r) /** * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow. * * @group math_funcs * @since 1.4.0 */ def hypot(leftName: String, rightName: String): Column = hypot(Column(leftName), Column(rightName)) /** * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow. * * @group math_funcs * @since 1.4.0 */ def hypot(l: Column, r: Double): Column = hypot(l, lit(r)) /** * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow. * * @group math_funcs * @since 1.4.0 */ def hypot(leftName: String, r: Double): Column = hypot(Column(leftName), r) /** * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow. * * @group math_funcs * @since 1.4.0 */ def hypot(l: Double, r: Column): Column = hypot(lit(l), r) /** * Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow. * * @group math_funcs * @since 1.4.0 */ def hypot(l: Double, rightName: String): Column = hypot(l, Column(rightName)) /** * Returns the least value of the list of values, skipping null values. * This function takes at least 2 parameters. It will return null iff all parameters are null. * * @group normal_funcs * @since 1.5.0 */ @scala.annotation.varargs def least(exprs: Column*): Column = withExpr { Least(exprs.map(_.expr)) } /** * Returns the least value of the list of column names, skipping null values. * This function takes at least 2 parameters. It will return null iff all parameters are null. * * @group normal_funcs * @since 1.5.0 */ @scala.annotation.varargs def least(columnName: String, columnNames: String*): Column = { least((columnName +: columnNames).map(Column.apply): _*) } /** * Computes the natural logarithm of the given value. * * @group math_funcs * @since 1.4.0 */ def log(e: Column): Column = withExpr { Log(e.expr) } /** * Computes the natural logarithm of the given column. * * @group math_funcs * @since 1.4.0 */ def log(columnName: String): Column = log(Column(columnName)) /** * Returns the first argument-base logarithm of the second argument. * * @group math_funcs * @since 1.4.0 */ def log(base: Double, a: Column): Column = withExpr { Logarithm(lit(base).expr, a.expr) } /** * Returns the first argument-base logarithm of the second argument. * * @group math_funcs * @since 1.4.0 */ def log(base: Double, columnName: String): Column = log(base, Column(columnName)) /** * Computes the logarithm of the given value in base 10. * * @group math_funcs * @since 1.4.0 */ def log10(e: Column): Column = withExpr { Log10(e.expr) } /** * Computes the logarithm of the given value in base 10. * * @group math_funcs * @since 1.4.0 */ def log10(columnName: String): Column = log10(Column(columnName)) /** * Computes the natural logarithm of the given value plus one. * * @group math_funcs * @since 1.4.0 */ def log1p(e: Column): Column = withExpr { Log1p(e.expr) } /** * Computes the natural logarithm of the given column plus one. * * @group math_funcs * @since 1.4.0 */ def log1p(columnName: String): Column = log1p(Column(columnName)) /** * Computes the logarithm of the given column in base 2. * * @group math_funcs * @since 1.5.0 */ def log2(expr: Column): Column = withExpr { Log2(expr.expr) } /** * Computes the logarithm of the given value in base 2. 
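// Illustrative sketch (not in the original file): natural and base-n logarithms plus least, over
// a hypothetical positive column; `spark` is assumed to be in scope.
import org.apache.spark.sql.functions._
import spark.implicits._

val sizes = Seq(1.0, 8.0, 1024.0).toDF("bytes")
sizes.select(
  log($"bytes").as("ln"),           // natural log
  log(2.0, $"bytes").as("log2_b"),  // same result as log2 for this column
  log1p($"bytes").as("ln_plus_one"),
  least($"bytes", lit(100.0)).as("capped")
).show()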
* * @group math_funcs * @since 1.5.0 */ def log2(columnName: String): Column = log2(Column(columnName)) /** * Returns the value of the first argument raised to the power of the second argument. * * @group math_funcs * @since 1.4.0 */ def pow(l: Column, r: Column): Column = withExpr { Pow(l.expr, r.expr) } /** * Returns the value of the first argument raised to the power of the second argument. * * @group math_funcs * @since 1.4.0 */ def pow(l: Column, rightName: String): Column = pow(l, Column(rightName)) /** * Returns the value of the first argument raised to the power of the second argument. * * @group math_funcs * @since 1.4.0 */ def pow(leftName: String, r: Column): Column = pow(Column(leftName), r) /** * Returns the value of the first argument raised to the power of the second argument. * * @group math_funcs * @since 1.4.0 */ def pow(leftName: String, rightName: String): Column = pow(Column(leftName), Column(rightName)) /** * Returns the value of the first argument raised to the power of the second argument. * * @group math_funcs * @since 1.4.0 */ def pow(l: Column, r: Double): Column = pow(l, lit(r)) /** * Returns the value of the first argument raised to the power of the second argument. * * @group math_funcs * @since 1.4.0 */ def pow(leftName: String, r: Double): Column = pow(Column(leftName), r) /** * Returns the value of the first argument raised to the power of the second argument. * * @group math_funcs * @since 1.4.0 */ def pow(l: Double, r: Column): Column = pow(lit(l), r) /** * Returns the value of the first argument raised to the power of the second argument. * * @group math_funcs * @since 1.4.0 */ def pow(l: Double, rightName: String): Column = pow(l, Column(rightName)) /** * Returns the positive value of dividend mod divisor. * * @group math_funcs * @since 1.5.0 */ def pmod(dividend: Column, divisor: Column): Column = withExpr { Pmod(dividend.expr, divisor.expr) } /** * Returns the double value that is closest in value to the argument and * is equal to a mathematical integer. * * @group math_funcs * @since 1.4.0 */ def rint(e: Column): Column = withExpr { Rint(e.expr) } /** * Returns the double value that is closest in value to the argument and * is equal to a mathematical integer. * * @group math_funcs * @since 1.4.0 */ def rint(columnName: String): Column = rint(Column(columnName)) /** * Returns the value of the column `e` rounded to 0 decimal places with HALF_UP round mode. * * @group math_funcs * @since 1.5.0 */ def round(e: Column): Column = round(e, 0) /** * Round the value of `e` to `scale` decimal places with HALF_UP round mode * if `scale` is greater than or equal to 0 or at integral part when `scale` is less than 0. * * @group math_funcs * @since 1.5.0 */ def round(e: Column, scale: Int): Column = withExpr { Round(e.expr, Literal(scale)) } /** * Returns the value of the column `e` rounded to 0 decimal places with HALF_EVEN round mode. * * @group math_funcs * @since 2.0.0 */ def bround(e: Column): Column = bround(e, 0) /** * Round the value of `e` to `scale` decimal places with HALF_EVEN round mode * if `scale` is greater than or equal to 0 or at integral part when `scale` is less than 0. * * @group math_funcs * @since 2.0.0 */ def bround(e: Column, scale: Int): Column = withExpr { BRound(e.expr, Literal(scale)) } /** * Shift the given value numBits left. If the given value is a long value, this function * will return a long value else it will return an integer value. 
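* * For example (an illustrative sketch; a DataFrame `df` with an integer column "mask" is assumed, and the non-deprecated `shiftleft` is shown): * {{{ * df.select(shiftleft(col("mask"), 1)) // each value shifted left by one bit, i.e. doubled * }}}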
* * @group math_funcs * @since 1.5.0 */ @deprecated("Use shiftleft", "3.2.0") def shiftLeft(e: Column, numBits: Int): Column = shiftleft(e, numBits) /** * Shift the given value numBits left. If the given value is a long value, this function * will return a long value else it will return an integer value. * * @group math_funcs * @since 3.2.0 */ def shiftleft(e: Column, numBits: Int): Column = withExpr { ShiftLeft(e.expr, lit(numBits).expr) } /** * (Signed) shift the given value numBits right. If the given value is a long value, it will * return a long value else it will return an integer value. * * @group math_funcs * @since 1.5.0 */ @deprecated("Use shiftright", "3.2.0") def shiftRight(e: Column, numBits: Int): Column = shiftright(e, numBits) /** * (Signed) shift the given value numBits right. If the given value is a long value, it will * return a long value else it will return an integer value. * * @group math_funcs * @since 3.2.0 */ def shiftright(e: Column, numBits: Int): Column = withExpr { ShiftRight(e.expr, lit(numBits).expr) } /** * Unsigned shift the given value numBits right. If the given value is a long value, * it will return a long value else it will return an integer value. * * @group math_funcs * @since 1.5.0 */ @deprecated("Use shiftrightunsigned", "3.2.0") def shiftRightUnsigned(e: Column, numBits: Int): Column = shiftrightunsigned(e, numBits) /** * Unsigned shift the given value numBits right. If the given value is a long value, * it will return a long value else it will return an integer value. * * @group math_funcs * @since 3.2.0 */ def shiftrightunsigned(e: Column, numBits: Int): Column = withExpr { ShiftRightUnsigned(e.expr, lit(numBits).expr) } /** * Computes the signum of the given value. * * @group math_funcs * @since 1.4.0 */ def signum(e: Column): Column = withExpr { Signum(e.expr) } /** * Computes the signum of the given column. 
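* * For example (an illustrative sketch; a DataFrame `df` with a numeric column "delta" is assumed): * {{{ * df.select(signum("delta")) // -1.0, 0.0 or 1.0 depending on the sign of the value * }}}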
* * @group math_funcs * @since 1.4.0 */ def signum(columnName: String): Column = signum(Column(columnName)) /** * @param e angle in radians * @return sine of the angle, as if computed by `java.lang.Math.sin` * * @group math_funcs * @since 1.4.0 */ def sin(e: Column): Column = withExpr { Sin(e.expr) } /** * @param columnName angle in radians * @return sine of the angle, as if computed by `java.lang.Math.sin` * * @group math_funcs * @since 1.4.0 */ def sin(columnName: String): Column = sin(Column(columnName)) /** * @param e hyperbolic angle * @return hyperbolic sine of the given value, as if computed by `java.lang.Math.sinh` * * @group math_funcs * @since 1.4.0 */ def sinh(e: Column): Column = withExpr { Sinh(e.expr) } /** * @param columnName hyperbolic angle * @return hyperbolic sine of the given value, as if computed by `java.lang.Math.sinh` * * @group math_funcs * @since 1.4.0 */ def sinh(columnName: String): Column = sinh(Column(columnName)) /** * @param e angle in radians * @return tangent of the given value, as if computed by `java.lang.Math.tan` * * @group math_funcs * @since 1.4.0 */ def tan(e: Column): Column = withExpr { Tan(e.expr) } /** * @param columnName angle in radians * @return tangent of the given value, as if computed by `java.lang.Math.tan` * * @group math_funcs * @since 1.4.0 */ def tan(columnName: String): Column = tan(Column(columnName)) /** * @param e hyperbolic angle * @return hyperbolic tangent of the given value, as if computed by `java.lang.Math.tanh` * * @group math_funcs * @since 1.4.0 */ def tanh(e: Column): Column = withExpr { Tanh(e.expr) } /** * @param columnName hyperbolic angle * @return hyperbolic tangent of the given value, as if computed by `java.lang.Math.tanh` * * @group math_funcs * @since 1.4.0 */ def tanh(columnName: String): Column = tanh(Column(columnName)) /** * @group math_funcs * @since 1.4.0 */ @deprecated("Use degrees", "2.1.0") def toDegrees(e: Column): Column = degrees(e) /** * @group math_funcs * @since 1.4.0 */ @deprecated("Use degrees", "2.1.0") def toDegrees(columnName: String): Column = degrees(Column(columnName)) /** * Converts an angle measured in radians to an approximately equivalent angle measured in degrees. * * @param e angle in radians * @return angle in degrees, as if computed by `java.lang.Math.toDegrees` * * @group math_funcs * @since 2.1.0 */ def degrees(e: Column): Column = withExpr { ToDegrees(e.expr) } /** * Converts an angle measured in radians to an approximately equivalent angle measured in degrees. * * @param columnName angle in radians * @return angle in degrees, as if computed by `java.lang.Math.toDegrees` * * @group math_funcs * @since 2.1.0 */ def degrees(columnName: String): Column = degrees(Column(columnName)) /** * @group math_funcs * @since 1.4.0 */ @deprecated("Use radians", "2.1.0") def toRadians(e: Column): Column = radians(e) /** * @group math_funcs * @since 1.4.0 */ @deprecated("Use radians", "2.1.0") def toRadians(columnName: String): Column = radians(Column(columnName)) /** * Converts an angle measured in degrees to an approximately equivalent angle measured in radians. * * @param e angle in degrees * @return angle in radians, as if computed by `java.lang.Math.toRadians` * * @group math_funcs * @since 2.1.0 */ def radians(e: Column): Column = withExpr { ToRadians(e.expr) } /** * Converts an angle measured in degrees to an approximately equivalent angle measured in radians. 
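* * For example (an illustrative sketch; a DataFrame `df` with a column "deg" holding angles in degrees is assumed): * {{{ * df.select(radians("deg")) // 180.0 becomes approximately 3.14159 * }}}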
* * @param columnName angle in degrees * @return angle in radians, as if computed by `java.lang.Math.toRadians` * * @group math_funcs * @since 2.1.0 */ def radians(columnName: String): Column = radians(Column(columnName)) ////////////////////////////////////////////////////////////////////////////////////////////// // Misc functions ////////////////////////////////////////////////////////////////////////////////////////////// /** * Calculates the MD5 digest of a binary column and returns the value * as a 32 character hex string. * * @group misc_funcs * @since 1.5.0 */ def md5(e: Column): Column = withExpr { Md5(e.expr) } /** * Calculates the SHA-1 digest of a binary column and returns the value * as a 40 character hex string. * * @group misc_funcs * @since 1.5.0 */ def sha1(e: Column): Column = withExpr { Sha1(e.expr) } /** * Calculates the SHA-2 family of hash functions of a binary column and * returns the value as a hex string. * * @param e column to compute SHA-2 on. * @param numBits one of 224, 256, 384, or 512. * * @group misc_funcs * @since 1.5.0 */ def sha2(e: Column, numBits: Int): Column = { require(Seq(0, 224, 256, 384, 512).contains(numBits), s"numBits $numBits is not in the permitted values (0, 224, 256, 384, 512)") withExpr { Sha2(e.expr, lit(numBits).expr) } } /** * Calculates the cyclic redundancy check value (CRC32) of a binary column and * returns the value as a bigint. * * @group misc_funcs * @since 1.5.0 */ def crc32(e: Column): Column = withExpr { Crc32(e.expr) } /** * Calculates the hash code of given columns, and returns the result as an int column. * * @group misc_funcs * @since 2.0.0 */ @scala.annotation.varargs def hash(cols: Column*): Column = withExpr { new Murmur3Hash(cols.map(_.expr)) } /** * Calculates the hash code of given columns using the 64-bit * variant of the xxHash algorithm, and returns the result as a long * column. * * @group misc_funcs * @since 3.0.0 */ @scala.annotation.varargs def xxhash64(cols: Column*): Column = withExpr { new XxHash64(cols.map(_.expr)) } /** * Returns null if the condition is true, and throws an exception otherwise. * * @group misc_funcs * @since 3.1.0 */ def assert_true(c: Column): Column = withExpr { new AssertTrue(c.expr) } /** * Returns null if the condition is true; throws an exception with the error message otherwise. * * @group misc_funcs * @since 3.1.0 */ def assert_true(c: Column, e: Column): Column = withExpr { new AssertTrue(c.expr, e.expr) } /** * Throws an exception with the provided error message. * * @group misc_funcs * @since 3.1.0 */ def raise_error(c: Column): Column = withExpr { RaiseError(c.expr) } ////////////////////////////////////////////////////////////////////////////////////////////// // String functions ////////////////////////////////////////////////////////////////////////////////////////////// /** * Computes the numeric value of the first character of the string column, and returns the * result as an int column. * * @group string_funcs * @since 1.5.0 */ def ascii(e: Column): Column = withExpr { Ascii(e.expr) } /** * Computes the BASE64 encoding of a binary column and returns it as a string column. * This is the reverse of unbase64. * * @group string_funcs * @since 1.5.0 */ def base64(e: Column): Column = withExpr { Base64(e.expr) } /** * Concatenates multiple input string columns together into a single string column, * using the given separator. 
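* * For example (an illustrative sketch; a DataFrame `df` with string columns "first" and "last" is assumed): * {{{ * df.select(concat_ws(" ", col("first"), col("last"))) // "Grace" and "Hopper" become "Grace Hopper" * }}}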
* * @group string_funcs * @since 1.5.0 */ @scala.annotation.varargs def concat_ws(sep: String, exprs: Column*): Column = withExpr { ConcatWs(Literal.create(sep, StringType) +: exprs.map(_.expr)) } /** * Computes the first argument into a string from a binary using the provided character set * (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16'). * If either argument is null, the result will also be null. * * @group string_funcs * @since 1.5.0 */ def decode(value: Column, charset: String): Column = withExpr { StringDecode(value.expr, lit(charset).expr) } /** * Computes the first argument into a binary from a string using the provided character set * (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16'). * If either argument is null, the result will also be null. * * @group string_funcs * @since 1.5.0 */ def encode(value: Column, charset: String): Column = withExpr { Encode(value.expr, lit(charset).expr) } /** * Formats numeric column x to a format like '#,###,###.##', rounded to d decimal places * with HALF_EVEN round mode, and returns the result as a string column. * * If d is 0, the result has no decimal point or fractional part. * If d is less than 0, the result will be null. * * @group string_funcs * @since 1.5.0 */ def format_number(x: Column, d: Int): Column = withExpr { FormatNumber(x.expr, lit(d).expr) } /** * Formats the arguments in printf-style and returns the result as a string column. * * @group string_funcs * @since 1.5.0 */ @scala.annotation.varargs def format_string(format: String, arguments: Column*): Column = withExpr { FormatString((lit(format) +: arguments).map(_.expr): _*) } /** * Returns a new string column by converting the first letter of each word to uppercase. * Words are delimited by whitespace. * * For example, "hello world" will become "Hello World". * * @group string_funcs * @since 1.5.0 */ def initcap(e: Column): Column = withExpr { InitCap(e.expr) } /** * Locate the position of the first occurrence of substr column in the given string. * Returns null if either of the arguments is null. * * @note The position is not zero based, but 1 based index. Returns 0 if substr * could not be found in str. * * @group string_funcs * @since 1.5.0 */ def instr(str: Column, substring: String): Column = withExpr { StringInstr(str.expr, lit(substring).expr) } /** * Computes the character length of a given string or number of bytes of a binary string. * The length of character strings includes the trailing spaces. The length of binary strings * includes binary zeros. * * @group string_funcs * @since 1.5.0 */ def length(e: Column): Column = withExpr { Length(e.expr) } /** * Converts a string column to lower case. * * @group string_funcs * @since 1.3.0 */ def lower(e: Column): Column = withExpr { Lower(e.expr) } /** * Computes the Levenshtein distance of the two given string columns. * @group string_funcs * @since 1.5.0 */ def levenshtein(l: Column, r: Column): Column = withExpr { Levenshtein(l.expr, r.expr) } /** * Locate the position of the first occurrence of substr. * * @note The position is not zero based, but 1 based index. Returns 0 if substr * could not be found in str. * * @group string_funcs * @since 1.5.0 */ def locate(substr: String, str: Column): Column = withExpr { new StringLocate(lit(substr).expr, str.expr) } /** * Locate the position of the first occurrence of substr in a string column, after position pos. * * @note The position is not zero based, but 1 based index. Returns 0 if substr * could not be found in str.
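* * For example (an illustrative sketch; a DataFrame `df` with a string column "s" is assumed): * {{{ * df.select(locate("o", col("s"), 2)) // 1-based position of the first "o" found when searching from position 2; 0 if absent * }}}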
* * @group string_funcs * @since 1.5.0 */ def locate(substr: String, str: Column, pos: Int): Column = withExpr { StringLocate(lit(substr).expr, str.expr, lit(pos).expr) } /** * Left-pad the string column with pad to a length of len. If the string column is longer * than len, the return value is shortened to len characters. * * @group string_funcs * @since 1.5.0 */ def lpad(str: Column, len: Int, pad: String): Column = withExpr { StringLPad(str.expr, lit(len).expr, lit(pad).expr) } /** * Trim the spaces from the left end for the specified string value. * * @group string_funcs * @since 1.5.0 */ def ltrim(e: Column): Column = withExpr { StringTrimLeft(e.expr) } /** * Trim the specified character string from the left end for the specified string column. * @group string_funcs * @since 2.3.0 */ def ltrim(e: Column, trimString: String): Column = withExpr { StringTrimLeft(e.expr, Literal(trimString)) } /** * Extract a specific group matched by a Java regex, from the specified string column. * If the regex did not match, or the specified group did not match, an empty string is returned. * If the specified group index exceeds the group count of regex, an IllegalArgumentException * will be thrown. * * @group string_funcs * @since 1.5.0 */ def regexp_extract(e: Column, exp: String, groupIdx: Int): Column = withExpr { RegExpExtract(e.expr, lit(exp).expr, lit(groupIdx).expr) } /** * Replace all substrings of the specified string value that match regexp with rep. * * @group string_funcs * @since 1.5.0 */ def regexp_replace(e: Column, pattern: String, replacement: String): Column = withExpr { RegExpReplace(e.expr, lit(pattern).expr, lit(replacement).expr) } /** * Replace all substrings of the specified string value that match regexp with rep. * * @group string_funcs * @since 2.1.0 */ def regexp_replace(e: Column, pattern: Column, replacement: Column): Column = withExpr { RegExpReplace(e.expr, pattern.expr, replacement.expr) } /** * Decodes a BASE64 encoded string column and returns it as a binary column. * This is the reverse of base64. * * @group string_funcs * @since 1.5.0 */ def unbase64(e: Column): Column = withExpr { UnBase64(e.expr) } /** * Right-pad the string column with pad to a length of len. If the string column is longer * than len, the return value is shortened to len characters. * * @group string_funcs * @since 1.5.0 */ def rpad(str: Column, len: Int, pad: String): Column = withExpr { StringRPad(str.expr, lit(len).expr, lit(pad).expr) } /** * Repeats a string column n times, and returns it as a new string column. * * @group string_funcs * @since 1.5.0 */ def repeat(str: Column, n: Int): Column = withExpr { StringRepeat(str.expr, lit(n).expr) } /** * Trim the spaces from the right end for the specified string value. * * @group string_funcs * @since 1.5.0 */ def rtrim(e: Column): Column = withExpr { StringTrimRight(e.expr) } /** * Trim the specified character string from the right end for the specified string column. * @group string_funcs * @since 2.3.0 */ def rtrim(e: Column, trimString: String): Column = withExpr { StringTrimRight(e.expr, Literal(trimString)) } /** * Returns the soundex code for the specified expression. * * @group string_funcs * @since 1.5.0 */ def soundex(e: Column): Column = withExpr { SoundEx(e.expr) } /** * Splits str around matches of the given pattern. * * @param str a string expression to split * @param pattern a string representing a regular expression. The regex string should be * a Java regular expression.
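* * For example (an illustrative sketch; a DataFrame `df` with a string column "csv" is assumed): * {{{ * df.select(split(col("csv"), ",")) // "a,b,c" becomes the array ["a", "b", "c"] * }}}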
* * @group string_funcs * @since 1.5.0 */ def split(str: Column, pattern: String): Column = withExpr { StringSplit(str.expr, Literal(pattern), Literal(-1)) } /** * Splits str around matches of the given pattern. * * @param str a string expression to split * @param pattern a string representing a regular expression. The regex string should be * a Java regular expression. * @param limit an integer expression which controls the number of times the regex is applied. * <ul> * <li>limit greater than 0: The resulting array's length will not be more than limit, * and the resulting array's last entry will contain all input beyond the last * matched regex.</li> * <li>limit less than or equal to 0: `regex` will be applied as many times as * possible, and the resulting array can be of any size.</li> * </ul> * * @group string_funcs * @since 3.0.0 */ def split(str: Column, pattern: String, limit: Int): Column = withExpr { StringSplit(str.expr, Literal(pattern), Literal(limit)) } /** * Substring starts at `pos` and is of length `len` when str is String type or * returns the slice of byte array that starts at `pos` in byte and is of length `len` * when str is Binary type. * * @note The position is not zero based, but 1 based index. * * @group string_funcs * @since 1.5.0 */ def substring(str: Column, pos: Int, len: Int): Column = withExpr { Substring(str.expr, lit(pos).expr, lit(len).expr) } /** * Returns the substring from string str before count occurrences of the delimiter delim. * If count is positive, everything to the left of the final delimiter (counting from the left) is * returned. If count is negative, everything to the right of the final delimiter (counting from the * right) is returned. substring_index performs a case-sensitive match when searching for delim. * * @group string_funcs */ def substring_index(str: Column, delim: String, count: Int): Column = withExpr { SubstringIndex(str.expr, lit(delim).expr, lit(count).expr) } /** * Overlay the specified portion of `src` with `replace`, * starting from byte position `pos` of `src` and proceeding for `len` bytes. * * @group string_funcs * @since 3.0.0 */ def overlay(src: Column, replace: Column, pos: Column, len: Column): Column = withExpr { Overlay(src.expr, replace.expr, pos.expr, len.expr) } /** * Overlay the specified portion of `src` with `replace`, * starting from byte position `pos` of `src`. * * @group string_funcs * @since 3.0.0 */ def overlay(src: Column, replace: Column, pos: Column): Column = withExpr { new Overlay(src.expr, replace.expr, pos.expr) } /** * Splits a string into arrays of sentences, where each sentence is an array of words. * @group string_funcs * @since 3.2.0 */ def sentences(string: Column, language: Column, country: Column): Column = withExpr { Sentences(string.expr, language.expr, country.expr) } /** * Splits a string into arrays of sentences, where each sentence is an array of words. * The default locale is used. * @group string_funcs * @since 3.2.0 */ def sentences(string: Column): Column = withExpr { Sentences(string.expr) } /** * Translate any character in the src by a character in replaceString. * The characters in replaceString correspond to the characters in matchingString. * The translation occurs when any character in the string matches a character * in the `matchingString`.
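* * For example (an illustrative sketch; a DataFrame `df` with a string column "s" is assumed): * {{{ * df.select(translate(col("s"), "ae", "12")) // every "a" becomes "1" and every "e" becomes "2" * }}}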
* * @group string_funcs * @since 1.5.0 */ def translate(src: Column, matchingString: String, replaceString: String): Column = withExpr { StringTranslate(src.expr, lit(matchingString).expr, lit(replaceString).expr) } /** * Trim the spaces from both ends for the specified string column. * * @group string_funcs * @since 1.5.0 */ def trim(e: Column): Column = withExpr { StringTrim(e.expr) } /** * Trim the specified character from both ends for the specified string column. * @group string_funcs * @since 2.3.0 */ def trim(e: Column, trimString: String): Column = withExpr { StringTrim(e.expr, Literal(trimString)) } /** * Converts a string column to upper case. * * @group string_funcs * @since 1.3.0 */ def upper(e: Column): Column = withExpr { Upper(e.expr) } ////////////////////////////////////////////////////////////////////////////////////////////// // DateTime functions ////////////////////////////////////////////////////////////////////////////////////////////// /** * (Scala-specific) Creates a datetime interval * * @param years Number of years * @param months Number of months * @param weeks Number of weeks * @param days Number of days * @param hours Number of hours * @param mins Number of mins * @param secs Number of secs * @return A datetime interval * @group datetime_funcs * @since 3.2.0 */ def make_interval( years: Column = lit(0), months: Column = lit(0), weeks: Column = lit(0), days: Column = lit(0), hours: Column = lit(0), mins: Column = lit(0), secs: Column = lit(0)): Column = withExpr { MakeInterval(years.expr, months.expr, weeks.expr, days.expr, hours.expr, mins.expr, secs.expr) } /** * Returns the date that is `numMonths` after `startDate`. * * @param startDate A date, timestamp or string. If a string, the data must be in a format that * can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param numMonths The number of months to add to `startDate`, can be negative to subtract months * @return A date, or null if `startDate` was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def add_months(startDate: Column, numMonths: Int): Column = add_months(startDate, lit(numMonths)) /** * Returns the date that is `numMonths` after `startDate`. * * @param startDate A date, timestamp or string. If a string, the data must be in a format that * can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param numMonths A column of the number of months to add to `startDate`, can be negative to * subtract months * @return A date, or null if `startDate` was a string that could not be cast to a date * @group datetime_funcs * @since 3.0.0 */ def add_months(startDate: Column, numMonths: Column): Column = withExpr { AddMonths(startDate.expr, numMonths.expr) } /** * Returns the current date at the start of query evaluation as a date column. * All calls of current_date within the same query return the same value. * * @group datetime_funcs * @since 1.5.0 */ def current_date(): Column = withExpr { CurrentDate() } /** * Returns the current timestamp at the start of query evaluation as a timestamp column. * All calls of current_timestamp within the same query return the same value. * * @group datetime_funcs * @since 1.5.0 */ def current_timestamp(): Column = withExpr { CurrentTimestamp() } /** * Converts a date/timestamp/string to a value of string in the format specified by the date * format given by the second argument. 
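* * For example (an illustrative sketch; a DataFrame `df` with a timestamp column "ts" is assumed): * {{{ * df.select(date_format(col("ts"), "yyyy-MM-dd")) // e.g. "2015-07-27" * }}}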
* * See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html"> * Datetime Patterns</a> * for valid date and time format patterns * * @param dateExpr A date, timestamp or string. If a string, the data must be in a format that * can be cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param format A pattern such as `dd.MM.yyyy`, which would return a string like `18.03.1993` * @return A string, or null if `dateExpr` was a string that could not be cast to a timestamp * @note Use specialized functions like [[year]] whenever possible as they benefit from a * specialized implementation. * @throws IllegalArgumentException if the `format` pattern is invalid * @group datetime_funcs * @since 1.5.0 */ def date_format(dateExpr: Column, format: String): Column = withExpr { DateFormatClass(dateExpr.expr, Literal(format)) } /** * Returns the date that is `days` days after `start` * * @param start A date, timestamp or string. If a string, the data must be in a format that * can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param days The number of days to add to `start`, can be negative to subtract days * @return A date, or null if `start` was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def date_add(start: Column, days: Int): Column = date_add(start, lit(days)) /** * Returns the date that is `days` days after `start` * * @param start A date, timestamp or string. If a string, the data must be in a format that * can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param days A column of the number of days to add to `start`, can be negative to subtract days * @return A date, or null if `start` was a string that could not be cast to a date * @group datetime_funcs * @since 3.0.0 */ def date_add(start: Column, days: Column): Column = withExpr { DateAdd(start.expr, days.expr) } /** * Returns the date that is `days` days before `start` * * @param start A date, timestamp or string. If a string, the data must be in a format that * can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param days The number of days to subtract from `start`, can be negative to add days * @return A date, or null if `start` was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def date_sub(start: Column, days: Int): Column = date_sub(start, lit(days)) /** * Returns the date that is `days` days before `start` * * @param start A date, timestamp or string. If a string, the data must be in a format that * can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param days A column of the number of days to subtract from `start`, can be negative to add * days * @return A date, or null if `start` was a string that could not be cast to a date * @group datetime_funcs * @since 3.0.0 */ def date_sub(start: Column, days: Column): Column = withExpr { DateSub(start.expr, days.expr) } /** * Returns the number of days from `start` to `end`. * * Only considers the date part of the input. For example: * {{{ * datediff("2018-01-10 00:00:00", "2018-01-09 23:59:59") * // returns 1 * }}} * * @param end A date, timestamp or string. If a string, the data must be in a format that * can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param start A date, timestamp or string.
If a string, the data must be in a format that * can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @return An integer, or null if either `end` or `start` were strings that could not be cast to * a date. Negative if `end` is before `start` * @group datetime_funcs * @since 1.5.0 */ def datediff(end: Column, start: Column): Column = withExpr { DateDiff(end.expr, start.expr) } /** * Extracts the year as an integer from a given date/timestamp/string. * @return An integer, or null if the input was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def year(e: Column): Column = withExpr { Year(e.expr) } /** * Extracts the quarter as an integer from a given date/timestamp/string. * @return An integer, or null if the input was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def quarter(e: Column): Column = withExpr { Quarter(e.expr) } /** * Extracts the month as an integer from a given date/timestamp/string. * @return An integer, or null if the input was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def month(e: Column): Column = withExpr { Month(e.expr) } /** * Extracts the day of the week as an integer from a given date/timestamp/string. * Ranges from 1 for a Sunday through to 7 for a Saturday * @return An integer, or null if the input was a string that could not be cast to a date * @group datetime_funcs * @since 2.3.0 */ def dayofweek(e: Column): Column = withExpr { DayOfWeek(e.expr) } /** * Extracts the day of the month as an integer from a given date/timestamp/string. * @return An integer, or null if the input was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def dayofmonth(e: Column): Column = withExpr { DayOfMonth(e.expr) } /** * Extracts the day of the year as an integer from a given date/timestamp/string. * @return An integer, or null if the input was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def dayofyear(e: Column): Column = withExpr { DayOfYear(e.expr) } /** * Extracts the hours as an integer from a given date/timestamp/string. * @return An integer, or null if the input was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def hour(e: Column): Column = withExpr { Hour(e.expr) } /** * Returns the last day of the month which the given date belongs to. * For example, input "2015-07-27" returns "2015-07-31" since July 31 is the last day of the * month in July 2015. * * @param e A date, timestamp or string. If a string, the data must be in a format that can be * cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @return A date, or null if the input was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def last_day(e: Column): Column = withExpr { LastDay(e.expr) } /** * Extracts the minutes as an integer from a given date/timestamp/string. * @return An integer, or null if the input was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def minute(e: Column): Column = withExpr { Minute(e.expr) } /** * Returns number of months between dates `start` and `end`. * * A whole number is returned if both inputs have the same day of month or both are the last day * of their respective months. Otherwise, the difference is calculated assuming 31 days per month. 
* * For example: * {{{ * months_between("2017-11-14", "2017-07-14") // returns 4.0 * months_between("2017-01-01", "2017-01-10") // returns -0.29032258 * months_between("2017-06-01", "2017-06-16 12:00:00") // returns -0.5 * }}} * * @param end A date, timestamp or string. If a string, the data must be in a format that can * be cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param start A date, timestamp or string. If a string, the data must be in a format that can * be cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @return A double, or null if either `end` or `start` were strings that could not be cast to a * timestamp. Negative if `end` is before `start` * @group datetime_funcs * @since 1.5.0 */ def months_between(end: Column, start: Column): Column = withExpr { new MonthsBetween(end.expr, start.expr) } /** * Returns number of months between dates `end` and `start`. If `roundOff` is set to true, the * result is rounded off to 8 digits; it is not rounded otherwise. * @group datetime_funcs * @since 2.4.0 */ def months_between(end: Column, start: Column, roundOff: Boolean): Column = withExpr { MonthsBetween(end.expr, start.expr, lit(roundOff).expr) } /** * Returns the first date which is later than the value of the `date` column that is on the * specified day of the week. * * For example, `next_day('2015-07-27', "Sunday")` returns 2015-08-02 because that is the first * Sunday after 2015-07-27. * * @param date A date, timestamp or string. If a string, the data must be in a format that * can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param dayOfWeek Case insensitive, and accepts: "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun" * @return A date, or null if `date` was a string that could not be cast to a date or if * `dayOfWeek` was an invalid value * @group datetime_funcs * @since 1.5.0 */ def next_day(date: Column, dayOfWeek: String): Column = next_day(date, lit(dayOfWeek)) /** * Returns the first date which is later than the value of the `date` column that is on the * specified day of the week. * * For example, `next_day('2015-07-27', "Sunday")` returns 2015-08-02 because that is the first * Sunday after 2015-07-27. * * @param date A date, timestamp or string. If a string, the data must be in a format that * can be cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param dayOfWeek A column of the day of week. Case insensitive, and accepts: "Mon", "Tue", * "Wed", "Thu", "Fri", "Sat", "Sun" * @return A date, or null if `date` was a string that could not be cast to a date or if * `dayOfWeek` was an invalid value * @group datetime_funcs * @since 3.2.0 */ def next_day(date: Column, dayOfWeek: Column): Column = withExpr { NextDay(date.expr, dayOfWeek.expr) } /** * Extracts the seconds as an integer from a given date/timestamp/string. * @return An integer, or null if the input was a string that could not be cast to a timestamp * @group datetime_funcs * @since 1.5.0 */ def second(e: Column): Column = withExpr { Second(e.expr) } /** * Extracts the week number as an integer from a given date/timestamp/string.
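* * For example (an illustrative sketch; a DataFrame `df` with a date column "d" is assumed): * {{{ * df.select(weekofyear(col("d"))) // the ISO 8601 week number, between 1 and 53 * }}}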
* * A week is considered to start on a Monday and week 1 is the first week with more than 3 days, * as defined by ISO 8601 * * @return An integer, or null if the input was a string that could not be cast to a date * @group datetime_funcs * @since 1.5.0 */ def weekofyear(e: Column): Column = withExpr { WeekOfYear(e.expr) } /** * Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string * representing the timestamp of that moment in the current system time zone in the * yyyy-MM-dd HH:mm:ss format. * * @param ut A number of a type that is castable to a long, such as string or integer. Can be * negative for timestamps before the unix epoch * @return A string, or null if the input was a string that could not be cast to a long * @group datetime_funcs * @since 1.5.0 */ def from_unixtime(ut: Column): Column = withExpr { FromUnixTime(ut.expr, Literal(TimestampFormatter.defaultPattern)) } /** * Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string * representing the timestamp of that moment in the current system time zone in the given * format. * * See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html"> * Datetime Patterns</a> * for valid date and time format patterns * * @param ut A number of a type that is castable to a long, such as string or integer. Can be * negative for timestamps before the unix epoch * @param f A date time pattern that the input will be formatted to * @return A string, or null if `ut` was a string that could not be cast to a long or `f` was * an invalid date time pattern * @group datetime_funcs * @since 1.5.0 */ def from_unixtime(ut: Column, f: String): Column = withExpr { FromUnixTime(ut.expr, Literal(f)) } /** * Returns the current Unix timestamp (in seconds) as a long. * * @note All calls of `unix_timestamp` within the same query return the same value * (i.e. the current timestamp is calculated at the start of query evaluation). * * @group datetime_funcs * @since 1.5.0 */ def unix_timestamp(): Column = withExpr { UnixTimestamp(CurrentTimestamp(), Literal(TimestampFormatter.defaultPattern)) } /** * Converts time string in format yyyy-MM-dd HH:mm:ss to Unix timestamp (in seconds), * using the default timezone and the default locale. * * @param s A date, timestamp or string. If a string, the data must be in the * `yyyy-MM-dd HH:mm:ss` format * @return A long, or null if the input was a string not of the correct format * @group datetime_funcs * @since 1.5.0 */ def unix_timestamp(s: Column): Column = withExpr { UnixTimestamp(s.expr, Literal(TimestampFormatter.defaultPattern)) } /** * Converts time string with given pattern to Unix timestamp (in seconds). * * See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html"> * Datetime Patterns</a> * for valid date and time format patterns * * @param s A date, timestamp or string. If a string, the data must be in a format that can be * cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param p A date time pattern detailing the format of `s` when `s` is a string * @return A long, or null if `s` was a string that could not be cast to a date or `p` was * an invalid format * @group datetime_funcs * @since 1.5.0 */ def unix_timestamp(s: Column, p: String): Column = withExpr { UnixTimestamp(s.expr, Literal(p)) } /** * Converts to a timestamp by casting rules to `TimestampType`. * * @param s A date, timestamp or string. 
If a string, the data must be in a format that can be * cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @return A timestamp, or null if the input was a string that could not be cast to a timestamp * @group datetime_funcs * @since 2.2.0 */ def to_timestamp(s: Column): Column = withExpr { new ParseToTimestamp(s.expr) } /** * Converts time string with the given pattern to timestamp. * * See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html"> * Datetime Patterns</a> * for valid date and time format patterns * * @param s A date, timestamp or string. If a string, the data must be in a format that can be * cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param fmt A date time pattern detailing the format of `s` when `s` is a string * @return A timestamp, or null if `s` was a string that could not be cast to a timestamp or * `fmt` was an invalid format * @group datetime_funcs * @since 2.2.0 */ def to_timestamp(s: Column, fmt: String): Column = withExpr { new ParseToTimestamp(s.expr, Literal(fmt)) } /** * Converts the column into `DateType` by casting rules to `DateType`. * * @group datetime_funcs * @since 1.5.0 */ def to_date(e: Column): Column = withExpr { new ParseToDate(e.expr) } /** * Converts the column into a `DateType` with a specified format * * See <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html"> * Datetime Patterns</a> * for valid date and time format patterns * * @param e A date, timestamp or string. If a string, the data must be in a format that can be * cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param fmt A date time pattern detailing the format of `e` when `e` is a string * @return A date, or null if `e` was a string that could not be cast to a date or `fmt` was an * invalid format * @group datetime_funcs * @since 2.2.0 */ def to_date(e: Column, fmt: String): Column = withExpr { new ParseToDate(e.expr, Literal(fmt)) } /** * Returns date truncated to the unit specified by the format. * * For example, `trunc("2018-11-19 12:01:19", "year")` returns 2018-01-01 * * @param date A date, timestamp or string. If a string, the data must be in a format that can be * cast to a date, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param format: 'year', 'yyyy', 'yy' to truncate by year, * or 'month', 'mon', 'mm' to truncate by month. * Other options are: 'week', 'quarter' * * @return A date, or null if `date` was a string that could not be cast to a date or `format` * was an invalid value * @group datetime_funcs * @since 1.5.0 */ def trunc(date: Column, format: String): Column = withExpr { TruncDate(date.expr, Literal(format)) } /** * Returns timestamp truncated to the unit specified by the format. * * For example, `date_trunc("year", "2018-11-19 12:01:19")` returns 2018-01-01 00:00:00 * * @param format: 'year', 'yyyy', 'yy' to truncate by year, * 'month', 'mon', 'mm' to truncate by month, * 'day', 'dd' to truncate by day, * Other options are: * 'microsecond', 'millisecond', 'second', 'minute', 'hour', 'week', 'quarter' * @param timestamp A date, timestamp or string.
If a string, the data must be in a format that * can be cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @return A timestamp, or null if `timestamp` was a string that could not be cast to a timestamp * or `format` was an invalid value * @group datetime_funcs * @since 2.3.0 */ def date_trunc(format: String, timestamp: Column): Column = withExpr { TruncTimestamp(Literal(format), timestamp.expr) } /** * Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in UTC, and renders * that time as a timestamp in the given time zone. For example, 'GMT+1' would yield * '2017-07-14 03:40:00.0'. * * @param ts A date, timestamp or string. If a string, the data must be in a format that can be * cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param tz A string detailing the time zone ID that the input should be adjusted to. It should * be in the format of either region-based zone IDs or zone offsets. Region IDs must * have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in * the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are * supported as aliases of '+00:00'. Other short names are not recommended to use * because they can be ambiguous. * @return A timestamp, or null if `ts` was a string that could not be cast to a timestamp or * `tz` was an invalid value * @group datetime_funcs * @since 1.5.0 */ def from_utc_timestamp(ts: Column, tz: String): Column = withExpr { FromUTCTimestamp(ts.expr, Literal(tz)) } /** * Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in UTC, and renders * that time as a timestamp in the given time zone. For example, 'GMT+1' would yield * '2017-07-14 03:40:00.0'. * @group datetime_funcs * @since 2.4.0 */ def from_utc_timestamp(ts: Column, tz: Column): Column = withExpr { FromUTCTimestamp(ts.expr, tz.expr) } /** * Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in the given time * zone, and renders that time as a timestamp in UTC. For example, 'GMT+1' would yield * '2017-07-14 01:40:00.0'. * * @param ts A date, timestamp or string. If a string, the data must be in a format that can be * cast to a timestamp, such as `yyyy-MM-dd` or `yyyy-MM-dd HH:mm:ss.SSSS` * @param tz A string detailing the time zone ID that the input should be adjusted to. It should * be in the format of either region-based zone IDs or zone offsets. Region IDs must * have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in * the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are * supported as aliases of '+00:00'. Other short names are not recommended to use * because they can be ambiguous. * @return A timestamp, or null if `ts` was a string that could not be cast to a timestamp or * `tz` was an invalid value * @group datetime_funcs * @since 1.5.0 */ def to_utc_timestamp(ts: Column, tz: String): Column = withExpr { ToUTCTimestamp(ts.expr, Literal(tz)) } /** * Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in the given time * zone, and renders that time as a timestamp in UTC. For example, 'GMT+1' would yield * '2017-07-14 01:40:00.0'. * @group datetime_funcs * @since 2.4.0 */ def to_utc_timestamp(ts: Column, tz: Column): Column = withExpr { ToUTCTimestamp(ts.expr, tz.expr) } /** * Bucketize rows into one or more time windows given a timestamp specifying column. Window * starts are inclusive but the window ends are exclusive, e.g. 
12:05 will be in the window * [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in * the order of months are not supported. The following example takes the average stock price for * a one minute window every 10 seconds starting 5 seconds after the hour: * * {{{ * val df = ... // schema => timestamp: TimestampType, stockId: StringType, price: DoubleType * df.groupBy(window($"timestamp", "1 minute", "10 seconds", "5 seconds"), $"stockId") * .agg(mean("price")) * }}} * * The windows will look like: * * {{{ * 09:00:05-09:01:05 * 09:00:15-09:01:15 * 09:00:25-09:01:25 ... * }}} * * For a streaming query, you may use the function `current_timestamp` to generate windows on * processing time. * * @param timeColumn The column or the expression to use as the timestamp for windowing by time. * The time column must be of TimestampType. * @param windowDuration A string specifying the width of the window, e.g. `10 minutes`, * `1 second`. Check `org.apache.spark.unsafe.types.CalendarInterval` for * valid duration identifiers. Note that the duration is a fixed length of * time, and does not vary over time according to a calendar. For example, * `1 day` always means 86,400,000 milliseconds, not a calendar day. * @param slideDuration A string specifying the sliding interval of the window, e.g. `1 minute`. * A new window will be generated every `slideDuration`. Must be less than * or equal to the `windowDuration`. Check * `org.apache.spark.unsafe.types.CalendarInterval` for valid duration * identifiers. This duration is likewise absolute, and does not vary * according to a calendar. * @param startTime The offset with respect to 1970-01-01 00:00:00 UTC with which to start * window intervals. For example, in order to have hourly tumbling windows that * start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide * `startTime` as `15 minutes`. * * @group datetime_funcs * @since 2.0.0 */ def window( timeColumn: Column, windowDuration: String, slideDuration: String, startTime: String): Column = { withExpr { TimeWindow(timeColumn.expr, windowDuration, slideDuration, startTime) }.as("window") } /** * Bucketize rows into one or more time windows given a timestamp specifying column. Window * starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window * [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in * the order of months are not supported. The windows start beginning at 1970-01-01 00:00:00 UTC. * The following example takes the average stock price for a one minute window every 10 seconds: * * {{{ * val df = ... // schema => timestamp: TimestampType, stockId: StringType, price: DoubleType * df.groupBy(window($"timestamp", "1 minute", "10 seconds"), $"stockId") * .agg(mean("price")) * }}} * * The windows will look like: * * {{{ * 09:00:00-09:01:00 * 09:00:10-09:01:10 * 09:00:20-09:01:20 ... * }}} * * For a streaming query, you may use the function `current_timestamp` to generate windows on * processing time. * * @param timeColumn The column or the expression to use as the timestamp for windowing by time. * The time column must be of TimestampType. * @param windowDuration A string specifying the width of the window, e.g. `10 minutes`, * `1 second`. Check `org.apache.spark.unsafe.types.CalendarInterval` for * valid duration identifiers. Note that the duration is a fixed length of * time, and does not vary over time according to a calendar. 
For example, * `1 day` always means 86,400,000 milliseconds, not a calendar day. * @param slideDuration A string specifying the sliding interval of the window, e.g. `1 minute`. * A new window will be generated every `slideDuration`. Must be less than * or equal to the `windowDuration`. Check * `org.apache.spark.unsafe.types.CalendarInterval` for valid duration * identifiers. This duration is likewise absolute, and does not vary * according to a calendar. * * @group datetime_funcs * @since 2.0.0 */ def window(timeColumn: Column, windowDuration: String, slideDuration: String): Column = { window(timeColumn, windowDuration, slideDuration, "0 second") } /** * Generates tumbling time windows given a timestamp specifying column. Window * starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window * [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in * the order of months are not supported. The windows start beginning at 1970-01-01 00:00:00 UTC. * The following example takes the average stock price for a one minute tumbling window: * * {{{ * val df = ... // schema => timestamp: TimestampType, stockId: StringType, price: DoubleType * df.groupBy(window($"timestamp", "1 minute"), $"stockId") * .agg(mean("price")) * }}} * * The windows will look like: * * {{{ * 09:00:00-09:01:00 * 09:01:00-09:02:00 * 09:02:00-09:03:00 ... * }}} * * For a streaming query, you may use the function `current_timestamp` to generate windows on * processing time. * * @param timeColumn The column or the expression to use as the timestamp for windowing by time. * The time column must be of TimestampType. * @param windowDuration A string specifying the width of the window, e.g. `10 minutes`, * `1 second`. Check `org.apache.spark.unsafe.types.CalendarInterval` for * valid duration identifiers. * * @group datetime_funcs * @since 2.0.0 */ def window(timeColumn: Column, windowDuration: String): Column = { window(timeColumn, windowDuration, windowDuration, "0 second") } /** * Creates timestamp from the number of seconds since UTC epoch. * @group datetime_funcs * @since 3.1.0 */ def timestamp_seconds(e: Column): Column = withExpr { SecondsToTimestamp(e.expr) } ////////////////////////////////////////////////////////////////////////////////////////////// // Collection functions ////////////////////////////////////////////////////////////////////////////////////////////// /** * Returns null if the array is null, true if the array contains `value`, and false otherwise. * @group collection_funcs * @since 1.5.0 */ def array_contains(column: Column, value: Any): Column = withExpr { ArrayContains(column.expr, lit(value).expr) } /** * Returns `true` if `a1` and `a2` have at least one non-null element in common. If not and both * the arrays are non-empty and any of them contains a `null`, it returns `null`. It returns * `false` otherwise. * @group collection_funcs * @since 2.4.0 */ def arrays_overlap(a1: Column, a2: Column): Column = withExpr { ArraysOverlap(a1.expr, a2.expr) } /** * Returns an array containing all the elements in `x` from index `start` (or starting from the * end if `start` is negative) with the specified `length`. 
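* * For example (an illustrative sketch; a DataFrame `df` with an array column "xs" is assumed): * {{{ * df.select(slice(col("xs"), 2, 3)) // three elements starting at the second one (indexing is 1-based) * }}}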
* * @param x the array column to be sliced * @param start the starting index * @param length the length of the slice * * @group collection_funcs * @since 2.4.0 */ def slice(x: Column, start: Int, length: Int): Column = slice(x, lit(start), lit(length)) /** * Returns an array containing all the elements in `x` from index `start` (or starting from the * end if `start` is negative) with the specified `length`. * * @param x the array column to be sliced * @param start the starting index * @param length the length of the slice * * @group collection_funcs * @since 3.1.0 */ def slice(x: Column, start: Column, length: Column): Column = withExpr { Slice(x.expr, start.expr, length.expr) } /** * Concatenates the elements of `column` using the `delimiter`. Null values are replaced with * `nullReplacement`. * @group collection_funcs * @since 2.4.0 */ def array_join(column: Column, delimiter: String, nullReplacement: String): Column = withExpr { ArrayJoin(column.expr, Literal(delimiter), Some(Literal(nullReplacement))) } /** * Concatenates the elements of `column` using the `delimiter`. * @group collection_funcs * @since 2.4.0 */ def array_join(column: Column, delimiter: String): Column = withExpr { ArrayJoin(column.expr, Literal(delimiter), None) } /** * Concatenates multiple input columns together into a single column. * The function works with strings, binary and compatible array columns. * * @group collection_funcs * @since 1.5.0 */ @scala.annotation.varargs def concat(exprs: Column*): Column = withExpr { Concat(exprs.map(_.expr)) } /** * Locates the position of the first occurrence of the value in the given array as a long. * Returns null if either of the arguments is null. * * @note The position is not zero based, but 1 based index. Returns 0 if value * could not be found in array. * * @group collection_funcs * @since 2.4.0 */ def array_position(column: Column, value: Any): Column = withExpr { ArrayPosition(column.expr, lit(value).expr) } /** * Returns the element of the array at the given index if `column` is an array, or the value * for the given key if `column` is a map. * * @group collection_funcs * @since 2.4.0 */ def element_at(column: Column, value: Any): Column = withExpr { ElementAt(column.expr, lit(value).expr) } /** * Sorts the input array in ascending order. The elements of the input array must be orderable. * Null elements will be placed at the end of the returned array. * * @group collection_funcs * @since 2.4.0 */ def array_sort(e: Column): Column = withExpr { new ArraySort(e.expr) } /** * Removes all elements that are equal to `element` from the given array. * * @group collection_funcs * @since 2.4.0 */ def array_remove(column: Column, element: Any): Column = withExpr { ArrayRemove(column.expr, lit(element).expr) } /** * Removes duplicate values from the array. * @group collection_funcs * @since 2.4.0 */ def array_distinct(e: Column): Column = withExpr { ArrayDistinct(e.expr) } /** * Returns an array of the elements in the intersection of the given two arrays, * without duplicates. * * @group collection_funcs * @since 2.4.0 */ def array_intersect(col1: Column, col2: Column): Column = withExpr { ArrayIntersect(col1.expr, col2.expr) } /** * Returns an array of the elements in the union of the given two arrays, without duplicates. * * @group collection_funcs * @since 2.4.0 */ def array_union(col1: Column, col2: Column): Column = withExpr { ArrayUnion(col1.expr, col2.expr) } /** * Returns an array of the elements in the first array but not in the second array, * without duplicates.
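* * For example (an illustrative sketch; a DataFrame `df` with array columns "a" and "b" is assumed): * {{{ * df.select(array_except(col("a"), col("b"))) // [1, 2, 3] and [1, 3, 5] give [2] * }}}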
The order of elements in the result is not determined. * * @group collection_funcs * @since 2.4.0 */ def array_except(col1: Column, col2: Column): Column = withExpr { ArrayExcept(col1.expr, col2.expr) } private def createLambda(f: Column => Column) = { val x = UnresolvedNamedLambdaVariable(Seq(UnresolvedNamedLambdaVariable.freshVarName("x"))) val function = f(Column(x)).expr LambdaFunction(function, Seq(x)) } private def createLambda(f: (Column, Column) => Column) = { val x = UnresolvedNamedLambdaVariable(Seq(UnresolvedNamedLambdaVariable.freshVarName("x"))) val y = UnresolvedNamedLambdaVariable(Seq(UnresolvedNamedLambdaVariable.freshVarName("y"))) val function = f(Column(x), Column(y)).expr LambdaFunction(function, Seq(x, y)) } private def createLambda(f: (Column, Column, Column) => Column) = { val x = UnresolvedNamedLambdaVariable(Seq(UnresolvedNamedLambdaVariable.freshVarName("x"))) val y = UnresolvedNamedLambdaVariable(Seq(UnresolvedNamedLambdaVariable.freshVarName("y"))) val z = UnresolvedNamedLambdaVariable(Seq(UnresolvedNamedLambdaVariable.freshVarName("z"))) val function = f(Column(x), Column(y), Column(z)).expr LambdaFunction(function, Seq(x, y, z)) } /** * Returns an array of elements after applying a transformation to each element * in the input array. * {{{ * df.select(transform(col("i"), x => x + 1)) * }}} * * @param column the input array column * @param f col => transformed_col, the lambda function to transform the input column * * @group collection_funcs * @since 3.0.0 */ def transform(column: Column, f: Column => Column): Column = withExpr { ArrayTransform(column.expr, createLambda(f)) } /** * Returns an array of elements after applying a transformation to each element * in the input array. * {{{ * df.select(transform(col("i"), (x, i) => x + i)) * }}} * * @param column the input array column * @param f (col, index) => transformed_col, the lambda function to transform the input column * given the index. Indices start at 0. * * @group collection_funcs * @since 3.0.0 */ def transform(column: Column, f: (Column, Column) => Column): Column = withExpr { ArrayTransform(column.expr, createLambda(f)) } /** * Returns whether a predicate holds for one or more elements in the array. * {{{ * df.select(exists(col("i"), _ % 2 === 0)) * }}} * * @param column the input array column * @param f col => predicate, the Boolean predicate to check the input column * * @group collection_funcs * @since 3.0.0 */ def exists(column: Column, f: Column => Column): Column = withExpr { ArrayExists(column.expr, createLambda(f)) } /** * Returns whether a predicate holds for every element in the array. * {{{ * df.select(forall(col("i"), x => x % 2 === 0)) * }}} * * @param column the input array column * @param f col => predicate, the Boolean predicate to check the input column * * @group collection_funcs * @since 3.0.0 */ def forall(column: Column, f: Column => Column): Column = withExpr { ArrayForAll(column.expr, createLambda(f)) } /** * Returns an array of elements for which a predicate holds in a given array. * {{{ * df.select(filter(col("s"), x => x % 2 === 0)) * }}} * * @param column the input array column * @param f col => predicate, the Boolean predicate to filter the input column * * @group collection_funcs * @since 3.0.0 */ def filter(column: Column, f: Column => Column): Column = withExpr { ArrayFilter(column.expr, createLambda(f)) } /** * Returns an array of elements for which a predicate holds in a given array.
* {{{ * df.select(filter(col("s"), (x, i) => i % 2 === 0)) * }}} * * @param column the input array column * @param f (col, index) => predicate, the Boolean predicate to filter the input column * given the index. Indices start at 0. * * @group collection_funcs * @since 3.0.0 */ def filter(column: Column, f: (Column, Column) => Column): Column = withExpr { ArrayFilter(column.expr, createLambda(f)) } /** * Applies a binary operator to an initial state and all elements in the array, * and reduces this to a single state. The final state is converted into the final result * by applying a finish function. * {{{ * df.select(aggregate(col("i"), lit(0), (acc, x) => acc + x, _ * 10)) * }}} * * @param expr the input array column * @param initialValue the initial value * @param merge (combined_value, input_value) => combined_value, the merge function to merge * an input value to the combined_value * @param finish combined_value => final_value, the lambda function to convert the combined value * of all inputs to final result * * @group collection_funcs * @since 3.0.0 */ def aggregate( expr: Column, initialValue: Column, merge: (Column, Column) => Column, finish: Column => Column): Column = withExpr { ArrayAggregate( expr.expr, initialValue.expr, createLambda(merge), createLambda(finish) ) } /** * Applies a binary operator to an initial state and all elements in the array, * and reduces this to a single state. * {{{ * df.select(aggregate(col("i"), lit(0), (acc, x) => acc + x)) * }}} * * @param expr the input array column * @param initialValue the initial value * @param merge (combined_value, input_value) => combined_value, the merge function to merge * an input value to the combined_value * @group collection_funcs * @since 3.0.0 */ def aggregate(expr: Column, initialValue: Column, merge: (Column, Column) => Column): Column = aggregate(expr, initialValue, merge, c => c) /** * Merge two given arrays, element-wise, into a single array using a function. * If one array is shorter, nulls are appended at the end to match the length of the longer * array, before applying the function. * {{{ * df.select(zip_with(df1("val1"), df1("val2"), (x, y) => x + y)) * }}} * * @param left the left input array column * @param right the right input array column * @param f (lCol, rCol) => col, the lambda function to merge two input columns into one column * * @group collection_funcs * @since 3.0.0 */ def zip_with(left: Column, right: Column, f: (Column, Column) => Column): Column = withExpr { ZipWith(left.expr, right.expr, createLambda(f)) } /** * Applies a function to every key-value pair in a map and returns * a map with the results of those applications as the new keys for the pairs. * {{{ * df.select(transform_keys(col("i"), (k, v) => k + v)) * }}} * * @param expr the input map column * @param f (key, value) => new_key, the lambda function to transform the key of input map column * * @group collection_funcs * @since 3.0.0 */ def transform_keys(expr: Column, f: (Column, Column) => Column): Column = withExpr { TransformKeys(expr.expr, createLambda(f)) } /** * Applies a function to every key-value pair in a map and returns * a map with the results of those applications as the new values for the pairs. 
* {{{ * df.select(transform_values(col("i"), (k, v) => k + v)) * }}} * * @param expr the input map column * @param f (key, value) => new_value, the lambda function to transform the value of input map * column * * @group collection_funcs * @since 3.0.0 */ def transform_values(expr: Column, f: (Column, Column) => Column): Column = withExpr { TransformValues(expr.expr, createLambda(f)) } /** * Returns a map whose key-value pairs satisfy a predicate. * {{{ * df.select(map_filter(col("m"), (k, v) => k * 10 === v)) * }}} * * @param expr the input map column * @param f (key, value) => predicate, the Boolean predicate to filter the input map column * * @group collection_funcs * @since 3.0.0 */ def map_filter(expr: Column, f: (Column, Column) => Column): Column = withExpr { MapFilter(expr.expr, createLambda(f)) } /** * Merge two given maps, key-wise into a single map using a function. * {{{ * df.select(map_zip_with(df("m1"), df("m2"), (k, v1, v2) => k === v1 + v2)) * }}} * * @param left the left input map column * @param right the right input map column * @param f (key, value1, value2) => new_value, the lambda function to merge the map values * * @group collection_funcs * @since 3.0.0 */ def map_zip_with( left: Column, right: Column, f: (Column, Column, Column) => Column): Column = withExpr { MapZipWith(left.expr, right.expr, createLambda(f)) } /** * Creates a new row for each element in the given array or map column. * Uses the default column name `col` for elements in the array and * `key` and `value` for elements in the map unless specified otherwise. * * @group collection_funcs * @since 1.3.0 */ def explode(e: Column): Column = withExpr { Explode(e.expr) } /** * Creates a new row for each element in the given array or map column. * Uses the default column name `col` for elements in the array and * `key` and `value` for elements in the map unless specified otherwise. * Unlike explode, if the array/map is null or empty then null is produced. * * @group collection_funcs * @since 2.2.0 */ def explode_outer(e: Column): Column = withExpr { GeneratorOuter(Explode(e.expr)) } /** * Creates a new row for each element with position in the given array or map column. * Uses the default column name `pos` for position, and `col` for elements in the array * and `key` and `value` for elements in the map unless specified otherwise. * * @group collection_funcs * @since 2.1.0 */ def posexplode(e: Column): Column = withExpr { PosExplode(e.expr) } /** * Creates a new row for each element with position in the given array or map column. * Uses the default column name `pos` for position, and `col` for elements in the array * and `key` and `value` for elements in the map unless specified otherwise. * Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced. * * @group collection_funcs * @since 2.2.0 */ def posexplode_outer(e: Column): Column = withExpr { GeneratorOuter(PosExplode(e.expr)) } /** * Extracts json object from a json string based on json path specified, and returns json string * of the extracted json object. It will return null if the input json string is invalid. * * @group collection_funcs * @since 1.6.0 */ def get_json_object(e: Column, path: String): Column = withExpr { GetJsonObject(e.expr, lit(path).expr) } /** * Creates a new row for a json column according to the given field names. 
* * @group collection_funcs * @since 1.6.0 */ @scala.annotation.varargs def json_tuple(json: Column, fields: String*): Column = withExpr { require(fields.nonEmpty, "at least 1 field name should be given.") JsonTuple(json.expr +: fields.map(Literal.apply)) } // scalastyle:off line.size.limit /** * (Scala-specific) Parses a column containing a JSON string into a `StructType` with the * specified schema. Returns `null`, in the case of an unparseable string. * * @param e a string column containing JSON data. * @param schema the schema to use when parsing the json string * @param options options to control how the json is parsed. Accepts the same options as the * json data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option"> * Data Source Option</a> in the version you use. * * @group collection_funcs * @since 2.1.0 */ // scalastyle:on line.size.limit def from_json(e: Column, schema: StructType, options: Map[String, String]): Column = from_json(e, schema.asInstanceOf[DataType], options) // scalastyle:off line.size.limit /** * (Scala-specific) Parses a column containing a JSON string into a `MapType` with `StringType` * as keys type, `StructType` or `ArrayType` with the specified schema. * Returns `null`, in the case of an unparseable string. * * @param e a string column containing JSON data. * @param schema the schema to use when parsing the json string * @param options options to control how the json is parsed. accepts the same options and the * json data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option"> * Data Source Option</a> in the version you use. * * @group collection_funcs * @since 2.2.0 */ // scalastyle:on line.size.limit def from_json(e: Column, schema: DataType, options: Map[String, String]): Column = withExpr { JsonToStructs(CharVarcharUtils.failIfHasCharVarchar(schema), options, e.expr) } // scalastyle:off line.size.limit /** * (Java-specific) Parses a column containing a JSON string into a `StructType` with the * specified schema. Returns `null`, in the case of an unparseable string. * * @param e a string column containing JSON data. * @param schema the schema to use when parsing the json string * @param options options to control how the json is parsed. accepts the same options and the * json data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option"> * Data Source Option</a> in the version you use. * * @group collection_funcs * @since 2.1.0 */ // scalastyle:on line.size.limit def from_json(e: Column, schema: StructType, options: java.util.Map[String, String]): Column = from_json(e, schema, options.asScala.toMap) // scalastyle:off line.size.limit /** * (Java-specific) Parses a column containing a JSON string into a `MapType` with `StringType` * as keys type, `StructType` or `ArrayType` with the specified schema. * Returns `null`, in the case of an unparseable string. * * @param e a string column containing JSON data. * @param schema the schema to use when parsing the json string * @param options options to control how the json is parsed. accepts the same options and the * json data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option"> * Data Source Option</a> in the version you use. 
* * @group collection_funcs * @since 2.2.0 */ // scalastyle:on line.size.limit def from_json(e: Column, schema: DataType, options: java.util.Map[String, String]): Column = { from_json(e, CharVarcharUtils.failIfHasCharVarchar(schema), options.asScala.toMap) } /** * Parses a column containing a JSON string into a `StructType` with the specified schema. * Returns `null`, in the case of an unparseable string. * * @param e a string column containing JSON data. * @param schema the schema to use when parsing the json string * * @group collection_funcs * @since 2.1.0 */ def from_json(e: Column, schema: StructType): Column = from_json(e, schema, Map.empty[String, String]) /** * Parses a column containing a JSON string into a `MapType` with `StringType` as keys type, * `StructType` or `ArrayType` with the specified schema. * Returns `null`, in the case of an unparseable string. * * @param e a string column containing JSON data. * @param schema the schema to use when parsing the json string * * @group collection_funcs * @since 2.2.0 */ def from_json(e: Column, schema: DataType): Column = from_json(e, schema, Map.empty[String, String]) // scalastyle:off line.size.limit /** * (Java-specific) Parses a column containing a JSON string into a `MapType` with `StringType` * as keys type, `StructType` or `ArrayType` with the specified schema. * Returns `null`, in the case of an unparseable string. * * @param e a string column containing JSON data. * @param schema the schema as a DDL-formatted string. * @param options options to control how the json is parsed. accepts the same options and the * json data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option"> * Data Source Option</a> in the version you use. * * @group collection_funcs * @since 2.1.0 */ // scalastyle:on line.size.limit def from_json(e: Column, schema: String, options: java.util.Map[String, String]): Column = { from_json(e, schema, options.asScala.toMap) } // scalastyle:off line.size.limit /** * (Scala-specific) Parses a column containing a JSON string into a `MapType` with `StringType` * as keys type, `StructType` or `ArrayType` with the specified schema. * Returns `null`, in the case of an unparseable string. * * @param e a string column containing JSON data. * @param schema the schema as a DDL-formatted string. * @param options options to control how the json is parsed. accepts the same options and the * json data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option"> * Data Source Option</a> in the version you use. * * @group collection_funcs * @since 2.3.0 */ // scalastyle:on line.size.limit def from_json(e: Column, schema: String, options: Map[String, String]): Column = { val dataType = parseTypeWithFallback( schema, DataType.fromJson, "Cannot parse the schema in JSON format: ", fallbackParser = DataType.fromDDL) from_json(e, dataType, options) } /** * (Scala-specific) Parses a column containing a JSON string into a `MapType` with `StringType` * as keys type, `StructType` or `ArrayType` of `StructType`s with the specified schema. * Returns `null`, in the case of an unparseable string. * * @param e a string column containing JSON data. 
* @param schema the schema to use when parsing the json string * * @group collection_funcs * @since 2.4.0 */ def from_json(e: Column, schema: Column): Column = { from_json(e, schema, Map.empty[String, String].asJava) } // scalastyle:off line.size.limit /** * (Java-specific) Parses a column containing a JSON string into a `MapType` with `StringType` * as keys type, `StructType` or `ArrayType` of `StructType`s with the specified schema. * Returns `null`, in the case of an unparseable string. * * @param e a string column containing JSON data. * @param schema the schema to use when parsing the json string * @param options options to control how the json is parsed. accepts the same options and the * json data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option"> * Data Source Option</a> in the version you use. * * @group collection_funcs * @since 2.4.0 */ // scalastyle:on line.size.limit def from_json(e: Column, schema: Column, options: java.util.Map[String, String]): Column = { withExpr(new JsonToStructs(e.expr, schema.expr, options.asScala.toMap)) } /** * Parses a JSON string and infers its schema in DDL format. * * @param json a JSON string. * * @group collection_funcs * @since 2.4.0 */ def schema_of_json(json: String): Column = schema_of_json(lit(json)) /** * Parses a JSON string and infers its schema in DDL format. * * @param json a foldable string column containing a JSON string. * * @group collection_funcs * @since 2.4.0 */ def schema_of_json(json: Column): Column = withExpr(new SchemaOfJson(json.expr)) // scalastyle:off line.size.limit /** * Parses a JSON string and infers its schema in DDL format using options. * * @param json a foldable string column containing JSON data. * @param options options to control how the json is parsed. accepts the same options and the * json data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option"> * Data Source Option</a> in the version you use. * @return a column with string literal containing schema in DDL format. * * @group collection_funcs * @since 3.0.0 */ // scalastyle:on line.size.limit def schema_of_json(json: Column, options: java.util.Map[String, String]): Column = { withExpr(SchemaOfJson(json.expr, options.asScala.toMap)) } // scalastyle:off line.size.limit /** * (Scala-specific) Converts a column containing a `StructType`, `ArrayType` or * a `MapType` into a JSON string with the specified schema. * Throws an exception, in the case of an unsupported type. * * @param e a column containing a struct, an array or a map. * @param options options to control how the struct column is converted into a json string. * accepts the same options and the json data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option"> * Data Source Option</a> in the version you use. * Additionally the function supports the `pretty` option which enables * pretty JSON generation. * * @group collection_funcs * @since 2.1.0 */ // scalastyle:on line.size.limit def to_json(e: Column, options: Map[String, String]): Column = withExpr { StructsToJson(options, e.expr) } // scalastyle:off line.size.limit /** * (Java-specific) Converts a column containing a `StructType`, `ArrayType` or * a `MapType` into a JSON string with the specified schema. * Throws an exception, in the case of an unsupported type. * * @param e a column containing a struct, an array or a map. 
* @param options options to control how the struct column is converted into a json string. * accepts the same options and the json data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option"> * Data Source Option</a> in the version you use. * Additionally the function supports the `pretty` option which enables * pretty JSON generation. * * @group collection_funcs * @since 2.1.0 */ // scalastyle:on line.size.limit def to_json(e: Column, options: java.util.Map[String, String]): Column = to_json(e, options.asScala.toMap) /** * Converts a column containing a `StructType`, `ArrayType` or * a `MapType` into a JSON string with the specified schema. * Throws an exception, in the case of an unsupported type. * * @param e a column containing a struct, an array or a map. * * @group collection_funcs * @since 2.1.0 */ def to_json(e: Column): Column = to_json(e, Map.empty[String, String]) /** * Returns length of array or map. * * The function returns null for null input if spark.sql.legacy.sizeOfNull is set to false or * spark.sql.ansi.enabled is set to true. Otherwise, the function returns -1 for null input. * With the default settings, the function returns -1 for null input. * * @group collection_funcs * @since 1.5.0 */ def size(e: Column): Column = withExpr { Size(e.expr) } /** * Sorts the input array for the given column in ascending order, * according to the natural ordering of the array elements. * Null elements will be placed at the beginning of the returned array. * * @group collection_funcs * @since 1.5.0 */ def sort_array(e: Column): Column = sort_array(e, asc = true) /** * Sorts the input array for the given column in ascending or descending order, * according to the natural ordering of the array elements. * Null elements will be placed at the beginning of the returned array in ascending order or * at the end of the returned array in descending order. * * @group collection_funcs * @since 1.5.0 */ def sort_array(e: Column, asc: Boolean): Column = withExpr { SortArray(e.expr, lit(asc).expr) } /** * Returns the minimum value in the array. * * @group collection_funcs * @since 2.4.0 */ def array_min(e: Column): Column = withExpr { ArrayMin(e.expr) } /** * Returns the maximum value in the array. * * @group collection_funcs * @since 2.4.0 */ def array_max(e: Column): Column = withExpr { ArrayMax(e.expr) } /** * Returns a random permutation of the given array. * * @note The function is non-deterministic. * * @group collection_funcs * @since 2.4.0 */ def shuffle(e: Column): Column = withExpr { Shuffle(e.expr) } /** * Returns a reversed string or an array with reverse order of elements. * @group collection_funcs * @since 1.5.0 */ def reverse(e: Column): Column = withExpr { Reverse(e.expr) } /** * Creates a single array from an array of arrays. If a structure of nested arrays is deeper than * two levels, only one level of nesting is removed. * @group collection_funcs * @since 2.4.0 */ def flatten(e: Column): Column = withExpr { Flatten(e.expr) } /** * Generate a sequence of integers from start to stop, incrementing by step. * * @group collection_funcs * @since 2.4.0 */ def sequence(start: Column, stop: Column, step: Column): Column = withExpr { new Sequence(start.expr, stop.expr, step.expr) } /** * Generate a sequence of integers from start to stop, * incrementing by 1 if start is less than or equal to stop, otherwise -1. 
* * @group collection_funcs * @since 2.4.0 */ def sequence(start: Column, stop: Column): Column = withExpr { new Sequence(start.expr, stop.expr) } /** * Creates an array containing the left argument repeated the number of times given by the * right argument. * * @group collection_funcs * @since 2.4.0 */ def array_repeat(left: Column, right: Column): Column = withExpr { ArrayRepeat(left.expr, right.expr) } /** * Creates an array containing the left argument repeated the number of times given by the * right argument. * * @group collection_funcs * @since 2.4.0 */ def array_repeat(e: Column, count: Int): Column = array_repeat(e, lit(count)) /** * Returns an unordered array containing the keys of the map. * @group collection_funcs * @since 2.3.0 */ def map_keys(e: Column): Column = withExpr { MapKeys(e.expr) } /** * Returns an unordered array containing the values of the map. * @group collection_funcs * @since 2.3.0 */ def map_values(e: Column): Column = withExpr { MapValues(e.expr) } /** * Returns an unordered array of all entries in the given map. * @group collection_funcs * @since 3.0.0 */ def map_entries(e: Column): Column = withExpr { MapEntries(e.expr) } /** * Returns a map created from the given array of entries. * @group collection_funcs * @since 2.4.0 */ def map_from_entries(e: Column): Column = withExpr { MapFromEntries(e.expr) } /** * Returns a merged array of structs in which the N-th struct contains all N-th values of input * arrays. * @group collection_funcs * @since 2.4.0 */ @scala.annotation.varargs def arrays_zip(e: Column*): Column = withExpr { ArraysZip(e.map(_.expr)) } /** * Returns the union of all the given maps. * @group collection_funcs * @since 2.4.0 */ @scala.annotation.varargs def map_concat(cols: Column*): Column = withExpr { MapConcat(cols.map(_.expr)) } // scalastyle:off line.size.limit /** * Parses a column containing a CSV string into a `StructType` with the specified schema. * Returns `null`, in the case of an unparseable string. * * @param e a string column containing CSV data. * @param schema the schema to use when parsing the CSV string * @param options options to control how the CSV is parsed. accepts the same options and the * CSV data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option"> * Data Source Option</a> in the version you use. * * @group collection_funcs * @since 3.0.0 */ // scalastyle:on line.size.limit def from_csv(e: Column, schema: StructType, options: Map[String, String]): Column = withExpr { val replaced = CharVarcharUtils.failIfHasCharVarchar(schema).asInstanceOf[StructType] CsvToStructs(replaced, options, e.expr) } // scalastyle:off line.size.limit /** * (Java-specific) Parses a column containing a CSV string into a `StructType` * with the specified schema. Returns `null`, in the case of an unparseable string. * * @param e a string column containing CSV data. * @param schema the schema to use when parsing the CSV string * @param options options to control how the CSV is parsed. accepts the same options and the * CSV data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option"> * Data Source Option</a> in the version you use. * * @group collection_funcs * @since 3.0.0 */ // scalastyle:on line.size.limit def from_csv(e: Column, schema: Column, options: java.util.Map[String, String]): Column = { withExpr(new CsvToStructs(e.expr, schema.expr, options.asScala.toMap)) } /** * Parses a CSV string and infers its schema in DDL format. 
* * @param csv a CSV string. * * @group collection_funcs * @since 3.0.0 */ def schema_of_csv(csv: String): Column = schema_of_csv(lit(csv)) /** * Parses a CSV string and infers its schema in DDL format. * * @param csv a foldable string column containing a CSV string. * * @group collection_funcs * @since 3.0.0 */ def schema_of_csv(csv: Column): Column = withExpr(new SchemaOfCsv(csv.expr)) // scalastyle:off line.size.limit /** * Parses a CSV string and infers its schema in DDL format using options. * * @param csv a foldable string column containing a CSV string. * @param options options to control how the CSV is parsed. accepts the same options and the * CSV data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option"> * Data Source Option</a> in the version you use. * @return a column with string literal containing schema in DDL format. * * @group collection_funcs * @since 3.0.0 */ // scalastyle:on line.size.limit def schema_of_csv(csv: Column, options: java.util.Map[String, String]): Column = { withExpr(SchemaOfCsv(csv.expr, options.asScala.toMap)) } // scalastyle:off line.size.limit /** * (Java-specific) Converts a column containing a `StructType` into a CSV string with * the specified schema. Throws an exception, in the case of an unsupported type. * * @param e a column containing a struct. * @param options options to control how the struct column is converted into a CSV string. * It accepts the same options and the CSV data source. * See * <a href= * "https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option"> * Data Source Option</a> in the version you use. * * @group collection_funcs * @since 3.0.0 */ // scalastyle:on line.size.limit def to_csv(e: Column, options: java.util.Map[String, String]): Column = withExpr { StructsToCsv(options.asScala.toMap, e.expr) } /** * Converts a column containing a `StructType` into a CSV string with the specified schema. * Throws an exception, in the case of an unsupported type. * * @param e a column containing a struct. * * @group collection_funcs * @since 3.0.0 */ def to_csv(e: Column): Column = to_csv(e, Map.empty[String, String].asJava) /** * A transform for timestamps and dates to partition data into years. * * @group partition_transforms * @since 3.0.0 */ def years(e: Column): Column = withExpr { Years(e.expr) } /** * A transform for timestamps and dates to partition data into months. * * @group partition_transforms * @since 3.0.0 */ def months(e: Column): Column = withExpr { Months(e.expr) } /** * A transform for timestamps and dates to partition data into days. * * @group partition_transforms * @since 3.0.0 */ def days(e: Column): Column = withExpr { Days(e.expr) } /** * A transform for timestamps to partition data into hours. * * @group partition_transforms * @since 3.0.0 */ def hours(e: Column): Column = withExpr { Hours(e.expr) } /** * A transform for any type that partitions by a hash of the input column. * * @group partition_transforms * @since 3.0.0 */ def bucket(numBuckets: Column, e: Column): Column = withExpr { numBuckets.expr match { case lit @ Literal(_, IntegerType) => Bucket(lit, e.expr) case _ => throw new AnalysisException(s"Invalid number of buckets: bucket($numBuckets, $e)") } } /** * A transform for any type that partitions by a hash of the input column. 
* * @group partition_transforms * @since 3.0.0 */ def bucket(numBuckets: Int, e: Column): Column = withExpr { Bucket(Literal(numBuckets), e.expr) } // scalastyle:off line.size.limit // scalastyle:off parameter.number /* Use the following code to generate: (0 to 10).foreach { x => val types = (1 to x).foldRight("RT")((i, s) => {s"A$i, $s"}) val typeTags = (1 to x).map(i => s"A$i: TypeTag").foldLeft("RT: TypeTag")(_ + ", " + _) val inputEncoders = (1 to x).foldRight("Nil")((i, s) => {s"Try(ExpressionEncoder[A$i]()).toOption :: $s"}) println(s""" |/** | * Defines a Scala closure of $x arguments as user-defined function (UDF). | * The data types are automatically inferred based on the Scala closure's | * signature. By default the returned UDF is deterministic. To change it to | * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. | * | * @group udf_funcs | * @since 1.3.0 | */ |def udf[$typeTags](f: Function$x[$types]): UserDefinedFunction = { | val outputEncoder = Try(ExpressionEncoder[RT]()).toOption | val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) | val inputEncoders = $inputEncoders | val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) | if (nullable) udf else udf.asNonNullable() |}""".stripMargin) } (0 to 10).foreach { i => val extTypeArgs = (0 to i).map(_ => "_").mkString(", ") val anyTypeArgs = (0 to i).map(_ => "Any").mkString(", ") val anyCast = s".asInstanceOf[UDF$i[$anyTypeArgs]]" val anyParams = (1 to i).map(_ => "_: Any").mkString(", ") val funcCall = if (i == 0) s"() => f$anyCast.call($anyParams)" else s"f$anyCast.call($anyParams)" println(s""" |/** | * Defines a Java UDF$i instance as user-defined function (UDF). | * The caller must specify the output data type, and there is no automatic input type coercion. | * By default the returned UDF is deterministic. To change it to nondeterministic, call the | * API `UserDefinedFunction.asNondeterministic()`. | * | * @group udf_funcs | * @since 2.3.0 | */ |def udf(f: UDF$i[$extTypeArgs], returnType: DataType): UserDefinedFunction = { | val func = $funcCall | SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill($i)(None)) |}""".stripMargin) } */ ////////////////////////////////////////////////////////////////////////////////////////////// // Scala UDF functions ////////////////////////////////////////////////////////////////////////////////////////////// /** * Obtains a `UserDefinedFunction` that wraps the given `Aggregator` * so that it may be used with untyped Data Frames. * {{{ * val agg = // Aggregator[IN, BUF, OUT] * * // declare a UDF based on agg * val aggUDF = udaf(agg) * val aggData = df.agg(aggUDF($"colname")) * * // register agg as a named function * spark.udf.register("myAggName", udaf(agg)) * }}} * * @tparam IN the aggregator input type * @tparam BUF the aggregating buffer type * @tparam OUT the finalized output type * * @param agg the typed Aggregator * * @return a UserDefinedFunction that can be used as an aggregating expression. * * @note The input encoder is inferred from the input type IN. */ def udaf[IN: TypeTag, BUF, OUT](agg: Aggregator[IN, BUF, OUT]): UserDefinedFunction = { udaf(agg, ExpressionEncoder[IN]()) } /** * Obtains a `UserDefinedFunction` that wraps the given `Aggregator` * so that it may be used with untyped Data Frames. 
* {{{ * Aggregator<IN, BUF, OUT> agg = // custom Aggregator * Encoder<IN> enc = // input encoder * * // declare a UDF based on agg * UserDefinedFunction aggUDF = udaf(agg, enc) * DataFrame aggData = df.agg(aggUDF($"colname")) * * // register agg as a named function * spark.udf.register("myAggName", udaf(agg, enc)) * }}} * * @tparam IN the aggregator input type * @tparam BUF the aggregating buffer type * @tparam OUT the finalized output type * * @param agg the typed Aggregator * @param inputEncoder a specific input encoder to use * * @return a UserDefinedFunction that can be used as an aggregating expression * * @note This overloading takes an explicit input encoder, to support UDAF * declarations in Java. */ def udaf[IN, BUF, OUT]( agg: Aggregator[IN, BUF, OUT], inputEncoder: Encoder[IN]): UserDefinedFunction = { UserDefinedAggregator(agg, inputEncoder) } /** * Defines a Scala closure of 0 arguments as user-defined function (UDF). * The data types are automatically inferred based on the Scala closure's * signature. By default the returned UDF is deterministic. To change it to * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 1.3.0 */ def udf[RT: TypeTag](f: Function0[RT]): UserDefinedFunction = { val outputEncoder = Try(ExpressionEncoder[RT]()).toOption val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) val inputEncoders = Nil val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) if (nullable) udf else udf.asNonNullable() } /** * Defines a Scala closure of 1 arguments as user-defined function (UDF). * The data types are automatically inferred based on the Scala closure's * signature. By default the returned UDF is deterministic. To change it to * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 1.3.0 */ def udf[RT: TypeTag, A1: TypeTag](f: Function1[A1, RT]): UserDefinedFunction = { val outputEncoder = Try(ExpressionEncoder[RT]()).toOption val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Nil val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) if (nullable) udf else udf.asNonNullable() } /** * Defines a Scala closure of 2 arguments as user-defined function (UDF). * The data types are automatically inferred based on the Scala closure's * signature. By default the returned UDF is deterministic. To change it to * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 1.3.0 */ def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag](f: Function2[A1, A2, RT]): UserDefinedFunction = { val outputEncoder = Try(ExpressionEncoder[RT]()).toOption val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Nil val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) if (nullable) udf else udf.asNonNullable() } /** * Defines a Scala closure of 3 arguments as user-defined function (UDF). * The data types are automatically inferred based on the Scala closure's * signature. By default the returned UDF is deterministic. 
To change it to * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 1.3.0 */ def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag](f: Function3[A1, A2, A3, RT]): UserDefinedFunction = { val outputEncoder = Try(ExpressionEncoder[RT]()).toOption val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Nil val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) if (nullable) udf else udf.asNonNullable() } /** * Defines a Scala closure of 4 arguments as user-defined function (UDF). * The data types are automatically inferred based on the Scala closure's * signature. By default the returned UDF is deterministic. To change it to * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 1.3.0 */ def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag](f: Function4[A1, A2, A3, A4, RT]): UserDefinedFunction = { val outputEncoder = Try(ExpressionEncoder[RT]()).toOption val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Nil val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) if (nullable) udf else udf.asNonNullable() } /** * Defines a Scala closure of 5 arguments as user-defined function (UDF). * The data types are automatically inferred based on the Scala closure's * signature. By default the returned UDF is deterministic. To change it to * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 1.3.0 */ def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag](f: Function5[A1, A2, A3, A4, A5, RT]): UserDefinedFunction = { val outputEncoder = Try(ExpressionEncoder[RT]()).toOption val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Nil val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) if (nullable) udf else udf.asNonNullable() } /** * Defines a Scala closure of 6 arguments as user-defined function (UDF). * The data types are automatically inferred based on the Scala closure's * signature. By default the returned UDF is deterministic. To change it to * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. 
* * @group udf_funcs * @since 1.3.0 */ def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag](f: Function6[A1, A2, A3, A4, A5, A6, RT]): UserDefinedFunction = { val outputEncoder = Try(ExpressionEncoder[RT]()).toOption val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Try(ExpressionEncoder[A6]()).toOption :: Nil val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) if (nullable) udf else udf.asNonNullable() } /** * Defines a Scala closure of 7 arguments as user-defined function (UDF). * The data types are automatically inferred based on the Scala closure's * signature. By default the returned UDF is deterministic. To change it to * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 1.3.0 */ def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag](f: Function7[A1, A2, A3, A4, A5, A6, A7, RT]): UserDefinedFunction = { val outputEncoder = Try(ExpressionEncoder[RT]()).toOption val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Try(ExpressionEncoder[A6]()).toOption :: Try(ExpressionEncoder[A7]()).toOption :: Nil val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) if (nullable) udf else udf.asNonNullable() } /** * Defines a Scala closure of 8 arguments as user-defined function (UDF). * The data types are automatically inferred based on the Scala closure's * signature. By default the returned UDF is deterministic. To change it to * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 1.3.0 */ def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag](f: Function8[A1, A2, A3, A4, A5, A6, A7, A8, RT]): UserDefinedFunction = { val outputEncoder = Try(ExpressionEncoder[RT]()).toOption val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Try(ExpressionEncoder[A6]()).toOption :: Try(ExpressionEncoder[A7]()).toOption :: Try(ExpressionEncoder[A8]()).toOption :: Nil val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) if (nullable) udf else udf.asNonNullable() } /** * Defines a Scala closure of 9 arguments as user-defined function (UDF). * The data types are automatically inferred based on the Scala closure's * signature. By default the returned UDF is deterministic. To change it to * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. 
* * @group udf_funcs * @since 1.3.0 */ def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag](f: Function9[A1, A2, A3, A4, A5, A6, A7, A8, A9, RT]): UserDefinedFunction = { val outputEncoder = Try(ExpressionEncoder[RT]()).toOption val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Try(ExpressionEncoder[A6]()).toOption :: Try(ExpressionEncoder[A7]()).toOption :: Try(ExpressionEncoder[A8]()).toOption :: Try(ExpressionEncoder[A9]()).toOption :: Nil val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) if (nullable) udf else udf.asNonNullable() } /** * Defines a Scala closure of 10 arguments as user-defined function (UDF). * The data types are automatically inferred based on the Scala closure's * signature. By default the returned UDF is deterministic. To change it to * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 1.3.0 */ def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag](f: Function10[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, RT]): UserDefinedFunction = { val outputEncoder = Try(ExpressionEncoder[RT]()).toOption val ScalaReflection.Schema(dataType, nullable) = outputEncoder.map(UDFRegistration.outputSchema).getOrElse(ScalaReflection.schemaFor[RT]) val inputEncoders = Try(ExpressionEncoder[A1]()).toOption :: Try(ExpressionEncoder[A2]()).toOption :: Try(ExpressionEncoder[A3]()).toOption :: Try(ExpressionEncoder[A4]()).toOption :: Try(ExpressionEncoder[A5]()).toOption :: Try(ExpressionEncoder[A6]()).toOption :: Try(ExpressionEncoder[A7]()).toOption :: Try(ExpressionEncoder[A8]()).toOption :: Try(ExpressionEncoder[A9]()).toOption :: Try(ExpressionEncoder[A10]()).toOption :: Nil val udf = SparkUserDefinedFunction(f, dataType, inputEncoders, outputEncoder) if (nullable) udf else udf.asNonNullable() } ////////////////////////////////////////////////////////////////////////////////////////////// // Java UDF functions ////////////////////////////////////////////////////////////////////////////////////////////// /** * Defines a Java UDF0 instance as user-defined function (UDF). * The caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 2.3.0 */ def udf(f: UDF0[_], returnType: DataType): UserDefinedFunction = { val func = () => f.asInstanceOf[UDF0[Any]].call() SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(0)(None)) } /** * Defines a Java UDF1 instance as user-defined function (UDF). * The caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. 
* * @group udf_funcs * @since 2.3.0 */ def udf(f: UDF1[_, _], returnType: DataType): UserDefinedFunction = { val func = f.asInstanceOf[UDF1[Any, Any]].call(_: Any) SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(1)(None)) } /** * Defines a Java UDF2 instance as user-defined function (UDF). * The caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 2.3.0 */ def udf(f: UDF2[_, _, _], returnType: DataType): UserDefinedFunction = { val func = f.asInstanceOf[UDF2[Any, Any, Any]].call(_: Any, _: Any) SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(2)(None)) } /** * Defines a Java UDF3 instance as user-defined function (UDF). * The caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 2.3.0 */ def udf(f: UDF3[_, _, _, _], returnType: DataType): UserDefinedFunction = { val func = f.asInstanceOf[UDF3[Any, Any, Any, Any]].call(_: Any, _: Any, _: Any) SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(3)(None)) } /** * Defines a Java UDF4 instance as user-defined function (UDF). * The caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 2.3.0 */ def udf(f: UDF4[_, _, _, _, _], returnType: DataType): UserDefinedFunction = { val func = f.asInstanceOf[UDF4[Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any) SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(4)(None)) } /** * Defines a Java UDF5 instance as user-defined function (UDF). * The caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 2.3.0 */ def udf(f: UDF5[_, _, _, _, _, _], returnType: DataType): UserDefinedFunction = { val func = f.asInstanceOf[UDF5[Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any) SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(5)(None)) } /** * Defines a Java UDF6 instance as user-defined function (UDF). * The caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 2.3.0 */ def udf(f: UDF6[_, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = { val func = f.asInstanceOf[UDF6[Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any) SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(6)(None)) } /** * Defines a Java UDF7 instance as user-defined function (UDF). * The caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. 
* * @group udf_funcs * @since 2.3.0 */ def udf(f: UDF7[_, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = { val func = f.asInstanceOf[UDF7[Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any) SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(7)(None)) } /** * Defines a Java UDF8 instance as user-defined function (UDF). * The caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 2.3.0 */ def udf(f: UDF8[_, _, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = { val func = f.asInstanceOf[UDF8[Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any) SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(8)(None)) } /** * Defines a Java UDF9 instance as user-defined function (UDF). * The caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 2.3.0 */ def udf(f: UDF9[_, _, _, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = { val func = f.asInstanceOf[UDF9[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any) SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(9)(None)) } /** * Defines a Java UDF10 instance as user-defined function (UDF). * The caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. * * @group udf_funcs * @since 2.3.0 */ def udf(f: UDF10[_, _, _, _, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = { val func = f.asInstanceOf[UDF10[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any) SparkUserDefinedFunction(func, returnType, inputEncoders = Seq.fill(10)(None)) } // scalastyle:on parameter.number // scalastyle:on line.size.limit /** * Defines a deterministic user-defined function (UDF) using a Scala closure. For this variant, * the caller must specify the output data type, and there is no automatic input type coercion. * By default the returned UDF is deterministic. To change it to nondeterministic, call the * API `UserDefinedFunction.asNondeterministic()`. * * Note that, although the Scala closure can have primitive-type function argument, it doesn't * work well with null values. Because the Scala closure is passed in as Any type, there is no * type information for the function arguments. Without the type information, Spark may blindly * pass null to the Scala closure with primitive-type argument, and the closure will see the * default value of the Java type for the null argument, e.g. `udf((x: Int) => x, IntegerType)`, * the result is 0 for null input. * * @param f A closure in Scala * @param dataType The output data type of the UDF * * @group udf_funcs * @since 2.0.0 */ @deprecated("Scala `udf` method with return type parameter is deprecated. 
" + "Please use Scala `udf` method without return type parameter.", "3.0.0") def udf(f: AnyRef, dataType: DataType): UserDefinedFunction = { if (!SQLConf.get.getConf(SQLConf.LEGACY_ALLOW_UNTYPED_SCALA_UDF)) { val errorMsg = "You're using untyped Scala UDF, which does not have the input type " + "information. Spark may blindly pass null to the Scala closure with primitive-type " + "argument, and the closure will see the default value of the Java type for the null " + "argument, e.g. `udf((x: Int) => x, IntegerType)`, the result is 0 for null input. " + "To get rid of this error, you could:\\n" + "1. use typed Scala UDF APIs(without return type parameter), e.g. `udf((x: Int) => x)`\\n" + "2. use Java UDF APIs, e.g. `udf(new UDF1[String, Integer] { " + "override def call(s: String): Integer = s.length() }, IntegerType)`, " + "if input types are all non primitive\\n" + s"3. set ${SQLConf.LEGACY_ALLOW_UNTYPED_SCALA_UDF.key} to true and " + s"use this API with caution" throw new AnalysisException(errorMsg) } SparkUserDefinedFunction(f, dataType, inputEncoders = Nil) } /** * Call an user-defined function. * * @group udf_funcs * @since 1.5.0 */ @scala.annotation.varargs @deprecated("Use call_udf") def callUDF(udfName: String, cols: Column*): Column = call_udf(udfName, cols: _*) /** * Call an user-defined function. * Example: * {{{ * import org.apache.spark.sql._ * * val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value") * val spark = df.sparkSession * spark.udf.register("simpleUDF", (v: Int) => v * v) * df.select($"id", call_udf("simpleUDF", $"value")) * }}} * * @group udf_funcs * @since 3.2.0 */ @scala.annotation.varargs def call_udf(udfName: String, cols: Column*): Column = withExpr { UnresolvedFunction(udfName, cols.map(_.expr), isDistinct = false) } }
maropu/spark
sql/core/src/main/scala/org/apache/spark/sql/functions.scala
Scala
apache-2.0
187,382
package TAPLcomp2.tyarith import scala.util.parsing.combinator.ImplicitConversions import scala.util.parsing.combinator.syntactical.StandardTokenParsers sealed trait Ty case object TyBool extends Ty case object TyNat extends Ty sealed trait Term case object TmTrue extends Term case object TmFalse extends Term case class TmIf(cond: Term, t1: Term, t2: Term) extends Term case object TmZero extends Term case class TmSucc(t: Term) extends Term case class TmPred(t: Term) extends Term case class TmIsZero(t: Term) extends Term object TyArithParsers extends StandardTokenParsers with ImplicitConversions { lexical.reserved += ("true", "false", "if", "then", "else", "iszero", "succ", "pred") lexical.delimiters += ("(", ")", ";") private def term: Parser[Term] = appTerm ||| ("if" ~> term) ~ ("then" ~> term) ~ ("else" ~> term) ^^ TmIf private def appTerm: Parser[Term] = aTerm ||| "succ" ~> aTerm ^^ TmSucc ||| "pred" ~> aTerm ^^ TmPred ||| "iszero" ~> aTerm ^^ TmIsZero // Atomic terms are ones that never require extra parentheses private def aTerm: Parser[Term] = "(" ~> term <~ ")" ||| "true" ^^ { _ => TmTrue } ||| "false" ^^ { _ => TmFalse } ||| numericLit ^^ { x => num(x.toInt) } private def num(x: Int): Term = x match { case 0 => TmZero case _ => TmSucc(num(x - 1)) } def input(s: String) = phrase(term)(new lexical.Scanner(s)) match { case t if t.successful => t.get case t => sys.error(t.toString) } }
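// Illustrative usage sketch (not part of the original parser file; assumed to live in a separate
// demo source file): it shows how TyArithParsers.input is called and what AST shape it returns.
// Numeric literals are desugared by num(...) into nested TmSucc applications around TmZero.
import TAPLcomp2.tyarith.{TmFalse, TmIf, TmIsZero, TmSucc, TmZero, TyArithParsers}

object TyArithParserDemo {
  def main(args: Array[String]): Unit = {
    // Parses an if-expression; "0" becomes TmZero and "succ 0" becomes TmSucc(TmZero).
    val ast = TyArithParsers.input("if iszero 0 then false else succ 0")
    assert(ast == TmIf(TmIsZero(TmZero), TmFalse, TmSucc(TmZero)))
    println(ast)

    // A bare numeral n parses to n nested TmSucc constructors around TmZero.
    assert(TyArithParsers.input("2") == TmSucc(TmSucc(TmZero)))
  }
}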
hy-zhang/parser
Scala/Parser/src/TAPLcomp2/tyarith/parser.scala
Scala
bsd-3-clause
1,507
package com.twitter.finagle.zipkin.core import com.twitter.conversions.DurationOps._ import com.twitter.finagle.tracing._ import com.twitter.util._ import java.net.{InetAddress, InetSocketAddress} import org.scalatest.funsuite.AnyFunSuite class RawZipkinTracerTest extends AnyFunSuite { val traceId = TraceId(Some(SpanId(123)), Some(SpanId(123)), SpanId(123), None, Flags().setDebug) class FakeRawZipkinTracer extends RawZipkinTracer { var spans: Seq[Span] = Seq.empty override def sendSpans(xs: Seq[Span]): Future[Unit] = { spans ++= xs Future.Unit } } test("send spans when flushed") { val tracer = new FakeRawZipkinTracer val localAddress = InetAddress.getByAddress(Array.fill(4) { 1 }) val remoteAddress = InetAddress.getByAddress(Array.fill(4) { 10 }) val port1 = 80 // never bound val port2 = 53 // ditto tracer.record( Record( traceId, Time.fromSeconds(123), Annotation.ClientAddr(new InetSocketAddress(localAddress, port1)) ) ) tracer.record( Record( traceId, Time.fromSeconds(123), Annotation.LocalAddr(new InetSocketAddress(localAddress, port1)) ) ) tracer.record( Record( traceId, Time.fromSeconds(123), Annotation.ServerAddr(new InetSocketAddress(remoteAddress, port2)) ) ) tracer.record(Record(traceId, Time.fromSeconds(123), Annotation.ServiceName("service"))) tracer.record(Record(traceId, Time.fromSeconds(123), Annotation.Rpc("method"))) tracer.record( Record(traceId, Time.fromSeconds(123), Annotation.BinaryAnnotation("i16", 16.toShort)) ) tracer.record(Record(traceId, Time.fromSeconds(123), Annotation.BinaryAnnotation("i32", 32))) tracer.record(Record(traceId, Time.fromSeconds(123), Annotation.BinaryAnnotation("i64", 64L))) tracer.record( Record(traceId, Time.fromSeconds(123), Annotation.BinaryAnnotation("double", 123.3d)) ) tracer.record( Record(traceId, Time.fromSeconds(123), Annotation.BinaryAnnotation("string", "woopie")) ) tracer.record(Record(traceId, Time.fromSeconds(123), Annotation.Message("boo"))) tracer.record( Record(traceId, Time.fromSeconds(123), Annotation.Message("boohoo"), Some(1.second)) ) tracer.record(Record(traceId, Time.fromSeconds(123), Annotation.ClientSend)) tracer.record(Record(traceId, Time.fromSeconds(123), Annotation.ClientRecv)) tracer.flush() assert(tracer.spans.length == 1) } }
twitter/finagle
finagle-zipkin-core/src/test/scala/com/twitter/finagle/zipkin/core/RawZipkinTracerTest.scala
Scala
apache-2.0
2,531
package at.nonblocking.cliwix.core import org.junit.Test import at.nonblocking.cliwix.core.command.{CommandResult, CompanyListCommand} import at.nonblocking.cliwix.core.handler.Handler import at.nonblocking.cliwix.model.Company import java.{util=>jutil} class ExceptionTranslationTest { @Test(expected = classOf[CliwixCommandExecutionException]) def test(): Unit = { class ListHandlerTest extends Handler[CompanyListCommand, jutil.Map[String, Company]] { override def handle(command: CompanyListCommand): CommandResult[jutil.Map[String, Company]] = { throw new IllegalArgumentException } } new ListHandlerTest().execute(new CompanyListCommand(true)) } }
nonblocking/cliwix
cliwix-core/src/test/scala/at/nonblocking/cliwix/core/ExceptionTranslationTest.scala
Scala
agpl-3.0
697
package org.bitcoins.node.networking.peer import akka.actor.ActorRef import akka.util.Timeout import org.bitcoins.core.api.chain.{ChainApi, FilterSyncMarker} import org.bitcoins.core.bloom.BloomFilter import org.bitcoins.core.number.Int32 import org.bitcoins.core.p2p._ import org.bitcoins.core.protocol.transaction.Transaction import org.bitcoins.crypto.{ DoubleSha256Digest, DoubleSha256DigestBE, HashDigest } import org.bitcoins.node.P2PLogger import org.bitcoins.node.config.NodeAppConfig import org.bitcoins.node.constant.NodeConstants import org.bitcoins.node.networking.P2PClient import scala.concurrent.duration.DurationInt import scala.concurrent.{ExecutionContext, Future} case class PeerMessageSender(client: P2PClient)(implicit conf: NodeAppConfig) extends P2PLogger { private val socket = client.peer.socket implicit private val timeout = Timeout(30.seconds) /** Initiates a connection with the given peer */ def connect(): Unit = { client.actor ! P2PClient.ConnectCommand } def isConnected()(implicit ec: ExecutionContext): Future[Boolean] = { client.isConnected() } def isInitialized()(implicit ec: ExecutionContext): Future[Boolean] = { client.isInitialized() } def isDisconnected()(implicit ec: ExecutionContext): Future[Boolean] = { client.isDisconnected() } /** Disconnects the given peer */ def disconnect()(implicit ec: ExecutionContext): Future[Unit] = { isConnected().flatMap { case true => logger.info(s"Disconnecting peer at socket=${socket}") (client.actor ! P2PClient.CloseCommand) Future.unit case false => val err = s"Cannot disconnect client that is not connected to socket=${socket}!" logger.warn(err) Future.unit } } /** Sends a [[org.bitcoins.core.p2p.VersionMessage VersionMessage]] to our peer */ def sendVersionMessage(): Future[Unit] = { val local = java.net.InetAddress.getLocalHost val versionMsg = VersionMessage( conf.network, InetAddress(client.peer.socket.getAddress.getAddress), InetAddress(local.getAddress), relay = conf.relay) logger.trace(s"Sending versionMsg=$versionMsg to peer=${client.peer}") sendMsg(versionMsg) } def sendVersionMessage(chainApi: ChainApi)(implicit ec: ExecutionContext): Future[Unit] = { chainApi.getBestHashBlockHeight().flatMap { height => val localhost = java.net.InetAddress.getLocalHost val versionMsg = VersionMessage(conf.network, NodeConstants.userAgent, Int32(height), InetAddress(localhost.getAddress), InetAddress(localhost.getAddress), conf.relay) logger.debug(s"Sending versionMsg=$versionMsg to peer=${client.peer}") sendMsg(versionMsg) } } def sendVerackMessage(): Future[Unit] = { val verackMsg = VerAckMessage sendMsg(verackMsg) } def sendSendAddrV2Message(): Future[Unit] = { sendMsg(SendAddrV2Message) } /** Responds to a ping message */ def sendPong(ping: PingMessage): Future[Unit] = { val pong = PongMessage(ping.nonce) logger.trace(s"Sending pong=$pong to peer=${client.peer}") sendMsg(pong) } def sendGetHeadersMessage(lastHash: DoubleSha256Digest): Future[Unit] = { val headersMsg = GetHeadersMessage(lastHash) logger.trace(s"Sending getheaders=$headersMsg to peer=${client.peer}") sendMsg(headersMsg) } def sendGetHeadersMessage( hashes: Vector[DoubleSha256Digest]): Future[Unit] = { // GetHeadersMessage has a max of 101 hashes val headersMsg = GetHeadersMessage(hashes.distinct.take(101)) logger.trace(s"Sending getheaders=$headersMsg to peer=${client.peer}") sendMsg(headersMsg) } def sendHeadersMessage(): Future[Unit] = { val sendHeadersMsg = SendHeadersMessage sendMsg(sendHeadersMsg) } /** Sends a inventory message with the given transactions */ def 
sendInventoryMessage(transactions: Transaction*): Future[Unit] = { val inventories = transactions.map(tx => Inventory(TypeIdentifier.MsgTx, tx.txId)) val message = InventoryMessage(inventories) logger.trace(s"Sending inv=$message to peer=${client.peer}") sendMsg(message) } def sendFilterClearMessage(): Future[Unit] = { sendMsg(FilterClearMessage) } def sendFilterAddMessage(hash: HashDigest): Future[Unit] = { val message = FilterAddMessage.fromHash(hash) logger.trace(s"Sending filteradd=$message to peer=${client.peer}") sendMsg(message) } def sendFilterLoadMessage(bloom: BloomFilter): Future[Unit] = { val message = FilterLoadMessage(bloom) logger.trace(s"Sending filterload=$message to peer=${client.peer}") sendMsg(message) } def sendTransactionMessage(transaction: Transaction): Future[Unit] = { val message = TransactionMessage(transaction) logger.debug(s"Sending txmessage=$message to peer=${client.peer}") sendMsg(message) } /** Sends a request for filtered blocks matching the given headers */ def sendGetDataMessage( typeIdentifier: TypeIdentifier, hashes: DoubleSha256Digest*): Future[Unit] = { val inventories = hashes.map(hash => Inventory(typeIdentifier, hash)) val message = GetDataMessage(inventories) logger.debug(s"Sending getdata=$message to peer=${client.peer}") sendMsg(message) } def sendGetCompactFiltersMessage( filterSyncMarker: FilterSyncMarker): Future[Unit] = { val message = GetCompactFiltersMessage(if (filterSyncMarker.startHeight < 0) 0 else filterSyncMarker.startHeight, filterSyncMarker.stopBlockHash) logger.debug(s"Sending getcfilters=$message to peer ${client.peer}") sendMsg(message) } def sendGetCompactFilterHeadersMessage( filterSyncMarker: FilterSyncMarker): Future[Unit] = { val message = GetCompactFilterHeadersMessage(if (filterSyncMarker.startHeight < 0) 0 else filterSyncMarker.startHeight, filterSyncMarker.stopBlockHash) logger.debug(s"Sending getcfheaders=$message to peer ${client.peer}") sendMsg(message) } def sendGetCompactFilterCheckPointMessage( stopHash: DoubleSha256Digest): Future[Unit] = { val message = GetCompactFilterCheckPointMessage(stopHash) logger.debug(s"Sending getcfcheckpt=$message to peer ${client.peer}") sendMsg(message) } private[node] def sendNextGetCompactFilterCommand( chainApi: ChainApi, filterBatchSize: Int, startHeight: Int)(implicit ec: ExecutionContext): Future[Boolean] = { for { filterSyncMarkerOpt <- chainApi.nextFilterHeaderBatchRange(startHeight, filterBatchSize) res <- filterSyncMarkerOpt match { case Some(filterSyncMarker) => logger.info(s"Requesting compact filters from $filterSyncMarker") sendGetCompactFiltersMessage(filterSyncMarker) .map(_ => true) case None => Future.successful(false) } } yield res } private[node] def sendNextGetCompactFilterHeadersCommand( chainApi: ChainApi, filterHeaderBatchSize: Int, prevStopHash: DoubleSha256DigestBE)(implicit ec: ExecutionContext): Future[Boolean] = { for { filterSyncMarkerOpt <- chainApi.nextBlockHeaderBatchRange( prevStopHash = prevStopHash, batchSize = filterHeaderBatchSize) res <- filterSyncMarkerOpt match { case Some(filterSyncMarker) => logger.info( s"Requesting next compact filter headers from $filterSyncMarker") sendGetCompactFilterHeadersMessage(filterSyncMarker) .map(_ => true) case None => Future.successful(false) } } yield res } private[node] def sendMsg(msg: NetworkPayload): Future[Unit] = { //version or verack messages are the only messages that //can be sent before we are fully initialized //as they are needed to complete our handshake with our peer logger.debug(s"Sending 
msg=${msg.commandName} to peer=${socket}")
    val networkMsg = NetworkMessage(conf.network, msg)
    client.actor ! networkMsg
    Future.unit
  }
}

object PeerMessageSender {

  sealed abstract class PeerMessageHandlerMsg

  /** For when we are done with exchanging version and verack messages
    * This means we can send normal p2p messages now
    */
  case object HandshakeFinished extends PeerMessageHandlerMsg

  case class SendToPeer(msg: NetworkMessage) extends PeerMessageHandlerMsg

  /** Accumulates network messages while we are doing a handshake with our peer
    * and caches a peer handler actor so we can send a [[HandshakeFinished]]
    * message back to the actor when we are fully connected
    */
  case class MessageAccumulator(
      networkMsgs: Vector[(ActorRef, NetworkMessage)],
      peerHandler: ActorRef)
}
bitcoin-s/bitcoin-s
node/src/main/scala/org/bitcoins/node/networking/peer/PeerMessageSender.scala
Scala
mit
9,028
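A standalone sketch (plain Scala, no bitcoin-s types) of the capping rule documented in sendGetHeadersMessage above: duplicate block-locator hashes are dropped first, then at most 101 entries are kept. The integer stand-ins and the LocatorCapDemo name are illustrative only.

// Hypothetical demo of the dedupe-then-cap step used by sendGetHeadersMessage.
object LocatorCapDemo {
  def main(args: Array[String]): Unit = {
    val hashes = Vector.tabulate(250)(i => i % 150) // stand-ins for block hashes, with repeats
    val capped = hashes.distinct.take(101)
    println(capped.size) // 101
  }
}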
package snakesladders.domain.models

import snakesladders.domain.models.GameDefinitions.GameDefinition
import snakesladders.domain.models.Players.Player

case class Game(
  gameDefinition: GameDefinition,
  players: Seq[Player]
)
sibex/snakes-and-ladders
src/main/scala/snakesladders/domain/models/Game.scala
Scala
mit
230
package org.littlewings.javaee7.typed

import javax.enterprise.context.ApplicationScoped
import javax.enterprise.inject.Typed

class Book

trait Shop[T]

class Business

@Typed(Array(classOf[Shop[Book]]))
@ApplicationScoped
class BookShop extends Business with Shop[Book]
kazuhira-r/javaee7-scala-examples
cdi-typed/src/main/scala/org/littlewings/javaee7/typed/BookShop.scala
Scala
mit
272
package com.twitter.finagle.naming

import com.twitter.finagle._
import com.twitter.util.Activity

/**
 * Interpret names against a Dtab. Differs from
 * [[com.twitter.finagle.Namer Namers]] in that the passed in
 * [[com.twitter.finagle.Dtab Dtab]] can affect the resolution process.
 */
trait NameInterpreter {

  /**
   * Bind `path` against the given `dtab`.
   */
  def bind(dtab: Dtab, path: Path): Activity[NameTree[Name.Bound]]
}

object NameInterpreter extends NameInterpreter {

  /**
   * The global interpreter that resolves all names in Finagle.
   *
   * Can be modified to provide a different mechanism for name resolution.
   */
  @volatile var global: NameInterpreter = DefaultInterpreter

  /** Java API for setting the interpreter */
  def setGlobal(nameInterpreter: NameInterpreter): Unit = global = nameInterpreter

  override def bind(dtab: Dtab, tree: Path): Activity[NameTree[Name.Bound]] =
    global.bind(dtab, tree)
}
adriancole/finagle
finagle-core/src/main/scala/com/twitter/finagle/naming/NameInterpreter.scala
Scala
apache-2.0
948
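Since NameInterpreter.global above is documented as replaceable, here is a brief sketch of swapping in a custom interpreter. It is an assumption-laden illustration: LoggingInterpreter and InstallLoggingInterpreter are made-up names, and the sketch only relies on the bind signature and the global/setGlobal members shown in the file above.

import com.twitter.finagle.{Dtab, Name, NameTree, Path}
import com.twitter.finagle.naming.NameInterpreter
import com.twitter.util.Activity

// Hypothetical wrapper that logs each lookup, then delegates to whatever
// interpreter was previously installed as the global one.
class LoggingInterpreter(underlying: NameInterpreter) extends NameInterpreter {
  def bind(dtab: Dtab, path: Path): Activity[NameTree[Name.Bound]] = {
    println(s"binding ${path.show}")
    underlying.bind(dtab, path)
  }
}

object InstallLoggingInterpreter {
  def main(args: Array[String]): Unit = {
    val previous = NameInterpreter.global
    // Java callers would use NameInterpreter.setGlobal(...) instead.
    NameInterpreter.global = new LoggingInterpreter(previous)
  }
}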
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.mxnet

package object infer {
  private[mxnet] val handlerType = MXNetHandlerType.SingleThreadHandler
}
indhub/mxnet
scala-package/infer/src/main/scala/org/apache/mxnet/infer/package.scala
Scala
apache-2.0
925
import org.scalatest.FunSuite

@identity
case class InteropIdentity(x: Int)

@placebo
case class InteropPlacebo(x: Int)

class InteropCaseSynthesis extends FunSuite {
  test("case module synthesis for identity") {
    assert(InteropIdentity.toString === "InteropIdentity")
  }

  test("case module synthesis for placebo") {
    assert(InteropPlacebo.toString === "InteropPlacebo")
  }
}
scalamacros/paradise
tests/src/test/scala/annotations/run/InteropCaseSynthesis.scala
Scala
bsd-3-clause
385
package colossus.streaming

/**
 * A `Signal` is a callback mechanism used by both [[Source]] and [[Sink]] to manage
 * forward/back-pressure. In both cases it is returned when a requested operation cannot
 * immediately complete, but can at a later point in time. For example, when pulling from a
 * [[Source]], if no item is immediately available, a signal is returned that will be
 * triggered when an item is available to pull.
 * {{{
 * val stream: Source[Int] = //...
 * stream.pull() match {
 *   case PullResult.Item(num) => //...
 *   case PullResult.Full(signal) => signal.notify {
 *     //when this callback function is called, it is guaranteed that an item is now available
 *     stream.pull()//...
 *   }
 * }
 * }}}
 * Signals are multi-listener, so that multiple consumers can attach callbacks
 * to a single listener. Callbacks are fired in the order they are queued, and
 * generally conditions for triggering a signal are re-checked for each listener
 * (so that, for example, if one item is pushed to an empty Source, only one
 * listener is signaled).
 */
trait Signal {
  def notify(cb: => Unit)
}

/**
 * When a user attempts to push a value into a pipe, and the pipe either fills
 * or was already full, a Trigger is returned in the PushResult. This is
 * essentially just a fillable callback function that is called when the pipe
 * either becomes empty or is closed or terminated
 *
 * Notice that when the trigger is executed we don't include any information
 * about the state of the pipe. The handler can just try pushing again to
 * determine if the pipe is dead or not.
 */
class Trigger extends Signal {

  private var callbacks = new java.util.LinkedList[() => Unit]

  def empty = callbacks.size == 0

  def notify(cb: => Unit) {
    //println("adding signal")
    callbacks.add(() => cb)
  }

  def trigger(): Boolean = {
    if (callbacks.size == 0) false
    else {
      callbacks.remove()()
      true
    }
  }

  def triggerAll() {
    while (trigger()) {}
  }

  def clear() {
    callbacks.clear()
  }
}
tumblr/colossus
colossus/src/main/scala/colossus/streaming/Signal.scala
Scala
apache-2.0
2,059
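A small sketch of the Trigger class above (not part of the original sources; TriggerDemo is a made-up name). It shows that callbacks registered via notify are queued and then fired one per trigger() call, in registration order, with trigger() returning false once the queue is empty.

import colossus.streaming.Trigger

// Hypothetical demo: each trigger() call runs exactly one queued callback.
object TriggerDemo {
  def main(args: Array[String]): Unit = {
    val trigger = new Trigger
    trigger.notify(println("first consumer signaled"))
    trigger.notify(println("second consumer signaled"))

    println(trigger.trigger()) // runs the first callback, prints true
    println(trigger.trigger()) // runs the second callback, prints true
    println(trigger.trigger()) // queue is empty, prints false
  }
}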
package sttp.client3

import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import java.io.IOException
import java.net.URI

class SttpBackendOptionsProxyTest2 extends AnyFlatSpec with Matchers {
  it should "throw UnsupportedOperationException with reason" in {
    val proxySetting = SttpBackendOptions.Proxy(
      "fakeproxyserverhost",
      8080,
      SttpBackendOptions.ProxyType.Http,
      nonProxyHosts = Nil,
      onlyProxyHosts = Nil
    )

    val proxySelector = proxySetting.asJavaProxySelector
    val ex = intercept[UnsupportedOperationException] {
      val uri = new URI("foo")
      val ioe = new IOException("bar")
      proxySelector.connectFailed(uri, proxySetting.inetSocketAddress, ioe)
    }
    ex.getMessage should be("Couldn't connect to the proxy server, uri: foo, socket: fakeproxyserverhost:8080")
  }
}
softwaremill/sttp
core/src/test/scalajvm/sttp/client3/SttpBackendOptionsProxyTest2.scala
Scala
apache-2.0
872
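A hedged sketch building on the same proxy settings as the test above: it assumes only the members the test already uses (SttpBackendOptions.Proxy, ProxyType.Http, asJavaProxySelector) plus the standard java.net.ProxySelector.select contract. The host, port, URI, and the ProxySelectorDemo name are illustrative.

import sttp.client3.SttpBackendOptions

import java.net.URI

// Hypothetical demo of viewing sttp proxy settings through the java.net.ProxySelector API.
object ProxySelectorDemo {
  def main(args: Array[String]): Unit = {
    val proxySetting = SttpBackendOptions.Proxy(
      "fakeproxyserverhost", // made-up host, as in the test above
      8080,
      SttpBackendOptions.ProxyType.Http,
      nonProxyHosts = Nil,
      onlyProxyHosts = Nil
    )

    val selector = proxySetting.asJavaProxySelector
    // For an ordinary URI the selector is expected to yield the configured HTTP proxy.
    println(selector.select(new URI("http://example.org/")))
  }
}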
/* * Copyright 2001-2008 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest import NodeFamily._ import scala.collection.immutable.ListSet import org.scalatest.StackDepthExceptionHelper.getStackDepthFun import java.util.concurrent.atomic.AtomicReference import java.util.ConcurrentModificationException import org.scalatest.events._ import Suite.anErrorThatShouldCauseAnAbort /** * A suite of tests in which each test represents one <em>scenario</em> of a <em>feature</em>. * <code>FeatureSpec</code> is intended for writing tests that are "higher level" than unit tests, for example, integration * tests, functional tests, and acceptance tests. You can use <code>FeatureSpec</code> for unit testing if you prefer, however. * Here's an example: * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * import org.scalatest.GivenWhenThen * import scala.collection.mutable.Stack * * class StackFeatureSpec extends FeatureSpec with GivenWhenThen { * * feature("The user can pop an element off the top of the stack") { * * info("As a programmer") * info("I want to be able to pop items off the stack") * info("So that I can get them in last-in-first-out order") * * scenario("pop is invoked on a non-empty stack") { * * given("a non-empty stack") * val stack = new Stack[Int] * stack.push(1) * stack.push(2) * val oldSize = stack.size * * when("when pop is invoked on the stack") * val result = stack.pop() * * then("the most recently pushed element should be returned") * assert(result === 2) * * and("the stack should have one less item than before") * assert(stack.size === oldSize - 1) * } * * scenario("pop is invoked on an empty stack") { * * given("an empty stack") * val emptyStack = new Stack[String] * * when("when pop is invoked on the stack") * then("NoSuchElementException should be thrown") * intercept[NoSuchElementException] { * emptyStack.pop() * } * * and("the stack should still be empty") * assert(emptyStack.isEmpty) * } * } * } * </pre> * * <p> * A <code>FeatureSpec</code> contains <em>feature clauses</em> and <em>scenarios</em>. You define a feature clause * with <code>feature</code>, and a scenario with <code>scenario</code>. Both * <code>feature</code> and <code>scenario</code> are methods, defined in * <code>FeatureSpec</code>, which will be invoked * by the primary constructor of <code>StackFeatureSpec</code>. * A feature clause describes a feature of the <em>subject</em> (class or other entity) you are specifying * and testing. In the previous example, * the subject under specification and test is a stack. The feature being specified and tested is * the ability for a user (a programmer in this case) to pop an element off the top of the stack. With each scenario you provide a * string (the <em>spec text</em>) that specifies the behavior of the subject for * one scenario in which the feature may be used, and a block of code that tests that behavior. * You place the spec text between the parentheses, followed by the test code between curly * braces. 
The test code will be wrapped up as a function passed as a by-name parameter to * <code>scenario</code>, which will register the test for later execution. * </p> * * <p> * A <code>FeatureSpec</code>'s lifecycle has two phases: the <em>registration</em> phase and the * <em>ready</em> phase. It starts in registration phase and enters ready phase the first time * <code>run</code> is called on it. It then remains in ready phase for the remainder of its lifetime. * </p> * * <p> * Scenarios can only be registered with the <code>scenario</code> method while the <code>FeatureSpec</code> is * in its registration phase. Any attempt to register a scenario after the <code>FeatureSpec</code> has * entered its ready phase, <em>i.e.</em>, after <code>run</code> has been invoked on the <code>FeatureSpec</code>, * will be met with a thrown <code>TestRegistrationClosedException</code>. The recommended style * of using <code>FeatureSpec</code> is to register tests during object construction as is done in all * the examples shown here. If you keep to the recommended style, you should never see a * <code>TestRegistrationClosedException</code>. * </p> * * <p> * Each scenario represents one test. The name of the test is the spec text passed to the <code>scenario</code> method. * The feature name does not appear as part of the test name. In a <code>FeatureSpec</code>, therefore, you must take care * to ensure that each test has a unique name (in other words, that each <code>scenario</code> has unique spec text). * </p> * * <p> * When you run a <code>FeatureSpec</code>, it will send <code>Formatter</code>s in the events it sends to the * <code>Reporter</code>. ScalaTest's built-in reporters will report these events in such a way * that the output is easy to read as an informal specification of the <em>subject</em> being tested. * For example, if you ran <code>StackFeatureSpec</code> from within the Scala interpreter: * </p> * * <pre class="stREPL"> * scala> (new StackFeatureSpec).execute() * </pre> * * <p> * You would see: * </p> * * <pre class="stREPL"> * <span class="stGreen">Feature: The user can pop an element off the top of the stack * As a programmer * I want to be able to pop items off the stack * So that I can get them in last-in-first-out order * Scenario: pop is invoked on a non-empty stack * Given a non-empty stack * When when pop is invoked on the stack * Then the most recently pushed element should be returned * And the stack should have one less item than before * Scenario: pop is invoked on an empty stack * Given an empty stack * When when pop is invoked on the stack * Then NoSuchElementException should be thrown * And the stack should still be empty</span> * </pre> * * <p> * See also: <a href="http://www.scalatest.org/getting_started_with_feature_spec" target="_blank">Getting started with <code>FeatureSpec</code>.</a> * </p> * * <p> * <em>Note: Trait <code>FeatureSpec</code>'s syntax is in part inspired by <a href="http://cukes.info/" target="_blank">Cucumber</a>, a Ruby BDD framework.</em> *</p> * * <h2>Ignored tests</h2> * * <p> * To support the common use case of &#8220;temporarily&#8221; disabling a test, with the * good intention of resurrecting the test at a later time, <code>FeatureSpec</code> provides registration * methods that start with <code>ignore</code> instead of <code>scenario</code>. 
For example, to temporarily * disable the test named <code>addition</code>, just change &#8220;<code>scenario</code>&#8221; into &#8220;<code>ignore</code>,&#8221; like this: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * * class ArithmeticSpec extends FeatureSpec { * * // Sharing fixture objects via instance variables * val shared = 5 * * feature("Integer arithmetic") { * * ignore("addition") { * val sum = 2 + 3 * assert(sum === shared) * } * * scenario("subtraction") { * val diff = 7 - 2 * assert(diff === shared) * } * } * } * </pre> * * <p> * If you run this version of <code>ArithmeticSpec</code> with: * </p> * * <pre class="stREPL"> * scala> (new ArithmeticSpec).execute() * </pre> * * <p> * It will run only <code>subtraction</code> and report that <code>addition</code> was ignored: * </p> * * <pre class="stREPL"> * <span class="stGreen">Feature: Integer arithmetic </span> * <span class="stYellow">Scenario: addition !!! IGNORED !!!</span> * <span class="stGreen">Scenario: subtraction</span> * </pre> * * <h2>Informers</h2> * * <p> * One of the parameters to the <code>run</code> method is a <code>Reporter</code>, which * will collect and report information about the running suite of tests. * Information about suites and tests that were run, whether tests succeeded or failed, * and tests that were ignored will be passed to the <code>Reporter</code> as the suite runs. * Most often the reporting done by default by <code>FeatureSpec</code>'s methods will be sufficient, but * occasionally you may wish to provide custom information to the <code>Reporter</code> from a test. * For this purpose, an <code>Informer</code> that will forward information to the current <code>Reporter</code> * is provided via the <code>info</code> parameterless method. * You can pass the extra information to the <code>Informer</code> via its <code>apply</code> method. * The <code>Informer</code> will then pass the information to the <code>Reporter</code> via an <code>InfoProvided</code> event. * Here's an example: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * * class ArithmeticSpec extends FeatureSpec { * * feature("Integer arithmetic") { * * scenario("addition") { * val sum = 2 + 3 * assert(sum === 5) * info("Addition seems to work") * } * * scenario("subtraction") { * val diff = 7 - 2 * assert(diff === 5) * } * } * } * </pre> * * If you run this <code>ArithmeticSpec</code> from the interpreter, you will see the following message * included in the printed report: * * <pre class="stREPL"> * <span class="stGreen">Feature: Integer arithmetic * Scenario: addition * Addition seems to work</span> * </pre> * * <p> * One use case for the <code>Informer</code> is to pass more information about a scenario to the reporter. For example, * the <code>GivenWhenThen</code> trait provides methods that use the implicit <code>info</code> provided by <code>FeatureSpec</code> * to pass such information to the reporter. 
Here's an example: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * import org.scalatest.GivenWhenThen * * class ArithmeticSpec extends FeatureSpec with GivenWhenThen { * * feature("Integer arithmetic") { * * scenario("addition") { * * given("two integers") * val x = 2 * val y = 3 * * when("they are added") * val sum = x + y * * then("the result is the sum of the two numbers") * assert(sum === 5) * } * * scenario("subtraction") { * * given("two integers") * val x = 7 * val y = 2 * * when("one is subtracted from the other") * val diff = x - y * * then("the result is the difference of the two numbers") * assert(diff === 5) * } * } * } * </pre> * * <p> * If you run this <code>FeatureSpec</code> from the interpreter, you will see the following messages * included in the printed report: * </p> * * <pre class="stREPL"> * scala> (new ArithmeticSpec).execute() * <span class="stGreen">Feature: Integer arithmetic * Scenario: addition * Given two integers * When they are added * Then the result is the sum of the two numbers * Scenario: subtraction * Given two integers * When one is subtracted from the other * Then the result is the difference of the two numbers</span> * </pre> * * <h2>Pending tests</h2> * * <p> * A <em>pending test</em> is one that has been given a name but is not yet implemented. The purpose of * pending tests is to facilitate a style of testing in which documentation of behavior is sketched * out before tests are written to verify that behavior (and often, before the behavior of * the system being tested is itself implemented). Such sketches form a kind of specification of * what tests and functionality to implement later. * </p> * * <p> * To support this style of testing, a test can be given a name that specifies one * bit of behavior required by the system being tested. The test can also include some code that * sends more information about the behavior to the reporter when the tests run. At the end of the test, * it can call method <code>pending</code>, which will cause it to complete abruptly with <code>TestPendingException</code>. * </p> * * <p> * Because tests in ScalaTest can be designated as pending with <code>TestPendingException</code>, both the test name and any information * sent to the reporter when running the test can appear in the report of a test run. (In other words, * the code of a pending test is executed just like any other test.) However, because the test completes abruptly * with <code>TestPendingException</code>, the test will be reported as pending, to indicate * the actual test, and possibly the functionality, has not yet been implemented. * You can mark tests as pending in a <code>FeatureSpec</code> like this: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * * class ArithmeticSpec extends FeatureSpec { * * // Sharing fixture objects via instance variables * val shared = 5 * * feature("Integer arithmetic") { * * scenario("addition") { * val sum = 2 + 3 * assert(sum === shared) * } * * scenario("subtraction") (pending) * } * } * </pre> * * <p> * (Note: "<code>(pending)</code>" is the body of the test. Thus the test contains just one statement, an invocation * of the <code>pending</code> method, which throws <code>TestPendingException</code>.) * If you run this version of <code>ArithmeticSpec</code> with: * </p> * * <pre class="stREPL"> * scala> (new ArithmeticSpec).execute() * </pre> * * <p> * It will run both tests, but report that <code>subtraction</code> is pending. 
You'll see: * </p> * * <pre class="stREPL"> * <span class="stGreen">Feature: Integer arithmetic * Scenario: addition</span> * <span class="stYellow">Scenario: subtraction (pending)</span> * </pre> * * <p> * One difference between an ignored test and a pending one is that an ignored test is intended to be used during a * significant refactorings of the code under test, when tests break and you don't want to spend the time to fix * all of them immediately. You can mark some of those broken tests as ignored temporarily, so that you can focus the red * bar on just failing tests you actually want to fix immediately. Later you can go back and fix the ignored tests. * In other words, by ignoring some failing tests temporarily, you can more easily notice failed tests that you actually * want to fix. By contrast, a pending test is intended to be used before a test and/or the code under test is written. * Pending indicates you've decided to write a test for a bit of behavior, but either you haven't written the test yet, or * have only written part of it, or perhaps you've written the test but don't want to implement the behavior it tests * until after you've implemented a different bit of behavior you realized you need first. Thus ignored tests are designed * to facilitate refactoring of existing code whereas pending tests are designed to facilitate the creation of new code. * </p> * * <p> * One other difference between ignored and pending tests is that ignored tests are implemented as a test tag that is * excluded by default. Thus an ignored test is never executed. By contrast, a pending test is implemented as a * test that throws <code>TestPendingException</code> (which is what calling the <code>pending</code> method does). Thus * the body of pending tests are executed up until they throw <code>TestPendingException</code>. The reason for this difference * is that it enables your unfinished test to send <code>InfoProvided</code> messages to the reporter before it completes * abruptly with <code>TestPendingException</code>, as shown in the previous example on <code>Informer</code>s * that used the <code>GivenWhenThen</code> trait. For example, the following snippet in a <code>FeatureSpec</code>: * </p> * * <pre class="stHighlight"> * feature("Integer arithmetic") { *&nbsp; * scenario("addition") { * given("two integers") * when("they are added") * then("the result is the sum of the two numbers") * pending * } * // ... * </pre> * * <p> * Would yield the following output when run in the interpreter: * </p> * * <pre class="stREPL"> * <span class="stGreen">Feature: Integer arithmetic</span> * <span class="stYellow">Scenario: addition (pending) * Given two integers * When they are added * Then the result is the sum of the two numbers</span> * </pre> * * <h2>Tagging tests</h2> * * <p> * A <code>FeatureSpec</code>'s tests may be classified into groups by <em>tagging</em> them with string names. * As with any suite, when executing a <code>FeatureSpec</code>, groups of tests can * optionally be included and/or excluded. To tag a <code>FeatureSpec</code>'s tests, * you pass objects that extend abstract class <code>org.scalatest.Tag</code> to methods * that register tests, <code>test</code> and <code>ignore</code>. Class <code>Tag</code> takes one parameter, a string name. If you have * created Java annotation interfaces for use as group names in direct subclasses of <code>org.scalatest.Suite</code>, * then you will probably want to use group names on your <code>FeatureSpec</code>s that match. 
To do so, simply * pass the fully qualified names of the Java interfaces to the <code>Tag</code> constructor. For example, if you've * defined Java annotation interfaces with fully qualified names, <code>com.mycompany.tags.SlowTest</code> and * <code>com.mycompany.tags.DbTest</code>, then you could * create matching groups for <code>FeatureSpec</code>s like this: * </p> * * <pre class="stHighlight"> * import org.scalatest.Tag * * object SlowTest extends Tag("com.mycompany.tags.SlowTest") * object DbTest extends Tag("com.mycompany.tags.DbTest") * </pre> * * <p> * Given these definitions, you could place <code>FeatureSpec</code> tests into groups like this: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * * class ArithmeticSpec extends FeatureSpec { * * // Sharing fixture objects via instance variables * val shared = 5 * * feature("Integer arithmetic") { * * scenario("addition", SlowTest) { * val sum = 2 + 3 * assert(sum === shared) * } * * scenario("subtraction", SlowTest, DbTest) { * val diff = 7 - 2 * assert(diff === shared) * } * } * } * </pre> * * <p> * This code marks both tests, "addition" and "subtraction," with the <code>com.mycompany.tags.SlowTest</code> tag, * and test "subtraction" with the <code>com.mycompany.tags.DbTest</code> tag. * </p> * * <p> * The <code>run</code> method takes a <code>Filter</code>, whose constructor takes an optional * <code>Set[String]</code> called <code>tagsToInclude</code> and a <code>Set[String]</code> called * <code>tagsToExclude</code>. If <code>tagsToInclude</code> is <code>None</code>, all tests will be run * except those those belonging to tags listed in the * <code>tagsToExclude</code> <code>Set</code>. If <code>tagsToInclude</code> is defined, only tests * belonging to tags mentioned in the <code>tagsToInclude</code> set, and not mentioned in <code>tagsToExclude</code>, * will be run. * </p> * * <a name="sharedFixtures"></a><h2>Shared fixtures</h2> * * <p> * A test <em>fixture</em> is objects or other artifacts (such as files, sockets, database * connections, <em>etc.</em>) used by tests to do their work. * If a fixture is used by only one test method, then the definitions of the fixture objects can * be local to the method, such as the objects assigned to <code>sum</code> and <code>diff</code> in the * previous <code>ExampleSpec</code> examples. If multiple methods need to share an immutable fixture, one approach * is to assign them to instance variables. * </p> * * <p> * In some cases, however, shared <em>mutable</em> fixture objects may be changed by test methods such that * they need to be recreated or reinitialized before each test. Shared resources such * as files or database connections may also need to * be created and initialized before, and cleaned up after, each test. JUnit 3 offered methods <code>setUp</code> and * <code>tearDown</code> for this purpose. In ScalaTest, you can use the <code>BeforeAndAfterEach</code> trait, * which will be described later, to implement an approach similar to JUnit's <code>setUp</code> * and <code>tearDown</code>, however, this approach usually involves reassigning <code>var</code>s or mutating objects * between tests. Before going that route, you may wish to consider some more functional approaches that * avoid side effects. 
* </p> * * <h4>Calling create-fixture methods</h4> * * <p> * One approach is to write one or more <em>create-fixture</em> methods * that return a new instance of a needed fixture object (or an holder object containing multiple needed fixture objects) each time it * is called. You can then call a create-fixture method at the beginning of each * test method that needs the fixture, storing the returned object or objects in local variables. Here's an example: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * import collection.mutable.ListBuffer * * class ExampleSpec extends FeatureSpec { * * def fixture = * new { * val builder = new StringBuilder("ScalaTest is ") * val buffer = new ListBuffer[String] * } * * feature("Fixtures can be shared") { * * scenario("user learns how to share fixtures") { * val f = fixture * f.builder.append("easy!") * assert(f.builder.toString === "ScalaTest is easy!") * assert(f.buffer.isEmpty) * f.buffer += "sweet" * } * * scenario("user enjoys writing tests with shared fixtures") { * val f = fixture * f.builder.append("fun!") * assert(f.builder.toString === "ScalaTest is fun!") * assert(f.buffer.isEmpty) * } * } * } * </pre> * * <p> * The &ldquo;<code>f.</code>&rdquo; in front of each use of a fixture object provides a visual indication of which objects * are part of the fixture, but if you prefer, you can import the the members with &ldquo;<code>import f._</code>&rdquo; and use the names directly. * </p> * * <h4>Instantiating fixture traits</h4> * * <p> * A related technique is to place * the fixture objects in a <em>fixture trait</em> and run your test code in the context of a new anonymous class instance that mixes in * the fixture trait, like this: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * import collection.mutable.ListBuffer * * class ExampleSpec extends FeatureSpec { * * trait Fixture { * val builder = new StringBuilder("ScalaTest is ") * val buffer = new ListBuffer[String] * } * * feature("Fixtures can be shared") { * * scenario("user learns how to share fixtures") { * new Fixture { * builder.append("easy!") * assert(builder.toString === "ScalaTest is easy!") * assert(buffer.isEmpty) * buffer += "sweet" * } * } * * scenario("user enjoys writing tests with shared fixtures") { * new Fixture { * builder.append("fun!") * assert(builder.toString === "ScalaTest is fun!") * assert(buffer.isEmpty) * } * } * } * } * </pre> * * <h4>Mixing in <code>OneInstancePerTest</code></h4> * * <p> * If every test method requires the same set of * mutable fixture objects, one other approach you can take is make them simply <code>val</code>s and mix in trait * <a href="OneInstancePerTest.html"><code>OneInstancePerTest</code></a>. If you mix in <code>OneInstancePerTest</code>, each test * will be run in its own instance of the <code>Suite</code>, similar to the way JUnit tests are executed. 
Here's an example: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * import org.scalatest.OneInstancePerTest * import collection.mutable.ListBuffer * * class ExampleSpec extends FeatureSpec with OneInstancePerTest { * * val builder = new StringBuilder("ScalaTest is ") * val buffer = new ListBuffer[String] * * feature("Fixtures can be shared") { * * scenario("user learns how to share fixtures") { * builder.append("easy!") * assert(builder.toString === "ScalaTest is easy!") * assert(buffer.isEmpty) * buffer += "sweet" * } * * scenario("user enjoys writing tests with shared fixtures") { * builder.append("fun!") * assert(builder.toString === "ScalaTest is fun!") * assert(buffer.isEmpty) * } * } * } * </pre> * * <p> * Although the create-fixture, fixture-trait, and <code>OneInstancePerTest</code> approaches take care of setting up a fixture before each * test, they don't address the problem of cleaning up a fixture after the test completes. In this situation, you'll need to either * use side effects or the <em>loan pattern</em>. * </p> * * <h4>Mixing in <code>BeforeAndAfter</code></h4> * * <p> * One way to use side effects is to mix in the <a href="BeforeAndAfter.html"><code>BeforeAndAfter</code></a> trait. * With this trait you can denote a bit of code to run before each test with <code>before</code> and/or after each test * each test with <code>after</code>, like this: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * import org.scalatest.BeforeAndAfter * import collection.mutable.ListBuffer * * class ExampleSpec extends FeatureSpec with BeforeAndAfter { * * val builder = new StringBuilder * val buffer = new ListBuffer[String] * * before { * builder.append("ScalaTest is ") * } * * after { * builder.clear() * buffer.clear() * } * * feature("Fixtures can be shared") { * * scenario("user learns how to share fixtures") { * builder.append("easy!") * assert(builder.toString === "ScalaTest is easy!") * assert(buffer.isEmpty) * buffer += "sweet" * } * * scenario("user enjoys writing tests with shared fixtures") { * builder.append("fun!") * assert(builder.toString === "ScalaTest is fun!") * assert(buffer.isEmpty) * } * } * } * </pre> * * <h4>Overriding <code>withFixture(NoArgTest)</code></h4> * * <p> * An alternate way to take care of setup and cleanup via side effects * is to override <code>withFixture</code>. Trait <code>Suite</code>'s implementation of * <code>runTest</code>, which is inherited by this trait, passes a no-arg test function to <code>withFixture</code>. It is <code>withFixture</code>'s * responsibility to invoke that test function. <code>Suite</code>'s implementation of <code>withFixture</code> simply * invokes the function, like this: * </p> * * <pre class="stHighlight"> * // Default implementation * protected def withFixture(test: NoArgTest) { * test() * } * </pre> * * <p> * You can, therefore, override <code>withFixture</code> to perform setup before, and cleanup after, invoking the test function. If * you have cleanup to perform, you should invoke the test function * inside a <code>try</code> block and perform the cleanup in a <code>finally</code> clause. 
* Here's an example: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * import collection.mutable.ListBuffer * * class ExampleSpec extends FeatureSpec { * * val builder = new StringBuilder * val buffer = new ListBuffer[String] * * override def withFixture(test: NoArgTest) { * builder.append("ScalaTest is ") // perform setup * try { * test() // invoke the test function * } * finally { * builder.clear() // perform cleanup * buffer.clear() * } * } * * feature("Fixtures can be shared") { * * scenario("user learns how to share fixtures") { * builder.append("easy!") * assert(builder.toString === "ScalaTest is easy!") * assert(buffer.isEmpty) * buffer += "sweet" * } * * scenario("user enjoys writing tests with shared fixtures") { * builder.append("fun!") * assert(builder.toString === "ScalaTest is fun!") * assert(buffer.isEmpty) * buffer += "clear" * } * } * } * </pre> * * <p> * Note that the <a href="Suite$NoArgTest.html"><code>NoArgTest</code></a> passed to <code>withFixture</code>, in addition to * an <code>apply</code> method that executes the test, also includes the test name as well as the <a href="Suite.html#configMapSection">config * map</a> passed to <code>runTest</code>. Thus you can also use the test name and configuration objects in <code>withFixture</code>. * </p> * * <p> * The reason you should perform cleanup in a <code>finally</code> clause is that <code>withFixture</code> is called by * <code>runTest</code>, which expects an exception to be thrown to indicate a failed test. Thus when you invoke * the <code>test</code> function inside <code>withFixture</code>, it may complete abruptly with an exception. The <code>finally</code> * clause will ensure the fixture cleanup happens as that exception propagates back up the call stack to <code>runTest</code>. * </p> * * <h4>Overriding <code>withFixture(OneArgTest)</code></h4> * * <p> * To use the loan pattern, you can extend <code>fixture.FeatureSpec</code> (from the <code>org.scalatest.fixture</code> package) instead of * <code>FeatureSpec</code>. Each test in a <code>fixture.FeatureSpec</code> takes a fixture as a parameter, allowing you to pass the fixture into * the test. You must indicate the type of the fixture parameter by specifying <code>FixtureParam</code>, and implement a * <code>withFixture</code> method that takes a <code>OneArgTest</code>. This <code>withFixture</code> method is responsible for * invoking the one-arg test function, so you can perform fixture set up before, and clean up after, invoking and passing * the fixture into the test function. 
Here's an example: * </p> * * <pre class="stHighlight"> * import org.scalatest.fixture * import java.io.FileWriter * import java.io.File * * class ExampleSpec extends fixture.FeatureSpec { * * final val tmpFile = "temp.txt" * * type FixtureParam = FileWriter * * def withFixture(test: OneArgTest) { * * val writer = new FileWriter(tmpFile) // set up the fixture * try { * test(writer) // "loan" the fixture to the test * } * finally { * writer.close() // clean up the fixture * } * } * * feature("Fixtures can be shared") { * * scenario("user learns how to share fixtures") { writer => * writer.write("Hello, test!") * writer.flush() * assert(new File(tmpFile).length === 12) * } * * scenario("user enjoys writing tests with shared fixtures") { writer => * writer.write("Hi, test!") * writer.flush() * assert(new File(tmpFile).length === 9) * } * } * } * </pre> * * <p> * For more information, see the <a href="fixture/FeatureSpec.html">documentation for <code>fixture.FeatureSpec</code></a>. * </p> * * <a name="differentFixtures"></a><h2>Providing different fixtures to different tests</h2> * * <p> * If different tests in the same <code>FeatureSpec</code> require different fixtures, you can combine the previous techniques and * provide each test with just the fixture or fixtures it needs. Here's an example in which a <code>StringBuilder</code> and a * <code>ListBuffer</code> are provided via fixture traits, and file writer (that requires cleanup) is provided via the loan pattern: * </p> * * <pre class="stHighlight"> * import java.io.FileWriter * import java.io.File * import collection.mutable.ListBuffer * import org.scalatest.FeatureSpec * * class ExampleSpec extends FeatureSpec { * * final val tmpFile = "temp.txt" * * trait Builder { * val builder = new StringBuilder("ScalaTest is ") * } * * trait Buffer { * val buffer = ListBuffer("ScalaTest", "is") * } * * def withWriter(testCode: FileWriter => Any) { * val writer = new FileWriter(tmpFile) // set up the fixture * try { * testCode(writer) // "loan" the fixture to the test * } * finally { * writer.close() // clean up the fixture * } * } * * scenario("user is productive using the test framework") { // This test needs the StringBuilder fixture * new Builder { * builder.append("productive!") * assert(builder.toString === "ScalaTest is productive!") * } * } * * scenario("tests are readable") { // This test needs the ListBuffer[String] fixture * new Buffer { * buffer += ("readable!") * assert(buffer === List("ScalaTest", "is", "readable!")) * } * } * * scenario("the test framework is user-friendly") { // This test needs the FileWriter fixture * withWriter { writer => * writer.write("Hello, user!") * writer.flush() * assert(new File(tmpFile).length === 12) * } * } * * scenario("test code is clear and concise") { // This test needs the StringBuilder and ListBuffer * new Builder with Buffer { * builder.append("clear!") * buffer += ("concise!") * assert(builder.toString === "ScalaTest is clear!") * assert(buffer === List("ScalaTest", "is", "concise!")) * } * } * * scenario("user composes test artifacts") { // This test needs all three fixtures * new Builder with Buffer { * builder.append("clear!") * buffer += ("concise!") * assert(builder.toString === "ScalaTest is clear!") * assert(buffer === List("ScalaTest", "is", "concise!")) * withWriter { writer => * writer.write(builder.toString) * writer.flush() * assert(new File(tmpFile).length === 19) * } * } * } * } * </pre> * * <p> * In the previous example, <code>"user is productive using the test framework</code> 
uses only the <code>StringBuilder</code> fixture, so it just instantiates * a <code>new Builder</code>, whereas <code>tests are readable</code> uses only the <code>ListBuffer</code> fixture, so it just intantiates * a <code>new Buffer</code>. <code>the test framework is user-friendly</code> needs just the <code>FileWriter</code> fixture, so it invokes * <code>withWriter</code>, which prepares and passes a <code>FileWriter</code> to the test (and takes care of closing it afterwords). * </p> * * <p> * Two tests need multiple fixtures: <code>test code is clear and concise</code> needs both the <code>StringBuilder</code> and the * <code>ListBuffer</code>, so it instantiates a class that mixes in both fixture traits with <code>new Builder with Buffer</code>. * <code>user composes test artifacts</code> needs all three fixtures, so in addition to <code>new Builder with Buffer</code> it also invokes * <code>withWriter</code>, wrapping just the of the test code that needs the fixture. * </p> * * <p> * Note that in this case, the loan pattern is being implemented via the <code>withWriter</code> method that takes a function, not * by overriding <code>fixture.FeatureSpec</code>'s <code>withFixture(OneArgTest)</code> method. <code>fixture.FeatureSpec</code> makes the most sense * if all (or at least most) tests need the same fixture, whereas in this <code>Suite</code> only two tests need the * <code>FileWriter</code>. * </p> * * <p> * In the previous example, the <code>withWriter</code> method passed an object into * the tests. Passing fixture objects into tests is generally a good idea when possible, but sometimes a side affect is unavoidable. * For example, if you need to initialize a database running on a server across a network, your with-fixture * method will likely have nothing to pass. In such cases, simply create a with-fixture method that takes a by-name parameter and * performs setup and cleanup via side effects, like this: * </p> * * <pre class="stHighlight"> * def withDataInDatabase(test: => Any) { * // initialize the database across the network * try { * test // "loan" the initialized database to the test * } * finally { * // clean up the database * } * } * </pre> * * <p> * You can then use it like: * </p> * * <pre class="stHighlight"> * scenario("user logs in") { * withDataInDatabase { * // test user logging in scenario * } * } * </pre> * * <a name="composingFixtures"></a><h2>Composing stackable fixture traits</h2> * * <p> * In larger projects, teams often end up with several different fixtures that test classes need in different combinations, * and possibly initialized (and cleaned up) in different orders. A good way to accomplish this in ScalaTest is to factor the individual * fixtures into traits that can be composed using the <em>stackable trait</em> pattern. This can be done, for example, by placing * <code>withFixture</code> methods in several traits, each of which call <code>super.withFixture</code>. 
Here's an example in * which the <code>StringBuilder</code> and <code>ListBuffer[String]</code> fixtures used in the previous examples have been * factored out into two <em>stackable fixture traits</em> named <code>Builder</code> and <code>Buffer</code>: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * import org.scalatest.AbstractSuite * import collection.mutable.ListBuffer * * trait Builder extends AbstractSuite { this: Suite => * * val builder = new StringBuilder * * abstract override def withFixture(test: NoArgTest) { * builder.append("ScalaTest is ") * try { * super.withFixture(test) // To be stackable, must call super.withFixture * } * finally { * builder.clear() * } * } * } * * trait Buffer extends AbstractSuite { this: Suite => * * val buffer = new ListBuffer[String] * * abstract override def withFixture(test: NoArgTest) { * try { * super.withFixture(test) // To be stackable, must call super.withFixture * } * finally { * buffer.clear() * } * } * } * * class ExampleSpec extends FeatureSpec with Builder with Buffer { * * feature("Fixtures can be shared") { * * scenario("user learns how to share fixtures") { * builder.append("easy!") * assert(builder.toString === "ScalaTest is easy!") * assert(buffer.isEmpty) * buffer += "sweet" * } * * scenario("user enjoys writing tests with shared fixtures") { * builder.append("fun!") * assert(builder.toString === "ScalaTest is fun!") * assert(buffer.isEmpty) * buffer += "clear" * } * } * } * </pre> * * <p> * By mixing in both the <code>Builder</code> and <code>Buffer</code> traits, <code>ExampleSpec</code> gets both fixtures, which will be * initialized before each test and cleaned up after. The order the traits are mixed together determines the order of execution. * In this case, <code>Builder</code> is "super" to </code>Buffer</code>. If you wanted <code>Buffer</code> to be "super" * to <code>Builder</code>, you need only switch the order you mix them together, like this: * </p> * * <pre class="stHighlight"> * class Example2Spec extends FeatureSpec with Buffer with Builder * </pre> * * <p> * And if you only need one fixture you mix in only that trait: * </p> * * <pre class="stHighlight"> * class Example3Spec extends FeatureSpec with Builder * </pre> * * <p> * Another way to create stackable fixture traits is by extending the <a href="BeforeAndAfterEach.html"><code>BeforeAndAfterEach</code></a> * and/or <a href="BeforeAndAfterAll.html"><code>BeforeAndAfterAll</code></a> traits. * <code>BeforeAndAfterEach</code> has a <code>beforeEach</code> method that will be run before each test (like JUnit's <code>setUp</code>), * and an <code>afterEach</code> method that will be run after (like JUnit's <code>tearDown</code>). * Similarly, <code>BeforeAndAfterAll</code> has a <code>beforeAll</code> method that will be run before all tests, * and an <code>afterAll</code> method that will be run after all tests. 
Here's what the previously shown example would look like if it * were rewritten to use the <code>BeforeAndAfterEach</code> methods instead of <code>withFixture</code>: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * import org.scalatest.BeforeAndAfterEach * import collection.mutable.ListBuffer * * trait Builder extends BeforeAndAfterEach { this: Suite => * * val builder = new StringBuilder * * override def beforeEach() { * builder.append("ScalaTest is ") * super.beforeEach() // To be stackable, must call super.beforeEach * } * * override def afterEach() { * try { * super.afterEach() // To be stackable, must call super.afterEach * } * finally { * builder.clear() * } * } * } * * trait Buffer extends BeforeAndAfterEach { this: Suite => * * val buffer = new ListBuffer[String] * * override def afterEach() { * try { * super.afterEach() // To be stackable, must call super.afterEach * } * finally { * buffer.clear() * } * } * } * * class ExampleSpec extends FeatureSpec with Builder with Buffer { * * feature("Fixtures can be shared") { * * scenario("user learns how to share fixtures") { * builder.append("easy!") * assert(builder.toString === "ScalaTest is easy!") * assert(buffer.isEmpty) * buffer += "sweet" * } * * scenario("user enjoys writing tests with shared fixtures") { * builder.append("fun!") * assert(builder.toString === "ScalaTest is fun!") * assert(buffer.isEmpty) * buffer += "clear" * } * } * } * </pre> * * <p> * To get the same ordering as <code>withFixture</code>, place your <code>super.beforeEach</code> call at the end of each * <code>beforeEach</code> method, and the <code>super.afterEach</code> call at the beginning of each <code>afterEach</code> * method, as shown in the previous example. It is a good idea to invoke <code>super.afterEach</code> in a <code>try</code> * block and perform cleanup in a <code>finally</code> clause, as shown in the previous example, because this ensures the * cleanup code is performed even if <code>super.afterAll</code> throws an exception. * </p> * * <p> * One difference to bear in mind between the before-and-after traits and the <code>withFixture</code> methods, is that if * a <code>withFixture</code> method completes abruptly with an exception, it is considered a failed test. By contrast, if any of the * methods on the before-and-after traits (<em>i.e.</em>, <code>before</code> and <code>after</code> of <code>BeforeAndAfter</code>, * <code>beforeEach</code> and <code>afterEach</code> of <code>BeforeAndAfterEach</code>, * and <code>beforeAll</code> and <code>afterAll</code> of <code>BeforeAndAfterAll</code>) complete abruptly, it is considered a * failed suite, which will result in a <a href="events/SuiteAborted.html"><code>SuiteAborted</code></a> event. * </p> * * <a name="SharedScenarios"></a><h2>Shared scenarios</h2> * * <p> * Sometimes you may want to run the same test code on different fixture objects. In other words, you may want to write tests that are "shared" * by different fixture objects. * To accomplish this in a <code>FeatureSpec</code>, you first place shared tests (<em>i.e.</em>, shared scenarios) in * <em>behavior functions</em>. These behavior functions will be * invoked during the construction phase of any <code>FeatureSpec</code> that uses them, so that the scenarios they contain will * be registered as scenarios in that <code>FeatureSpec</code>. 
* For example, given this stack class: * </p> * * <pre class="stHighlight"> * import scala.collection.mutable.ListBuffer * * class Stack[T] { * * val MAX = 10 * private val buf = new ListBuffer[T] * * def push(o: T) { * if (!full) * buf.prepend(o) * else * throw new IllegalStateException("can't push onto a full stack") * } * * def pop(): T = { * if (!empty) * buf.remove(0) * else * throw new IllegalStateException("can't pop an empty stack") * } * * def peek: T = { * if (!empty) * buf(0) * else * throw new IllegalStateException("can't pop an empty stack") * } * * def full: Boolean = buf.size == MAX * def empty: Boolean = buf.size == 0 * def size = buf.size * * override def toString = buf.mkString("Stack(", ", ", ")") * } * </pre> * * <p> * You may want to test the <code>Stack</code> class in different states: empty, full, with one item, with one item less than capacity, * <em>etc</em>. You may find you have several scenarios that make sense any time the stack is non-empty. Thus you'd ideally want to run * those same scenarios for three stack fixture objects: a full stack, a stack with a one item, and a stack with one item less than * capacity. With shared tests, you can factor these scenarios out into a behavior function, into which you pass the * stack fixture to use when running the tests. So in your <code>FeatureSpec</code> for stack, you'd invoke the * behavior function three times, passing in each of the three stack fixtures so that the shared scenarios are run for all three fixtures. * </p> * * <p> * You can define a behavior function that encapsulates these shared scenarios inside the <code>FeatureSpec</code> that uses them. If they are shared * between different <code>FeatureSpec</code>s, however, you could also define them in a separate trait that is mixed into * each <code>FeatureSpec</code> that uses them. 
* <a name="StackBehaviors">For</a> example, here the <code>nonEmptyStack</code> behavior function (in this case, a * behavior <em>method</em>) is defined in a trait along with another * method containing shared scenarios for non-full stacks: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * import org.scalatest.GivenWhenThen * import org.scalatestexamples.helpers.Stack * * trait FeatureSpecStackBehaviors { this: FeatureSpec with GivenWhenThen =&gt; * * def nonEmptyStack(createNonEmptyStack: =&gt; Stack[Int], lastItemAdded: Int) { * * scenario("empty is invoked on this non-empty stack: " + createNonEmptyStack.toString) { * * given("a non-empty stack") * val stack = createNonEmptyStack * * when("empty is invoked on the stack") * then("empty returns false") * assert(!stack.empty) * } * * scenario("peek is invoked on this non-empty stack: " + createNonEmptyStack.toString) { * * given("a non-empty stack") * val stack = createNonEmptyStack * val size = stack.size * * when("peek is invoked on the stack") * then("peek returns the last item added") * assert(stack.peek === lastItemAdded) * * and("the size of the stack is the same as before") * assert(stack.size === size) * } * * scenario("pop is invoked on this non-empty stack: " + createNonEmptyStack.toString) { * * given("a non-empty stack") * val stack = createNonEmptyStack * val size = stack.size * * when("pop is invoked on the stack") * then("pop returns the last item added") * assert(stack.pop === lastItemAdded) * * and("the size of the stack one less than before") * assert(stack.size === size - 1) * } * } * * def nonFullStack(createNonFullStack: =&gt; Stack[Int]) { * * scenario("full is invoked on this non-full stack: " + createNonFullStack.toString) { * * given("a non-full stack") * val stack = createNonFullStack * * when("full is invoked on the stack") * then("full returns false") * assert(!stack.full) * } * * scenario("push is invoked on this non-full stack: " + createNonFullStack.toString) { * * given("a non-full stack") * val stack = createNonFullStack * val size = stack.size * * when("push is invoked on the stack") * stack.push(7) * * then("the size of the stack is one greater than before") * assert(stack.size === size + 1) * * and("the top of the stack contains the pushed value") * assert(stack.peek === 7) * } * } * } * </pre> * * <p> * Given these behavior functions, you could invoke them directly, but <code>FeatureSpec</code> offers a DSL for the purpose, * which looks like this: * </p> * * <pre class="stHighlight"> * scenariosFor(nonEmptyStack(stackWithOneItem, lastValuePushed)) * scenariosFor(nonFullStack(stackWithOneItem)) * </pre> * * <p> * If you prefer to use an imperative style to change fixtures, for example by mixing in <code>BeforeAndAfterEach</code> and * reassigning a <code>stack</code> <code>var</code> in <code>beforeEach</code>, you could write your behavior functions * in the context of that <code>var</code>, which means you wouldn't need to pass in the stack fixture because it would be * in scope already inside the behavior function. In that case, your code would look like this: * </p> * * <pre class="stHighlight"> * scenariosFor(nonEmptyStack) // assuming lastValuePushed is also in scope inside nonEmptyStack * scenariosFor(nonFullStack) * </pre> * * <p> * The recommended style, however, is the functional, pass-all-the-needed-values-in style. 
Here's an example: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * import org.scalatest.GivenWhenThen * import org.scalatestexamples.helpers.Stack * * class StackFeatureSpec extends FeatureSpec with GivenWhenThen with FeatureSpecStackBehaviors { * * // Stack fixture creation methods * def emptyStack = new Stack[Int] * * def fullStack = { * val stack = new Stack[Int] * for (i <- 0 until stack.MAX) * stack.push(i) * stack * } * * def stackWithOneItem = { * val stack = new Stack[Int] * stack.push(9) * stack * } * * def stackWithOneItemLessThanCapacity = { * val stack = new Stack[Int] * for (i <- 1 to 9) * stack.push(i) * stack * } * * val lastValuePushed = 9 * * feature("A Stack is pushed and popped") { * * scenario("empty is invoked on an empty stack") { * * given("an empty stack") * val stack = emptyStack * * when("empty is invoked on the stack") * then("empty returns true") * assert(stack.empty) * } * * scenario("peek is invoked on an empty stack") { * * given("an empty stack") * val stack = emptyStack * * when("peek is invoked on the stack") * then("peek throws IllegalStateException") * intercept[IllegalStateException] { * stack.peek * } * } * * scenario("pop is invoked on an empty stack") { * * given("an empty stack") * val stack = emptyStack * * when("pop is invoked on the stack") * then("pop throws IllegalStateException") * intercept[IllegalStateException] { * emptyStack.pop * } * } * * scenariosFor(nonEmptyStack(stackWithOneItem, lastValuePushed)) * scenariosFor(nonFullStack(stackWithOneItem)) * * scenariosFor(nonEmptyStack(stackWithOneItemLessThanCapacity, lastValuePushed)) * scenariosFor(nonFullStack(stackWithOneItemLessThanCapacity)) * * scenario("full is invoked on a full stack") { * * given("an full stack") * val stack = fullStack * * when("full is invoked on the stack") * then("full returns true") * assert(stack.full) * } * * scenariosFor(nonEmptyStack(fullStack, lastValuePushed)) * * scenario("push is invoked on a full stack") { * * given("an full stack") * val stack = fullStack * * when("push is invoked on the stack") * then("push throws IllegalStateException") * intercept[IllegalStateException] { * stack.push(10) * } * } * } * } * </pre> * * <p> * If you load these classes into the Scala interpreter (with scalatest's JAR file on the class path), and execute it, * you'll see: * </p> * * <pre class="stREPL"> * scala> (new StackFeatureSpec).execute() * <span class="stGreen">Feature: A Stack is pushed and popped * Scenario: empty is invoked on an empty stack * Given an empty stack * When empty is invoked on the stack * Then empty returns true * Scenario: peek is invoked on an empty stack * Given an empty stack * When peek is invoked on the stack * Then peek throws IllegalStateException * Scenario: pop is invoked on an empty stack * Given an empty stack * When pop is invoked on the stack * Then pop throws IllegalStateException * Scenario: empty is invoked on this non-empty stack: Stack(9) * Given a non-empty stack * When empty is invoked on the stack * Then empty returns false * Scenario: peek is invoked on this non-empty stack: Stack(9) * Given a non-empty stack * When peek is invoked on the stack * Then peek returns the last item added * And the size of the stack is the same as before * Scenario: pop is invoked on this non-empty stack: Stack(9) * Given a non-empty stack * When pop is invoked on the stack * Then pop returns the last item added * And the size of the stack one less than before * Scenario: full is invoked on this non-full stack: Stack(9) * 
Given a non-full stack * When full is invoked on the stack * Then full returns false * Scenario: push is invoked on this non-full stack: Stack(9) * Given a non-full stack * When push is invoked on the stack * Then the size of the stack is one greater than before * And the top of the stack contains the pushed value * Scenario: empty is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1) * Given a non-empty stack * When empty is invoked on the stack * Then empty returns false * Scenario: peek is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1) * Given a non-empty stack * When peek is invoked on the stack * Then peek returns the last item added * And the size of the stack is the same as before * Scenario: pop is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1) * Given a non-empty stack * When pop is invoked on the stack * Then pop returns the last item added * And the size of the stack one less than before * Scenario: full is invoked on this non-full stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1) * Given a non-full stack * When full is invoked on the stack * Then full returns false * Scenario: push is invoked on this non-full stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1) * Given a non-full stack * When push is invoked on the stack * Then the size of the stack is one greater than before * And the top of the stack contains the pushed value * Scenario: full is invoked on a full stack * Given an full stack * When full is invoked on the stack * Then full returns true * Scenario: empty is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1, 0) * Given a non-empty stack * When empty is invoked on the stack * Then empty returns false * Scenario: peek is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1, 0) * Given a non-empty stack * When peek is invoked on the stack * Then peek returns the last item added * And the size of the stack is the same as before * Scenario: pop is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1, 0) * Given a non-empty stack * When pop is invoked on the stack * Then pop returns the last item added * And the size of the stack one less than before * Scenario: push is invoked on a full stack * Given an full stack * When push is invoked on the stack * Then push throws IllegalStateException</span> * </pre> * * <p> * One thing to keep in mind when using shared tests is that in ScalaTest, each test in a suite must have a unique name. * If you register the same tests repeatedly in the same suite, one problem you may encounter is an exception at runtime * complaining that multiple tests are being registered with the same test name. * In a <code>FeatureSpec</code> there is no nesting construct analogous to <code>FunSpec</code>'s <code>describe</code> clause. * Therefore, you need to do a bit of * extra work to ensure that the test names are unique. If a duplicate test name problem shows up in a * <code>FeatureSpec</code>, you'll need to pass in a prefix or suffix string to add to each test name. You can pass this string * the same way you pass any other data needed by the shared tests, or just call <code>toString</code> on the shared fixture object. * This is the approach taken by the previous <code>FeatureSpecStackBehaviors</code> example. 
* </p> * * <p> * Given this <code>FeatureSpecStackBehaviors</code> trait, calling it with the <code>stackWithOneItem</code> fixture, like this: * </p> * * <pre class="stHighlight"> * scenariosFor(nonEmptyStack(stackWithOneItem, lastValuePushed)) * </pre> * * <p> * yields test names: * </p> * * <ul> * <li><code>empty is invoked on this non-empty stack: Stack(9)</code></li> * <li><code>peek is invoked on this non-empty stack: Stack(9)</code></li> * <li><code>pop is invoked on this non-empty stack: Stack(9)</code></li> * </ul> * * <p> * Whereas calling it with the <code>stackWithOneItemLessThanCapacity</code> fixture, like this: * </p> * * <pre class="stHighlight"> * scenariosFor(nonEmptyStack(stackWithOneItemLessThanCapacity, lastValuePushed)) * </pre> * * <p> * yields different test names: * </p> * * <ul> * <li><code>empty is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)</code></li> * <li><code>peek is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)</code></li> * <li><code>pop is invoked on this non-empty stack: Stack(9, 8, 7, 6, 5, 4, 3, 2, 1)</code></li> * </ul> * * @author Bill Venners */ @Style("org.scalatest.finders.FeatureSpecFinder") trait FeatureSpec extends Suite { thisSuite => private final val engine = new Engine("concurrentFeatureSpecMod", "FeatureSpec") private final val stackDepth = 4 import engine._ /** * Returns an <code>Informer</code> that during test execution will forward strings (and other objects) passed to its * <code>apply</code> method to the current reporter. If invoked in a constructor, it * will register the passed string for forwarding later during test execution. If invoked while this * <code>FeatureSpec</code> is being executed, such as from inside a test function, it will forward the information to * the current reporter immediately. If invoked at any other time, it will * throw an exception. This method can be called safely by any thread. */ implicit protected def info: Informer = atomicInformer.get /** * Returns a <code>Documenter</code> that during test execution will forward strings passed to its * <code>apply</code> method to the current reporter. If invoked in a constructor, it * will register the passed string for forwarding later during test execution. If invoked while this * <code>FeatureSpec</code> is being executed, such as from inside a test function, it will forward the information to * the current reporter immediately. If invoked at any other time, it will * throw an exception. This method can be called safely by any thread. */ implicit protected def markup: Documenter = atomicDocumenter.get /** * Register a test with the given spec text, optional tags, and test function value that takes no arguments. * An invocation of this method is called an &#8220;example.&#8221; * * This method will register the test for later execution via an invocation of one of the <code>execute</code> * methods. The name of the test will be a concatenation of the text of all surrounding describers, * from outside in, and the passed spec text, with one space placed between each item. (See the documenation * for <code>testNames</code> for an example.) The resulting test name must not have been registered previously on * this <code>FeatureSpec</code> instance. 
* * @param specText the specification text, which will be combined with the descText of any surrounding describers * to form the test name * @param testTags the optional list of tags for this test * @param testFun the test function * @throws DuplicateTestNameException if a test with the same name has been registered previously * @throws TestRegistrationClosedException if invoked after <code>run</code> has been invoked on this suite * @throws NullPointerException if <code>specText</code> or any passed test tag is <code>null</code> */ protected def scenario(specText: String, testTags: Tag*)(testFun: => Unit) { registerTest(Resources("scenario", specText), testFun _, "scenarioCannotAppearInsideAnotherScenario", "FeatureSpec.scala", "scenario", stackDepth, None, None, testTags: _*) } /** * Register a test to ignore, which has the given spec text, optional tags, and test function value that takes no arguments. * This method will register the test for later ignoring via an invocation of one of the <code>execute</code> * methods. This method exists to make it easy to ignore an existing test by changing the call to <code>it</code> * to <code>ignore</code> without deleting or commenting out the actual test code. The test will not be executed, but a * report will be sent that indicates the test was ignored. The name of the test will be a concatenation of the text of all surrounding describers, * from outside in, and the passed spec text, with one space placed between each item. (See the documenation * for <code>testNames</code> for an example.) The resulting test name must not have been registered previously on * this <code>FeatureSpec</code> instance. * * @param specText the specification text, which will be combined with the descText of any surrounding describers * to form the test name * @param testTags the optional list of tags for this test * @param testFun the test function * @throws DuplicateTestNameException if a test with the same name has been registered previously * @throws TestRegistrationClosedException if invoked after <code>run</code> has been invoked on this suite * @throws NullPointerException if <code>specText</code> or any passed test tag is <code>null</code> */ protected def ignore(specText: String, testTags: Tag*)(testFun: => Unit) { registerIgnoredTest(Resources("scenario", specText), testFun _, "ignoreCannotAppearInsideAScenario", "FeatureSpec.scala", "ignore", stackDepth, testTags: _*) } /** * Describe a &#8220;subject&#8221; being specified and tested by the passed function value. The * passed function value may contain more describers (defined with <code>describe</code>) and/or tests * (defined with <code>it</code>). This trait's implementation of this method will register the * description string and immediately invoke the passed function. */ protected def feature(description: String)(fun: => Unit) { if (!currentBranchIsTrunk) throw new NotAllowedException(Resources("cantNestFeatureClauses"), getStackDepthFun("FeatureSpec.scala", "feature")) registerNestedBranch(description, None, fun, "featureCannotAppearInsideAScenario", "FeatureSpec.scala", "feature", stackDepth) } /** * A <code>Map</code> whose keys are <code>String</code> tag names to which tests in this <code>FeatureSpec</code> belong, and values * the <code>Set</code> of test names that belong to each tag. If this <code>FeatureSpec</code> contains no tags, this method returns an empty <code>Map</code>. 
* * <p> * This trait's implementation returns tags that were passed as strings contained in <code>Tag</code> objects passed to * methods <code>test</code> and <code>ignore</code>. * </p> */ override def testTags: Map[String, Set[String]] = atomic.get.tagsMap /** * Run a test. This trait's implementation runs the test registered with the name specified by * <code>testName</code>. Each test's name is a concatenation of the text of all describers surrounding a test, * from outside in, and the test's spec text, with one space placed between each item. (See the documenation * for <code>testNames</code> for an example.) * * @param testName the name of one test to execute. * @param reporter the <code>Reporter</code> to which results will be reported * @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early. * @param configMap a <code>Map</code> of properties that can be used by this <code>FeatureSpec</code>'s executing tests. * @throws NullPointerException if any of <code>testName</code>, <code>reporter</code>, <code>stopper</code>, or <code>configMap</code> * is <code>null</code>. */ protected override def runTest(testName: String, reporter: Reporter, stopper: Stopper, configMap: Map[String, Any], tracker: Tracker) { def invokeWithFixture(theTest: TestLeaf) { val theConfigMap = configMap withFixture( new NoArgTest { def name = testName def apply() { theTest.testFun() } def configMap = theConfigMap } ) } runTestImpl(thisSuite, testName, reporter, stopper, configMap, tracker, false, invokeWithFixture) } /** * Run zero to many of this <code>FeatureSpec</code>'s tests. * * <p> * This method takes a <code>testName</code> parameter that optionally specifies a test to invoke. * If <code>testName</code> is <code>Some</code>, this trait's implementation of this method * invokes <code>runTest</code> on this object, passing in: * </p> * * <ul> * <li><code>testName</code> - the <code>String</code> value of the <code>testName</code> <code>Option</code> passed * to this method</li> * <li><code>reporter</code> - the <code>Reporter</code> passed to this method, or one that wraps and delegates to it</li> * <li><code>stopper</code> - the <code>Stopper</code> passed to this method, or one that wraps and delegates to it</li> * <li><code>configMap</code> - the <code>configMap</code> passed to this method, or one that wraps and delegates to it</li> * </ul> * * <p> * This method takes a <code>Set</code> of tag names that should be included (<code>tagsToInclude</code>), and a <code>Set</code> * that should be excluded (<code>tagsToExclude</code>), when deciding which of this <code>Suite</code>'s tests to execute. * If <code>tagsToInclude</code> is empty, all tests will be executed * except those those belonging to tags listed in the <code>tagsToExclude</code> <code>Set</code>. If <code>tagsToInclude</code> is non-empty, only tests * belonging to tags mentioned in <code>tagsToInclude</code>, and not mentioned in <code>tagsToExclude</code> * will be executed. However, if <code>testName</code> is <code>Some</code>, <code>tagsToInclude</code> and <code>tagsToExclude</code> are essentially ignored. * Only if <code>testName</code> is <code>None</code> will <code>tagsToInclude</code> and <code>tagsToExclude</code> be consulted to * determine which of the tests named in the <code>testNames</code> <code>Set</code> should be run. For more information on trait tags, see the main documentation for this trait. 
* </p> * * <p> * If <code>testName</code> is <code>None</code>, this trait's implementation of this method * invokes <code>testNames</code> on this <code>Suite</code> to get a <code>Set</code> of names of tests to potentially execute. * (A <code>testNames</code> value of <code>None</code> essentially acts as a wildcard that means all tests in * this <code>Suite</code> that are selected by <code>tagsToInclude</code> and <code>tagsToExclude</code> should be executed.) * For each test in the <code>testName</code> <code>Set</code>, in the order * they appear in the iterator obtained by invoking the <code>elements</code> method on the <code>Set</code>, this trait's implementation * of this method checks whether the test should be run based on the <code>tagsToInclude</code> and <code>tagsToExclude</code> <code>Set</code>s. * If so, this implementation invokes <code>runTest</code>, passing in: * </p> * * <ul> * <li><code>testName</code> - the <code>String</code> name of the test to run (which will be one of the names in the <code>testNames</code> <code>Set</code>)</li> * <li><code>reporter</code> - the <code>Reporter</code> passed to this method, or one that wraps and delegates to it</li> * <li><code>stopper</code> - the <code>Stopper</code> passed to this method, or one that wraps and delegates to it</li> * <li><code>configMap</code> - the <code>configMap</code> passed to this method, or one that wraps and delegates to it</li> * </ul> * * @param testName an optional name of one test to run. If <code>None</code>, all relevant tests should be run. * I.e., <code>None</code> acts like a wildcard that means run all relevant tests in this <code>Suite</code>. * @param reporter the <code>Reporter</code> to which results will be reported * @param stopper the <code>Stopper</code> that will be consulted to determine whether to stop execution early. * @param filter a <code>Filter</code> with which to filter tests based on their tags * @param configMap a <code>Map</code> of key-value pairs that can be used by the executing <code>Suite</code> of tests. * @param distributor an optional <code>Distributor</code>, into which to put nested <code>Suite</code>s to be run * by another entity, such as concurrently by a pool of threads. If <code>None</code>, nested <code>Suite</code>s will be run sequentially. * @param tracker a <code>Tracker</code> tracking <code>Ordinal</code>s being fired by the current thread. * @throws NullPointerException if any of the passed parameters is <code>null</code>. * @throws IllegalArgumentException if <code>testName</code> is defined, but no test with the specified test name * exists in this <code>Suite</code> */ protected override def runTests(testName: Option[String], reporter: Reporter, stopper: Stopper, filter: Filter, configMap: Map[String, Any], distributor: Option[Distributor], tracker: Tracker) { runTestsImpl(thisSuite, testName, reporter, stopper, filter, configMap, distributor, tracker, info, false, runTest) } /** * An immutable <code>Set</code> of test names. If this <code>FeatureSpec</code> contains no tests, this method returns an * empty <code>Set</code>. * * <p> * This trait's implementation of this method will return a set that contains the names of all registered tests. The set's * iterator will return those names in the order in which the tests were registered. Each test's name is composed * of the concatenation of the text of each surrounding describer, in order from outside in, and the text of the * example itself, with all components separated by a space. 
For example, consider this <code>FeatureSpec</code>: * </p> * * <pre class="stHighlight"> * import org.scalatest.FeatureSpec * * class StackSpec extends FeatureSpec { * feature("A Stack") { * scenario("(when not empty) must allow me to pop") {} * scenario("(when not full) must allow me to push") {} * } * } * </pre> * * <p> * Invoking <code>testNames</code> on this <code>FeatureSpec</code> will yield a set that contains the following * two test name strings: * </p> * * <pre> * "A Stack (when not empty) must allow me to pop" * "A Stack (when not full) must allow me to push" * </pre> */ // override def testNames: Set[String] = ListSet(atomic.get.testsList.map(_.testName): _*) override def testNames: Set[String] = { // I'm returning a ListSet here so that the tests will be run in registration order ListSet(atomic.get.testNamesList.toArray: _*) } override def run(testName: Option[String], reporter: Reporter, stopper: Stopper, filter: Filter, configMap: Map[String, Any], distributor: Option[Distributor], tracker: Tracker) { runImpl(thisSuite, testName, reporter, stopper, filter, configMap, distributor, tracker, super.run) } /** * Registers shared scenarios. * * <p> * This method enables the following syntax for shared scenarios in a <code>FeatureSpec</code>: * </p> * * <pre class="stHighlight"> * scenariosFor(nonEmptyStack(lastValuePushed)) * </pre> * * <p> * This method just provides syntax sugar intended to make the intent of the code clearer. * Because the parameter passed to it is * of type <code>Unit</code>, the expression will be evaluated before being passed, which * is sufficient to register the shared scenarios. For examples of shared scenarios, see the * <a href="#SharedScenarios">Shared scenarios section</a> in the main documentation for this trait. * </p> */ protected def scenariosFor(unit: Unit) {} }
epishkin/scalatest-google-code
src/main/scala/org/scalatest/FeatureSpec.scala
Scala
apache-2.0
74,610
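The FeatureSpec documentation above describes, but does not show, the imperative alternative in which a stack var is reassigned in beforeEach so that a behavior function needs no parameters. The following is a minimal sketch of that style, not code from ScalaTest itself: the class name StackImperativeSpec and its single behavior method are hypothetical, and it assumes the same org.scalatestexamples.helpers.Stack class used in the examples above.

import org.scalatest.FeatureSpec
import org.scalatest.BeforeAndAfterEach
import org.scalatestexamples.helpers.Stack

class StackImperativeSpec extends FeatureSpec with BeforeAndAfterEach {

  // Mutable fixture, reassigned before every scenario instead of being passed in.
  var stack: Stack[Int] = _
  val lastValuePushed = 9

  override def beforeEach() {
    stack = new Stack[Int]
    stack.push(lastValuePushed)
    super.beforeEach() // keep the trait stackable, as recommended above
  }

  // Behavior "function" that relies on the stack var already being in scope,
  // so scenariosFor(nonEmptyStack) can be called without arguments.
  def nonEmptyStack {
    scenario("empty is invoked on the shared non-empty stack") {
      assert(!stack.empty)
    }
    scenario("peek is invoked on the shared non-empty stack") {
      assert(stack.peek === lastValuePushed)
    }
  }

  feature("A Stack is pushed and popped") {
    scenariosFor(nonEmptyStack)
  }
}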
package wandou.collection import scala.collection.generic.CanBuildFrom import scala.collection.generic.MutableMapFactory import scala.collection.mutable.Map import scala.collection.mutable.MapLike @SerialVersionUID(1L) final class WeakKeyHashMap[A, B] extends Map[A, B] with MapLike[A, B, WeakKeyHashMap[A, B]] with WeakKeyHashTable[A, B] with Serializable { override def empty: WeakKeyHashMap[A, B] = WeakKeyHashMap.empty[A, B] override def clear() = clearTable override def size: Int = { if (tableSize == 0) return 0 expungeStaleEntries tableSize } def get(key: A): Option[B] = { val e = findEntry(key) if (e == null) None else Some(e.value) } override def put(key: A, value: B): Option[B] = { val e = findEntry(key) if (e == null) { addEntry(new WeakEntry(key, value, queue)); None } else { val v = e.value; e.value = value; Some(v) } } override def update(key: A, value: B): Unit = put(key, value) override def remove(key: A): Option[B] = { val e = removeEntry(key) if (e ne null) Some(e.value) else None } def +=(kv: (A, B)): this.type = { val e = findEntry(kv._1) if (e == null) addEntry(new WeakEntry(kv._1, kv._2, queue)) else e.value = kv._2 this } def -=(key: A): this.type = { removeEntry(key); this } def iterator = entriesIterator map { e => (e.key, e.value) } override def foreach[C](f: ((A, B)) => C): Unit = foreachEntry(e => f(e.key, e.value)) /* Override to avoid tuple allocation in foreach */ override def keySet: collection.Set[A] = new DefaultKeySet { override def foreach[C](f: A => C) = foreachEntry(e => f(e.key)) } /* Override to avoid tuple allocation in foreach */ override def values: collection.Iterable[B] = new DefaultValuesIterable { override def foreach[C](f: B => C) = foreachEntry(e => f(e.value)) } /* Override to avoid tuple allocation */ override def keysIterator: Iterator[A] = new Iterator[A] { val iter = entriesIterator def hasNext = iter.hasNext def next = iter.next.key } /* Override to avoid tuple allocation */ override def valuesIterator: Iterator[B] = new Iterator[B] { val iter = entriesIterator def hasNext = iter.hasNext def next = iter.next.value } private def writeObject(out: java.io.ObjectOutputStream) { serializeTo(out, _.value) } private def readObject(in: java.io.ObjectInputStream) { init[B](in, new WeakEntry(_, _, queue)) } } object WeakKeyHashMap extends MutableMapFactory[WeakKeyHashMap] { implicit def canBuildFrom[A, B]: CanBuildFrom[Coll, (A, B), WeakKeyHashMap[A, B]] = new MapCanBuildFrom[A, B] def empty[A, B]: WeakKeyHashMap[A, B] = new WeakKeyHashMap[A, B] }
wandoulabs/wandou-math
wandou-util/src/main/scala/wandou/collection/WeakKeyHashMap.scala
Scala
apache-2.0
2,737
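A brief usage sketch for the WeakKeyHashMap above (the demo object and variable names are made up). It illustrates the weak-key semantics: once a key is no longer strongly referenced its entry becomes eligible for collection, and stale entries are expunged lazily the next time the map is queried, so the exact timing depends on the garbage collector.

import wandou.collection.WeakKeyHashMap

object WeakKeyHashMapDemo extends App {
  val cache = new WeakKeyHashMap[Object, String]

  val retained = new Object   // key we keep a strong reference to
  var discarded = new Object  // key we will drop

  cache.put(retained, "kept")
  cache.put(discarded, "collectable")
  println(cache.size)         // 2

  discarded = null            // drop the only strong reference to the second key
  System.gc()                 // only a hint; collection is not guaranteed immediately
  Thread.sleep(100)

  // size calls expungeStaleEntries, so a collected entry is dropped lazily here.
  println(cache.get(retained)) // Some(kept)
  println(cache.size)          // typically 1 once the key has been collected
}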
package com.blinkbox.books.spray import org.junit.runner.RunWith import org.scalatest.FunSuite import org.scalatest.junit.JUnitRunner import scala.concurrent.duration._ import scala.util.Try import spray.http.DateTime import spray.http.StatusCodes._ import spray.routing.{ HttpService, Route } import spray.testkit.ScalatestRouteTest /** * A trait that contains a route using the 'cacheable' directive. */ private[spray] trait CacheableService extends HttpService with Directives { val maxAge = 1.minute // Test route. // This is defined as a val to check that headers are set dynamically even with a static route. val route: Route = get { path("cacheableEndpoint") { cacheable(maxAge) { complete(OK) } } ~ path("nonCacheableEndpoint") { complete(OK) } ~ path("cacheableCompletedEndpoint") { cacheable(maxAge, OK) } } } @RunWith(classOf[JUnitRunner]) class CacheableDirectiveTest extends FunSuite with ScalatestRouteTest with CacheableService { override implicit def actorRefFactory = system test("Response headers on cacheable endpoint") { val start = DateTime.now Get("/cacheableEndpoint") ~> route ~> check { assert(status == OK) assert(header("Cache-Control").get.value == s"public, max-age=${maxAge.toSeconds}") val expiryTimeStr = header("Expires").get.value assert(parseDateTime(expiryTimeStr).isSuccess, "Should have set expiry time in the valid format") } } test("Response headers when completing cacheable object") { val start = DateTime.now Get("/cacheableCompletedEndpoint") ~> route ~> check { assert(status == OK) assert(header("Cache-Control").get.value == s"public, max-age=${maxAge.toSeconds}") val expiryTimeStr = header("Expires").get.value assert(parseDateTime(expiryTimeStr).isSuccess, "Should have set expiry time in the valid format") } } test("Response headers are updated dynamically") { Get("/cacheableEndpoint") ~> route ~> check { val time1 = parseDateTime(header("Expires").get.value).get // Sleep for a second, to ensure date header will be updated. Thread.sleep(1001) Get("/cacheableEndpoint") ~> route ~> check { assert(status == OK) val time2 = parseDateTime(header("Expires").get.value).get assert(time1 != time2, "Time field should be updated dynamically on each request") } } } test("No cache related response headers on non-cacheable endpoint") { Get("/nonCacheableEndpoint") ~> route ~> check { assert(status == OK) assert(header("Cache-Control") == None) assert(header("Expires") == None) } } // The expected date-time format. private val dateTimeFormatter = org.joda.time.format.DateTimeFormat.forPattern("E, d MMM yyyy HH:mm:ss z") def parseDateTime(str: String) = Try(DateTime(dateTimeFormatter.parseDateTime(str).getMillis())) }
blinkboxbooks/common-spray.scala
src/test/scala/com/blinkbox/books/spray/CachableDirectiveTest.scala
Scala
mit
2,952
package com.jdrews.logstation.tailer import java.io._ import akka.actor.{Actor, ActorLogging, ActorRef} import com.google.common.xml.XmlEscapers import com.jdrews.logstation.config.BridgeController import com.jdrews.logstation.service.ServiceShutdown import com.jdrews.logstation.webserver.LogMessage import com.osinka.tailf.Tail /** * Created by jdrews on 2/21/2015. * * Actor to perform the tailing functionality * Should be one of these actors per log */ class LogTailerActor extends Actor with ActorLogging { // TODO: probably doesn't need to be a set. There should be only one thread per actor private var readerThreads = Set.empty[Thread] var colorizer: Option[ActorRef] = None private val bridge = BridgeController.getBridgeActor private val sleepIntervalForUpdates = 2000 def readLastLines(r: BufferedReader, skipBytes: Long, logFile: String): Unit = { if (skipBytes > 0) { r.skip(skipBytes) // read off any garbage line r.readLine() // back to normal tailing } //read(r, logFile) loopRead(r, logFile) } def read(r: BufferedReader, logFile: String): Unit = { if (!Thread.currentThread().isInterrupted) { val l = r.readLine if (l != null) { // log.info(s"read line: $l") // pass to colorizer if it's up, otherwise skip it and go straight to bridge colorizer.getOrElse(bridge) ! new LogMessage(XmlEscapers.xmlAttributeEscaper().escape(l), XmlEscapers.xmlAttributeEscaper().escape(logFile)) } read(r, logFile) } else { r.close() log.info("read() Shutdown!") self ! "doneRead" } } def loopRead(r: BufferedReader, logFile: String): Unit = { while (!Thread.currentThread().isInterrupted) { val l = r.readLine if (l != null) { // log.info(s"read line: $l") // pass to colorizer if it's up, otherwise skip it and go straight to bridge colorizer.getOrElse(bridge) ! new LogMessage(l, logFile) } else { try { // wait a bit for some more logs... Thread.sleep(sleepIntervalForUpdates) } catch { // clean up if we're sleeping when it's time to quit case ie: InterruptedException => Thread.currentThread().interrupt() } } } r.close() log.info("loopRead() shutdown!") self ! "doneRead" } def receive = { case LogThisFile(logFile) => log.debug(s"About to begin logging $logFile") // calculate bytes to skip to get to last N bytes of file val file: File = new File(logFile) val readLastNBytes = 100 val skipBytes = file.length() - readLastNBytes // begin reading val r = new BufferedReader(new InputStreamReader(Tail.follow(file))) val readerThread = new Thread(new Runnable { def run() { readLastLines(r, skipBytes, logFile) } }) readerThread.setDaemon(true) readerThread.start() readerThreads += readerThread case cref: ActorRef => // load up the colorizer log.debug(s"got the colorzier! $cref") colorizer = Some(cref) log.debug(s"the colorizer.getOrElse -> ${colorizer.getOrElse("nada hombre!")}") case ServiceShutdown => log.info("shutting down read thread") readerThreads.foreach(thread => thread.interrupt()) case "doneRead" => log.info("Read thread shut down. Shutting down self...") context stop self case something => log.warning(s"huh? what's this: $something") } }
jdrews/logstation
src/main/scala/com/jdrews/logstation/tailer/LogTailerActor.scala
Scala
apache-2.0
3,968
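A minimal wiring sketch for the LogTailerActor above. It assumes LogThisFile(logFile) is a case class defined alongside the actor (it is used but not defined in this file) and that the rest of the logstation application, such as the bridge actor resolved in the constructor, is available; the system, actor, and file names are illustrative.

import akka.actor.{ ActorSystem, Props }
import com.jdrews.logstation.tailer.{ LogTailerActor, LogThisFile }

object TailerWiring extends App {
  val system = ActorSystem("logstation")

  // The actor's comments suggest one tailer per log file.
  val tailer = system.actorOf(Props[LogTailerActor], "tailer-syslog")

  // Optionally hand it a colorizer ActorRef first; without one it sends
  // LogMessages straight to the bridge (colorizer.getOrElse(bridge)).
  // tailer ! colorizerRef

  // Start tailing; the actor skips to roughly the last 100 bytes of the file.
  tailer ! LogThisFile("/var/log/syslog")
}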
package scalariform

object Utils {

  def writeText(file: java.io.File, text: String, encodingOpt: Option[String] = None): Unit = {
    import java.io.{ OutputStreamWriter, FileOutputStream }
    val encoding = encodingOpt getOrElse (System getProperty "file.encoding")
    val writer = new OutputStreamWriter(new FileOutputStream(file), encoding)
    try writer.write(text)
    finally writer.close()
  }
}
mdr/scalariform
cli/src/main/scala/scalariform/Utils.scala
Scala
mit
421
package es.ucm.fdi.sscheck.testing import org.junit.runner.RunWith import org.specs2.runner.JUnitRunner import org.specs2.ScalaCheck import org.specs2.scalacheck.{Parameters, ScalaCheckProperty} import org.specs2.specification.BeforeAfterEach import org.scalacheck.{Prop, Gen} import org.scalacheck.Arbitrary.arbitrary import org.scalacheck.Prop.AnyOperators import com.typesafe.scalalogging.slf4j.Logger import org.slf4j.LoggerFactory // (almost the same) example for https://github.com/etorreborre/specs2/issues/393 @RunWith(classOf[JUnitRunner]) class SequentialScalacheckOriginal extends org.specs2.Specification with org.specs2.matcher.MustThrownExpectations with BeforeAfterEach with ScalaCheck { // cannot use private[this] due to https://issues.scala-lang.org/browse/SI-8087 @transient private val logger = Logger(LoggerFactory.getLogger("SequentialScalacheckOriginal")) def is = sequential ^ "Example run should be sequential with ScalaCheck configured for one worker" ^ "property 1" ! skipped // this generates and error before Specs2 v "3.6.2-20150716123420-ac2f605" prop1 ^ "property 2" ! prop2 ^ end var counters = scala.collection.mutable.Map("example" -> 0, "test case" -> 0, "prop1" -> 0) def getPrinter(counterId : String) = { if (counterId == "test case") (s : String) => logger.debug(s) else (s : String) => logger.warn(s) } def enter(counterId : String) : Unit = this.synchronized { val print = getPrinter(counterId) print(s"one more $counterId") counters(counterId) = counters(counterId) + 1 if (counters(counterId) > 1) { logger.error(s"too many $counterId") throw new RuntimeException("this should be sequential") } } def exit(counterId : String) : Unit = this.synchronized { getPrinter(counterId)(s"one less $counterId") counters(counterId) -= 1 } var once = false def enterOnce() : Unit = this.synchronized { if (once) { throw new RuntimeException("this should be executed just once") } once = true } override def before : Unit = enter("example") override def after : Unit = exit("example") def prop1 = { enter("prop1") enterOnce() val p = Prop.forAll ("x" |: Gen.choose(0, 100)) { x : Int => logger.debug(s"$x,") enter("test case") x must be_>=(0) exit("test case") true } . set(workers = 1, minTestsOk = 150).verbose // comment the set and this works ok exit("prop1") p } def prop2 = { logger.info("running prop2") 2 === 2 } }
MiguelPeralvo/sscheck
src/test/scala/es/ucm/fdi/sscheck/testing/SequentialScalacheckOriginal.scala
Scala
apache-2.0
2,608
package pl.edu.agh.mplt.parser.AMPL.declarations import org.scalatest.{Matchers, FlatSpec} import pl.edu.agh.mplt.parser.phrase.set._ import pl.edu.agh.mplt.parser.phrase.expression.{Bin, Number} import pl.edu.agh.mplt.parser.{AMPLParser, IntercodeImplicits} import pl.edu.agh.mplt.parser.member.SetMember import pl.edu.agh.mplt.parser.reference.SimpleReference import pl.edu.agh.mplt.parser.phrase.set.SetComprehension import pl.edu.agh.mplt.parser.phrase.set.Indexing import scala.Some import pl.edu.agh.mplt.parser.member.StringMember import pl.edu.agh.mplt.parser.phrase.set.ExplicitSet import pl.edu.agh.mplt.parser.declaration.data.{SetDeclaration, Attribute} class SetDeclarationTest extends FlatSpec with Matchers with IntercodeImplicits { val parser = AMPLParser() def expr = parser.datatypeDeclaration def parse(input: String) = parser.parse(expr, input).get "Set declaration parser" should "parse simple set declaration" in { parse("set nodes;") should be(SetDeclaration("nodes")) } it should "parse set declaration with alias" in { parse("set oranges apples;") should be(SetDeclaration("oranges", Some("apples"))) } //////////////////////////////// ////////// indexing ////////// //////////////////////////////// it should "parse set declaration with simple indexing" in { parse("set apples { 1 + 3 .. 10 by 4 };") should be( SetDeclaration("apples", indexing = Some(Indexing(SetComprehension(Bin.+(1, 3), 10, 4))))) } it should "parse set declaration with alias and indexing" in { parse("set oranges apples { 1 + 3 .. 10 by 4 };") should be( SetDeclaration("oranges", Some("apples"), indexing = Some(Indexing(SetComprehension(Bin.+(1, 3), 10, 4))))) } it should "parse set declaration with indexing" in { parse( """set apples { 1 + 3 .. 10 by 4 , {1, 2, 3}, {"a", "b", "c"} };""") should be( SetDeclaration("apples", indexing = Some(Indexing(List( SetComprehension(Bin.+(1, 3), 10, 4), ExplicitSet(Set[SetMember](Number(1), Number(2), Number(3))), ExplicitSet(Set[SetMember](StringMember("a"), StringMember("b"), StringMember("c")))))))) } /////////////////////////////// ///////// attributes ///////// /////////////////////////////// it should "parse set declaration with dimension attribute" in { parse("set apples dimen 1;") should be(SetDeclaration("apples", attributes = List(Attribute.Dimension("1")))) } it should "parse set declaration with within attribute" in { parse( """set apples within {"a", "b", "c"} ;""") should be(SetDeclaration("apples", attributes = List(Attribute.Membership( ExplicitSet(Set[SetMember](StringMember("a"), StringMember("b"), StringMember("c"))))))) } it should "parse set declaration with = attribute" in { parse("set numbers = {1, 2, 3};") should be(SetDeclaration("numbers", attributes = List(Attribute.FinalSet( ExplicitSet(Set[SetMember](1, 2, 3)))))) } it should "parse set declaration with := attribute the sme as '='" in { parse("set numbers := {1, 2, 3};") should be(parse("set numbers = {1, 2, 3};")) } it should "parse set declaration with default attribute" in { parse("set numbers default {1, 2, 3};") should be(SetDeclaration("numbers", attributes = List(Attribute.DefaultSet(ExplicitSet(Set[SetMember](1, 2, 3)))))) } it should "parse set declaration with many attributes" in { parse("set arcs dimen 3, within nodes cross nodes, default {1, 2};") should be(SetDeclaration("arcs", attributes = List( Attribute.Dimension(3), Attribute.Membership(Sets.Cartesian(SimpleReference("nodes"), SimpleReference("nodes"))), Attribute.DefaultSet(ExplicitSet(Set[SetMember](1, 2)))))) } it should "parse 
set declaration with alias and attribute" in { parse("set apples oranges dimen 1;") should be(SetDeclaration("apples", Some("oranges"), attributes = List(Attribute.Dimension(1)))) } it should "parse set declaration with indexing and attribute" in { parse("set apples {i in 1 .. 10} dimen 1;") should be(SetDeclaration("apples", indexing = Some(Indexing(IndexedSet(List("i"), SetComprehension(1, 10)))), attributes = List(Attribute.Dimension(1)))) } it should "parse set declaration with alias and indexing and attribute" in { parse("set apples oranges {i in 1 .. 10} dimen 1;") should be(SetDeclaration("apples", Some("oranges"), indexing = Some(Indexing(IndexedSet(List("i"), SetComprehension(1, 10)))), attributes = List(Attribute.Dimension(1)))) } }
marek1840/MPLT
src/test/scala/pl/edu/agh/mplt/parser/AMPL/declarations/SetDeclarationTest.scala
Scala
mit
4,713
import lib.Timing.measureTask import lib._ import model._ import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent._ import scala.concurrent.duration.Duration import scala.sys.process._ import scalax.file.PathMatcher.IsDirectory import scalax.io.Codec /* * Vary BFG runs by: * Java version * BFG version (JGit version?) * */ object Benchmark extends App { implicit val codec = Codec.UTF8 BenchmarkConfig.parser.parse(args, BenchmarkConfig()) map { config => println(s"Using resources dir : ${config.resourcesDir.path}") require(config.resourcesDir.exists, s"Resources dir not found : ${config.resourcesDir.path}") require(config.jarsDir.exists, s"Jars dir not found : ${config.jarsDir.path}") require(config.reposDir.exists, s"Repos dir not found : ${config.reposDir.path}") val missingJars = config.bfgJars.filterNot(_.exists).map(_.toAbsolute.path) require(missingJars.isEmpty, s"Missing BFG jars : ${missingJars.mkString(",")}") val tasksFuture = for { bfgInvocableEngineSet <- bfgInvocableEngineSet(config) } yield { val gfbInvocableEngineSetOpt = if (config.onlyBfg) None else Some(InvocableEngineSet[GFBInvocation](GitFilterBranch, Seq(InvocableGitFilterBranch))) boogaloo(config, new RepoExtractor(config.scratchDir), Seq(bfgInvocableEngineSet) ++ gfbInvocableEngineSetOpt.toSeq) } Await.result(tasksFuture, Duration.Inf) } def bfgInvocableEngineSet(config: BenchmarkConfig): Future[InvocableEngineSet[BFGInvocation]] = for { javas <- Future.traverse(config.javaCmds)(jc => JavaVersion.version(jc).map(v => Java(jc, v))) } yield { val invocables = for { java <- javas bfgJar <- config.bfgJars } yield InvocableBFG(java, BFGJar.from(bfgJar)) InvocableEngineSet[BFGInvocation](BFG, invocables) } /* * A Task says "here is something you can do to a given repo, and here is how to do * it with a BFG, and with git-filter-branch" */ def boogaloo(config: BenchmarkConfig, repoExtractor: RepoExtractor, invocableEngineSets: Seq[InvocableEngineSet[_ <: EngineInvocation]]) = { for { repoSpecDir <- config.repoSpecDirs.toList availableCommandDirs = (repoSpecDir / "commands").children().filter(IsDirectory).toList // println(s"Available commands for $repoName : ${availableCommandDirs.map(_.name).mkString(", ")}") commandDir <- availableCommandDirs.filter(p => config.commands(p.name)) } yield { val repoName = repoSpecDir.name val commandName = commandDir.name commandName -> (for { invocableEngineSet <- invocableEngineSets } yield for { (invocable, processMaker) <- invocableEngineSet.invocationsFor(commandDir) } yield { val cleanRepoDir = repoExtractor.extractRepoFrom(repoSpecDir / "repo.git.zip") commandDir.children().foreach(p => p.copyTo(cleanRepoDir / p.name)) val process = processMaker(cleanRepoDir) val duration = measureTask(s"$commandName - $invocable") { process ! ProcessLogger(_ => Unit) } if (config.dieIfTaskTakesLongerThan.exists(_ < duration.toMillis)) { throw new Exception("This took too long: "+duration) } invocable -> duration }) } } println(s"\\n...benchmark finished.") }
digitalquest/bfg-repo-cleaner
bfg-benchmark/src/main/scala/Benchmark.scala
Scala
gpl-3.0
3,424
/** MACHINE-GENERATED FROM AVRO SCHEMA. DO NOT EDIT DIRECTLY */
package com.test.ARecord

import com.test.MoneyDecimal

final case class ARecord(GroupId: String = "", MediaCostCPMInUSD: Option[MoneyDecimal.Value], SupplyVendor: Option[String])
makubi/avrohugger-maven-plugin
src/test/resources/unit/avrohugger-maven-plugin/expected/ARecord.scala
Scala
apache-2.0
243
package code.rest

import net.liftweb.http.{Req,OutputStreamResponse}
import net.liftweb.http.rest._

object Numbers extends RestHelper {

  // Generate numbers, converted to Array[Byte]
  def infinite = Stream.from(1).map(num2bytes)

  def num2bytes(x: Int) = (x + "\n") getBytes("utf-8")

  serve {
    case Req("numbers" :: Nil, _, _) =>
      OutputStreamResponse((out) => infinite.foreach(out.write))
  }
}
gruenewa/lift-testbox
src/main/scala/code/rest/Numbers.scala
Scala
apache-2.0
422
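The Numbers service above can serve an endless response because the Stream is lazy: OutputStreamResponse pulls one element at a time as it writes to the socket. A small standalone sketch of the same idea (names are illustrative), bounded with take so it terminates:

import java.io.ByteArrayOutputStream

object LazyStreamDemo extends App {
  def num2bytes(x: Int) = (x + "\n") getBytes("utf-8")

  // Same shape as Numbers.infinite: nothing is computed until it is consumed.
  def infinite = Stream.from(1).map(num2bytes)

  // A consumer (here a buffer standing in for the response OutputStream)
  // pulls only as many elements as it needs.
  val out = new ByteArrayOutputStream
  infinite.take(5).foreach(out.write)
  print(out.toString("utf-8")) // 1..5, one per line
}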
package michid.script.shell

import michid.script.oak.fixtures.oakFixtures
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class RunScriptIT extends FunSuite with ScriptRunner {
  oakFixtures.values.foreach(oakFixture => {

    test(s"run script ($oakFixture)") {
      run(oakFixture) {
        """println("foo-42")"""
      } { case (out, err) =>
        assert(err.isEmpty)
        assert(out.contains("foo-42"))
      }
    }

    test(s"run script through interpreter ($oakFixture)") {
      run(oakFixture) {
        """"println(42)".run"""
      } { case (out, err) =>
        assert(err.isEmpty)
        assert(out.contains("42"))
      }
    }
  })
}
mduerig/script-oak
script-oak-shell/src/test/scala/michid/script/shell/RunScriptIT.scala
Scala
apache-2.0
770
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark import java.util.concurrent.TimeUnit import scala.collection.mutable import scala.collection.mutable.ArrayBuffer import scala.util.control.{ControlThrowable, NonFatal} import com.codahale.metrics.{Gauge, MetricRegistry} import org.apache.spark.internal.Logging import org.apache.spark.internal.config.{DYN_ALLOCATION_MAX_EXECUTORS, DYN_ALLOCATION_MIN_EXECUTORS} import org.apache.spark.metrics.source.Source import org.apache.spark.scheduler._ import org.apache.spark.util.{Clock, SystemClock, ThreadUtils, Utils} /** * An agent that dynamically allocates and removes executors based on the workload. * * The ExecutorAllocationManager maintains a moving target number of executors which is periodically * synced to the cluster manager. The target starts at a configured initial value and changes with * the number of pending and running tasks. * * Decreasing the target number of executors happens when the current target is more than needed to * handle the current load. The target number of executors is always truncated to the number of * executors that could run all current running and pending tasks at once. * * Increasing the target number of executors happens in response to backlogged tasks waiting to be * scheduled. If the scheduler queue is not drained in N seconds, then new executors are added. If * the queue persists for another M seconds, then more executors are added and so on. The number * added in each round increases exponentially from the previous round until an upper bound has been * reached. The upper bound is based both on a configured property and on the current number of * running and pending tasks, as described above. * * The rationale for the exponential increase is twofold: (1) Executors should be added slowly * in the beginning in case the number of extra executors needed turns out to be small. Otherwise, * we may add more executors than we need just to remove them later. (2) Executors should be added * quickly over time in case the maximum number of executors is very high. Otherwise, it will take * a long time to ramp up under heavy workloads. * * The remove policy is simpler: If an executor has been idle for K seconds, meaning it has not * been scheduled to run any tasks, then it is removed. * * There is no retry logic in either case because we make the assumption that the cluster manager * will eventually fulfill all requests it receives asynchronously. 
* * The relevant Spark properties include the following: * * spark.dynamicAllocation.enabled - Whether this feature is enabled * spark.dynamicAllocation.minExecutors - Lower bound on the number of executors * spark.dynamicAllocation.maxExecutors - Upper bound on the number of executors * spark.dynamicAllocation.initialExecutors - Number of executors to start with * * spark.dynamicAllocation.schedulerBacklogTimeout (M) - * If there are backlogged tasks for this duration, add new executors * * spark.dynamicAllocation.sustainedSchedulerBacklogTimeout (N) - * If the backlog is sustained for this duration, add more executors * This is used only after the initial backlog timeout is exceeded * * spark.dynamicAllocation.executorIdleTimeout (K) - * If an executor has been idle for this duration, remove it */ private[spark] class ExecutorAllocationManager( client: ExecutorAllocationClient, listenerBus: LiveListenerBus, conf: SparkConf) extends Logging { allocationManager => import ExecutorAllocationManager._ // Lower and upper bounds on the number of executors. private val minNumExecutors = conf.get(DYN_ALLOCATION_MIN_EXECUTORS) private val maxNumExecutors = conf.get(DYN_ALLOCATION_MAX_EXECUTORS) private val initialNumExecutors = Utils.getDynamicAllocationInitialExecutors(conf) // How long there must be backlogged tasks for before an addition is triggered (seconds) private val schedulerBacklogTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.schedulerBacklogTimeout", "1s") // Same as above, but used only after `schedulerBacklogTimeoutS` is exceeded private val sustainedSchedulerBacklogTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.sustainedSchedulerBacklogTimeout", s"${schedulerBacklogTimeoutS}s") // How long an executor must be idle for before it is removed (seconds) private val executorIdleTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.executorIdleTimeout", "60s") private val cachedExecutorIdleTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.cachedExecutorIdleTimeout", s"${Integer.MAX_VALUE}s") // During testing, the methods to actually kill and add executors are mocked out private val testing = conf.getBoolean("spark.dynamicAllocation.testing", false) // TODO: The default value of 1 for spark.executor.cores works right now because dynamic // allocation is only supported for YARN and the default number of cores per executor in YARN is // 1, but it might need to be attained differently for different cluster managers private val tasksPerExecutor = conf.getInt("spark.executor.cores", 1) / conf.getInt("spark.task.cpus", 1) validateSettings() // Number of executors to add in the next round private var numExecutorsToAdd = 1 // The desired number of executors at this moment in time. If all our executors were to die, this // is the number of executors we would immediately want from the cluster manager. 
private var numExecutorsTarget = initialNumExecutors // Executors that have been requested to be removed but have not been killed yet private val executorsPendingToRemove = new mutable.HashSet[String] // All known executors private val executorIds = new mutable.HashSet[String] // A timestamp of when an addition should be triggered, or NOT_SET if it is not set // This is set when pending tasks are added but not scheduled yet private var addTime: Long = NOT_SET // A timestamp for each executor of when the executor should be removed, indexed by the ID // This is set when an executor is no longer running a task, or when it first registers private val removeTimes = new mutable.HashMap[String, Long] // Polling loop interval (ms) private val intervalMillis: Long = 100 // Clock used to schedule when executors should be added and removed private var clock: Clock = new SystemClock() // Listener for Spark events that impact the allocation policy private val listener = new ExecutorAllocationListener // Executor that handles the scheduling task. private val executor = ThreadUtils.newDaemonSingleThreadScheduledExecutor("spark-dynamic-executor-allocation") // Metric source for ExecutorAllocationManager to expose internal status to MetricsSystem. val executorAllocationManagerSource = new ExecutorAllocationManagerSource // Whether we are still waiting for the initial set of executors to be allocated. // While this is true, we will not cancel outstanding executor requests. This is // set to false when: // (1) a stage is submitted, or // (2) an executor idle timeout has elapsed. @volatile private var initializing: Boolean = true // Number of locality aware tasks, used for executor placement. private var localityAwareTasks = 0 // Host to possible task running on it, used for executor placement. private var hostToLocalTaskCount: Map[String, Int] = Map.empty /** * Verify that the settings specified through the config are valid. * If not, throw an appropriate exception. */ private def validateSettings(): Unit = { if (minNumExecutors < 0 || maxNumExecutors < 0) { throw new SparkException("spark.dynamicAllocation.{min/max}Executors must be positive!") } if (maxNumExecutors == 0) { throw new SparkException("spark.dynamicAllocation.maxExecutors cannot be 0!") } if (minNumExecutors > maxNumExecutors) { throw new SparkException(s"spark.dynamicAllocation.minExecutors ($minNumExecutors) must " + s"be less than or equal to spark.dynamicAllocation.maxExecutors ($maxNumExecutors)!") } if (schedulerBacklogTimeoutS <= 0) { throw new SparkException("spark.dynamicAllocation.schedulerBacklogTimeout must be > 0!") } if (sustainedSchedulerBacklogTimeoutS <= 0) { throw new SparkException( "spark.dynamicAllocation.sustainedSchedulerBacklogTimeout must be > 0!") } if (executorIdleTimeoutS <= 0) { throw new SparkException("spark.dynamicAllocation.executorIdleTimeout must be > 0!") } // Require external shuffle service for dynamic allocation // Otherwise, we may lose shuffle files when killing executors if (!conf.getBoolean("spark.shuffle.service.enabled", false) && !testing) { throw new SparkException("Dynamic allocation of executors requires the external " + "shuffle service. You may enable this through spark.shuffle.service.enabled.") } if (tasksPerExecutor == 0) { throw new SparkException("spark.executor.cores must not be less than spark.task.cpus.") } } /** * Use a different clock for this allocation manager. This is mainly used for testing. 
*/ def setClock(newClock: Clock): Unit = { clock = newClock } /** * Register for scheduler callbacks to decide when to add and remove executors, and start * the scheduling task. */ def start(): Unit = { listenerBus.addToManagementQueue(listener) val scheduleTask = new Runnable() { override def run(): Unit = { try { schedule() } catch { case ct: ControlThrowable => throw ct case t: Throwable => logWarning(s"Uncaught exception in thread ${Thread.currentThread().getName}", t) } } } executor.scheduleWithFixedDelay(scheduleTask, 0, intervalMillis, TimeUnit.MILLISECONDS) client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) } /** * Stop the allocation manager. */ def stop(): Unit = { executor.shutdown() executor.awaitTermination(10, TimeUnit.SECONDS) } /** * Reset the allocation manager when the cluster manager loses track of the driver's state. * This is currently only done in YARN client mode, when the AM is restarted. * * This method forgets about any state about existing executors, and forces the scheduler to * re-evaluate the number of needed executors the next time it's run. */ def reset(): Unit = synchronized { addTime = 0L numExecutorsTarget = initialNumExecutors executorsPendingToRemove.clear() removeTimes.clear() } /** * The maximum number of executors we would need under the current load to satisfy all running * and pending tasks, rounded up. */ private def maxNumExecutorsNeeded(): Int = { val numRunningOrPendingTasks = listener.totalPendingTasks + listener.totalRunningTasks (numRunningOrPendingTasks + tasksPerExecutor - 1) / tasksPerExecutor } private def totalRunningTasks(): Int = synchronized { listener.totalRunningTasks } /** * This is called at a fixed interval to regulate the number of pending executor requests * and number of executors running. * * First, adjust our requested executors based on the add time and our current needs. * Then, if the remove time for an existing executor has expired, kill the executor. * * This is factored out into its own method for testing. */ private def schedule(): Unit = synchronized { val now = clock.getTimeMillis updateAndSyncNumExecutorsTarget(now) val executorIdsToBeRemoved = ArrayBuffer[String]() removeTimes.retain { case (executorId, expireTime) => val expired = now >= expireTime if (expired) { initializing = false executorIdsToBeRemoved += executorId } !expired } if (executorIdsToBeRemoved.nonEmpty) { removeExecutors(executorIdsToBeRemoved) } } /** * Updates our target number of executors and syncs the result with the cluster manager. * * Check to see whether our existing allocation and the requests we've made previously exceed our * current needs. If so, truncate our target and let the cluster manager know so that it can * cancel pending requests that are unneeded. * * If not, and the add time has expired, see if we can request new executors and refresh the add * time. * * @return the delta in the target number of executors. 
*/ private def updateAndSyncNumExecutorsTarget(now: Long): Int = synchronized { val maxNeeded = maxNumExecutorsNeeded if (initializing) { // Do not change our target while we are still initializing, // Otherwise the first job may have to ramp up unnecessarily 0 } else if (maxNeeded < numExecutorsTarget) { // The target number exceeds the number we actually need, so stop adding new // executors and inform the cluster manager to cancel the extra pending requests val oldNumExecutorsTarget = numExecutorsTarget numExecutorsTarget = math.max(maxNeeded, minNumExecutors) numExecutorsToAdd = 1 // If the new target has not changed, avoid sending a message to the cluster manager if (numExecutorsTarget < oldNumExecutorsTarget) { client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) logDebug(s"Lowering target number of executors to $numExecutorsTarget (previously " + s"$oldNumExecutorsTarget) because not all requested executors are actually needed") } numExecutorsTarget - oldNumExecutorsTarget } else if (addTime != NOT_SET && now >= addTime) { val delta = addExecutors(maxNeeded) logDebug(s"Starting timer to add more executors (to " + s"expire in $sustainedSchedulerBacklogTimeoutS seconds)") addTime = now + (sustainedSchedulerBacklogTimeoutS * 1000) delta } else { 0 } } /** * Request a number of executors from the cluster manager. * If the cap on the number of executors is reached, give up and reset the * number of executors to add next round instead of continuing to double it. * * @param maxNumExecutorsNeeded the maximum number of executors all currently running or pending * tasks could fill * @return the number of additional executors actually requested. */ private def addExecutors(maxNumExecutorsNeeded: Int): Int = { // Do not request more executors if it would put our target over the upper bound if (numExecutorsTarget >= maxNumExecutors) { logDebug(s"Not adding executors because our current target total " + s"is already $numExecutorsTarget (limit $maxNumExecutors)") numExecutorsToAdd = 1 return 0 } val oldNumExecutorsTarget = numExecutorsTarget // There's no point in wasting time ramping up to the number of executors we already have, so // make sure our target is at least as much as our current allocation: numExecutorsTarget = math.max(numExecutorsTarget, executorIds.size) // Boost our target with the number to add for this round: numExecutorsTarget += numExecutorsToAdd // Ensure that our target doesn't exceed what we need at the present moment: numExecutorsTarget = math.min(numExecutorsTarget, maxNumExecutorsNeeded) // Ensure that our target fits within configured bounds: numExecutorsTarget = math.max(math.min(numExecutorsTarget, maxNumExecutors), minNumExecutors) val delta = numExecutorsTarget - oldNumExecutorsTarget // If our target has not changed, do not send a message // to the cluster manager and reset our exponential growth if (delta == 0) { // Check if there is any speculative jobs pending if (listener.pendingTasks == 0 && listener.pendingSpeculativeTasks > 0) { numExecutorsTarget = math.max(math.min(maxNumExecutorsNeeded + 1, maxNumExecutors), minNumExecutors) } else { numExecutorsToAdd = 1 return 0 } } val addRequestAcknowledged = try { testing || client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) } catch { case NonFatal(e) => // Use INFO level so the error it doesn't show up by default in shells. 
Errors here are more // commonly caused by YARN AM restarts, which is a recoverable issue, and generate a lot of // noisy output. logInfo("Error reaching cluster manager.", e) false } if (addRequestAcknowledged) { val executorsString = "executor" + { if (delta > 1) "s" else "" } logInfo(s"Requesting $delta new $executorsString because tasks are backlogged" + s" (new desired total will be $numExecutorsTarget)") numExecutorsToAdd = if (delta == numExecutorsToAdd) { numExecutorsToAdd * 2 } else { 1 } delta } else { logWarning( s"Unable to reach the cluster manager to request $numExecutorsTarget total executors!") numExecutorsTarget = oldNumExecutorsTarget 0 } } /** * Request the cluster manager to remove the given executors. * Returns the list of executors which are removed. */ private def removeExecutors(executors: Seq[String]): Seq[String] = synchronized { val executorIdsToBeRemoved = new ArrayBuffer[String] logInfo("Request to remove executorIds: " + executors.mkString(", ")) val numExistingExecutors = allocationManager.executorIds.size - executorsPendingToRemove.size var newExecutorTotal = numExistingExecutors executors.foreach { executorIdToBeRemoved => if (newExecutorTotal - 1 < minNumExecutors) { logDebug(s"Not removing idle executor $executorIdToBeRemoved because there are only " + s"$newExecutorTotal executor(s) left (minimum number of executor limit $minNumExecutors)") } else if (newExecutorTotal - 1 < numExecutorsTarget) { logDebug(s"Not removing idle executor $executorIdToBeRemoved because there are only " + s"$newExecutorTotal executor(s) left (number of executor target $numExecutorsTarget)") } else if (canBeKilled(executorIdToBeRemoved)) { executorIdsToBeRemoved += executorIdToBeRemoved newExecutorTotal -= 1 } } if (executorIdsToBeRemoved.isEmpty) { return Seq.empty[String] } // Send a request to the backend to kill this executor(s) val executorsRemoved = if (testing) { executorIdsToBeRemoved } else { client.killExecutors(executorIdsToBeRemoved) } // [SPARK-21834] killExecutors api reduces the target number of executors. // So we need to update the target with desired value. client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) // reset the newExecutorTotal to the existing number of executors newExecutorTotal = numExistingExecutors if (testing || executorsRemoved.nonEmpty) { executorsRemoved.foreach { removedExecutorId => newExecutorTotal -= 1 logInfo(s"Removing executor $removedExecutorId because it has been idle for " + s"$executorIdleTimeoutS seconds (new desired total will be $newExecutorTotal)") executorsPendingToRemove.add(removedExecutorId) } executorsRemoved } else { logWarning(s"Unable to reach the cluster manager to kill executor/s " + s"${executorIdsToBeRemoved.mkString(",")} or no executor eligible to kill!") Seq.empty[String] } } /** * Request the cluster manager to remove the given executor. * Return whether the request is acknowledged. */ private def removeExecutor(executorId: String): Boolean = synchronized { val executorsRemoved = removeExecutors(Seq(executorId)) executorsRemoved.nonEmpty && executorsRemoved(0) == executorId } /** * Determine if the given executor can be killed. 
*/ private def canBeKilled(executorId: String): Boolean = synchronized { // Do not kill the executor if we are not aware of it (should never happen) if (!executorIds.contains(executorId)) { logWarning(s"Attempted to remove unknown executor $executorId!") return false } // Do not kill the executor again if it is already pending to be killed (should never happen) if (executorsPendingToRemove.contains(executorId)) { logWarning(s"Attempted to remove executor $executorId " + s"when it is already pending to be removed!") return false } true } /** * Callback invoked when the specified executor has been added. */ private def onExecutorAdded(executorId: String): Unit = synchronized { if (!executorIds.contains(executorId)) { executorIds.add(executorId) // If an executor (call this executor X) is not removed because the lower bound // has been reached, it will no longer be marked as idle. When new executors join, // however, we are no longer at the lower bound, and so we must mark executor X // as idle again so as not to forget that it is a candidate for removal. (see SPARK-4951) executorIds.filter(listener.isExecutorIdle).foreach(onExecutorIdle) logInfo(s"New executor $executorId has registered (new total is ${executorIds.size})") } else { logWarning(s"Duplicate executor $executorId has registered") } } /** * Callback invoked when the specified executor has been removed. */ private def onExecutorRemoved(executorId: String): Unit = synchronized { if (executorIds.contains(executorId)) { executorIds.remove(executorId) removeTimes.remove(executorId) logInfo(s"Existing executor $executorId has been removed (new total is ${executorIds.size})") if (executorsPendingToRemove.contains(executorId)) { executorsPendingToRemove.remove(executorId) logDebug(s"Executor $executorId is no longer pending to " + s"be removed (${executorsPendingToRemove.size} left)") } } else { logWarning(s"Unknown executor $executorId has been removed!") } } /** * Callback invoked when the scheduler receives new pending tasks. * This sets a time in the future that decides when executors should be added * if it is not already set. */ private def onSchedulerBacklogged(): Unit = synchronized { if (addTime == NOT_SET) { logDebug(s"Starting timer to add executors because pending tasks " + s"are building up (to expire in $schedulerBacklogTimeoutS seconds)") addTime = clock.getTimeMillis + schedulerBacklogTimeoutS * 1000 } } /** * Callback invoked when the scheduler queue is drained. * This resets all variables used for adding executors. */ private def onSchedulerQueueEmpty(): Unit = synchronized { logDebug("Clearing timer to add executors because there are no more pending tasks") addTime = NOT_SET numExecutorsToAdd = 1 } /** * Callback invoked when the specified executor is no longer running any tasks. * This sets a time in the future that decides when this executor should be removed if * the executor is not already marked as idle. */ private def onExecutorIdle(executorId: String): Unit = synchronized { if (executorIds.contains(executorId)) { if (!removeTimes.contains(executorId) && !executorsPendingToRemove.contains(executorId)) { // Note that it is not necessary to query the executors since all the cached // blocks we are concerned with are reported to the driver. Note that this // does not include broadcast blocks. val hasCachedBlocks = SparkEnv.get.blockManager.master.hasCachedBlocks(executorId) val now = clock.getTimeMillis() val timeout = { if (hasCachedBlocks) { // Use a different timeout if the executor has cached blocks. 
now + cachedExecutorIdleTimeoutS * 1000 } else { now + executorIdleTimeoutS * 1000 } } val realTimeout = if (timeout <= 0) Long.MaxValue else timeout // overflow removeTimes(executorId) = realTimeout logDebug(s"Starting idle timer for $executorId because there are no more tasks " + s"scheduled to run on the executor (to expire in ${(realTimeout - now)/1000} seconds)") } } else { logWarning(s"Attempted to mark unknown executor $executorId idle") } } /** * Callback invoked when the specified executor is now running a task. * This resets all variables used for removing this executor. */ private def onExecutorBusy(executorId: String): Unit = synchronized { logDebug(s"Clearing idle timer for $executorId because it is now running a task") removeTimes.remove(executorId) } /** * A listener that notifies the given allocation manager of when to add and remove executors. * * This class is intentionally conservative in its assumptions about the relative ordering * and consistency of events returned by the listener. */ private class ExecutorAllocationListener extends SparkListener { private val stageIdToNumTasks = new mutable.HashMap[Int, Int] // Number of running tasks per stage including speculative tasks. // Should be 0 when no stages are active. private val stageIdToNumRunningTask = new mutable.HashMap[Int, Int] private val stageIdToTaskIndices = new mutable.HashMap[Int, mutable.HashSet[Int]] private val executorIdToTaskIds = new mutable.HashMap[String, mutable.HashSet[Long]] // Number of speculative tasks to be scheduled in each stage private val stageIdToNumSpeculativeTasks = new mutable.HashMap[Int, Int] // The speculative tasks started in each stage private val stageIdToSpeculativeTaskIndices = new mutable.HashMap[Int, mutable.HashSet[Int]] // stageId to tuple (the number of task with locality preferences, a map where each pair is a // node and the number of tasks that would like to be scheduled on that node) map, // maintain the executor placement hints for each stage Id used by resource framework to better // place the executors. 
private val stageIdToExecutorPlacementHints = new mutable.HashMap[Int, (Int, Map[String, Int])] override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit = { initializing = false val stageId = stageSubmitted.stageInfo.stageId val numTasks = stageSubmitted.stageInfo.numTasks allocationManager.synchronized { stageIdToNumTasks(stageId) = numTasks stageIdToNumRunningTask(stageId) = 0 allocationManager.onSchedulerBacklogged() // Compute the number of tasks requested by the stage on each host var numTasksPending = 0 val hostToLocalTaskCountPerStage = new mutable.HashMap[String, Int]() stageSubmitted.stageInfo.taskLocalityPreferences.foreach { locality => if (!locality.isEmpty) { numTasksPending += 1 locality.foreach { location => val count = hostToLocalTaskCountPerStage.getOrElse(location.host, 0) + 1 hostToLocalTaskCountPerStage(location.host) = count } } } stageIdToExecutorPlacementHints.put(stageId, (numTasksPending, hostToLocalTaskCountPerStage.toMap)) // Update the executor placement hints updateExecutorPlacementHints() } } override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = { val stageId = stageCompleted.stageInfo.stageId allocationManager.synchronized { stageIdToNumTasks -= stageId stageIdToNumRunningTask -= stageId stageIdToNumSpeculativeTasks -= stageId stageIdToTaskIndices -= stageId stageIdToSpeculativeTaskIndices -= stageId stageIdToExecutorPlacementHints -= stageId // Update the executor placement hints updateExecutorPlacementHints() // If this is the last stage with pending tasks, mark the scheduler queue as empty // This is needed in case the stage is aborted for any reason if (stageIdToNumTasks.isEmpty && stageIdToNumSpeculativeTasks.isEmpty) { allocationManager.onSchedulerQueueEmpty() } } } override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { val stageId = taskStart.stageId val taskId = taskStart.taskInfo.taskId val taskIndex = taskStart.taskInfo.index val executorId = taskStart.taskInfo.executorId allocationManager.synchronized { if (stageIdToNumRunningTask.contains(stageId)) { stageIdToNumRunningTask(stageId) += 1 } // This guards against the race condition in which the `SparkListenerTaskStart` // event is posted before the `SparkListenerBlockManagerAdded` event, which is // possible because these events are posted in different threads. 
(see SPARK-4951) if (!allocationManager.executorIds.contains(executorId)) { allocationManager.onExecutorAdded(executorId) } // If this is the last pending task, mark the scheduler queue as empty if (taskStart.taskInfo.speculative) { stageIdToSpeculativeTaskIndices.getOrElseUpdate(stageId, new mutable.HashSet[Int]) += taskIndex } else { stageIdToTaskIndices.getOrElseUpdate(stageId, new mutable.HashSet[Int]) += taskIndex } if (totalPendingTasks() == 0) { allocationManager.onSchedulerQueueEmpty() } // Mark the executor on which this task is scheduled as busy executorIdToTaskIds.getOrElseUpdate(executorId, new mutable.HashSet[Long]) += taskId allocationManager.onExecutorBusy(executorId) } } override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = { val executorId = taskEnd.taskInfo.executorId val taskId = taskEnd.taskInfo.taskId val taskIndex = taskEnd.taskInfo.index val stageId = taskEnd.stageId allocationManager.synchronized { if (stageIdToNumRunningTask.contains(stageId)) { stageIdToNumRunningTask(stageId) -= 1 } // If the executor is no longer running any scheduled tasks, mark it as idle if (executorIdToTaskIds.contains(executorId)) { executorIdToTaskIds(executorId) -= taskId if (executorIdToTaskIds(executorId).isEmpty) { executorIdToTaskIds -= executorId allocationManager.onExecutorIdle(executorId) } } // If the task failed, we expect it to be resubmitted later. To ensure we have // enough resources to run the resubmitted task, we need to mark the scheduler // as backlogged again if it's not already marked as such (SPARK-8366) if (taskEnd.reason != Success) { if (totalPendingTasks() == 0) { allocationManager.onSchedulerBacklogged() } if (taskEnd.taskInfo.speculative) { stageIdToSpeculativeTaskIndices.get(stageId).foreach {_.remove(taskIndex)} } else { stageIdToTaskIndices.get(stageId).foreach {_.remove(taskIndex)} } } } } override def onExecutorAdded(executorAdded: SparkListenerExecutorAdded): Unit = { val executorId = executorAdded.executorId if (executorId != SparkContext.DRIVER_IDENTIFIER) { // This guards against the race condition in which the `SparkListenerTaskStart` // event is posted before the `SparkListenerBlockManagerAdded` event, which is // possible because these events are posted in different threads. (see SPARK-4951) if (!allocationManager.executorIds.contains(executorId)) { allocationManager.onExecutorAdded(executorId) } } } override def onExecutorRemoved(executorRemoved: SparkListenerExecutorRemoved): Unit = { allocationManager.onExecutorRemoved(executorRemoved.executorId) } override def onSpeculativeTaskSubmitted(speculativeTask: SparkListenerSpeculativeTaskSubmitted) : Unit = { val stageId = speculativeTask.stageId allocationManager.synchronized { stageIdToNumSpeculativeTasks(stageId) = stageIdToNumSpeculativeTasks.getOrElse(stageId, 0) + 1 allocationManager.onSchedulerBacklogged() } } /** * An estimate of the total number of pending tasks remaining for currently running stages. Does * not account for tasks which may have failed and been resubmitted. * * Note: This is not thread-safe without the caller owning the `allocationManager` lock. 
*/ def pendingTasks(): Int = { stageIdToNumTasks.map { case (stageId, numTasks) => numTasks - stageIdToTaskIndices.get(stageId).map(_.size).getOrElse(0) }.sum } def pendingSpeculativeTasks(): Int = { stageIdToNumSpeculativeTasks.map { case (stageId, numTasks) => numTasks - stageIdToSpeculativeTaskIndices.get(stageId).map(_.size).getOrElse(0) }.sum } def totalPendingTasks(): Int = { pendingTasks + pendingSpeculativeTasks } /** * The number of tasks currently running across all stages. */ def totalRunningTasks(): Int = { stageIdToNumRunningTask.values.sum } /** * Return true if an executor is not currently running a task, and false otherwise. * * Note: This is not thread-safe without the caller owning the `allocationManager` lock. */ def isExecutorIdle(executorId: String): Boolean = { !executorIdToTaskIds.contains(executorId) } /** * Update the Executor placement hints (the number of tasks with locality preferences, * a map where each pair is a node and the number of tasks that would like to be scheduled * on that node). * * These hints are updated when stages arrive and complete, so are not up-to-date at task * granularity within stages. */ def updateExecutorPlacementHints(): Unit = { var localityAwareTasks = 0 val localityToCount = new mutable.HashMap[String, Int]() stageIdToExecutorPlacementHints.values.foreach { case (numTasksPending, localities) => localityAwareTasks += numTasksPending localities.foreach { case (hostname, count) => val updatedCount = localityToCount.getOrElse(hostname, 0) + count localityToCount(hostname) = updatedCount } } allocationManager.localityAwareTasks = localityAwareTasks allocationManager.hostToLocalTaskCount = localityToCount.toMap } } /** * Metric source for ExecutorAllocationManager to expose its internal executor allocation * status to MetricsSystem. * Note: These metrics heavily rely on the internal implementation of * ExecutorAllocationManager, metrics or value of metrics will be changed when internal * implementation is changed, so these metrics are not stable across Spark version. */ private[spark] class ExecutorAllocationManagerSource extends Source { val sourceName = "ExecutorAllocationManager" val metricRegistry = new MetricRegistry() private def registerGauge[T](name: String, value: => T, defaultValue: T): Unit = { metricRegistry.register(MetricRegistry.name("executors", name), new Gauge[T] { override def getValue: T = synchronized { Option(value).getOrElse(defaultValue) } }) } registerGauge("numberExecutorsToAdd", numExecutorsToAdd, 0) registerGauge("numberExecutorsPendingToRemove", executorsPendingToRemove.size, 0) registerGauge("numberAllExecutors", executorIds.size, 0) registerGauge("numberTargetExecutors", numExecutorsTarget, 0) registerGauge("numberMaxNeededExecutors", maxNumExecutorsNeeded(), 0) } } private object ExecutorAllocationManager { val NOT_SET = Long.MaxValue }
cin/spark
core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
Scala
apache-2.0
36,821
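The allocation loop above clamps its executor target between the configured bounds, never asks for more than the current need, and doubles the increment while the scheduler stays backlogged. Below is a minimal, self-contained sketch of that target arithmetic; the names and the simplified State record are assumptions for illustration, not Spark's internal API.

// Simplified sketch of the ramp-up arithmetic in updateAndSyncNumExecutorsTarget/addExecutors above.
object AllocationSketch {
  final case class State(target: Int, toAdd: Int)

  def step(state: State, maxNeeded: Int, currentlyAllocated: Int, minExecs: Int, maxExecs: Int): State = {
    // Never ramp up below what is already allocated, then boost exponentially,
    // but stay within both the current need and the configured bounds.
    val boosted = math.max(state.target, currentlyAllocated) + state.toAdd
    val bounded = math.max(math.min(math.min(boosted, maxNeeded), maxExecs), minExecs)
    val delta = bounded - state.target
    // Double the increment only when the full increment was granted; otherwise reset it.
    val nextToAdd = if (delta == state.toAdd) state.toAdd * 2 else 1
    State(bounded, nextToAdd)
  }
}

// Starting from State(2, 1) with 10 executors needed, bounds [0, 8], and the cluster keeping up
// with each request, the target grows 3, 5, 8 and then stays at the upper bound.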
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.deploy.worker import java.io.{File, FileOutputStream, InputStream, IOException} import scala.collection.JavaConverters._ import scala.collection.Map import org.apache.spark.SecurityManager import org.apache.spark.deploy.Command import org.apache.spark.internal.Logging import org.apache.spark.launcher.WorkerCommandBuilder import org.apache.spark.util.Utils /** * Utilities for running commands with the spark classpath. */ private[deploy] object CommandUtils extends Logging { /** * Build a ProcessBuilder based on the given parameters. * The `env` argument is exposed for testing. */ def buildProcessBuilder( command: Command, securityMgr: SecurityManager, memory: Int, sparkHome: String, substituteArguments: String => String, classPaths: Seq[String] = Seq[String](), env: Map[String, String] = sys.env): ProcessBuilder = { // Build a command based on the given one, taking into account the local environment where it will run, substituting any placeholders and appending any extra class paths. val localCommand = buildLocalCommand( command, securityMgr, substituteArguments, classPaths, env) val commandSeq = buildCommandSeq(localCommand, memory, sparkHome) val builder = new ProcessBuilder(commandSeq: _*) val environment = builder.environment() for ((key, value) <- localCommand.environment) { environment.put(key, value) } builder } private def buildCommandSeq(command: Command, memory: Int, sparkHome: String): Seq[String] = { // SPARK-698: do not call the run.cmd script, as process.destroy() // fails to kill a process tree on Windows val cmd = new WorkerCommandBuilder(sparkHome, memory, command).buildCommand() cmd.asScala ++ Seq(command.mainClass) ++ command.arguments } /** * Build a command based on the given one, taking into account the local environment * of where this command is expected to run, substitute any placeholders, and append * any extra class paths.
*/ private def buildLocalCommand( command: Command, securityMgr: SecurityManager, substituteArguments: String => String, classPath: Seq[String] = Seq[String](), env: Map[String, String]): Command = { val libraryPathName = Utils.libraryPathEnvName val libraryPathEntries = command.libraryPathEntries val cmdLibraryPath = command.environment.get(libraryPathName) var newEnvironment = if (libraryPathEntries.nonEmpty && libraryPathName.nonEmpty) { val libraryPaths = libraryPathEntries ++ cmdLibraryPath ++ env.get(libraryPathName) command.environment + ((libraryPathName, libraryPaths.mkString(File.pathSeparator))) } else { command.environment } // set auth secret to env variable if needed if (securityMgr.isAuthenticationEnabled) { newEnvironment += (SecurityManager.ENV_AUTH_SECRET -> securityMgr.getSecretKey) } Command( command.mainClass, command.arguments.map(substituteArguments), newEnvironment, command.classPathEntries ++ classPath, Seq[String](), // library path already captured in environment variable // filter out auth secret from java options command.javaOpts.filterNot(_.startsWith("-D" + SecurityManager.SPARK_AUTH_SECRET_CONF))) } /** Spawn a thread that will redirect a given stream to a file */ def redirectStream(in: InputStream, file: File) { val out = new FileOutputStream(file, true) // TODO: It would be nice to add a shutdown hook here that explains why the output is // terminating. Otherwise if the worker dies the executor logs will silently stop. new Thread("redirect output to " + file) { override def run() { try { Utils.copyStream(in, out, true) } catch { case e: IOException => logInfo("Redirection to " + file + " closed: " + e.getMessage) } } }.start() } }
spark0001/spark2.1.1
core/src/main/scala/org/apache/spark/deploy/worker/CommandUtils.scala
Scala
apache-2.0
4,766
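buildLocalCommand above folds the command's native-library path entries together with any library path already present in the command's or the parent's environment before spawning the child process. Here is a stand-alone sketch of just that merge step; the object and function names are assumptions, only the merge order mirrors the code above.

import java.io.File

object LibraryPathMergeSketch {
  def mergedEnv(
      commandEnv: Map[String, String],
      libraryPathName: String,            // e.g. "LD_LIBRARY_PATH" on Linux
      libraryPathEntries: Seq[String],
      parentEnv: Map[String, String]): Map[String, String] = {
    if (libraryPathEntries.nonEmpty && libraryPathName.nonEmpty) {
      // Command entries first, then whatever the command or the parent process already had.
      val paths = libraryPathEntries ++ commandEnv.get(libraryPathName) ++ parentEnv.get(libraryPathName)
      commandEnv + (libraryPathName -> paths.mkString(File.pathSeparator))
    } else {
      commandEnv
    }
  }
}

// mergedEnv(Map.empty, "LD_LIBRARY_PATH", Seq("/opt/native"), sys.env)
// yields a map whose "LD_LIBRARY_PATH" starts with "/opt/native" followed by any inherited value.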
/** * * IncomingTransactionDialogFragment * Ledger wallet * * Created by Pierre Pollastri on 22/01/15. * * The MIT License (MIT) * * Copyright (c) 2015 Ledger * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ package com.ledger.ledgerwallet.app.m2fa import java.text.SimpleDateFormat import java.util.Locale import android.content.DialogInterface import android.os.Bundle import android.view.{View, ViewGroup, LayoutInflater} import com.ledger.ledgerwallet.R import com.ledger.ledgerwallet.base.BaseDialogFragment import com.ledger.ledgerwallet.bitcoin.AmountFormatter import com.ledger.ledgerwallet.remote.api.m2fa.IncomingTransactionAPI import com.ledger.ledgerwallet.utils.TR import com.ledger.ledgerwallet.view.DialogActionBarController import com.ledger.ledgerwallet.widget.TextView class IncomingTransactionDialogFragment extends BaseDialogFragment { lazy val actions = DialogActionBarController(R.id.dialog_action_bar).noNeutralButton lazy val amount = TR(R.id.amount).as[TextView] lazy val address = TR(R.id.address).as[TextView] lazy val date = TR(R.id.date).as[TextView] lazy val name = TR(R.id.dongle_name).as[TextView] private[this] var _transaction: Option[IncomingTransactionAPI#IncomingTransaction] = None def this(tx: IncomingTransactionAPI#IncomingTransaction) { this() _transaction = Option(tx) setCancelable(false) } override def onCreateView(inflater: LayoutInflater, container: ViewGroup, savedInstanceState: Bundle): View = { inflater.inflate(R.layout.incoming_transaction_dialog_fragment, container, false) } override def onResume(): Unit = { super.onResume() if (_transaction.isEmpty || _transaction.get.isDone) dismiss() _transaction.foreach(_.onCancelled(dismiss)) } override def onPause(): Unit = { super.onPause() _transaction.foreach(_.onCancelled(null)) dismissAllowingStateLoss() } override def onViewCreated(view: View, savedInstanceState: Bundle): Unit = { super.onViewCreated(view, savedInstanceState) actions onPositiveClick { _transaction.foreach(_.accept()) _transaction = None dismiss() } actions onNegativeClick { _transaction.foreach(_.reject()) _transaction = None dismiss() } _transaction match { case Some(transaction) => amount.setText(AmountFormatter.Bitcoin.format(transaction.amount)) address.setText(transaction.address) name.setText(transaction.dongle.name.get) android.text.format.DateFormat.getBestDateTimePattern(Locale.getDefault, "dd/MM/yyyy") val df = new SimpleDateFormat(android.text.format.DateFormat.getBestDateTimePattern(Locale.getDefault, "dd/MM/yyyy")) val hf = 
android.text.format.DateFormat.getTimeFormat(getActivity) date.setText(TR(R.string.incoming_tx_date).as[String].format(df.format(transaction.date), hf.format(transaction.date))) case _ => } } override def onDismiss(dialog: DialogInterface): Unit = { super.onDismiss(dialog) _transaction.foreach(_.cancel()) } } object IncomingTransactionDialogFragment { val DefaultTag = "IncomingTransactionDialogFragment" }
Morveus/ledger-wallet-android
app/src/main/scala/com/ledger/ledgerwallet/app/m2fa/IncomingTransactionDialogFragment.scala
Scala
mit
4,211
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml.feature import org.apache.spark.ml.attribute.{AttributeGroup, BinaryAttribute, NominalAttribute} import org.apache.spark.ml.linalg.{Vector, Vectors, VectorUDT} import org.apache.spark.ml.param.ParamsSuite import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTest} import org.apache.spark.sql.{Encoder, Row} import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder import org.apache.spark.sql.functions.col import org.apache.spark.sql.types._ class OneHotEncoderEstimatorSuite extends MLTest with DefaultReadWriteTest { import testImplicits._ test("params") { ParamsSuite.checkParams(new OneHotEncoderEstimator) } test("OneHotEncoderEstimator dropLast = false") { val data = Seq( Row(0.0, Vectors.sparse(3, Seq((0, 1.0)))), Row(1.0, Vectors.sparse(3, Seq((1, 1.0)))), Row(2.0, Vectors.sparse(3, Seq((2, 1.0)))), Row(0.0, Vectors.sparse(3, Seq((0, 1.0)))), Row(0.0, Vectors.sparse(3, Seq((0, 1.0)))), Row(2.0, Vectors.sparse(3, Seq((2, 1.0))))) val schema = StructType(Array( StructField("input", DoubleType), StructField("expected", new VectorUDT))) val df = spark.createDataFrame(sc.parallelize(data), schema) val encoder = new OneHotEncoderEstimator() .setInputCols(Array("input")) .setOutputCols(Array("output")) assert(encoder.getDropLast === true) encoder.setDropLast(false) assert(encoder.getDropLast === false) val model = encoder.fit(df) testTransformer[(Double, Vector)](df, model, "output", "expected") { case Row(output: Vector, expected: Vector) => assert(output === expected) } } test("OneHotEncoderEstimator dropLast = true") { val data = Seq( Row(0.0, Vectors.sparse(2, Seq((0, 1.0)))), Row(1.0, Vectors.sparse(2, Seq((1, 1.0)))), Row(2.0, Vectors.sparse(2, Seq())), Row(0.0, Vectors.sparse(2, Seq((0, 1.0)))), Row(0.0, Vectors.sparse(2, Seq((0, 1.0)))), Row(2.0, Vectors.sparse(2, Seq()))) val schema = StructType(Array( StructField("input", DoubleType), StructField("expected", new VectorUDT))) val df = spark.createDataFrame(sc.parallelize(data), schema) val encoder = new OneHotEncoderEstimator() .setInputCols(Array("input")) .setOutputCols(Array("output")) val model = encoder.fit(df) testTransformer[(Double, Vector)](df, model, "output", "expected") { case Row(output: Vector, expected: Vector) => assert(output === expected) } } test("input column with ML attribute") { val attr = NominalAttribute.defaultAttr.withValues("small", "medium", "large") val df = Seq(0.0, 1.0, 2.0, 1.0).map(Tuple1.apply).toDF("size") .select(col("size").as("size", attr.toMetadata())) val encoder = new OneHotEncoderEstimator() .setInputCols(Array("size")) .setOutputCols(Array("encoded")) val model = encoder.fit(df) testTransformerByGlobalCheckFunc[(Double)](df, model, "encoded") { rows => val group = 
AttributeGroup.fromStructField(rows.head.schema("encoded")) assert(group.size === 2) assert(group.getAttr(0) === BinaryAttribute.defaultAttr.withName("small").withIndex(0)) assert(group.getAttr(1) === BinaryAttribute.defaultAttr.withName("medium").withIndex(1)) } } test("input column without ML attribute") { val df = Seq(0.0, 1.0, 2.0, 1.0).map(Tuple1.apply).toDF("index") val encoder = new OneHotEncoderEstimator() .setInputCols(Array("index")) .setOutputCols(Array("encoded")) val model = encoder.fit(df) testTransformerByGlobalCheckFunc[(Double)](df, model, "encoded") { rows => val group = AttributeGroup.fromStructField(rows.head.schema("encoded")) assert(group.size === 2) assert(group.getAttr(0) === BinaryAttribute.defaultAttr.withName("0").withIndex(0)) assert(group.getAttr(1) === BinaryAttribute.defaultAttr.withName("1").withIndex(1)) } } test("read/write") { val encoder = new OneHotEncoderEstimator() .setInputCols(Array("index")) .setOutputCols(Array("encoded")) testDefaultReadWrite(encoder) } test("OneHotEncoderModel read/write") { val instance = new OneHotEncoderModel("myOneHotEncoderModel", Array(1, 2, 3)) val newInstance = testDefaultReadWrite(instance) assert(newInstance.categorySizes === instance.categorySizes) } test("OneHotEncoderEstimator with varying types") { val data = Seq( Row(0.0, Vectors.sparse(3, Seq((0, 1.0)))), Row(1.0, Vectors.sparse(3, Seq((1, 1.0)))), Row(2.0, Vectors.sparse(3, Seq((2, 1.0)))), Row(0.0, Vectors.sparse(3, Seq((0, 1.0)))), Row(0.0, Vectors.sparse(3, Seq((0, 1.0)))), Row(2.0, Vectors.sparse(3, Seq((2, 1.0))))) val schema = StructType(Array( StructField("input", DoubleType), StructField("expected", new VectorUDT))) val df = spark.createDataFrame(sc.parallelize(data), schema) class NumericTypeWithEncoder[A](val numericType: NumericType) (implicit val encoder: Encoder[(A, Vector)]) val types = Seq( new NumericTypeWithEncoder[Short](ShortType), new NumericTypeWithEncoder[Long](LongType), new NumericTypeWithEncoder[Int](IntegerType), new NumericTypeWithEncoder[Float](FloatType), new NumericTypeWithEncoder[Byte](ByteType), new NumericTypeWithEncoder[Double](DoubleType), new NumericTypeWithEncoder[Decimal](DecimalType(10, 0))(ExpressionEncoder())) for (t <- types) { val dfWithTypes = df.select(col("input").cast(t.numericType), col("expected")) val estimator = new OneHotEncoderEstimator() .setInputCols(Array("input")) .setOutputCols(Array("output")) .setDropLast(false) val model = estimator.fit(dfWithTypes) testTransformer(dfWithTypes, model, "output", "expected") { case Row(output: Vector, expected: Vector) => assert(output === expected) }(t.encoder) } } test("OneHotEncoderEstimator: encoding multiple columns and dropLast = false") { val data = Seq( Row(0.0, Vectors.sparse(3, Seq((0, 1.0))), 2.0, Vectors.sparse(4, Seq((2, 1.0)))), Row(1.0, Vectors.sparse(3, Seq((1, 1.0))), 3.0, Vectors.sparse(4, Seq((3, 1.0)))), Row(2.0, Vectors.sparse(3, Seq((2, 1.0))), 0.0, Vectors.sparse(4, Seq((0, 1.0)))), Row(0.0, Vectors.sparse(3, Seq((0, 1.0))), 1.0, Vectors.sparse(4, Seq((1, 1.0)))), Row(0.0, Vectors.sparse(3, Seq((0, 1.0))), 0.0, Vectors.sparse(4, Seq((0, 1.0)))), Row(2.0, Vectors.sparse(3, Seq((2, 1.0))), 2.0, Vectors.sparse(4, Seq((2, 1.0))))) val schema = StructType(Array( StructField("input1", DoubleType), StructField("expected1", new VectorUDT), StructField("input2", DoubleType), StructField("expected2", new VectorUDT))) val df = spark.createDataFrame(sc.parallelize(data), schema) val encoder = new OneHotEncoderEstimator() .setInputCols(Array("input1", 
"input2")) .setOutputCols(Array("output1", "output2")) assert(encoder.getDropLast === true) encoder.setDropLast(false) assert(encoder.getDropLast === false) val model = encoder.fit(df) testTransformer[(Double, Vector, Double, Vector)]( df, model, "output1", "output2", "expected1", "expected2") { case Row(output1: Vector, output2: Vector, expected1: Vector, expected2: Vector) => assert(output1 === expected1) assert(output2 === expected2) } } test("OneHotEncoderEstimator: encoding multiple columns and dropLast = true") { val data = Seq( Row(0.0, Vectors.sparse(2, Seq((0, 1.0))), 2.0, Vectors.sparse(3, Seq((2, 1.0)))), Row(1.0, Vectors.sparse(2, Seq((1, 1.0))), 3.0, Vectors.sparse(3, Seq())), Row(2.0, Vectors.sparse(2, Seq()), 0.0, Vectors.sparse(3, Seq((0, 1.0)))), Row(0.0, Vectors.sparse(2, Seq((0, 1.0))), 1.0, Vectors.sparse(3, Seq((1, 1.0)))), Row(0.0, Vectors.sparse(2, Seq((0, 1.0))), 0.0, Vectors.sparse(3, Seq((0, 1.0)))), Row(2.0, Vectors.sparse(2, Seq()), 2.0, Vectors.sparse(3, Seq((2, 1.0))))) val schema = StructType(Array( StructField("input1", DoubleType), StructField("expected1", new VectorUDT), StructField("input2", DoubleType), StructField("expected2", new VectorUDT))) val df = spark.createDataFrame(sc.parallelize(data), schema) val encoder = new OneHotEncoderEstimator() .setInputCols(Array("input1", "input2")) .setOutputCols(Array("output1", "output2")) val model = encoder.fit(df) testTransformer[(Double, Vector, Double, Vector)]( df, model, "output1", "output2", "expected1", "expected2") { case Row(output1: Vector, output2: Vector, expected1: Vector, expected2: Vector) => assert(output1 === expected1) assert(output2 === expected2) } } test("Throw error on invalid values") { val trainingData = Seq((0, 0), (1, 1), (2, 2)) val trainingDF = trainingData.toDF("id", "a") val testData = Seq((0, 0), (1, 2), (1, 3)) val testDF = testData.toDF("id", "a") val encoder = new OneHotEncoderEstimator() .setInputCols(Array("a")) .setOutputCols(Array("encoded")) val model = encoder.fit(trainingDF) testTransformerByInterceptingException[(Int, Int)]( testDF, model, expectedMessagePart = "Unseen value: 3.0. To handle unseen values", firstResultCol = "encoded") } test("Can't transform on negative input") { val trainingDF = Seq((0, 0), (1, 1), (2, 2)).toDF("a", "b") val testDF = Seq((0, 0), (-1, 2), (1, 3)).toDF("a", "b") val encoder = new OneHotEncoderEstimator() .setInputCols(Array("a")) .setOutputCols(Array("encoded")) val model = encoder.fit(trainingDF) testTransformerByInterceptingException[(Int, Int)]( testDF, model, expectedMessagePart = "Negative value: -1.0. 
Input can't be negative", firstResultCol = "encoded") } test("Keep on invalid values: dropLast = false") { val trainingDF = Seq(Tuple1(0), Tuple1(1), Tuple1(2)).toDF("input") val testData = Seq( Row(0.0, Vectors.sparse(4, Seq((0, 1.0)))), Row(1.0, Vectors.sparse(4, Seq((1, 1.0)))), Row(3.0, Vectors.sparse(4, Seq((3, 1.0))))) val schema = StructType(Array( StructField("input", DoubleType), StructField("expected", new VectorUDT))) val testDF = spark.createDataFrame(sc.parallelize(testData), schema) val encoder = new OneHotEncoderEstimator() .setInputCols(Array("input")) .setOutputCols(Array("output")) .setHandleInvalid("keep") .setDropLast(false) val model = encoder.fit(trainingDF) testTransformer[(Double, Vector)](testDF, model, "output", "expected") { case Row(output: Vector, expected: Vector) => assert(output === expected) } } test("Keep on invalid values: dropLast = true") { val trainingDF = Seq(Tuple1(0), Tuple1(1), Tuple1(2)).toDF("input") val testData = Seq( Row(0.0, Vectors.sparse(3, Seq((0, 1.0)))), Row(1.0, Vectors.sparse(3, Seq((1, 1.0)))), Row(3.0, Vectors.sparse(3, Seq()))) val schema = StructType(Array( StructField("input", DoubleType), StructField("expected", new VectorUDT))) val testDF = spark.createDataFrame(sc.parallelize(testData), schema) val encoder = new OneHotEncoderEstimator() .setInputCols(Array("input")) .setOutputCols(Array("output")) .setHandleInvalid("keep") .setDropLast(true) val model = encoder.fit(trainingDF) testTransformer[(Double, Vector)](testDF, model, "output", "expected") { case Row(output: Vector, expected: Vector) => assert(output === expected) } } test("OneHotEncoderModel changes dropLast") { val data = Seq( Row(0.0, Vectors.sparse(3, Seq((0, 1.0))), Vectors.sparse(2, Seq((0, 1.0)))), Row(1.0, Vectors.sparse(3, Seq((1, 1.0))), Vectors.sparse(2, Seq((1, 1.0)))), Row(2.0, Vectors.sparse(3, Seq((2, 1.0))), Vectors.sparse(2, Seq())), Row(0.0, Vectors.sparse(3, Seq((0, 1.0))), Vectors.sparse(2, Seq((0, 1.0)))), Row(0.0, Vectors.sparse(3, Seq((0, 1.0))), Vectors.sparse(2, Seq((0, 1.0)))), Row(2.0, Vectors.sparse(3, Seq((2, 1.0))), Vectors.sparse(2, Seq()))) val schema = StructType(Array( StructField("input", DoubleType), StructField("expected1", new VectorUDT), StructField("expected2", new VectorUDT))) val df = spark.createDataFrame(sc.parallelize(data), schema) val encoder = new OneHotEncoderEstimator() .setInputCols(Array("input")) .setOutputCols(Array("output")) val model = encoder.fit(df) model.setDropLast(false) testTransformer[(Double, Vector, Vector)](df, model, "output", "expected1") { case Row(output: Vector, expected1: Vector) => assert(output === expected1) } model.setDropLast(true) testTransformer[(Double, Vector, Vector)](df, model, "output", "expected2") { case Row(output: Vector, expected2: Vector) => assert(output === expected2) } } test("OneHotEncoderModel changes handleInvalid") { val trainingDF = Seq(Tuple1(0), Tuple1(1), Tuple1(2)).toDF("input") val testData = Seq( Row(0.0, Vectors.sparse(4, Seq((0, 1.0)))), Row(1.0, Vectors.sparse(4, Seq((1, 1.0)))), Row(3.0, Vectors.sparse(4, Seq((3, 1.0))))) val schema = StructType(Array( StructField("input", DoubleType), StructField("expected", new VectorUDT))) val testDF = spark.createDataFrame(sc.parallelize(testData), schema) val encoder = new OneHotEncoderEstimator() .setInputCols(Array("input")) .setOutputCols(Array("output")) val model = encoder.fit(trainingDF) model.setHandleInvalid("error") testTransformerByInterceptingException[(Double, Vector)]( testDF, model, expectedMessagePart = "Unseen 
value: 3.0. To handle unseen values", firstResultCol = "output") model.setHandleInvalid("keep") testTransformerByGlobalCheckFunc[(Double, Vector)](testDF, model, "output") { _ => } } test("Transforming on mismatched attributes") { val attr = NominalAttribute.defaultAttr.withValues("small", "medium", "large") val df = Seq(0.0, 1.0, 2.0, 1.0).map(Tuple1.apply).toDF("size") .select(col("size").as("size", attr.toMetadata())) val encoder = new OneHotEncoderEstimator() .setInputCols(Array("size")) .setOutputCols(Array("encoded")) val model = encoder.fit(df) val testAttr = NominalAttribute.defaultAttr.withValues("tiny", "small", "medium", "large") val testDF = Seq(0.0, 1.0, 2.0, 3.0).map(Tuple1.apply).toDF("size") .select(col("size").as("size", testAttr.toMetadata())) testTransformerByInterceptingException[(Double)]( testDF, model, expectedMessagePart = "OneHotEncoderModel expected 2 categorical values", firstResultCol = "encoded") } }
bravo-zhang/spark
mllib/src/test/scala/org/apache/spark/ml/feature/OneHotEncoderEstimatorSuite.scala
Scala
apache-2.0
15,927
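The assertions in the suite above pin down the encoding rule: with dropLast = false a category c out of n becomes an n-sized vector with a single 1.0 at index c, and with dropLast = true the vector shrinks to n - 1 and the last category becomes all zeros. A tiny pure-Scala sketch of that rule (a hypothetical helper, independent of the Spark API):

// Hypothetical helper reproducing the one-hot layout the tests above expect.
object OneHotSketch {
  // Returns the vector size together with its active (index, value) pairs.
  def encode(category: Int, numCategories: Int, dropLast: Boolean): (Int, Seq[(Int, Double)]) = {
    val size = if (dropLast) numCategories - 1 else numCategories
    // With dropLast = true the last category maps to the all-zero vector.
    val active = if (category < size) Seq(category -> 1.0) else Seq.empty[(Int, Double)]
    (size, active)
  }
}

// encode(2, 3, dropLast = false) == (3, Seq(2 -> 1.0))  // cf. Vectors.sparse(3, Seq((2, 1.0)))
// encode(2, 3, dropLast = true)  == (2, Seq())          // cf. Vectors.sparse(2, Seq())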
/* * Copyright (c) 2014-2015 by its authors. Some rights reserved. * See the project homepage at: http://www.monifu.org * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monifu.reactive.internals.operators import monifu.reactive.Ack.Continue import monifu.reactive.{Observer, Observable} import scala.concurrent.duration.Duration.Zero import scala.util.Success object MinBySuite extends BaseOperatorSuite { def createObservable(sourceCount: Int) = Some { val o = Observable.range(sourceCount, 0, -1).minBy(x => sourceCount - x) Sample(o, count(sourceCount), sum(sourceCount), Zero, Zero) } def observableInError(sourceCount: Int, ex: Throwable) = Some { val o = Observable.create[Long] { subscriber => implicit val s = subscriber.scheduler val source = createObservableEndingInError(Observable.range(sourceCount, 0, -1), ex) .minBy(x => sourceCount - x) subscriber.onNext(sum(sourceCount)).onComplete { case Success(Continue) => source.subscribe(subscriber) case _ => () } } Sample(o, count(sourceCount), sum(sourceCount), Zero, Zero) } def count(sourceCount: Int) = 1 def sum(sourceCount: Int) = sourceCount def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = Some { val o = Observable.range(0, sourceCount) .minBy(x => if (x == sourceCount-1) throw ex else x) Sample(o, 0, 0, Zero, Zero) } test("empty observable should be empty") { implicit s => val source: Observable[Long] = Observable.empty var received = 0 var wasCompleted = false source.minBy(x => 100 - x).onSubscribe(new Observer[Long] { def onNext(elem: Long) = { received += 1; Continue } def onError(ex: Throwable) = () def onComplete() = { wasCompleted = true } }) assertEquals(received, 0) assert(wasCompleted) } }
sergius/monifu
monifu/shared/src/test/scala/monifu/reactive/internals/operators/MinBySuite.scala
Scala
apache-2.0
2,394
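The expected values in this suite come from minBy emitting the single element that minimizes the key function: for Observable.range(sourceCount, 0, -1) with key x => sourceCount - x, that element is sourceCount itself, hence sum(sourceCount) == sourceCount. The same semantics shown with a plain Scala collection rather than the monifu operator:

// Plain-collection illustration of the element minBy is expected to emit.
val sourceCount = 5
val elements = sourceCount.toLong until 0L by -1L     // 5, 4, 3, 2, 1
val picked = elements.minBy(x => sourceCount - x)     // key is smallest for x = 5
assert(picked == sourceCount)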
/* * WebBrowser.scala * (Mellite) * * Copyright (c) 2012-2022 Hanns Holger Rutz. All rights reserved. * * This software is published under the GNU Affero General Public License v3+ * * * For further information, please contact Hanns Holger Rutz at * [email protected] */ package de.sciss.mellite import java.awt.GraphicsEnvironment import de.sciss.desktop.impl.WindowImpl import de.sciss.desktop.{Window, WindowHandler} import de.sciss.lucre.swing.LucreSwing.requireEDT import dotterweide.ide.{AbstractDocBrowser, DocBrowser} import javax.swing.JComponent import scala.swing.Component object WebBrowser { def instance: DocBrowser = Impl private object Impl extends AbstractDocBrowser { impl => private[this] val frame: WindowImpl = new WindowImpl { w => def handler: WindowHandler = Mellite.windowHandler override protected def style: Window.Style = Window.Auxiliary title = baseTitle // XXX TODO yes, we need to get rid of JFX contents = { val mPanel = impl.getClass.getMethod("fxPanel") val panel = mPanel.invoke(impl).asInstanceOf[JComponent] Component.wrap(panel) } bounds = { val gc = GraphicsEnvironment.getLocalGraphicsEnvironment.getDefaultScreenDevice.getDefaultConfiguration val r = gc.getBounds val x2 = r.x + r.width r.width = math.min(r.width/2, 960) r.x = x2 - r.width val h = r.height r.height = math.min(r.height, 960) r.y = r.y + (h - r.height)/2 r } front() } def title: String = frame.title def title_=(value: String): Unit = { requireEDT() frame.title = value if (!frame.visible) frame.visible = true } def dispose(): Unit = frame.dispose() } }
Sciss/Mellite
app/src/main/scala/de/sciss/mellite/WebBrowser.scala
Scala
agpl-3.0
1,829
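The bounds block above docks the documentation window to the right edge of the screen, capping its width at half the screen (at most 960 px) and vertically centering a height of at most 960 px. The same arithmetic as a small pure function; the Rect case class is an assumption standing in for java.awt.Rectangle, which the real code mutates in place.

// Hypothetical pure version of the docking arithmetic used for the browser window.
final case class Rect(x: Int, y: Int, width: Int, height: Int)

def dockRight(screen: Rect, maxW: Int = 960, maxH: Int = 960): Rect = {
  val w = math.min(screen.width / 2, maxW)   // at most half the screen, capped at maxW
  val x = screen.x + screen.width - w        // flush with the right edge
  val h = math.min(screen.height, maxH)
  val y = screen.y + (screen.height - h) / 2 // vertically centered
  Rect(x, y, w, h)
}

// dockRight(Rect(0, 0, 2560, 1440)) == Rect(1600, 240, 960, 960)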
package org.hmrc.pricing.models object Checkout { type Discount = BigDecimal def total(basket: Basket, discounts: (Basket => Discount)*): BigDecimal = { val discount = discounts.distinct.foldLeft(BigDecimal(0)) { (discountAcc, discount) => discountAcc + discount(basket) } basket.total - discount } } object Discounts { final val Apples2For1: Basket => Checkout.Discount = { basket => { val apples = basket.list collect { case a@Apple => a } // For every 2 apples in the basket we simply eliminate one from the total math.floor(apples.size / 2) * Apple.price } } final val Oranges3For2: Basket => Checkout.Discount = { basket => { val oranges = basket.list collect { case o@Orange => o } // For every 3 oranges we find in the basket we discount the price of one. math.floor(oranges.size / 3) * Orange.price } } }
alexflav23/hm
src/main/scala/org/hmrc/pricing/models/Checkout.scala
Scala
apache-2.0
936
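Checkout.total above subtracts each distinct discount function's value from the raw basket total, and the two discounts price every second apple and every third orange at zero. Below is a self-contained sketch of the same arithmetic; the item prices and the Basket shape used here are invented for illustration, since the real Basket, Apple and Orange definitions live elsewhere in this project.

// Hypothetical mini-model mirroring the Checkout/Discounts arithmetic above.
object PricingSketch {
  sealed trait Item { def price: BigDecimal }
  case object Apple extends Item { val price = BigDecimal("0.60") }   // assumed price
  case object Orange extends Item { val price = BigDecimal("0.25") }  // assumed price

  final case class Basket(list: Seq[Item]) { def total: BigDecimal = list.map(_.price).sum }

  // Integer division already floors, so n / 2 counts the free apples directly.
  def apples2For1(b: Basket): BigDecimal = BigDecimal(b.list.count(_ == Apple) / 2) * Apple.price
  def oranges3For2(b: Basket): BigDecimal = BigDecimal(b.list.count(_ == Orange) / 3) * Orange.price

  def total(b: Basket, discounts: (Basket => BigDecimal)*): BigDecimal =
    b.total - discounts.map(_(b)).sum
}

// Three apples and three oranges: 3 * 0.60 + 3 * 0.25 = 2.55, minus one free apple (0.60)
// and one free orange (0.25), so the discounted total is 1.70.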
package ch.wsl.box.client.views /** * Created by andre on 4/3/2017. */ import ch.wsl.box.client.routes.Routes import ch.wsl.box.client.services.{ClientConf, Labels, Navigate} import ch.wsl.box.client.styles.{BootstrapCol, GlobalStyles} import io.udash._ import io.udash.bootstrap.BootstrapStyles import io.udash.bootstrap.form.UdashForm import io.udash.core.Presenter import org.scalajs.dom.{Element, Event} import ch.wsl.box.client.{DataListState, DataState} import ch.wsl.box.client.views.components.widget.WidgetUtils import ch.wsl.box.model.shared.ExportDef import scalatags.generic case class DataList(list:Seq[ExportDef], currentEntity:Option[ExportDef], search:String, filteredList:Seq[ExportDef], kind:String) object DataList extends HasModelPropertyCreator[DataList] { implicit val blank: Blank[DataList] = Blank.Simple(DataList(Seq(),None,"",Seq(),"")) } case class DataListViewPresenter(modelName:String) extends ViewFactory[DataListState] { override def create(): (View, Presenter[DataListState]) = { val model = ModelProperty.blank[DataList] val presenter = new DataListPresenter(model) val view = new DataListView(model,presenter) (view,presenter) } } class DataListPresenter(model:ModelProperty[DataList]) extends Presenter[DataListState] { import ch.wsl.box.client.Context._ override def handleState(state: DataListState ): Unit = { model.subProp(_.kind).set(state.kind) // println(state.currentExport) services.rest.dataList(state.kind,services.clientSession.lang()).map{ exports => model.subSeq(_.list).set(exports) model.subSeq(_.filteredList).set(exports) val current = exports.find(_.function == state.currentExport) model.subProp(_.currentEntity).set(current) } } def updateExportsList() = { model.subProp(_.filteredList).set(model.subProp(_.list).get.filter(m => m.label.contains(model.get.search))) } } class DataListView(model:ModelProperty[DataList], presenter: DataListPresenter) extends ContainerView { import scalatags.JsDom.all._ import scalacss.ScalatagsCss._ import io.udash.css.CssView._ val sidebarGrid = BootstrapCol.md(2) def contentGrid = BootstrapCol.md(10) override def renderChild(view: Option[View]): Unit = { import io.udash.wrappers.jquery._ jQ(content).children().remove() if(view.isDefined) { view.get.getTemplate.applyTo(content) } } private val content: Element = div().render private def sidebar: Element = div(sidebarGrid)( Labels.exports.search, TextInput(model.subProp(_.search))(onkeyup :+= ((ev: Event) => presenter.updateExportsList())), produce(model.subProp(_.search)) { q => ul(ClientConf.style.noBullet)( repeat(model.subSeq(_.filteredList)){m => li(produce(m) { export => WidgetUtils.addTooltip(m.get.tooltip) (a(Navigate.click(DataState(model.get.kind,export.function)), m.get.label).render)._1 }).render } ).render } ).render override def getTemplate: scalatags.generic.Modifier[Element] = div(BootstrapStyles.Grid.row)( sidebar, div(contentGrid)( div(h1(Labels.exports.title)).render, produce(model)( m => m.currentEntity match { case None => div( p(Labels.exports.select) ).render case Some(model) => div().render } ), content ) ) }
Insubric/box
client/src/main/scala/ch/wsl/box/client/views/DataListView.scala
Scala
apache-2.0
3,445
package spark.partial import java.util.{HashMap => JHashMap} import java.util.{Map => JMap} import scala.collection.mutable.HashMap import scala.collection.Map import scala.collection.JavaConversions.mapAsScalaMap import spark.util.StatCounter /** * An ApproximateEvaluator for means by key. Returns a map of key to confidence interval. */ private[spark] class GroupedMeanEvaluator[T](totalOutputs: Int, confidence: Double) extends ApproximateEvaluator[JHashMap[T, StatCounter], Map[T, BoundedDouble]] { var outputsMerged = 0 var sums = new JHashMap[T, StatCounter] // Sum of counts for each key override def merge(outputId: Int, taskResult: JHashMap[T, StatCounter]) { outputsMerged += 1 val iter = taskResult.entrySet.iterator() while (iter.hasNext) { val entry = iter.next() val old = sums.get(entry.getKey) if (old != null) { old.merge(entry.getValue) } else { sums.put(entry.getKey, entry.getValue) } } } override def currentResult(): Map[T, BoundedDouble] = { if (outputsMerged == totalOutputs) { val result = new JHashMap[T, BoundedDouble](sums.size) val iter = sums.entrySet.iterator() while (iter.hasNext) { val entry = iter.next() val mean = entry.getValue.mean result(entry.getKey) = new BoundedDouble(mean, 1.0, mean, mean) } result } else if (outputsMerged == 0) { new HashMap[T, BoundedDouble] } else { val p = outputsMerged.toDouble / totalOutputs val studentTCacher = new StudentTCacher(confidence) val result = new JHashMap[T, BoundedDouble](sums.size) val iter = sums.entrySet.iterator() while (iter.hasNext) { val entry = iter.next() val counter = entry.getValue val mean = counter.mean val stdev = math.sqrt(counter.sampleVariance / counter.count) val confFactor = studentTCacher.get(counter.count) val low = mean - confFactor * stdev val high = mean + confFactor * stdev result(entry.getKey) = new BoundedDouble(mean, confidence, low, high) } result } } }
koeninger/spark
core/src/main/scala/spark/partial/GroupedMeanEvaluator.scala
Scala
bsd-3-clause
2,146
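For a partial result the evaluator above reports, per key, mean +- t * sqrt(sampleVariance / count), where t is a Student-t quantile for the requested confidence. Here is a small stand-alone sketch of that interval computation; the t factor is passed in explicitly because StudentTCacher is Spark-internal, and the function and field names are assumptions.

// Hypothetical helper showing the per-key confidence-interval arithmetic.
final case class Interval(mean: Double, low: Double, high: Double)

def meanInterval(sum: Double, sumOfSquares: Double, count: Long, tFactor: Double): Interval = {
  val mean = sum / count
  // Unbiased sample variance: (sum of squares - n * mean^2) / (n - 1)
  val sampleVariance = (sumOfSquares - count * mean * mean) / (count - 1)
  val stdev = math.sqrt(sampleVariance / count)   // standard error of the mean
  Interval(mean, mean - tFactor * stdev, mean + tFactor * stdev)
}

// meanInterval(sum = 50.0, sumOfSquares = 270.0, count = 10, tFactor = 2.262)   // ~95% for n = 10
// gives mean 5.0 with half-width 2.262 * sqrt(((270 - 250) / 9) / 10), roughly 1.07.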
package net.lshift.diffa.agent.client /** * Copyright (C) 2010-2011 LShift Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import scala.collection.JavaConversions._ import com.sun.jersey.api.client.ClientResponse import net.lshift.diffa.kernel.frontend._ import net.lshift.diffa.client.RestClientParams class ConfigurationRestClient(serverRootUrl:String, domain:String, params: RestClientParams = RestClientParams.default) extends DomainAwareRestClient(serverRootUrl, domain, "domains/{domain}/config/", params) { def declareEndpoint(e:EndpointDef) = { create("endpoints", e) e } def declarePair(p:PairDef) = { create("pairs", p) p } def declareRepairAction(name: String, id: String, scope: String, pairKey: String) = { val action = new RepairActionDef(name, id, scope) create("/pairs/"+pairKey+"/repair-actions", action) action } def removeRepairAction(name: String, pairKey: String) { delete("/pairs/"+pairKey+"/repair-actions/"+name) } def declareEscalation(name: String, pairKey: String, action: String, actionType: String, rule: String, delay: Int) = { val escalation = new EscalationDef(name, action, actionType, rule, delay) create("/pairs/"+pairKey+"/escalations", escalation) escalation } def removeEscalation(name: String, pairKey: String) = { delete("/pairs/" + pairKey + "/escalations/" + name) } def makeDomainMember(userName: String, policy:String) = resource.path("members/" + policy + "/" + userName).post() def removeDomainMembership(userName: String, policy:String) = delete("/members/" + policy + "/" + userName) def listDomainMembers = rpc("members/",classOf[Array[PolicyMember]]) def deletePair(pairKey: String) = { val path = resource.path("pairs").path(pairKey) val response = path.delete(classOf[ClientResponse]) val status = response.getClientResponseStatus status.getStatusCode match { case 204 => // Successfully submitted (202 is "No Content") case x:Int => handleHTTPError(x,path, status) } } def getEndpoint(name:String) = rpc("endpoints/" + name, classOf[EndpointDef]) }
0x6e6562/diffa
agent/src/test/scala/net/lshift/diffa/agent/client/ConfigurationRestClient.scala
Scala
apache-2.0
2,664
package org.dama.datasynth.common.generators.property.empirical import java.io.{BufferedReader, InputStreamReader} import org.dama.datasynth.common.generators.property.PropertyGenerator import org.dama.datasynth.common.utils.FileUtils.File import org.dama.datasynth.runtime.spark.utils.RndGenerator import scala.io.Source /** * Created by aprat on 12/05/17. * * Property generator based on a distribution file. The file has two columns [value,probability], * where probability is the marginal probability of observing the given value * Probabilities of the second column must add 1 or be very close. */ class DistributionBasedGenerator[T](parser: (String) => T, file : File, separator: String) extends PropertyGenerator[T] { private val inputFileLines : List[String] = file.open().toList private val values : List[(T,Double)] = inputFileLines.map( line => line.split(separator)) .map( { case Array(value,prob) => (parser(value),prob.toDouble)}) private val probabilitiesSum = values.foldLeft(0.0)( { case (acc, (_,prob)) => acc + prob} ) if( (1.0-probabilitiesSum) > 0.001 || probabilitiesSum > 1.0) { throw new RuntimeException(s"Invalid input file. Probabilities do not add 1 but $probabilitiesSum") } val data = values.drop(1) .scanLeft(values(0))({ case ((prevValue,accProb),(value,prob)) => (value,accProb+prob)}) .toArray /** * Performs a binary search over the given array containing pairs value-probability, where probability is the * cumulative probability of observing such value. Thus, the array must be sorted increasingly by the probability. * The method returns the value whose probability is closer to the given probability, which has been obtained * uniformly. This method is widely known as the inverse sampling method. * @param data The array with pairs of value-probability * @param prob * @return */ def binarySearchForValue(data : Array[(T,Double)], prob : Double) : T = { def doSearch(data : Array[(T,Double)]) : T = { if (data.size == 1) { data(0) match { case (value,prob) => value } } else { val middleIndex:Int = data.size / 2 val (left,right):(Array[(T,Double)],Array[(T,Double)]) = data.splitAt(middleIndex) (data(middleIndex-1), data(middleIndex)) match { case ((_,_),(value, valProb)) if valProb == prob => value case ((_,_),(value, valProb)) if valProb < prob => doSearch(right) case ((value, valProb),(_,_)) if valProb > prob => doSearch(left) case ((_, _),(value,_)) => value } } } doSearch(data) } override def run(id: Long, random: Long, dependencies: Any*) : T = { val prob:Double = RndGenerator.toDouble(random) binarySearchForValue(data,prob) } }
DAMA-UPC/DataSynth
src/main/scala/org/dama/datasynth/common/generators/property/empirical/DistributionBasedGenerator.scala
Scala
gpl-3.0
2,799
package io.pipeline.prediction.jvm

import java.io.BufferedInputStream
import java.io.BufferedOutputStream
import java.io.File
import java.io.FileOutputStream
import java.util.zip.ZipEntry
import java.util.zip.ZipFile

object ZipFileUtil {
  // Extracts every entry of the zip archive at `filename` into the `outputPath` directory,
  // creating any missing parent directories on the way.
  def unzip(filename: String, outputPath: String): Unit = {
    val BUFFER = 2048

    val zipfile: ZipFile = new ZipFile(filename)
    try {
      val e = zipfile.entries()
      while (e.hasMoreElements()) {
        val entry: ZipEntry = e.nextElement().asInstanceOf[ZipEntry]
        System.out.println("Extracting: " + entry)
        val outFile = new File(outputPath, entry.getName())
        if (entry.isDirectory()) {
          outFile.mkdirs()
        } else {
          Option(outFile.getParentFile()).foreach(_.mkdirs())
          val is = new BufferedInputStream(zipfile.getInputStream(entry))
          val dest = new BufferedOutputStream(new FileOutputStream(outFile), BUFFER)
          try {
            val data: Array[Byte] = new Array[Byte](BUFFER)
            var count = is.read(data, 0, BUFFER)
            while (count != -1) {
              dest.write(data, 0, count)
              count = is.read(data, 0, BUFFER)
            }
            dest.flush()
          } finally {
            dest.close()
            is.close()
          }
        }
      }
    } catch {
      case e: Throwable =>
        e.printStackTrace()
        throw e
    } finally {
      zipfile.close()
    }
  }
}
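// Usage sketch: both paths below are placeholders, not paths used anywhere in this project.
object ZipFileUtilUsageSketch {
  def main(args: Array[String]): Unit = {
    // Unpacks /tmp/model.zip into /tmp/model-extracted, recreating the archive's directory tree.
    ZipFileUtil.unzip("/tmp/model.zip", "/tmp/model-extracted")
  }
}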
shareactorIO/pipeline
prediction.ml/jvm/src/main/scala/io/pipeline/prediction/jvm/ZipFileUtil.scala
Scala
apache-2.0
1,385
package com.cobble.swaggg.tileentity import net.minecraft.server.gui.IUpdatePlayerListBox import net.minecraft.tileentity.TileEntity class TileEntitySwagExtractor extends TileEntity with IUpdatePlayerListBox { override def update(): Unit = { // println("Test") } def onClick(): Unit = { println("Second Test") } }
Cobbleopolis/Swaggg
src/main/java/com/cobble/swaggg/tileentity/TileEntitySwagExtractor.scala
Scala
lgpl-2.1
355
/* * Copyright 2018 Analytics Zoo Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.zoo.pipeline.api.keras.layers import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule import com.intel.analytics.bigdl.nn.keras.KerasLayer import com.intel.analytics.bigdl.tensor.Tensor import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric import com.intel.analytics.bigdl.utils.Shape import com.intel.analytics.zoo.pipeline.api.Net import com.intel.analytics.zoo.pipeline.api.keras.layers.utils.KerasUtils import scala.reflect.ClassTag /** * Select an index of the input in the given dim and return the subset part. * The batch dimension needs to be unchanged. * For example, if input is: * 1 2 3 * 4 5 6 * Select(1, 1) will give output [2 5] * Select(1, -1) will give output [3 6] * * When you use this layer as the first layer of a model, you need to provide the argument * inputShape (a Single Shape, does not include the batch dimension). * * Remark: This layer is from Torch and wrapped in Keras style. * * @param dim The dimension to select. 0-based index. Cannot select the batch dimension. * -1 means the last dimension of the input. * @param index The index of the dimension to be selected. 0-based index. * -1 means the last dimension of the input. * @param inputShape A Single Shape, does not include the batch dimension. * @tparam T Numeric type of parameter(e.g. weight, bias). Only support float/double now. 
*/ class Select[T: ClassTag]( val dim: Int, val index: Int, val inputShape: Shape = null)(implicit ev: TensorNumeric[T]) extends KerasLayer[Tensor[T], Tensor[T], T](KerasUtils.addBatch(inputShape)) with Net { private def getPositiveDimAndIndex(inputShape: Shape): (Int, Int) = { val input = inputShape.toSingle().toArray require(input.length >= 2, s"Select requires >= 2D input, but got input dim ${input.length}") val positiveDim = if (dim < 0) dim + input.length else dim require(positiveDim != 0, "Cannot select the batch dimension") require(positiveDim > 0 && positiveDim <= input.length - 1, s"Invalid select dim: $dim, dim should be within range (0, ${input.length - 1}]") val positiveIndex = if (index < 0) index + input(positiveDim) else index require(positiveIndex >= 0 && positiveIndex <= input(positiveDim) - 1, s"Invalid select index for dim $dim: $index, " + s"index should be within range [0, ${input(positiveDim) - 1}]") (positiveDim, positiveIndex) } override def computeOutputShape(inputShape: Shape): Shape = { val input = inputShape.toSingle().toArray.toBuffer input.remove(getPositiveDimAndIndex(inputShape)._1) Shape(input.toArray) } override def doBuild(inputShape: Shape): AbstractModule[Tensor[T], Tensor[T], T] = { val (positiveDim, positiveIndex) = getPositiveDimAndIndex(inputShape) val layer = com.intel.analytics.bigdl.nn.Select(positiveDim + 1, positiveIndex + 1) layer.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]] } override private[zoo] def toKeras2(): String = { val params = Net.inputShapeToString(inputShape) ++ Net.param(getName()) ++ Net.param(dim, "dim") Net.kerasDef(this, params) } } object Select { def apply[@specialized(Float, Double) T: ClassTag]( dim: Int, index: Int, inputShape: Shape = null)(implicit ev: TensorNumeric[T]): Select[T] = { new Select[T](dim, index, inputShape) } }
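// A minimal construction sketch for the layer above. It only relies on the companion `apply`
// defined in this file plus BigDL's `Shape` and the implicit float TensorNumeric; wiring the
// layer into a full Keras-style model is omitted.
object SelectSketch {
  import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric.NumericFloat
  import com.intel.analytics.bigdl.utils.Shape

  def main(args: Array[String]): Unit = {
    // Non-batch input shape (3), i.e. batched input of shape (batch, 3): dim 1 with index 1
    // picks the second element of each sample, as in the [1 2 3] / [4 5 6] example above.
    val select = Select[Float](dim = 1, index = 1, inputShape = Shape(3))

    // computeOutputShape takes the full shape including the batch dimension; the selected
    // dimension is dropped, so a (2, 3) input shape becomes (2).
    println(select.computeOutputShape(Shape(2, 3)))
  }
}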
intel-analytics/analytics-zoo
zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/Select.scala
Scala
apache-2.0
4,017
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.analysis import scala.annotation.tailrec import scala.collection.mutable.ArrayBuffer import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst._ import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.encoders.OuterScopes import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.SubExprUtils._ import org.apache.spark.sql.catalyst.expressions.aggregate._ import org.apache.spark.sql.catalyst.expressions.objects.{LambdaVariable, MapObjects, NewInstance, UnresolvedMapObjects} import org.apache.spark.sql.catalyst.plans._ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.rules._ import org.apache.spark.sql.catalyst.trees.TreeNodeRef import org.apache.spark.sql.catalyst.util.toPrettySQL import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types._ /** * A trivial [[Analyzer]] with a dummy [[SessionCatalog]] and [[EmptyFunctionRegistry]]. * Used for testing when all relations are already filled in and the analyzer needs only * to resolve attribute references. */ object SimpleAnalyzer extends Analyzer( new SessionCatalog( new InMemoryCatalog, EmptyFunctionRegistry, new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)) { override def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean) {} }, new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)) /** * Provides a way to keep state during the analysis, this enables us to decouple the concerns * of analysis environment from the catalog. * * Note this is thread local. * * @param defaultDatabase The default database used in the view resolution, this overrules the * current catalog database. * @param nestedViewDepth The nested depth in the view resolution, this enables us to limit the * depth of nested views. */ case class AnalysisContext( defaultDatabase: Option[String] = None, nestedViewDepth: Int = 0) object AnalysisContext { private val value = new ThreadLocal[AnalysisContext]() { override def initialValue: AnalysisContext = AnalysisContext() } def get: AnalysisContext = value.get() private def set(context: AnalysisContext): Unit = value.set(context) def withAnalysisContext[A](database: Option[String])(f: => A): A = { val originContext = value.get() val context = AnalysisContext(defaultDatabase = database, nestedViewDepth = originContext.nestedViewDepth + 1) set(context) try f finally { set(originContext) } } } /** * Provides a logical query plan analyzer, which translates [[UnresolvedAttribute]]s and * [[UnresolvedRelation]]s into fully typed objects using information in a [[SessionCatalog]]. 
*/ class Analyzer( catalog: SessionCatalog, conf: SQLConf, maxIterations: Int) extends RuleExecutor[LogicalPlan] with CheckAnalysis { def this(catalog: SessionCatalog, conf: SQLConf) = { this(catalog, conf, conf.optimizerMaxIterations) } def resolver: Resolver = conf.resolver protected val fixedPoint = FixedPoint(maxIterations) /** * Override to provide additional rules for the "Resolution" batch. */ val extendedResolutionRules: Seq[Rule[LogicalPlan]] = Nil /** * Override to provide rules to do post-hoc resolution. Note that these rules will be executed * in an individual batch. This batch is to run right after the normal resolution batch and * execute its rules in one pass. */ val postHocResolutionRules: Seq[Rule[LogicalPlan]] = Nil lazy val batches: Seq[Batch] = Seq( Batch("Hints", fixedPoint, new ResolveHints.ResolveBroadcastHints(conf), ResolveHints.RemoveAllHints), Batch("Simple Sanity Check", Once, LookupFunctions), Batch("Substitution", fixedPoint, CTESubstitution, WindowsSubstitution, EliminateUnions, new SubstituteUnresolvedOrdinals(conf)), Batch("Resolution", fixedPoint, ResolveTableValuedFunctions :: ResolveRelations :: ResolveReferences :: ResolveCreateNamedStruct :: ResolveDeserializer :: ResolveNewInstance :: ResolveUpCast :: ResolveGroupingAnalytics :: ResolvePivot :: ResolveOrdinalInOrderByAndGroupBy :: ResolveAggAliasInGroupBy :: ResolveMissingReferences :: ExtractGenerator :: ResolveGenerate :: ResolveFunctions :: ResolveAliases :: ResolveSubquery :: ResolveSubqueryColumnAliases :: ResolveWindowOrder :: ResolveWindowFrame :: ResolveNaturalAndUsingJoin :: ExtractWindowExpressions :: GlobalAggregates :: ResolveAggregateFunctions :: TimeWindowing :: ResolveInlineTables(conf) :: ResolveTimeZone(conf) :: TypeCoercion.typeCoercionRules ++ extendedResolutionRules : _*), Batch("Post-Hoc Resolution", Once, postHocResolutionRules: _*), Batch("View", Once, AliasViewChild(conf)), Batch("Nondeterministic", Once, PullOutNondeterministic), Batch("UDF", Once, HandleNullInputsForUDF), Batch("FixNullability", Once, FixNullability), Batch("Subquery", Once, UpdateOuterReferences), Batch("Cleanup", fixedPoint, CleanupAliases) ) /** * Analyze cte definitions and substitute child plan with analyzed cte definitions. */ object CTESubstitution extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case With(child, relations) => substituteCTE(child, relations.foldLeft(Seq.empty[(String, LogicalPlan)]) { case (resolved, (name, relation)) => resolved :+ name -> execute(substituteCTE(relation, resolved)) }) case other => other } def substituteCTE(plan: LogicalPlan, cteRelations: Seq[(String, LogicalPlan)]): LogicalPlan = { plan transformDown { case u : UnresolvedRelation => cteRelations.find(x => resolver(x._1, u.tableIdentifier.table)) .map(_._2).getOrElse(u) case other => // This cannot be done in ResolveSubquery because ResolveSubquery does not know the CTE. other transformExpressions { case e: SubqueryExpression => e.withNewPlan(substituteCTE(e.plan, cteRelations)) } } } } /** * Substitute child plan with WindowSpecDefinitions. */ object WindowsSubstitution extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { // Lookup WindowSpecDefinitions. This rule works with unresolved children. 
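      // For example (illustrative): in
      //   SELECT max(a) OVER w FROM t WINDOW w AS (PARTITION BY b ORDER BY c)
      // the reference to `w` in the OVER clause is replaced below with the
      // WindowSpecDefinition declared in the WINDOW clause.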
case WithWindowDefinition(windowDefinitions, child) => child.transform { case p => p.transformExpressions { case UnresolvedWindowExpression(c, WindowSpecReference(windowName)) => val errorMessage = s"Window specification $windowName is not defined in the WINDOW clause." val windowSpecDefinition = windowDefinitions.getOrElse(windowName, failAnalysis(errorMessage)) WindowExpression(c, windowSpecDefinition) } } } } /** * Replaces [[UnresolvedAlias]]s with concrete aliases. */ object ResolveAliases extends Rule[LogicalPlan] { private def assignAliases(exprs: Seq[NamedExpression]) = { exprs.zipWithIndex.map { case (expr, i) => expr.transformUp { case u @ UnresolvedAlias(child, optGenAliasFunc) => child match { case ne: NamedExpression => ne case go @ GeneratorOuter(g: Generator) if g.resolved => MultiAlias(go, Nil) case e if !e.resolved => u case g: Generator => MultiAlias(g, Nil) case c @ Cast(ne: NamedExpression, _, _) => Alias(c, ne.name)() case e: ExtractValue => Alias(e, toPrettySQL(e))() case e if optGenAliasFunc.isDefined => Alias(child, optGenAliasFunc.get.apply(e))() case e => Alias(e, toPrettySQL(e))() } } }.asInstanceOf[Seq[NamedExpression]] } private def hasUnresolvedAlias(exprs: Seq[NamedExpression]) = exprs.exists(_.find(_.isInstanceOf[UnresolvedAlias]).isDefined) def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case Aggregate(groups, aggs, child) if child.resolved && hasUnresolvedAlias(aggs) => Aggregate(groups, assignAliases(aggs), child) case g: GroupingSets if g.child.resolved && hasUnresolvedAlias(g.aggregations) => g.copy(aggregations = assignAliases(g.aggregations)) case Pivot(groupByExprs, pivotColumn, pivotValues, aggregates, child) if child.resolved && hasUnresolvedAlias(groupByExprs) => Pivot(assignAliases(groupByExprs), pivotColumn, pivotValues, aggregates, child) case Project(projectList, child) if child.resolved && hasUnresolvedAlias(projectList) => Project(assignAliases(projectList), child) } } object ResolveGroupingAnalytics extends Rule[LogicalPlan] { /* * GROUP BY a, b, c WITH ROLLUP * is equivalent to * GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (a), ( ) ). * Group Count: N + 1 (N is the number of group expressions) * * We need to get all of its subsets for the rule described above, the subset is * represented as sequence of expressions. */ def rollupExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.inits.toIndexedSeq /* * GROUP BY a, b, c WITH CUBE * is equivalent to * GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (b, c), (a, c), (a), (b), (c), ( ) ). * Group Count: 2 ^ N (N is the number of group expressions) * * We need to get all of its subsets for a given GROUPBY expression, the subsets are * represented as sequence of expressions. */ def cubeExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = { // `cubeExprs0` is recursive and returns a lazy Stream. Here we call `toIndexedSeq` to // materialize it and avoid serialization problems later on. 
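      // Illustrative expansion for two grouping expressions a and b:
      //   rollupExprs(Seq(a, b)) == Seq(Seq(a, b), Seq(a), Seq())
      //   cubeExprs(Seq(a, b))   == Seq(Seq(a, b), Seq(a), Seq(b), Seq())
      // matching the GROUPING SETS spelled out in the comments above.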
cubeExprs0(exprs).toIndexedSeq } def cubeExprs0(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.toList match { case x :: xs => val initial = cubeExprs0(xs) initial.map(x +: _) ++ initial case Nil => Seq(Seq.empty) } private[analysis] def hasGroupingFunction(e: Expression): Boolean = { e.collectFirst { case g: Grouping => g case g: GroupingID => g }.isDefined } private def replaceGroupingFunc( expr: Expression, groupByExprs: Seq[Expression], gid: Expression): Expression = { expr transform { case e: GroupingID => if (e.groupByExprs.isEmpty || e.groupByExprs == groupByExprs) { Alias(gid, toPrettySQL(e))() } else { throw new AnalysisException( s"Columns of grouping_id (${e.groupByExprs.mkString(",")}) does not match " + s"grouping columns (${groupByExprs.mkString(",")})") } case e @ Grouping(col: Expression) => val idx = groupByExprs.indexWhere(_.semanticEquals(col)) if (idx >= 0) { Alias(Cast(BitwiseAnd(ShiftRight(gid, Literal(groupByExprs.length - 1 - idx)), Literal(1)), ByteType), toPrettySQL(e))() } else { throw new AnalysisException(s"Column of grouping ($col) can't be found " + s"in grouping columns ${groupByExprs.mkString(",")}") } } } /* * Create new alias for all group by expressions for `Expand` operator. */ private def constructGroupByAlias(groupByExprs: Seq[Expression]): Seq[Alias] = { groupByExprs.map { case e: NamedExpression => Alias(e, e.name)() case other => Alias(other, other.toString)() } } /* * Construct [[Expand]] operator with grouping sets. */ private def constructExpand( selectedGroupByExprs: Seq[Seq[Expression]], child: LogicalPlan, groupByAliases: Seq[Alias], gid: Attribute): LogicalPlan = { // Change the nullability of group by aliases if necessary. For example, if we have // GROUPING SETS ((a,b), a), we do not need to change the nullability of a, but we // should change the nullabilty of b to be TRUE. // TODO: For Cube/Rollup just set nullability to be `true`. val expandedAttributes = groupByAliases.map { alias => if (selectedGroupByExprs.exists(!_.contains(alias.child))) { alias.toAttribute.withNullability(true) } else { alias.toAttribute } } val groupingSetsAttributes = selectedGroupByExprs.map { groupingSetExprs => groupingSetExprs.map { expr => val alias = groupByAliases.find(_.child.semanticEquals(expr)).getOrElse( failAnalysis(s"$expr doesn't show up in the GROUP BY list $groupByAliases")) // Map alias to expanded attribute. expandedAttributes.find(_.semanticEquals(alias.toAttribute)).getOrElse( alias.toAttribute) } } Expand(groupingSetsAttributes, groupByAliases, expandedAttributes, gid, child) } /* * Construct new aggregate expressions by replacing grouping functions. */ private def constructAggregateExprs( groupByExprs: Seq[Expression], aggregations: Seq[NamedExpression], groupByAliases: Seq[Alias], groupingAttrs: Seq[Expression], gid: Attribute): Seq[NamedExpression] = aggregations.map { // collect all the found AggregateExpression, so we can check an expression is part of // any AggregateExpression or not. val aggsBuffer = ArrayBuffer[Expression]() // Returns whether the expression belongs to any expressions in `aggsBuffer` or not. def isPartOfAggregation(e: Expression): Boolean = { aggsBuffer.exists(a => a.find(_ eq e).isDefined) } replaceGroupingFunc(_, groupByExprs, gid).transformDown { // AggregateExpression should be computed on the unmodified value of its argument // expressions, so we should not replace any references to grouping expression // inside it. 
case e: AggregateExpression => aggsBuffer += e e case e if isPartOfAggregation(e) => e case e => // Replace expression by expand output attribute. val index = groupByAliases.indexWhere(_.child.semanticEquals(e)) if (index == -1) { e } else { groupingAttrs(index) } }.asInstanceOf[NamedExpression] } /* * Construct [[Aggregate]] operator from Cube/Rollup/GroupingSets. */ private def constructAggregate( selectedGroupByExprs: Seq[Seq[Expression]], groupByExprs: Seq[Expression], aggregationExprs: Seq[NamedExpression], child: LogicalPlan): LogicalPlan = { val gid = AttributeReference(VirtualColumn.groupingIdName, IntegerType, false)() // Expand works by setting grouping expressions to null as determined by the // `selectedGroupByExprs`. To prevent these null values from being used in an aggregate // instead of the original value we need to create new aliases for all group by expressions // that will only be used for the intended purpose. val groupByAliases = constructGroupByAlias(groupByExprs) val expand = constructExpand(selectedGroupByExprs, child, groupByAliases, gid) val groupingAttrs = expand.output.drop(child.output.length) val aggregations = constructAggregateExprs( groupByExprs, aggregationExprs, groupByAliases, groupingAttrs, gid) Aggregate(groupingAttrs, aggregations, expand) } private def findGroupingExprs(plan: LogicalPlan): Seq[Expression] = { plan.collectFirst { case a: Aggregate => // this Aggregate should have grouping id as the last grouping key. val gid = a.groupingExpressions.last if (!gid.isInstanceOf[AttributeReference] || gid.asInstanceOf[AttributeReference].name != VirtualColumn.groupingIdName) { failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup") } a.groupingExpressions.take(a.groupingExpressions.length - 1) }.getOrElse { failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup") } } // This require transformUp to replace grouping()/grouping_id() in resolved Filter/Sort def apply(plan: LogicalPlan): LogicalPlan = plan transformUp { case a if !a.childrenResolved => a // be sure all of the children are resolved. // Ensure group by expressions and aggregate expressions have been resolved. case Aggregate(Seq(c @ Cube(groupByExprs)), aggregateExpressions, child) if (groupByExprs ++ aggregateExpressions).forall(_.resolved) => constructAggregate(cubeExprs(groupByExprs), groupByExprs, aggregateExpressions, child) case Aggregate(Seq(r @ Rollup(groupByExprs)), aggregateExpressions, child) if (groupByExprs ++ aggregateExpressions).forall(_.resolved) => constructAggregate(rollupExprs(groupByExprs), groupByExprs, aggregateExpressions, child) // Ensure all the expressions have been resolved. case x: GroupingSets if x.expressions.forall(_.resolved) => constructAggregate(x.selectedGroupByExprs, x.groupByExprs, x.aggregations, x.child) // We should make sure all expressions in condition have been resolved. case f @ Filter(cond, child) if hasGroupingFunction(cond) && cond.resolved => val groupingExprs = findGroupingExprs(child) // The unresolved grouping id will be resolved by ResolveMissingReferences val newCond = replaceGroupingFunc(cond, groupingExprs, VirtualColumn.groupingIdAttribute) f.copy(condition = newCond) // We should make sure all [[SortOrder]]s have been resolved. 
case s @ Sort(order, _, child) if order.exists(hasGroupingFunction) && order.forall(_.resolved) => val groupingExprs = findGroupingExprs(child) val gid = VirtualColumn.groupingIdAttribute // The unresolved grouping id will be resolved by ResolveMissingReferences val newOrder = order.map(replaceGroupingFunc(_, groupingExprs, gid).asInstanceOf[SortOrder]) s.copy(order = newOrder) } } object ResolvePivot extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case p: Pivot if !p.childrenResolved | !p.aggregates.forall(_.resolved) | !p.groupByExprs.forall(_.resolved) | !p.pivotColumn.resolved => p case Pivot(groupByExprs, pivotColumn, pivotValues, aggregates, child) => val singleAgg = aggregates.size == 1 def outputName(value: Literal, aggregate: Expression): String = { val utf8Value = Cast(value, StringType, Some(conf.sessionLocalTimeZone)).eval(EmptyRow) val stringValue: String = Option(utf8Value).map(_.toString).getOrElse("null") if (singleAgg) { stringValue } else { val suffix = aggregate match { case n: NamedExpression => n.name case _ => toPrettySQL(aggregate) } stringValue + "_" + suffix } } if (aggregates.forall(a => PivotFirst.supportsDataType(a.dataType))) { // Since evaluating |pivotValues| if statements for each input row can get slow this is an // alternate plan that instead uses two steps of aggregation. val namedAggExps: Seq[NamedExpression] = aggregates.map(a => Alias(a, a.sql)()) val namedPivotCol = pivotColumn match { case n: NamedExpression => n case _ => Alias(pivotColumn, "__pivot_col")() } val bigGroup = groupByExprs :+ namedPivotCol val firstAgg = Aggregate(bigGroup, bigGroup ++ namedAggExps, child) val castPivotValues = pivotValues.map(Cast(_, pivotColumn.dataType).eval(EmptyRow)) val pivotAggs = namedAggExps.map { a => Alias(PivotFirst(namedPivotCol.toAttribute, a.toAttribute, castPivotValues) .toAggregateExpression() , "__pivot_" + a.sql)() } val groupByExprsAttr = groupByExprs.map(_.toAttribute) val secondAgg = Aggregate(groupByExprsAttr, groupByExprsAttr ++ pivotAggs, firstAgg) val pivotAggAttribute = pivotAggs.map(_.toAttribute) val pivotOutputs = pivotValues.zipWithIndex.flatMap { case (value, i) => aggregates.zip(pivotAggAttribute).map { case (aggregate, pivotAtt) => Alias(ExtractValue(pivotAtt, Literal(i), resolver), outputName(value, aggregate))() } } Project(groupByExprsAttr ++ pivotOutputs, secondAgg) } else { val pivotAggregates: Seq[NamedExpression] = pivotValues.flatMap { value => def ifExpr(expr: Expression) = { If(EqualNullSafe(pivotColumn, value), expr, Literal(null)) } aggregates.map { aggregate => val filteredAggregate = aggregate.transformDown { // Assumption is the aggregate function ignores nulls. This is true for all current // AggregateFunction's with the exception of First and Last in their default mode // (which we handle) and possibly some Hive UDAF's. case First(expr, _) => First(ifExpr(expr), Literal(true)) case Last(expr, _) => Last(ifExpr(expr), Literal(true)) case a: AggregateFunction => a.withNewChildren(a.children.map(ifExpr)) }.transform { // We are duplicating aggregates that are now computing a different value for each // pivot value. // TODO: Don't construct the physical container until after analysis. 
case ae: AggregateExpression => ae.copy(resultId = NamedExpression.newExprId) } if (filteredAggregate.fastEquals(aggregate)) { throw new AnalysisException( s"Aggregate expression required for pivot, found '$aggregate'") } Alias(filteredAggregate, outputName(value, aggregate))() } } Aggregate(groupByExprs, groupByExprs ++ pivotAggregates, child) } } } /** * Replaces [[UnresolvedRelation]]s with concrete relations from the catalog. */ object ResolveRelations extends Rule[LogicalPlan] { // If the unresolved relation is running directly on files, we just return the original // UnresolvedRelation, the plan will get resolved later. Else we look up the table from catalog // and change the default database name(in AnalysisContext) if it is a view. // We usually look up a table from the default database if the table identifier has an empty // database part, for a view the default database should be the currentDb when the view was // created. When the case comes to resolving a nested view, the view may have different default // database with that the referenced view has, so we need to use // `AnalysisContext.defaultDatabase` to track the current default database. // When the relation we resolve is a view, we fetch the view.desc(which is a CatalogTable), and // then set the value of `CatalogTable.viewDefaultDatabase` to // `AnalysisContext.defaultDatabase`, we look up the relations that the view references using // the default database. // For example: // |- view1 (defaultDatabase = db1) // |- operator // |- table2 (defaultDatabase = db1) // |- view2 (defaultDatabase = db2) // |- view3 (defaultDatabase = db3) // |- view4 (defaultDatabase = db4) // In this case, the view `view1` is a nested view, it directly references `table2`, `view2` // and `view4`, the view `view2` references `view3`. On resolving the table, we look up the // relations `table2`, `view2`, `view4` using the default database `db1`, and look up the // relation `view3` using the default database `db2`. // // Note this is compatible with the views defined by older versions of Spark(before 2.2), which // have empty defaultDatabase and all the relations in viewText have database part defined. def resolveRelation(plan: LogicalPlan): LogicalPlan = plan match { case u: UnresolvedRelation if !isRunningDirectlyOnFiles(u.tableIdentifier) => val defaultDatabase = AnalysisContext.get.defaultDatabase val foundRelation = lookupTableFromCatalog(u, defaultDatabase) resolveRelation(foundRelation) // The view's child should be a logical plan parsed from the `desc.viewText`, the variable // `viewText` should be defined, or else we throw an error on the generation of the View // operator. case view @ View(desc, _, child) if !child.resolved => // Resolve all the UnresolvedRelations and Views in the child. val newChild = AnalysisContext.withAnalysisContext(desc.viewDefaultDatabase) { if (AnalysisContext.get.nestedViewDepth > conf.maxNestedViewDepth) { view.failAnalysis(s"The depth of view ${view.desc.identifier} exceeds the maximum " + s"view resolution depth (${conf.maxNestedViewDepth}). Analysis is aborted to " + "avoid errors. 
Increase the value of spark.sql.view.maxNestedViewDepth to work " + "aroud this.") } execute(child) } view.copy(child = newChild) case p @ SubqueryAlias(_, view: View) => val newChild = resolveRelation(view) p.copy(child = newChild) case _ => plan } def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case i @ InsertIntoTable(u: UnresolvedRelation, parts, child, _, _) if child.resolved => EliminateSubqueryAliases(lookupTableFromCatalog(u)) match { case v: View => u.failAnalysis(s"Inserting into a view is not allowed. View: ${v.desc.identifier}.") case other => i.copy(table = other) } case u: UnresolvedRelation => resolveRelation(u) } // Look up the table with the given name from catalog. The database we used is decided by the // precedence: // 1. Use the database part of the table identifier, if it is defined; // 2. Use defaultDatabase, if it is defined(In this case, no temporary objects can be used, // and the default database is only used to look up a view); // 3. Use the currentDb of the SessionCatalog. private def lookupTableFromCatalog( u: UnresolvedRelation, defaultDatabase: Option[String] = None): LogicalPlan = { val tableIdentWithDb = u.tableIdentifier.copy( database = u.tableIdentifier.database.orElse(defaultDatabase)) try { catalog.lookupRelation(tableIdentWithDb) } catch { case _: NoSuchTableException => u.failAnalysis(s"Table or view not found: ${tableIdentWithDb.unquotedString}") // If the database is defined and that database is not found, throw an AnalysisException. // Note that if the database is not defined, it is possible we are looking up a temp view. case e: NoSuchDatabaseException => u.failAnalysis(s"Table or view not found: ${tableIdentWithDb.unquotedString}, the " + s"database ${e.db} doesn't exsits.") } } // If the database part is specified, and we support running SQL directly on files, and // it's not a temporary view, and the table does not exist, then let's just return the // original UnresolvedRelation. It is possible we are matching a query like "select * // from parquet.`/path/to/query`". The plan will get resolved in the rule `ResolveDataSource`. // Note that we are testing (!db_exists || !table_exists) because the catalog throws // an exception from tableExists if the database does not exist. private def isRunningDirectlyOnFiles(table: TableIdentifier): Boolean = { table.database.isDefined && conf.runSQLonFile && !catalog.isTemporaryTable(table) && (!catalog.databaseExists(table.database.get) || !catalog.tableExists(table)) } } /** * Replaces [[UnresolvedAttribute]]s with concrete [[AttributeReference]]s from * a logical plan node's children. */ object ResolveReferences extends Rule[LogicalPlan] { /** * Generate a new logical plan for the right child with different expression IDs * for all conflicting attributes. */ private def dedupRight (left: LogicalPlan, right: LogicalPlan): LogicalPlan = { val conflictingAttributes = left.outputSet.intersect(right.outputSet) logDebug(s"Conflicting attributes ${conflictingAttributes.mkString(",")} " + s"between $left and $right") right.collect { // Handle base relations that might appear more than once. 
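        // (Illustrative) The classic trigger is a self-join such as `t JOIN t`, where both sides
        // resolve to the same base relation and the right side must get fresh expression IDs.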
case oldVersion: MultiInstanceRelation if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty => val newVersion = oldVersion.newInstance() (oldVersion, newVersion) case oldVersion: SerializeFromObject if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty => (oldVersion, oldVersion.copy(serializer = oldVersion.serializer.map(_.newInstance()))) // Handle projects that create conflicting aliases. case oldVersion @ Project(projectList, _) if findAliases(projectList).intersect(conflictingAttributes).nonEmpty => (oldVersion, oldVersion.copy(projectList = newAliases(projectList))) case oldVersion @ Aggregate(_, aggregateExpressions, _) if findAliases(aggregateExpressions).intersect(conflictingAttributes).nonEmpty => (oldVersion, oldVersion.copy(aggregateExpressions = newAliases(aggregateExpressions))) case oldVersion: Generate if oldVersion.generatedSet.intersect(conflictingAttributes).nonEmpty => val newOutput = oldVersion.generatorOutput.map(_.newInstance()) (oldVersion, oldVersion.copy(generatorOutput = newOutput)) case oldVersion @ Window(windowExpressions, _, _, child) if AttributeSet(windowExpressions.map(_.toAttribute)).intersect(conflictingAttributes) .nonEmpty => (oldVersion, oldVersion.copy(windowExpressions = newAliases(windowExpressions))) } // Only handle first case, others will be fixed on the next pass. .headOption match { case None => /* * No result implies that there is a logical plan node that produces new references * that this rule cannot handle. When that is the case, there must be another rule * that resolves these conflicts. Otherwise, the analysis will fail. */ right case Some((oldRelation, newRelation)) => val attributeRewrites = AttributeMap(oldRelation.output.zip(newRelation.output)) val newRight = right transformUp { case r if r == oldRelation => newRelation } transformUp { case other => other transformExpressions { case a: Attribute => dedupAttr(a, attributeRewrites) case s: SubqueryExpression => s.withNewPlan(dedupOuterReferencesInSubquery(s.plan, attributeRewrites)) } } newRight } } private def dedupAttr(attr: Attribute, attrMap: AttributeMap[Attribute]): Attribute = { attrMap.get(attr).getOrElse(attr).withQualifier(attr.qualifier) } /** * The outer plan may have been de-duplicated and the function below updates the * outer references to refer to the de-duplicated attributes. * * For example (SQL): * {{{ * SELECT * FROM t1 * INTERSECT * SELECT * FROM t1 * WHERE EXISTS (SELECT 1 * FROM t2 * WHERE t1.c1 = t2.c1) * }}} * Plan before resolveReference rule. * 'Intersect * :- Project [c1#245, c2#246] * : +- SubqueryAlias t1 * : +- Relation[c1#245,c2#246] parquet * +- 'Project [*] * +- Filter exists#257 [c1#245] * : +- Project [1 AS 1#258] * : +- Filter (outer(c1#245) = c1#251) * : +- SubqueryAlias t2 * : +- Relation[c1#251,c2#252] parquet * +- SubqueryAlias t1 * +- Relation[c1#245,c2#246] parquet * Plan after the resolveReference rule. * Intersect * :- Project [c1#245, c2#246] * : +- SubqueryAlias t1 * : +- Relation[c1#245,c2#246] parquet * +- Project [c1#259, c2#260] * +- Filter exists#257 [c1#259] * : +- Project [1 AS 1#258] * : +- Filter (outer(c1#259) = c1#251) => Updated * : +- SubqueryAlias t2 * : +- Relation[c1#251,c2#252] parquet * +- SubqueryAlias t1 * +- Relation[c1#259,c2#260] parquet => Outer plan's attributes are de-duplicated. 
*/ private def dedupOuterReferencesInSubquery( plan: LogicalPlan, attrMap: AttributeMap[Attribute]): LogicalPlan = { plan transformDown { case currentFragment => currentFragment transformExpressions { case OuterReference(a: Attribute) => OuterReference(dedupAttr(a, attrMap)) case s: SubqueryExpression => s.withNewPlan(dedupOuterReferencesInSubquery(s.plan, attrMap)) } } } def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case p: LogicalPlan if !p.childrenResolved => p // If the projection list contains Stars, expand it. case p: Project if containsStar(p.projectList) => p.copy(projectList = buildExpandedProjectList(p.projectList, p.child)) // If the aggregate function argument contains Stars, expand it. case a: Aggregate if containsStar(a.aggregateExpressions) => if (a.groupingExpressions.exists(_.isInstanceOf[UnresolvedOrdinal])) { failAnalysis( "Star (*) is not allowed in select list when GROUP BY ordinal position is used") } else { a.copy(aggregateExpressions = buildExpandedProjectList(a.aggregateExpressions, a.child)) } // If the script transformation input contains Stars, expand it. case t: ScriptTransformation if containsStar(t.input) => t.copy( input = t.input.flatMap { case s: Star => s.expand(t.child, resolver) case o => o :: Nil } ) case g: Generate if containsStar(g.generator.children) => failAnalysis("Invalid usage of '*' in explode/json_tuple/UDTF") // To resolve duplicate expression IDs for Join and Intersect case j @ Join(left, right, _, _) if !j.duplicateResolved => j.copy(right = dedupRight(left, right)) case i @ Intersect(left, right) if !i.duplicateResolved => i.copy(right = dedupRight(left, right)) case i @ Except(left, right) if !i.duplicateResolved => i.copy(right = dedupRight(left, right)) // When resolve `SortOrder`s in Sort based on child, don't report errors as // we still have chance to resolve it based on its descendants case s @ Sort(ordering, global, child) if child.resolved && !s.resolved => val newOrdering = ordering.map(order => resolveExpression(order, child).asInstanceOf[SortOrder]) Sort(newOrdering, global, child) // A special case for Generate, because the output of Generate should not be resolved by // ResolveReferences. Attributes in the output will be resolved by ResolveGenerate. case g @ Generate(generator, _, _, _, _, _) if generator.resolved => g case g @ Generate(generator, join, outer, qualifier, output, child) => val newG = resolveExpression(generator, child, throws = true) if (newG.fastEquals(generator)) { g } else { Generate(newG.asInstanceOf[Generator], join, outer, qualifier, output, child) } // Skips plan which contains deserializer expressions, as they should be resolved by another // rule: ResolveDeserializer. case plan if containsDeserializer(plan.expressions) => plan case q: LogicalPlan => logTrace(s"Attempting to resolve ${q.simpleString}") q.transformExpressionsUp { case u @ UnresolvedAttribute(nameParts) => // Leave unchanged if resolution fails. Hopefully will be resolved next round. 
val result = withPosition(u) { q.resolveChildren(nameParts, resolver).getOrElse(u) } logDebug(s"Resolving $u to $result") result case UnresolvedExtractValue(child, fieldExpr) if child.resolved => ExtractValue(child, fieldExpr, resolver) } } def newAliases(expressions: Seq[NamedExpression]): Seq[NamedExpression] = { expressions.map { case a: Alias => Alias(a.child, a.name)() case other => other } } def findAliases(projectList: Seq[NamedExpression]): AttributeSet = { AttributeSet(projectList.collect { case a: Alias => a.toAttribute }) } /** * Build a project list for Project/Aggregate and expand the star if possible */ private def buildExpandedProjectList( exprs: Seq[NamedExpression], child: LogicalPlan): Seq[NamedExpression] = { exprs.flatMap { // Using Dataframe/Dataset API: testData2.groupBy($"a", $"b").agg($"*") case s: Star => s.expand(child, resolver) // Using SQL API without running ResolveAlias: SELECT * FROM testData2 group by a, b case UnresolvedAlias(s: Star, _) => s.expand(child, resolver) case o if containsStar(o :: Nil) => expandStarExpression(o, child) :: Nil case o => o :: Nil }.map(_.asInstanceOf[NamedExpression]) } /** * Returns true if `exprs` contains a [[Star]]. */ def containsStar(exprs: Seq[Expression]): Boolean = exprs.exists(_.collect { case _: Star => true }.nonEmpty) /** * Expands the matching attribute.*'s in `child`'s output. */ def expandStarExpression(expr: Expression, child: LogicalPlan): Expression = { expr.transformUp { case f1: UnresolvedFunction if containsStar(f1.children) => f1.copy(children = f1.children.flatMap { case s: Star => s.expand(child, resolver) case o => o :: Nil }) case c: CreateNamedStruct if containsStar(c.valExprs) => val newChildren = c.children.grouped(2).flatMap { case Seq(k, s : Star) => CreateStruct(s.expand(child, resolver)).children case kv => kv } c.copy(children = newChildren.toList ) case c: CreateArray if containsStar(c.children) => c.copy(children = c.children.flatMap { case s: Star => s.expand(child, resolver) case o => o :: Nil }) case p: Murmur3Hash if containsStar(p.children) => p.copy(children = p.children.flatMap { case s: Star => s.expand(child, resolver) case o => o :: Nil }) // count(*) has been replaced by count(1) case o if containsStar(o.children) => failAnalysis(s"Invalid usage of '*' in expression '${o.prettyName}'") } } } private def containsDeserializer(exprs: Seq[Expression]): Boolean = { exprs.exists(_.find(_.isInstanceOf[UnresolvedDeserializer]).isDefined) } protected[sql] def resolveExpression( expr: Expression, plan: LogicalPlan, throws: Boolean = false) = { // Resolve expression in one round. // If throws == false or the desired attribute doesn't exist // (like try to resolve `a.b` but `a` doesn't exist), fail and return the origin one. // Else, throw exception. try { expr transformUp { case GetColumnByOrdinal(ordinal, _) => plan.output(ordinal) case u @ UnresolvedAttribute(nameParts) => withPosition(u) { plan.resolve(nameParts, resolver).getOrElse(u) } case UnresolvedExtractValue(child, fieldName) if child.resolved => ExtractValue(child, fieldName, resolver) } } catch { case a: AnalysisException if !throws => expr } } /** * In many dialects of SQL it is valid to use ordinal positions in order/sort by and group by * clauses. This rule is to convert ordinal positions to the corresponding expressions in the * select list. This support is introduced in Spark 2.0. * * - When the sort references or group by expressions are not integer but foldable expressions, * just ignore them. 
* - When spark.sql.orderByOrdinal/spark.sql.groupByOrdinal is set to false, ignore the position * numbers too. * * Before the release of Spark 2.0, the literals in order/sort by and group by clauses * have no effect on the results. */ object ResolveOrdinalInOrderByAndGroupBy extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case p if !p.childrenResolved => p // Replace the index with the related attribute for ORDER BY, // which is a 1-base position of the projection list. case Sort(orders, global, child) if orders.exists(_.child.isInstanceOf[UnresolvedOrdinal]) => val newOrders = orders map { case s @ SortOrder(UnresolvedOrdinal(index), direction, nullOrdering, _) => if (index > 0 && index <= child.output.size) { SortOrder(child.output(index - 1), direction, nullOrdering, Set.empty) } else { s.failAnalysis( s"ORDER BY position $index is not in select list " + s"(valid range is [1, ${child.output.size}])") } case o => o } Sort(newOrders, global, child) // Replace the index with the corresponding expression in aggregateExpressions. The index is // a 1-base position of aggregateExpressions, which is output columns (select expression) case Aggregate(groups, aggs, child) if aggs.forall(_.resolved) && groups.exists(_.isInstanceOf[UnresolvedOrdinal]) => val newGroups = groups.map { case u @ UnresolvedOrdinal(index) if index > 0 && index <= aggs.size => aggs(index - 1) case ordinal @ UnresolvedOrdinal(index) => ordinal.failAnalysis( s"GROUP BY position $index is not in select list " + s"(valid range is [1, ${aggs.size}])") case o => o } Aggregate(newGroups, aggs, child) } } /** * Replace unresolved expressions in grouping keys with resolved ones in SELECT clauses. * This rule is expected to run after [[ResolveReferences]] applied. */ object ResolveAggAliasInGroupBy extends Rule[LogicalPlan] { // This is a strict check though, we put this to apply the rule only if the expression is not // resolvable by child. private def notResolvableByChild(attrName: String, child: LogicalPlan): Boolean = { !child.output.exists(a => resolver(a.name, attrName)) } private def mayResolveAttrByAggregateExprs( exprs: Seq[Expression], aggs: Seq[NamedExpression], child: LogicalPlan): Seq[Expression] = { exprs.map { _.transform { case u: UnresolvedAttribute if notResolvableByChild(u.name, child) => aggs.find(ne => resolver(ne.name, u.name)).getOrElse(u) }} } override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case agg @ Aggregate(groups, aggs, child) if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) && groups.exists(!_.resolved) => agg.copy(groupingExpressions = mayResolveAttrByAggregateExprs(groups, aggs, child)) case gs @ GroupingSets(selectedGroups, groups, child, aggs) if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) && groups.exists(_.isInstanceOf[UnresolvedAttribute]) => gs.copy( selectedGroupByExprs = selectedGroups.map(mayResolveAttrByAggregateExprs(_, aggs, child)), groupByExprs = mayResolveAttrByAggregateExprs(groups, aggs, child)) } } /** * In many dialects of SQL it is valid to sort by attributes that are not present in the SELECT * clause. This rule detects such queries and adds the required attributes to the original * projection, so that they will be available during sorting. Another projection is added to * remove these attributes after sorting. * * The HAVING clause could also used a grouping columns that is not presented in the SELECT. 
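   * For example (illustrative), `SELECT a FROM t ORDER BY b` is rewritten so that `b` is added
   * to the projection while the sort runs and is projected away again afterwards, leaving only
   * `a` in the final output.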
*/ object ResolveMissingReferences extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { // Skip sort with aggregate. This will be handled in ResolveAggregateFunctions case sa @ Sort(_, _, child: Aggregate) => sa case s @ Sort(order, _, child) if !s.resolved && child.resolved => try { val newOrder = order.map(resolveExpressionRecursively(_, child).asInstanceOf[SortOrder]) val requiredAttrs = AttributeSet(newOrder).filter(_.resolved) val missingAttrs = requiredAttrs -- child.outputSet if (missingAttrs.nonEmpty) { // Add missing attributes and then project them away after the sort. Project(child.output, Sort(newOrder, s.global, addMissingAttr(child, missingAttrs))) } else if (newOrder != order) { s.copy(order = newOrder) } else { s } } catch { // Attempting to resolve it might fail. When this happens, return the original plan. // Users will see an AnalysisException for resolution failure of missing attributes // in Sort case ae: AnalysisException => s } case f @ Filter(cond, child) if !f.resolved && child.resolved => try { val newCond = resolveExpressionRecursively(cond, child) val requiredAttrs = newCond.references.filter(_.resolved) val missingAttrs = requiredAttrs -- child.outputSet if (missingAttrs.nonEmpty) { // Add missing attributes and then project them away. Project(child.output, Filter(newCond, addMissingAttr(child, missingAttrs))) } else if (newCond != cond) { f.copy(condition = newCond) } else { f } } catch { // Attempting to resolve it might fail. When this happens, return the original plan. // Users will see an AnalysisException for resolution failure of missing attributes case ae: AnalysisException => f } } /** * Add the missing attributes into projectList of Project/Window or aggregateExpressions of * Aggregate. */ private def addMissingAttr(plan: LogicalPlan, missingAttrs: AttributeSet): LogicalPlan = { if (missingAttrs.isEmpty) { return plan } plan match { case p: Project => val missing = missingAttrs -- p.child.outputSet Project(p.projectList ++ missingAttrs, addMissingAttr(p.child, missing)) case a: Aggregate => // all the missing attributes should be grouping expressions // TODO: push down AggregateExpression missingAttrs.foreach { attr => if (!a.groupingExpressions.exists(_.semanticEquals(attr))) { throw new AnalysisException(s"Can't add $attr to ${a.simpleString}") } } val newAggregateExpressions = a.aggregateExpressions ++ missingAttrs a.copy(aggregateExpressions = newAggregateExpressions) case g: Generate => // If join is false, we will convert it to true for getting from the child the missing // attributes that its child might have or could have. val missing = missingAttrs -- g.child.outputSet g.copy(join = true, child = addMissingAttr(g.child, missing)) case d: Distinct => throw new AnalysisException(s"Can't add $missingAttrs to $d") case u: UnaryNode => u.withNewChildren(addMissingAttr(u.child, missingAttrs) :: Nil) case other => throw new AnalysisException(s"Can't add $missingAttrs to $other") } } /** * Resolve the expression on a specified logical plan and it's child (recursively), until * the expression is resolved or meet a non-unary node or Subquery. 
*/ @tailrec private def resolveExpressionRecursively(expr: Expression, plan: LogicalPlan): Expression = { val resolved = resolveExpression(expr, plan) if (resolved.resolved) { resolved } else { plan match { case u: UnaryNode if !u.isInstanceOf[SubqueryAlias] => resolveExpressionRecursively(resolved, u.child) case other => resolved } } } } /** * Checks whether a function identifier referenced by an [[UnresolvedFunction]] is defined in the * function registry. Note that this rule doesn't try to resolve the [[UnresolvedFunction]]. It * only performs simple existence check according to the function identifier to quickly identify * undefined functions without triggering relation resolution, which may incur potentially * expensive partition/schema discovery process in some cases. * * @see [[ResolveFunctions]] * @see https://issues.apache.org/jira/browse/SPARK-19737 */ object LookupFunctions extends Rule[LogicalPlan] { override def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressions { case f: UnresolvedFunction if !catalog.functionExists(f.name) => withPosition(f) { throw new NoSuchFunctionException(f.name.database.getOrElse("default"), f.name.funcName) } } } /** * Replaces [[UnresolvedFunction]]s with concrete [[Expression]]s. */ object ResolveFunctions extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case q: LogicalPlan => q transformExpressions { case u if !u.childrenResolved => u // Skip until children are resolved. case u: UnresolvedAttribute if resolver(u.name, VirtualColumn.hiveGroupingIdName) => withPosition(u) { Alias(GroupingID(Nil), VirtualColumn.hiveGroupingIdName)() } case u @ UnresolvedGenerator(name, children) => withPosition(u) { catalog.lookupFunction(name, children) match { case generator: Generator => generator case other => failAnalysis(s"$name is expected to be a generator. However, " + s"its class is ${other.getClass.getCanonicalName}, which is not a generator.") } } case u @ UnresolvedFunction(funcId, children, isDistinct) => withPosition(u) { catalog.lookupFunction(funcId, children) match { // AggregateWindowFunctions are AggregateFunctions that can only be evaluated within // the context of a Window clause. They do not need to be wrapped in an // AggregateExpression. case wf: AggregateWindowFunction => if (isDistinct) { failAnalysis(s"${wf.prettyName} does not support the modifier DISTINCT") } else { wf } // We get an aggregate function, we need to wrap it in an AggregateExpression. case agg: AggregateFunction => AggregateExpression(agg, Complete, isDistinct) // This function is not an aggregate function, just return the resolved one. case other => if (isDistinct) { failAnalysis(s"${other.prettyName} does not support the modifier DISTINCT") } else { other } } } } } } /** * This rule resolves and rewrites subqueries inside expressions. * * Note: CTEs are handled in CTESubstitution. */ object ResolveSubquery extends Rule[LogicalPlan] with PredicateHelper { /** * Resolve the correlated expressions in a subquery by using the an outer plans' references. 
All * resolved outer references are wrapped in an [[OuterReference]] */ private def resolveOuterReferences(plan: LogicalPlan, outer: LogicalPlan): LogicalPlan = { plan transformDown { case q: LogicalPlan if q.childrenResolved && !q.resolved => q transformExpressions { case u @ UnresolvedAttribute(nameParts) => withPosition(u) { try { outer.resolve(nameParts, resolver) match { case Some(outerAttr) => OuterReference(outerAttr) case None => u } } catch { case _: AnalysisException => u } } } } } /** * Resolves the subquery plan that is referenced in a subquery expression. The normal * attribute references are resolved using regular analyzer and the outer references are * resolved from the outer plans using the resolveOuterReferences method. * * Outer references from the correlated predicates are updated as children of * Subquery expression. */ private def resolveSubQuery( e: SubqueryExpression, plans: Seq[LogicalPlan])( f: (LogicalPlan, Seq[Expression]) => SubqueryExpression): SubqueryExpression = { // Step 1: Resolve the outer expressions. var previous: LogicalPlan = null var current = e.plan do { // Try to resolve the subquery plan using the regular analyzer. previous = current current = execute(current) // Use the outer references to resolve the subquery plan if it isn't resolved yet. val i = plans.iterator val afterResolve = current while (!current.resolved && current.fastEquals(afterResolve) && i.hasNext) { current = resolveOuterReferences(current, i.next()) } } while (!current.resolved && !current.fastEquals(previous)) // Step 2: If the subquery plan is fully resolved, pull the outer references and record // them as children of SubqueryExpression. if (current.resolved) { // Record the outer references as children of subquery expression. f(current, SubExprUtils.getOuterReferences(current)) } else { e.withNewPlan(current) } } /** * Resolves the subquery. Apart of resolving the subquery and outer references (if any) * in the subquery plan, the children of subquery expression are updated to record the * outer references. This is needed to make sure * (1) The column(s) referred from the outer query are not pruned from the plan during * optimization. * (2) Any aggregate expression(s) that reference outer attributes are pushed down to * outer plan to get evaluated. */ private def resolveSubQueries(plan: LogicalPlan, plans: Seq[LogicalPlan]): LogicalPlan = { plan transformExpressions { case s @ ScalarSubquery(sub, _, exprId) if !sub.resolved => resolveSubQuery(s, plans)(ScalarSubquery(_, _, exprId)) case e @ Exists(sub, _, exprId) if !sub.resolved => resolveSubQuery(e, plans)(Exists(_, _, exprId)) case In(value, Seq(l @ ListQuery(sub, _, exprId, _))) if value.resolved && !l.resolved => val expr = resolveSubQuery(l, plans)((plan, exprs) => { ListQuery(plan, exprs, exprId, plan.output) }) In(value, Seq(expr)) } } /** * Resolve and rewrite all subqueries in an operator tree.. */ def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { // In case of HAVING (a filter after an aggregate) we use both the aggregate and // its child for resolution. case f @ Filter(_, a: Aggregate) if f.childrenResolved => resolveSubQueries(f, Seq(a, a.child)) // Only a few unary nodes (Project/Filter/Aggregate) can contain subqueries. case q: UnaryNode if q.childrenResolved => resolveSubQueries(q, q.children) } } /** * Replaces unresolved column aliases for a subquery with projections. 
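   * For example (illustrative), in `SELECT * FROM (SELECT 1, 2) t(a, b)` the aliases `a` and `b`
   * are applied to the subquery's output columns by placing a Project with those aliases on top
   * of the subquery.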
*/ object ResolveSubqueryColumnAliases extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case u @ UnresolvedSubqueryColumnAliases(columnNames, child) if child.resolved => // Resolves output attributes if a query has alias names in its subquery: // e.g., SELECT * FROM (SELECT 1 AS a, 1 AS b) t(col1, col2) val outputAttrs = child.output // Checks if the number of the aliases equals to the number of output columns // in the subquery. if (columnNames.size != outputAttrs.size) { u.failAnalysis("Number of column aliases does not match number of columns. " + s"Number of column aliases: ${columnNames.size}; " + s"number of columns: ${outputAttrs.size}.") } val aliases = outputAttrs.zip(columnNames).map { case (attr, aliasName) => Alias(attr, aliasName)() } Project(aliases, child) } } /** * Turns projections that contain aggregate expressions into aggregations. */ object GlobalAggregates extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case Project(projectList, child) if containsAggregates(projectList) => Aggregate(Nil, projectList, child) } def containsAggregates(exprs: Seq[Expression]): Boolean = { // Collect all Windowed Aggregate Expressions. val windowedAggExprs = exprs.flatMap { expr => expr.collect { case WindowExpression(ae: AggregateExpression, _) => ae } }.toSet // Find the first Aggregate Expression that is not Windowed. exprs.exists(_.collectFirst { case ae: AggregateExpression if !windowedAggExprs.contains(ae) => ae }.isDefined) } } /** * This rule finds aggregate expressions that are not in an aggregate operator. For example, * those in a HAVING clause or ORDER BY clause. These expressions are pushed down to the * underlying aggregate operator and then projected away after the original operator. */ object ResolveAggregateFunctions extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case filter @ Filter(havingCondition, aggregate @ Aggregate(grouping, originalAggExprs, child)) if aggregate.resolved => // Try resolving the condition of the filter as though it is in the aggregate clause try { val aggregatedCondition = Aggregate( grouping, Alias(havingCondition, "havingCondition")() :: Nil, child) val resolvedOperator = execute(aggregatedCondition) def resolvedAggregateFilter = resolvedOperator .asInstanceOf[Aggregate] .aggregateExpressions.head // If resolution was successful and we see the filter has an aggregate in it, add it to // the original aggregate operator. if (resolvedOperator.resolved) { // Try to replace all aggregate expressions in the filter by an alias. val aggregateExpressions = ArrayBuffer.empty[NamedExpression] val transformedAggregateFilter = resolvedAggregateFilter.transform { case ae: AggregateExpression => val alias = Alias(ae, ae.toString)() aggregateExpressions += alias alias.toAttribute // Grouping functions are handled in the rule [[ResolveGroupingAnalytics]]. case e: Expression if grouping.exists(_.semanticEquals(e)) && !ResolveGroupingAnalytics.hasGroupingFunction(e) && !aggregate.output.exists(_.semanticEquals(e)) => e match { case ne: NamedExpression => aggregateExpressions += ne ne.toAttribute case _ => val alias = Alias(e, e.toString)() aggregateExpressions += alias alias.toAttribute } } // Push the aggregate expressions into the aggregate (if any). 
if (aggregateExpressions.nonEmpty) { Project(aggregate.output, Filter(transformedAggregateFilter, aggregate.copy(aggregateExpressions = originalAggExprs ++ aggregateExpressions))) } else { filter } } else { filter } } catch { // Attempting to resolve in the aggregate can result in ambiguity. When this happens, // just return the original plan. case ae: AnalysisException => filter } case sort @ Sort(sortOrder, global, aggregate: Aggregate) if aggregate.resolved => // Try resolving the ordering as though it is in the aggregate clause. try { val unresolvedSortOrders = sortOrder.filter(s => !s.resolved || containsAggregate(s)) val aliasedOrdering = unresolvedSortOrders.map(o => Alias(o.child, "aggOrder")()) val aggregatedOrdering = aggregate.copy(aggregateExpressions = aliasedOrdering) val resolvedAggregate: Aggregate = execute(aggregatedOrdering).asInstanceOf[Aggregate] val resolvedAliasedOrdering: Seq[Alias] = resolvedAggregate.aggregateExpressions.asInstanceOf[Seq[Alias]] // If we pass the analysis check, then the ordering expressions should only reference to // aggregate expressions or grouping expressions, and it's safe to push them down to // Aggregate. checkAnalysis(resolvedAggregate) val originalAggExprs = aggregate.aggregateExpressions.map( CleanupAliases.trimNonTopLevelAliases(_).asInstanceOf[NamedExpression]) // If the ordering expression is same with original aggregate expression, we don't need // to push down this ordering expression and can reference the original aggregate // expression instead. val needsPushDown = ArrayBuffer.empty[NamedExpression] val evaluatedOrderings = resolvedAliasedOrdering.zip(sortOrder).map { case (evaluated, order) => val index = originalAggExprs.indexWhere { case Alias(child, _) => child semanticEquals evaluated.child case other => other semanticEquals evaluated.child } if (index == -1) { needsPushDown += evaluated order.copy(child = evaluated.toAttribute) } else { order.copy(child = originalAggExprs(index).toAttribute) } } val sortOrdersMap = unresolvedSortOrders .map(new TreeNodeRef(_)) .zip(evaluatedOrderings) .toMap val finalSortOrders = sortOrder.map(s => sortOrdersMap.getOrElse(new TreeNodeRef(s), s)) // Since we don't rely on sort.resolved as the stop condition for this rule, // we need to check this and prevent applying this rule multiple times if (sortOrder == finalSortOrders) { sort } else { Project(aggregate.output, Sort(finalSortOrders, global, aggregate.copy(aggregateExpressions = originalAggExprs ++ needsPushDown))) } } catch { // Attempting to resolve in the aggregate can result in ambiguity. When this happens, // just return the original plan. case ae: AnalysisException => sort } } def containsAggregate(condition: Expression): Boolean = { condition.find(_.isInstanceOf[AggregateExpression]).isDefined } } /** * Extracts [[Generator]] from the projectList of a [[Project]] operator and create [[Generate]] * operator under [[Project]]. * * This rule will throw [[AnalysisException]] for following cases: * 1. [[Generator]] is nested in expressions, e.g. `SELECT explode(list) + 1 FROM tbl` * 2. more than one [[Generator]] is found in projectList, * e.g. `SELECT explode(list), explode(list) FROM tbl` * 3. [[Generator]] is found in other operators that are not [[Project]] or [[Generate]], * e.g. 
`SELECT * FROM tbl SORT BY explode(list)` */ object ExtractGenerator extends Rule[LogicalPlan] { private def hasGenerator(expr: Expression): Boolean = { expr.find(_.isInstanceOf[Generator]).isDefined } private def hasNestedGenerator(expr: NamedExpression): Boolean = expr match { case UnresolvedAlias(_: Generator, _) => false case Alias(_: Generator, _) => false case MultiAlias(_: Generator, _) => false case other => hasGenerator(other) } private def trimAlias(expr: NamedExpression): Expression = expr match { case UnresolvedAlias(child, _) => child case Alias(child, _) => child case MultiAlias(child, _) => child case _ => expr } private object AliasedGenerator { /** * Extracts a [[Generator]] expression, any names assigned by aliases to the outputs * and the outer flag. The outer flag is used when joining the generator output. * @param e the [[Expression]] * @return (the [[Generator]], seq of output names, outer flag) */ def unapply(e: Expression): Option[(Generator, Seq[String], Boolean)] = e match { case Alias(GeneratorOuter(g: Generator), name) if g.resolved => Some((g, name :: Nil, true)) case MultiAlias(GeneratorOuter(g: Generator), names) if g.resolved => Some((g, names, true)) case Alias(g: Generator, name) if g.resolved => Some((g, name :: Nil, false)) case MultiAlias(g: Generator, names) if g.resolved => Some((g, names, false)) case _ => None } } def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case Project(projectList, _) if projectList.exists(hasNestedGenerator) => val nestedGenerator = projectList.find(hasNestedGenerator).get throw new AnalysisException("Generators are not supported when it's nested in " + "expressions, but got: " + toPrettySQL(trimAlias(nestedGenerator))) case Project(projectList, _) if projectList.count(hasGenerator) > 1 => val generators = projectList.filter(hasGenerator).map(trimAlias) throw new AnalysisException("Only one generator allowed per select clause but found " + generators.size + ": " + generators.map(toPrettySQL).mkString(", ")) case p @ Project(projectList, child) => // Holds the resolved generator, if one exists in the project list. var resolvedGenerator: Generate = null val newProjectList = projectList.flatMap { case AliasedGenerator(generator, names, outer) if generator.childrenResolved => // It's a sanity check, this should not happen as the previous case will throw // exception earlier. assert(resolvedGenerator == null, "More than one generator found in SELECT.") resolvedGenerator = Generate( generator, join = projectList.size > 1, // Only join if there are other expressions in SELECT. outer = outer, qualifier = None, generatorOutput = ResolveGenerate.makeGeneratorOutput(generator, names), child) resolvedGenerator.generatorOutput case other => other :: Nil } if (resolvedGenerator != null) { Project(newProjectList, resolvedGenerator) } else { p } case g: Generate => g case p if p.expressions.exists(hasGenerator) => throw new AnalysisException("Generators are not supported outside the SELECT clause, but " + "got: " + p.simpleString) } } /** * Rewrites table generating expressions that either need one or more of the following in order * to be resolved: * - concrete attribute references for their output. * - to be relocated from a SELECT clause (i.e. from a [[Project]]) into a [[Generate]]). * * Names for the output [[Attribute]]s are extracted from [[Alias]] or [[MultiAlias]] expressions * that wrap the [[Generator]]. 
*/ object ResolveGenerate extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case g: Generate if !g.child.resolved || !g.generator.resolved => g case g: Generate if !g.resolved => g.copy(generatorOutput = makeGeneratorOutput(g.generator, g.generatorOutput.map(_.name))) } /** * Construct the output attributes for a [[Generator]], given a list of names. If the list of * names is empty, names are assigned from the field names of the generator. */ private[analysis] def makeGeneratorOutput( generator: Generator, names: Seq[String]): Seq[Attribute] = { val elementAttrs = generator.elementSchema.toAttributes if (names.length == elementAttrs.length) { names.zip(elementAttrs).map { case (name, attr) => attr.withName(name) } } else if (names.isEmpty) { elementAttrs } else { failAnalysis( "The number of aliases supplied in the AS clause does not match the number of columns " + s"output by the UDTF: expected ${elementAttrs.size} aliases but got " + s"${names.mkString(",")}.") } } } /** * Fixes nullability of Attributes in a resolved LogicalPlan by using the nullability of * corresponding Attributes of its children output Attributes. This step is needed because * users can use a resolved AttributeReference in the Dataset API and outer joins * can change the nullability of an AttributeReference. Without the fix, a nullable column's * nullable field can actually be set as non-nullable, which causes illegal optimizations * (e.g., NULL propagation) and wrong answers. * See SPARK-13484 and SPARK-13801 for the concrete queries of this case. */ object FixNullability extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transformUp { case p if !p.resolved => p // Skip unresolved nodes. case p: LogicalPlan if p.resolved => val childrenOutput = p.children.flatMap(c => c.output).groupBy(_.exprId).flatMap { case (exprId, attributes) => // If there are multiple Attributes having the same ExprId, we need to resolve // the conflict of the nullable field. We do not really expect this to happen. val nullable = attributes.exists(_.nullable) attributes.map(attr => attr.withNullability(nullable)) }.toSeq // Here, we create an AttributeMap that only compares the exprId for the lookup // operation, so we can find the corresponding input attribute's nullability. val attributeMap = AttributeMap[Attribute](childrenOutput.map(attr => attr -> attr)) // For an Attribute used by the current LogicalPlan, if it is from its children, // we fix the nullable field by using the nullability setting of the corresponding // output Attribute from the children. p.transformExpressions { case attr: Attribute if attributeMap.contains(attr) => attr.withNullability(attributeMap(attr).nullable) } } } /** * Extracts [[WindowExpression]]s from the projectList of a [[Project]] operator and * aggregateExpressions of an [[Aggregate]] operator and creates individual [[Window]] * operators for every distinct [[WindowSpecDefinition]]. * * This rule handles three cases: * - A [[Project]] having [[WindowExpression]]s in its projectList; * - An [[Aggregate]] having [[WindowExpression]]s in its aggregateExpressions; * - A [[Filter]]->[[Aggregate]] pattern representing GROUP BY with a HAVING * clause where the [[Aggregate]] has [[WindowExpression]]s in its aggregateExpressions. * Note: If there is a GROUP BY clause in the query, aggregations and corresponding * filters (expressions in the HAVING clause) should be evaluated before any * [[WindowExpression]].
If a query has SELECT DISTINCT, the DISTINCT part should be * evaluated after all [[WindowExpression]]s. * * For every case, the transformation works as follows: * 1. For a list of [[Expression]]s (a projectList or an aggregateExpressions), partitions * it into two lists of [[Expression]]s, one for all [[WindowExpression]]s and another for * all regular expressions. * 2. For all [[WindowExpression]]s, groups them based on their [[WindowSpecDefinition]]s. * 3. For every distinct [[WindowSpecDefinition]], creates a [[Window]] operator and inserts * it into the plan tree. */ object ExtractWindowExpressions extends Rule[LogicalPlan] { private def hasWindowFunction(projectList: Seq[NamedExpression]): Boolean = projectList.exists(hasWindowFunction) private def hasWindowFunction(expr: NamedExpression): Boolean = { expr.find { case window: WindowExpression => true case _ => false }.isDefined } /** * From a Seq of [[NamedExpression]]s, extract expressions containing window expressions and * other regular expressions that do not contain any window expression. For example, for * `col1, Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5)`, we will extract * `col1`, `col2 + col3`, `col4`, and `col5` out and replace their appearances in * the window expression as attribute references. So, the first returned value will be * `[Sum(_w0) OVER (PARTITION BY _w1 ORDER BY _w2)]` and the second returned value will be * `[col1, col2 + col3 as _w0, col4 as _w1, col5 as _w2]`. * * @return (seq of expressions containing at least one window expression, * seq of non-window expressions) */ private def extract( expressions: Seq[NamedExpression]): (Seq[NamedExpression], Seq[NamedExpression]) = { // First, we partition the input expressions into two parts. For the first part, // every expression in it contains at least one WindowExpression. // Expressions in the second part do not have any WindowExpression. val (expressionsWithWindowFunctions, regularExpressions) = expressions.partition(hasWindowFunction) // Then, we need to extract those regular expressions used in the WindowExpression. // For example, when we have col1 - Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5), // we need to make sure that col1 to col5 are all projected from the child of the Window // operator. val extractedExprBuffer = new ArrayBuffer[NamedExpression]() def extractExpr(expr: Expression): Expression = expr match { case ne: NamedExpression => // If a named expression is not in regularExpressions, add it to // extractedExprBuffer and replace it with an AttributeReference. val missingExpr = AttributeSet(Seq(expr)) -- (regularExpressions ++ extractedExprBuffer) if (missingExpr.nonEmpty) { extractedExprBuffer += ne } // alias will be cleaned in the rule CleanupAliases ne case e: Expression if e.foldable => e // No need to create an attribute reference if it will be evaluated as a Literal. case e: Expression => // For other expressions, we extract it and replace it with an AttributeReference (with // an internal column name, e.g. "_w0"). val withName = Alias(e, s"_w${extractedExprBuffer.length}")() extractedExprBuffer += withName withName.toAttribute } // Now, we extract regular expressions from expressionsWithWindowFunctions // by using extractExpr. val seenWindowAggregates = new ArrayBuffer[AggregateExpression] val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map { _.transform { // Extracts the child expressions of a WindowFunction (the input parameters of // a WindowFunction).
case wf: WindowFunction => val newChildren = wf.children.map(extractExpr) wf.withNewChildren(newChildren) // Extracts expressions from the partition spec and order spec. case wsc @ WindowSpecDefinition(partitionSpec, orderSpec, _) => val newPartitionSpec = partitionSpec.map(extractExpr) val newOrderSpec = orderSpec.map { so => val newChild = extractExpr(so.child) so.copy(child = newChild) } wsc.copy(partitionSpec = newPartitionSpec, orderSpec = newOrderSpec) // Extract Windowed AggregateExpression case we @ WindowExpression( ae @ AggregateExpression(function, _, _, _), spec: WindowSpecDefinition) => val newChildren = function.children.map(extractExpr) val newFunction = function.withNewChildren(newChildren).asInstanceOf[AggregateFunction] val newAgg = ae.copy(aggregateFunction = newFunction) seenWindowAggregates += newAgg WindowExpression(newAgg, spec) // Extracts AggregateExpression. For example, for SUM(x) - Sum(y) OVER (...), // we need to extract SUM(x). case agg: AggregateExpression if !seenWindowAggregates.contains(agg) => val withName = Alias(agg, s"_w${extractedExprBuffer.length}")() extractedExprBuffer += withName withName.toAttribute // Extracts other attributes case attr: Attribute => extractExpr(attr) }.asInstanceOf[NamedExpression] } (newExpressionsWithWindowFunctions, regularExpressions ++ extractedExprBuffer) } // end of extract /** * Adds operators for Window Expressions. Every Window operator handles a single Window Spec. */ private def addWindow( expressionsWithWindowFunctions: Seq[NamedExpression], child: LogicalPlan): LogicalPlan = { // First, we need to extract all WindowExpressions from expressionsWithWindowFunctions // and put those extracted WindowExpressions to extractedWindowExprBuffer. // This step is needed because it is possible that an expression contains multiple // WindowExpressions with different Window Specs. // After extracting WindowExpressions, we need to construct a project list to generate // expressionsWithWindowFunctions based on extractedWindowExprBuffer. // For example, for "sum(a) over (...) / sum(b) over (...)", we will first extract // "sum(a) over (...)" and "sum(b) over (...)" out, and assign "_we0" as the alias to // "sum(a) over (...)" and "_we1" as the alias to "sum(b) over (...)". // Then, the projectList will be [_we0/_we1]. val extractedWindowExprBuffer = new ArrayBuffer[NamedExpression]() val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map { // We need to use transformDown because we want to trigger // "case alias @ Alias(window: WindowExpression, _)" first. _.transformDown { case alias @ Alias(window: WindowExpression, _) => // If a WindowExpression has an assigned alias, just use it. extractedWindowExprBuffer += alias alias.toAttribute case window: WindowExpression => // If there is no alias assigned to the WindowExpressions. We create an // internal column. val withName = Alias(window, s"_we${extractedWindowExprBuffer.length}")() extractedWindowExprBuffer += withName withName.toAttribute }.asInstanceOf[NamedExpression] } // Second, we group extractedWindowExprBuffer based on their Partition and Order Specs. val groupedWindowExpressions = extractedWindowExprBuffer.groupBy { expr => val distinctWindowSpec = expr.collect { case window: WindowExpression => window.windowSpec }.distinct // We do a final check and see if we only have a single Window Spec defined in an // expressions. 
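// For example, sum(a) OVER (PARTITION BY b ORDER BY c) and avg(d) OVER (PARTITION BY b ORDER BY c)
// produce the same (partitionSpec, orderSpec) key below and therefore end up sharing a single
// Window operator.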
if (distinctWindowSpec.isEmpty) { failAnalysis(s"$expr does not have any WindowExpression.") } else if (distinctWindowSpec.length > 1) { // newExpressionsWithWindowFunctions only have expressions with a single // WindowExpression. If we reach here, we have a bug. failAnalysis(s"$expr has multiple Window Specifications ($distinctWindowSpec)." + s"Please file a bug report with this error message, stack trace, and the query.") } else { val spec = distinctWindowSpec.head (spec.partitionSpec, spec.orderSpec) } }.toSeq // Third, we aggregate them by adding each Window operator for each Window Spec and then // setting this to the child of the next Window operator. val windowOps = groupedWindowExpressions.foldLeft(child) { case (last, ((partitionSpec, orderSpec), windowExpressions)) => Window(windowExpressions, partitionSpec, orderSpec, last) } // Finally, we create a Project to output windowOps's output // newExpressionsWithWindowFunctions. Project(windowOps.output ++ newExpressionsWithWindowFunctions, windowOps) } // end of addWindow // We have to use transformDown at here to make sure the rule of // "Aggregate with Having clause" will be triggered. def apply(plan: LogicalPlan): LogicalPlan = plan transformDown { // Aggregate with Having clause. This rule works with an unresolved Aggregate because // a resolved Aggregate will not have Window Functions. case f @ Filter(condition, a @ Aggregate(groupingExprs, aggregateExprs, child)) if child.resolved && hasWindowFunction(aggregateExprs) && a.expressions.forall(_.resolved) => val (windowExpressions, aggregateExpressions) = extract(aggregateExprs) // Create an Aggregate operator to evaluate aggregation functions. val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child) // Add a Filter operator for conditions in the Having clause. val withFilter = Filter(condition, withAggregate) val withWindow = addWindow(windowExpressions, withFilter) // Finally, generate output columns according to the original projectList. val finalProjectList = aggregateExprs.map(_.toAttribute) Project(finalProjectList, withWindow) case p: LogicalPlan if !p.childrenResolved => p // Aggregate without Having clause. case a @ Aggregate(groupingExprs, aggregateExprs, child) if hasWindowFunction(aggregateExprs) && a.expressions.forall(_.resolved) => val (windowExpressions, aggregateExpressions) = extract(aggregateExprs) // Create an Aggregate operator to evaluate aggregation functions. val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child) // Add Window operators. val withWindow = addWindow(windowExpressions, withAggregate) // Finally, generate output columns according to the original projectList. val finalProjectList = aggregateExprs.map(_.toAttribute) Project(finalProjectList, withWindow) // We only extract Window Expressions after all expressions of the Project // have been resolved. case p @ Project(projectList, child) if hasWindowFunction(projectList) && !p.expressions.exists(!_.resolved) => val (windowExpressions, regularExpressions) = extract(projectList) // We add a project to get all needed expressions for window expressions from the child // of the original Project operator. val withProject = Project(regularExpressions, child) // Add Window operators. val withWindow = addWindow(windowExpressions, withProject) // Finally, generate output columns according to the original projectList. 
val finalProjectList = projectList.map(_.toAttribute) Project(finalProjectList, withWindow) } } /** * Pulls out nondeterministic expressions from LogicalPlan which is not Project or Filter, * put them into an inner Project and finally project them away at the outer Project. */ object PullOutNondeterministic extends Rule[LogicalPlan] { override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case p if !p.resolved => p // Skip unresolved nodes. case p: Project => p case f: Filter => f case a: Aggregate if a.groupingExpressions.exists(!_.deterministic) => val nondeterToAttr = getNondeterToAttr(a.groupingExpressions) val newChild = Project(a.child.output ++ nondeterToAttr.values, a.child) a.transformExpressions { case e => nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e) }.copy(child = newChild) // todo: It's hard to write a general rule to pull out nondeterministic expressions // from LogicalPlan, currently we only do it for UnaryNode which has same output // schema with its child. case p: UnaryNode if p.output == p.child.output && p.expressions.exists(!_.deterministic) => val nondeterToAttr = getNondeterToAttr(p.expressions) val newPlan = p.transformExpressions { case e => nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e) } val newChild = Project(p.child.output ++ nondeterToAttr.values, p.child) Project(p.output, newPlan.withNewChildren(newChild :: Nil)) } private def getNondeterToAttr(exprs: Seq[Expression]): Map[Expression, NamedExpression] = { exprs.filterNot(_.deterministic).flatMap { expr => val leafNondeterministic = expr.collect { case n: Nondeterministic => n } leafNondeterministic.distinct.map { e => val ne = e match { case n: NamedExpression => n case _ => Alias(e, "_nondeterministic")() } e -> ne } }.toMap } } /** * Correctly handle null primitive inputs for UDF by adding extra [[If]] expression to do the * null check. When user defines a UDF with primitive parameters, there is no way to tell if the * primitive parameter is null or not, so here we assume the primitive input is null-propagatable * and we should return null if the input is null. */ object HandleNullInputsForUDF extends Rule[LogicalPlan] { override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case p if !p.resolved => p // Skip unresolved nodes. case p => p transformExpressionsUp { case udf @ ScalaUDF(func, _, inputs, _, _, _, _) => val parameterTypes = ScalaReflection.getParameterTypes(func) assert(parameterTypes.length == inputs.length) val inputsNullCheck = parameterTypes.zip(inputs) // TODO: skip null handling for not-nullable primitive inputs after we can completely // trust the `nullable` information. // .filter { case (cls, expr) => cls.isPrimitive && expr.nullable } .filter { case (cls, _) => cls.isPrimitive } .map { case (_, expr) => IsNull(expr) } .reduceLeftOption[Expression]((e1, e2) => Or(e1, e2)) inputsNullCheck.map(If(_, Literal.create(null, udf.dataType), udf)).getOrElse(udf) } } } /** * Check and add proper window frames for all window functions. 
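 * For example, an aggregate such as sum(a) OVER (PARTITION BY b ORDER BY c) carries no explicit
 * frame, so the default frame for an ordered window (RANGE BETWEEN UNBOUNDED PRECEDING AND
 * CURRENT ROW) is filled in here, while an explicit frame that contradicts a window function's
 * required frame is rejected with an analysis error.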
*/ object ResolveWindowFrame extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case logical: LogicalPlan => logical transformExpressions { case WindowExpression(wf: WindowFunction, WindowSpecDefinition(_, _, f: SpecifiedWindowFrame)) if wf.frame != UnspecifiedFrame && wf.frame != f => failAnalysis(s"Window Frame $f must match the required frame ${wf.frame}") case WindowExpression(wf: WindowFunction, s @ WindowSpecDefinition(_, o, UnspecifiedFrame)) if wf.frame != UnspecifiedFrame => WindowExpression(wf, s.copy(frameSpecification = wf.frame)) case we @ WindowExpression(e, s @ WindowSpecDefinition(_, o, UnspecifiedFrame)) if e.resolved => val frame = SpecifiedWindowFrame.defaultWindowFrame(o.nonEmpty, acceptWindowFrame = true) we.copy(windowSpec = s.copy(frameSpecification = frame)) } } } /** * Check and add order to [[AggregateWindowFunction]]s. */ object ResolveWindowOrder extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case logical: LogicalPlan => logical transformExpressions { case WindowExpression(wf: WindowFunction, spec) if spec.orderSpec.isEmpty => failAnalysis(s"Window function $wf requires window to be ordered, please add ORDER BY " + s"clause. For example SELECT $wf(value_expr) OVER (PARTITION BY window_partition " + s"ORDER BY window_ordering) from table") case WindowExpression(rank: RankLike, spec) if spec.resolved => val order = spec.orderSpec.map(_.child) WindowExpression(rank.withOrder(order), spec) } } } /** * Removes natural or using joins by calculating output columns based on output from two sides, * Then apply a Project on a normal Join to eliminate natural or using join. */ object ResolveNaturalAndUsingJoin extends Rule[LogicalPlan] { override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case j @ Join(left, right, UsingJoin(joinType, usingCols), condition) if left.resolved && right.resolved && j.duplicateResolved => commonNaturalJoinProcessing(left, right, joinType, usingCols, None) case j @ Join(left, right, NaturalJoin(joinType), condition) if j.resolvedExceptNatural => // find common column names from both sides val joinNames = left.output.map(_.name).intersect(right.output.map(_.name)) commonNaturalJoinProcessing(left, right, joinType, joinNames, condition) } } private def commonNaturalJoinProcessing( left: LogicalPlan, right: LogicalPlan, joinType: JoinType, joinNames: Seq[String], condition: Option[Expression]) = { val leftKeys = joinNames.map { keyName => left.output.find(attr => resolver(attr.name, keyName)).getOrElse { throw new AnalysisException(s"USING column `$keyName` cannot be resolved on the left " + s"side of the join. The left-side columns: [${left.output.map(_.name).mkString(", ")}]") } } val rightKeys = joinNames.map { keyName => right.output.find(attr => resolver(attr.name, keyName)).getOrElse { throw new AnalysisException(s"USING column `$keyName` cannot be resolved on the right " + s"side of the join. 
The right-side columns: [${right.output.map(_.name).mkString(", ")}]") } } val joinPairs = leftKeys.zip(rightKeys) val newCondition = (condition ++ joinPairs.map(EqualTo.tupled)).reduceOption(And) // columns not in joinPairs val lUniqueOutput = left.output.filterNot(att => leftKeys.contains(att)) val rUniqueOutput = right.output.filterNot(att => rightKeys.contains(att)) // the output list looks like: join keys, columns from left, columns from right val projectList = joinType match { case LeftOuter => leftKeys ++ lUniqueOutput ++ rUniqueOutput.map(_.withNullability(true)) case LeftExistence(_) => leftKeys ++ lUniqueOutput case RightOuter => rightKeys ++ lUniqueOutput.map(_.withNullability(true)) ++ rUniqueOutput case FullOuter => // in full outer join, joinCols should be non-null if there is. val joinedCols = joinPairs.map { case (l, r) => Alias(Coalesce(Seq(l, r)), l.name)() } joinedCols ++ lUniqueOutput.map(_.withNullability(true)) ++ rUniqueOutput.map(_.withNullability(true)) case _ : InnerLike => leftKeys ++ lUniqueOutput ++ rUniqueOutput case _ => sys.error("Unsupported natural join type " + joinType) } // use Project to trim unnecessary fields Project(projectList, Join(left, right, joinType, newCondition)) } /** * Replaces [[UnresolvedDeserializer]] with the deserialization expression that has been resolved * to the given input attributes. */ object ResolveDeserializer extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case p if !p.childrenResolved => p case p if p.resolved => p case p => p transformExpressions { case UnresolvedDeserializer(deserializer, inputAttributes) => val inputs = if (inputAttributes.isEmpty) { p.children.flatMap(_.output) } else { inputAttributes } validateTopLevelTupleFields(deserializer, inputs) val resolved = resolveExpression( deserializer, LocalRelation(inputs), throws = true) val result = resolved transformDown { case UnresolvedMapObjects(func, inputData, cls) if inputData.resolved => inputData.dataType match { case ArrayType(et, cn) => val expr = MapObjects(func, inputData, et, cn, cls) transformUp { case UnresolvedExtractValue(child, fieldName) if child.resolved => ExtractValue(child, fieldName, resolver) } expr case other => throw new AnalysisException("need an array field but got " + other.simpleString) } } validateNestedTupleFields(result) result } } private def fail(schema: StructType, maxOrdinal: Int): Unit = { throw new AnalysisException(s"Try to map ${schema.simpleString} to Tuple${maxOrdinal + 1}, " + "but failed as the number of fields does not line up.") } /** * For each top-level Tuple field, we use [[GetColumnByOrdinal]] to get its corresponding column * by position. However, the actual number of columns may be different from the number of Tuple * fields. This method is used to check the number of columns and fields, and throw an * exception if they do not match. */ private def validateTopLevelTupleFields( deserializer: Expression, inputs: Seq[Attribute]): Unit = { val ordinals = deserializer.collect { case GetColumnByOrdinal(ordinal, _) => ordinal }.distinct.sorted if (ordinals.nonEmpty && ordinals != inputs.indices) { fail(inputs.toStructType, ordinals.last) } } /** * For each nested Tuple field, we use [[GetStructField]] to get its corresponding struct field * by position. However, the actual number of struct fields may be different from the number * of nested Tuple fields. 
This method is used to check the number of struct fields and nested * Tuple fields, and throw an exception if they do not match. */ private def validateNestedTupleFields(deserializer: Expression): Unit = { val structChildToOrdinals = deserializer // There are 2 kinds of `GetStructField`: // 1. resolved from `UnresolvedExtractValue`, and it will have a `name` property. // 2. created when we build deserializer expression for nested tuple, no `name` property. // Here we want to validate the ordinals of nested tuple, so we should only catch // `GetStructField` without the name property. .collect { case g: GetStructField if g.name.isEmpty => g } .groupBy(_.child) .mapValues(_.map(_.ordinal).distinct.sorted) structChildToOrdinals.foreach { case (expr, ordinals) => val schema = expr.dataType.asInstanceOf[StructType] if (ordinals != schema.indices) { fail(schema, ordinals.last) } } } } /** * Resolves [[NewInstance]] by finding and adding the outer scope to it if the object being * constructed is an inner class. */ object ResolveNewInstance extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case p if !p.childrenResolved => p case p if p.resolved => p case p => p transformExpressions { case n: NewInstance if n.childrenResolved && !n.resolved => val outer = OuterScopes.getOuterScope(n.cls) if (outer == null) { throw new AnalysisException( s"Unable to generate an encoder for inner class `${n.cls.getName}` without " + "access to the scope that this class was defined in.\\n" + "Try moving this class out of its parent class.") } n.copy(outerPointer = Some(outer)) } } } /** * Replace the [[UpCast]] expression by [[Cast]], and throw exceptions if the cast may truncate. */ object ResolveUpCast extends Rule[LogicalPlan] { private def fail(from: Expression, to: DataType, walkedTypePath: Seq[String]) = { val fromStr = from match { case l: LambdaVariable => "array element" case e => e.sql } throw new AnalysisException(s"Cannot up cast $fromStr from " + s"${from.dataType.simpleString} to ${to.simpleString} as it may truncate\\n" + "The type path of the target object is:\\n" + walkedTypePath.mkString("", "\\n", "\\n") + "You can either add an explicit cast to the input data or choose a higher precision " + "type of the field in the target object") } def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case p if !p.childrenResolved => p case p if p.resolved => p case p => p transformExpressions { case u @ UpCast(child, _, _) if !child.resolved => u case UpCast(child, dataType, walkedTypePath) if Cast.mayTruncate(child.dataType, dataType) => fail(child, dataType, walkedTypePath) case UpCast(child, dataType, walkedTypePath) => Cast(child, dataType.asNullable) } } } } /** * Removes [[SubqueryAlias]] operators from the plan. Subqueries are only required to provide * scoping information for attributes and can be removed once analysis is complete. */ object EliminateSubqueryAliases extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transformUp { case SubqueryAlias(_, child) => child } } /** * Removes [[Union]] operators from the plan if it just has one child. */ object EliminateUnions extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case Union(children) if children.size == 1 => children.head } } /** * Cleans up unnecessary Aliases inside the plan. Basically we only need Alias as a top level * expression in Project(project list) or Aggregate(aggregate expressions) or * Window(window expressions). 
Notice that if an expression has other expression parameters which * are not in its `children`, e.g. `RuntimeReplaceable`, the transformation for Aliases in this * rule can't work for those parameters. */ object CleanupAliases extends Rule[LogicalPlan] { private def trimAliases(e: Expression): Expression = { e.transformDown { case Alias(child, _) => child } } def trimNonTopLevelAliases(e: Expression): Expression = e match { case a: Alias => a.copy(child = trimAliases(a.child))( exprId = a.exprId, qualifier = a.qualifier, explicitMetadata = Some(a.metadata)) case other => trimAliases(other) } override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case Project(projectList, child) => val cleanedProjectList = projectList.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression]) Project(cleanedProjectList, child) case Aggregate(grouping, aggs, child) => val cleanedAggs = aggs.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression]) Aggregate(grouping.map(trimAliases), cleanedAggs, child) case w @ Window(windowExprs, partitionSpec, orderSpec, child) => val cleanedWindowExprs = windowExprs.map(e => trimNonTopLevelAliases(e).asInstanceOf[NamedExpression]) Window(cleanedWindowExprs, partitionSpec.map(trimAliases), orderSpec.map(trimAliases(_).asInstanceOf[SortOrder]), child) // Operators that operate on objects should only have expressions from encoders, which should // never have extra aliases. case o: ObjectConsumer => o case o: ObjectProducer => o case a: AppendColumns => a case other => other transformExpressionsDown { case Alias(child, _) => child } } } /** * Ignore event time watermark in batch query, which is only supported in Structured Streaming. * TODO: add this rule into analyzer rule list. */ object EliminateEventTimeWatermark extends Rule[LogicalPlan] { override def apply(plan: LogicalPlan): LogicalPlan = plan transform { case EventTimeWatermark(_, _, child) if !child.isStreaming => child } } /** * Maps a time column to multiple time windows using the Expand operator. Since it's non-trivial to * figure out how many windows a time column can map to, we over-estimate the number of windows and * filter out the rows where the time column is not inside the time window. */ object TimeWindowing extends Rule[LogicalPlan] { import org.apache.spark.sql.catalyst.dsl.expressions._ private final val WINDOW_COL_NAME = "window" private final val WINDOW_START = "start" private final val WINDOW_END = "end" /** * Generates the logical plan for generating window ranges on a timestamp column. Without * knowing what the timestamp value is, it's non-trivial to figure out deterministically how many * window ranges a timestamp will map to given all possible combinations of a window duration, * slide duration and start time (offset). Therefore, we express and over-estimate the number of * windows there may be, and filter the valid windows. We use last Project operator to group * the window columns into a struct so they can be accessed as `window.start` and `window.end`. * * The windows are calculated as below: * maxNumOverlapping <- ceil(windowDuration / slideDuration) * for (i <- 0 until maxNumOverlapping) * windowId <- ceil((timestamp - startTime) / slideDuration) * windowStart <- windowId * slideDuration + (i - maxNumOverlapping) * slideDuration + startTime * windowEnd <- windowStart + windowDuration * return windowStart, windowEnd * * This behaves as follows for the given parameters for the time: 12:05. 
The valid windows are * marked with a +, and invalid ones are marked with a x. The invalid ones are filtered using the * Filter operator. * window: 12m, slide: 5m, start: 0m :: window: 12m, slide: 5m, start: 2m * 11:55 - 12:07 + 11:52 - 12:04 x * 12:00 - 12:12 + 11:57 - 12:09 + * 12:05 - 12:17 + 12:02 - 12:14 + * * @param plan The logical plan * @return the logical plan that will generate the time windows using the Expand operator, with * the Filter operator for correctness and Project for usability. */ def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators { case p: LogicalPlan if p.children.size == 1 => val child = p.children.head val windowExpressions = p.expressions.flatMap(_.collect { case t: TimeWindow => t }).toSet val numWindowExpr = windowExpressions.size // Only support a single window expression for now if (numWindowExpr == 1 && windowExpressions.head.timeColumn.resolved && windowExpressions.head.checkInputDataTypes().isSuccess) { val window = windowExpressions.head val metadata = window.timeColumn match { case a: Attribute => a.metadata case _ => Metadata.empty } def getWindow(i: Int, overlappingWindows: Int): Expression = { val division = (PreciseTimestampConversion( window.timeColumn, TimestampType, LongType) - window.startTime) / window.slideDuration val ceil = Ceil(division) // if the division is equal to the ceiling, our record is the start of a window val windowId = CaseWhen(Seq((ceil === division, ceil + 1)), Some(ceil)) val windowStart = (windowId + i - overlappingWindows) * window.slideDuration + window.startTime val windowEnd = windowStart + window.windowDuration CreateNamedStruct( Literal(WINDOW_START) :: PreciseTimestampConversion(windowStart, LongType, TimestampType) :: Literal(WINDOW_END) :: PreciseTimestampConversion(windowEnd, LongType, TimestampType) :: Nil) } val windowAttr = AttributeReference( WINDOW_COL_NAME, window.dataType, metadata = metadata)() if (window.windowDuration == window.slideDuration) { val windowStruct = Alias(getWindow(0, 1), WINDOW_COL_NAME)( exprId = windowAttr.exprId, explicitMetadata = Some(metadata)) val replacedPlan = p transformExpressions { case t: TimeWindow => windowAttr } // For backwards compatibility we add a filter to filter out nulls val filterExpr = IsNotNull(window.timeColumn) replacedPlan.withNewChildren( Filter(filterExpr, Project(windowStruct +: child.output, child)) :: Nil) } else { val overlappingWindows = math.ceil(window.windowDuration * 1.0 / window.slideDuration).toInt val windows = Seq.tabulate(overlappingWindows)(i => getWindow(i, overlappingWindows)) val projections = windows.map(_ +: child.output) val filterExpr = window.timeColumn >= windowAttr.getField(WINDOW_START) && window.timeColumn < windowAttr.getField(WINDOW_END) val substitutedPlan = Filter(filterExpr, Expand(projections, windowAttr +: child.output, child)) val renamedPlan = p transformExpressions { case t: TimeWindow => windowAttr } renamedPlan.withNewChildren(substitutedPlan :: Nil) } } else if (numWindowExpr > 1) { p.failAnalysis("Multiple time window expressions would result in a cartesian product " + "of rows, therefore they are currently not supported.") } else { p // Return unchanged. Analyzer will throw exception later } } } /** * Resolve a [[CreateNamedStruct]] if it contains [[NamePlaceholder]]s. 
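 * For example, the struct() function may leave [[NamePlaceholder]]s for columns whose names are
 * not known until analysis; once the corresponding children resolve, each placeholder is replaced
 * below with a string literal holding the resolved child's name.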
*/ object ResolveCreateNamedStruct extends Rule[LogicalPlan] { override def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressions { case e: CreateNamedStruct if !e.resolved => val children = e.children.grouped(2).flatMap { case Seq(NamePlaceholder, e: NamedExpression) if e.resolved => Seq(Literal(e.name), e) case kv => kv } CreateNamedStruct(children.toList) } } /** * The aggregate expressions from subquery referencing outer query block are pushed * down to the outer query block for evaluation. This rule below updates such outer references * as AttributeReference referring attributes from the parent/outer query block. * * For example (SQL): * {{{ * SELECT l.a FROM l GROUP BY 1 HAVING EXISTS (SELECT 1 FROM r WHERE r.d < min(l.b)) * }}} * Plan before the rule. * Project [a#226] * +- Filter exists#245 [min(b#227)#249] * : +- Project [1 AS 1#247] * : +- Filter (d#238 < min(outer(b#227))) <----- * : +- SubqueryAlias r * : +- Project [_1#234 AS c#237, _2#235 AS d#238] * : +- LocalRelation [_1#234, _2#235] * +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249] * +- SubqueryAlias l * +- Project [_1#223 AS a#226, _2#224 AS b#227] * +- LocalRelation [_1#223, _2#224] * Plan after the rule. * Project [a#226] * +- Filter exists#245 [min(b#227)#249] * : +- Project [1 AS 1#247] * : +- Filter (d#238 < outer(min(b#227)#249)) <----- * : +- SubqueryAlias r * : +- Project [_1#234 AS c#237, _2#235 AS d#238] * : +- LocalRelation [_1#234, _2#235] * +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249] * +- SubqueryAlias l * +- Project [_1#223 AS a#226, _2#224 AS b#227] * +- LocalRelation [_1#223, _2#224] */ object UpdateOuterReferences extends Rule[LogicalPlan] { private def stripAlias(expr: Expression): Expression = expr match { case a: Alias => a.child } private def updateOuterReferenceInSubquery( plan: LogicalPlan, refExprs: Seq[Expression]): LogicalPlan = { plan transformAllExpressions { case e => val outerAlias = refExprs.find(stripAlias(_).semanticEquals(stripOuterReference(e))) outerAlias match { case Some(a: Alias) => OuterReference(a.toAttribute) case _ => e } } } def apply(plan: LogicalPlan): LogicalPlan = { plan transform { case f @ Filter(_, a: Aggregate) if f.resolved => f transformExpressions { case s: SubqueryExpression if s.children.nonEmpty => // Collect the aliases from output of aggregate. val outerAliases = a.aggregateExpressions collect { case a: Alias => a } // Update the subquery plan to record the OuterReference to point to outer query plan. s.withNewPlan(updateOuterReferenceInSubquery(s.plan, outerAliases)) } } } }
akopich/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
Scala
apache-2.0
112,466
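The TimeWindowing rule above derives candidate windows from the window-id formula spelled out in its scaladoc. The following standalone sketch (not Spark code; plain minutes stand in for the rule's timestamp arithmetic, and all names are illustrative) reproduces that formula and the final filtering step, and matches the 12:05 example worked through in the comment.

object TimeWindowSketch {
  /** All (start, end) windows, in minutes, that contain `ts`, following the scaladoc formula. */
  def windowsFor(ts: Long, windowDuration: Long, slideDuration: Long, startTime: Long): Seq[(Long, Long)] = {
    val maxNumOverlapping = math.ceil(windowDuration.toDouble / slideDuration).toInt
    val division = (ts - startTime).toDouble / slideDuration
    val ceil = math.ceil(division)
    // If ts sits exactly on a slide boundary it is itself the start of a window, so bump the id.
    val windowId = (if (ceil == division) ceil + 1 else ceil).toLong
    val candidates = (0 until maxNumOverlapping).map { i =>
      val windowStart = (windowId + i - maxNumOverlapping) * slideDuration + startTime
      (windowStart, windowStart + windowDuration)
    }
    // The Filter operator's job: keep only the windows that actually contain ts.
    candidates.filter { case (start, end) => ts >= start && ts < end }
  }

  def main(args: Array[String]): Unit = {
    // 12:05 as minutes since midnight, window 12m, slide 5m, start offsets 0m and 2m.
    println(windowsFor(725, 12, 5, 0)) // Vector((715,727), (720,732), (725,737)) -> 11:55, 12:00, 12:05
    println(windowsFor(725, 12, 5, 2)) // Vector((717,729), (722,734)) -> 11:52-12:04 is filtered out
  }
}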
package tastytest object TestJColour extends Suite("TestJColour") { test(assert(JColour.Red == JColour.Red)) test(assert(JColour.Green != JColour.Blue)) test(assert(JColour.Blue.compareTo(JColour.Red) > 0)) }
lrytz/scala
test/tasty/run/src-2/tastytest/TestJColour.scala
Scala
apache-2.0
218
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.nn.tf import com.intel.analytics.bigdl.tensor.Tensor import com.intel.analytics.bigdl.utils.T import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest import scala.util.Random class LRNGradSerialTest extends ModuleSerializationTest { override def test(): Unit = { val lrnGrad = LRNGrad[Float]().setName("lrnGrad") val input = T(Tensor[Float](4, 8, 8, 3).apply1(_ => Random.nextFloat()), Tensor[Float](4, 8, 8, 3).apply1(_ => Random.nextFloat()), Tensor[Float](4, 8, 8, 3).apply1(_ => Random.nextFloat()) ) runSerializationTest(lrnGrad, input) } }
wzhongyuan/BigDL
spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/tf/LRNGradSpec.scala
Scala
apache-2.0
1,235
package namedTypeArgsR { object t1 { def construct[Elem, Coll[_]](xs: Elem*): Coll[Elem] = ??? val xs3 = construct[Coll = List](1, 2, 3) val xs2 = construct[Coll = List, Elem = Int](1, 2, 3) } }
som-snytt/dotty
tests/pos/reference/named-typeargs.scala
Scala
apache-2.0
207
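For contrast with the named-type-argument calls above, here is a small companion sketch (not part of the original test file; t2 and xs1 are names chosen for illustration) showing the positional form, where both type parameters must be written out because a type-argument list cannot be partially applied.

package namedTypeArgsR {
  object t2 {
    def construct[Elem, Coll[_]](xs: Elem*): Coll[Elem] = ???

    // Positional type arguments: Elem and Coll must both be given, in declaration order.
    val xs1 = construct[Int, List](1, 2, 3)

    // Supplying only one positional type argument does not compile, which is what the named
    // form `construct[Coll = List](...)` above works around.
    // val bad = construct[List](1, 2, 3) // error: wrong number of type arguments
  }
}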
package com.sksamuel.avro4s import java.io.ByteArrayOutputStream import java.util.UUID import org.scalatest.concurrent.TimeLimits import org.scalatest.{Matchers, WordSpec} import shapeless.{:+:, Coproduct, CNil} class AvroInputStreamTest extends WordSpec with Matchers with TimeLimits { case class Booleans(bool: Boolean) case class BigDecimalTest(decimal: BigDecimal) case class ByteArrayTest(bytes: Array[Byte]) case class Floats(float: Float) case class Ints(int: Int) case class Doubles(double: Double) case class Longs(long: Long) case class LongOptions(opt: Option[Long]) case class Strings(str: String) case class BooleanOptions(opt: Option[Boolean]) case class StringOptions(opt: Option[String]) case class SeqCaseClasses(foos: Seq[Foo]) case class OptionNestedStrings(opt: Option[Strings]) case class SeqStrings(stings: Seq[String]) case class SeqDoubles(opt: Seq[Double]) case class SeqInts(opt: Seq[Int]) case class ArrayStrings(array: Array[String]) case class ArrayCaseClasses(array: Array[Foo]) case class ArrayDoubls(array: Array[Double]) case class ArrayInts(array: Array[Int]) case class ListStrings(list: List[String]) case class ListCaseClasses(list: List[Foo]) case class ListDoubles(list: List[Double]) case class ListInts(list: List[Int]) case class VectorInts(ints: Vector[Int]) case class VectorRecords(records: Vector[Foo]) case class SetStrings(set: Set[String]) case class SetCaseClasses(set: Set[Foo]) case class EitherStringBoolean(either: Either[String, Boolean]) case class EitherArray(payload: Either[Seq[Int], String]) case class EitherMap(payload: Either[Map[String, Int], Boolean]) case class MapBoolean(map: Map[String, Boolean]) case class MapSeq(map: Map[String, Seq[String]]) case class MapOptions(map: Map[String, Option[String]]) case class MapCaseClasses(map: Map[String, Foo]) case class MapStrings(map: Map[String, String]) case class MapInts(map: Map[String, Int]) case class Joo(long: Long) case class Goo(double: Double) case class Moo(either: Either[Joo, Goo]) case class Enums(wine: Wine) case class ComplexElement(unit: Ints, decimal: Doubles, flag: Booleans) case class ComplexType(elements: Seq[ComplexElement]) case class CoPrimitives(cp: String :+: Boolean :+: CNil) case class CoRecords(cp: Joo :+: Goo :+: CNil) case class CoArrays(cp: Seq[String] :+: Int :+: CNil) case class CoMaps(cp: Map[String, Int] :+: Int :+: CNil) def write[T](ts: Seq[T])(implicit schema: SchemaFor[T], ser: ToRecord[T]): Array[Byte] = { val output = new ByteArrayOutputStream val avro = AvroOutputStream.data[T](output) avro.write(ts) avro.close() output.toByteArray } "AvroInputStream" should { "read enums" in { val data = Seq(Enums(Wine.Malbec), Enums(Wine.Shiraz)) val bytes = write(data) val in = AvroInputStream.data[Enums](bytes) in.iterator.toList shouldBe data.toList in.close() } "read big decimals" in { val data = Seq(BigDecimalTest(1235.52344), BigDecimalTest(1234)) val bytes = write(data) val in = AvroInputStream.data[BigDecimalTest](bytes) in.iterator.toList shouldBe data.toList in.close() } "read complex type" in { val data = Seq(ComplexType(Seq(ComplexElement(Ints(2), Doubles(0.12345), Booleans(true)), ComplexElement(Ints(5), Doubles(0.98568), Booleans(false))))) val bytes = write(data) val in = AvroInputStream.data[ComplexType](bytes) in.iterator.toList shouldBe data.toList in.close() } "read byte arrays" in { val data = Seq(ByteArrayTest(Array[Byte](1, 2, 3)), ByteArrayTest(Array[Byte](125, 126, 127))) val bytes = write(data) val in = AvroInputStream.data[ByteArrayTest](bytes) val result = 
in.iterator.toList result.head.bytes.toList shouldBe Array[Byte](1, 2, 3).toList result.last.bytes.toList shouldBe Array[Byte](125, 126, 127).toList in.close() } "read eithers of nested case classes" in { val data = Seq(Moo(Left(Joo(4l))), Moo(Right(Goo(12.5d))), Moo(Right(Goo(3)))) val bytes = write(data) val in = AvroInputStream.data[Moo](bytes) in.iterator.toList shouldBe data.toList in.close() } "read eithers of primitives" in { val data = Seq(EitherStringBoolean(Left("sammy")), EitherStringBoolean(Right(true)), EitherStringBoolean(Right(false))) val bytes = write(data) val in = AvroInputStream.data[EitherStringBoolean](bytes) in.iterator.toList shouldBe data.toList in.close() } "read eithers of arrays" in { val data = Seq(EitherArray(Left(Seq(1,2))), EitherArray(Right("lammy"))) val bytes = write(data) val in = AvroInputStream.data[EitherArray](bytes) in.iterator.toList shouldBe data.toList in.close() } "read eithers of maps" in { val data = Seq(EitherMap(Left(Map("val" -> 4))), EitherMap(Right(true))) val bytes = write(data) val in = AvroInputStream.data[EitherMap](bytes) in.iterator.toList shouldBe data.toList in.close() } "read coproducts of primitives" in { type SB = String :+: Boolean :+: CNil val data = Seq( CoPrimitives(Coproduct[SB]("gammy")), CoPrimitives(Coproduct[SB](true)), CoPrimitives(Coproduct[SB](false)) ) val bytes = write(data) val in = AvroInputStream.data[CoPrimitives](bytes) in.iterator.toList shouldBe data.toList in.close() } "read coproducts of case classes" in { type JG = Joo :+: Goo :+: CNil val data = Seq( CoRecords(Coproduct[JG](Joo(98l))), CoRecords(Coproduct[JG](Goo(9.4d))) ) val bytes = write(data) val in = AvroInputStream.data[CoRecords](bytes) in.iterator.toList shouldBe data.toList in.close() } "read coproducts of arrays" in { type SSI = Seq[String] :+: Int :+: CNil val data = Seq( CoArrays(Coproduct[SSI](Seq("hello", "goodbye"))), CoArrays(Coproduct[SSI](4)) ) val bytes = write(data) val in = AvroInputStream.data[CoArrays](bytes) in.iterator.toList shouldBe data.toList in.close() } "read coproducts of maps" in { type MSII = Map[String, Int] :+: Int :+: CNil val data = Seq( CoMaps(Coproduct[MSII](Map("v" -> 1))), CoMaps(Coproduct[MSII](9)) ) val bytes = write(data) println(bytes) val in = AvroInputStream.data[CoMaps](bytes) in.iterator.toList shouldBe data.toList in.close() } "read maps of booleans" in { val data = Seq(MapBoolean(Map("sammy" -> true, "hammy" -> false))) val bytes = write(data) val in = AvroInputStream.data[MapBoolean](bytes) in.iterator.toList shouldBe data.toList in.close() } "read maps of seqs of strings" in { val data = Seq(MapSeq(Map("sammy" -> Seq("foo", "moo"), "hammy" -> Seq("boo", "goo")))) val bytes = write(data) val in = AvroInputStream.data[MapSeq](bytes) in.iterator.toList shouldBe data.toList in.close() } "read maps of options" in { val data = Seq(MapOptions(Map("sammy" -> None, "hammy" -> Some("foo")))) val bytes = write(data) val in = AvroInputStream.data[MapOptions](bytes) in.iterator.toList shouldBe data.toList in.close() } "read maps of case classes" in { val data = Seq(MapCaseClasses(Map("sammy" -> Foo("sam", true), "hammy" -> Foo("ham", false)))) val bytes = write(data) val in = AvroInputStream.data[MapCaseClasses](bytes) in.iterator.toList shouldBe data.toList in.close() } "read deep nested maps" in { val data = Level1(Level2(Level3(Level4(Map("a" -> "b"))))) val bytes = write(Seq(data)) val in = AvroInputStream.data[Level1](bytes) in.iterator.toList shouldBe List(data) in.close() } "read maps of strings" in { val 
data = Seq(MapStrings(Map("sammy" -> "foo", "hammy" -> "boo"))) val bytes = write(data) val in = AvroInputStream.data[MapStrings](bytes) in.iterator.toList shouldBe data.toList in.close() } "read maps of ints" in { val data = Seq(MapInts(Map("sammy" -> 1, "hammy" -> 2))) val bytes = write(data) val in = AvroInputStream.data[MapInts](bytes) in.iterator.toList shouldBe data.toList in.close() } "read set of strings" in { val data = Seq(SetStrings(Set("sammy", "hammy"))) val bytes = write(data) val in = AvroInputStream.data[SetStrings](bytes) in.iterator.toList shouldBe data.toList in.close() } "read set of case classes" in { val data = Seq(SetCaseClasses(Set(Foo("sammy", true), Foo("hammy", false)))) val bytes = write(data) val in = AvroInputStream.data[SetCaseClasses](bytes) in.iterator.toList shouldBe data.toList in.close() } "read list of ints" in { val data = Seq(ListInts(List(1, 2, 3, 4))) val bytes = write(data) val in = AvroInputStream.data[ListInts](bytes) in.iterator.toList shouldBe data.toList in.close() } "read list of doubles" in { val data = Seq(ListDoubles(List(0.1, 0.2, 0.3))) val bytes = write(data) val in = AvroInputStream.data[ListDoubles](bytes) in.iterator.toList shouldBe data.toList in.close() } "read list of strings" in { val data = Seq(ListStrings(List("sammy", "hammy"))) val bytes = write(data) val in = AvroInputStream.data[ListStrings](bytes) in.iterator.toList shouldBe data.toList in.close() } "read list of case classes" in { val data = Seq(ListCaseClasses(List(Foo("sammy", true), Foo("hammy", false)))) val bytes = write(data) val in = AvroInputStream.data[ListCaseClasses](bytes) in.iterator.toList shouldBe data.toList in.close() } "read array of ints" in { val data = Seq(ArrayInts(Array(1, 2, 3, 4))) val bytes = write(data) val in = AvroInputStream.data[ArrayInts](bytes) in.iterator.toList.head.array.toList shouldBe data.toList.head.array.toList in.close() } "read array of doubles" in { val data = Seq(ArrayDoubls(Array(0.1, 0.2, 0.3))) val bytes = write(data) val in = AvroInputStream.data[ArrayDoubls](bytes) in.iterator.toList.head.array.toList shouldBe data.toList.head.array.toList in.close() } "read array of strings" in { val data = Seq(ArrayStrings(Array("sammy", "hammy"))) val bytes = write(data) val in = AvroInputStream.data[ArrayStrings](bytes) in.iterator.toList.head.array.toList shouldBe data.toList.head.array.toList in.close() } "read array of case classes" in { val data = Seq(ArrayCaseClasses(Array(Foo("sammy", true), Foo("hammy", false)))) val bytes = write(data) val in = AvroInputStream.data[ArrayCaseClasses](bytes) in.iterator.toList.head.array.toList shouldBe data.toList.head.array.toList in.close() } "read seq of ints" in { val data = Seq(SeqInts(Seq(1, 2, 3, 4))) val bytes = write(data) val in = AvroInputStream.data[SeqInts](bytes) in.iterator.toList shouldBe data.toList in.close() } "read seq of doubles" in { val data = Seq(SeqDoubles(Seq(0.1, 0.2, 0.3))) val bytes = write(data) val in = AvroInputStream.data[SeqDoubles](bytes) in.iterator.toList shouldBe data.toList in.close() } "read seq of strings" in { val data = Seq(SeqStrings(Seq("sammy", "hammy"))) val bytes = write(data) val in = AvroInputStream.data[SeqStrings](bytes) in.iterator.toList shouldBe data.toList in.close() } "read seq of case classes" in { val data = Seq(SeqCaseClasses(Seq(Foo("sammy", false), Foo("hammy", true)))) val bytes = write(data) val in = AvroInputStream.data[SeqCaseClasses](bytes) in.iterator.toList shouldBe data.toList in.close() } "read options of case classes" in { 
val data = Seq(OptionNestedStrings(Option(Strings("sammy"))), OptionNestedStrings(None)) val bytes = write(data) val in = AvroInputStream.data[OptionNestedStrings](bytes) in.iterator.toList shouldBe data.toList in.close() } "read options of strings" in { val data = Seq(StringOptions(Option("sammy")), StringOptions(None)) val bytes = write(data) val in = AvroInputStream.data[StringOptions](bytes) in.iterator.toList shouldBe data.toList in.close() } "read options of booleans" in { val data = Seq(BooleanOptions(Option(true)), BooleanOptions(None)) val bytes = write(data) val in = AvroInputStream.data[BooleanOptions](bytes) in.iterator.toList shouldBe data.toList in.close() } "read options of longs" in { val data = Seq(LongOptions(Option(4)), LongOptions(None)) val bytes = write(data) val in = AvroInputStream.data[LongOptions](bytes) in.iterator.toList shouldBe data.toList in.close() } "read strings" in { val data = Seq(Strings("sammy"), Strings("hammy")) val bytes = write(data) val in = AvroInputStream.data[Strings](bytes) in.iterator.toList shouldBe data.toList in.close() } "read booleans" in { val data = Seq(Booleans(true), Booleans(false)) val bytes = write(data) val in = AvroInputStream.data[Booleans](bytes) in.iterator.toList shouldBe data.toList in.close() } "read doubles" in { val data = Seq(Doubles(1.2d), Doubles(2.3d)) val bytes = write(data) val in = AvroInputStream.data[Doubles](bytes) in.iterator.toList shouldBe data.toList in.close() } "read floats" in { val data = Seq(Floats(1.2f), Floats(3.4f)) val bytes = write(data) val in = AvroInputStream.data[Floats](bytes) in.iterator.toList shouldBe data.toList in.close() } "read ints" in { val data = Seq(Ints(1), Ints(2)) val bytes = write(data) val in = AvroInputStream.data[Ints](bytes) in.iterator.toList shouldBe data.toList in.close() } "read longs" in { val data = Seq(Longs(1l), Longs(2l)) val bytes = write(data) val in = AvroInputStream.data[Longs](bytes) in.iterator.toList shouldBe data.toList in.close() } "read scala enums" in { val data = Seq(ScalaEnums(Colours.Red), ScalaEnums(Colours.Green)) val bytes = write(data) val in = AvroInputStream.data[ScalaEnums](bytes) in.iterator.toList shouldBe data.toList in.close() } "read uuids" in { val data = Seq(Ids(UUID.randomUUID()), Ids(UUID.randomUUID())) val bytes = write(data) val in = AvroInputStream.data[Ids](bytes) in.iterator.toList shouldBe data.toList in.close() } "support value classes" in { val data = Seq(ValueWrapper(ValueClass("bob")), ValueWrapper(ValueClass("ann"))) val bytes = write(data) val in = AvroInputStream.data[ValueWrapper](bytes) in.iterator.toList shouldBe data.toList in.close() } "read vectors of ints" in { val data = Seq(VectorInts(Vector(3, 2, 1))) val bytes = write(data) val in = AvroInputStream.data[VectorInts](bytes) in.iterator.toList shouldBe Seq(VectorInts(Vector(3, 2, 1))) in.close() } "read vectors of records" in { val data = Seq(VectorRecords(Vector(Foo("sammy", true), Foo("hammy", false)))) val bytes = write(data) val in = AvroInputStream.data[VectorRecords](bytes) in.iterator.toList shouldBe Seq(VectorRecords(Vector(Foo("sammy", true), Foo("hammy", false)))) in.close() } } }
YuvalItzchakov/avro4s
avro4s-core/src/test/scala/com/sksamuel/avro4s/AvroInputStreamTest.scala
Scala
mit
15,988
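Each test above round-trips a case class through the suite's `write` helper and `AvroInputStream.data`. Below is a self-contained sketch of that round trip, assuming the avro4s 1.x API this file appears to target; the `Person` case class and the `AvroOutputStream.data` write side are stand-ins, not part of the suite.

```scala
import java.io.ByteArrayOutputStream
import com.sksamuel.avro4s.{AvroInputStream, AvroOutputStream}

case class Person(name: String, admin: Boolean)

val people = Seq(Person("sammy", admin = true), Person("hammy", admin = false))

// Serialise to an in-memory Avro data file (assumed avro4s 1.x signature).
val baos = new ByteArrayOutputStream()
val out  = AvroOutputStream.data[Person](baos)
out.write(people)
out.close()

// Read the bytes back exactly as the tests above do.
val in = AvroInputStream.data[Person](baos.toByteArray)
val roundTripped = in.iterator.toList // == people
in.close()
```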
package xitrum.scope.request import io.netty.handler.codec.http.multipart.FileUpload import xitrum.Action import xitrum.exception.MissingParam import xitrum.util.DefaultsTo /** * Use "manifest" for Scala 2.10 and "typeOf" for Scala 2.11: * https://github.com/ngocdaothanh/xitrum/issues/155 * * Cache manifests because manifest[T] is a rather expensive operation * (several nested objects are created), the same caveat applies at the sender: * http://groups.google.com/group/akka-user/browse_thread/thread/ee07764dfc1ac794 */ object ParamAccess { val MANIFEST_FILE_UPLOAD = manifest[FileUpload] val MANIFEST_STRING = manifest[String] val MANIFEST_CHAR = manifest[Char] val MANIFEST_BOOLEAN = manifest[Boolean] val MANIFEST_BYTE = manifest[Byte] val MANIFEST_SHORT = manifest[Short] val MANIFEST_INT = manifest[Int] val MANIFEST_LONG = manifest[Long] val MANIFEST_FLOAT = manifest[Float] val MANIFEST_DOUBLE = manifest[Double] } trait ParamAccess { this: Action => import ParamAccess._ //---------------------------------------------------------------------------- def param[T](key: String)(implicit e: T DefaultsTo String, m: Manifest[T]): T = param(key, textParams) def param[T](key: String, coll: Params)(implicit e: T DefaultsTo String, m: Manifest[T]): T = { if (m <:< MANIFEST_FILE_UPLOAD) { bodyFileParams.get(key) match { case None => throw new MissingParam(key) case Some(values) => values.head.asInstanceOf[T] } } else { coll.get(key) match { case None => throw new MissingParam(key) case Some(values) => convertTextParam[T](values.head) } } } def paramo[T](key: String)(implicit e: T DefaultsTo String, m: Manifest[T]): Option[T] = paramo(key, textParams) def paramo[T](key: String, coll: Params)(implicit e: T DefaultsTo String, m: Manifest[T]): Option[T] = { if (m <:< MANIFEST_FILE_UPLOAD) { bodyFileParams.get(key).map(_.head.asInstanceOf[T]) } else { coll.get(key).map(values => convertTextParam[T](values.head)) } } def params[T](key: String)(implicit e: T DefaultsTo String, m: Manifest[T]): Seq[T] = params(key, textParams) def params[T](key: String, coll: Params)(implicit e: T DefaultsTo String, m: Manifest[T]): Seq[T] = { if (m <:< MANIFEST_FILE_UPLOAD) { bodyFileParams.get(key) match { case None => Seq.empty case Some(values) => values.asInstanceOf[Seq[T]] } } else { coll.get(key) match { case None => Seq.empty case Some(values) => values.map(convertTextParam[T]) } } } //---------------------------------------------------------------------------- /** Applications may override this method to convert to more types. */ def convertTextParam[T](value: String)(implicit m: Manifest[T]): T = { val any: Any = if (m <:< MANIFEST_STRING) value else if (m <:< MANIFEST_CHAR) value(0) else if (m <:< MANIFEST_BOOLEAN) value.toBoolean else if (m <:< MANIFEST_BYTE) value.toByte else if (m <:< MANIFEST_SHORT) value.toShort else if (m <:< MANIFEST_INT) value.toInt else if (m <:< MANIFEST_LONG) value.toLong else if (m <:< MANIFEST_FLOAT) value.toFloat else if (m <:< MANIFEST_DOUBLE) value.toDouble else throw new Exception("convertTextParam cannot covert " + value + " to " + m) any.asInstanceOf[T] } }
georgeOsdDev/xitrum
src/main/scala-2.10/xitrum/scope/request/ParamAccess.scala
Scala
mit
3,546
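Inside an action, these helpers fetch and convert request parameters by the requested type, falling back to `String` via `DefaultsTo`. A hedged sketch follows; the route annotation, field names and `respondText` call are illustrative, and only the `param`/`paramo`/`params` calls come from the trait above.

```scala
import io.netty.handler.codec.http.multipart.FileUpload
import xitrum.Action
import xitrum.annotation.POST

// Hypothetical action showing the three lookup styles.
@POST("profiles")
class CreateProfile extends Action {
  def execute(): Unit = {
    val name   = param[String]("name")        // throws MissingParam if absent
    val age    = paramo[Int]("age")           // Option[Int]
    val tags   = params[String]("tag")        // Seq[String], empty if absent
    val avatar = paramo[FileUpload]("avatar") // resolved against bodyFileParams
    respondText(s"$name, $age, ${tags.size} tags, upload: ${avatar.isDefined}")
  }
}
```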
package com.avast.syringe.config.perspective import com.avast.syringe.config.internal.{InjectableProperty, ConfigClassAnalyzer} import org.scalatest.{FlatSpec, BeforeAndAfter} import com.google.common.collect.Lists import java.util.ArrayList import com.avast.syringe.config.PropertyValueConverter import java.lang.reflect.InvocationHandler import com.avast.syringe.aop.Interceptor /** * User: slajchrt * Date: 6/6/12 * Time: 11:10 AM */ //@RunWith(classOf[JUnitRunner]) class InjectionSuite extends FlatSpec with BeforeAndAfter { var propMap: java.util.Map[String, InjectableProperty] = _ var decorPropMap: java.util.Map[String, InjectableProperty] = _ before { propMap = ConfigClassAnalyzer.toMap(classOf[SampleA]) decorPropMap = ConfigClassAnalyzer.toMap(classOf[DecoratorA]) } behavior of "ScalarInjection" it must "inject scalar value to a scalar property" in { expect(10) { val inj = new ScalarInjection(propMap.get("iProp"), List()) val inst = new SampleA inj.inject(inst, List(10)) inst.getiProp() } } it must "inject a reference to a scalar property" in { val refValue = new Runnable { def run() {} } expect(refValue) { val inj = new ScalarInjection(propMap.get("rProp"), List()) val inst = new SampleA val refBuilder = new Builder[Runnable](classOf[Runnable]) { def makeClone(cloneName: String) = sys.error("Not supported") /** * Always creates a new instance * @return a new instance of instanceClass or a decorated one implementing D */ def build[D >: Runnable] = refValue def decorateWith[D >: Runnable](decorator: => Builder[D]) = null def delegateWith[D <: Interceptor[_]](provider: Delegation[D]) = null def addPropertyResolver(resolver: PropertyResolver) = null def setValueConverter(converter: PropertyValueConverter) = null def getPropertyValueConverter = null def getInstanceName = null def getModule = null def syringeAllowMultiInjection = null } inj.inject(inst, List(refBuilder)) inst.getrProp() } } it must "throw an exception when injecting more values to a scalar property" in { intercept[AssertionError] { val inj = new ScalarInjection(propMap.get("iProp"), List()) val inst = new SampleA inj.inject(inst, List(10, 20)) } } it must "throw an exception when injecting no value to a mandatory scalar property" in { intercept[AssertionError] { val inj = new ScalarInjection(propMap.get("sProp"), List()) val inst = new SampleA inj.inject(inst, List()) } } it must "inject a delegate reference taken from the context resolver to a delegate property" in { val inst = new SampleA val decor = new DecoratorA val inj = new ScalarInjection(decorPropMap.get("delegate"), List(new PropertyResolver { def getPropertyValue(instance: Any, property: InjectableProperty) = if (property.isDelegate) inst else throw new NoSuchFieldException() def hasPropertyValue(instance: Any, property: InjectableProperty) = property.isDelegate })) inj.inject(decor, List() /* no explicit value for the delegate, it is taken from the resolver */) assert(decor.delegate == inst) } behavior of "ArrayInjection" it must "inject an array value to an array property" in { val inj = new ArrayInjection(propMap.get("aProp"), List()) val inst = new SampleA val list: List[String] = List("abc", "def") inj.inject(inst, list) assert(inst.getaProp() != null) assert(inst.getaProp().zip(list).forall(p => p._1 == p._2)) } it must "inject an array of references to a reference array property" in { val inj = new ArrayInjection(propMap.get("arProp"), List()) val inst = new SampleA val r1 = new Runnable { def run() {} } val r2 = new Runnable { def run() {} } val list: 
List[Runnable] = List(r1, r2) inj.inject(inst, list) assert(inst.getARProp != null) assert(inst.getARProp.zip(list).forall(p => p._1 == p._2)) } it must "throw an exception when injecting an array of incompatible type" in { val e = intercept[IllegalArgumentException] { val inj = new ArrayInjection(propMap.get("aProp"), List()) val inst = new SampleA val list: List[Int] = List(1, 2) inj.inject(inst, list) } assert(e.getMessage.contains("type mismatch")) } behavior of "CollectionInjection" it must "inject a list to an unitialized list property" in { val inj = new CollectionInjection(propMap.get("lProp"), List()) val inst = new SampleA val list: List[String] = List("abc", "def") inj.inject(inst, list) assert(inst.getlProp() != null) assert(inst.getlProp().toArray.zip(list).forall(p => p._1 == p._2)) } it must "inject a list of references to a reference collection property" in { val inj = new CollectionInjection(propMap.get("lrProp"), List()) val inst = new SampleA val r1 = new Runnable { def run() {} } val r2 = new Runnable { def run() {} } val list: List[Runnable] = List(r1, r2) inj.inject(inst, list) assert(inst.getLRProp != null) assert(inst.getLRProp.toArray.zip(list).forall(p => p._1 == p._2)) } it must "preserve the existing list in the property and inject values to it" in { val inj = new CollectionInjection(propMap.get("lProp"), List()) val inst = new SampleA val listValue: ArrayList[String] = Lists.newArrayList() inst.setlProp(listValue) val list: List[String] = List("abc", "def") inj.inject(inst, list) assert(inst.getlProp() eq listValue) assert(inst.getlProp() != null) assert(inst.getlProp().toArray.zip(list).forall(p => p._1 == p._2)) } it must "throw an exception when injecting a list of incompatible values" in { val e = intercept[IllegalArgumentException] { val inj = new CollectionInjection(propMap.get("lProp"), List()) val inst = new SampleA val list: List[Int] = List(1, 2) inj.inject(inst, list) } assert(e.getMessage.contains("type mismatch")) } behavior of "MapInjection" it must "inject a list of pairs to an unitialized map property" in { val inj = new MapInjection(propMap.get("mProp"), List()) val inst = new SampleA val list: List[(String, Int)] = List(("a", 1), ("b", 2)) inj.inject(inst, list) assert(inst.getmProp() != null) assert(2 == inst.getmProp().size()) assert(1 == inst.getmProp().get("a")) assert(2 == inst.getmProp().get("b")) } it must "inject a list of reference pairs to a reference map property" in { val inj = new MapInjection(propMap.get("mrProp"), List()) val inst = new SampleA val r1 = new Runnable { def run() {} } val r2 = new Runnable { def run() {} } val list: List[(Runnable, Runnable)] = List((r1, r2), (r2, r1)) inj.inject(inst, list) assert(inst.getMRProp != null) assert(r1 == inst.getMRProp.get(r2)) assert(r2 == inst.getMRProp.get(r1)) } it must "throw an exception when injecting a list of incompatible pairs" in { val inj = new MapInjection(propMap.get("mProp"), List()) val inst = new SampleA val list: List[(Int, String)] = List((1, "a"), (2, "b")) val e = intercept[IllegalArgumentException] { inj.inject(inst, list) } assert(e.getMessage.contains("type mismatch")) val list2: List[(String, String)] = List(("a", "aa"), ("b", "bb")) val e2 = intercept[IllegalArgumentException] { inj.inject(inst, list2) } assert(e2.getMessage.contains("type mismatch")) } it must "throw an exception in case the list of pairs contains a key duplicity" in { val inj = new MapInjection(propMap.get("mProp"), List()) val inst = new SampleA val list: List[(String, Int)] = List(("a", 
1), ("a", 2)) val e = intercept[AssertionError] { inj.inject(inst, list) } assert(e.getMessage.contains("To many values for key")) } }
avast/syringe
src/test/scala/com/avast/syringe/config/perspective/InjectionSuite.scala
Scala
bsd-3-clause
8,115
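Condensed, the pattern the suite exercises is: analyse the config class once, then drive values into a fresh instance through the matching injection. The sketch below reuses the suite's own `SampleA`, `ScalarInjection` and `MapInjection` fixtures, so it only compiles alongside those test classes.

```scala
import com.avast.syringe.config.internal.ConfigClassAnalyzer

// Same classes as in the tests above.
val propMap = ConfigClassAnalyzer.toMap(classOf[SampleA])
val inst    = new SampleA

new ScalarInjection(propMap.get("iProp"), List()).inject(inst, List(10))
assert(inst.getiProp() == 10)

// Map properties take a list of key/value pairs, one pair per entry.
new MapInjection(propMap.get("mProp"), List()).inject(inst, List(("a", 1), ("b", 2)))
assert(inst.getmProp().get("a") == 1)
```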
/* * Scala.js (https://www.scala-js.org/) * * Copyright EPFL. * * Licensed under Apache License 2.0 * (https://www.apache.org/licenses/LICENSE-2.0). * * See the NOTICE file distributed with this work for * additional information regarding copyright ownership. */ package java.nio private[nio] final class HeapCharBuffer private ( _capacity: Int, _array0: Array[Char], _arrayOffset0: Int, _initialPosition: Int, _initialLimit: Int, _readOnly: Boolean) extends CharBuffer(_capacity, _array0, _arrayOffset0) { position(_initialPosition) limit(_initialLimit) private[this] implicit def newHeapCharBuffer = HeapCharBuffer.NewHeapCharBuffer def isReadOnly(): Boolean = _readOnly def isDirect(): Boolean = false @noinline def slice(): CharBuffer = GenHeapBuffer(this).generic_slice() @noinline def duplicate(): CharBuffer = GenHeapBuffer(this).generic_duplicate() @noinline def asReadOnlyBuffer(): CharBuffer = GenHeapBuffer(this).generic_asReadOnlyBuffer() def subSequence(start: Int, end: Int): CharBuffer = { if (start < 0 || end < start || end > remaining()) throw new IndexOutOfBoundsException new HeapCharBuffer(capacity(), _array, _arrayOffset, position() + start, position() + end, isReadOnly()) } @noinline def get(): Char = GenBuffer(this).generic_get() @noinline def put(c: Char): CharBuffer = GenBuffer(this).generic_put(c) @noinline def get(index: Int): Char = GenBuffer(this).generic_get(index) @noinline def put(index: Int, c: Char): CharBuffer = GenBuffer(this).generic_put(index, c) @noinline override def get(dst: Array[Char], offset: Int, length: Int): CharBuffer = GenBuffer(this).generic_get(dst, offset, length) @noinline override def put(src: Array[Char], offset: Int, length: Int): CharBuffer = GenBuffer(this).generic_put(src, offset, length) @noinline def compact(): CharBuffer = GenHeapBuffer(this).generic_compact() def order(): ByteOrder = ByteOrder.nativeOrder() // Internal API @inline private[nio] def load(index: Int): Char = GenHeapBuffer(this).generic_load(index) @inline private[nio] def store(index: Int, elem: Char): Unit = GenHeapBuffer(this).generic_store(index, elem) @inline override private[nio] def load(startIndex: Int, dst: Array[Char], offset: Int, length: Int): Unit = GenHeapBuffer(this).generic_load(startIndex, dst, offset, length) @inline override private[nio] def store(startIndex: Int, src: Array[Char], offset: Int, length: Int): Unit = GenHeapBuffer(this).generic_store(startIndex, src, offset, length) } private[nio] object HeapCharBuffer { private[nio] implicit object NewHeapCharBuffer extends GenHeapBuffer.NewHeapBuffer[CharBuffer, Char] { def apply(capacity: Int, array: Array[Char], arrayOffset: Int, initialPosition: Int, initialLimit: Int, readOnly: Boolean): CharBuffer = { new HeapCharBuffer(capacity, array, arrayOffset, initialPosition, initialLimit, readOnly) } } private[nio] def wrap(array: Array[Char], arrayOffset: Int, capacity: Int, initialPosition: Int, initialLength: Int, isReadOnly: Boolean): CharBuffer = { GenHeapBuffer.generic_wrap( array, arrayOffset, capacity, initialPosition, initialLength, isReadOnly) } }
scala-js/scala-js
javalib/src/main/scala/java/nio/HeapCharBuffer.scala
Scala
apache-2.0
3,384
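`HeapCharBuffer` is what backs the plain `CharBuffer.allocate`/`wrap` factories, so its `subSequence` and position semantics can be observed through the public `java.nio` API:

```scala
import java.nio.CharBuffer

val buf = CharBuffer.allocate(8) // heap-backed, read-write
buf.put("hello")
buf.flip()                       // position = 0, limit = 5

// subSequence is relative to the current position, as implemented above.
val sub = buf.subSequence(1, 4)
println(sub.toString)            // ell

buf.get()                        // 'h'; advances position, leaves `sub` untouched
println(buf.subSequence(0, 2))   // el
```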
/* * Copyright 2012 Tumblr Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.twitter.zipkin.storage.redis import com.twitter.finagle.redis.Client import com.twitter.util.{Duration, Future} import com.twitter.zipkin.common.Span import com.twitter.zipkin.storage.Storage import org.jboss.netty.buffer.ChannelBuffer trait RedisStorage extends Storage { val database: Client override def close() = database.release() private[this] lazy val spanListMap = new RedisListMap(database, "full_span", ttl) val ttl: Option[Duration] override def storeSpan(span: Span): Future[Unit] = spanListMap.put(span.traceId, span).unit override def setTimeToLive(traceId: Long, ttl: Duration): Future[Unit] = spanListMap.setTTL(traceId, ttl).unit override def getTimeToLive(traceId: Long): Future[Duration] = spanListMap.getTTL(traceId) map (_.getOrElse(Duration.eternity)) override def getSpansByTraceId(traceId: Long) : Future[Seq[Span]] = fetchTraceById(traceId) map (_.get) override def getSpansByTraceIds(traceIds: Seq[Long]): Future[Seq[Seq[Span]]] = Future.collect(traceIds map (traceId => fetchTraceById(traceId))) map (_ flatten) override def getDataTimeToLive: Int = (ttl map (_.inSeconds)).getOrElse(Int.MaxValue) private[this] def fetchTraceById(traceId: Long): Future[Option[Seq[Span]]] = spanListMap.get(traceId) map (buf => optionify(sortedTrace(buf))) override def tracesExist(traceIds: Seq[Long]): Future[Set[Long]] = Future.collect(traceIds map {id => spanListMap.exists(id) map { exists => if (exists) Some(id) else None } }) map (_.flatten.toSet) private[this] def optionify[A](spans: Seq[A]): Option[Seq[A]] = spans.headOption map (_ => spans) private[this] def firstTimestamp(span: Span): Long = span.firstAnnotation match { case Some(anno) => anno.timestamp case None => 0L } private[this] def sortedTrace(trace: Seq[ChannelBuffer]): Seq[Span] = (trace map deserializeSpan).sortBy[Long](firstTimestamp(_))(Ordering.Long.reverse) }
pteichman/zipkin
zipkin-redis/src/main/scala/com/twitter/zipkin/storage/redis/RedisStorage.scala
Scala
apache-2.0
2,572
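The trace ordering applied by `sortedTrace` (the span whose first annotation has the latest timestamp comes first) is just `sortBy` with a reversed `Long` ordering. A stand-alone illustration with tuples in place of `Span`s:

```scala
// (span id, first-annotation timestamp) stand-ins for Span values
val spans = Seq(("span-a", 30L), ("span-b", 10L), ("span-c", 20L))

val newestFirst = spans.sortBy[Long](_._2)(Ordering.Long.reverse)
// Seq(("span-a", 30L), ("span-c", 20L), ("span-b", 10L))
```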
package net.scalax.cpoi

import net.scalax.cpoi.utils.Alias

package object api extends Alias
scalax/poi-collection
src/main/scala/net/scalax/cpoi/api/package.scala
Scala
mit
94
val name: type = value
MartinThoma/LaTeX-examples
documents/Programmierparadigmen/scripts/scala/val-syntax.scala
Scala
mit
22
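For reference, concrete instances of the `val name: type = value` template:

```scala
val answer: Int = 42
val greeting: String = "Hello, World"
```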
/* Copyright 2014 Twitter, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.twitter.scalding.platform import org.scalatest.{ BeforeAndAfterAll, Suite } trait HadoopSharedPlatformTest extends BeforeAndAfterAll { this: Suite => org.apache.log4j.Logger.getLogger("org.apache.hadoop").setLevel(org.apache.log4j.Level.ERROR) org.apache.log4j.Logger.getLogger("org.mortbay").setLevel(org.apache.log4j.Level.ERROR) org.apache.log4j.Logger.getLogger("org.apache.hadoop.metrics2.util").setLevel(org.apache.log4j.Level.ERROR) val cluster = LocalCluster() def initialize() = cluster.initialize() override def beforeAll() { cluster.synchronized { initialize() } super.beforeAll() } //TODO is there a way to buffer such that we see test results AFTER afterEach? Otherwise the results // get lost in the logging override def afterAll() { try super.afterAll() finally { // Necessary because afterAll can be called from a different thread and we want to make sure that the state // is visible. Note that this assumes there is no contention for LocalCluster (which LocalCluster ensures), // otherwise there could be deadlock. cluster.synchronized { cluster.shutdown() } } } }
AaroC357/scalding
scalding-hadoop-test/src/main/scala/com/twitter/scalding/platform/HadoopSharedPlatformTest.scala
Scala
apache-2.0
1,747
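A suite opts into the shared cluster by mixing the trait into a ScalaTest `Suite`; `beforeAll`/`afterAll` then manage the `LocalCluster` around all of its tests. A hypothetical example:

```scala
import org.scalatest.WordSpec

// Hypothetical suite: test bodies would submit jobs against `cluster`.
class WordCountPlatformTest extends WordSpec with HadoopSharedPlatformTest {
  "a scalding job" should {
    "run against the shared LocalCluster" in {
      // build and run jobs against `cluster` here
    }
  }
}
```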
package org.scalameter import collection._ trait Gen[T] extends Serializable { self => def map[S](f: T => S): Gen[S] = new Gen[S] { def warmupset = for (x <- self.warmupset) yield f(x) def dataset = for (params <- self.dataset) yield params def generate(params: Parameters) = f(self.generate(params)) } def flatMap[S](f: T => Gen[S]): Gen[S] = new Gen[S] { def warmupset = for { x <- self.warmupset y <- f(x).warmupset } yield y def dataset = for { selfparams <- self.dataset x = self.generate(selfparams) thatparams <- f(x).dataset } yield selfparams ++ thatparams def generate(params: Parameters) = { val x = self.generate(params) val mapped = f(x) mapped.generate(params) } } def warmupset: Iterator[T] def dataset: Iterator[Parameters] def generate(params: Parameters): T } object Gen { def unit(axisName: String): Gen[Unit] = new Gen[Unit] { def warmupset = Iterator.single(()) def dataset = Iterator.single(Parameters(axisName -> ())) def generate(params: Parameters) = params[Unit](axisName) } def single[T](axisName: String)(v: T): Gen[T] = enumeration(axisName)(v) def range(axisName: String)(from: Int, upto: Int, hop: Int): Gen[Int] = new Gen[Int] { def warmupset = Iterator.single(upto) def dataset = (from to upto by hop).iterator.map(x => Parameters(axisName -> x)) def generate(params: Parameters) = params[Int](axisName) } def enumeration[T](axisName: String)(xs: T*): Gen[T] = new Gen[T] { def warmupset = Iterator.single(xs.last) def dataset = xs.iterator.map(x => Parameters(axisName -> x)) def generate(params: Parameters) = params[T](axisName) } def exponential(axisName: String)(from: Int, until: Int, factor: Int): Gen[Int] = new Gen[Int] { def warmupset = Iterator.single((until - from) / 2) def dataset = Iterator.iterate(from)(_ * factor).takeWhile(_ <= until).map(x => Parameters(axisName -> x)) def generate(params: Parameters) = params[Int](axisName) } /** Provides most collection generators given that a size generator is defined. */ trait Collections { def sizes: Gen[Int] /* sequences */ def lists = for { size <- sizes } yield (0 until size).toList def arrays = for { size <- sizes } yield (0 until size).toArray def vectors = for { size <- sizes } yield (0 until size).toVector def arraybuffers = for { size <- sizes } yield mutable.ArrayBuffer(0 until size: _*) def ranges = for { size <- sizes } yield 0 until size /* maps */ def hashtablemaps = for { size <- sizes } yield { val hm = mutable.HashMap[Int, Int]() for (x <- 0 until size) hm(x) = x hm } def linkedhashtablemaps = for { size <- sizes } yield { val hm = mutable.LinkedHashMap[Int, Int]() for (x <- 0 until size) hm(x) = x hm } def hashtriemaps = for { size <- sizes } yield { var hm = immutable.HashMap[Int, Int]() for (x <- 0 until size) hm += ((x, x)) hm } def redblackmaps = for { size <- sizes } yield { var am = immutable.TreeMap[Int, Int]() for (x <- 0 until size) am += ((x, x)) am } /* sets */ def hashtablesets = for { size <- sizes } yield { val hs = mutable.HashSet[Int]() for (x <- 0 until size) hs.add(x) hs } def linkedhashtablesets = for { size <- sizes } yield { val hs = mutable.LinkedHashSet[Int]() for (x <- 0 until size) hs.add(x) hs } def avlsets = for { size <- sizes } yield { val as = mutable.TreeSet[Int]() for (x <- 0 until size) as.add(x) as } def redblacksets = for { size <- sizes } yield { var as = immutable.TreeSet[Int]() for (x <- 0 until size) as += x as } def hashtriesets = for { size <- sizes } yield { var hs = immutable.HashSet[Int]() for (x <- 0 until size) hs += x hs } } }
lossyrob/scalpel
src/main/scala/org/scalameter/Gen.scala
Scala
bsd-3-clause
4,185
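Generators compose with `map` and `flatMap`, which is exactly what the `Collections` helpers above rely on. For example:

```scala
import org.scalameter.Gen

val sizes = Gen.range("size")(10000, 50000, 10000)

// map: derive the benchmark input from each size
val arrays = for (size <- sizes) yield (0 until size).toArray

// flatMap: cross two parameter axes into one generator
val scaled = for {
  size   <- sizes
  factor <- Gen.enumeration("factor")(1, 2, 4)
} yield Array.fill(size * factor)(0)
```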
package org.webant.extension.link import java.sql.SQLException import org.apache.commons.dbutils.handlers.BeanListHandler import org.apache.log4j.LogManager import org.webant.commons.entity.Link import org.webant.commons.link.JdbcLinkProvider import scala.collection.JavaConverters._ class HsqldbLinkProvider extends JdbcLinkProvider { private val logger = LogManager.getLogger(classOf[HsqldbLinkProvider]) DRIVER = "org.hsqldb.jdbc.JDBCDriver" override def init(params: java.util.Map[String, Object]): Boolean = { // url = "jdbc:hsqldb:hsql://localhost;sql.syntax_mys=true" // url = "jdbc:hsqldb:file:D:/workspace/webant/data/hsqldb/webant;sql.syntax_mys=true" // user = "sa" // password = "" if (!super.init(params)) { logger.error(s"init ${getClass.getSimpleName} failed!") return false } logger.info(s"init ${getClass.getSimpleName} success!") createTable() } override def createTable(): Boolean = { val sql = "CREATE TABLE IF NOT EXISTS `LINK` (" + " `id` varchar(64) NOT NULL," + " `taskId` varchar(64) DEFAULT NULL," + " `siteId` varchar(64) DEFAULT NULL," + " `url` varchar(1024) DEFAULT NULL," + " `referer` varchar(1024) DEFAULT NULL," + " `priority` smallint(255) DEFAULT NULL," + " `lastCrawlTime` datetime DEFAULT NULL," + " `status` varchar(32) DEFAULT NULL," + " `dataVersion` int(11) DEFAULT NULL," + " `dataCreateTime` TimeStamp DEFAULT NULL," + " `dataUpdateTime` TimeStamp DEFAULT NULL," + " `dataDeleteTime` TimeStamp DEFAULT NULL," + " PRIMARY KEY (`id`)" + ")" try { runner.update(conn, sql) } catch { case e: SQLException => e.printStackTrace() return false } true } override def read(): Iterable[Link] = { read(Link.LINK_STATUS_INIT, batch) } private def read(status: String, size: Int): Iterable[Link] = { val sql = """SELECT "id", "taskId", "siteId", "url", "referer", "priority", "lastCrawlTime", "status", "dataVersion", "dataCreateTime", |"dataUpdateTime", "dataDeleteTime" FROM LINK WHERE "status" = ? ORDER by "dataCreateTime" desc LIMIT ?, ?""".stripMargin val offset: Integer = 0 val pageSize: Integer = if (size <= 0 || size > 1000) 1000 else size val selectParams = Array[Object](status, offset, pageSize) var links = Iterable.empty[Link] conn.setAutoCommit(false) try { links = runner.query(conn, sql, new BeanListHandler[Link](classOf[Link]), selectParams: _*).asScala if (links.nonEmpty) { val pending = links.map(link => { link.setStatus(Link.LINK_STATUS_PENDING) link }) update(pending) } conn.commit() } catch { case e: Exception => conn.rollback() e.printStackTrace() } finally { conn.setAutoCommit(true) } links } override def upsert(link: Link): Int = { // no reflection, simple and fast val sql = """insert into link ("id", "taskId", "siteId", "url", "referer", "priority", "lastCrawlTime", "status", "dataVersion", "dataCreateTime", |"dataUpdateTime", "dataDeleteTime" ) values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
) ON DUPLICATE KEY UPDATE |"taskId" = ?, "siteId" = ?, "url" = ?, "referer" = ?, "priority" = ?, "lastCrawlTime" = ?, "status" = ?, "dataVersion" = "dataVersion" + 1, "dataUpdateTime" = now()""".stripMargin val values = Array[Object]( link.getId, link.getTaskId, link.getSiteId, link.getUrl, link.getReferer, link.getPriority, link.getLastCrawlTime, link.getStatus, link.getDataVersion, link.getDataCreateTime, link.getDataUpdateTime, link.getDataDeleteTime, link.getTaskId, link.getSiteId, link.getUrl, link.getReferer, link.getPriority, link.getLastCrawlTime, link.getStatus ) runner.update(conn, sql, values: _*) } override def upsert(links: Iterable[Link]): Int = { // no reflection, simple and fast val placeholders = links.map(_ => "( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )").mkString(", ") val sql = s"""insert into link ("id", "taskId", "siteId", "url", "referer", "priority", "lastCrawlTime", "status", "dataVersion", "dataCreateTime", |"dataUpdateTime", "dataDeleteTime" ) values $placeholders ON DUPLICATE KEY UPDATE |"dataVersion" = "dataVersion" + 1, "dataUpdateTime" = now()""".stripMargin val values = links.flatMap(link => Array(link.getId, link.getTaskId, link.getSiteId, link.getUrl, link.getReferer, link.getPriority, link.getLastCrawlTime, link.getStatus, link.getDataVersion, link.getDataCreateTime, link.getDataUpdateTime, link.getDataDeleteTime)).toArray runner.update(conn, sql, values: _*) } private def update(links: Iterable[Link]): Int = { // no reflection, simple and fast val placeholders = links.map(_ => "( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )").mkString(", ") val sql = s"""insert into link ("id", "taskId", "siteId", "url", "referer", "priority", "lastCrawlTime", "status", "dataVersion", "dataCreateTime", |"dataUpdateTime", "dataDeleteTime" ) values $placeholders ON DUPLICATE KEY UPDATE |"priority" = values("priority"), "lastCrawlTime" = values("lastCrawlTime"), "status" = values("status"), |"dataVersion" = "dataVersion" + 1, "dataUpdateTime" = now()""".stripMargin val values = links.flatMap(link => Array(link.getId, link.getTaskId, link.getSiteId, link.getUrl, link.getReferer, link.getPriority, link.getLastCrawlTime, link.getStatus, link.getDataVersion, link.getDataCreateTime, link.getDataUpdateTime, link.getDataDeleteTime)).toArray runner.update(conn, sql, values: _*) } override def write(link: Link): Int = { require(conn != null) if (link == null) return 0 upsert(link) } override def write(links: Iterable[Link]): Int = { require(conn != null) if (links == null || links.isEmpty) return 0 upsert(links) } }
sutine/webant
webant-extension/src/main/scala/org/webant/extension/link/HsqldbLinkProvider.scala
Scala
apache-2.0
6,040
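A possible wiring of the provider, assuming the connection keys hinted at by the commented-out defaults above; the parameter names (`url`, `user`, `password`) are guesses at what `JdbcLinkProvider.init` reads, not something this file confirms:

```scala
import scala.collection.JavaConverters._

val provider = new HsqldbLinkProvider

// Hypothetical parameter names; JdbcLinkProvider defines the real contract.
val params: java.util.Map[String, Object] = Map[String, Object](
  "url"      -> "jdbc:hsqldb:mem:webant;sql.syntax_mys=true",
  "user"     -> "sa",
  "password" -> ""
).asJava

if (provider.init(params)) {
  val pending = provider.read() // fetches LINK_STATUS_INIT rows and flips them to PENDING
  provider.write(pending)       // upserts them back
}
```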
package es.uvigo.ei.sing.biomsef
package controller

import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.libs.functional.syntax._
import play.api.libs.json._
import play.api.mvc._

import entity._
import searcher._

object SearcherController extends Controller {

  lazy val searcher = new Searcher

  implicit val SearchResultWrites: Writes[(Article, Double, Set[Keyword])] = (
    (__ \ 'article).write[Article] and
    (__ \ 'tfidf).write[Double] and
    (__ \ 'keywords).write[Set[Keyword]]
  )(s => s)

  def search(query: String, page: Option[Int], pageSize: Option[Int]): Action[AnyContent] =
    Action.async(searcher.search(query, page.getOrElse(0), pageSize.getOrElse(50)) map {
      result => Ok(Json.toJson(result))
    })

  def advSearch(query: String, page: Option[Int], pageSize: Option[Int], categories: List[Category], fromYear: Long, toYear: Long): Action[AnyContent] =
    Action.async(searcher.advSearch(query, page.getOrElse(0), pageSize.getOrElse(50), categories.toSet, fromYear, toYear) map {
      result => Ok(Json.toJson(result))
    })

}
agjacome/biomsef
src/main/scala/controller/SearcherController.scala
Scala
mit
1,100
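`SearchResultWrites` follows play-json's functional builder pattern: one JSON field per tuple element, combined with `and` and finished with an identity function. An analogous self-contained sketch with stand-in types (`Doc` and the field names are mine, not the project's):

```scala
import play.api.libs.functional.syntax._
import play.api.libs.json._

case class Doc(title: String)
implicit val docWrites: Writes[Doc] = Json.writes[Doc]

implicit val hitWrites: Writes[(Doc, Double)] = (
  (__ \ "doc").write[Doc] and
  (__ \ "score").write[Double]
)(s => s)

Json.toJson((Doc("biomsef"), 0.42)) // {"doc":{"title":"biomsef"},"score":0.42}
```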
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.datasources.orc import java.io.File import java.nio.charset.StandardCharsets.UTF_8 import java.sql.{Date, Timestamp} import java.util.Locale import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileStatus, FileSystem, Path} import org.apache.orc.OrcConf.COMPRESS import org.apache.orc.OrcFile import org.apache.orc.OrcProto.ColumnEncoding.Kind.{DICTIONARY_V2, DIRECT, DIRECT_V2} import org.apache.orc.OrcProto.Stream.Kind import org.apache.orc.impl.RecordReaderImpl import org.scalatest.BeforeAndAfterAll import org.apache.spark.{SPARK_VERSION_SHORT, SparkException} import org.apache.spark.sql.{Row, SPARK_VERSION_METADATA_KEY} import org.apache.spark.sql.execution.datasources.{CommonFileDataSourceSuite, SchemaMergeUtils} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ import org.apache.spark.util.Utils case class OrcData(intField: Int, stringField: String) abstract class OrcSuite extends OrcTest with BeforeAndAfterAll with CommonFileDataSourceSuite { import testImplicits._ override protected def dataSourceFormat = "orc" var orcTableDir: File = null var orcTableAsDir: File = null protected override def beforeAll(): Unit = { super.beforeAll() orcTableAsDir = Utils.createTempDir(namePrefix = "orctests") orcTableDir = Utils.createTempDir(namePrefix = "orctests") sparkContext .makeRDD(1 to 10) .map(i => OrcData(i, s"part-$i")) .toDF() .createOrReplaceTempView("orc_temp_table") } protected def testBloomFilterCreation(bloomFilterKind: Kind): Unit = { val tableName = "bloomFilter" withTempDir { dir => withTable(tableName) { val sqlStatement = orcImp match { case "native" => s""" |CREATE TABLE $tableName (a INT, b STRING) |USING ORC |OPTIONS ( | path '${dir.toURI}', | orc.bloom.filter.columns '*', | orc.bloom.filter.fpp 0.1 |) """.stripMargin case "hive" => s""" |CREATE TABLE $tableName (a INT, b STRING) |STORED AS ORC |LOCATION '${dir.toURI}' |TBLPROPERTIES ( | orc.bloom.filter.columns='*', | orc.bloom.filter.fpp=0.1 |) """.stripMargin case impl => throw new UnsupportedOperationException(s"Unknown ORC implementation: $impl") } sql(sqlStatement) sql(s"INSERT INTO $tableName VALUES (1, 'str')") val partFiles = dir.listFiles() .filter(f => f.isFile && !f.getName.startsWith(".") && !f.getName.startsWith("_")) assert(partFiles.length === 1) val orcFilePath = new Path(partFiles.head.getAbsolutePath) val readerOptions = OrcFile.readerOptions(new Configuration()) val reader = OrcFile.createReader(orcFilePath, readerOptions) var recordReader: RecordReaderImpl = null try { recordReader = reader.rows.asInstanceOf[RecordReaderImpl] // BloomFilter array is created for all types; `struct`, int (`a`), string 
(`b`) val sargColumns = Array(true, true, true) val orcIndex = recordReader.readRowIndex(0, null, sargColumns) // Check the types and counts of bloom filters assert(orcIndex.getBloomFilterKinds.forall(_ === bloomFilterKind)) assert(orcIndex.getBloomFilterIndex.forall(_.getBloomFilterCount > 0)) } finally { if (recordReader != null) { recordReader.close() } } } } } protected def testSelectiveDictionaryEncoding(isSelective: Boolean, isHiveOrc: Boolean): Unit = { val tableName = "orcTable" withTempDir { dir => withTable(tableName) { val sqlStatement = orcImp match { case "native" => s""" |CREATE TABLE $tableName (zipcode STRING, uniqColumn STRING, value DOUBLE) |USING ORC |OPTIONS ( | path '${dir.toURI}', | orc.dictionary.key.threshold '1.0', | orc.column.encoding.direct 'uniqColumn' |) """.stripMargin case "hive" => s""" |CREATE TABLE $tableName (zipcode STRING, uniqColumn STRING, value DOUBLE) |STORED AS ORC |LOCATION '${dir.toURI}' |TBLPROPERTIES ( | orc.dictionary.key.threshold '1.0', | hive.exec.orc.dictionary.key.size.threshold '1.0', | orc.column.encoding.direct 'uniqColumn' |) """.stripMargin case impl => throw new UnsupportedOperationException(s"Unknown ORC implementation: $impl") } sql(sqlStatement) sql(s"INSERT INTO $tableName VALUES ('94086', 'random-uuid-string', 0.0)") val partFiles = dir.listFiles() .filter(f => f.isFile && !f.getName.startsWith(".") && !f.getName.startsWith("_")) assert(partFiles.length === 1) val orcFilePath = new Path(partFiles.head.getAbsolutePath) val readerOptions = OrcFile.readerOptions(new Configuration()) val reader = OrcFile.createReader(orcFilePath, readerOptions) var recordReader: RecordReaderImpl = null try { recordReader = reader.rows.asInstanceOf[RecordReaderImpl] // Check the kind val stripe = recordReader.readStripeFooter(reader.getStripes.get(0)) // The encodings are divided into direct or dictionary-based categories and // further refined as to whether they use RLE v1 or v2. RLE v1 is used by // Hive 0.11 and RLE v2 is introduced in Hive 0.12 ORC with more improvements. // For more details, see https://orc.apache.org/specification/ assert(stripe.getColumns(1).getKind === DICTIONARY_V2) if (isSelective || isHiveOrc) { assert(stripe.getColumns(2).getKind === DIRECT_V2) } else { assert(stripe.getColumns(2).getKind === DICTIONARY_V2) } // Floating point types are stored with DIRECT encoding in IEEE 754 floating // point bit layout. 
assert(stripe.getColumns(3).getKind === DIRECT) } finally { if (recordReader != null) { recordReader.close() } } } } } protected def testMergeSchemasInParallel( ignoreCorruptFiles: Boolean, schemaReader: (Seq[FileStatus], Configuration, Boolean) => Seq[StructType]): Unit = { withSQLConf( SQLConf.IGNORE_CORRUPT_FILES.key -> ignoreCorruptFiles.toString, SQLConf.ORC_IMPLEMENTATION.key -> orcImp) { withTempDir { dir => val fs = FileSystem.get(spark.sessionState.newHadoopConf()) val basePath = dir.getCanonicalPath val path1 = new Path(basePath, "first") val path2 = new Path(basePath, "second") val path3 = new Path(basePath, "third") spark.range(1).toDF("a").coalesce(1).write.orc(path1.toString) spark.range(1, 2).toDF("b").coalesce(1).write.orc(path2.toString) spark.range(2, 3).toDF("a").coalesce(1).write.json(path3.toString) val fileStatuses = Seq(fs.listStatus(path1), fs.listStatus(path2), fs.listStatus(path3)).flatten val schema = SchemaMergeUtils.mergeSchemasInParallel( spark, Map.empty, fileStatuses, schemaReader) assert(schema.isDefined) assert(schema.get == StructType(Seq( StructField("a", LongType, true), StructField("b", LongType, true)))) } } } protected def testMergeSchemasInParallel( schemaReader: (Seq[FileStatus], Configuration, Boolean) => Seq[StructType]): Unit = { testMergeSchemasInParallel(true, schemaReader) val exception = intercept[SparkException] { testMergeSchemasInParallel(false, schemaReader) }.getCause assert(exception.getCause.getMessage.contains("Could not read footer for file")) } test("create temporary orc table") { checkAnswer(sql("SELECT COUNT(*) FROM normal_orc_source"), Row(10)) checkAnswer( sql("SELECT * FROM normal_orc_source"), (1 to 10).map(i => Row(i, s"part-$i"))) checkAnswer( sql("SELECT * FROM normal_orc_source where intField > 5"), (6 to 10).map(i => Row(i, s"part-$i"))) checkAnswer( sql("SELECT COUNT(intField), stringField FROM normal_orc_source GROUP BY stringField"), (1 to 10).map(i => Row(1, s"part-$i"))) } test("create temporary orc table as") { checkAnswer(sql("SELECT COUNT(*) FROM normal_orc_as_source"), Row(10)) checkAnswer( sql("SELECT * FROM normal_orc_source"), (1 to 10).map(i => Row(i, s"part-$i"))) checkAnswer( sql("SELECT * FROM normal_orc_source WHERE intField > 5"), (6 to 10).map(i => Row(i, s"part-$i"))) checkAnswer( sql("SELECT COUNT(intField), stringField FROM normal_orc_source GROUP BY stringField"), (1 to 10).map(i => Row(1, s"part-$i"))) } test("appending insert") { sql("INSERT INTO TABLE normal_orc_source SELECT * FROM orc_temp_table WHERE intField > 5") checkAnswer( sql("SELECT * FROM normal_orc_source"), (1 to 5).map(i => Row(i, s"part-$i")) ++ (6 to 10).flatMap { i => Seq.fill(2)(Row(i, s"part-$i")) }) } test("overwrite insert") { sql( """INSERT OVERWRITE TABLE normal_orc_as_source |SELECT * FROM orc_temp_table WHERE intField > 5 """.stripMargin) checkAnswer( sql("SELECT * FROM normal_orc_as_source"), (6 to 10).map(i => Row(i, s"part-$i"))) } test("write null values") { sql("DROP TABLE IF EXISTS orcNullValues") val df = sql( """ |SELECT | CAST(null as TINYINT) as c0, | CAST(null as SMALLINT) as c1, | CAST(null as INT) as c2, | CAST(null as BIGINT) as c3, | CAST(null as FLOAT) as c4, | CAST(null as DOUBLE) as c5, | CAST(null as DECIMAL(7,2)) as c6, | CAST(null as TIMESTAMP) as c7, | CAST(null as DATE) as c8, | CAST(null as STRING) as c9, | CAST(null as VARCHAR(10)) as c10 |FROM orc_temp_table limit 1 """.stripMargin) df.write.format("orc").saveAsTable("orcNullValues") checkAnswer( sql("SELECT * FROM orcNullValues"), 
Row.fromSeq(Seq.fill(11)(null))) sql("DROP TABLE IF EXISTS orcNullValues") } test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") { val conf = spark.sessionState.conf val option = new OrcOptions(Map(COMPRESS.getAttribute.toUpperCase(Locale.ROOT) -> "NONE"), conf) assert(option.compressionCodec == "NONE") } test("SPARK-21839: Add SQL config for ORC compression") { val conf = spark.sessionState.conf // Test if the default of spark.sql.orc.compression.codec is snappy assert(new OrcOptions(Map.empty[String, String], conf).compressionCodec == "SNAPPY") // OrcOptions's parameters have a higher priority than SQL configuration. // `compression` -> `orc.compression` -> `spark.sql.orc.compression.codec` withSQLConf(SQLConf.ORC_COMPRESSION.key -> "uncompressed") { assert(new OrcOptions(Map.empty[String, String], conf).compressionCodec == "NONE") val map1 = Map(COMPRESS.getAttribute -> "zlib") val map2 = Map(COMPRESS.getAttribute -> "zlib", "compression" -> "lzo") assert(new OrcOptions(map1, conf).compressionCodec == "ZLIB") assert(new OrcOptions(map2, conf).compressionCodec == "LZO") } // Test all the valid options of spark.sql.orc.compression.codec Seq("NONE", "UNCOMPRESSED", "SNAPPY", "ZLIB", "LZO", "ZSTD", "LZ4").foreach { c => withSQLConf(SQLConf.ORC_COMPRESSION.key -> c) { val expected = if (c == "UNCOMPRESSED") "NONE" else c assert(new OrcOptions(Map.empty[String, String], conf).compressionCodec == expected) } } } // SPARK-28885 String value is not allowed to be stored as numeric type with // ANSI store assignment policy. ignore("SPARK-23340 Empty float/double array columns raise EOFException") { Seq(Seq(Array.empty[Float]).toDF(), Seq(Array.empty[Double]).toDF()).foreach { df => withTempPath { path => df.write.format("orc").save(path.getCanonicalPath) checkAnswer(spark.read.orc(path.getCanonicalPath), df) } } } test("SPARK-24322 Fix incorrect workaround for bug in java.sql.Timestamp") { withTempPath { path => val ts = Timestamp.valueOf("1900-05-05 12:34:56.000789") Seq(ts).toDF.write.orc(path.getCanonicalPath) checkAnswer(spark.read.orc(path.getCanonicalPath), Row(ts)) } } test("Write Spark version into ORC file metadata") { withTempPath { path => spark.range(1).repartition(1).write.orc(path.getCanonicalPath) val partFiles = path.listFiles() .filter(f => f.isFile && !f.getName.startsWith(".") && !f.getName.startsWith("_")) assert(partFiles.length === 1) val orcFilePath = new Path(partFiles.head.getAbsolutePath) val readerOptions = OrcFile.readerOptions(new Configuration()) Utils.tryWithResource(OrcFile.createReader(orcFilePath, readerOptions)) { reader => val version = UTF_8.decode(reader.getMetadataValue(SPARK_VERSION_METADATA_KEY)).toString assert(version === SPARK_VERSION_SHORT) } } } test("SPARK-11412 test orc merge schema option") { val conf = spark.sessionState.conf // Test if the default of spark.sql.orc.mergeSchema is false assert(new OrcOptions(Map.empty[String, String], conf).mergeSchema == false) // OrcOptions's parameters have a higher priority than SQL configuration. 
// `mergeSchema` -> `spark.sql.orc.mergeSchema` withSQLConf(SQLConf.ORC_SCHEMA_MERGING_ENABLED.key -> "true") { val map1 = Map(OrcOptions.MERGE_SCHEMA -> "true") val map2 = Map(OrcOptions.MERGE_SCHEMA -> "false") assert(new OrcOptions(map1, conf).mergeSchema == true) assert(new OrcOptions(map2, conf).mergeSchema == false) } withSQLConf(SQLConf.ORC_SCHEMA_MERGING_ENABLED.key -> "false") { val map1 = Map(OrcOptions.MERGE_SCHEMA -> "true") val map2 = Map(OrcOptions.MERGE_SCHEMA -> "false") assert(new OrcOptions(map1, conf).mergeSchema == true) assert(new OrcOptions(map2, conf).mergeSchema == false) } } test("SPARK-11412 test enabling/disabling schema merging") { def testSchemaMerging(expectedColumnNumber: Int): Unit = { withTempDir { dir => val basePath = dir.getCanonicalPath spark.range(0, 10).toDF("a").write.orc(new Path(basePath, "foo=1").toString) spark.range(0, 10).toDF("b").write.orc(new Path(basePath, "foo=2").toString) assert(spark.read.orc(basePath).columns.length === expectedColumnNumber) // OrcOptions.MERGE_SCHEMA has higher priority assert(spark.read.option(OrcOptions.MERGE_SCHEMA, true) .orc(basePath).columns.length === 3) assert(spark.read.option(OrcOptions.MERGE_SCHEMA, false) .orc(basePath).columns.length === 2) } } withSQLConf(SQLConf.ORC_SCHEMA_MERGING_ENABLED.key -> "true") { testSchemaMerging(3) } withSQLConf(SQLConf.ORC_SCHEMA_MERGING_ENABLED.key -> "false") { testSchemaMerging(2) } } test("SPARK-11412 test enabling/disabling schema merging with data type conflicts") { withTempDir { dir => val basePath = dir.getCanonicalPath spark.range(0, 10).toDF("a").write.orc(new Path(basePath, "foo=1").toString) spark.range(0, 10).map(s => s"value_$s").toDF("a") .write.orc(new Path(basePath, "foo=2").toString) // with schema merging, there should throw exception withSQLConf(SQLConf.ORC_SCHEMA_MERGING_ENABLED.key -> "true") { val exception = intercept[SparkException] { spark.read.orc(basePath).columns.length }.getCause val innerMessage = orcImp match { case "native" => exception.getMessage case "hive" => exception.getCause.getMessage case impl => throw new UnsupportedOperationException(s"Unknown ORC implementation: $impl") } assert(innerMessage.contains("Failed to merge incompatible data types")) } // it is ok if no schema merging withSQLConf(SQLConf.ORC_SCHEMA_MERGING_ENABLED.key -> "false") { assert(spark.read.orc(basePath).columns.length === 2) } } } test("SPARK-11412 test schema merging with corrupt files") { withSQLConf(SQLConf.ORC_SCHEMA_MERGING_ENABLED.key -> "true") { withTempDir { dir => val basePath = dir.getCanonicalPath spark.range(0, 10).toDF("a").write.orc(new Path(basePath, "foo=1").toString) spark.range(0, 10).toDF("b").write.orc(new Path(basePath, "foo=2").toString) spark.range(0, 10).toDF("c").write.json(new Path(basePath, "foo=3").toString) // ignore corrupt files withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") { assert(spark.read.orc(basePath).columns.length === 3) } // don't ignore corrupt files withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") { val exception = intercept[SparkException] { spark.read.orc(basePath).columns.length }.getCause assert(exception.getCause.getMessage.contains("Could not read footer for file")) } } } } test("SPARK-31238: compatibility with Spark 2.4 in reading dates") { Seq(false, true).foreach { vectorized => withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> vectorized.toString) { checkAnswer( readResourceOrcFile("test-data/before_1582_date_v2_4.snappy.orc"), Row(java.sql.Date.valueOf("1200-01-01"))) } } } 
test("SPARK-31238, SPARK-31423: rebasing dates in write") { withTempPath { dir => val path = dir.getAbsolutePath Seq("1001-01-01", "1582-10-10").toDF("dateS") .select($"dateS".cast("date").as("date")) .write .orc(path) Seq(false, true).foreach { vectorized => withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> vectorized.toString) { checkAnswer( spark.read.orc(path), Seq(Row(Date.valueOf("1001-01-01")), Row(Date.valueOf("1582-10-15")))) } } } } test("SPARK-31284: compatibility with Spark 2.4 in reading timestamps") { Seq(false, true).foreach { vectorized => withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> vectorized.toString) { checkAnswer( readResourceOrcFile("test-data/before_1582_ts_v2_4.snappy.orc"), Row(java.sql.Timestamp.valueOf("1001-01-01 01:02:03.123456"))) } } } test("SPARK-31284, SPARK-31423: rebasing timestamps in write") { withTempPath { dir => val path = dir.getAbsolutePath Seq("1001-01-01 01:02:03.123456", "1582-10-10 11:12:13.654321").toDF("tsS") .select($"tsS".cast("timestamp").as("ts")) .write .orc(path) Seq(false, true).foreach { vectorized => withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> vectorized.toString) { checkAnswer( spark.read.orc(path), Seq( Row(java.sql.Timestamp.valueOf("1001-01-01 01:02:03.123456")), Row(java.sql.Timestamp.valueOf("1582-10-15 11:12:13.654321")))) } } } } test("SPARK-35612: Support LZ4 compression in ORC data source") { withTempPath { dir => val path = dir.getAbsolutePath spark.range(3).write.option("compression", "lz4").orc(path) checkAnswer(spark.read.orc(path), Seq(Row(0), Row(1), Row(2))) val files = OrcUtils.listOrcFiles(path, spark.sessionState.newHadoopConf()) assert(files.nonEmpty && files.forall(_.getName.contains("lz4"))) } } } class OrcSourceSuite extends OrcSuite with SharedSparkSession { protected override def beforeAll(): Unit = { super.beforeAll() sql( s"""CREATE TABLE normal_orc( | intField INT, | stringField STRING |) |USING ORC |LOCATION '${orcTableAsDir.toURI}' """.stripMargin) sql( s"""INSERT INTO TABLE normal_orc |SELECT intField, stringField FROM orc_temp_table """.stripMargin) spark.sql( s"""CREATE TEMPORARY VIEW normal_orc_source |USING ORC |OPTIONS ( | PATH '${new File(orcTableAsDir.getAbsolutePath).toURI}' |) """.stripMargin) spark.sql( s"""CREATE TEMPORARY VIEW normal_orc_as_source |USING ORC |OPTIONS ( | PATH '${new File(orcTableAsDir.getAbsolutePath).toURI}' |) """.stripMargin) } test("Check BloomFilter creation") { testBloomFilterCreation(Kind.BLOOM_FILTER_UTF8) // After ORC-101 } test("Enforce direct encoding column-wise selectively") { testSelectiveDictionaryEncoding(isSelective = true, isHiveOrc = false) } test("SPARK-11412 read and merge orc schemas in parallel") { testMergeSchemasInParallel(OrcUtils.readOrcSchemasInParallel) } test("SPARK-31580: Read a file written before ORC-569") { // Test ORC file came from ORC-621 val df = readResourceOrcFile("test-data/TestStringDictionary.testRowIndex.orc") assert(df.where("str < 'row 001000'").count() === 1000) } test("SPARK-33978: Write and read a file with ZSTD compression") { withTempPath { dir => val path = dir.getAbsolutePath spark.range(3).write.option("compression", "zstd").orc(path) checkAnswer(spark.read.orc(path), Seq(Row(0), Row(1), Row(2))) val files = OrcUtils.listOrcFiles(path, spark.sessionState.newHadoopConf()) assert(files.nonEmpty && files.forall(_.getName.contains("zstd"))) } } test("SPARK-34897: Support reconcile schemas based on index after nested column pruning") { withTable("t1") { spark.sql( """ |CREATE TABLE t1 ( | _col0 
INT, | _col1 STRING, | _col2 STRUCT<c1: STRING, c2: STRING, c3: STRING, c4: BIGINT>) |USING ORC |""".stripMargin) spark.sql("INSERT INTO t1 values(1, '2', struct('a', 'b', 'c', 10L))") checkAnswer(spark.sql("SELECT _col0, _col2.c1 FROM t1"), Seq(Row(1, "a"))) } } test("SPARK-36663: OrcUtils.toCatalystSchema should correctly handle " + "a column name which consists of only numbers") { withTempPath { dir => val path = dir.getAbsolutePath spark.sql("SELECT 'a' as `1`, 'b' as `2`, 'c' as `3`").write.orc(path) val df = spark.read.orc(path) checkAnswer(df, Row("a", "b", "c")) assert(df.schema.toArray === Array( StructField("1", StringType), StructField("2", StringType), StructField("3", StringType))) } // test for struct in struct withTempPath { dir => val path = dir.getAbsolutePath spark.sql( "SELECT 'a' as `10`, named_struct('20', 'b', '30', named_struct('40', 'c')) as `50`") .write.orc(path) val df = spark.read.orc(path) checkAnswer(df, Row("a", Row("b", Row("c")))) assert(df.schema.toArray === Array( StructField("10", StringType), StructField("50", StructType( StructField("20", StringType) :: StructField("30", StructType( StructField("40", StringType) :: Nil)) :: Nil)))) } // test for struct in array withTempPath { dir => val path = dir.getAbsolutePath spark.sql("SELECT array(array(named_struct('123', 'a'), named_struct('123', 'b'))) as `789`") .write.orc(path) val df = spark.read.orc(path) checkAnswer(df, Row(Seq(Seq(Row("a"), Row("b"))))) assert(df.schema.toArray === Array( StructField("789", ArrayType( ArrayType( StructType( StructField("123", StringType) :: Nil)))))) } // test for struct in map withTempPath { dir => val path = dir.getAbsolutePath spark.sql( """ |SELECT | map( | named_struct('123', 'a'), | map( | named_struct('456', 'b'), | named_struct('789', 'c'))) as `012`""".stripMargin).write.orc(path) val df = spark.read.orc(path) checkAnswer(df, Row(Map(Row("a") -> Map(Row("b") -> Row("c"))))) assert(df.schema.toArray === Array( StructField("012", MapType( StructType( StructField("123", StringType) :: Nil), MapType( StructType( StructField("456", StringType) :: Nil), StructType( StructField("789", StringType) :: Nil)))))) } // test for deeply nested struct with complex types withTempPath { dir => val path = dir.getAbsolutePath spark.sql( """ |SELECT | named_struct('123', | array( | map( | named_struct('456', 'a'), | named_struct('789', 'b')))) as `1000`, | named_struct('123', | map( | array(named_struct('456', 'a')), | array(named_struct('789', 'b')))) as `2000`, | array( | named_struct('123', | map( | named_struct('456', 'a'), | named_struct('789', 'b')))) as `3000`, | array( | map( | named_struct('123', 'a'), | named_struct('456', 'b'))) as `4000`, | map( | named_struct('123', | array( | named_struct('456', 'a'))), | named_struct('789', | array( | named_struct('012', 'b')))) as `5000`, | map( | array( | named_struct('123', 'a')), | array( | named_struct('456', 'b'))) as `6000` """.stripMargin).write.orc(path) val df = spark.read.orc(path) checkAnswer(df, Row( Row(Seq(Map(Row("a") -> Row("b")))), Row(Map(Seq(Row("a")) -> Seq(Row("b")))), Seq(Row(Map(Row("a") -> Row("b")))), Seq(Map(Row("a") -> Row("b"))), Map(Row(Seq(Row("a"))) -> Row(Seq(Row("b")))), Map(Seq(Row("a")) -> Seq(Row("b"))))) assert(df.schema.toArray === Array( StructField("1000", StructType( StructField("123", ArrayType( MapType( StructType( StructField("456", StringType) :: Nil), StructType( StructField("789", StringType) :: Nil)))) :: Nil)), StructField("2000", StructType( StructField("123", MapType( ArrayType( 
StructType( StructField("456", StringType) :: Nil)), ArrayType( StructType( StructField("789", StringType) :: Nil)))) :: Nil)), StructField("3000", ArrayType( StructType( StructField("123", MapType( StructType( StructField("456", StringType) :: Nil), StructType( StructField("789", StringType) :: Nil))) :: Nil))), StructField("4000", ArrayType( MapType( StructType( StructField("123", StringType) :: Nil), StructType( StructField("456", StringType) :: Nil)))), StructField("5000", MapType( StructType( StructField("123", ArrayType( StructType( StructField("456", StringType) :: Nil))) :: Nil), StructType( StructField("789", ArrayType( StructType( StructField("012", StringType) :: Nil))) :: Nil))), StructField("6000", MapType( ArrayType( StructType( StructField("123", StringType) :: Nil)), ArrayType( StructType( StructField("456", StringType) :: Nil)))))) } } }
taroplus/spark
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcSourceSuite.scala
Scala
apache-2.0
29,543
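Outside the test harness, the options exercised above (per-write compression and schema merging) map directly onto the `DataFrameReader`/`DataFrameWriter` API. A minimal local sketch:

```scala
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().master("local[1]").appName("orc-options").getOrCreate()

val dir = java.nio.file.Files.createTempDirectory("orc-options").toString

// Per-write compression, as in the LZ4/ZSTD tests above.
spark.range(3).write.option("compression", "zstd").orc(s"$dir/foo=1")
spark.range(3).selectExpr("id AS other").write.orc(s"$dir/foo=2")

// Schema merging across partition directories, as in the SPARK-11412 tests.
spark.read.option("mergeSchema", "true").orc(dir).printSchema()

spark.stop()
```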
package se.nimsa.sbx.app.routing import java.util.UUID import akka.http.scaladsl.model.StatusCodes._ import akka.http.scaladsl.server._ import org.scalatest.{FlatSpecLike, Matchers} import se.nimsa.sbx.anonymization.{AnonymizationProfile, ConfidentialityOption} import se.nimsa.sbx.anonymization.AnonymizationProtocol._ import se.nimsa.sbx.box.BoxProtocol._ import se.nimsa.sbx.dicom.DicomHierarchy.Image import se.nimsa.sbx.storage.RuntimeStorage import se.nimsa.sbx.util.FutureUtil.await import se.nimsa.sbx.util.TestUtil import scala.concurrent.Future class BoxRoutesTest extends { val dbConfig = TestUtil.createTestDb("boxroutestest") val storage = new RuntimeStorage } with FlatSpecLike with Matchers with RoutesTestBase { val profile = AnonymizationProfile(Seq(ConfidentialityOption.BASIC_PROFILE)) override def afterEach(): Unit = await(Future.sequence(Seq( metaDataDao.clear(), seriesTypeDao.clear(), propertiesDao.clear(), boxDao.clear() ))) def addPollBox(name: String): Box = PostAsAdmin("/api/boxes/createconnection", RemoteBoxConnectionData(name, profile)) ~> routes ~> check { status should be(Created) val response = responseAs[Box] response } def addPushBox(name: String): Unit = addPushBox(name, "http://some.url/api/box/" + UUID.randomUUID()) def addPushBox(name: String, url: String): Unit = PostAsAdmin("/api/boxes/connect", RemoteBox(name, url, profile)) ~> routes ~> check { status should be(Created) val box = responseAs[Box] box.sendMethod should be(BoxSendMethod.PUSH) box.name should be(name) box } "Box routes" should "return a success message when asked to generate a new base url" in { addPollBox("hosp") } it should "return a bad request message when asking to generate a new base url with a malformed request body" in { val malformedEntity = Seq.empty[Box] PostAsAdmin("/api/boxes/createconnection", malformedEntity) ~> Route.seal(routes) ~> check { status should be(BadRequest) } } it should "return 201 Created when adding two poll boxes with the same name" in { addPollBox("hosp") PostAsAdmin("/api/boxes/createconnection", RemoteBoxConnectionData("hosp", profile)) ~> Route.seal(routes) ~> check { status shouldBe Created } GetAsUser("/api/boxes") ~> routes ~> check { responseAs[List[Box]] should have length 1 } } it should "return 400 bad request message when adding two boxes, one push and one poll, with the same name" in { addPushBox("mybox") PostAsAdmin("/api/boxes/createconnection", RemoteBoxConnectionData("mybox", profile)) ~> Route.seal(routes) ~> check { status should be(BadRequest) } } it should "return a success message when asked to add a remote box" in { addPushBox("uni") } it should "return 201 Created when adding two push boxes with the same name and url" in { val url = "http://some.url/api/box/" + UUID.randomUUID() addPushBox("mybox", url) addPushBox("mybox", url) GetAsUser("/api/boxes") ~> routes ~> check { responseAs[List[Box]] should have length 1 } } it should "return 400 bad request when adding two push boxes with the same name different urls" in { addPushBox("mybox") PostAsAdmin("/api/boxes/connect", RemoteBox("mybox", "http://some.url/api/box/" + UUID.randomUUID(), profile)) ~> routes ~> check { status shouldBe BadRequest } } it should "return 201 Created when adding two push boxes with different names but the same urls" in { val url = "http://some.url/api/box/" + UUID.randomUUID() addPushBox("mybox1", url) addPushBox("mybox2", url) GetAsUser("/api/boxes") ~> routes ~> check { responseAs[List[Box]] should have length 2 } } it should "return a bad request message when 
asked to add a remote box with a malformed base url" in { PostAsAdmin("/api/boxes/connect", RemoteBox("uni2", "", profile)) ~> Route.seal(routes) ~> check { status should be(BadRequest) } PostAsAdmin("/api/boxes/connect", RemoteBox("uni2", "malformed/url", profile)) ~> Route.seal(routes) ~> check { status should be(BadRequest) } } it should "return a list of two boxes when listing boxes" in { addPollBox("hosp") addPushBox("uni") GetAsUser("/api/boxes") ~> routes ~> check { val boxes = responseAs[List[Box]] boxes.size should be(2) } } it should "return a list of one boxes when listing boxes with page size set to one" in { addPollBox("hosp") addPushBox("uni") GetAsUser("/api/boxes?startindex=0&count=1") ~> routes ~> check { val boxes = responseAs[List[Box]] boxes.size should be(1) } } it should "return a no content message when asked to send images" in { val box1 = addPollBox("hosp") PostAsAdmin(s"/api/boxes/${box1.id}/send", BulkAnonymizationData(profile, Seq(ImageTagValues(1, Seq.empty)))) ~> routes ~> check { status should be(NoContent) } } it should "return a no content message when asked to send images with empty images list" in { val box1 = addPollBox("hosp") PostAsAdmin(s"/api/boxes/${box1.id}/send", BulkAnonymizationData(profile, Seq.empty[ImageTagValues])) ~> routes ~> check { status should be(NoContent) } } it should "return a not found message when asked to send images with unknown box id" in { PostAsAdmin("/api/boxes/999/send", BulkAnonymizationData(profile, Seq(ImageTagValues(1, Seq.empty)))) ~> Route.seal(routes) ~> check { status should be(NotFound) } } it should "support removing a box" in { val box1 = addPollBox("hosp1") val box2 = addPollBox("hosp2") DeleteAsAdmin("/api/boxes/" + box1.id) ~> routes ~> check { status should be(NoContent) } DeleteAsAdmin("/api/boxes/" + box2.id) ~> routes ~> check { status should be(NoContent) } } it should "return a no content message when asked to remove a box that does not exist" in { DeleteAsAdmin("/api/boxes/999") ~> routes ~> check { status should be(NoContent) } } it should "return a non-empty result when listing outgoing entries" in { val box1 = addPollBox("hosp") PostAsAdmin(s"/api/boxes/${box1.id}/send", BulkAnonymizationData(profile, Seq(ImageTagValues(1, Seq.empty)))) ~> routes ~> check { status should be(NoContent) } GetAsUser("/api/boxes/outgoing") ~> routes ~> check { status should be(OK) responseAs[List[OutgoingTransaction]].length should be > 0 } } it should "support listing incoming entries" in { await(boxDao.insertIncomingTransaction(IncomingTransaction(-1, 1, "some box", 1, 3, 3, 4, System.currentTimeMillis(), System.currentTimeMillis(), TransactionStatus.WAITING))) await(boxDao.insertIncomingTransaction(IncomingTransaction(-1, 1, "some box", 2, 3, 3, 5, System.currentTimeMillis(), System.currentTimeMillis(), TransactionStatus.WAITING))) GetAsUser("/api/boxes/incoming") ~> routes ~> check { responseAs[List[IncomingTransaction]].size should be(2) } } it should "support removing incoming entries" in { val entry = await(boxDao.insertIncomingTransaction(IncomingTransaction(-1, 1, "some box", 2, 3, 3, 4, System.currentTimeMillis(), System.currentTimeMillis(), TransactionStatus.WAITING))) DeleteAsUser(s"/api/boxes/incoming/${entry.id}") ~> routes ~> check { status should be(NoContent) } GetAsUser("/api/boxes/incoming") ~> routes ~> check { responseAs[List[IncomingTransaction]].size should be(0) } } it should "support removing outgoing entries" in { val entry = await(boxDao.insertOutgoingTransaction(OutgoingTransaction(1, 1, 
"some box", profile, 0, 1, 1000, 1000, TransactionStatus.WAITING))) await(boxDao.insertOutgoingImage(OutgoingImage(-1, entry.id, 1, 1, sent = false))) DeleteAsUser(s"/api/boxes/outgoing/${entry.id}") ~> routes ~> check { status should be(NoContent) } GetAsUser("/api/boxes/outgoing") ~> routes ~> check { responseAs[List[OutgoingTransaction]].size should be(0) } await(boxDao.listOutgoingImages) shouldBe empty } it should "support listing images corresponding to an incoming entry" in { val entry = { val (_, (_, _), (_, _, _, _), (dbImage1, dbImage2, _, _, _, _, _, _)) = await(TestUtil.insertMetaData(metaDataDao)) val entry = await(boxDao.insertIncomingTransaction(IncomingTransaction(-1, 1, "some box", 2, 3, 3, 4, System.currentTimeMillis(), System.currentTimeMillis(), TransactionStatus.WAITING))) await(boxDao.insertIncomingImage(IncomingImage(-1, entry.id, dbImage1.id, 1, overwrite = false))) await(boxDao.insertIncomingImage(IncomingImage(-1, entry.id, dbImage2.id, 2, overwrite = false))) entry } GetAsUser(s"/api/boxes/incoming/${entry.id}/images") ~> routes ~> check { status should be(OK) responseAs[List[Image]].length should be(2) } } it should "only list images corresponding to an incoming entry that exists" in { val entry = { val (_, (_, _), (_, _, _, _), (dbImage1, dbImage2, _, _, _, _, _, _)) = await(TestUtil.insertMetaData(metaDataDao)) val entry = await(boxDao.insertIncomingTransaction(IncomingTransaction(-1, 1, "some box", 2, 3, 3, 4, System.currentTimeMillis(), System.currentTimeMillis(), TransactionStatus.WAITING))) await(boxDao.insertIncomingImage(IncomingImage(-1, entry.id, dbImage1.id, 1, overwrite = false))) await(boxDao.insertIncomingImage(IncomingImage(-1, entry.id, dbImage2.id, 2, overwrite = false))) await(boxDao.insertIncomingImage(IncomingImage(-1, entry.id, 666, 3, overwrite = false))) entry } GetAsUser(s"/api/boxes/incoming/${entry.id}/images") ~> routes ~> check { status should be(OK) responseAs[List[Image]].length should be(2) } } it should "support listing images corresponding to an outgoing entry" in { val entry = { val (_, (_, _), (_, _, _, _), (dbImage1, dbImage2, _, _, _, _, _, _)) = await(TestUtil.insertMetaData(metaDataDao)) val entry = await(boxDao.insertOutgoingTransaction(OutgoingTransaction(-1, 1, "some box", profile, 3, 4, System.currentTimeMillis(), System.currentTimeMillis(), TransactionStatus.WAITING))) await(boxDao.insertOutgoingImage(OutgoingImage(-1, entry.id, dbImage1.id, 1, sent = false))) await(boxDao.insertOutgoingImage(OutgoingImage(-1, entry.id, dbImage2.id, 2, sent = false))) entry } GetAsUser(s"/api/boxes/outgoing/${entry.id}/images") ~> routes ~> check { status should be(OK) responseAs[List[Image]].length should be(2) } } it should "only list images corresponding to an outgoing entry that exists" in { val entry = { val (_, (_, _), (_, _, _, _), (dbImage1, dbImage2, _, _, _, _, _, _)) = await(TestUtil.insertMetaData(metaDataDao)) val entry = await(boxDao.insertOutgoingTransaction(OutgoingTransaction(-1, 1, "some box", profile, 3, 4, System.currentTimeMillis(), System.currentTimeMillis(), TransactionStatus.WAITING))) await(boxDao.insertOutgoingImage(OutgoingImage(-1, entry.id, dbImage1.id, 1, sent = false))) await(boxDao.insertOutgoingImage(OutgoingImage(-1, entry.id, dbImage2.id, 2, sent = false))) await(boxDao.insertOutgoingImage(OutgoingImage(-1, entry.id, 666, 3, sent = false))) entry } GetAsUser(s"/api/boxes/outgoing/${entry.id}/images") ~> routes ~> check { status should be(OK) responseAs[List[Image]].length should be(2) } } it should 
"remove related image record in incoming when an image is deleted" in { val image = PostAsUser("/api/images", TestUtil.testImageFormData) ~> routes ~> check { status shouldBe Created responseAs[Image] } val entry = await(boxDao.insertIncomingTransaction(IncomingTransaction(-1, 1, "some box", 2, 3, 3, 4, System.currentTimeMillis(), System.currentTimeMillis(), TransactionStatus.WAITING))) val imageTransaction = await(boxDao.insertIncomingImage(IncomingImage(-1, entry.id, image.id, 1, overwrite = false))) GetAsUser(s"/api/boxes/incoming/${entry.id}/images") ~> routes ~> check { status shouldBe OK responseAs[List[Image]] should have length 1 } DeleteAsUser(s"/api/images/${imageTransaction.imageId}") ~> routes ~> check { status shouldBe NoContent } Thread.sleep(1000) // wait for ImageDeleted event to reach BoxServiceActor GetAsUser(s"/api/boxes/incoming/${entry.id}/images") ~> routes ~> check { status shouldBe OK responseAs[List[Image]] shouldBe empty } } it should "remove related image record in outgoing when an image is deleted" in { val image = PostAsUser("/api/images", TestUtil.testImageFormData) ~> routes ~> check { status shouldBe Created responseAs[Image] } val entry = await(boxDao.insertOutgoingTransaction(OutgoingTransaction(-1, 1, "some box", profile, 3, 4, System.currentTimeMillis(), System.currentTimeMillis(), TransactionStatus.WAITING))) val imageTransaction = await(boxDao.insertOutgoingImage(OutgoingImage(-1, entry.id, image.id, 1, sent = false))) GetAsUser(s"/api/boxes/outgoing/${entry.id}/images") ~> routes ~> check { status shouldBe OK responseAs[List[Image]] should have length 1 } DeleteAsUser(s"/api/images/${imageTransaction.imageId}") ~> routes ~> check { status shouldBe NoContent } Thread.sleep(1000) // wait for ImageDeleted event to reach BoxServiceActor GetAsUser(s"/api/boxes/outgoing/${entry.id}/images") ~> routes ~> check { status shouldBe OK responseAs[List[Image]] shouldBe empty } } }
slicebox/slicebox
src/test/scala/se/nimsa/sbx/app/routing/BoxRoutesTest.scala
Scala
apache-2.0
13,765
/* * Copyright 2012 Pellucid and Zenexity * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package datomisca import org.scalatest.{FlatSpec, Matchers} import scala.concurrent.Future import scala.concurrent.ExecutionContext.Implicits.global class MovieGraph2SampleSpec extends FlatSpec with Matchers with DatomicFixture with AwaitHelper { object MovieGraph2Schema { object ns { val actor = Namespace("actor") val role = Namespace("role") val movie = Namespace("movie") } val actorName = Attribute(ns.actor / "name", SchemaType.string, Cardinality.one) .withDoc("The name of the actor") val actorForRole = Attribute(ns.role / "actor", SchemaType.ref, Cardinality.one).withDoc("The actor for this role") val movieForRole = Attribute(ns.role / "movie", SchemaType.ref, Cardinality.one).withDoc("The movie in which this role appears") val character = Attribute(ns.role / "character", SchemaType.string, Cardinality.one).withDoc("The charcter name of this role") val movieTitle = Attribute(ns.movie / "title", SchemaType.string, Cardinality.one).withDoc("The title of the movie") val movieYear = Attribute(ns. movie / "year", SchemaType.long, Cardinality.one).withDoc("The year the movie was released") val txData = Seq( actorName, actorForRole, movieForRole, character, movieTitle, movieYear ) } object MovieGraph2Data { import MovieGraph2Schema._ val `Carrie-Ann Moss` = SchemaFact.add(DId(Partition.USER))(actorName -> "Carrie-Ann Moss") val `Hugo Weaving` = SchemaFact.add(DId(Partition.USER))(actorName -> "Hugo Weaving") val `Guy Peace` = SchemaFact.add(DId(Partition.USER))(actorName -> "Guy Pearce") val `Joe Pantoliano` = SchemaFact.add(DId(Partition.USER))(actorName -> "Joe Pantoliano") val actors = Seq(`Carrie-Ann Moss`, `Hugo Weaving`, `Guy Peace`, `Joe Pantoliano`) val `The Matrix` = ( SchemaEntity.newBuilder += (movieTitle -> "The Matrix") += (movieYear -> 1999) ) withId DId(Partition.USER) val `The Matrix Reloaded` = ( SchemaEntity.newBuilder += (movieTitle -> "The Matrix Reloaded") += (movieYear -> 2003) ) withId DId(Partition.USER) val Memento = ( SchemaEntity.newBuilder += (movieTitle -> "Memento") += (movieYear -> 2000) ) withId DId(Partition.USER) val movies = Seq(`The Matrix`, `The Matrix Reloaded`, Memento) val graphNodesTxData = actors ++ movies val graphEdgesTxData = Seq( (SchemaEntity.newBuilder += (actorForRole -> `Carrie-Ann Moss`.id) += (movieForRole -> `The Matrix`.id) += (character -> "Trinity") ) withId DId(Partition.USER), (SchemaEntity.newBuilder += (actorForRole -> `Carrie-Ann Moss`.id) += (movieForRole -> `The Matrix Reloaded`.id) += (character -> "Trinity") ) withId DId(Partition.USER), (SchemaEntity.newBuilder += (actorForRole -> `Carrie-Ann Moss`.id) += (movieForRole -> Memento.id) += (character -> "Natalie") ) withId DId(Partition.USER), (SchemaEntity.newBuilder += (actorForRole -> `Hugo Weaving`.id) += (movieForRole -> `The Matrix`.id) += (character -> "Agent Smith") ) withId DId(Partition.USER), (SchemaEntity.newBuilder += (actorForRole -> `Hugo Weaving`.id) += (movieForRole -> `The Matrix 
Reloaded`.id) += (character -> "Agent Smith") ) withId DId(Partition.USER), (SchemaEntity.newBuilder += (actorForRole -> `Guy Peace`.id) += (movieForRole -> Memento.id) += (character -> "Leonard Shelby") ) withId DId(Partition.USER), (SchemaEntity.newBuilder += (actorForRole -> `Joe Pantoliano`.id) += (movieForRole -> `The Matrix`.id) += (character -> "Cypher") ) withId DId(Partition.USER), (SchemaEntity.newBuilder += (actorForRole -> `Joe Pantoliano`.id) += (movieForRole -> Memento.id) += (character -> "Teddy Gammell") ) withId DId(Partition.USER) ) val txData = graphNodesTxData ++ graphEdgesTxData } object MovieGraph2Queries { import MovieGraph2Schema._ val queryFindMovieByTitle = Query(s""" [:find ?title ?year :in $$ ?title :where [?movie ${movieTitle} ?title] [?movie ${movieYear} ?year]] """) val queryFindMovieByTitlePrefix = Query(s""" [:find ?title ?year :in $$ ?prefix :where [?movie ${movieTitle} ?title] [?movie ${movieYear} ?year] [(.startsWith ^String ?title ?prefix)]] """) val queryFindActorsInTitle = Query(s""" [:find ?name :in $$ ?title :where [?movie ${movieTitle} ?title] [?role ${movieForRole} ?movie] [?role ${actorForRole} ?actor] [?actor ${actorName} ?name]] """) val queryFindTitlesAndRolesForActor = Query(s""" [:find ?role ?title :in $$ ?name :where [?actor ${actorName} ?name] [?role ${actorForRole} ?actor] [?role ${character} ?character] [?role ${movieForRole} ?movie] [?movie ${movieTitle} ?title]] """) val queryFindMoviesThatIncludeActorsInGivenMovie = Query(s""" [:find ?othertitle :in $$ ?title :where [?movie ${movieTitle} ?title] [?role1 ${movieForRole} ?movie1] [?role1 ${actorForRole} ?actor] [?role2 ${actorForRole} ?actor] [?role2 ${movieForRole} ?movie2] [?movie2 ${movieTitle} ?othertitle]] """) val queryFindAllMoviesWithRole = Query(s""" [:find ?title :in $$ ?character :where [?role ${character} ?character] [?role ${movieForRole} ?movie] [?movie ${movieTitle} ?title]] """) } "Movie Graph 2 Sample" should "run to completion" in withDatomicDB { implicit conn => import MovieGraph2Queries._ await { Datomic.transact(MovieGraph2Schema.txData) } await { Datomic.transact(MovieGraph2Data.txData) } val db = conn.database() Datomic.q(queryFindMovieByTitle, db, "The Matrix") should have size (1) Datomic.q(queryFindMovieByTitlePrefix, db, "The Matrix") should have size (2) Datomic.q(queryFindActorsInTitle, db, "Memento") should have size (3) Datomic.q(queryFindTitlesAndRolesForActor, db, "Carrie-Ann Moss") should have size (3) Datomic.q(queryFindMoviesThatIncludeActorsInGivenMovie, db, "The Matrix Reloaded") should have size (3) Datomic.q(queryFindAllMoviesWithRole, db, "Agent Smith") should have size (2) } }
Enalmada/datomisca
integration/src/it/scala/datomisca/MovieGraph2SampleSpec.scala
Scala
apache-2.0
7,345
object Moo {

  implicit class IdOps[V](self: V) {
    def left[A]: Either[V, A] = Left(self)
    def right[E]: Either[E, V] = Right(self)
  }

  implicit class EitherOps[E, A](self: Either[E, A]) {
    def flatMap[EE >: E, B](f: A => Either[EE, B]): Either[EE, B] = self match {
      case Left(e)  => Left(e)
      case Right(v) => f(v)
    }

    def map2[EE >: E, B, C](that: Either[EE, B])(f: (A, B) => C): Either[EE, C] = {
      self flatMap { a => that flatMap { b => f(a, b).right } }
    }
  }

  implicit class ListOps[E, A](self: List[A]) {
    def traverse[B](f: A => Either[E, B]): Either[E, List[B]] = {
      (self :\ List.empty[B].right[E])((x, evs) => (f(x) map2 evs) (_ :: _))
    }
  }

  def id[A](v: A): A = v

  implicit class ListEitherOps[E, A](self: List[Either[E, A]]) {
    def sequence(es: List[Either[E, A]]): Either[E, List[A]] =
      /*start*/self.traverse(id)/*end*/ // Type mismatched
  }
}
//Either[E, List[A]]
ilinum/intellij-scala
testdata/typeInference/bugs5/SCL8232.scala
Scala
apache-2.0
971
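// The SCL8232 fixture above marks `self.traverse(id)` as a type mismatch while the trailing
// comment records the expected result type, Either[E, List[A]]. The standalone sketch below
// (assumed names, standard-library Either only, not part of the test data) spells out the same
// sequence-via-traverse relationship the fixture exercises.
object SequenceViaTraverseSketch {

  // fold from the right: a Left element propagates, otherwise prepend to the accumulated list
  def traverse[E, A, B](xs: List[A])(f: A => Either[E, B]): Either[E, List[B]] =
    xs.foldRight(Right(Nil): Either[E, List[B]]) { (a, acc) =>
      for { b <- f(a); bs <- acc } yield b :: bs
    }

  // each element is already an Either, so traversing with identity just collects them
  def sequence[E, A](xs: List[Either[E, A]]): Either[E, List[A]] =
    traverse(xs)(identity)

  def main(args: Array[String]): Unit = {
    println(sequence(List(Right(1), Right(2))))     // Right(List(1, 2))
    println(sequence(List(Right(1), Left("boom")))) // Left(boom)
  }
}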
/***
 * Copyright 2014 Rackspace US, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.rackspace.com.papi.components.checker

class ValidatorException(msg : String, cause : Throwable) extends Throwable(msg, cause) {}
tylerroyal/api-checker
core/src/main/scala/com/rackspace/com/papi/components/checker/ValidatorException.scala
Scala
apache-2.0
766
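// A minimal usage sketch for the ValidatorException above. The caller below is hypothetical
// (loadCheckerSource and its path handling are not from the api-checker codebase); it only
// illustrates wrapping a lower-level failure so callers see one exception type that still
// carries the original cause.
import com.rackspace.com.papi.components.checker.ValidatorException

object ValidatorExceptionUsageSketch {
  def loadCheckerSource(path: String): String =
    try {
      val source = scala.io.Source.fromFile(path)
      try source.mkString finally source.close()
    } catch {
      case e: java.io.IOException =>
        throw new ValidatorException(s"Could not read checker definition at $path", e)
    }
}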
package com.twitter.finagle.memcached.unit.util

import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import com.twitter.finagle.memcached.util.Bufs.RichBuf
import com.twitter.io.Buf

@RunWith(classOf[JUnitRunner])
class BufsTest extends FunSuite {

  test("RichBuf.toInt") {
    val buf = Buf.Utf8(Int.MaxValue.toString)
    assert(RichBuf(buf).toInt === Int.MaxValue)
  }

  test("RichBuf.toLong") {
    val buf = Buf.Utf8(Long.MaxValue.toString)
    assert(RichBuf(buf).toLong === Long.MaxValue)
  }

  test("RichBuf.apply") {
    val str = "12345"
    val expectedBytes = str.getBytes
    val buf = RichBuf(Buf.Utf8("12345"))
    (1 until str.length) foreach { idx =>
      assert(buf(idx) === expectedBytes(idx))
    }
  }

  test("RichBuf.split on space") {
    val splits = RichBuf(Buf.Utf8("hello world")).split(' ')
    assert(splits(0) === Buf.Utf8("hello"))
    assert(splits(1) === Buf.Utf8("world"))
  }

  test("RichBuf.split on comma") {
    val splits = RichBuf(Buf.Utf8("hello,world")).split(',')
    assert(splits(0) === Buf.Utf8("hello"))
    assert(splits(1) === Buf.Utf8("world"))
  }

  test("RichBuf.startsWith") {
    val buf = RichBuf(Buf.Utf8("hello world"))
    assert(buf.startsWith(Buf.Utf8("")))
    assert(buf.startsWith(Buf.Utf8("h")))
    assert(buf.startsWith(Buf.Utf8("he")))
    assert(buf.startsWith(Buf.Utf8("hel")))
    assert(buf.startsWith(Buf.Utf8("hell")))
    assert(buf.startsWith(Buf.Utf8("hello")))
    assert(buf.startsWith(Buf.Utf8("hello ")))
    assert(buf.startsWith(Buf.Utf8("hello w")))
    assert(buf.startsWith(Buf.Utf8("hello wo")))
    assert(buf.startsWith(Buf.Utf8("hello wor")))
    assert(buf.startsWith(Buf.Utf8("hello worl")))
    assert(buf.startsWith(Buf.Utf8("hello world")))
    assert(false == buf.startsWith(Buf.Utf8("a")))
    assert(false == buf.startsWith(Buf.Utf8(" ")))
  }
}
suls/finagle
finagle-memcached/src/test/scala/com/twitter/finagle/memcached/unit/util/BufsTest.scala
Scala
apache-2.0
1,902
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.consumer import java.net.InetAddress import java.util.UUID import java.util.concurrent._ import java.util.concurrent.atomic._ import java.util.concurrent.locks.ReentrantLock import com.yammer.metrics.core.Gauge import kafka.api._ import kafka.client.ClientUtils import kafka.cluster._ import kafka.common._ import kafka.javaapi.consumer.ConsumerRebalanceListener import kafka.metrics._ import kafka.network.BlockingChannel import kafka.serializer._ import kafka.utils.CoreUtils.inLock import kafka.utils.ZkUtils._ import kafka.utils._ import org.I0Itec.zkclient.exception.ZkNodeExistsException import org.I0Itec.zkclient.{IZkChildListener, IZkDataListener, IZkStateListener} import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.security.JaasUtils import org.apache.kafka.common.utils.Time import org.apache.zookeeper.Watcher.Event.KeeperState import scala.collection._ import scala.collection.JavaConverters._ /** * This class handles the consumers interaction with zookeeper * * Directories: * 1. Consumer id registry: * /consumers/[group_id]/ids/[consumer_id] -> topic1,...topicN * A consumer has a unique consumer id within a consumer group. A consumer registers its id as an ephemeral znode * and puts all topics that it subscribes to as the value of the znode. The znode is deleted when the client is gone. * A consumer subscribes to event changes of the consumer id registry within its group. * * The consumer id is picked up from configuration, instead of the sequential id assigned by ZK. Generated sequential * ids are hard to recover during temporary connection loss to ZK, since it's difficult for the client to figure out * whether the creation of a sequential znode has succeeded or not. More details can be found at * (http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling) * * 2. Broker node registry: * /brokers/[0...N] --> { "host" : "host:port", * "topics" : {"topic1": ["partition1" ... "partitionN"], ..., * "topicN": ["partition1" ... "partitionN"] } } * This is a list of all present broker brokers. A unique logical node id is configured on each broker node. A broker * node registers itself on start-up and creates a znode with the logical node id under /brokers. The value of the znode * is a JSON String that contains (1) the host name and the port the broker is listening to, (2) a list of topics that * the broker serves, (3) a list of logical partitions assigned to each topic on the broker. * A consumer subscribes to event changes of the broker node registry. * * 3. Partition owner registry: * /consumers/[group_id]/owner/[topic]/[broker_id-partition_id] --> consumer_node_id * This stores the mapping before broker partitions and consumers. 
Each partition is owned by a unique consumer * within a consumer group. The mapping is reestablished after each rebalancing. * * 4. Consumer offset tracking: * /consumers/[group_id]/offsets/[topic]/[broker_id-partition_id] --> offset_counter_value * Each consumer tracks the offset of the latest message consumed for each partition. * */ @deprecated("This object has been deprecated and will be removed in a future release.", "0.11.0.0") private[kafka] object ZookeeperConsumerConnector { val shutdownCommand: FetchedDataChunk = new FetchedDataChunk(null, null, -1L) } @deprecated("This class has been deprecated and will be removed in a future release.", "0.11.0.0") private[kafka] class ZookeeperConsumerConnector(val config: ConsumerConfig, val enableFetcher: Boolean) // for testing only extends ConsumerConnector with Logging with KafkaMetricsGroup { private val isShuttingDown = new AtomicBoolean(false) private val rebalanceLock = new Object private var fetcher: Option[ConsumerFetcherManager] = None private var zkUtils: ZkUtils = null private var topicRegistry = new Pool[String, Pool[Int, PartitionTopicInfo]] private val checkpointedZkOffsets = new Pool[TopicAndPartition, Long] private val topicThreadIdAndQueues = new Pool[(String, ConsumerThreadId), BlockingQueue[FetchedDataChunk]] private val scheduler = new KafkaScheduler(threads = 1, threadNamePrefix = "kafka-consumer-scheduler-") private val messageStreamCreated = new AtomicBoolean(false) private var sessionExpirationListener: ZKSessionExpireListener = null private var topicPartitionChangeListener: ZKTopicPartitionChangeListener = null private var loadBalancerListener: ZKRebalancerListener = null private var offsetsChannel: BlockingChannel = null private val offsetsChannelLock = new Object private var wildcardTopicWatcher: ZookeeperTopicEventWatcher = null private var consumerRebalanceListener: ConsumerRebalanceListener = null // useful for tracking migration of consumers to store offsets in kafka private val kafkaCommitMeter = newMeter("KafkaCommitsPerSec", "commits", TimeUnit.SECONDS, Map("clientId" -> config.clientId)) private val zkCommitMeter = newMeter("ZooKeeperCommitsPerSec", "commits", TimeUnit.SECONDS, Map("clientId" -> config.clientId)) private val rebalanceTimer = new KafkaTimer(newTimer("RebalanceRateAndTime", TimeUnit.MILLISECONDS, TimeUnit.SECONDS, Map("clientId" -> config.clientId))) newGauge( "yammer-metrics-count", new Gauge[Int] { def value = { com.yammer.metrics.Metrics.defaultRegistry().allMetrics().size() } } ) val consumerIdString = { var consumerUuid : String = null config.consumerId match { case Some(consumerId) // for testing only => consumerUuid = consumerId case None // generate unique consumerId automatically => val uuid = UUID.randomUUID() consumerUuid = "%s-%d-%s".format( InetAddress.getLocalHost.getHostName, System.currentTimeMillis, uuid.getMostSignificantBits().toHexString.substring(0,8)) } config.groupId + "_" + consumerUuid } this.logIdent = "[" + consumerIdString + "], " connectZk() createFetcher() ensureOffsetManagerConnected() if (config.autoCommitEnable) { scheduler.startup info("starting auto committer every " + config.autoCommitIntervalMs + " ms") scheduler.schedule("kafka-consumer-autocommit", autoCommit _, delay = config.autoCommitIntervalMs, period = config.autoCommitIntervalMs, unit = TimeUnit.MILLISECONDS) } KafkaMetricsReporter.startReporters(config.props) AppInfo.registerInfo() def this(config: ConsumerConfig) = this(config, true) def createMessageStreams(topicCountMap: Map[String,Int]): 
Map[String, List[KafkaStream[Array[Byte],Array[Byte]]]] = createMessageStreams(topicCountMap, new DefaultDecoder(), new DefaultDecoder()) def createMessageStreams[K,V](topicCountMap: Map[String,Int], keyDecoder: Decoder[K], valueDecoder: Decoder[V]) : Map[String, List[KafkaStream[K,V]]] = { if (messageStreamCreated.getAndSet(true)) throw new MessageStreamsExistException(this.getClass.getSimpleName + " can create message streams at most once",null) consume(topicCountMap, keyDecoder, valueDecoder) } def createMessageStreamsByFilter[K,V](topicFilter: TopicFilter, numStreams: Int, keyDecoder: Decoder[K] = new DefaultDecoder(), valueDecoder: Decoder[V] = new DefaultDecoder()) = { val wildcardStreamsHandler = new WildcardStreamsHandler[K,V](topicFilter, numStreams, keyDecoder, valueDecoder) wildcardStreamsHandler.streams } def setConsumerRebalanceListener(listener: ConsumerRebalanceListener) { if (messageStreamCreated.get()) throw new MessageStreamsExistException(this.getClass.getSimpleName + " can only set consumer rebalance listener before creating streams",null) consumerRebalanceListener = listener } private def createFetcher() { if (enableFetcher) fetcher = Some(new ConsumerFetcherManager(consumerIdString, config, zkUtils)) } private def connectZk() { info("Connecting to zookeeper instance at " + config.zkConnect) zkUtils = ZkUtils(config.zkConnect, config.zkSessionTimeoutMs, config.zkConnectionTimeoutMs, JaasUtils.isZkSecurityEnabled()) } // Blocks until the offset manager is located and a channel is established to it. private def ensureOffsetManagerConnected() { if (config.offsetsStorage == "kafka") { if (offsetsChannel == null || !offsetsChannel.isConnected) offsetsChannel = ClientUtils.channelToOffsetManager(config.groupId, zkUtils, config.offsetsChannelSocketTimeoutMs, config.offsetsChannelBackoffMs) debug("Connected to offset manager %s:%d.".format(offsetsChannel.host, offsetsChannel.port)) } } def shutdown() { val canShutdown = isShuttingDown.compareAndSet(false, true) if (canShutdown) { info("ZKConsumerConnector shutting down") val startTime = System.nanoTime() KafkaMetricsGroup.removeAllConsumerMetrics(config.clientId) if (wildcardTopicWatcher != null) wildcardTopicWatcher.shutdown() rebalanceLock synchronized { try { if (config.autoCommitEnable) scheduler.shutdown() fetcher match { case Some(f) => f.stopConnections case None => } sendShutdownToAllQueues() if (config.autoCommitEnable) commitOffsets(true) if (zkUtils != null) { zkUtils.close() zkUtils = null } if (offsetsChannel != null) offsetsChannel.disconnect() } catch { case e: Throwable => fatal("error during consumer connector shutdown", e) } info("ZKConsumerConnector shutdown completed in " + (System.nanoTime() - startTime) / 1000000 + " ms") } } } def consume[K, V](topicCountMap: scala.collection.Map[String,Int], keyDecoder: Decoder[K], valueDecoder: Decoder[V]) : Map[String,List[KafkaStream[K,V]]] = { debug("entering consume ") if (topicCountMap == null) throw new RuntimeException("topicCountMap is null") val topicCount = TopicCount.constructTopicCount(consumerIdString, topicCountMap) val topicThreadIds = topicCount.getConsumerThreadIdsPerTopic // make a list of (queue,stream) pairs, one pair for each threadId val queuesAndStreams = topicThreadIds.values.map(threadIdSet => threadIdSet.map(_ => { val queue = new LinkedBlockingQueue[FetchedDataChunk](config.queuedMaxMessages) val stream = new KafkaStream[K,V]( queue, config.consumerTimeoutMs, keyDecoder, valueDecoder, config.clientId) (queue, stream) }) ).flatten.toList val 
dirs = new ZKGroupDirs(config.groupId) registerConsumerInZK(dirs, consumerIdString, topicCount) reinitializeConsumer(topicCount, queuesAndStreams) loadBalancerListener.kafkaMessageAndMetadataStreams.asInstanceOf[Map[String, List[KafkaStream[K,V]]]] } // this API is used by unit tests only def getTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]] = topicRegistry private def registerConsumerInZK(dirs: ZKGroupDirs, consumerIdString: String, topicCount: TopicCount) { info("begin registering consumer " + consumerIdString + " in ZK") val timestamp = Time.SYSTEM.milliseconds.toString val consumerRegistrationInfo = Json.encode(Map("version" -> 1, "subscription" -> topicCount.getTopicCountMap, "pattern" -> topicCount.pattern, "timestamp" -> timestamp)) val zkWatchedEphemeral = new ZKCheckedEphemeral(dirs. consumerRegistryDir + "/" + consumerIdString, consumerRegistrationInfo, zkUtils.zkConnection.getZookeeper, false) zkWatchedEphemeral.create() info("end registering consumer " + consumerIdString + " in ZK") } private def sendShutdownToAllQueues() = { for (queue <- topicThreadIdAndQueues.values.toSet[BlockingQueue[FetchedDataChunk]]) { debug("Clearing up queue") queue.clear() queue.put(ZookeeperConsumerConnector.shutdownCommand) debug("Cleared queue and sent shutdown command") } } def autoCommit() { trace("auto committing") try { commitOffsets(isAutoCommit = false) } catch { case t: Throwable => // log it and let it go error("exception during autoCommit: ", t) } } def commitOffsetToZooKeeper(topicPartition: TopicAndPartition, offset: Long) { if (checkpointedZkOffsets.get(topicPartition) != offset) { val topicDirs = new ZKGroupTopicDirs(config.groupId, topicPartition.topic) zkUtils.updatePersistentPath(topicDirs.consumerOffsetDir + "/" + topicPartition.partition, offset.toString) checkpointedZkOffsets.put(topicPartition, offset) zkCommitMeter.mark() } } /** * KAFKA-1743: This method added for backward compatibility. 
*/ def commitOffsets { commitOffsets(true) } def commitOffsets(isAutoCommit: Boolean) { val offsetsToCommit = immutable.Map(topicRegistry.values.flatMap { partitionTopicInfos => partitionTopicInfos.values.map { info => TopicAndPartition(info.topic, info.partitionId) -> OffsetAndMetadata(info.getConsumeOffset()) } }.toSeq: _*) commitOffsets(offsetsToCommit, isAutoCommit) } def commitOffsets(offsetsToCommit: immutable.Map[TopicAndPartition, OffsetAndMetadata], isAutoCommit: Boolean) { trace("OffsetMap: %s".format(offsetsToCommit)) var retriesRemaining = 1 + (if (isAutoCommit) 0 else config.offsetsCommitMaxRetries) // no retries for commits from auto-commit var done = false while (!done) { val committed = offsetsChannelLock synchronized { // committed when we receive either no error codes or only MetadataTooLarge errors if (offsetsToCommit.size > 0) { if (config.offsetsStorage == "zookeeper") { offsetsToCommit.foreach { case (topicAndPartition, offsetAndMetadata) => commitOffsetToZooKeeper(topicAndPartition, offsetAndMetadata.offset) } true } else { val offsetCommitRequest = OffsetCommitRequest(config.groupId, offsetsToCommit, clientId = config.clientId) ensureOffsetManagerConnected() try { kafkaCommitMeter.mark(offsetsToCommit.size) offsetsChannel.send(offsetCommitRequest) val offsetCommitResponse = OffsetCommitResponse.readFrom(offsetsChannel.receive().payload()) trace("Offset commit response: %s.".format(offsetCommitResponse)) val (commitFailed, retryableIfFailed, shouldRefreshCoordinator, errorCount) = { offsetCommitResponse.commitStatus.foldLeft(false, false, false, 0) { case (folded, (topicPartition, error)) => if (error == Errors.NONE && config.dualCommitEnabled) { val offset = offsetsToCommit(topicPartition).offset commitOffsetToZooKeeper(topicPartition, offset) } (folded._1 || // update commitFailed error != Errors.NONE, folded._2 || // update retryableIfFailed - (only metadata too large is not retryable) (error != Errors.NONE && error != Errors.OFFSET_METADATA_TOO_LARGE), folded._3 || // update shouldRefreshCoordinator error == Errors.NOT_COORDINATOR || error == Errors.COORDINATOR_NOT_AVAILABLE, // update error count folded._4 + (if (error != Errors.NONE) 1 else 0)) } } debug(errorCount + " errors in offset commit response.") if (shouldRefreshCoordinator) { debug("Could not commit offsets (because offset coordinator has moved or is unavailable).") offsetsChannel.disconnect() } if (commitFailed && retryableIfFailed) false else true } catch { case t: Throwable => error("Error while committing offsets.", t) offsetsChannel.disconnect() false } } } else { debug("No updates to offsets since last commit.") true } } done = { retriesRemaining -= 1 retriesRemaining == 0 || committed } if (!done) { debug("Retrying offset commit in %d ms".format(config.offsetsChannelBackoffMs)) Thread.sleep(config.offsetsChannelBackoffMs) } } } private def fetchOffsetFromZooKeeper(topicPartition: TopicAndPartition) = { val dirs = new ZKGroupTopicDirs(config.groupId, topicPartition.topic) val offsetString = zkUtils.readDataMaybeNull(dirs.consumerOffsetDir + "/" + topicPartition.partition)._1 offsetString match { case Some(offsetStr) => (topicPartition, OffsetMetadataAndError(offsetStr.toLong)) case None => (topicPartition, OffsetMetadataAndError.NoOffset) } } private def fetchOffsets(partitions: Seq[TopicAndPartition]) = { if (partitions.isEmpty) Some(OffsetFetchResponse(Map.empty)) else if (config.offsetsStorage == "zookeeper") { val offsets = partitions.map(fetchOffsetFromZooKeeper) 
Some(OffsetFetchResponse(immutable.Map(offsets:_*))) } else { val offsetFetchRequest = OffsetFetchRequest(groupId = config.groupId, requestInfo = partitions, clientId = config.clientId) var offsetFetchResponseOpt: Option[OffsetFetchResponse] = None while (!isShuttingDown.get && !offsetFetchResponseOpt.isDefined) { offsetFetchResponseOpt = offsetsChannelLock synchronized { ensureOffsetManagerConnected() try { offsetsChannel.send(offsetFetchRequest) val offsetFetchResponse = OffsetFetchResponse.readFrom(offsetsChannel.receive().payload()) trace("Offset fetch response: %s.".format(offsetFetchResponse)) val (leaderChanged, loadInProgress) = offsetFetchResponse.requestInfo.values.foldLeft(false, false) { case (folded, offsetMetadataAndError) => (folded._1 || (offsetMetadataAndError.error == Errors.NOT_COORDINATOR), folded._2 || (offsetMetadataAndError.error == Errors.COORDINATOR_LOAD_IN_PROGRESS)) } if (leaderChanged) { offsetsChannel.disconnect() debug("Could not fetch offsets (because offset manager has moved).") None // retry } else if (loadInProgress) { debug("Could not fetch offsets (because offset cache is being loaded).") None // retry } else { if (config.dualCommitEnabled) { // if dual-commit is enabled (i.e., if a consumer group is migrating offsets to kafka), then pick the // maximum between offsets in zookeeper and kafka. val kafkaOffsets = offsetFetchResponse.requestInfo val mostRecentOffsets = kafkaOffsets.map { case (topicPartition, kafkaOffset) => val zkOffset = fetchOffsetFromZooKeeper(topicPartition)._2.offset val mostRecentOffset = zkOffset.max(kafkaOffset.offset) (topicPartition, OffsetMetadataAndError(mostRecentOffset, kafkaOffset.metadata, Errors.NONE)) } Some(OffsetFetchResponse(mostRecentOffsets)) } else Some(offsetFetchResponse) } } catch { case e: Exception => warn("Error while fetching offsets from %s:%d. Possible cause: %s".format(offsetsChannel.host, offsetsChannel.port, e.getMessage)) offsetsChannel.disconnect() None // retry } } if (offsetFetchResponseOpt.isEmpty) { debug("Retrying offset fetch in %d ms".format(config.offsetsChannelBackoffMs)) Thread.sleep(config.offsetsChannelBackoffMs) } } offsetFetchResponseOpt } } class ZKSessionExpireListener(val dirs: ZKGroupDirs, val consumerIdString: String, val topicCount: TopicCount, val loadBalancerListener: ZKRebalancerListener) extends IZkStateListener { @throws[Exception] def handleStateChanged(state: KeeperState) { // do nothing, since zkclient will do reconnect for us. } /** * Called after the zookeeper session has expired and a new session has been created. You would have to re-create * any ephemeral nodes here. * * @throws Exception * On any error. */ @throws[Exception] def handleNewSession() { /** * When we get a SessionExpired event, we lost all ephemeral nodes and zkclient has reestablished a * connection for us. We need to release the ownership of the current consumer and re-register this * consumer in the consumer registry and trigger a rebalance. */ info("ZK expired; release old broker parition ownership; re-register consumer " + consumerIdString) loadBalancerListener.resetState() registerConsumerInZK(dirs, consumerIdString, topicCount) // explicitly trigger load balancing for this consumer loadBalancerListener.syncedRebalance() // There is no need to resubscribe to child and state changes. // The child change watchers will be set inside rebalance when we read the children list. 
} override def handleSessionEstablishmentError(error: Throwable): Unit = { fatal("Could not establish session with zookeeper", error) } } class ZKTopicPartitionChangeListener(val loadBalancerListener: ZKRebalancerListener) extends IZkDataListener { def handleDataChange(dataPath : String, data: Object) { try { info("Topic info for path " + dataPath + " changed to " + data.toString + ", triggering rebalance") // queue up the rebalance event loadBalancerListener.rebalanceEventTriggered() // There is no need to re-subscribe the watcher since it will be automatically // re-registered upon firing of this event by zkClient } catch { case e: Throwable => error("Error while handling topic partition change for data path " + dataPath, e ) } } @throws[Exception] def handleDataDeleted(dataPath : String) { // TODO: This need to be implemented when we support delete topic warn("Topic for path " + dataPath + " gets deleted, which should not happen at this time") } } class ZKRebalancerListener(val group: String, val consumerIdString: String, val kafkaMessageAndMetadataStreams: mutable.Map[String,List[KafkaStream[_,_]]]) extends IZkChildListener { private val partitionAssignor = PartitionAssignor.createInstance(config.partitionAssignmentStrategy) private var isWatcherTriggered = false private val lock = new ReentrantLock private val cond = lock.newCondition() @volatile private var allTopicsOwnedPartitionsCount = 0 newGauge("OwnedPartitionsCount", new Gauge[Int] { def value() = allTopicsOwnedPartitionsCount }, Map("clientId" -> config.clientId, "groupId" -> config.groupId)) private def ownedPartitionsCountMetricTags(topic: String) = Map("clientId" -> config.clientId, "groupId" -> config.groupId, "topic" -> topic) private val watcherExecutorThread = new Thread(consumerIdString + "_watcher_executor") { override def run() { info("starting watcher executor thread for consumer " + consumerIdString) var doRebalance = false while (!isShuttingDown.get) { try { lock.lock() try { if (!isWatcherTriggered) cond.await(1000, TimeUnit.MILLISECONDS) // wake up periodically so that it can check the shutdown flag } finally { doRebalance = isWatcherTriggered isWatcherTriggered = false lock.unlock() } if (doRebalance) syncedRebalance } catch { case t: Throwable => error("error during syncedRebalance", t) } } info("stopping watcher executor thread for consumer " + consumerIdString) } } watcherExecutorThread.start() @throws[Exception] def handleChildChange(parentPath : String, curChilds : java.util.List[String]) { rebalanceEventTriggered() } def rebalanceEventTriggered() { inLock(lock) { isWatcherTriggered = true cond.signalAll() } } private def deletePartitionOwnershipFromZK(topic: String, partition: Int) { val topicDirs = new ZKGroupTopicDirs(group, topic) val znode = topicDirs.consumerOwnerDir + "/" + partition zkUtils.deletePath(znode) debug("Consumer " + consumerIdString + " releasing " + znode) } private def releasePartitionOwnership(localTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]])= { info("Releasing partition ownership") for ((topic, infos) <- localTopicRegistry) { for(partition <- infos.keys) { deletePartitionOwnershipFromZK(topic, partition) } removeMetric("OwnedPartitionsCount", ownedPartitionsCountMetricTags(topic)) localTopicRegistry.remove(topic) } allTopicsOwnedPartitionsCount = 0 } def resetState() { topicRegistry.clear } def syncedRebalance() { rebalanceLock synchronized { rebalanceTimer.time { for (i <- 0 until config.rebalanceMaxRetries) { if(isShuttingDown.get()) { return } info("begin 
rebalancing consumer " + consumerIdString + " try #" + i) var done = false var cluster: Cluster = null try { cluster = zkUtils.getCluster() done = rebalance(cluster) } catch { case e: Throwable => /** occasionally, we may hit a ZK exception because the ZK state is changing while we are iterating. * For example, a ZK node can disappear between the time we get all children and the time we try to get * the value of a child. Just let this go since another rebalance will be triggered. **/ info("exception during rebalance ", e) } info("end rebalancing consumer " + consumerIdString + " try #" + i) if (done) { return } else { /* Here the cache is at a risk of being stale. To take future rebalancing decisions correctly, we should * clear the cache */ info("Rebalancing attempt failed. Clearing the cache before the next rebalancing operation is triggered") } // stop all fetchers and clear all the queues to avoid data duplication closeFetchersForQueues(cluster, kafkaMessageAndMetadataStreams, topicThreadIdAndQueues.map(q => q._2)) Thread.sleep(config.rebalanceBackoffMs) } } } throw new ConsumerRebalanceFailedException(consumerIdString + " can't rebalance after " + config.rebalanceMaxRetries +" retries") } private def rebalance(cluster: Cluster): Boolean = { val myTopicThreadIdsMap = TopicCount.constructTopicCount( group, consumerIdString, zkUtils, config.excludeInternalTopics).getConsumerThreadIdsPerTopic val brokers = zkUtils.getAllBrokersInCluster() if (brokers.size == 0) { // This can happen in a rare case when there are no brokers available in the cluster when the consumer is started. // We log a warning and register for child changes on brokers/id so that rebalance can be triggered when the brokers // are up. warn("no brokers found when trying to rebalance.") zkUtils.zkClient.subscribeChildChanges(BrokerIdsPath, loadBalancerListener) true } else { /** * fetchers must be stopped to avoid data duplication, since if the current * rebalancing attempt fails, the partitions that are released could be owned by another consumer. * But if we don't stop the fetchers first, this consumer would continue returning data for released * partitions in parallel. So, not stopping the fetchers leads to duplicate data. 
*/ closeFetchers(cluster, kafkaMessageAndMetadataStreams, myTopicThreadIdsMap) if (consumerRebalanceListener != null) { info("Invoking rebalance listener before relasing partition ownerships.") consumerRebalanceListener.beforeReleasingPartitions( if (topicRegistry.size == 0) new java.util.HashMap[String, java.util.Set[java.lang.Integer]] else topicRegistry.map(topics => topics._1 -> topics._2.keys // note this is incorrect, see KAFKA-2284 ).toMap.asJava.asInstanceOf[java.util.Map[String, java.util.Set[java.lang.Integer]]] ) } releasePartitionOwnership(topicRegistry) val assignmentContext = new AssignmentContext(group, consumerIdString, config.excludeInternalTopics, zkUtils) val globalPartitionAssignment = partitionAssignor.assign(assignmentContext) val partitionAssignment = globalPartitionAssignment.get(assignmentContext.consumerId) val currentTopicRegistry = new Pool[String, Pool[Int, PartitionTopicInfo]]( valueFactory = Some((_: String) => new Pool[Int, PartitionTopicInfo])) // fetch current offsets for all topic-partitions val topicPartitions = partitionAssignment.keySet.toSeq val offsetFetchResponseOpt = fetchOffsets(topicPartitions) if (isShuttingDown.get || !offsetFetchResponseOpt.isDefined) false else { val offsetFetchResponse = offsetFetchResponseOpt.get topicPartitions.foreach(topicAndPartition => { val (topic, partition) = topicAndPartition.asTuple val offset = offsetFetchResponse.requestInfo(topicAndPartition).offset val threadId = partitionAssignment(topicAndPartition) addPartitionTopicInfo(currentTopicRegistry, partition, topic, offset, threadId) }) /** * move the partition ownership here, since that can be used to indicate a truly successful re-balancing attempt * A rebalancing attempt is completed successfully only after the fetchers have been started correctly */ if(reflectPartitionOwnershipDecision(partitionAssignment)) { allTopicsOwnedPartitionsCount = partitionAssignment.size partitionAssignment.view.groupBy { case (topicPartition, _) => topicPartition.topic } .foreach { case (topic, partitionThreadPairs) => newGauge("OwnedPartitionsCount", new Gauge[Int] { def value() = partitionThreadPairs.size }, ownedPartitionsCountMetricTags(topic)) } topicRegistry = currentTopicRegistry // Invoke beforeStartingFetchers callback if the consumerRebalanceListener is set. if (consumerRebalanceListener != null) { info("Invoking rebalance listener before starting fetchers.") // Partition assignor returns the global partition assignment organized as a map of [TopicPartition, ThreadId] // per consumer, and we need to re-organize it to a map of [Partition, ThreadId] per topic before passing // to the rebalance callback. 
val partitionAssginmentGroupByTopic = globalPartitionAssignment.values.flatten.groupBy[String] { case (topicPartition, _) => topicPartition.topic } val partitionAssigmentMapForCallback = partitionAssginmentGroupByTopic.map({ case (topic, partitionOwnerShips) => val partitionOwnershipForTopicScalaMap = partitionOwnerShips.map({ case (topicAndPartition, consumerThreadId) => (topicAndPartition.partition: Integer) -> consumerThreadId }).toMap topic -> partitionOwnershipForTopicScalaMap.asJava }) consumerRebalanceListener.beforeStartingFetchers( consumerIdString, partitionAssigmentMapForCallback.asJava ) } updateFetcher(cluster) true } else { false } } } } private def closeFetchersForQueues(cluster: Cluster, messageStreams: Map[String,List[KafkaStream[_,_]]], queuesToBeCleared: Iterable[BlockingQueue[FetchedDataChunk]]) { val allPartitionInfos = topicRegistry.values.map(p => p.values).flatten fetcher match { case Some(f) => f.stopConnections clearFetcherQueues(allPartitionInfos, cluster, queuesToBeCleared, messageStreams) /** * here, we need to commit offsets before stopping the consumer from returning any more messages * from the current data chunk. Since partition ownership is not yet released, this commit offsets * call will ensure that the offsets committed now will be used by the next consumer thread owning the partition * for the current data chunk. Since the fetchers are already shutdown and this is the last chunk to be iterated * by the consumer, there will be no more messages returned by this iterator until the rebalancing finishes * successfully and the fetchers restart to fetch more data chunks **/ if (config.autoCommitEnable) { info("Committing all offsets after clearing the fetcher queues") commitOffsets(true) } case None => } } private def clearFetcherQueues(topicInfos: Iterable[PartitionTopicInfo], cluster: Cluster, queuesTobeCleared: Iterable[BlockingQueue[FetchedDataChunk]], messageStreams: Map[String,List[KafkaStream[_,_]]]) { // Clear all but the currently iterated upon chunk in the consumer thread's queue queuesTobeCleared.foreach(_.clear) info("Cleared all relevant queues for this fetcher") // Also clear the currently iterated upon chunk in the consumer threads if(messageStreams != null) messageStreams.foreach(_._2.foreach(s => s.clear())) info("Cleared the data chunks in all the consumer message iterators") } private def closeFetchers(cluster: Cluster, messageStreams: Map[String,List[KafkaStream[_,_]]], relevantTopicThreadIdsMap: Map[String, Set[ConsumerThreadId]]) { // only clear the fetcher queues for certain topic partitions that *might* no longer be served by this consumer // after this rebalancing attempt val queuesTobeCleared = topicThreadIdAndQueues.filter(q => relevantTopicThreadIdsMap.contains(q._1._1)).map(q => q._2) closeFetchersForQueues(cluster, messageStreams, queuesTobeCleared) } private def updateFetcher(cluster: Cluster) { // update partitions for fetcher var allPartitionInfos : List[PartitionTopicInfo] = Nil for (partitionInfos <- topicRegistry.values) for (partition <- partitionInfos.values) allPartitionInfos ::= partition info("Consumer " + consumerIdString + " selected partitions : " + allPartitionInfos.sortWith((s,t) => s.partitionId < t.partitionId).map(_.toString).mkString(",")) fetcher match { case Some(f) => f.startConnections(allPartitionInfos, cluster) case None => } } private def reflectPartitionOwnershipDecision(partitionAssignment: Map[TopicAndPartition, ConsumerThreadId]): Boolean = { var successfullyOwnedPartitions : List[(String, Int)] = 
Nil val partitionOwnershipSuccessful = partitionAssignment.map { partitionOwner => val topic = partitionOwner._1.topic val partition = partitionOwner._1.partition val consumerThreadId = partitionOwner._2 val partitionOwnerPath = zkUtils.getConsumerPartitionOwnerPath(group, topic, partition) try { zkUtils.createEphemeralPathExpectConflict(partitionOwnerPath, consumerThreadId.toString) info(consumerThreadId + " successfully owned partition " + partition + " for topic " + topic) successfullyOwnedPartitions ::= (topic, partition) true } catch { case _: ZkNodeExistsException => // The node hasn't been deleted by the original owner. So wait a bit and retry. info("waiting for the partition ownership to be deleted: " + partition + " for topic " + topic) false } } val hasPartitionOwnershipFailed = partitionOwnershipSuccessful.foldLeft(0)((sum, decision) => sum + (if(decision) 0 else 1)) /* even if one of the partition ownership attempt has failed, return false */ if(hasPartitionOwnershipFailed > 0) { // remove all paths that we have owned in ZK successfullyOwnedPartitions.foreach(topicAndPartition => deletePartitionOwnershipFromZK(topicAndPartition._1, topicAndPartition._2)) false } else true } private def addPartitionTopicInfo(currentTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]], partition: Int, topic: String, offset: Long, consumerThreadId: ConsumerThreadId) { val partTopicInfoMap = currentTopicRegistry.getAndMaybePut(topic) val queue = topicThreadIdAndQueues.get((topic, consumerThreadId)) val consumedOffset = new AtomicLong(offset) val fetchedOffset = new AtomicLong(offset) val partTopicInfo = new PartitionTopicInfo(topic, partition, queue, consumedOffset, fetchedOffset, new AtomicInteger(config.fetchMessageMaxBytes), config.clientId) partTopicInfoMap.put(partition, partTopicInfo) debug(partTopicInfo + " selected new offset " + offset) checkpointedZkOffsets.put(TopicAndPartition(topic, partition), offset) } } private def reinitializeConsumer[K,V]( topicCount: TopicCount, queuesAndStreams: List[(LinkedBlockingQueue[FetchedDataChunk],KafkaStream[K,V])]) { val dirs = new ZKGroupDirs(config.groupId) // listener to consumer and partition changes if (loadBalancerListener == null) { val topicStreamsMap = new mutable.HashMap[String,List[KafkaStream[K,V]]] loadBalancerListener = new ZKRebalancerListener( config.groupId, consumerIdString, topicStreamsMap.asInstanceOf[scala.collection.mutable.Map[String, List[KafkaStream[_,_]]]]) } // create listener for session expired event if not exist yet if (sessionExpirationListener == null) sessionExpirationListener = new ZKSessionExpireListener( dirs, consumerIdString, topicCount, loadBalancerListener) // create listener for topic partition change event if not exist yet if (topicPartitionChangeListener == null) topicPartitionChangeListener = new ZKTopicPartitionChangeListener(loadBalancerListener) val topicStreamsMap = loadBalancerListener.kafkaMessageAndMetadataStreams // map of {topic -> Set(thread-1, thread-2, ...)} val consumerThreadIdsPerTopic: Map[String, Set[ConsumerThreadId]] = topicCount.getConsumerThreadIdsPerTopic val allQueuesAndStreams = topicCount match { case _: WildcardTopicCount => /* * Wild-card consumption streams share the same queues, so we need to * duplicate the list for the subsequent zip operation. 
*/ (1 to consumerThreadIdsPerTopic.keySet.size).flatMap(_ => queuesAndStreams).toList case _: StaticTopicCount => queuesAndStreams } val topicThreadIds = consumerThreadIdsPerTopic.map { case (topic, threadIds) => threadIds.map((topic, _)) }.flatten require(topicThreadIds.size == allQueuesAndStreams.size, "Mismatch between thread ID count (%d) and queue count (%d)" .format(topicThreadIds.size, allQueuesAndStreams.size)) val threadQueueStreamPairs = topicThreadIds.zip(allQueuesAndStreams) threadQueueStreamPairs.foreach(e => { val topicThreadId = e._1 val q = e._2._1 topicThreadIdAndQueues.put(topicThreadId, q) debug("Adding topicThreadId %s and queue %s to topicThreadIdAndQueues data structure".format(topicThreadId, q.toString)) newGauge( "FetchQueueSize", new Gauge[Int] { def value = q.size }, Map("clientId" -> config.clientId, "topic" -> topicThreadId._1, "threadId" -> topicThreadId._2.threadId.toString) ) }) val groupedByTopic = threadQueueStreamPairs.groupBy(_._1._1) groupedByTopic.foreach(e => { val topic = e._1 val streams = e._2.map(_._2._2).toList topicStreamsMap += (topic -> streams) debug("adding topic %s and %d streams to map.".format(topic, streams.size)) }) // listener to consumer and partition changes zkUtils.zkClient.subscribeStateChanges(sessionExpirationListener) zkUtils.zkClient.subscribeChildChanges(dirs.consumerRegistryDir, loadBalancerListener) topicStreamsMap.foreach { topicAndStreams => // register on broker partition path changes val topicPath = BrokerTopicsPath + "/" + topicAndStreams._1 zkUtils.zkClient.subscribeDataChanges(topicPath, topicPartitionChangeListener) } // explicitly trigger load balancing for this consumer loadBalancerListener.syncedRebalance() } class WildcardStreamsHandler[K,V](topicFilter: TopicFilter, numStreams: Int, keyDecoder: Decoder[K], valueDecoder: Decoder[V]) extends TopicEventHandler[String] { if (messageStreamCreated.getAndSet(true)) throw new RuntimeException("Each consumer connector can create " + "message streams by filter at most once.") private val wildcardQueuesAndStreams = (1 to numStreams) .map(_ => { val queue = new LinkedBlockingQueue[FetchedDataChunk](config.queuedMaxMessages) val stream = new KafkaStream[K,V](queue, config.consumerTimeoutMs, keyDecoder, valueDecoder, config.clientId) (queue, stream) }).toList // bootstrap with existing topics private var wildcardTopics = zkUtils.getChildrenParentMayNotExist(BrokerTopicsPath) .filter(topic => topicFilter.isTopicAllowed(topic, config.excludeInternalTopics)) private val wildcardTopicCount = TopicCount.constructTopicCount( consumerIdString, topicFilter, numStreams, zkUtils, config.excludeInternalTopics) val dirs = new ZKGroupDirs(config.groupId) registerConsumerInZK(dirs, consumerIdString, wildcardTopicCount) reinitializeConsumer(wildcardTopicCount, wildcardQueuesAndStreams) /* * Topic events will trigger subsequent synced rebalances. */ info("Creating topic event watcher for topics " + topicFilter) wildcardTopicWatcher = new ZookeeperTopicEventWatcher(zkUtils, this) def handleTopicEvent(allTopics: Seq[String]) { debug("Handling topic event") val updatedTopics = allTopics.filter(topic => topicFilter.isTopicAllowed(topic, config.excludeInternalTopics)) val addedTopics = updatedTopics filterNot (wildcardTopics contains) if (addedTopics.nonEmpty) info("Topic event: added topics = %s" .format(addedTopics)) /* * TODO: Deleted topics are interesting (and will not be a concern until * 0.8 release). 
We may need to remove these topics from the rebalance * listener's map in reinitializeConsumer. */ val deletedTopics = wildcardTopics filterNot (updatedTopics contains) if (deletedTopics.nonEmpty) info("Topic event: deleted topics = %s" .format(deletedTopics)) wildcardTopics = updatedTopics info("Topics to consume = %s".format(wildcardTopics)) if (addedTopics.nonEmpty || deletedTopics.nonEmpty) reinitializeConsumer(wildcardTopicCount, wildcardQueuesAndStreams) } def streams: Seq[KafkaStream[K,V]] = wildcardQueuesAndStreams.map(_._2) } }
wangcy6/storm_app
frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala
Scala
apache-2.0
46,802
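// Illustrative sketch of the consumer-related znode layout described in the
// ZookeeperConsumerConnector class comment above, written out for concrete values.
// Plain string construction only; the object and parameter names are assumptions and this
// is not the ZkUtils / ZKGroupDirs API used by the connector itself.
object ConsumerZnodeLayoutSketch {

  // Paths from sections 1, 3 and 4 of the class comment; the broker node registry
  // (section 2) lives under /brokers and is omitted here.
  def consumerPaths(group: String, consumerId: String, topic: String, brokerPartition: String): Seq[String] = Seq(
    s"/consumers/$group/ids/$consumerId",                 // 1. consumer id registry (ephemeral znode)
    s"/consumers/$group/owner/$topic/$brokerPartition",   // 3. partition owner registry
    s"/consumers/$group/offsets/$topic/$brokerPartition"  // 4. consumer offset tracking
  )

  def main(args: Array[String]): Unit =
    consumerPaths("reports", "reports_host1-1510000000000-deadbeef", "clicks", "0-3").foreach(println)
}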
// These are meant to be typed into the REPL. You can also run
// scala -Xnojline < repl-session.scala to run them all at once.

val coll = Range(1, 10)
coll.head
coll.last
coll.headOption
List().headOption
coll.lastOption
coll.length
coll.isEmpty
coll.sum
coll.product
coll.max
coll.min
coll.count(_ % 2 == 0)
coll.forall(_ % 2 == 0)
coll.exists(_ % 2 == 0)
coll.filter(_ % 2 == 0)
coll.filterNot(_ % 2 == 0)
coll.partition(_ % 2 == 0)
coll.takeWhile(_ < 3)
coll.dropWhile(_ < 3)
coll.span(_ < 3)
coll.take(4)
coll.drop(4)
coll.splitAt(4)
coll.takeRight(4)
coll.dropRight(4)
coll.slice(2, 8)
coll.grouped(3).toArray
coll.sliding(3).toArray
coll.mkString("<", "|", ">")
coll.toIterable
coll.toSeq
coll.toIndexedSeq
coll.toArray
coll.toList
coll.toSet

// Seq methods
coll.indexWhere(_ % 3 == 0)
coll.prefixLength(_ % 4 != 0)
coll.segmentLength(_ % 4 != 0, 4)
coll.padTo(20, 0)
val a = Seq(1, 1, 2, 3, 1, 1, 1)
val b = Seq(1, 2, 3, 2, 1)
a intersect b
a diff b
val words = "Mary had a little lamb".split(" ")
words.reverse
words.sorted
words.sortWith(_.length < _.length)
words.sortBy(_.length)
words.permutations.toArray
words.combinations(3).toArray
yeahnoob/scala-impatient-2e-code
src/ch13/sec07/repl-session.scala
Scala
gpl-3.0
1,171
// more complicated example
abstract class A {
  type C[X]
  def foo[B](x: C[B]): C[B] = { println("A.C"); x }
  def foo[B](x: List[B]): List[B] = { println("A.List"); x }
  def give[X]: C[X]
}

class B extends A {
  type C[X] = List[X]
  override def give[X] = Nil
  override def foo[B](x: C[B]): C[B] = { println("B.C"); x } // error: merge error during erasure
  val a: A = this
}

object Test extends B {
  def main(args: Array[String]): Unit =
    a.foo(a.give[Int]) // what method should be called here in runtime?
}
som-snytt/dotty
tests/pending/neg/i1240a.scala
Scala
apache-2.0
526
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.zoo.models.recommendation import com.intel.analytics.bigdl.mkl.MklDnn import com.intel.analytics.bigdl.optim.{Adam, EmbeddingAdam2} import com.intel.analytics.bigdl.tensor.Tensor import com.intel.analytics.bigdl.utils.{Engine, T} import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers} class EmbeddingAdamSpec extends FlatSpec with Matchers with BeforeAndAfter { "adam result" should "be same for one update" in { System.setProperty("bigdl.localMode", "true") Engine.init val userCount = 10 val itemCount = 5 val embedding1 = 3 val embedding2 = 4 val testAdam = new EmbeddingAdam2[Float](userCount = userCount, itemCount = itemCount, embedding1 = embedding1, embedding2 = embedding2, parallelism = Some(1)) val refAdam = new Adam[Float]() val length = itemCount * embedding1 + userCount * embedding1 + itemCount * embedding2 + userCount * embedding2 val weight1 = Tensor[Float](length).fill(1.0f) val weight2 = weight1.clone() MklDnn.isLoaded testAdam.updateWeight(Tensor[Float](T(1.0, 1.0)), weight1) testAdam.gradients(3)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 2.0, 3.0)))) testAdam.gradients(2)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 2.0, 3.0)))) testAdam.gradients(1)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.gradients(0)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.optimize(null, weight1) var offset = 0 val denseGradient = weight1.clone().zero() denseGradient.setValue(offset + 1, 1.0f) denseGradient.setValue(offset + 2, 2.0f) denseGradient.setValue(offset + 3, 3.0f) offset += itemCount * embedding1 denseGradient.setValue(offset + 4, 1.0f) denseGradient.setValue(offset + 5, 2.0f) denseGradient.setValue(offset + 6, 3.0f) offset += userCount * embedding1 denseGradient.narrow(1, offset + 1, 4).fill(1.0f) offset += itemCount * embedding2 denseGradient.narrow(1, offset + 1, 4).fill(1.0f) refAdam.optimize(_ => (1.0f, denseGradient), weight2) weight1 should be (weight2) } "adam result" should "be same for one update with multiple ids" in { System.setProperty("bigdl.localMode", "true") Engine.init val userCount = 10 val itemCount = 5 val embedding1 = 3 val embedding2 = 4 val testAdam = new EmbeddingAdam2[Float](userCount = userCount, itemCount = itemCount, embedding1 = embedding1, embedding2 = embedding2, parallelism = Some(1)) val refAdam = new Adam[Float]() val length = itemCount * embedding1 + userCount * embedding1 + itemCount * embedding2 + userCount * embedding2 val weight1 = Tensor[Float](length).fill(1.0f) val weight2 = weight1.clone() testAdam.gradients(3)(0) = (Tensor[Float](T(1.0, 1.0)), Tensor[Float](T(T(1.0, 2.0, 3.0), T(2.0, 3.0, 4.0)))) testAdam.gradients(2)(0) = (Tensor[Float](T(1.0, 2.0)), Tensor[Float](T(T(1.0, 2.0, 3.0), T(2.0, 3.0, 4.0)))) testAdam.gradients(1)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.gradients(0)(0) = (Tensor[Float](T(1.0)), 
Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) MklDnn.isLoaded testAdam.updateWeight(Tensor[Float](T(1.0, 1.0)), weight1) testAdam.optimize(null, weight1) var offset = 0 val denseGradient = weight1.clone().zero() denseGradient.setValue(offset + 1, 1.5f) denseGradient.setValue(offset + 2, 2.5f) denseGradient.setValue(offset + 3, 3.5f) offset += itemCount * embedding1 denseGradient.narrow(1, offset + 1, 3) denseGradient.setValue(offset + 1, 1.0f) denseGradient.setValue(offset + 2, 2.0f) denseGradient.setValue(offset + 3, 3.0f) denseGradient.setValue(offset + 4, 2.0f) denseGradient.setValue(offset + 5, 3.0f) denseGradient.setValue(offset + 6, 4.0f) offset += userCount * embedding1 denseGradient.narrow(1, offset + 1, 4).fill(1.0f) offset += itemCount * embedding2 denseGradient.narrow(1, offset + 1, 4).fill(1.0f) refAdam.optimize(_ => (1.0f, denseGradient), weight2) weight1 should be(weight2) } "adam result" should "be same for two update" in { System.setProperty("bigdl.localMode", "true") Engine.init val userCount = 10 val itemCount = 5 val embedding1 = 3 val embedding2 = 4 val testAdam = new EmbeddingAdam2[Float](userCount = userCount, itemCount = itemCount, embedding1 = embedding1, embedding2 = embedding2, parallelism = Some(1)) val refAdam = new Adam[Float]() val length = itemCount * embedding1 + userCount * embedding1 + itemCount * embedding2 + userCount * embedding2 val weight1 = Tensor[Float](length).fill(1.0f) val weight2 = weight1.clone() testAdam.gradients(3)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 1.0, 1.0)))) testAdam.gradients(2)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 1.0, 1.0)))) testAdam.gradients(1)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.gradients(0)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) MklDnn.isLoaded testAdam.updateWeight(Tensor[Float](T(1.0, 1.0)), weight1) testAdam.optimize(null, weight1) testAdam.updateWeight(Tensor[Float](T(2.0, 2.0)), weight1) testAdam.gradients(3)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0)))) testAdam.gradients(2)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0)))) testAdam.gradients(1)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.gradients(0)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.optimize(null, weight1) testAdam.updateWeight(Tensor[Float](T(1.0, 1.0)), weight1) var offset = 0 val denseGradient = weight1.clone().zero() denseGradient.narrow(1, offset + 1, 3).fill(1.0f) offset += itemCount * embedding1 denseGradient.narrow(1, offset + 1, 3).fill(1.0f) offset += userCount * embedding1 denseGradient.narrow(1, offset + 1, 4).fill(1.0f) offset += itemCount * embedding2 denseGradient.narrow(1, offset + 1, 4).fill(1.0f) refAdam.optimize(_ => (1.0f, denseGradient), weight2) offset = 0 denseGradient.zero() denseGradient.narrow(1, offset + 3 + 1, 3).fill(1.0f) offset += itemCount * embedding1 denseGradient.narrow(1, offset + 3 + 1, 3).fill(1.0f) offset += userCount * embedding1 denseGradient.narrow(1, offset + 4 + 1, 4).fill(1.0f) offset += itemCount * embedding2 denseGradient.narrow(1, offset + 4 + 1, 4).fill(1.0f) refAdam.optimize(_ => (1.0f, denseGradient), weight2) weight1 should be(weight2) } "adam result" should "be same for three update" in { System.setProperty("bigdl.localMode", "true") Engine.init val userCount = 10 val itemCount = 5 val embedding1 = 3 val embedding2 = 4 val testAdam = new EmbeddingAdam2[Float](userCount = userCount, itemCount = itemCount, 
embedding1 = embedding1, embedding2 = embedding2, parallelism = Some(1)) val refAdam = new Adam[Float]() val length = itemCount * embedding1 + userCount * embedding1 + itemCount * embedding2 + userCount * embedding2 val weight1 = Tensor[Float](length).fill(1.0f) val weight2 = weight1.clone() MklDnn.isLoaded testAdam.updateWeight(Tensor[Float](T(1.0, 1.0)), weight1) testAdam.gradients(3)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 1.0, 1.0)))) testAdam.gradients(2)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 1.0, 1.0)))) testAdam.gradients(1)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.gradients(0)(0) = (Tensor[Float](T(1.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.optimize(null, weight1) testAdam.updateWeight(Tensor[Float](T(2.0, 2.0)), weight1) testAdam.gradients(3)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0)))) testAdam.gradients(2)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0)))) testAdam.gradients(1)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.gradients(0)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.optimize(null, weight1) testAdam.updateWeight(Tensor[Float](T(2.0, 2.0)), weight1) testAdam.gradients(3)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0)))) testAdam.gradients(2)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0)))) testAdam.gradients(1)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.gradients(0)(0) = (Tensor[Float](T(2.0)), Tensor[Float](T(T(1.0, 1.0, 1.0, 1.0)))) testAdam.optimize(null, weight1) testAdam.updateWeight(Tensor[Float](T(1.0, 1.0)), weight1) var offset = 0 val denseGradient = weight1.clone().zero() denseGradient.narrow(1, offset + 1, 3).fill(1.0f) offset += itemCount * embedding1 denseGradient.narrow(1, offset + 1, 3).fill(1.0f) offset += userCount * embedding1 denseGradient.narrow(1, offset + 1, 4).fill(1.0f) offset += itemCount * embedding2 denseGradient.narrow(1, offset + 1, 4).fill(1.0f) refAdam.optimize(_ => (1.0f, denseGradient), weight2) offset = 0 denseGradient.zero() denseGradient.narrow(1, offset + 3 + 1, 3).fill(1.0f) offset += itemCount * embedding1 denseGradient.narrow(1, offset + 3 + 1, 3).fill(1.0f) offset += userCount * embedding1 denseGradient.narrow(1, offset + 4 + 1, 4).fill(1.0f) offset += itemCount * embedding2 denseGradient.narrow(1, offset + 4 + 1, 4).fill(1.0f) refAdam.optimize(_ => (1.0f, denseGradient), weight2) refAdam.optimize(_ => (1.0f, denseGradient), weight2) weight1 should be(weight2) } }
mlperf/training_results_v0.5
v0.5.0/intel/intel_ncf_submission/code/ncf/src/test/scala/com/intel/analytics/zoo/models/recommendation/EmbeddingAdamSpec.scala
Scala
apache-2.0
10,501
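Illustrative aside (not part of the BigDL/Analytics Zoo test above): the spec builds its dense reference gradient by walking a flat weight vector laid out as four blocks, item/embedding1, user/embedding1, item/embedding2, user/embedding2. The minimal sketch below, with made-up names, just recomputes those block offsets so the `offset +=` bookkeeping in the test is easier to follow.

// Sketch only: recompute the flat layout EmbeddingAdamSpec assumes for weight1.
object EmbeddingLayoutSketch extends App {
  val userCount = 10
  val itemCount = 5
  val embedding1 = 3
  val embedding2 = 4

  // block sizes in the order the test advances `offset`
  val blocks = Seq(
    "item-embedding1" -> itemCount * embedding1,
    "user-embedding1" -> userCount * embedding1,
    "item-embedding2" -> itemCount * embedding2,
    "user-embedding2" -> userCount * embedding2)

  // starting offset (0-based) of each block inside the flat weight tensor
  val offsets = blocks.scanLeft(0)(_ + _._2).init
  blocks.zip(offsets).foreach { case ((name, size), off) =>
    println(s"$name: offset $off, size $size")
  }
  println(s"total length = ${blocks.map(_._2).sum}") // 105, matching `length` in the spec
}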
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.flink.table.sources

import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment

/** Defines an external stream table and provides access to its data.
  *
  * @tparam T Type of the [[DataStream]] created by this [[TableSource]].
  */
trait StreamTableSource[T] extends TableSource[T] {

  /**
    * Returns the data of the table as a [[DataStream]].
    *
    * NOTE: This method is for internal use only for defining a [[TableSource]].
    *       Do not use it in Table API programs.
    */
  def getDataStream(execEnv: StreamExecutionEnvironment): DataStream[T]
}
WangTaoTheTonic/flink
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/StreamTableSource.scala
Scala
apache-2.0
1,471
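Hedged sketch (not from the Flink sources above): it only shows the kind of DataStream a StreamTableSource[String] implementation would hand back from getDataStream. The remaining abstract members of TableSource[T] in this Flink version are deliberately not guessed at, so this is written as a standalone DataStream program; the object and job names are made up.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment

object GetDataStreamSketch {
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // a StreamTableSource[String] would typically wrap a stream like this one
    val words = env.fromElements("hello", "world", "flink")
    words.print()
    env.execute("stream-table-source-sketch")
  }
}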
package gitbucket.core.view

import gitbucket.core.controller.Context
import gitbucket.core.service.{RepositoryService, RequestCache}
import gitbucket.core.util.Implicits.RichString

trait LinkConverter { self: RequestCache =>

  /**
   * Creates a link to the issue or the pull request from the issue id.
   */
  protected def createIssueLink(repository: RepositoryService.RepositoryInfo, issueId: Int)(implicit context: Context): String = {
    val userName = repository.repository.userName
    val repositoryName = repository.repository.repositoryName

    getIssue(userName, repositoryName, issueId.toString) match {
      case Some(issue) if (issue.isPullRequest) =>
        s"""<a href="${context.path}/${userName}/${repositoryName}/pull/${issueId}">Pull #${issueId}</a>"""
      case Some(_) =>
        s"""<a href="${context.path}/${userName}/${repositoryName}/issues/${issueId}">Issue #${issueId}</a>"""
      case None =>
        s"Unknown #${issueId}"
    }
  }

  /**
   * Converts issue id, username and commit id to link in the given text.
   */
  protected def convertRefsLinks(text: String, repository: RepositoryService.RepositoryInfo,
                                 issueIdPrefix: String = "#", escapeHtml: Boolean = true)(implicit context: Context): String = {
    // escape HTML tags
    val escaped = if(escapeHtml) text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace("\"", "&quot;") else text

    escaped
      // convert username/project@SHA to link
      .replaceBy("(?<=(^|\\W))([a-zA-Z0-9\\-_]+)/([a-zA-Z0-9\\-_\\.]+)@([a-f0-9]{40})(?=(\\W|$))".r){ m =>
        getAccountByUserName(m.group(2)).map { _ =>
          s"""<a href="${context.path}/${m.group(2)}/${m.group(3)}/commit/${m.group(4)}">${m.group(2)}/${m.group(3)}@${m.group(4).substring(0, 7)}</a>"""
        }
      }
      // convert username/project#Num to link
      .replaceBy( ("(?<=(^|\\W))([a-zA-Z0-9\\-_]+)/([a-zA-Z0-9\\-_\\.]+)" + issueIdPrefix + "([0-9]+)(?=(\\W|$))").r){ m =>
        getIssue(m.group(2), m.group(3), m.group(4)) match {
          case Some(issue) if (issue.isPullRequest) =>
            Some(s"""<a href="${context.path}/${m.group(2)}/${m.group(3)}/pull/${m.group(4)}">${m.group(2)}/${m.group(3)}#${m.group(4)}</a>""")
          case Some(_) =>
            Some(s"""<a href="${context.path}/${m.group(2)}/${m.group(3)}/issues/${m.group(4)}">${m.group(2)}/${m.group(3)}#${m.group(4)}</a>""")
          case None =>
            Some(s"""${m.group(2)}/${m.group(3)}#${m.group(4)}""")
        }
      }
      // convert username@SHA to link
      .replaceBy( ("(?<=(^|\\W))([a-zA-Z0-9\\-_]+)@([a-f0-9]{40})(?=(\\W|$))").r ) { m =>
        getAccountByUserName(m.group(2)).map { _ =>
          s"""<a href="${context.path}/${m.group(2)}/${repository.name}/commit/${m.group(3)}">${m.group(2)}@${m.group(3).substring(0, 7)}</a>"""
        }
      }
      // convert username#Num to link
      .replaceBy( ("(?<=(^|\\W))([a-zA-Z0-9\\-_]+)" + issueIdPrefix + "([0-9]+)(?=(\\W|$))").r ) { m =>
        getIssue(m.group(2), repository.name, m.group(3)) match {
          case Some(issue) if(issue.isPullRequest) =>
            Some(s"""<a href="${context.path}/${m.group(2)}/${repository.name}/pull/${m.group(3)}">${m.group(2)}#${m.group(3)}</a>""")
          case Some(_) =>
            Some(s"""<a href="${context.path}/${m.group(2)}/${repository.name}/issues/${m.group(3)}">${m.group(2)}#${m.group(3)}</a>""")
          case None =>
            Some(s"""${m.group(2)}#${m.group(3)}""")
        }
      }
      // convert issue id to link
      .replaceBy(("(?<=(^|\\W))(GH-|" + issueIdPrefix + ")([0-9]+)(?=(\\W|$))").r){ m =>
        val prefix = if(m.group(2) == "issue:") "#" else m.group(2)
        getIssue(repository.owner, repository.name, m.group(3)) match {
          case Some(issue) if(issue.isPullRequest) =>
            Some(s"""<a href="${context.path}/${repository.owner}/${repository.name}/pull/${m.group(3)}">${prefix}${m.group(3)}</a>""")
          case Some(_) =>
            Some(s"""<a href="${context.path}/${repository.owner}/${repository.name}/issues/${m.group(3)}">${prefix}${m.group(3)}</a>""")
          case None =>
            Some(s"""${m.group(2)}${m.group(3)}""")
        }
      }
      // convert @username to link
      .replaceBy("(?<=(^|\\W))@([a-zA-Z0-9\\-_\\.]+)(?=(\\W|$))".r){ m =>
        getAccountByUserName(m.group(2)).map { _ =>
          s"""<a href="${context.path}/${m.group(2)}">@${m.group(2)}</a>"""
        }
      }
      // convert commit id to link
      .replaceAll("(?<=(^|[^\\w/@]))([a-f0-9]{40})(?=(\\W|$))",
        s"""<a href="${context.path}/${repository.owner}/${repository.name}/commit/$$2">$$2</a>""")
  }
}
noc06140728/gitbucket
src/main/scala/gitbucket/core/view/LinkConverter.scala
Scala
apache-2.0
4,716
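Stand-alone illustration of the regex-rewrite technique convertRefsLinks relies on. gitbucket's RichString.replaceBy extension and its getIssue lookup are not reproduced here; a plain scala.util.matching.Regex and a dummy resolver stand in for them, and all names in this sketch are made up.

import scala.util.matching.Regex

object RefLinkSketch extends App {
  // same lookbehind/lookahead framing as the gitbucket patterns, reduced to #NNN
  val issuePattern: Regex = """(?<=(^|\W))#([0-9]+)(?=(\W|$))""".r

  // pretend only issue 42 exists in the repository
  def issueExists(id: String): Boolean = id == "42"

  def linkIssueRefs(text: String, path: String, owner: String, repo: String): String =
    issuePattern.replaceAllIn(text, m => {
      val id = m.group(2)
      val replacement =
        if (issueExists(id)) s"""<a href="$path/$owner/$repo/issues/$id">#$id</a>"""
        else s"#$id"
      Regex.quoteReplacement(replacement)
    })

  // #42 becomes an anchor, #7 is left as plain text
  println(linkIssueRefs("Fixes #42 but not #7", "", "alice", "demo"))
}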
// Databricks notebook source exported at Sun, 8 May 2016 16:00:03 UTC import org.apache.log4j.Logger import org.apache.log4j.Level import org.apache.spark.graphx._ import org.apache.spark.rdd._ import scala.io.Source Logger.getLogger("org").setLevel(Level.ERROR) Logger.getLogger("akka").setLevel(Level.ERROR) // COMMAND ---------- //upload CSV files via Tables option in the left panel, then move contents to a more user-friendly folder for further operations import java.nio.file.{Path, Paths, Files} def moveToTarget(str:String):Unit = { val path = Paths.get(str) if (Files.exists(path)) { dbutils.fs.mv(path.toString, "/FileStore/tables/graphx/" + path.getFileName) dbutils.fs.rm(path.getParent.toString) } } //moveToTarget("/FileStore/tables/i1kzjjyx1462604201728/country_continent.csv") display(dbutils.fs.ls("/FileStore/tables/graphx")) // COMMAND ---------- def isHeader(str:String):Boolean = str.startsWith("#") // COMMAND ---------- // class definition class PlaceNode(val name:String) extends Serializable case class Metro(override val name:String, population:Int) extends PlaceNode(name) case class Country(override val name:String) extends PlaceNode(name) case class Continent(override val name: String) extends PlaceNode(name) // when creating tuples for vertices and edges differentiate by adding a certain amount to their ids: +0:metros; +100:countries; +200:continents / +0:metro-country; +100:country-continent // load metros and countries, then set connections between them, then create the metros graph val metros:RDD[(VertexId, PlaceNode)] = sc.textFile("/FileStore/tables/graphx/metro.csv").filter(!isHeader(_)).map(_.split(",")).map(x => (0L + x(0).toInt, Metro(x(1), x(2).toInt))) val countries:RDD[(VertexId, PlaceNode)] = sc.textFile("/FileStore/tables/graphx/country.csv").filter(!isHeader(_)).map(_.split(",")).map(x => (100L + x(0).toInt, Country(x(1)))) val mclinks:RDD[Edge[Int]] = sc.textFile("/FileStore/tables/graphx/metro_country.csv").filter(!isHeader(_)).map(_.split(",")).map(x => Edge(0L + x(0).toInt, 100L + x(1).toInt, 1)) val mcnodes = metros ++ countries val mcGraph = Graph(mcnodes, mclinks) // load continents and connections with countries, then create the countries Graph val continents:RDD[(VertexId, PlaceNode)] = sc.textFile("/FileStore/tables/graphx/continent.csv").filter(!isHeader(_)).map(_.split(",")).map(x => (200L + x(0).toInt, Continent(x(1)))) val cclinks:RDD[Edge[Int]] = sc.textFile("/FileStore/tables/graphx/country_continent.csv").filter(!isHeader(_)).map(_.split(",")).map(x => Edge(100L + x(0).toInt, 200L + x(1).toInt, 1)) val mcclinks = mclinks ++ cclinks val mccnodes = metros ++ countries ++ continents val mccGraph = Graph(mccnodes, mcclinks) // note: somehow I cannot join metros, countries and continents unless they are created under the same widget (...) 
// COMMAND ---------- val v5 = mcGraph.vertices.take(5) // take 5 vertices val e5 = mcGraph.edges.take(5) // take 5 edges val dst1 = mcGraph.edges.filter(_.srcId == 1).map(_.dstId).collect() // get destination vertices reached from vertex 1 val src103 = mcGraph.edges.filter(_.dstId == 103).map(_.srcId).collect() // get source vertices that reach vertex 103 // COMMAND ---------- mcGraph.numEdges // print number of edges mcGraph.numVertices // print number of vertices (metros + countries) // define utility type and functions type VertexDegree = (VertexId, Int) def max(a: VertexDegree, b: VertexDegree) = if (a._2 > b._2) a else b def min(a: VertexDegree, b: VertexDegree) = if (a._2 <= b._2) a else b mcGraph.outDegrees.reduce(max) // every metropolitis has one country mcGraph.vertices.filter(_._1 == 43).collect() mcGraph.inDegrees.reduce(max) // get the country with more metropolis mcGraph.vertices.filter(_._1 == 108).collect() mcGraph.outDegrees.filter(_._2 <= 1).count() // number of vertices with one outgoing edge is equals to number of metropolis (65) mcGraph.degrees.reduce(max) // get the vertex with more connections (inbound + outbound): connectedness // build a degree histogram for the countries to get number of countries (x) with 1 metro (1, x), number of countries (y) with 2 metros (2, y), ... where x, y are the degree val metrosHist = mcGraph.degrees.filter(_._1 >= 100).map(x => (x._2, x._1)).groupByKey.map(x => (x._1, x._2.size)).sortBy(_._1).collect() // COMMAND ---------- //import breeze.linalg._ // not really used, see below //import breeze.plot._ // not working in Databricks thus another approach is used for plotting, see below // define a function to calculate the degree histogram (same as before thus no need to recalculate, just use metrosHist instead) //def degreeHistogram(net: Graph[PlaceNode, Int]): Array[(Int, Int)] = net.degrees.filter(_._1 >= 100).map(x => (x._2, x._1)).groupByKey.map(x => (x._1, x._2.size)).sortBy(_._1).collect() // calculate probability distribution val totalCountries = countries.count() // total number of countries val degreeDistribution = metrosHist.map { case (degree, degreeCountries) => (degree, degreeCountries.toDouble / totalCountries) } //val x = new DenseVector(degreeDistribution map (_._1.toDouble)) //val y = new DenseVector(degreeDistribution map (_._2)) // plot degree distribution by transforming to a DataFrame so that I can use Databricks plotting capabilities val ddLast = degreeDistribution.last._1 // take last degree val ddMap = degreeDistribution.toMap val ddHist = (for (i <- 0 to ddLast) yield { if (ddMap.contains(i)) (i, ddMap(i)) else (i, 0.0) }).toArray case class DegreeDistribution(degree: Int, distribution: Double) val ddDF = sc.parallelize(ddHist.map { case (degree, distribution) => DegreeDistribution(degree, distribution)}).toDF() ddDF.registerTempTable("degree_distribution_table") display(sqlContext.sql("select * from degree_distribution_table")) // COMMAND ---------- import java.util.Locale var countries:Map[String, String] = Map() for (iso <- Locale.getISOCountries()) countries ++= Map(new Locale("", iso).getDisplayCountry() -> new Locale("", iso).getISO3Country()) def iso(country:String) = countries(country) // COMMAND ---------- case class Vertex(name:String, population:Int, entity:String) // override previous definitions; common structure now: id, Vertex(name, population, type (metro, country, continent)) // by default population in countries and continents is zero... 
in the next widget they will be calculated val metros = sc.textFile("/FileStore/tables/graphx/metro.csv").filter(!isHeader(_)).map(_.split(",")).map(x => (0L + x(0).toInt, Vertex(x(1), x(2).toInt, "metro"))) val countries = sc.textFile("/FileStore/tables/graphx/country.csv").filter(!isHeader(_)).map(_.split(",")).map(x => (100L + x(0).toInt, Vertex(iso(x(1)), 0, "country"))) val mclinks = sc.textFile("/FileStore/tables/graphx/metro_country.csv").filter(!isHeader(_)).map(_.split(",")).map(x => Edge(0L + x(0).toInt, 100L + x(1).toInt, 1)) val continents = sc.textFile("/FileStore/tables/graphx/continent.csv").filter(!isHeader(_)).map(_.split(",")).map(x => (200L + x(0).toInt, Vertex(x(1), 0, "continent"))) val cclinks = sc.textFile("/FileStore/tables/graphx/country_continent.csv").filter(!isHeader(_)).map(_.split(",")).map(x => Edge(100L + x(0).toInt, 200L + x(1).toInt, 1)) val mccGraph = Graph(metros ++ countries ++ continents, mclinks ++ cclinks) // goal: aggregate population from metros to countries // to destination vertex sends a message with 3 values: Vertex = (dst name, src population, dst entity) val countriesAgg: VertexRDD[Vertex] = mccGraph.aggregateMessages[Vertex]( t => { if (t.dstAttr.entity == "country") t.sendToDst(Vertex(t.dstAttr.name, t.srcAttr.population, t.dstAttr.entity)) }, (a, b) => Vertex(a.name, a.population + b.population, a.entity) ) val mccGraphAggC = mccGraph.joinVertices(countriesAgg)((id, a, b) => b) // goal: aggregate population from countries to continents // to destination vertex sends a message with 3 values: Vertex = (dst name, src population, dst entity) val continentsAgg: VertexRDD[Vertex] = mccGraphAggC.aggregateMessages[Vertex]( t => { if (t.dstAttr.entity == "continent") t.sendToDst(Vertex(t.dstAttr.name, t.srcAttr.population, t.dstAttr.entity)) }, (a, b) => Vertex(a.name, a.population + b.population, a.entity) ) val mccGraphAggCC = mccGraphAggC.joinVertices(continentsAgg)((id, a, b) => b) //val mccGF = GraphFrame.fromGraphX(mccGraphMCC) //display(mccGF.vertices) import org.apache.spark.sql.Row import org.apache.spark.sql.types.{StructType,StructField,StringType,LongType} val countryRows = mccGraphAggCC.vertices. filter{ case (id, Vertex(name, population, entity)) => entity == "country" }. map{ case (id, Vertex(name, population, entity)) => Row(name, population.toLong) }.collect() object countrySchema { val countryCode = StructField("countryCode", StringType) val population = StructField("population", LongType) val struct = StructType(Array(countryCode, population)) } // COMMAND ---------- val worldDF = sqlContext.createDataFrame(sc.parallelize(countryRows), countrySchema.struct) display(worldDF) // COMMAND ----------
flopezlasanta/spark-notebooks
coursera/graph_analytics/GraphX.scala
Scala
mit
9,146
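Toy version (not from the notebook above) of the aggregateMessages roll-up it performs: sum an Int attribute carried by source vertices into their destination vertex. It assumes a live SparkContext named sc, as Databricks notebooks provide; the vertex ids and values are invented.

import org.apache.spark.graphx.{Edge, Graph, VertexId, VertexRDD}
import org.apache.spark.rdd.RDD

val vertices: RDD[(VertexId, Int)] = sc.parallelize(Seq(
  (1L, 100), (2L, 200),   // "metro"-like vertices carrying a population
  (10L, 0)))              // "country"-like vertex that should receive the sum

val edges: RDD[Edge[Int]] = sc.parallelize(Seq(
  Edge(1L, 10L, 1), Edge(2L, 10L, 1)))

val graph = Graph(vertices, edges)

// send each source population to its destination, then sum per destination
val rolledUp: VertexRDD[Int] = graph.aggregateMessages[Int](
  ctx => ctx.sendToDst(ctx.srcAttr),
  _ + _)

rolledUp.collect().foreach(println)   // (10,300)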
object Test extends App {
  println(Macros.foo_with_macros_enabled)
  println(Macros.foo_with_macros_disabled)
}
scala/scala
test/files/run/macro-typecheck-macrosdisabled2/Test_2.scala
Scala
apache-2.0
113
package edu.uchicago.cs.ndnn

import org.junit.Assert._
import org.junit.Test
import org.nd4j.linalg.factory.Nd4j

class IndexerTest {

  @Test
  def testGet: Unit = {
    val a = Nd4j.create(Array(Array(3d, 4d, 5d), Array(1d, 6d, 7d)))
    val b = Nd4j.create(Array(2d, 1d)).reshape(2, 1)
    val fetched = Index.get(a, b)
    assertArrayEquals(Array(2, 1), fetched.shape)
    assertEquals(5d, fetched.getDouble(0, 0), 0.001)
    assertEquals(6d, fetched.getDouble(1, 0), 0.001)
  }

  @Test
  def testPut: Unit = {
    val a = Nd4j.createUninitialized(Array(2, 3)).assign(0)
    val b = Nd4j.create(Array(2d, 1d)).reshape(2, 1)
    val value = Nd4j.create(Array(3d, 5d)).reshape(2, 1)
    Index.put(a, b, value)
    assertEquals(3d, a.getDouble(0, 2), 0.001)
    assertEquals(5d, a.getDouble(1, 1), 0.001)
  }
}
harperjiang/enc-selector
src/test/scala/edu/uchicago/cs/ndnn/IndexTest.scala
Scala
apache-2.0
817
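Not part of the project above: Index.get is a project-specific helper, so this sketch shows the gather it is tested for, fetched(i, 0) = a(i, b(i, 0)), using only plain ND4J calls. The object name is made up, and the exact getDouble/putScalar overloads may differ slightly across ND4J versions.

import org.nd4j.linalg.factory.Nd4j

object ManualGatherSketch extends App {
  val a = Nd4j.create(Array(Array(3d, 4d, 5d), Array(1d, 6d, 7d)))
  val b = Nd4j.create(Array(2d, 1d)).reshape(2, 1)

  val fetched = Nd4j.zeros(2, 1)
  for (i <- 0 until 2) {
    val col = b.getDouble(i, 0).toInt               // column index picked for row i
    fetched.putScalar(Array(i, 0), a.getDouble(i, col))
  }
  println(fetched)   // [[5.0], [6.0]] -- the values asserted in testGet
}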
package spire package macros import language.experimental.macros import spire.algebra._ import spire.macros.compat.{Context, termName} object Auto { object scala { def semiring[A]: Semiring[A] = macro ScalaAutoMacros.semiringImpl[A] def rig[A](z: A, o: A): Rig[A] = macro ScalaAutoMacros.rigImpl[A] def rng[A](z: A): Rng[A] = macro ScalaAutoMacros.rngImpl[A] def ring[A](z: A, o: A): Ring[A] = macro ScalaAutoMacros.ringImpl[A] def euclideanRing[A](z: A, o: A)(implicit ev: Eq[A]): EuclideanRing[A] = macro ScalaAutoMacros.euclideanRingImpl[A] def field[A](z: A, o: A)(implicit ev: Eq[A]): Field[A] = macro ScalaAutoMacros.fieldImpl[A] def eq[A]: Eq[A] = macro ScalaAutoMacros.eqImpl[A] // TODO: partialOrder ? def order[A]: Order[A] = macro ScalaAutoMacros.orderImpl[A] object collection { def semigroup[A]: Semigroup[A] = macro ScalaAutoMacros.collectionSemigroupImpl[A] def monoid[A](z: A): Monoid[A] = macro ScalaAutoMacros.collectionMonoidImpl[A] } } object java { def semiring[A]: Semiring[A] = macro JavaAutoMacros.semiringImpl[A] def rig[A](z: A, o: A): Rig[A] = macro JavaAutoMacros.rigImpl[A] def rng[A](z: A): Rng[A] = macro JavaAutoMacros.rngImpl[A] def ring[A](z: A, o: A): Ring[A] = macro JavaAutoMacros.ringImpl[A] def euclideanRing[A](z: A, o: A)(implicit ev: Eq[A]): EuclideanRing[A] = macro JavaAutoMacros.euclideanRingImpl[A] def field[A](z: A, o: A)(implicit ev: Eq[A]): Field[A] = macro JavaAutoMacros.fieldImpl[A] def eq[A]: Eq[A] = macro JavaAutoMacros.eqImpl[A] // TODO: partialOrder ? def order[A]: Order[A] = macro JavaAutoMacros.orderImpl[A] object collection { def monoid[A](empty: A): Monoid[A] = macro JavaAutoMacros.collectionMonoidImpl[A] } } } abstract class AutoOps { val c: Context import c.universe._ def unop[A](name: String, x: String = "x"): c.Expr[A] = c.Expr[A](Select(Ident(termName(c)(x)), termName(c)(name))) def binop[A](name: String, x: String = "x", y: String = "y"): c.Expr[A] = c.Expr[A](Apply( Select(Ident(termName(c)(x)), termName(c)(name)), List(Ident(termName(c)(y))))) def binopSearch[A: c.WeakTypeTag](names: List[String], x: String = "x", y: String = "y"): Option[c.Expr[A]] = names find { name => hasMethod1[A, A, A](name) } map (binop[A](_, x, y)) def unopSearch[A: c.WeakTypeTag](names: List[String], x: String = "x"): Option[c.Expr[A]] = names find { name => hasMethod0[A, A](name) } map (unop[A](_, x)) def hasMethod0[A: c.WeakTypeTag, B: c.WeakTypeTag](name: String): Boolean = { val tpeA = c.weakTypeTag[A].tpe val tpeB = c.weakTypeTag[B].tpe tpeA.members exists { m => m.isMethod && m.isPublic && m.name.encodedName.toString == name && (m.typeSignature match { case MethodType(Nil, ret) => ret =:= tpeB case _ => false }) } } def hasMethod1[A: c.WeakTypeTag, B: c.WeakTypeTag, C: c.WeakTypeTag](name: String): Boolean = { val tpeA = c.weakTypeTag[A].tpe val tpeB = c.weakTypeTag[B].tpe val tpeC = c.weakTypeTag[C].tpe tpeA.members exists { m => m.isMethod && m.isPublic && m.name.encodedName.toString == name && (m.typeSignature match { case MethodType(List(param), ret) => param.typeSignature =:= tpeB && ret =:= tpeC case _ => false }) } } def failedSearch(name: String, op: String): c.Expr[Nothing] = c.abort(c.enclosingPosition, "Couldn't find matching method for op %s (%s)." 
format (name, op)) } abstract class AutoAlgebra extends AutoOps { ops => def plus[A: c.WeakTypeTag]: c.Expr[A] def minus[A: c.WeakTypeTag]: c.Expr[A] def times[A: c.WeakTypeTag]: c.Expr[A] def negate[A: c.WeakTypeTag]: c.Expr[A] def div[A: c.WeakTypeTag]: c.Expr[A] def euclideanFunction[A: c.WeakTypeTag]: c.Expr[BigInt] def quot[A: c.WeakTypeTag]: c.Expr[A] def mod[A: c.WeakTypeTag](stub: => c.Expr[A] = failedSearch("mod", "%")): c.Expr[A] def equals: c.Expr[Boolean] def compare: c.Expr[Int] def Semiring[A: c.WeakTypeTag](): c.Expr[Semiring[A]] = { c.universe.reify { new Semiring[A] { def plus(x: A, y: A): A = ops.plus[A].splice def times(x: A, y: A): A = ops.times[A].splice } } } def Rig[A: c.WeakTypeTag](z: c.Expr[A], o: c.Expr[A]): c.Expr[Rig[A]] = { c.universe.reify { new Rig[A] { def zero: A = z.splice def one: A = o.splice def plus(x: A, y: A): A = ops.plus[A].splice def times(x: A, y: A): A = ops.times[A].splice } } } def Rng[A: c.WeakTypeTag](z: c.Expr[A]): c.Expr[Rng[A]] = { c.universe.reify { new Rng[A] { def zero: A = z.splice def plus(x: A, y: A): A = ops.plus[A].splice def times(x: A, y: A): A = ops.times[A].splice override def minus(x: A, y: A): A = ops.minus[A].splice def negate(x: A): A = ops.negate[A].splice } } } def Ring[A: c.WeakTypeTag](z: c.Expr[A], o: c.Expr[A]): c.Expr[Ring[A]] = { c.universe.reify { new Ring[A] { def zero: A = z.splice def one: A = o.splice def plus(x: A, y: A): A = ops.plus[A].splice def times(x: A, y: A): A = ops.times[A].splice override def minus(x: A, y: A): A = ops.minus[A].splice def negate(x: A): A = ops.negate[A].splice } } } /* TODO: missing GCD ring. Any examples of types with .gcd and .lcm in Scala/Java ? */ def EuclideanRing[A: c.WeakTypeTag](z: c.Expr[A], o: c.Expr[A]) (ev: c.Expr[Eq[A]]): c.Expr[EuclideanRing[A]] = { c.universe.reify { new EuclideanRing[A] { self => // default implementations from EuclideanRing.WithEuclideanAlgorithm @tailrec final def euclid(a: A, b: A)(implicit ev: Eq[A]): A = if (isZero(b)) a else euclid(b, emod(a, b)) def gcd(a: A, b: A)(implicit ev: Eq[A]): A = euclid(a, b)(ev) def lcm(a: A, b: A)(implicit ev: Eq[A]): A = if (isZero(a) || isZero(b)) zero else times(equot(a, gcd(a, b)), b) def zero: A = z.splice def one: A = o.splice def plus(x: A, y: A): A = ops.plus[A].splice def times(x: A, y: A): A = ops.times[A].splice override def minus(x: A, y: A): A = ops.minus[A].splice def negate(x: A): A = ops.negate[A].splice def euclideanFunction(x: A): BigInt = ops.euclideanFunction[BigInt].splice def equot(x: A, y: A): A = ops.quot[A].splice def emod(x: A, y: A): A = ops.mod[A]().splice } } } def Field[A: c.WeakTypeTag] (z: c.Expr[A], o: c.Expr[A])(ev: c.Expr[Eq[A]]): c.Expr[Field[A]] = { c.universe.reify { new Field[A] { // default implementations from Field.WithDefaultGCD override def gcd(a: A, b: A)(implicit eqA: Eq[A]): A = if (isZero(a) && isZero(b)) zero else one override def lcm(a: A, b: A)(implicit eqA: Eq[A]): A = times(a, b) def zero: A = z.splice def one: A = o.splice def plus(x: A, y: A): A = ops.plus[A].splice def times(x: A, y: A): A = ops.times[A].splice override def minus(x: A, y: A): A = ops.minus[A].splice def negate(x: A): A = ops.negate[A].splice override def euclideanFunction(x: A): BigInt = BigInt(0) override def equot(x: A, y: A): A = ops.div[A].splice override def emod(x: A, y: A): A = ops.mod[A](z).splice def div(x: A, y: A): A = ops.div[A].splice } } } def Eq[A: c.WeakTypeTag](): c.Expr[Eq[A]] = { c.universe.reify { new Eq[A] { def eqv(x: A, y: A): Boolean = ops.equals.splice } } } def 
Order[A: c.WeakTypeTag](): c.Expr[Order[A]] = { c.universe.reify { new Order[A] { override def eqv(x: A, y: A): Boolean = ops.equals.splice def compare(x: A, y: A): Int = ops.compare.splice } } } } case class ScalaAlgebra[C <: Context](c: C) extends AutoAlgebra { // we munge these names with dollar signs in them to avoid getting // warnings about possible interpolations. these are not intended to // be interpolations. def plusplus[A]: c.Expr[A] = binop[A]("$" + "plus" + "$" + "plus") def plus[A: c.WeakTypeTag]: c.Expr[A] = binop[A]("$" + "plus") def minus[A: c.WeakTypeTag]: c.Expr[A] = binop[A]("$" + "minus") def times[A: c.WeakTypeTag]: c.Expr[A] = binop[A]("$" + "times") def negate[A: c.WeakTypeTag]: c.Expr[A] = unop[A]("unary_" + "$" + "minus") /* TODO: this is a bit careless, but works for our examples */ def euclideanFunction[A: c.WeakTypeTag]: c.Expr[BigInt] = { import c.universe._ c.Expr[BigInt](q"x.toBigInt.abs") } def quot[A: c.WeakTypeTag]: c.Expr[A] = binopSearch[A]("quot" :: ("$" + "div") :: Nil) getOrElse failedSearch("quot", "/~") def div[A: c.WeakTypeTag]: c.Expr[A] = binop[A]("$" + "div") def mod[A: c.WeakTypeTag](stub: => c.Expr[A]): c.Expr[A] = binop[A]("$" + "percent") def equals: c.Expr[Boolean] = binop[Boolean]("$" + "eq" + "$" + "eq") def compare: c.Expr[Int] = binop[Int]("compare") } case class JavaAlgebra[C <: Context](c: C) extends AutoAlgebra { def plus[A: c.WeakTypeTag]: c.Expr[A] = binopSearch[A]("add" :: "plus" :: Nil) getOrElse failedSearch("plus", "+") def minus[A: c.WeakTypeTag]: c.Expr[A] = binopSearch[A]("subtract" :: "minus" :: Nil) getOrElse failedSearch("minus", "-") def times[A: c.WeakTypeTag]: c.Expr[A] = binopSearch[A]("multiply" :: "times" :: Nil) getOrElse failedSearch("times", "*") def div[A: c.WeakTypeTag]: c.Expr[A] = binopSearch[A]("divide" :: "div" :: Nil) getOrElse failedSearch("div", "/") def negate[A: c.WeakTypeTag]: c.Expr[A] = unopSearch[A]("negate" :: "negative" :: Nil) getOrElse { // We can implement negate interms of minus. 
This is actually required // for JScience's Rational :( import c.universe._ c.Expr[A](Apply( Select(Ident(termName(c)("zero")), termName(c)("minus")), List(Ident(termName(c)("x"))))) } /* TODO: this is a bit careless, but works for our examples */ def euclideanFunction[A: c.WeakTypeTag]: c.Expr[BigInt] = { import c.universe._ c.Expr[BigInt](q"_root_.scala.BigInt(x.toBigInteger).abs") } def quot[A: c.WeakTypeTag]: c.Expr[A] = binopSearch[A]("quot" :: "divide" :: "div" :: Nil) getOrElse failedSearch("quot", "/~") def mod[A: c.WeakTypeTag](stub: => c.Expr[A]): c.Expr[A] = binopSearch("mod" :: "remainder" :: Nil) getOrElse stub def equals: c.Expr[Boolean] = binop[Boolean]("equals") def compare: c.Expr[Int] = binop[Int]("compareTo") } object ScalaAutoMacros { def semiringImpl[A: c.WeakTypeTag](c: Context): c.Expr[Semiring[A]] = ScalaAlgebra[c.type](c).Semiring[A]() def rigImpl[A: c.WeakTypeTag](c: Context)(z: c.Expr[A], o: c.Expr[A]): c.Expr[Rig[A]] = ScalaAlgebra[c.type](c).Rig[A](z, o) def rngImpl[A: c.WeakTypeTag](c: Context)(z: c.Expr[A]): c.Expr[Rng[A]] = ScalaAlgebra[c.type](c).Rng[A](z) def ringImpl[A: c.WeakTypeTag](c: Context)(z: c.Expr[A], o: c.Expr[A]): c.Expr[Ring[A]] = ScalaAlgebra[c.type](c).Ring[A](z, o) def euclideanRingImpl[A: c.WeakTypeTag](c: Context) (z: c.Expr[A], o: c.Expr[A])(ev: c.Expr[Eq[A]]): c.Expr[EuclideanRing[A]] = ScalaAlgebra[c.type](c).EuclideanRing[A](z, o)(ev) def fieldImpl[A: c.WeakTypeTag](c: Context) (z: c.Expr[A], o: c.Expr[A])(ev: c.Expr[Eq[A]]): c.Expr[Field[A]] = ScalaAlgebra[c.type](c).Field[A](z, o)(ev) def eqImpl[A: c.WeakTypeTag](c: Context): c.Expr[Eq[A]] = ScalaAlgebra[c.type](c).Eq[A]() def orderImpl[A: c.WeakTypeTag](c: Context): c.Expr[Order[A]] = ScalaAlgebra[c.type](c).Order[A]() def collectionSemigroupImpl[A: c.WeakTypeTag](c: Context): c.Expr[Semigroup[A]] = { val ops = ScalaAlgebra[c.type](c) c.universe.reify { new Semigroup[A] { def combine(x: A, y: A): A = ops.plusplus[A].splice } } } def collectionMonoidImpl[A: c.WeakTypeTag](c: Context)(z: c.Expr[A]): c.Expr[Monoid[A]] = { val ops = ScalaAlgebra[c.type](c) c.universe.reify { new Monoid[A] { def empty: A = z.splice def combine(x: A, y: A): A = ops.plusplus[A].splice } } } } object JavaAutoMacros { def semiringImpl[A: c.WeakTypeTag](c: Context): c.Expr[Semiring[A]] = JavaAlgebra[c.type](c).Semiring[A]() def rigImpl[A: c.WeakTypeTag](c: Context)(z: c.Expr[A], o: c.Expr[A]): c.Expr[Rig[A]] = JavaAlgebra[c.type](c).Rig[A](z, o) def rngImpl[A: c.WeakTypeTag](c: Context)(z: c.Expr[A]): c.Expr[Rng[A]] = JavaAlgebra[c.type](c).Rng[A](z) def ringImpl[A: c.WeakTypeTag](c: Context)(z: c.Expr[A], o: c.Expr[A]): c.Expr[Ring[A]] = JavaAlgebra[c.type](c).Ring[A](z, o) def euclideanRingImpl[A: c.WeakTypeTag](c: Context) (z: c.Expr[A], o: c.Expr[A])(ev: c.Expr[Eq[A]]): c.Expr[EuclideanRing[A]] = JavaAlgebra[c.type](c).EuclideanRing[A](z, o)(ev) def fieldImpl[A: c.WeakTypeTag](c: Context) (z: c.Expr[A], o: c.Expr[A])(ev: c.Expr[Eq[A]]): c.Expr[Field[A]] = JavaAlgebra[c.type](c).Field[A](z, o)(ev) def eqImpl[A: c.WeakTypeTag](c: Context): c.Expr[Eq[A]] = JavaAlgebra[c.type](c).Eq[A]() def orderImpl[A: c.WeakTypeTag](c: Context): c.Expr[Order[A]] = JavaAlgebra[c.type](c).Order[A]() def collectionMonoidImpl[A: c.WeakTypeTag](c: Context)(empty: c.Expr[A]): c.Expr[Monoid[A]] = { val ops = JavaAlgebra[c.type](c) val addx = ops.binop[Unit]("addAll", "z", "x") val addy = ops.binop[Unit]("addAll", "z", "y") val z = empty c.universe.reify { new Monoid[A] { def empty: A = z.splice def combine(x: A, y: A): A = { val 
z = empty addx.splice addy.splice z } } } } }
non/spire
core/src/main/scala/spire/macros/Auto.scala
Scala
mit
13,830
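Usage sketch (not taken from the spire sources above): the Auto.scala macros materialize type-class instances for any type that already exposes the operators they search for (+, -, *, unary_-, ==, compare). BigInt is used here purely as an example type; the object and value names are invented.

import spire.algebra.{Order, Ring}
import spire.macros.Auto

object AutoRingSketch extends App {
  // the macro wires Ring.plus/times/minus/negate to BigInt's +, *, -, unary_-
  implicit val bigIntRing: Ring[BigInt] = Auto.scala.ring[BigInt](BigInt(0), BigInt(1))
  // the macro wires Order.compare to BigInt's compare
  implicit val bigIntOrder: Order[BigInt] = Auto.scala.order[BigInt]

  println(bigIntRing.plus(BigInt(2), BigInt(3)))      // 5
  println(bigIntRing.times(BigInt(4), BigInt(5)))     // 20
  println(bigIntOrder.compare(BigInt(1), BigInt(2)))  // negative
}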
/* Copyright 2012 Christian Douven

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/
package almhirt.syntax

object almfuture extends almhirt.almfuture.ToAlmFutureOps
chridou/almhirt
almhirt-common/src/main/scala/almhirt/syntax/almfuture.scala
Scala
apache-2.0
671
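Generic illustration of the "syntax object" idiom the file above uses: a library defines an ops trait and exposes a single object extending it, so callers can bring the extension methods into scope with one import (here, import almhirt.syntax.almfuture._). The trait and methods in this sketch are made up and are not almhirt's actual ToAlmFutureOps members.

object SyntaxObjectSketch extends App {

  trait ToGreetingOps {
    implicit final class GreetingOps(private val self: String) {
      def shout: String = self.toUpperCase + "!"
    }
  }

  // the library exposes one object to import from, just like `object almfuture`
  object greeting extends ToGreetingOps

  import greeting._
  println("hello".shout)   // prints HELLO!
}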
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.analysis import org.scalatest.Assertions._ import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.dsl.plans._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, Complete, Count, Max} import org.apache.spark.sql.catalyst.parser.CatalystSqlParser import org.apache.spark.sql.catalyst.plans.{Cross, LeftOuter, RightOuter} import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, GenericArrayData, MapData} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types._ private[sql] case class GroupableData(data: Int) { def getData: Int = data } private[sql] class GroupableUDT extends UserDefinedType[GroupableData] { override def sqlType: DataType = IntegerType override def serialize(groupableData: GroupableData): Int = groupableData.data override def deserialize(datum: Any): GroupableData = { datum match { case data: Int => GroupableData(data) } } override def userClass: Class[GroupableData] = classOf[GroupableData] private[spark] override def asNullable: GroupableUDT = this } private[sql] case class UngroupableData(data: Map[Int, Int]) { def getData: Map[Int, Int] = data } private[sql] class UngroupableUDT extends UserDefinedType[UngroupableData] { override def sqlType: DataType = MapType(IntegerType, IntegerType) override def serialize(ungroupableData: UngroupableData): MapData = { val keyArray = new GenericArrayData(ungroupableData.data.keys.toSeq) val valueArray = new GenericArrayData(ungroupableData.data.values.toSeq) new ArrayBasedMapData(keyArray, valueArray) } override def deserialize(datum: Any): UngroupableData = { datum match { case data: MapData => val keyArray = data.keyArray().array val valueArray = data.valueArray().array assert(keyArray.length == valueArray.length) val mapData = keyArray.zip(valueArray).toMap.asInstanceOf[Map[Int, Int]] UngroupableData(mapData) } } override def userClass: Class[UngroupableData] = classOf[UngroupableData] private[spark] override def asNullable: UngroupableUDT = this } case class TestFunction( children: Seq[Expression], inputTypes: Seq[AbstractDataType]) extends Expression with ImplicitCastInputTypes with Unevaluable { override def nullable: Boolean = true override def dataType: DataType = StringType override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): Expression = copy(children = newChildren) } case class UnresolvedTestPlan() extends LeafNode { override lazy val resolved = false override def output: Seq[Attribute] = Nil } class 
AnalysisErrorSuite extends AnalysisTest { import TestRelations._ def errorTest( name: String, plan: LogicalPlan, errorMessages: Seq[String], caseSensitive: Boolean = true): Unit = { test(name) { assertAnalysisError(plan, errorMessages, caseSensitive) } } def errorClassTest( name: String, plan: LogicalPlan, errorClass: String, messageParameters: Array[String]): Unit = { test(name) { assertAnalysisErrorClass(plan, errorClass, messageParameters) } } val dateLit = Literal.create(null, DateType) errorTest( "scalar subquery with 2 columns", testRelation.select( (ScalarSubquery(testRelation.select($"a", dateLit.as("b"))) + Literal(1)).as("a")), "Scalar subquery must return only one column, but got 2" :: Nil) errorTest( "scalar subquery with no column", testRelation.select(ScalarSubquery(LocalRelation()).as("a")), "Scalar subquery must return only one column, but got 0" :: Nil) errorTest( "single invalid type, single arg", testRelation.select(TestFunction(dateLit :: Nil, IntegerType :: Nil).as("a")), "cannot resolve" :: "testfunction(CAST(NULL AS DATE))" :: "argument 1" :: "requires int type" :: "'CAST(NULL AS DATE)' is of date type" :: Nil) errorTest( "single invalid type, second arg", testRelation.select( TestFunction(dateLit :: dateLit :: Nil, DateType :: IntegerType :: Nil).as("a")), "cannot resolve" :: "testfunction(CAST(NULL AS DATE), CAST(NULL AS DATE))" :: "argument 2" :: "requires int type" :: "'CAST(NULL AS DATE)' is of date type" :: Nil) errorTest( "multiple invalid type", testRelation.select( TestFunction(dateLit :: dateLit :: Nil, IntegerType :: IntegerType :: Nil).as("a")), "cannot resolve" :: "testfunction(CAST(NULL AS DATE), CAST(NULL AS DATE))" :: "argument 1" :: "argument 2" :: "requires int type" :: "'CAST(NULL AS DATE)' is of date type" :: Nil) errorTest( "invalid window function", testRelation2.select( WindowExpression( Literal(0), WindowSpecDefinition( UnresolvedAttribute("a") :: Nil, SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil, UnspecifiedFrame)).as("window")), "not supported within a window function" :: Nil) errorTest( "distinct aggregate function in window", testRelation2.select( WindowExpression( AggregateExpression(Count(UnresolvedAttribute("b")), Complete, isDistinct = true), WindowSpecDefinition( UnresolvedAttribute("a") :: Nil, SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil, UnspecifiedFrame)).as("window")), "Distinct window functions are not supported" :: Nil) errorTest( "window aggregate function with filter predicate", testRelation2.select( WindowExpression( AggregateExpression( Count(UnresolvedAttribute("b")), Complete, isDistinct = false, filter = Some(UnresolvedAttribute("b") > 1)), WindowSpecDefinition( UnresolvedAttribute("a") :: Nil, SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil, UnspecifiedFrame)).as("window")), "window aggregate function with filter predicate is not supported" :: Nil ) errorTest( "distinct function", CatalystSqlParser.parsePlan("SELECT hex(DISTINCT a) FROM TaBlE"), "Function hex does not support DISTINCT" :: Nil) errorTest( "non aggregate function with filter predicate", CatalystSqlParser.parsePlan("SELECT hex(a) FILTER (WHERE c = 1) FROM TaBlE2"), "Function hex does not support FILTER clause" :: Nil) errorTest( "distinct window function", CatalystSqlParser.parsePlan("SELECT percent_rank(DISTINCT a) OVER () FROM TaBlE"), "Function percent_rank does not support DISTINCT" :: Nil) errorTest( "window function with filter predicate", CatalystSqlParser.parsePlan("SELECT percent_rank(a) FILTER (WHERE c > 1) OVER () FROM 
TaBlE2"), "Function percent_rank does not support FILTER clause" :: Nil) errorTest( "higher order function with filter predicate", CatalystSqlParser.parsePlan("SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x) " + "FILTER (WHERE c > 1)"), "Function aggregate does not support FILTER clause" :: Nil) errorTest( "non-deterministic filter predicate in aggregate functions", CatalystSqlParser.parsePlan("SELECT count(a) FILTER (WHERE rand(int(c)) > 1) FROM TaBlE2"), "FILTER expression is non-deterministic, it cannot be used in aggregate functions" :: Nil) errorTest( "function don't support ignore nulls", CatalystSqlParser.parsePlan("SELECT hex(a) IGNORE NULLS FROM TaBlE2"), "Function hex does not support IGNORE NULLS" :: Nil) errorTest( "some window function don't support ignore nulls", CatalystSqlParser.parsePlan("SELECT percent_rank(a) IGNORE NULLS FROM TaBlE2"), "Function percent_rank does not support IGNORE NULLS" :: Nil) errorTest( "aggregate function don't support ignore nulls", CatalystSqlParser.parsePlan("SELECT count(a) IGNORE NULLS FROM TaBlE2"), "Function count does not support IGNORE NULLS" :: Nil) errorTest( "higher order function don't support ignore nulls", CatalystSqlParser.parsePlan("SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x) " + "IGNORE NULLS"), "Function aggregate does not support IGNORE NULLS" :: Nil) errorTest( "nested aggregate functions", testRelation.groupBy($"a")( AggregateExpression( Max(AggregateExpression(Count(Literal(1)), Complete, isDistinct = false)), Complete, isDistinct = false)), "not allowed to use an aggregate function in the argument of another aggregate function." :: Nil ) errorTest( "offset window function", testRelation2.select( WindowExpression( new Lead(UnresolvedAttribute("b")), WindowSpecDefinition( UnresolvedAttribute("a") :: Nil, SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil, SpecifiedWindowFrame(RangeFrame, Literal(1), Literal(2)))).as("window")), "Cannot specify window frame for lead function" :: Nil) errorTest( "the offset of nth_value window function is negative or zero", testRelation2.select( WindowExpression( new NthValue(AttributeReference("b", IntegerType)(), Literal(0)), WindowSpecDefinition( UnresolvedAttribute("a") :: Nil, SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil, SpecifiedWindowFrame(RowFrame, Literal(0), Literal(0)))).as("window")), "The 'offset' argument of nth_value must be greater than zero but it is 0." :: Nil) errorTest( "the offset of nth_value window function is not int literal", testRelation2.select( WindowExpression( new NthValue(AttributeReference("b", IntegerType)(), Literal(true)), WindowSpecDefinition( UnresolvedAttribute("a") :: Nil, SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil, SpecifiedWindowFrame(RowFrame, Literal(0), Literal(0)))).as("window")), "argument 2 requires int type, however, 'true' is of boolean type." 
:: Nil) errorTest( "too many generators", listRelation.select(Explode($"list").as("a"), Explode($"list").as("b")), "only one generator" :: "explode" :: Nil) errorClassTest( "unresolved attributes", testRelation.select($"abcd"), "MISSING_COLUMN", Array("abcd", "a")) errorClassTest( "unresolved attributes with a generated name", testRelation2.groupBy($"a")(max($"b")) .where(sum($"b") > 0) .orderBy($"havingCondition".asc), "MISSING_COLUMN", Array("havingCondition", "max(b)")) errorTest( "unresolved star expansion in max", testRelation2.groupBy($"a")(sum(UnresolvedStar(None))), "Invalid usage of '*'" :: "in expression 'sum'" :: Nil) errorTest( "sorting by unsupported column types", mapRelation.orderBy($"map".asc), "sort" :: "type" :: "map<int,int>" :: Nil) errorClassTest( "sorting by attributes are not from grouping expressions", testRelation2.groupBy($"a", $"c")($"a", $"c", count($"a").as("a3")).orderBy($"b".asc), "MISSING_COLUMN", Array("b", "a, c, a3")) errorTest( "non-boolean filters", testRelation.where(Literal(1)), "filter" :: "'1'" :: "not a boolean" :: Literal(1).dataType.simpleString :: Nil) errorTest( "non-boolean join conditions", testRelation.join(testRelation, condition = Some(Literal(1))), "condition" :: "'1'" :: "not a boolean" :: Literal(1).dataType.simpleString :: Nil) errorTest( "missing group by", testRelation2.groupBy($"a")($"b"), "'b'" :: "group by" :: Nil ) errorTest( "ambiguous field", nestedRelation.select($"top.duplicateField"), "Ambiguous reference to fields" :: "duplicateField" :: Nil, caseSensitive = false) errorTest( "ambiguous field due to case insensitivity", nestedRelation.select($"top.differentCase"), "Ambiguous reference to fields" :: "differentCase" :: "differentcase" :: Nil, caseSensitive = false) errorTest( "missing field", nestedRelation2.select($"top.c"), "No such struct field" :: "aField" :: "bField" :: "cField" :: Nil, caseSensitive = false) errorTest( "catch all unresolved plan", UnresolvedTestPlan(), "unresolved" :: Nil) errorTest( "union with unequal number of columns", testRelation.union(testRelation2), "union" :: "number of columns" :: testRelation2.output.length.toString :: testRelation.output.length.toString :: Nil) errorTest( "intersect with unequal number of columns", testRelation.intersect(testRelation2, isAll = false), "intersect" :: "number of columns" :: testRelation2.output.length.toString :: testRelation.output.length.toString :: Nil) errorTest( "except with unequal number of columns", testRelation.except(testRelation2, isAll = false), "except" :: "number of columns" :: testRelation2.output.length.toString :: testRelation.output.length.toString :: Nil) errorTest( "union with incompatible column types", testRelation.union(nestedRelation), "union" :: "the compatible column types" :: Nil) errorTest( "union with a incompatible column type and compatible column types", testRelation3.union(testRelation4), "union" :: "the compatible column types" :: "map" :: "decimal" :: Nil) errorTest( "intersect with incompatible column types", testRelation.intersect(nestedRelation, isAll = false), "intersect" :: "the compatible column types" :: Nil) errorTest( "intersect with a incompatible column type and compatible column types", testRelation3.intersect(testRelation4, isAll = false), "intersect" :: "the compatible column types" :: "map" :: "decimal" :: Nil) errorTest( "except with incompatible column types", testRelation.except(nestedRelation, isAll = false), "except" :: "the compatible column types" :: Nil) errorTest( "except with a incompatible column 
type and compatible column types", testRelation3.except(testRelation4, isAll = false), "except" :: "the compatible column types" :: "map" :: "decimal" :: Nil) errorClassTest( "SPARK-9955: correct error message for aggregate", // When parse SQL string, we will wrap aggregate expressions with UnresolvedAlias. testRelation2.where($"bad_column" > 1).groupBy($"a")(UnresolvedAlias(max($"b"))), "MISSING_COLUMN", Array("bad_column", "a, b, c, d, e")) errorTest( "slide duration greater than window in time window", testRelation2.select( TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "2 second", "0 second").as("window")), s"The slide duration " :: " must be less than or equal to the windowDuration " :: Nil ) errorTest( "start time greater than slide duration in time window", testRelation.select( TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "1 minute").as("window")), "The absolute value of start time " :: " must be less than the slideDuration " :: Nil ) errorTest( "start time equal to slide duration in time window", testRelation.select( TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "1 second").as("window")), "The absolute value of start time " :: " must be less than the slideDuration " :: Nil ) errorTest( "SPARK-21590: absolute value of start time greater than slide duration in time window", testRelation.select( TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "-1 minute").as("window")), "The absolute value of start time " :: " must be less than the slideDuration " :: Nil ) errorTest( "SPARK-21590: absolute value of start time equal to slide duration in time window", testRelation.select( TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "1 second", "-1 second").as("window")), "The absolute value of start time " :: " must be less than the slideDuration " :: Nil ) errorTest( "negative window duration in time window", testRelation.select( TimeWindow(Literal("2016-01-01 01:01:01"), "-1 second", "1 second", "0 second").as("window")), "The window duration " :: " must be greater than 0." :: Nil ) errorTest( "zero window duration in time window", testRelation.select( TimeWindow(Literal("2016-01-01 01:01:01"), "0 second", "1 second", "0 second").as("window")), "The window duration " :: " must be greater than 0." :: Nil ) errorTest( "negative slide duration in time window", testRelation.select( TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "-1 second", "0 second").as("window")), "The slide duration " :: " must be greater than 0." :: Nil ) errorTest( "zero slide duration in time window", testRelation.select( TimeWindow(Literal("2016-01-01 01:01:01"), "1 second", "0 second", "0 second").as("window")), "The slide duration" :: " must be greater than 0." 
:: Nil ) errorTest( "generator nested in expressions", listRelation.select(Explode($"list") + 1), "Generators are not supported when it's nested in expressions, but got: (explode(list) + 1)" :: Nil ) errorTest( "SPARK-30998: unsupported nested inner generators", { val nestedListRelation = LocalRelation( AttributeReference("nestedList", ArrayType(ArrayType(IntegerType)))()) nestedListRelation.select(Explode(Explode($"nestedList"))) }, "Generators are not supported when it's nested in expressions, but got: " + "explode(explode(nestedList))" :: Nil ) errorTest( "SPARK-30998: unsupported nested inner generators for aggregates", testRelation.select(Explode(Explode( CreateArray(CreateArray(min($"a") :: max($"a") :: Nil) :: Nil)))), "Generators are not supported when it's nested in expressions, but got: " + "explode(explode(array(array(min(a), max(a)))))" :: Nil ) errorTest( "generator nested in expressions for aggregates", testRelation.select(Explode(CreateArray(min($"a") :: max($"a") :: Nil)) + 1), "Generators are not supported when it's nested in expressions, but got: " + "(explode(array(min(a), max(a))) + 1)" :: Nil ) errorTest( "generator appears in operator which is not Project", listRelation.sortBy(Explode($"list").asc), "Generators are not supported outside the SELECT clause, but got: Sort" :: Nil ) errorTest( "an evaluated limit class must not be null", testRelation.limit(Literal(null, IntegerType)), "The evaluated limit expression must not be null, but got " :: Nil ) errorTest( "num_rows in limit clause must be equal to or greater than 0", listRelation.limit(-1), "The limit expression must be equal to or greater than 0, but got -1" :: Nil ) errorTest( "more than one generators in SELECT", listRelation.select(Explode($"list"), Explode($"list")), "Only one generator allowed per select clause but found 2: explode(list), explode(list)" :: Nil ) errorTest( "more than one generators for aggregates in SELECT", testRelation.select(Explode(CreateArray(min($"a") :: Nil)), Explode(CreateArray(max($"a") :: Nil))), "Only one generator allowed per select clause but found 2: " + "explode(array(min(a))), explode(array(max(a)))" :: Nil ) test("SPARK-6452 regression test") { // CheckAnalysis should throw AnalysisException when Aggregate contains missing attribute(s) // Since we manually construct the logical plan at here and Sum only accept // LongType, DoubleType, and DecimalType. We use LongType as the type of a. val attrA = AttributeReference("a", LongType)(exprId = ExprId(1)) val otherA = AttributeReference("a", LongType)(exprId = ExprId(2)) val attrC = AttributeReference("c", LongType)(exprId = ExprId(3)) val aliases = Alias(sum(attrA), "b")() :: Alias(sum(attrC), "d")() :: Nil val plan = Aggregate( Nil, aliases, LocalRelation(otherA)) assert(plan.resolved) val resolved = s"${attrA.toString},${attrC.toString}" val errorMsg = s"Resolved attribute(s) $resolved missing from ${otherA.toString} " + s"in operator !Aggregate [${aliases.mkString(", ")}]. " + s"Attribute(s) with the same name appear in the operation: a. " + "Please check if the right attribute(s) are used." 
assertAnalysisError(plan, errorMsg :: Nil) } test("error test for self-join") { val join = Join(testRelation, testRelation, Cross, None, JoinHint.NONE) val error = intercept[AnalysisException] { SimpleAnalyzer.checkAnalysis(join) } assert(error.message.contains("Failure when resolving conflicting references in Join")) assert(error.message.contains("Conflicting attributes")) } test("check grouping expression data types") { def checkDataType(dataType: DataType, shouldSuccess: Boolean): Unit = { val plan = Aggregate( AttributeReference("a", dataType)(exprId = ExprId(2)) :: Nil, Alias(sum(AttributeReference("b", IntegerType)(exprId = ExprId(1))), "c")() :: Nil, LocalRelation( AttributeReference("a", dataType)(exprId = ExprId(2)), AttributeReference("b", IntegerType)(exprId = ExprId(1)))) if (shouldSuccess) { assertAnalysisSuccess(plan, true) } else { assertAnalysisError(plan, "expression a cannot be used as a grouping expression" :: Nil) } } val supportedDataTypes = Seq( StringType, BinaryType, NullType, BooleanType, ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType, DecimalType(25, 5), DecimalType(6, 5), DateType, TimestampType, ArrayType(IntegerType), new StructType() .add("f1", FloatType, nullable = true) .add("f2", StringType, nullable = true), new StructType() .add("f1", FloatType, nullable = true) .add("f2", ArrayType(BooleanType, containsNull = true), nullable = true), new GroupableUDT()) supportedDataTypes.foreach { dataType => checkDataType(dataType, shouldSuccess = true) } val unsupportedDataTypes = Seq( MapType(StringType, LongType), new StructType() .add("f1", FloatType, nullable = true) .add("f2", MapType(StringType, LongType), nullable = true), new UngroupableUDT()) unsupportedDataTypes.foreach { dataType => checkDataType(dataType, shouldSuccess = false) } } test("we should fail analysis when we find nested aggregate functions") { val plan = Aggregate( AttributeReference("a", IntegerType)(exprId = ExprId(2)) :: Nil, Alias(sum(sum(AttributeReference("b", IntegerType)(exprId = ExprId(1)))), "c")() :: Nil, LocalRelation( AttributeReference("a", IntegerType)(exprId = ExprId(2)), AttributeReference("b", IntegerType)(exprId = ExprId(1)))) assertAnalysisError( plan, "It is not allowed to use an aggregate function in the argument of " + "another aggregate function." 
:: Nil) } test("Join can work on binary types but can't work on map types") { val left = LocalRelation(Symbol("a").binary, Symbol("b").map(StringType, StringType)) val right = LocalRelation(Symbol("c").binary, Symbol("d").map(StringType, StringType)) val plan1 = left.join( right, joinType = Cross, condition = Some(Symbol("a") === Symbol("c"))) assertAnalysisSuccess(plan1) val plan2 = left.join( right, joinType = Cross, condition = Some(Symbol("b") === Symbol("d"))) assertAnalysisError(plan2, "EqualTo does not support ordering on type map" :: Nil) } test("PredicateSubQuery is used outside of a filter") { val a = AttributeReference("a", IntegerType)() val b = AttributeReference("b", IntegerType)() val plan = Project( Seq(a, Alias(InSubquery(Seq(a), ListQuery(LocalRelation(b))), "c")()), LocalRelation(a)) assertAnalysisError(plan, "Predicate sub-queries can only be used" + " in Filter" :: Nil) } test("PredicateSubQuery correlated predicate is nested in an illegal plan") { val a = AttributeReference("a", IntegerType)() val b = AttributeReference("b", IntegerType)() val c = AttributeReference("c", IntegerType)() val plan1 = Filter( Exists( Join( LocalRelation(b), Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)), LeftOuter, Option(EqualTo(b, c)), JoinHint.NONE)), LocalRelation(a)) assertAnalysisError(plan1, "Accessing outer query column is not allowed in" :: Nil) val plan2 = Filter( Exists( Join( Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)), LocalRelation(b), RightOuter, Option(EqualTo(b, c)), JoinHint.NONE)), LocalRelation(a)) assertAnalysisError(plan2, "Accessing outer query column is not allowed in" :: Nil) val plan3 = Filter( Exists(Union(LocalRelation(b), Filter(EqualTo(UnresolvedAttribute("a"), c), LocalRelation(c)))), LocalRelation(a)) assertAnalysisError(plan3, "Accessing outer query column is not allowed in" :: Nil) val plan4 = Filter( Exists( Limit(1, Filter(EqualTo(UnresolvedAttribute("a"), b), LocalRelation(b))) ), LocalRelation(a)) assertAnalysisError(plan4, "Accessing outer query column is not allowed in" :: Nil) val plan5 = Filter( Exists( Sample(0.0, 0.5, false, 1L, Filter(EqualTo(UnresolvedAttribute("a"), b), LocalRelation(b))).select("b") ), LocalRelation(a)) assertAnalysisError(plan5, "Accessing outer query column is not allowed in" :: Nil) } test("Error on filter condition containing aggregate expressions") { val a = AttributeReference("a", IntegerType)() val b = AttributeReference("b", IntegerType)() val plan = Filter(Symbol("a") === UnresolvedFunction("max", Seq(b), true), LocalRelation(a, b)) assertAnalysisError(plan, "Aggregate/Window/Generate expressions are not valid in where clause of the query" :: Nil) } test("SPARK-30811: CTE should not cause stack overflow when " + "it refers to non-existent table with same name") { val plan = UnresolvedWith( UnresolvedRelation(TableIdentifier("t")), Seq("t" -> SubqueryAlias("t", Project( Alias(Literal(1), "x")() :: Nil, UnresolvedRelation(TableIdentifier("t", Option("nonexist"))))))) assertAnalysisError(plan, "Table or view not found:" :: Nil) } test("SPARK-33909: Check rand functions seed is legal at analyer side") { Seq(Rand("a".attr), Randn("a".attr)).foreach { r => val plan = Project(Seq(r.as("r")), testRelation) assertAnalysisError(plan, s"Input argument to ${r.prettyName} must be a constant." 
:: Nil) } Seq(Rand(1.0), Rand("1"), Randn("a")).foreach { r => val plan = Project(Seq(r.as("r")), testRelation) assertAnalysisError(plan, s"data type mismatch: argument 1 requires (int or bigint) type" :: Nil) } } test("SPARK-34946: correlated scalar subquery in grouping expressions only") { val c1 = AttributeReference("c1", IntegerType)() val c2 = AttributeReference("c2", IntegerType)() val t = LocalRelation(c1, c2) val plan = Aggregate( ScalarSubquery( Aggregate(Nil, sum($"c2").as("sum") :: Nil, Filter($"t1.c1" === $"t2.c1", t.as("t2"))) ) :: Nil, sum($"c2").as("sum") :: Nil, t.as("t1")) assertAnalysisError(plan, "Correlated scalar subqueries in the group by clause must also be " + "in the aggregate expressions" :: Nil) } test("SPARK-34946: correlated scalar subquery in aggregate expressions only") { val c1 = AttributeReference("c1", IntegerType)() val c2 = AttributeReference("c2", IntegerType)() val t = LocalRelation(c1, c2) val plan = Aggregate( $"c1" :: Nil, ScalarSubquery( Aggregate(Nil, sum($"c2").as("sum") :: Nil, Filter($"t1.c1" === $"t2.c1", t.as("t2"))) ).as("sub") :: Nil, t.as("t1")) assertAnalysisError(plan, "Correlated scalar subquery 'scalarsubquery(t1.c1)' is " + "neither present in the group by, nor in an aggregate function. Add it to group by " + "using ordinal position or wrap it in first() (or first_value) if you don't care " + "which value you get." :: Nil) } errorTest( "SPARK-34920: error code to error message", testRelation2.where($"bad_column" > 1).groupBy($"a")(UnresolvedAlias(max($"b"))), "Column 'bad_column' does not exist. Did you mean one of the following? [a, b, c, d, e]" :: Nil) test("SPARK-35080: Unsupported correlated equality predicates in subquery") { val a = AttributeReference("a", IntegerType)() val b = AttributeReference("b", IntegerType)() val c = AttributeReference("c", IntegerType)() val d = AttributeReference("d", DoubleType)() val t1 = LocalRelation(a, b, d) val t2 = LocalRelation(c) val conditions = Seq( (abs($"a") === $"c", "abs(a) = outer(c)"), (abs($"a") <=> $"c", "abs(a) <=> outer(c)"), ($"a" + 1 === $"c", "(a + 1) = outer(c)"), ($"a" + $"b" === $"c", "(a + b) = outer(c)"), ($"a" + $"c" === $"b", "(a + outer(c)) = b"), (And($"a" === $"c", Cast($"d", IntegerType) === $"c"), "CAST(d AS INT) = outer(c)")) conditions.foreach { case (cond, msg) => val plan = Project( ScalarSubquery( Aggregate(Nil, count(Literal(1)).as("cnt") :: Nil, Filter(cond, t1)) ).as("sub") :: Nil, t2) assertAnalysisError(plan, s"Correlated column is not allowed in predicate ($msg)" :: Nil) } } test("SPARK-35673: fail if the plan still contains UnresolvedHint after analysis") { val hintName = "some_random_hint_that_does_not_exist" val plan = UnresolvedHint(hintName, Seq.empty, Project(Alias(Literal(1), "x")() :: Nil, OneRowRelation()) ) assert(plan.resolved) val error = intercept[AnalysisException] { SimpleAnalyzer.checkAnalysis(plan) } assert(error.message.contains(s"Hint not found: ${hintName}")) // UnresolvedHint be removed by batch `Remove Unresolved Hints` assertAnalysisSuccess(plan, true) } test("SPARK-35618: Resolve star expressions in subqueries") { val a = AttributeReference("a", IntegerType)() val b = AttributeReference("b", IntegerType)() val t0 = OneRowRelation() val t1 = LocalRelation(a, b).as("t1") // t1.* in the subquery should be resolved into outer(t1.a) and outer(t1.b). 
assertAnalysisError( Project(ScalarSubquery(t0.select(star("t1"))).as("sub") :: Nil, t1), "Scalar subquery must return only one column, but got 2" :: Nil) // t2.* cannot be resolved and the error should be the initial analysis exception. assertAnalysisError( Project(ScalarSubquery(t0.select(star("t2"))).as("sub") :: Nil, t1), "cannot resolve 't2.*' given input columns ''" :: Nil ) } test("SPARK-35618: Invalid star usage in subqueries") { val a = AttributeReference("a", IntegerType)() val b = AttributeReference("b", IntegerType)() val c = AttributeReference("c", IntegerType)() val t1 = LocalRelation(a, b).as("t1") val t2 = LocalRelation(b, c).as("t2") // SELECT * FROM t1 WHERE a = (SELECT sum(c) FROM t2 WHERE t1.* = t2.b) assertAnalysisError( Filter(EqualTo(a, ScalarSubquery(t2.select(sum(c)).where(star("t1") === b))), t1), "Invalid usage of '*' in Filter" :: Nil ) // SELECT * FROM t1 JOIN t2 ON (EXISTS (SELECT 1 FROM t2 WHERE t1.* = b)) assertAnalysisError( t1.join(t2, condition = Some(Exists(t2.select(1).where(star("t1") === b)))), "Invalid usage of '*' in Filter" :: Nil ) } test("SPARK-36488: Regular expression expansion should fail with a meaningful message") { withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "true") { assertAnalysisError(testRelation.select(Divide(UnresolvedRegex(".?", None, false), "a")), s"Invalid usage of regular expression '.?' in" :: Nil) assertAnalysisError(testRelation.select( Divide(UnresolvedRegex(".?", None, false), UnresolvedRegex(".*", None, false))), s"Invalid usage of regular expressions '.?', '.*' in" :: Nil) assertAnalysisError(testRelation.select( Divide(UnresolvedRegex(".?", None, false), UnresolvedRegex(".?", None, false))), s"Invalid usage of regular expression '.?' in" :: Nil) assertAnalysisError(testRelation.select(Divide(UnresolvedStar(None), "a")), "Invalid usage of '*' in" :: Nil) assertAnalysisError(testRelation.select(Divide(UnresolvedStar(None), UnresolvedStar(None))), "Invalid usage of '*' in" :: Nil) assertAnalysisError(testRelation.select(Divide(UnresolvedStar(None), UnresolvedRegex(".?", None, false))), "Invalid usage of '*' and regular expression '.?' in" :: Nil) assertAnalysisError(testRelation.select(Least(Seq(UnresolvedStar(None), UnresolvedRegex(".*", None, false), UnresolvedRegex(".?", None, false)))), "Invalid usage of '*' and regular expressions '.*', '.?' in" :: Nil) } } }
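// ---------------------------------------------------------------------------
// Minimal sketch (illustrative, not part of the original suite) of the
// negative-testing pattern used throughout this file: build an invalid logical
// plan by hand and assert on the analyzer's error message via
// assertAnalysisError. `AnalysisTest` is assumed to be the base class that
// supplies test(), assertAnalysisError and the plan DSL used above; the class,
// relation and column names below are made up for the example.
// ---------------------------------------------------------------------------
class AnalysisErrorPatternSketch extends AnalysisTest {

  private val relation = LocalRelation(AttributeReference("a", IntegerType)())

  test("illustrative: selecting a non-existent column is an analysis error") {
    // The expected strings are matched as substrings of the reported error.
    assertAnalysisError(
      relation.select($"missing"),
      "missing" :: Nil)
  }
}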
vinodkc/spark
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
Scala
apache-2.0
34,491
/* * Copyright 2015 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rhttpc.client import akka.actor.ActorRef import akka.pattern._ import akka.util.Timeout import rhttpc.client.protocol.{Correlated, FailureExchange, SuccessExchange} import rhttpc.transport._ import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future, Promise} import scala.util.{Failure, Success} class MockTransport(awaitCond: (() => Boolean) => Unit)(implicit ec: ExecutionContext) extends PubSubTransport { @volatile private var _publicationPromise: Promise[Unit] = _ @volatile private var _replySubscriptionPromise: Promise[String] = _ @volatile private var _ackOnReplySubscriptionFuture: Future[Any] = _ @volatile private var consumer: ActorRef = _ def publicationPromise: Promise[Unit] = { awaitCond(() => _publicationPromise != null) _publicationPromise } def replySubscriptionPromise: Promise[String] = { awaitCond(() => _replySubscriptionPromise != null) _replySubscriptionPromise } def ackOnReplySubscriptionFuture: Future[Any] = { awaitCond(() => _ackOnReplySubscriptionFuture != null) _ackOnReplySubscriptionFuture } override def publisher[PubMsg: Serializer](data: OutboundQueueData): Publisher[PubMsg] = new Publisher[PubMsg] { override def publish(request: Message[PubMsg]): Future[Unit] = { request.content match { case Correlated(msg, correlationId) => _publicationPromise = Promise[Unit]() _replySubscriptionPromise = Promise[String]() implicit val timeout = Timeout(5 seconds) _replySubscriptionPromise.future.onComplete { case Success(result) => _ackOnReplySubscriptionFuture = consumer ? Correlated(SuccessExchange(msg, result), correlationId) case Failure(ex) => _ackOnReplySubscriptionFuture = consumer ? 
Correlated(FailureExchange(msg, ex), correlationId) } _publicationPromise.future case other => throw new IllegalArgumentException("Illegal message content: " + other) } } override def start(): Unit = {} override def stop(): Future[Unit] = Future.unit } override def fullMessageSubscriber[SubMsg: Deserializer](data: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] = subscriber(data, consumer) override def subscriber[SubMsg: Deserializer](data: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] = new Subscriber[SubMsg] { MockTransport.this.consumer = consumer override def start(): Unit = {} override def stop(): Future[Unit] = Future.unit } override def stop(): Future[Unit] = Future.unit } object MockProxyTransport extends PubSubTransport { override def publisher[PubMsg: Serializer](queueData: OutboundQueueData): Publisher[PubMsg] = new Publisher[PubMsg] { override def publish(msg: Message[PubMsg]): Future[Unit] = Future.unit override def start(): Unit = {} override def stop(): Future[Unit] = Future.unit } override def fullMessageSubscriber[SubMsg: Deserializer](data: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] = subscriber(data, consumer) override def subscriber[SubMsg: Deserializer](queueData: InboundQueueData, consumer: ActorRef): Subscriber[SubMsg] = new Subscriber[SubMsg] { override def start(): Unit = {} override def stop(): Future[Unit] = Future.unit } override def stop(): Future[Unit] = Future.unit }
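// ---------------------------------------------------------------------------
// Minimal sketch (illustrative, not part of the original file) of how a spec can
// drive MockTransport: publish a Correlated request, then complete the captured
// promises to simulate the broker. Construction of OutboundQueueData and Message
// is elided because their signatures are not shown here, and a consumer actor is
// assumed to have been registered via transport.subscriber(...) beforehand.
// ---------------------------------------------------------------------------
object MockTransportUsageSketch {

  def driveOneExchange(transport: MockTransport,
                       queueData: OutboundQueueData,
                       request: Message[Correlated[String]])
                      (implicit serializer: Serializer[Correlated[String]]): Future[Any] = {
    val publisher = transport.publisher[Correlated[String]](queueData)
    publisher.publish(request)                             // captures the publication/reply promises
    transport.publicationPromise.success(())               // acknowledge the publication
    transport.replySubscriptionPromise.success("a reply")  // triggers a SuccessExchange to the consumer
    transport.ackOnReplySubscriptionFuture                 // the consumer's (asked) acknowledgement
  }
}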
arkadius/reliable-http-client
rhttpc-client/src/test/scala/rhttpc/client/MockTransport.scala
Scala
apache-2.0
4,111
package io.github.hamsters

import org.scalatest.{FlatSpec, Matchers}

class SealedSpec extends FlatSpec with Matchers {

  "Sealed.values on a valid sealed trait" should "return a set of the sealed trait's children" in {
    sealed trait Colors
    case object Red extends Colors
    case object Orange extends Colors
    case object Green extends Colors

    Sealed.values[Colors] should equal(Set(Red, Orange, Green))
  }

  /**"Sealed.values on a simple trait (non sealed)" should "not compile" in {
    trait Colors
    case object Red extends Colors
    case object Orange extends Colors
    case object Green extends Colors

    Sealed.values[Colors] should equal(Set(Red, Orange, Green)) // doesn't compile
  }**/

}
dgouyette/hamsters
shared/src/test/scala/io/github/hamsters/SealedSpec.scala
Scala
apache-2.0
723
/** * Copyright 2015 Otto (GmbH & Co KG) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.schedoscope.scheduler.driver import java.nio.file.Files import net.lingala.zip4j.core.ZipFile import org.apache.commons.io.FileUtils import org.schedoscope.Schedoscope import org.schedoscope.conf.DriverSettings import org.schedoscope.dsl.transformations.Transformation import org.schedoscope.test.resources.TestResources import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} import scala.util.{Failure, Random, Success} /** * In Schedoscope, drivers are responsible for executing transformations. * * Drivers encapsulate the respective APIs required to perform a transformation type. * They might be executed from within the DriverActor or directly from a test. * * A driver is parameterized by the type of transformation that it is able to execute. * * An execution of a transformation - a driver run - is represented by a driver run handle. Depending on the * state of transformation execution, the current driver run state can be queried for a driver * run handle. */ trait Driver[T <: Transformation] { /** * The name of the transformations executed by this driver. Must be equal to t.name for any t: T. */ def transformationName: String /** * Kill the given driver run. Default: do nothing */ def killRun(run: DriverRunHandle[T]): Unit = {} /** * Get the current driver run state for a given driver run represented by the handle. */ def getDriverRunState(run: DriverRunHandle[T]): DriverRunState[T] /** * Create a driver run, i.e., start the execution of the transformation asychronously. */ def run(t: T): DriverRunHandle[T] /** * Execute the transformation synchronously and block until it's done. Return the final state * of the driver run. */ def runAndWait(t: T): DriverRunState[T] /** * Deploy all resources for this transformation type to the cluster. By default, this deploys all * jars defined in the libJars section of the transformation configuration (@see DriverSettings) */ def deployAll(ds: DriverSettings): Boolean = { val fsd = FilesystemDriver(ds) // clear destination fsd.delete(ds.location, true) fsd.mkdirs(ds.location) val succ = ds.libJars .map(f => { if (ds.unpack) { val tmpDir = Files.createTempDirectory("schedoscope-" + Random.nextLong.abs.toString).toFile new ZipFile(f.replaceAll("file:", "")).extractAll(tmpDir.getAbsolutePath) val succ = fsd.copy("file://" + tmpDir + "/*", ds.location, true) FileUtils.deleteDirectory(tmpDir) succ } else { fsd.copy(f, ds.location, true) } }) succ.filter(_.isInstanceOf[DriverRunFailed[_]]).isEmpty } /** * Perform any rigging of a transformation necessary to execute it within the scope of the * test framework represented by an instance of TestResources using this driver. * * The rigged transformation is returned. * * By default, the transformation is not changed. */ def rigTransformationForTest(t: T, testResources: TestResources): T = t /** * Needs to be overridden to return the class names of driver run completion handlers to apply. 
* * E.g., provide a val of the same name to the constructor of the driver implementation. */ def driverRunCompletionHandlerClassNames: List[String] lazy val driverRunCompletionHandlers: List[DriverRunCompletionHandler[T]] = driverRunCompletionHandlerClassNames.map { className => Class.forName(className).newInstance().asInstanceOf[DriverRunCompletionHandler[T]] } /** * Invokes completion handlers prior to the given driver run. */ def driverRunStarted(run: DriverRunHandle[T]) { driverRunCompletionHandlers.foreach(_.driverRunStarted(run)) } /** * Invokes completion handlers after the given driver run. */ def driverRunCompleted(run: DriverRunHandle[T]) { getDriverRunState(run) match { case s: DriverRunSucceeded[T] => driverRunCompletionHandlers.foreach(_.driverRunCompleted(s, run)) case f: DriverRunFailed[T] => driverRunCompletionHandlers.foreach(_.driverRunCompleted(f, run)) case _ => throw RetryableDriverException("driverRunCompleted called with non-final driver run state") } } } /** * DriverOnBlockingApi provides a default implementation for most of the driver contract * for transformations working on blocking APIs. * * The asynchronism of the driver contract is implemented using futures. I.e., the state * handle of the respective driver run state is a future returning the final driver run state * for the transformation being executed. * * Subclasses only need to provide an implementation of the methods transformationName and run * as well as driverRunCompletionHandlerClassNames. * * As examples, @see HiveDriver, @see PigDriver, @see FileSystemDriver * */ trait DriverOnBlockingApi[T <: Transformation] extends Driver[T] { implicit val executionContext = Schedoscope.actorSystem.dispatchers.lookup("akka.actor.future-driver-dispatcher") def runTimeOut: Duration = Schedoscope.settings.getDriverSettings(transformationName).timeout def getDriverRunState(run: DriverRunHandle[T]): DriverRunState[T] = { val runState = run.stateHandle.asInstanceOf[Future[DriverRunState[T]]] if (runState.isCompleted) runState.value.get match { case s: Success[DriverRunState[T]] => s.value case f: Failure[DriverRunState[T]] => throw f.exception } else DriverRunOngoing[T](this, run) } def runAndWait(t: T): DriverRunState[T] = Await.result(run(t).stateHandle.asInstanceOf[Future[DriverRunState[T]]], runTimeOut) } /** * DriverOnNonBlockingApi provides a simple default implementation for parts of the driver * contract for asynchronous APIs (namely, the runAndWait method). * * The state handle of driver run handles for such APIs should be the corresponding handle * mechanism used by that API. * * As examples, @see MapreduceDriver and @see OozieDriver */ trait DriverOnNonBlockingApi[T <: Transformation] extends Driver[T] { def runAndWait(t: T): DriverRunState[T] = { val runHandle = run(t) while (getDriverRunState(runHandle).isInstanceOf[DriverRunOngoing[T]]) Thread.sleep(5000) getDriverRunState(runHandle) } } /** * Companion objects for driver implementations must implement the following trait, which ensures a common protocol * for instantiating drivers from their driver settings as well as for instantiating test instances. */ trait DriverCompanionObject[T <: Transformation] { /** * Construct the driver from its settings. The settings are picked up via the name of the driver * from the configurations * * @param driverSettings the driver settings * @return the instantiated driver */ def apply(driverSettings: DriverSettings): Driver[T] /** * Construct the driver from its settings in the context of the Schedoscope test framework. 
* * @param driverSettings the driver settings * @param testResources the resources within the test environment * @return the instantiated test driver */ def apply(driverSettings: DriverSettings, testResources: TestResources): Driver[T] } /** * Companion object with factory methods for drivers */ object Driver { /** * Returns the names of the transformations for which drivers are configured. */ def transformationsWithDrivers = Schedoscope.settings.availableTransformations.keySet() /** * Returns the driver settings for a given transformation type. */ def driverSettings(transformationName: String): DriverSettings = Schedoscope.settings.getDriverSettings(transformationName) /** * Returns the driver settings for a given transformation. */ def driverSettings(t: Transformation): DriverSettings = driverSettings(t.name) /** * Returns an appropriately set up driver for the given driver settings. If optional test * resources are passed then the driver is set up for testing in that context. */ def driverFor[T <: Transformation](ds: DriverSettings, testResources: Option[TestResources]): Driver[T] = try { val driverCompanionObjectClass = Class.forName(ds.driverClassName + "$") val driverCompanionObjectConstructor = driverCompanionObjectClass.getDeclaredConstructor() driverCompanionObjectConstructor.setAccessible(true) val driverCompanionObject = driverCompanionObjectConstructor.newInstance().asInstanceOf[DriverCompanionObject[T]] testResources match { case Some(resources) => driverCompanionObject(ds, resources) case None => driverCompanionObject(ds) } } catch { case t: Throwable => throw new IllegalArgumentException(s"Could not instantiate driver class ${ds.driverClassName} with settings ${ds}", t) } /** * Returns an appropriately set up driver for the given driver settings. */ def driverFor[T <: Transformation](ds: DriverSettings): Driver[T] = driverFor[T](ds, None) /** * Returns an appropriately set up driver for the given transformation type using the configured settings. If optional test * resources are passed then the driver is set up for testing in that context. */ def driverFor[T <: Transformation](transformationName: String, testResources: Option[TestResources]): Driver[T] = driverFor[T](driverSettings(transformationName), testResources) /** * Returns an appropriately set up driver for the given transformation type using the configured settings. */ def driverFor[T <: Transformation](transformationName: String): Driver[T] = driverFor[T](transformationName, None) /** * Returns an appropriately set up driver for the given transformation and the configured settings.If optional test * resources are passed then the driver is set up for testing in that context. */ def driverFor[T <: Transformation](t: T, testResources: Option[TestResources]): Driver[T] = driverFor[T](driverSettings(t), testResources) /** * Returns an appropriately set up driver for the given transformation and the configured settings. */ def driverFor[T <: Transformation](t: T): Driver[T] = driverFor[T](t, None) }
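// ---------------------------------------------------------------------------
// Minimal usage sketch (illustrative, not part of the original file): looking up
// the configured driver for a transformation type, deploying its resources and
// running a single transformation synchronously. The transformation name and
// value are assumptions supplied by the caller.
// ---------------------------------------------------------------------------
object DriverUsageSketch {

  def deployAndRunOnce[T <: Transformation](transformationName: String, t: T): DriverRunState[T] = {
    val settings = Driver.driverSettings(transformationName) // settings resolved by transformation name
    val driver   = Driver.driverFor[T](settings)             // instantiated via its companion object
    driver.deployAll(settings)                                // ship the configured libJars to the cluster
    driver.runAndWait(t)                                      // block until a final DriverRunState
  }
  // The result can then be matched against DriverRunSucceeded / DriverRunFailed,
  // just as driverRunCompleted does above.
}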
utzwestermann/schedoscope
schedoscope-core/src/main/scala/org/schedoscope/scheduler/driver/Driver.scala
Scala
apache-2.0
10,887
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.api.java import java.util.Comparator import scala.language.implicitConversions import scala.reflect.ClassTag import org.apache.spark._ import org.apache.spark.api.java.JavaSparkContext.fakeClassTag import org.apache.spark.api.java.function.{Function => JFunction} import org.apache.spark.rdd.RDD import org.apache.spark.storage.StorageLevel import org.apache.spark.util.Utils class JavaRDD[T](val rdd: RDD[T])(implicit val classTag: ClassTag[T]) extends AbstractJavaRDDLike[T, JavaRDD[T]] { override def wrapRDD(rdd: RDD[T]): JavaRDD[T] = JavaRDD.fromRDD(rdd) // Common RDD functions /** Persist this RDD with the default storage level (`MEMORY_ONLY`). */ def cache(): JavaRDD[T] = wrapRDD(rdd.cache()) /** * Set this RDD's storage level to persist its values across operations after the first time * it is computed. This can only be used to assign a new storage level if the RDD does not * have a storage level set yet.. */ def persist(newLevel: StorageLevel): JavaRDD[T] = wrapRDD(rdd.persist(newLevel)) /** * Mark the RDD as non-persistent, and remove all blocks for it from memory and disk. * This method blocks until all blocks are deleted. */ def unpersist(): JavaRDD[T] = wrapRDD(rdd.unpersist()) /** * Mark the RDD as non-persistent, and remove all blocks for it from memory and disk. * * @param blocking Whether to block until all blocks are deleted. */ def unpersist(blocking: Boolean): JavaRDD[T] = wrapRDD(rdd.unpersist(blocking)) // Transformations (return a new RDD) /** * Return a new RDD containing the distinct elements in this RDD. */ def distinct(): JavaRDD[T] = wrapRDD(rdd.distinct()) /** * Return a new RDD containing the distinct elements in this RDD. */ def distinct(numPartitions: Int): JavaRDD[T] = wrapRDD(rdd.distinct(numPartitions)) /** * Return a new RDD containing only the elements that satisfy a predicate. */ def filter(f: JFunction[T, java.lang.Boolean]): JavaRDD[T] = wrapRDD(rdd.filter((x => f.call(x).booleanValue()))) /** * Return a new RDD that is reduced into `numPartitions` partitions. */ def coalesce(numPartitions: Int): JavaRDD[T] = rdd.coalesce(numPartitions) /** * Return a new RDD that is reduced into `numPartitions` partitions. */ def coalesce(numPartitions: Int, shuffle: Boolean): JavaRDD[T] = rdd.coalesce(numPartitions, shuffle) /** * Return a new RDD that has exactly numPartitions partitions. * * Can increase or decrease the level of parallelism in this RDD. Internally, this uses * a shuffle to redistribute data. * * If you are decreasing the number of partitions in this RDD, consider using `coalesce`, * which can avoid performing a shuffle. */ def repartition(numPartitions: Int): JavaRDD[T] = rdd.repartition(numPartitions) /** * Return a sampled subset of this RDD. 
*/ def sample(withReplacement: Boolean, fraction: Double): JavaRDD[T] = sample(withReplacement, fraction, Utils.random.nextLong) /** * Return a sampled subset of this RDD. */ def sample(withReplacement: Boolean, fraction: Double, seed: Long): JavaRDD[T] = wrapRDD(rdd.sample(withReplacement, fraction, seed)) /** * Randomly splits this RDD with the provided weights. * * @param weights weights for splits, will be normalized if they don't sum to 1 * * @return split RDDs in an array */ def randomSplit(weights: Array[Double]): Array[JavaRDD[T]] = randomSplit(weights, Utils.random.nextLong) /** * Randomly splits this RDD with the provided weights. * * @param weights weights for splits, will be normalized if they don't sum to 1 * @param seed random seed * * @return split RDDs in an array */ def randomSplit(weights: Array[Double], seed: Long): Array[JavaRDD[T]] = rdd.randomSplit(weights, seed).map(wrapRDD) /** * Return the union of this RDD and another one. Any identical elements will appear multiple * times (use `.distinct()` to eliminate them). */ def union(other: JavaRDD[T]): JavaRDD[T] = wrapRDD(rdd.union(other.rdd)) /** * Return the intersection of this RDD and another one. The output will not contain any duplicate * elements, even if the input RDDs did. * * Note that this method performs a shuffle internally. */ def intersection(other: JavaRDD[T]): JavaRDD[T] = wrapRDD(rdd.intersection(other.rdd)) /** * Return an RDD with the elements from `this` that are not in `other`. * * Uses `this` partitioner/partition size, because even if `other` is huge, the resulting * RDD will be <= us. */ def subtract(other: JavaRDD[T]): JavaRDD[T] = wrapRDD(rdd.subtract(other)) /** * Return an RDD with the elements from `this` that are not in `other`. */ def subtract(other: JavaRDD[T], numPartitions: Int): JavaRDD[T] = wrapRDD(rdd.subtract(other, numPartitions)) /** * Return an RDD with the elements from `this` that are not in `other`. */ def subtract(other: JavaRDD[T], p: Partitioner): JavaRDD[T] = wrapRDD(rdd.subtract(other, p)) override def toString = rdd.toString /** Assign a name to this RDD */ def setName(name: String): JavaRDD[T] = { rdd.setName(name) this } /** * Return this RDD sorted by the given key function. */ def sortBy[S](f: JFunction[T, S], ascending: Boolean, numPartitions: Int): JavaRDD[T] = { import scala.collection.JavaConverters._ def fn = (x: T) => f.call(x) import com.google.common.collect.Ordering // shadows scala.math.Ordering implicit val ordering = Ordering.natural().asInstanceOf[Ordering[S]] implicit val ctag: ClassTag[S] = fakeClassTag wrapRDD(rdd.sortBy(fn, ascending, numPartitions)) } } object JavaRDD { implicit def fromRDD[T: ClassTag](rdd: RDD[T]): JavaRDD[T] = new JavaRDD[T](rdd) implicit def toRDD[T](rdd: JavaRDD[T]): RDD[T] = rdd.rdd }
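// ---------------------------------------------------------------------------
// Minimal usage sketch (illustrative, not part of the original file): wrapping a
// Scala RDD and exercising a few of the Java-friendly operations defined above.
// `sc` is an assumed, already-constructed SparkContext.
// ---------------------------------------------------------------------------
object JavaRDDUsageSketch {

  def demo(sc: SparkContext): Unit = {
    val javaRdd: JavaRDD[Int] = JavaRDD.fromRDD(sc.parallelize(1 to 100))

    // filter takes a Java-style Function[T, java.lang.Boolean]
    val even: JavaRDD[Int] = javaRdd.filter(new JFunction[Int, java.lang.Boolean] {
      override def call(x: Int): java.lang.Boolean = x % 2 == 0
    })

    // weights are normalized if they do not sum to 1
    val Array(train, test) = even.randomSplit(Array(0.8, 0.2), seed = 42L)

    // coalesce reduces the number of partitions without a full shuffle
    val coalesced = train.coalesce(2)
    println(s"train=${train.count()} test=${test.count()} partitions=${coalesced.partitions.size()}")
  }
}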
Dax1n/spark-core
core/src/main/scala/org/apache/spark/api/java/JavaRDD.scala
Scala
apache-2.0
6,757
package ch.epfl.scala.index.data.github import org.json4s.{DefaultFormats, native} object Json4s { implicit val formats = DefaultFormats implicit val serialization = native.Serialization }
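// ---------------------------------------------------------------------------
// Minimal sketch (illustrative, not part of the original file): the implicits
// above are what json4s' native Serialization needs for read/write.
// `Contributor` is a made-up case class for the example.
// ---------------------------------------------------------------------------
object Json4sUsageSketch {
  import Json4s._
  import org.json4s.native.Serialization.{read, write}

  case class Contributor(login: String, contributions: Int)

  val json: String = write(Contributor("octocat", 42)) // e.g. {"login":"octocat","contributions":42}
  val back: Contributor = read[Contributor](json)
}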
adamwy/scaladex
data/src/main/scala/ch.epfl.scala.index.data/github/Json4s.scala
Scala
bsd-3-clause
195
/******************************************************************************* * Copyright 2010 Maxime Lévesque * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ***************************************************************************** */ package org.squeryl.adapters import java.sql.SQLException import org.squeryl.dsl.CompositeKey import org.squeryl.dsl.ast.{ExpressionNode, QueryExpressionElements} import org.squeryl._ import org.squeryl.internals._ class SQLiteAdapter extends DatabaseAdapter { override def uuidTypeDeclaration = "uuid" override def isFullOuterJoinSupported = false override def writeColumnDeclaration(fmd: FieldMetaData, isPrimaryKey: Boolean, schema: Schema): String = { var res = " " + fmd.columnName + " " + databaseTypeFor(fmd) for(d <- fmd.defaultValue) { val v = convertToJdbcValue(d.value.asInstanceOf[AnyRef]) if(v.isInstanceOf[String]) res += " default '" + v + "'" else res += " default " + v } if(!fmd.isOption) res += " not null" if(isPrimaryKey) res += " primary key" if(supportsAutoIncrementInColumnDeclaration && fmd.isAutoIncremented) res += " autoincrement" res } override def writeCreateTable[T](t: Table[T], sw: StatementWriter, schema: Schema): Unit = { sw.write("create table ") sw.write(quoteName(t.prefixedName)) sw.write(" (\\n") sw.writeIndented { sw.writeLinesWithSeparator( t.posoMetaData.fieldsMetaData.map( fmd => writeColumnDeclaration(fmd, fmd.declaredAsPrimaryKeyInSchema, schema) ), "," ) } val compositePrimaryKeys = _allCompositePrimaryKeys(t) if (compositePrimaryKeys.nonEmpty) { sw.write(", PRIMARY KEY (") sw.write(compositePrimaryKeys map (_.columnName) mkString ", ") sw.write(")") } sw.write(")") } private def _allCompositePrimaryKeys[T](t: Table[T]): collection.Seq[FieldMetaData] = { (t.ked map { ked => Utils.mapSampleObject( t.asInstanceOf[Table[AnyRef]], (z: AnyRef) => { val id = ked.asInstanceOf[KeyedEntityDef[AnyRef, AnyRef]].getId(z) id match { case key: CompositeKey => key._fields case _ => Seq.empty[FieldMetaData] } } ) }) getOrElse Seq.empty[FieldMetaData] } override def intTypeDeclaration: String = "INTEGER" override def longTypeDeclaration = "INTEGER" override def supportsForeignKeyConstraints: Boolean = false override def writeCompositePrimaryKeyConstraint(t: Table[_], cols: Iterable[FieldMetaData]): String = s"SELECT * FROM sqlite_master WHERE 1 = 2" override def writeDropTable(tableName: String): String = s"DROP TABLE IF EXISTS $tableName" override def isTableDoesNotExistException(e: SQLException): Boolean = e.getErrorCode == 42102 override def supportsCommonTableExpressions = false override def writeEndOfQueryHint(isForUpdate: () => Boolean, qen: QueryExpressionElements, sw: StatementWriter) = if(isForUpdate()) { sw.pushPendingNextLine } override def writeRegexExpression(left: ExpressionNode, pattern: String, sw: StatementWriter) = { sw.write("(") left.write(sw) sw.write(" LIKE ?)") sw.addParam(ConstantStatementParam(InternalFieldMapper.stringTEF.createConstant(pattern))) } }
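// ---------------------------------------------------------------------------
// Minimal wiring sketch (illustrative, not part of the adapter): registering the
// adapter with a Squeryl session factory over an SQLite JDBC connection and
// creating a schema, which exercises writeCreateTable/writeColumnDeclaration
// above. The JDBC URL, entity and schema below are assumptions for the example.
// ---------------------------------------------------------------------------
object SQLiteAdapterUsageSketch {
  import java.sql.DriverManager
  import org.squeryl.{KeyedEntity, Schema, Session, SessionFactory}
  import org.squeryl.PrimitiveTypeMode._

  class Person(val name: String, val age: Int) extends KeyedEntity[Long] {
    val id: Long = 0
  }

  object Library extends Schema {
    val people = table[Person]("person")
  }

  def init(): Unit = {
    SessionFactory.concreteFactory = Some(() =>
      Session.create(DriverManager.getConnection("jdbc:sqlite::memory:"), new SQLiteAdapter))

    transaction {
      Library.create // emits the DDL produced by writeCreateTable above
    }
  }
}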
squeryl/squeryl
src/main/scala/org/squeryl/adapters/SQLiteAdapter.scala
Scala
apache-2.0
3,998
/* * Copyright 2015 Webtrends (http://www.webtrends.com) * * See the LICENCE.txt file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.webtrends.harness.component.netty import akka.actor._ import akka.pattern.ask import com.webtrends.harness.app.HActor import com.webtrends.harness.component.{ComponentHelper, ComponentRequest} import com.webtrends.harness.HarnessConstants import com.webtrends.harness.health.HealthResponseType.HealthResponseType import com.webtrends.harness.health._ import com.webtrends.harness.service.ServiceManager.GetMetaDataByName import com.webtrends.harness.service.messages.GetMetaData import scala.util.{Failure, Success} @SerialVersionUID(1L) case class GetSystemInfo[T](name: String, msg:ComponentRequest[T]) @SerialVersionUID(1L) case class GetHealth(msg: HealthResponseType) @SerialVersionUID(1L) case class GetServiceInfo(name:Option[String]=None) /** * Created by wallinm on 12/16/14. */ protected class CoreNettyWorker extends HActor with ComponentHelper { import context._ val healthActor = actorSelection(HarnessConstants.HealthFullName) val serviceActor = actorSelection(HarnessConstants.ServicesFullName) /** * Establish our routes and other receive handlers */ override def receive = super.receive orElse { case GetSystemInfo(name, msg) => respondToComponentRequest(name, msg) case GetHealth(msg) => respondToHealthRequest(msg) case GetServiceInfo(name) => respondToServiceRequest(name) } def respondToServiceRequest(name:Option[String]) = { val caller = sender() val msg = name match { case Some(n) => GetMetaDataByName(n) case None => GetMetaData(None) } (serviceActor ? msg) onComplete { case Success(resp) => caller ! resp case Failure(f) => caller ! Status.Failure(f) } } def respondToComponentRequest[T, M](name:String, msg:ComponentRequest[T]): Unit = { val caller = sender() componentRequest[T, M](name, msg) onComplete { case Success(resp) => caller ! resp case Failure(f) => caller ! Status.Failure(f) } } def respondToHealthRequest(msg:HealthResponseType): Unit = { val caller = sender() (healthActor ? HealthRequest(msg)) onComplete { case Success(resp) => caller ! resp case Failure(f) => caller ! Status.Failure(f) } } }
Webtrends/wookiee-netty
src/main/scala/com/webtrends/harness/component/netty/CoreNettyWorker.scala
Scala
apache-2.0
2,939
package org.jetbrains.plugins.scala.lang.psi import com.intellij.openapi.project.Project import com.intellij.psi.search.GlobalSearchScope import com.intellij.psi.{PsiClass, PsiElement} import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTrait} import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager import org.jetbrains.plugins.scala.lang.psi.types.api.{TypeParameterType, UndefinedType} import org.jetbrains.plugins.scala.lang.psi.types.{ScParameterizedType, ScalaType} import org.jetbrains.plugins.scala.project.ProjectContext /** * Nikolay.Tropin * 19-Apr-17 */ case class ElementScope(project: Project, scope: GlobalSearchScope) { implicit def projectContext: ProjectContext = project def getCachedClass(fqn: String): Option[PsiClass] = getCachedClasses(fqn).find { !_.isInstanceOf[ScObject] } def getCachedObject(fqn: String): Option[ScObject] = getCachedClasses(fqn).collect { case o: ScObject => o }.headOption def cachedFunction1Type: Option[ScParameterizedType] = manager.cachedFunction1Type(this) def function1Type(level: Int = 1): Option[ScParameterizedType] = getCachedClass("scala.Function1").collect { case t: ScTrait => t }.map { t => val parameters = t.typeParameters.map { TypeParameterType(_) }.map { UndefinedType(_, level = level) } ScParameterizedType(ScalaType.designator(t), parameters) }.collect { case p: ScParameterizedType => p } def getCachedClasses(fqn: String): Array[PsiClass] = manager.getCachedClasses(scope, fqn) private def manager = ScalaPsiManager.instance(project) } object ElementScope { def apply(element: PsiElement): ElementScope = ElementScope(element.getProject, element.getResolveScope) def apply(project: Project): ElementScope = ElementScope(project, GlobalSearchScope.allScope(project)) implicit def toProjectContext(implicit elementScope: ElementScope): ProjectContext = elementScope.project }
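// ---------------------------------------------------------------------------
// Minimal sketch (illustrative, not part of the original file): resolving classes
// through the scope of a given element. `element` is an assumed PsiElement
// obtained elsewhere (e.g. from an editor action or inspection).
// ---------------------------------------------------------------------------
object ElementScopeUsageSketch {

  def optionClassFor(element: PsiElement): Option[PsiClass] =
    ElementScope(element).getCachedClass("scala.Option")

  def function1TypeFor(element: PsiElement): Option[ScParameterizedType] =
    ElementScope(element).function1Type()
}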
ilinum/intellij-scala
src/org/jetbrains/plugins/scala/lang/psi/ElementScope.scala
Scala
apache-2.0
2,033
package jp.co.cyberagent.aeromock import java.nio.file.Path import jp.co.cyberagent.aeromock.core.bootstrap.BootstrapManager import jp.co.cyberagent.aeromock.server.AeromockServer import org.slf4j.LoggerFactory import scaldi.Injectable /** * launcher of Aeromock * @author stormcat24 */ object Aeromock extends App with Injectable { println("Welcome to") println(AeromockInfo.splash) implicit val module = new AeromockAppModule(args) val LOG = LoggerFactory.getLogger(this.getClass()) val configFile = inject[Path] (identified by 'configFile) val listenPort = inject[Int] (identified by 'listenPort) LOG.info(s"configuration file = ${configFile.toAbsolutePath.toString}") LOG.info(s"listening port = ${listenPort}") try { BootstrapManager.delegate new AeromockServer(listenPort).run LOG.info("Aeromock Server Running.") } catch { case e: Exception => LOG.error("Failed to start Aeromock Server.", e) } }
CyberAgent/aeromock
aeromock-server/src/main/scala/jp/co/cyberagent/aeromock/Aeromock.scala
Scala
mit
955
package org.workcraft.plugins.fsm import org.workcraft.scala.Expressions._ import org.workcraft.scala.effects.IO._ import org.workcraft.scala.effects.IO import scalaz.Scalaz._ import org.workcraft.dependencymanager.advanced.user.Variable import java.awt.geom.Point2D import org.workcraft.dom.visual.connections.StaticVisualConnectionData import org.workcraft.dom.visual.connections.Polyline import org.workcraft.plugins.petri2.NameGenerator import scalaz.NonEmptyList class EditableFSM( val states: ModifiableExpression[NonEmptyList[State]], val arcs: ModifiableExpression[List[Arc]], val labels: ModifiableExpression[Map[State, String]], val stateNames: ModifiableExpression[Map[String, State]], val arcLabels: ModifiableExpression[Map[Arc, String]], val finalStates: ModifiableExpression[Set[State]], val initialState: ModifiableExpression[State], val layout: ModifiableExpression[Map[State, Point2D.Double]], val visualArcs: ModifiableExpression[Map[Arc, StaticVisualConnectionData]]) { val nodes = (states.expr <**> arcs)(_.list ++ _) val nameGen = NameGenerator(stateNames, "s") val incidentArcs: Expression[Map[State, List[Arc]]] = (arcs.expr <**> states)((arcs, states) => states.list.map(c => (c, arcs.filter(arc => (arc.to == c) || (arc.from == c)))).toMap) val presetV = saveState.map (_.fsm.preset) val postsetV = saveState.map (_.fsm.postset) def preset(s: State) = presetV.map(_(s)) def postset(s: State) = postsetV.map(_(s)) private def newState = ioPure.pure { new State } private def newArc(from: State, to: State) = ioPure.pure { new Arc(from, to) } def createState(where: Point2D.Double): IO[State] = for { s <- newState; name <- nameGen.newName; _ <- states.update( s <:: _); _ <- labels.update(_ + (s -> name)); _ <- stateNames.update(_ + (name -> s)); _ <- layout.update(_ + (s -> where)) } yield s def createArc(from: State, to: State) = for { arc <- newArc(from, to); _ <- arcs.update(arc :: _); _ <- arcLabels.update(_ + (arc -> "")) _ <- visualArcs.update(_ + (arc -> Polyline(List()))) } yield arc def deleteNode(n: Node): IO[Unit] = n match { case s: State => deleteState(s) case a: Arc => deleteArc(a) } def deleteArc(a: Arc) = arcs.update(_ - a) >>=| arcLabels.update (_ - a) >>=| visualArcs.update(_ - a) def remove[A](list: NonEmptyList[A], what: A): NonEmptyList[A] = { if (list.head == what) (if (list.tail != Nil) NonEmptyList(list.tail.head, list.tail.tail:_*) else throw new RuntimeException("cannot remove last element from NonEmptyList")) else NonEmptyList(list.head, (list.tail - what):_*) } def deleteState(s: State) = (incidentArcs.eval <|***|> (labels.eval, initialState.eval, states.eval)) >>= { case (a, l, ist, st) => val del = a(s).map(deleteArc(_)).sequence >>=| states.update(remove(_, s)) >>=| layout.update(_ - s) >>=| stateNames.update(_ - l(s)) >>=| labels.update(_ - s) >>=| finalStates.update(_ - s) val updateInitial = if (ist == s) states.eval >>= (st => initialState.set(st.head)) else IO.Empty del >>=| updateInitial } def deleteNodes(nodes: Set[Node]): IO[Either[String, IO[Unit]]] = states.eval.map(states => { if ((nodes.count { case s: State => true; case _ => false }) == states.tail.length + 1) Left("Cannot delete selection: at least one state must be defined.") else Right(nodes.toList.map(deleteNode(_)).sequence >| {}) }) def saveState = for { states <- states; arcs <- arcs; finalStates <- finalStates; initial <- initialState; labels <- labels; arcLabels <- arcLabels; layout <- layout; visualArcs <- visualArcs } yield VisualFSM(FSM(states, arcs, finalStates, initial, labels, 
arcLabels), layout, visualArcs) def loadState(state: VisualFSM): IO[Unit] = states.set(state.fsm.states) >>=| labels.set(state.fsm.labels) >>=| stateNames.set(state.fsm.names) >>=| finalStates.set(state.fsm.finalStates) >>=| initialState.set(state.fsm.initialState) >>=| arcs.set(state.fsm.arcs) >>=| arcLabels.set(state.fsm.arcLabels) >>=| layout.set(state.layout) >>=| visualArcs.set(state.visualArcs) } object EditableFSM { def create(initialState: VisualFSM) = for { states <- newVar(initialState.fsm.states); arcs <- newVar(initialState.fsm.arcs); labels <- newVar(initialState.fsm.labels); stateNames <- newVar(initialState.fsm.names); arcLabels <- newVar(initialState.fsm.arcLabels); finalStates <- newVar(initialState.fsm.finalStates); initial <- newVar(initialState.fsm.initialState); layout <- newVar(initialState.layout); visualArcs <- newVar(initialState.visualArcs) } yield new EditableFSM(states, arcs, labels, stateNames, arcLabels, finalStates, initial, layout, visualArcs) }
mechkg/workcraft
FSMPlugin/src/main/scala/org/workcraft/plugins/fsm/EditableFSM.scala
Scala
gpl-3.0
4,968
package org.jetbrains.plugins.scala package debugger package evaluation import com.intellij.debugger.SourcePosition import com.intellij.debugger.engine.evaluation.CodeFragmentFactoryContextWrapper import com.intellij.debugger.engine.evaluation.expression._ import com.intellij.debugger.engine.{JVMName, JVMNameUtil} import com.intellij.lang.java.JavaLanguage import com.intellij.openapi.project.DumbService import com.intellij.openapi.util.Condition import com.intellij.psi._ import com.intellij.psi.search.LocalSearchScope import com.intellij.psi.search.searches.ReferencesSearch import com.intellij.psi.util.PsiTreeUtil import org.jetbrains.annotations.Nls import org.jetbrains.plugins.scala.caches.BlockModificationTracker import org.jetbrains.plugins.scala.debugger.ScalaPositionManager.{InsideAsync, isCompiledWithIndyLambdas} import org.jetbrains.plugins.scala.debugger.TopLevelMembers.{hasTopLevelMembers, topLevelMemberClassName} import org.jetbrains.plugins.scala.debugger.evaluation.evaluator._ import org.jetbrains.plugins.scala.debugger.evaluation.util.DebuggerUtil import org.jetbrains.plugins.scala.debugger.evaluation.util.DebuggerUtil.isAtLeast212 import org.jetbrains.plugins.scala.extensions._ import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil import org.jetbrains.plugins.scala.lang.psi.api.base._ import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._ import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElementExt import org.jetbrains.plugins.scala.lang.psi.api.expr._ import org.jetbrains.plugins.scala.lang.psi.api.expr.xml.ScXmlPattern import org.jetbrains.plugins.scala.lang.psi.api.statements._ import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScClassParameter, ScParameter, ScParameterClause} import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportStmt import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._ import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScEarlyDefinitions, ScModifierListOwner, ScNamedElement, ScPackaging, ScTypedDefinition} import org.jetbrains.plugins.scala.lang.psi.api.{ImplicitArgumentsOwner, ScPackage, ScalaFile} import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory._ import org.jetbrains.plugins.scala.lang.psi.impl.source.ScalaCodeFragment import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticFunction import org.jetbrains.plugins.scala.lang.psi.types._ import org.jetbrains.plugins.scala.lang.psi.types.api._ import org.jetbrains.plugins.scala.lang.psi.types.api.designator.{ScDesignatorType, ScProjectionType, ScThisType} import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter import org.jetbrains.plugins.scala.lang.psi.types.result._ import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult import org.jetbrains.plugins.scala.macroAnnotations.CachedInUserData import scala.annotation.tailrec import scala.collection.immutable.ArraySeq import scala.reflect.NameTransformer /** * Nikolay.Tropin * 2014-09-28 */ private[evaluation] trait ScalaEvaluatorBuilderUtil { this: ScalaEvaluatorBuilder => import ScalaEvaluatorBuilderUtil._ private val stdTypes = projectContext.stdTypes import stdTypes._ def fileName: String = contextClass.toOption.flatMap(_.getContainingFile.toOption).map(_.name).orNull def importedQualifierEvaluator(ref: ScReference, resolveResult: ScalaResolveResult): Evaluator = { val message = 
ScalaBundle.message("cannot.evaluate.imported.reference") resolveResult.fromType match { case Some(ScDesignatorType(element)) => element match { case obj: ScObject => stableObjectEvaluator(obj) case cl: PsiClass if cl.getLanguage.isInstanceOf[JavaLanguage] => new ScalaTypeEvaluator(JVMNameUtil.getJVMQualifiedName(cl)) case _ => val expr = createExpressionWithContextFromText(element.name, ref.getContext, ref) evaluatorFor(expr) } case Some(p: ScProjectionType) => def exprToEvaluate(p: ScProjectionType): String = p.projected match { case ScDesignatorType(elem) => elem.name + "." + p.actualElement.name case projected: ScProjectionType => exprToEvaluate(projected) + "." + projected.actualElement.name case ScThisType(cl) if contextClass == cl => s"this.${p.actualElement.name}" case ScThisType(cl) => s"${cl.name}.this.${p.actualElement.name}" case _ => throw EvaluationException(message) } val expr = createExpressionWithContextFromText(exprToEvaluate(p), ref.getContext, ref) evaluatorFor(expr) case _ => throw EvaluationException(message) } } def thisOrImportedQualifierEvaluator(ref: ScReference): Evaluator = { ref.bind() match { case Some(resolveResult) => if (resolveResult.importsUsed.nonEmpty) importedQualifierEvaluator(ref, resolveResult) else thisEvaluator(resolveResult) case None => new ScalaThisEvaluator() } } def thisEvaluator(resolveResult: ScalaResolveResult): Evaluator = { //this reference val elem = resolveResult.element val containingClass = resolveResult.fromType match { case Some(ScThisType(clazz)) => clazz case Some(tp) => val project = elem.getProject tp.extractClass match { case Some(x) => x case None => getContextClass(elem) } case _ => getContextClass(elem) } containingClass match { case o: ScObject if isStable(o) => return stableObjectEvaluator(o) case _ => } val (outerClass, iterationCount) = findContextClass(e => e == null || e == containingClass) if (outerClass != null) new ScalaThisEvaluator(iterationCount) else new ScalaThisEvaluator() } def thisOrSuperEvaluator(refOpt: Option[ScStableCodeReference], isSuper: Boolean): Evaluator = { def thisEval(i: Int) = if (isSuper) new ScalaSuperEvaluator(i) else new ScalaThisEvaluator(i) def stableEvaluator(e: Evaluator) = if (isSuper) new ScalaSuperDelegate(e) else e def default: Evaluator = { val (result, iters) = findContextClass(e => e == null || e.isInstanceOf[PsiClass]) if (result == null) thisEval(0) else thisEval(iters) } refOpt match { case Some(ResolvesTo(clazz: PsiClass)) => clazz match { case o: ScObject if isStable(o) => stableEvaluator(stableObjectEvaluator(o)) case _ => val (result, iters) = findContextClass(e => e == null || e == clazz) if (result == null) thisEval(0) else thisEval(iters) } case Some(ref) => val refName = ref.refName val (result, iters) = findContextClass { case null => true case cl: PsiClass if cl.name != null && cl.name == refName => true case _ => false } result match { case o: ScObject if isStable(o) => stableEvaluator(stableObjectEvaluator(o)) case null => default case _ => thisEval(iters) } case _ => default } } def findContextClass(stopCondition: PsiElement => Boolean): (PsiElement, Int) = { var current: PsiElement = contextClass var iterations = 0 while (!stopCondition(current)) { iterations += anonClassCount(current) current = getContextClass(current) } (current, iterations) } def localMethodEvaluator(fun: ScFunctionDefinition, argEvaluators: Seq[Evaluator]): Evaluator = { def localFunName() = { val transformed = NameTransformer.encode(fun.name) fun match { case InsideAsync(call) if 
!fun.parentOfType(classOf[ScFunctionDefinition]).exists(call.isAncestorOf(_)) => transformed + "$macro" case _ => transformed } } val name = localFunName() val containingClass = if (fun.isSynthetic) fun.containingClass else getContextClass(fun) val message = ScalaBundle.message("cannot.evaluate.local.method") if (contextClass == null) { throw EvaluationException(message) } val thisEvaluator: Evaluator = containingClass match { case obj: ScObject if isStable(obj) => stableObjectEvaluator(obj) case _: ScTrait => thisOrSuperEvaluator(None, isSuper = true) case _: ScalaFile | _: ScPackaging if fun.isTopLevel => stableObjectEvaluator(topLevelMemberClassName(fun)) case _ => val (outerClass, iters) = findContextClass(e => e == null || e == containingClass) if (outerClass != null) new ScalaThisEvaluator(iters) else null } if (thisEvaluator != null) { val locals = DebuggerUtil.localParamsForFunDef(fun) val evaluators = argEvaluators ++ locals.map(fromLocalArgEvaluator) val signature = DebuggerUtil.getFunctionJVMSignature(fun) val positions = DebuggerUtil.getSourcePositions(fun.getNavigationElement) val idx = localFunctionIndex(fun) ScalaMethodEvaluator(thisEvaluator, name, signature, evaluators, traitImplementation(fun), positions, idx) } else throw EvaluationException(message) } def stableObjectEvaluator(qual: String): ScalaFieldEvaluator = { val jvm = JVMNameUtil.getJVMRawText(qual) ScalaFieldEvaluator(new TypeEvaluator(jvm), "MODULE$") } def stableObjectEvaluator(obj: ScObject): Evaluator = { val qualName = if (obj.isPackageObject) obj.qualifiedName + ".package" else obj.getQualifiedNameForDebugger val qual = qualName.split('.').map(NameTransformer.encode).mkString(".") + "$" stableObjectEvaluator(qual) } def objectEvaluator(obj: ScObject, qualEvaluator: () => Evaluator): Evaluator = { if (isStable(obj)) stableObjectEvaluator(obj) else { val objName = NameTransformer.encode(obj.name) ScalaMethodEvaluator(qualEvaluator(), objName, null /* todo? 
*/ , Seq.empty, traitImplementation(obj), DebuggerUtil.getSourcePositions(obj.getNavigationElement)) } } def syntheticFunctionEvaluator(synth: ScSyntheticFunction, qualOpt: Option[ScExpression], ref: ScReferenceExpression, arguments: Iterable[ScExpression]): Evaluator = { if (synth.isStringPlusMethod && arguments.size == 1) { val qualText = qualOpt.fold("this")(_.getText) val exprText = s"($qualText).concat(_root_.java.lang.String.valueOf(${arguments.head.getText}))" val expr = createExpressionWithContextFromText(exprText, ref.getContext, ref) return evaluatorFor(expr) } val name = synth.name val argEvaluators = arguments.map(evaluatorFor(_)) def unaryEval(operatorName: String, function: Evaluator => Evaluator): Evaluator = { if (argEvaluators.isEmpty) { val eval = qualOpt match { case None => new ScalaThisEvaluator() case Some(qual) => evaluatorFor(qual) } function(eval) } else throw EvaluationException(ScalaBundle.message("wrong.number.of.arguments", operatorName)) } def unaryEvalForBoxes(operatorName: String, boxesName: String): Evaluator = { unaryEval(operatorName, unaryEvaluator(_, boxesName)) } def binaryEval(operatorName: String, function: (Evaluator, Evaluator) => Evaluator): Evaluator = { if (argEvaluators.size == 1) { val eval = qualOpt match { case None => new ScalaThisEvaluator() case Some(qual) => evaluatorFor(qual) } function(eval, argEvaluators.head) } else throw EvaluationException(ScalaBundle.message("wrong.number.of.arguments", operatorName)) } def binaryEvalForBoxes(operatorName: String, boxesName: String): Evaluator = { binaryEval(operatorName, binaryEvaluator(_, _, boxesName)) } def equalsEval(opName: String): Evaluator = { val rawText = JVMNameUtil.getJVMRawText("(Ljava/lang/Object;Ljava/lang/Object;)Z") binaryEval(name, (l, r) => ScalaMethodEvaluator(BOXES_RUN_TIME, "equals", rawText, boxed(l, r))) } def extractTypeArgumentForSyntheticMethod(methodName: String): ScType = { def missingTypeArgument() = throw EvaluationException(ScalaBundle.message("missing.type.argument.synthetic.method", methodName)) ref.getParent match { case gen: ScGenericCall => gen.typeArgs.typeArgs match { case Seq(arg) => arg.calcType case _ => missingTypeArgument() } case _ => missingTypeArgument() } } def asInstanceOfEval: Evaluator = { val methodName = "asInstanceOf" unaryEval(methodName, eval => new AsInstanceOfEvaluator(eval, extractTypeArgumentForSyntheticMethod(methodName))) } def isInstanceOfEval: Evaluator = { val methodName = "isInstanceOf" unaryEval(methodName, eval => new IsInstanceOfEvaluator(eval, extractTypeArgumentForSyntheticMethod(methodName))) } def trueEval = expressionFromTextEvaluator("true", ref) def falseEval = expressionFromTextEvaluator("false", ref) def conditionalOr = binaryEval("||", (first, second) => new ScalaIfEvaluator(first, trueEval, Some(second))) def conditionalAnd = binaryEval("&&", (first, second) => new ScalaIfEvaluator(first, second, Some(falseEval))) name match { case "isInstanceOf" => isInstanceOfEval case "asInstanceOf" => asInstanceOfEval case "##" => unaryEval(name, eval => { // Used in Scala 2.10 and Scala 2.11 val oldSyntheticHash = ScalaMethodEvaluator(SCALA_RUN_TIME, "hash", JVMNameUtil.getJVMRawText("(Ljava/lang/Object;)I"), boxed(eval)) // Used since Scala 2.12 val newSyntheticHash = ScalaMethodEvaluator(SCALA_RUNTIME_STATICS, "anyHash", JVMNameUtil.getJVMRawText("(Ljava/lang/Object;)I"), boxed(eval)) ScalaDuplexEvaluator(newSyntheticHash, oldSyntheticHash) }) case "==" => equalsEval("==") case "!=" => unaryEvaluator(equalsEval("!="), 
"takeNot") case "unary_!" => unaryEvalForBoxes("!", "takeNot") case "unary_~" => unaryEvalForBoxes("~", "complement") case "unary_+" => unaryEvalForBoxes("+", "positive") case "unary_-" => unaryEvalForBoxes("-", "negate") case "eq" => binaryEval(name, eqEvaluator) case "ne" => binaryEval(name, neEvaluator) case "<" => binaryEvalForBoxes(name, "testLessThan") case ">" => binaryEvalForBoxes(name, "testGreaterThan") case ">=" => binaryEvalForBoxes(name, "testGreaterOrEqualThan") case "<=" => binaryEvalForBoxes(name, "testLessOrEqualThan") case "+" => binaryEvalForBoxes(name, "add") case "-" => binaryEvalForBoxes(name, "subtract") case "*" => binaryEvalForBoxes(name, "multiply") case "/" => binaryEvalForBoxes(name, "divide") case "%" => binaryEvalForBoxes(name, "takeModulo") case ">>" => binaryEvalForBoxes(name, "shiftSignedRight") case "<<" => binaryEvalForBoxes(name, "shiftSignedLeft") case ">>>" => binaryEvalForBoxes(name, "shiftLogicalRight") case "&" => binaryEvalForBoxes(name, "takeAnd") case "|" => binaryEvalForBoxes(name, "takeOr") case "^" => binaryEvalForBoxes(name, "takeXor") case "&&" => conditionalAnd case "||" => conditionalOr case "toInt" => unaryEvalForBoxes(name, "toInteger") case "toChar" => unaryEvalForBoxes(name, "toCharacter") case "toShort" => unaryEvalForBoxes(name, "toShort") case "toByte" => unaryEvalForBoxes(name, "toByte") case "toDouble" => unaryEvalForBoxes(name, "toDouble") case "toLong" => unaryEvalForBoxes(name, "toLong") case "toFloat" => unaryEvalForBoxes(name, "toFloat") case "synchronized" => throw EvaluationException(ScalaBundle.message("synchronized.statement.is.not.supported")) case _ => throw EvaluationException(ScalaBundle.message("cannot.evaluate.synthetic.method", name)) } } def arrayMethodEvaluator(name: String, qual: Option[ScExpression], argEvaluators: Seq[Evaluator]): Evaluator = { val qualEval = qual match { case Some(q) => evaluatorFor(q) case None => throw EvaluationException(ScalaBundle.message("array.instance.is.not.found", name)) } def message = ScalaBundle.message("wrong.number.of.arguments", s"Array.$name") name match { case "apply" => if (argEvaluators.length == 1) new ScalaArrayAccessEvaluator(qualEval, argEvaluators.head) else throw EvaluationException(message) case "length" => if (argEvaluators.isEmpty) ScalaFieldEvaluator(qualEval, "length") else throw EvaluationException(message) case "clone" => val signature = JVMNameUtil.getJVMRawText("()[Ljava/lang/Object;") if (argEvaluators.isEmpty) ScalaMethodEvaluator(qualEval, "clone", signature, Nil) else throw EvaluationException(message) case "hashCode" => val signature = JVMNameUtil.getJVMRawText("()I;") if (argEvaluators.isEmpty) ScalaMethodEvaluator(qualEval, "hashCode", signature, Nil) else throw EvaluationException(message) case "update" => if (argEvaluators.length == 2) { val leftEval = new ScalaArrayAccessEvaluator(qualEval, argEvaluators.head) new AssignmentEvaluator(leftEval, argEvaluators(1)) } else throw EvaluationException(message) case "toString" => val signature = JVMNameUtil.getJVMRawText("()Ljava/lang/String;") if (argEvaluators.isEmpty) ScalaMethodEvaluator(qualEval, "toString", signature, Nil) else throw EvaluationException(message) case _ => throw EvaluationException(ScalaBundle.message("array.method.not.supported")) } } def isArrayFunction(fun: ScFunction): Boolean = { fun.getContext match { case _: ScTemplateBody => fun.containingClass match { case clazz: ScClass if clazz.qualifiedName == "scala.Array" => true case _ => false } case _ => false } } def 
isClassOfFunction(fun: ScFunction): Boolean = { if (fun.name != "classOf") return false fun.getContext match { case _: ScTemplateBody => fun.containingClass match { case clazz: PsiClass if clazz.qualifiedName == "scala.Predef" => true case _ => false } case _ => false } } def classOfFunctionEvaluator(ref: ScReferenceExpression): Evaluator = { ref.getContext match { case gen: ScGenericCall => val tpe = gen.arguments.head.`type`().getOrAny new ClassOfEvaluator(tpe) case _ => ScalaLiteralEvaluator.empty } } def valueClassInstanceEvaluator(value: Evaluator, innerType: ScType, classType: ScType): Evaluator = { val valueClassType = new ScalaTypeEvaluator(DebuggerUtil.getJVMQualifiedName(classType)) val innerJvmName = DebuggerUtil.getJVMStringForType(innerType) val signature = JVMNameUtil.getJVMRawText(s"($innerJvmName)V") ScalaDuplexEvaluator(new ScalaNewClassInstanceEvaluator(valueClassType, signature, Array(value)), value) } def unwrapValueClass(instance: Evaluator, valueClassType: ScType, param: ScClassParameter): Evaluator = UnwrapValueClassEvaluator(instance, DebuggerUtil.getJVMQualifiedName(valueClassType), param.name, param.isPrivate) def repeatedArgEvaluator(exprsForP: Seq[ScExpression], expectedType: ScType, context: PsiElement): Evaluator = { def seqEvaluator: Evaluator = { val argTypes = exprsForP.map(_.`type`().getOrAny) val argType = if (argTypes.isEmpty) expectedType else argTypes.lub() val argTypeText = argType.canonicalText val arguments = exprsForP.sortBy(_.startOffset).map { argExpr => val eval = evaluatorFor(argExpr) argExpr.smartExpectedType() match { case Some(tp @ ValueClassType(inner)) => valueClassInstanceEvaluator(eval, inner, tp) case _ => boxEvaluator(eval) } } val builderExprText = s"_root_.scala.collection.Seq.newBuilder[$argTypeText]" val builderExpr = createExpressionWithContextFromText(builderExprText, context, context) val builderEval = evaluatorFor(builderExpr) val addOneJVMName = if (builderExpr.newCollectionsFramework) { JVMNameUtil.getJVMRawText("(Ljava/lang/Object;)Lscala/collection/mutable/Growable") } else { JVMNameUtil.getJVMRawText("(Ljava/lang/Object;)Lscala/collection/mutable/Builder") } val addEval = arguments.foldLeft(builderEval) { (acc, arg) => ScalaMethodEvaluator(acc, "$plus$eq", addOneJVMName, Seq(arg)) } val resultJVMName = JVMNameUtil.getJVMRawText("()Ljava/lang/Object") ScalaMethodEvaluator(addEval, "result", resultJVMName, Seq.empty) } if (exprsForP.length == 1) { exprsForP.head match { case t: ScTypedExpression if t.isSequenceArg => evaluatorFor(t.expr) case _ => seqEvaluator } } else seqEvaluator } def implicitArgEvaluator(fun: ScMethodLike, param: ScParameter, owner: ImplicitArgumentsOwner): Evaluator = { assert(param.owner == fun) val implicitParameters = fun.effectiveParameterClauses.lastOption match { case Some(clause) if clause.isImplicit => clause.effectiveParameters case _ => Seq.empty } val i = implicitParameters.indexOf(param) val cannotFindMessage = ScalaBundle.message("cannot.find.implicit.parameters") owner.findImplicitArguments match { case Some(resolveResults) if resolveResults.length == implicitParameters.length => if (resolveResults(i) == null) throw EvaluationException(cannotFindMessage) val exprText = resolveResults(i) match { case ScalaResolveResult(clazz: ScTrait, substitutor) if clazz.qualifiedName == "scala.reflect.ClassManifest" => val argType = substitutor(clazz.`type`().get) argType match { case ParameterizedType(_, Seq(paramType)) => classManifestText(paramType) case _ => throw 
EvaluationException(cannotFindMessage) } case ScalaResolveResult(clazz: ScTrait, substitutor) if clazz.qualifiedName == "scala.reflect.ClassTag" => val argType = substitutor(clazz.`type`().get) argType match { case ParameterizedType(_, Seq(arg)) => classTagText(arg) case _ => throw EvaluationException(cannotFindMessage) } case ScalaResolveResult(elem, _) => val context = ScalaPsiUtil.nameContext(elem) val clazz = context.getContext match { case _: ScTemplateBody | _: ScEarlyDefinitions => ScalaPsiUtil.getContextOfType(context, true, classOf[PsiClass]) case _ if context.isInstanceOf[ScClassParameter] => ScalaPsiUtil.getContextOfType(context, true, classOf[PsiClass]) case _ => null } clazz match { case o: ScObject if isStable(o) => o.qualifiedName + "." + elem.name case _: ScObject => //todo: It can cover many cases! throw EvaluationException(ScalaBundle.message("implicit.parameters.from.dependent.objects")) case _ => elem.name //from scope } } val newExpr = createExpressionWithContextFromText(exprText, owner.getContext, owner) evaluatorFor(newExpr) case _ => throw EvaluationException(cannotFindMessage) } } def parameterEvaluator(fun: PsiElement, resolve: PsiElement): Evaluator = { val name = NameTransformer.encode(resolve.asInstanceOf[PsiNamedElement].name) val evaluator = new ScalaLocalVariableEvaluator(name, fileName) fun match { case funDef: ScFunctionDefinition => def paramIndex(fun: ScFunctionDefinition, context: PsiElement, elem: PsiElement): Int = { val locIndex = DebuggerUtil.localParamsForFunDef(fun).indexOf(elem) val funParams = fun.effectiveParameterClauses.flatMap(_.effectiveParameters) if (locIndex < 0) funParams.indexOf(elem) else locIndex + funParams.size } val pIndex = paramIndex(funDef, getContextClass(fun), resolve) evaluator.setParameterIndex(pIndex) evaluator.setMethodName(funDef.name) case funExpr: ScFunctionExpr => evaluator.setParameterIndex(funExpr.parameters.indexOf(resolve)) evaluator.setMethodName("apply") case _ => throw EvaluationException(ScalaBundle.message("cannot.evaluate.parameter", name)) } evaluator } def javaFieldEvaluator(field: PsiField, ref: ScReferenceExpression): Evaluator = { ref.qualifier match { case Some(qual) => if (field.hasModifierPropertyScala("static")) { val eval = new ScalaTypeEvaluator(JVMNameUtil.getContextClassJVMQualifiedName(SourcePosition.createFromElement(field))) val name = field.name ScalaFieldEvaluator(eval, name) } else { val qualEvaluator = evaluatorFor(qual) ScalaFieldEvaluator(qualEvaluator, field.name) } case None => val evaluator = thisOrImportedQualifierEvaluator(ref) ScalaFieldEvaluator(evaluator, field.name) } } def javaMethodEvaluator(method: PsiMethod, ref: ScReferenceExpression, arguments: Seq[ScExpression]): Evaluator = { def boxArguments(arguments: Seq[Evaluator], method: PsiElement): Seq[Evaluator] = { val params = method match { case fun: ScMethodLike => fun.effectiveParameterClauses.flatMap(_.parameters) case m: PsiMethod => m.parameters case _ => return arguments } arguments.zipWithIndex.map { case (arg, i) => if (params.length <= i || isOfPrimitiveType(params(i))) arg else boxEvaluator(arg) } } val argEvals = boxArguments(arguments.map(evaluatorFor(_)), method) val methodPosition = DebuggerUtil.getSourcePositions(method.getNavigationElement) val signature = JVMNameUtil.getJVMSignature(method) ref.qualifier match { case Some(qual@Typeable(tp)) if tp.isPrimitive => val boxEval = boxEvaluator(evaluatorFor(qual)) ScalaMethodEvaluator(boxEval, method.name, signature, argEvals, None, methodPosition) case Some(_) if 
method.hasModifierPropertyScala("static") => val eval = new ScalaTypeEvaluator(JVMNameUtil.getContextClassJVMQualifiedName(SourcePosition.createFromElement(method))) val name = method.name ScalaMethodEvaluator(eval, name, signature, argEvals, None, methodPosition) case Some(q) => val name = method.name ScalaMethodEvaluator(evaluatorFor(q), name, signature, argEvals, None, methodPosition) case _ => val evaluator = thisOrImportedQualifierEvaluator(ref) val name = method.name ScalaMethodEvaluator(evaluator, name, signature, argEvals, None, methodPosition) } } def unresolvedMethodEvaluator(ref: ScReferenceExpression, args: Seq[ScExpression]): Evaluator = { val argEvals = args.map(evaluatorFor(_)) val name = NameTransformer.encode(ref.refName) ref.qualifier match { case Some(q) => ScalaMethodEvaluator(evaluatorFor(q), name, null, argEvals) case _ => ScalaMethodEvaluator(thisOrImportedQualifierEvaluator(ref), name, null, argEvals) } } def argumentEvaluators(fun: ScMethodLike, matchedParameters: Map[Parameter, Seq[ScExpression]], call: ScExpression, ref: ScReferenceExpression, arguments: Seq[ScExpression], isArrayFunction: Boolean = false): Seq[Evaluator] = { val clauses = fun.effectiveParameterClauses val parameters = clauses.flatMap(_.effectiveParameters).map(Parameter(_)) def addForNextClause(previousClausesEvaluators: Seq[Evaluator], clause: ScParameterClause): Seq[Evaluator] = { def isDefaultExpr(expr: ScExpression) = expr match { case ChildOf(p: ScParameter) => p.isDefaultParam case _ => false } previousClausesEvaluators ++ clause.effectiveParameters.map { param => val p = Parameter(param) val exprsForP = matchedParameters.find(_._1.name == p.name).map(_._2).getOrElse(Seq.empty).filter(_ != null) if (p.isByName) throw new NeedCompilationException(ScalaBundle.message("method.with.by-name.parameters")) val evaluator = if (p.isRepeated) repeatedArgEvaluator(exprsForP, p.expectedType, call) else if (exprsForP.size > 1) throw EvaluationException(ScalaBundle.message("wrong.number.of.expressions")) else if (exprsForP.length == 1 && !isDefaultExpr(exprsForP.head)) { val expr = exprsForP.head val eval = evaluatorFor(expr) expr.smartExpectedType() match { case Some(tp @ ValueClassType(inner)) if isArrayFunction => valueClassInstanceEvaluator(eval, inner, tp) case _ => eval } } else if (param.isImplicitParameter) implicitArgEvaluator(fun, param, call) else if (p.isDefault) { val paramIndex = parameters.indexOf(p) val methodName = defaultParameterMethodName(fun, paramIndex) val localParams = p.paramInCode.toSeq.flatMap(DebuggerUtil.localParamsForDefaultParam(_)) val localParamRefs = localParams.map(td => createExpressionWithContextFromText(td.name, call.getContext, call)) val localEvals = localParamRefs.map(evaluatorFor(_)) functionEvaluator(ref.qualifier, ref, methodName, previousClausesEvaluators ++ localEvals, isDefaultArg = true) } else throw EvaluationException(ScalaBundle.message("cannot.evaluate.parameter", p.name)) if (!isOfPrimitiveType(param) && !isArrayFunction) boxEvaluator(evaluator) else evaluator } } val argEvaluators: Seq[Evaluator] = clauses.foldLeft(Seq.empty[Evaluator])(addForNextClause) if (argEvaluators.contains(null)) arguments.map(arg => evaluatorFor(arg)) else argEvaluators } def functionEvaluator(qualOption: Option[ScExpression], ref: ScReferenceExpression, funName: String, argEvaluators: Seq[Evaluator], isDefaultArg: Boolean = false): Evaluator = { def qualEvaluator(r: ScalaResolveResult) = { def defaultQualEvaluator = qualifierEvaluator(qualOption, ref) r.getActualElement 
match { case o: ScObject if funName == "apply" => objectEvaluator(o, () => defaultQualEvaluator) case _ => defaultQualEvaluator } } val name = NameTransformer.encode(funName) ref.bind() match { case Some(r) if r.tuplingUsed => throw EvaluationException(ScalaBundle.message("tupling.not.supported")) case None => throw EvaluationException(ScalaBundle.message("cannot.evaluate.method", funName)) case Some(r @ traitMethod(tr, fun)) if fun.isPrivate || fun.isLocal || isDefaultArg => val traitTypeEval = new ScalaTypeEvaluator(DebuggerUtil.getClassJVMName(tr, withPostfix = true)) val qualEval = qualEvaluator(r) val withTraitImpl = ScalaMethodEvaluator(traitTypeEval, name, null, qualEval +: argEvaluators) val withDefault = ScalaMethodEvaluator(qualEval, name, null, argEvaluators, traitImplementation(fun)) ScalaDuplexEvaluator(withTraitImpl, withDefault) case Some(ScalaResolveResult(fun: ScFunction, _)) if fun.isTopLevel => val objectEval = stableObjectEvaluator(topLevelMemberClassName(fun)) val signature = DebuggerUtil.getFunctionJVMSignature(fun) ScalaMethodEvaluator(objectEval, name, signature, argEvaluators, None, DebuggerUtil.getSourcePositions(fun.getNavigationElement)) case Some(r) => val resolve = r.element val qualEval = qualEvaluator(r) val signature = resolve match { case fun: ScFunction => DebuggerUtil.getFunctionJVMSignature(fun) case _ => null } ScalaMethodEvaluator(qualEval, name, signature, argEvaluators, traitImplementation(resolve), DebuggerUtil.getSourcePositions(resolve.getNavigationElement)) } } def methodCallEvaluator(call: ScExpression, arguments: Seq[ScExpression], matchedParameters: Map[Parameter, Seq[ScExpression]]): Evaluator = { val ref = call match { case hasDeepestInvokedReference(r) => r case _ => throw EvaluationException(ScalaBundle.message("cannot.evaluate.method", call.getText)) } val qualOption = ref.qualifier val resolve = ref.resolve() resolve match { case fun: ScFunctionDefinition if fun.isLocal => val args = argumentEvaluators(fun, matchedParameters, call, ref, arguments) localMethodEvaluator(fun, args) case fun: ScFunction if isClassOfFunction(fun) => classOfFunctionEvaluator(ref) case synth: ScSyntheticFunction => syntheticFunctionEvaluator(synth, qualOption, ref, arguments) //todo: use matched parameters case fun: ScFunction if isArrayFunction(fun) => val args = argumentEvaluators(fun, matchedParameters, call, ref, arguments, isArrayFunction = true) arrayMethodEvaluator(fun.name, qualOption, args) case fun: ScFunction => ref match { case isInsideValueClass(c) if qualOption.isEmpty => val clName = c.name val paramName = c.allClauses.flatMap(_.parameters).map(_.name).headOption.getOrElse("$this") val text = s"new $clName($paramName).${call.getText}" val expr = createExpressionFromText(text, call.getContext) evaluatorFor(expr) case _ => val args = argumentEvaluators(fun, matchedParameters, call, ref, arguments) functionEvaluator(qualOption, ref, fun.name, args) } case method: PsiMethod => javaMethodEvaluator(method, ref, arguments) case _ => unresolvedMethodEvaluator(ref, arguments) } } def byNameParamEvaluator(ref: ScReferenceExpression, p: ScParameter, computeValue: Boolean): Evaluator = { val paramEval = p match { case cp: ScClassParameter if cp.isCallByNameParameter => val qualEval = qualifierEvaluator(ref.qualifier, ref) val name = NameTransformer.encode(cp.name) ScalaFieldEvaluator(qualEval, name, classPrivateThisField = true) case _: ScParameter if p.isCallByNameParameter => calcLocal(p) case _ => throw 
EvaluationException(ScalaBundle.message("by.name.parameter.expected")) } if (computeValue) ScalaMethodEvaluator(paramEval, "apply", null, Nil) else paramEval } private def withOuterFieldEvaluator(containingClass: PsiElement, name: String, @Nls message: String) = { val (innerClass, iterationCount) = findContextClass { e => e == null || {val nextClass = getContextClass(e); nextClass == null || nextClass == containingClass} } if (innerClass == null) throw EvaluationException(message) val thisEval = new ScalaThisEvaluator(iterationCount) ScalaFieldEvaluator(thisEval, name) } private def calcLocal(named: PsiNamedElement): Evaluator = { val name = NameTransformer.encode(named.name) val containingClass = getContextClass(named) val localVariableEvaluator: Evaluator = ScalaPsiUtil.nameContext(named) match { case param: ScParameter => param.owner match { case fun@(_: ScFunction | _: ScFunctionExpr) => parameterEvaluator(fun, param) case _ => // TODO: could also be primary constructor or extension method... maybe need handling too? throw EvaluationException(ScalaBundle.message("cannot.evaluate.parameter", param.name)) } case caseCl: ScCaseClause => patternEvaluator(caseCl, named) case _: ScGenerator | _: ScForBinding if position != null && isNotUsedEnumerator(named, position.getElementAt) => throw EvaluationException(ScalaBundle.message("not.used.from.for.statement", name)) case LazyVal(_) => localLazyValEvaluator(named) case InsideAsync(_) => val simpleLocal = new ScalaLocalVariableEvaluator(name, fileName) val fieldMacro = ScalaFieldEvaluator(new ScalaThisEvaluator(), name + "$macro") ScalaDuplexEvaluator(simpleLocal, fieldMacro) case _ => new ScalaLocalVariableEvaluator(name, fileName) } containingClass match { case `contextClass` | _: ScGenerator | _: ScForBinding => localVariableEvaluator case _ if contextClass == null => localVariableEvaluator case _ => val fieldEval = withOuterFieldEvaluator(containingClass, name, ScalaBundle.message("cannot.evaluate.local.variable", name)) ScalaDuplexEvaluator(fieldEval, localVariableEvaluator) } } def evaluatorForReferenceWithoutParameters(qualifier: Option[ScExpression], resolve: PsiElement, ref: ScReferenceExpression): Evaluator = { def calcLocalObject(obj: ScObject) = { val containingClass = getContextClass(obj) val name = NameTransformer.encode(obj.name) + "$module" if (containingClass == contextClass) { new ScalaLocalVariableEvaluator(name, fileName) } else { val fieldEval = withOuterFieldEvaluator(containingClass, name, ScalaBundle.message("cannot.evaluate.local.object", name)) fieldEval } } def fieldEvaluatorFromElement(element: PsiElement, isPrivateThis: Boolean): ScalaFieldEvaluator = { val named = element.asInstanceOf[ScNamedElement] val qualEval = qualifierEvaluator(qualifier, ref) val name = NameTransformer.encode(named.name) ScalaFieldEvaluator(qualEval, name, classPrivateThisField = isPrivateThis) } val labeledOrSynthetic = labeledOrSyntheticEvaluator(ref, resolve) if (labeledOrSynthetic.isDefined) return labeledOrSynthetic.get val isLocalValue = DebuggerUtil.isLocalV(resolve) resolve match { case isInsideLocalFunction(fun) && (named: PsiNamedElement) if isLocalValue => ScalaDuplexEvaluator(calcLocal(named), parameterEvaluator(fun, resolve)) case p: ScParameter if p.isCallByNameParameter => byNameParamEvaluator(ref, p, computeValue = true) case obj: ScObject if isLocalValue => calcLocalObject(obj) case named: PsiNamedElement if isLocalValue => calcLocal(named) case obj: ScObject => objectEvaluator(obj, () => qualifierEvaluator(qualifier, ref)) 
case _: PsiMethod | _: ScSyntheticFunction => methodCallEvaluator(ref, Nil, Map.empty) case cp: ScClassParameter if !cp.isClassMember => val local = new ScalaLocalVariableEvaluator(cp.name, fileName) val field = fieldEvaluatorFromElement(resolve, cp.isPrivateThis) val duplex = ScalaDuplexEvaluator(local, field) new ErrorWrapperEvaluator(duplex, ScalaBundle.message("constructor.param.inaccessible.outside.of.constructor", cp.name)) case privateThisField(_) => fieldEvaluatorFromElement(resolve, isPrivateThis = true) case cp: ScClassParameter if qualifier.isEmpty && ValueClassType.isValueClass(cp.containingClass) => //methods of value classes have hidden argument with underlying value new ScalaLocalVariableEvaluator("$this", fileName) case _: ScClassParameter | _: ScBindingPattern => //this is scala "field" or a top-level val/var val named = resolve.asInstanceOf[ScNamedElement] val name = NameTransformer.encode(named.name) val qualEval = qualifierEvaluator(qualifier, ref) val withSimpleNameEval = ScalaMethodEvaluator(qualEval, name, null /* todo */ , Seq.empty, traitImplementation(resolve), DebuggerUtil.getSourcePositions(resolve.getNavigationElement)) getContextClass(named) match { //in some cases compiler uses full qualified names for fields and methods case clazz: ScTemplateDefinition if ScalaPsiUtil.hasStablePath(clazz) && clazz.members.contains(ScalaPsiUtil.nameContext(named)) => val qualName = clazz.qualifiedName val newName = qualName.split('.').map(NameTransformer.encode).mkString("$") + "$$" + name val reserveEval = ScalaMethodEvaluator(qualEval, newName, null /* todo */ , Seq.empty, traitImplementation(resolve), DebuggerUtil.getSourcePositions(resolve.getNavigationElement)) ScalaDuplexEvaluator(withSimpleNameEval, reserveEval) case _ => withSimpleNameEval } case field: PsiField => javaFieldEvaluator(field, ref) case pack: ScPackage => //let's try to find package object: val qual = (pack.getQualifiedName + ".package$").split('.').map(NameTransformer.encode).mkString(".") stableObjectEvaluator(qual) case _ => //unresolved symbol => try to resolve it dynamically val name = NameTransformer.encode(ref.refName) val fieldOrVarEval = qualifier match { case Some(qual) => ScalaFieldEvaluator(evaluatorFor(qual), name) case None => new ScalaLocalVariableEvaluator(name, fileName) } ScalaDuplexEvaluator(fieldOrVarEval, unresolvedMethodEvaluator(ref, Seq.empty)) } } def labeledOrSyntheticEvaluator(ref: ScReferenceExpression, resolve: PsiElement): Option[Evaluator] = { if (resolve == null) return None val labeledValue = resolve.getUserData(CodeFragmentFactoryContextWrapper.LABEL_VARIABLE_VALUE_KEY) if (labeledValue != null) return Some(new IdentityEvaluator(labeledValue)) val isSynthetic = codeFragment.isAncestorOf(resolve) if (isSynthetic && ref.qualifier.isEmpty) Some(syntheticVariableEvaluator(ref.refName)) else None } def qualifierEvaluator(qualifier: Option[ScExpression], ref: ScReferenceExpression): Evaluator = qualifier match { case Some(q) => evaluatorFor(q) case _ => thisOrImportedQualifierEvaluator(ref) } def patternEvaluator(caseCl: ScCaseClause, namedElement: PsiNamedElement): Evaluator = { val name = namedElement.name if (caseCl.getParent != null) { val pattern = caseCl.pattern if (pattern.isEmpty) throw EvaluationException(ScalaBundle.message("cannot.find.pattern")) caseCl.getParent.getParent match { case matchStmt: ScMatch if namedElement.isInstanceOf[ScPattern] => val expr = matchStmt.expression match { case None => throw 
EvaluationException(ScalaBundle.message("cannot.find.expression.of.match")) case Some(e) => e } val exprEval = try evaluatorFor(expr) catch { case _: NeedCompilationException => new ScalaCompilingEvaluator(expr, ScalaCodeFragment(expr.getText)(caseCl.getProject)) } val fromPatternEvaluator = evaluateSubpatternFromPattern(exprEval, pattern.get, namedElement.asInstanceOf[ScPattern]) ScalaDuplexEvaluator(new ScalaLocalVariableEvaluator(name, fileName), fromPatternEvaluator) case _: ScBlockExpr => //it is anonymous function val argEvaluator = new ScalaLocalVariableEvaluator("", fileName) argEvaluator.setMethodName("apply") argEvaluator.setParameterIndex(0) val fromPatternEvaluator = evaluateSubpatternFromPattern(argEvaluator, pattern.get, namedElement.asInstanceOf[ScPattern]) ScalaDuplexEvaluator(new ScalaLocalVariableEvaluator(name, fileName), fromPatternEvaluator) case _ => new ScalaLocalVariableEvaluator(name, fileName) } } else throw EvaluationException(ScalaBundle.message("invalid.case.clause")) } def assignmentEvaluator(stmt: ScAssignment): Evaluator = { val message = ScalaBundle.message("assignent.without.expression") if (stmt.isNamedParameter) { stmt.rightExpression match { case Some(expr) => evaluatorFor(expr) case _ => throw EvaluationException(message) } } else { stmt.leftExpression match { case call: ScMethodCall => val invokedText = call.getInvokedExpr.getText val rExprText = stmt.rightExpression.fold("null")(_.getText) val args = (call.args.exprs.map(_.getText) :+ rExprText).mkString("(", ", ", ")") val exprText = s"($invokedText).update$args" val expr = createExpressionWithContextFromText(exprText, stmt.getContext, stmt) evaluatorFor(expr) case _ => val leftEvaluator = evaluatorFor(stmt.leftExpression) val rightEvaluator = stmt.rightExpression match { case Some(expr) => evaluatorFor(expr) case _ => throw EvaluationException(message) } def createAssignEvaluator(leftEvaluator: Evaluator): Option[Evaluator] = { leftEvaluator match { case m: ScalaMethodEvaluator => Some(m.copy(_methodName = m.methodName + "_$eq", argumentEvaluators = Seq(rightEvaluator))) //todo: signature? 
case ScalaDuplexEvaluator(first, second) => createAssignEvaluator(first) orElse createAssignEvaluator(second) case _ => None } } createAssignEvaluator(leftEvaluator).getOrElse(new AssignmentEvaluator(leftEvaluator, rightEvaluator)) } } } def evaluateSubpatternFromPattern(exprEval: Evaluator, pattern: ScPattern, subPattern: ScPattern): Evaluator = { def evaluateConstructorOrInfix(exprEval: Evaluator, ref: ScStableCodeReference, pattern: ScPattern, nextPatternIndex: Int): Evaluator = { ref.resolve() match { case fun: ScFunctionDefinition => val elem = ref.bind().get.getActualElement //object or case class val qual = ref.qualifier.map(q => createExpressionWithContextFromText(q.getText, q.getContext, q)) val refExpr = createExpressionWithContextFromText(ref.getText, ref.getContext, ref) val refEvaluator = evaluatorForReferenceWithoutParameters(qual, elem, refExpr.asInstanceOf[ScReferenceExpression]) val funName = fun.name val newEval = if (funName == "unapply") { val extractEval = ScalaMethodEvaluator(refEvaluator, funName, DebuggerUtil.getFunctionJVMSignature(fun), Seq(exprEval)) if (pattern.subpatterns.length == 1) ScalaMethodEvaluator(extractEval, "get", null, Seq.empty) else if (pattern.subpatterns.length > 1) { val getEval = ScalaMethodEvaluator(extractEval, "get", null, Seq.empty) ScalaFieldEvaluator(getEval, s"_${nextPatternIndex + 1}") } else throw EvaluationException(ScalaBundle.message("unapply.without.arguments")) } else if (funName == "unapplySeq") { val extractEval = ScalaMethodEvaluator(refEvaluator, funName, DebuggerUtil.getFunctionJVMSignature(fun), Seq(exprEval)) val getEval = ScalaMethodEvaluator(extractEval, "get", null, Seq.empty) val indexExpr = createExpressionFromText("" + nextPatternIndex)(pattern.getManager) val indexEval = evaluatorFor(indexExpr) ScalaMethodEvaluator(getEval, "apply", JVMNameUtil.getJVMRawText("(I)Ljava/lang/Object;"), Seq(indexEval)) } else throw EvaluationException(ScalaBundle.message("pattern.doesnot.resolves.to.unapply", ref.refName)) val nextPattern = pattern.subpatterns(nextPatternIndex) evaluateSubpatternFromPattern(newEval, nextPattern, subPattern) case _ => throw EvaluationException(ScalaBundle.message("pattern.doesnot.resolves.to.unapply", ref.refName)) } } if (pattern == null || subPattern == null) throw new IllegalArgumentException("Patterns should not be null") val nextPatternIndex: Int = pattern.subpatterns.indexWhere(next => subPattern.withParentsInFile.contains(next)) if (pattern == subPattern) exprEval else if (nextPatternIndex < 0) throw new IllegalArgumentException("Pattern is not ancestor of subpattern") else { pattern match { case naming: ScNamingPattern => evaluateSubpatternFromPattern(exprEval, naming.named, subPattern) case _: ScTypedPattern => evaluateSubpatternFromPattern(exprEval, pattern.subpatterns.head, subPattern) case par: ScParenthesisedPattern => val withoutPars = par.innerElement.getOrElse(throw new IllegalStateException("Empty parentheses pattern")) evaluateSubpatternFromPattern(exprEval, withoutPars, subPattern) case tuple: ScTuplePattern => val nextPattern = tuple.subpatterns(nextPatternIndex) val newEval = ScalaFieldEvaluator(exprEval, s"_${nextPatternIndex + 1}") evaluateSubpatternFromPattern(newEval, nextPattern, subPattern) case constr: ScConstructorPattern => val ref: ScStableCodeReference = constr.ref evaluateConstructorOrInfix(exprEval, ref, constr, nextPatternIndex) case infix: ScInfixPattern => val ref: ScStableCodeReference = infix.operation evaluateConstructorOrInfix(exprEval, ref, infix, 
nextPatternIndex) //todo: handle infix with tuple right pattern case _: ScCompositePattern => throw EvaluationException(ScalaBundle.message("pattern.alternatives.cannot.bind.vars")) case _: ScXmlPattern => throw EvaluationException(ScalaBundle.message("xml.patterns.not.supported")) //todo: xml patterns case _ => throw EvaluationException(ScalaBundle.message("kind.of.patterns.not.supported", pattern.getText)) //todo: xml patterns } } } def newTemplateDefinitionEvaluator(templ: ScNewTemplateDefinition): Evaluator = { templ.extendsBlock.templateParents match { case Some(parents) => if (parents.typeElements.length != 1) { throw new NeedCompilationException(ScalaBundle.message("anon.classes.not.supported")) } parents.constructorInvocation match { case Some(constrInvocation) => constrInvocation.typeElement.calcType.extractClass match { case Some(clazz) if clazz.qualifiedName == "scala.Array" => def unspecifiedParameters = throw EvaluationException(ScalaBundle.message("array.constructor.unspecified.parameters")) def tooManyArguments = throw EvaluationException(ScalaBundle.message("array.constructor.too.many.arguments")) val typeArgs = constrInvocation.typeArgList.fold("")(_.getText) val args = constrInvocation.args.fold(unspecifiedParameters) { as => if (as.getArgsCount == 0) unspecifiedParameters else if (as.getArgsCount > 1) tooManyArguments else as.getText } val exprText = s"_root_.scala.Array.ofDim$typeArgs$args" val expr = createExpressionWithContextFromText(exprText, templ.getContext, templ) evaluatorFor(expr) case Some(clazz) if isScalaSynthetic(clazz) => // Synthetic classes cannot be instantiated with `new`, unless the class is `scala.AnyRef`. if (clazz.qualifiedName == "scala.AnyRef") { val exprText = "new _root_.java.lang.Object()" val expr = createExpressionWithContextFromText(exprText, templ.getContext, templ) evaluatorFor(expr) } else { val modifier = if (clazz.qualifiedName == "scala.Singleton") "trait" else "class" throw EvaluationException( ScalaBundle.message("new.synthetic.instantiation", modifier, clazz.name)) } case Some(clazz) => val jvmName = DebuggerUtil.getClassJVMName(clazz) val typeEvaluator = new ScalaTypeEvaluator(jvmName) val argumentEvaluators = constructorArgumentsEvaluators(templ, constrInvocation, clazz) constrInvocation.reference.map(_.resolve()) match { case Some(named: PsiNamedElement) => val signature = DebuggerUtil.constructorSignature(named) ScalaMethodEvaluator(typeEvaluator, "<init>", signature, argumentEvaluators) case _ => ScalaMethodEvaluator(typeEvaluator, "<init>", null, argumentEvaluators) } case _ => throw EvaluationException(ScalaBundle.message("new.expression.without.class.reference")) } case None => throw EvaluationException(ScalaBundle.message("new.expression.without.constructor.call")) } case _ => throw EvaluationException(ScalaBundle.message("new.expression.without.template.parents")) } } def constructorArgumentsEvaluators(newTd: ScNewTemplateDefinition, constrInvocation: ScConstructorInvocation, clazz: PsiClass): Seq[Evaluator] = { val constrDef = constrInvocation.reference match { case Some(ResolvesTo(elem)) => elem case _ => throw EvaluationException(ScalaBundle.message("could.not.resolve.constructor")) } val explicitArgs = constrInvocation.arguments.flatMap(_.exprs) val explEvaluators = for { arg <- explicitArgs } yield { val eval = evaluatorFor(arg) val param = ScalaPsiUtil.parameterOf(arg).flatMap(_.psiParam) if (param.exists(!isOfPrimitiveType(_))) boxEvaluator(eval) else eval } constrDef match { case scMethod: ScMethodLike => val 
scClass = scMethod.containingClass.asInstanceOf[ScClass] val containingClass = getContextClass(scClass) val implicitParams = scMethod.parameterList.params.filter(_.isImplicitParameter) val implicitsEvals = for { p <- implicitParams } yield { val eval = implicitArgEvaluator(scMethod, p, constrInvocation) if (isOfPrimitiveType(p)) eval else boxEvaluator(eval) } val (outerClass, iters) = findContextClass(e => e == null || e == containingClass) val outerThis = outerClass match { case obj: ScObject if isStable(obj) => None case null => None case _ => Some(new ScalaThisEvaluator(iters)) } val locals = DebuggerUtil.localParamsForConstructor(scClass) outerThis ++: explEvaluators ++: implicitsEvals ++: locals.map(fromLocalArgEvaluator) case _ => explEvaluators } } def fromLocalArgEvaluator(local: ScTypedDefinition): Evaluator = { val name = local.asInstanceOf[PsiNamedElement].name val elemAt = position.getElementAt val ref = createExpressionWithContextFromText(name, elemAt, elemAt) val refEval = evaluatorFor(ref) FromLocalArgEvaluator(refEval) } def expressionFromTextEvaluator(string: String, context: PsiElement): Evaluator = { val expr = createExpressionWithContextFromText(string, context.getContext, context) evaluatorFor(expr) } def localLazyValEvaluator(named: PsiNamedElement): Evaluator = { val name = named.name val localRefName = s"$name$$lzy" val localRefEval = new ScalaLocalVariableEvaluator(localRefName, fileName) val lzyIndex = lazyValIndex(named) val bitmapName = "bitmap$" + (lzyIndex / 8) val bitmapEval = new ScalaLocalVariableEvaluator(bitmapName, fileName) val localFunIndex = localFunctionIndex(named) val methodName = s"$name$$$localFunIndex" ScalaMethodEvaluator(new ScalaThisEvaluator(), methodName, null, Seq(localRefEval, bitmapEval)) } def ifStmtEvaluator(stmt: ScIf): Evaluator = { val condEvaluator = stmt.condition match { case Some(cond) => evaluatorFor(cond) case None => throw EvaluationException(ScalaBundle.message("if.statement.without.condition")) } val ifBranch = stmt.thenExpression match { case Some(th) => evaluatorFor(th) case None => throw EvaluationException(ScalaBundle.message("if.statement.without.if.branch")) } val elseBranch = stmt.elseExpression.map(evaluatorFor(_)) new ScalaIfEvaluator(condEvaluator, ifBranch, elseBranch) } def literalEvaluator(literal: ScLiteral): Evaluator = literal match { case interpolated: ScInterpolatedStringLiteral => interpolated.desugaredExpression .fold(ScalaLiteralEvaluator(literal, literal.getValue): Evaluator) { case (_, call) => evaluatorFor(call) } case _ => literal.getValue match { case symbol: Symbol => val expr = createExpressionFromText( s"""Symbol("${symbol.name}")""", literal.getContext ) evaluatorFor(expr) case value => ScalaLiteralEvaluator(literal, value) } } def whileStmtEvaluator(ws: ScWhile): Evaluator = { val condEvaluator = ws.condition match { case Some(cond) => evaluatorFor(cond) case None => throw EvaluationException(ScalaBundle.message("while.statement.without.condition")) } val iterationEvaluator = ws.expression match { case Some(body) => evaluatorFor(body) case None => throw EvaluationException(ScalaBundle.message("while.statement.without.body")) } val whileStatementEvaluator = new WhileStatementEvaluator(condEvaluator, iterationEvaluator, null) new BlockStatementEvaluator(Array(whileStatementEvaluator, UnitEvaluator)) } def doStmtEvaluator(doSt: ScDo): Evaluator = { val condEvaluator = doSt.condition match { case Some(cond) => evaluatorFor(cond) case None => throw 
EvaluationException(ScalaBundle.message("do.statement.without.condition")) } val bodyEvaluator = doSt.body match { case Some(body) => evaluatorFor(body) case None => throw EvaluationException(ScalaBundle.message("do.statement.without.body")) } val whileStatementEvaluator = new WhileStatementEvaluator(condEvaluator, bodyEvaluator, null) new BlockStatementEvaluator(Array(bodyEvaluator, whileStatementEvaluator, UnitEvaluator)) } def scMethodCallEvaluator(methodCall: ScMethodCall): Evaluator = { def applyCall(invokedText: String, argsText: String) = { val newExprText = s"($invokedText).apply$argsText" createExpressionWithContextFromText(newExprText, methodCall.getContext, methodCall) } @tailrec def collectArgumentsAndBuildEvaluator(call: ScMethodCall, collected: Seq[ScExpression] = Seq.empty, tailString: String = "", matchedParameters: Map[Parameter, Seq[ScExpression]] = Map.empty): Evaluator = { if (call.isApplyOrUpdateCall) { if (!call.isUpdateCall) { val expr = applyCall(call.getInvokedExpr.getText, call.args.getText + tailString) return evaluatorFor(expr) } else { //should be handled on assignment throw new NeedCompilationException(ScalaBundle.message("update.method.is.not.supported")) } } val message = ScalaBundle.message("cannot.evaluate.method", call.getText) import MethodInvocation.matchedParametersMap call match { case ScMethodCall(_: ScReferenceExpression, argumentExpressions) => methodCallEvaluator(methodCall, argumentExpressions ++ collected, matchedParameters ++ matchedParametersMap(call)) case ScMethodCall(newCall: ScMethodCall, argumentExpressions) => collectArgumentsAndBuildEvaluator(newCall, argumentExpressions ++ collected, call.args.getText + tailString, matchedParameters ++ matchedParametersMap(call)) case ScMethodCall(ScGenericCall(ScReferenceExpression(_: PsiMethod), _), argumentExpressions) => methodCallEvaluator(methodCall, argumentExpressions ++ collected, matchedParameters ++ matchedParametersMap(call)) case ScMethodCall(gen@ScGenericCall(ref, _), _) => ref.`type`().getOrAny match { //isApplyOrUpdateCall does not work for generic calls case ExtractClass(psiClass) if psiClass.findMethodsByName("apply", true).nonEmpty => val typeArgsText = gen.typeArgs.getText.mkString val expr = applyCall(ref.getText, s"$typeArgsText${call.args.getText}$tailString") evaluatorFor(expr) case _ => throw EvaluationException(message) } case _ => throw EvaluationException(message) } } methodCall match { case hasDeepestInvokedReference(ScReferenceExpression.withQualifier(implicitlyConvertedTo(expr))) => val copy = methodCall.copy().asInstanceOf[ScMethodCall] copy match { case hasDeepestInvokedReference(ScReferenceExpression.withQualifier(q)) => q.replaceExpression(expr, removeParenthesis = false) evaluatorFor(copy) case _ => val message = ScalaBundle.message("method.call.implicitly.converted.qualifier", methodCall.getText) throw EvaluationException(message) } case _ => //todo: handle partially applied functions collectArgumentsAndBuildEvaluator(methodCall) } } def infixExpressionEvaluator(infix: ScInfixExpr): Evaluator = { object isUpdate { private val Regex = "(.+)=$".r def unapply(operation: ScReferenceExpression): Option[String] = operation.refName match { case Regex(name) => operation.resolve() match { case named: PsiNamedElement if named.name == name => Some(name) case _ => None } case _ => None } } val newExpression = infix match { case ScInfixExpr.withAssoc(ElementText(baseText), isUpdate(operation), ElementText(argumentText)) => val exprText = s"$baseText = $baseText $operation 
$argumentText" createExpressionWithContextFromText(exprText, infix.getContext, infix) case _ => createEquivMethodCall(infix) } evaluatorFor(newExpression) } def blockExprEvaluator(block: ScBlock): Evaluator = { withNewSyntheticVariablesHolder { val evaluators = block.statements.filter(!_.is[ScImportStmt]).map(e => evaluatorFor(e)) new BlockStatementEvaluator(evaluators.toArray) } } def postfixExprEvaluator(p: ScPostfixExpr): Evaluator = { val equivRef = createEquivQualifiedReference(p) evaluatorFor(equivRef) } def prefixExprEvaluator(p: ScPrefixExpr): Evaluator = { val newExprText = s"(${p.operand.getText}).unary_${p.operation.refName}" val newExpr = createExpressionWithContextFromText(newExprText, p.getContext, p) evaluatorFor(newExpr) } def refExpressionEvaluator(ref: ScReferenceExpression): Evaluator = { ref.qualifier match { case Some(implicitlyConvertedTo(e)) => val copy = ref.copy().asInstanceOf[ScReferenceExpression] copy.qualifier.get.replaceExpression(e, removeParenthesis = false) evaluatorFor(copy) case _ => val resolve: PsiElement = ref.resolve() evaluatorForReferenceWithoutParameters(ref.qualifier, resolve, ref) } } def tupleEvaluator(tuple: ScTuple): Evaluator = { val exprText = "_root_.scala.Tuple" + tuple.exprs.length + tuple.exprs.map(_.getText).mkString("(", ", ", ")") val expr = createExpressionWithContextFromText(exprText, tuple.getContext, tuple) evaluatorFor(expr) } def valOrVarDefinitionEvaluator(pList: ScPatternList, expr: ScExpression): BlockStatementEvaluator = { val evaluatorsBuilder = Array.newBuilder[Evaluator] val exprEval = new ScalaCachingEvaluator(evaluatorFor(expr)) evaluatorsBuilder += exprEval for { pattern <- pList.patterns binding <- pattern.bindings } { val name = binding.name createSyntheticVariable(name) val leftEval = syntheticVariableEvaluator(name) val rightEval = evaluateSubpatternFromPattern(exprEval, pattern, binding) evaluatorsBuilder += new AssignmentEvaluator(leftEval, rightEval) } new BlockStatementEvaluator(evaluatorsBuilder.result()) } def variableDefinitionEvaluator(vd: ScVariableDefinition): Evaluator = { vd.expr match { case None => throw EvaluationException(ScalaBundle.message("variable.definition.needs.right.hand.side", vd.getText)) case Some(e) => valOrVarDefinitionEvaluator(vd.pList, e) } } def patternDefinitionEvaluator(pd: ScPatternDefinition) : Evaluator = { pd.expr match { case None => throw EvaluationException(ScalaBundle.message("value.definition.needs.right.hand.side", pd.getText)) case Some(e) => valOrVarDefinitionEvaluator(pd.pList, e) } } def postProcessExpressionEvaluator(expr: ScExpression, evaluator: Evaluator): Evaluator = { //boxing and unboxing actions def unbox(typeTo: String) = unaryEvaluator(unboxEvaluator(evaluator), typeTo) def unboxTo(valType: ValType) = valType match { case Int => unbox("toInteger") case Byte => unbox("toByte") case Long => unbox("toLong") case Boolean => unboxEvaluator(evaluator) case Float => unbox("toFloat") case Short => unbox("toShort") case Double => unbox("toDouble") case Char => unbox("toCharacter") case Unit => new BlockStatementEvaluator(Array(evaluator, unitEvaluator())) case _ => evaluator } def valueClassInstance(eval: Evaluator) = { expr match { case _: ScNewTemplateDefinition => eval case Typeable(_: ValType) => eval case Typeable(tp) => tp.tryExtractDesignatorSingleton match { case vc @ ValueClassType(inner) => valueClassInstanceEvaluator(eval, inner, vc) case _ => eval } case _ => eval } } expr.smartExpectedType() match { case Some(valType: ValType) => unboxTo(valType) case 
Some(tp @ ValueClassType.Param(cp)) => unwrapValueClass(evaluator, tp, cp) case Some(_) => // Here, value types are used as other types, so they have to be boxed. boxEvaluator(valueClassInstance(evaluator)) case None => valueClassInstance(evaluator) } } def classTagText(arg: ScType): String = { arg match { case Short => "_root_.scala.reflect.ClassTag.Short" case Byte => "_root_.scala.reflect.ClassTag.Byte" case Char => "_root_.scala.reflect.ClassTag.Char" case Int => "_root_.scala.reflect.ClassTag.Int" case Long => "_root_.scala.reflect.ClassTag.Long" case Float => "_root_.scala.reflect.ClassTag.Float" case Double => "_root_.scala.reflect.ClassTag.Double" case Boolean => "_root_.scala.reflect.ClassTag.Boolean" case Unit => "_root_.scala.reflect.ClassTag.Unit" case Any => "_root_.scala.reflect.ClassTag.Any" case AnyRef => "_root_.scala.reflect.ClassTag.AnyRef" case AnyVal => "_root_.scala.reflect.ClassTag.AnyVal" case Nothing => "_root_.scala.reflect.ClassTag.Nothing" case Null => "_root_.scala.reflect.ClassTag.Null" case Singleton => "_root_.scala.reflect.ClassTag.Object" case _ => val cls = arg.extractClass.fold("_root_.java.lang.Object")(_.qualifiedName) s"_root_.scala.reflect.ClassTag.apply(classOf[$cls])" } } def classManifestText(scType: ScType): String = { scType match { case Short => "_root_.scala.reflect.ClassManifest.Short" case Byte => "_root_.scala.reflect.ClassManifest.Byte" case Char => "_root_.scala.reflect.ClassManifest.Char" case Int => "_root_.scala.reflect.ClassManifest.Int" case Long => "_root_.scala.reflect.ClassManifest.Long" case Float => "_root_.scala.reflect.ClassManifest.Float" case Double => "_root_.scala.reflect.ClassManifest.Double" case Boolean => "_root_.scala.reflect.ClassManifest.Boolean" case Unit => "_root_.scala.reflect.ClassManifest.Unit" case Any => "_root_.scala.reflect.ClassManifest.Any" case AnyVal => "_root_.scala.reflect.ClassManifest.AnyVal" case Nothing => "_root_.scala.reflect.ClassManifest.Nothing" case Null => "_root_.scala.reflect.ClassManifest.Null" case Singleton => "_root_.scala.reflect.ClassManifest.Object" case JavaArrayType(argument) => "_root_.scala.reflect.ClassManifest.arrayType(" + classManifestText(argument) + ")" case ParameterizedType(ScDesignatorType(clazz: ScClass), Seq(arg)) if clazz.qualifiedName == "scala.Array" => "_root_.scala.reflect.ClassManifest.arrayType(" + classManifestText(arg) + ")" /*case ScParameterizedType(des, args) => ScType.extractClass(des, Option(expr.getProject)) match { case Some(clazz) => "_root_.scala.reflect.ClassManifest.classType(" + case _ => "null" }*/ //todo: case _ => scType.extractClass match { case Some(clss) => "_root_.scala.reflect.ClassManifest.classType(classOf[_root_." + clss.qualifiedName + "])" case _ => "_root_.scala.reflect.ClassManifest.classType(classOf[_root_.java.lang." 
+ "Object])" } } } def isOfPrimitiveType(param: PsiParameter): Boolean = param match { //todo specialized type parameters case p: ScParameter => p.`type`().getOrAny match { case ValueClassType(inner) => inner.isPrimitive case tp => tp.isPrimitive } case _: PsiParameter => val tp = param.getType import com.intellij.psi.PsiType._ Set[PsiType](BOOLEAN, INT, CHAR, DOUBLE, FLOAT, LONG, BYTE, SHORT).contains(tp) case _ => false } } object ScalaEvaluatorBuilderUtil { private val BOXES_RUN_TIME = new TypeEvaluator(JVMNameUtil.getJVMRawText("scala.runtime.BoxesRunTime")) private val BOXED_UNIT = new TypeEvaluator(JVMNameUtil.getJVMRawText("scala.runtime.BoxedUnit")) private val SCALA_RUN_TIME = new TypeEvaluator(JVMNameUtil.getJVMRawText("scala.runtime.ScalaRunTime")) private val SCALA_RUNTIME_STATICS = new TypeEvaluator(JVMNameUtil.getJVMRawText("scala.runtime.Statics")) private val ScalaSyntheticClasses: Set[String] = Set("scala.Any", "scala.AnyRef", "scala.AnyVal", "scala.Null", "scala.Nothing", "scala.Unit", "scala.Singleton") def isScalaSynthetic(psiClass: PsiClass): Boolean = ScalaSyntheticClasses(psiClass.qualifiedName) def boxEvaluator(eval: Evaluator): Evaluator = new ScalaBoxingEvaluator(eval) def boxed(evaluators: Evaluator*): Seq[Evaluator] = evaluators.map(boxEvaluator) def unboxEvaluator(eval: Evaluator): Evaluator = new UnBoxingEvaluator(eval) def notEvaluator(eval: Evaluator): Evaluator = { val rawText = JVMNameUtil.getJVMRawText("(Ljava/lang/Object;)Ljava/lang/Object;") unboxEvaluator(ScalaMethodEvaluator(BOXES_RUN_TIME, "takeNot", rawText, boxed(eval))) } def eqEvaluator(left: Evaluator, right: Evaluator): Evaluator = { new ScalaEqEvaluator(left, right) } def neEvaluator(left: Evaluator, right: Evaluator): Evaluator = { notEvaluator(eqEvaluator(left, right)) } def unitEvaluator(): Evaluator = { ScalaFieldEvaluator(BOXED_UNIT, "UNIT") } def unaryEvaluator(eval: Evaluator, boxesRunTimeName: String): Evaluator = { val rawText = JVMNameUtil.getJVMRawText("(Ljava/lang/Object;)Ljava/lang/Object;") unboxEvaluator(ScalaMethodEvaluator(BOXES_RUN_TIME, boxesRunTimeName, rawText, boxed(eval))) } def binaryEvaluator(left: Evaluator, right: Evaluator, boxesRunTimeName: String): Evaluator = { val rawText = JVMNameUtil.getJVMRawText("(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;") unboxEvaluator(ScalaMethodEvaluator(BOXES_RUN_TIME, boxesRunTimeName, rawText, boxed(left, right))) } object hasDeepestInvokedReference { @tailrec final def unapply(expr: ScExpression): Option[ScReferenceExpression] = { expr match { case call: ScMethodCall => unapply(call.deepestInvokedExpr) case genCall: ScGenericCall => unapply(genCall.referencedExpr) case ref: ScReferenceExpression => Some(ref) case _ => None } } } object implicitlyConvertedTo { def unapply(expr: ScExpression): Option[ScExpression] = { expr.implicitElement(fromUnderscore = true).collect { case function: ScFunction => function }.map { fun => val callText = s"${fun.name}(${expr.getText})" val newExprText = fun.containingClass match { case o: ScObject if isStable(o) => s"${o.qualifiedName}.$callText" case _: ScObject => //todo: It can cover many cases! 
throw EvaluationException(ScalaBundle.message("implicit.conversions.from.dependent.objects")) case _ => callText //from scope } createExpressionWithContextFromText(newExprText, expr.getContext, expr) } } } @tailrec final def isStable(o: ScObject): Boolean = { val context = PsiTreeUtil.getParentOfType(o, classOf[ScTemplateDefinition], classOf[ScExpression]) if (context == null) return true context match { case o: ScObject => isStable(o) case _ => false } } def getContextClass(elem: PsiElement, strict: Boolean = true): PsiElement = { if (!strict && isGenerateClass(elem)) elem else elem.contexts.find(isGenerateClass).orNull } def isGenerateClass(elem: PsiElement): Boolean = isGenerateNonAnonfunClass(elem) || hasTopLevelMembers(elem) || isGenerateAnonfun(elem) private def isGenerateAnonfun(elem: PsiElement): Boolean = if (isCompiledWithIndyLambdas(elem.getContainingFile)) isPartialFunction(elem) || isAnonfunInsideSuperCall(elem) else isGenerateAnonfun211(elem) def isGenerateNonAnonfunClass(elem: PsiElement): Boolean = { elem match { case newTd: ScNewTemplateDefinition if !DebuggerUtil.generatesAnonClass(newTd) => false case _: PsiClass => true case _ => false } } def isAnonfunInsideSuperCall(elem: PsiElement): Boolean = { def isInsideSuperCall(td: ScTypeDefinition) = { val extBlock = Option(td).map(_.extendsBlock).orNull PsiTreeUtil.getParentOfType(elem, classOf[ScEarlyDefinitions], classOf[ScConstructorInvocation]) match { case ed: ScEarlyDefinitions if ed.getParent == extBlock => true case c: ScConstructorInvocation if c.getParent.getParent == extBlock => true case _ => false } } val containingClass = PsiTreeUtil.getParentOfType(elem, classOf[ScTypeDefinition]) isGenerateAnonfun211(elem) && isInsideSuperCall(containingClass) } def isGenerateAnonfun211(elem: PsiElement): Boolean = { @CachedInUserData(elem, BlockModificationTracker(elem)) def isAnonfunCached: Boolean = { elem match { case e: ScExpression if ScUnderScoreSectionUtil.underscores(e).nonEmpty => true case b: ScBlock if b.isAnonymousFunction => false //handled in isGenerateAnonfunSimple case e: ScExpression if ScalaPsiUtil.isByNameArgument(e) || ScalaPsiUtil.isArgumentOfFunctionType(e) => true case ScalaPsiUtil.MethodValue(_) => true case ChildOf(argExprs: ScArgumentExprList) && InsideAsync(call) if call.args == argExprs => true case _ => false } } def isGenerateAnonfunWithCache: Boolean = { if (elem == null || !elem.isValid || DumbService.isDumb(elem.getProject)) false else isAnonfunCached } def isGenerateAnonfunSimple: Boolean = { elem match { case _: ScFunctionExpr => true case (_: ScExpression) childOf (_: ScFor) => true case (_: ScGuard) childOf (_: ScEnumerators) => true case (g: ScGenerator) childOf (enums: ScEnumerators) if !enums.generators.headOption.contains(g) => true case _: ScForBinding => true case _ => false } } isGenerateAnonfunSimple || isPartialFunction(elem) || isGenerateAnonfunWithCache } def isPartialFunction(elem: PsiElement): Boolean = elem match { case (_: ScCaseClauses) childOf (b: ScBlockExpr) if b.isAnonymousFunction => true case _ => false } def anonClassCount(elem: PsiElement): Int = { //todo: non irrefutable patterns? 
elem match { case (_: ScExpression) childOf (f: ScFor) => f.enumerators.fold(1)(e => e.generators.length) case (e: ScEnumerator) childOf (enums: ScEnumerators) => enums.children.takeWhile(_ != e).count(_.isInstanceOf[ScGenerator]) case _ => 1 } } private def localFunctionIndex(named: PsiNamedElement): Int = { elementsWithSameNameIndex(named, { case f: ScFunction if f.isLocal => true case ScalaPsiUtil.inNameContext(LazyVal(_)) => true case _ => false }) } private def lazyValIndex(named: PsiNamedElement): Int = { elementsWithSameNameIndex(named, { case ScalaPsiUtil.inNameContext(LazyVal(_)) => true case _ => false }) } private def defaultParameterMethodName(method: ScMethodLike, paramIndex: Int): String = { method match { case fun: ScFunction if !fun.isConstructor => def hasDefaultParamAtIndex(f: ScFunction, paramIndex: Int): Boolean = { val paramAtIndex = f.effectiveParameterClauses .flatMap(_.effectiveParameters) .lift(paramIndex) paramAtIndex.exists(_.isDefaultParam) } def localFunctionSuffix: String = { if (!fun.isLocal) "" else "$" + elementsWithSameNameIndex(fun, { case f: ScFunction if f.isLocal && hasDefaultParamAtIndex(f, paramIndex) => true case _ => false }) } fun.name + "$default$" + (paramIndex + 1) + localFunctionSuffix case _ if method.isConstructor => "$lessinit$greater$default$" + (paramIndex + 1) + "()" } } private def elementsWithSameNameIndex(named: PsiNamedElement, condition: PsiElement => Boolean): Int = { val containingClass = getContextClass(named) if (containingClass == null) return -1 val name = named.name val traverser = SyntaxTraverser.psiTraverser(containingClass).forceIgnore { case `containingClass` => false case elem if isGenerateClass(elem) => true case _ => false } val sameNameCondition: Condition[PsiNamedElement] = { n => n.name == name && condition(n) } val traversal = if (isAtLeast212(named)) traverser.postOrderDfsTraversal() else traverser.preOrderDfsTraversal() val sameNameElements = traversal .filter(classOf[PsiNamedElement]) .filter(sameNameCondition) sameNameElements.indexOf(_ == named) + 1 } def traitImplementation(elem: PsiElement): Option[JVMName] = { val clazz = getContextClass(elem) clazz match { case t: ScTrait => Some(DebuggerUtil.getClassJVMName(t, withPostfix = true)) case _ => None } } def isNotUsedEnumerator(named: PsiNamedElement, place: PsiElement): Boolean = { named match { case ScalaPsiUtil.inNameContext(enum @ (_: ScForBinding | _: ScGenerator)) => enum.getParent.getParent match { case ScFor(enums, body) => enums.namings.map(_.pattern) match { case Seq(_: ScReferencePattern) => return false //can always evaluate from single simple generator case _ => } def insideBody = PsiTreeUtil.isAncestor(body, place, false) def isNotUsed = ReferencesSearch.search(named, new LocalSearchScope(body)).findFirst() == null insideBody && isNotUsed case _ => false } case _ => false } } object isInsideValueClass { def unapply(elem: PsiElement): Option[ScClass] = { getContextClass(elem) match { case c: ScClass if ValueClassType.isValueClass(c) => Some(c) case _ => None } } } object isInsideLocalFunction { def unapply(elem: PsiElement): Option[ScFunction] = { @tailrec def inner(element: PsiElement): Option[ScFunction] = { element match { case null => None case fun: ScFunction if fun.isLocal && !fun.parameters.exists(param => PsiTreeUtil.isAncestor(param, elem, false)) => Some(fun) case other if other.getContext != null => inner(other.getContext) case _ => None } } inner(elem) } } object traitMethod { def unapply(r: ScalaResolveResult): Option[(ScTrait, 
ScFunctionDefinition)] = { r.getElement match { case fun: ScFunctionDefinition => fun.getContainingClassLoose match { case tr: ScTrait => Some(tr, fun) case _ => None } case _ => None } } } object privateThisField { def unapply(elem: PsiElement): Option[ScNamedElement] = { elem match { case c: ScClassParameter if c.isPrivateThis => Some(c) case (bp: ScBindingPattern) && ScalaPsiUtil.inNameContext(v @ (_: ScVariable | _: ScValue)) => v match { case mo: ScModifierListOwner if mo.getModifierList.accessModifier.exists(am => am.isPrivate && am.isThis) => Some(bp) case _ => None } case _ => None } } } }
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/debugger/evaluation/ScalaEvaluatorBuilderUtil.scala
Scala
apache-2.0
81,707
package actors import actors.DisplayOrderActor.{DisplayOrderCommand, OrderDisplayedEvent} import akka.actor.{Actor, Props} import domain.DataWithPaymentMethod class DisplayOrderActor extends Actor { override def receive = { case DisplayOrderCommand(order) => println("Order: " + order + " processed") sender ! OrderDisplayedEvent } } object DisplayOrderActor { def props: Props = Props[DisplayOrderActor] sealed trait DisplayOrderActorCommand case class DisplayOrderCommand(order: DataWithPaymentMethod) extends DisplayOrderActorCommand sealed trait DisplayOrderActorEvent case object OrderDisplayedEvent extends DisplayOrderActorEvent }
kkrzys/eShop
eShop-core/src/main/scala/actors/DisplayOrderActor.scala
Scala
apache-2.0
673
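A minimal usage sketch for the DisplayOrderActor above. The actor system name is arbitrary, and the DataWithPaymentMethod value is left unimplemented because its constructor is not part of this file:

import actors.DisplayOrderActor
import actors.DisplayOrderActor.{DisplayOrderCommand, OrderDisplayedEvent}
import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import domain.DataWithPaymentMethod
import scala.concurrent.duration._

object DisplayOrderExample extends App {
  implicit val system: ActorSystem = ActorSystem("eShop-example") // illustrative name
  implicit val timeout: Timeout = Timeout(3.seconds)
  import system.dispatcher

  val displayOrder = system.actorOf(DisplayOrderActor.props, "display-order")

  // A real DataWithPaymentMethod would be built by the rest of the eShop
  // domain model; it is deliberately left unimplemented here.
  val order: DataWithPaymentMethod = ???

  (displayOrder ? DisplayOrderCommand(order)).foreach {
    case OrderDisplayedEvent =>
      println("order was displayed")
      system.terminate()
  }
}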
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.planner import org.apache.flink.api.common.functions.MapFunction import org.apache.flink.api.common.typeinfo.TypeInformation import org.apache.flink.api.java.typeutils.{GenericTypeInfo, PojoTypeInfo, TupleTypeInfoBase} import org.apache.flink.table.api.{TableConfig, TableException, TableSchema} import org.apache.flink.table.codegen.{FunctionCodeGenerator, GeneratedFunction} import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo import org.apache.flink.types.Row object Conversions { /** * Utility method for generating converter [[MapFunction]] that converts from * given input [[TypeInformation]] of type [[Row]] to requested type, based on a * logical [[TableSchema]] of the input type. */ def generateRowConverterFunction[OUT]( physicalInputType: TypeInformation[Row], logicalInputSchema: TableSchema, requestedOutputType: TypeInformation[OUT], functionName: String, config: TableConfig) : Option[GeneratedFunction[MapFunction[Row, OUT], OUT]] = { // validate that at least the field types of physical and logical type match // we do that here to make sure that plan translation was correct val typeInfo = logicalInputSchema.toRowType if (typeInfo != physicalInputType) { throw new TableException( s"The field types of physical and logical row types do not match. " + s"Physical type is [$typeInfo], Logical type is [$physicalInputType]. " + s"This is a bug and should not happen. Please file an issue.") } // generic row needs no conversion if (requestedOutputType.isInstanceOf[GenericTypeInfo[_]] && requestedOutputType.getTypeClass == classOf[Row]) { return None } val fieldTypes = logicalInputSchema.getFieldTypes val fieldNames = logicalInputSchema.getFieldNames // check for valid type info if (requestedOutputType.getArity != fieldTypes.length) { throw new TableException( s"Arity [${fieldTypes.length}] of result [$fieldTypes] does not match " + s"the number[${requestedOutputType.getArity}] of requested type [$requestedOutputType].") } // check requested types def validateFieldType(fieldType: TypeInformation[_]): Unit = fieldType match { case _: TimeIndicatorTypeInfo => throw new TableException("The time indicator type is an internal type only.") case _ => // ok } requestedOutputType match { // POJO type requested case pt: PojoTypeInfo[_] => fieldNames.zip(fieldTypes) foreach { case (fName, fType) => val pojoIdx = pt.getFieldIndex(fName) if (pojoIdx < 0) { throw new TableException(s"POJO does not define field name: $fName") } val requestedTypeInfo = pt.getTypeAt(pojoIdx) validateFieldType(requestedTypeInfo) if (fType != requestedTypeInfo) { throw new TableException(s"Result field does not match requested type. 
" + s"Requested: $requestedTypeInfo; Actual: $fType") } } // Tuple/Case class/Row type requested case tt: TupleTypeInfoBase[_] => fieldTypes.zipWithIndex foreach { case (fieldTypeInfo, i) => val requestedTypeInfo = tt.getTypeAt(i) validateFieldType(requestedTypeInfo) if (fieldTypeInfo != requestedTypeInfo) { throw new TableException(s"Result field does not match requested type. " + s"Requested: $requestedTypeInfo; Actual: $fieldTypeInfo") } } // atomic type requested case t: TypeInformation[_] => if (fieldTypes.size != 1) { throw new TableException(s"Requested result type is an atomic type but " + s"result[$fieldTypes] has more or less than a single field.") } val requestedTypeInfo = fieldTypes.head validateFieldType(requestedTypeInfo) if (requestedTypeInfo != t) { throw new TableException(s"Result field does not match requested type. " + s"Requested: $t; Actual: $requestedTypeInfo") } case _ => throw new TableException(s"Unsupported result type: $requestedOutputType") } // code generate MapFunction val generator = new FunctionCodeGenerator( config, false, physicalInputType, None, None) val conversion = generator.generateConverterResultExpression( requestedOutputType, fieldNames) val body = s""" |${conversion.code} |return ${conversion.resultTerm}; |""".stripMargin val generated = generator.generateFunction( functionName, classOf[MapFunction[Row, OUT]], body, requestedOutputType) Some(generated) } }
jinglining/flink
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/Conversions.scala
Scala
apache-2.0
5,657
/* * Copyright 2015 Otto (GmbH & Co KG) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.flinkspector.core.collection import io.flinkspector.core.{CoreSpec, Order} import scala.collection.JavaConversions._ class MatcherBuilderSpec extends CoreSpec { "The MatcherBuilder" should "check for all per default" in { val builder = new MatcherBuilder[Int](List(1, 2, 3, 4)) builder.matchesSafely(List(1, 2, 3, 4)) shouldBe true builder.matchesSafely(List(1, 2, 3)) shouldBe false builder.matchesSafely(List(1, 2, 3, 4, 5)) shouldBe true } it should "check for all if only was not defined" in { val builder = new MatcherBuilder[Int](List(1, 2, 3, 4)) builder.sameFrequency() builder.matchesSafely(List(1, 2, 3, 4)) shouldBe true builder.matchesSafely(List(1, 2, 3)) shouldBe false builder.matchesSafely(List(1, 2, 3, 4, 5)) shouldBe true } it should "check for only if only was defined" in { val onlyBuilder = new MatcherBuilder[Int](List(1, 2, 3, 4)).only() onlyBuilder.matchesSafely(List(1, 2, 3, 4)) shouldBe true onlyBuilder.matchesSafely(List(1, 2, 3)) shouldBe false onlyBuilder.matchesSafely(List(1, 2, 3, 4, 5)) shouldBe false } it should "check for only if only was defined in combination" in { val onlyBuilder = new MatcherBuilder[Int](List(1, 2, 3, 4)) .only() .sameFrequency() onlyBuilder.matchesSafely(List(1, 2, 3, 4)) shouldBe true onlyBuilder.matchesSafely(List(1, 2, 3)) shouldBe false onlyBuilder.matchesSafely(List(1, 2, 3, 4, 5)) shouldBe false } it should "check for order" in { val builder = new MatcherBuilder[Int](List(1, 2, 3, 4)) builder.inOrder(Order.NONSTRICT).all() builder.matchesSafely(List(1, 2, 3, 4)) shouldBe true builder.matchesSafely(List(1, 2, 4, 3)) shouldBe false } it should "check for partial order" in { val fromToBuilder = new MatcherBuilder[Int](List(1, 2, 3, 4)) fromToBuilder.inOrder(Order.NONSTRICT).from(1).to(2) fromToBuilder.matchesSafely(List(1, 2, 3, 4)) shouldBe true fromToBuilder.matchesSafely(List(1, 3, 2, 4)) shouldBe false val indicesBuilder = new MatcherBuilder[Int](List(1, 2, 3, 4)) indicesBuilder.inOrder(Order.NONSTRICT).indices(0, 3) indicesBuilder.matchesSafely(List(1, 2, 3, 4)) shouldBe true indicesBuilder.matchesSafely(List(4, 3, 2, 1)) shouldBe false } it should "check for order in combination" in { val builder = new MatcherBuilder[Int](List(1, 2, 3, 4)) builder.only().inOrder(Order.NONSTRICT).all() builder.matchesSafely(List(1, 2, 3, 4)) shouldBe true builder.matchesSafely(List(1, 2, 3, 4, 5)) shouldBe false builder.matchesSafely(List(1, 2, 4, 3)) shouldBe false } it should "check for series" in { val builder = new MatcherBuilder[Int](List(1, 2, 3, 4)) builder.inOrder(Order.STRICT).all() builder.matchesSafely(List(1, 2, 3, 4)) shouldBe true builder.matchesSafely(List(1, 2, 4, 3)) shouldBe false } it should "check for partial series" in { val fromToBuilder = new MatcherBuilder[Int](List(1, 2, 3, 4)) fromToBuilder.inOrder(Order.STRICT).from(1).to(2) fromToBuilder.matchesSafely(List(1, 2, 3, 4)) shouldBe true fromToBuilder.matchesSafely(List(1, 3, 2, 4)) shouldBe false 
val indicesBuilder = new MatcherBuilder[Int](List(1, 2, 3, 4)) indicesBuilder.inOrder(Order.STRICT).indices(0, 3) indicesBuilder.matchesSafely(List(1, 4, 2, 3)) shouldBe true indicesBuilder.matchesSafely(List(4, 2, 3, 1)) shouldBe false } it should "check for two order statements" in { val combinedOrder = new MatcherBuilder[Int](List(1, 2, 3, 4)) combinedOrder.inOrder(Order.STRICT).from(1).to(2) combinedOrder.inOrder(Order.NONSTRICT).indices(0, 3) combinedOrder.matchesSafely(List(1, 2, 3, 4)) shouldBe true combinedOrder.matchesSafely(List(1, 3, 2, 4)) shouldBe false combinedOrder.matchesSafely(List(1, 4, 2, 3)) shouldBe true combinedOrder.matchesSafely(List(4, 2, 3, 1)) shouldBe false } it should "check for series in combination" in { val builder = new MatcherBuilder[Int](List(1, 2, 3, 4)) builder.only().inOrder(Order.STRICT).all() builder.matchesSafely(List(1, 2, 3, 4)) shouldBe true builder.matchesSafely(List(1, 2, 3, 4, 5)) shouldBe false builder.matchesSafely(List(1, 2, 4, 3)) shouldBe false } it should "check for duplicates" in { val builder = new MatcherBuilder[Int](List(1, 2, 3, 4)) builder.sameFrequency() builder.matchesSafely(List(1, 2, 3, 4, 5)) shouldBe true builder.matchesSafely(List(1, 2, 3, 4, 4)) shouldBe false } it should "check for duplicates in combination" in { val builder = new MatcherBuilder[Int](List(1, 2, 3, 4)) builder.sameFrequency().only() builder.matchesSafely(List(1, 2, 3, 4)) shouldBe true builder.matchesSafely(List(1, 2, 3, 4, 5)) shouldBe false builder.matchesSafely(List(1, 2, 3, 4, 4)) shouldBe false } }
ottogroup/flink-spector
flinkspector-core/src/test/scala/io/flinkspector/core/collection/MatcherBuilderSpec.scala
Scala
apache-2.0
5,497
package org.awong.graphs

class EdgeWeightedGraph[V] {
  var nEdges: Int = 0
  val isDirected = false
  var adjacencyList = Map[V, Seq[WeightedEdge[V]]]()

  def nVertices: Int = vertices.size
  def vertices: Iterable[V] = adjacencyList.keySet

  def adj(v: V): Iterable[WeightedEdge[V]] = {
    adjacencyList.get(v) match {
      case Some(edges) => edges
      case None => Seq[WeightedEdge[V]]()
    }
  }

  def edges: Iterable[WeightedEdge[V]] = {
    for (vertex <- vertices; edge <- adj(vertex)) yield edge
  }

  protected def add(node: V, edge: WeightedEdge[V]): Unit = {
    adjacencyList = adjacencyList.get(node) match {
      case Some(edges) => adjacencyList + (node -> (edge +: edges))
      case None => adjacencyList + (node -> Seq[WeightedEdge[V]](edge))
    }
  }

  def addEdge(v: V, w: V, weight: Double): Unit = {
    add(WeightedEdge(v, w, weight))
  }

  def +(edge: WeightedEdge[V]): Unit = add(edge)

  def add(edge: WeightedEdge[V]): Unit = {
    val v = edge.either
    val w = edge.other(v)
    add(v, edge)
    add(w, edge)
    nEdges = nEdges + 1
  }
}

case class WeightedEdge[V](v: V, w: V, weight: Double) extends Ordered[WeightedEdge[V]] with WeightedEdgeLike[V] {
  def either: V = v

  def other(vertex: V) = {
    vertex match {
      case `v` => w
      case `w` => v
      case _ => throw new RuntimeException(s"${vertex} is not in edge")
    }
  }

  // use %s so that toString works for any vertex type V, not just integers
  override def toString(): String = {
    "%s - %s %.2f".format(v, w, weight)
  }

  // reverse ordering by weight: the heaviest edge compares as the smallest,
  // so sorting lists heavy edges first and a max-oriented PriorityQueue
  // dequeues the lightest edge first
  override def compareTo(that: WeightedEdge[V]): Int = {
    if (this.weight < that.weight) +1
    else if (this.weight > that.weight) -1
    else 0
  }
  override def compare(that: WeightedEdge[V]): Int = {
    compareTo(that)
  }
}

object EdgeWeightedGraph {
}
alanktwong/algorithms-scala
graphs/src/main/scala/org/awong/graphs/EdgeWeightedGraph.scala
Scala
mit
1,801
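A short, self-contained sketch of how the graph defined above can be used; the vertex labels and weights are arbitrary sample data:

import org.awong.graphs.{EdgeWeightedGraph, WeightedEdge}

object EdgeWeightedGraphExample extends App {
  val g = new EdgeWeightedGraph[String]
  g.addEdge("a", "b", 0.5)
  g.addEdge("b", "c", 1.5)
  g + WeightedEdge("a", "c", 2.0) // equivalent to g.add(...)

  println(s"${g.nVertices} vertices, ${g.nEdges} edges") // 3 vertices, 3 edges

  // adj lists every edge incident to a vertex, so an undirected edge
  // appears in the adjacency of both of its endpoints.
  for (e <- g.adj("a")) {
    val v = e.either
    println(s"$v - ${e.other(v)} (${e.weight})")
  }
}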
package forex import scalaz._ import Scalaz._ import scalaz.Validation.FlatMap._ import forex.{OrderTotal => OT} import forex.{ExchangeRateLookup => ERL} object OrderTotalConverter2 { def convert(rawCurrency: String, rawAmount: String): ValidationNel[String, OrderTotal] = { OT.parse(rawCurrency, rawAmount).flatMap(total => // a ERL.lookup(total.currency).toValidationNel.map((rate: Double) => // b OT(Currency.Eur, total.amount * rate))) // c } }
alexanderdean/Unified-Log-Processing
ch07/7.4/forex/src/main/scala/forex/OrderTotalConverter2.scala
Scala
apache-2.0
512
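A possible call site for the converter above, shown only as a sketch: it assumes OrderTotal.parse accepts raw currency/amount strings as in the signature used here, and the sample values are made up:

import forex.OrderTotalConverter2

object OrderTotalConverterExample extends App {
  // convert yields a scalaz ValidationNel: either an accumulated list of
  // error messages or an OrderTotal re-expressed in EUR.
  OrderTotalConverter2.convert("USD", "49.99").fold(
    errors => println("could not convert: " + errors.list.toList.mkString(", ")),
    total => println("converted total: " + total)
  )
}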
package composition import com.tzavellas.sse.guice.ScalaModule import email.{EmailData, EmailFlags, RetainEmailService} import models.{BusinessDetailsModel, ConfirmFormModel, EligibilityModel} import org.scalatest.mock.MockitoSugar import org.mockito.Matchers.any import org.mockito.Mockito.when import uk.gov.dvla.vehicles.presentation.common.clientsidesession.TrackingId import uk.gov.dvla.vehicles.presentation.common.model.VehicleAndKeeperDetailsModel final class TestEmailService extends ScalaModule with MockitoSugar { val stub = mock[RetainEmailService] when(stub.emailRequest( any[String], any[VehicleAndKeeperDetailsModel], any[EligibilityModel], any[EmailData], any[Option[ConfirmFormModel]], any[Option[BusinessDetailsModel]], any[EmailFlags], any[TrackingId] )(any[play.api.i18n.Lang])).thenReturn(None) def configure() = bind[RetainEmailService].toInstance(stub) }
dvla/vrm-retention-online
test/composition/TestEmailService.scala
Scala
mit
922
case class Link(url: String, occurences: String) object Link { def apply(arr: Array[String]): Link = { Link(arr(0), arr(1)) } }
softberries/ugproject
HTMLgenerator/src/main/scala/Link.scala
Scala
unlicense
136
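The positional apply above expects a two-element array of (url, occurences); a quick illustration with a made-up CSV-style line:

object LinkExample extends App {
  // The array would typically come from splitting a line of generator input.
  val fields = "https://example.com,12".split(",")
  val link = Link(fields)
  println(link.url + " occurred " + link.occurences + " times")
}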
package com.sksamuel.elastic4s.requests.searches.aggs.responses.bucket import com.sksamuel.elastic4s.requests.searches.aggs.responses.{AggBucket, AggSerde, BucketAggregation} case class KeyedDateRangeAggResult(name: String, buckets: Map[String, DateRangeBucket]) extends BucketAggregation object KeyedDateRangeAggResult { // type clash with `buckets` on apply method private[elastic4s] def fromData(name: String, data: Map[String, Any]): KeyedDateRangeAggResult = KeyedDateRangeAggResult( name, data("buckets").asInstanceOf[Map[String, Map[String, Any]]].mapValues(DateRangeBucket(_)).toMap ) } case class DateRangeBucket(from: Option[String], fromAsString: Option[String], to: Option[String], toAsString: Option[String], key: Option[String], override val docCount: Long, private[elastic4s] val data: Map[String, Any]) extends AggBucket object DateRangeBucket { private[elastic4s] def apply(map: Map[String, Any]): DateRangeBucket = DateRangeBucket( map.get("from").map(_.toString), map.get("from_as_string").map(_.toString), map.get("to").map(_.toString), map.get("to_as_string").map(_.toString), map.get("key").map(_.toString), map("doc_count").toString.toLong, map ) } case class DateRange(name: String, buckets: Seq[DateRangeBucket]) extends BucketAggregation object DateRange { implicit object DateRangeAggSerde extends AggSerde[DateRange] { override def read(name: String, data: Map[String, Any]): DateRange = apply(name, data) } private[elastic4s] def apply(name: String, data: Map[String, Any]): DateRange = DateRange( name, data("buckets").asInstanceOf[Seq[Map[String, Any]]].map(DateRangeBucket(_)) ) }
sksamuel/elastic4s
elastic4s-domain/src/main/scala/com/sksamuel/elastic4s/requests/searches/aggs/responses/bucket/daterange.scala
Scala
apache-2.0
1,862
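A small illustration of the serde defined above, fed a hand-built map shaped like the decoded aggregation JSON it expects; the keys and counts are invented for the example:

import com.sksamuel.elastic4s.requests.searches.aggs.responses.bucket.DateRange

object DateRangeParsingExample extends App {
  // Mirrors the decoded response shape: a "buckets" sequence of maps,
  // each carrying optional range bounds plus a doc_count.
  val data: Map[String, Any] = Map(
    "buckets" -> Seq(
      Map("key" -> "2020-01", "from_as_string" -> "2020-01-01", "doc_count" -> 42),
      Map("key" -> "2020-02", "from_as_string" -> "2020-02-01", "doc_count" -> 7)
    )
  )

  val agg: DateRange = DateRange.DateRangeAggSerde.read("sessions_per_month", data)
  agg.buckets.foreach(b => println(s"${b.key.getOrElse("?")} -> ${b.docCount}"))
}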
package epic.parser.kbest

import epic.trees._
import breeze.config.{Configuration, CommandLineParser, Help}
import java.io.{PrintWriter, File}
import breeze.util._
import epic.parser.Parser
import scala.collection.parallel.ForkJoinTaskSupport
import scala.concurrent.forkjoin.ForkJoinPool
import epic.trees.ProcessedTreebank
import epic.trees.TreeInstance
import epic.util.CacheBroker

object KBestParseTreebank {

  /**
   * The type of the parameters to read in via dlwh.epic.config
   */
  case class Params(treebank: ProcessedTreebank,
                    @Help(text="Path to write parses. Will write (train, dev, test)")
                    dir: File,
                    @Help(text="Size of kbest list. Default: 200")
                    k: Int = 200,
                    @Help(text="Cache information")
                    cache: CacheBroker,
                    @Help(text="Path to the parser file. Look in parsers/")
                    parser: File,
                    @Help(text="Should we evaluate on the test set? Or just the dev set?")
                    evalOnTest: Boolean = false,
                    @Help(text="Print this and exit.")
                    help: Boolean = false,
                    @Help(text="How many threads to parse with. Default is whatever Scala wants")
                    threads: Int = -1)

  def main(args: Array[String]) = {
    val params = CommandLineParser.readIn[Params](args)
    println("Command line arguments for recovery:\n" + Configuration.fromObject(params).toCommandLineString)

    println("Evaluating Parser...")
    implicit def cache = params.cache
    val parser = readObject[Parser[AnnotatedLabel,String]](params.parser)
    val kbest = KBestParser.cached(new AStarKBestParser(parser))(cache)

    params.dir.mkdirs()

    def parse(trainTrees: IndexedSeq[TreeInstance[AnnotatedLabel, String]], out: PrintWriter) = {
      val parred = trainTrees.par
      if(params.threads > 0)
        parred.tasksupport = new ForkJoinTaskSupport(new ForkJoinPool(params.threads))
      parred
        .map(ti => ti.words -> kbest.bestKParses(ti.words, params.k))
        .map{case (words,seq) => seq.map{case (tree, score) => Debinarizer.AnnotatedLabelDebinarizer(tree).render(words, newline = false) + " " + score}.mkString("\n")}
        .seq.foreach{str => out.println(str); out.println()}
    }

    parse(params.treebank.trainTrees, new PrintWriter(new File(params.dir, "train.kbest")))
    parse(params.treebank.devTrees, new PrintWriter(new File(params.dir, "dev.kbest")))
    parse(params.treebank.testTrees, new PrintWriter(new File(params.dir, "test.kbest")))
  }
}
maxim-rabinovich/epic
src/main/scala/epic/parser/kbest/KBestParseTreebank.scala
Scala
apache-2.0
2,629
package net.cucumbersome.rpgRoller.warhammer.combat import java.util.UUID import akka.actor.ActorRef import akka.http.scaladsl.model.ContentTypes import akka.http.scaladsl.server.Route import net.cucumbersome.RouteSpec import net.cucumbersome.rpgRoller.warhammer.combat.CombatController.{CombatIdGenerator, DefaultIdGenerator} import net.cucumbersome.rpgRoller.warhammer.combat.CombatJsonSerializer._ import net.cucumbersome.rpgRoller.warhammer.combat.domain._ import net.cucumbersome.rpgRoller.warhammer.infrastructure.CommandGateway import net.cucumbersome.rpgRoller.warhammer.infrastructure.repositories.{ActorRepository, InMemoryActorRepository} import net.cucumbersome.rpgRoller.warhammer.player.CombatActor import net.cucumbersome.test.MockedCombatIdGenerator import spray.json._ class CombatControllerSpec extends RouteSpec { "A combat controller" when { val expectedId = "myId:3" val generator = MockedCombatIdGenerator(expectedId) "initializing new combat" should { "initialize it without any actors" in { val gateway = buildGateway val repository = new InMemoryActorRepository(List()) val (service, route) = getRoute(gateway, repository, generator) Post("/combat").withEntity(ContentTypes.`application/json`, "{}") ~> route ~> check { responseAs[String].parseJson mustBe CombatPresenter(expectedId, Array()).toJson } } "initialize it with actors" in { val actors = random[CombatActor](2).toList val gateway = buildGateway val repository = new InMemoryActorRepository(actors) val (service, route) = getRoute(gateway, repository, generator) val requestBody = CreateCombatParameters(actors.map(_.id.data).toArray).toJson.compactPrint Post("/combat").withEntity(ContentTypes.`application/json`, requestBody) ~> route ~> check { val inCombatActorPresenters = actors.map(a => InCombatActor.buildFromCombatActor(a, idGenerator = mockedActorIdGenerator(generator))).map(InCombatActorPresenter.fromInCombatActor.get) val presenter = CombatPresenter(expectedId, inCombatActorPresenters.toArray) responseAs[String].parseJson mustBe presenter.toJson } } } "adding actor to an existing combat" should { "add actors if combat is there" in { val gateway = buildGateway val (actor1, actor2, actor3, actor4) = build4Actors val actors = List(actor1, actor2, actor3, actor4) val repository = new InMemoryActorRepository(List(actor1, actor2, actor3, actor4)) val requestBody = AddActorsToCombatParameters(Array(actor3.id.data, actor4.id.data)).toJson.compactPrint val (service, route) = getRoute(gateway, repository, generator) val combatId = futureValue(service.createCombat(CreateCombatParameters(List(actor1, actor2).map(_.id.data).toArray))).id Patch(s"/combat/$combatId/add-actors").withEntity(ContentTypes.`application/json`, requestBody) ~> route ~> check { val inCombatActorPresenters = actors.map(a => InCombatActor.buildFromCombatActor(a, idGenerator = mockedActorIdGenerator(generator))).map(InCombatActorPresenter.fromInCombatActor.get) val presenter = CombatPresenter(combatId, inCombatActorPresenters.toArray) responseAs[String].parseJson mustBe presenter.toJson } } } "removing actor from an existing combat" should { "remove actors if combat is there" in { val gateway = buildGateway val (actor1, actor2, actor3, actor4) = build4Actors val actors = List(actor1, actor2, actor3, actor4) val actorsWhichShouldBeKept = actors.take(2) val actorsToBeRemoved = actors.diff(actorsWhichShouldBeKept) val repository = new InMemoryActorRepository(List(actor1, actor2, actor3, actor4)) val (service, route) = getRoute(gateway, repository, DefaultIdGenerator) 
val createdCombat = futureValue(service.createCombat(CreateCombatParameters(actors.map(_.id.data).toArray))) val combatId = createdCombat.id val actorsToBeRemovedIds = actorsToBeRemoved.map(_.id.data).toArray val actorsWhichShouldBeKeptIds = actorsWhichShouldBeKept.map(_.id.data) val createdCombatActors = createdCombat.actors.toList val inCombatActorsWhichShouldBeKept =createdCombatActors.filter(actor => actorsWhichShouldBeKeptIds.contains(actor.actor.id)) val inCombatActorsWhichShouldBeRemoved = createdCombatActors.filter(actor => actorsToBeRemovedIds.contains(actor.actor.id)) val requestBody = RemoveActorsFromCombatParameters(inCombatActorsWhichShouldBeRemoved.map(_.id).toArray).toJson.compactPrint Patch(s"/combat/$combatId/remove-actors").withEntity(ContentTypes.`application/json`, requestBody) ~> route ~> check { val inCombatActorPresenters = inCombatActorsWhichShouldBeKept val presenter = CombatPresenter(combatId, inCombatActorPresenters.toArray) responseAs[String].parseJson mustBe presenter.toJson } } } } def getRoute(handler: ActorRef, repo: ActorRepository, idGenerator: CombatIdGenerator): (CombatService, Route) = { val service = CombatInitializer.initializeCombatService(handler, repo, idGenerator) val route = new CombatController(service).route (service, route) } def build4Actors: (CombatActor, CombatActor, CombatActor, CombatActor) = { random[CombatActor](4) match { case a1 :: a2 :: a3 :: a4 :: Nil => (a1, a2, a3, a4) } } def buildGateway: ActorRef = { val id = UUID.randomUUID().toString val combatHandler = system.actorOf(CombatHandler.props(id), id) system.actorOf(CommandGateway.props(combatHandler)) } def mockedActorIdGenerator(idGenerator: MockedCombatIdGenerator): () => InCombatActor.Id = () => InCombatActor.Id(idGenerator.generateId) }
CucumisSativus/rpgRollerBackend
src/test/scala/net/cucumbersome/rpgRoller/warhammer/combat/CombatControllerSpec.scala
Scala
mit
5,894
/* * Copyright 2019 Spotify AB. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package com.spotify.scio.testing import java.lang.{Iterable => JIterable} import java.util.{Map => JMap} import com.spotify.scio.coders.Coder import com.spotify.scio.values.SCollection import com.twitter.chill.Externalizer import org.apache.beam.sdk.testing.PAssert import org.apache.beam.sdk.testing.PAssert.{IterableAssert, SingletonAssert} import org.apache.beam.sdk.transforms.SerializableFunction import org.apache.beam.sdk.transforms.windowing.BoundedWindow import org.apache.beam.sdk.util.CoderUtils import org.scalatest.matchers.{MatchResult, Matcher} import org.{hamcrest => h} import org.hamcrest.Matchers import org.hamcrest.MatcherAssert.assertThat import scala.jdk.CollectionConverters._ import scala.reflect.ClassTag import com.twitter.chill.ClosureCleaner import cats.kernel.Eq import org.apache.beam.sdk.testing.SerializableMatchers import com.spotify.scio.coders.CoderMaterializer import com.spotify.scio.ScioContext import org.apache.beam.sdk.testing.SerializableMatcher final private case class TestWrapper[T: Eq](get: T) { override def toString: String = Pretty.printer.apply(get).render override def equals(other: Any): Boolean = other match { case TestWrapper(o: T @unchecked) => Eq[T].eqv(get, o) case o => get.equals(o) } } private object TestWrapper { def wrap[T: Coder: Eq](coll: SCollection[T]): SCollection[TestWrapper[T]] = coll.map(t => TestWrapper(t)) def wrap[T: Eq](coll: JIterable[T]): JIterable[TestWrapper[T]] = coll.asScala.map(t => TestWrapper(t)).asJava implicit def testWrapperCoder[T: Coder: Eq]: Coder[TestWrapper[T]] = Coder.xmap(Coder[T])(t => TestWrapper(t), w => w.get) } private object ScioMatchers { /** Create a hamcrest matcher that can be serialized using a Coder[T]. */ private def supplierFromCoder[A: Coder, B](@transient a: A, @transient context: ScioContext)( builder: A => B ) = { val coder = CoderMaterializer.beam(context, Coder[A]) val encoded = CoderUtils.encodeToByteArray(coder, a) new SerializableMatchers.SerializableSupplier[B] { def a = CoderUtils.decodeFromByteArray(coder, encoded) def get() = builder(a) } } /** * This is equivalent to [[org.apache.beam.sdk.testing.PAssert#containsInAnyOrder()]] but will but * have a nicer message in case of failure. 
*/ def containsInAnyOrder[T: Coder]( ts: Seq[T], context: ScioContext ): h.Matcher[JIterable[T]] = SerializableMatchers.fromSupplier { supplierFromCoder(ts, context) { ds => val items = ds.mkString("\n\t\t", "\n\t\t", "\n") val message = s"Expected: iterable with items [$items]" val c = Matchers .containsInAnyOrder(ds: _*) .asInstanceOf[h.Matcher[JIterable[T]]] new h.BaseMatcher[JIterable[T]] { override def matches(o: AnyRef): Boolean = c.matches(o) override def describeTo(d: h.Description): Unit = d.appendText(message) override def describeMismatch(i: AnyRef, d: h.Description): Unit = c.describeMismatch(i, d) } } } def makeFn[T]( f: JIterable[T] => Unit ): SerializableFunction[JIterable[T], Void] = new SerializableFunction[JIterable[T], Void] { // delegate serialization to Kryo to avoid serialization issues in tests // when a non-serializable object is captured by the closure private[this] val impl = Externalizer(f) override def apply(input: JIterable[T]): Void = { impl.get(input) null } } def makeFnSingle[T](f: T => Unit): SerializableFunction[T, Void] = new SerializableFunction[T, Void] { // delegate serialization to Kryo to avoid serialization issues in tests // when a non-serializable object is captured by the closure private[this] val impl = Externalizer(f) override def apply(input: T): Void = { impl.get(input) null } } def assertThatFn[T: Eq: Coder]( mm: h.Matcher[JIterable[TestWrapper[T]]] ): SerializableFunction[JIterable[T], Void] = makeFn[T](in => assertThat(TestWrapper.wrap(in), mm)) def assertThatNotFn[T: Eq: Coder]( mm: h.Matcher[JIterable[TestWrapper[T]]] ): SerializableFunction[JIterable[T], Void] = makeFn[T](in => assertThat(TestWrapper.wrap(in), Matchers.not(mm))) def assert[T: Eq: Coder]( p: Iterable[TestWrapper[T]] => Boolean ): SerializableFunction[JIterable[T], Void] = makeFn[T](in => Predef.assert(p(TestWrapper.wrap(in).asScala))) def assertSingle[T: Eq: Coder](p: TestWrapper[T] => Boolean): SerializableFunction[T, Void] = makeFnSingle[T](in => Predef.assert(p(TestWrapper(in)))) def assertNot[T: Eq: Coder]( p: Iterable[TestWrapper[T]] => Boolean ): SerializableFunction[JIterable[T], Void] = makeFn[T](in => Predef.assert(!p(TestWrapper.wrap(in).asScala))) def assertNotSingle[T: Eq: Coder](p: TestWrapper[T] => Boolean): SerializableFunction[T, Void] = makeFnSingle[T](in => Predef.assert(!p(TestWrapper(in)))) def isEqualTo[T: Eq: Coder](context: ScioContext, t: T): SerializableFunction[T, Void] = { val mm: SerializableMatcher[Any] = SerializableMatchers.fromSupplier { supplierFromCoder(t, context)(t => Matchers.equalTo[Any](TestWrapper(t))) } makeFnSingle[T](in => assertThat(TestWrapper(in), mm)) } def notEqualTo[T: Eq: Coder](context: ScioContext, t: T): SerializableFunction[T, Void] = { val mm: SerializableMatcher[Any] = SerializableMatchers.fromSupplier { supplierFromCoder(t, context)(t => Matchers.not(Matchers.equalTo[Any](TestWrapper(t)))) } makeFnSingle[T](in => assertThat(TestWrapper(in), mm)) } def hasItem[T: Coder](t: T, context: ScioContext): h.Matcher[JIterable[T]] = SerializableMatchers.fromSupplier { supplierFromCoder(t, context) { t => Matchers.hasItem(t).asInstanceOf[h.Matcher[JIterable[T]]] } } } /** * Trait with ScalaTest [[org.scalatest.matchers.Matcher Matcher]] s for * [[com.spotify.scio.values.SCollection SCollection]] s. 
*/ trait SCollectionMatchers extends EqInstances { import ScioMatchers.makeFn sealed trait MatcherBuilder[T] { _: Matcher[T] => type From type To >: From type AssertBuilder = From => To def matcher(builder: AssertBuilder): Matcher[T] def matcher: Matcher[T] = matcher(identity) } sealed trait IterableMatcher[T, B] extends MatcherBuilder[T] with Matcher[T] { type From = IterableAssert[B] type To = From override def apply(left: T): MatchResult = matcher(left) } sealed trait SingleMatcher[T, B] extends MatcherBuilder[T] with Matcher[T] { type From = SingletonAssert[B] type To = From override def apply(left: T): MatchResult = matcher(left) } /* Wrapper for PAssert statements. PAssert does not perform assertions or throw exceptions until sc.run() is called. So MatchResult should always match true for "a should $Matcher" cases and false for "a shouldNot $Matcher" cases. We also need to run different assertions for positive (shouldFn) and negative (shouldNotFn) cases. */ private def m(shouldFn: () => Any, shouldNotFn: () => Any): MatchResult = { val isShouldNot = Thread .currentThread() .getStackTrace .filter(_.getClassName.startsWith("org.scalatest.")) .exists(e => e.getClassName.contains("NotWord") || e.getMethodName == "shouldNot") val r = if (isShouldNot) { shouldNotFn() false } else { shouldFn() true } MatchResult(r, "", "") } // Due to https://github.com/GoogleCloudPlatform/DataflowJavaSDK/issues/434 // SerDe cycle on each element to keep consistent with values on the expected side private def serDeCycle[T: Coder](scollection: SCollection[T]): SCollection[T] = { val coder = scollection.internal.getCoder scollection .map(e => CoderUtils .decodeFromByteArray(coder, CoderUtils.encodeToByteArray(coder, e)) ) } /** * SCollection assertion only applied to the specified window, running the checker only on the * on-time pane for each key. */ def inOnTimePane[T: ClassTag](window: BoundedWindow)(matcher: MatcherBuilder[T]): Matcher[T] = matcher match { case value: SingleMatcher[T, _] => value.matcher(_.inOnTimePane(window)) case value: IterableMatcher[T, _] => value.matcher(_.inOnTimePane(window)) } /** SCollection assertion only applied to the specified window. */ def inWindow[T: ClassTag, B: ClassTag]( window: BoundedWindow )(matcher: IterableMatcher[T, B]): Matcher[T] = matcher.matcher(_.inWindow(window)) /** * SCollection assertion only applied to the specified window across all panes that were not * produced by the arrival of late data. */ def inCombinedNonLatePanes[T: ClassTag, B: ClassTag]( window: BoundedWindow )(matcher: IterableMatcher[T, B]): Matcher[T] = matcher.matcher(_.inCombinedNonLatePanes(window)) /** * SCollection assertion only applied to the specified window, running the checker only on the * final pane for each key. */ def inFinalPane[T: ClassTag, B: ClassTag]( window: BoundedWindow )(matcher: MatcherBuilder[T]): Matcher[T] = matcher match { case value: SingleMatcher[T, _] => value.matcher(_.inFinalPane(window)) case value: IterableMatcher[T, _] => value.matcher(_.inFinalPane(window)) } /** * SCollection assertion only applied to the specified window, running the checker only on the * late pane for each key. 
*/ def inLatePane[T: ClassTag, B: ClassTag]( window: BoundedWindow )(matcher: MatcherBuilder[T]): Matcher[T] = matcher match { case value: SingleMatcher[T, _] => value.matcher(_.inLatePane(window)) case value: IterableMatcher[T, _] => value.matcher(_.inLatePane(window)) } /** * SCollection assertion only applied to the specified window, running the checker only on the * early pane for each key. */ def inEarlyPane[T]( window: BoundedWindow )(matcher: MatcherBuilder[T]): Matcher[T] = matcher match { case value: SingleMatcher[T, _] => value.matcher(_.inEarlyPane(window)) case value: IterableMatcher[T, _] => value.matcher(_.inEarlyPane(window)) } /** * SCollection assertion only applied to the specified window. The assertion expect outputs to be * produced to the provided window exactly once. */ def inOnlyPane[T: ClassTag, B: ClassTag]( window: BoundedWindow )(matcher: SingleMatcher[T, B]): Matcher[T] = matcher.matcher(_.inOnlyPane(window)) /** SCollection assertion only applied to early timing global window. */ def inEarlyGlobalWindowPanes[T: ClassTag, B: ClassTag]( matcher: IterableMatcher[T, B] ): Matcher[T] = matcher.matcher(_.inEarlyGlobalWindowPanes) /** Assert that the SCollection in question contains the provided elements. */ def containInAnyOrder[T: Coder: Eq]( value: Iterable[T] ): IterableMatcher[SCollection[T], T] = new IterableMatcher[SCollection[T], T] { override def matcher(builder: AssertBuilder): Matcher[SCollection[T]] = new Matcher[SCollection[T]] { override def apply(left: SCollection[T]): MatchResult = { val v = Externalizer(value.toSeq.map(x => TestWrapper(x))) // defeat closure val mm = ScioMatchers.containsInAnyOrder(v.get, left.context) val f = ScioMatchers.assertThatNotFn[T](mm) val g = ScioMatchers.assertThatFn[T](mm) val assertion = builder(PAssert.that(serDeCycle(left).internal)) m( () => assertion.satisfies(g), () => assertion.satisfies(f) ) } } } /** Assert that the SCollection in question contains a single provided element. */ def containSingleValue[T: Coder: Eq](value: T): SingleMatcher[SCollection[T], T] = new SingleMatcher[SCollection[T], T] { override def matcher(builder: AssertBuilder): Matcher[SCollection[T]] = new Matcher[SCollection[T]] { override def apply(left: SCollection[T]): MatchResult = { ScioMatchers val assertion = builder(PAssert.thatSingleton(serDeCycle(left).internal)) m( () => assertion.satisfies(ScioMatchers.isEqualTo(left.context, value)), () => assertion.satisfies(ScioMatchers.notEqualTo(left.context, value)) ) } } } /** * Assert that the SCollection in question contains the provided element without making * assumptions about other elements in the collection. */ def containValue[T: Coder: Eq](value: T): IterableMatcher[SCollection[T], T] = new IterableMatcher[SCollection[T], T] { override def matcher(builder: AssertBuilder): Matcher[SCollection[T]] = new Matcher[SCollection[T]] { override def apply(left: SCollection[T]): MatchResult = { val v = Externalizer(TestWrapper[T](value)) // defeat closure val mm = ScioMatchers.hasItem(v.get, left.context) val should = ScioMatchers.assertThatFn[T](mm) val shouldNot = ScioMatchers.assertThatNotFn[T](mm) val assertion = builder(PAssert.that(serDeCycle(left).internal)) m( () => assertion.satisfies(should), () => assertion.satisfies(shouldNot) ) } } } /** Assert that the SCollection in question is empty. 
*/ val beEmpty: IterableMatcher[SCollection[_], Any] = new IterableMatcher[SCollection[_], Any] { override def matcher(builder: AssertBuilder): Matcher[SCollection[_]] = new Matcher[SCollection[_]] { override def apply(left: SCollection[_]): MatchResult = { val assertion = PAssert.that(left.asInstanceOf[SCollection[Any]].internal) m( () => builder(assertion).empty(), () => builder(assertion) .satisfies(makeFn(in => assert(in.iterator().hasNext, "SCollection is empty"))) ) } } } /** Assert that the SCollection in question has provided size. */ def haveSize(size: Int): IterableMatcher[SCollection[_], Any] = new IterableMatcher[SCollection[_], Any] { override def matcher(builder: AssertBuilder): Matcher[SCollection[_]] = new Matcher[SCollection[_]] { override def apply(left: SCollection[_]): MatchResult = { val s = size val f = makeFn[Any] { in => val inSize = in.asScala.size assert(inSize == s, s"SCollection expected size: $s, actual: $inSize") } val g = makeFn[Any] { in => val inSize = in.asScala.size assert(inSize != s, s"SCollection expected size: not $s, actual: $inSize") } val assertion = PAssert.that(left.asInstanceOf[SCollection[Any]].internal) m( () => builder(assertion).satisfies(f), () => builder(assertion).satisfies(g) ) } } } /** Assert that the SCollection in question is equivalent to the provided map. */ def equalMapOf[K: Coder, V: Coder]( value: Map[K, V] ): SingleMatcher[SCollection[(K, V)], JMap[K, V]] = new SingleMatcher[SCollection[(K, V)], JMap[K, V]] { override def matcher(builder: AssertBuilder): Matcher[SCollection[(K, V)]] = new Matcher[SCollection[(K, V)]] { override def apply(left: SCollection[(K, V)]): MatchResult = { val assertion = builder(PAssert.thatMap(serDeCycle(left).toKV.internal)) m( () => assertion.isEqualTo(value.asJava), () => assertion.notEqualTo(value.asJava) ) } } } // TODO: investigate why multi-map doesn't work /** Assert that the SCollection in question satisfies the provided function. */ def satisfy[T: Coder: Eq]( predicate: Iterable[T] => Boolean ): IterableMatcher[SCollection[T], T] = new IterableMatcher[SCollection[T], T] { override def matcher(builder: AssertBuilder): Matcher[SCollection[T]] = new Matcher[SCollection[T]] { override def apply(left: SCollection[T]): MatchResult = { val cleanedPredicate = ClosureCleaner.clean(predicate) val p: Iterable[TestWrapper[T]] => Boolean = it => cleanedPredicate(it.map(_.get)) val f = ScioMatchers.assert(p) val g = ScioMatchers.assertNot(p) val assertion = builder(PAssert.that(serDeCycle(left).internal)) m( () => assertion.satisfies(f), () => assertion.satisfies(g) ) } } } /** * Assert that the SCollection in question contains a single element which satisfies the provided * function. */ def satisfySingleValue[T: Coder: Eq]( predicate: T => Boolean ): SingleMatcher[SCollection[T], T] = new SingleMatcher[SCollection[T], T] { override def matcher(builder: AssertBuilder): Matcher[SCollection[T]] = new Matcher[SCollection[T]] { override def apply(left: SCollection[T]): MatchResult = { val cleanedPredicate = ClosureCleaner.clean(predicate) val p: TestWrapper[T] => Boolean = t => cleanedPredicate(t.get) val f = ScioMatchers.assertSingle(p) val g = ScioMatchers.assertNotSingle(p) val assertion = builder(PAssert.thatSingleton(serDeCycle(left).internal)) m( () => assertion.satisfies(f), () => assertion.satisfies(g) ) } } } /** Assert that all elements of the SCollection in question satisfy the provided function. 
*/ def forAll[T: Coder: Eq]( predicate: T => Boolean ): IterableMatcher[SCollection[T], T] = { val f = ClosureCleaner.clean(predicate) satisfy(_.forall(f)) } /** Assert that some elements of the SCollection in question satisfy the provided function. */ def exist[T: Coder: Eq]( predicate: T => Boolean ): IterableMatcher[SCollection[T], T] = { val f = ClosureCleaner.clean(predicate) satisfy(_.exists(f)) } }
spotify/scio
scio-test/src/main/scala/com/spotify/scio/testing/SCollectionMatchers.scala
Scala
apache-2.0
18,987
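A sketch of how these matchers are typically used from a test, assuming the scio-test PipelineSpec base class (which mixes in SCollectionMatchers) is available:

import com.spotify.scio.testing.PipelineSpec

class SCollectionMatchersExample extends PipelineSpec {
  "SCollectionMatchers" should "work against in-memory SCollections" in {
    runWithContext { sc =>
      val counts = sc.parallelize(Seq("a", "b", "b")).countByValue
      counts should containInAnyOrder(Seq(("a", 1L), ("b", 2L)))
      counts should satisfy[(String, Long)](_.map(_._2).sum == 3L)
      counts shouldNot beEmpty
    }
  }
}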
// code-examples/BasicOOP/scoping/private-type-nested-wont-compile.scala // WON'T COMPILE package scopeA { class PrivateClass1 { class Nested { private[PrivateClass1] val nestedField = 1 } private[PrivateClass1] val nested = new Nested val nestedNested = nested.nestedField } class PrivateClass2 extends PrivateClass1 { val nField = new Nested().nestedField // ERROR } class PrivateClass3 { val privateClass1 = new PrivateClass1 val privateNField = privateClass1.nested.nestedField // ERROR } }
XClouded/t4f-core
scala/src/tmp/BasicOOP/scoping/private-type-nested-wont-compile.scala
Scala
apache-2.0
547
package libs import scala.collection.mutable.Map import scala.collection.mutable.MutableList object CaffeWeightCollection { def scalarDivide(weights: Map[String, MutableList[NDArray]], v: Float) = { for (name <- weights.keys) { for (j <- 0 to weights(name).length - 1) { weights(name)(j).scalarDivide(v) } } } def add(weights1: Map[String, MutableList[NDArray]], weights2: Map[String, MutableList[NDArray]]): Map[String, MutableList[NDArray]] = { if (weights1.keys != weights2.keys) { throw new Exception("weights1.keys != weights2.keys, weights1.keys = " + weights1.keys.toString + ", and weights2.keys = " + weights2.keys.toString + "\n") } val newWeights = Map[String, MutableList[NDArray]]() for (name <- weights1.keys) { newWeights += (name -> MutableList()) if (weights1(name).length != weights2(name).length) { throw new Exception("weights1(name).length != weights2(name).length, name = " + name + ", weights1(name).length = " + weights1(name).length.toString + ", weights2(name).length = " + weights2(name).length.toString) } for (j <- 0 to weights1(name).length - 1) { if (weights1(name)(j).shape.deep != weights2(name)(j).shape.deep) { throw new Exception("weights1(name)(j).shape != weights2(name)(j).shape, name = " + name + ", j = " + j.toString + ", weights1(name)(j).shape = " + weights1(name)(j).shape.deep.toString + ", weights2(name)(j).shape = " + weights2(name)(j).shape.deep.toString) } newWeights(name) += NDArray.plus(weights1(name)(j), weights2(name)(j)) } } newWeights } def checkEqual(weights1: Map[String, MutableList[NDArray]], weights2: Map[String, MutableList[NDArray]], tol: Float): Boolean = { if (weights1.keys != weights2.keys) { return false } for (name <- weights1.keys) { if (weights1(name).length != weights2(name).length) { return false } for (j <- 0 to weights1(name).length - 1) { if (!NDArray.checkEqual(weights1(name)(j), weights2(name)(j), tol)) { return false } } } return true } }
amplab/SparkNet
src/main/scala/libs/CaffeWeightCollection.scala
Scala
mit
2,158
package adapter.xpay trait PayD { def getCustCardNo: String def setCustCardNo(custCardNo: String) def getCardOwnerName: String def setCardOwnerName(cardOwnerName: String) def getCardExpMonthDate: String def setCardExpMonthDate(cardExpMonthDate: String) def getCVVNo: Integer def setCVVNo(cVVNo: Integer) def getTotalAmount: Double def setTotalAmount(totalAmount: Double) }
BBK-PiJ-2015-67/sdp-portfolio
exercises/week07/src/main/scala/adapter/xpay/PayD.scala
Scala
unlicense
403
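A minimal concrete implementation of the PayD target interface above; the class name and plain field storage are illustrative. In the usual adapter exercise, another payment interface is then wrapped so that it can be used wherever PayD is expected:

package adapter.xpay

// Backing fields are simple vars; nothing here beyond what the trait requires.
class XpayDefault extends PayD {
  private var custCardNo: String = _
  private var cardOwnerName: String = _
  private var cardExpMonthDate: String = _
  private var cVVNo: Integer = _
  private var totalAmount: Double = 0.0

  def getCustCardNo: String = custCardNo
  def setCustCardNo(custCardNo: String): Unit = this.custCardNo = custCardNo

  def getCardOwnerName: String = cardOwnerName
  def setCardOwnerName(cardOwnerName: String): Unit = this.cardOwnerName = cardOwnerName

  def getCardExpMonthDate: String = cardExpMonthDate
  def setCardExpMonthDate(cardExpMonthDate: String): Unit = this.cardExpMonthDate = cardExpMonthDate

  def getCVVNo: Integer = cVVNo
  def setCVVNo(cVVNo: Integer): Unit = this.cVVNo = cVVNo

  def getTotalAmount: Double = totalAmount
  def setTotalAmount(totalAmount: Double): Unit = this.totalAmount = totalAmount
}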
package scommons.client.ui.list import scala.scalajs.js import scala.scalajs.js.annotation.JSImport @JSImport("scommons/client/ui/list/ListBox.css", JSImport.Namespace) @js.native object ListBoxCss extends js.Object { val listBoxItem: String = js.native val listBoxSelectedItem: String = js.native }
viktor-podzigun/scommons
ui/src/main/scala/scommons/client/ui/list/ListBoxCss.scala
Scala
apache-2.0
310
package fr.marouni.spark.sql import org.apache.spark.rdd.RDD import org.apache.spark.sql.{DataFrame, SQLContext} import org.apache.spark.{SparkContext, SparkConf} /** * Created by abbass on 12/03/16. * * SQL support in Spark SQL. */ object SqlSupport extends App { case class order(company: String, client: String, item: Integer, qty: Double, price: Double) case class companyinfo(company: String, address: String) override def main(args: Array[String]) { val sparkConf = new SparkConf() val sc = new SparkContext("local[*]", "SQL tests", sparkConf) val sqlContext = new SQLContext(sc) val file: RDD[String] = sc.textFile("/home/abbass/dev/spark/sqldata/transcations.csv") val rdd: RDD[order] = file.map(line => { val splits: Array[String] = line.split(",") order(splits(0), splits(1), splits(2).toInt, splits(3).toDouble, splits(4).toDouble) }) val file2: RDD[String] = sc.textFile("/home/abbass/dev/spark/sqldata/directory.csv") val rdd2: RDD[companyinfo] = file2.map(line => { val splits: Array[String] = line.split(",") companyinfo(splits(0), splits(1)) }) val df: DataFrame = sqlContext.createDataFrame(rdd) df.registerTempTable("tab1") val df2: DataFrame = sqlContext.createDataFrame(rdd2) df2.registerTempTable("tab2") // 1- SELECT // sqlContext.sql("SELECT item FROM tab1").show(10) // 2- Expressions in SELECT // sqlContext.sql("SELECT price, price * 1.25 AS taxed_price FROM tab1").show(10) // 3- Built-in functions // sqlContext.sql("SELECT price, round(price * 1.25, 1) AS taxed_price FROM tab1").show(10) // 4- Text concatenation // sqlContext.sql("SELECT concat(company, client) AS id, price, round(price * 1.25, 1) AS taxed_price FROM tab1").show(10) // 5- Conditions in WHERE // sqlContext.sql( // """ // |SELECT * FROM tab1 WHERE // |( // |(qty BETWEEN 1 AND 5 // |AND // |price <> 30.0 // |AND // |client IN ('XXX', 'YYY', 'ZZZ') // |) // |OR // |client LIKE 'AB%' // |) // |AND // |company IS NOT NULL // """.stripMargin).show(10) // 6- Handle nulls as zeros // sqlContext.sql( // """ // |SELECT * FROM tab1 WHERE // |coalesce(price, 0.0) < 10.0 // """.stripMargin).show(10) // 7- Aggregations (Spark SQL doesn't support ordinal position in GROUP BY statements) // sqlContext.sql( // """ // |SELECT company, client, COUNT(*) as count FROM tab1 // |WHERE price >= 30.0 // |GROUP BY company, client // |ORDER BY count DESC // """.stripMargin).show(10) // 8- Having statement // sqlContext.sql( // """ // |SELECT company, COUNT(*) as count FROM tab1 // |WHERE price >= 30.0 // |GROUP BY company // |HAVING count > 1 // |ORDER BY count DESC // """.stripMargin).show(10) // 9- Distincts // sqlContext.sql( // """ // |SELECT COUNT(DISTINCT company, client, item) as dcount FROM tab1 // """.stripMargin).show(10) // 10- CASE with no ordinal support // sqlContext.sql( // """ // |SELECT // |CASE // |WHEN price >= 30.0 THEN 'HIGH' // |WHEN price >= 10.0 THEN 'MODERATE' // |ELSE 'LOW' // |END AS price_tag, // |COUNT(*) as count // |FROM tab1 // |GROUP BY ( // |CASE // |WHEN price >= 30.0 THEN 'HIGH' // |WHEN price >= 10.0 THEN 'MODERATE' // |ELSE 'LOW' // |END // |) // |ORDER BY count DESC // """.stripMargin).show(10) // 11- CASE in aggregations (Horrible exception when FROM is omitted) // sqlContext.sql( // """ // |SELECT company, // |SUM( // | CASE // | WHEN price <= 0.0 THEN 1.0 // | ELSE price // | END // |) as sum // |FROM tab1 // |GROUP BY company // |ORDER BY sum DESC // """.stripMargin).show(10) // 12- JOINS sqlContext.sql( """ |SELECT company |FROM tab1 """.stripMargin).show(10) sqlContext.sql( """ |SELECT * |FROM tab2 
""".stripMargin).show(10) sqlContext.sql( """ |SELECT tab1.company, tab1.client, tab2.address |FROM tab1 |JOIN tab2 ON tab1.company = tab2.company |WHERE address is NULL """.stripMargin).show(10) // sqlContext.sql( // """ // |SELECT COUNT(DISTINCT tab1.company) // |FROM tab1 // |FULL JOIN tab2 ON tab1.company = tab2.company // |WHERE address is NULL OR client IS NULL // """.stripMargin).show(10) // sqlContext.sql( // """ // |SELECT client, SUM(price*qty) as total_price // |FROM tab1 // |LEFT JOIN tab2 ON tab1.company = tab2.company // |WHERE address is NOT NULL // |GROUP BY client // |ORDER BY total_price DESC // """.stripMargin).show(10) // http://blog.jooq.org/2016/03/17/10-easy-steps-to-a-complete-understanding-of-sql/ /** * FROM WHERE GROUP BY HAVING SELECT DISTINCT UNION ORDER BY */ // 1- Cannot use derived columns in WHERE clause : /*sqlContext.sql( """ |SELECT price * qty as total | FROM tab1 | WHERE (price*qty) > 10.0 """.stripMargin).show(10)*/ // 2- Table references (cartesian product MAP ONLY with no shuffle) /*sqlContext.sql( """ |SELECT * |FROM tab1, tab2 """.stripMargin).show(10)*/ // 3- JOIN with a cartesian product /*sqlContext.sql( """ |SELECT * |FROM tab1 JOIN tab2 ON tab1.company = tab2.company, tab2 """.stripMargin).show(10)*/ // 4- Cartesian product with condition (Doesn't generate a cartesian product) /*sqlContext.sql( """ |SELECT * |FROM tab1, tab2 |WHERE tab1.company = tab2.company """.stripMargin).show(10)*/ // 5- SEMI JOINS using IN clause not supported (https://issues.apache.org/jira/browse/SPARK-4226) /*sqlContext.sql( """ |SELECT company |FROM tab1 |WHERE compnay IN (SELECT company FROM tab2) """.stripMargin).show(10)*/ // 6- SEMI JOINS using EXISTS clause not supported /*sqlContext.sql( """ |SELECT company |FROM tab1 |WHERE EXISTS (SELECT company FROM tab2) """.stripMargin).show(10)*/ // 7- SEMI JOIN might be replaced with INNER JOIN with DISTINCT /*sqlContext.sql( """ |SELECT DISTINCT tab1.company |FROM tab1 JOIN tab2 ON tab1.company = tab2.company """.stripMargin).show(10)*/ // 8- Derived Tables (Supported sub queries in FROM clause) /*sqlContext.sql( """ |SELECT cmp, total |FROM (SELECT company AS cmp, price * qty AS total FROM tab1) tab1D |WHERE total > 10.0 """.stripMargin).show(10)*/ // 9- Unions (Union can only be performed on tables with the same number of columns) /*sqlContext.sql( """ |SELECT company, client |FROM tab1 |WHERE price > 1.0 |UNION |SELECT company, address |FROM tab2 """.stripMargin).show(10)*/ // Do not quit we need to check webUI Thread.sleep(10000000) } }
marouni/spark-tests
src/main/scala/fr/marouni/spark/sql/SqlSupport.scala
Scala
apache-2.0
7,407
package formats

import models._
import play.api.Play
import play.api.Play.current
import play.api.libs.json.Reads._
import play.api.libs.json._
import play.api.libs.functional.syntax._

trait APIJsonFormats extends CommonJsonFormats {
  implicit def traversableWrites[A: Writes] = new Writes[Traversable[A]] {
    def writes(as: Traversable[A]) = JsArray(as.map(Json.toJson(_)).toSeq)
  }

  def addHref[T](objType: String, w: Writes[T]): Writes[T] = w.transform {
    js => js.as[JsObject] ++ Json.obj("href" -> JsString("/%s/%s".format(objType, (js \ "id").as[String])))
  }

  implicit val tokenWrites: Writes[Token] = addHref("tokens", Json.writes[Token].transform {
    js => js.as[JsObject] - "userId"
  })

  implicit val userWrite: Writes[User] = addHref("users", Json.writes[User].transform(
    js => js.as[JsObject] - "passwordHash" - "facebookToken"))

  private val sha256Regex = "[0-9a-z]{64}".r
  private val emailRegex = """^(?!\.)("([^"\r\\]|\\["\r\\])*"|([-a-zA-Z0-9!#$%&'*+/=?^_`{|}~]|(?<!\.)\.)*)(?<!\.)@[a-zA-Z0-9][\w\.-]*[a-zA-Z0-9]\.[a-zA-Z][a-zA-Z\.]*[a-zA-Z]$""".r
  private val usernameRegex = "[0-9a-zA-Z.]{2,20}".r
  private val facebookTokenRegex = "[^;\t\n]{1,1024}".r

  implicit val newUserRead: Reads[NewUser] = (
    (__ \ "email").readNullable[String](pattern(emailRegex, "error.email")) and
    (__ \ "password").readNullable[String](pattern(sha256Regex, "error.sha256")) and
    (__ \ "username").readNullable[String](pattern(usernameRegex, "error.username")) and
    (__ \ "facebookToken").readNullable[String](pattern(facebookTokenRegex, "error.facebookToken"))
  )(NewUser.apply _)

  implicit val loginUserRead: Reads[LoginUser] = (
    (__ \ "email").readNullable[String](pattern(emailRegex, "error.email")) and
    (__ \ "password").readNullable[String](pattern(sha256Regex, "error.sha256")) and
    (__ \ "username").readNullable[String](pattern(usernameRegex, "error.username")) and
    (__ \ "facebookToken").readNullable[String](pattern(facebookTokenRegex, "error.facebookToken"))
  )(LoginUser.apply _)

  implicit val errorWrite = Json.writes[Error]
  implicit val emailWrite = addHref("emails", Json.writes[Email])
  implicit val topLevelWrite = Json.writes[TopLevel]
}
jdauphant/play_api_example
app/formats/APIJsonFormats.scala
Scala
isc
2,229
package cz.senkadam.gatlingsql.requests

import io.gatling.core.config.Protocol

/**
 * Created by senk on 7.1.15.
 */
/**
 * SQL protocol
 */
case class SqlProtocol() extends Protocol
veraicon/gatlingsql
src/main/scala/cz/senkadam/gatlingsql/requests/SqlProtocol.scala
Scala
apache-2.0
185
package inox
package parsing

import org.scalatest.funsuite.AnyFunSuite

class ArithmeticParserSuite extends AnyFunSuite {
  import inox.trees._
  import interpolator._
  implicit val symbols = NoSymbols

  test("Parsing additions.") {
    assertResult(Plus(IntegerLiteral(3), IntegerLiteral(4))) {
      e"3 + 4"
    }

    assertResult(Plus(Plus(IntegerLiteral(3), IntegerLiteral(4)), IntegerLiteral(5))) {
      e"3 + 4 + 5"
    }
  }

  test("Parsing subtractions.") {
    assertResult(Minus(IntegerLiteral(3), IntegerLiteral(4))) {
      e"3 - 4"
    }

    assertResult(Minus(Minus(IntegerLiteral(3), IntegerLiteral(4)), IntegerLiteral(5))) {
      e"3 - 4 - 5"
    }
  }

  test("Parsing divisions.") {
    assertResult(Division(IntegerLiteral(3), IntegerLiteral(4))) {
      e"3 / 4"
    }

    assertResult(Division(Division(IntegerLiteral(3), IntegerLiteral(4)), IntegerLiteral(5))) {
      e"3 / 4 / 5"
    }
  }

  test("Parsing multiplications.") {
    assertResult(Times(IntegerLiteral(3), IntegerLiteral(4))) {
      e"3 * 4"
    }

    assertResult(Times(Times(IntegerLiteral(3), IntegerLiteral(4)), IntegerLiteral(5))) {
      e"3 * 4 * 5"
    }
  }

  test("Parsing unary negation.") {
    assertResult(UMinus(IntegerLiteral(3))) {
      e"- 3"
    }

    assertResult(UMinus(IntegerLiteral(3))) {
      e"-(3)"
    }

    assertResult(UMinus(IntegerLiteral(-3))) {
      e"- -3"
    }

    assertResult(UMinus(IntegerLiteral(-3))) {
      e"-(-3)"
    }

    assertResult(UMinus(IntegerLiteral(-3))) {
      e"--3"
    }
  }

  test("Operator precedence.") {
    assertResult(Plus(IntegerLiteral(4), Times(IntegerLiteral(5), IntegerLiteral(6)))) {
      e"4 + 5 * 6"
    }

    assertResult(Plus(Times(IntegerLiteral(4), IntegerLiteral(5)), IntegerLiteral(6))) {
      e"4 * 5 + 6"
    }

    assertResult(Minus(Plus(Minus(IntegerLiteral(0), Division(IntegerLiteral(1), IntegerLiteral(2))), Times(Division(Times(UMinus(IntegerLiteral(3)), IntegerLiteral(4)), IntegerLiteral(5)), IntegerLiteral(6))), IntegerLiteral(7))) {
      e"0 - 1 / 2 + - 3 * 4 / 5 * 6 - 7"
    }
  }

  test("Parenthesized expressions.") {
    assertResult(IntegerLiteral(0)) {
      e"(0)"
    }

    assertResult(IntegerLiteral(42)) {
      e"((((42))))"
    }

    assertResult(Times(IntegerLiteral(1), Plus(IntegerLiteral(2), IntegerLiteral(3)))) {
      e"1 * (2 + 3)"
    }

    assertResult(UMinus(Times(IntegerLiteral(1), UMinus(Plus(IntegerLiteral(2), IntegerLiteral(3)))))) {
      e"-(1 * -(2 + ((3))))"
    }
  }
}
epfl-lara/inox
src/test/scala/inox/parsing/ArithmeticParserSuite.scala
Scala
apache-2.0
2,528