/*
DO NOT EDIT THIS FILE - the content was created using a source code generator
*/
#include <QtCore/Qt>
#ifndef __XHARBOUR__
#include <QtXml/QtXmlVersion>
#endif
#include "qt5xhb_common.h"
#include "qt5xhb_macros.h"
#include "qt5xhb_utils.h"
#ifdef __XHARBOUR__
#include <QtXml/QtXmlVersion>
#endif
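/*
The two wrappers below export the QtXml version macros to the Harbour level:
QTXML_VERSION_STR() returns the Qt version as a string (e.g. "5.2.0") and
QTXML_VERSION() returns the hex-encoded version number (0xMMNNPP).
*/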
HB_FUNC( QTXML_VERSION_STR )
{
hb_retc( (const char *) QTXML_VERSION_STR );
}
HB_FUNC( QTXML_VERSION )
{
hb_retni( QTXML_VERSION );
}
| {
"content_hash": "811a24e83beb154f5707a017ccf230e9",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 16.448275862068964,
"alnum_prop": 0.6477987421383647,
"repo_name": "marcosgambeta/Qt5xHb",
"id": "54ea5f8d32ddc297e0df2c68508f0f4d7efbc9bf",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/QtXml/QtXmlVersion.cpp",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "32864"
},
{
"name": "C",
"bytes": "450079"
},
{
"name": "C++",
"bytes": "2140920"
},
{
"name": "Charity",
"bytes": "8108"
},
{
"name": "Makefile",
"bytes": "250157"
},
{
"name": "QML",
"bytes": "894"
},
{
"name": "xBase",
"bytes": "18166801"
}
],
"symlink_target": ""
} |
package org.apache.spark.sql.execution.datasources
import java.io._
import java.util.concurrent.atomic.AtomicInteger
import java.util.zip.GZIPOutputStream
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{BlockLocation, FileStatus, Path, RawLocalFileSystem}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.SparkException
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionSet, PredicateHelper}
import org.apache.spark.sql.catalyst.util
import org.apache.spark.sql.execution.{DataSourceScanExec, SparkPlan}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.{IntegerType, StructType}
import org.apache.spark.util.Utils
class FileSourceStrategySuite extends QueryTest with SharedSQLContext with PredicateHelper {
import testImplicits._
protected override def sparkConf = super.sparkConf.set("spark.default.parallelism", "1")
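// Parallelism is pinned to 1 so that the split packing checked below is
// deterministic (the maximum split size is derived in part from the default
// parallelism; this is an assumption about the Spark internals under test).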
test("unpartitioned table, single partition") {
val table =
createTable(
files = Seq(
"file1" -> 1,
"file2" -> 1,
"file3" -> 1,
"file4" -> 1,
"file5" -> 1,
"file6" -> 1,
"file7" -> 1,
"file8" -> 1,
"file9" -> 1,
"file10" -> 1))
checkScan(table.select('c1)) { partitions =>
// Ten one-byte files should all fit in a single partition.
assert(partitions.size == 1, "when checking partitions")
assert(partitions.head.files.size == 10, "when checking partition 1")
// 1-byte files are too small to split, so we should read each file whole.
assert(partitions.head.files.head.start == 0)
assert(partitions.head.files.head.length == 1)
}
checkPartitionSchema(StructType(Nil))
checkDataSchema(StructType(Nil).add("c1", IntegerType))
}
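// The tests below exercise how file splits are bin-packed into scan partitions:
// splits are taken largest-first and appended to the current partition until the
// next one would exceed FILES_MAX_PARTITION_BYTES, with FILES_OPEN_COST_IN_BYTES
// added as per-file padding. (This sketch of the packing is inferred from the
// expected layouts asserted below.)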
test("unpartitioned table, multiple partitions") {
val table =
createTable(
files = Seq(
"file1" -> 5,
"file2" -> 5,
"file3" -> 5))
withSQLConf(SQLConf.FILES_MAX_PARTITION_BYTES.key -> "11",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "1") {
checkScan(table.select('c1)) { partitions =>
// 5 byte files should be laid out [(5, 5), (5)]
assert(partitions.size == 2, "when checking partitions")
assert(partitions(0).files.size == 2, "when checking partition 1")
assert(partitions(1).files.size == 1, "when checking partition 2")
// 5-byte files are too small to split, so we should read each file whole.
assert(partitions.head.files.head.start == 0)
assert(partitions.head.files.head.length == 5)
}
checkPartitionSchema(StructType(Nil))
checkDataSchema(StructType(Nil).add("c1", IntegerType))
}
}
test("Unpartitioned table, large file that gets split") {
val table =
createTable(
files = Seq(
"file1" -> 15,
"file2" -> 3))
withSQLConf(SQLConf.FILES_MAX_PARTITION_BYTES.key -> "10",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "1") {
checkScan(table.select('c1)) { partitions =>
// Files should be laid out as [(0-10), (10-15, 3)]
assert(partitions.size == 2, "when checking partitions")
assert(partitions(0).files.size == 1, "when checking partition 1")
assert(partitions(1).files.size == 2, "when checking partition 2")
// Start by reading 10 bytes of the first file
assert(partitions.head.files.head.start == 0)
assert(partitions.head.files.head.length == 10)
// The second partition reads the remaining 5 bytes of the first file
assert(partitions(1).files.head.start == 10)
assert(partitions(1).files.head.length == 5)
}
checkPartitionSchema(StructType(Nil))
checkDataSchema(StructType(Nil).add("c1", IntegerType))
}
}
test("Unpartitioned table, many files that get split") {
val table =
createTable(
files = Seq(
"file1" -> 2,
"file2" -> 2,
"file3" -> 1,
"file4" -> 1,
"file5" -> 1,
"file6" -> 1))
withSQLConf(SQLConf.FILES_MAX_PARTITION_BYTES.key -> "4",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "1") {
checkScan(table.select('c1)) { partitions =>
// Files should be laid out [(file1), (file2, file3), (file4, file5), (file6)]
assert(partitions.size == 4, "when checking partitions")
assert(partitions(0).files.size == 1, "when checking partition 1")
assert(partitions(1).files.size == 2, "when checking partition 2")
assert(partitions(2).files.size == 2, "when checking partition 3")
assert(partitions(3).files.size == 1, "when checking partition 4")
// First partition reads (file1)
assert(partitions(0).files(0).start == 0)
assert(partitions(0).files(0).length == 2)
// Second partition reads (file2, file3)
assert(partitions(1).files(0).start == 0)
assert(partitions(1).files(0).length == 2)
assert(partitions(1).files(1).start == 0)
assert(partitions(1).files(1).length == 1)
// Third partition reads (file4, file5)
assert(partitions(2).files(0).start == 0)
assert(partitions(2).files(0).length == 1)
assert(partitions(2).files(1).start == 0)
assert(partitions(2).files(1).length == 1)
// Final partition reads (file6)
assert(partitions(3).files(0).start == 0)
assert(partitions(3).files(0).length == 1)
}
checkPartitionSchema(StructType(Nil))
checkDataSchema(StructType(Nil).add("c1", IntegerType))
}
}
test("partitioned table") {
val table =
createTable(
files = Seq(
"p1=1/file1" -> 10,
"p1=2/file2" -> 10))
// Only one file should be read.
checkScan(table.where("p1 = 1")) { partitions =>
assert(partitions.size == 1, "when checking partitions")
assert(partitions.head.files.size == 1, "when files in partition 1")
}
// We don't need to reevaluate filters that are only on partitions.
checkDataFilters(Set.empty)
// Only one file should be read.
checkScan(table.where("p1 = 1 AND c1 = 1 AND (p1 + c1) = 2")) { partitions =>
assert(partitions.size == 1, "when checking partitions")
assert(partitions.head.files.size == 1, "when checking files in partition 1")
assert(partitions.head.files.head.partitionValues.getInt(0) == 1,
"when checking partition values")
}
// Only the filters that do not contain the partition column should be pushed down
checkDataFilters(Set(IsNotNull("c1"), EqualTo("c1", 1)))
}
test("partitioned table - case insensitive") {
withSQLConf("spark.sql.caseSensitive" -> "false") {
val table =
createTable(
files = Seq(
"p1=1/file1" -> 10,
"p1=2/file2" -> 10))
// Only one file should be read.
checkScan(table.where("P1 = 1")) { partitions =>
assert(partitions.size == 1, "when checking partitions")
assert(partitions.head.files.size == 1, "when files in partition 1")
}
// We don't need to reevaluate filters that are only on partitions.
checkDataFilters(Set.empty)
// Only one file should be read.
checkScan(table.where("P1 = 1 AND C1 = 1 AND (P1 + C1) = 2")) { partitions =>
assert(partitions.size == 1, "when checking partitions")
assert(partitions.head.files.size == 1, "when checking files in partition 1")
assert(partitions.head.files.head.partitionValues.getInt(0) == 1,
"when checking partition values")
}
// Only the filters that do not contain the partition column should be pushed down
checkDataFilters(Set(IsNotNull("c1"), EqualTo("c1", 1)))
}
}
test("partitioned table - after scan filters") {
val table =
createTable(
files = Seq(
"p1=1/file1" -> 10,
"p1=2/file2" -> 10))
val df1 = table.where("p1 = 1 AND (p1 + c1) = 2 AND c1 = 1")
// Filters on data columns are only advisory, so we have to re-evaluate them.
assert(getPhysicalFilters(df1) contains resolve(df1, "c1 = 1"))
// Don't re-evaluate partition-only filters.
assert(!(getPhysicalFilters(df1) contains resolve(df1, "p1 = 1")))
val df2 = table.where("(p1 + c2) = 2 AND c1 = 1")
// Filters on data columns are only advisory, so we have to re-evaluate them.
assert(getPhysicalFilters(df2) contains resolve(df2, "c1 = 1"))
// Need to evaluate filters that are not pushed down.
assert(getPhysicalFilters(df2) contains resolve(df2, "(p1 + c2) = 2"))
}
test("bucketed table") {
val table =
createTable(
files = Seq(
"p1=1/file1_0000" -> 1,
"p1=1/file2_0000" -> 1,
"p1=1/file3_0002" -> 1,
"p1=2/file4_0002" -> 1,
"p1=2/file5_0000" -> 1,
"p1=2/file6_0000" -> 1,
"p1=2/file7_0000" -> 1),
buckets = 3)
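// File names carry the bucket id as a suffix ("file1_0000" -> bucket 0), and the
// scan produces one partition per bucket: bucket 0 holds the five *_0000 files,
// bucket 1 is empty, and bucket 2 holds the two *_0002 files.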
// No partition pruning
checkScan(table) { partitions =>
assert(partitions.size == 3)
assert(partitions(0).files.size == 5)
assert(partitions(1).files.size == 0)
assert(partitions(2).files.size == 2)
}
// With partition pruning
checkScan(table.where("p1=2")) { partitions =>
assert(partitions.size == 3)
assert(partitions(0).files.size == 3)
assert(partitions(1).files.size == 0)
assert(partitions(2).files.size == 1)
}
}
test("Locality support for FileScanRDD") {
val partition = FilePartition(0, Seq(
PartitionedFile(InternalRow.empty, "fakePath0", 0, 10, Array("host0", "host1")),
PartitionedFile(InternalRow.empty, "fakePath0", 10, 20, Array("host1", "host2")),
PartitionedFile(InternalRow.empty, "fakePath1", 0, 5, Array("host3")),
PartitionedFile(InternalRow.empty, "fakePath2", 0, 5, Array("host4"))
))
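// preferredLocations should report the hosts serving the most bytes of the
// partition (the top three here): host1 serves 30 bytes, host2 20, host0 10,
// while host3 and host4 serve only 5 bytes each and are dropped.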
val fakeRDD = new FileScanRDD(
spark,
(file: PartitionedFile) => Iterator.empty,
Seq(partition)
)
assertResult(Set("host0", "host1", "host2")) {
fakeRDD.preferredLocations(partition).toSet
}
}
test("Locality support for FileScanRDD - one file per partition") {
withSQLConf(
SQLConf.FILES_MAX_PARTITION_BYTES.key -> "10",
"fs.file.impl" -> classOf[LocalityTestFileSystem].getName,
"fs.file.impl.disable.cache" -> "true") {
val table =
createTable(files = Seq(
"file1" -> 10,
"file2" -> 10
))
checkScan(table) { partitions =>
val Seq(p1, p2) = partitions
assert(p1.files.length == 1)
assert(p1.files.flatMap(_.locations).length == 1)
assert(p2.files.length == 1)
assert(p2.files.flatMap(_.locations).length == 1)
val fileScanRDD = getFileScanRDD(table)
assert(partitions.flatMap(fileScanRDD.preferredLocations).length == 2)
}
}
}
test("Locality support for FileScanRDD - large file") {
withSQLConf(
SQLConf.FILES_MAX_PARTITION_BYTES.key -> "10",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "0",
"fs.file.impl" -> classOf[LocalityTestFileSystem].getName,
"fs.file.impl.disable.cache" -> "true") {
val table =
createTable(files = Seq(
"file1" -> 15,
"file2" -> 5
))
checkScan(table) { partitions =>
val Seq(p1, p2) = partitions
assert(p1.files.length == 1)
assert(p1.files.flatMap(_.locations).length == 1)
assert(p2.files.length == 2)
assert(p2.files.flatMap(_.locations).length == 2)
val fileScanRDD = getFileScanRDD(table)
assert(partitions.flatMap(fileScanRDD.preferredLocations).length == 3)
}
}
}
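// Whether a file may be split is decided by the file format; for text-based
// formats this follows the Hadoop compression codec registered for the suffix:
// gzip/snappy/lz4 streams cannot be split, while block-compressed bzip2 can.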
test("SPARK-15654 do not split non-splittable files") {
// Check that non-splittable files are not split across multiple partitions
Seq("gz", "snappy", "lz4").foreach { suffix =>
val table = createTable(
files = Seq(s"file1.${suffix}" -> 3, s"file2.${suffix}" -> 1, s"file3.${suffix}" -> 1)
)
withSQLConf(
SQLConf.FILES_MAX_PARTITION_BYTES.key -> "2",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "0") {
checkScan(table.select('c1)) { partitions =>
assert(partitions.size == 2)
assert(partitions(0).files.size == 1)
assert(partitions(1).files.size == 2)
}
}
}
// Check that a splittable compressed file can be assigned to multiple partitions
Seq("bz2").foreach { suffix =>
val table = createTable(
files = Seq(s"file1.${suffix}" -> 3, s"file2.${suffix}" -> 1, s"file3.${suffix}" -> 1)
)
withSQLConf(
SQLConf.FILES_MAX_PARTITION_BYTES.key -> "2",
SQLConf.FILES_OPEN_COST_IN_BYTES.key -> "0") {
checkScan(table.select('c1)) { partitions =>
assert(partitions.size == 3)
assert(partitions(0).files.size == 1)
assert(partitions(1).files.size == 2)
assert(partitions(2).files.size == 1)
}
}
}
}
test("SPARK-14959: Do not call getFileBlockLocations on directories") {
// With PARALLEL_PARTITION_DISCOVERY_THRESHOLD set to 2, we first list file
// statuses at the driver side and then, for the level of p2, list file
// statuses in parallel.
withSQLConf(
"fs.file.impl" -> classOf[MockDistributedFileSystem].getName,
SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "2") {
withTempPath { path =>
val tempDir = path.getCanonicalPath
Seq("p1=1/p2=2/p3=3/file1", "p1=1/p2=3/p3=3/file1").foreach { fileName =>
val file = new File(tempDir, fileName)
assert(file.getParentFile.exists() || file.getParentFile.mkdirs())
util.stringToFile(file, fileName)
}
val fileCatalog = new InMemoryFileIndex(
sparkSession = spark,
rootPathsSpecified = Seq(new Path(tempDir)),
parameters = Map.empty[String, String],
userSpecifiedSchema = None)
// This should not fail.
fileCatalog.listLeafFiles(Seq(new Path(tempDir)))
// Also have an integration test.
checkAnswer(
spark.read.text(tempDir).select("p1", "p2", "p3", "value"),
Row(1, 2, 3, "p1=1/p2=2/p3=3/file1") :: Row(1, 3, 3, "p1=1/p2=3/p3=3/file1") :: Nil)
}
}
}
test("[SPARK-16818] partition pruned file scans implement sameResult correctly") {
withTempPath { path =>
val tempDir = path.getCanonicalPath
spark.range(100)
.selectExpr("id", "id as b")
.write
.partitionBy("id")
.parquet(tempDir)
val df = spark.read.parquet(tempDir)
def getPlan(df: DataFrame): SparkPlan = {
df.queryExecution.executedPlan
}
assert(getPlan(df.where("id = 2")).sameResult(getPlan(df.where("id = 2"))))
assert(!getPlan(df.where("id = 2")).sameResult(getPlan(df.where("id = 3"))))
}
}
test("[SPARK-16818] exchange reuse respects differences in partition pruning") {
spark.conf.set("spark.sql.exchange.reuse", true)
withTempPath { path =>
val tempDir = path.getCanonicalPath
spark.range(10)
.selectExpr("id % 2 as a", "id % 3 as b", "id as c")
.write
.partitionBy("a")
.parquet(tempDir)
val df = spark.read.parquet(tempDir)
val df1 = df.where("a = 0").groupBy("b").agg("c" -> "sum")
val df2 = df.where("a = 1").groupBy("b").agg("c" -> "sum")
checkAnswer(df1.join(df2, "b"), Row(0, 6, 12) :: Row(1, 4, 8) :: Row(2, 10, 5) :: Nil)
}
}
test("spark.files.ignoreCorruptFiles should work in SQL") {
val inputFile = File.createTempFile("input-", ".gz")
try {
// Create a corrupt gzip file
val byteOutput = new ByteArrayOutputStream()
val gzip = new GZIPOutputStream(byteOutput)
try {
gzip.write(Array[Byte](1, 2, 3, 4))
} finally {
gzip.close()
}
val bytes = byteOutput.toByteArray
val o = new FileOutputStream(inputFile)
try {
// It's corrupt since we only write half of bytes into the file.
o.write(bytes.take(bytes.length / 2))
} finally {
o.close()
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
val e = intercept[SparkException] {
spark.read.text(inputFile.toURI.toString).collect()
}
assert(e.getCause.isInstanceOf[EOFException])
assert(e.getCause.getMessage === "Unexpected end of input stream")
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
assert(spark.read.text(inputFile.toURI.toString).collect().isEmpty)
}
} finally {
inputFile.delete()
}
}
test("[SPARK-18753] keep pushed-down null literal as a filter in Spark-side post-filter") {
val ds = Seq(Tuple1(Some(true)), Tuple1(None), Tuple1(Some(false))).toDS()
withTempPath { p =>
val path = p.getAbsolutePath
ds.write.parquet(path)
val readBack = spark.read.parquet(path).filter($"_1" === "true")
val filtered = ds.filter($"_1" === "true").toDF()
checkAnswer(readBack, filtered)
}
}
// Helpers for checking the arguments passed to the FileFormat.
protected val checkPartitionSchema =
checkArgument("partition schema", _.partitionSchema, _: StructType)
protected val checkDataSchema =
checkArgument("data schema", _.dataSchema, _: StructType)
protected val checkDataFilters =
checkArgument("data filters", _.filters.toSet, _: Set[Filter])
/** Helper for building checks on the arguments passed to the reader. */
protected def checkArgument[T](name: String, arg: LastArguments.type => T, expected: T): Unit = {
if (arg(LastArguments) != expected) {
fail(
s"""
|Wrong $name
|expected: $expected
|actual: ${arg(LastArguments)}
""".stripMargin)
}
}
/** Returns a resolved expression for `str` in the context of `df`. */
def resolve(df: DataFrame, str: String): Expression = {
df.select(expr(str)).queryExecution.analyzed.expressions.head.children.head
}
/** Returns a set with all the filters present in the physical plan. */
def getPhysicalFilters(df: DataFrame): ExpressionSet = {
ExpressionSet(
df.queryExecution.executedPlan.collect {
case execution.FilterExec(f, _) => splitConjunctivePredicates(f)
}.flatten)
}
/** Plans the query and calls the provided validation function with the planned partitioning. */
def checkScan(df: DataFrame)(func: Seq[FilePartition] => Unit): Unit = {
func(getFileScanRDD(df).filePartitions)
}
/**
* Constructs a new table given a list of file names and sizes expressed in bytes. The table
* is written out in a temporary directory and any nested directories in the file names
* are automatically created.
*
* When `buckets` is > 0 the returned [[DataFrame]] will have metadata specifying that number of
* buckets. However, it is the responsibility of the caller to assign files to each bucket
* by appending the bucket id to the file names.
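*
* Example (illustrative only): `createTable(Seq("p1=1/file1_0000" -> 4), buckets = 2)` writes a
* 4-byte file under `p1=1/` and attaches a two-bucket spec to the returned [[DataFrame]].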
*/
def createTable(
files: Seq[(String, Int)],
buckets: Int = 0): DataFrame = {
val tempDir = Utils.createTempDir()
files.foreach {
case (name, size) =>
val file = new File(tempDir, name)
assert(file.getParentFile.exists() || file.getParentFile.mkdirs())
util.stringToFile(file, "*" * size)
}
val df = spark.read
.format(classOf[TestFileFormat].getName)
.load(tempDir.getCanonicalPath)
if (buckets > 0) {
val bucketed = df.queryExecution.analyzed transform {
case l @ LogicalRelation(r: HadoopFsRelation, _, _, _) =>
l.copy(relation =
r.copy(bucketSpec =
Some(BucketSpec(numBuckets = buckets, "c1" :: Nil, Nil)))(r.sparkSession))
}
Dataset.ofRows(spark, bucketed)
} else {
df
}
}
def getFileScanRDD(df: DataFrame): FileScanRDD = {
df.queryExecution.executedPlan.collect {
case scan: DataSourceScanExec if scan.inputRDDs().head.isInstanceOf[FileScanRDD] =>
scan.inputRDDs().head.asInstanceOf[FileScanRDD]
}.headOption.getOrElse {
fail(s"No FileScan in query\n${df.queryExecution}")
}
}
}
/** Holds the last arguments passed to [[TestFileFormat]]. */
object LastArguments {
var partitionSchema: StructType = _
var dataSchema: StructType = _
var filters: Seq[Filter] = _
var options: Map[String, String] = _
}
/** A test [[FileFormat]] that records the arguments passed to buildReader, and returns nothing. */
class TestFileFormat extends TextBasedFileFormat {
override def toString: String = "TestFileFormat"
/**
* When possible, this method should return the schema of the given `files`. When the format
* does not support inference, or no valid files are given, it should return None. In these
* cases Spark will require that the user specify the schema manually.
*/
override def inferSchema(
sparkSession: SparkSession,
options: Map[String, String],
files: Seq[FileStatus]): Option[StructType] =
Some(
StructType(Nil)
.add("c1", IntegerType)
.add("c2", IntegerType))
/**
* Prepares a write job and returns an [[OutputWriterFactory]]. Client-side job preparation can
* be done here. For example, a user-defined output committer can be configured by setting the
* output committer class via the spark.sql.sources.outputCommitterClass configuration key.
*/
override def prepareWrite(
sparkSession: SparkSession,
job: Job,
options: Map[String, String],
dataSchema: StructType): OutputWriterFactory = {
throw new UnsupportedOperationException("JUST FOR TESTING")
}
override def buildReader(
sparkSession: SparkSession,
dataSchema: StructType,
partitionSchema: StructType,
requiredSchema: StructType,
filters: Seq[Filter],
options: Map[String, String],
hadoopConf: Configuration): PartitionedFile => Iterator[InternalRow] = {
// Record the arguments so they can be checked in the test case.
LastArguments.partitionSchema = partitionSchema
LastArguments.dataSchema = requiredSchema
LastArguments.filters = filters
LastArguments.options = options
(file: PartitionedFile) => { Iterator.empty }
}
}
class LocalityTestFileSystem extends RawLocalFileSystem {
private val invocations = new AtomicInteger(0)
override def getFileBlockLocations(
file: FileStatus, start: Long, len: Long): Array[BlockLocation] = {
require(!file.isDirectory, "The file path can not be a directory.")
val count = invocations.getAndAdd(1)
Array(new BlockLocation(Array(s"host$count:50010"), Array(s"host$count"), 0, len))
}
}
// This file system is for SPARK-14959 (DistributedFileSystem will throw an exception
// if we call getFileBlockLocations on a dir).
class MockDistributedFileSystem extends RawLocalFileSystem {
override def getFileBlockLocations(
file: FileStatus, start: Long, len: Long): Array[BlockLocation] = {
require(!file.isDirectory, "The file path can not be a directory.")
super.getFileBlockLocations(file, start, len)
}
}
| {
"content_hash": "c43e1bfc17a0bf820a1517cbb66d26d3",
"timestamp": "",
"source": "github",
"line_count": 645,
"max_line_length": 99,
"avg_line_length": 36.457364341085274,
"alnum_prop": 0.6293004465234956,
"repo_name": "yanboliang/spark",
"id": "955c3e3fa6f7470fe8983166e8e674db66506759",
"size": "24315",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "39193"
},
{
"name": "Batchfile",
"bytes": "30468"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26884"
},
{
"name": "Dockerfile",
"bytes": "8699"
},
{
"name": "HTML",
"bytes": "70197"
},
{
"name": "HiveQL",
"bytes": "1823426"
},
{
"name": "Java",
"bytes": "3442382"
},
{
"name": "JavaScript",
"bytes": "196704"
},
{
"name": "Makefile",
"bytes": "9397"
},
{
"name": "PLpgSQL",
"bytes": "191716"
},
{
"name": "PowerShell",
"bytes": "3856"
},
{
"name": "Python",
"bytes": "2869229"
},
{
"name": "R",
"bytes": "1177706"
},
{
"name": "Roff",
"bytes": "15911"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "28354530"
},
{
"name": "Shell",
"bytes": "202851"
},
{
"name": "Thrift",
"bytes": "33605"
},
{
"name": "q",
"bytes": "146878"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width" />
<link rel="shortcut icon" type="image/x-icon" href="../../../../../../../favicon.ico" />
<title>com.google.android.gms.fitness.data | Android Developers</title>
<!-- STYLESHEETS -->
<link rel="stylesheet"
href="http://fonts.googleapis.com/css?family=Roboto+Condensed">
<link rel="stylesheet" href="http://fonts.googleapis.com/css?family=Roboto:light,regular,medium,thin,italic,mediumitalic,bold"
title="roboto">
<link href="../../../../../../../assets/css/default.css?v=5" rel="stylesheet" type="text/css">
<!-- FULLSCREEN STYLESHEET -->
<link href="../../../../../../../assets/css/fullscreen.css" rel="stylesheet" class="fullscreen"
type="text/css">
<!-- JAVASCRIPT -->
<script src="http://www.google.com/jsapi" type="text/javascript"></script>
<script src="../../../../../../../assets/js/android_3p-bundle.js" type="text/javascript"></script>
<script type="text/javascript">
var toRoot = "../../../../../../../";
var metaTags = [];
var devsite = false;
</script>
<script src="../../../../../../../assets/js/docs.js?v=3" type="text/javascript"></script>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-5831155-1', 'android.com');
ga('create', 'UA-49880327-2', 'android.com', {'name': 'universal'}); // New tracker.
ga('send', 'pageview');
ga('universal.send', 'pageview'); // Send page view for new tracker.
</script>
</head>
<body class="gc-documentation
develop reference">
<div id="doc-api-level" class="" style="display:none"></div>
<a name="top"></a>
<a name="top"></a>
<!-- dialog to prompt lang pref change when loaded from hardcoded URL
<div id="langMessage" style="display:none">
<div>
<div class="lang en">
<p>You requested a page in English, would you like to proceed with this language setting?</p>
</div>
<div class="lang es">
<p>You requested a page in Spanish (Español), would you like to proceed with this language setting?</p>
</div>
<div class="lang ja">
<p>You requested a page in Japanese (日本語), would you like to proceed with this language setting?</p>
</div>
<div class="lang ko">
<p>You requested a page in Korean (한국어), would you like to proceed with this language setting?</p>
</div>
<div class="lang ru">
<p>You requested a page in Russian (Русский), would you like to proceed with this language setting?</p>
</div>
<div class="lang zh-cn">
<p>You requested a page in Simplified Chinese (简体中文), would you like to proceed with this language setting?</p>
</div>
<div class="lang zh-tw">
<p>You requested a page in Traditional Chinese (繁體中文), would you like to proceed with this language setting?</p>
</div>
<a href="#" class="button yes" onclick="return false;">
<span class="lang en">Yes</span>
<span class="lang es">Sí</span>
<span class="lang ja">Yes</span>
<span class="lang ko">Yes</span>
<span class="lang ru">Yes</span>
<span class="lang zh-cn">是的</span>
<span class="lang zh-tw">没有</span>
</a>
<a href="#" class="button" onclick="$('#langMessage').hide();return false;">
<span class="lang en">No</span>
<span class="lang es">No</span>
<span class="lang ja">No</span>
<span class="lang ko">No</span>
<span class="lang ru">No</span>
<span class="lang zh-cn">没有</span>
<span class="lang zh-tw">没有</span>
</a>
</div>
</div> -->
<!-- Header -->
<div id="header-wrapper">
<div id="header">
<div class="wrap" id="header-wrap">
<div class="col-3 logo">
<a href="../../../../../../../index.html">
<img src="../../../../../../../assets/images/dac_logo.png"
srcset="../../../../../../../assets/images/[email protected] 2x"
width="123" height="25" alt="Android Developers" />
</a>
<div class="btn-quicknav" id="btn-quicknav">
<a href="#" class="arrow-inactive">Quicknav</a>
<a href="#" class="arrow-active">Quicknav</a>
</div>
</div>
<ul class="nav-x col-9">
<li class="design">
<a href="../../../../../../../design/index.html"
zh-tw-lang="設計"
zh-cn-lang="设计"
ru-lang="Проектирование"
ko-lang="디자인"
ja-lang="設計"
es-lang="Diseñar"
>Design</a></li>
<li class="develop"><a href="../../../../../../../develop/index.html"
zh-tw-lang="開發"
zh-cn-lang="开发"
ru-lang="Разработка"
ko-lang="개발"
ja-lang="開発"
es-lang="Desarrollar"
>Develop</a></li>
<li class="distribute last"><a href="../../../../../../../distribute/googleplay/index.html"
zh-tw-lang="發佈"
zh-cn-lang="分发"
ru-lang="Распространение"
ko-lang="배포"
ja-lang="配布"
es-lang="Distribuir"
>Distribute</a></li>
</ul>
<div class="menu-container">
<div class="moremenu">
<div id="more-btn"></div>
</div>
<div class="morehover" id="moremenu">
<div class="top"></div>
<div class="mid">
<div class="header">Links</div>
<ul>
<li><a href="https://play.google.com/apps/publish/" target="_googleplay">Google Play Developer Console</a></li>
<li><a href="http://android-developers.blogspot.com/">Android Developers Blog</a></li>
<li><a href="../../../../../../../about/index.html">About Android</a></li>
</ul>
<div class="header">Android Sites</div>
<ul>
<li><a href="http://www.android.com">Android.com</a></li>
<li class="active"><a>Android Developers</a></li>
<li><a href="http://source.android.com">Android Open Source Project</a></li>
</ul>
<br class="clearfix" />
</div><!-- end 'mid' -->
<div class="bottom"></div>
</div><!-- end 'moremenu' -->
<div class="search" id="search-container">
<div class="search-inner">
<div id="search-btn"></div>
<div class="left"></div>
<form onsubmit="return submit_search()">
<input id="search_autocomplete" type="text" value="" autocomplete="off" name="q"
onfocus="search_focus_changed(this, true)" onblur="search_focus_changed(this, false)"
onkeydown="return search_changed(event, true, '../../../../../../../')"
onkeyup="return search_changed(event, false, '../../../../../../../')" />
</form>
<div class="right"></div>
<a class="close hide">close</a>
<div class="left"></div>
<div class="right"></div>
</div><!-- end search-inner -->
</div><!-- end search-container -->
<div class="search_filtered_wrapper reference">
<div class="suggest-card reference no-display">
<ul class="search_filtered">
</ul>
</div>
</div>
<div class="search_filtered_wrapper docs">
<div class="suggest-card dummy no-display"> </div>
<div class="suggest-card develop no-display">
<ul class="search_filtered">
</ul>
<div class="child-card guides no-display">
</div>
<div class="child-card training no-display">
</div>
<div class="child-card samples no-display">
</div>
</div>
<div class="suggest-card design no-display">
<ul class="search_filtered">
</ul>
</div>
<div class="suggest-card distribute no-display">
<ul class="search_filtered">
</ul>
</div>
</div>
</div><!-- end menu-container (search and menu widget) -->
<!-- Expanded quicknav -->
<div id="quicknav" class="col-13">
<ul>
<li class="about">
<ul>
<li><a href="../../../../../../../about/index.html">About</a></li>
<li><a href="../../../../../../../wear/index.html">Wear</a></li>
<li><a href="../../../../../../../tv/index.html">TV</a></li>
<li><a href="../../../../../../../auto/index.html">Auto</a></li>
</ul>
</li>
<li class="design">
<ul>
<li><a href="../../../../../../../design/index.html">Get Started</a></li>
<li><a href="../../../../../../../design/devices.html">Devices</a></li>
<li><a href="../../../../../../../design/style/index.html">Style</a></li>
<li><a href="../../../../../../../design/patterns/index.html">Patterns</a></li>
<li><a href="../../../../../../../design/building-blocks/index.html">Building Blocks</a></li>
<li><a href="../../../../../../../design/downloads/index.html">Downloads</a></li>
<li><a href="../../../../../../../design/videos/index.html">Videos</a></li>
</ul>
</li>
<li class="develop">
<ul>
<li><a href="../../../../../../../training/index.html"
zh-tw-lang="訓練課程"
zh-cn-lang="培训"
ru-lang="Курсы"
ko-lang="교육"
ja-lang="トレーニング"
es-lang="Capacitación"
>Training</a></li>
<li><a href="../../../../../../../guide/index.html"
zh-tw-lang="API 指南"
zh-cn-lang="API 指南"
ru-lang="Руководства по API"
ko-lang="API 가이드"
ja-lang="API ガイド"
es-lang="Guías de la API"
>API Guides</a></li>
<li><a href="../../../../../../../reference/packages.html"
zh-tw-lang="參考資源"
zh-cn-lang="参考"
ru-lang="Справочник"
ko-lang="참조문서"
ja-lang="リファレンス"
es-lang="Referencia"
>Reference</a></li>
<li><a href="../../../../../../../sdk/index.html"
zh-tw-lang="相關工具"
zh-cn-lang="工具"
ru-lang="Инструменты"
ko-lang="도구"
ja-lang="ツール"
es-lang="Herramientas"
>Tools</a>
</li>
<li><a href="../../../../../../../google/index.html">Google Services</a>
</li>
</ul>
</li>
<li class="distribute last">
<ul>
<li><a href="../../../../../../../distribute/googleplay/index.html">Google Play</a></li>
<li><a href="../../../../../../../distribute/essentials/index.html">Essentials</a></li>
<li><a href="../../../../../../../distribute/users/index.html">Get Users</a></li>
<li><a href="../../../../../../../distribute/engage/index.html">Engage & Retain</a></li>
<li><a href="../../../../../../../distribute/monetize/index.html">Monetize</a></li>
<li><a href="../../../../../../../distribute/analyze/index.html">Analyze</a></li>
<li><a href="../../../../../../../distribute/tools/index.html">Tools & Reference</a></li>
<li><a href="../../../../../../../distribute/stories/index.html">Developer Stories</a></li>
</ul>
</li>
</ul>
</div><!-- /Expanded quicknav -->
</div><!-- end header-wrap.wrap -->
</div><!-- end header -->
<!-- Secondary x-nav -->
<div id="nav-x">
<div class="wrap" style="position:relative;z-index:1">
<ul class="nav-x col-9 develop" style="width:100%">
<li class="training"><a href="../../../../../../../training/index.html"
zh-tw-lang="訓練課程"
zh-cn-lang="培训"
ru-lang="Курсы"
ko-lang="교육"
ja-lang="トレーニング"
es-lang="Capacitación"
>Training</a></li>
<li class="guide"><a href="../../../../../../../guide/index.html"
zh-tw-lang="API 指南"
zh-cn-lang="API 指南"
ru-lang="Руководства по API"
ko-lang="API 가이드"
ja-lang="API ガイド"
es-lang="Guías de la API"
>API Guides</a></li>
<li class="reference"><a href="../../../../../../../reference/packages.html"
zh-tw-lang="參考資源"
zh-cn-lang="参考"
ru-lang="Справочник"
ko-lang="참조문서"
ja-lang="リファレンス"
es-lang="Referencia"
>Reference</a></li>
<li class="tools"><a href="../../../../../../../sdk/index.html"
zh-tw-lang="相關工具"
zh-cn-lang="工具"
ru-lang="Инструменты"
ko-lang="도구"
ja-lang="ツール"
es-lang="Herramientas"
>Tools</a></li>
<li class="google"><a href="../../../../../../../google/index.html"
>Google Services</a>
</li>
</ul>
</div>
</div>
<!-- /Secondary x-nav DEVELOP -->
<div id="searchResults" class="wrap" style="display:none;">
<h2 id="searchTitle">Results</h2>
<div id="leftSearchControl" class="search-control">Loading...</div>
</div>
</div> <!--end header-wrapper -->
<div id="sticky-header">
<div>
<a class="logo" href="#top"></a>
<a class="top" href="#top"></a>
<ul class="breadcrumb">
<li class="current">com.google.android.gms.fitness.data</li>
</ul>
</div>
</div>
<div class="wrap clearfix" id="body-content">
<div class="col-4" id="side-nav" itemscope itemtype="http://schema.org/SiteNavigationElement">
<div id="devdoc-nav">
<div id="api-nav-header">
<div id="api-level-toggle">
<label for="apiLevelCheckbox" class="disabled"
title="Select your target API level to dim unavailable APIs">API level: </label>
<div class="select-wrapper">
<select id="apiLevelSelector">
<!-- option elements added by buildApiLevelSelector() -->
</select>
</div>
</div><!-- end toggle -->
<div id="api-nav-title">Android APIs</div>
</div><!-- end nav header -->
<script>
var SINCE_DATA = [ ];
buildApiLevelSelector();
</script>
<div id="swapper">
<div id="nav-panels">
<div id="resize-packages-nav">
<div id="packages-nav" class="scroll-pane">
<ul>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/package-summary.html">com.google.android.gms</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/actions/package-summary.html">com.google.android.gms.actions</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/ads/package-summary.html">com.google.android.gms.ads</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/ads/doubleclick/package-summary.html">com.google.android.gms.ads.doubleclick</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/ads/identifier/package-summary.html">com.google.android.gms.ads.identifier</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/ads/mediation/package-summary.html">com.google.android.gms.ads.mediation</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/ads/mediation/admob/package-summary.html">com.google.android.gms.ads.mediation.admob</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/ads/mediation/customevent/package-summary.html">com.google.android.gms.ads.mediation.customevent</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/ads/purchase/package-summary.html">com.google.android.gms.ads.purchase</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/ads/search/package-summary.html">com.google.android.gms.ads.search</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/analytics/package-summary.html">com.google.android.gms.analytics</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/analytics/ecommerce/package-summary.html">com.google.android.gms.analytics.ecommerce</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/appindexing/package-summary.html">com.google.android.gms.appindexing</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/appstate/package-summary.html">com.google.android.gms.appstate</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/auth/package-summary.html">com.google.android.gms.auth</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/cast/package-summary.html">com.google.android.gms.cast</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/common/package-summary.html">com.google.android.gms.common</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/common/annotation/package-summary.html">com.google.android.gms.common.annotation</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/common/api/package-summary.html">com.google.android.gms.common.api</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/common/data/package-summary.html">com.google.android.gms.common.data</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/common/images/package-summary.html">com.google.android.gms.common.images</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/drive/package-summary.html">com.google.android.gms.drive</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/drive/events/package-summary.html">com.google.android.gms.drive.events</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/drive/metadata/package-summary.html">com.google.android.gms.drive.metadata</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/drive/query/package-summary.html">com.google.android.gms.drive.query</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/drive/widget/package-summary.html">com.google.android.gms.drive.widget</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/fitness/package-summary.html">com.google.android.gms.fitness</a></li>
<li class="selected api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/fitness/data/package-summary.html">com.google.android.gms.fitness.data</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/fitness/request/package-summary.html">com.google.android.gms.fitness.request</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/fitness/result/package-summary.html">com.google.android.gms.fitness.result</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/fitness/service/package-summary.html">com.google.android.gms.fitness.service</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/games/package-summary.html">com.google.android.gms.games</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/games/achievement/package-summary.html">com.google.android.gms.games.achievement</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/games/event/package-summary.html">com.google.android.gms.games.event</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/games/leaderboard/package-summary.html">com.google.android.gms.games.leaderboard</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/games/multiplayer/package-summary.html">com.google.android.gms.games.multiplayer</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/games/multiplayer/realtime/package-summary.html">com.google.android.gms.games.multiplayer.realtime</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/games/multiplayer/turnbased/package-summary.html">com.google.android.gms.games.multiplayer.turnbased</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/games/quest/package-summary.html">com.google.android.gms.games.quest</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/games/request/package-summary.html">com.google.android.gms.games.request</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/games/snapshot/package-summary.html">com.google.android.gms.games.snapshot</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/gcm/package-summary.html">com.google.android.gms.gcm</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/identity/intents/package-summary.html">com.google.android.gms.identity.intents</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/identity/intents/model/package-summary.html">com.google.android.gms.identity.intents.model</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/location/package-summary.html">com.google.android.gms.location</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/location/places/package-summary.html">com.google.android.gms.location.places</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/location/places/ui/package-summary.html">com.google.android.gms.location.places.ui</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/maps/package-summary.html">com.google.android.gms.maps</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/maps/model/package-summary.html">com.google.android.gms.maps.model</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/nearby/package-summary.html">com.google.android.gms.nearby</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/nearby/connection/package-summary.html">com.google.android.gms.nearby.connection</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/panorama/package-summary.html">com.google.android.gms.panorama</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/plus/package-summary.html">com.google.android.gms.plus</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/plus/model/moments/package-summary.html">com.google.android.gms.plus.model.moments</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/plus/model/people/package-summary.html">com.google.android.gms.plus.model.people</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/safetynet/package-summary.html">com.google.android.gms.safetynet</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/security/package-summary.html">com.google.android.gms.security</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/tagmanager/package-summary.html">com.google.android.gms.tagmanager</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/wallet/package-summary.html">com.google.android.gms.wallet</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/wallet/fragment/package-summary.html">com.google.android.gms.wallet.fragment</a></li>
<li class="api apilevel-">
<a href="../../../../../../../reference/com/google/android/gms/wearable/package-summary.html">com.google.android.gms.wearable</a></li>
</ul><br/>
</div> <!-- end packages-nav -->
</div> <!-- end resize-packages -->
<div id="classes-nav" class="scroll-pane">
<ul>
<li><h2>Classes</h2>
<ul>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/BleDevice.html">BleDevice</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Bucket.html">Bucket</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataPoint.html">DataPoint</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataSet.html">DataSet</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataSource.html">DataSource</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataSource.Builder.html">DataSource.Builder</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataType.html">DataType</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Device.html">Device</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Field.html">Field</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Session.html">Session</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Session.Builder.html">Session.Builder</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Subscription.html">Subscription</a></li>
<li class="api apilevel-"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Value.html">Value</a></li>
</ul>
</li>
</ul><br/>
</div><!-- end classes -->
</div><!-- end nav-panels -->
<div id="nav-tree" style="display:none" class="scroll-pane">
<div id="tree-list"></div>
</div><!-- end nav-tree -->
</div><!-- end swapper -->
<div id="nav-swap">
<a class="fullscreen">fullscreen</a>
<a href='#' onclick='swapNav();return false;'><span id='tree-link'>Use Tree Navigation</span><span id='panel-link' style='display:none'>Use Panel Navigation</span></a>
</div>
</div> <!-- end devdoc-nav -->
</div> <!-- end side-nav -->
<script type="text/javascript">
// init fullscreen based on user pref
var fullscreen = readCookie("fullscreen");
if (fullscreen != 0) {
if (fullscreen == "false") {
toggleFullscreen(false);
} else {
toggleFullscreen(true);
}
}
// init nav version for mobile
if (isMobile) {
swapNav(); // tree view should be used on mobile
$('#nav-swap').hide();
} else {
chooseDefaultNav();
if ($("#nav-tree").is(':visible')) {
init_default_navtree("../../../../../../../");
}
}
// scroll the selected page into view
$(document).ready(function() {
scrollIntoView("packages-nav");
scrollIntoView("classes-nav");
});
</script>
<div class="col-12" id="doc-col">
<div id="api-info-block">
<div class="api-level">
</div>
</div>
<div id="jd-header">
package
<h1>com.google.android.gms.fitness.data</h1>
</div><!-- end header -->
<div id="naMessage"></div>
<div id="jd-content" class="api apilevel-">
<div class="jd-descr">
Contains the Google Fit data model.
</div>
<h2>Classes</h2>
<div class="jd-sumtable">
<table class="jd-sumtable-expando">
<tr class="alt-color api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/BleDevice.html">BleDevice</a></td>
<td class="jd-descrcol" width="100%">
Representation of a BLE Device (such as a heart rate monitor) that broadcasts information
about its on board sensors.
</td>
</tr>
<tr class=" api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Bucket.html">Bucket</a></td>
<td class="jd-descrcol" width="100%">
A bucket represents a time interval over which aggregated data is computed.
</td>
</tr>
<tr class="alt-color api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataPoint.html">DataPoint</a></td>
<td class="jd-descrcol" width="100%">
Represents a single data point in a <code><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataType.html">data type's</a></code> stream from a particular
<code><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataSource.html">data source</a></code>.
</td>
</tr>
<tr class=" api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataSet.html">DataSet</a></td>
<td class="jd-descrcol" width="100%">
Represents a fixed set of <code><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataPoint.html">data points</a></code> in a <code><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataType.html">data type's</a></code> stream
from a particular <code><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataSource.html">data source</a></code>.
</td>
</tr>
<tr class="alt-color api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataSource.html">DataSource</a></td>
<td class="jd-descrcol" width="100%">
Definition of a unique source of sensor data.
</td>
</tr>
<tr class=" api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataSource.Builder.html">DataSource.Builder</a></td>
<td class="jd-descrcol" width="100%">
A builder that can be used to construct new data source objects.
</td>
</tr>
<tr class="alt-color api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataType.html">DataType</a></td>
<td class="jd-descrcol" width="100%">
The data type defines the schema for a stream of data being collected by, inserted into, or
queried from Google Fit.
</td>
</tr>
<tr class=" api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Device.html">Device</a></td>
<td class="jd-descrcol" width="100%">
Representation of an integrated device (such as a phone or a wearable) that can hold sensors.
</td>
</tr>
<tr class="alt-color api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Field.html">Field</a></td>
<td class="jd-descrcol" width="100%">
A field represents one dimension of a data type.
</td>
</tr>
<tr class=" api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Session.html">Session</a></td>
<td class="jd-descrcol" width="100%">
A Session represents a time interval with associated metadata.
</td>
</tr>
<tr class="alt-color api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Session.Builder.html">Session.Builder</a></td>
<td class="jd-descrcol" width="100%">
Builder used to create new Sessions.
</td>
</tr>
<tr class=" api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Subscription.html">Subscription</a></td>
<td class="jd-descrcol" width="100%">
Subscription for persistent storage of data from a given <code><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataSource.html">data source</a></code> or for a
given <code><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataType.html">data type</a></code>.
</td>
</tr>
<tr class="alt-color api apilevel-" >
<td class="jd-linkcol"><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Value.html">Value</a></td>
<td class="jd-descrcol" width="100%">
Holder object for the value of a single <code><a href="../../../../../../../reference/com/google/android/gms/fitness/data/Field.html">field</a></code> in a <code><a href="../../../../../../../reference/com/google/android/gms/fitness/data/DataPoint.html">data point</a></code>.
</td>
</tr>
</table>
</div>
<div id="footer" class="wrap" >
<div id="copyright">
Except as noted, this content is licensed under <a
href="http://www.apache.org/licenses/LICENSE-2.0">Apache 2.0</a>.
For details and restrictions, see the <a href="../../../../../../../license.html">
Content License</a>.
</div>
<div id="build_info">
Android GmsCore 1784785 r —
<script src="../../../../../../../timestamp.js" type="text/javascript"></script>
<script>document.write(BUILD_TIMESTAMP)</script>
</div>
<div id="footerlinks">
<p>
<a href="../../../../../../../about/index.html">About Android</a> |
<a href="../../../../../../../legal.html">Legal</a> |
<a href="../../../../../../../support.html">Support</a>
</p>
</div>
</div> <!-- end footer -->
</div><!-- end jd-content -->
</div><!-- doc-content -->
</div> <!-- end body-content -->
</body>
</html>
| {
"content_hash": "f66ccae8a8f902c91b175022ce28d1c5",
"timestamp": "",
"source": "github",
"line_count": 958,
"max_line_length": 292,
"avg_line_length": 38.71085594989562,
"alnum_prop": 0.5469596872050695,
"repo_name": "CMPUT301W15T05/TrackerExpress",
"id": "789b732f207b8d8a4e10e46151a29d4dfca7f155",
"size": "37514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-play-services_lib/docs/reference/com/google/android/gms/fitness/data/package-summary.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "248814"
},
{
"name": "HTML",
"bytes": "44729034"
},
{
"name": "Java",
"bytes": "313720"
},
{
"name": "JavaScript",
"bytes": "608845"
}
],
"symlink_target": ""
} |
<?php
namespace Admin\Controller;
use Zend\View\Model\ViewModel;
use Core\Controller\ActionController;
use Admin\Model\User;
use Admin\Form\User as UserForm;
use Doctrine\ORM\EntityManager;
/**
 * Controller that manages users
 *
 * @category Admin
 * @package Controller
 * @author Elton Minetto <[email protected]>
 */
class UserController extends ActionController
{
/**
* @var Doctrine\ORM\EntityManager
*/
protected $em;
public function setEntityManager(EntityManager $em)
{
$this->em = $em;
}
public function getEntityManager()
{
if (null === $this->em) {
$this->em = $this->getServiceLocator()->get('Doctrine\ORM\EntityManager');
}
return $this->em;
}
/**
 * Shows the registered users
 * @return ViewModel
 */
public function indexAction()
{
$users = $this->getEntityManager()
->getRepository('Admin\Model\User')
->findAll();
return new ViewModel(array(
'users' => $users
));
}
/**
 * Creates or edits a user
 * @return ViewModel|\Zend\Http\Response
 */
public function saveAction()
{
$form = new UserForm();
$request = $this->getRequest();
if ($request->isPost()) {
$user = new User;
$form->setInputFilter($user->getInputFilter());
$form->setData($request->getPost());
if ($form->isValid()) {
$data = $form->getData();
unset($data['submit']);
$data['valid'] = 1;
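// Only an MD5 digest of the password is persisted.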
$data['password'] = md5($data['password']);
if (isset($data['id']) && $data['id'] > 0) {
$user = $this->getEntityManager()->find('Admin\Model\User', $data['id']);
}
$user->setData($data);
$this->getEntityManager()->persist($user);
$this->getEntityManager()->flush();
return $this->redirect()->toUrl('/admin/user');
}
}
$id = (int) $this->params()->fromRoute('id', 0);
if ($id > 0) {
$user = $this->getEntityManager()->find('Admin\Model\User', $id);
$form->bind($user);
$form->get('submit')->setAttribute('value', 'Edit');
}
return new ViewModel(
array('form' => $form)
);
}
/**
 * Deletes a user
 * @return \Zend\Http\Response
 */
public function deleteAction()
{
$id = (int) $this->params()->fromRoute('id', 0);
if ($id == 0) {
throw new \Exception("Código obrigatório");
}
$user = $this->getEntityManager()->find('Admin\Model\User', $id);
if ($user) {
$this->getEntityManager()->remove($user);
$this->getEntityManager()->flush();
}
return $this->redirect()->toUrl('/admin/user');
}
} | {
"content_hash": "ec97b442b0509d784ddeb13485192c86",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 93,
"avg_line_length": 26.783783783783782,
"alnum_prop": 0.4961318533467878,
"repo_name": "andersondefaria1980/zf2napratica",
"id": "44abb31afd76837d86c04d438ba1fcad35e4c321",
"size": "2976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "module/Admin/src/Admin/Controller/UserController.php",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "288"
},
{
"name": "CSS",
"bytes": "4715"
},
{
"name": "HTML",
"bytes": "1150802"
},
{
"name": "PHP",
"bytes": "163531"
}
],
"symlink_target": ""
} |
using System.Collections.Generic;
using System.IO;
using Build.DomainModel;
using Build.IO;
namespace Build.Watchdog
{
/// <summary>
	/// Is responsible for creating in-memory representations of files in a sandbox.
/// </summary>
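	/// <example>
	/// Illustrative usage (assumes some IFileSystem implementation is at hand;
	/// the path below is a placeholder):
	/// <code>
	/// var loader = new SandboxLoader(fileSystem);
	/// loader.CreateOrUpdate(@"C:\work\MyApp\MyApp.csproj");
	/// Sandbox sandbox = loader.CreateSandbox();
	/// </code>
	/// </example>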
public sealed class SandboxLoader
{
private readonly Dictionary<Filetype, IFileStore> _files;
private readonly CSharpProjectStore _cSharpProjects;
private readonly SolutionStore _solutions;
private bool _isSandboxDirty;
private Sandbox _lastSandbox;
public SandboxLoader(IFileSystem filesystem)
{
_cSharpProjects = new CSharpProjectStore(filesystem);
_solutions = new SolutionStore(filesystem);
_files = new Dictionary<Filetype, IFileStore>
{
{Filetype.Project, _cSharpProjects},
{Filetype.Solution, _solutions}
};
_isSandboxDirty = true;
}
private static bool TryGetExtension(string filename, out string extension)
{
extension = Path.GetExtension(filename);
if (extension == null)
return false;
extension = extension.ToLowerInvariant();
return true;
}
private static Filetype GetFileType(string filename)
{
string extension;
if (!TryGetExtension(filename, out extension))
{
return Filetype.Unknown;
}
switch (extension)
{
case ".csproj":
return Filetype.Project;
case "*.sln":
return Filetype.Solution;
default:
return Filetype.Unknown;
}
}
public void CreateOrUpdate(string filename)
{
Filetype type = GetFileType(filename);
IFileStore store;
if (_files.TryGetValue(type, out store))
{
store.CreateOrUpdate(filename);
_isSandboxDirty = true;
}
}
public void Delete(string filename)
{
Filetype type = GetFileType(filename);
IFileStore store;
if (_files.TryGetValue(type, out store))
{
store.Remove(filename);
_isSandboxDirty = true;
}
}
/// <summary>
/// Creates a new sandbox from the current values.
/// </summary>
/// <returns></returns>
public Sandbox CreateSandbox()
{
// We are called pretty regularly and we want to avoid creating garbage when *nothing* has changed
// (which is a pretty significant amount of total time this class is used) and therefore we
// only create new objects when something has changed.
if (_isSandboxDirty)
{
var projects = _cSharpProjects.CreateProjects();
IEnumerable<Solution> solutions = _solutions.CreateSolutions(projects);
_lastSandbox = new Sandbox(solutions, projects.Values);
_isSandboxDirty = false;
}
return _lastSandbox;
}
}
} | {
"content_hash": "7ccd689d573b26622e925cb4b5b26b09",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 101,
"avg_line_length": 24.737864077669904,
"alnum_prop": 0.695447409733124,
"repo_name": "Kittyfisto/.NETBuild",
"id": "ea70d1b0170e6b1c191395b4f94aa9c4445e720e",
"size": "2550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Build/Watchdog/SandboxLoader.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "397264"
},
{
"name": "Smalltalk",
"bytes": "17"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="utf-8" standalone="yes" ?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
<channel>
<title> Palmer | Gregor von Laszewski</title>
<link>/author/palmer/</link>
<atom:link href="/author/palmer/index.xml" rel="self" type="application/rss+xml" />
<description> Palmer</description>
<generator>Source Themes Academic (https://sourcethemes.com/academic/)</generator><language>en-us</language><lastBuildDate>Tue, 01 Jan 2013 00:00:00 +0000</lastBuildDate>
<image>
<url>/images/icon_hufdd866d90d76849587aac6fbf27da1ac_464_512x512_fill_lanczos_center_3.png</url>
<title> Palmer</title>
<link>/author/palmer/</link>
</image>
<item>
<title>Using XDMoD to Facilitate XSEDE Operations, Planning and Analysis</title>
<link>/publication/las-2013-xdmod/</link>
<pubDate>Tue, 01 Jan 2013 00:00:00 +0000</pubDate>
<guid>/publication/las-2013-xdmod/</guid>
<description></description>
</item>
</channel>
</rss>
| {
"content_hash": "2c372d61a4e28736532fc498d6b703d9",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 174,
"avg_line_length": 43.166666666666664,
"alnum_prop": 0.667953667953668,
"repo_name": "laszewski/laszewski.github.io",
"id": "b0fb6acb796d9bf49575effeaef241cb3935500f",
"size": "1036",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "docs/author/palmer/index.xml",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39364"
},
{
"name": "Dockerfile",
"bytes": "3027"
},
{
"name": "Emacs Lisp",
"bytes": "113"
},
{
"name": "HTML",
"bytes": "4876120"
},
{
"name": "JavaScript",
"bytes": "38361"
},
{
"name": "Jupyter Notebook",
"bytes": "20048"
},
{
"name": "Makefile",
"bytes": "6013"
},
{
"name": "Python",
"bytes": "5846"
},
{
"name": "SCSS",
"bytes": "269531"
},
{
"name": "Shell",
"bytes": "11650"
},
{
"name": "TeX",
"bytes": "856927"
}
],
"symlink_target": ""
} |
<?php
/* Prototype : string gmdate(string format [, long timestamp])
* Description: Format a GMT date/time
* Source code: ext/date/php_date.c
* Alias to functions:
*/
echo "*** Testing gmdate() : usage variation ***\n";
// Initialise all required variables
date_default_timezone_set('UTC');
$timestamp = mktime(8, 8, 8, 8, 8, 2008);
$timestamp_non_leap_year = mktime(8, 8, 8, 8, 8, 2007);
echo "\n-- Testing gmdate() function with checking non leap year using Leap Year format --\n";
var_dump( gmdate('L', $timestamp_non_leap_year) );
echo "\n-- Testing gmdate() function with checking leap year using Leap Year format --\n";
var_dump( gmdate('L') );
var_dump( gmdate('L', $timestamp) );
echo "\n-- Testing gmdate() function with ISO-8601 year number format --\n";
var_dump( gmdate('o') );
var_dump( gmdate('o', $timestamp) );
echo "\n-- Testing gmdate() function with full numeric representation of year format --\n";
var_dump( gmdate('Y') );
var_dump( gmdate('Y', $timestamp) );
echo "\n-- Testing gmdate() function with 2 digit representation year format --\n";
var_dump( gmdate('y') );
var_dump( gmdate('y', $timestamp) );
?>
===DONE===
| {
"content_hash": "a2f39e18c0c091dac0532dcc1661d665",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 94,
"avg_line_length": 33.02857142857143,
"alnum_prop": 0.6652249134948097,
"repo_name": "evnix/go-php-parser",
"id": "0261e35272de22ddf8ab4077f68adabcda8b57a7",
"size": "1156",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "testdata/fuzzdir/corpus/ext_date_tests_gmdate_variation7.php",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Go",
"bytes": "176842"
},
{
"name": "PHP",
"bytes": "9752494"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
} |
import React, {PropTypes} from 'react'
import * as d3 from 'd3'
import autobind from 'autobind-decorator'
import SightingGraph from './SightingGraph'
class Timeline extends React.Component {
constructor (props) {
super(props)
this.state = {
padding: [2, 8],
x: 0,
cursorY: 0,
width: 0,
height: 0,
data: [],
range: [0, 0],
domain: [0, 0],
scaleDays: null,
scaleTime: null,
totalSightings: [],
mouseOver: false
}
}
componentWillReceiveProps (nextProps) {
const { expedition } = nextProps
if (expedition) {
const { padding } = this.state
const startDate = expedition.start
const height = window.innerHeight - 72
const width = window.innerWidth * 0.05
const x = width * 0.58
var dayCount = expedition.dayCount + 1
var data = []
for (var i = 0; i < dayCount; i++) {
var d = new Date(startDate.getTime() + i * (1000 * 3600 * 24))
data.push(d)
}
const range = [0 + padding[1], height - padding[1]]
const domain = [dayCount - 1, 0]
const scaleDays = d3.scaleLinear()
.domain(domain)
.range(range)
const scaleTime = d3.scaleLinear()
.domain([startDate.getTime() + (dayCount - 1) * (1000 * 3600 * 24), startDate.getTime()])
.range(range)
const cursorY = this.state.mouseOver ? this.state.cursorY : (scaleTime(expedition.currentDate.getTime()) - 8)
var totalSightings = expedition.totalSightings
this.setState({
...this.state,
x,
cursorY,
width,
height,
data,
range,
domain,
scaleDays,
scaleTime,
totalSightings
})
}
}
@autobind
onClick (e) {
const { scaleTime, range } = this.state
const { jumpTo, expeditionID } = this.props
var y = e.nativeEvent.offsetY
jumpTo(new Date(scaleTime.invert(Math.max(range[0] + 1, Math.min(range[1] - 1, y)))), expeditionID)
}
@autobind
onMouseMove (e) {
const { range, padding } = this.state
const cursorY = Math.max(range[0], Math.min(range[1], e.nativeEvent.offsetY)) - padding[1]
this.setState({
...this.state,
cursorY,
mouseOver: true
})
}
@autobind
onMouseOut (e) {
const { expedition } = this.props
const { scaleTime } = this.state
const cursorY = (scaleTime(expedition.currentDate.getTime()) - 8)
this.setState({
...this.state,
cursorY,
mouseOver: false
})
}
render () {
const { expedition } = this.props
if (!expedition) return <svg id="timeline"></svg>
const { width, height, data, x, range, scaleDays, cursorY, totalSightings, padding } = this.state
const days = data.map((d, i) => {
return <circle cx={x} cy={scaleDays(i)} r={3} key={i} fill="white"/>
})
return (
<svg
id="timeline"
className={location.pathname === '/about' || location.pathname === '/' ? 'invisible' : 'visible'}
style={{height: height + 'px'}}
onMouseOut={this.onMouseOut}
onMouseMove={this.onMouseMove}
onClick={this.onClick}>
<filter id="dropshadow" height="120%">
<feGaussianBlur in="SourceAlpha" stdDeviation="3"/>
<feOffset dx="2" dy="0" result="offsetblur"/>
<feMerge>
<feMergeNode/>
<feMergeNode in="SourceGraphic"/>
</feMerge>
</filter>
{/*<g transform={'translate(' + 0 + ',' + padding[1] + ')'} style={{pointerEvents: 'none'}}>
<SightingGraph sightings={totalSightings} width={width} height={height - padding[1] * 2}/>
</g>*/}
<line x1={x} x2={x} y1={range[0]} y2={range[1]} style={{stroke: 'white'}}/>
<g>{ days }</g>
<g transform={'translate(' + (x - 20) + ',' + cursorY + ')'} style={{pointerEvents: 'none'}}>
<path fill="#F9D144" d="M8,0c5,0,12,8,12,8s-7,8-12,8c-4.4,0-8-3.6-8-8C0,3.6,3.6,0,8,0z" style={{filter: 'url(#dropshadow)'}}/>
<circle fill="#1F1426" cx="7.9" cy="7.8" r="3"/>
</g>
</svg>
)
}
}
Timeline.propTypes = {
expedition: PropTypes.object,
jumpTo: PropTypes.func.isRequired,
expeditionID: PropTypes.string
}
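
// Illustrative usage (prop values are placeholders):
//   <Timeline expedition={expedition} expeditionID={expedition.id} jumpTo={jumpToDate} />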
export default Timeline
| {
"content_hash": "3fee18f7c4419d641c83655fc4f48642",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 136,
"avg_line_length": 29.41780821917808,
"alnum_prop": 0.5662398137369034,
"repo_name": "O-C-R/intotheokavango",
"id": "5c259917422dc5c032865f616c2280475bdeac35",
"size": "4295",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "static/js/components/Timeline.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "80160"
},
{
"name": "GLSL",
"bytes": "3892"
},
{
"name": "HTML",
"bytes": "189733"
},
{
"name": "JavaScript",
"bytes": "377069"
},
{
"name": "Python",
"bytes": "126328"
},
{
"name": "Shell",
"bytes": "12804"
}
],
"symlink_target": ""
} |
package org.kie.server.integrationtests.shared;
import org.kie.server.integrationtests.shared.basetests.KieServerBaseIntegrationTest;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import org.drools.compiler.kie.builder.impl.KieServicesImpl;
import org.jboss.resteasy.plugins.server.tjws.TJWSEmbeddedJaxrsServer;
import org.kie.api.KieServices;
import org.kie.server.api.KieServerConstants;
import org.kie.server.api.KieServerEnvironment;
import org.kie.server.integrationtests.config.TestConfig;
import org.kie.server.remote.rest.common.resource.KieServerRestImpl;
import org.kie.server.services.api.KieServerExtension;
import org.kie.server.services.api.SupportedTransports;
import org.kie.server.services.impl.KieServerImpl;
public class KieServerExecutor {
protected TJWSEmbeddedJaxrsServer server;
// Need to hold kie server instance because we need to manually handle startup/shutdown behavior defined in
// context listener org.kie.server.services.Bootstrap. Embedded server doesn't support ServletContextListeners.
private KieServerImpl kieServer;
private static SimpleDateFormat serverIdSuffixDateFormat = new SimpleDateFormat("yyyy-MM-DD-HHmmss_SSS");
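    // Illustrative lifecycle, e.g. from a test fixture (the method names below are
    // real, the surrounding usage is an assumption):
    //   KieServerExecutor executor = new KieServerExecutor();
    //   executor.startKieServer();
    //   // ... run tests against the port from TestConfig.getKieServerAllocatedPort() ...
    //   executor.stopKieServer();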
public void startKieServer() {
if (server != null) {
throw new RuntimeException("Kie execution server is already created!");
}
registerKieServerId();
setKieServerProperties();
server = new TJWSEmbeddedJaxrsServer();
server.setPort(TestConfig.getKieServerAllocatedPort());
server.start();
addServerSingletonResources();
}
protected void setKieServerProperties() {
System.setProperty(KieServerConstants.CFG_BYPASS_AUTH_USER, "true");
System.setProperty(KieServerConstants.CFG_HT_CALLBACK, "custom");
System.setProperty(KieServerConstants.CFG_HT_CALLBACK_CLASS, "org.kie.server.integrationtests.jbpm.util.FixedUserGroupCallbackImpl");
System.setProperty(KieServerConstants.CFG_PERSISTANCE_DS, "jdbc/jbpm-ds");
System.setProperty(KieServerConstants.CFG_PERSISTANCE_TM, "org.hibernate.service.jta.platform.internal.JBossStandAloneJtaPlatform");
System.setProperty(KieServerConstants.KIE_SERVER_CONTROLLER, TestConfig.getControllerHttpUrl());
System.setProperty(KieServerConstants.CFG_KIE_CONTROLLER_USER, TestConfig.getUsername());
System.setProperty(KieServerConstants.CFG_KIE_CONTROLLER_PASSWORD, TestConfig.getPassword());
System.setProperty(KieServerConstants.KIE_SERVER_LOCATION, TestConfig.getEmbeddedKieServerHttpUrl());
System.setProperty(KieServerConstants.KIE_SERVER_STATE_REPO, "./target");
// kie server policy settings
System.setProperty(KieServerConstants.KIE_SERVER_ACTIVATE_POLICIES, "KeepLatestOnly");
System.setProperty("policy.klo.interval", "5000");
}
private void registerKieServerId() {
if (KieServerEnvironment.getServerId() == null) {
KieServerEnvironment.setServerId(KieServerBaseIntegrationTest.class.getSimpleName() + "@" + serverIdSuffixDateFormat.format(new Date()));
KieServerEnvironment.setServerName("KieServer");
}
}
private void addServerSingletonResources() {
kieServer = new KieServerImpl();
server.getDeployment().getRegistry().addSingletonResource(new KieServerRestImpl(kieServer));
List<KieServerExtension> extensions = kieServer.getServerExtensions();
for (KieServerExtension extension : extensions) {
List<Object> components = extension.getAppComponents(SupportedTransports.REST);
for (Object component : components) {
server.getDeployment().getRegistry().addSingletonResource(component);
}
}
}
public void stopKieServer() {
if (server == null) {
throw new RuntimeException("Kie execution server is already stopped!");
}
kieServer.destroy();
// The KieServices instance that was seen by the kieserver, will never be seen again at this point
((KieServicesImpl) KieServices.Factory.get()).nullAllContainerIds();
server.stop();
server = null;
}
}
| {
"content_hash": "99e5ab0d83b4a1d82851ab8d788b9472",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 149,
"avg_line_length": 46.59340659340659,
"alnum_prop": 0.7337264150943397,
"repo_name": "reynoldsm88/droolsjbpm-integration",
"id": "8eef4460ac6ad5d2ba1e2c67839fb14b60386263",
"size": "4841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kie-server-parent/kie-server-tests/kie-server-integ-tests-common/src/main/java/org/kie/server/integrationtests/shared/KieServerExecutor.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2569"
},
{
"name": "CSS",
"bytes": "7748"
},
{
"name": "FreeMarker",
"bytes": "20776"
},
{
"name": "HTML",
"bytes": "2654"
},
{
"name": "Java",
"bytes": "6923430"
},
{
"name": "JavaScript",
"bytes": "32051"
},
{
"name": "Shell",
"bytes": "3525"
},
{
"name": "Visual Basic",
"bytes": "4658"
},
{
"name": "XSLT",
"bytes": "1094"
}
],
"symlink_target": ""
} |
package net.anotheria.moskito.core.predefined;
import net.anotheria.moskito.core.stats.Interval;
import net.anotheria.moskito.core.stats.TimeUnit;
import java.util.ArrayList;
import java.util.List;
/**
* Stats for virtual pools, i.e. Heap and Non-Heap memory, which consists of multiple underlying pools.
 * The VirtualMemoryPoolStats do not measure themselves; they just aggregate values provided by JMX Beans.
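 *
 * Illustrative usage (the pool stats variables and the interval name are examples only):
 * <pre>
 *   VirtualMemoryPoolStats heap = new VirtualMemoryPoolStats("Heap");
 *   heap.addStats(edenSpaceStats);
 *   heap.addStats(oldGenStats);
 *   long used = heap.getUsed("default");
 * </pre>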
* @author lrosenberg
*
*/
public class VirtualMemoryPoolStats extends AbstractMemoryPoolStats implements IMemoryPoolStats{
/**
* Underlying 'real' stats.
*/
private List<MemoryPoolStats> realStats;
/**
* Creates a new VirtualMemoryPoolStats object.
*/
public VirtualMemoryPoolStats(){
this("unnamed", Constants.getDefaultIntervals());
}
/**
* Creates a new VirtualMemoryPoolStats object with given name.
*/
public VirtualMemoryPoolStats(String aName){
this(aName, Constants.getDefaultIntervals());
}
/**
* Creates a new VirtualMemoryPoolStats object with given name and special intervals.
*/
public VirtualMemoryPoolStats(String aName, Interval[] selectedIntervals){
super(aName);
realStats = new ArrayList<MemoryPoolStats>();
}
/**
* Adds an underlying 'real' stats object.
	 * @param stats the underlying 'real' stats object whose values are aggregated
*/
public void addStats(MemoryPoolStats stats){
realStats.add(stats);
}
@Override public String toStatsString(String intervalName, TimeUnit timeUnit) {
StringBuilder b = new StringBuilder();
b.append(getName()).append(' ');
b.append(" INIT: ").append(getInit(intervalName));
b.append(" MIN USED: ").append(getMinUsed(intervalName));
b.append(" USED: ").append(getUsed(intervalName));
b.append(" MAX USED: ").append(getMaxUsed(intervalName));
b.append(" MIN COMMITED: ").append(getMinCommited(intervalName));
b.append(" COMMITED: ").append(getCommited(intervalName));
b.append(" MAX COMMITED: ").append(getMaxCommited(intervalName));
b.append(" MAX: ").append(getMax(intervalName));
return b.toString();
}
@Override public long getInit(String intervalName){
long ret = 0L;
for (MemoryPoolStats s : realStats){
ret += s.getInit(intervalName);
}
return ret;
}
@Override public long getUsed(String intervalName){
long ret = 0L;
for (MemoryPoolStats s : realStats){
ret += s.getUsed(intervalName);
}
return ret;
}
@Override public long getMinUsed(String intervalName){
long ret = 0L;
for (MemoryPoolStats s : realStats){
ret += s.getMinUsed(intervalName);
}
return ret;
}
@Override public long getMaxUsed(String intervalName){
long ret = 0L;
for (MemoryPoolStats s : realStats){
ret += s.getMaxUsed(intervalName);
}
return ret;
}
@Override public long getCommited(String intervalName){
long ret = 0L;
for (MemoryPoolStats s : realStats){
ret += s.getCommited(intervalName);
}
return ret;
}
@Override public long getMinCommited(String intervalName){
long ret = 0L;
for (MemoryPoolStats s : realStats){
ret += s.getMinCommited(intervalName);
}
return ret;
}
@Override public long getMaxCommited(String intervalName){
long ret = 0L;
for (MemoryPoolStats s : realStats){
ret += s.getMaxCommited(intervalName);
}
return ret;
}
@Override public long getMax(String intervalName){
long ret = 0L;
for (MemoryPoolStats s : realStats){
ret += s.getMax(intervalName);
}
return ret;
}
@Override public long getFree(String intervalName){
return getCommited(intervalName) - getUsed(intervalName);
}
}
| {
"content_hash": "af559c1923f53c835aace98d69b04504",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 104,
"avg_line_length": 25.727941176470587,
"alnum_prop": 0.7144898542440697,
"repo_name": "vkazhdan/moskito",
"id": "67bfa4f58600769307cd58fbae05551eed7946d1",
"size": "3499",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "moskito-core/src/main/java/net/anotheria/moskito/core/predefined/VirtualMemoryPoolStats.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "230335"
},
{
"name": "HTML",
"bytes": "2654358"
},
{
"name": "Java",
"bytes": "1832876"
},
{
"name": "JavaScript",
"bytes": "435818"
},
{
"name": "Shell",
"bytes": "289"
}
],
"symlink_target": ""
} |
#ifndef ALLOCGC_HEAP_H
#define ALLOCGC_HEAP_H

namespace allocgc { namespace details {
namespace threads {
class world_snapshot;
}
class gc_heap : public utils::noncopyable, public utils::nonmovable
{
typedef allocators::gc_core_allocator core_alloc_t;
typedef allocators::gc_so_allocator so_alloc_t;
typedef allocators::gc_lo_allocator lo_alloc_t;
public:
typedef so_alloc_t tlab;
explicit gc_heap(gc_launcher* launcher);
gc_alloc::response allocate(const gc_alloc::request& rqst);
tlab* allocate_tlab(std::thread::id thrd_id);
gc_collect_stat collect(
const threads::world_snapshot& snapshot,
size_t threads_available,
collectors::static_root_set* static_roots
);
gc_memstat stats();
void shrink();
void set_limit(size_t limit);
private:
typedef std::unordered_map<std::thread::id, so_alloc_t> tlab_map_t;
core_alloc_t m_core_alloc;
lo_alloc_t m_loa;
tlab_map_t m_tlab_map;
std::mutex m_mutex;
};
}}
#endif //ALLOCGC_HEAP_H
| {
"content_hash": "ce62588142d231a052600e5e90a316d7",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 23.790697674418606,
"alnum_prop": 0.6480938416422287,
"repo_name": "eucpp/allocgc",
"id": "e325b7d6d46c1d5a3ddaf9c381219dc2ef95ed16",
"size": "1835",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "allocgc/include/liballocgc/details/collectors/gc_heap.hpp",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "337"
},
{
"name": "C#",
"bytes": "3785"
},
{
"name": "C++",
"bytes": "570114"
},
{
"name": "CMake",
"bytes": "21201"
},
{
"name": "Makefile",
"bytes": "2251"
},
{
"name": "Python",
"bytes": "2978"
}
],
"symlink_target": ""
} |
PhoneNumberKit is using metadata from Google's libphonenumber.
The metadata exists in PhoneNumberMetadata.json and the original XML can be found at [Original/PhoneNumberMetadata.xml](https://github.com/marmelroy/PhoneNumberKit/blob/master/PhoneNumberKit/Resources/Original/PhoneNumberMetadata.xml)
## Updating the metadata
We try to keep the metadata of PhoneNumberKit up to date; making sure you are running the latest release will be sufficient for most apps.
However, you can also update the metadata yourself by following these steps:
1. Download a newer version of the XML metadata file from [libPhoneNumber](https://github.com/googlei18n/libphonenumber/blob/master/resources/)
2. Replace the XML file in your PhoneNumberKit project.
3. Run
```bash
./update.sh
```
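
Under the hood the script converts the XML metadata to JSON; a rough Python equivalent of that step (the paths and the exact xmljson convention are assumptions) looks like this:

```python
import json
from xml.etree.ElementTree import parse
from xmljson import badgerfish as bf

# Convert the libphonenumber XML into a JSON representation.
root = parse('Original/PhoneNumberMetadata.xml').getroot()
with open('PhoneNumberMetadata.json', 'w') as out:
    json.dump(bf.data(root), out)
```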
You will need a python library called 'xmljson' installed. You can install it with pip
```bash
pip install xmljson
``` | {
"content_hash": "8691f9d07f2c1edfe0be0eff30e721f4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 234,
"avg_line_length": 45.05,
"alnum_prop": 0.8013318534961155,
"repo_name": "marmelroy/PhoneNumberKit",
"id": "98b4c817a1640f5e15bd3838c0415b89c2c20602",
"size": "912",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "PhoneNumberKit/Resources/README.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "538"
},
{
"name": "Ruby",
"bytes": "2230"
},
{
"name": "Shell",
"bytes": "3669"
},
{
"name": "Swift",
"bytes": "231671"
}
],
"symlink_target": ""
} |
"""Treadmill Tenant REST api.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import flask
import flask_restplus as restplus
from flask_restplus import fields
# Disable E0611: No 'name' in module
from treadmill import webutils # pylint: disable=E0611
# Old style classes, no init method.
#
# pylint: disable=W0232
def init(api, cors, impl):
"""Configures REST handlers for tenant resource."""
namespace = webutils.namespace(
api, __name__, 'Tenant REST operations'
)
model = {
        # Tenant return is inconsistent: list responses use "tenant" while GET
        # uses "_id", so both fields are needed here.
'_id': fields.String(description='Tenant name'),
'tenant': fields.String(description='Tenant name'),
'systems': fields.List(
fields.Integer(description='System ID', required=True),
min_items=1)
}
tenant_model = api.model(
'Tenant', model
)
@namespace.route('/',)
class _TenantList(restplus.Resource):
"""Treadmill Tenant resource"""
@webutils.get_api(api, cors,
marshal=api.marshal_list_with,
resp_model=tenant_model)
def get(self):
"""Returns list of configured tenants."""
return impl.list()
@namespace.route('/<tenant_id>')
@api.doc(params={'tenant_id': 'Tenant ID/name'})
class _TenantResource(restplus.Resource):
"""Treadmill Tenant resource."""
@webutils.get_api(api, cors,
marshal=api.marshal_with,
resp_model=tenant_model)
def get(self, tenant_id):
"""Return Treadmill tenant configuration."""
return impl.get(tenant_id)
@webutils.post_api(api, cors,
req_model=tenant_model,
resp_model=tenant_model)
def post(self, tenant_id):
"""Creates Treadmill tenant."""
return impl.create(tenant_id, flask.request.json)
@webutils.put_api(api, cors,
req_model=tenant_model,
resp_model=tenant_model)
def put(self, tenant_id):
"""Updates Treadmill tenant configuration."""
return impl.update(tenant_id, flask.request.json)
@webutils.delete_api(api, cors)
def delete(self, tenant_id):
"""Deletes Treadmill tenant."""
return impl.delete(tenant_id)
| {
"content_hash": "d5d87ed3a80f5c5418046f9594914e3d",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 78,
"avg_line_length": 32.074074074074076,
"alnum_prop": 0.5800615858352579,
"repo_name": "captiosus/treadmill",
"id": "fae63938c6752ccf991e7912f04f19cd62f01fe1",
"size": "2598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treadmill/rest/api/tenant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "570"
},
{
"name": "Python",
"bytes": "2598791"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "58099"
}
],
"symlink_target": ""
} |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>SFML - Simple and Fast Multimedia Library</title>
<meta http-equiv="Content-Type" content="text/html;"/>
<meta charset="utf-8"/>
<!--<link rel='stylesheet' type='text/css' href="http://fonts.googleapis.com/css?family=Ubuntu:400,700,400italic"/>-->
<link rel="stylesheet" type="text/css" href="doxygen.css" title="default" media="screen,print" />
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
</head>
<body>
<div id="banner-container">
<div id="banner">
<span id="sfml">SFML 2.3.0</span>
</div>
</div>
<div id="content">
<!-- Generated by Doxygen 1.8.8 -->
<div id="navrow1" class="tabs">
<ul class="tablist">
<li><a href="index.html"><span>Main Page</span></a></li>
<li><a href="pages.html"><span>Related Pages</span></a></li>
<li><a href="modules.html"><span>Modules</span></a></li>
<li class="current"><a href="annotated.html"><span>Classes</span></a></li>
<li><a href="files.html"><span>Files</span></a></li>
</ul>
</div>
<div id="navrow2" class="tabs2">
<ul class="tablist">
<li><a href="annotated.html"><span>Class List</span></a></li>
<li><a href="classes.html"><span>Class Index</span></a></li>
<li><a href="hierarchy.html"><span>Class Hierarchy</span></a></li>
<li><a href="functions.html"><span>Class Members</span></a></li>
</ul>
</div>
<div id="nav-path" class="navpath">
<ul>
<li class="navelem"><b>sf</b></li><li class="navelem"><a class="el" href="classsf_1_1GlResource.html">GlResource</a></li> </ul>
</div>
</div><!-- top -->
<div class="header">
<div class="summary">
<a href="#pro-methods">Protected Member Functions</a> |
<a href="#pro-static-methods">Static Protected Member Functions</a> |
<a href="classsf_1_1GlResource-members.html">List of all members</a> </div>
<div class="headertitle">
<div class="title">sf::GlResource Class Reference<div class="ingroups"><a class="el" href="group__window.html">Window module</a></div></div> </div>
</div><!--header-->
<div class="contents">
<p>Base class for classes that require an OpenGL context.
<a href="classsf_1_1GlResource.html#details">More...</a></p>
<p><code>#include <<a class="el" href="GlResource_8hpp_source.html">GlResource.hpp</a>></code></p>
<div class="dynheader">
Inheritance diagram for sf::GlResource:</div>
<div class="dyncontent">
<div class="center">
<img src="classsf_1_1GlResource.png" usemap="#sf::GlResource_map" alt=""/>
<map id="sf::GlResource_map" name="sf::GlResource_map">
<area href="classsf_1_1Context.html" title="Class holding a valid drawing context. " alt="sf::Context" shape="rect" coords="0,56,113,80"/>
<area href="classsf_1_1Shader.html" title="Shader class (vertex and fragment) " alt="sf::Shader" shape="rect" coords="123,56,236,80"/>
<area href="classsf_1_1Texture.html" title="Image living on the graphics card that can be used for drawing. " alt="sf::Texture" shape="rect" coords="246,56,359,80"/>
<area href="classsf_1_1Window.html" title="Window that serves as a target for OpenGL rendering. " alt="sf::Window" shape="rect" coords="369,56,482,80"/>
<area href="classsf_1_1RenderWindow.html" title="Window that can serve as a target for 2D drawing. " alt="sf::RenderWindow" shape="rect" coords="369,112,482,136"/>
</map>
</div></div>
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pro-methods"></a>
Protected Member Functions</h2></td></tr>
<tr class="memitem:ad8fb7a0674f0f77e530dacc2a3b0dc6a"><td class="memItemLeft" align="right" valign="top"> </td><td class="memItemRight" valign="bottom"><a class="el" href="classsf_1_1GlResource.html#ad8fb7a0674f0f77e530dacc2a3b0dc6a">GlResource</a> ()</td></tr>
<tr class="memdesc:ad8fb7a0674f0f77e530dacc2a3b0dc6a"><td class="mdescLeft"> </td><td class="mdescRight">Default constructor. <a href="#ad8fb7a0674f0f77e530dacc2a3b0dc6a">More...</a><br /></td></tr>
<tr class="separator:ad8fb7a0674f0f77e530dacc2a3b0dc6a"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ab99035b67052331d1e8cf67abd93de98"><td class="memItemLeft" align="right" valign="top"> </td><td class="memItemRight" valign="bottom"><a class="el" href="classsf_1_1GlResource.html#ab99035b67052331d1e8cf67abd93de98">~GlResource</a> ()</td></tr>
<tr class="memdesc:ab99035b67052331d1e8cf67abd93de98"><td class="mdescLeft"> </td><td class="mdescRight">Destructor. <a href="#ab99035b67052331d1e8cf67abd93de98">More...</a><br /></td></tr>
<tr class="separator:ab99035b67052331d1e8cf67abd93de98"><td class="memSeparator" colspan="2"> </td></tr>
</table><table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pro-static-methods"></a>
Static Protected Member Functions</h2></td></tr>
<tr class="memitem:ae0efa7935241644608ca32ba47b22a33"><td class="memItemLeft" align="right" valign="top">static void </td><td class="memItemRight" valign="bottom"><a class="el" href="classsf_1_1GlResource.html#ae0efa7935241644608ca32ba47b22a33">ensureGlContext</a> ()</td></tr>
<tr class="memdesc:ae0efa7935241644608ca32ba47b22a33"><td class="mdescLeft"> </td><td class="mdescRight">Make sure that a valid OpenGL context exists in the current thread. <a href="#ae0efa7935241644608ca32ba47b22a33">More...</a><br /></td></tr>
<tr class="separator:ae0efa7935241644608ca32ba47b22a33"><td class="memSeparator" colspan="2"> </td></tr>
</table>
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock"><p>Base class for classes that require an OpenGL context. </p>
<p>This class is for internal use only, it must be the base of every class that requires a valid OpenGL context in order to work.</p>
<p>Definition at line <a class="el" href="GlResource_8hpp_source.html#l00040">40</a> of file <a class="el" href="GlResource_8hpp_source.html">GlResource.hpp</a>.</p>
</div><h2 class="groupheader">Constructor & Destructor Documentation</h2>
<a class="anchor" id="ad8fb7a0674f0f77e530dacc2a3b0dc6a"></a>
<div class="memitem">
<div class="memproto">
<table class="mlabels">
<tr>
<td class="mlabels-left">
<table class="memname">
<tr>
<td class="memname">sf::GlResource::GlResource </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</td>
<td class="mlabels-right">
<span class="mlabels"><span class="mlabel">protected</span></span> </td>
</tr>
</table>
</div><div class="memdoc">
<p>Default constructor. </p>
</div>
</div>
<a class="anchor" id="ab99035b67052331d1e8cf67abd93de98"></a>
<div class="memitem">
<div class="memproto">
<table class="mlabels">
<tr>
<td class="mlabels-left">
<table class="memname">
<tr>
<td class="memname">sf::GlResource::~GlResource </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</td>
<td class="mlabels-right">
<span class="mlabels"><span class="mlabel">protected</span></span> </td>
</tr>
</table>
</div><div class="memdoc">
<p>Destructor. </p>
</div>
</div>
<h2 class="groupheader">Member Function Documentation</h2>
<a class="anchor" id="ae0efa7935241644608ca32ba47b22a33"></a>
<div class="memitem">
<div class="memproto">
<table class="mlabels">
<tr>
<td class="mlabels-left">
<table class="memname">
<tr>
<td class="memname">static void sf::GlResource::ensureGlContext </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</td>
<td class="mlabels-right">
<span class="mlabels"><span class="mlabel">static</span><span class="mlabel">protected</span></span> </td>
</tr>
</table>
</div><div class="memdoc">
<p>Make sure that a valid OpenGL context exists in the current thread. </p>
</div>
</div>
<hr/>The documentation for this class was generated from the following file:<ul>
<li><a class="el" href="GlResource_8hpp_source.html">GlResource.hpp</a></li>
</ul>
</div><!-- contents -->
</div>
<div id="footer-container">
<div id="footer">
SFML is licensed under the terms and conditions of the <a href="http://www.sfml-dev.org/license.php">zlib/png license</a>.<br>
Copyright © Laurent Gomila ::
Documentation generated by <a href="http://www.doxygen.org/" title="doxygen website">doxygen</a> ::
</div>
</div>
</body>
</html>
| {
"content_hash": "07f3c0aa73b8f547706e6320bff2db70",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 282,
"avg_line_length": 49.705555555555556,
"alnum_prop": 0.6549681457471779,
"repo_name": "zulfikar2/HeroCity",
"id": "06c5a37acd02d822a738348512a968d67c664afe",
"size": "8947",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "SFML-2.3/doc/html/classsf_1_1GlResource.html",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "916006"
},
{
"name": "CMake",
"bytes": "17563"
},
{
"name": "CSS",
"bytes": "25894"
},
{
"name": "GLSL",
"bytes": "3928"
},
{
"name": "HTML",
"bytes": "6408850"
},
{
"name": "JavaScript",
"bytes": "3422"
}
],
"symlink_target": ""
} |
import time
def main(request, response):
response.add_required_headers = False # Don't implicitly add HTTP headers
response.writer.write_status(200)
response.writer.write_header("Content-Type", "text/html")
response.writer.end_headers()
response.writer.write(b'<!DOCTYPE html><script src="script.py?uuid=%s&character=ζ"></script>' % request.GET[b"uuid"]);
time.sleep(0.2)
response.writer.write(b'<meta charset="windows-1251"><p>Test: \xE6</p>');
| {
"content_hash": "843461145b20685d89c347c9390b443a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 129,
"avg_line_length": 43.90909090909091,
"alnum_prop": 0.6977225672877847,
"repo_name": "nwjs/chromium.src",
"id": "c72c469ce3cd23580f88713cefcc2799fed8ad66",
"size": "483",
"binary": false,
"copies": "20",
"ref": "refs/heads/nw70",
"path": "third_party/blink/web_tests/external/wpt/html/syntax/speculative-charset/support/speculative-script.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
/*********************************************************************/
/* */
/* Nagoya Institute of Technology, Aichi, Japan, */
/* Nara Institute of Science and Technology, Nara, Japan */
/* and */
/* Carnegie Mellon University, Pittsburgh, PA */
/* Copyright (c) 2003-2004 */
/* All Rights Reserved. */
/* */
/* Permission is hereby granted, free of charge, to use and */
/* distribute this software and its documentation without */
/* restriction, including without limitation the rights to use, */
/* copy, modify, merge, publish, distribute, sublicense, and/or */
/* sell copies of this work, and to permit persons to whom this */
/* work is furnished to do so, subject to the following conditions: */
/* */
/* 1. The code must retain the above copyright notice, this list */
/* of conditions and the following disclaimer. */
/* 2. Any modifications must be clearly marked as such. */
/* 3. Original authors' names are not deleted. */
/* */
/* NAGOYA INSTITUTE OF TECHNOLOGY, NARA INSTITUTE OF SCIENCE AND */
/* TECHNOLOGY, CARNEGIE MELLON UNIVERSITY, AND THE CONTRIBUTORS TO */
/* THIS WORK DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, */
/* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, */
/* IN NO EVENT SHALL NAGOYA INSTITUTE OF TECHNOLOGY, NARA */
/* INSTITUTE OF SCIENCE AND TECHNOLOGY, CARNEGIE MELLON UNIVERSITY, */
/* NOR THE CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR */
/* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM */
/* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, */
/* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN */
/* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
/* */
/*********************************************************************/
/* */
/* Author : Hideki Banno */
/* */
/*-------------------------------------------------------------------*/
/* */
/* Slightly modified by Tomoki Toda ([email protected]) */
/* June 2004 */
/* Integrate as a Voice Conversion module */
/* */
/*-------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*#include <unistd.h>*/
#include <sys/types.h>
#include <sys/stat.h>
/*#include <pwd.h>*/
#include <math.h>
#ifdef VARARGS
#include <varargs.h>
#else
#include <stdarg.h>
#endif
#include "../include/defs.h"
#include "../include/memory.h"
#include "../include/option.h"
#include "../include/fileio.h"
/*
* get basic name
*/
const char *getbasicname(const char *name)
{
int i;
int len;
const char *string;
if (name == NULL || *name == NUL)
return NULL;
len = strlen(name) - 1;
for (i = len;; i--) {
if (*(name + i) == '/') {
if (i == len)
return NULL;
string = name + i + 1;
break;
}
if (i <= 0) {
string = name;
break;
}
}
return string;
}
/*
* get allocated basic name
*/
char *xgetbasicname(const char *name)
{
const char *string;
char *basicname;
if (name == NULL || *name == NUL)
return NULL;
string = getbasicname(name);
basicname = strclone(string);
return basicname;
}
/*
* get directory name
*/
char *xgetdirname(const char *filename)
{
char *p;
char *dirname;
/* get directory name */
if ((dirname = xgetexactname(filename)) == NULL) {
dirname = strclone("/");
} else {
if ((p = strrchr(dirname, '/')) == NULL) {
xfree(dirname);
dirname = strclone("/");
} else {
*p = NUL;
}
}
return dirname;
}
/*
* get exact name
*/
/*char *xgetexactname(char *name)
{
int len;
char buf[MAX_PATHNAME];
char *home, *username;
char *exactname;
struct passwd *entry;
if (name == NULL || *name == NUL) {
getcwd(buf, MAX_PATHNAME);
strcat(buf, "/");
len = strlen(buf) + 1;
exactname = xalloc(len, char);
sprintf(exactname, "%s", buf);
} else if (*name == '~') {
name++;
if (*name == '/') {
name++;
if ((home = getenv("HOME")) == NULL) {
if ((entry = getpwuid(getuid())) == NULL) {
return NULL;
}
home = entry->pw_dir;
}
len = strlen(home) + strlen(name) + 2;
exactname = xalloc(len, char);
sprintf(exactname, "%s/%s", home, name);
} else {
strcpy(buf, name);
if ((username = strchr(buf, '/')) != NULL)
*username = NUL;
if ((entry = getpwnam(buf)) != NULL) {
home = entry->pw_dir;
} else {
return NULL;
}
while (*name != '/' && *name != NUL) {
name++;
}
name++;
len = strlen(home) + strlen(name) + 2;
exactname = xalloc(len, char);
sprintf(exactname, "%s/%s", home, name);
}
} else if (streq(name, "..")) {
getcwd(buf, MAX_PATHNAME);
if ((username = strrchr(buf, '/')) != NULL)
*username = NUL;
strcat(buf, "/");
len = strlen(buf) + 1;
exactname = xalloc(len, char);
sprintf(exactname, "%s", buf);
} else if (strveq(name, "../")) {
name += 2;
getcwd(buf, MAX_PATHNAME);
if ((username = strrchr(buf, '/')) != NULL)
*username = NUL;
strcat(buf, name);
len = strlen(buf) + 1;
exactname = xalloc(len, char);
sprintf(exactname, "%s", buf);
} else if (streq(name, ".")) {
getcwd(buf, MAX_PATHNAME);
strcat(buf, "/");
len = strlen(buf) + 1;
exactname = xalloc(len, char);
sprintf(exactname, "%s", buf);
} else if (strveq(name, "./")) {
name++;
getcwd(buf, MAX_PATHNAME);
strcat(buf, name);
len = strlen(buf) + 1;
exactname = xalloc(len, char);
sprintf(exactname, "%s", buf);
} else if (strveq(name, "/")) {
len = strlen(name) + 1;
exactname = xalloc(len, char);
sprintf(exactname, "%s", name);
} else {
getcwd(buf, MAX_PATHNAME);
strcat(buf, "/");
strcat(buf, name);
len = strlen(buf) + 1;
exactname = xalloc(len, char);
sprintf(exactname, "%s", buf);
}
return exactname;
}
*/
char *xgetexactname(const char *name)
{
int len;
char *exactname;
if (name == NULL || *name == NUL) {
exactname = xalloc(5, char);
sprintf(exactname, "gomi");
} else if (*name == '~') {
exactname = xalloc(5, char);
sprintf(exactname, "gomi");
} else if (streq(name, "..")) {
exactname = xalloc(5, char);
sprintf(exactname, "gomi");
} else if (streq(name, "../")) {
exactname = xalloc(5, char);
sprintf(exactname, "gomi");
} else if (streq(name, ".")) {
exactname = xalloc(5, char);
sprintf(exactname, "gomi");
} else if (streq(name, "./")) {
exactname = xalloc(5, char);
sprintf(exactname, "gomi");
} else if (streq(name, "/")) {
exactname = xalloc(5, char);
sprintf(exactname, "gomi");
} else {
len = strlen(name) + 1;
exactname = xalloc(len, char);
sprintf(exactname, "%s", name);
}
return exactname;
}
/*
* get option number of being equal to flag
*/
int flageq(char *flag, OPTIONS options)
{
int i;
for (i = 0;; i++) {
if (i >= options.num_option) {
return UNKNOWN;
}
if (streq(options.option[i].flag, flag) ||
streq(options.option[i].subflag, flag)) {
break;
}
}
return i;
}
/*
* convert option value
*/
int convoptvalue(char *value, OPTION *option)
{
int incr;
if (value == NULL || *value == NUL) {
incr = UNKNOWN;
} else if (eqtype(option->type, TYPE_INT)) {
*(int *)option->value = atoi(value);
incr = 1;
} else if (eqtype(option->type, TYPE_SHORT)) {
*(short *)option->value = (short)atol(value);
incr = 1;
} else if (eqtype(option->type, TYPE_LONG)) {
*(long *)option->value = atol(value);
incr = 1;
} else if (eqtype(option->type, TYPE_FLOAT)) {
*(float *)option->value = (float)atof(value);
incr = 1;
} else if (eqtype(option->type, TYPE_DOUBLE)) {
*(double *)option->value = (double)atof(value);
incr = 1;
} else if (eqtype(option->type, TYPE_STRING)) {
*(char **)option->value = strclone(value);
incr = 1;
} else if (eqtype(option->type, TYPE_STRING_S)) {
strcpy(*(char **)option->value, value);
incr = 1;
} else if (eqtype(option->type, TYPE_BOOLEAN)) {
*(XBOOL *)option->value = str2bool(value);
incr = 0;
} else {
fprintf(stderr, "unknown option data type\n");
incr = UNKNOWN;
}
/* set changed flag true */
if (incr != UNKNOWN) {
option->changed = XTRUE;
}
return incr;
}
/*
* set option value
*/
int setoptvalue(char *value, OPTION *option)
{
int incr;
if (eqtype(option->type, TYPE_BOOLEAN)) {
if (option->value == NULL || *(XBOOL *)option->value != XTRUE) {
*(XBOOL *)option->value = XTRUE;
} else {
*(XBOOL *)option->value = XFALSE;
}
incr = 0;
} else {
incr = convoptvalue(value, option);
}
return incr;
}
/*
* get option
*/
int getoption(int argc, char *argv[], int *ac, OPTIONS *options)
{
int i;
int oc;
int incr;
if (*ac >= argc)
usage(*options);
i = *ac;
if ((oc = flageq(argv[i], *options)) != UNKNOWN) {
if (i + 1 >= argc) {
incr = setoptvalue((char *)NULL, &(options->option[oc]));
} else {
incr = setoptvalue(argv[i + 1], &(options->option[oc]));
}
if (incr == UNKNOWN)
usage(*options);
} else {
return UNKNOWN;
}
if (incr != UNKNOWN)
*ac += incr;
return incr;
}
/*
* set changed flag
*/
void setchanged(int argc, char *argv[], OPTIONS *options)
{
int i;
int oc;
for (i = 1; i < argc; i++) {
if ((oc = flageq(argv[i], *options)) != UNKNOWN) {
options->option[oc].changed = XTRUE;
}
}
return;
}
/*
* get arg file
*/
int getargfile(char *filename, int *fc, OPTIONS *options)
{
int i;
int incr = 1;
if (!streq(filename, "-") && strveq(filename, "-")) {
printerr(*options, "unknown option %s", filename);
}
if (fc == NULL) {
i = 0;
} else {
i = *fc;
*fc += incr;
}
if (i >= options->num_file) {
printerr(*options, "too many files");
}
options->file[i].name = xgetexactname(filename);
return incr;
}
#ifdef VARARGS
/*
* print help
*/
void printhelp(va_alist)
va_dcl
{
va_list args;
char *format;
OPTIONS options;
char buf[MAX_LINE];
char message[MAX_LINE];
va_start(args);
options = va_arg(args, OPTIONS);
format = va_arg(args, char *);
vsprintf(message, format, args);
va_end(args);
sprintf(buf, "%s (%d)", options.progname, options.section);
fprintf(stderr, "%-24s- %s\n", buf, message);
usage(options);
}
/*
* print error
*/
void printerr(va_alist)
va_dcl
{
va_list args;
char *format;
OPTIONS options;
char message[MAX_LINE];
va_start(args);
options = va_arg(args, OPTIONS);
format = va_arg(args, char *);
vsprintf(message, format, args);
va_end(args);
fprintf(stderr, "%s: %s\n", options.progname, message);
usage(options);
}
#else
/*
* print help
*/
void printhelp(OPTIONS options, const char *format, ...)
{
va_list args;
char buf[MAX_LINE];
char message[MAX_LINE];
va_start(args, format);
vsprintf(message, format, args);
va_end(args);
sprintf(buf, "%s (%d)", options.progname, options.section);
fprintf(stderr, "%-24s- %s\n", buf, message);
usage(options);
}
/*
* print error
*/
void printerr(OPTIONS options, const char *format, ...)
{
va_list args;
char message[MAX_LINE];
va_start(args, format);
vsprintf(message, format, args);
va_end(args);
fprintf(stderr, "%s: %s\n", options.progname, message);
usage(options);
}
#endif
/*
* get option number of being equal to label
*/
int labeleq(char *label, OPTIONS *options)
{
int i;
for (i = 0;; i++) {
if (i >= options->num_option) {
i = UNKNOWN;
break;
}
if (!strnone(options->option[i].label) &&
streq(options->option[i].label, label)) {
break;
}
}
return i;
}
/*
* read setup file
*/
void readsetup(char *filename, OPTIONS *options)
{
int j;
char *exactname;
char name[MAX_LINE] = "";
char value[MAX_LINE] = "";
char line[MAX_MESSAGE] = "";
FILE *fp;
if (strnone(filename))
return;
exactname = xgetexactname(filename);
if (NULL == (fp = fopen(exactname, "r"))) {
return;
}
while (fgetline(line, fp) != EOF) {
/*sscanf(line, "%s %s", name, value);*/
sscanf_setup(line, name, value);
if (!strnone(value) && (j = labeleq(name, options)) >= 0) {
if (options->option[j].changed != XTRUE) {
if (eqtype(options->option[j].type, TYPE_BOOLEAN)) {
*(XBOOL *)options->option[j].value = str2bool(value);
} else if (eqtype(options->option[j].type, TYPE_INT)) {
*(int *)options->option[j].value = atoi(value);
} else if (eqtype(options->option[j].type, TYPE_SHORT)) {
*(short *)options->option[j].value = atoi(value);
} else if (eqtype(options->option[j].type, TYPE_LONG)) {
*(long *)options->option[j].value = atol(value);
} else if (eqtype(options->option[j].type, TYPE_FLOAT)) {
*(float *)options->option[j].value = (float)atof(value);
} else if (eqtype(options->option[j].type, TYPE_DOUBLE)) {
*(double *)options->option[j].value = atof(value);
} else if (eqtype(options->option[j].type, TYPE_STRING)) {
*(char **)options->option[j].value = strclone(value);
} else if (eqtype(options->option[j].type, TYPE_STRING_S)) {
strcpy(*(char **)options->option[j].value, value);
}
}
}
strcpy(name, "");
strcpy(value, "");
}
fclose(fp);
xfree(exactname);
return;
}
/*
* write setup file
*/
void writesetup(char *filename, OPTIONS options)
{
int i;
char *exactname;
FILE *fp;
if (strnone(filename))
return;
exactname = xgetexactname(filename);
if (NULL == (fp = fopen(exactname, "w"))) {
fprintf(stderr, "can't open file: %s\n", exactname);
return;
}
for (i = 0; i < options.num_option; i++) {
if (strnone(options.option[i].label))
continue;
fprintf(fp, "%s ", options.option[i].label);
if (eqtype(options.option[i].type, TYPE_BOOLEAN)) {
fprintf(fp, "%s", bool2str(options.option[i].value));
} else if (eqtype(options.option[i].type, TYPE_INT)) {
fprintf(fp, "%d", *(int *)options.option[i].value);
} else if (eqtype(options.option[i].type, TYPE_SHORT)) {
fprintf(fp, "%d", *(short *)options.option[i].value);
} else if (eqtype(options.option[i].type, TYPE_LONG)) {
fprintf(fp, "%ld", *(long *)options.option[i].value);
} else if (eqtype(options.option[i].type, TYPE_FLOAT)) {
fprintf(fp, "%f", *(float *)options.option[i].value);
} else if (eqtype(options.option[i].type, TYPE_DOUBLE)) {
fprintf(fp, "%f", *(double *)options.option[i].value);
} else if (eqtype(options.option[i].type, TYPE_STRING) ||
eqtype(options.option[i].type, TYPE_STRING_S)) {
fprintf(fp, "%s", *(char **)options.option[i].value);
}
fprintf(fp, "\n");
}
fclose(fp);
xfree(exactname);
return;
}
/*
* print usage
*/
void usage(OPTIONS options)
{
int i;
char buf[MAX_LINE] = "";
char label[MAX_LINE];
char filename[MAX_LINE];
for (i = 0; i < options.num_file; i++) {
sprintf(filename, " %s", options.file[i].label);
strcat(buf, filename);
}
if (options.num_option <= 0) {
fprintf(stderr, "usage: %s%s\n", options.progname, buf);
} else {
fprintf(stderr, "usage: %s [options...]%s\n", options.progname, buf);
fprintf(stderr, "options:\n");
}
for (i = 0; i < options.num_option; i++) {
if (strnone(options.option[i].flag) || strnone(options.option[i].desc))
continue;
if (!strnone(options.option[i].label)) {
strcpy(label, options.option[i].label);
} else {
strcpy(label, USAGE_LABEL_STRING);
}
if (eqtype(options.option[i].type, TYPE_BOOLEAN)) {
fprintf(stderr, "\t%-32s: %s\n",
options.option[i].flag, options.option[i].desc);
} else if (options.option[i].value != NULL) {
if (eqtype(options.option[i].type, TYPE_INT)) {
sprintf(buf, "%s %s[%d]", options.option[i].flag, label,
*(int *)options.option[i].value);
} else if (eqtype(options.option[i].type, TYPE_SHORT)) {
sprintf(buf, "%s %s[%d]", options.option[i].flag, label,
*(short *)options.option[i].value);
} else if (eqtype(options.option[i].type, TYPE_LONG)) {
sprintf(buf, "%s %s[%ld]", options.option[i].flag, label,
*(long *)options.option[i].value);
} else if (eqtype(options.option[i].type, TYPE_FLOAT) ||
eqtype(options.option[i].type, TYPE_DOUBLE)) {
int j;
char value[MAX_LINE];
if (eqtype(options.option[i].type, TYPE_FLOAT)) {
sprintf(value, "%f", *(float *)options.option[i].value);
} else {
sprintf(value, "%f", *(double *)options.option[i].value);
}
for (j = strlen(value) - 1; j >= 0; j--) {
if (value[j] == '.') {
value[j + 2] = NUL;
break;
} else if (value[j] != '0') {
value[j + 1] = NUL;
break;
}
}
sprintf(buf, "%s %s[%s]", options.option[i].flag, label,
value);
} else if (eqtype(options.option[i].type, TYPE_STRING) ||
eqtype(options.option[i].type, TYPE_STRING_S)) {
if (strnone(*(char **)options.option[i].value)) {
sprintf(buf, "%s %s", options.option[i].flag, label);
} else {
sprintf(buf, "%s %s[%s]", options.option[i].flag, label,
*(char **)options.option[i].value);
}
} else {
fprintf(stderr, "unknown option data type\n");
exit(1);
}
fprintf(stderr, "\t%-32s: %s\n", buf, options.option[i].desc);
} else {
sprintf(buf, "%s %s", options.option[i].flag, label);
fprintf(stderr, "\t%-32s: %s\n", buf, options.option[i].desc);
}
}
fprintf(stderr, "\n");
exit(1);
}
| {
"content_hash": "f9ce9c6a171c7d3b2167f2b9c7334923",
"timestamp": "",
"source": "github",
"line_count": 720,
"max_line_length": 75,
"avg_line_length": 25.9625,
"alnum_prop": 0.5402557106938426,
"repo_name": "saikrishnarallabandi/Festival-Speech-Synthesis-System",
"id": "a0342be3e82e79c99c33b50c45bf64be073d5177",
"size": "18693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "festvox/src/vc/src/sp/option.cc",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "1474"
},
{
"name": "C",
"bytes": "88349863"
},
{
"name": "C++",
"bytes": "12251382"
},
{
"name": "Emacs Lisp",
"bytes": "11103"
},
{
"name": "HTML",
"bytes": "259194"
},
{
"name": "Java",
"bytes": "295069"
},
{
"name": "Lex",
"bytes": "3451"
},
{
"name": "Makefile",
"bytes": "1712645"
},
{
"name": "Objective-C",
"bytes": "6472"
},
{
"name": "Perl",
"bytes": "281843"
},
{
"name": "Perl6",
"bytes": "3088"
},
{
"name": "Python",
"bytes": "219301"
},
{
"name": "Roff",
"bytes": "38391"
},
{
"name": "Scheme",
"bytes": "3145735"
},
{
"name": "Shell",
"bytes": "1202737"
},
{
"name": "Smarty",
"bytes": "6225"
},
{
"name": "Tcl",
"bytes": "1914"
},
{
"name": "TeX",
"bytes": "15010"
},
{
"name": "XSLT",
"bytes": "1518"
}
],
"symlink_target": ""
} |
layout: post
title: Implementing Clean Architecture - An Overview
description: A brief overview of Uncle Bob's Clean Architecture you will find here.
tags: [clean-architecture]
series: "Implementing Clean Architecture"
excerpt_separator: <!--more-->
lint-nowarn: JL0003
---
Let's briefly summarize what the Clean Architecture is ...
<img src="{{ site.url }}/assets/clean-architecture/Circles.png" class="dynimg" title="Layers of the Clean Architecture with Dependency Rule" alt="The Clean Architecture consists of multiple layers organized as circles while dependencies are only allowed from outer circles to inner circles. The inner circles contain the business logic. All details, devices and frameworks are in the outer circles."/>
<!--more-->
Wait! Let me rephrase: Let others briefly summarize what the Clean Architecture is about ...
(I really like the DRY principle so why should I repeat what others have put together nicely already ;-) )
- [Uncle Bob's original article on Clean Architecture](https://8thlight.com/blog/uncle-bob/2012/08/13/the-clean-architecture.html).
Of course this is the ultimate source. If you have not read it yet, do it right now!
- [Better Software Design with Clean Architecture](https://fullstackmark.com/post/11/better-software-design-with-clean-architecture).
Mark nicely introduces into Clean Architecture by showing code right away.
If you thought Uncle Bob's article was missing code then go through Mark's post.
- [clean-architecture-example](https://github.com/mattia-battiston/clean-architecture-example) is a GitHub
project which describes Clean Architecture "by example". It has great documentation and of course code :)
- [Clean Architecture: Standing on the shoulders of giants](https://herbertograca.com/2017/09/28/clean-architecture-standing-on-the-shoulders-of-giants/)
explains Clean Architecture by comparing it other approaches like Hexagonal and Onion Architectures.
Brief enough?
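
If you prefer code over circles: below is a minimal C# sketch of the Dependency Rule (all type names are made up for illustration and are not taken from any of the posts above). The inner circle owns the abstraction; the outer circle implements it, so all source code dependencies point inwards:

```csharp
// Inner circle: entities and use cases know nothing about the database.
public class Order
{
    public bool IsApproved { get; private set; }
    public void Approve() { IsApproved = true; }
}

public interface IOrderRepository
{
    Order Load(int id);
    void Save(Order order);
}

public class ApproveOrderInteractor
{
    private readonly IOrderRepository myRepository;

    public ApproveOrderInteractor(IOrderRepository repository)
    {
        myRepository = repository;
    }

    public void Execute(int orderId)
    {
        var order = myRepository.Load(orderId);
        order.Approve();
        myRepository.Save(order);
    }
}

// Outer circle: the database detail implements the inner abstraction.
public class SqlOrderRepository : IOrderRepository
{
    public Order Load(int id) { /* fetch from the database */ return new Order(); }
    public void Save(Order order) { /* write to the database */ }
}
```

Note that the interactor never references `SqlOrderRepository`; a composition root in the outermost circle wires both together.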
# Update 2018-04-08
- [A Clean Architecture in .Net](https://medium.com/@stephanhoekstra/clean-architecture-in-net-8eed6c224c50) is a great post about
realizing Clean Architecture in .Net in an Asp.Net environment - quite similar to my setup ;-) Definitively
worth reading if you want to start implementing Clean Architecture.
{% include series.html %}
| {
"content_hash": "91489217606ab1624c8059add0c03f4c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 401,
"avg_line_length": 60.94736842105263,
"alnum_prop": 0.7823834196891192,
"repo_name": "plainionist/plainionist.github.io",
"id": "3c9b14ea94450944453d74d803ff7c0ff24c50a4",
"size": "2320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_posts/2018-02-02-Implementing-Clean-Architecture-Overview.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "24"
},
{
"name": "CSS",
"bytes": "78379"
},
{
"name": "HTML",
"bytes": "28785"
}
],
"symlink_target": ""
} |
"use strict";
var package_json = require('./../package.json');
var nodemailer = require('nodemailer');
var request = require('request');
var Email = require('./email');
var SmtpapiHeaders = require('./smtpapi_headers');
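
// Illustrative usage (credentials and addresses are placeholders):
//   var sendgrid = require('sendgrid')('api_user', 'api_key');
//   sendgrid.send({ to: '[email protected]', from: '[email protected]',
//                   subject: 'Hello', text: 'Hello world' }, function(err, json) {
//     if (err) { return console.error(err); }
//     console.log(json.message);
//   });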
module.exports = function(api_user, api_key) {
var self;
var send = function(email, callback) {
web(email, callback);
}
/*
   * Sends an email via the Web API and passes the result
   * to the callback.
*
* @param {Email|Object} email An email object or a hash that has
* the values for the email to be sent.
* @param {Function} callback A function to call when the processing is done.
* This parameter is optional.
*/
var web = function(email, callback) {
self = this;
var callback = callback || function() { };
if (email.constructor !== Email) {
email = new Email(email);
}
_sendWeb(email, callback);
};
/*
* Sends an email via SMTP and returns true if the
* message was sent successfully.
*
* @param {Email|Object} email An email object or a hash that has
* the values for the email to be sent.
* @param {Function} callback A function to call when the processing is done.
* This parameter is optional.
*/
var smtp = function(email, callback) {
self = this;
var callback = callback || function() { };
if (email.constructor !== Email) {
email = new Email(email);
}
_sendSmtp(email, callback);
};
/*
   * Pseudo-private methods
*/
var _sendWeb = function(email, callback) {
var req = request({
method : 'POST',
uri : "https://sendgrid.com/api/mail.send.json"
}, function(err, resp, body) {
if(err) return callback(err, null);
var json = JSON.parse(body);
if (json.message !== 'success') {
var error = 'sendgrid error';
if (json.errors) { error = json.errors.shift(); }
return callback(error, null);
}
return callback(null, json);
});
var form = email.toWebFormat();
form['api_user'] = api_user;
form['api_key'] = api_key;
var reqForm = req.form();
for (var field in form) {
var value = form[field];
if (value && value.filename) {
if (value.cid) {
reqForm.append("content["+value.filename+"]", value.cid);
}
reqForm.append("files["+value.filename+"]", value.content, {filename: value.filename, contentType: value.contentType});
} else {
try {
reqForm.append(field, value);
} catch(err) {}
}
}
}
var _sendSmtp = function(email, callback) {
// SMTP settings
var smtpTransport = nodemailer.createTransport(self.SMTP, {
service: 'SendGrid',
auth: {
user: api_user,
pass: api_key
}
});
smtpTransport.sendMail(email.toSmtpFormat(), function(err, response) {
smtpTransport.close();
if(err) { return callback(err.data, null);}
return callback(null, {'message': 'success'});
});
}
/*
* Expose public API calls
*/
return {
version : package_json.version,
SMTP : "SMTP",
Email : Email,
SmtpapiHeaders : SmtpapiHeaders,
api_user : api_user,
api_key : api_key,
web : web,
smtp : smtp,
send : send
};
}
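
/*
 * Usage sketch (illustrative only): how the exported factory and its send()
 * call fit together. The credentials and message fields below are
 * placeholders, not values defined by this module.
 *
 *   var sendgrid = require('sendgrid')('api_user', 'api_key');
 *   sendgrid.send({
 *     to: 'to@example.com',
 *     from: 'from@example.com',
 *     subject: 'Hello',
 *     text: 'Hello world'
 *   }, function(err, json) {
 *     if (err) { return console.error(err); }
 *     console.log(json.message); // 'success' when the web API accepts it
 *   });
 */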
| {
"content_hash": "c44ecb5914322174797253b98c81e3a9",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 127,
"avg_line_length": 27.899224806201552,
"alnum_prop": 0.5398721867185329,
"repo_name": "floored/lunch",
"id": "be04e963619e567a4f92562e97d393c9387e0240",
"size": "3599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "node_modules/sendgrid/lib/sendgrid.js",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3185"
},
{
"name": "JavaScript",
"bytes": "22505"
}
],
"symlink_target": ""
} |
namespace performance_manager {
class FrameNodeImplDescriber;
namespace execution_context {
class ExecutionContextAccess;
} // namespace execution_context
// Frame nodes for a tree structure that is described in
// components/performance_manager/public/graph/frame_node.h.
class FrameNodeImpl
: public PublicNodeImpl<FrameNodeImpl, FrameNode>,
public TypedNodeBase<FrameNodeImpl, FrameNode, FrameNodeObserver>,
public mojom::DocumentCoordinationUnit {
public:
static const char kDefaultPriorityReason[];
static constexpr NodeTypeEnum Type() { return NodeTypeEnum::kFrame; }
// Construct a frame node associated with a |process_node|, a |page_node| and
// optionally with a |parent_frame_node|. For the main frame of |page_node|
// the |parent_frame_node| parameter should be nullptr. |render_frame_id| is
// the routing id of the frame (from RenderFrameHost::GetRoutingID).
FrameNodeImpl(ProcessNodeImpl* process_node,
PageNodeImpl* page_node,
FrameNodeImpl* parent_frame_node,
int render_frame_id,
const blink::LocalFrameToken& frame_token,
content::BrowsingInstanceId browsing_instance_id,
content::SiteInstanceId site_instance_id);
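
  // Illustrative construction sketch (the argument values are assumptions,
  // not defined in this header): a node for a page's main frame is
  // constructed with a null parent.
  //
  //   FrameNodeImpl node(process_node, page_node,
  //                      /*parent_frame_node=*/nullptr, render_frame_id,
  //                      frame_token, browsing_instance_id, site_instance_id);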
FrameNodeImpl(const FrameNodeImpl&) = delete;
FrameNodeImpl& operator=(const FrameNodeImpl&) = delete;
~FrameNodeImpl() override;
void Bind(mojo::PendingReceiver<mojom::DocumentCoordinationUnit> receiver);
// mojom::DocumentCoordinationUnit implementation.
void SetNetworkAlmostIdle() override;
void SetLifecycleState(LifecycleState state) override;
void SetHasNonEmptyBeforeUnload(bool has_nonempty_beforeunload) override;
void SetIsAdFrame(bool is_ad_frame) override;
void SetHadFormInteraction() override;
void OnNonPersistentNotificationCreated() override;
void OnFirstContentfulPaint(
base::TimeDelta time_since_navigation_start) override;
const RenderFrameHostProxy& GetRenderFrameHostProxy() const override;
void OnWebMemoryMeasurementRequested(
mojom::WebMemoryMeasurement::Mode mode,
OnWebMemoryMeasurementRequestedCallback callback) override;
  // Partial FrameNode implementation:
bool IsMainFrame() const override;
// Getters for const properties.
FrameNodeImpl* parent_frame_node() const;
PageNodeImpl* page_node() const;
ProcessNodeImpl* process_node() const;
int render_frame_id() const;
const blink::LocalFrameToken& frame_token() const;
content::BrowsingInstanceId browsing_instance_id() const;
content::SiteInstanceId site_instance_id() const;
const RenderFrameHostProxy& render_frame_host_proxy() const;
// Getters for non-const properties. These are not thread safe.
const base::flat_set<FrameNodeImpl*>& child_frame_nodes() const;
const base::flat_set<PageNodeImpl*>& opened_page_nodes() const;
const base::flat_set<PageNodeImpl*>& embedded_page_nodes() const;
LifecycleState lifecycle_state() const;
bool has_nonempty_beforeunload() const;
const GURL& url() const;
bool is_current() const;
bool network_almost_idle() const;
bool is_ad_frame() const;
bool is_holding_weblock() const;
bool is_holding_indexeddb_lock() const;
const base::flat_set<WorkerNodeImpl*>& child_worker_nodes() const;
const PriorityAndReason& priority_and_reason() const;
bool had_form_interaction() const;
bool is_audible() const;
const absl::optional<gfx::Rect>& viewport_intersection() const;
Visibility visibility() const;
// Setters are not thread safe.
void SetIsCurrent(bool is_current);
void SetIsHoldingWebLock(bool is_holding_weblock);
void SetIsHoldingIndexedDBLock(bool is_holding_indexeddb_lock);
void SetIsAudible(bool is_audible);
void SetViewportIntersection(const gfx::Rect& viewport_intersection);
void SetVisibility(Visibility visibility);
// Invoked when a navigation is committed in the frame.
void OnNavigationCommitted(const GURL& url, bool same_document);
// Invoked by |worker_node| when it starts/stops being a child of this frame.
void AddChildWorker(WorkerNodeImpl* worker_node);
void RemoveChildWorker(WorkerNodeImpl* worker_node);
// Invoked to set the frame priority, and the reason behind it.
void SetPriorityAndReason(const PriorityAndReason& priority_and_reason);
base::WeakPtr<FrameNodeImpl> GetWeakPtrOnUIThread();
base::WeakPtr<FrameNodeImpl> GetWeakPtr();
void SeverPageRelationshipsAndMaybeReparentForTesting() {
SeverPageRelationshipsAndMaybeReparent();
}
// Implementation details below this point.
// Invoked by opened pages when this frame is set/cleared as their opener.
// See PageNodeImpl::(Set|Clear)OpenerFrameNode.
void AddOpenedPage(base::PassKey<PageNodeImpl> key, PageNodeImpl* page_node);
void RemoveOpenedPage(base::PassKey<PageNodeImpl> key,
PageNodeImpl* page_node);
// Invoked by embedded pages when this frame is set/cleared as their embedder.
// See PageNodeImpl::(Set|Clear)EmbedderFrameNodeAndEmbeddingType.
void AddEmbeddedPage(base::PassKey<PageNodeImpl> key,
PageNodeImpl* page_node);
void RemoveEmbeddedPage(base::PassKey<PageNodeImpl> key,
PageNodeImpl* page_node);
// Used by the ExecutionContextRegistry mechanism.
std::unique_ptr<NodeAttachedData>* GetExecutionContextStorage(
base::PassKey<execution_context::ExecutionContextAccess> key) {
return &execution_context_;
}
private:
friend class ExecutionContextPriorityAccess;
friend class FrameNodeImplDescriber;
friend class ProcessNodeImpl;
// Rest of FrameNode implementation. These are private so that users of the
// impl use the private getters rather than the public interface.
const FrameNode* GetParentFrameNode() const override;
const PageNode* GetPageNode() const override;
const ProcessNode* GetProcessNode() const override;
const blink::LocalFrameToken& GetFrameToken() const override;
content::BrowsingInstanceId GetBrowsingInstanceId() const override;
content::SiteInstanceId GetSiteInstanceId() const override;
bool VisitChildFrameNodes(const FrameNodeVisitor& visitor) const override;
const base::flat_set<const FrameNode*> GetChildFrameNodes() const override;
bool VisitOpenedPageNodes(const PageNodeVisitor& visitor) const override;
const base::flat_set<const PageNode*> GetOpenedPageNodes() const override;
bool VisitEmbeddedPageNodes(const PageNodeVisitor& visitor) const override;
const base::flat_set<const PageNode*> GetEmbeddedPageNodes() const override;
LifecycleState GetLifecycleState() const override;
bool HasNonemptyBeforeUnload() const override;
const GURL& GetURL() const override;
bool IsCurrent() const override;
bool GetNetworkAlmostIdle() const override;
bool IsAdFrame() const override;
bool IsHoldingWebLock() const override;
bool IsHoldingIndexedDBLock() const override;
const base::flat_set<const WorkerNode*> GetChildWorkerNodes() const override;
bool VisitChildDedicatedWorkers(
const WorkerNodeVisitor& visitor) const override;
const PriorityAndReason& GetPriorityAndReason() const override;
bool HadFormInteraction() const override;
bool IsAudible() const override;
const absl::optional<gfx::Rect>& GetViewportIntersection() const override;
Visibility GetVisibility() const override;
// Properties associated with a Document, which are reset when a
// different-document navigation is committed in the frame.
struct DocumentProperties {
DocumentProperties();
~DocumentProperties();
void Reset(FrameNodeImpl* frame_node, const GURL& url_in);
ObservedProperty::NotifiesOnlyOnChangesWithPreviousValue<
GURL,
const GURL&,
&FrameNodeObserver::OnURLChanged>
url;
bool has_nonempty_beforeunload = false;
// Network is considered almost idle when there are no more than 2 network
// connections.
ObservedProperty::NotifiesOnlyOnChanges<
bool,
&FrameNodeObserver::OnNetworkAlmostIdleChanged>
network_almost_idle{false};
// Indicates if a form in the frame has been interacted with.
ObservedProperty::NotifiesOnlyOnChanges<
bool,
&FrameNodeObserver::OnHadFormInteractionChanged>
had_form_interaction{false};
};
// Invoked by subframes on joining/leaving the graph.
void AddChildFrame(FrameNodeImpl* frame_node);
void RemoveChildFrame(FrameNodeImpl* frame_node);
// NodeBase:
void OnJoiningGraph() override;
void OnBeforeLeavingGraph() override;
void RemoveNodeAttachedData() override;
// Helper function to sever all opened/embedded page relationships. This is
// called before destroying the frame node in "OnBeforeLeavingGraph". Note
// that this will reparent embedded pages to this frame's parent so that
// tracking is maintained.
void SeverPageRelationshipsAndMaybeReparent();
// This is not quite the same as GetMainFrame, because there can be multiple
// main frames while the main frame is navigating. This explicitly walks up
// the tree to find the main frame that corresponds to this frame tree node,
// even if it is not current.
FrameNodeImpl* GetFrameTreeRoot() const;
bool HasFrameNodeInAncestors(FrameNodeImpl* frame_node) const;
bool HasFrameNodeInDescendants(FrameNodeImpl* frame_node) const;
bool HasFrameNodeInTree(FrameNodeImpl* frame_node) const;
// Returns the initial visibility of this frame. Should only be called when
// the frame node joins the graph.
Visibility GetInitialFrameVisibility() const;
mojo::Receiver<mojom::DocumentCoordinationUnit> receiver_{this};
const raw_ptr<FrameNodeImpl> parent_frame_node_;
const raw_ptr<PageNodeImpl> page_node_;
const raw_ptr<ProcessNodeImpl> process_node_;
// The routing id of the frame.
const int render_frame_id_;
// This is the unique token for this frame instance as per e.g.
// RenderFrameHost::GetFrameToken().
const blink::LocalFrameToken frame_token_;
// The unique ID of the BrowsingInstance this frame belongs to. Frames in the
// same BrowsingInstance are allowed to script each other at least
// asynchronously (if cross-site), and sometimes synchronously (if same-site,
// and thus same SiteInstance).
const content::BrowsingInstanceId browsing_instance_id_;
// The unique ID of the SiteInstance this frame belongs to. Frames in the
  // same SiteInstance may synchronously script each other. Frames with the
// same |site_instance_id_| will also have the same |browsing_instance_id_|.
const content::SiteInstanceId site_instance_id_;
// A proxy object that lets the underlying RFH be safely dereferenced on the
// UI thread.
const RenderFrameHostProxy render_frame_host_proxy_;
base::flat_set<FrameNodeImpl*> child_frame_nodes_;
// The set of pages that have been opened by this frame.
base::flat_set<PageNodeImpl*> opened_page_nodes_;
// The set of pages that have been embedded by this frame.
base::flat_set<PageNodeImpl*> embedded_page_nodes_;
// Does *not* change when a navigation is committed.
ObservedProperty::NotifiesOnlyOnChanges<
LifecycleState,
&FrameNodeObserver::OnFrameLifecycleStateChanged>
lifecycle_state_{LifecycleState::kRunning};
ObservedProperty::
NotifiesOnlyOnChanges<bool, &FrameNodeObserver::OnIsAdFrameChanged>
is_ad_frame_{false};
// Locks held by a frame are tracked independently from navigation
// (specifically, a few tasks must run in the Web Lock and IndexedDB
// subsystems after a navigation for locks to be released).
ObservedProperty::NotifiesOnlyOnChanges<
bool,
&FrameNodeObserver::OnFrameIsHoldingWebLockChanged>
is_holding_weblock_{false};
ObservedProperty::NotifiesOnlyOnChanges<
bool,
&FrameNodeObserver::OnFrameIsHoldingIndexedDBLockChanged>
is_holding_indexeddb_lock_{false};
ObservedProperty::
NotifiesOnlyOnChanges<bool, &FrameNodeObserver::OnIsCurrentChanged>
is_current_{false};
// Properties associated with a Document, which are reset when a
// different-document navigation is committed in the frame.
//
// TODO(fdoray): Cleanup this once there is a 1:1 mapping between
// RenderFrameHost and Document https://crbug.com/936696.
DocumentProperties document_;
// The child workers of this frame.
base::flat_set<WorkerNodeImpl*> child_worker_nodes_;
// Frame priority information. Set via ExecutionContextPriorityDecorator.
ObservedProperty::NotifiesOnlyOnChangesWithPreviousValue<
PriorityAndReason,
const PriorityAndReason&,
&FrameNodeObserver::OnPriorityAndReasonChanged>
priority_and_reason_{PriorityAndReason(base::TaskPriority::LOWEST,
kDefaultPriorityReason)};
// Indicates if the frame is audible. This is tracked independently of a
// document, and if a document swap occurs the audio stream monitor machinery
// will keep this up to date.
ObservedProperty::
NotifiesOnlyOnChanges<bool, &FrameNodeObserver::OnIsAudibleChanged>
is_audible_{false};
// Tracks the intersection of this frame with the viewport.
//
// Note that the viewport intersection for the main frame is always invalid.
// This is because the main frame always occupies the entirety of the viewport
// so there is no point in tracking it. To avoid programming mistakes, it is
// forbidden to query this property for the main frame.
ObservedProperty::NotifiesOnlyOnChanges<
absl::optional<gfx::Rect>,
&FrameNodeObserver::OnViewportIntersectionChanged>
viewport_intersection_;
// Indicates if the frame is visible. This is initialized in
// FrameNodeImpl::OnJoiningGraph() and then maintained by
// FrameVisibilityDecorator.
ObservedProperty::NotifiesOnlyOnChangesWithPreviousValue<
Visibility,
Visibility,
&FrameNodeObserver::OnFrameVisibilityChanged>
visibility_{Visibility::kUnknown};
// Inline storage for ExecutionContext.
std::unique_ptr<NodeAttachedData> execution_context_;
base::WeakPtr<FrameNodeImpl> weak_this_;
base::WeakPtrFactory<FrameNodeImpl> weak_factory_
GUARDED_BY_CONTEXT(sequence_checker_){this};
};
} // namespace performance_manager
#endif // COMPONENTS_PERFORMANCE_MANAGER_GRAPH_FRAME_NODE_IMPL_H_
| {
"content_hash": "b6738f2c4910be2bbad370f296762722",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 80,
"avg_line_length": 42.523668639053255,
"alnum_prop": 0.751895915953524,
"repo_name": "scheib/chromium",
"id": "4f333b7ccd9c9987adf42a280a339b8c02f71bae",
"size": "15536",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "components/performance_manager/graph/frame_node_impl.h",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
/* $NetBSD: if_ieee1394.h,v 1.3 2000/11/20 12:12:19 onoe Exp $ */
/*
* Copyright (c) 2000 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Atsushi Onoe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _NET_IF_IEEE1394_H_
#define _NET_IF_IEEE1394_H_
/* hardware address information for arp / nd */
struct ieee1394_hwaddr {
u_int8_t iha_uid[8]; /* node unique ID */
u_int8_t iha_maxrec; /* max_rec in the config ROM */
u_int8_t iha_speed; /* min of link/PHY speed */
u_int8_t iha_offset[6]; /* unicast FIFO address */
};
/* pseudo header */
struct ieee1394_header {
u_int8_t ih_uid[8]; /* dst/src uid */
u_int8_t ih_maxrec; /* dst maxrec for tx */
u_int8_t ih_speed; /* speed */
u_int8_t ih_offset[6]; /* dst offset */
};
/* unfragment encapsulation header */
struct ieee1394_unfraghdr {
u_int16_t iuh_ft; /* fragment type == 0 */
u_int16_t iuh_etype; /* ether_type */
};
/* fragmented encapsulation header */
struct ieee1394_fraghdr {
u_int16_t ifh_ft_size; /* fragment type, data size-1 */
u_int16_t ifh_etype_off; /* etype for first fragment */
/* offset for subseq frag */
u_int16_t ifh_dgl; /* datagram label */
u_int16_t ifh_reserved;
};
#define IEEE1394_FT_SUBSEQ 0x8000
#define IEEE1394_FT_MORE 0x4000
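
/*
 * Illustrative reading of ifh_ft_size (an inference from the flag values
 * above and the "fragment type, data size-1" comment, not spelled out
 * elsewhere in this header): the high bits carry the fragment type and the
 * low bits carry the datagram size minus one, so a subsequent, non-final
 * fragment of a 1500 byte datagram would carry
 * (IEEE1394_FT_SUBSEQ | IEEE1394_FT_MORE | (1500 - 1)).
 */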
#define IEEE1394MTU 1500
#define IEEE1394_GASP_LEN 8 /* GASP header for Stream */
#define IEEE1394_ADDR_LEN 8
#define IEEE1394_CRC_LEN 4
struct ieee1394_reass_pkt {
LIST_ENTRY(ieee1394_reass_pkt) rp_next;
struct mbuf *rp_m;
struct ieee1394_header rp_hdr;
u_int16_t rp_size;
u_int16_t rp_etype;
u_int16_t rp_off;
u_int16_t rp_dgl;
u_int16_t rp_len;
u_int16_t rp_ttl;
};
struct ieee1394_reassq {
LIST_ENTRY(ieee1394_reassq) rq_node;
LIST_HEAD(, ieee1394_reass_pkt) rq_pkt;
u_int8_t rq_uid[8];
};
struct ieee1394com {
struct ifnet ic_if;
struct ieee1394_hwaddr ic_hwaddr;
u_int16_t ic_dgl;
LIST_HEAD(, ieee1394_reassq) ic_reassq;
};
const char *ieee1394_sprintf(const u_int8_t *);
void ieee1394_ifattach(struct ifnet *, const struct ieee1394_hwaddr *);
void ieee1394_ifdetach(struct ifnet *);
int ieee1394_ioctl(struct ifnet *, u_long, caddr_t);
struct mbuf * ieee1394_fragment(struct ifnet *, struct mbuf *, int, u_int16_t);
void ieee1394_drain(struct ifnet *);
void ieee1394_watchdog(struct ifnet *);
#endif /* _NET_IF_IEEE1394_H_ */
| {
"content_hash": "96cda281e625b0e4cd826d47d93b5b91",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 35.66086956521739,
"alnum_prop": 0.7212874908558888,
"repo_name": "MarginC/kame",
"id": "800d4438088d4ce1d675688c76fa61008cace649",
"size": "4101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netbsd/sys/net/if_ieee1394.h",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arc",
"bytes": "7491"
},
{
"name": "Assembly",
"bytes": "14375563"
},
{
"name": "Awk",
"bytes": "313712"
},
{
"name": "Batchfile",
"bytes": "6819"
},
{
"name": "C",
"bytes": "356715789"
},
{
"name": "C++",
"bytes": "4231647"
},
{
"name": "DIGITAL Command Language",
"bytes": "11155"
},
{
"name": "Emacs Lisp",
"bytes": "790"
},
{
"name": "Forth",
"bytes": "253695"
},
{
"name": "GAP",
"bytes": "9964"
},
{
"name": "Groff",
"bytes": "2220485"
},
{
"name": "Lex",
"bytes": "168376"
},
{
"name": "Logos",
"bytes": "570213"
},
{
"name": "Makefile",
"bytes": "1778847"
},
{
"name": "Mathematica",
"bytes": "16549"
},
{
"name": "Objective-C",
"bytes": "529629"
},
{
"name": "PHP",
"bytes": "11283"
},
{
"name": "Perl",
"bytes": "151251"
},
{
"name": "Perl6",
"bytes": "2572"
},
{
"name": "Ruby",
"bytes": "7283"
},
{
"name": "Scheme",
"bytes": "76872"
},
{
"name": "Shell",
"bytes": "583253"
},
{
"name": "Stata",
"bytes": "408"
},
{
"name": "Yacc",
"bytes": "606054"
}
],
"symlink_target": ""
} |
/* ==Scripting Parameters==
Source Server Version : SQL Server 2016 (13.0.4422)
Source Database Engine Edition : Microsoft SQL Server Express Edition
Source Database Engine Type : Standalone SQL Server
Target Server Version : SQL Server 2016
Target Database Engine Edition : Microsoft SQL Server Express Edition
Target Database Engine Type : Standalone SQL Server
*/
USE [master]
GO
/****** Object: Database [imdb] Script Date: 8/18/2017 12:31:42 AM ******/
CREATE DATABASE [imdb]
CONTAINMENT = NONE
ON PRIMARY
( NAME = N'imdb', FILENAME = N'D:\RDSDBDATA\DATA\imdb.mdf' , SIZE = 8192KB , MAXSIZE = UNLIMITED, FILEGROWTH = 10%)
LOG ON
( NAME = N'imdb_log', FILENAME = N'D:\RDSDBDATA\DATA\imdb_log.ldf' , SIZE = 1024KB , MAXSIZE = 2048GB , FILEGROWTH = 10%)
GO
ALTER DATABASE [imdb] SET COMPATIBILITY_LEVEL = 130
GO
IF (1 = FULLTEXTSERVICEPROPERTY('IsFullTextInstalled'))
begin
EXEC [imdb].[dbo].[sp_fulltext_database] @action = 'enable'
end
GO
ALTER DATABASE [imdb] SET ANSI_NULL_DEFAULT OFF
GO
ALTER DATABASE [imdb] SET ANSI_NULLS OFF
GO
ALTER DATABASE [imdb] SET ANSI_PADDING OFF
GO
ALTER DATABASE [imdb] SET ANSI_WARNINGS OFF
GO
ALTER DATABASE [imdb] SET ARITHABORT OFF
GO
ALTER DATABASE [imdb] SET AUTO_CLOSE OFF
GO
ALTER DATABASE [imdb] SET AUTO_SHRINK OFF
GO
ALTER DATABASE [imdb] SET AUTO_UPDATE_STATISTICS ON
GO
ALTER DATABASE [imdb] SET CURSOR_CLOSE_ON_COMMIT OFF
GO
ALTER DATABASE [imdb] SET CURSOR_DEFAULT GLOBAL
GO
ALTER DATABASE [imdb] SET CONCAT_NULL_YIELDS_NULL OFF
GO
ALTER DATABASE [imdb] SET NUMERIC_ROUNDABORT OFF
GO
ALTER DATABASE [imdb] SET QUOTED_IDENTIFIER OFF
GO
ALTER DATABASE [imdb] SET RECURSIVE_TRIGGERS OFF
GO
ALTER DATABASE [imdb] SET DISABLE_BROKER
GO
ALTER DATABASE [imdb] SET AUTO_UPDATE_STATISTICS_ASYNC OFF
GO
ALTER DATABASE [imdb] SET DATE_CORRELATION_OPTIMIZATION OFF
GO
ALTER DATABASE [imdb] SET TRUSTWORTHY OFF
GO
ALTER DATABASE [imdb] SET ALLOW_SNAPSHOT_ISOLATION OFF
GO
ALTER DATABASE [imdb] SET PARAMETERIZATION SIMPLE
GO
ALTER DATABASE [imdb] SET READ_COMMITTED_SNAPSHOT OFF
GO
ALTER DATABASE [imdb] SET HONOR_BROKER_PRIORITY OFF
GO
ALTER DATABASE [imdb] SET RECOVERY FULL
GO
ALTER DATABASE [imdb] SET MULTI_USER
GO
ALTER DATABASE [imdb] SET PAGE_VERIFY CHECKSUM
GO
ALTER DATABASE [imdb] SET DB_CHAINING OFF
GO
ALTER DATABASE [imdb] SET FILESTREAM( NON_TRANSACTED_ACCESS = OFF )
GO
ALTER DATABASE [imdb] SET TARGET_RECOVERY_TIME = 60 SECONDS
GO
ALTER DATABASE [imdb] SET DELAYED_DURABILITY = DISABLED
GO
ALTER DATABASE [imdb] SET QUERY_STORE = OFF
GO
USE [imdb]
GO
ALTER DATABASE SCOPED CONFIGURATION SET LEGACY_CARDINALITY_ESTIMATION = OFF;
GO
ALTER DATABASE SCOPED CONFIGURATION FOR SECONDARY SET LEGACY_CARDINALITY_ESTIMATION = PRIMARY;
GO
ALTER DATABASE SCOPED CONFIGURATION SET MAXDOP = 0;
GO
ALTER DATABASE SCOPED CONFIGURATION FOR SECONDARY SET MAXDOP = PRIMARY;
GO
ALTER DATABASE SCOPED CONFIGURATION SET PARAMETER_SNIFFING = ON;
GO
ALTER DATABASE SCOPED CONFIGURATION FOR SECONDARY SET PARAMETER_SNIFFING = PRIMARY;
GO
ALTER DATABASE SCOPED CONFIGURATION SET QUERY_OPTIMIZER_HOTFIXES = OFF;
GO
ALTER DATABASE SCOPED CONFIGURATION FOR SECONDARY SET QUERY_OPTIMIZER_HOTFIXES = PRIMARY;
GO
ALTER DATABASE [imdb] SET READ_WRITE
GO
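
-- Illustrative follow-up check (not part of the generated script): confirm
-- the database came up writable at the expected compatibility level.
-- SELECT name, compatibility_level, is_read_only
-- FROM sys.databases WHERE name = 'imdb';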
| {
"content_hash": "626dda9c46e42f30a84d73a42a3f5625",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 121,
"avg_line_length": 21.69078947368421,
"alnum_prop": 0.7543221110100091,
"repo_name": "MohamadElarabi/IMDBAPI",
"id": "98afa6542de5e73db34427e3f260af85c3f20661",
"size": "3297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ScriptDB-imdb.sql",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "7039"
}
],
"symlink_target": ""
} |
<!DOCTYPE HTML>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="canvas-display-p3.js"></script>
<script>
// Test that patterns created from images with different bit depths and color
// profiles can be drawn into sRGB and Display P3 canvases, by reading
// pixels with getImageData() as sRGB and Display P3 values.
for (let [filename, expectedPixels] of Object.entries(imageTests)) {
for (let contextColorSpace of ["srgb", "display-p3"]) {
for (let imageDataColorSpace of ["srgb", "display-p3"]) {
async_test(function(t) {
let image = new Image();
image.onload = t.step_func_done(function() {
let canvas = document.createElement("canvas");
canvas.width = 4;
canvas.height = 4;
let ctx = canvas.getContext("2d", { colorSpace: contextColorSpace });
ctx.fillStyle = ctx.createPattern(image, "repeat");
ctx.fillRect(0, 0, 4, 4);
let imageData = ctx.getImageData(2, 2, 1, 1, { colorSpace: imageDataColorSpace });
let expected = expectedPixels[`${contextColorSpace} ${imageDataColorSpace}`];
assert_true(pixelsApproximatelyEqual(imageData.data, expected), `Actual pixel value ${[...imageData.data]} is approximately equal to ${expected}.`);
t.done();
});
image.src = `resources/${filename}`;
}, `${filename}, Context ${contextColorSpace}, ImageData ${imageDataColorSpace}`);
}
}
}
</script>
| {
"content_hash": "34d0afe1cb758ad2983d06f29653aa0e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 168,
"avg_line_length": 45.37837837837838,
"alnum_prop": 0.5902322811197142,
"repo_name": "nwjs/chromium.src",
"id": "75a8392c0274c2c19a763805e23f05dc18f8825f",
"size": "1679",
"binary": false,
"copies": "21",
"ref": "refs/heads/nw70",
"path": "third_party/blink/web_tests/external/wpt/html/canvas/element/manual/wide-gamut-canvas/canvas-display-p3-pattern-image.html",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
'use strict';
/**
* Pages
* @type {exports}
*/
exports.pages = require('./pages')
/**
* Staff
* @type {exports}
*/
exports.staff = require('./staff')
/**
* Packages
* @type {exports}
*/
exports.packages = require('./packages')
/**
* Homepage
* @param {object} req
* @param {object} res
*/
exports.index = function(req,res){
res.redirect('/packages')
}
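
/*
 * Illustrative wiring sketch (assumes an Express app, which this file does
 * not define):
 *
 *   var routes = require('./routes')
 *   app.get('/', routes.index) // redirects to /packages
 */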
| {
"content_hash": "a5165d4aebec4b66380b6e5eb49d21b0",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 40,
"avg_line_length": 11.71875,
"alnum_prop": 0.584,
"repo_name": "eSited/bowercdn",
"id": "c95fb321d937270aa308885b7e0a2d7b4ca02b96",
"size": "375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin/routes/index.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1494"
},
{
"name": "HTML",
"bytes": "14015"
},
{
"name": "JavaScript",
"bytes": "39082"
}
],
"symlink_target": ""
} |
package org.apache.derby.impl.store.raw.data;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInput;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.zip.CRC32;
import org.apache.derby.iapi.error.StandardException;
import org.apache.derby.iapi.reference.MessageId;
import org.apache.derby.iapi.reference.SQLState;
import org.apache.derby.iapi.services.io.ArrayInputStream;
import org.apache.derby.iapi.services.io.ArrayOutputStream;
import org.apache.derby.iapi.services.io.CompressedNumber;
import org.apache.derby.iapi.services.io.DataInputUtil;
import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;
import org.apache.derby.iapi.services.io.ErrorObjectInput;
import org.apache.derby.iapi.services.io.FormatIdInputStream;
import org.apache.derby.iapi.services.io.FormatIdOutputStream;
import org.apache.derby.iapi.services.io.FormatIdUtil;
import org.apache.derby.iapi.services.io.FormatableBitSet;
import org.apache.derby.iapi.services.io.LimitObjectInput;
import org.apache.derby.iapi.services.io.StoredFormatIds;
import org.apache.derby.iapi.services.io.StreamStorable;
import org.apache.derby.iapi.services.i18n.MessageService;
import org.apache.derby.shared.common.sanity.SanityManager;
import org.apache.derby.iapi.store.access.Qualifier;
import org.apache.derby.iapi.store.access.RowUtil;
import org.apache.derby.iapi.store.access.conglomerate.LogicalUndo;
import org.apache.derby.iapi.store.raw.ContainerHandle;
import org.apache.derby.iapi.store.raw.FetchDescriptor;
import org.apache.derby.iapi.store.raw.Page;
import org.apache.derby.iapi.store.raw.PageKey;
import org.apache.derby.iapi.store.raw.PageTimeStamp;
import org.apache.derby.iapi.store.raw.RawStoreFactory;
import org.apache.derby.iapi.store.raw.RecordHandle;
import org.apache.derby.iapi.store.raw.log.LogInstant;
import org.apache.derby.iapi.store.raw.xact.RawTransaction;
import org.apache.derby.iapi.types.DataValueDescriptor;
import org.apache.derby.iapi.util.ByteArray;
/**
StoredPage is a sub class of CachedPage that stores page data in a
fixed size byte array and is designed to be written out to a file
through a DataInput/DataOutput interface. A StoredPage can exist
in its clean or dirty state without the FileContainer it was created
from being in memory.
<P><B>Page Format</B><BR>
The page is broken into five sections
<PRE>
+----------+-------------+-------------------+-------------------+----------+
| formatId | page header | records | slot offset table | checksum |
+----------+-------------+-------------------+-------------------+----------+
</PRE>
<BR><B>FormatId</B><BR>
The formatId is a 4 bytes array, it contains the format Id of this page.
<BR><B>Page Header</B><BR>
The page header is a fixed size, 56 bytes
<PRE>
1 byte boolean is page an overflow page
1 byte byte page status (a field maintained in base page)
8 bytes long pageVersion (a field maintained in base page)
2 bytes unsigned short number of slots in slot offset table
4 bytes integer next record identifier
4 bytes integer generation number of this page (Future Use)
4 bytes integer previous generation of this page (Future Use)
8 bytes bipLocation the location of the beforeimage page (Future Use)
2 bytes unsigned short number of deleted rows on page. (new release 2.0)
2 bytes unsigned short % of the page to keep free for updates
2 bytes short spare for future use
4 bytes long spare for future use (encryption uses to write
random bytes here).
8 bytes long spare for future use
8 bytes long spare for future use
</PRE>
	Note that spare space is guaranteed to be written with "0", so any
	future use of a field should either not use "0" as a valid data item or
	should pick 0 as the default value, so that on-the-fly upgrade can
	assume that 0 means the field was never assigned.
<BR><B>Records</B>
The records section contains zero or more records, the format of each record
follows.
minimumRecordSize is the minimum user record size, excluding the space we
use for the record header and field headers. When a record is inserted, it
is stored in a space at least as large as the sum of the minimumRecordSize
and total header size.
For example,
If minimumRecordSize is 10 bytes,
the user record is 7 bytes,
we used 5 bytes for record and field headers,
this record will take (10 + 5) bytes of space, extra 3 bytes is
put into reserve.
If minimumRecordSize is 10 bytes,
user record is 17 bytes,
we used 5 bytes for record and field headers,
this record will take (17 + 5) bytes of space, no reserve space
here.
minimumRecordSize is defined by user on per container basis.
The default for minimumRecordSize is set to 1.
This implementation always keeps occupied bytes at the low end of the record
section. Thus removing (purging) a record moves all other records down, and
their slots are also moved down.
	A page has no empty slot (an empty page has no slot).
<BR><B>Record & Field Format</B>
Record Header format is defined in the StoredRecordHeader class.
<PRE>
<BR><B>Fields</B>
1 byte Boolean - is null, if true no more data follows.
4 bytes Integer - length of field that follows (excludes these four bytes).
StoredPage will use the static method provided by StoredFieldHeader
to read/write field status and field data length.
Field Header format is defined in the StoredFieldHeader class.
<data>
</PRE>
<BR><B>Slot Offset Table</B><BR>
The slot offset table is a table of 6 or 12 bytes per record, depending on
the pageSize being less or greater than 64K:
2 bytes (unsigned short) or 4 bytes (int) page offset for the record that
is assigned to the slot, and 2 bytes (unsigned short) or 4 bytes (int)
for the length of the record on this page.
2 bytes (unsigned short) or 4 bytes (int) for the length of the reserved
number of bytes for this record on this page.
First slot is slot 0. The slot table grows backwards. Slots are never
left empty.
<BR><B>Checksum</B><BR>
	8 bytes of a java.util.zip.CRC32 checksum of the entire page's contents
without the 8 bytes representing the checksum.
<P><B>Page Access</B>
The page data is accessed in this class by one of three methods.
<OL>
<LI>As a byte array using pageData (field in cachedPage). This is the
fastest.
<LI>As an ArrayInputStream (rawDataIn) and ArrayOutputStream (rawDataOut),
this is used to set limits on any one reading the page logically.
<LI>Logically through rawDataIn (ArrayInputStream) and
logicalDataOut (FormatIdOutputStream), this provides the methods to write
logical data (e.g. booleans and integers etc.) and the ObjectInput
and ObjectOutput interfaces for DataValueDescriptor's. These logical
streams are constructed using the array streams.
</OL>
@see java.util.zip.CRC32
@see ArrayInputStream
@see ArrayOutputStream
**/
public class StoredPage extends CachedPage
{
/**************************************************************************
* static final Fields of the class
**************************************************************************
*/
/*
* typed format
*/
public static final int FORMAT_NUMBER =
StoredFormatIds.RAW_STORE_STORED_PAGE;
/**
* Return my format identifier.
**/
public int getTypeFormatId()
{
return StoredFormatIds.RAW_STORE_STORED_PAGE;
}
/**
* Constants used to find different portions of data on the page.
* <p>
* The page is laid out as follows:
* The page is broken into five sections
* +----------+-------------+---------+-------------------+----------+
* | formatId | page header | records | slot offset table | checksum |
* +----------+-------------+---------+-------------------+----------+
*
* offset size section
* ------ ------------------- --------------------------
* 0 PAGE_FORMAT_ID_SIZE formatId
* PAGE_FORMAT_ID_SIZE: PAGE_HEADER_SIZE (56) page header
* RECORD_SPACE_OFFSET: variable records
**/
/**
* Start of page, formatId must fit in 4 bytes.
* <p>
* where the page header starts - page format is mandated by cached page
**/
protected static final int PAGE_HEADER_OFFSET = PAGE_FORMAT_ID_SIZE;
/**
* Fixed size of the page header
**/
protected static final int PAGE_HEADER_SIZE = 56;
/**
* Start of the record storage area.
* <p>
* Note: a subclass may change the start of the record storage area.
* Don't always count on this number.
**/
protected static final int RECORD_SPACE_OFFSET =
PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE;
/**
* offset of the page version number
**/
protected static final int PAGE_VERSION_OFFSET = PAGE_HEADER_OFFSET + 2;
/**
* SMALL_SLOT_SIZE are for pages smaller than 64K,
* LARGE_SLOT_SIZE is for pages bigger than 64K.
**/
protected static final int SMALL_SLOT_SIZE = 2;
protected static final int LARGE_SLOT_SIZE = 4;
/**
* Size of the checksum stored on the page.
*
* The checksum is stored in the last 8 bytes of the page, the slot table
* grows backward up the page starting at the end of the page just before
* the checksum.
**/
protected static final int CHECKSUM_SIZE = 8;
/**
* OVERFLOW_POINTER_SIZE - Number of bytes to reserve for overflow pointer
*
* The overflow pointer is the pointer that the takes the place of the
* last column of a row if the row can't fit on the page. The pointer
* then points to another page where the next column of the row can be
* found. The overflow pointer can be bigger than a row, so when
* overflowing a row the code must overflow enough columns so that there
* is enough free space to write the row. Note this means that the
* minimum space a row can take on a page must allow for at least the
* size of the overflow pointers so that if the row is updated it can
     * write the overflow pointer.
*
**/
protected static final int OVERFLOW_POINTER_SIZE = 12;
/**
* OVERFLOW_PTR_FIELD_SIZE - Number of bytes of an overflow field
*
* This is the length to reserve for either a column or row overflow
* pointer field. It includes the size of the field header plus the
     * maximum length of the overflow pointer (it could be shorter due to
     * compressed storage).
     *
     * The calculation is:
*
* OVERFLOW_PTR_FIELD_SIZE =
* OVERFLOW_POINTER_SIZE +
* sizeof(status byte) +
* sizeof(field length field for a field which is just an overflow ptr)
*
*
**/
protected static final int OVERFLOW_PTR_FIELD_SIZE =
OVERFLOW_POINTER_SIZE + 1 + 1;
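
    // Plugging in the constants above: 12 (overflow pointer) + 1 (status
    // byte) + 1 (field length) = 14 bytes reserved per overflow pointer
    // field.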
/**
* In memory buffer used as scratch space for streaming columns.
**/
ByteHolder bh = null;
/**************************************************************************
* Fields of the class
**************************************************************************
*/
/**
* Constants used in call to logColumn.
* <p>
* Action taken in this routine is determined by the kind of column as
* specified in the columnFlag:
* COLUMN_NONE - the column is insignificant
* COLUMN_FIRST - this is the first column in a logRow() call
* COLUMN_LONG - this is a known long column, therefore we will
* store part of the column on the current page and
* overflow the rest if necessary.
* COLUMN_CREATE_NULL - the column was recently added.
* it doesn't actually exist in the on-disk row yet.
* we will need to put a null in it as soon as possible.
* see DERBY-5679.
**/
protected static final int COLUMN_NONE = 0;
protected static final int COLUMN_FIRST = 1;
protected static final int COLUMN_LONG = 2;
protected static final int COLUMN_CREATE_NULL = 3;
/**
     * maxFieldSize is a worst case calculation for the size of a record
     * on an empty page, with a single field, while still allowing room for
     * an overflow pointer if another field is to be added.  See initSpace().
*
* This is used as the threshold for a long column.
*
* maxFieldSize =
* totalSpace - slotEntrySize - 16 - OVERFLOW_POINTER_SIZE;
**/
private int maxFieldSize;
/**
* The page header is a fixed size, 56 bytes, following are variables used
* to access the fields in the header:
* <p>
* 1 byte boolean isOverflowPage is page an overflow page
* 1 byte byte pageStatus page status (field in base page)
* 8 bytes long pageVersion page version (field in base page)
* 2 bytes ushort slotsInUse number of slots in slot offset table
* 4 bytes integer nextId next record identifier
* 4 bytes integer generation generation number of this page(FUTURE USE)
* 4 bytes integer prevGeneration previous generation of page (FUTURE USE)
* 8 bytes long bipLocation the location of the BI page (FUTURE USE)
* 2 bytes ushort deletedRowCount number of deleted rows on page.(rel 2.0)
* 2 bytes long spare for future use
* 4 bytes long spare (encryption writes random bytes)
* 8 bytes long spare for future use
* 8 bytes long spare for future use
*
     * Note that spare space is guaranteed to be written with "0", so any
     * future use of a field should either not use "0" as a valid data item
     * or should pick 0 as the default value, so that on-the-fly upgrade can
     * assume that 0 means the field was never assigned.
*
**/
private boolean isOverflowPage; // is page an overflow page?
private int slotsInUse; // number of slots in slot offset table.
private int nextId; // next record identifier
private int generation; // (Future Use) generation number of this page
private int prevGeneration; // (Future Use) previous generation of page
private long bipLocation; // (Future Use) the location of the BI page
private int deletedRowCount; // number of deleted rows on page.
/**
* Is the header in the byte array out of date wrt the fields.
* <p>
* this field must be set to true whenever one of the above header fields
* is modified. Ie any of (isOverflowPage, slotsInUse, nextId, generation,
* prevGeneration, bipLocation, deletedRowCount)
**/
private boolean headerOutOfDate;
/**
* holder for the checksum.
**/
private CRC32 checksum;
/**
* Minimum space to reserve for record portion length of row.
* <p>
* minimumRecordSize is stored in the container handle. It is used to
* reserve minimum space for recordPortionLength. Default is 1. To
* get the value from the container handle:
* myContainer.getMinimumRecordSize();
*
* minimumRecordSize is the minimum user record size, excluding the space we
* use for the record header and field headers. When a record is inserted,
* it is stored in a space at least as large as the sum of the
* minimumRecordSize and total header size.
*
* For example,
* If minimumRecordSize is 10 bytes,
* the user record is 7 bytes,
* we used 5 bytes for record and field headers,
* this record will take (10 + 5) bytes of space, extra 3 bytes is
* put into reserve.
*
* If minimumRecordSize is 10 bytes,
* user record is 17 bytes,
* we used 5 bytes for record and field headers,
* this record will take (17 + 5) bytes of space, no reserve space
* here.
*
* minimumRecordSize is defined by user on per container basis.
* The default for minimumRecordSize is set to 1.
*
**/
protected int minimumRecordSize;
/**
* scratch variable used to keep track of the total user size for the row.
* the information is used by logRow to maintain minimumRecordSize
* on Page. minimumRecordSize is only considered for main data pages,
* therefore, the page must be latched during an insert operation.
**/
private int userRowSize;
/**
* slot field and slot entry size.
* <p>
     * The size of these fields is dependent on the page size.
* These 2 variables should be set when pageSize is determined, and should
* not be changed for that page.
*
* Each slot entry contains 3 fields (slotOffet, recordPortionLength and
* reservedSpace) for the record the slot is pointing to.
* slotFieldSize is the size for each of the slot field.
* slotEntrySize is the total space used for a single slot entry.
**/
private int slotFieldSize;
private int slotEntrySize;
/**
* Offset of the first entry in the slot table.
* <p>
* Offset table is located at end of page, just before checksum. It
* grows backward as an array from this point toward the middle of the
* page.
* <p>
* slotTableOffsetToFirstEntry is the offset to the beginning of the
* first entry (slot[0]) in the slot table. This allows the following
* math to get to the offset of N'th entry in the slot table:
*
     * offset of slot[N] = slotTableOffsetToFirstEntry - (N * slotEntrySize)
**/
private int slotTableOffsetToFirstEntry;
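
    // Worked sketch of the slot table math above, using illustrative numbers
    // rather than values taken from a real page: on a 4096 byte page,
    // slotFieldSize is SMALL_SLOT_SIZE (2), so slotEntrySize = 3 * 2 = 6 and
    // slotTableOffsetToFirstEntry = 4096 - CHECKSUM_SIZE(8) - 6 = 4082.
    // Slot[0] then occupies bytes 4082-4087, just before the checksum, and
    // slot[3] starts at 4082 - (3 * 6) = 4064, since the table grows backward.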
/**
* Offset of the record length entry in the 1st slot table entry.
* <p>
* Offset table is located at end of page, just before checksum. It
* grows backward as an array from this point toward the middle of the
* page. The record length is stored as the second "field" of the
* slot table entry.
* <p>
* slotTableOffsetToFirstRecordLengthField is the offset to the beginning
* of the record length field in the first entry (slot[0]) in the slot
* table. This allows the following
* math to get to the record length field of N'th entry in the slot table:
*
* offset of record length of slot[N] slot entry =
     *     slotTableOffsetToFirstRecordLengthField - (N * slotEntrySize)
**/
private int slotTableOffsetToFirstRecordLengthField;
/**
* Offset of the reserved space length entry in the 1st slot table entry.
* <p>
* Offset table is located at end of page, just before checksum. It
* grows backward as an array from this point toward the middle of the
* page. The reserved space length is stored as the third "field" of the
* slot table entry.
* <p>
* slotTableOffsetToFirstReservedSpaceField is the offset to the beginning
* of the reserved space field in the first entry (slot[0]) in the slot
* table. This allows the following
* math to get to the reserved space field of N'th entry in the slot table:
*
* offset of reserved space of slot[N] slot entry =
     *     slotTableOffsetToFirstReservedSpaceField - (N * slotEntrySize)
**/
private int slotTableOffsetToFirstReservedSpaceField;
/**
* total usable space on a page.
* <p>
* This is the space not taken by page hdr, page table, and existing
* slot entries/rows.
**/
protected int totalSpace; // total usable space on a page
// freeSpace and firstFreeByte are initliazed to a minimum value.
protected int freeSpace = Integer.MIN_VALUE; // free space on the page
private int firstFreeByte = Integer.MIN_VALUE; // 1st free byte on page
/**
* % of page to keep free for updates.
* <p>
* How much of a head page should be reserved as "free" so that the space
     * can be used by an update that expands the row without needing to overflow
* it. 1 means save 1% of the free space for expansion.
**/
protected int spareSpace;
/**
* Scratch variable used when you need a overflowRecordHeader. Declared
* globally so that object is only allocated once per page.
**/
private StoredRecordHeader overflowRecordHeader;
/**
* Input streams used to read/write bytes to/from the page byte array.
**/
protected ArrayInputStream rawDataIn;
protected ArrayOutputStream rawDataOut;
protected FormatIdOutputStream logicalDataOut;
/**************************************************************************
* Constructors for This class:
**************************************************************************
*/
/**
* Simple no-arg constructor for StoredPage.
**/
public StoredPage()
{
super();
}
/**************************************************************************
* Private/Protected methods of This class:
**************************************************************************
*/
/**
* get scratch space for over flow record header.
* <p>
*
* @exception StandardException Standard exception policy.
**/
private StoredRecordHeader getOverFlowRecordHeader()
throws StandardException
{
return(
overflowRecordHeader != null ?
overflowRecordHeader :
(overflowRecordHeader = new StoredRecordHeader()));
}
/**
* Initialize the StoredPage.
* <p>
     * Initialize the object, i.e. perform work normally performed in the constructor.
* Called by setIdentity() and createIdentity() - the Cacheable interfaces
* which are used to move a page in/out of cache.
**/
protected void initialize()
{
super.initialize();
if (rawDataIn == null)
{
rawDataIn = new ArrayInputStream();
checksum = new CRC32();
}
if (pageData != null)
rawDataIn.setData(pageData);
}
/**
* Create the output streams.
* <p>
* Create the output streams, these are created on demand
* to avoid creating unrequired objects for pages that are
* never modified during their lifetime in the cache.
* <p>
*
* @exception StandardException Standard exception policy.
**/
private void createOutStreams()
{
rawDataOut = new ArrayOutputStream();
rawDataOut.setData(pageData);
logicalDataOut = new FormatIdOutputStream(rawDataOut);
}
/**
* Tie the logical output stream to a passed in OutputStream.
* <p>
* Tie the logical output stream to a passed in OutputStream with
* no limit as to the number of bytes that can be written.
**/
private void setOutputStream(OutputStream out)
{
if (rawDataOut == null)
createOutStreams();
logicalDataOut.setOutput(out);
}
/**
* Reset the logical output stream.
* <p>
* Reset the logical output stream (logicalDataOut) to be attached
* to the page array stream as is the norm, no limits are placed
* on any writes.
*
**/
private void resetOutputStream()
{
logicalDataOut.setOutput(rawDataOut);
}
/**************************************************************************
* Protected Methods of CachedPage class: (create, read and write a page.)
**************************************************************************
*/
/**
* use this passed in page buffer as this object's page data.
* <p>
* The page content may not have been read in from disk yet.
* For pagesize smaller than 64K:
* Size of the record offset stored in a slot (unsigned short)
* Size of the record portion length stored in a slot (unsigned short)
     *     Size of the reserved space length stored in a slot (unsigned short)
* For pagesize greater than 64K, but less than 2gig:
* Size of the record offset stored in a slot (int)
* Size of the record portion length stored in a slot (int)
     *     Size of the reserved space length stored in a slot (int)
* <p>
*
* @param pageBuffer The array of bytes to use as the page buffer.
**/
protected void usePageBuffer(byte[] pageBuffer)
{
pageData = pageBuffer;
int pageSize = pageData.length;
if (rawDataIn != null)
rawDataIn.setData(pageData);
// Note that the slotFieldSize and slotEntrySize need to be
// calculated BEFORE initSpace() is called, because the
// maxFieldSize computation in initSpace() includes these
// values in its calculations. (DERBY-3099)
slotFieldSize = calculateSlotFieldSize(pageSize);
slotEntrySize = 3 * slotFieldSize;
initSpace();
// offset of slot table entry[0]
slotTableOffsetToFirstEntry =
(pageSize - CHECKSUM_SIZE - slotEntrySize);
// offset of record length field in slot table entry[0]
slotTableOffsetToFirstRecordLengthField =
slotTableOffsetToFirstEntry + slotFieldSize;
// offset of reserved space field in slot table entry[0]
slotTableOffsetToFirstReservedSpaceField =
slotTableOffsetToFirstEntry + (2 * slotFieldSize);
if (rawDataOut != null)
rawDataOut.setData(pageData);
}
/**
* Calculate the slot field size from the page size.
*
* @param pageSize page size in bytes
* @return slot field size in bytes
*/
private int calculateSlotFieldSize(int pageSize) {
if (pageSize < 65536) {
// slots are 2 bytes (unsigned short data type) for pages <64KB
return SMALL_SLOT_SIZE;
} else {
// slots are 4 bytes (int data type) for pages >=64KB
return LARGE_SLOT_SIZE;
}
}
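
    // For example, a 4096 byte page gets 2 byte slot fields (slotEntrySize =
    // 6), while a 65536 byte page gets 4 byte fields (slotEntrySize = 12).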
/**
* Create a new StoredPage.
* <p>
* Make this object represent a new page (ie. a page that never existed
* before, as opposed to reading in an existing page from disk).
* <p>
*
* @param newIdentity The key describing page (segment,container,page).
* @param args information stored about the page, once in the
* container header and passed in through the object.
*
* @exception StandardException Standard exception policy.
**/
protected void createPage(PageKey newIdentity, PageCreationArgs args)
throws StandardException
{
spareSpace = args.spareSpace;
minimumRecordSize = args.minimumRecordSize;
setPageArray(args.pageSize);
cleanPage(); // clean up the page array
setPageVersion(0); // page is being created for the first time
nextId = RecordHandle.FIRST_RECORD_ID; // first record Id
generation = 0;
prevGeneration = 0; // there is no previous generation
bipLocation = 0L;
createOutStreams();
}
/**
* Initialize the page from values in the page buffer.
* <p>
* Initialize in memory structure using the buffer in pageData. This
* is how a StoredPage object is intialized to represent page read in
* from disk.
* <p>
*
* @param myContainer The container to read the page in from.
* @param newIdentity The key representing page being read in (segment,
* container, page number)
*
* @exception StandardException If the page cannot be read correctly,
* or is inconsistent.
**/
protected void initFromData(
FileContainer myContainer,
PageKey newIdentity)
throws StandardException
{
if (myContainer != null)
{
// read in info about page stored once in the container header.
spareSpace = myContainer.getSpareSpace();
minimumRecordSize = myContainer.getMinimumRecordSize();
}
        // if it is null, assume spareSpace and minimumRecordSize are the
// same. We would only call initFromData after a restore then.
try
{
validateChecksum(newIdentity);
}
catch (StandardException se)
{
if (se.getMessageId().equals(SQLState.FILE_BAD_CHECKSUM))
{
// it is remotely possible that the disk transfer got garbled,
// i.e., the page is actually fine on disk but the version we
// got has some rubbish on it. Double check.
int pagesize = getPageSize();
byte[] corruptPage = pageData;
pageData = null; // clear this
// set up the new page array
setPageArray(pagesize);
try
{
myContainer.readPage(newIdentity.getPageNumber(), pageData);
}
catch (IOException ioe)
{
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, newIdentity));
}
if (SanityManager.DEBUG)
{
SanityManager.DEBUG_CLEAR("TEST_BAD_CHECKSUM");
}
// see if this read confirms the checksum error
try
{
validateChecksum(newIdentity);
}
catch (StandardException sse)
{
// really bad
throw dataFactory.markCorrupt(se);
}
// If we got here, this means the first read is bad but the
// second read is good. This could be due to disk I/O error or
// a bug in the way the file pointer is mis-managed.
String firstImage = pagedataToHexDump(corruptPage);
String secondImage =
(SanityManager.DEBUG) ?
toString() : pagedataToHexDump(corruptPage);
throw StandardException.newException(
SQLState.FILE_IO_GARBLED, se,
newIdentity, firstImage, secondImage);
}
else
{
throw se;
}
}
try
{
readPageHeader();
initSlotTable(newIdentity);
}
catch (IOException ioe)
{
// i/o methods on the byte array have thrown an IOException
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, newIdentity));
}
}
/**
* Validate the check sum on the page.
* <p>
* Compare the check sum stored in the page on disk with the checksum
* calculated from the bytes on the page.
* <p>
*
* @param id The key that describes the page.
*
* @exception StandardException Standard exception policy.
**/
protected void validateChecksum(PageKey id)
throws StandardException
{
long onDiskChecksum;
try
{
// read the checksum stored on the page on disk. It is stored
// in the last "CHECKSUM_SIZE" bytes of the page, and is a long.
rawDataIn.setPosition(getPageSize() - CHECKSUM_SIZE);
onDiskChecksum = rawDataIn.readLong();
}
catch (IOException ioe)
{
// i/o methods on the byte array have thrown an IOException
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, id));
}
// Force the checksum to be recalculated based on the current page.
checksum.reset();
checksum.update(pageData, 0, getPageSize() - CHECKSUM_SIZE);
// force a bad checksum error
if (SanityManager.DEBUG)
{
if (SanityManager.DEBUG_ON("TEST_BAD_CHECKSUM"))
{
// set on disk checksum to wrong value
onDiskChecksum = 123456789;
}
}
if (onDiskChecksum != checksum.getValue())
{
// try again using new checksum object to be doubly sure
CRC32 newChecksum = new CRC32();
newChecksum.reset();
newChecksum.update(pageData, 0, getPageSize()-CHECKSUM_SIZE);
if (onDiskChecksum != newChecksum.getValue())
{
throw StandardException.newException(
SQLState.FILE_BAD_CHECKSUM,
id,
new Long(checksum.getValue()),
new Long(onDiskChecksum),
pagedataToHexDump(pageData));
}
else
{
// old one is bad, get rid of it
if (SanityManager.DEBUG)
SanityManager.THROWASSERT("old checksum gets wrong value");
checksum = newChecksum;
}
}
}
/**
* Recalculate checksum and write it to the page array.
* <p>
* Recalculate the checksum of the page, and write the result back into
* the last bytes of the page.
*
* @exception IOException if writing to end of array fails.
**/
protected void updateChecksum() throws IOException
{
checksum.reset();
checksum.update(pageData, 0, getPageSize() - CHECKSUM_SIZE);
rawDataOut.setPosition(getPageSize() - CHECKSUM_SIZE);
logicalDataOut.writeLong(checksum.getValue());
}
/**
* Write information about page from variables into page byte array.
* <p>
     * This routine ensures that all information about the page is reflected
     * in the page byte buffer.  This involves moving information from local
     * variables into the encoded version on the page: the page header and
     * the checksum.
* <p>
*
* @param identity The key of this page.
*
* @exception StandardException Standard exception policy.
**/
protected void writePage(PageKey identity)
throws StandardException
{
if (SanityManager.DEBUG)
{
// some consistency checks on fields of the page, good to check
// before we write them into the page.
if ((freeSpace < 0) ||
(firstFreeByte + freeSpace) != (getSlotOffset(slotsInUse - 1)))
{
// make sure free space is not negative and does not overlap
// used space.
SanityManager.THROWASSERT(
"writePage detected problem in freespace and used space."
+ "slotsInUse = " + slotsInUse
+ ", firstFreeByte = " + firstFreeByte
+ ", freeSpace = " + freeSpace
+ ", slotOffset = " + (getSlotOffset(slotsInUse - 1))
+ ", page = " + this);
}
if ((slotsInUse == 0) &&
(firstFreeByte != (getPageSize() - totalSpace - CHECKSUM_SIZE)))
{
SanityManager.THROWASSERT("slotsInUse = " + slotsInUse
+ ", firstFreeByte = " + firstFreeByte
+ ", freeSpace = " + freeSpace
+ ", slotOffset = " + (getSlotOffset(slotsInUse - 1))
+ ", page = " + this);
}
}
try
{
if (headerOutOfDate)
{
updatePageHeader();
}
else
{
// page version always need to be updated if page is dirty,
// either do it in updatePageHeader or by itself
updatePageVersion();
}
updateChecksum();
}
catch (IOException ioe)
{
// i/o methods on the byte array have thrown an IOException
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, identity));
}
}
/**
* Write out the format id of this page
*
* @param identity The key of this page.
*
* @exception StandardException Standard exception policy.
**/
protected void writeFormatId(PageKey identity) throws StandardException
{
try
{
if (rawDataOut == null)
createOutStreams();
rawDataOut.setPosition(0);
FormatIdUtil.writeFormatIdInteger(
logicalDataOut, getTypeFormatId());
}
catch (IOException ioe)
{
// i/o methods on the byte array have thrown an IOException
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, identity));
}
}
/**************************************************************************
* Protected Methods of Cacheable Interface:
**************************************************************************
*/
/**************************************************************************
* Protected OverRidden Methods of BasePage:
**************************************************************************
*/
/**
* Ensure that the page is released from the cache when it is unlatched.
*
* @see org.apache.derby.impl.store.raw.data.BasePage#releaseExclusive
*
**/
protected void releaseExclusive()
{
super.releaseExclusive();
pageCache.release(this);
}
/**
* Return the total number of bytes used, reserved, or wasted by the
* record at this slot.
* <p>
* The amount of space the record on this slot is currently taking on the
* page.
*
     * If there is any reserved or wasted space, count that in as well.
     * Do NOT count the slot entry size.
* <p>
*
* @return The number of bytes used by the row at slot "slot".
*
* @param slot look at row at this slot.
*
* @exception StandardException Standard exception policy.
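     * <p>
     * Informally, given the slot entry layout read below, this is just:
     * <pre>
     *     totalSpace(slot) = recordPortionLength(slot) + reservedCount(slot)
     * </pre>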
**/
public int getTotalSpace(int slot)
throws StandardException
{
try
{
if (SanityManager.DEBUG)
{
if (getRecordOffset(slot) <= 0)
{
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"getTotalSpace failed with getRecordOffset(" +
slot + ") = " +
getRecordOffset(slot) + " must be greater than 0." +
"page dump = \n" +
toUncheckedString());
SanityManager.THROWASSERT(
"bad record offset found in getTotalSpace()");
}
}
// A slot entry looks like the following:
// 1st field: offset of the record on the page
// 2nd field: length of the record on the page
// 3rd field: amount of space reserved for the record to grow.
// position the read at the beginning of the 2nd field.
rawDataIn.setPosition(getSlotOffset(slot) + slotFieldSize);
// return the size of the record + size of the reserved space.
// the size of the fields to read is determined by slotFieldSize.
return(
((slotFieldSize == SMALL_SLOT_SIZE) ?
(rawDataIn.readUnsignedShort() +
rawDataIn.readUnsignedShort()) :
(rawDataIn.readInt() +
rawDataIn.readInt())));
}
catch (IOException ioe)
{
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
}
}
/**
* Is there minimal space for insert?
* <p>
     * Does a quick calculation to see if an average sized row for this page
     * could be inserted on the page.  This is done because the actual size
     * of the row being inserted isn't known until we copy the columns from
     * their object form into their on-disk form, which is expensive.  Using
     * this estimate, in the normal case the row is copied directly onto the
     * page just once.
* <p>
*
* @return true if we think the page will allow an insert, false otherwise.
*
* @exception StandardException Standard exception policy.
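     * <p>
     * A sketch of the estimate with hypothetical numbers (in addition to
     * the allowInsert() test): totalSpace = 4000, freeSpace = 1000,
     * slotsInUse = 10 gives
     * <pre>
     *     int usedSpace   = totalSpace - freeSpace;    // 3000
     *     int bytesPerRow = usedSpace / slotsInUse;    // 300, "average" row
     *     boolean fits    = bytesPerRow <= freeSpace;  // true
     * </pre>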
**/
public boolean spaceForInsert()
throws StandardException
{
// is this an empty page
if (slotsInUse == 0)
return(true);
if (!allowInsert())
return(false);
int usedSpace = totalSpace - freeSpace;
int bytesPerRow = usedSpace / slotsInUse;
return(bytesPerRow <= freeSpace);
}
/**
* Is row guaranteed to be inserted successfully on this page?
* <p>
* Return true if this record is guaranteed to be inserted successfully
* using insert() or insertAtSlot(). This guarantee is only valid while
* the row remains unchanged and the page latch is held.
* <p>
*
     * @return boolean indicating if row can be inserted on this page.
*
* @param row The row to check for insert.
* @param validColumns bit map to interpret valid columns in row.
     * @param overflowThreshold The percentage of the page to use for the
     *                          insert.  100 means use 100% of the page,
     *                          50 means use 50% of the page (i.e. make sure
     *                          2 rows fit per page).
*
* @exception StandardException Standard exception policy.
**/
public boolean spaceForInsert(
Object[] row,
FormatableBitSet validColumns,
int overflowThreshold)
throws StandardException
{
// is this an empty page
if (slotsInUse == 0)
return true;
// does the estimate think it won't fit, if not return false to avoid
// cost of calling logRow() just to figure out if the row will fit.
if (!allowInsert())
return false;
DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();
try
{
// This is a public call, start column is rawstore only.
// set the starting Column for the row to be 0.
logRow(
0, true, nextId, row, validColumns, out,
0, Page.INSERT_DEFAULT, -1, -1, overflowThreshold);
}
catch (NoSpaceOnPage nsop)
{
return false;
}
catch (IOException ioe)
{
throw StandardException.newException(
SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
}
return true;
}
/**
* Is row guaranteed to be inserted successfully on this page?
* <p>
* Return true if this record is guaranteed to be inserted successfully
* using insert() or insertAtSlot(). This guarantee is only valid while
* the row remains unchanged and the page latch is held.
* <p>
* This is a private call only used when calculating whether an overflow
* page can be used to insert part of an overflow row/column.
*
     * @return boolean indicating if row can be inserted on this page.
*
* @param row The row to check for insert.
* @param validColumns bit map to interpret valid columns in row.
     * @param overflowThreshold The percentage of the page to use for the
     *                          insert.  100 means use 100% of the page,
     *                          50 means use 50% of the page (i.e. make sure
     *                          2 rows fit per page).
*
* @exception StandardException Standard exception policy.
**/
private boolean spaceForInsert(
Object[] row,
FormatableBitSet validColumns,
int spaceNeeded,
int startColumn,
int overflowThreshold)
throws StandardException
{
if (!(spaceForInsert() && (freeSpace >= spaceNeeded)))
return false;
DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();
try
{
logRow(
0, true, nextId, row, validColumns, out, startColumn,
Page.INSERT_DEFAULT, -1, -1, overflowThreshold);
}
catch (NoSpaceOnPage nsop)
{
return false;
}
catch (IOException ioe)
{
throw StandardException.newException(
SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
}
return true;
}
/**
* Is this page unfilled?
* <p>
* Returns true if page is relatively unfilled,
* which means the page is < 1/2 full and has enough space to insert an
* "average" sized row onto the page.
* <p>
*
* @return true if page is relatively unfilled.
**/
public boolean unfilled()
{
return (allowInsert() && (freeSpace > (getPageSize() / 2)));
}
/**
* Is there enough space on the page to insert a minimum size row?
* <p>
* Calculate whether there is enough space on the page to insert a
* minimum size row. The calculation includes maintaining the required
* reserved space on the page for existing rows to grow on the page.
* <p>
*
* @return boolean indicating if a minimum sized row can be inserted.
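     * <p>
     * A sketch of the calculation with hypothetical numbers (freeSpace =
     * 500, slotEntrySize = 6, totalSpace = 4000, spareSpace = 10 percent),
     * ignoring the MAX_OVERFLOW_ONLY_REC_SIZE floor for brevity:
     * <pre>
     *     int spaceAvailable = freeSpace - slotEntrySize;        // 494
     *     boolean ok =
     *         (spaceAvailable >= minimumRecordSize) &&
     *         (((spaceAvailable * 100) / totalSpace) >= spareSpace);
     *         // (494 * 100) / 4000 = 12, and 12 >= 10, so ok
     * </pre>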
**/
public boolean allowInsert()
{
// is this an empty page
if (slotsInUse == 0)
return true;
int spaceAvailable = freeSpace;
        spaceAvailable -= slotEntrySize; // need to account for the new slot entry
if ((spaceAvailable < minimumRecordSize) ||
(spaceAvailable < StoredRecordHeader.MAX_OVERFLOW_ONLY_REC_SIZE))
{
return false;
}
// see that we reserve enough space for existing rows to grow on page
if (((spaceAvailable * 100) / totalSpace) < spareSpace)
return false;
return true;
}
/**
* Does this page have enough space to insert the input rows?
* <p>
* Can the rows with lengths spaceNeeded[0..num_rows-1] be copied onto
* this page?
* <p>
*
* @return true if the sum of the lengths will fit on the page.
*
* @param num_rows number of rows to check for.
* @param spaceNeeded array of lengths of the rows to insert.
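     * <p>
     * For example (hypothetical numbers): copying 3 rows of lengths
     * {100, 5, 200} with minimumRecordSize = 12 and slotEntrySize = 6 needs
     * <pre>
     *     3 * 6              // slot entries            =  18
     *     + 100 + 12 + 200   // 5 is rounded up to 12   = 312
     *                        // total                   = 330 bytes
     * </pre>
     * which fits only if freeSpace >= 330.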
**/
public boolean spaceForCopy(int num_rows, int[] spaceNeeded)
{
// determine how many more bytes are needed for the slot entries
int bytesNeeded = slotEntrySize * num_rows;
for (int i = 0; i < num_rows; i++)
{
if (spaceNeeded[i] > 0)
{
// add up the space needed by the rows, add in minimumRecordSize
// if length of actual row is less than minimumRecordSize.
bytesNeeded +=
(spaceNeeded[i] >= minimumRecordSize ?
spaceNeeded[i] : minimumRecordSize);
}
}
return((freeSpace - bytesNeeded) >= 0);
}
/**
* Does this page have enough space to move the row to it.
* <p>
* Calculate if a row of length "spaceNeeded" with current record id
* "source_id" will fit on this page.
*
* @param spaceNeeded length of the row encoded with source_id record id.
* @param source_id record id of the row being moved.
*
* @return true if the record will fit on this page, after being given a
* new record id as the next id on this page.
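     * <p>
     * Informally (the source record id and the id assigned on this page
     * may encode to different stored sizes, so the length is adjusted
     * first; storedSize() here stands for
     * StoredRecordHeader.getStoredSizeRecordId()):
     * <pre>
     *     adjusted = spaceNeeded
     *                  - storedSize(source_id) + storedSize(nextId);
     *     fits     = freeSpace >=
     *                  slotEntrySize + max(adjusted, minimumRecordSize);
     * </pre>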
**/
protected boolean spaceForCopy(
int spaceNeeded,
int source_id)
{
spaceNeeded =
spaceNeeded
- StoredRecordHeader.getStoredSizeRecordId(source_id)
+ StoredRecordHeader.getStoredSizeRecordId(nextId);
// add up the space needed by the rows, add in minimumRecordSize
// if length of actual row is less than minimumRecordSize.
int bytesNeeded = slotEntrySize +
(spaceNeeded >= minimumRecordSize ?
spaceNeeded : minimumRecordSize);
return((freeSpace - bytesNeeded) >= 0);
}
/**
* Read the record at the given slot into the given row.
* <P>
* This reads and initializes the columns in the row array from the raw
* bytes stored in the page associated with the given slot. If validColumns
* is non-null then it will only read those columns indicated by the bit
* set, otherwise it will try to read into every column in row[].
* <P>
* If there are more columns than entries in row[] then it just stops after
* every entry in row[] is full.
* <P>
* If there are more entries in row[] than exist on disk, the requested
* excess columns will be set to null by calling the column's object's
     * restoreToNull() routine (i.e. ((Object) column).restoreToNull()).
* <P>
* If a qualifier list is provided then the row will only be read from
* disk if all of the qualifiers evaluate true. Some of the columns may
* have been read into row[] in the process of evaluating the qualifier.
* <p>
* This routine should only be called on the head portion of a row, it
* will call a utility routine to read the rest of the row if it is a
* long row.
*
*
* @param slot the slot number
* @param row (out) filled in sparse row
* @param fetchDesc Information describing fetch, including what
* columns to fetch and qualifiers.
* @param recordToLock the record handle for the row at top level,
* and is used in OverflowInputStream to lock the
* row for Blobs/Clobs.
* @param isHeadRow The row on this page includes the head record
* handle. Will be false for the overflow portions
* of a "long" row, where columns of a row span
* multiple pages.
*
     * @return false if a qualifier_list is provided and the row does not
     *         qualify (no row is read in that case), else true.
*
* @exception StandardException Standard Derby error policy
**/
protected boolean restoreRecordFromSlot(
int slot,
Object[] row,
FetchDescriptor fetchDesc,
RecordHandle recordToLock,
StoredRecordHeader recordHeader,
boolean isHeadRow)
throws StandardException
{
try
{
int offset_to_row_data =
getRecordOffset(slot) + recordHeader.size();
if (SanityManager.DEBUG)
{
if (getRecordOffset(slot) <
(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE))
{
SanityManager.THROWASSERT(
"Incorrect offset. offset = " +
getRecordOffset(slot) +
", offset should be < " +
"(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) = " +
(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) +
", current slot = " + slot +
", total slotsInUse = " + slotsInUse);
}
SanityManager.ASSERT(
isHeadRow, "restoreRecordFromSlot called on a non-headrow");
SanityManager.ASSERT(
!isOverflowPage(),
"restoreRecordFromSlot called on an overflow page.");
}
// position the array reading stream at beginning of row data just
// past the record header.
ArrayInputStream lrdi = rawDataIn;
lrdi.setPosition(offset_to_row_data);
if (!recordHeader.hasOverflow())
{
if (isHeadRow)
{
if (fetchDesc != null &&
fetchDesc.getQualifierList() != null)
{
fetchDesc.reset();
if (!qualifyRecordFromSlot(
row,
offset_to_row_data,
fetchDesc,
recordHeader,
recordToLock))
{
return(false);
}
else
{
// reset position back for subsequent record read.
lrdi.setPosition(offset_to_row_data);
}
}
}
                // call routine to do the real work.  Note that
                // readRecordFromArray() may return false for a non-overflow
                // record; this is the case where the caller requests more
                // columns than exist on disk.  In that case we still return
                // true at this point as there are no more columns that we
                // can return.
if (fetchDesc != null)
{
readRecordFromArray(
row,
(fetchDesc.getValidColumns() == null) ?
row.length -1 : fetchDesc.getMaxFetchColumnId(),
fetchDesc.getValidColumnsArray(),
fetchDesc.getMaterializedColumns(),
lrdi,
recordHeader,
recordToLock);
}
else
{
readRecordFromArray(
row,
row.length - 1,
(int[]) null,
(int[]) null,
lrdi,
recordHeader,
recordToLock);
}
return(true);
}
else
{
if (fetchDesc != null)
{
if (fetchDesc.getQualifierList() != null)
{
fetchDesc.reset();
}
readRecordFromArray(
row,
(fetchDesc.getValidColumns() == null) ?
row.length - 1 : fetchDesc.getMaxFetchColumnId(),
fetchDesc.getValidColumnsArray(),
fetchDesc.getMaterializedColumns(),
lrdi,
recordHeader,
recordToLock);
}
else
{
readRecordFromArray(
row,
row.length - 1,
(int[]) null,
(int[]) null,
lrdi,
recordHeader,
recordToLock);
}
// call routine to loop through all the overflow portions of
// the row, reading it into "row".
while (recordHeader != null)
{
                // The record is a long row; loop calling code to read the
// pieces of the row located in a linked list of rows on
// overflow pages.
StoredPage overflowPage =
getOverflowPage(recordHeader.getOverflowPage());
if (SanityManager.DEBUG)
{
if (overflowPage == null)
SanityManager.THROWASSERT(
"cannot get overflow page");
}
// This call reads in the columns of the row that reside
// on "overflowPage", and if there is another piece it
// returns the recordHeader of the row on overFlowPage,
// from which we can find the next piece of the row. A
// null return means that we have read in the entire row,
// and are done.
recordHeader =
overflowPage.restoreLongRecordFromSlot(
row,
fetchDesc,
recordToLock,
recordHeader);
overflowPage.unlatch();
overflowPage = null;
}
// for overflow rows just apply qualifiers at end for now.
if ((fetchDesc != null) &&
(fetchDesc.getQualifierList() != null))
{
if (!qualifyRecordFromRow(
row, fetchDesc.getQualifierList()))
{
return(false);
}
}
return(true);
}
}
catch (IOException ioe)
{
if (SanityManager.DEBUG)
{
if (pageData == null)
{
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"caught an IOException in restoreRecordFromSlot " +
(PageKey)getIdentity() + " slot " + slot +
", pageData is null");
}
else
{
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"caught an IOException in reestoreRecordFromSlot, " +
(PageKey)getIdentity() + " slot " + slot +
", pageData.length = " +
pageData.length + " pageSize = " + getPageSize());
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"Hex dump of pageData \n " +
"--------------------------------------------------\n" +
pagedataToHexDump(pageData) +
"--------------------------------------------------\n");
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"Attempt to dump page " + this.toString());
}
}
// i/o methods on the byte array have thrown an IOException
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
}
}
private StoredRecordHeader restoreLongRecordFromSlot(
Object[] row,
FetchDescriptor fetchDesc,
RecordHandle recordToLock,
StoredRecordHeader parent_recordHeader)
throws StandardException
{
int slot =
findRecordById(
parent_recordHeader.getOverflowId(), Page.FIRST_SLOT_NUMBER);
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
try
{
int offset_to_row_data =
getRecordOffset(slot) + recordHeader.size();
if (SanityManager.DEBUG)
{
if (getRecordOffset(slot) <
(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE))
{
SanityManager.THROWASSERT(
"Incorrect offset. offset = " +
getRecordOffset(slot) +
", offset should be < " +
"(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) = " +
(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) +
", current slot = " + slot +
", total slotsInUse = " + slotsInUse);
}
}
// position the array reading stream at beginning of row data
// just past the record header.
ArrayInputStream lrdi = rawDataIn;
lrdi.setPosition(offset_to_row_data);
if (fetchDesc != null)
{
if (fetchDesc.getQualifierList() != null)
{
fetchDesc.reset();
}
readRecordFromArray(
row,
(fetchDesc.getValidColumns() == null) ?
row.length - 1 : fetchDesc.getMaxFetchColumnId(),
fetchDesc.getValidColumnsArray(),
fetchDesc.getMaterializedColumns(),
lrdi,
recordHeader,
recordToLock);
}
else
{
readRecordFromArray(
row,
row.length - 1,
(int[]) null,
(int[]) null,
lrdi,
recordHeader,
recordToLock);
}
return(recordHeader.hasOverflow() ? recordHeader : null);
}
catch (IOException ioe)
{
if (SanityManager.DEBUG)
{
if (pageData == null)
{
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"caught an IOException in restoreRecordFromSlot " +
(PageKey)getIdentity() + " slot " + slot +
", pageData is null");
}
else
{
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"caught an IOException in reestoreRecordFromSlot, " +
(PageKey)getIdentity() + " slot " + slot +
", pageData.length = " +
pageData.length + " pageSize = " + getPageSize());
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"Hex dump of pageData \n " +
"--------------------------------------------------\n" +
pagedataToHexDump(pageData) +
"--------------------------------------------------\n");
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"Attempt to dump page " + this.toString());
}
}
// i/o methods on the byte array have thrown an IOException
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
}
}
/**
* Create a new record handle.
* <p>
* Return the next record id for allocation. Callers of this interface
     * expect the next id to get bumped somewhere else - probably by
* storeRecordForInsert().
* <p>
*
     * @return The next id to assign to a row.
**/
public int newRecordId()
{
return nextId;
}
/**
* Create a new record handle, and bump the id.
* <p>
* Create a new record handle, and bump the id while holding the latch
* so that no other user can ever see this record id. This will lead
* to unused record id's in the case where an insert fails because there
* is not enough space on the page.
* <p>
*
     * @return The next id to assign to a row.
**/
public int newRecordIdAndBump()
{
// headerOutOfDate must be bumped as nextId is changing, and must
// eventually be updated in the page array.
headerOutOfDate = true;
return nextId++;
}
/**
* Create a new record id based on current one passed in.
* <p>
* This interface is used for the "copy" insert interface of raw store
* where multiple rows are inserted into a page in a single logged
     * operation.  We don't want to bump the id until the operation is
     * logged, so we just allocate each id in order and then bump the next
     * id at the end of the operation.
* <p>
*
* @return the next id based on the input id.
*
* @param recordId The id caller just used, return the next one.
*
**/
protected int newRecordId(int recordId)
{
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
recordId >= nextId,
"should not create a record Id that is already given out");
}
return recordId + 1;
}
public boolean isOverflowPage()
{
return isOverflowPage;
}
/**************************************************************************
* Public Methods specific to StoredPage:
**************************************************************************
*/
/**
* Get the full size of the page.
**/
public final int getPageSize()
{
return pageData.length;
}
/**
* Zero out a portion of the page.
*
* @param offset position of first byte to clear
* @param length how many bytes to clear
**/
protected final void clearSection(int offset, int length)
{
Arrays.fill(pageData, offset, offset + length, (byte) 0);
}
/**
* The maximum free space on this page possible.
* <p>
     * The maximum amount of space that can be used on the page
     * for the records and the slot offset table.
     * NOTE: a subclass may have overridden it to report less free space.
*
* @return the maximum free space on this page possible.
*
**/
protected int getMaxFreeSpace()
{
return getPageSize() - RECORD_SPACE_OFFSET - CHECKSUM_SIZE;
}
/**
* The current free space on the page.
**/
protected int getCurrentFreeSpace()
{
return freeSpace;
}
/**************************************************************************
* Page header routines
**************************************************************************
*/
/**
* Read the page header from the page array.
* <p>
* Read the page header from byte form in the page array into in memory
* variables.
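     * <p>
     * The fields are read in the fixed order they are laid out on disk
     * (see updatePageHeader for the matching writes; "ushort" below means
     * a value read with readUnsignedShort()):
     * <pre>
     *     boolean  isOverflowPage
     *     byte     pageStatus
     *     long     pageVersion
     *     ushort   slotsInUse
     *     int      nextId
     *     int      generation       (Future Use)
     *     int      prevGeneration   (Future Use)
     *     long     bipLocation      (Future Use)
     *     ushort   deletedRowCount  (stored as count + 1)
     *     22 bytes reserved for future use
     * </pre>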
**/
private void readPageHeader()
throws IOException
{
// these reads are always against the page array
ArrayInputStream lrdi = rawDataIn;
lrdi.setPosition(PAGE_HEADER_OFFSET);
long spare;
isOverflowPage = lrdi.readBoolean();
setPageStatus (lrdi.readByte());
setPageVersion (lrdi.readLong());
slotsInUse = lrdi.readUnsignedShort();
nextId = lrdi.readInt();
generation = lrdi.readInt(); // page generation (Future Use)
prevGeneration = lrdi.readInt(); // previous generation (Future Use)
bipLocation = lrdi.readLong(); // BIPage location (Future Use)
        // number of deleted rows on the page, stored starting with release
        // 2.0.  For upgrade reasons, a 0 on disk means -1, so we subtract
        // one here.
deletedRowCount = lrdi.readUnsignedShort() - 1;
        // the next 4 fields (22 bytes total) are reserved for future use
spare = lrdi.readUnsignedShort();
spare = lrdi.readInt(); // used by encryption
spare = lrdi.readLong();
spare = lrdi.readLong();
}
/**
* Update the page header in the page array.
* <p>
* Write the bytes of the page header, taking the values from those
* in the in memory variables.
**/
private void updatePageHeader()
throws IOException
{
rawDataOut.setPosition(PAGE_HEADER_OFFSET);
logicalDataOut.writeBoolean(isOverflowPage);
logicalDataOut.writeByte(getPageStatus());
logicalDataOut.writeLong(getPageVersion());
logicalDataOut.writeShort(slotsInUse);
logicalDataOut.writeInt(nextId);
logicalDataOut.writeInt(generation); // page generation (Future Use)
logicalDataOut.writeInt(prevGeneration); // previous generation (Future Use)
logicalDataOut.writeLong(bipLocation); // BIPage location (Future Use)
        // number of deleted rows on the page, stored starting with release
        // 2.0.  For upgrade reasons, a 0 on disk means -1, so we add one
        // when we write it to disk.
logicalDataOut.writeShort(deletedRowCount + 1);
logicalDataOut.writeShort(0); // reserved for future
logicalDataOut.writeInt(
dataFactory.random()); // random bytes for encryption
logicalDataOut.writeLong(0); // reserved for future
logicalDataOut.writeLong(0); // reserved for future
        // we put a random int into the page if the database is encrypted
// so that the checksum will be very different even with the same
// page image, when we encrypt or decrypt the page, we move the
// checksum to the front so that the encrypted page will look very
// different even with just the one int difference. We never look at
// the value of the random number and we could have put it anywhere in
// the page as long as it doesn't obscure real data.
headerOutOfDate = false;
}
/**
* Update the page version number in the byte array
**/
private void updatePageVersion()
throws IOException
{
rawDataOut.setPosition(PAGE_VERSION_OFFSET);
logicalDataOut.writeLong(getPageVersion());
}
/**************************************************************************
* Slot Offset & Length table manipulation
**************************************************************************
*/
/**
* Get the page offset of a given slot entry.
* <p>
* Get the page offset of a slot entry, this is not the offset of
* the record stored in the slot, but the offset of the actual slot.
*
* @return The page offset of a given slot entry.
*
* @param slot The array entry of the slot to find.
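     * <p>
     * Since the slot table grows backward, with a hypothetical
     * slotEntrySize of 6 and slotTableOffsetToFirstEntry of 4080:
     * <pre>
     *     offset(0) = 4080 - 0 * 6 = 4080   // closest to end of page
     *     offset(1) = 4080 - 1 * 6 = 4074
     *     offset(2) = 4080 - 2 * 6 = 4068   // and so on, downward
     * </pre>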
**/
private int getSlotOffset(int slot)
{
// slot table grows backward from the spot at the end of the page just
// before the checksum which is located in the last 8 bytes of the page.
return(slotTableOffsetToFirstEntry - (slot * slotEntrySize));
}
/**
* Get the page offset of the record associated with the input slot.
* <p>
* This is the actual offset on the page of the beginning of the record.
*
* @return The page offset of the record associated with the input slot.
*
* @param slot The array entry of the slot to find.
**/
private int getRecordOffset(int slot)
{
byte[] data = pageData;
int offset = slotTableOffsetToFirstEntry - (slot * slotEntrySize);
// offset on the page of the record is stored in the first 2 or 4 bytes
        // of the slot table entry.  Code has been inlined because this is
        // a performance critical low level routine.
//
// return(
// (slotFieldSize == SMALL_SLOT_SIZE) ?
// readUnsignedShort() : readInt());
return(
(slotFieldSize == SMALL_SLOT_SIZE) ?
((data[offset++] & 0xff) << 8) |
(data[offset] & 0xff) :
(((data[offset++] & 0xff) << 24) |
((data[offset++] & 0xff) << 16) |
((data[offset++] & 0xff) << 8) |
((data[offset] & 0xff) )));
}
/**
* Set the page offset of the record associated with the input slot.
* <p>
* This is the actual offset on the page of the beginning of the record.
*
* @param slot The array entry of the slot to set.
* @param recordOffset the new offset to set.
**/
private void setRecordOffset(int slot, int recordOffset)
throws IOException
{
rawDataOut.setPosition(getSlotOffset(slot));
if (slotFieldSize == SMALL_SLOT_SIZE)
logicalDataOut.writeShort(recordOffset);
else
logicalDataOut.writeInt(recordOffset);
}
/**
* Return length of row on this page.
* <p>
* Return the total length of data and header stored on this page for
* this record. This length is stored as the second "field" of the
* slot table entry.
*
* @return The length of the row on this page.
*
* @param slot the slot of the row to look up the length of.
*
**/
protected int getRecordPortionLength(int slot)
throws IOException
{
if (SanityManager.DEBUG)
{
if (getRecordOffset(slot) <= 0)
{
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"getRecordPortionLength failed with getRecordOffset(" +
slot + ") = " +
getRecordOffset(slot) + " must be greater than 0." +
"page dump = \n" +
toUncheckedString());
SanityManager.THROWASSERT(
"bad record offset found in getRecordPortionLength()");
}
}
// these reads are always against the page array
ArrayInputStream lrdi = rawDataIn;
lrdi.setPosition(
slotTableOffsetToFirstRecordLengthField - (slot * slotEntrySize));
return(
(slotFieldSize == SMALL_SLOT_SIZE) ?
lrdi.readUnsignedShort() : lrdi.readInt());
}
/**
* Return reserved length of row on this page.
* <p>
* Return the reserved length of this record.
* This length is stored as the third "field" of the slot table entry.
*
* @return The reserved length of the row on this page.
*
* @param slot the slot of the row to look up the length of.
*
**/
public int getReservedCount(int slot) throws IOException
{
if (SanityManager.DEBUG)
{
if (getRecordOffset(slot) <= 0)
{
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"getReservedCount failed with getRecordOffset(" +
slot + ") = " +
getRecordOffset(slot) + " must be greater than 0." +
"page dump = \n" +
toUncheckedString());
SanityManager.THROWASSERT(
"bad record offset found in getReservedCount");
}
}
// these reads are always against the page array
ArrayInputStream lrdi = rawDataIn;
lrdi.setPosition(
slotTableOffsetToFirstReservedSpaceField - (slot * slotEntrySize));
return(
(slotFieldSize == SMALL_SLOT_SIZE) ?
lrdi.readUnsignedShort() : lrdi.readInt());
}
/**
* Update the length of data stored on this page for this record
* <p>
* Update both the record length "field" and the reserved space "field"
* of the slot table entry associated with "slot". This length is stored
* as the second "field" of the slot table entry. The changes to these
* 2 fields are represented as the delta to apply to each field as input
* in "delta" and "reservedDelta."
* <p>
*
* @param slot the slot of the record to set.
* @param delta The amount the record length changed.
* @param reservedDelta The amount the reserved length changed.
*
* @exception StandardException Standard exception policy.
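     * <p>
     * For example (hypothetical numbers), a record growing 20 bytes into
     * its own reserved space is updated with delta = +20 and
     * reservedDelta = -20, leaving the total space charged to the slot
     * unchanged:
     * <pre>
     *     before: recordPortionLength = 100, reservedCount = 50
     *     call:   updateRecordPortionLength(slot, +20, -20)
     *     after:  recordPortionLength = 120, reservedCount = 30
     * </pre>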
**/
private void updateRecordPortionLength(
int slot,
int delta,
int reservedDelta)
throws IOException
{
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(getRecordOffset(slot) != 0);
if ((delta + reservedDelta) < 0)
SanityManager.THROWASSERT(
"total space of record is not allowed to shrink, delta == "
+ delta + " reservedDelta = " + reservedDelta);
if ((getRecordPortionLength(slot) + delta) < 0)
SanityManager.THROWASSERT(
"record portion length cannot be < 0.recordPortionLength = "
+ getRecordPortionLength(slot) + " delta = " + delta);
if ((getReservedCount(slot) + reservedDelta) < 0)
SanityManager.THROWASSERT(
"reserved space for record cannot be < 0. reservedCount = "
+ getReservedCount(slot) + " reservedDelta = "
+ reservedDelta);
}
// position the stream to beginning of 2nd field of slot entry.
rawDataOut.setPosition(
slotTableOffsetToFirstRecordLengthField - (slot * slotEntrySize));
// write the new record length to 2nd field
if (slotFieldSize == SMALL_SLOT_SIZE)
logicalDataOut.writeShort(getRecordPortionLength(slot) + delta);
else
logicalDataOut.writeInt(getRecordPortionLength(slot) + delta);
// if necessary, write the 3rd field - above write has positioned the
// stream to the 3rd field.
if (reservedDelta != 0)
{
if (slotFieldSize == SMALL_SLOT_SIZE)
{
logicalDataOut.writeShort(
getReservedCount(slot) + reservedDelta);
}
else
{
logicalDataOut.writeInt(
getReservedCount(slot) + reservedDelta);
}
}
}
/**
* Initialize the in-memory slot table.
* <p>
     * Initialize the in-memory slot table, i.e. that of our super-class
* BasePage. Go through all the records on the page and set the
* freeSpace and firstFreeByte on page.
* <p>
     * @param newIdentity The identity of the page we are trying to
     *                    initialize.  Since we are in the middle of
     *                    building the page, existing info in the class
     *                    (like getIdentity()) is not set up yet.
*
* @exception StandardException Standard exception policy.
**/
private void initSlotTable(
PageKey newIdentity)
throws StandardException
{
int localSlotsInUse = slotsInUse;
// must initialize the header now
initializeHeaders(localSlotsInUse);
// mark all the space on the page as free
clearAllSpace();
// first count the space occupied by the slot table
freeSpace -= localSlotsInUse * slotEntrySize;
int lastSlotOnPage = -1;
int lastRecordOffset = -1;
try
{
for (int slot = 0; slot < localSlotsInUse; slot++)
{
if (SanityManager.DEBUG)
{
int total_space = getTotalSpace(slot);
if ((!isOverflowPage() &&
(minimumRecordSize > total_space)) ||
(isOverflowPage() &&
(StoredRecordHeader.MAX_OVERFLOW_ONLY_REC_SIZE >
total_space)))
{
// head rows including reserved space must be larger
// than minimumRecordSize.
//
// Overflow rows including reserved space must be
// larger than MAX_OVERFLOW_ONLY_REC_SIZE.
SanityManager.THROWASSERT(
"initSlotTable consistency check failed: " +
" slot " + slot +
" minimumRecordSize = " + minimumRecordSize +
" totalSpace = " + total_space +
" recordPortionLength = " +
getRecordPortionLength(slot) +
" reservedCount = " + getReservedCount(slot));
}
}
int recordOffset = getRecordOffset(slot);
// check that offset points into the record space area.
if ((recordOffset < RECORD_SPACE_OFFSET) ||
(recordOffset >= (getPageSize() - CHECKSUM_SIZE)))
{
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, newIdentity));
}
if (recordOffset > lastRecordOffset)
{
lastRecordOffset = recordOffset;
lastSlotOnPage = slot;
}
}
bumpRecordCount(localSlotsInUse);
if (lastSlotOnPage != -1)
{
// Calculate the firstFreeByte for the page,
// and the freeSpace on Page
firstFreeByte =
lastRecordOffset + getTotalSpace(lastSlotOnPage);
freeSpace -= firstFreeByte - RECORD_SPACE_OFFSET;
}
if (SanityManager.DEBUG)
{
if ((freeSpace < 0) ||
(firstFreeByte > getSlotOffset(slotsInUse - 1)) ||
((firstFreeByte + freeSpace) !=
getSlotOffset(slotsInUse - 1)))
{
SanityManager.THROWASSERT(
"firstFreeByte = " + firstFreeByte
+ ", freeSpace = " + freeSpace
+ ", slotOffset = " + (getSlotOffset(slotsInUse - 1))
+ ", slotsInUse = " + localSlotsInUse);
}
if (localSlotsInUse == 0)
{
SanityManager.ASSERT(
firstFreeByte ==
(getPageSize() - totalSpace - CHECKSUM_SIZE));
}
}
            // upgrade issue.  Pre 1.5 releases do not store deletedRowCount;
            // therefore, if we are accessing an older database,
            // we need to calculate the deletedRowCount here.
if (deletedRowCount == -1)
{
int count = 0;
int maxSlot = slotsInUse;
for (int slot = FIRST_SLOT_NUMBER ; slot < maxSlot; slot++)
{
if (isDeletedOnPage(slot))
count++;
}
deletedRowCount = count;
}
}
catch (IOException ioe)
{
// i/o methods on the byte array have thrown an IOException
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, newIdentity));
}
}
/**
* Set up a new slot entry.
* <p>
*
* @param slot the slot to initialize.
* @param recordOffset the offset on the page to find the record.
* @param recordPortionLength the actual length of record+hdr on page.
* @param reservedSpace the reserved length associated with record.
*
* @exception StandardException Standard exception policy.
**/
private void setSlotEntry(
int slot,
int recordOffset,
int recordPortionLength,
int reservedSpace)
throws IOException
{
rawDataOut.setPosition(getSlotOffset(slot));
if (SanityManager.DEBUG)
{
if ((recordPortionLength < 0) ||
(reservedSpace < 0) ||
(recordPortionLength >= getPageSize()) ||
(reservedSpace >= getPageSize()))
{
SanityManager.THROWASSERT(
"recordPortionLength and reservedSpace must " +
"be > 0, and < page size."
+ " slot = " + slot
+ ", in use = " + slotsInUse
+ ", recordOffset = " + recordOffset
+ ", recordPortionLength = " + recordPortionLength
+ ", reservedSpace = " + reservedSpace);
}
if (recordOffset < (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE))
{
SanityManager.THROWASSERT(
"Record offset must be after the page header."
+ " slot = " + slot
+ ", in use = " + slotsInUse
+ ", recordOffset = " + recordOffset
+ ", recordPortionLength = " + recordPortionLength
+ ", reservedSpace = " + reservedSpace);
}
}
if (slotFieldSize == SMALL_SLOT_SIZE)
{
logicalDataOut.writeShort(recordOffset);
logicalDataOut.writeShort(recordPortionLength);
logicalDataOut.writeShort(reservedSpace);
}
else
{
logicalDataOut.writeInt(recordOffset);
logicalDataOut.writeInt(recordPortionLength);
logicalDataOut.writeInt(reservedSpace);
}
}
/**
* Insert a new slot entry into the current slot array.
* <p>
* Shift the existing slots from slot to (slotsInUse - 1) up by one.
     * Up here means from low slot to high slot (e.g. from slot 2 to slot 3).
* Our slot table grows backward so we have to be careful here.
*
* @param slot Position the new slot will take
* @param recordOffset Offset of the record for the new slot
* @param recordPortionLength Length of the record stored in the new slot
* @param reservedSpace Length of reserved space of record in slot
*
**/
private void addSlotEntry(
int slot,
int recordOffset,
int recordPortionLength,
int reservedSpace)
throws IOException
{
if (SanityManager.DEBUG)
{
if ((slot < 0) || (slot > slotsInUse))
SanityManager.THROWASSERT("invalid slot " + slot);
if ((recordPortionLength < 0) || (reservedSpace < 0))
SanityManager.THROWASSERT(
"recordPortionLength and reservedSpace must be > 0." +
"recordPortionLength = " + recordPortionLength +
" reservedSpace = " + reservedSpace);
if (recordOffset < (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE))
{
SanityManager.THROWASSERT(
"Record offset must be after the page header."
+ " slot = " + slot
+ ", in use = " + slotsInUse
+ ", recordOffset = " + recordOffset
+ ", recordPortionLength = " + recordPortionLength
+ ", reservedSpace = " + reservedSpace);
}
}
int newSlotOffset;
// TODO - (mikem) - I think the math below could be slightly optimized.
if (slot < slotsInUse)
{
// inserting a slot into the middle of array so shift all the
// slots from "slot" logically up by one
int startOffset =
getSlotOffset(slotsInUse - 1);
int length =
(getSlotOffset(slot) + slotEntrySize) - startOffset;
newSlotOffset = getSlotOffset(slotsInUse);
System.arraycopy(
pageData, startOffset, pageData, newSlotOffset, length);
}
else
{
// We are adding at end of slot table, so no moving necessary.
newSlotOffset = getSlotOffset(slot);
}
freeSpace -= slotEntrySize;
slotsInUse++;
headerOutOfDate = true; // headerOutOfDate must be set after setDirty
// because isDirty may be called unlatched
setSlotEntry(slot, recordOffset, recordPortionLength, reservedSpace);
}
/**
* Remove slot entry from slot array.
* <p>
* Remove a storage slot at slot. Shift the existing slots from
     * slot+1 to (slotsInUse - 1) down by one.
     * Down here means from high slot to low slot (e.g. from slot 3 to slot 2).
*
* @param slot The slot to delete.
*
**/
private void removeSlotEntry(int slot)
throws IOException
{
if (SanityManager.DEBUG)
{
if ((slot < 0) || (slot >= slotsInUse))
SanityManager.THROWASSERT("invalid slot " + slot);
}
int oldEndOffset = getSlotOffset(slotsInUse - 1);
int newEndOffset = getSlotOffset(slotsInUse - 2);
if (slot != slotsInUse - 1)
{
// if not removing the last slot, need to shift
// now shift all the slots logically down by one
// from (slot+1 to slotsInUse-1) to (slot and slotsInUse-2)
int length = getSlotOffset(slot) - oldEndOffset;
System.arraycopy(
pageData, oldEndOffset, pageData, newEndOffset, length);
}
// clear out the last slot
clearSection(oldEndOffset, slotEntrySize);
// mark the space as free after we have removed the slot
// no need to keep the space reserved for rollback as this is only
// called for purge.
freeSpace += slotEntrySize;
slotsInUse--;
headerOutOfDate = true; // headerOutOfDate must be set after setDirty
                                 // because isDirty may be called unlatched
}
/**
     * Create the record header for the specific slot.
     * <p>
     * Create a new record header object, initialize it, and add it
     * to the array of cached record headers on this page.  Finally, return
     * a reference to the initialized record header.
*
* @return The record header for the specific slot.
*
* @param slot return record header of this slot.
**/
public StoredRecordHeader recordHeaderOnDemand(int slot)
{
StoredRecordHeader recordHeader =
new StoredRecordHeader(pageData, getRecordOffset(slot));
setHeaderAtSlot(slot, recordHeader);
return recordHeader;
}
/**************************************************************************
* Record based routines.
**************************************************************************
*/
/**
* Is entire record on the page?
* <p>
*
* @return true if the entire record at slot is on this page,
* i.e, no overflow row or long columns.
*
* @param slot Check record at this slot.
*
* @exception StandardException Standard exception policy.
**/
public boolean entireRecordOnPage(int slot)
throws StandardException
{
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(isLatched());
}
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
if (recordHeader.hasOverflow())
return false;
// the row chain does not overflow, we need to walk all the fields to
// make sure they are not long columns.
try
{
int offset = getRecordOffset(slot);
if (SanityManager.DEBUG)
{
if (offset < (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE))
{
SanityManager.THROWASSERT(
"Incorrect offset. offset = " + offset +
", offset should be < " +
"(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) = " +
(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) +
", current slot = " + slot +
", total slotsInUse = " + slotsInUse);
}
SanityManager.ASSERT(recordHeader.getFirstField() == 0,
"Head row piece should start at field 0 but is not");
}
int numberFields = recordHeader.getNumberFields();
// these reads are always against the page array
ArrayInputStream lrdi = rawDataIn;
// position after the record header, at 1st column.
lrdi.setPosition(offset + recordHeader.size());
for (int i = 0; i < numberFields; i++)
{
int fieldStatus = StoredFieldHeader.readStatus(lrdi);
if (StoredFieldHeader.isOverflow(fieldStatus))
return false;
int fieldLength =
StoredFieldHeader.readFieldDataLength(
lrdi, fieldStatus, slotFieldSize);
if (fieldLength != 0)
lrdi.setPosition(lrdi.getPosition() + fieldLength);
}
}
catch (IOException ioe)
{
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
}
// we have examined all the fields on this page and none overflows
return true;
}
/**
* Purge one row on an overflow page.
* <p>
* HeadRowHandle is the recordHandle pointing to the head row piece.
* <p>
*
* @param slot slot number of row to purge.
* @param headRowHandle recordHandle of the head row piece.
* @param needDataLogged when true data is logged for purges otherwise just headers.
*
* @exception StandardException Standard exception policy.
**/
protected void purgeOverflowAtSlot(
int slot,
RecordHandle headRowHandle,
boolean needDataLogged)
throws StandardException
{
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(isLatched());
SanityManager.ASSERT(isOverflowPage());
}
if ((slot < 0) || (slot >= slotsInUse))
{
throw StandardException.newException(
SQLState.DATA_SLOT_NOT_ON_PAGE);
}
// TODO (mikem) - should a global scratch variable be used?
// this is an overflow page purge, no need to lock the head row (it
// has already been locked, hopefully). No need to check for long rows
// (they have already been deleted, hopefully).
RawTransaction t = owner.getTransaction();
int[] recordId = new int[1];
recordId[0] = getHeaderAtSlot(slot).getId();
owner.getActionSet().actionPurge(t, this, slot, 1, recordId, needDataLogged);
}
/**
* Purge the column chain that starts at overflowPageId, overflowRecordId
* <p>
* Purge just the column chain that starts at the input address.
     * The long column chain is pointed at by a field in a row.  The long
     * column is then chained as a sequence of "rows"; the last column of
     * each piece points to the next segment of the chain on each page.
     * Long column chains currently are only one row per page, so the next
     * slot of a row in a long column chain should always be the first slot.
* <p>
*
* @param overflowPageId The page where the long column chain starts.
* @param overflowRecordId The record id where long column chain starts.
*
* @exception StandardException Standard exception policy.
**/
private void purgeOneColumnChain(
long overflowPageId,
int overflowRecordId)
throws StandardException
{
StoredPage pageOnColumnChain = null;
boolean removePageHappened = false;
try
{
while (overflowPageId != ContainerHandle.INVALID_PAGE_NUMBER)
{
// Now loop over the column chain and get all the column pieces.
pageOnColumnChain = getOverflowPage(overflowPageId);
removePageHappened = false;
if (pageOnColumnChain == null)
{
if (SanityManager.DEBUG)
SanityManager.THROWASSERT(
"got null page following long column chain. " +
"Head column piece at " + getIdentity() +
" null page at " + overflowPageId);
break; // Don't know what to do here, the column chain
// is broken. Don't bomb, go to the next field.
}
int overflowSlotId = FIRST_SLOT_NUMBER;
if (SanityManager.DEBUG)
{
int checkSlot =
pageOnColumnChain.findRecordById(
overflowRecordId, FIRST_SLOT_NUMBER);
if (overflowSlotId != checkSlot)
{
SanityManager.THROWASSERT(
"Long column is not at the expected " +
FIRST_SLOT_NUMBER + " slot, instead at slot " +
checkSlot);
}
SanityManager.ASSERT(pageOnColumnChain.recordCount() == 1,
"long column page has > 1 record");
}
// Hold on to the pointer to next page on the chain before
// we remove the long column page.
RecordHandle nextColumnPiece =
pageOnColumnChain.getNextColumnPiece(overflowSlotId);
if (pageOnColumnChain.recordCount() == 1)
{
removePageHappened = true;
owner.removePage(pageOnColumnChain);
}
else
{
if (SanityManager.DEBUG)
SanityManager.THROWASSERT(
"page on column chain has more then one record" +
pageOnColumnChain.toString());
pageOnColumnChain.unlatch();
pageOnColumnChain = null;
}
// Chase the column chain pointer.
if (nextColumnPiece != null)
{
overflowPageId = nextColumnPiece.getPageNumber();
overflowRecordId = nextColumnPiece.getId();
}
else
{
// terminate the loop
overflowPageId = ContainerHandle.INVALID_PAGE_NUMBER;
}
}
}
finally
{
// if we raised an exception before the page is removed, make sure
// we unlatch the page
if (!removePageHappened && pageOnColumnChain != null)
{
pageOnColumnChain.unlatch();
pageOnColumnChain = null;
}
}
}
/**
     * Purge long column chains which emanate from this page.
* <p>
* Purge all the long column chains emanating from the record on this slot
* of this page. The headRowHandle is the record handle of the head row
* piece of this row - if this page is the head row, then headRowHandle is
* the record handle at the slot. Otherwise, headRowHandle points to a
* row on a different page, i.e., the head page.
* <p>
*
* @param t The raw transaction doing the purging.
* @param slot The slot of the row to purge.
* @param headRowHandle The RecordHandle of the head row.
*
*
* @exception StandardException Standard exception policy.
**/
private void purgeColumnChains(
RawTransaction t,
int slot,
RecordHandle headRowHandle)
throws StandardException
{
try
{
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
int numberFields = recordHeader.getNumberFields();
// these reads are always against the page array
ArrayInputStream lrdi = rawDataIn;
// position the stream to just after record header.
int offset = getRecordOffset(slot) + recordHeader.size();
lrdi.setPosition(offset);
for (int i = 0; i < numberFields; i++)
{
int fieldStatus = StoredFieldHeader.readStatus(lrdi);
int fieldLength =
StoredFieldHeader.readFieldDataLength(
lrdi, fieldStatus, slotFieldSize);
if (!StoredFieldHeader.isOverflow(fieldStatus))
{
                    // skip this field, it is not a long column
if (fieldLength != 0)
lrdi.setPosition(lrdi.getPosition() + fieldLength);
continue;
}
else
{
// Got an overflow field. The column value is the
// <pageId, recordId> pair where the next column piece is
// residing
long overflowPageId =
CompressedNumber.readLong((InputStream)lrdi);
int overflowRecordId =
CompressedNumber.readInt((InputStream)lrdi);
purgeOneColumnChain(overflowPageId, overflowRecordId);
}
}
}
catch (IOException ioe)
{
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
}
}
/**
* Purge all the overflow columns and overflow rows of the record at slot.
* <p>
* Purge all the overflow columns and overflow rows of the record at slot.
* This is called by BasePage.purgeAtSlot, the head row piece is purged
* there.
* <p>
*
* @param t The raw transaction doing the purging.
* @param slot The slot of the row to purge.
* @param headRowHandle The RecordHandle of the head row.
* @param needDataLogged when true data is logged for purges otherwise just headers.
*
* @exception StandardException Standard exception policy.
**/
protected void purgeRowPieces(
RawTransaction t,
int slot,
RecordHandle headRowHandle,
boolean needDataLogged)
throws StandardException
{
if (SanityManager.DEBUG)
SanityManager.ASSERT(isOverflowPage() == false,
"not expected to call purgeRowPieces on a overflow page");
// purge the long columns which start on this page.
purgeColumnChains(t, slot, headRowHandle);
// drive this loop from the head page. Walk each "long" row piece in
// the row chain.
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
while (recordHeader.hasOverflow())
{
// nextPageInRowChain, is the page with the next row piece
StoredPage nextPageInRowChain =
getOverflowPage(recordHeader.getOverflowPage());
if (nextPageInRowChain == null)
{
if (SanityManager.DEBUG)
{
SanityManager.THROWASSERT(
"got null page following long row chain. " +
"Head row piece at " + getIdentity() + " slot " +
slot + " headRecord " + headRowHandle +
". Broken row chain at " +
recordHeader.getOverflowPage() + ", " +
recordHeader.getOverflowId());
}
break; // Don't know what to do here, the row chain is
// broken. Don't bomb, just return.
}
try
{
int nextPageSlot =
getOverflowSlot(nextPageInRowChain, recordHeader);
// First get rid of all long columns from the next row piece.
nextPageInRowChain.purgeColumnChains(
t, nextPageSlot, headRowHandle);
// Before we purge the next row piece, get the row header to
// see if we need to continue the loop.
recordHeader = nextPageInRowChain.getHeaderAtSlot(nextPageSlot);
// Lastly, purge the next row piece. If the next row piece is
// the only thing in the entire page, just deallocate the page.
// We can do this because the page is deallocated in this
// transaction. If we defer this to post commit processing,
// then we have to first purge the row piece and also remember
// the page time stamp.
if (nextPageSlot == 0 && nextPageInRowChain.recordCount() == 1)
{
// This is an overflow page and we just purged the last row.
// Free the page. Cannot do it in post commit because the
// head row is gone and cannot be locked at post commit to
                    // stabilize the row chain.
try
{
owner.removePage(nextPageInRowChain);
}
finally
{
// Remove Page guarantees to unlatch the page even
// if an exception is thrown, need not unlatch it
// again.
nextPageInRowChain = null;
}
}
else
{
nextPageInRowChain.purgeOverflowAtSlot(
nextPageSlot, headRowHandle, needDataLogged);
nextPageInRowChain.unlatch();
nextPageInRowChain = null;
}
}
finally
{
// Unlatch the next row piece before getting the next page in
// the row chain.
if (nextPageInRowChain != null)
{
nextPageInRowChain.unlatch();
nextPageInRowChain = null;
}
}
}
}
/**
* Remove a column chain that may have been orphaned by an update.
* <p>
* Remove a column chain that may have been orphaned by an update. This
* is executed as a post commit operation. This page is the head page of
* the row which used to point to the column chain in question. The
* location of the orphaned column chain is in the ReclaimSpace record.
* <BR>
* MT - latched. No lock will be gotten, the head record must already be
* locked exclusive with no outstanding changes that can be rolled back.
* <p>
*
* @param work object describing the chain to remove.
* @param containerHdl open container handle to use to remove chain.
*
* @exception StandardException Standard exception policy.
**/
/* package */
void removeOrphanedColumnChain(
ReclaimSpace work,
ContainerHandle containerHdl)
throws StandardException
{
        // First we need to make sure that this is the first and only time
        // this long column is being reclaimed; to do this we get the first
        // page on the long column chain and compare its page time stamp.
        // If it is different, don't do anything.
        //
        // Next we need to make sure the update operation committed - we do
        // this by finding the row headed by headRecord, going to the column
        // in question, and seeing if it still points to the first page of
        // the long column chain we want to reclaim.  If it does, then the
        // update operation has rolled back and we don't want to reclaim
        // the chain.
//
// After we do the above 2 checks, we can reclaim the column
// chain.
StoredPage headOfChain =
(StoredPage)containerHdl.getPageNoWait(work.getColumnPageId());
// If someone has it latched, not reclaimable
if (headOfChain == null)
return;
// If the column has been touched, it is not orphaned. Not reclaimable.
boolean pageUnchanged =
headOfChain.equalTimeStamp(work.getPageTimeStamp());
headOfChain.unlatch(); // unlatch it for now.
if (pageUnchanged == false)
return;
// Now get to the column in question and make sure it is no longer
// pointing to the column chain.
RecordHandle headRowHandle = work.getHeadRowHandle();
if (SanityManager.DEBUG)
{
// System.out.println("Executing in removeOrphanedColumnChain.");
// System.out.println("work = " + work);
// System.out.println("head = " + headOfChain);
// System.out.println("this = " + this);
SanityManager.ASSERT(isLatched());
SanityManager.ASSERT(
headRowHandle.getPageNumber() == getPageNumber(),
"got wrong head page");
}
// First get the row.
int slot =
findRecordById(
headRowHandle.getId(), headRowHandle.getSlotNumberHint());
// If slot < 0, it means the whole record is gone, the column chain is
// definitely orphaned.
if (slot >= 0)
{
if (SanityManager.DEBUG)
{
if (isOverflowPage())
{
SanityManager.THROWASSERT(
"Page " + getPageNumber() + " is overflow " +
"\nwork = " + work +
"\nhead = " + headOfChain +
"\nthis = " + this);
}
}
// Find the page with the column in question on it.
StoredPage pageInRowChain = this; // Start with the head page.
try
{
int columnId = work.getColumnId();
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
if (SanityManager.DEBUG)
SanityManager.ASSERT(recordHeader.getFirstField() == 0,
"Head row piece should start at field 0 but is not");
// See if columnId is on pageInRowChain.
while ((recordHeader.getNumberFields() +
recordHeader.getFirstField()) <= columnId)
{
// The column in question is not on pageInRowChain.
if (pageInRowChain != this)
{
// Keep the head page latched.
pageInRowChain.unlatch();
pageInRowChain = null;
}
if (recordHeader.hasOverflow())
{
// Go to the next row piece
pageInRowChain =
getOverflowPage(recordHeader.getOverflowPage());
recordHeader =
pageInRowChain.getHeaderAtSlot(
getOverflowSlot(pageInRowChain, recordHeader));
}
else
{
// Don't know why, but this is the last column.
                        // Anyway, the column chain is definitely orphaned.
// This can happen if the update, or subsequent
// updates, shrink the number of columns in the row.
break;
}
}
if ((recordHeader.getNumberFields() +
recordHeader.getFirstField()) > columnId)
{
// RecordHeader is the record header of the row piece on
// pageInRowChain. The column in question exists and is in
// that row piece.
if (!pageInRowChain.isColumnOrphaned(
recordHeader, columnId,
work.getColumnPageId(), work.getColumnRecordId()))
{
// The column is not orphaned, row still points to it.
if (pageInRowChain != this)
{
// Keep the head page latched.
pageInRowChain.unlatch();
pageInRowChain = null;
}
return;
}
}
}
catch (IOException ioe)
{
throw StandardException.newException(
SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
}
finally
{
if (pageInRowChain != this && pageInRowChain != null)
pageInRowChain.unlatch();
}
}
// If we get this far, we have verified that the column chain is indeed
// orphaned. Get rid of the column chain.
long nextPageId = work.getColumnPageId();
int nextRecordId = work.getColumnRecordId();
purgeOneColumnChain(nextPageId, nextRecordId);
}
/**
* See if there is an orphaned long column chain or not.
* <p>
* See if there is an orphaned long column chain or not. This is a helper
* function for removeOrphanedColumnChain. This page, which may be a head page
* or overflow page, contains the column specified in columnId. It used to
* point to a long column chain at oldPageId and oldRecordId. Returns true
* if it no longer points to that long column chain.
* <p>
*
* @return true if page no longer points to the long column chain.
*
* @param recordHeader record header which used to point at the long column
* @param columnId column id of the long column in head.
* @param oldPageId the page id where the long column used to be.
* @param oldRecordId the record id where the long column used to be.
*
* @exception StandardException Standard exception policy.
**/
private boolean isColumnOrphaned(
StoredRecordHeader recordHeader,
int columnId,
long oldPageId,
long oldRecordId)
throws StandardException, IOException
{
int slot = findRecordById(recordHeader.getId(), Page.FIRST_SLOT_NUMBER);
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(slot >= 0, "overflow row chain truncated");
SanityManager.ASSERT(
columnId >= recordHeader.getFirstField(),
"first column on page > expected");
}
// these reads are always against the page array
ArrayInputStream lrdi = rawDataIn;
// set read position to data portion of record to check.
int offset = getRecordOffset(slot);
lrdi.setPosition(offset + recordHeader.size());
// skip until you get to the record in question.
for (int i = recordHeader.getFirstField(); i < columnId; i++)
skipField(lrdi);
// read in the info of the column we are interested in.
int fieldStatus = StoredFieldHeader.readStatus(lrdi);
int fieldLength = StoredFieldHeader.readFieldDataLength
(lrdi, fieldStatus, slotFieldSize);
if (StoredFieldHeader.isOverflow(fieldStatus))
{
// it is still an overflow field, check if it still points to
// overflow column in question.
long ovflowPage = CompressedNumber.readLong((InputStream) lrdi);
int ovflowRid = CompressedNumber.readInt((InputStream) lrdi);
if (ovflowPage == oldPageId && ovflowRid == oldRecordId)
{
// This field still points to the column chain, the
// update must have rolled back.
return false;
}
}
// Else, either the field is no longer a long column, or it doesn't
// point to oldPageId, oldRecordId. The column chain is orphaned.
return true;
}
/**
* Return the next recordHandle in a long column chain.
* <p>
* Return a recordHandle pointing to the next piece of the column chain.
* This page must be an overflow page that is in a column chain. If this
* is the last piece of the overflow column, return null.
* <p>
*
* @return The next record handle in a long column chain.
*
* @param slot The slot of the current long column piece.
*
* @exception StandardException Standard exception policy.
**/
private RecordHandle getNextColumnPiece(int slot)
throws StandardException
{
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(isLatched());
SanityManager.ASSERT(isOverflowPage(),
"not expected to call getNextColumnPiece on non-overflow page");
if (recordCount() != 1)
{
SanityManager.THROWASSERT(
"getNextColumnPiece called on a page with " +
recordCount() + " rows");
}
}
try
{
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
int numberFields =
recordHeader.getNumberFields();
if (SanityManager.DEBUG)
{
if ((numberFields > 2) || (numberFields < 1))
{
SanityManager.THROWASSERT(
"longColumn record header must have 1 or 2 fields." +
" numberFields = " + numberFields);
}
}
if (numberFields != 2) // End of column chain.
return null;
// these reads are always against the page array
ArrayInputStream lrdi = rawDataIn;
// The 2nd field is the pointer to the next page in column chain.
int offset = getRecordOffset(slot) + recordHeader.size();
lrdi.setPosition(offset);
// skip the first field
skipField(lrdi);
// the 2nd field should be <pageId, recordId> pair, return the
// pageId part and skip over the length.
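// As read below, the on-page layout of this pointer field is:
// status byte | field data length | compressed long overflow page
// number | compressed int overflow record id (see CompressedNumber
// for the variable-length encodings).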
int fieldStatus = StoredFieldHeader.readStatus(lrdi);
int fieldLength = StoredFieldHeader.readFieldDataLength
(lrdi, fieldStatus, slotFieldSize);
long ovflowPage = CompressedNumber.readLong((InputStream) lrdi);
int ovflowRid = CompressedNumber.readInt((InputStream) lrdi);
if (SanityManager.DEBUG)
{
if (!StoredFieldHeader.isOverflow(fieldStatus))
{
// In version 1.5, the first field is overflow and the
// second is not. In version 2.0 onwards, the first field
// is not overflow and the second is overflow (the overflow
// bit goes with the overflow pointer). Check first field
// to make sure its overflow bit is set on.
// Offset still points to the first column.
lrdi.setPosition(offset);
fieldStatus = StoredFieldHeader.readStatus(lrdi);
SanityManager.ASSERT(
StoredFieldHeader.isOverflow(fieldStatus));
}
}
// RESOLVE : this new can get expensive if the column chain is very
// long. The reason we do this is because we need to return the
// page number and the rid, if we assume that the long column is
// always at slot 0, we can return only the page.
return owner.makeRecordHandle(ovflowPage, ovflowRid);
}
catch (IOException ioe)
{
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
}
}
/**************************************************************************
* Page space usage
**************************************************************************
*/
/**
* initialize the in memory variables associated with space maintenance.
* <p>
* Get the total available space on an empty page.
* initSlotTable() must be called after the page has been read in.
**/
private void initSpace()
{
// NOTE: a subclass may have overridden it to report less free space;
// always call getMaxFreeSpace() to get the total space.
totalSpace = getMaxFreeSpace();
// estimate RH will be about 16 bytes:
// (1 - status, 1 - id, 1 - #fields, 1 - 1stField, 12 - overflow ptr)
// RESOLVED: track# 3370, 3368
// In the old code below, spareSpace/100 is integer division. This means
// that you get a value of 0 for it as long as spareSpace is between 0
// and 99. But if spareSpace is 100 you get a value of 1. This resulted
// in a negative value for maxFieldSize. This caused e.g. the isLong
// method to behave incorrectly when spareSpace is 100.
//
// RESOLVED: track# 4385
// maxFieldSize is a worst case calculation for the size of a record
// on an empty page, with a single field, but still allow room for
// an overflow pointer if another field is to be added. If you don't
// account for the overflow pointer then you can get into the situation
// where the code puts the field on the page (not making it a long
// column), then runs out of space on next column but can't fit overflow
// pointer, so backs up and removes the column from page, and tries
// again on next overflow page - looping forever.
//
// maxFieldSize =
// totalSpace * (1 - spareSpace/100) - slotEntrySize
// - 16 - OVERFLOW_POINTER_SIZE;
maxFieldSize = totalSpace - slotEntrySize - 16 - OVERFLOW_POINTER_SIZE;
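// For example (hypothetical numbers): with totalSpace = 4000 and
// slotEntrySize = 6, maxFieldSize = 4000 - 6 - 16 -
// OVERFLOW_POINTER_SIZE. A field that cannot fit within this worst
// case space is a candidate for long column treatment.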
if (SanityManager.DEBUG) {
SanityManager.ASSERT(maxFieldSize >= 0);
// DERBY-3099: maxFieldSize was calculated before slotFieldSize and
// slotEntrySize had been initialized.
int expectedFieldSize = calculateSlotFieldSize(pageData.length);
SanityManager.ASSERT(slotFieldSize == expectedFieldSize,
"slotFieldSize uninitialized");
SanityManager.ASSERT(slotEntrySize == 3 * expectedFieldSize,
"slotEntrySize uninitialized");
}
}
/**
* Initialize the freeSpace count and set the firstFreeByte on page
**/
private void clearAllSpace()
{
freeSpace = totalSpace;
firstFreeByte = getPageSize() - totalSpace - CHECKSUM_SIZE;
}
/**
* Compress out the space specified by startByte and endByte.
* <p>
* As part of moving rows, updating rows, purging rows compact the space
* left between rows.
* <p>
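* For example (hypothetical offsets): if a 50-byte row occupying
* bytes 100-149 is purged while other rows occupy bytes 0-99 and
* 150-199 with firstFreeByte = 200, then compressPage(100, 149)
* copies bytes 150-199 down to offset 100, decrements the slot
* offsets of the moved rows by 50, grows freeSpace by 50, and moves
* firstFreeByte back to 150.
* <p>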
*
* @param startByte compress out space starting at startByte offset
* @param endByte compress out space ending at endByte offset
*
**/
private void compressPage(
int startByte,
int endByte)
throws IOException
{
if (SanityManager.DEBUG)
{
if (((endByte + 1) > firstFreeByte) || (startByte > firstFreeByte))
{
SanityManager.THROWASSERT(
"startByte = " + startByte + " endByte = " + endByte +
" firstFreeByte = " + firstFreeByte);
}
}
int lengthToClear = endByte + 1 - startByte;
// see if these were not the last occupied record space on the page
if ((endByte + 1) != firstFreeByte)
{
// Shift everything down the page.
int moveLength = (firstFreeByte - endByte - 1);
System.arraycopy(
pageData, (endByte + 1), pageData, startByte, moveLength);
// fix the page offsets of the rows further down the page
for (int slot = 0; slot < slotsInUse; slot++)
{
int offset = getRecordOffset(slot);
if (offset >= (endByte + 1))
{
offset -= lengthToClear;
setRecordOffset(slot, offset);
}
}
}
freeSpace += lengthToClear;
firstFreeByte -= lengthToClear;
clearSection(firstFreeByte, lengthToClear);
}
/**
* Free up required bytes by shifting rows "down" the page.
* <p>
* Expand page, move all the data from startOffset down the page by
* the amount required to free up the required bytes.
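* <p>
* For example (hypothetical offsets): expandPage(100, 30) moves the
* bytes from offset 100 up to firstFreeByte 30 bytes further down the
* page, opening a 30-byte hole at offset 100; the slot offsets of the
* moved rows grow by 30, freeSpace shrinks by 30, and firstFreeByte
* advances by 30. This is the inverse of compressPage.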
*
* @param startOffset offset on page to begin the shift
* @param requiredBytes the number of bytes that must be freed.
*
* @exception IOException If IOException is raised during the page mod.
**/
protected void expandPage(
int startOffset,
int requiredBytes)
throws IOException
{
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(requiredBytes <= freeSpace);
SanityManager.ASSERT(startOffset <= firstFreeByte);
}
int totalLength = firstFreeByte - startOffset;
if (totalLength > 0)
{
System.arraycopy(
pageData, startOffset,
pageData, startOffset + requiredBytes, totalLength);
// fix the page offsets of the rows further down the page
for (int slot = 0; slot < slotsInUse; slot++)
{
int offset = getRecordOffset(slot);
if (offset >= startOffset)
{
offset += requiredBytes;
setRecordOffset(slot, offset);
}
}
}
freeSpace -= requiredBytes;
firstFreeByte += requiredBytes;
}
/**
* Shrink page.
* <p>
* move all the data from startOffset up the page by the amount shrunk.
*
*
* @param startOffset offset on page to begin the shift
* @param shrinkBytes the number of bytes to shrink the used space by.
*
* @exception IOException some IOException is raised during the page mod,
* (unlikely as this is just writing to array).
**/
private void shrinkPage(int startOffset, int shrinkBytes)
throws IOException
{
// the number of bytes that needs to be moved up.
int totalLength = firstFreeByte - startOffset;
if (SanityManager.DEBUG)
{
SanityManager.DEBUG(
"shrinkPage", "page " + getIdentity() +
" shrinking " + shrinkBytes +
" from offset " + startOffset +
" to offset " + (startOffset-shrinkBytes) +
" moving " + totalLength +
" bytes. FirstFreeByte at " + firstFreeByte);
SanityManager.ASSERT(
totalLength >= 0, "firstFreeByte - startOffset <= 0");
SanityManager.ASSERT(
(startOffset-shrinkBytes) > RECORD_SPACE_OFFSET ,
"shrinking too much ");
if (startOffset != firstFreeByte)
{
// make sure startOffset is at the beginning of a record
boolean foundslot = false;
for (int slot = 0; slot < slotsInUse; slot++)
{
if (getRecordOffset(slot) == startOffset)
{
foundslot = true;
break;
}
}
if (!foundslot)
{
SanityManager.THROWASSERT(
"startOffset " + startOffset +
" not at the beginning of a record");
}
}
}
if (totalLength > 0)
{
System.arraycopy(
pageData, startOffset,
pageData, startOffset-shrinkBytes , totalLength);
// fix the page offsets of the rows further down the page
for (int slot = 0; slot < slotsInUse; slot++)
{
int offset = getRecordOffset(slot);
if (offset >= startOffset)
{
offset -= shrinkBytes;
setRecordOffset(slot, offset);
}
}
}
freeSpace += shrinkBytes;
firstFreeByte -= shrinkBytes;
}
public int getRecordLength(int slot) throws IOException
{
return getRecordPortionLength(slot);
}
protected boolean getIsOverflow(int slot) throws IOException
{
return getHeaderAtSlot(slot).hasOverflow();
}
/**
* Log a row into the StoreOutput stream.
* <p>
* Write the row in its record format to the stream. Record format is a
* record header followed by each field with its field header. See this
* class's description for the specifics of these headers.
*
* startColumn is used to specify at which column this logRow call
* should start logging. When realStartColumn is specified, that means
* part of the row has already been logged. startColumn then indicates
* that the first columns were logged into the logBuffer, and logging
* needs to continue with the rest of the row starting at
* realStartColumn.
*
* This is used when a longColumn is encountered during a long row.
* After the long column has been logged, we need to continue logging the
* rest of the row.
* A -1 value for realStartColumn means that it is not significant.
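*
* As an illustrative sketch (column numbers hypothetical): if a call
* with startColumn = 0 hits a long column at column 3, the caller
* first moves the long column to its own overflow chain, and then a
* follow-up logRow call with realStartColumn pointing at the next
* column to log and realSpaceOnPage set to the space left resumes
* logging mid-row instead of restarting at column 0.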
*
* logRow will not throw a noSpaceOnPage exception if this is an
* overflow page and the record we are inserting is the only record on
* the page. Rows spanning multiple pages are supported through this
* mechanism.
*
* logRow expects row to be a sparse row.
* <p>
*
* @return the "realStartColumn" value, -1 if not a long row.
*
* @param slot the slot of the row being logged.
* @param forInsert this is logging an insert (not update/delete).
* @param recordId record id of the row being logged.
* @param row actual data of row in object form. If row is
* null then we are logging an overflow pointer.
* @param validColumns bit map describing valid columns in row.
* @param out stream to log to.
* @param startColumn what column to start with (see above for detail)
* @param insertFlag flag indicating mode we are in,
* INSERT_DEFAULT - default insert
* INSERT_SPLIT - splitting a row/column
* across pages.
* @param realStartColumn If -1 ignore variable, else part of row has
* already been logged, and should continue with
* this column.
* @param realSpaceOnPage Use this as space on page if realStartColumn
* is not -1.
* @param overflowThreshold How much of the page to use before deciding
* to overflow a row.
*
* @exception IOException RESOLVE
* @exception StandardException Standard exception policy.
*
* @see BasePage#logRow
**/
public int logRow(
int slot,
boolean forInsert,
int recordId,
Object[] row,
FormatableBitSet validColumns,
DynamicByteArrayOutputStream out,
int startColumn,
byte insertFlag,
int realStartColumn,
int realSpaceOnPage,
int overflowThreshold)
throws StandardException, IOException
{
// Is this an update that just went through handleIncompleteLogRow,
// which threw an exception? In that case the buffer is already
// finished.
if (!forInsert)
{
if ((realStartColumn != -1) && (realSpaceOnPage == -1))
{
return realStartColumn;
}
}
int spaceAvailable = freeSpace;
setOutputStream(out);
int beginPosition = out.getPosition();
// if we are inserting in the headPage,
// we need to make sure that there is enough room
// on the page for the reserve space.
userRowSize = 0;
boolean calcMinimumRecordSize = false;
if (realStartColumn != (-1))
{
// in the middle of logging a long row/column.
spaceAvailable = realSpaceOnPage;
beginPosition = out.getBeginPosition();
}
else
{
// logging row part that is on head page.
if (!forInsert)
{
// an update can use the total space of the record,
// even if not all of the fields are being updated.
// If the updated columns will not fit then some
// columns will move off the page to a new chunk.
spaceAvailable += getTotalSpace(slot);
}
else
{
// need to account for the slot table using extra space...
spaceAvailable -= slotEntrySize;
if (startColumn == 0)
calcMinimumRecordSize = true;
}
// <= is ok here as we know we want to write at least one more byte
if (spaceAvailable <= 0)
throw new NoSpaceOnPage(isOverflowPage());
}
try
{
if (row == null)
{
// if the row is null, we must be writing an overflow pointer.
return(logOverflowRecord(slot, spaceAvailable, out));
}
int numberFields = 0;
StoredRecordHeader recordHeader;
if (forInsert)
{
recordHeader = new StoredRecordHeader();
}
else
{
// Get a full copy of the record header since we might change
// it, and we can't modify the one on the page
recordHeader =
new StoredRecordHeader(getHeaderAtSlot(slot));
// an update always starts at the first column on this page
startColumn = recordHeader.getFirstField();
}
if (validColumns == null)
{
// all columns in row[] are valid, we will be logging them all.
numberFields = row.length - startColumn;
}
else
{
// RESOLVE (mikem) - counting on validColumns.length may be bad
// for performance.
for (int i = validColumns.getLength() - 1;
i >= startColumn;
i--)
{
if (validColumns.isSet(i))
{
numberFields = i + 1 - startColumn;
break;
}
}
}
int onPageNumberFields = -1; // only valid for update
if (forInsert)
{
recordHeader.setId(recordId);
recordHeader.setNumberFields(numberFields);
}
else
{
// an update
onPageNumberFields = recordHeader.getNumberFields();
if (numberFields > onPageNumberFields)
{
// number of fields *might* be increasing
if (recordHeader.hasOverflow())
{
// other fields will be handled in next portion update
numberFields = onPageNumberFields;
}
else
{
// number of fields is increasing
recordHeader.setNumberFields(numberFields);
}
}
else if (numberFields < onPageNumberFields)
{
if (validColumns == null)
{
// number of fields is decreasing,
// but only allowed when the complete
// row is being updated.
recordHeader.setNumberFields(numberFields);
// RESOLVE -
// need some post commit work if row has overflow
// if (recordHeader.hasOverflow()) {
// remove overflow portion after commit.
// }
}
else
{
// we process all the fields, the unchanged ones
// at the end will have a single byte written out
// indicating they are unchanged (nonexistent)
numberFields = onPageNumberFields;
}
}
}
int endFieldExclusive = startColumn + numberFields;
if (realStartColumn >= endFieldExclusive)
{
// The realStartColumn is greater than the last column we need
// to log, so we are done.
return (-1);
}
if ((insertFlag & Page.INSERT_DEFAULT) != Page.INSERT_DEFAULT)
{
// if this is not logging the part of the row being inserted
// on the main page, then use startColumn as first field.
recordHeader.setFirstField(startColumn);
}
// what column to start with?
int firstColumn = realStartColumn;
if (realStartColumn == (-1))
{
// logging on the head page.
int recordHeaderLength = recordHeader.write(logicalDataOut);
spaceAvailable -= recordHeaderLength;
if (spaceAvailable < 0)
{
// ran out of space just writing the record header.
throw new NoSpaceOnPage(isOverflowPage());
}
firstColumn = startColumn;
}
boolean monitoringOldFields = false;
int validColumnsSize =
(validColumns == null) ? 0 : validColumns.getLength();
if (validColumns != null)
{
if (!forInsert)
{
// we monitor the length of the old fields by skipping them
// but only on a partial update.
if ((validColumns != null) &&
(firstColumn < (startColumn + onPageNumberFields)))
{
rawDataIn.setPosition(
getFieldOffset(slot, firstColumn));
monitoringOldFields = true;
}
}
}
int lastSpaceAvailable = spaceAvailable;
int recordSize = 0;
int lastColumnPositionAllowOverflow = out.getPosition();
int lastColumnAllowOverflow = startColumn;
if (spaceAvailable > OVERFLOW_POINTER_SIZE)
lastColumnPositionAllowOverflow = -1;
int columnFlag = COLUMN_FIRST;
for (int i = firstColumn; i < endFieldExclusive; i++)
{
Object ref = null;
boolean ignoreColumn = false;
// should we log this column or not?
if ((validColumns == null) ||
(validColumnsSize > i && validColumns.isSet(i)))
{
if (i < row.length)
ref = row[i];
}
else if (!forInsert)
{
// field is not supplied, log as non-existent
ignoreColumn = true;
}
if (spaceAvailable > OVERFLOW_POINTER_SIZE)
{
lastColumnPositionAllowOverflow = out.getPosition();
lastColumnAllowOverflow = i;
}
lastSpaceAvailable = spaceAvailable;
if (ignoreColumn)
{
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
ref == null,
"ref should be null for an ignored column");
SanityManager.ASSERT(
validColumns != null,
"validColumns should be non-null for ignored col");
}
if (i < (startColumn + onPageNumberFields))
{
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
monitoringOldFields,
"monitoringOldFields must be true");
}
// need to keep track of the old field lengths
// as they are remaining in the row.
int oldOffset = rawDataIn.getPosition();
skipField(rawDataIn);
int oldFieldLength =
rawDataIn.getPosition() - oldOffset;
if (oldFieldLength <= spaceAvailable)
{
// if field doesn't fit,
// spaceAvailable must be left unchanged.
logColumn(
null, 0, out, Integer.MAX_VALUE,
COLUMN_NONE, overflowThreshold);
spaceAvailable -= oldFieldLength;
}
}
else
{
// this is an update that is increasing the number of
// columns but not providing any value. this can happen
// if you are updating a new column that does not
// actually exist in the table after using
// ALTER TABLE to add a couple new columns. This
// case is going to create actual null entries for
// the non-existent columns that are before the actual
// column being updated by the user.
// see DERBY-5679.
spaceAvailable =
logColumn(
null, 0, out, spaceAvailable,
COLUMN_CREATE_NULL, overflowThreshold);
}
}
else
{
// ignoreColumn is false, we are logging this column.
if (monitoringOldFields &&
(i < (startColumn + onPageNumberFields)))
{
// skip the old version of the field so that
// rawDataIn is correctly positioned.
skipField(rawDataIn);
}
try
{
if (ref == null)
{
// no new value to provide, use the on page value.
spaceAvailable =
logColumn(
null, 0, out, spaceAvailable,
columnFlag, overflowThreshold);
}
else
{
// log the value provided in the row[i]
spaceAvailable =
logColumn(
row, i, out, spaceAvailable,
columnFlag, overflowThreshold);
}
}
catch (LongColumnException lce)
{
// logColumn determined that the column would not fit
// and that the column length exceeded the long column
// threshold so turn this column into a long column.
if ((insertFlag & Page.INSERT_DEFAULT) ==
Page.INSERT_DEFAULT)
{
// if default insert, just throw no space exception.
// If the lce has thrown the column as an InputStream,
// in the following 2 situations:
// 1. If the column came in 'row[i]' as an InputStream
// 2. If the object stream of 'row[i]' is not
//    null, which means that the object state of
//    the column is null.
//
// we need to set the original InputStream column to
// the column that has been thrown by lce. It is a
// store-formatted InputStream which remembers all
// the bytes that have been read, but not yet stored.
// Therefore, we will not lose any bytes.
//
// In any other situation, we should not change the
// state of the column,
// i.e. if 'row[i]' has an object state, it should
// not be turned into an InputStream.
if ((lce.getColumn() instanceof InputStream)
&& (row[i] instanceof StreamStorable) )
{
if ((row[i] instanceof InputStream) ||
(((StreamStorable) row[i]).returnStream()
!= null) )
{
// change state of stream so that it uses
// the stream just created by the lce -
// which is remembering the bytes it has
// already read from the stream but couldn't
// log as there was not enough room on
// current page.
((StreamStorable) row[i]).setStream(
(InputStream) lce.getColumn());
}
}
throw new NoSpaceOnPage(isOverflowPage());
}
// When one of the following two conditions is true,
// we will allow the insert of the long column:
//
// 1. if this is the last field,
// and overflow field header fits on page.
// 2. if it is not the last field,
// and overflow field header fits on page (for col)
// and another overflow ptr fits (for row).
//
//
if (((spaceAvailable >= OVERFLOW_PTR_FIELD_SIZE) &&
(i == (endFieldExclusive - 1))) ||
((spaceAvailable >= (OVERFLOW_PTR_FIELD_SIZE * 2))&&
(i < (endFieldExclusive - 1))))
{
// If the column is a long column, it must be a
// InputStream. We have made the input stream into
// a RememberBytesInputStream, have to set the
// column to that, in order to preserve the bytes
// we already read off the stream.
// caught a long column exception,
// set the variables, and rethrow the error
out.setBeginPosition(beginPosition);
lce.setExceptionInfo(out, i, spaceAvailable);
throw (lce);
}
}
}
int nextColumn;
recordSize += (lastSpaceAvailable - spaceAvailable);
boolean recordIsLong =
(overflowThreshold == 100) ?
false : isLong(recordSize, overflowThreshold);
// get the no overflow case out of the way asap
if ((lastSpaceAvailable == spaceAvailable) || recordIsLong)
{
if ((insertFlag & Page.INSERT_DEFAULT) ==
Page.INSERT_DEFAULT)
{
throw new NoSpaceOnPage(isOverflowPage());
}
if (recordIsLong)
{
// if the record is long because of threshold,
// then, we need to reset the logicalOut.
// set position to the end of the previous field
out.setPosition(out.getPosition() - recordSize);
}
// did not write this column
nextColumn = i;
}
else
{
// assume that all fields will be written to this page.
nextColumn = endFieldExclusive;
}
// See if we have enough room to write an overflow field if the
// row needs to overflow. We need overflow if we need to
// write another portion or another portion already exists and
// we will need to point to it.
if ((lastSpaceAvailable == spaceAvailable) ||
((insertFlag & Page.INSERT_FOR_SPLIT) ==
Page.INSERT_FOR_SPLIT))
{
// The current row has filled the page.
if (spaceAvailable < OVERFLOW_POINTER_SIZE)
{
// DERBY-4923
// The fix for DERBY-4923 was to change the above
// check from <= to <. The test case for DERBY-4923
// got the system into a state where it needed to
// exactly write an overflow field pointer and it
// had exactly OVERFLOW_POINTER_SIZE spaceAvailable,
// but was off by one in its check.
// The system ensures all rows on an overflow page
// have at least OVERFLOW_POINTER_SIZE, so updating
// them should check for exactly OVERFLOW_POINTER_SIZE
// not <=.
if ((i == startColumn) ||
(lastColumnPositionAllowOverflow < 0))
{
// not enough room for the overflow recordheader,
// and this is the first column on this page so
// need to try another page.
//
if (SanityManager.DEBUG)
{
if (!forInsert)
{
// should not get into this path on an
// update, only on an insert. Update should
// reserve space on page so you can always
// at least update the row with a single
// overflow pointer.
SanityManager.THROWASSERT(
"no space to update a field on page. " +
"i = " + i +
"; startColumn = " + startColumn +
"; lastColumnPositionAllowOverflow = " +
lastColumnPositionAllowOverflow +
"; spaceAvailable = " +
spaceAvailable +
"; lastSpaceAvailable = " +
lastSpaceAvailable +
"; insertFlag = " +
insertFlag +
"; Page.INSERT_FOR_SPLIT = " +
Page.INSERT_FOR_SPLIT +
"; isOverflowPage() = " +
isOverflowPage() +
"; OVERFLOW_POINTER_SIZE = " +
OVERFLOW_POINTER_SIZE +
"\npage = \n" + this);
}
}
// DERBY-4577, on an update this bug may cause
// the following to be thrown on an update. Update
// code never expects this error to be thrown, and
// does not handle it. The fix to DERBY-4577 was
// to fix insert code to make sure enough space is
// always reserved on overflow pages such that
// updates will never fail. But the fix does not
// affect existing problem overflow pages. If
// this error is encountered in a table created
// by software before the fix, run compress to
// upgrade all data in table so that error will
// not be encountered in future.
throw new NoSpaceOnPage(isOverflowPage());
}
else
{
// we need to go back to the last column
// that left enough room for an overflow pointer.
out.setPosition(lastColumnPositionAllowOverflow);
nextColumn = lastColumnAllowOverflow;
}
}
}
if (nextColumn < endFieldExclusive)
{
// If the number of cols has been reduced.
int actualNumberFields = nextColumn - startColumn;
// go back and update that numberFields in recordHeader.
// no need to update spaceAvailable here, because if we are
// here, we will be returning any way, and spaceAvailable
// will be thrown away.
int oldSize = recordHeader.size();
recordHeader.setNumberFields(actualNumberFields);
int newSize = recordHeader.size();
// now we are ready to write the new record header.
int endPosition = out.getPosition();
if (oldSize > newSize)
{
// if the old size is bigger than the new size, then
// leave extra bytes at the beginning of byte stream.
int delta = oldSize - newSize;
out.setBeginPosition(beginPosition + delta);
out.setPosition(beginPosition + delta);
}
else if (newSize > oldSize)
{
out.setPosition(beginPosition);
}
else
{
out.setBeginPosition(beginPosition);
out.setPosition(beginPosition);
}
int realLen = recordHeader.write(logicalDataOut);
if (SanityManager.DEBUG)
{
if ((realLen + (oldSize - newSize)) != oldSize)
{
SanityManager.THROWASSERT(
"recordHeader size incorrect. realLen = " +
realLen + ", delta = " +
(oldSize - newSize) + ", oldSize = " + oldSize);
}
}
out.setPosition(endPosition);
if (!forInsert)
{
// The update is incomplete, fields beyond this
// point will have to move off the page. For any fields
// that are not being updated we have to save their
// values from this page to insert into an overflow
// portion.
//
// When the complete row is being updated there is no
// need to save any fields so just return.
if (validColumns != null)
{
handleIncompleteLogRow(
slot, nextColumn, validColumns, out);
}
}
return (nextColumn);
}
columnFlag = COLUMN_NONE;
}
out.setBeginPosition(beginPosition);
startColumn = -1;
if ((calcMinimumRecordSize) &&
(spaceAvailable < (minimumRecordSize - userRowSize)))
{
throw new NoSpaceOnPage(isOverflowPage());
}
}
finally
{
resetOutputStream();
}
return (startColumn);
}
/**
* Handle an update of a record portion that is incomplete.
* <p>
* Handle an update of a record portion that is incomplete.
* Ie. Columns have expanded that require other columns to move
* off the page into a new portion.
* <P>
* This method works out which of the columns that need to be moved are
* not being updated and makes a copy of their data. It then throws an
* exception with this data, much like the long column exception which will
* then allow the original insert to complete.
* <P>
* If no columns need to be saved (ie all the ones that would move are
* being updated) then no exception is thrown, logRow() will return and the
* update completes normally.
* <p>
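* As an illustrative sketch (column numbers hypothetical): if an
* update to column 2 of a 5-column row grows the row so that columns
* 3 and 4 no longer fit on this page, and only column 2 is in
* columnList, this method copies the on-page values of columns 3 and
* 4 into RawFields and throws them inside a LongColumnException, so
* the caller can insert them into a new overflow row piece.
* <p>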
*
* @param slot slot of the current update.
* @param startColumn column to start at, handles start in middle of row
* @param columnList bit map indicating which columns are being updated.
* @param out place to log to.
*
* @exception StandardException Standard exception policy.
**/
private void handleIncompleteLogRow(
int slot,
int startColumn,
FormatableBitSet columnList,
DynamicByteArrayOutputStream out)
throws StandardException
{
if (SanityManager.DEBUG)
SanityManager.ASSERT(columnList != null);
StoredRecordHeader rh = getHeaderAtSlot(slot);
int endFieldExclusive = rh.getFirstField() + rh.getNumberFields();
// first see if any fields are not being modified
boolean needSave = false;
int columnListSize = columnList.size();
for (int i = startColumn; i < endFieldExclusive; i++)
{
if (!(columnListSize > i && columnList.get(i)))
{
needSave = true;
break;
}
}
if (!needSave)
return;
Object[] savedFields =
new Object[endFieldExclusive - startColumn];
ByteArrayOutputStream fieldStream = null;
for (int i = startColumn; i < endFieldExclusive; i++)
{
// column is being updated - ignore
if (columnListSize > i && columnList.get(i))
continue;
// save the data
try
{
// use the old value - we use logField to ensure that we
// get the raw contents of the field and don't follow
// any long columns. In addition we save this as a RawField
// so that we preserve the state of the field header.
if (fieldStream == null)
fieldStream = new ByteArrayOutputStream();
else
fieldStream.reset();
logField(slot, i, fieldStream);
savedFields[i - startColumn] =
new RawField(fieldStream.toByteArray());
}
catch (IOException ioe)
{
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
}
}
// Use a long column exception to notify the caller of the need
// to perform an insert of the columns that need to move
LongColumnException lce = new LongColumnException();
lce.setExceptionInfo(
out, startColumn, -1 /* indicates not actual long column */);
lce.setColumn(savedFields);
throw lce;
}
/**
* Restore a storable row from a LimitInputStream.
* <p>
* Restore a storable row from an LimitInputStream - user must supply two
* streams on top of the same data, one implements ObjectInput interface
* that knows how to restore the object, the other one implements
* LimitInputStream.
* <p>
* @param in the limit input stream
* @param row (IN/OUT) row that is to be restored
* (sparse representation)
*
* @exception StandardException Standard exception policy.
* @exception IOException I/O exception in reading meta data.
**/
public void restoreRecordFromStream(
LimitObjectInput in,
Object[] row)
throws StandardException, IOException
{
StoredRecordHeader recordHeader = new StoredRecordHeader();
recordHeader.read(in);
readRecordFromStream(
row,
row.length - 1,
(int[]) null,
(int[]) null,
in,
recordHeader,
null);
}
/**
* Process the qualifier list on the row, return true if it qualifies.
* <p>
* A two dimensional array is to be used to pass around a AND's and OR's in
* conjunctive normal form. The top slot of the 2 dimensional array is
* optimized for the more frequent where no OR's are present. The first
* array slot is always a list of AND's to be treated as described above
* for single dimensional AND qualifier arrays. The subsequent slots are
* to be treated as AND'd arrays of OR's. Thus the 2 dimensional array
* qual[][] argument is to be treated as the following; note that if
* qual.length = 1 then only the first array is valid and it is an
* array of AND clauses:
*
* (qual[0][0] and qual[0][1] ... and qual[0][qual[0].length - 1])
* and
* (qual[1][0] or qual[1][1] ... or qual[1][qual[1].length - 1])
* and
* (qual[2][0] or qual[2][1] ... or qual[2][qual[2].length - 1])
* ...
* and
* (qual[qual.length - 1][0] or ... or
*  qual[qual.length - 1][qual[qual.length - 1].length - 1])
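*
* For example (a purely illustrative predicate): the qualification
* "a = 1 AND (b = 2 OR c = 3)" would arrive as
* qual[0] = { (a = 1) } and qual[1] = { (b = 2), (c = 3) }.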
*
*
* @return true if the row qualifies.
*
* @param row The row being qualified.
* @param qual_list 2 dimensional array representing conjunctive
* normal form of simple qualifiers.
*
* @exception StandardException Standard exception policy.
**/
private boolean qualifyRecordFromRow(
Object[] row,
Qualifier[][] qual_list)
throws StandardException
{
boolean row_qualifies = true;
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(row != null);
}
// First do the qual[0] which is an array of qualifier terms.
if (SanityManager.DEBUG)
{
// routine should not be called if there is no qualifier
SanityManager.ASSERT(qual_list != null);
SanityManager.ASSERT(qual_list.length > 0);
}
for (int i = 0; i < qual_list[0].length; i++)
{
// process each AND clause
row_qualifies = false;
// Apply one qualifier to the row.
Qualifier q = qual_list[0][i];
// Get the column from the possibly partial row, of the
// q.getColumnId()'th column in the full row.
DataValueDescriptor columnValue =
(DataValueDescriptor) row[q.getColumnId()];
row_qualifies =
columnValue.compare(
q.getOperator(),
q.getOrderable(),
q.getOrderedNulls(),
q.getUnknownRV());
if (q.negateCompareResult())
row_qualifies = !row_qualifies;
// Once an AND fails the whole Qualification fails - do a return!
if (!row_qualifies)
return(false);
}
// all the qual[0] AND terms passed, now process the OR clauses
for (int and_idx = 1; and_idx < qual_list.length; and_idx++)
{
// process the next AND'd array of OR clauses.
row_qualifies = false;
if (SanityManager.DEBUG)
{
// Each OR clause must be non-empty.
SanityManager.ASSERT(qual_list[and_idx].length > 0);
}
for (int or_idx = 0; or_idx < qual_list[and_idx].length; or_idx++)
{
// Apply one qualifier to the row.
Qualifier q = qual_list[and_idx][or_idx];
int col_id = q.getColumnId();
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
(col_id < row.length),
"Qualifier is referencing a column not in the row.");
}
// Get the column from the possibly partial row, of the
// q.getColumnId()'th column in the full row.
DataValueDescriptor columnValue =
(DataValueDescriptor) row[q.getColumnId()];
if (SanityManager.DEBUG)
{
if (columnValue == null)
SanityManager.THROWASSERT(
"1:row = " + RowUtil.toString(row) +
"row.length = " + row.length +
";q.getColumnId() = " + q.getColumnId());
}
// do the compare between the column value and value in the
// qualifier.
row_qualifies =
columnValue.compare(
q.getOperator(),
q.getOrderable(),
q.getOrderedNulls(),
q.getUnknownRV());
if (q.negateCompareResult())
row_qualifies = !row_qualifies;
// SanityManager.DEBUG_PRINT("StoredPage.qual", "processing qual[" + and_idx + "][" + or_idx + "] = " + qual_list[and_idx][or_idx] );
// SanityManager.DEBUG_PRINT("StoredPage.qual", "value = " + row_qualifies);
// processing "OR" clauses, so as soon as one is true, break
// to go and process next AND clause.
if (row_qualifies)
break;
}
// The qualifier list represented a set of "AND'd"
// qualifications so as soon as one is false processing is done.
if (!row_qualifies)
break;
}
return(row_qualifies);
}
/**
* Read just one column from stream into row.
* <p>
* The routine reads just one column from the row, it is mostly code
* taken from readRecordFromStream, but highly optimized to just get
* one column from a non-overflow row. It can only be called to read
* a row from the pageData array as it directly accesses the page array
* to avoid the Stream overhead while processing non-user data which
* does not need the limit functionality.
* <p>
* It is expected that this code will be called to read in a column
* associated with qualifiers, which are applied one column at a time,
* and has been specialized to provide the greatest performance for
* processing qualifiers. This kind of access is done when scanning
* large datasets while applying qualifiers and thus any performance
* gain at this low level is multiplied by the large number of rows that
* may be iterated over.
* <p>
* The column is read into the object located in row[colid].
*
* @param row column is read into the object in row[colid].
* @param colid the column id to read; colid N is row[N]
* @param offset_to_field_data offset in bytes from top of page to field
* @param recordHeader record header of row to read column from.
* @param recordToLock record handle to lock,
* used by overflow column code.
*
* @exception StandardException Standard exception policy.
**/
private final void readOneColumnFromPage(
Object[] row,
int colid,
int offset_to_field_data,
StoredRecordHeader recordHeader,
RecordHandle recordToLock)
throws StandardException, IOException
{
ErrorObjectInput inUserCode = null;
// Reads in this routine are always against the raw data in the
// pageData array, thus it can assume array access to page data array.
ArrayInputStream lrdi = rawDataIn;
try
{
if (SanityManager.DEBUG)
{
if (colid >= row.length)
SanityManager.THROWASSERT(
"colid = " + colid +
";row length = " + row.length);
// currently this routine will not work on long rows.
if (recordHeader.getFirstField() != 0)
{
SanityManager.THROWASSERT(
"recordHeader.getFirstField() = " +
recordHeader.getFirstField());
}
}
Object column = row[colid];
// if the column id exists on this page.
if (colid <= (recordHeader.getNumberFields() - 1))
{
// skip the fields before colid; the column in question
// exists on this page.
for (int columnId = colid; columnId > 0; columnId--)
{
offset_to_field_data +=
StoredFieldHeader.readTotalFieldLength(
pageData, offset_to_field_data);
}
// read the field header
// read the status byte.
int fieldStatus =
StoredFieldHeader.readStatus(
pageData, offset_to_field_data);
// read the field data length, and position on 1st byte of data.
int fieldDataLength =
StoredFieldHeader.readFieldLengthAndSetStreamPosition(
pageData,
offset_to_field_data +
StoredFieldHeader.STORED_FIELD_HEADER_STATUS_SIZE,
fieldStatus,
slotFieldSize,
lrdi);
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
!StoredFieldHeader.isExtensible(fieldStatus),
"extensible fields not supported yet");
}
// SRW-DJD code assumes non-extensible case ...
if (!StoredFieldHeader.isNonexistent(fieldStatus))
{
boolean isOverflow =
StoredFieldHeader.isOverflow(fieldStatus);
OverflowInputStream overflowIn = null;
if (isOverflow)
{
// A fetched long column is returned as a stream
long overflowPage =
CompressedNumber.readLong((InputStream) lrdi);
int overflowId =
CompressedNumber.readInt((InputStream) lrdi);
// Prepare the stream for results...
// create the byteHolder the size of a page, so that it
// will fit the field data that would fit on a page.
MemByteHolder byteHolder =
new MemByteHolder(pageData.length);
overflowIn = new OverflowInputStream(
byteHolder, owner, overflowPage,
overflowId, recordToLock);
}
// Deal with Storable columns
if (column instanceof DataValueDescriptor)
{
DataValueDescriptor sColumn =
(DataValueDescriptor) column;
// is the column null ?
if (StoredFieldHeader.isNull(fieldStatus))
{
sColumn.restoreToNull();
}
else
{
// set the limit for the user read
if (!isOverflow)
{
// normal, non-overflow column case.
lrdi.setLimit(fieldDataLength);
inUserCode = lrdi;
sColumn.readExternalFromArray(lrdi);
inUserCode = null;
int unread = lrdi.clearLimit();
if (unread != 0)
DataInputUtil.skipFully(lrdi, unread);
}
else
{
// fetched column is a Storable long column.
FormatIdInputStream newIn =
new FormatIdInputStream(overflowIn);
if ((sColumn instanceof StreamStorable))
{
((StreamStorable)sColumn).setStream(newIn);
}
else
{
inUserCode = newIn;
sColumn.readExternal(newIn);
inUserCode = null;
}
}
}
}
else
{
// At this point only non-Storable columns.
if (StoredFieldHeader.isNull(fieldStatus))
{
// Only Storables can be null ...
throw StandardException.newException(
SQLState.DATA_NULL_STORABLE_COLUMN,
Integer.toString(colid));
}
// This is a non-extensible field, which means the
// caller must know the correct type and thus the
// element in row is the correct type or null. It must
// be Serializable.
//
// We do not support Externalizable here.
lrdi.setLimit(fieldDataLength);
inUserCode = lrdi;
// RESOLVE (no non-storables?)
row[colid] = (Object) lrdi.readObject();
inUserCode = null;
int unread = lrdi.clearLimit();
if (unread != 0)
DataInputUtil.skipFully(lrdi, unread);
}
}
else
{
// column does not exist in the row, return null.
// field is non-existent
if (column instanceof DataValueDescriptor)
{
// RESOLVE - This is in place for 1.2. In the future
// we may want to return this column as non-existent
// even if it is a storable column, or maybe use a
// supplied default.
((DataValueDescriptor) column).restoreToNull();
}
else
{
row[colid] = null;
}
}
}
else
{
// field does not exist on this page.
if (column instanceof DataValueDescriptor)
{
// RESOLVE - This is in place for 1.2. In the future
// we may want to return this column as non-existent
// even if it is a storable column, or maybe use a
// supplied default.
((DataValueDescriptor) column).restoreToNull();
}
else
{
row[colid] = null;
}
}
}
catch (IOException ioe)
{
// an exception during the restore of a user column, this doesn't
// make the database corrupt, just that this field is inaccessible
if (inUserCode != null)
{
lrdi.clearLimit();
if (ioe instanceof EOFException)
{
if (SanityManager.DEBUG)
{
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"StoredPage.readOneColumnFromPage - EOF while restoring record: " +
recordHeader +
"Page dump = " + this);
SanityManager.showTrace(ioe);
}
// going beyond the limit in a DataInput class results in
// an EOFException when it sees the -1 from a read
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_MISMATCH,
ioe, inUserCode.getErrorInfo());
}
// some SQLData error reporting
Exception ne = inUserCode.getNestedException();
if (ne != null)
{
if (ne instanceof InstantiationException)
{
throw StandardException.newException(
SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION,
ne, inUserCode.getErrorInfo());
}
if (ne instanceof IllegalAccessException)
{
throw StandardException.newException(
SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION,
ne, inUserCode.getErrorInfo());
}
if (ne instanceof StandardException)
{
throw (StandardException) ne;
}
}
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_EXCEPTION,
ioe, inUserCode.getErrorInfo());
}
// re-throw to higher levels so they can put it in correct context.
throw ioe;
}
catch (ClassNotFoundException cnfe)
{
lrdi.clearLimit();
// an exception during the restore of a user column, this doesn't
// make the database corrupt, just that this field is inaccessible
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_MISSING_CLASS,
cnfe, inUserCode.getErrorInfo());
}
catch (LinkageError le)
{
// Some error during the link of a user class
if (inUserCode != null)
{
lrdi.clearLimit();
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_EXCEPTION,
le, inUserCode.getErrorInfo());
}
throw le;
}
}
/**
* Process the list of qualifiers on the row in the stream.
* <p>
* The rawDataIn stream is expected to be positioned after the record
* header.
* <p>
* Check all qualifiers in the qualifier array against row. Return true
* if all compares specified by the qualifier array return true, else
* return false.
* <p>
* This routine assumes client caller has already checked if the row
* is deleted or not. The row that it get's is expected to match
* the partial column list of the scan.
* <p>
* On entering this routine the stream should be positioned to the
* beginning of the row data, just after the row header. On exit
* the stream will also be positioned there.
*
* A two dimensional array is to be used to pass around a AND's and OR's in
* conjunctive normal form. The top slot of the 2 dimensional array is
* optimized for the more frequent where no OR's are present. The first
* array slot is always a list of AND's to be treated as described above
* for single dimensional AND qualifier arrays. The subsequent slots are
* to be treated as AND'd arrays of OR's. Thus the 2 dimensional array
* qual[][] argument is to be treated as the following; note that if
* qual.length = 1 then only the first array is valid and it is an
* array of AND clauses:
*
* (qual[0][0] and qual[0][1] ... and qual[0][qual[0].length - 1])
* and
* (qual[1][0] or qual[1][1] ... or qual[1][qual[1].length - 1])
* and
* (qual[2][0] or qual[2][1] ... or qual[2][qual[2].length - 1])
* ...
* and
* (qual[qual.length - 1][0] or ... or
*  qual[qual.length - 1][qual[qual.length - 1].length - 1])
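*
* (See qualifyRecordFromRow for an illustrative example of this
* layout.)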
*
* @return Whether or not the row input qualifies.
*
* @param row restore row into this object array.
* @param offset_to_row_data offset in bytes from top of page to row
* @param fetchDesc Description of fetch including which cols
* and qualifiers.
* @param recordHeader The record header of the row, it was read
* in from stream and dataIn is positioned
* after it.
* @param recordToLock The head row to use for locking, used to
* lock head row of overflow columns/rows.
*
* @exception StandardException Standard exception policy.
**/
private final boolean qualifyRecordFromSlot(
Object[] row,
int offset_to_row_data,
FetchDescriptor fetchDesc,
StoredRecordHeader recordHeader,
RecordHandle recordToLock)
throws StandardException, IOException
{
boolean row_qualifies = true;
Qualifier[][] qual_list = fetchDesc.getQualifierList();
int[] materializedCols = fetchDesc.getMaterializedColumns();
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(qual_list != null, "Not coded yet!");
}
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(row != null);
}
// First process the initial list of AND's in the 1st array
for (int i = 0; i < qual_list[0].length; i++)
{
// process each AND clause
row_qualifies = false;
// Apply one qualifier to the row.
Qualifier q = qual_list[0][i];
int col_id = q.getColumnId();
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
(col_id < row.length),
"Qualifier is referencing a column not in the row.");
}
// materialize the column object if we haven't done it yet.
if (materializedCols[col_id] == 0)
{
// materialize just this column from the row, no qualifiers
readOneColumnFromPage(
row,
col_id,
offset_to_row_data,
recordHeader,
recordToLock);
// mark offset, indicating the row has been read in.
//
// RESOLVE (mikem) - right now value of entry is useless, it
// is an int so that in the future we could cache the offset
// to fields to improve performance of getting to a column
// after qualifying.
materializedCols[col_id] = offset_to_row_data;
}
// Get the column from the possibly partial row, of the
// q.getColumnId()'th column in the full row.
if (SanityManager.DEBUG)
{
if (row[col_id] == null)
SanityManager.THROWASSERT(
"1:row = " + RowUtil.toString(row) +
"row.length = " + row.length +
";q.getColumnId() = " + q.getColumnId());
}
// do the compare between the column value and value in the
// qualifier.
row_qualifies =
((DataValueDescriptor) row[col_id]).compare(
q.getOperator(),
q.getOrderable(),
q.getOrderedNulls(),
q.getUnknownRV());
if (q.negateCompareResult())
row_qualifies = !row_qualifies;
// Once an AND fails the whole Qualification fails - do a return!
if (!row_qualifies)
return(false);
}
// Now process the subsequent OR clauses, beginning with qual_list[1]
for (int and_idx = 1; and_idx < qual_list.length; and_idx++)
{
// process the next AND'd array of OR clauses.
row_qualifies = false;
for (int or_idx = 0; or_idx < qual_list[and_idx].length; or_idx++)
{
// Apply one qualifier to the row.
Qualifier q = qual_list[and_idx][or_idx];
int col_id = q.getColumnId();
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
(col_id < row.length),
"Qualifier is referencing a column not in the row.");
}
// materialize the column object if we haven't done it yet.
if (materializedCols[col_id] == 0)
{
// materialize just this column from the row, no qualifiers
readOneColumnFromPage(
row,
col_id,
offset_to_row_data,
recordHeader,
recordToLock);
// mark offset, indicating the row has been read in.
//
// RESOLVE (mikem) - right now value of entry is useless, it
// is an int so that in the future we could cache the offset
// to fields to improve performance of getting to a column
// after qualifying.
materializedCols[col_id] = offset_to_row_data;
}
// Get the column from the possibly partial row, of the
// q.getColumnId()'th column in the full row.
if (SanityManager.DEBUG)
{
if (row[col_id] == null)
SanityManager.THROWASSERT(
"1:row = " + RowUtil.toString(row) +
"row.length = " + row.length +
";q.getColumnId() = " + q.getColumnId());
}
// do the compare between the column value and value in the
// qualifier.
row_qualifies =
((DataValueDescriptor) row[col_id]).compare(
q.getOperator(),
q.getOrderable(),
q.getOrderedNulls(),
q.getUnknownRV());
if (q.negateCompareResult())
row_qualifies = !row_qualifies;
// SanityManager.DEBUG_PRINT("StoredPage.qual", "processing qual[" + and_idx + "][" + or_idx + "] = " + qual_list[and_idx][or_idx] );
// SanityManager.DEBUG_PRINT("StoredPage.qual", "value = " + row_qualifies);
// processing "OR" clauses, so as soon as one is true, break
// to go and process next AND clause.
if (row_qualifies)
break;
}
// The qualifier list represented a set of "AND'd"
// qualifications so as soon as one is false processing is done.
if (!row_qualifies)
break;
}
return(row_qualifies);
}
/**
* Restore a record from a stream.
* <p>
* The rawDataIn stream is expected to be positioned after the record
* header.
*
* @return true if all requested columns up through max_colid were
* restored from this record portion; false if the row
* continues in a further portion.
*
* @param row restore row into this object array.
* @param max_colid The maximum numbered column id that will be
* requested by caller. It should be:
* min(row.length - 1, maximum bit set in vCols)
* It is used to stop the inner most loop from
* looking at more columns in the row.
* @param vCols If not null, bit map indicates valid cols.
* @param mCols If not null, int array indicates columns already
* read in from the stream. A non-zero entry
* means the column has already been read in.
* @param dataIn restore row from this stream.
* @param recordHeader The record header of the row, it was read in
* from stream and dataIn is positioned after it.
* @param recordToLock The head row to use for locking, used to lock
* head row of overflow columns/rows.
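*
* For example (hypothetical values): with vCols = {1, 0, 1} and
* mCols = null, only columns 0 and 2 are restored into row; column 1
* is skipped in the stream (its field bytes are still consumed when
* present on the page) and row[1] is left untouched.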
*
* @exception StandardException Standard exception policy.
**/
private final boolean readRecordFromStream(
Object[] row,
int max_colid,
int[] vCols,
int[] mCols,
LimitObjectInput dataIn,
StoredRecordHeader recordHeader,
RecordHandle recordToLock)
throws StandardException, IOException
{
ErrorObjectInput inUserCode = null;
try
{
// Get the number of columns in the row.
int numberFields = recordHeader.getNumberFields();
int startColumn = recordHeader.getFirstField();
if (startColumn > max_colid)
{
// done if the startColumn is higher than highest column.
return true;
}
// For each column in the row, restore the column from
// the corresponding field in the record. If the field
// is missing or not set, set the column to null.
int highestColumnOnPage = numberFields + startColumn;
int vColsSize = (vCols == null ) ? 0 : vCols.length;
for (int columnId = startColumn; columnId <= max_colid; columnId++)
{
// skip any "existing" columns not requested, or requested cols
// that have already been read.
if (((vCols != null) &&
(!(vColsSize > columnId && (vCols[columnId] != 0)))) ||
((mCols != null) && (mCols[columnId] != 0)))
{
if (columnId < highestColumnOnPage)
{
// If the field exists in the row on the page, but the
// partial row being returned does not include it,
// skip the field ...
skipField(dataIn);
}
continue;
}
// See if the column identifier is beyond the number of fields
// that this record has
if (columnId >= highestColumnOnPage)
{
// field is non-existent
Object column = row[columnId];
if (column instanceof DataValueDescriptor)
{
// RESOLVE - This is in place for 1.2. In the future
// we may want to return this column as non-existent
// even if it is a storable column, or maybe use a
// supplied default.
((DataValueDescriptor) column).restoreToNull();
}
else
{
row[columnId] = null;
}
continue;
}
// read the field header
int fieldStatus =
StoredFieldHeader.readStatus(dataIn);
int fieldDataLength =
StoredFieldHeader.readFieldDataLength(
dataIn, fieldStatus, slotFieldSize);
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
!StoredFieldHeader.isExtensible(fieldStatus),
"extensible fields not supported yet");
}
Object column = row[columnId];
OverflowInputStream overflowIn = null;
// SRW-DJD code assumes non-extensible case ...
// field is non-existent, return null
if (StoredFieldHeader.isNonexistent(fieldStatus))
{
if (column instanceof DataValueDescriptor)
{
// RESOLVE - This is in place for 1.2. In the future
// we may want to return this column as non-existent
// even if it is a storable column, or maybe use a
// supplied default.
((DataValueDescriptor) column).restoreToNull();
}
else
{
row[columnId] = null;
}
continue;
}
boolean isOverflow = StoredFieldHeader.isOverflow(fieldStatus);
if (isOverflow)
{
// A fetched long column needs to be returned as a stream
//
long overflowPage =
CompressedNumber.readLong((InputStream) dataIn);
int overflowId =
CompressedNumber.readInt((InputStream) dataIn);
// Prepare the stream for results...
// create the byteHolder the size of a page, so that it
// will fit the field data that would fit on a page.
MemByteHolder byteHolder =
new MemByteHolder(pageData.length);
overflowIn = new OverflowInputStream(
byteHolder, owner, overflowPage,
overflowId, recordToLock);
}
// Deal with Object columns
if (column instanceof DataValueDescriptor)
{
DataValueDescriptor sColumn = (DataValueDescriptor) column;
// is the column null ?
if (StoredFieldHeader.isNull(fieldStatus))
{
sColumn.restoreToNull();
continue;
}
// set the limit for the user read
if (!isOverflow)
{
// normal, non-overflow column case.
dataIn.setLimit(fieldDataLength);
inUserCode = dataIn;
sColumn.readExternal(dataIn);
inUserCode = null;
int unread = dataIn.clearLimit();
if (unread != 0)
DataInputUtil.skipFully(dataIn, unread);
}
else
{
// column being fetched is an Object long column.
FormatIdInputStream newIn =
new FormatIdInputStream(overflowIn);
// if a column is a long column, the store recommends that
// the user fetch it as a stream.
boolean fetchStream = true;
if (!(sColumn instanceof StreamStorable))
{
fetchStream = false;
}
if (fetchStream)
{
((StreamStorable)sColumn).setStream(newIn);
}
else
{
inUserCode = newIn;
sColumn.readExternal(newIn);
inUserCode = null;
}
}
continue;
}
// At this point only non-Storable columns.
if (StoredFieldHeader.isNull(fieldStatus))
{
// Only Storables can be null ...
throw StandardException.newException(
SQLState.DATA_NULL_STORABLE_COLUMN,
Integer.toString(columnId));
}
// This is a non-extensible field, which means the caller must
// know the correct type and thus the element in row is the
// correct type or null. It must be Serializable.
//
// We do not support Externalizable here.
dataIn.setLimit(fieldDataLength);
inUserCode = dataIn;
row[columnId] = (Object) dataIn.readObject();
inUserCode = null;
int unread = dataIn.clearLimit();
if (unread != 0)
DataInputUtil.skipFully(dataIn, unread);
continue;
}
// if the last column on this page is bigger than the highest
// column we are looking for, then we are done restoring the record.
if ((numberFields + startColumn) > max_colid)
return true;
else
return false;
}
catch (IOException ioe)
{
// an exception during the restore of a user column; this doesn't
// make the database corrupt, just that this field is inaccessible
if (inUserCode != null)
{
dataIn.clearLimit();
if (ioe instanceof EOFException)
{
if (SanityManager.DEBUG)
{
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"StoredPage - EOF while restoring record: " +
recordHeader +
"Page dump = " + this);
}
// going beyond the limit in a DataInput class results in
// an EOFException when it sees the -1 from a read
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_MISMATCH,
ioe, inUserCode.getErrorInfo());
}
// some SQLData error reporting
Exception ne = inUserCode.getNestedException();
if (ne != null)
{
if (ne instanceof InstantiationException)
{
throw StandardException.newException(
SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION,
ne, inUserCode.getErrorInfo());
}
if (ne instanceof IllegalAccessException)
{
throw StandardException.newException(
SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION,
ne, inUserCode.getErrorInfo());
}
if (ne instanceof StandardException)
{
throw (StandardException) ne;
}
}
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_EXCEPTION,
ioe, inUserCode.getErrorInfo());
}
// re-throw to higher levels so they can put it in correct context.
throw ioe;
}
catch (ClassNotFoundException cnfe)
{
dataIn.clearLimit();
// an exception during the restore of a user column, this doesn't
// make the database corrupt, just that this field is inaccessable
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_MISSING_CLASS,
cnfe, inUserCode.getErrorInfo());
}
catch (LinkageError le)
{
// Some error during the link of a user class
if (inUserCode != null)
{
dataIn.clearLimit();
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_EXCEPTION,
le, inUserCode.getErrorInfo());
}
throw le;
}
}
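/*
* For orientation, a minimal sketch (illustrative only, not part of this
* class) of the column-selection predicate used in the loop above: a
* column is skipped when a validColumns vector (vCols) was supplied and
* does not select it, or when a materialized-columns vector (mCols) says
* it has already been read. Nonzero entries mean "requested" /
* "already read"; the helper name is hypothetical.
*
* static boolean skipColumn(int[] vCols, int[] mCols, int columnId)
* {
*     boolean requested =
*         (vCols == null) ||
*         ((columnId < vCols.length) && (vCols[columnId] != 0));
*     boolean alreadyRead =
*         (mCols != null) &&
*         (columnId < mCols.length) && (mCols[columnId] != 0);
*     return !requested || alreadyRead;
* }
*/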
private final boolean readRecordFromArray(
Object[] row,
int max_colid,
int[] vCols,
int[] mCols,
ArrayInputStream dataIn,
StoredRecordHeader recordHeader,
RecordHandle recordToLock)
throws StandardException, IOException
{
ErrorObjectInput inUserCode = null;
try
{
// Get the number of columns in the row.
int numberFields = recordHeader.getNumberFields();
int startColumn = recordHeader.getFirstField();
if (startColumn > max_colid)
{
// done if the startColumn is higher than highest column.
return true;
}
// For each column in the row, restore the column from
// the corresponding field in the record. If the field
// is missing or not set, set the column to null.
int highestColumnOnPage = numberFields + startColumn;
int vColsSize = (vCols == null ) ? 0 : vCols.length;
int offset_to_field_data = dataIn.getPosition();
for (int columnId = startColumn; columnId <= max_colid; columnId++)
{
// skip any "existing" columns not requested, or requested cols
// that have already been read.
if (((vCols != null) &&
(!(vColsSize > columnId && (vCols[columnId] != 0)))) ||
((mCols != null) && (mCols[columnId] != 0)))
{
if (columnId < highestColumnOnPage)
{
// If the field exists in the row on the page, but the
// partial row being returned does not include it,
// skip the field ...
offset_to_field_data +=
StoredFieldHeader.readTotalFieldLength(
pageData, offset_to_field_data);
}
continue;
}
else if (columnId < highestColumnOnPage)
{
// the column is on this page.
// read the field header
// read the status byte.
int fieldStatus =
StoredFieldHeader.readStatus(
pageData, offset_to_field_data);
// read the field data length, position on 1st byte of data
int fieldDataLength =
StoredFieldHeader.readFieldLengthAndSetStreamPosition(
pageData,
offset_to_field_data +
StoredFieldHeader.STORED_FIELD_HEADER_STATUS_SIZE,
fieldStatus,
slotFieldSize,
dataIn);
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
!StoredFieldHeader.isExtensible(fieldStatus),
"extensible fields not supported yet");
}
Object column = row[columnId];
OverflowInputStream overflowIn = null;
// SRW-DJD code assumes non-extensible case ...
if ((fieldStatus & StoredFieldHeader.FIELD_NONEXISTENT) !=
StoredFieldHeader.FIELD_NONEXISTENT)
{
// normal path - field exists.
boolean isOverflow =
((fieldStatus &
StoredFieldHeader.FIELD_OVERFLOW) != 0);
if (isOverflow)
{
// A fetched long column is returned as a stream
long overflowPage =
CompressedNumber.readLong((InputStream) dataIn);
int overflowId =
CompressedNumber.readInt((InputStream) dataIn);
// Prepare the stream for results...
// create the byteHolder the size of a page, so
// that it will fit the field data that would fit
// on a page.
MemByteHolder byteHolder =
new MemByteHolder(pageData.length);
overflowIn = new OverflowInputStream(
byteHolder, owner, overflowPage,
overflowId, recordToLock);
}
// Deal with Object columns
if (column instanceof DataValueDescriptor)
{
DataValueDescriptor sColumn =
(DataValueDescriptor) column;
// is the column null ?
if ((fieldStatus &
StoredFieldHeader.FIELD_NULL) == 0)
{
// the field is not null.
// set the limit for the user read
if (!isOverflow)
{
// normal, non-overflow column case.
dataIn.setLimit(fieldDataLength);
inUserCode = dataIn;
sColumn.readExternalFromArray(dataIn);
inUserCode = null;
int unread = dataIn.clearLimit();
if (unread != 0)
DataInputUtil.skipFully(dataIn, unread);
}
else
{
// column being fetched is a long column.
FormatIdInputStream newIn =
new FormatIdInputStream(overflowIn);
// long columns are fetched as a stream.
boolean fetchStream = true;
if (!(sColumn instanceof StreamStorable))
{
fetchStream = false;
}
if (fetchStream)
{
((StreamStorable) sColumn).setStream(
newIn);
}
else
{
inUserCode = newIn;
sColumn.readExternal(newIn);
inUserCode = null;
}
}
}
else
{
sColumn.restoreToNull();
}
}
else
{
// At this point only non-Storable columns.
if (StoredFieldHeader.isNull(fieldStatus))
{
// Only Storables can be null ...
throw StandardException.newException(
SQLState.DATA_NULL_STORABLE_COLUMN,
Integer.toString(columnId));
}
// This is a non-extensible field, which means the
// caller must know the correct type and thus the
// element in row is the correct type or null. It
// must be Serializable.
//
// We do not support Externalizable here.
dataIn.setLimit(fieldDataLength);
inUserCode = dataIn;
// RESOLVE (no non-storables?)
row[columnId] = (Object) dataIn.readObject();
inUserCode = null;
int unread = dataIn.clearLimit();
if (unread != 0)
DataInputUtil.skipFully(dataIn, unread);
}
}
else
{
// column is non-existent.
if (column instanceof DataValueDescriptor)
{
// RESOLVE - This is in place for 1.2. In the future
// we may want to return this column as non-existent
// even if it is a storable column, or maybe use a
// supplied default.
((DataValueDescriptor) column).restoreToNull();
}
else
{
row[columnId] = null;
}
}
// move the counter to point to beginning of next field.
offset_to_field_data = dataIn.getPosition();
}
else
{
// field is non-existent
Object column = row[columnId];
if (column instanceof DataValueDescriptor)
{
// RESOLVE - This is in place for 1.2. In the future
// we may want to return this column as non-existent
// even if it is a storable column, or maybe use a
// supplied default.
((DataValueDescriptor) column).restoreToNull();
}
else
{
row[columnId] = null;
}
}
}
// if the last column on this page is bigger than the highest
// column we are looking for, then we are done restoring the record.
if ((numberFields + startColumn) > max_colid)
return true;
else
return false;
}
catch (IOException ioe)
{
// an exception during the restore of a user column; this doesn't
// make the database corrupt, just that this field is inaccessible
if (inUserCode != null)
{
dataIn.clearLimit();
if (ioe instanceof EOFException)
{
if (SanityManager.DEBUG)
{
SanityManager.DEBUG_PRINT("DEBUG_TRACE",
"StoredPage - EOF while restoring record: " +
recordHeader +
"Page dump = " + this);
}
// going beyond the limit in a DataInput class results in
// an EOFException when it sees the -1 from a read
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_MISMATCH,
ioe, inUserCode.getErrorInfo());
}
// some SQLData error reporting
Exception ne = inUserCode.getNestedException();
if (ne != null)
{
if (ne instanceof InstantiationException)
{
throw StandardException.newException(
SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION,
ne, inUserCode.getErrorInfo());
}
if (ne instanceof IllegalAccessException)
{
throw StandardException.newException(
SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION,
ne, inUserCode.getErrorInfo());
}
if (ne instanceof StandardException)
{
throw (StandardException) ne;
}
}
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_EXCEPTION,
ioe, inUserCode.getErrorInfo());
}
// re-throw to higher levels so they can put it in correct context.
throw ioe;
}
catch (ClassNotFoundException cnfe)
{
dataIn.clearLimit();
// an exception during the restore of a user column; this doesn't
// make the database corrupt, just that this field is inaccessible
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_MISSING_CLASS,
cnfe, inUserCode.getErrorInfo());
}
catch (LinkageError le)
{
// Some error during the link of a user class
if (inUserCode != null)
{
dataIn.clearLimit();
throw StandardException.newException(
SQLState.DATA_STORABLE_READ_EXCEPTION,
le, inUserCode.getErrorInfo());
}
throw le;
}
}
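/*
* For orientation (illustrative, not part of the source): both read paths
* above, the stream-based and the array-based variant, parse the same
* on-page field layout, a status byte followed by the field data length
* followed by the data itself:
*
*     +--------+-------------------+----------- ... -----------+
*     | status | field data length | field data                |
*     +--------+-------------------+----------- ... -----------+
*
* The status bits (null, nonexistent, overflow, fixed, extensible) decide
* how the bytes that follow are interpreted; for an overflow field the
* "data" is a compressed (page number, record id) pointer to the next
* piece of the column.
*/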
/**
* Restore a portion of a long column.
* <p>
* Restore a portion of a long column - user must supply two streams on top
* of the same data, one implements ObjectInput interface that knows how to
* restore the object, the other one implements LimitInputStream.
*
* @param fetchStream the stream to read the next portion of long col from
*
* @exception StandardException Standard exception policy.
**/
public void restorePortionLongColumn(
OverflowInputStream fetchStream)
throws StandardException, IOException
{
int slot =
findRecordById(fetchStream.getOverflowId(), FIRST_SLOT_NUMBER);
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
int offset = getRecordOffset(slot);
int numberFields = recordHeader.getNumberFields();
if (SanityManager.DEBUG)
{
if ((numberFields > 2) || (numberFields < 1))
{
SanityManager.THROWASSERT(
"longColumn record header must have 1 or 2 fields." +
"numberFields = " + numberFields);
}
}
rawDataIn.setPosition(offset + recordHeader.size());
int fieldStatus =
StoredFieldHeader.readStatus(rawDataIn);
int fieldDataLength =
StoredFieldHeader.readFieldDataLength(
rawDataIn, fieldStatus, slotFieldSize);
// read the data portion of this segment from the stream.
ByteHolder bh = fetchStream.getByteHolder();
bh.write(rawDataIn, fieldDataLength);
fetchStream.setByteHolder(bh);
// set the next overflow pointer in the stream...
if (numberFields == 1)
{
// this is the last bit of the long column
fetchStream.setOverflowPage(-1);
fetchStream.setOverflowId(-1);
}
else
{
int firstFieldStatus = fieldStatus; // for DEBUG check
// get the field status and data length of the overflow pointer.
fieldStatus =
StoredFieldHeader.readStatus(rawDataIn);
fieldDataLength =
StoredFieldHeader.readFieldDataLength(
rawDataIn, fieldStatus, slotFieldSize);
if (SanityManager.DEBUG)
{
if (!StoredFieldHeader.isOverflow(fieldStatus))
{
// In version 1.5, the first field is overflow and the
// second is not. In version 2.0 onwards, the first
// field is not overflow and the second is overflow
// (the overflow bit goes with the overflow pointer).
// Check first field to make sure its overflow bit is
// set on.
SanityManager.ASSERT(
StoredFieldHeader.isOverflow(firstFieldStatus));
}
}
long overflowPage =
CompressedNumber.readLong((InputStream) rawDataIn);
int overflowId =
CompressedNumber.readInt((InputStream) rawDataIn);
// there is more after this chunk.
fetchStream.setOverflowPage(overflowPage);
fetchStream.setOverflowId(overflowId);
}
}
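/*
* A hedged sketch of how a caller might drain a long-column chain with
* this method (the driver below is hypothetical; latchOverflowPage stands
* in for whatever code latches the next overflow page). Each call appends
* one segment's bytes to the stream's ByteHolder and either advances the
* (overflowPage, overflowId) pointer or sets both to -1 on the final,
* one-field segment.
*
* while (fetchStream.getOverflowPage() != -1)
* {
*     StoredPage page =
*         latchOverflowPage(fetchStream.getOverflowPage());
*     try {
*         page.restorePortionLongColumn(fetchStream);
*     } finally {
*         page.unlatch();
*     }
* }
*/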
/**
* Log a Storable to a stream.
* <p>
* Log a Storable into a stream. This is used by update field operations
* <P>
* Write the column in its field format to the stream. Field format is a
* field header followed by the data of the column as defined by the data
* itself. See this class's description for the specifics of the header.
*
* @exception StandardException Standard Derby error policy
* @exception IOException RESOLVE
**/
public void logColumn(
int slot,
int fieldId,
Object column,
DynamicByteArrayOutputStream out,
int overflowThreshold)
throws StandardException, IOException
{
// calculate the space available on the page, it includes
// the free space
// the space the record has reserved but not used
// the length of the old field itself
// free space
int bytesAvailable = freeSpace;
int beginPosition = -1;
// space reserved, but not used by the record
bytesAvailable += getReservedCount(slot);
// The size of the old field is also available for the new field
rawDataIn.setPosition(getFieldOffset(slot, fieldId));
int fieldStatus =
StoredFieldHeader.readStatus(rawDataIn);
int fieldDataLength =
StoredFieldHeader.readFieldDataLength(
rawDataIn, fieldStatus, slotFieldSize);
bytesAvailable +=
StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize)
+ fieldDataLength;
try
{
setOutputStream(out);
beginPosition = rawDataOut.getPosition();
Object[] row = new Object[1];
row[0] = column;
if (bytesAvailable == logColumn(
row, 0, out, bytesAvailable,
COLUMN_NONE, overflowThreshold))
{
throw new NoSpaceOnPage(isOverflowPage());
}
}
finally
{
rawDataOut.setPosition(beginPosition);
resetOutputStream();
}
}
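/*
* Worked example of the space computation above, with illustrative
* numbers: if freeSpace = 100 bytes, getReservedCount(slot) = 20 bytes,
* and the old field occupies a 2-byte header plus 30 bytes of data, then
*
*     bytesAvailable = 100 + 20 + (2 + 30) = 152 bytes
*
* i.e. an update may consume the page's free space, the record's unused
* reserve, and the space of the field image it replaces.
*/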
/**
* Log a long column into a DataOutput.
* <p>
* Log a long column into a DataOutput. This is used by insert operations
* <P>
* Write the column in its field format to the stream. Field format is a
* field header followed by the data of the column as defined by the data
* itself. See this class's description for the specifics of the header.
*
* @param slot slot of the row with the column
* @param recordId record id of the long column portion being logged
* @param column the object form of the column to log
* @param out where to log to the column to.
*
* @exception StandardException Standard Derby error policy
* @exception IOException I/O exception from writing to an array.
*
* @see BasePage#logColumn
**/
public int logLongColumn(
int slot,
int recordId,
Object column,
DynamicByteArrayOutputStream out)
throws StandardException, IOException
{
int spaceAvailable = freeSpace;
// need to account for the slot table using extra space...
spaceAvailable -= slotEntrySize;
// <= is ok here as we know we want to write at least one more byte
if (spaceAvailable <= 0)
throw new NoSpaceOnPage(isOverflowPage());
setOutputStream(out);
int beginPosition = out.getPosition();
try
{
// in the long column portion on the new page there will be 1 field
// if the portion fits on the page (2 if it needs another pointer
// to continue to yet another page).
int numberFields = 1;
StoredRecordHeader recordHeader =
new StoredRecordHeader(recordId, numberFields);
int recordHeaderLength = recordHeader.write(logicalDataOut);
spaceAvailable -= recordHeaderLength;
if (spaceAvailable < 0)
{
// this part of long column won't totally fit on page, it
// needs to be linked to another page. Throw exception and
// caller will handle logging an overflow column portion
// with a forward pointer.
throw new NoSpaceOnPage(isOverflowPage());
}
else
{
// the rest of the long column fits on the page!
Object[] row = new Object[1];
row[0] = column;
return logColumn(row, 0, out, spaceAvailable, COLUMN_LONG, 100);
}
}
finally
{
resetOutputStream();
}
}
/**
* Log column from input row to the given output stream.
* <p>
* Read data from row[arrayPosition], and write the column data in
* raw store page format to the given column. Along the way determine
* if the column will fit on the current page.
* <p>
* Action taken in this routine is determined by the kind of column as
* specified in the columnFlag:
* COLUMN_NONE - the column is insignificant
* COLUMN_FIRST - this is the first column in a logRow() call
* COLUMN_LONG - this is a known long column, therefore we will
* store part of the column on the current page and
* overflow the rest if necessary.
* <p>
* Upon entry to this routine logicalDataOut is tied to the
* DynamicByteArrayOutputStream out.
* <BR>
* If a column is a long column and it does not totally fit on the current
* page, then a LongColumnException is thrown. We package up info about
* the current long column in the partially filled in exception so that
* callers can take correct action. The column will now be set as a
* stream.
*
* @return The spaceAvailable after accounting for space for this column.
*
* @param row array of column from which to read the column from.
* @param arrayPosition The array position of the column to read from row.
* @param out The stream to write the raw store page format of
* the column to.
* @param spaceAvailable The number of bytes available on the page for
* this column, this may differ from current page
* as it may include bytes used by previous
* columns.
* @param columnFlag one of: COLUMN_NONE, COLUMN_FIRST, or COLUMN_LONG.
*
* @exception StandardException Standard exception policy.
* @exception LongColumnException Thrown if column will not fit on a
* single page. See notes above
**/
private int logColumn(
Object[] row,
int arrayPosition,
DynamicByteArrayOutputStream out,
int spaceAvailable,
int columnFlag,
int overflowThreshold)
throws StandardException, IOException
{
// RESOLVE (mikem) - why will row be null?
Object column = (row != null ? row[arrayPosition] : null);
// Check to see if the data comes from a page; if it does, then the field
// header is already formatted.
if (column instanceof RawField)
{
// field data is raw, no need to set up a field header etc.
byte[] data = ((RawField) column).getData();
if (data.length <= spaceAvailable)
{
out.write(data);
spaceAvailable -= data.length;
}
return spaceAvailable;
}
// If this is a long column, it may fit in this page or it may not.
boolean longColumnDone = true;
// default field status.
int fieldStatus =
StoredFieldHeader.setFixed(StoredFieldHeader.setInitial(), true);
int beginPosition = out.getPosition();
int columnBeginPosition = 0;
int headerLength;
int fieldDataLength = 0;
if (column instanceof StreamStorable)
{
StreamStorable stream_storable_column = (StreamStorable) column;
if (stream_storable_column.returnStream() != null)
{
column =
(Object) stream_storable_column.returnStream();
}
}
if ( (column == null) && (columnFlag != COLUMN_CREATE_NULL))
{
fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
headerLength =
StoredFieldHeader.write(
logicalDataOut, fieldStatus,
fieldDataLength, slotFieldSize);
}
else if (column instanceof InputStream)
{
RememberBytesInputStream bufferedIn = null;
int bufferLen = 0;
int estimatedMaxDataSize =
getMaxDataLength(spaceAvailable, overflowThreshold);
// if column is already instanceof RememberBytesInputStream, then we
// need to find out how many bytes have already been stored in the
// buffer.
if (column instanceof RememberBytesInputStream)
{
// data is already RememberBytesInputStream
bufferedIn = (RememberBytesInputStream) column;
bufferLen = bufferedIn.numBytesSaved();
}
else
{
// data comes in as an inputstream
bufferedIn = new RememberBytesInputStream(
(InputStream) column, new MemByteHolder(maxFieldSize + 1));
// always set stream of InputStream to RememberBytesInputStream
// so that all future access to this column will be able to
// get at the bytes drained from the InputStream, and copied
// into the RememberBytesInputStream.
if (row[arrayPosition] instanceof StreamStorable)
((StreamStorable)row[arrayPosition]).setStream(bufferedIn);
// set column to the RememberBytesInputStream so that
// all future access to this column will be able to get
// at bytes that have been already read. This assignment
// is needed to ensure that if long column exception is
// thrown, the column is set correctly
column = bufferedIn;
}
// read the buffer by reading the max we can read.
if (bufferLen < (estimatedMaxDataSize + 1))
{
bufferLen +=
bufferedIn.fillBuf(estimatedMaxDataSize + 1 - bufferLen);
}
if ((bufferLen <= estimatedMaxDataSize))
{
// we will be able to fit this into the page
fieldDataLength = bufferLen;
fieldStatus = StoredFieldHeader.setFixed(fieldStatus, true);
headerLength = StoredFieldHeader.write(
logicalDataOut, fieldStatus,
fieldDataLength, slotFieldSize);
// if the field is extensible, then we write the serializable
// formatId. if the field is non-extensible, we don't need to
// write the formatId. but at this point, how do we know
// whether the field is extensible or not??? For Plato release,
// we do not support InputStream on extensible types,
// therefore, we ignore the formatId for now.
bufferedIn.putBuf(logicalDataOut, fieldDataLength);
}
else
{
// current column will not fit into the current page.
if (columnFlag == COLUMN_LONG)
{
// column is a long column and the remaining portion does
// not fit on the current page.
longColumnDone = false;
// it's a portion of a long column, and there is more to
// write. Reserve enough room for the overflow pointer, then
// write as much data as we can, leaving an extra 2 bytes
// for the overflow field header.
fieldDataLength =
estimatedMaxDataSize - OVERFLOW_POINTER_SIZE - 2;
fieldStatus =
StoredFieldHeader.setFixed(fieldStatus, true);
headerLength =
StoredFieldHeader.write(
logicalDataOut, fieldStatus,
fieldDataLength, slotFieldSize);
bufferedIn.putBuf(logicalDataOut, fieldDataLength);
// now, we need to adjust the buffer: move the unread
// bytes to the beginning and position the cursor correctly,
// so, next time around, we can read more into the buffer.
int remainingBytes = bufferedIn.available();
// move the unread bytes to the beginning of the byteHolder.
int bytesShifted = bufferedIn.shiftToFront();
}
else
{
// column not a long column and does not fit on page.
int delta = maxFieldSize - bufferLen + 1;
if (delta > 0)
bufferLen += bufferedIn.fillBuf(delta);
fieldDataLength = bufferLen;
// the data will not fit on this page; make sure the new
// input stream is passed back to the upper layer...
column = (Object) bufferedIn;
}
}
}
else if ( columnFlag == COLUMN_CREATE_NULL )
{
//
// This block handles the case when a couple columns have been added
// recently and now one of the later columns is being updated. Newly added columns
// which appear in the row before the updated column don't actually have
// any values yet. We stuff NULLs into those newly added columns here.
// This fixes DERBY-5679.
//
fieldStatus = StoredFieldHeader.setNull(fieldStatus, true);
// header is written with 0 length here.
headerLength =
StoredFieldHeader.write(
logicalDataOut, fieldStatus,
fieldDataLength, slotFieldSize);
}
else if (column instanceof DataValueDescriptor)
{
DataValueDescriptor sColumn = (DataValueDescriptor) column;
boolean isNull = (columnFlag == COLUMN_CREATE_NULL) || sColumn.isNull();
if (isNull)
{
fieldStatus = StoredFieldHeader.setNull(fieldStatus, true);
}
// header is written with 0 length here.
headerLength =
StoredFieldHeader.write(
logicalDataOut, fieldStatus,
fieldDataLength, slotFieldSize);
if (!isNull)
{
// write the field data to the log
try
{
columnBeginPosition = out.getPosition();
sColumn.writeExternal(logicalDataOut);
}
catch (IOException ioe)
{
// SQLData error reporting
if (logicalDataOut != null)
{
Exception ne = logicalDataOut.getNestedException();
if (ne != null)
{
if (ne instanceof StandardException)
{
throw (StandardException) ne;
}
}
}
throw StandardException.newException(
SQLState.DATA_STORABLE_WRITE_EXCEPTION, ioe);
}
fieldDataLength =
(out.getPosition() - beginPosition) - headerLength;
}
}
else if (column instanceof RecordHandle)
{
// we are inserting an overflow pointer for a long column
// casted reference to column to avoid repeated casting.
RecordHandle overflowHandle = (RecordHandle) column;
fieldStatus = StoredFieldHeader.setOverflow(fieldStatus, true);
headerLength =
StoredFieldHeader.write(
logicalDataOut, fieldStatus,
fieldDataLength, slotFieldSize);
fieldDataLength +=
CompressedNumber.writeLong(out, overflowHandle.getPageNumber());
fieldDataLength +=
CompressedNumber.writeInt(out, overflowHandle.getId());
}
else
{
// Serializable/Externalizable/Formattable
// all look the same at this point.
// header is written with 0 length here.
headerLength =
StoredFieldHeader.write(
logicalDataOut, fieldStatus,
fieldDataLength, slotFieldSize);
logicalDataOut.writeObject(column);
fieldDataLength =
(out.getPosition() - beginPosition) - headerLength;
}
// calculate the size of the field on page with compressed field header
fieldStatus = StoredFieldHeader.setFixed(fieldStatus, false);
int fieldSizeOnPage =
StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize)
+ fieldDataLength;
userRowSize += fieldDataLength;
boolean fieldIsLong = isLong(fieldSizeOnPage, overflowThreshold);
// Do we have enough space on the page for this field?
if (((spaceAvailable < fieldSizeOnPage) || (fieldIsLong)) &&
(columnFlag != COLUMN_LONG))
{
// Column was not long before getting here and does not fit.
if (fieldIsLong)
{
// long column, and this is the first time we have figured
// out this column is long.
if (!(column instanceof InputStream))
{
// Convert already written object to an InputStream.
ByteArray fieldData =
new ByteArray(
((DynamicByteArrayOutputStream)out).getByteArray(),
(columnBeginPosition), fieldDataLength);
ByteArrayInputStream columnIn =
new ByteArrayInputStream(
fieldData.getArray(), columnBeginPosition,
fieldDataLength);
MemByteHolder byteHolder =
new MemByteHolder(fieldDataLength + 1);
RememberBytesInputStream bufferedIn =
new RememberBytesInputStream(columnIn, byteHolder);
// the data will not fit on this page; make sure the new
// input stream is passed back to the upper layer...
column = bufferedIn;
}
out.setPosition(beginPosition);
// This exception carries the information for the client
// routine to continue inserting the long row on multiple
// pages.
LongColumnException lce = new LongColumnException();
lce.setColumn(column);
throw lce;
}
else
{
// Column does not fit on this page, but it isn't a long column.
out.setPosition(beginPosition);
return(spaceAvailable);
}
}
// Now we go back to update the fieldDataLength in the field header
out.setPosition(beginPosition);
// slotFieldSize is set based on the pageSize.
// We are borrowing this to set the size of our fieldDataLength.
fieldStatus = StoredFieldHeader.setFixed(fieldStatus, true);
headerLength = StoredFieldHeader.write(
out, fieldStatus, fieldDataLength, slotFieldSize);
// set position to the end of the field
out.setPosition(beginPosition + fieldDataLength + headerLength);
spaceAvailable -= fieldSizeOnPage;
// YYZ: revisit
if (columnFlag == COLUMN_LONG)
{
// if we are logging a long column, we don't care how much space
// is left on the page, instead, we care whether we are done with
// the column or not. So, here, we want to return 1 if we are
// not done, and return -1 if we are done.
// If logColumn returns -1, that flag is returned all the way to
// BasePage.insertLongColumn to signal end of loop.
if (longColumnDone)
return -1;
else
return 1;
} else
{
return (spaceAvailable);
}
}
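/*
* A minimal sketch of the COLUMN_LONG return protocol described above
* (hypothetical caller; the real driver lives in
* BasePage.insertLongColumn): 1 means "a segment was written and more of
* the column remains", -1 means "the column is fully written", and for
* non-long columns the remaining spaceAvailable is returned instead.
*
* int rc;
* do {
*     rc = overflowPage.logLongColumn(slot, recordId, column, out);
*     // on rc == 1: allocate the next overflow page, log an overflow
*     // pointer to it, and continue with the remainder of the column
* } while (rc == 1);
*/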
/**
* Create and write a long row header to the log stream.
* <p>
* Called to log a new overflow record, will check for space available
* and throw an exception if the record header will not fit on the page.
* <p>
*
* @return -1
*
* @param slot slot of record to log.
* @param spaceAvailable space available on the page.
* @param out stream to log the record to.
*
* @exception StandardException Standard exception policy.
**/
private int logOverflowRecord(
int slot,
int spaceAvailable,
DynamicByteArrayOutputStream out)
throws StandardException, IOException
{
setOutputStream(out);
StoredRecordHeader pageRecordHeader = getHeaderAtSlot(slot);
StoredRecordHeader overflow_rh = getOverFlowRecordHeader();
overflow_rh.setOverflowFields(pageRecordHeader);
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(overflow_rh.getOverflowPage() != 0);
}
/*
// #1 situation,
// we want to update the header to just an overflow pointer with no data
// so, update the recordHeader, and we are done...
if (!overflow_rh.isPartialOverflow()) {
// this recordHeader becomes just a overflow pointer,
// we need to make sure that the number of fields is set to 0.
overflow_rh.setNumberFields(0);
spaceAvailable -= overflow_rh.write(logicalDataOut);
if (spaceAvailable < 0) {
throw new NoSpaceOnPage(isOverflowPage());
}
resetOutputStream();
return (-1);
}
*/
// #2 situation,
// we want to only update the recordheader of the page, while leaving
// the data of the record on the page. Just update the header part and
// then arrange for the data part to move to after the new header.
int oldSize = pageRecordHeader.size();
int newSize = overflow_rh.size();
if (oldSize < newSize)
{
// need extra room...
int delta = newSize - oldSize;
if (spaceAvailable < delta)
{
throw new NoSpaceOnPage(isOverflowPage());
}
}
// write the new overflow_rh for the record.
overflow_rh.write(logicalDataOut);
// now, log the data
logRecordDataPortion(
slot, LOG_RECORD_DEFAULT, pageRecordHeader,
(FormatableBitSet) null, logicalDataOut, (RecordHandle)null);
return (-1);
}
private int logOverflowField(
DynamicByteArrayOutputStream out,
int spaceAvailable,
long overflowPage,
int overflowId)
throws StandardException, IOException
{
int fieldStatus =
StoredFieldHeader.setOverflow(
StoredFieldHeader.setInitial(), true);
int fieldSizeOnPage =
CompressedNumber.sizeLong(overflowPage) +
CompressedNumber.sizeInt(overflowId);
int fieldDataLength = fieldSizeOnPage;
fieldSizeOnPage +=
StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize);
// need to check that we have room on the page for this.
spaceAvailable -= fieldSizeOnPage;
// what if there is not enough room for the overflow pointer?
if (spaceAvailable < 0)
throw new NoSpaceOnPage(isOverflowPage());
// write the field to the page:
StoredFieldHeader.write(
logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
CompressedNumber.writeLong(out, overflowPage);
CompressedNumber.writeInt(out, overflowId);
// return the available bytes
return(spaceAvailable);
}
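/*
* Back-of-the-envelope sizing for the overflow pointer logged above,
* assuming the compressed encoding stores a long in at most 8 bytes and
* an int in at most 4: the pointer data is at most 8 + 4 = 12 bytes,
* plus its field header. This matches the reservation made in logColumn()
* when a long column is split (OVERFLOW_POINTER_SIZE plus 2 bytes for
* the overflow field header).
*/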
/**
* Log a record to the ObjectOutput stream.
* <p>
* Write out the complete on-page record to the store stream. Data is
* preceded by a compressed int that gives the length of the following
* data.
*
* @exception StandardException Standard Derby error policy
* @exception IOException on error writing to log stream.
*
* @see BasePage#logRecord
**/
public void logRecord(
int slot,
int flag,
int recordId,
FormatableBitSet validColumns,
OutputStream out,
RecordHandle headRowHandle)
throws StandardException, IOException
{
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
if (recordId != recordHeader.getId())
{
// the record is being logged under a different identifier,
// write it out with the correct identifier
StoredRecordHeader newRecordHeader =
new StoredRecordHeader(recordHeader);
newRecordHeader.setId(recordId);
newRecordHeader.write(out);
newRecordHeader = null;
}
else
{
// write the original record header
recordHeader.write(out);
}
logRecordDataPortion(
slot, flag, recordHeader, validColumns, out, headRowHandle);
}
private void logRecordDataPortion(
int slot,
int flag,
StoredRecordHeader recordHeader,
FormatableBitSet validColumns,
OutputStream out,
RecordHandle headRowHandle)
throws StandardException, IOException
{
int offset = getRecordOffset(slot);
// now skip over the original header before writing the data
int oldHeaderLength = recordHeader.size();
offset += oldHeaderLength;
// write out the record data (FH+data+...) from the page data
int startField = recordHeader.getFirstField();
int endField = startField + recordHeader.getNumberFields();
int validColumnsSize = (validColumns == null) ? 0 : validColumns.getLength();
for (int fieldId = startField; fieldId < endField; fieldId++) {
rawDataIn.setPosition(offset);
// get the field header information from the page
int fieldStatus = StoredFieldHeader.readStatus(rawDataIn);
int fieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, fieldStatus, slotFieldSize);
// see if this field needs to be logged
// no need to write the data portion if the log is getting written
// for purges, unless the field is an overflow pointer for a long column.
if (((validColumns != null) && !(validColumnsSize > fieldId && validColumns.isSet(fieldId))) ||
((flag & BasePage.LOG_RECORD_FOR_PURGE)!=0 && !StoredFieldHeader.isOverflow(fieldStatus)))
{
// nope, move page offset along
offset += StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize);
offset += fieldDataLength;
// write a non-existent field
fieldStatus = StoredFieldHeader.setInitial();
fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
StoredFieldHeader.write(out, fieldStatus, 0, slotFieldSize);
continue;
}
// If this field is to be updated, and it points to a long column
// chain, the entire long column chain will be orphaned after the
// update operation. Therefore, need to queue up a post commit
// work to reclaim the long column chain. We cannot do any clean
// up in this transaction now because we are underneath a log
// action and cannot interrupt the transaction log buffer.
// HeadRowHandle may be null if updateAtSlot is called to update a
// non-head row piece. In that case, don't do anything.
// If temp container, don't do anything.
if (((flag & BasePage.LOG_RECORD_FOR_UPDATE) != 0) &&
headRowHandle != null &&
StoredFieldHeader.isOverflow(fieldStatus) &&
owner.isTemporaryContainer() == false)
{
int saveOffset = rawDataIn.getPosition(); // remember the page offset
long overflowPage = CompressedNumber.readLong((InputStream) rawDataIn);
int overflowId = CompressedNumber.readInt((InputStream) rawDataIn);
// Remember the time stamp on the first page of the column
// chain. This is to prevent the case where the post commit
// work gets fired twice, in that case, the second time it is
// fired, this overflow page may no longer be part of this row chain
// that is being updated.
Page firstPageOnColumnChain = getOverflowPage(overflowPage);
PageTimeStamp ts = firstPageOnColumnChain.currentTimeStamp();
firstPageOnColumnChain.unlatch();
RawTransaction rxact = (RawTransaction)owner.getTransaction();
ReclaimSpace work =
new ReclaimSpace(ReclaimSpace.COLUMN_CHAIN,
headRowHandle,
fieldId, // long column about to be orphaned by update
overflowPage, // page where the long column starts
overflowId, // record Id of the beginning of the long column
ts,
rxact.getDataFactory(), true);
rxact.addPostCommitWork(work);
rawDataIn.setPosition(saveOffset); // Just to be safe, reset data stream
}
// write the field header for the log
offset += StoredFieldHeader.write(out, fieldStatus, fieldDataLength, slotFieldSize);
if (fieldDataLength != 0) {
// write the actual data
out.write(pageData, offset, fieldDataLength);
offset += fieldDataLength;
}
}
}
/**
Log a field to the ObjectOutput stream.
<P>
Find the field in the record and then write out the complete
field, i.e. header and data.
@exception StandardException Standard Derby error policy
@exception IOException RESOLVE
@see BasePage#logField
*/
public void logField(int slot, int fieldNumber, OutputStream out)
throws StandardException, IOException
{
int offset = getFieldOffset(slot, fieldNumber);
// these reads are always against the page array
ArrayInputStream lrdi = rawDataIn;
// now write out the field we are interested in ...
lrdi.setPosition(offset);
int fieldStatus = StoredFieldHeader.readStatus(lrdi);
int fieldDataLength = StoredFieldHeader.readFieldDataLength(lrdi, fieldStatus, slotFieldSize);
StoredFieldHeader.write(out, fieldStatus, fieldDataLength, slotFieldSize);
if (fieldDataLength != 0) {
// and then the data
out.write(pageData, lrdi.getPosition(), fieldDataLength);
}
}
/*
** Overridden methods of BasePage
*/
/**
Override insertAtSlot to provide long row support.
@exception StandardException Standard Derby error policy
*/
public RecordHandle insertAtSlot(
int slot,
Object[] row,
FormatableBitSet validColumns,
LogicalUndo undo,
byte insertFlag,
int overflowThreshold)
throws StandardException
{
try {
return super.insertAtSlot(slot, row, validColumns, undo, insertFlag, overflowThreshold);
} catch (NoSpaceOnPage nsop) {
// The superclass already handles the case of an insert that allows
// overflow. If we get here, we know that the insert should not allow
// overflow. Possible causes:
// 1. insert to an empty page, row will never fit (i.e. a long row)
// 2. insert to original page
// In either case, return null to indicate the insert cannot be
// accepted here.
return null;
}
}
/**
Update field at specified slot
@exception StandardException Standard Derby error policy
*/
public RecordHandle updateFieldAtSlot(
int slot,
int fieldId,
Object newValue,
LogicalUndo undo)
throws StandardException
{
try {
return super.updateFieldAtSlot(slot, fieldId, newValue, undo);
} catch (NoSpaceOnPage nsop) {
// empty page apart from the record
if (slotsInUse == 1)
{
throw StandardException.newException(
SQLState.DATA_NO_SPACE_FOR_RECORD);
}
throw StandardException.newException(
SQLState.DATA_NO_SPACE_FOR_RECORD);
/*
// djd if (isOverflowPage()) {
}
return XXX;
*/
}
}
/**
Get the number of fields on the row at slot
@exception StandardException Standard Derby error policy
*/
public int fetchNumFieldsAtSlot(int slot) throws StandardException
{
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
if (!recordHeader.hasOverflow())
return super.fetchNumFieldsAtSlot(slot);
BasePage overflowPage = getOverflowPage(recordHeader.getOverflowPage());
int count = overflowPage.fetchNumFieldsAtSlot(getOverflowSlot(overflowPage, recordHeader));
overflowPage.unlatch();
return count;
}
/**
* Move record to a page toward the beginning of the file.
* <p>
* As part of compressing the table records need to be moved from the
* end of the file toward the beginning of the file. Only the
* contiguous set of free pages at the very end of the file can
* be given back to the OS. This call is used to purge the row from
* the current page, insert it into a previous page, and return the
* new row location.
* <p>
* The interface is optimized to work on a number of rows at a time,
* optimally processing all rows on the page at once. The call will
* process either all rows on the page, or the number of slots in the
* input arrays - whichever is smaller.
* <B>Locking Policy</B>
* <P>
* MUST be called with table locked, no locks are requested. Because
* it is called with table locks the call will go ahead and purge any
* row which is marked deleted. It will also use purge rather than
* delete to remove the old row after it moves it to a new page. This
* is ok since the table lock ensures that no other transaction will
* use space on the table before this transaction commits.
*
* <BR>
* A page latch on the new page will be requested and released.
*
* @param slot slot of original row to move.
* @param row a row template to hold all columns of row.
* @param old_handle An array to be filled in by the call with the
* old handles of all rows moved.
* @param new_handle An array to be filled in by the call with the
* new handles of all rows moved.
*
* @return the number of rows processed.
*
* @exception StandardException Standard Derby error policy
*
**/
public int moveRecordForCompressAtSlot(
int slot,
Object[] row,
RecordHandle[] old_handle,
RecordHandle[] new_handle)
throws StandardException
{
long src_pageno = getPageNumber();
try
{
fetchFromSlot(
null,
slot,
row,
(FetchDescriptor) null, // all columns retrieved
false);
int row_size = getRecordPortionLength(slot);
int record_id = getHeaderAtSlot(slot).getId();
// first see if row will fit on current page being used to insert
StoredPage dest_page =
(StoredPage) owner.getPageForCompress(0, src_pageno);
if (dest_page != null)
{
if ((dest_page.getPageNumber() >= getPageNumber()) ||
(!dest_page.spaceForCopy(row_size, record_id)))
{
// page won't work
dest_page.unlatch();
dest_page = null;
}
}
if (dest_page == null)
{
// last page did not work, try unfilled page
dest_page = (StoredPage)
owner.getPageForCompress(
ContainerHandle.GET_PAGE_UNFILLED, src_pageno);
if (dest_page != null)
{
if ((dest_page.getPageNumber() >= getPageNumber()) ||
(!dest_page.spaceForCopy(row_size, record_id)))
{
// page won't work
dest_page.unlatch();
dest_page = null;
}
}
}
if (dest_page == null)
{
// last and unfilled page did not work, try getting a free page
dest_page = (StoredPage) owner.addPage();
if ((dest_page.getPageNumber() >= getPageNumber()) ||
(!dest_page.spaceForCopy(row_size, record_id)))
{
// The only time a new page might not have enough space is
// if the source row fills or almost fills a page by itself
// and has a record id that is smaller than the record id
// will be on the destination page, such that the increase
// in space needed to store the record id makes the row no
// longer fit. Record id's are stored on the page in a
// compressed format such that depending on the value they
// may store in 1, 2, or 4 bytes, thus the destination page
// may need an additional 1, 2 or 3 bytes
// depending on the source and destination row id's.
// Because of record header overhead this can only happen
// if there is just one row on a page. For now just going
// to give up on moving this row. Future work could
// improve the algorithm to find a page with an equal or
// smaller stored record id in this case.
owner.removePage(dest_page);
dest_page = null;
}
}
if (dest_page != null)
{
int dest_slot = dest_page.recordCount();
old_handle[0] = getRecordHandleAtSlot(slot);
copyAndPurge(dest_page, slot, 1, dest_slot);
new_handle[0] = dest_page.getRecordHandleAtSlot(dest_slot);
dest_page.unlatch();
return(1);
}
else
{
return(0);
}
}
catch (IOException ioe)
{
throw StandardException.newException(
SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
}
}
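/*
* Illustration of the record id caveat above, assuming the usual
* compressed int encoding (small ids stored in 1 byte, medium in 2,
* large in 4): a source row whose id compressed to 1 byte may be
* assigned a destination id that needs 4 bytes, so a row that exactly
* filled its old page can come up as much as 3 bytes short even on a
* brand new page. Because of record header overhead this only happens
* when the row sits alone on its page, and the code above simply gives
* up on moving such a row.
*/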
/*
* methods that are called underneath a page action
*/
/*
* update page version and instance due to actions by a log record
*/
public void logAction(LogInstant instant) throws StandardException
{
if (SanityManager.DEBUG) {
SanityManager.ASSERT(isLatched(),
"logAction() executed on an unlatched page.");
}
if (rawDataOut == null)
createOutStreams();
if (!isActuallyDirty()) {
// if this is not an overflow page and the page is valid, set the
// initial row count.
if (!isOverflowPage() && ((getPageStatus() & VALID_PAGE) != 0)) {
initialRowCount = internalNonDeletedRecordCount();
} else
initialRowCount = 0;
}
setDirty();
bumpPageVersion();
updateLastLogInstant(instant);
}
/* clean the page for first use or reuse */
private void cleanPage()
{
setDirty();
// set pageData to all nulls
clearSection(0, getPageSize());
slotsInUse = 0;
deletedRowCount = 0;
headerOutOfDate = true; // headerOutOfDate must be set after setDirty
// because isDirty may be called unlatched
clearAllSpace();
}
/**
Initialize the page.
If reuse, then
clean up any in-memory or on-disk structures to ready the page for reuse.
This is not only reusing the page buffer, but reusing a free page
which may or may not have been cleaned up by the client of raw store when it
was deallocated.
@exception StandardException Derby Standard Error Policy
*/
public void initPage(LogInstant instant, byte status, int recordId,
boolean overflow, boolean reuse)
throws StandardException
{
// log action at the end after the page is updated with all the
// pertinent information
logAction(instant);
if (reuse)
{
cleanPage();
super.cleanPageForReuse();
}
// if not reuse, createPage already called cleanPage
headerOutOfDate = true; // headerOutOfDate must be set after setDirty
// because isDirty may be called unlatched
setPageStatus(status);
isOverflowPage = overflow;
nextId = recordId;
}
/**
Set page status
@exception StandardException Derby Standard Error Policy
*/
public void setPageStatus(LogInstant instant, byte status)
throws StandardException
{
logAction(instant);
headerOutOfDate = true; // headerOutOfDate must be set after setDirty
// because isDirty may be called unlatched
setPageStatus(status);
}
/**
Set the row reserved space.
@exception StandardException Derby Standard Error Policy
*/
public void setReservedSpace(LogInstant instant, int slot, int value)
throws StandardException, IOException
{
logAction(instant);
headerOutOfDate = true; // headerOutOfDate must be set after setDirty
// because isDirty may be called unlatched
int delta = value - getReservedCount(slot);
if (SanityManager.DEBUG) {
SanityManager.ASSERT(delta <= freeSpace,
"Cannot grow reserved space because there is not enough free space on the page");
SanityManager.ASSERT(delta != 0,
"Set Reserved Space called to set identical value");
if (value < 0)
SanityManager.THROWASSERT(
"Cannot set reserved space to value " + value);
}
// Find the end of the record that we are about to add or subtract from
// the reserved space.
int nextRecordOffset = getRecordOffset(slot) + getTotalSpace(slot);
if (delta > 0) {
// Growing - hopefully during a RRR restore
expandPage(nextRecordOffset, delta);
} else {
// shrinking, delta is < 0
shrinkPage(nextRecordOffset, -delta);
}
// Lastly, update the reserved space count in the slot.
rawDataOut.setPosition(getSlotOffset(slot) + (2*slotFieldSize));
if (slotFieldSize == SMALL_SLOT_SIZE)
logicalDataOut.writeShort(value);
else
logicalDataOut.writeInt(value);
}
/**
Store a record at the given slot.
@exception StandardException Standard Derby error policy
@exception IOException RESOLVE
*/
public void storeRecord(LogInstant instant, int slot, boolean insert, ObjectInput in)
throws StandardException, IOException
{
logAction(instant);
if (insert)
storeRecordForInsert(slot, in);
else
storeRecordForUpdate(slot, in);
}
private void storeRecordForInsert(int slot, ObjectInput in)
throws StandardException, IOException
{
StoredRecordHeader recordHeader = shiftUp(slot);
if (recordHeader == null) {
recordHeader = new StoredRecordHeader();
setHeaderAtSlot(slot, recordHeader);
}
bumpRecordCount(1);
// recordHeader represents the new version of the record header.
recordHeader.read(in);
// the record is already marked deleted; we need to bump the deletedRowCount
if (recordHeader.isDeleted()) {
deletedRowCount++;
headerOutOfDate = true;
}
// during a rollforward insert, recordId == nextId
// during a rollback of purge, recordId < nextId
if (nextId <= recordHeader.getId())
nextId = recordHeader.getId()+1;
int recordOffset = firstFreeByte;
int offset = recordOffset;
// write each field out to the page
int numberFields = recordHeader.getNumberFields();
rawDataOut.setPosition(offset);
offset += recordHeader.write(rawDataOut);
int userData = 0;
for (int i = 0; i < numberFields; i++) {
// get the field header information, the input stream came from the log
int newFieldStatus = StoredFieldHeader.readStatus(in);
int newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);
newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);
rawDataOut.setPosition(offset);
offset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);
if (newFieldDataLength != 0) {
in.readFully(pageData, offset, newFieldDataLength);
offset += newFieldDataLength;
userData += newFieldDataLength;
}
}
int dataWritten = offset - firstFreeByte;
freeSpace -= dataWritten;
firstFreeByte += dataWritten;
int reservedSpace = 0;
if (minimumRecordSize > 0) {
// make sure we reserve the minimumRecordSize for the user data
// portion of the record excluding the space we took on recordHeader
// and fieldHeaders.
if (userData < minimumRecordSize) {
reservedSpace = minimumRecordSize - userData;
freeSpace -= reservedSpace;
firstFreeByte += reservedSpace;
}
}
if (isOverflowPage())
{
// The total length of the row including the row header, field
// headers, user data, and unused reserve space must be at least
// as big as the worst case overflow row pointer. This is so that
// it is always possible to do an expanding update on a row piece that
// in the worst case results in just using the existing space to
// put in an overflow pointer to another row segment on some other
// page.
int additional_space_needed =
StoredRecordHeader.MAX_OVERFLOW_ONLY_REC_SIZE -
(dataWritten + reservedSpace);
if (additional_space_needed > 0)
{
// need to reserve more space for the row to handle worst case
// update of the row to an overflow row piece.
freeSpace -= additional_space_needed;
firstFreeByte += additional_space_needed;
reservedSpace += additional_space_needed;
}
}
// update the slot table
addSlotEntry(slot, recordOffset, dataWritten, reservedSpace);
if (SanityManager.DEBUG)
{
if ((freeSpace < 0) ||
(firstFreeByte > getSlotOffset(slotsInUse - 1)) ||
((firstFreeByte + freeSpace) != getSlotOffset(slotsInUse - 1)))
{
SanityManager.THROWASSERT(
" inconsistency in space management during insert: " +
" slot = " + slot +
" getSlotOffset(slot) = " + getSlotOffset(slot) +
" dataWritten = " + dataWritten +
" freeSpace = " + freeSpace +
" firstFreeByte = " + firstFreeByte +
" page = " + this);
}
}
if ((firstFreeByte > getSlotOffset(slot)) || (freeSpace < 0))
{
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, getPageId()));
}
}
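/*
* Worked example of the reserve computation above, with illustrative
* numbers: if minimumRecordSize = 16 and the user data portion of the
* inserted record (field data only, excluding record and field headers)
* is 10 bytes, the insert reserves 16 - 10 = 6 extra bytes behind the
* record so a later expanding update can grow in place. On overflow
* pages the reserve is then padded further, so the piece can always be
* replaced by a worst-case overflow pointer
* (StoredRecordHeader.MAX_OVERFLOW_ONLY_REC_SIZE).
*/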
private void storeRecordForUpdate(int slot, ObjectInput in)
throws StandardException, IOException
{
// set up to read the in-memory record header back from the record
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
StoredRecordHeader newRecorderHeader = new StoredRecordHeader();
// newRecorderHeader represents the new version of the record header.
newRecorderHeader.read(in);
int oldFieldCount = recordHeader.getNumberFields();
int newFieldCount = newRecorderHeader.getNumberFields();
int startField = recordHeader.getFirstField();
if (SanityManager.DEBUG) {
if (startField != newRecorderHeader.getFirstField())
SanityManager.THROWASSERT("First field changed from " + startField + " to " + newRecorderHeader.getFirstField());
}
// See if the number of fields shrunk; if so, clear out the old data.
// We do this first to stop shuffling about the fields that are going to
// be deleted during the update of the earlier fields. This case occurs
// on an update that changes the row to be overflowed.
if (newFieldCount < oldFieldCount) {
int oldDataStartingOffset = getFieldOffset(slot, startField + newFieldCount);
// calculate the length of the to be deleted fields
int deleteLength = getRecordOffset(slot) + getRecordPortionLength(slot) - oldDataStartingOffset;
// we are updating to zero bytes!
updateRecordPortionLength(slot, -(deleteLength), deleteLength);
}
// write each field out to the page
int startingOffset = getRecordOffset(slot);
int newOffset = startingOffset;
int oldOffset = startingOffset;
// see which field gets to use the reserve space
int reservedSpaceFieldId = newFieldCount < oldFieldCount ?
newFieldCount - 1 : oldFieldCount - 1;
reservedSpaceFieldId += startField;
// the new data that needs to be written at newOffset, but can't be until
// unusedSpace >= newDataToWrite.length (allowing for the header)
DynamicByteArrayOutputStream newDataToWrite = null;
rawDataOut.setPosition(newOffset);
// write the record header, which may change in size
int oldLength = recordHeader.size();
int newLength = newRecorderHeader.size();
int unusedSpace = oldLength; // the unused space at newOffset
// no fields, so we can eat into the reserve space
if (reservedSpaceFieldId < startField) // no fields
unusedSpace += getReservedCount(slot);
if (unusedSpace >= newLength) {
newRecorderHeader.write(rawDataOut);
newOffset += newLength;
unusedSpace -= newLength;
} else {
newDataToWrite = new DynamicByteArrayOutputStream(getPageSize());
newRecorderHeader.write(newDataToWrite);
}
oldOffset += oldLength;
int recordDelta = (newLength - oldLength);
int oldFieldStatus = 0;
int oldFieldDataLength = 0;
int newFieldStatus = 0;
int newFieldDataLength = 0;
int oldEndFieldExclusive = startField + oldFieldCount;
int newEndFieldExclusive = startField + newFieldCount;
for (int fieldId = startField; fieldId < newEndFieldExclusive; fieldId++) {
int oldFieldLength = 0;
if (fieldId < oldEndFieldExclusive) {
rawDataIn.setPosition(oldOffset);
oldFieldStatus = StoredFieldHeader.readStatus(rawDataIn);
oldFieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, oldFieldStatus, slotFieldSize);
oldFieldLength = StoredFieldHeader.size(oldFieldStatus, oldFieldDataLength, slotFieldSize)
+ oldFieldDataLength;
}
newFieldStatus = StoredFieldHeader.readStatus(in);
newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);
// if no value was provided on an update of a field then use the old value,
// unless the old field didn't exist.
if (StoredFieldHeader.isNonexistent(newFieldStatus) && (fieldId < oldEndFieldExclusive)) {
// may need to move this old field ...
if ((newDataToWrite == null) || (newDataToWrite.getUsed() == 0)) {
// there is no old data to catch up on; is the data at
// the correct position already?
if (newOffset == oldOffset) {
// yes, nothing to do!!
if (SanityManager.DEBUG) {
if (unusedSpace != 0)
SanityManager.THROWASSERT("Unused space is out of sync, expect 0 got " + unusedSpace);
}
} else {
// need to shift the field left
if (SanityManager.DEBUG) {
if (unusedSpace != (oldOffset - newOffset))
SanityManager.THROWASSERT(
"Unused space is out of sync expected " + (oldOffset - newOffset) + " got " + unusedSpace);
}
System.arraycopy(pageData, oldOffset, pageData, newOffset, oldFieldLength);
}
newOffset += oldFieldLength;
// last field to be updated can eat into the reserve space
if (fieldId == reservedSpaceFieldId)
unusedSpace += getReservedCount(slot);
} else {
// there is data still to be written, just append this field to the
// saved data
int position = newDataToWrite.getPosition();
newDataToWrite.setPosition(position + oldFieldLength);
System.arraycopy(pageData, oldOffset,
newDataToWrite.getByteArray(), position, oldFieldLength);
unusedSpace += oldFieldLength;
// last field to be updated can eat into the reserve space
if (fieldId == reservedSpaceFieldId)
unusedSpace += getReservedCount(slot);
// attempt to write out some of what we have in the side buffer now.
int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
newOffset += copyLength;
unusedSpace -= copyLength;
}
oldOffset += oldFieldLength;
continue;
}
newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);
int newFieldHeaderLength = StoredFieldHeader.size(newFieldStatus, newFieldDataLength, slotFieldSize);
int newFieldLength = newFieldHeaderLength + newFieldDataLength;
recordDelta += (newFieldLength - oldFieldLength);
// See if we can write this field now
// space available increases by the amount of the old field
unusedSpace += oldFieldLength;
oldOffset += oldFieldLength;
// last field to be updated can eat into the reserve space
if (fieldId == reservedSpaceFieldId)
unusedSpace += getReservedCount(slot);
if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {
// catch up on the old data if possible
int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
newOffset += copyLength;
unusedSpace -= copyLength;
}
if (((newDataToWrite == null) || (newDataToWrite.getUsed() == 0))
&& (unusedSpace >= newFieldHeaderLength)) {
// can fit the header in
rawDataOut.setPosition(newOffset);
newOffset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);
unusedSpace -= newFieldHeaderLength;
if (newFieldDataLength != 0) {
// read as much of the field as possible
int fieldCopy = unusedSpace >= newFieldDataLength ?
newFieldDataLength : unusedSpace;
if (fieldCopy != 0) {
in.readFully(pageData, newOffset, fieldCopy);
newOffset += fieldCopy;
unusedSpace -= fieldCopy;
}
fieldCopy = newFieldDataLength - fieldCopy;
if (fieldCopy != 0) {
if (newDataToWrite == null)
newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);
// append the remaining portion of the field to the saved data
int position = newDataToWrite.getPosition();
newDataToWrite.setPosition(position + fieldCopy);
in.readFully(newDataToWrite.getByteArray(),
position, fieldCopy);
}
}
} else {
// can't fit this header, and therefore the field; append it
// to the buffer.
if (newDataToWrite == null)
newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);
StoredFieldHeader.write(newDataToWrite, newFieldStatus, newFieldDataLength, slotFieldSize);
// save the new field data
if (newFieldDataLength != 0) {
int position = newDataToWrite.getPosition();
newDataToWrite.setPosition(position + newFieldDataLength);
in.readFully(newDataToWrite.getByteArray(),
position, newFieldDataLength);
}
}
}
// at this point there may still be data left in the saved buffer
// but presumably we can't fit it in
int reservedDelta;
if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {
// need to shift the later records down ...
int nextRecordOffset = startingOffset + getTotalSpace(slot);
int spaceRequiredFromFreeSpace = newDataToWrite.getUsed() - (nextRecordOffset - newOffset);
if (SanityManager.DEBUG) {
if (newOffset > nextRecordOffset)
SanityManager.THROWASSERT("data has overwritten next record - offset " + newOffset
+ " next record " + nextRecordOffset);
if ((spaceRequiredFromFreeSpace <= 0) || (spaceRequiredFromFreeSpace > freeSpace))
SanityManager.THROWASSERT("invalid space required " + spaceRequiredFromFreeSpace
+ " newDataToWrite.getUsed() " + newDataToWrite.getUsed()
+ " nextRecordOffset " + nextRecordOffset
+ " newOffset " + newOffset
+ " reservedSpaceFieldId " + reservedSpaceFieldId
+ " startField " + startField
+ " newEndFieldExclusive " + newEndFieldExclusive
+ " newFieldCount " + newFieldCount
+ " oldFieldCount " + oldFieldCount
+ " slot " + slot
+ " freeSpace " + freeSpace
+ " unusedSpace " + unusedSpace
+ " page " + getPageId());
if ((getReservedCount(slot) + spaceRequiredFromFreeSpace) != recordDelta)
SanityManager.THROWASSERT("mismatch on count: reserved " + getReservedCount(slot) +
"free space take " + spaceRequiredFromFreeSpace +
"record delta " + recordDelta);
}
if (spaceRequiredFromFreeSpace > freeSpace) {
throw dataFactory.markCorrupt(
StandardException.newException(
SQLState.DATA_CORRUPT_PAGE, getPageId()));
}
// see if this is the last record on the page, if so a simple
            // shift of the remaining fields will suffice...
expandPage(nextRecordOffset, spaceRequiredFromFreeSpace);
unusedSpace += spaceRequiredFromFreeSpace;
moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);
reservedDelta = -1 * getReservedCount(slot);
if (SanityManager.DEBUG) {
if (newDataToWrite.getUsed() != 0)
SanityManager.THROWASSERT("data is left in save buffer ... " + newDataToWrite.getUsed());
}
} else {
reservedDelta = -1 * recordDelta;
}
// now reset the length in the slot entry
updateRecordPortionLength(slot, recordDelta, reservedDelta);
setHeaderAtSlot(slot, newRecorderHeader);
}
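    /*
        Note on the heuristic below: moveSavedDataToPage copies only when the
        hole on the page exceeds half of the saved buffer, presumably to avoid
        many small arraycopy/discardLeft cycles for little gain.
        Hypothetical worked example: savedData.getUsed() == 100 and
        unusedSpace == 60 -> 60 > 50, so 60 bytes are copied and 40 stay
        buffered; with unusedSpace == 40 -> 40 <= 50, nothing is copied and
        0 is returned.
    */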
private int moveSavedDataToPage(DynamicByteArrayOutputStream savedData, int unusedSpace, int pageOffset) {
// catch up on the old data if possible
if (unusedSpace > (savedData.getUsed() / 2)) {
// copy onto the page
int copyLength = unusedSpace <= savedData.getUsed() ?
unusedSpace : savedData.getUsed();
System.arraycopy(savedData.getByteArray(), 0,
pageData, pageOffset, copyLength);
// fix up the saved buffer
savedData.discardLeft(copyLength);
return copyLength;
}
return 0;
}
/**
Create the space to update a portion of a record.
This method ensures there is enough room to replace the
old data of length oldLength at the given offset, with the new data of length
    newLength. This method does not put any new data on the page; it moves old data around
    and zeros out any old data when newLength < oldLength. This method does
    update the information in the slot table.
    The passed-in offset is the correct place to put the data
    when this method returns, i.e. it only moves data that
    has an offset greater than this.
@exception StandardException Standard Derby error policy
@exception IOException RESOLVE
*/
private void createSpaceForUpdate(int slot, int offset, int oldLength, int newLength)
throws StandardException, IOException
{
// now replace the old data with the new data
if (newLength <= oldLength) {
// now shift the remaining data down ...
int diffLength = oldLength - newLength;
// real easy
if (diffLength == 0)
return;
            // shift the remaining fields down
int remainingLength =
shiftRemainingData(slot, offset, oldLength, newLength);
// clear the now unused data on the page
clearSection(offset + newLength + remainingLength, diffLength);
if (SanityManager.DEBUG) {
if ((getRecordPortionLength(slot) - diffLength) !=
((offset - getRecordOffset(slot)) + newLength +
remainingLength))
{
SanityManager.THROWASSERT(
" Slot table trying to update record length " +
(getRecordPortionLength(slot) - diffLength) +
" that is not the same as what it actully is");
}
}
// now reset the length in the slot entry, increase the reserved space
updateRecordPortionLength(slot, -(diffLength), diffLength);
return;
}
// tough case, the new field is bigger than the old field ...
// first attempt, see how much space is in row private reserved space
int extraLength = newLength - oldLength;
// extraLength is always greater than 0.
if (SanityManager.DEBUG)
SanityManager.ASSERT(extraLength > 0);
int recordReservedSpace = getReservedCount(slot);
int reservedDelta = 0;
int spaceRequiredFromFreeSpace = extraLength - recordReservedSpace;
if (SanityManager.DEBUG) {
if (spaceRequiredFromFreeSpace > freeSpace)
SanityManager.THROWASSERT(
"spaceRequiredFromFreeSpace = " +
spaceRequiredFromFreeSpace +
";freeSpace = " + freeSpace +
";newLength = " + newLength +
";oldLength = " + oldLength +
";\npage= " + this);
}
if (spaceRequiredFromFreeSpace > 0) {
// The update requires all the reserved space + some from free space
int nextRecordOffset = getRecordOffset(slot) + getTotalSpace(slot);
// see if this is the last record on the page, if so a simple
            // shift of the remaining fields will suffice...
expandPage(nextRecordOffset, spaceRequiredFromFreeSpace);
// we used all the reserved space we have, set it to 0
reservedDelta = -(recordReservedSpace);
} else {
            // the update uses some amount of space from the row's reserved space
// set reserved Delta to account for amount of reserved space used.
reservedDelta = -(extraLength);
}
// just shift all remaining fields up
int remainingLength = shiftRemainingData(slot, offset, oldLength, newLength);
if (SanityManager.DEBUG) {
if ((extraLength + reservedDelta) < 0)
SanityManager.THROWASSERT(
"total space the record occupies cannot shrink, extraLength = "
+ extraLength + " reservedDelta = " + reservedDelta
+ " spacerequired = " + spaceRequiredFromFreeSpace
+ " recordReservedSpace = " + recordReservedSpace);
}
// now reset the length in the slot entry
updateRecordPortionLength(slot, extraLength, reservedDelta);
}
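    /*
        Typical in-place field update sequence (as performed by storeField
        below):

          int offset = getFieldOffset(slot, fieldNumber);     // locate old field
          // read old header -> oldFieldLength; read new header -> newFieldLength
          createSpaceForUpdate(slot, offset, oldFieldLength, newFieldLength);
          rawDataOut.setPosition(offset);
          // write the new field header, then copy the new field data in place
    */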
/**
        storeField - replace the field at fieldNumber of the record in the
        given slot with the new field value read from the input stream
        (typically the log).
@exception StandardException Standard Derby error policy
@exception IOException RESOLVE
*/
public void storeField(LogInstant instant, int slot, int fieldNumber, ObjectInput in)
throws StandardException, IOException
{
logAction(instant);
int offset = getFieldOffset(slot, fieldNumber);
// get the field header information, the input stream came from the log
ArrayInputStream lrdi = rawDataIn;
lrdi.setPosition(offset);
int oldFieldStatus = StoredFieldHeader.readStatus(lrdi);
int oldFieldDataLength = StoredFieldHeader.readFieldDataLength(lrdi, oldFieldStatus, slotFieldSize);
int newFieldStatus = StoredFieldHeader.readStatus(in);
int newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);
newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);
int oldFieldLength = StoredFieldHeader.size(oldFieldStatus, oldFieldDataLength, slotFieldSize) + oldFieldDataLength;
int newFieldLength = StoredFieldHeader.size(newFieldStatus, newFieldDataLength, slotFieldSize) + newFieldDataLength;
createSpaceForUpdate(slot, offset, oldFieldLength, newFieldLength);
rawDataOut.setPosition(offset);
offset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);
if (newFieldDataLength != 0)
in.readFully(pageData, offset, newFieldDataLength);
}
/**
reserveSpaceForSlot
        This method will reserve at least the specified "spaceToReserve" bytes for the record
in the slot.
@exception StandardException Standard Derby error policy
@exception IOException RESOLVE
*/
public void reserveSpaceForSlot(LogInstant instant, int slot, int spaceToReserve)
throws StandardException, IOException
{
logAction(instant);
int extraSpace = spaceToReserve - getReservedCount(slot);
if (extraSpace <= 0)
return;
if (freeSpace < extraSpace)
throw new NoSpaceOnPage(isOverflowPage());
// need to shift the later records down ...
int startingOffset = getRecordOffset(slot);
int nextRecordOffset = startingOffset + getTotalSpace(slot);
// see if this is the last record on the page, if so a simple
        // shift of the remaining fields will suffice...
expandPage(nextRecordOffset, extraSpace);
setSlotEntry(slot, startingOffset, getRecordPortionLength(slot), spaceToReserve);
}
/**
Skip a field header and its data on the given stream.
@exception IOException corrupt stream
*/
public void skipField(ObjectInput in) throws IOException {
int fieldStatus = StoredFieldHeader.readStatus(in);
int fieldDataLength = StoredFieldHeader.readFieldDataLength(in, fieldStatus, slotFieldSize);
if (fieldDataLength != 0) {
DataInputUtil.skipFully(in, fieldDataLength);
}
}
public void skipRecord(ObjectInput in) throws IOException
{
StoredRecordHeader recordHeader = new StoredRecordHeader();
recordHeader.read(in);
for (int i = recordHeader.getNumberFields(); i > 0; i--) {
skipField(in);
}
}
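    /*
        Hypothetical worked example for shiftRemainingData below: a record at
        offset 100 with portion length 50 has 10 bytes replaced by 14 bytes at
        offset 120. remainingLength = (100 + 50) - (120 + 10) = 20, so the 20
        trailing bytes are copied from offset 130 to offset 134 and 20 is
        returned.
    */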
/**
Shift data within a record to account for an update.
    @param offset Offset where the update starts, need not be on a field boundary.
@param oldLength length of the data being replaced
@param newLength length of the data replacing the old data
@return the length of the data in the record after the replaced data.
*/
private int shiftRemainingData(int slot, int offset, int oldLength, int newLength)
throws IOException
{
// length of valid data remaining in the record after the portion that
// is being replaced.
int remainingLength = (getRecordOffset(slot) + getRecordPortionLength(slot)) -
(offset + oldLength);
if (SanityManager.DEBUG) {
if (!(((remainingLength >= 0) &&
(getRecordPortionLength(slot) >= oldLength))))
{
SanityManager.THROWASSERT(
"oldLength = " + oldLength + " newLength = " + newLength +
"remainingLength = " + remainingLength +
" offset = " + offset +
" getRecordOffset(" + slot + ") = " + getRecordOffset(slot)+
" getRecordPortionLength(" + slot + ") = " +
getRecordPortionLength(slot));
}
}
if (remainingLength != 0) {
System.arraycopy(pageData, offset + oldLength,
pageData, offset + newLength, remainingLength);
}
return remainingLength;
}
/**
Set the deleted status
@exception StandardException Standard Derby error policy
@exception IOException RESOLVE
@see BasePage#setDeleteStatus
*/
public void setDeleteStatus(LogInstant instant, int slot, boolean delete)
throws StandardException, IOException
{
logAction(instant);
deletedRowCount += super.setDeleteStatus(slot, delete);
headerOutOfDate = true;
int offset = getRecordOffset(slot);
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
rawDataOut.setPosition(offset);
recordHeader.write(logicalDataOut);
}
/**
get record count without checking for latch
*/
protected int internalDeletedRecordCount()
{
return deletedRowCount;
}
/**
purgeRecord from page. Move following slots up by one.
@exception StandardException Standard Derby error policy
@exception IOException RESOLVE
*/
public void purgeRecord(LogInstant instant, int slot, int recordId)
throws StandardException, IOException
{
logAction(instant);
// if record is marked deleted, reduce deletedRowCount
if (getHeaderAtSlot(slot).isDeleted())
deletedRowCount--;
int startByte = getRecordOffset(slot);
int endByte = startByte + getTotalSpace(slot) - 1;
compressPage(startByte, endByte);
// fix up the on-page slot table
removeSlotEntry(slot);
// fix up the in-memory version
removeAndShiftDown(slot);
}
/*
**
*/
/**
Get the offset of the field header of the given field for
the record in the given slot.
Field number is the absolute number for the complete record, not just this portion.
E.g. if this is a record portion that starts at field 3 and has 6 fields
then the second field on this *page* has field number 4.
*/
private int getFieldOffset(int slot, int fieldNumber) throws IOException
{
// RESOLVE - overflow, needs to be changed
int offset = getRecordOffset(slot);
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
// get the number of fields
int startField = recordHeader.getFirstField();
if (SanityManager.DEBUG) {
int numberFields = recordHeader.getNumberFields();
if ((fieldNumber < startField) || (fieldNumber >= (startField + numberFields)))
SanityManager.THROWASSERT(
"fieldNumber: " + fieldNumber +
" start field: " + startField +
" number of fields " + numberFields);
}
ArrayInputStream lrdi = rawDataIn;
// skip the record header
lrdi.setPosition(offset + recordHeader.size());
// skip any earlier fields ...
for (int i = startField; i < fieldNumber; i++) {
skipField(lrdi);
}
return rawDataIn.getPosition();
}
/*
* Time stamp support - this page supports time stamp
*/
/**
Get a time stamp for this page
@return page time stamp
*/
public PageTimeStamp currentTimeStamp()
{
        // saving the whole key would be overkill
return new PageVersion(getPageNumber(), getPageVersion());
}
/**
        Set the given pageVersion to be the same as what is on this page
@exception StandardException given time stamp is null or is not a time
stamp implementation this page knows how to deal with
*/
public void setTimeStamp(PageTimeStamp ts) throws StandardException
{
if (ts == null)
{
throw StandardException.newException(SQLState.DATA_TIME_STAMP_NULL);
}
if (!(ts instanceof PageVersion))
{
throw StandardException.newException(
SQLState.DATA_TIME_STAMP_ILLEGAL, ts);
}
PageVersion pv = (PageVersion)ts;
pv.setPageNumber(getPageNumber());
pv.setPageVersion(getPageVersion());
}
/**
compare given PageVersion with pageVersion on page
        @param ts the page version gotten from this page via a currentTimeStamp
or setTimeStamp call earlier
@return true if the same
        @exception StandardException given time stamp was not gotten from this page
*/
public boolean equalTimeStamp(PageTimeStamp ts) throws StandardException
{
if (ts == null)
return false;
if (!(ts instanceof PageVersion))
{
throw StandardException.newException(
SQLState.DATA_TIME_STAMP_ILLEGAL, ts);
}
PageVersion pv = (PageVersion)ts;
if (pv.getPageNumber() != getPageNumber())
{
throw StandardException.newException(
SQLState.DATA_TIME_STAMP_ILLEGAL, ts);
}
return (pv.getPageVersion() == getPageVersion());
}
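    /*
        Time stamp protocol sketch: a caller can detect whether the page
        changed between two points in time, e.g.

          PageTimeStamp ts = page.currentTimeStamp();
          // ... release and later relatch the page ...
          if (!page.equalTimeStamp(ts)) {
              // page version changed; state cached from it is stale
          }
    */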
/** debugging, print this page */
public String toString()
{
if (SanityManager.DEBUG)
{
if (SanityManager.DEBUG_ON("DeadlockTrace") || SanityManager.DEBUG_ON("userLockStackTrace"))
return "page = " + getIdentity();
String str = "---------------------------------------------------\n";
str += pageHeaderToString();
// str += slotTableToString(); // print in memory slot table
// now print each row
for (int s = 0; s < slotsInUse; s++)
str += recordToString(s);
//if (SanityManager.DEBUG_ON("dumpPageImage"))
{
str += "---------------------------------------------------\n";
str += pagedataToHexDump(pageData);
str += "---------------------------------------------------\n";
}
return str;
}
else
return null;
}
public String toUncheckedString()
{
if (SanityManager.DEBUG)
{
String str = "---------------------------------------------------\n";
str += pageHeaderToString();
//if (SanityManager.DEBUG_ON("dumpPageImage"))
{
str += "---------------------------------------------------\n";
str += pagedataToHexDump(pageData);
str += "---------------------------------------------------\n";
}
return str;
}
else
return null;
}
/**
* Provide a hex dump of the data in the in memory version of the page.
* <p>
* The output looks like:
*
* 00000000: 4d5a 9000 0300 0000 0400 0000 ffff 0000 MZ..............
* 00000010: b800 0000 0000 0000 4000 0000 0000 0000 ........@.......
* 00000020: 0000 0000 0000 0000 0000 0000 0000 0000 ................
* 00000030: 0000 0000 0000 0000 0000 0000 8000 0000 ................
* 00000040: 0e1f ba0e 00b4 09cd 21b8 014c cd21 5468 ........!..L.!Th
* 00000050: 6973 2070 726f 6772 616d 2063 616e 6e6f is program canno
* 00000060: 7420 6265 2072 756e 2069 6e20 444f 5320 t be run in DOS
* 00000070: 6d6f 6465 2e0d 0a24 0000 0000 0000 0050 mode...$.......P
* 00000080: 4500 004c 0109 008b abfd 3000 0000 0000 E..L......0.....
* 00000090: 0000 00e0 000e 210b 0102 3700 3405 0000 ......!...7.4...
* 000000a0: 8401 0000 6400 0000 6004 0000 1000 0000 ....d...`.......
* 000000b0: 5005 0000 0008 6000 1000 0000 0200 0001 P.....`.........
* 000000c0: 0000 0000 0000 0004 0000 0000 0000 0000 ................
* 000000d0: 9007 0000 0400 0009 a207 0002 0000 0000 ................
* 000000e0: 0010 0000 1000 0000 0010 0000 1000 0000 ................
* 000000f0: 0000 0010 0000 0000 6006 00ef 8100 0000 ........`.......
* 00000100: 5006 00e6 0c00 0000 0007 00d0 0400 0000 P...............
* 00000110: 0000 0000 0000 0000 0000 0000 0000 0000 ................
* 00000120: 1007 00c8 7100 0000 0000 0000 0000 0000 ....q...........
* 00000130: 0000 0000 0000 0000 0000 0000 0000 0000 ................
*
* <p>
*
* @return The string with the hex dump in it.
*
* @param data array of bytes to dump.
**/
private static String pagedataToHexDump(byte[] data)
{
return org.apache.derby.iapi.util.StringUtil.hexDump(data);
}
private String pageHeaderToString()
{
if (SanityManager.DEBUG)
{
return
"page id: " + getIdentity() +
" Overflow: " + isOverflowPage +
" PageVersion: " + getPageVersion() +
" SlotsInUse: " + slotsInUse +
" DeletedRowCount: " + deletedRowCount +
" PageStatus: " + getPageStatus() +
" NextId: " + nextId +
" firstFreeByte: " + firstFreeByte +
" freeSpace: " + freeSpace +
" totalSpace: " + totalSpace +
" spareSpace: " + spareSpace + "%" +
" minimumRecordSize : " + minimumRecordSize +
" PageSize: " + getPageSize() +
"\n";
}
else
{
return null;
}
}
String getPageDumpString()
{
return(
MessageService.getTextMessage(
MessageId.STORE_PAGE_DUMP,
getIdentity(),
isOverflowPage,
getPageVersion(),
slotsInUse,
deletedRowCount,
getPageStatus(),
nextId,
firstFreeByte,
freeSpace,
totalSpace,
spareSpace,
minimumRecordSize,
getPageSize(),
pagedataToHexDump(pageData)));
}
private String recordToString(int slot)
{
if (SanityManager.DEBUG)
{
String str = "";
try
{
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
int offset = getRecordOffset(slot);
int numberFields = recordHeader.getNumberFields();
str = "\nslot " + slot + " offset " + offset + " " +
" recordlen " + getTotalSpace(slot) +
" (" + getRecordPortionLength(slot) +
"," + getReservedCount(slot) + ")"+
recordHeader.toString();
// move offset past record header to begin of first field.
offset += recordHeader.size();
rawDataIn.setPosition(offset);
for (int i = 0; i < numberFields; i++)
{
int fieldStatus = StoredFieldHeader.readStatus(rawDataIn);
int fieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, fieldStatus, slotFieldSize);
if (fieldDataLength < 0)
{
str += "\n\tField " + i + ": offset=" + offset + " null " +
StoredFieldHeader.toDebugString(fieldStatus);
}
else
{
str += "\n\tField " + i + ": offset=" + offset +
" len=" + fieldDataLength + " " +
StoredFieldHeader.toDebugString(fieldStatus);
if (StoredFieldHeader.isOverflow(fieldStatus))
{
// not likely to be a real pointer, this is most
// likely an old column chain where the first field
// is set to overflow even though the second field
// is the overflow pointer
if (i == 0 && fieldDataLength != 3)
{
// figure out where we should go next
offset = rawDataIn.getPosition() + fieldDataLength;
long overflowPage = CompressedNumber.readLong((InputStream) rawDataIn);
int overflowId = CompressedNumber.readInt((InputStream) rawDataIn);
str += "Questionable long column at (" +
overflowPage + "," + overflowId + ")";
rawDataIn.setPosition(offset);
}
else
{
// print the overflow pointer
long overflowPage = CompressedNumber.readLong((InputStream) rawDataIn);
int overflowId = CompressedNumber.readInt((InputStream) rawDataIn);
str += "long column at (" + overflowPage + "," + overflowId + ")";
}
}
else
{
// go to next field
offset = rawDataIn.getPosition() + fieldDataLength;
rawDataIn.setPosition(offset);
}
}
}
str += "\n";
}
catch (IOException ioe)
{
str += "\n ======= ERROR IOException =============\n";
str += ioe.toString();
}
catch (StandardException se)
{
str += "\n ======= ERROR StandardException =============\n";
str += se.toString();
}
return str;
}
else
return null;
}
/*
** Overflow related methods
*/
/**
Get the overflow page for a record that has already overflowed.
@exception StandardException Standard Derby error policy
*/
protected StoredPage getOverflowPage(long pageNumber) throws StandardException
{
StoredPage overflowPage = (StoredPage) owner.getPage(pageNumber);
        if (overflowPage == null) {
            // RESOLVE - a missing (null) overflow page is silently returned
            // to the caller here
        }
// RESOLVE-LR
//if (!overflowPage.isOverflow()) {
// overflowPage.unlatch();
//}
return overflowPage;
}
/**
Get an empty overflow page.
@exception StandardException Standard Derby error policy
*/
protected BasePage getNewOverflowPage() throws StandardException
{
FileContainer myContainer = (FileContainer) containerCache.find(identity.getContainerId());
try {
// add an overflow page
return (BasePage) myContainer.addPage(owner, true);
} finally {
containerCache.release(myContainer);
}
}
/**
Get the overflow slot for a record that has already overflowed.
@exception StandardException Standard Derby error policy
*/
protected static int getOverflowSlot(BasePage overflowPage, StoredRecordHeader recordHeader)
throws StandardException
{
int slot = overflowPage.findRecordById(
recordHeader.getOverflowId(), Page.FIRST_SLOT_NUMBER);
if (slot < 0)
{
throw StandardException.newException(
SQLState.DATA_SLOT_NOT_ON_PAGE);
}
return slot;
}
/**
        Get an overflow page that potentially can handle a new overflowed record.
@exception StandardException Standard Derby error policy
*/
public BasePage getOverflowPageForInsert(
int currentSlot,
Object[] row,
FormatableBitSet validColumns)
throws StandardException
{
return getOverflowPageForInsert(currentSlot, row, validColumns, 0);
}
/**
@exception StandardException Standard Derby error policy
*/
public BasePage getOverflowPageForInsert(
int currentSlot,
Object[] row,
FormatableBitSet validColumns,
int startColumn)
throws StandardException
{
// System.out.println("Top of getOverflowPageForInsert");
// look at all the overflow pages that are in use on this page, up
// to a maximum of 5.
long[] pageList = new long[5];
int pageCount = 0;
long currentOverflowPageNumber = 0;
slotScan:
for (int slot = 0; (slot < slotsInUse) && (pageCount < pageList.length); slot++) {
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
if (!recordHeader.hasOverflow())
continue;
long overflowPageNumber = recordHeader.getOverflowPage();
if (slot == currentSlot) {
currentOverflowPageNumber = overflowPageNumber;
continue;
}
for (int i = 0; i < pageCount; i++) {
if (pageList[i] == overflowPageNumber)
continue slotScan;
}
pageList[pageCount++] = overflowPageNumber;
}
for (int i = 0; i < pageCount; i++) {
long pageNumber = pageList[i];
// don't look at the current overflow page
            // used by this slot, because if the record is already
// overflowed then we reached here because the overflow
// page is full.
if (pageNumber == currentOverflowPageNumber)
continue;
StoredPage overflowPage = null;
int spaceNeeded = 0;
try {
overflowPage = getOverflowPage(pageNumber);
if ( overflowPage.spaceForInsert(row, validColumns,
spaceNeeded, startColumn, 100))
{
// System.out.println("returning used page: " + pageNumber);
return overflowPage;
}
spaceNeeded = ((StoredPage) overflowPage).getCurrentFreeSpace();
overflowPage.unlatch();
overflowPage = null;
} catch (StandardException se) {
if (overflowPage != null) {
overflowPage.unlatch();
overflowPage = null;
}
}
}
// if we get here then we have to allocate a new overflow page
// System.out.println("returning new page: ");
return getNewOverflowPage();
}
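    /*
        Overflow page selection summary (behavior of the method above): up to
        five distinct overflow pages already referenced from this page are
        probed with spaceForInsert; the current slot's own overflow page is
        skipped because reaching this code implies it is full. If none of the
        probed pages has room, a fresh overflow page is allocated.
    */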
/**
Update an already overflowed record.
@param slot Slot of the original record on its original page
@param row new version of the data
@exception StandardException Standard Derby error policy
*/
protected void updateOverflowed(
RawTransaction t,
int slot,
Object[] row,
FormatableBitSet validColumns,
StoredRecordHeader recordHeader)
throws StandardException
{
BasePage overflowPage = getOverflowPage(recordHeader.getOverflowPage());
try {
int overflowSlot = getOverflowSlot(overflowPage, recordHeader);
overflowPage.doUpdateAtSlot(t, overflowSlot, recordHeader.getOverflowId(), row, validColumns);
overflowPage.unlatch();
overflowPage = null;
return;
} finally {
if (overflowPage != null) {
overflowPage.unlatch();
overflowPage = null;
}
}
}
/**
Update a record handle to point to an overflowed record portion.
Note that the record handle need not be the current page.
@exception StandardException Standard Derby error policy
*/
public void updateOverflowDetails(RecordHandle handle, RecordHandle overflowHandle)
throws StandardException
{
long handlePageNumber = handle.getPageNumber();
if (handlePageNumber == getPageNumber()) {
updateOverflowDetails(this, handle, overflowHandle);
return;
}
StoredPage handlePage = (StoredPage) owner.getPage(handlePageNumber);
updateOverflowDetails(handlePage, handle, overflowHandle);
handlePage.unlatch();
}
private void updateOverflowDetails(StoredPage handlePage, RecordHandle handle, RecordHandle overflowHandle)
throws StandardException {
// update the temp record header, this will be used in the log row ..
handlePage.getOverFlowRecordHeader().setOverflowDetails(overflowHandle);
// Use the slot interface as we don't need a lock since
// the initial insert/update holds the lock on the first
// portion of the record.
int slot = handlePage.getSlotNumber(handle);
        // use doUpdateAtSlot as it avoids unnecessary work in updateAtSlot; the
// null indicates to this page that the record should become an
// overflow record
handlePage.doUpdateAtSlot(
owner.getTransaction(), slot, handle.getId(),
(Object[]) null, (FormatableBitSet) null);
}
/**
@exception StandardException Standard Derby error policy
*/
public void updateFieldOverflowDetails(RecordHandle handle, RecordHandle overflowHandle)
throws StandardException
{
// add an overflow field at the end of the previous record
// uses sparse rows
Object[] row = new Object[2];
row[1] = overflowHandle;
// we are expanding the record to have 2 fields, the second field is the overflow pointer.
FormatableBitSet validColumns = new FormatableBitSet(2);
validColumns.set(1);
// Use the slot interface as we don't need a lock since
// the initial insert/update holds the lock on the first
// portion of the record.
int slot = getSlotNumber(handle);
// use doUpdateAtSlot as it avoids unnecessary work in updateAtSlot
doUpdateAtSlot(owner.getTransaction(), slot, handle.getId(), row, validColumns);
}
/**
@exception StandardException Standard Derby error policy
*/
public int appendOverflowFieldHeader(DynamicByteArrayOutputStream logBuffer, RecordHandle overflowHandle)
throws StandardException, IOException
{
int fieldStatus = StoredFieldHeader.setInitial();
fieldStatus = StoredFieldHeader.setOverflow(fieldStatus, true);
long overflowPage = overflowHandle.getPageNumber();
int overflowId = overflowHandle.getId();
int fieldDataLength = CompressedNumber.sizeLong(overflowPage)
+ CompressedNumber.sizeInt(overflowId);
// write the field header to the log buffer
int lenWritten = StoredFieldHeader.write(logBuffer, fieldStatus, fieldDataLength, slotFieldSize);
// write the overflow details to the log buffer
lenWritten += CompressedNumber.writeLong(logBuffer, overflowPage);
lenWritten += CompressedNumber.writeInt(logBuffer, overflowId);
// this length is the same on page as in the log
return (lenWritten);
}
protected int getSlotsInUse()
{
return(slotsInUse);
}
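    /*
        About the constants in getMaxDataLength below (assumption): the 2, 3
        and 5 bytes subtracted from spaceAvailable appear to reserve room for
        the stored field header, whose encoded size grows with the field data
        length (roughly: lengths below 64 need the smallest header, lengths
        below 16383 a mid-sized one, and anything larger the biggest).
    */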
/**
return the max datalength allowed with the space available
*/
private int getMaxDataLength(int spaceAvailable, int overflowThreshold) {
if (SanityManager.DEBUG) {
if (overflowThreshold == 0)
SanityManager.THROWASSERT("overflowThreshold cannot be 0");
}
        // we need to take the overflowThreshold into consideration:
// the overflowThreshold limits the max data length,
// whatever space we have left, we will not allow max data length
// to exceed the overflow threshold.
int maxThresholdSpace = totalSpace * overflowThreshold / 100;
int maxAvailable = 0;
if (spaceAvailable < (64 - 2))
maxAvailable = spaceAvailable - 2;
else if (spaceAvailable < (16383 - 3))
maxAvailable = spaceAvailable - 3;
else
maxAvailable = spaceAvailable - 5;
return (maxAvailable > maxThresholdSpace ? maxThresholdSpace : maxAvailable);
}
/**
return whether the field has exceeded the max threshold for this page
it compares the fieldSize with the largest possible field for this page
*/
private boolean isLong(int fieldSize, int overflowThreshold) {
if (SanityManager.DEBUG) {
if (overflowThreshold == 0)
SanityManager.THROWASSERT("overflowThreshold cannot be 0");
}
// if a field size is over the threshold, then it becomes a long column
int maxThresholdSize = maxFieldSize * overflowThreshold / 100;
return (fieldSize > maxThresholdSize);
}
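    /*
        High-level outline of doUpdateAtSlot below: starting at the head row
        piece, each portion of the (possibly overflowed) row is updated in
        turn. Long-column and row-overflow conditions raised during the update
        are handled by inserting overflow chains and splicing in overflow
        pointers, then the loop follows the row chain to the next portion
        until all requested columns have been written. If any row piece
        shrank, post-commit work is queued to reclaim the reserved space.
    */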
/**
Perform an update.
@exception StandardException Standard Derby policy
*/
public void doUpdateAtSlot(
RawTransaction t,
int slot,
int id,
Object[] row,
FormatableBitSet validColumns)
throws StandardException
{
// If this is a head page, the recordHandle is the head row handle.
// If this is not a head page, we are calling updateAtSlot inside some
// convoluted loop that updates an overflow chain. There is nothing we
        // can do about it anyway.
RecordHandle headRowHandle =
isOverflowPage() ? null : getRecordHandleAtSlot(slot);
        // RESOLVE: djd/yyz what does a null row mean? (sku)
if (row == null)
{
owner.getActionSet().actionUpdate(
t, this, slot, id, row, validColumns, -1,
(DynamicByteArrayOutputStream) null, -1, headRowHandle);
return;
}
// startColumn is the first column to be updated.
int startColumn = RowUtil.nextColumn(row, validColumns, 0);
if (startColumn == -1)
return;
if (SanityManager.DEBUG)
{
// make sure that if N bits are set in the validColumns that
// exactly N columns are passed in via the row array.
if (!isOverflowPage() && validColumns != null)
{
if (RowUtil.getNumberOfColumns(-1, validColumns) > row.length)
SanityManager.THROWASSERT("updating slot " + slot +
" on page " + getIdentity() + " " +
RowUtil.getNumberOfColumns(-1, validColumns) +
" bits are set in validColumns but only " +
row.length + " columns in row[]");
}
}
// Keep track of row shrinkage in the head row piece. If any row piece
// shrinks, file a post commit work to clear all reserved space for the
// entire row chain.
boolean rowHasReservedSpace = false;
StoredPage curPage = this;
for (;;)
{
StoredRecordHeader rh = curPage.getHeaderAtSlot(slot);
int startField = rh.getFirstField();
int endFieldExclusive = startField + rh.getNumberFields();
// curPage contains column[startField] to column[endFieldExclusive-1]
// Need to cope with an update that is increasing the number of
// columns. If this occurs we want to make sure that we perform a
// single update to the last portion of a record, and not an update
// of the current columns and then an update to append a column.
long nextPage = -1;
int realStartColumn = -1;
int realSpaceOnPage = -1;
if (!rh.hasOverflow() ||
((startColumn >= startField) &&
(startColumn < endFieldExclusive)))
{
boolean hitLongColumn;
int nextColumn = -1;
Object[] savedFields = null;
DynamicByteArrayOutputStream logBuffer = null;
do
{
try
{
// Update this portion of the record.
// Pass in headRowHandle in case we are to update any
// long column and they need to be cleaned up by post
// commit processing. We don't want to purge the
// columns right now because in order to reclaim the
// page, we need to remove them. But it would be bad
// to remove them now because the transaction may not
// commit for a long time. We can do both purging of
// the long column and page removal together in the
// post commit.
nextColumn =
owner.getActionSet().actionUpdate(
t, curPage, slot, id, row, validColumns,
realStartColumn, logBuffer,
realSpaceOnPage, headRowHandle);
hitLongColumn = false;
}
catch (LongColumnException lce)
{
if (lce.getRealSpaceOnPage() == -1)
{
// an update that has caused the row to increase
// in size *and* push some fields off the page
// that need to be inserted in an overflow page
// no need to make a copy as we are going to use
// this buffer right away
logBuffer = lce.getLogBuffer();
savedFields =
(Object[]) lce.getColumn();
realStartColumn = lce.getNextColumn();
realSpaceOnPage = -1;
hitLongColumn = true;
continue;
}
// we caught a real long column exception
// three things should happen here:
// 1. insert the long column into overflow pages.
// 2. append the overflow field header in the main chain.
// 3. continue the update in the main data chain.
logBuffer =
new DynamicByteArrayOutputStream(lce.getLogBuffer());
// step 1: insert the long column ... if this update
// operation rolls back, purge the after image column
// chain and reclaim the overflow page because the
// whole chain will be orphaned anyway.
RecordHandle longColumnHandle =
insertLongColumn(
curPage, lce, Page.INSERT_UNDO_WITH_PURGE);
// step 2: append overflow field header to log buffer
int overflowFieldLen = 0;
try
{
overflowFieldLen +=
appendOverflowFieldHeader(
logBuffer, longColumnHandle);
}
catch (IOException ioe)
{
throw StandardException.newException(
SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
}
// step 3: continue the insert in the main data chain
// need to pass the log buffer, and start column to the
// next insert.
realStartColumn = lce.getNextColumn() + 1;
realSpaceOnPage = lce.getRealSpaceOnPage() - overflowFieldLen;
hitLongColumn = true;
}
catch (NoSpaceOnPage nsop)
{
// DERBY-4923
//
// The actionUpdate() call should not generate a
// NoSpaceOnPage error.
throw StandardException.newException(
SQLState.DATA_UNEXPECTED_NO_SPACE_ON_PAGE,
nsop,
((PageKey) curPage.getIdentity()).toString(),
getPageDumpString(),
slot,
id,
validColumns.toString(),
realStartColumn,
0,
headRowHandle);
}
} while (hitLongColumn);
// See if we completed all the columns that are on this page.
int validColumnsSize =
(validColumns == null) ? 0 : validColumns.getLength();
if (nextColumn != -1)
{
if (SanityManager.DEBUG)
{
                        // note nextColumn might be less than the first
// column we started updating. This is because the
// update might force the record header to grow and
// push fields before the one we are updating off the
// page and into this insert.
if ((nextColumn < startField) ||
(rh.hasOverflow() && (nextColumn >= endFieldExclusive)))
{
SanityManager.THROWASSERT(
"nextColumn out of range = " + nextColumn +
" expected between " +
startField + " and " + endFieldExclusive);
}
}
// Need to insert rows from nextColumn to endFieldExclusive
// onto a new overflow page.
// If the column is not being updated we
// pick it up from the current page. If it is being updated
// we take it from the new value.
int possibleLastFieldExclusive = endFieldExclusive;
if (!rh.hasOverflow())
{
// we might be adding a field here
if (validColumns == null)
{
if (row.length > possibleLastFieldExclusive)
possibleLastFieldExclusive = row.length;
}
else
{
if (validColumnsSize > possibleLastFieldExclusive)
possibleLastFieldExclusive = validColumnsSize;
}
}
// use a sparse row
Object[] newRow =
new Object[possibleLastFieldExclusive];
FormatableBitSet newColumnList =
new FormatableBitSet(possibleLastFieldExclusive);
ByteArrayOutputStream fieldStream = null;
for (int i = nextColumn; i < possibleLastFieldExclusive; i++)
{
if ((validColumns == null) ||
(validColumnsSize > i && validColumns.isSet(i)))
{
newColumnList.set(i);
// use the new value
newRow[i] = RowUtil.getColumn(row, validColumns, i);
}
else if (i < endFieldExclusive)
{
newColumnList.set(i);
// use the old value
newRow[i] = savedFields[i - nextColumn];
}
}
RecordHandle handle = curPage.getRecordHandleAtSlot(slot);
// If the portion we just updated is the last portion then
// there cannot be any updates to do.
if (rh.hasOverflow())
{
// We have to carry across the overflow information
// from the current record, if any.
nextPage = rh.getOverflowPage();
id = rh.getOverflowId();
// find the next starting column before unlatching page
startColumn =
RowUtil.nextColumn(
row, validColumns, endFieldExclusive);
}
else
{
startColumn = -1;
nextPage = 0;
}
// After the update is done, see if this row piece has
// shrunk in curPage if no other row pieces have shrunk so
// far. In head page, need to respect minimumRecordSize.
// In overflow page entire row needs to respect
// StoredRecordHeader.MAX_OVERFLOW_ONLY_REC_SIZE.
// Don't bother with temp container.
if (!rowHasReservedSpace && headRowHandle != null &&
curPage != null && !owner.isTemporaryContainer())
{
rowHasReservedSpace =
curPage.checkRowReservedSpace(slot);
}
// insert the record portion on a new overflow page at slot
// 0 this will automatically handle any overflows in
// this new portion
// BasePage op = getNewOverflowPage();
BasePage op =
curPage.getOverflowPageForInsert(
slot,
newRow,
newColumnList,
nextColumn);
// We have all the information from this page so unlatch it
if (curPage != this)
{
curPage.unlatch();
curPage = null;
}
byte mode = Page.INSERT_OVERFLOW;
if (nextPage != 0)
mode |= Page.INSERT_FOR_SPLIT;
RecordHandle nextPortionHandle =
nextPage == 0 ? null :
owner.makeRecordHandle(nextPage, id);
// RESOLVED (sku): even though we would like to roll back
// these inserts with PURGE rather than with delete,
// we have to delete because if we purge the last row
// from an overflow page, the purge will queue a post
// commit to remove the page.
// While this is OK with long columns, we cannot do this
// for long rows because long row overflow pages can be
// shared by more than one long rows, and thus it is unsafe
// to remove the page without first latching the head page.
                    // However, the insert log record does not have the head
// row's page number so the rollback cannot put that
// information into the post commit work.
RecordHandle portionHandle;
try
{
portionHandle =
op.insertAllowOverflow(
0, newRow, newColumnList, nextColumn, mode, 100,
nextPortionHandle);
}
catch (NoSpaceOnPage nsop)
{
// DERBY-6319, intermittently application is getting
// an unexpected NoSpaceOnPage error from the
// insertAllowOverflow() call. The code expects that
// the latched page returned by the above
// getOverflowPageForInsert() call should always have
                        // at least enough space to insert while allowing
// overflow.
// Not sure what is causing this and have not been able
// to repro outside of the application, so catching the
// error and raising a new error with more information
// to be printed to the log.
//
throw StandardException.newException(
SQLState.DATA_UNEXPECTED_NO_SPACE_ON_PAGE,
nsop,
((PageKey) op.getIdentity()).toString(),
getPageDumpString(),
slot,
id,
newColumnList.toString(),
nextColumn,
mode,
nextPortionHandle);
}
// Update the previous record header to point to new portion
if (curPage == this)
updateOverflowDetails(this, handle, portionHandle);
else
updateOverflowDetails(handle, portionHandle);
op.unlatch();
}
else
{
// See earlier comments on checking row reserved space.
if (!rowHasReservedSpace &&
headRowHandle != null &&
curPage != null &&
!owner.isTemporaryContainer())
{
rowHasReservedSpace =
curPage.checkRowReservedSpace(slot);
}
// find the next starting column before we unlatch the page
startColumn =
rh.hasOverflow() ?
RowUtil.nextColumn(
row, validColumns, endFieldExclusive) : -1;
}
// have we completed this update?
if (startColumn == -1) {
if ((curPage != this) && (curPage != null))
curPage.unlatch();
break; // break out of the for loop
}
}
if (nextPage == -1)
{
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
curPage != null,
"Current page is null be no overflow information has been obtained");
}
// Get the next page info while we still have the page
// latched.
nextPage = rh.getOverflowPage();
id = rh.getOverflowId();
}
if ((curPage != this) && (curPage != null))
curPage.unlatch();
// get the next portion page and find the correct slot
curPage = (StoredPage) owner.getPage(nextPage);
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(
curPage.isOverflowPage(),
"following row chain gets a non-overflow page");
}
slot = curPage.findRecordById(id, FIRST_SLOT_NUMBER);
}
// Back to the head page. Get rid of all reserved space in the entire
// row post commit.
if (rowHasReservedSpace)
{
RawTransaction rxact = (RawTransaction)owner.getTransaction();
ReclaimSpace work =
new ReclaimSpace(ReclaimSpace.ROW_RESERVE,
headRowHandle,
rxact.getDataFactory(), true);
rxact.addPostCommitWork(work);
}
}
/**
* See if reserved space should be reclaimed for the input row.
* <p>
* See if the row on this page has reserved space that should be shrunk
* once the update commits. Will only indicate space should be reclaimed
* if at least RawTransaction.MINIMUM_RECORD_SIZE_DEFAULT bytes can be
* reclaimed.
* <p>
*
* @return true if space should be reclaimed from this row post commit.
**/
private boolean checkRowReservedSpace(int slot) throws StandardException
{
boolean rowHasReservedSpace = false;
try
{
int shrinkage = getReservedCount(slot);
// Only reclaim reserved space if it is "reasonably" sized, i.e.,
// we can reclaim at least MINIMUM_RECORD_SIZE_DEFAULT. Note
// any number could be used for "reasonable", not sure why
// MINIMUM_RECORD_SIZE_DEFAULT was chosen.
int reclaimThreshold = RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT;
if (shrinkage > reclaimThreshold)
{
// reserved space for row exceeds the threshold.
int totalSpace = getRecordPortionLength(slot) + shrinkage;
if (isOverflowPage())
{
// For overflow pages the total row size, including
// reserved space must be at least
// StoredRecordHeader.MAX_OVERFLOW_ONLY_REC_SIZE
if (totalSpace >
(StoredRecordHeader.MAX_OVERFLOW_ONLY_REC_SIZE +
reclaimThreshold))
{
// row can reclaim at least the threshold space
rowHasReservedSpace = true;
}
}
else
{
// this is a head page. The total space of the row
// including reserved space must total at least
// minimumRecordSize.
if (totalSpace > (minimumRecordSize + reclaimThreshold))
{
// row can reclaim at least the threshold space
rowHasReservedSpace = true;
}
}
}
}
catch (IOException ioe)
{
throw StandardException.newException(
SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
}
return rowHasReservedSpace;
}
/**
@see BasePage#compactRecord
@exception StandardException Standard Derby error policy
*/
protected void compactRecord(RawTransaction t, int slot, int id)
throws StandardException
{
if (!isOverflowPage())
{
// If this is a head row piece, first take care of the entire
// overflow row chain. Don't need to worry about long column
// because they are not in place updatable.
StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
while (recordHeader.hasOverflow())
{
// loop calling compact on each piece of the overflow chain.
StoredPage nextPageInRowChain =
getOverflowPage(recordHeader.getOverflowPage());
if (SanityManager.DEBUG)
SanityManager.ASSERT(nextPageInRowChain != null);
try
{
int nextId = recordHeader.getOverflowId();
int nextSlot =
getOverflowSlot(nextPageInRowChain, recordHeader);
nextPageInRowChain.compactRecord(t, nextSlot, nextId);
// Follow the next long row pointer.
recordHeader = nextPageInRowChain.getHeaderAtSlot(nextSlot);
}
finally
{
nextPageInRowChain.unlatch();
}
}
}
// Lastly, see if this row has anything sizable that can be freed.
// Try to only reclaim space larger than MINIMUM_RECORD_SIZE_DEFAULT
// because otherwise it is probably not worth the effort.
int reclaimThreshold = RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT;
try
{
int reserve = getReservedCount(slot);
if (reserve > reclaimThreshold)
{
// unused space exceeds the reclaim threshold.
int recordLength = getRecordPortionLength(slot);
int correctReservedSpace = reserve;
int totalSpace = recordLength + reserve;
if (isOverflowPage())
{
// On an overflow page the total space of a record must
// be at least MAX_OVERFLOW_ONLY_REC_SIZE.
if (totalSpace >
(StoredRecordHeader.MAX_OVERFLOW_ONLY_REC_SIZE +
reclaimThreshold))
{
// possible to reclaim more than threshold.
// calculate what the correct reserved space is
if (recordLength >=
StoredRecordHeader.MAX_OVERFLOW_ONLY_REC_SIZE)
{
correctReservedSpace = 0;
}
else
{
// make sure record takes up max overflow rec size
correctReservedSpace =
StoredRecordHeader.MAX_OVERFLOW_ONLY_REC_SIZE -
recordLength;
}
}
}
else
{
// this is a head page. The total space of the row
// including reserved space must total at least
// minimumRecordSize.
if (totalSpace > (minimumRecordSize + reclaimThreshold))
{
// calculate what the correct reserved space is
if (recordLength >= minimumRecordSize)
{
correctReservedSpace = 0;
}
else
{
correctReservedSpace =
minimumRecordSize - recordLength;
}
}
}
if (SanityManager.DEBUG)
{
SanityManager.ASSERT(correctReservedSpace <= reserve,
"correct reserve > reserve");
}
                // A shrinkage has occurred.
if (correctReservedSpace < reserve)
{
owner.getActionSet().
actionShrinkReservedSpace(
t, this, slot, id, correctReservedSpace, reserve);
}
}
}
catch (IOException ioe)
{
throw StandardException.newException(
SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);
}
}
}
| {
"content_hash": "8da9cb8a2337d23b0fa7a8a7fc8cb667",
"timestamp": "",
"source": "github",
"line_count": 9297,
"max_line_length": 149,
"avg_line_length": 38.19812842852533,
"alnum_prop": 0.5133416683561983,
"repo_name": "scnakandala/derby",
"id": "a878c43781a074144c30dd2ebb69b8fef0147d52",
"size": "355991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "java/engine/org/apache/derby/impl/store/raw/data/StoredPage.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13737"
},
{
"name": "Java",
"bytes": "41272803"
},
{
"name": "Shell",
"bytes": "1951"
}
],
"symlink_target": ""
} |
ACCEPTED
#### According to
Index Fungorum
#### Published in
null
#### Original name
Psathyrella angusticeps Peck
### Remarks
null | {
"content_hash": "4678c114999c08f3dd52d1a975fcfd4a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 28,
"avg_line_length": 10.23076923076923,
"alnum_prop": 0.7218045112781954,
"repo_name": "mdoering/backbone",
"id": "679e3dc7b6a7314d635c284463c8bb569c81445c",
"size": "185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "life/Fungi/Basidiomycota/Agaricomycetes/Agaricales/Psathyrellaceae/Psathyrella/Psathyrella angusticeps/README.md",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
using System.Web;
using System.Web.Optimization;
namespace ARAFFinal
{
public class BundleConfig
{
// For more information on bundling, visit http://go.microsoft.com/fwlink/?LinkId=301862
public static void RegisterBundles(BundleCollection bundles)
{
bundles.Add(new ScriptBundle("~/bundles/jquery").Include(
"~/Scripts/jquery-{version}.js"));
bundles.Add(new ScriptBundle("~/bundles/jqueryval").Include(
"~/Scripts/jquery.validate*"));
// Use the development version of Modernizr to develop with and learn from. Then, when you're
// ready for production, use the build tool at http://modernizr.com to pick only the tests you need.
bundles.Add(new ScriptBundle("~/bundles/modernizr").Include(
"~/Scripts/modernizr-*"));
bundles.Add(new ScriptBundle("~/bundles/bootstrap").Include(
"~/Scripts/bootstrap.js",
"~/Scripts/respond.js"));
bundles.Add(new StyleBundle("~/Content/css").Include(
"~/Content/bootstrap.css",
"~/Content/site.css"));
}
}
}
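// Usage note: in a standard ASP.NET MVC project these bundles are typically
// registered at startup via BundleConfig.RegisterBundles(BundleTable.Bundles)
// in Global.asax, and rendered in Razor views with, for example,
// @Styles.Render("~/Content/css") and @Scripts.Render("~/bundles/jquery").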
| {
"content_hash": "4b3f25031489af8c82f6e4504130eeda",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 112,
"avg_line_length": 39.935483870967744,
"alnum_prop": 0.5726978998384491,
"repo_name": "bhargavpatel431997/Adit-Result-Analysis",
"id": "0adfad6452bd34f808b0499aead602a905205d21",
"size": "1240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ARAFFinal/App_Start/BundleConfig.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "100"
},
{
"name": "C#",
"bytes": "120243"
},
{
"name": "CSS",
"bytes": "513"
},
{
"name": "HTML",
"bytes": "286064"
},
{
"name": "JavaScript",
"bytes": "12034"
}
],
"symlink_target": ""
} |
RSpec.describe 'VM::Operations' do
before do
@miq_server = EvmSpecHelper.local_miq_server
@ems = FactoryBot.create(:ems_vmware, :zone => @miq_server.zone)
@vm = FactoryBot.create(:vm_vmware, :ems_id => @ems.id)
ipaddresses = %w(fe80::21a:4aff:fe22:dde5 127.0.0.1)
allow(@vm).to receive(:ipaddresses).and_return(ipaddresses)
@hardware = FactoryBot.create(:hardware)
@hardware.ipaddresses << '10.142.0.2'
@hardware.ipaddresses << '35.190.140.48'
end
context '#cockpit_url' do
it '#returns a valid Cockpit url' do
url = @vm.send(:cockpit_url)
expect(url).to eq(URI::HTTP.build(:host => "127.0.0.1", :port => 9090))
end
end
context '#ipv4_address' do
it 'returns the existing ipv4 address' do
url = @vm.send(:ipv4_address)
expect(url).to eq('127.0.0.1')
end
context 'cloud providers' do
before { @ipaddresses = %w(10.10.1.121 35.190.140.48) }
      it 'returns the public ipv4 address for GCE' do
ems = FactoryBot.create(:ems_google, :project => 'manageiq-dev')
az = FactoryBot.create(:availability_zone_google)
vm = FactoryBot.create(:vm_google,
:ext_management_system => ems,
:ems_ref => 123,
:availability_zone => az,
:hardware => @hardware)
allow(vm).to receive(:ipaddresses).and_return(@ipaddresses)
url = vm.send(:ipv4_address)
expect(url).to eq('35.190.140.48')
end
      it 'returns the public ipv4 address for AWS' do
ems = FactoryBot.create(:ems_amazon)
vm = FactoryBot.create(:vm_amazon, :ext_management_system => ems, :hardware => @hardware)
allow(vm).to receive(:ipaddresses).and_return(@ipaddresses)
url = vm.send(:ipv4_address)
expect(url).to eq('35.190.140.48')
end
end
end
context '#public_address' do
it 'returns a public ipv4 address' do
ipaddresses = %w(10.10.1.121 35.190.140.48)
ems = FactoryBot.create(:ems_amazon)
vm = FactoryBot.create(:vm_amazon, :ext_management_system => ems, :hardware => @hardware)
allow(vm).to receive(:ipaddresses).and_return(ipaddresses)
url = vm.send(:public_address)
expect(url).to eq('35.190.140.48')
end
end
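  # Note: the expectations above assume that #ipv4_address and #public_address
  # prefer a publicly routable address over private-range addresses
  # (10.x.x.x, 127.x.x.x) for cloud providers; the exact selection logic
  # lives in VM::Operations.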
describe '#supports?(:vmrc_console)' do
it 'returns false if type is not supported' do
allow(@vm).to receive(:console_supported?).with('VMRC').and_return(false)
expect(@vm.supports?(:vmrc_console)).to be_falsey
expect(@vm.unsupported_reason(:vmrc_console)).to include('VMRC Console not supported')
end
it 'supports it if all conditions are met' do
allow(@vm).to receive(:console_supported?).with('VMRC').and_return(true)
expect(@vm.supports?(:vmrc_console)).to be_truthy
end
end
describe '#supports?(:html5_console)' do
it 'supports it if all conditions are met' do
allow(@vm).to receive(:console_supported?).and_return(true)
expect(@vm.supports?(:html5_console)).to be_truthy
end
it 'returns false if type is not supported' do
allow(@vm).to receive(:console_supported?).and_return(false)
expect(@vm.supports?(:html5_console)).to be_falsey
expect(@vm.unsupported_reason(:html5_console)).to include('HTML5 Console is not supported')
end
end
describe '#supports?(:native_console)' do
it 'returns false if type is not supported' do
allow(@vm).to receive(:console_supported?).with('NATIVE').and_return(false)
expect(@vm.supports?(:native_console)).to be_falsey
expect(@vm.unsupported_reason(:native_console)).to include('NATIVE Console not supported')
end
it 'supports it if all conditions are met' do
allow(@vm).to receive(:console_supported?).with('NATIVE').and_return(true)
expect(@vm.supports?(:native_console)).to be_truthy
end
end
describe '#supports?(:launch_vmrc_console)' do
it 'does not support it if validate_remote_console_vmrc_support raises an error' do
allow(@vm).to receive(:validate_remote_console_vmrc_support).and_raise(StandardError)
expect(@vm.supports?(:launch_vmrc_console)).to be_falsey
expect(@vm.unsupported_reason(:launch_vmrc_console)).to include('VM VMRC Console error:')
end
it 'supports it if all conditions are met' do
allow(@vm).to receive(:validate_remote_console_vmrc_support).and_return(true)
expect(@vm.supports?(:launch_vmrc_console)).to be_truthy
end
end
describe '#supports?(:launch_html5_console)' do
it 'does not support it if vm is not powered on' do
allow(@vm).to receive(:power_state).and_return('off')
expect(@vm.supports?(:launch_html5_console)).to be_falsey
expect(@vm.unsupported_reason(:launch_html5_console)).to include('the VM is not powered on')
end
it 'supports it if all conditions are met' do
allow(@vm).to receive(:power_state).and_return('on')
expect(@vm.supports?(:launch_html5_console)).to be_truthy
end
end
describe '#supports?(:launch_native_console)' do
it 'does not support it if validate_native_console_support raises an error' do
allow(@vm).to receive(:validate_native_console_support).and_raise(StandardError)
expect(@vm.supports?(:launch_native_console)).to be_falsey
expect(@vm.unsupported_reason(:launch_native_console)).to include('VM NATIVE Console error:')
end
it 'supports it if all conditions are met' do
allow(@vm).to receive(:validate_native_console_support)
expect(@vm.supports?(:launch_native_console)).to be_truthy
end
end
end
| {
"content_hash": "28ec6b6f1256258ea829fb8a5322dd3c",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 99,
"avg_line_length": 38.06,
"alnum_prop": 0.6447714135575408,
"repo_name": "NickLaMuro/manageiq",
"id": "bcc1b9a79609fd8797d12fd6fb8ad61f16f171b4",
"size": "5709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spec/models/vm/operations_spec.rb",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3042"
},
{
"name": "Dockerfile",
"bytes": "876"
},
{
"name": "HTML",
"bytes": "2167"
},
{
"name": "JavaScript",
"bytes": "183"
},
{
"name": "Ruby",
"bytes": "7775650"
},
{
"name": "Shell",
"bytes": "21796"
}
],
"symlink_target": ""
} |
{% import 'admin/layout.html' as layout with context -%}
{% import 'admin/static.html' as admin_static with context %}
<!DOCTYPE html>
<html>
<head>
<title>{% block title %}{% if admin_view.category %}{{ admin_view.category }} - {% endif %}{{ admin_view.name }} - {{ admin_view.admin.name }}{% endblock %}</title>
{% block head_meta %}
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="">
<meta name="author" content="">
{% endblock %}
{% block head_css %}
<link href="{{ admin_static.url(filename='bootstrap/bootstrap3/swatch/{swatch}/bootstrap.min.css'.format(swatch=config.get('FLASK_ADMIN_SWATCH', 'default')), v='3.3.5') }}" rel="stylesheet">
{%if config.get('FLASK_ADMIN_SWATCH', 'default') == 'default' %}
<link href="{{ admin_static.url(filename='bootstrap/bootstrap3/css/bootstrap-theme.min.css', v='3.3.5') }}" rel="stylesheet">
{%endif%}
<link href="{{ admin_static.url(filename='admin/css/bootstrap3/admin.css', v='1.1.1') }}" rel="stylesheet">
{% if admin_view.extra_css %}
{% for css_url in admin_view.extra_css %}
<link href="{{ css_url }}" rel="stylesheet">
{% endfor %}
{% endif %}
<style>
body {
padding-top: 4px;
}
</style>
{% endblock %}
{% block head %}
{% endblock %}
{% block head_tail %}
{% endblock %}
</head>
<body>
{% block page_body %}
<div class="container">
<nav class="navbar navbar-default" role="navigation">
<!-- Brand and toggle get grouped for better mobile display -->
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#admin-navbar-collapse">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
{% block brand %}
<a class="navbar-brand" href="{{ admin_view.admin.url }}">{{ admin_view.admin.name }}</a>
{% endblock %}
</div>
<!-- navbar content -->
<div class="collapse navbar-collapse" id="admin-navbar-collapse">
{% block main_menu %}
<ul class="nav navbar-nav">
{{ layout.menu() }}
</ul>
{% endblock %}
{% block menu_links %}
<ul class="nav navbar-nav navbar-right">
{{ layout.menu_links() }}
</ul>
{% endblock %}
{% block access_control %}
{% endblock %}
</div>
</nav>
{% block messages %}
{{ layout.messages() }}
{% endblock %}
{# store the jinja2 context for form_rules rendering logic #}
{% set render_ctx = h.resolve_ctx() %}
{% block body %}{% endblock %}
</div>
{% endblock %}
{% block tail_js %}
<script src="{{ admin_static.url(filename='vendor/jquery.min.js', v='2.1.4') }}" type="text/javascript"></script>
<script src="{{ admin_static.url(filename='bootstrap/bootstrap3/js/bootstrap.min.js', v='3.3.5') }}" type="text/javascript"></script>
<script src="{{ admin_static.url(filename='vendor/moment.min.js', v='2.9.0') }}" type="text/javascript"></script>
<script src="{{ admin_static.url(filename='vendor/select2/select2.min.js', v='3.5.2') }}" type="text/javascript"></script>
{% if admin_view.extra_js %}
{% for js_url in admin_view.extra_js %}
<script src="{{ js_url }}" type="text/javascript"></script>
{% endfor %}
{% endif %}
{% endblock %}
{% block tail %}
{% endblock %}
</body>
</html>
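{#
  Usage sketch (illustrative only, not part of the original template): a
  Flask-Admin view template would typically extend this base and override
  the `body` block. The heading content below is hypothetical.

  {% extends 'admin/base.html' %}
  {% block body %}
    <h1>Custom admin page content</h1>
  {% endblock %}
#}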
| {
"content_hash": "2a7c1071b785db05c5cbb161b599e56d",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 198,
"avg_line_length": 40.208333333333336,
"alnum_prop": 0.549740932642487,
"repo_name": "jmagnusson/flask-admin",
"id": "0b3f33911ae200d6a6991a99e5fce6e7ba50e84b",
"size": "3860",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "flask_admin/templates/bootstrap3/admin/base.html",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "120"
},
{
"name": "HTML",
"bytes": "99878"
},
{
"name": "JavaScript",
"bytes": "32604"
},
{
"name": "Makefile",
"bytes": "5587"
},
{
"name": "Python",
"bytes": "688056"
},
{
"name": "Shell",
"bytes": "1437"
}
],
"symlink_target": ""
} |
package com.qlfg.miningcircle.lock;
import com.example.miningcircle.R;
import com.qlfg.miningcircle.activity.BaseActivity;
import com.qlfg.miningcircle.application.MiningCircleApplication;
import com.qlfg.miningcircle.lock.LocusPassWordView.OnCompleteListener;
import android.app.Activity;
import android.os.Bundle;
import android.view.View;
import android.view.Window;
import android.view.View.OnClickListener;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
public class ModifiFuturePswActivity extends BaseActivity {
private Toast toast;
private LocusPassWordView lpwv;
private String password;
private boolean needverify = false;
	private boolean agin;
private ImageView leftImage;
private ImageView titleImage;
private ImageView rightImage;
private TextView titleName;
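	// Flow: when a gesture password already exists, the user must first draw
	// the old pattern (needverify == true); afterwards the new pattern is
	// drawn twice ("agin" toggles between first and second entry) before it
	// is stored via resetPassWord(). The toast strings are Chinese UI text,
	// e.g. "修改手势密码" = "modify gesture password", "请再输入一次" = "please
	// draw it once more", and "两次密码不一致" = "the two passwords do not match".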
private void showToast(CharSequence message) {
if (null == toast) {
toast = Toast.makeText(this, message, Toast.LENGTH_SHORT);
} else {
toast.setText(message);
}
toast.show();
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
requestWindowFeature(Window.FEATURE_NO_TITLE);
setContentView(R.layout.setpassword_activity);
lpwv = (LocusPassWordView) this.findViewById(R.id.mLocusPassWordView);
View title = findViewById(R.id.home_title);
leftImage = (ImageView) title.findViewById(R.id.left_image);
titleImage = (ImageView) title.findViewById(R.id.title_image);
rightImage = (ImageView) title.findViewById(R.id.right_image);
titleName = (TextView) title.findViewById(R.id.title_name);
leftImage.setBackgroundResource(R.drawable.back);
leftImage.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View arg0) {
// TODO Auto-generated method stub
finish();
}
});
titleImage.setVisibility(View.GONE);
rightImage.setVisibility(View.GONE);
titleName.setText("修改手势密码");
		titleName.setVisibility(View.VISIBLE);
lpwv.setOnCompleteListener(new OnCompleteListener() {
@Override
public void onComplete(String mPassword) {
if (needverify) {
if (lpwv.verifyPassword(mPassword)) {
showToast("密码输入正确,请输入新密码!");
lpwv.clearPassword();
needverify = false;
} else {
showToast("错误的密码,请重新输入!");
lpwv.clearPassword();
password = "";
}
			} else {
				if (StringUtil.isNotEmpty(mPassword)) {
					if (agin) {
						if (password.equals(mPassword)) {
							lpwv.resetPassWord(password);
							lpwv.clearPassword();
							showToast("密码修改成功,请牢记");
							MiningCircleApplication.getInstance().setIsFuture(true);
							agin = false;
							finish();
						} else {
							lpwv.clearPassword();
							showToast("两次密码不一致");
						}
					} else {
						password = mPassword;
						agin = true;
						lpwv.clearPassword();
						showToast("请再输入一次。");
					}
				}
			}
}
});
if (!lpwv.isPasswordEmpty()) {
this.needverify = true;
this.agin = false;
showToast("请输入旧密码");
}
}
}
| {
"content_hash": "9bfa51753a29a035d85317457aaa71c9",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 72,
"avg_line_length": 29.177570093457945,
"alnum_prop": 0.6816143497757847,
"repo_name": "haiwei27/MC",
"id": "020e2a1ca88a3b893801332483caac1f88bdc427",
"size": "3238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MiningCircleNew/src/main/java/com/qlfg/miningcircle/lock/ModifiFuturePswActivity.java",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
The first test
| {
"content_hash": "667ca9218149ae56ef4b20e000223763",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 14,
"avg_line_length": 15,
"alnum_prop": 0.8,
"repo_name": "jxnuxdy/mydev1",
"id": "e4004cfe95a4fdde12d0afad6e50e9b2691d0b01",
"size": "15",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.c",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "40"
},
{
"name": "Objective-C",
"bytes": "8718"
}
],
"symlink_target": ""
} |
function PhyloGraphInit(){
var server_meta = new amigo.data.server();
var gloc = server_meta.golr_base();
var gconf = new bbop.golr.conf(amigo.data.golr);
var r = new bbop.widget.phylo.renderer('test0', gloc, gconf);
// Either directly start the render with the global ID or add a listener
// to the selection.
    if (global_family && global_family != '') {
        r.show_family("PANTHER:" + global_family);
    } else {
        jQuery("#family_id").change(
            function() {
                r.show_family("PANTHER:" + jQuery("#family_id :selected").val());
            });
        jQuery("#family_id").change();
    }
}
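// Usage sketch (assumptions, not from the original file): the hosting page
// is expected to provide a #test0 container element, plus either a non-empty
// global `global_family` variable or a #family_id <select> of PANTHER ids.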
| {
"content_hash": "bace182a5d5278aef429893e7e6627f1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 30.7,
"alnum_prop": 0.6188925081433225,
"repo_name": "geneontology/amigo",
"id": "3d52e9b0fead870f09025fc2cbcfd44d9605b53d",
"size": "614",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "javascript/web/PhyloGraph.js",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38137"
},
{
"name": "Dockerfile",
"bytes": "5087"
},
{
"name": "Emacs Lisp",
"bytes": "13034"
},
{
"name": "Gherkin",
"bytes": "8135"
},
{
"name": "HCL",
"bytes": "3741"
},
{
"name": "HTML",
"bytes": "131"
},
{
"name": "Java",
"bytes": "2944"
},
{
"name": "JavaScript",
"bytes": "6643508"
},
{
"name": "Perl",
"bytes": "597581"
},
{
"name": "Python",
"bytes": "11259"
},
{
"name": "Ruby",
"bytes": "1061"
},
{
"name": "Shell",
"bytes": "6526"
}
],
"symlink_target": ""
} |
set -e -u
## Variables -----------------------------------------------------------------
PLAYBOOK_LOGS=${PLAYBOOK_LOGS:-"/openstack/log/ansible_playbooks/"}
COMMAND_LOGS=${COMMAND_LOGS:-"/openstack/log/ansible_cmd_logs/"}
ORIG_ANSIBLE_LOG_PATH=${ANSIBLE_LOG_PATH:-"/openstack/log/ansible-logging/ansible.log"}
## Main ----------------------------------------------------------------------
function run_play_book_exit_message {
echo -e "\e[1;5;97m*** NOTICE ***\e[0m
The \"\e[1;31m${0}\e[0m\" script has exited. This script is no longer needed from now on.
If you need to re-run parts of the stack, add new nodes to the environment,
or recover from an error, you will no longer need this application to
interact with the environment. All jobs should be executed out of the
\"\e[1;33m${PLAYBOOK_DIR}\e[0m\" directory using the \"\e[1;32mopenstack-ansible\e[0m\"
command line wrapper.
For more information about OpenStack-Ansible please review our documentation at:
\e[1;36mhttp://docs.openstack.org/developer/openstack-ansible\e[0m
Additionally, if you ever need information on common operational tasks, please
see the following documentation:
\e[1;36mhttp://docs.openstack.org/developer/openstack-ansible/developer-docs/ops.html\e[0m
If you ever have any questions please join the community conversation on IRC at
#openstack-ansible on freenode.
"
}
function get_includes {
/opt/ansible-runtime/bin/python <<EOC
import yaml
with open("${1}") as f:
yaml_list = yaml.safe_load(f.read())
for item in yaml_list:
_item = '---\n' + yaml.safe_dump([item], default_flow_style=False, width=1000)
print(repr(_item).strip("'").strip('"'))
EOC
}
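# Example (illustrative only): given setup-everything.yml containing
#   - include: setup-hosts.yml
#   - include: setup-infrastructure.yml
# get_includes prints each entry as its own single-item YAML document
# ("---\n- include: setup-hosts.yml\n"), which playbook_run below writes
# out as a temporary one-include playbook before executing it.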
function get_include_file {
/opt/ansible-runtime/bin/python <<EOC
import yaml
with open("${1}") as f:
yaml_list = yaml.safe_load(f.read())
print(yaml_list[0]['include'])
EOC
}
function playbook_run {
# First we gather facts about the hosts to populate the fact cache.
# We can't gather the facts for all hosts yet because the containers
# aren't built yet.
ansible -m setup -a "gather_subset=network,hardware,virtual" hosts
# Iterate over lines in setup-everything
IFS=$'\n'
COUNTER=0
for root_include in $(get_includes setup-everything.yml); do
echo -e "${root_include}" > root-include-playbook.yml
root_include_file_name="$(get_include_file root-include-playbook.yml)"
# Once setup-hosts is complete, we should gather facts for everything
# (now including containers) so that the fact cache is complete for the
# remainder of the run.
if [[ "${root_include_file_name}" == "setup-infrastructure.yml" ]]; then
ansible -m setup -a "gather_subset=network,hardware,virtual" all
fi
for include in $(get_includes "${root_include_file_name}"); do
echo -e "${include}" > /tmp/include-playbook.yml
include_file_name="$(get_include_file /tmp/include-playbook.yml)"
include_playbook="include-playbook.yml-${include_file_name}"
mv /tmp/include-playbook.yml ${include_playbook}
echo "[Executing \"${include_file_name}\" playbook]"
# Set the playbook log path so that we can review specific execution later.
export ANSIBLE_LOG_PATH="${PLAYBOOK_LOGS}/${COUNTER}-${include_file_name}.txt"
        COUNTER=$((COUNTER + 1))
install_bits "${include_playbook}"
        # Remove the generated playbook when done with it
rm "${include_playbook}"
done
    # Remove the generated playbook when done with it
rm root-include-playbook.yml
done
cat ${PLAYBOOK_LOGS}/* >> "${ORIG_ANSIBLE_LOG_PATH}"
}
trap run_play_book_exit_message EXIT
info_block "Checking for required libraries." 2> /dev/null || source "$(dirname "${0}")/scripts-library.sh"
# Initiate the deployment
pushd "playbooks"
PLAYBOOK_DIR="$(pwd)"
# Create playbook log directory
mkdir -p "${PLAYBOOK_LOGS}"
mkdir -p "$(dirname ${ORIG_ANSIBLE_LOG_PATH})"
# Execute setup everything
playbook_run
# Log some data about the instance and the rest of the system
log_instance_info
# Log repo data
mkdir -p "${COMMAND_LOGS}/repo_data"
ansible 'repo_all[0]' -m raw \
-a 'find /var/www/repo/os-releases -type l' \
-t "${COMMAND_LOGS}/repo_data"
print_report
popd
| {
"content_hash": "2862bb287e6d16a3b4dc066f68a3a9aa",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 107,
"avg_line_length": 36.93103448275862,
"alnum_prop": 0.6704014939309056,
"repo_name": "os-cloud/os-ansible-deployment",
"id": "445586746a5f11fb9167e467bbe8a3afe07bb466",
"size": "4967",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scripts/run-playbooks.sh",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "202815"
},
{
"name": "Shell",
"bytes": "63703"
}
],
"symlink_target": ""
} |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_35) on Tue Oct 16 22:49:45 ICT 2012 -->
<TITLE>
CrossReferenceStream (Apache FOP 1.1 API)
</TITLE>
<META NAME="date" CONTENT="2012-10-16">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="CrossReferenceStream (Apache FOP 1.1 API)";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="class-use/CrossReferenceStream.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
fop 1.1</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceObject.html" title="class in org.apache.fop.pdf.xref"><B>PREV CLASS</B></A>
<A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceTable.html" title="class in org.apache.fop.pdf.xref"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../index.html?org/apache/fop/pdf/xref/CrossReferenceStream.html" target="_top"><B>FRAMES</B></A>
<A HREF="CrossReferenceStream.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: NESTED | <A HREF="#fields_inherited_from_class_org.apache.fop.pdf.xref.CrossReferenceObject">FIELD</A> | <A HREF="#constructor_summary">CONSTR</A> | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | <A HREF="#constructor_detail">CONSTR</A> | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<!-- ======== START OF CLASS DATA ======== -->
<H2>
<FONT SIZE="-1">
org.apache.fop.pdf.xref</FONT>
<BR>
Class CrossReferenceStream</H2>
<PRE>
java.lang.Object
<IMG SRC="../../../../../resources/inherit.gif" ALT="extended by "><A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceObject.html" title="class in org.apache.fop.pdf.xref">org.apache.fop.pdf.xref.CrossReferenceObject</A>
<IMG SRC="../../../../../resources/inherit.gif" ALT="extended by "><B>org.apache.fop.pdf.xref.CrossReferenceStream</B>
</PRE>
<HR>
<DL>
<DT><PRE>public class <B>CrossReferenceStream</B><DT>extends <A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceObject.html" title="class in org.apache.fop.pdf.xref">CrossReferenceObject</A></DL>
</PRE>
<P>
A cross-reference stream, as described in Section 3.4.7 of the PDF 1.5 Reference.
<P>
<P>
<HR>
<P>
<!-- =========== FIELD SUMMARY =========== -->
<A NAME="field_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Field Summary</B></FONT></TH>
</TR>
</TABLE>
<A NAME="fields_inherited_from_class_org.apache.fop.pdf.xref.CrossReferenceObject"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left"><B>Fields inherited from class org.apache.fop.pdf.xref.<A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceObject.html" title="class in org.apache.fop.pdf.xref">CrossReferenceObject</A></B></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceObject.html#startxref">startxref</A>, <A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceObject.html#trailerDictionary">trailerDictionary</A></CODE></TD>
</TR>
</TABLE>
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<A NAME="constructor_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Constructor Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE><B><A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceStream.html#CrossReferenceStream(org.apache.fop.pdf.PDFDocument, int, org.apache.fop.pdf.xref.TrailerDictionary, long, java.util.List, java.util.List)">CrossReferenceStream</A></B>(<A HREF="../../../../../org/apache/fop/pdf/PDFDocument.html" title="class in org.apache.fop.pdf">PDFDocument</A> document,
int objectNumber,
<A HREF="../../../../../org/apache/fop/pdf/xref/TrailerDictionary.html" title="class in org.apache.fop.pdf.xref">TrailerDictionary</A> trailerDictionary,
long startxref,
java.util.List<java.lang.Long> uncompressedObjectReferences,
java.util.List<<A HREF="../../../../../org/apache/fop/pdf/xref/CompressedObjectReference.html" title="class in org.apache.fop.pdf.xref">CompressedObjectReference</A>> compressedObjectReferences)</CODE>
<BR>
</TD>
</TR>
</TABLE>
<!-- ========== METHOD SUMMARY =========== -->
<A NAME="method_summary"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="2"><FONT SIZE="+2">
<B>Method Summary</B></FONT></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD ALIGN="right" VALIGN="top" WIDTH="1%"><FONT SIZE="-1">
<CODE> void</CODE></FONT></TD>
<TD><CODE><B><A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceStream.html#output(java.io.OutputStream)">output</A></B>(java.io.OutputStream stream)</CODE>
<BR>
Writes the cross reference data to a PDF stream</TD>
</TR>
</TABLE>
<A NAME="methods_inherited_from_class_java.lang.Object"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#EEEEFF" CLASS="TableSubHeadingColor">
<TH ALIGN="left"><B>Methods inherited from class java.lang.Object</B></TH>
</TR>
<TR BGCOLOR="white" CLASS="TableRowColor">
<TD><CODE>clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</CODE></TD>
</TR>
</TABLE>
<P>
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<A NAME="constructor_detail"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2">
<B>Constructor Detail</B></FONT></TH>
</TR>
</TABLE>
<A NAME="CrossReferenceStream(org.apache.fop.pdf.PDFDocument, int, org.apache.fop.pdf.xref.TrailerDictionary, long, java.util.List, java.util.List)"><!-- --></A><H3>
CrossReferenceStream</H3>
<PRE>
public <B>CrossReferenceStream</B>(<A HREF="../../../../../org/apache/fop/pdf/PDFDocument.html" title="class in org.apache.fop.pdf">PDFDocument</A> document,
int objectNumber,
<A HREF="../../../../../org/apache/fop/pdf/xref/TrailerDictionary.html" title="class in org.apache.fop.pdf.xref">TrailerDictionary</A> trailerDictionary,
long startxref,
java.util.List<java.lang.Long> uncompressedObjectReferences,
java.util.List<<A HREF="../../../../../org/apache/fop/pdf/xref/CompressedObjectReference.html" title="class in org.apache.fop.pdf.xref">CompressedObjectReference</A>> compressedObjectReferences)</PRE>
<DL>
</DL>
<!-- ============ METHOD DETAIL ========== -->
<A NAME="method_detail"><!-- --></A>
<TABLE BORDER="1" WIDTH="100%" CELLPADDING="3" CELLSPACING="0" SUMMARY="">
<TR BGCOLOR="#CCCCFF" CLASS="TableHeadingColor">
<TH ALIGN="left" COLSPAN="1"><FONT SIZE="+2">
<B>Method Detail</B></FONT></TH>
</TR>
</TABLE>
<A NAME="output(java.io.OutputStream)"><!-- --></A><H3>
output</H3>
<PRE>
public void <B>output</B>(java.io.OutputStream stream)
throws java.io.IOException</PRE>
<DL>
<DD>Writes the cross reference data to a PDF stream
<P>
<DD><DL>
<DT><B>Specified by:</B><DD><CODE><A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceObject.html#output(java.io.OutputStream)">output</A></CODE> in class <CODE><A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceObject.html" title="class in org.apache.fop.pdf.xref">CrossReferenceObject</A></CODE></DL>
</DD>
<DD><DL>
<DT><B>Parameters:</B><DD><CODE>stream</CODE> - the stream to write the cross reference to
<DT><B>Throws:</B>
<DD><CODE>java.io.IOException</CODE> - if an I/O exception occurs while writing the data</DL>
</DD>
</DL>
<!-- ========= END OF CLASS DATA ========= -->
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Class</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="class-use/CrossReferenceStream.html"><FONT CLASS="NavBarFont1"><B>Use</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
fop 1.1</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceObject.html" title="class in org.apache.fop.pdf.xref"><B>PREV CLASS</B></A>
<A HREF="../../../../../org/apache/fop/pdf/xref/CrossReferenceTable.html" title="class in org.apache.fop.pdf.xref"><B>NEXT CLASS</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../index.html?org/apache/fop/pdf/xref/CrossReferenceStream.html" target="_top"><B>FRAMES</B></A>
<A HREF="CrossReferenceStream.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
<TR>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
SUMMARY: NESTED | <A HREF="#fields_inherited_from_class_org.apache.fop.pdf.xref.CrossReferenceObject">FIELD</A> | <A HREF="#constructor_summary">CONSTR</A> | <A HREF="#method_summary">METHOD</A></FONT></TD>
<TD VALIGN="top" CLASS="NavBarCell3"><FONT SIZE="-2">
DETAIL: FIELD | <A HREF="#constructor_detail">CONSTR</A> | <A HREF="#method_detail">METHOD</A></FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
Copyright 1999-2012 The Apache Software Foundation. All Rights Reserved.
</BODY>
</HTML>
| {
"content_hash": "5720cd9db5fcd235c7d8b3eba7ba7c34",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 383,
"avg_line_length": 48.54948805460751,
"alnum_prop": 0.6363444639718805,
"repo_name": "pconrad/ucsb-cs56-tutorials-fop",
"id": "438883aae3930aceca0805365ac955ddf79b5125",
"size": "14225",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "fop-1.1/javadocs/org/apache/fop/pdf/xref/CrossReferenceStream.html",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23208"
},
{
"name": "Erlang",
"bytes": "15684"
},
{
"name": "Java",
"bytes": "210811"
},
{
"name": "JavaScript",
"bytes": "28643"
},
{
"name": "Perl",
"bytes": "12013"
},
{
"name": "R",
"bytes": "223"
},
{
"name": "Shell",
"bytes": "24838"
},
{
"name": "XSLT",
"bytes": "25384"
}
],
"symlink_target": ""
} |
posner
======
The Posner task as a demo for a workshop
| {
"content_hash": "063e5af04a7a1a9adb882bbf9bf6a2b4",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 38,
"avg_line_length": 13.5,
"alnum_prop": 0.6851851851851852,
"repo_name": "smithdanielle/posner",
"id": "71dc16801001cd7f99b7b5426392d89f2642622c",
"size": "54",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "README.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7365"
}
],
"symlink_target": ""
} |
package com.validation.manager.core.tool.requirement.importer;
import static com.validation.manager.core.DataBaseManager.getEntityManagerFactory;
import static com.validation.manager.core.DataBaseManager.namedQuery;
import com.validation.manager.core.ImporterInterface;
import com.validation.manager.core.VMException;
import com.validation.manager.core.db.Project;
import com.validation.manager.core.db.Requirement;
import com.validation.manager.core.db.RequirementSpecNode;
import com.validation.manager.core.db.RequirementStatus;
import com.validation.manager.core.db.RequirementType;
import com.validation.manager.core.db.controller.RequirementJpaController;
import com.validation.manager.core.db.controller.exceptions.IllegalOrphanException;
import com.validation.manager.core.db.controller.exceptions.NonexistentEntityException;
import com.validation.manager.core.server.core.RequirementSpecNodeServer;
import com.validation.manager.core.tool.Tool;
import com.validation.manager.core.tool.message.MessageHandler;
import com.validation.manager.core.tool.table.extractor.TableExtractor;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.ResourceBundle;
import java.util.logging.Level;
import java.util.logging.Logger;
import static java.util.logging.Logger.getLogger;
import javax.swing.table.DefaultTableModel;
import static org.apache.poi.hssf.usermodel.HSSFDataFormat.getBuiltinFormat;
import org.apache.poi.hssf.usermodel.HSSFWorkbook;
import org.apache.poi.openxml4j.exceptions.InvalidFormatException;
import org.apache.poi.ss.usermodel.Cell;
import org.apache.poi.ss.usermodel.CellStyle;
import org.apache.poi.ss.usermodel.Font;
import org.apache.poi.ss.usermodel.Row;
import org.apache.poi.ss.usermodel.Workbook;
import static org.apache.poi.ss.usermodel.WorkbookFactory.create;
import org.openide.util.Exceptions;
import static org.openide.util.Lookup.getDefault;
/**
* Import Requirements into database
*
* @author Javier A. Ortiz Bultron [email protected]
*/
public class RequirementImporter implements ImporterInterface<Requirement> {
private final File toImport;
private final RequirementSpecNode rsn;
private static final Logger LOG
= getLogger(RequirementImporter.class.getSimpleName());
private static final List<String> COLUMNS = new ArrayList<>();
private final Map<String, Requirement> queue = new HashMap<>();
private InputStream inp;
static {
COLUMNS.add("Unique ID");
COLUMNS.add("Description");
COLUMNS.add("Requirement Type");
COLUMNS.add("Notes");
}
public RequirementImporter(File toImport, RequirementSpecNode rsn) {
assert rsn != null : "Requirement Spec Node is null?";
this.toImport = toImport;
this.rsn = rsn;
}
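    // Typical flow: importFile() parses the spreadsheet/document into the
    // `queue` map keyed by requirement unique id, then processImport()
    // persists each queued Requirement and refreshes the owning
    // RequirementSpecNode.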
public Workbook loadFile() throws FileNotFoundException,
IOException, InvalidFormatException {
if (toImport != null && toImport.exists()) {
inp = new FileInputStream(toImport);
Workbook wb = create(inp);
return wb;
}
return null;
}
@Override
public List<Requirement> importFile() throws RequirementImportException {
List<Requirement> importedRequirements = new ArrayList<>();
try {
importedRequirements.addAll(importFile(false));
}
catch (UnsupportedOperationException | VMException ex) {
LOG.log(Level.SEVERE, null, ex);
}
return importedRequirements;
}
@Override
public List<Requirement> importFile(boolean header) throws
RequirementImportException, VMException {
queue.clear();
List<Integer> errors = new ArrayList<>();
HashMap<String, Object> parameters = new HashMap<>();
List<Object> result;
if (toImport == null) {
throw new RequirementImportException(
"message.requirement.import.file.null");
} else if (!toImport.exists()) {
throw new RequirementImportException(
"message.requirement.import.file.invalid");
} else {
//Excel support
if (toImport.getName().endsWith(".xls")
|| toImport.getName().endsWith(".xlsx")) {
try {
Workbook wb = loadFile();
org.apache.poi.ss.usermodel.Sheet sheet = wb.getSheetAt(0);
int rows = sheet.getPhysicalNumberOfRows();
int r = 0;
if (header) {
//Skip header row
r++;
}
for (; r < rows; r++) {
Row row = sheet.getRow(r);
if (row == null) {
continue;
}
if (row.getCell(0) == null) {
LOG.log(Level.WARNING,
"Found an empty row on line: {0}. "
+ "Stopping processing", r);
break;
}
int cells = row.getPhysicalNumberOfCells();
if (cells < 2) {
LOG.log(Level.INFO, "Processing row: {0}", r);
LOG.warning(
ResourceBundle.getBundle(
"com.validation.manager.resources.VMMessages",
Locale.getDefault()).getString(
"message.requirement.import.missing.column")
.replaceAll("%c", "" + cells));
errors.add(r);
} else {
Requirement requirement = new Requirement();
LOG.log(Level.FINE, "Row: {0}", r);
for (int c = 0; c < cells; c++) {
Cell cell = row.getCell(c);
String value = "";
if (cell != null) {
switch (cell.getCellTypeEnum()) {
case FORMULA:
value = cell.getCellFormula();
break;
case NUMERIC:
value = "" + cell.getNumericCellValue();
break;
case STRING:
value = cell.getStringCellValue();
break;
default:
value = "";
break;
}
}
//Remove any extra spaces.
value = value.trim();
switch (c) {
case 0:
//Unique ID
LOG.fine("Setting id");
requirement.setUniqueId(value);
break;
case 1:
//Description
LOG.fine("Setting desc");
requirement.setDescription(value);
break;
case 2:
//Optional Requirement type
LOG.fine("Setting requirement type");
parameters.clear();
parameters.put("name", value);
result = namedQuery(
"RequirementType.findByName",
parameters);
if (result.isEmpty()) {
//Assume a default
parameters.clear();
parameters.put("name", "SW");
result = namedQuery(
"RequirementType.findByName",
parameters);
}
requirement.setRequirementTypeId(
(RequirementType) result.get(0));
break;
case 3:
//Optional notes
LOG.fine("Setting notes");
requirement.setNotes(value);
break;
default:
throw new RequirementImportException(
"Invalid column detected: " + c);
}
LOG.fine(value);
}
//This shouldn't be null
assert rsn != null : "Requirement Spec Node is null?";
requirement.setRequirementSpecNode(rsn);
parameters.clear();
parameters.put("status", "general.open");
result = namedQuery(
"RequirementStatus.findByStatus", parameters);
requirement.setRequirementStatusId(
(RequirementStatus) result.get(0));
assert requirement.getUniqueId() != null
&& !requirement.getUniqueId().isEmpty() :
"Invalid requirement detected!";
try {
if (!exists(requirement)
&& !queue.containsKey(requirement.getUniqueId())) {
queue.put(requirement.getUniqueId(),
requirement);
}
}
catch (IllegalOrphanException | NonexistentEntityException ex) {
Exceptions.printStackTrace(ex);
}
}
}
}
catch (InvalidFormatException | IOException ex) {
LOG.log(Level.SEVERE, null, ex);
}
finally {
try {
if (inp != null) {
inp.close();
}
}
catch (IOException ex) {
LOG.log(Level.SEVERE, null, ex);
}
}
} else if (toImport.getName().endsWith(".xml")) {
throw new RequirementImportException(
"XML importing not supported yet.");
} else if (toImport.getName().endsWith(".doc")
|| toImport.getName().endsWith(".docx")) {
try {
TableExtractor te = new TableExtractor(toImport);
List<DefaultTableModel> tables = te.extractTables();
Requirement requirement = new Requirement();
LOG.log(Level.INFO, "Imported {0} tables!", tables.size());
int count = 1;
for (DefaultTableModel model : tables) {
int rows = model.getRowCount();
int cols = model.getColumnCount();
LOG.log(Level.INFO, "Processing table {0} with {1} "
+ "rows and {2} columns.",
new Object[]{count, rows, cols});
for (int r = 0; r < rows; r++) {
for (int c = 0; c < cols; c++) {
                            String value = (String) model.getValueAt(r, c);
switch (c) {
case 0:
//Unique ID
LOG.fine("Setting id");
requirement.setUniqueId(value);
break;
case 1:
//Description
LOG.fine("Setting desc");
requirement.setDescription(value);
break;
case 2:
//Requirement type
LOG.fine("Setting requirement type");
parameters.clear();
parameters.put("name", value);
result = namedQuery(
"RequirementType.findByName",
parameters);
if (result.isEmpty()) {
//Assume a default
parameters.clear();
parameters.put("name", "SW");
result = namedQuery(
"RequirementType.findByName",
parameters);
}
requirement.setRequirementTypeId(
(RequirementType) result.get(0));
break;
case 3:
//Optional notes
LOG.fine("Setting notes");
requirement.setNotes(value);
break;
default:
                                    throw new RequirementImportException(
                                            "Invalid column detected: " + c);
}
}
}
}
}
catch (IOException | ClassNotFoundException ex) {
Exceptions.printStackTrace(ex);
}
} else {
throw new RequirementImportException("Unsupported file format: "
+ toImport.getName());
}
StringBuilder sb = new StringBuilder("Rows with erros:\n");
errors.stream().forEach((line) -> {
sb.append(line).append('\n');
});
if (!errors.isEmpty()) {
getDefault().lookup(MessageHandler.class).info(sb.toString());
}
            return new ArrayList<>(queue.values());
}
}
public boolean exists(Requirement requirement) throws
IllegalOrphanException, NonexistentEntityException {
Project project = requirement.getRequirementSpecNode()
.getRequirementSpec().getProject();
List<Requirement> existing = Tool.extractRequirements(project);
LOG.log(Level.INFO, "Processing: {0}", requirement.getUniqueId());
boolean exists = false;
for (Requirement r : existing) {
if (r.getUniqueId() == null) {
LOG.warning("Detected requirement with null unique id!");
new RequirementJpaController(getEntityManagerFactory())
.destroy(r.getId());
} else {
if (r.getUniqueId().equals(requirement.getUniqueId())) {
exists = true;
break;
}
}
}
return exists;
}
private boolean processRequirement(Requirement requirement)
throws IllegalOrphanException, NonexistentEntityException,
Exception {
boolean result = true;
Project project = requirement.getRequirementSpecNode()
.getRequirementSpec().getProject();
if (exists(requirement)) {
MessageHandler handler = getDefault().lookup(MessageHandler.class);
if (handler != null) {
String error = "Requirement " + requirement.getUniqueId()
+ " already exists on project "
+ project.getName();
LOG.warning(error);
handler.error(error);
}
result = false;
} else {
new RequirementJpaController(getEntityManagerFactory())
.create(requirement);
}
return result;
}
@Override
public boolean processImport() throws RequirementImportException {
try {
for (Requirement r : queue.values()) {
processRequirement(r);
}
RequirementSpecNodeServer rsns = new RequirementSpecNodeServer(rsn);
rsns.update(rsn, rsns);
queue.clear();
return true;
}
catch (NonexistentEntityException ex) {
Exceptions.printStackTrace(ex);
throw new RequirementImportException(ex.getLocalizedMessage());
}
catch (Exception ex) {
Exceptions.printStackTrace(ex);
throw new RequirementImportException(ex.getLocalizedMessage());
}
}
public static File exportTemplate() throws FileNotFoundException,
IOException, InvalidFormatException {
File template = new File("Template.xls");
template.createNewFile();
org.apache.poi.ss.usermodel.Workbook wb = new HSSFWorkbook();
org.apache.poi.ss.usermodel.Sheet sheet = wb.createSheet();
wb.setSheetName(0, "Requirements");
int column = 0;
CellStyle cs = wb.createCellStyle();
cs.setDataFormat(getBuiltinFormat("text"));
Font f = wb.createFont();
f.setFontHeightInPoints((short) 12);
f.setBold(true);
f.setColor((short) Font.COLOR_NORMAL);
cs.setFont(f);
Row newRow = sheet.createRow(0);
for (String label : COLUMNS) {
Cell newCell = newRow.createCell(column);
newCell.setCellStyle(cs);
newCell.setCellValue(label);
column++;
}
try (FileOutputStream out = new FileOutputStream(template)) {
wb.write(out);
}
catch (FileNotFoundException e) {
LOG.log(Level.SEVERE, null, e);
}
catch (IOException e) {
LOG.log(Level.SEVERE, null, e);
}
return template;
}
public static void main(String[] args) {
try {
File file = exportTemplate();
System.out.println(file.getAbsolutePath());
}
catch (FileNotFoundException | InvalidFormatException ex) {
LOG.log(Level.SEVERE, null, ex);
}
catch (IOException ex) {
LOG.log(Level.SEVERE, null, ex);
}
}
}
| {
"content_hash": "0b7fe4bc5c64b459f31fc58ce50b4ab2",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 100,
"avg_line_length": 46.92081447963801,
"alnum_prop": 0.44110130671681375,
"repo_name": "javydreamercsw/validation-manager",
"id": "a0f1a3d876ca07657aa0e1c803bdf8e29f41399a",
"size": "21387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VM-Core/src/main/java/com/validation/manager/core/tool/requirement/importer/RequirementImporter.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "321386"
},
{
"name": "Java",
"bytes": "3213112"
},
{
"name": "SCSS",
"bytes": "2735"
}
],
"symlink_target": ""
} |
#include "dev/arm/pl111.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "base/vnc/vncinput.hh"
#include "debug/PL111.hh"
#include "debug/Uart.hh"
#include "dev/arm/amba_device.hh"
#include "dev/arm/base_gic.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "sim/system.hh"
// clang complains about std::set being overloaded with Packet::set if
// we open up the entire namespace std
using std::vector;
// initialize clcd registers
Pl111::Pl111(const Params *p)
: AmbaDmaDevice(p), lcdTiming0(0), lcdTiming1(0), lcdTiming2(0),
lcdTiming3(0), lcdUpbase(0), lcdLpbase(0), lcdControl(0), lcdImsc(0),
lcdRis(0), lcdMis(0),
clcdCrsrCtrl(0), clcdCrsrConfig(0), clcdCrsrPalette0(0),
clcdCrsrPalette1(0), clcdCrsrXY(0), clcdCrsrClip(0), clcdCrsrImsc(0),
clcdCrsrIcr(0), clcdCrsrRis(0), clcdCrsrMis(0),
pixelClock(p->pixel_clock),
converter(PixelConverter::rgba8888_le), fb(LcdMaxWidth, LcdMaxHeight),
vnc(p->vnc), bmp(&fb), pic(NULL),
width(LcdMaxWidth), height(LcdMaxHeight),
bytesPerPixel(4), startTime(0), startAddr(0), maxAddr(0), curAddr(0),
waterMark(0), dmaPendingNum(0),
readEvent([this]{ readFramebuffer(); }, name()),
fillFifoEvent([this]{ fillFifo(); }, name()),
dmaDoneEventAll(maxOutstandingDma, this),
dmaDoneEventFree(maxOutstandingDma),
intEvent([this]{ generateInterrupt(); }, name()),
enableCapture(p->enable_capture)
{
pioSize = 0xFFFF;
dmaBuffer = new uint8_t[buffer_size];
memset(lcdPalette, 0, sizeof(lcdPalette));
memset(cursorImage, 0, sizeof(cursorImage));
memset(dmaBuffer, 0, buffer_size);
for (int i = 0; i < maxOutstandingDma; ++i)
dmaDoneEventFree[i] = &dmaDoneEventAll[i];
if (vnc)
vnc->setFrameBuffer(&fb);
}
Pl111::~Pl111()
{
delete[] dmaBuffer;
}
// read registers and frame buffer
Tick
Pl111::read(PacketPtr pkt)
{
    // use a temporary variable since the LCD registers are read/written with
    // different size operations
uint32_t data = 0;
assert(pkt->getAddr() >= pioAddr &&
pkt->getAddr() < pioAddr + pioSize);
Addr daddr = pkt->getAddr() - pioAddr;
DPRINTF(PL111, " read register %#x size=%d\n", daddr, pkt->getSize());
switch (daddr) {
case LcdTiming0:
data = lcdTiming0;
break;
case LcdTiming1:
data = lcdTiming1;
break;
case LcdTiming2:
data = lcdTiming2;
break;
case LcdTiming3:
data = lcdTiming3;
break;
case LcdUpBase:
data = lcdUpbase;
break;
case LcdLpBase:
data = lcdLpbase;
break;
case LcdControl:
data = lcdControl;
break;
case LcdImsc:
data = lcdImsc;
break;
case LcdRis:
data = lcdRis;
break;
case LcdMis:
data = lcdMis;
break;
case LcdIcr:
panic("LCD register at offset %#x is Write-Only\n", daddr);
break;
case LcdUpCurr:
data = curAddr;
break;
case LcdLpCurr:
data = curAddr;
break;
case ClcdCrsrCtrl:
data = clcdCrsrCtrl;
break;
case ClcdCrsrConfig:
data = clcdCrsrConfig;
break;
case ClcdCrsrPalette0:
data = clcdCrsrPalette0;
break;
case ClcdCrsrPalette1:
data = clcdCrsrPalette1;
break;
case ClcdCrsrXY:
data = clcdCrsrXY;
break;
case ClcdCrsrClip:
data = clcdCrsrClip;
break;
case ClcdCrsrImsc:
data = clcdCrsrImsc;
break;
case ClcdCrsrIcr:
panic("CLCD register at offset %#x is Write-Only\n", daddr);
break;
case ClcdCrsrRis:
data = clcdCrsrRis;
break;
case ClcdCrsrMis:
data = clcdCrsrMis;
break;
default:
if (readId(pkt, AMBA_ID, pioAddr)) {
// Hack for variable size accesses
data = pkt->getLE<uint32_t>();
break;
} else if (daddr >= CrsrImage && daddr <= 0xBFC) {
// CURSOR IMAGE
int index;
index = (daddr - CrsrImage) >> 2;
            data = cursorImage[index];
break;
} else if (daddr >= LcdPalette && daddr <= 0x3FC) {
// LCD Palette
int index;
index = (daddr - LcdPalette) >> 2;
data = lcdPalette[index];
break;
} else {
panic("Tried to read CLCD register at offset %#x that "
"doesn't exist\n", daddr);
break;
}
}
    switch (pkt->getSize()) {
case 1:
pkt->setLE<uint8_t>(data);
break;
case 2:
pkt->setLE<uint16_t>(data);
break;
case 4:
pkt->setLE<uint32_t>(data);
break;
default:
panic("CLCD controller read size too big?\n");
break;
}
pkt->makeAtomicResponse();
return pioDelay;
}
// write registers and frame buffer
Tick
Pl111::write(PacketPtr pkt)
{
    // use a temporary variable since the LCD registers are read/written with
    // different size operations
//
uint32_t data = 0;
    switch (pkt->getSize()) {
case 1:
data = pkt->getLE<uint8_t>();
break;
case 2:
data = pkt->getLE<uint16_t>();
break;
case 4:
data = pkt->getLE<uint32_t>();
break;
default:
panic("PL111 CLCD controller write size too big?\n");
break;
}
assert(pkt->getAddr() >= pioAddr &&
pkt->getAddr() < pioAddr + pioSize);
Addr daddr = pkt->getAddr() - pioAddr;
DPRINTF(PL111, " write register %#x value %#x size=%d\n", daddr,
pkt->getLE<uint8_t>(), pkt->getSize());
switch (daddr) {
case LcdTiming0:
lcdTiming0 = data;
// width = 16 * (PPL+1)
width = (lcdTiming0.ppl + 1) << 4;
break;
case LcdTiming1:
lcdTiming1 = data;
// height = LPP + 1
height = (lcdTiming1.lpp) + 1;
break;
case LcdTiming2:
lcdTiming2 = data;
break;
case LcdTiming3:
lcdTiming3 = data;
break;
case LcdUpBase:
lcdUpbase = data;
DPRINTF(PL111, "####### Upper panel base set to: %#x #######\n", lcdUpbase);
break;
case LcdLpBase:
warn_once("LCD dual screen mode not supported\n");
lcdLpbase = data;
DPRINTF(PL111, "###### Lower panel base set to: %#x #######\n", lcdLpbase);
break;
case LcdControl:
int old_lcdpwr;
old_lcdpwr = lcdControl.lcdpwr;
lcdControl = data;
DPRINTF(PL111, "LCD power is:%d\n", lcdControl.lcdpwr);
// LCD power enable
if (lcdControl.lcdpwr && !old_lcdpwr) {
updateVideoParams();
DPRINTF(PL111, " lcd size: height %d width %d\n", height, width);
waterMark = lcdControl.watermark ? 8 : 4;
startDma();
}
break;
case LcdImsc:
lcdImsc = data;
if (lcdImsc.vcomp)
panic("Interrupting on vcomp not supported\n");
lcdMis = lcdImsc & lcdRis;
if (!lcdMis)
gic->clearInt(intNum);
break;
case LcdRis:
panic("LCD register at offset %#x is Read-Only\n", daddr);
break;
case LcdMis:
panic("LCD register at offset %#x is Read-Only\n", daddr);
break;
case LcdIcr:
lcdRis = lcdRis & ~data;
lcdMis = lcdImsc & lcdRis;
if (!lcdMis)
gic->clearInt(intNum);
break;
case LcdUpCurr:
panic("LCD register at offset %#x is Read-Only\n", daddr);
break;
case LcdLpCurr:
panic("LCD register at offset %#x is Read-Only\n", daddr);
break;
case ClcdCrsrCtrl:
clcdCrsrCtrl = data;
break;
case ClcdCrsrConfig:
clcdCrsrConfig = data;
break;
case ClcdCrsrPalette0:
clcdCrsrPalette0 = data;
break;
case ClcdCrsrPalette1:
clcdCrsrPalette1 = data;
break;
case ClcdCrsrXY:
clcdCrsrXY = data;
break;
case ClcdCrsrClip:
clcdCrsrClip = data;
break;
case ClcdCrsrImsc:
clcdCrsrImsc = data;
break;
case ClcdCrsrIcr:
clcdCrsrIcr = data;
break;
case ClcdCrsrRis:
panic("CLCD register at offset %#x is Read-Only\n", daddr);
break;
case ClcdCrsrMis:
panic("CLCD register at offset %#x is Read-Only\n", daddr);
break;
default:
if (daddr >= CrsrImage && daddr <= 0xBFC) {
// CURSOR IMAGE
int index;
index = (daddr - CrsrImage) >> 2;
cursorImage[index] = data;
break;
} else if (daddr >= LcdPalette && daddr <= 0x3FC) {
// LCD Palette
int index;
index = (daddr - LcdPalette) >> 2;
lcdPalette[index] = data;
break;
} else {
panic("Tried to write PL111 register at offset %#x that "
"doesn't exist\n", daddr);
break;
}
}
pkt->makeAtomicResponse();
return pioDelay;
}
PixelConverter
Pl111::pixelConverter() const
{
unsigned rw, gw, bw;
unsigned offsets[3];
switch (lcdControl.lcdbpp) {
case bpp24:
rw = gw = bw = 8;
offsets[0] = 0;
offsets[1] = 8;
offsets[2] = 16;
break;
case bpp16m565:
rw = 5;
gw = 6;
bw = 5;
offsets[0] = 0;
offsets[1] = 5;
offsets[2] = 11;
break;
default:
panic("Unimplemented video mode\n");
}
if (lcdControl.bgr) {
return PixelConverter(
bytesPerPixel,
offsets[2], offsets[1], offsets[0],
rw, gw, bw,
LittleEndianByteOrder);
} else {
return PixelConverter(
bytesPerPixel,
offsets[0], offsets[1], offsets[2],
rw, gw, bw,
LittleEndianByteOrder);
}
}
void
Pl111::updateVideoParams()
{
if (lcdControl.lcdbpp == bpp24) {
bytesPerPixel = 4;
} else if (lcdControl.lcdbpp == bpp16m565) {
bytesPerPixel = 2;
}
fb.resize(width, height);
converter = pixelConverter();
// Workaround configuration bugs where multiple display
// controllers are attached to the same VNC server by reattaching
// enabled devices. This isn't ideal, but works as long as only
// one display controller is active at a time.
if (lcdControl.lcdpwr && vnc)
vnc->setFrameBuffer(&fb);
}
void
Pl111::startDma()
{
if (dmaPendingNum != 0 || readEvent.scheduled())
return;
readFramebuffer();
}
void
Pl111::readFramebuffer()
{
// initialization for dma read from frame buffer to dma buffer
uint32_t length = height * width;
if (startAddr != lcdUpbase)
startAddr = lcdUpbase;
// Updating base address, interrupt if we're supposed to
lcdRis.baseaddr = 1;
if (!intEvent.scheduled())
schedule(intEvent, clockEdge());
curAddr = 0;
startTime = curTick();
maxAddr = static_cast<Addr>(length * bytesPerPixel);
DPRINTF(PL111, " lcd frame buffer size of %d bytes \n", maxAddr);
fillFifo();
}
void
Pl111::fillFifo()
{
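    // Issue up to maxOutstandingDma concurrent reads of dmaSize bytes each;
    // dmaDone() re-schedules fillFifoEvent once the number of pending DMAs
    // drops below the watermark, keeping the FIFO topped up.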
    while ((dmaPendingNum < maxOutstandingDma) && (maxAddr >= curAddr + dmaSize)) {
// concurrent dma reads need different dma done events
// due to assertion in scheduling state
++dmaPendingNum;
assert(!dmaDoneEventFree.empty());
DmaDoneEvent *event(dmaDoneEventFree.back());
dmaDoneEventFree.pop_back();
assert(!event->scheduled());
// We use a uncachable request here because the requests from the CPU
// will be uncacheable as well. If we have uncacheable and cacheable
// requests in the memory system for the same address it won't be
// pleased
dmaPort.dmaAction(MemCmd::ReadReq, curAddr + startAddr, dmaSize,
event, curAddr + dmaBuffer,
0, Request::UNCACHEABLE);
curAddr += dmaSize;
}
}
void
Pl111::dmaDone()
{
DPRINTF(PL111, "DMA Done\n");
Tick maxFrameTime = lcdTiming2.cpl * height * pixelClock;
--dmaPendingNum;
if (maxAddr == curAddr && !dmaPendingNum) {
if ((curTick() - startTime) > maxFrameTime) {
warn("CLCD controller buffer underrun, took %d ticks when should"
" have taken %d\n", curTick() - startTime, maxFrameTime);
lcdRis.underflow = 1;
if (!intEvent.scheduled())
schedule(intEvent, clockEdge());
}
assert(!readEvent.scheduled());
fb.copyIn(dmaBuffer, converter);
if (vnc)
vnc->setDirty();
if (enableCapture) {
DPRINTF(PL111, "-- write out frame buffer into bmp\n");
if (!pic)
pic = simout.create(csprintf("%s.framebuffer.bmp", sys->name()),
true);
assert(pic);
pic->stream()->seekp(0);
bmp.write(*pic->stream());
}
// schedule the next read based on when the last frame started
// and the desired fps (i.e. maxFrameTime), we turn the
// argument into a relative number of cycles in the future
if (lcdControl.lcden)
schedule(readEvent, clockEdge(ticksToCycles(startTime -
curTick() +
maxFrameTime)));
}
if (dmaPendingNum > (maxOutstandingDma - waterMark))
return;
if (!fillFifoEvent.scheduled())
schedule(fillFifoEvent, clockEdge());
}
void
Pl111::serialize(CheckpointOut &cp) const
{
DPRINTF(PL111, "Serializing ARM PL111\n");
uint32_t lcdTiming0_serial = lcdTiming0;
SERIALIZE_SCALAR(lcdTiming0_serial);
uint32_t lcdTiming1_serial = lcdTiming1;
SERIALIZE_SCALAR(lcdTiming1_serial);
uint32_t lcdTiming2_serial = lcdTiming2;
SERIALIZE_SCALAR(lcdTiming2_serial);
uint32_t lcdTiming3_serial = lcdTiming3;
SERIALIZE_SCALAR(lcdTiming3_serial);
SERIALIZE_SCALAR(lcdUpbase);
SERIALIZE_SCALAR(lcdLpbase);
uint32_t lcdControl_serial = lcdControl;
SERIALIZE_SCALAR(lcdControl_serial);
uint8_t lcdImsc_serial = lcdImsc;
SERIALIZE_SCALAR(lcdImsc_serial);
uint8_t lcdRis_serial = lcdRis;
SERIALIZE_SCALAR(lcdRis_serial);
uint8_t lcdMis_serial = lcdMis;
SERIALIZE_SCALAR(lcdMis_serial);
SERIALIZE_ARRAY(lcdPalette, LcdPaletteSize);
SERIALIZE_ARRAY(cursorImage, CrsrImageSize);
SERIALIZE_SCALAR(clcdCrsrCtrl);
SERIALIZE_SCALAR(clcdCrsrConfig);
SERIALIZE_SCALAR(clcdCrsrPalette0);
SERIALIZE_SCALAR(clcdCrsrPalette1);
SERIALIZE_SCALAR(clcdCrsrXY);
SERIALIZE_SCALAR(clcdCrsrClip);
uint8_t clcdCrsrImsc_serial = clcdCrsrImsc;
SERIALIZE_SCALAR(clcdCrsrImsc_serial);
uint8_t clcdCrsrIcr_serial = clcdCrsrIcr;
SERIALIZE_SCALAR(clcdCrsrIcr_serial);
uint8_t clcdCrsrRis_serial = clcdCrsrRis;
SERIALIZE_SCALAR(clcdCrsrRis_serial);
uint8_t clcdCrsrMis_serial = clcdCrsrMis;
SERIALIZE_SCALAR(clcdCrsrMis_serial);
SERIALIZE_SCALAR(height);
SERIALIZE_SCALAR(width);
SERIALIZE_SCALAR(bytesPerPixel);
SERIALIZE_ARRAY(dmaBuffer, buffer_size);
SERIALIZE_SCALAR(startTime);
SERIALIZE_SCALAR(startAddr);
SERIALIZE_SCALAR(maxAddr);
SERIALIZE_SCALAR(curAddr);
SERIALIZE_SCALAR(waterMark);
SERIALIZE_SCALAR(dmaPendingNum);
Tick int_event_time = 0;
Tick read_event_time = 0;
Tick fill_fifo_event_time = 0;
if (readEvent.scheduled())
read_event_time = readEvent.when();
if (fillFifoEvent.scheduled())
fill_fifo_event_time = fillFifoEvent.when();
if (intEvent.scheduled())
int_event_time = intEvent.when();
SERIALIZE_SCALAR(read_event_time);
SERIALIZE_SCALAR(fill_fifo_event_time);
SERIALIZE_SCALAR(int_event_time);
vector<Tick> dma_done_event_tick;
dma_done_event_tick.resize(maxOutstandingDma);
for (int x = 0; x < maxOutstandingDma; x++) {
dma_done_event_tick[x] = dmaDoneEventAll[x].scheduled() ?
dmaDoneEventAll[x].when() : 0;
}
SERIALIZE_CONTAINER(dma_done_event_tick);
}
void
Pl111::unserialize(CheckpointIn &cp)
{
DPRINTF(PL111, "Unserializing ARM PL111\n");
uint32_t lcdTiming0_serial;
UNSERIALIZE_SCALAR(lcdTiming0_serial);
lcdTiming0 = lcdTiming0_serial;
uint32_t lcdTiming1_serial;
UNSERIALIZE_SCALAR(lcdTiming1_serial);
lcdTiming1 = lcdTiming1_serial;
uint32_t lcdTiming2_serial;
UNSERIALIZE_SCALAR(lcdTiming2_serial);
lcdTiming2 = lcdTiming2_serial;
uint32_t lcdTiming3_serial;
UNSERIALIZE_SCALAR(lcdTiming3_serial);
lcdTiming3 = lcdTiming3_serial;
UNSERIALIZE_SCALAR(lcdUpbase);
UNSERIALIZE_SCALAR(lcdLpbase);
uint32_t lcdControl_serial;
UNSERIALIZE_SCALAR(lcdControl_serial);
lcdControl = lcdControl_serial;
uint8_t lcdImsc_serial;
UNSERIALIZE_SCALAR(lcdImsc_serial);
lcdImsc = lcdImsc_serial;
uint8_t lcdRis_serial;
UNSERIALIZE_SCALAR(lcdRis_serial);
lcdRis = lcdRis_serial;
uint8_t lcdMis_serial;
UNSERIALIZE_SCALAR(lcdMis_serial);
lcdMis = lcdMis_serial;
UNSERIALIZE_ARRAY(lcdPalette, LcdPaletteSize);
UNSERIALIZE_ARRAY(cursorImage, CrsrImageSize);
UNSERIALIZE_SCALAR(clcdCrsrCtrl);
UNSERIALIZE_SCALAR(clcdCrsrConfig);
UNSERIALIZE_SCALAR(clcdCrsrPalette0);
UNSERIALIZE_SCALAR(clcdCrsrPalette1);
UNSERIALIZE_SCALAR(clcdCrsrXY);
UNSERIALIZE_SCALAR(clcdCrsrClip);
uint8_t clcdCrsrImsc_serial;
UNSERIALIZE_SCALAR(clcdCrsrImsc_serial);
clcdCrsrImsc = clcdCrsrImsc_serial;
uint8_t clcdCrsrIcr_serial;
UNSERIALIZE_SCALAR(clcdCrsrIcr_serial);
clcdCrsrIcr = clcdCrsrIcr_serial;
uint8_t clcdCrsrRis_serial;
UNSERIALIZE_SCALAR(clcdCrsrRis_serial);
clcdCrsrRis = clcdCrsrRis_serial;
uint8_t clcdCrsrMis_serial;
UNSERIALIZE_SCALAR(clcdCrsrMis_serial);
clcdCrsrMis = clcdCrsrMis_serial;
UNSERIALIZE_SCALAR(height);
UNSERIALIZE_SCALAR(width);
UNSERIALIZE_SCALAR(bytesPerPixel);
UNSERIALIZE_ARRAY(dmaBuffer, buffer_size);
UNSERIALIZE_SCALAR(startTime);
UNSERIALIZE_SCALAR(startAddr);
UNSERIALIZE_SCALAR(maxAddr);
UNSERIALIZE_SCALAR(curAddr);
UNSERIALIZE_SCALAR(waterMark);
UNSERIALIZE_SCALAR(dmaPendingNum);
Tick int_event_time = 0;
Tick read_event_time = 0;
Tick fill_fifo_event_time = 0;
UNSERIALIZE_SCALAR(read_event_time);
UNSERIALIZE_SCALAR(fill_fifo_event_time);
UNSERIALIZE_SCALAR(int_event_time);
if (int_event_time)
schedule(intEvent, int_event_time);
if (read_event_time)
schedule(readEvent, read_event_time);
if (fill_fifo_event_time)
schedule(fillFifoEvent, fill_fifo_event_time);
vector<Tick> dma_done_event_tick;
dma_done_event_tick.resize(maxOutstandingDma);
UNSERIALIZE_CONTAINER(dma_done_event_tick);
dmaDoneEventFree.clear();
for (int x = 0; x < maxOutstandingDma; x++) {
if (dma_done_event_tick[x])
schedule(dmaDoneEventAll[x], dma_done_event_tick[x]);
else
dmaDoneEventFree.push_back(&dmaDoneEventAll[x]);
}
assert(maxOutstandingDma - dmaDoneEventFree.size() == dmaPendingNum);
if (lcdControl.lcdpwr) {
updateVideoParams();
fb.copyIn(dmaBuffer, converter);
if (vnc)
vnc->setDirty();
}
}
void
Pl111::generateInterrupt()
{
DPRINTF(PL111, "Generate Interrupt: lcdImsc=0x%x lcdRis=0x%x lcdMis=0x%x\n",
(uint32_t)lcdImsc, (uint32_t)lcdRis, (uint32_t)lcdMis);
lcdMis = lcdImsc & lcdRis;
if (lcdMis.underflow || lcdMis.baseaddr || lcdMis.vcomp || lcdMis.ahbmaster) {
gic->sendInt(intNum);
DPRINTF(PL111, " -- Generated\n");
}
}
AddrRangeList
Pl111::getAddrRanges() const
{
AddrRangeList ranges;
ranges.push_back(RangeSize(pioAddr, pioSize));
return ranges;
}
Pl111 *
Pl111Params::create()
{
return new Pl111(this);
}
| {
"content_hash": "3570ba70ba9f1e52f2605e8ca1e36abc",
"timestamp": "",
"source": "github",
"line_count": 746,
"max_line_length": 84,
"avg_line_length": 27.33378016085791,
"alnum_prop": 0.5981070079937227,
"repo_name": "TUD-OS/gem5-dtu",
"id": "7560ec4b95727aab9f4bcdf1032fd469406b8133",
"size": "22560",
"binary": false,
"copies": "5",
"ref": "refs/heads/dtu-mmu",
"path": "src/dev/arm/pl111.cc",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "648342"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "C",
"bytes": "1717604"
},
{
"name": "C++",
"bytes": "35149040"
},
{
"name": "CMake",
"bytes": "79529"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Forth",
"bytes": "15790"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "75007"
},
{
"name": "Makefile",
"bytes": "68265"
},
{
"name": "Objective-C",
"bytes": "24714"
},
{
"name": "Perl",
"bytes": "33696"
},
{
"name": "Python",
"bytes": "6073714"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "14236"
},
{
"name": "Shell",
"bytes": "101649"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "Vim Script",
"bytes": "4335"
},
{
"name": "sed",
"bytes": "3927"
}
],
"symlink_target": ""
} |
from tkinter import *
"""/*
Class: ArenaPanel
Abstract super class for all <Panel> objects to be used as children in
the <ArenaGUI>.
Provides an interface for panel specific methods, and some helper
methods
*/"""
class ArenaPanel(LabelFrame):
"""/*
Group: Constructors
*/"""
"""/*
Constructor: __init__
Create the <Panel> and initialise any instance variables it may have
Parameters:
obj master - The parent of this panel
string title - The title to be displayed on the panel
int width - The width of the panel in pixels
int height - The height of the panel in pixels
array *args - An array of extra arguments to be passed in
dict **kwargs - A dict of extra keyword arguments to be passed in
*/"""
def __init__(self, master, title, width, height, *args, **kwargs):
super(ArenaPanel, self).__init__(
master, text=title, width=width, height=height)
"""/*
Group: Variables
*/"""
"""/*
var: _master
The parent of the window
*/"""
self._master = master
self._initialiseVariables(*args, **kwargs)
self._initialiseChildren()
"""/*
Group: Public Methods
*/"""
"""/*
Function: getTitle
Return the title of this ArenaPanel instance
Returns:
str title - The title of this ArenaPanel instance
*/"""
def getTitle(self):
return self.cget("text")
"""/*
Function: close
            Handles the closing of the panel, including checking whether the panel
                can be closed and shutting down any service it manages
*/"""
def close(self):
        raise NotImplementedError("This method must be overridden")
"""/*
Group: Private Methods
*/"""
"""/*
Function: _popup
Allows any subclass to create a popup for displaying errors
Parameters:
string title - The title of the popup
string message - The error message to be displayed
*/"""
def _popup(self, title, message):
popup = Toplevel(self._master)
popup.title(title)
Label(popup, text=message).pack(fill=BOTH, expand=1)
Button(popup, command=popup.destroy, text="Close").pack(
fill=BOTH, expand=1)
"""/*
Group: Abstract Methods
*/"""
"""/*
Function: _initialiseVariables
Initialise all instance variables to be used in this panel
Parameters:
array *args - An array of extra arguments needed
dict **kwargs - A dict of extra keyword arguments needed
*/"""
def _initialiseVariables(self, *args, **kwargs):
        raise NotImplementedError("This method must be overridden")
"""/*
Function: _initialiseChildren
Create any children of this panel and add them into the panel
*/"""
def _initialiseChildren(self):
        raise NotImplementedError("This method must be overridden")
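# Example: a minimal sketch of a concrete panel (the class name, label text
# and close behaviour below are illustrative assumptions, not part of the
# original module).
class ExamplePanel(ArenaPanel):
    def _initialiseVariables(self, *args, **kwargs):
        # This sketch needs no extra state beyond a closed flag.
        self._closed = False
    def _initialiseChildren(self):
        # A single label stands in for the panel's real child widgets.
        Label(self, text="Example panel").pack(fill=BOTH, expand=1)
    def close(self):
        # A real panel would first stop any service it owns.
        self._closed = True
        self.destroy()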
| {
"content_hash": "6f671cdefbad3d1d376c30ab67e06c91",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 77,
"avg_line_length": 28.72222222222222,
"alnum_prop": 0.5838168923275306,
"repo_name": "ExceptionalVoid/Arena",
"id": "c7416412b198cd6e34208a09aa9f09a49b94aede",
"size": "3102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "local/ArenaPanel.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "399"
},
{
"name": "HTML",
"bytes": "8865"
},
{
"name": "JavaScript",
"bytes": "54859"
},
{
"name": "Python",
"bytes": "84596"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="utf-8"?>
<shape xmlns:android="http://schemas.android.com/apk/res/android"
android:shape="rectangle">
<solid android:color="#fff0f0f0"/>
<corners android:radius="2dp"/>
</shape>
| {
"content_hash": "56bbd4eda31c5de0e68caaa55d24d4be",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 65,
"avg_line_length": 31.571428571428573,
"alnum_prop": 0.6742081447963801,
"repo_name": "hugeterry/UpdateDemo",
"id": "a0dc59e24cae9a4350fdec6be4dfca87767015c0",
"size": "221",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "updatefun/src/main/res/drawable/bg_button_pressed.xml",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "27226"
}
],
"symlink_target": ""
} |
cask "script-debugger" do
version "8.0.4-8A52"
sha256 "1f0b33fa80da72dad060a38807729a03b03c1bcd81e029d7f36c9617896e50e6"
url "https://s3.amazonaws.com/latenightsw.com/ScriptDebugger#{version}.dmg",
verified: "s3.amazonaws.com/latenightsw.com/"
name "Script Debugger"
desc "Integrated development environment focused entirely on AppleScript"
homepage "https://latenightsw.com/"
livecheck do
url "https://latenightsw.com/download/"
regex(/action=.*?ScriptDebugger(\d+(?:\.\d+)+-\d+A\d+)\.dmg/i)
end
depends_on macos: ">= :mojave"
app "Script Debugger.app"
zap trash: [
"~/Library/Application Support/Script Debugger #{version.major}",
"~/Library/Caches/com.latenightsw.ScriptDebugger#{version.major}",
"~/Library/Preferences/com.latenightsw.ScriptDebugger#{version.major}.plist",
]
end
| {
"content_hash": "4a18028407db858e5f500b12f33d5f8b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 81,
"avg_line_length": 33.56,
"alnum_prop": 0.7210965435041716,
"repo_name": "sscotth/homebrew-cask",
"id": "9a63bca01a28ef65f9ac8cf93e83a1f066c6dc0f",
"size": "839",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Casks/script-debugger.rb",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "249"
},
{
"name": "Python",
"bytes": "3630"
},
{
"name": "Ruby",
"bytes": "2969714"
},
{
"name": "Shell",
"bytes": "32035"
}
],
"symlink_target": ""
} |
package za.org.grassroot.core.domain.notification;
import za.org.grassroot.core.domain.LiveWireLog;
import za.org.grassroot.core.domain.User;
import za.org.grassroot.core.enums.AlertPreference;
import za.org.grassroot.core.enums.NotificationDetailedType;
import javax.persistence.DiscriminatorValue;
import javax.persistence.Entity;
/**
* Created by luke on 2017/05/16.
*/
@Entity
@DiscriminatorValue("LIVEWIRE_TO_REVIEW")
public class LiveWireToReviewNotification extends LiveWireNotification {
@Override
public NotificationDetailedType getNotificationDetailedType() {
return NotificationDetailedType.LIVEWIRE_TO_REVIEW;
}
private LiveWireToReviewNotification() {
// for JPA
}
public LiveWireToReviewNotification(User destination, String message, LiveWireLog log) {
super(destination, message, log);
this.priority = AlertPreference.NOTIFY_ONLY_NEW.getPriority();
}
}
| {
"content_hash": "becdd4a6f19929579a5c5adde3b455bc",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 92,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.7668449197860963,
"repo_name": "mokoka/grassroot-platform",
"id": "659d71c0253d40efc991911812e9da05fa8e3d7e",
"size": "935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grassroot-core/src/main/java/za/org/grassroot/core/domain/notification/LiveWireToReviewNotification.java",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "15432"
},
{
"name": "CSS",
"bytes": "169062"
},
{
"name": "HTML",
"bytes": "481968"
},
{
"name": "Java",
"bytes": "2714415"
},
{
"name": "JavaScript",
"bytes": "127382"
},
{
"name": "PLSQL",
"bytes": "4704"
},
{
"name": "PLpgSQL",
"bytes": "4905"
},
{
"name": "SQLPL",
"bytes": "3679"
},
{
"name": "Shell",
"bytes": "1633"
}
],
"symlink_target": ""
} |
package com.google.template.soy.data;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import com.google.template.soy.data.SanitizedContent.ContentKind;
import java.io.IOException;
/** A simple forwarding implementation that forwards all calls to a delegate. */
public abstract class ForwardingLoggingAdvisingAppendable extends LoggingAdvisingAppendable {
protected final LoggingAdvisingAppendable delegate;
protected ForwardingLoggingAdvisingAppendable(LoggingAdvisingAppendable delegate) {
this.delegate = checkNotNull(delegate);
}
@Override
public boolean softLimitReached() {
return delegate.softLimitReached();
}
@CanIgnoreReturnValue
@Override
public LoggingAdvisingAppendable append(CharSequence csq) throws IOException {
delegate.append(csq);
return this;
}
@CanIgnoreReturnValue
@Override
public LoggingAdvisingAppendable append(CharSequence csq, int start, int end) throws IOException {
delegate.append(csq, start, end);
return this;
}
@CanIgnoreReturnValue
@Override
public LoggingAdvisingAppendable append(char c) throws IOException {
delegate.append(c);
return this;
}
@CanIgnoreReturnValue
@Override
public LoggingAdvisingAppendable enterLoggableElement(LogStatement statement) {
delegate.enterLoggableElement(statement);
return this;
}
@CanIgnoreReturnValue
@Override
public LoggingAdvisingAppendable exitLoggableElement() {
delegate.exitLoggableElement();
return this;
}
@Override
protected void notifyKindAndDirectionality(ContentKind kind, Dir dir) throws IOException {
delegate.setKindAndDirectionality(kind, dir);
}
@CanIgnoreReturnValue
@Override
public LoggingAdvisingAppendable appendLoggingFunctionInvocation(
LoggingFunctionInvocation funCall, ImmutableList<Function<String, String>> escapers)
throws IOException {
delegate.appendLoggingFunctionInvocation(funCall, escapers);
return this;
}
@Override
public void flushBuffers(int depth) throws IOException {
if (depth > 0) {
delegate.flushBuffers(depth - 1);
}
}
}
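/*
 * Usage sketch (illustrative; not part of this file's API): a subclass only
 * overrides the calls it wants to intercept and inherits forwarding for the
 * rest, e.g.
 *   final class UpperCasingAppendable extends ForwardingLoggingAdvisingAppendable {
 *     UpperCasingAppendable(LoggingAdvisingAppendable delegate) { super(delegate); }
 *     @Override public LoggingAdvisingAppendable append(CharSequence csq) throws IOException {
 *       return super.append(csq.toString().toUpperCase());
 *     }
 *   }
 */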
| {
"content_hash": "d740af667ec3d8637dd8a3455558dd44",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 100,
"avg_line_length": 28.4375,
"alnum_prop": 0.7762637362637362,
"repo_name": "google/closure-templates",
"id": "ca9c04ea9c19e9e6f51ba538d1ed277f05f0b9ae",
"size": "2869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "java/src/com/google/template/soy/data/ForwardingLoggingAdvisingAppendable.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Closure Templates",
"bytes": "3332"
},
{
"name": "HTML",
"bytes": "21890"
},
{
"name": "Java",
"bytes": "8407238"
},
{
"name": "JavaScript",
"bytes": "248481"
},
{
"name": "Python",
"bytes": "92193"
},
{
"name": "Starlark",
"bytes": "239786"
},
{
"name": "TypeScript",
"bytes": "52777"
}
],
"symlink_target": ""
} |
var expect = require('chai').expect;
describe('Collection', function () {
var Model = require('../lib/model'),
Collection = require('../lib/collection'),
Vow = require('vow'),
TestModel, TestCollection, collection, collectionData;
collectionData = [{
id: 1,
a: 'a-1'
}, {
id: 2,
a: 'a-2'
}, {
id: 3,
a: 'a-3'
}];
TestModel = Model.inherit({
attributes: {
id: Model.attributeTypes.Id,
a: Model.attributeTypes.String
},
storage: Model.Storage.inherit({
remove: function () {
return Vow.fulfill().delay(0);
}
})
});
TestCollection = Collection.inherit({
modelType: TestModel
});
beforeEach(function () {
collection = new TestCollection(collectionData);
});
describe('length', function () {
it('should return count of models', function () {
expect(collection.length).to.be.equal(3);
});
});
describe('toJSON', function () {
it('should serialize collection', function () {
var data = collection.toJSON();
expect(data).to.be.a('array');
expect(data).to.be.deep.equal(collectionData);
});
});
describe('at', function () {
it('should return model by index', function () {
expect(collection.at(0)).to.be.instanceOf(TestModel);
expect(collection.at(0).toJSON()).to.be.deep.equal(collectionData[0]);
});
});
describe('get', function () {
it('should return model by id', function () {
expect(collection.get(1)).to.be.instanceOf(TestModel);
expect(collection.get(1).toJSON()).to.be.deep.equal({id: 1, a: 'a-1'});
});
it('should work correctly after changing id', function (done) {
var model = collection.get(1);
model.set('id', 4);
model.ready().then(function () {
expect(collection.get(4)).to.be.instanceOf(TestModel);
expect(collection.get(4).toJSON()).to.be.deep.equal({id: 4, a: 'a-1'});
done();
});
});
});
describe('Array methods', function () {
describe('forEach', function () {
it('should iterate on models', function () {
var data = [];
collection.forEach(function (model, index) {
expect(model).to.be.instanceOf(TestModel);
expect(index).to.be.a('number');
data[index] = model.toJSON();
});
expect(data).to.be.deep.equal(collectionData);
});
});
describe('some', function () {
it('should return true if some model fulfills conditions', function () {
var result, index;
result = collection.some(function (model, i) {
expect(model).to.be.instanceOf(TestModel);
expect(i).to.be.a('number');
index = i;
return model.get('a') === 'a-2';
});
expect(index).to.be.equal(1);
expect(result).to.be.true;
});
it('should return false if there is no models fulfills conditions', function () {
var result, index;
result = collection.some(function (model, i) {
expect(model).to.be.instanceOf(TestModel);
expect(i).to.be.a('number');
index = i;
return model.get('a') === 'a-4';
});
expect(index).to.be.equal(2);
expect(result).to.be.false;
});
});
describe('every', function () {
it('should return true if every model fulfills conditions', function () {
var result, index;
result = collection.every(function (model, i) {
expect(model).to.be.instanceOf(TestModel);
expect(i).to.be.a('number');
index = i;
return model.getId() >= 1;
});
expect(index).to.be.equal(2);
expect(result).to.be.true;
});
it('should return false if some model does not fulfill conditions', function () {
var result, index;
result = collection.every(function (model, i) {
expect(model).to.be.instanceOf(TestModel);
expect(i).to.be.a('number');
index = i;
return model.getId() > 1;
});
expect(index).to.be.equal(0);
expect(result).to.be.false;
});
});
describe('filter', function () {
it('should return array of suitable models', function () {
                var models = collection.filter(function (model, index) {
expect(index).to.be.a('number');
return model.getId() < 3;
});
expect(models.length).to.be.equal(2);
expect(models[0]).to.be.instanceOf(TestModel);
expect(models[1]).to.be.instanceOf(TestModel);
expect(models[0].toJSON()).to.be.deep.equal(collectionData[0]);
expect(models[1].toJSON()).to.be.deep.equal(collectionData[1]);
});
});
describe('map', function () {
            it('should return new array by models', function () {
var ids = collection.map(function (model, index) {
expect(model).to.be.instanceOf(TestModel);
expect(index).to.be.a('number');
return model.getId();
});
                expect(ids).to.be.deep.equal([1, 2, 3]);
});
});
describe('reduce', function () {
it('should return result by all models', function () {
var sum = collection.reduce(function (result, model, index) {
expect(model).to.be.instanceOf(TestModel);
expect(index).to.be.a('number');
expect(result).to.be.a('number');
return result + model.getId();
}, 0);
expect(sum).to.be.equal(6);
});
});
describe('find', function () {
it('should return first suitable model', function (){
var model, index;
model = collection.find(function (model, i) {
expect(model).to.be.instanceOf(TestModel);
expect(i).to.be.a('number');
index = i;
return model.getId() === 2;
});
expect(index).to.be.equal(1);
expect(model).to.be.instanceOf(TestModel);
expect(model).to.be.equal(collection.at(1));
});
});
});
describe('where', function () {
it('should return suitable models', function () {
var models = collection.where({id: 1});
expect(models).to.be.an('array');
expect(models.length).to.be.equal(1);
expect(models[0]).to.be.instanceOf(TestModel);
expect(models[0].toJSON()).to.be.deep.equal(collectionData[0]);
});
});
describe('findWhere', function () {
it('should return first suitable model', function () {
var model = collection.findWhere({id: 1});
expect(model).to.be.instanceOf(TestModel);
expect(model.toJSON()).to.be.deep.equal(collectionData[0]);
});
});
describe('pluck', function () {
it('should return an array of one attribute', function () {
var ids = collection.pluck('id');
expect(ids).to.be.deep.equal([1, 2, 3]);
});
});
describe('add', function () {
it('should trigger add event', function () {
var count = 0;
collection.on('add', function (model, options) {
count++;
expect(model).to.be.instanceOf(TestModel);
expect(options).to.be.deep.equal({at: 3});
});
collection.add({id: 4, a: 'a-4'});
expect(count).to.be.equal(1);
});
it('should be able to add model on specific position', function () {
var modelData = {id: 4, a: 'a-4'};
collection.add(modelData, {at: 1});
expect(collection.length).to.be.equal(4);
expect(collection.at(1).toJSON()).to.be.deep.equal(modelData);
});
it('should be able to add model on first position', function () {
var modelData = {id: 4, a: 'a-4'};
collection.add(modelData, {at: 0});
expect(collection.length).to.be.equal(4);
expect(collection.at(0).toJSON()).to.be.deep.equal(modelData);
});
it('should be able to add multiple models', function () {
var count = 0;
collection.on('add', function () {
count++;
});
collection.add([{id: 4, a: 'a-4'}, {id: 5, a: 'a-5'}]);
expect(collection.length).to.be.equal(5);
expect(count).to.be.equal(2);
});
it('should be able to add multiple models on specific position', function () {
var count = 0;
collection.on('add', function (model, options) {
if (count === 0) {
expect(model.toJSON()).to.be.deep.equal({id: 4, a: 'a-4'});
expect(options).to.be.deep.equal({at: 1});
} else {
expect(model.toJSON()).to.be.deep.equal({id: 5, a: 'a-5'});
expect(options).to.be.deep.equal({at: 2});
}
count++;
});
collection.add([{id: 4, a: 'a-4'}, {id: 5, a: 'a-5'}], {at: 1});
expect(collection.length).to.be.equal(5);
expect(collection.toJSON()).to.be.deep.equal([
{id: 1, a: 'a-1'},
{id: 4, a: 'a-4'},
{id: 5, a: 'a-5'},
{id: 2, a: 'a-2'},
{id: 3, a: 'a-3'}
]);
});
});
describe('remove', function () {
it('should trigger remove event', function () {
var count = 0;
collection.on('remove', function (model, options) {
count++;
expect(model).to.be.instanceOf(TestModel);
expect(options).to.be.deep.equal({at: 1});
});
collection.remove(collection.at(1));
expect(count).to.be.equal(1);
expect(collection.length).to.be.equal(2);
});
it('should be able to remove multiple models', function () {
var count = 0;
collection.on('remove', function (model, options) {
if (count === 0) {
expect(model.toJSON()).to.be.deep.equal({id: 1, a: 'a-1'});
} else {
expect(model.toJSON()).to.be.deep.equal({id: 2, a: 'a-2'});
}
expect(options).to.be.deep.equal({at: 0});
count++;
});
collection.remove([collection.at(0), collection.at(1)]);
expect(collection.length).to.be.equal(1);
expect(collection.toJSON()).to.be.deep.equal([{id:3, a: 'a-3'}]);
expect(count).to.be.equal(2);
});
it('should remove from collection destroyed models', function (done) {
var count = 0;
collection.on('remove', function () {
count++;
});
collection.at(0).remove().then(function () {
expect(collection.toJSON()).to.be.deep.equal([{id:2, a: 'a-2'}, {id: 3, a: 'a-3'}]);
expect(count).to.be.equal(1);
done();
});
});
});
describe('set', function () {
it('should set new models', function () {
var triggered = false;
collection.on('reset', function () {
triggered = true;
});
collection.set([{id: 4, a: 'a-4'}, {id: 5, a: 'a-5'}]);
expect(collection.toJSON()).to.be.deep.equal([{id: 4, a: 'a-4'}, {id: 5, a: 'a-5'}]);
expect(triggered).to.be.true;
});
        it('should destruct old model instances after set', function () {
var model = collection.at(0);
collection.set([{id: 4, a: 'a-4'}]);
expect(model.isDestructed()).to.be.true;
});
});
describe('commit', function () {
it('should commit models', function () {
expect(collection.isChanged()).to.be.false;
collection.at(0).set('a', 'a-1!');
expect(collection.isChanged()).to.be.true;
collection.commit();
expect(collection.isChanged()).to.be.false;
expect(collection.at(0).isChanged()).to.be.false;
});
it('should track new models', function () {
            collection.add({id: 4, a: 'a-4'});
expect(collection.isChanged()).to.be.true;
collection.commit();
expect(collection.isChanged()).to.be.false;
});
it('should track removed models', function () {
collection.remove(collection.at(0));
expect(collection.isChanged()).to.be.true;
collection.commit();
expect(collection.isChanged()).to.be.false;
});
});
describe('revert', function () {
it('should revert models', function () {
collection.at(0).set('a', 'a-1!!');
collection.revert();
expect(collection.toJSON()).to.be.deep.equal(collectionData);
});
it('should remove added models', function () {
            collection.add({id: 4, a: 'a-4'});
collection.revert();
expect(collection.toJSON()).to.be.deep.equal(collectionData);
});
it('should add removed models', function () {
collection.remove(collection.at(0));
collection.revert();
expect(collection.toJSON()).to.be.deep.equal(collectionData);
});
});
describe('destruct', function () {
        it('should unsubscribe from nested models', function (done) {
var model = new TestModel({id: 555, a: '123'}),
flag = false;
collection.on('change', function (collectionModel) {
if (collectionModel === model) {
flag = true;
}
});
collection.add(model, {at: 0});
collection.destruct();
model.set('a', 'zzzzz');
model.ready().then(function () {
expect(flag).to.be.equal(false);
done();
}).done();
});
        it('should keep old manually set instances', function () {
var model = new TestModel({id: 4, a: 'a-4'});
collection.set([model]);
collection.destruct();
expect(model.isDestructed()).to.be.false;
});
});
describe('model events', function () {
it('should trigger change event', function (done) {
collection.on('change', function (model) {
expect(model).to.be.instanceOf(TestModel);
expect(model.get('a')).to.be.equal('aa');
done();
});
collection.at(0).set('a', 'aa');
});
it('should trigger change:attribute event', function (done) {
collection.on('change:a', function (model) {
expect(model).to.be.instanceOf(TestModel);
expect(model.get('a')).to.be.equal('aa');
done();
});
collection.at(0).set('a', 'aa');
});
it('should trigger commit event', function (done) {
collection.on('commit', function (model) {
expect(model).to.be.instanceOf(TestModel);
expect(model.get('a')).to.be.equal('aa');
expect(model.isChanged()).to.be.false;
done();
});
collection.at(0).set('a', 'aa');
collection.commit();
});
});
});
| {
"content_hash": "ef180d35e6a87cc72cb162a3caf917f5",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 100,
"avg_line_length": 36.00869565217391,
"alnum_prop": 0.47615310311518955,
"repo_name": "km256/promised-models",
"id": "1045294978116017cf95194bb94d5ab238f8e5b0",
"size": "16564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/collection.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "149891"
}
],
"symlink_target": ""
} |
using System;
using System.Collections.Generic;
using System.Linq;
using LanguageExt;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.CSharp;
using Microsoft.CodeAnalysis.CSharp.Syntax;
using Microsoft.CodeAnalysis.Formatting;
using SJP.Schematic.Core;
using SJP.Schematic.Core.Comments;
using SJP.Schematic.Core.Extensions;
using SJP.Schematic.DataAccess.CodeGeneration;
using SJP.Schematic.DataAccess.Extensions;
using static Microsoft.CodeAnalysis.CSharp.SyntaxFactory;
namespace SJP.Schematic.DataAccess.Poco
{
public class PocoTableGenerator : DatabaseTableGenerator
{
public PocoTableGenerator(INameTranslator nameTranslator, string baseNamespace)
: base(nameTranslator)
{
if (baseNamespace.IsNullOrWhiteSpace())
throw new ArgumentNullException(nameof(baseNamespace));
Namespace = baseNamespace;
}
protected string Namespace { get; }
public override string Generate(IReadOnlyCollection<IRelationalDatabaseTable> tables, IRelationalDatabaseTable table, Option<IRelationalDatabaseTableComments> comment)
{
if (tables == null)
throw new ArgumentNullException(nameof(tables));
if (table == null)
throw new ArgumentNullException(nameof(table));
var schemaNamespace = NameTranslator.SchemaToNamespace(table.Name);
var tableNamespace = !schemaNamespace.IsNullOrWhiteSpace()
? Namespace + "." + schemaNamespace
: Namespace;
var namespaces = table.Columns
.Select(c => c.Type.ClrType.Namespace)
.Where(ns => ns != tableNamespace)
.Distinct()
.OrderNamespaces()
.ToList();
var usingStatements = namespaces
.Select(ns => ParseName(ns))
.Select(UsingDirective)
.ToList();
var namespaceDeclaration = NamespaceDeclaration(ParseName(tableNamespace));
var classDeclaration = BuildClass(table, comment);
var document = CompilationUnit()
.WithUsings(List(usingStatements))
.WithMembers(
SingletonList<MemberDeclarationSyntax>(
namespaceDeclaration
.WithMembers(
SingletonList<MemberDeclarationSyntax>(classDeclaration))));
using var workspace = new AdhocWorkspace();
return Formatter.Format(document, workspace).ToFullString();
}
private ClassDeclarationSyntax BuildClass(IRelationalDatabaseTable table, Option<IRelationalDatabaseTableComments> comment)
{
if (table == null)
throw new ArgumentNullException(nameof(table));
var className = NameTranslator.TableToClassName(table.Name);
var properties = table.Columns
.Select(c => BuildColumn(c, comment, className))
.ToList();
return ClassDeclaration(className)
.AddModifiers(Token(SyntaxKind.PublicKeyword))
.WithLeadingTrivia(BuildTableComment(table.Name, comment))
.WithMembers(List<MemberDeclarationSyntax>(properties));
}
private PropertyDeclarationSyntax BuildColumn(IDatabaseColumn column, Option<IRelationalDatabaseTableComments> comment, string className)
{
if (column == null)
throw new ArgumentNullException(nameof(column));
if (className.IsNullOrWhiteSpace())
throw new ArgumentNullException(nameof(className));
var clrType = column.Type.ClrType;
var propertyName = NameTranslator.ColumnToPropertyName(className, column.Name.LocalName);
var columnTypeSyntax = column.IsNullable
? NullableType(ParseTypeName(clrType.FullName))
: ParseTypeName(clrType.FullName);
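            // Prefer C# keyword aliases (e.g. "int" over "System.Int32") when a mapping exists.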
if (clrType.Namespace == "System" && SyntaxUtilities.TypeSyntaxMap.ContainsKey(clrType.Name))
{
columnTypeSyntax = column.IsNullable
? NullableType(SyntaxUtilities.TypeSyntaxMap[clrType.Name])
: SyntaxUtilities.TypeSyntaxMap[clrType.Name];
}
var baseProperty = PropertyDeclaration(
columnTypeSyntax,
Identifier(propertyName)
);
var columnSyntax = baseProperty
.WithModifiers(SyntaxTokenList.Create(Token(SyntaxKind.PublicKeyword)))
.WithAccessorList(SyntaxUtilities.PropertyGetSetDeclaration)
.WithLeadingTrivia(BuildColumnComment(column.Name, comment));
var isNotNullRefType = !column.IsNullable && !column.Type.ClrType.IsValueType;
if (!isNotNullRefType)
return columnSyntax;
return columnSyntax
.WithInitializer(SyntaxUtilities.NotNullDefault)
.WithSemicolonToken(Token(SyntaxKind.SemicolonToken));
}
private static SyntaxTriviaList BuildTableComment(Identifier tableName, Option<IRelationalDatabaseTableComments> comment)
{
if (tableName == null)
throw new ArgumentNullException(nameof(tableName));
return comment
.Bind(c => c.Comment)
.Match(
SyntaxUtilities.BuildCommentTrivia,
() => SyntaxUtilities.BuildCommentTrivia(new XmlNodeSyntax[]
{
XmlText("A mapping class to query the "),
XmlElement("c", SingletonList<XmlNodeSyntax>(XmlText(tableName.LocalName))),
XmlText(" table.")
})
);
}
private static SyntaxTriviaList BuildColumnComment(Identifier columnName, Option<IRelationalDatabaseTableComments> comment)
{
if (columnName == null)
throw new ArgumentNullException(nameof(columnName));
return comment
.Bind(c => c.ColumnComments.TryGetValue(columnName, out var cc) ? cc : Option<string>.None)
.Match(
SyntaxUtilities.BuildCommentTrivia,
() => SyntaxUtilities.BuildCommentTrivia(new XmlNodeSyntax[]
{
XmlText("The "),
XmlElement("c", SingletonList<XmlNodeSyntax>(XmlText(columnName.LocalName))),
XmlText(" column.")
})
);
}
}
}
| {
"content_hash": "edfe23d564f293c43b52077c0f72df6e",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 175,
"avg_line_length": 41.85625,
"alnum_prop": 0.6063909213080484,
"repo_name": "sjp/SJP.Schema",
"id": "61529c83e038afb59884fd59f9182ea6d443f7e6",
"size": "6699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/SJP.Schematic.DataAccess.Poco/PocoTableGenerator.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "541004"
},
{
"name": "PowerShell",
"bytes": "727"
}
],
"symlink_target": ""
} |
#pragma once
#include "core_api.h"
namespace Core {
/**
* This function is executed when Envision is started. It shows the main window, loads all plug-ins and starts the event
* loop.
*/
int CORE_API coreMain(int argc, char *argv[]);
}
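// Usage sketch (illustrative): a typical entry point simply forwards the
// command-line arguments:
//   int main(int argc, char *argv[]) { return Core::coreMain(argc, argv); }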
| {
"content_hash": "74f78a2dcdeeecb5d9251ee073c8dd84",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 120,
"avg_line_length": 17.428571428571427,
"alnum_prop": 0.6967213114754098,
"repo_name": "lukedirtwalker/Envision",
"id": "2bc558982125a22092529cd3a8acedd02771a45c",
"size": "2073",
"binary": false,
"copies": "4",
"ref": "refs/heads/development",
"path": "Core/src/core.h",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "780"
},
{
"name": "C",
"bytes": "79038"
},
{
"name": "C++",
"bytes": "7980980"
},
{
"name": "CMake",
"bytes": "78247"
},
{
"name": "CSS",
"bytes": "31"
},
{
"name": "HTML",
"bytes": "2314"
},
{
"name": "Java",
"bytes": "138527"
},
{
"name": "Python",
"bytes": "56272"
},
{
"name": "Shell",
"bytes": "30866"
}
],
"symlink_target": ""
} |
FILE(REMOVE_RECURSE
"CMakeFiles/polynomialsolver_2.dir/polynomialsolver.cpp.o"
"polynomialsolver_2.pdb"
"polynomialsolver_2"
)
# Per-language clean rules from dependency scanning.
FOREACH(lang CXX)
INCLUDE(CMakeFiles/polynomialsolver_2.dir/cmake_clean_${lang}.cmake OPTIONAL)
ENDFOREACH(lang)
| {
"content_hash": "74759a069ca45228eb0f4bf398836dd8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 79,
"avg_line_length": 30.2,
"alnum_prop": 0.7913907284768212,
"repo_name": "cmeon/Simplex",
"id": "41218c3d1e0a8d7753400bf374c64b9f7b8c1c3c",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/unsupported/test/CMakeFiles/polynomialsolver_2.dir/cmake_clean.cmake",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2192597"
},
{
"name": "C++",
"bytes": "3497542"
},
{
"name": "CSS",
"bytes": "5151"
},
{
"name": "FORTRAN",
"bytes": "1462981"
},
{
"name": "JavaScript",
"bytes": "7839"
},
{
"name": "Objective-C",
"bytes": "2089"
},
{
"name": "Python",
"bytes": "8750"
},
{
"name": "Shell",
"bytes": "15472"
},
{
"name": "Tcl",
"bytes": "2329"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<!--
Copyright (c) 2013 Samsung Electronics Co., Ltd.
Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors:
Karol Surma <[email protected]>
-->
<html>
<head>
<title>MessagePortCallback_onreceived_all</title>
<meta charset="utf-8"/>
<script type="text/javascript" src="support/unitcommon.js"></script>
</head>
<body>
<div id="log"></div>
<script>
//==== TEST: MessagePortCallback_onreceived_all
//==== LABEL Check if MessagePortCallback works correctly with all arguments
//==== SPEC Tizen Web API:IO:Messageport:MessagePortCallback:onreceived M
//==== SPEC_URL https://developer.tizen.org/help/topic/org.tizen.web.device.apireference/tizen/messageport.html
//==== TEST_CRITERIA CBOA
var t = async_test(document.title), remoteMsgPort, localMsgPort, onReceived, listenerId, messagePortData = [{key: "RESULT",
value: "OK"}], app = tizen.application.getCurrentApplication();
t.step(function () {
onReceived = t.step_func(function (data, remoteMessagePort) {
            assert_equals(data[0].key, messagePortData[0].key, "Received data should be the same");
            assert_equals(data[0].value, messagePortData[0].value, "Received data should be the same");
assert_not_equals(remoteMessagePort, null, "remoteMessagePort should be not null}");
localMsgPort.removeMessagePortListener(listenerId);
t.done();
});
localMsgPort = tizen.messageport.requestLocalMessagePort("remoteMsgPort");
remoteMsgPort = tizen.messageport.requestRemoteMessagePort(app.appInfo.id, "remoteMsgPort");
listenerId = localMsgPort.addMessagePortListener(onReceived);
remoteMsgPort.sendMessage(messagePortData, localMsgPort);
});
</script>
</body>
</html>
| {
"content_hash": "96a189085586db84f2781ca2f0ce0b81",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 123,
"avg_line_length": 36.813559322033896,
"alnum_prop": 0.742633517495396,
"repo_name": "JianfengXu/crosswalk-test-suite",
"id": "9358501f6615302d8dc98728a5d6c04bd11881a2",
"size": "2172",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "webapi/tct-messageport-tizen-tests/messageport/MessagePortCallback_onreceived_all.html",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1693"
},
{
"name": "C",
"bytes": "28136"
},
{
"name": "CSS",
"bytes": "401725"
},
{
"name": "CoffeeScript",
"bytes": "18978"
},
{
"name": "Cucumber",
"bytes": "106420"
},
{
"name": "GLSL",
"bytes": "6990"
},
{
"name": "Groff",
"bytes": "12"
},
{
"name": "HTML",
"bytes": "40865855"
},
{
"name": "Java",
"bytes": "879556"
},
{
"name": "JavaScript",
"bytes": "4750576"
},
{
"name": "Logos",
"bytes": "12"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "PHP",
"bytes": "45437"
},
{
"name": "Python",
"bytes": "4108034"
},
{
"name": "Shell",
"bytes": "851074"
}
],
"symlink_target": ""
} |
<?php
/**
*
* @author Alberto 'alb-i986' Scotto
*/
# DB CONFIG
const HOSTNAME = 'localhost';
const DB_TYPE = 'mysql';
const DB_NAME = 'BootstraPHPed_Blog';
const DB_USERNAME = 'root';
const DB_PASSWORD = 'f0t0sckk';
# OTHER
// relative path of the Location of BootstraPHPed
const URL_BASE = '/';
const SYSTEM_NAME = 'BootstraPHPed Blog';
const WEBMASTER_NAME = 'webmaster';
const WEBMASTER_EMAIL = 'webmaster@localhost';
const DEBUG_MODE = true;
const TZ = 'Europe/Rome';
| {
"content_hash": "6de05aef1af2563c18a1b2a49d36ca0d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 49,
"avg_line_length": 19.68,
"alnum_prop": 0.676829268292683,
"repo_name": "alb-i986/BootstraPHPed",
"id": "27b26121b1cfca3ee256c19437215197c9284304",
"size": "492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "public/config.inc.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2283"
},
{
"name": "JavaScript",
"bytes": "18698"
},
{
"name": "PHP",
"bytes": "1943328"
},
{
"name": "XSLT",
"bytes": "28086"
}
],
"symlink_target": ""
} |
package org.apache.commons.math3.ode;
import org.apache.commons.math3.exception.MathIllegalArgumentException;
import org.apache.commons.math3.exception.MathIllegalStateException;
/** This interface represents a second order integrator for
* differential equations.
*
* <p>The classes which are devoted to solve second order differential
* equations should implement this interface. The problems which can
* be handled should implement the {@link
* SecondOrderDifferentialEquations} interface.</p>
*
* @see SecondOrderDifferentialEquations
* @since 1.2
*/
public interface SecondOrderIntegrator extends ODEIntegrator {
/** Integrate the differential equations up to the given time
* @param equations differential equations to integrate
* @param t0 initial time
* @param y0 initial value of the state vector at t0
* @param yDot0 initial value of the first derivative of the state
* vector at t0
* @param t target time for the integration
   * (can be set to a value smaller than <code>t0</code> for backward integration)
* @param y placeholder where to put the state vector at each
* successful step (and hence at the end of integration), can be the
* same object as y0
* @param yDot placeholder where to put the first derivative of
* the state vector at time t, can be the same object as yDot0
* @throws MathIllegalStateException if the integrator cannot perform integration
* @throws MathIllegalArgumentException if integration parameters are wrong (typically
* too small integration span)
*/
void integrate(SecondOrderDifferentialEquations equations,
double t0, double[] y0, double[] yDot0,
double t, double[] y, double[] yDot)
throws MathIllegalStateException, MathIllegalArgumentException;
}
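/*
 * Usage sketch (illustrative; "integrator" stands for any concrete
 * implementation of this interface):
 *   double[] y    = new double[n]; // state at the target time t
 *   double[] yDot = new double[n]; // first derivative at the target time t
 *   integrator.integrate(equations, t0, y0, yDot0, t, y, yDot);
 */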
| {
"content_hash": "8b88cb19c656b0439637d261f4c8113e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 88,
"avg_line_length": 40.06666666666667,
"alnum_prop": 0.7509706045479756,
"repo_name": "najibg96/NeuralNetworkSimulator",
"id": "86284353d74d99f431e29b0568ec4ed82237d2c6",
"size": "2605",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "src/org/apache/commons/math3/ode/SecondOrderIntegrator.java",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "8727887"
}
],
"symlink_target": ""
} |
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="a"/>
<xs:element name="b"/>
<xs:element name="c"/>
<xs:element name="d"/>
<xs:element name="e"/>
<xs:group name="Cabc">
<xs:choice>
<xs:element ref="a"/>
<xs:element ref="b"/>
<xs:element ref="c"/>
</xs:choice>
</xs:group>
<xs:group name="Cbcd">
<xs:choice>
<xs:element ref="b"/>
<xs:element ref="c"/>
<xs:element ref="d"/>
</xs:choice>
</xs:group>
<xs:group name="Cbe">
<xs:choice>
<xs:element ref="b"/>
<xs:element ref="e"/>
</xs:choice>
</xs:group>
<xs:group name="CabcPCbcdPCbe">
<xs:sequence>
<xs:group ref="Cabc"/>
<xs:group ref="Cbcd"/>
<xs:group ref="Cbe"/>
</xs:sequence>
</xs:group>
<xs:group name="CbcdPCbe">
<xs:sequence>
<xs:group ref="Cbcd"/>
<xs:group ref="Cbe"/>
</xs:sequence>
</xs:group>
<xs:complexType name="aBCde">
<xs:sequence>
<xs:group ref="CabcPCbcdPCbe"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="Bcde">
<xs:sequence>
<xs:group ref="CbcdPCbe"/>
</xs:sequence>
</xs:complexType>
<xs:complexType name="aBCDE">
<xs:sequence>
<xs:group ref="CabcPCbcdPCbe"/>
<xs:group ref="CbcdPCbe"/>
</xs:sequence>
</xs:complexType>
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
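# Expectation encoded below: an element that can occur more than once across
# the combined choice groups is exposed as a plural (list) attribute, while
# an element that can occur at most once is exposed as a scalar.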
class TestTrac0034 (unittest.TestCase):
def test_aBCde (self):
instance = aBCde()
self.assertEqual(None, instance.a)
self.assertEqual([], instance.b)
self.assertEqual([], instance.c)
self.assertEqual(None, instance.d)
self.assertEqual(None, instance.e)
def test_Bcde (self):
instance = Bcde()
self.assertEqual([], instance.b)
self.assertEqual(None, instance.c)
self.assertEqual(None, instance.d)
self.assertEqual(None, instance.e)
def test_aBCDE (self):
instance = aBCDE()
self.assertEqual(None, instance.a)
self.assertEqual([], instance.b)
self.assertEqual([], instance.c)
self.assertEqual([], instance.d)
self.assertEqual([], instance.e)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "24ad1f6c6ae46d4917ab93f07d8e8123",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 60,
"avg_line_length": 25.07,
"alnum_prop": 0.5907459114479457,
"repo_name": "jonfoster/pyxb1",
"id": "1fa7177178ff60827682b1376ca2af7b1212daee",
"size": "2507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/trac/test-trac-0034b.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1564427"
},
{
"name": "Shell",
"bytes": "18946"
}
],
"symlink_target": ""
} |
FROM pritunl/archlinux
## INSTALL YAOURT
RUN pacman -Syu --noconfirm && \
pacman -S base-devel --noconfirm && \
pacman -S --noconfirm git sudo
RUN groupadd -r work && \
useradd -r -g work work && \
echo "work ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
RUN mkdir /tmp/yaourt && \
chown -R work:work /tmp/yaourt
RUN mkdir /home/work && chown work /home/work
USER work
RUN cd /tmp/yaourt && \
git clone https://aur.archlinux.org/package-query.git && \
cd package-query && \
makepkg --noconfirm -si
RUN cd /tmp/yaourt && \
git clone https://aur.archlinux.org/yaourt.git && \
cd yaourt && \
makepkg --noconfirm -si
USER root
# `RUN alias ...` only affects that single RUN shell; persist the alias for interactive shells instead.
RUN echo "alias yaourt='sudo -i -u work yaourt'" >> /etc/bash.bashrc
COPY din /din
| {
"content_hash": "84a6944ad1006cdd5c3a5f205fbabdf9",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 62,
"avg_line_length": 21.441176470588236,
"alnum_prop": 0.6378600823045267,
"repo_name": "Alotor/din",
"id": "f37ae12a51010416a44e53e80974a3fbb4935c9c",
"size": "729",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "base/Dockerfile",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Shell",
"bytes": "6790"
}
],
"symlink_target": ""
} |
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>colour.models.dataset.rec_2020 Module — Colour 0.3.1 documentation</title>
<link rel="stylesheet" href="_static/basic.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<link rel="stylesheet" href="_static/bootswatch-3.1.0/colour/bootstrap.min.css" type="text/css" />
<link rel="stylesheet" href="_static/bootstrap-sphinx.css" type="text/css" />
<link rel="stylesheet" href="_static/styles.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: './',
VERSION: '0.3.1',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
};
</script>
<script type="text/javascript" src="_static/jquery.js"></script>
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script>
<script type="text/javascript" src="_static/js/jquery-1.11.0.min.js"></script>
<script type="text/javascript" src="_static/js/jquery-fix.js"></script>
<script type="text/javascript" src="_static/bootstrap-3.1.0/js/bootstrap.min.js"></script>
<script type="text/javascript" src="_static/bootstrap-sphinx.js"></script>
<link rel="top" title="Colour 0.3.1 documentation" href="index.html" />
<link rel="up" title="colour.models.dataset Package" href="colour.models.dataset.html" />
<link rel="next" title="colour.models.dataset.rec_709 Module" href="colour.models.dataset.rec_709.html" />
<link rel="prev" title="colour.models.dataset.prophoto_rgb Module" href="colour.models.dataset.prophoto_rgb.html" />
<meta charset='utf-8'>
<meta http-equiv='X-UA-Compatible' content='IE=edge,chrome=1'>
<meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1'>
<meta name="apple-mobile-web-app-capable" content="yes">
</head>
<body>
<div id="navbar" class="navbar navbar-default navbar-fixed-top">
<div class="container">
<div class="navbar-header">
<!-- .btn-navbar is used as the toggle for collapsed navbar content -->
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".nav-collapse">
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="index.html"><img src="_static/Colour_Logo_Icon_001.png">
Colour 0.3</a>
<!--<span class="navbar-text navbar-version pull-left"><b>0.3</b></span>-->
</div>
<div class="collapse navbar-collapse nav-collapse">
<ul class="nav navbar-nav">
<li class="divider-vertical"></li>
<li><a href="http://colour-science.org">colour-science.org</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown"><i class="fa fa-life-ring"> Documentation</i><b class="caret"></b></a>
<ul class="dropdown-menu">
<li>
<a href="api.html" class="fa fa-life-ring"> API Reference</a>
</li>
<li>
<a href="http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/colour.ipynb', True)" class="fa fa-book"> IPython Notebooks</a>
</li>
<li>
<a href="http://colour-science.org/features.php" class="fa fa-lightbulb-o"> Features</a>
</li>
<li>
<a href="http://colour-science.org/contributing.php"><span class="fa fa-gears"> Contributing</span></a>
</li>
</ul>
</li>
<li>
<a href="colour.models.dataset.prophoto_rgb.html" title="Previous Chapter: colour.models.dataset.prophoto_rgb Module"><span class="glyphicon glyphicon-chevron-left visible-sm"></span><span class="hidden-sm">« colour.models.da...</span>
</a>
</li>
<li>
<a href="colour.models.dataset.rec_709.html" title="Next Chapter: colour.models.dataset.rec_709 Module"><span class="glyphicon glyphicon-chevron-right visible-sm"></span><span class="hidden-sm">colour.models.da... »</span>
</a>
</li>
</ul>
<form class="navbar-form navbar-right" action="search.html" method="get">
<div class="form-group">
<input type="text" name="q" class="form-control" placeholder="Search" />
</div>
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
</div>
</div>
<div class="container">
<div class="row">
<div class="col-md-12">
<div class="section" id="module-colour.models.dataset.rec_2020">
<span id="colour-models-dataset-rec-2020-module"></span><h1>colour.models.dataset.rec_2020 Module<a class="headerlink" href="#module-colour.models.dataset.rec_2020" title="Permalink to this headline">¶</a></h1>
<div class="section" id="rec-2020-colourspace">
<h2>Rec. 2020 Colourspace<a class="headerlink" href="#rec-2020-colourspace" title="Permalink to this headline">¶</a></h2>
<p>Defines the <em>Rec. 2020</em> colourspace:</p>
<ul class="simple">
<li><a class="reference internal" href="#colour.models.dataset.rec_2020.REC_2020_COLOURSPACE" title="colour.models.dataset.rec_2020.REC_2020_COLOURSPACE"><tt class="xref py py-attr docutils literal"><span class="pre">REC_2020_COLOURSPACE</span></tt></a>.</li>
</ul>
<div class="admonition seealso">
<p class="first admonition-title">See also</p>
<p class="last"><a class="reference external" href="http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/rgb.ipynb">RGB Colourspaces IPython Notebook</a></p>
</div>
<p class="rubric">References</p>
<table class="docutils footnote" frame="void" id="id1" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label">[1]</td><td><a class="reference external" href="http://www.itu.int/dms_pubrec/itu-r/rec/bt/R-REC-BT.2020-1-201406-I!!PDF-E.pdf">Recommendation ITU-R BT.2020 - Parameter values for ultra-high
definition television systems for production and international
programme exchange</a>
(Last accessed 2 September 2014)</td></tr>
</tbody>
</table>
<dl class="data">
<dt id="colour.models.dataset.rec_2020.REC_2020_PRIMARIES">
<tt class="descclassname">colour.models.dataset.rec_2020.</tt><tt class="descname">REC_2020_PRIMARIES</tt><em class="property"> = array([[ 0.708, 0.292], [ 0.17 , 0.797], [ 0.131, 0.046]])</em><a class="headerlink" href="#colour.models.dataset.rec_2020.REC_2020_PRIMARIES" title="Permalink to this definition">¶</a></dt>
<dd><p><em>Rec. 2020</em> colourspace primaries.</p>
<p>REC_2020_PRIMARIES : ndarray, (3, 2)</p>
</dd></dl>
<dl class="data">
<dt id="colour.models.dataset.rec_2020.REC_2020_WHITEPOINT">
<tt class="descclassname">colour.models.dataset.rec_2020.</tt><tt class="descname">REC_2020_WHITEPOINT</tt><em class="property"> = (0.31271, 0.32902)</em><a class="headerlink" href="#colour.models.dataset.rec_2020.REC_2020_WHITEPOINT" title="Permalink to this definition">¶</a></dt>
<dd><p><em>Rec. 2020</em> colourspace whitepoint.</p>
<p>REC_2020_WHITEPOINT : tuple</p>
</dd></dl>
<dl class="data">
<dt id="colour.models.dataset.rec_2020.REC_2020_TO_XYZ_MATRIX">
<tt class="descclassname">colour.models.dataset.rec_2020.</tt><tt class="descname">REC_2020_TO_XYZ_MATRIX</tt><em class="property"> = array([[ 6.36953507e-01, 1.44619185e-01, 1.68855854e-01], [ 2.62698339e-01, 6.78008766e-01, 5.92928953e-02], [ 4.99407097e-17, 2.80731358e-02, 1.06082723e+00]])</em><a class="headerlink" href="#colour.models.dataset.rec_2020.REC_2020_TO_XYZ_MATRIX" title="Permalink to this definition">¶</a></dt>
<dd><p><em>Rec. 2020</em> colourspace to <em>CIE XYZ</em> colourspace matrix.</p>
<p>REC_2020_TO_XYZ_MATRIX : array_like, (3, 3)</p>
</dd></dl>
<dl class="data">
<dt id="colour.models.dataset.rec_2020.XYZ_TO_REC_2020_MATRIX">
<tt class="descclassname">colour.models.dataset.rec_2020.</tt><tt class="descname">XYZ_TO_REC_2020_MATRIX</tt><em class="property"> = array([[ 1.71666343, -0.35567332, -0.25336809], [-0.66667384, 1.61645574, 0.0157683 ], [ 0.01764248, -0.04277698, 0.94224328]])</em><a class="headerlink" href="#colour.models.dataset.rec_2020.XYZ_TO_REC_2020_MATRIX" title="Permalink to this definition">¶</a></dt>
<dd><p><em>CIE XYZ</em> colourspace to <em>Rec. 2020</em> colourspace matrix.</p>
<p>XYZ_TO_REC_2020_MATRIX : array_like, (3, 3)</p>
</dd></dl>
<dl class="data">
<dt id="colour.models.dataset.rec_2020.REC_2020_CONSTANTS">
<tt class="descclassname">colour.models.dataset.rec_2020.</tt><tt class="descname">REC_2020_CONSTANTS</tt><em class="property"> = {'alpha': <function <lambda> at 0x102e23578>, 'beta': <function <lambda> at 0x102e23848>}</em><a class="headerlink" href="#colour.models.dataset.rec_2020.REC_2020_CONSTANTS" title="Permalink to this definition">¶</a></dt>
<dd><p><em>CIE XYZ</em> constants.</p>
<p>REC_2020_CONSTANTS : Structure</p>
</dd></dl>
<dl class="function">
<dt id="colour.models.dataset.rec_2020.REC_2020_TRANSFER_FUNCTION">
<tt class="descclassname">colour.models.dataset.rec_2020.</tt><tt class="descname">REC_2020_TRANSFER_FUNCTION</tt><big>(</big><em>value</em>, <em>is_10_bits_system=True</em><big>)</big><a class="headerlink" href="#colour.models.dataset.rec_2020.REC_2020_TRANSFER_FUNCTION" title="Permalink to this definition">¶</a></dt>
<dd><p>Transfer function from linear to <em>Rec. 2020</em> colourspace.</p>
<p>REC_2020_TRANSFER_FUNCTION : object</p>
</dd></dl>
<dl class="function">
<dt id="colour.models.dataset.rec_2020.REC_2020_INVERSE_TRANSFER_FUNCTION">
<tt class="descclassname">colour.models.dataset.rec_2020.</tt><tt class="descname">REC_2020_INVERSE_TRANSFER_FUNCTION</tt><big>(</big><em>value</em>, <em>is_10_bits_system=True</em><big>)</big><a class="headerlink" href="#colour.models.dataset.rec_2020.REC_2020_INVERSE_TRANSFER_FUNCTION" title="Permalink to this definition">¶</a></dt>
<dd><p>Inverse transfer function from <em>Rec. 2020</em> colourspace to linear.</p>
<p>REC_2020_INVERSE_TRANSFER_FUNCTION : object</p>
</dd></dl>
<dl class="data">
<dt id="colour.models.dataset.rec_2020.REC_2020_COLOURSPACE">
<tt class="descclassname">colour.models.dataset.rec_2020.</tt><tt class="descname">REC_2020_COLOURSPACE</tt><em class="property"> = <colour.models.rgb_colourspace.RGB_Colourspace object at 0x102e29050></em><a class="headerlink" href="#colour.models.dataset.rec_2020.REC_2020_COLOURSPACE" title="Permalink to this definition">¶</a></dt>
<dd><p><em>Rec. 2020</em> colourspace.</p>
<p>REC_2020_COLOURSPACE : RGB_Colourspace</p>
</dd></dl>
</div>
</div>
</div>
</div>
</div>
<footer class="footer">
<div class="container">
<p class="pull-right">
<a href="#">Back to top</a>
</p>
<p>
© Copyright 2013 - 2014, Colour Developers.<br/>
Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> 1.2.2.<br/>
</p>
</div>
</footer>
</body>
</html>
| {
"content_hash": "b6698dd22eefabda8510f54fb45a8432",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 459,
"avg_line_length": 54.30875576036866,
"alnum_prop": 0.6464997878659313,
"repo_name": "colour-science/colour-website",
"id": "0ccb59488b349405fe515950502fd8257a66c55c",
"size": "11795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/0.3.2/html/colour.models.dataset.rec_2020.html",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "44"
},
{
"name": "CSS",
"bytes": "585578"
},
{
"name": "HTML",
"bytes": "165359615"
},
{
"name": "JavaScript",
"bytes": "358954"
},
{
"name": "Makefile",
"bytes": "1224"
},
{
"name": "PHP",
"bytes": "213935"
},
{
"name": "Python",
"bytes": "8858"
},
{
"name": "Ruby",
"bytes": "470"
}
],
"symlink_target": ""
} |
<?php
namespace component\request;
use component\request;
class _default implements request
{
private $_pathinfo;
private $_uri = null;
private $_query = null;
public function __construct()
{
if(strpos($_SERVER['REQUEST_URI'], '?')!==false) {
list($uri, $query) = explode('?', $_SERVER['REQUEST_URI']);
parse_str($query, $this->_query);
} else {
$uri = $_SERVER['REQUEST_URI'];
}
$this->_pathinfo = pathinfo($uri);
$this->_uri = $uri;
}
public function getMethod()
{
return (isset($_SERVER['HTTP_X_REQUESTED_WITH']) and $_SERVER['HTTP_X_REQUESTED_WITH'] === 'XMLHttpRequest') ? 'AJAX' : $_SERVER['REQUEST_METHOD'];
}
public function getUri()
{
return $this->_uri;
}
public function getScheme()
{
return isset($_SERVER['HTTP_X_FORWARDED_PROTO']) ? $_SERVER['HTTP_X_FORWARDED_PROTO'] : 'http';
}
public function getHost()
{
return $_SERVER['HTTP_HOST'];
}
public function getPort()
{
return $_SERVER['SERVER_PORT'];
}
public function getPath()
{
return $this->_pathinfo['dirname'];
}
public function getBasename()
{
return $this->_pathinfo['basename'];
}
public function getFilename()
{
return $this->_pathinfo['filename'];
}
public function getExtension()
{
return isset($this->_pathinfo['extension']) ? $this->_pathinfo['extension'] : null;
}
public function getQueryString()
{
return $_SERVER['QUERY_STRING'];
}
public function getQuery($name=null, $default=null)
{
return isset($this->_query[$name]) ? $this->_query[$name] : $default;
}
public function getClientIp()
{
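        // Note: HTTP_CLIENT_IP and HTTP_X_FORWARDED_FOR come from client-supplied
        // headers and are trivially spoofed; trust them only behind a known proxy.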
if(isset($_SERVER['HTTP_CLIENT_IP'])) {
return $_SERVER['HTTP_CLIENT_IP'];
} elseif(isset($_SERVER['HTTP_X_FORWARDED_FOR'])) {
return $_SERVER['HTTP_X_FORWARDED_FOR'];
} elseif(isset($_SERVER['REMOTE_ADDR'])) {
return $_SERVER['REMOTE_ADDR'];
        } else {
return null;
}
}
public function getUser()
{
return isset($_SERVER['PHP_AUTH_USER']) ? $_SERVER['PHP_AUTH_USER'] : null;
}
public function getPassword()
{
return isset($_SERVER['PHP_AUTH_PW']) ? $_SERVER['PHP_AUTH_PW'] : null;
}
public function getReferer()
{
return isset($_SERVER['HTTP_REFERER']) ? $_SERVER['HTTP_REFERER'] : null;
}
public function getUserAgent()
{
return $_SERVER['HTTP_USER_AGENT'];
}
public function __get($name)
{
$method = 'get'.$name;
return method_exists(__CLASS__, $method) ? $this->$method() : null;
}
}
| {
"content_hash": "12b8852ad81064b7eef4e24dcb21e180",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 149,
"avg_line_length": 21.801724137931036,
"alnum_prop": 0.6014234875444839,
"repo_name": "bluefin-framework/bluefin",
"id": "bbb02beb70e9819f52f22633d05c26a247553361",
"size": "2529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluefin/component/request/_default.php",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "33640"
}
],
"symlink_target": ""
} |
---
layout: default
description: JavaScript API to manage ArangoSearch Analyzers with arangosh and Foxx
title: ArangoSearch Analyzer JS API
---
Analyzer Management
===================
The JavaScript API can be accessed via the `@arangodb/analyzers` module from
both server-side and client-side code (arangosh, Foxx):
```js
var analyzers = require("@arangodb/analyzers");
```
See [Analyzers](analyzers.html) for general information and
details about the attributes.
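As a quick orientation, the following sketch (assuming arangosh and write
access to the current database; the `csv` name and settings are arbitrary)
runs through the full lifecycle covered in detail below:
```js
var analyzers = require("@arangodb/analyzers");
// Create (or fetch, if identical settings already exist) a custom Analyzer:
var a = analyzers.save("csv", "delimiter", { delimiter: "," }, ["frequency"]);
a.name();       // e.g. "_system::csv", depending on the current database
a.type();       // "delimiter"
a.properties(); // { "delimiter": "," }
analyzers.remove("csv"); // delete it again
```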
Analyzer Module Methods
-----------------------
### Create an Analyzer
```js
var analyzer = analyzers.save(<name>, <type>[, <properties>[, <features>]])
```
Create a new Analyzer with custom configuration in the current database.
- **name** (string): name for identifying the Analyzer later
- **type** (string): the kind of Analyzer to create
- **properties** (object, _optional_): settings specific to the chosen *type*.
Most types require at least one property, so this may not be optional
- **features** (array, _optional_): array of strings with names of the features
to enable
- returns **analyzer** (object): Analyzer object; one is also returned if an
  Analyzer with the same settings already exists. An error is raised if the
  settings mismatch or if they are invalid
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline analyzerCreate
@EXAMPLE_ARANGOSH_OUTPUT{analyzerCreate}
var analyzers = require("@arangodb/analyzers");
analyzers.save("csv", "delimiter", { "delimiter": "," }, ["frequency", "norm", "position"]);
~analyzers.remove("csv");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock analyzerCreate
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
### Get an Analyzer
```js
var analyzer = analyzers.analyzer(<name>)
```
Get an Analyzer by name from the current database. The name can be
prefixed with `_system::` to access Analyzers stored in the `_system` database.
- **name** (string): name of the Analyzer to find
- returns **analyzer** (object\|null): Analyzer object if found, else `null`
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline analyzerByName
@EXAMPLE_ARANGOSH_OUTPUT{analyzerByName}
var analyzers = require("@arangodb/analyzers");
analyzers.analyzer("text_en");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock analyzerByName
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
### List all Analyzers
```js
var analyzerArray = analyzers.toArray()
```
List all Analyzers available in the current database.
- returns **analyzerArray** (array): array of Analyzer objects
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline analyzerList
@EXAMPLE_ARANGOSH_OUTPUT{analyzerList}
var analyzers = require("@arangodb/analyzers");
analyzers.toArray();
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock analyzerList
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
### Remove an Analyzer
```js
analyzers.remove(<name> [, <force>])
```
Delete an Analyzer from the current database.
- **name** (string): name of the Analyzer to remove
- **force** (bool, _optional_): remove Analyzer even if in use by a View.
Default: `false`
- returns nothing: no return value on success, otherwise an error is raised
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline analyzerRemove
@EXAMPLE_ARANGOSH_OUTPUT{analyzerRemove}
var analyzers = require("@arangodb/analyzers");
~analyzers.save("csv", "delimiter", { "delimiter": "," }, []);
analyzers.remove("csv");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock analyzerRemove
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
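The module methods compose in the obvious way. Below is a minimal end-to-end
sketch; the Analyzer name `demo_delim` and its delimiter are arbitrary
example values:
```js
var analyzers = require("@arangodb/analyzers");
// create, look up, list and finally remove a custom Analyzer
var a = analyzers.save("demo_delim", "delimiter", { "delimiter": ";" }, ["frequency"]);
analyzers.analyzer("demo_delim"); // fetch it again by name
analyzers.toArray();              // now includes the new Analyzer
analyzers.remove("demo_delim");   // clean up
```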
Analyzer Object Methods
-----------------------
Individual Analyzer objects expose getter accessors for the aforementioned
definition attributes (see [Create an Analyzer](#create-an-analyzer)).
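For example, all four getters can be read off any Analyzer object returned by
`analyzers.analyzer()` or `analyzers.save()`; the following sketch uses the
built-in `text_en` Analyzer:
```js
var analyzers = require("@arangodb/analyzers");
var a = analyzers.analyzer("text_en");
a.name();       // "text_en"
a.type();       // "text"
a.properties(); // type-dependent settings object
a.features();   // e.g. ["frequency", "norm", "position"]
```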
### Get Analyzer Name
```js
var name = analyzer.name()
```
- returns **name** (string): name of the Analyzer
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline analyzerName
@EXAMPLE_ARANGOSH_OUTPUT{analyzerName}
var analyzers = require("@arangodb/analyzers");
analyzers.analyzer("text_en").name();
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock analyzerName
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
### Get Analyzer Type
```js
var type = analyzer.type()
```
- returns **type** (string): type of the Analyzer
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline analyzerType
@EXAMPLE_ARANGOSH_OUTPUT{analyzerType}
var analyzers = require("@arangodb/analyzers");
analyzers.analyzer("text_en").type();
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock analyzerType
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
### Get Analyzer Properties
```js
var properties = analyzer.properties()
```
- returns **properties** (object): *type* dependent properties of the Analyzer
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline analyzerProperties
@EXAMPLE_ARANGOSH_OUTPUT{analyzerProperties}
var analyzers = require("@arangodb/analyzers");
analyzers.analyzer("text_en").properties();
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock analyzerProperties
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
### Get Analyzer Features
```js
var features = analyzer.features()
```
- returns **features** (array): array of strings with the features of the Analyzer
{% arangoshexample examplevar="examplevar" script="script" result="result" %}
@startDocuBlockInline analyzerFeatures
@EXAMPLE_ARANGOSH_OUTPUT{analyzerFeatures}
var analyzers = require("@arangodb/analyzers");
analyzers.analyzer("text_en").features();
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock analyzerFeatures
{% endarangoshexample %}
{% include arangoshexample.html id=examplevar script=script result=result %}
| {
"content_hash": "1d0640c8c6190ac7af8ed36f1ef49e79",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 96,
"avg_line_length": 33.369791666666664,
"alnum_prop": 0.7277977212423912,
"repo_name": "arangodb/docs",
"id": "dd8eabe9a044554042fea939a2cae5ba9d5fa84b",
"size": "6411",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "3.9/appendix-java-script-modules-analyzers.md",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "111161"
},
{
"name": "Dockerfile",
"bytes": "606"
},
{
"name": "HTML",
"bytes": "20095"
},
{
"name": "JavaScript",
"bytes": "7194"
},
{
"name": "Ruby",
"bytes": "60609"
},
{
"name": "Shell",
"bytes": "617"
}
],
"symlink_target": ""
} |
namespace NLog.Targets.Wrappers
{
using System;
using System.ComponentModel;
using System.Threading;
using Common;
using Internal;
using System.Collections.Generic;
/// <summary>
/// Provides asynchronous, buffered execution of target writes.
/// </summary>
/// <seealso href="https://github.com/nlog/nlog/wiki/AsyncWrapper-target">Documentation on NLog Wiki</seealso>
/// <remarks>
/// <p>
/// Asynchronous target wrapper allows the logger code to execute more quickly, by queueing
/// messages and processing them in a separate thread. You should wrap targets
/// that spend a non-trivial amount of time in their Write() method with asynchronous
/// target to speed up logging.
/// </p>
/// <p>
/// Because asynchronous logging is quite a common scenario, NLog supports a
/// shorthand notation for wrapping all targets with AsyncWrapper. Just add async="true" to
/// the <targets/> element in the configuration file.
/// </p>
/// <code lang="XML">
/// <![CDATA[
/// <targets async="true">
/// ... your targets go here ...
/// </targets>
/// ]]></code>
/// </remarks>
/// <example>
/// <p>
/// To set up the target in the <a href="config.html">configuration file</a>,
/// use the following syntax:
/// </p>
/// <code lang="XML" source="examples/targets/Configuration File/AsyncWrapper/NLog.config" />
/// <p>
/// The above examples assume just one target and a single rule. See below for
/// a programmatic configuration that's equivalent to the above config file:
/// </p>
/// <code lang="C#" source="examples/targets/Configuration API/AsyncWrapper/Wrapping File/Example.cs" />
/// </example>
[Target("AsyncWrapper", IsWrapper = true)]
public class AsyncTargetWrapper : WrapperTargetBase
{
private readonly object lockObject = new object();
private Timer lazyWriterTimer;
private readonly Queue<AsyncContinuation> flushAllContinuations = new Queue<AsyncContinuation>();
private readonly object continuationQueueLock = new object();
/// <summary>
/// Initializes a new instance of the <see cref="AsyncTargetWrapper" /> class.
/// </summary>
public AsyncTargetWrapper()
: this(null)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="AsyncTargetWrapper" /> class.
/// </summary>
/// <param name="name">Name of the target.</param>
/// <param name="wrappedTarget">The wrapped target.</param>
public AsyncTargetWrapper(string name, Target wrappedTarget)
: this(wrappedTarget)
{
this.Name = name;
}
/// <summary>
/// Initializes a new instance of the <see cref="AsyncTargetWrapper" /> class.
/// </summary>
/// <param name="wrappedTarget">The wrapped target.</param>
public AsyncTargetWrapper(Target wrappedTarget)
: this(wrappedTarget, 10000, AsyncTargetWrapperOverflowAction.Discard)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="AsyncTargetWrapper" /> class.
/// </summary>
/// <param name="wrappedTarget">The wrapped target.</param>
/// <param name="queueLimit">Maximum number of requests in the queue.</param>
/// <param name="overflowAction">The action to be taken when the queue overflows.</param>
public AsyncTargetWrapper(Target wrappedTarget, int queueLimit, AsyncTargetWrapperOverflowAction overflowAction)
{
this.RequestQueue = new AsyncRequestQueue(10000, AsyncTargetWrapperOverflowAction.Discard);
this.TimeToSleepBetweenBatches = 50;
this.BatchSize = 100;
this.WrappedTarget = wrappedTarget;
this.QueueLimit = queueLimit;
this.OverflowAction = overflowAction;
}
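// A minimal programmatic-setup sketch (illustrative only; FileTarget and
// SimpleConfigurator are standard NLog types, not defined in this file):
//
// var file = new FileTarget { FileName = "${basedir}/log.txt" };
// var wrapper = new AsyncTargetWrapper(file, 5000, AsyncTargetWrapperOverflowAction.Grow);
// SimpleConfigurator.ConfigureForTargetLogging(wrapper, LogLevel.Debug);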
/// <summary>
/// Gets or sets the number of log events that should be processed in a batch
/// by the lazy writer thread.
/// </summary>
/// <docgen category='Buffering Options' order='100' />
[DefaultValue(100)]
public int BatchSize { get; set; }
/// <summary>
/// Gets or sets the time in milliseconds to sleep between batches.
/// </summary>
/// <docgen category='Buffering Options' order='100' />
[DefaultValue(50)]
public int TimeToSleepBetweenBatches { get; set; }
/// <summary>
/// Gets or sets the action to be taken when the lazy writer thread request queue count
/// exceeds the set limit.
/// </summary>
/// <docgen category='Buffering Options' order='100' />
[DefaultValue("Discard")]
public AsyncTargetWrapperOverflowAction OverflowAction
{
get { return this.RequestQueue.OnOverflow; }
set { this.RequestQueue.OnOverflow = value; }
}
/// <summary>
/// Gets or sets the limit on the number of requests in the lazy writer thread request queue.
/// </summary>
/// <docgen category='Buffering Options' order='100' />
[DefaultValue(10000)]
public int QueueLimit
{
get { return this.RequestQueue.RequestLimit; }
set { this.RequestQueue.RequestLimit = value; }
}
/// <summary>
/// Gets the queue of lazy writer thread requests.
/// </summary>
internal AsyncRequestQueue RequestQueue { get; private set; }
/// <summary>
/// Waits for the lazy writer thread to finish writing messages.
/// </summary>
/// <param name="asyncContinuation">The asynchronous continuation.</param>
protected override void FlushAsync(AsyncContinuation asyncContinuation)
{
lock (continuationQueueLock)
{
this.flushAllContinuations.Enqueue(asyncContinuation);
}
}
/// <summary>
/// Initializes the target by starting the lazy writer timer.
/// </summary>
protected override void InitializeTarget()
{
if (this.TimeToSleepBetweenBatches <= 0) {
throw new NLogConfigurationException("The AsyncTargetWrapper's TimeToSleepBetweenBatches property must be > 0");
}
base.InitializeTarget();
this.RequestQueue.Clear();
InternalLogger.Trace("AsyncWrapper '{0}': start timer", Name);
this.lazyWriterTimer = new Timer(this.ProcessPendingEvents, null, Timeout.Infinite, Timeout.Infinite);
this.StartLazyWriterTimer();
}
/// <summary>
/// Shuts down the lazy writer timer.
/// </summary>
protected override void CloseTarget()
{
this.StopLazyWriterThread();
if (this.RequestQueue.RequestCount > 0)
{
ProcessPendingEvents(null);
}
base.CloseTarget();
}
/// <summary>
/// Starts the lazy writer thread which periodically writes
/// queued log messages.
/// </summary>
protected virtual void StartLazyWriterTimer()
{
lock (this.lockObject)
{
if (this.lazyWriterTimer != null)
{
this.lazyWriterTimer.Change(this.TimeToSleepBetweenBatches, Timeout.Infinite);
}
}
}
/// <summary>
/// Stops the lazy writer thread.
/// </summary>
protected virtual void StopLazyWriterThread()
{
lock (this.lockObject)
{
if (this.lazyWriterTimer != null)
{
this.lazyWriterTimer.Change(Timeout.Infinite, Timeout.Infinite);
this.lazyWriterTimer = null;
}
}
}
/// <summary>
/// Adds the log event to asynchronous queue to be processed by
/// the lazy writer thread.
/// </summary>
/// <param name="logEvent">The log event.</param>
/// <remarks>
/// The <see cref="Target.PrecalculateVolatileLayouts"/> is called
/// to ensure that the log event can be processed in another thread.
/// </remarks>
protected override void Write(AsyncLogEventInfo logEvent)
{
this.MergeEventProperties(logEvent.LogEvent);
this.PrecalculateVolatileLayouts(logEvent.LogEvent);
this.RequestQueue.Enqueue(logEvent);
}
private void ProcessPendingEvents(object state)
{
AsyncContinuation[] continuations;
lock (this.continuationQueueLock)
{
continuations = this.flushAllContinuations.Count > 0
? this.flushAllContinuations.ToArray()
: new AsyncContinuation[] { null };
this.flushAllContinuations.Clear();
}
try
{
if (this.WrappedTarget == null)
{
InternalLogger.Error("AsyncWrapper '{0}': WrappedTarget is NULL", Name);
return;
}
foreach (var continuation in continuations)
{
int count = this.BatchSize;
if (continuation != null)
{
count = this.RequestQueue.RequestCount;
}
InternalLogger.Trace("AsyncWrapper '{0}': Flushing {1} events.", Name, count);
if (this.RequestQueue.RequestCount == 0)
{
if (continuation != null)
{
continuation(null);
}
}
AsyncLogEventInfo[] logEventInfos = this.RequestQueue.DequeueBatch(count);
if (continuation != null)
{
// write all events, then flush, then call the continuation
this.WrappedTarget.WriteAsyncLogEvents(logEventInfos, ex => this.WrappedTarget.Flush(continuation));
}
else
{
// just write all events
this.WrappedTarget.WriteAsyncLogEvents(logEventInfos);
}
}
}
catch (Exception exception)
{
InternalLogger.Error(exception, "AsyncWrapper '{0}': Error in lazy writer timer procedure.", Name);
if (exception.MustBeRethrown())
{
throw;
}
}
finally
{
this.StartLazyWriterTimer();
}
}
}
} | {
"content_hash": "25672c87fb91e02e8d9f967ab184f1b4",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 129,
"avg_line_length": 37.4,
"alnum_prop": 0.555515272364724,
"repo_name": "kevindaub/NLog",
"id": "63c5e2d63f279accde2765a4824fe29fcbfb3ece",
"size": "12699",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "src/NLog/Targets/Wrappers/AsyncTargetWrapper.cs",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "2613"
},
{
"name": "Batchfile",
"bytes": "521"
},
{
"name": "C#",
"bytes": "4024662"
},
{
"name": "Makefile",
"bytes": "3639"
},
{
"name": "Perl",
"bytes": "1590"
},
{
"name": "PowerShell",
"bytes": "1596"
}
],
"symlink_target": ""
} |
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-aspm.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/apei.h>
#define PREFIX "ACPI: "
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_root");
#define ACPI_PCI_ROOT_CLASS "pci_bridge"
#define ACPI_PCI_ROOT_DEVICE_NAME "PCI Root Bridge"
static int acpi_pci_root_add(struct acpi_device *device);
static int acpi_pci_root_remove(struct acpi_device *device, int type);
static int acpi_pci_root_start(struct acpi_device *device);
#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
| OSC_ACTIVE_STATE_PWR_SUPPORT \
| OSC_CLOCK_PWR_CAPABILITY_SUPPORT \
| OSC_MSI_SUPPORT)
static const struct acpi_device_id root_device_ids[] = {
{"PNP0A03", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, root_device_ids);
static struct acpi_driver acpi_pci_root_driver = {
.name = "pci_root",
.class = ACPI_PCI_ROOT_CLASS,
.ids = root_device_ids,
.ops = {
.add = acpi_pci_root_add,
.remove = acpi_pci_root_remove,
.start = acpi_pci_root_start,
},
};
static LIST_HEAD(acpi_pci_roots);
static struct acpi_pci_driver *sub_driver;
static DEFINE_MUTEX(osc_lock);
int acpi_pci_register_driver(struct acpi_pci_driver *driver)
{
int n = 0;
struct acpi_pci_root *root;
struct acpi_pci_driver **pptr = &sub_driver;
while (*pptr)
pptr = &(*pptr)->next;
*pptr = driver;
if (!driver->add)
return 0;
list_for_each_entry(root, &acpi_pci_roots, node) {
driver->add(root->device->handle);
n++;
}
return n;
}
EXPORT_SYMBOL(acpi_pci_register_driver);
void acpi_pci_unregister_driver(struct acpi_pci_driver *driver)
{
struct acpi_pci_root *root;
struct acpi_pci_driver **pptr = &sub_driver;
while (*pptr) {
if (*pptr == driver)
break;
pptr = &(*pptr)->next;
}
BUG_ON(!*pptr);
*pptr = (*pptr)->next;
if (!driver->remove)
return;
list_for_each_entry(root, &acpi_pci_roots, node)
driver->remove(root->device->handle);
}
EXPORT_SYMBOL(acpi_pci_unregister_driver);
acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
{
struct acpi_pci_root *root;
list_for_each_entry(root, &acpi_pci_roots, node)
if ((root->segment == (u16) seg) &&
(root->secondary.start == (u16) bus))
return root->device->handle;
return NULL;
}
EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
/**
* acpi_is_root_bridge - determine whether an ACPI CA node is a PCI root bridge
* @handle - the ACPI CA node in question.
*
* Note: we could make this API take a struct acpi_device * instead, but
* for now, it's more convenient to operate on an acpi_handle.
*/
int acpi_is_root_bridge(acpi_handle handle)
{
int ret;
struct acpi_device *device;
ret = acpi_bus_get_device(handle, &device);
if (ret)
return 0;
ret = acpi_match_device_ids(device, root_device_ids);
if (ret)
return 0;
else
return 1;
}
EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
static acpi_status
get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
{
struct resource *res = data;
struct acpi_resource_address64 address;
if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
resource->type != ACPI_RESOURCE_TYPE_ADDRESS32 &&
resource->type != ACPI_RESOURCE_TYPE_ADDRESS64)
return AE_OK;
acpi_resource_to_address64(resource, &address);
if ((address.address_length > 0) &&
(address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
res->start = address.minimum;
res->end = address.minimum + address.address_length - 1;
}
return AE_OK;
}
static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
struct resource *res)
{
acpi_status status;
res->start = -1;
status =
acpi_walk_resources(handle, METHOD_NAME__CRS,
get_root_bridge_busnr_callback, res);
if (ACPI_FAILURE(status))
return status;
if (res->start == -1)
return AE_ERROR;
return AE_OK;
}
static void acpi_pci_bridge_scan(struct acpi_device *device)
{
int status;
struct acpi_device *child = NULL;
if (device->flags.bus_address)
if (device->parent && device->parent->ops.bind) {
status = device->parent->ops.bind(device);
if (!status) {
list_for_each_entry(child, &device->children, node)
acpi_pci_bridge_scan(child);
}
}
}
static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
static acpi_status acpi_pci_run_osc(acpi_handle handle,
const u32 *capbuf, u32 *retval)
{
struct acpi_osc_context context = {
.uuid_str = pci_osc_uuid_str,
.rev = 1,
.cap.length = 12,
.cap.pointer = (void *)capbuf,
};
acpi_status status;
status = acpi_run_osc(handle, &context);
if (ACPI_SUCCESS(status)) {
*retval = *((u32 *)(context.ret.pointer + 8));
kfree(context.ret.pointer);
}
return status;
}
static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
u32 support,
u32 *control)
{
acpi_status status;
u32 result, capbuf[3];
support &= OSC_PCI_SUPPORT_MASKS;
support |= root->osc_support_set;
capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
capbuf[OSC_SUPPORT_TYPE] = support;
if (control) {
*control &= OSC_PCI_CONTROL_MASKS;
capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
} else {
/* Run _OSC query for all possible controls. */
capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
}
status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
if (ACPI_SUCCESS(status)) {
root->osc_support_set = support;
if (control)
*control = result;
}
return status;
}
static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
{
acpi_status status;
acpi_handle tmp;
status = acpi_get_handle(root->device->handle, "_OSC", &tmp);
if (ACPI_FAILURE(status))
return status;
mutex_lock(&osc_lock);
status = acpi_pci_query_osc(root, flags, NULL);
mutex_unlock(&osc_lock);
return status;
}
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
{
struct acpi_pci_root *root;
list_for_each_entry(root, &acpi_pci_roots, node) {
if (root->device->handle == handle)
return root;
}
return NULL;
}
EXPORT_SYMBOL_GPL(acpi_pci_find_root);
struct acpi_handle_node {
struct list_head node;
acpi_handle handle;
};
/**
* acpi_get_pci_dev - convert ACPI CA handle to struct pci_dev
* @handle: the handle in question
*
* Given an ACPI CA handle, the desired PCI device is located in the
* list of PCI devices.
*
* If the device is found, its reference count is increased and this
* function returns a pointer to its data structure. The caller must
* decrement the reference count by calling pci_dev_put().
* If no device is found, %NULL is returned.
*/
struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
{
int dev, fn;
unsigned long long adr;
acpi_status status;
acpi_handle phandle;
struct pci_bus *pbus;
struct pci_dev *pdev = NULL;
struct acpi_handle_node *node, *tmp;
struct acpi_pci_root *root;
LIST_HEAD(device_list);
/*
* Walk up the ACPI CA namespace until we reach a PCI root bridge.
*/
phandle = handle;
while (!acpi_is_root_bridge(phandle)) {
node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL);
if (!node)
goto out;
INIT_LIST_HEAD(&node->node);
node->handle = phandle;
list_add(&node->node, &device_list);
status = acpi_get_parent(phandle, &phandle);
if (ACPI_FAILURE(status))
goto out;
}
root = acpi_pci_find_root(phandle);
if (!root)
goto out;
pbus = root->bus;
/*
* Now, walk back down the PCI device tree until we return to our
* original handle. Assumes that everything between the PCI root
* bridge and the device we're looking for must be a P2P bridge.
*/
list_for_each_entry(node, &device_list, node) {
acpi_handle hnd = node->handle;
status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status))
goto out;
dev = (adr >> 16) & 0xffff;
fn = adr & 0xffff;
pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn));
if (!pdev || hnd == handle)
break;
pbus = pdev->subordinate;
pci_dev_put(pdev);
/*
* This function may be called for a non-PCI device that has a
* PCI parent (eg. a disk under a PCI SATA controller). In that
* case pdev->subordinate will be NULL for the parent.
*/
if (!pbus) {
dev_dbg(&pdev->dev, "Not a PCI-to-PCI bridge\n");
pdev = NULL;
break;
}
}
out:
list_for_each_entry_safe(node, tmp, &device_list, node)
kfree(node);
return pdev;
}
EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
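/*
* Illustrative caller sketch (an assumption, not code from this file):
* resolve an ACPI handle to its PCI device and drop the reference when done.
*
*	struct pci_dev *pdev = acpi_get_pci_dev(handle);
*	if (pdev) {
*		... use pdev ...
*		pci_dev_put(pdev);
*	}
*/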
/**
* acpi_pci_osc_control_set - Request control of PCI root _OSC features.
* @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
* @mask: Mask of _OSC bits to request control of, place to store control mask.
* @req: Mask of _OSC bits the control of is essential to the caller.
*
* Run _OSC query for @mask and if that is successful, compare the returned
* mask of control bits with @req. If all of the @req bits are set in the
* returned mask, run _OSC request for it.
*
* The variable at the @mask address may be modified regardless of whether or
* not the function returns success. On success it will contain the mask of
* _OSC bits the BIOS has granted control of, but its contents are meaningless
* on failure.
**/
acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
{
struct acpi_pci_root *root;
acpi_status status;
u32 ctrl, capbuf[3];
acpi_handle tmp;
if (!mask)
return AE_BAD_PARAMETER;
ctrl = *mask & OSC_PCI_CONTROL_MASKS;
if ((ctrl & req) != req)
return AE_TYPE;
root = acpi_pci_find_root(handle);
if (!root)
return AE_NOT_EXIST;
status = acpi_get_handle(handle, "_OSC", &tmp);
if (ACPI_FAILURE(status))
return status;
mutex_lock(&osc_lock);
*mask = ctrl | root->osc_control_set;
/* No need to evaluate _OSC if the control was already granted. */
if ((root->osc_control_set & ctrl) == ctrl)
goto out;
/* Need to check the available controls bits before requesting them. */
while (*mask) {
status = acpi_pci_query_osc(root, root->osc_support_set, mask);
if (ACPI_FAILURE(status))
goto out;
if (ctrl == *mask)
break;
ctrl = *mask;
}
if ((ctrl & req) != req) {
status = AE_SUPPORT;
goto out;
}
capbuf[OSC_QUERY_TYPE] = 0;
capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
capbuf[OSC_CONTROL_TYPE] = ctrl;
status = acpi_pci_run_osc(handle, capbuf, mask);
if (ACPI_SUCCESS(status))
root->osc_control_set = *mask;
out:
mutex_unlock(&osc_lock);
return status;
}
EXPORT_SYMBOL(acpi_pci_osc_control_set);
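/*
* Illustrative caller sketch (an assumption, not code from this file): a PCIe
* service driver requesting native hotplug and PME control, treating AER
* control as optional. On success, *mask holds the bits actually granted.
*
*	u32 mask = OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
*		 | OSC_PCI_EXPRESS_PME_CONTROL
*		 | OSC_PCI_EXPRESS_AER_CONTROL;
*	status = acpi_pci_osc_control_set(handle, &mask,
*			OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | OSC_PCI_EXPRESS_PME_CONTROL);
*	if (ACPI_SUCCESS(status))
*		... use the granted services indicated by mask ...
*/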
static int __devinit acpi_pci_root_add(struct acpi_device *device)
{
unsigned long long segment, bus;
acpi_status status;
int result;
struct acpi_pci_root *root;
acpi_handle handle;
struct acpi_device *child;
u32 flags, base_flags;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
return -ENOMEM;
segment = 0;
status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
&segment);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
result = -ENODEV;
goto end;
}
/* Check _CRS first, then _BBN. If no _BBN, default to zero. */
root->secondary.flags = IORESOURCE_BUS;
status = try_get_root_bridge_busnr(device->handle, &root->secondary);
if (ACPI_FAILURE(status)) {
/*
* We need both the start and end of the downstream bus range
* to interpret _CBA (MMCONFIG base address), so it really is
* supposed to be in _CRS. If we don't find it there, all we
* can do is assume [_BBN-0xFF] or [0-0xFF].
*/
root->secondary.end = 0xFF;
printk(KERN_WARNING FW_BUG PREFIX
"no secondary bus range in _CRS\n");
status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN,
NULL, &bus);
if (ACPI_SUCCESS(status))
root->secondary.start = bus;
else if (status == AE_NOT_FOUND)
root->secondary.start = 0;
else {
printk(KERN_ERR PREFIX "can't evaluate _BBN\n");
result = -ENODEV;
goto end;
}
}
INIT_LIST_HEAD(&root->node);
root->device = device;
root->segment = segment & 0xFFFF;
strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
/*
* All supported architectures that use ACPI have support for
* PCI domains, so we indicate this in _OSC support capabilities.
*/
flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
acpi_pci_osc_support(root, flags);
/*
* TBD: Need PCI interface for enumeration/configuration of roots.
*/
/* TBD: Locking */
list_add_tail(&root->node, &acpi_pci_roots);
printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
/*
* Scan the Root Bridge
* --------------------
* Must do this prior to any attempt to bind the root device, as the
* PCI namespace does not get created until this call is made (and
* thus the root bridge's pci_dev does not exist).
*/
root->bus = pci_acpi_scan_root(root);
if (!root->bus) {
printk(KERN_ERR PREFIX
"Bus %04x:%02x not present in PCI namespace\n",
root->segment, (unsigned int)root->secondary.start);
result = -ENODEV;
goto end;
}
/*
* Attach ACPI-PCI Context
* -----------------------
* Thus binding the ACPI and PCI devices.
*/
result = acpi_pci_bind_root(device);
if (result)
goto end;
/*
* PCI Routing Table
* -----------------
* Evaluate and parse _PRT, if exists.
*/
status = acpi_get_handle(device->handle, METHOD_NAME__PRT, &handle);
if (ACPI_SUCCESS(status))
result = acpi_pci_irq_add_prt(device->handle, root->bus);
/*
* Scan and bind all _ADR-Based Devices
*/
list_for_each_entry(child, &device->children, node)
acpi_pci_bridge_scan(child);
/* Indicate support for various _OSC capabilities. */
if (pci_ext_cfg_avail(root->bus->self))
flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
if (pcie_aspm_support_enabled())
flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
if (pci_msi_enabled())
flags |= OSC_MSI_SUPPORT;
if (flags != base_flags)
acpi_pci_osc_support(root, flags);
if (!pcie_ports_disabled
&& (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
| OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
| OSC_PCI_EXPRESS_PME_CONTROL;
if (pci_aer_available()) {
if (aer_acpi_firmware_first())
dev_dbg(root->bus->bridge,
"PCIe errors handled by BIOS.\n");
else
flags |= OSC_PCI_EXPRESS_AER_CONTROL;
}
dev_info(root->bus->bridge,
"Requesting ACPI _OSC control (0x%02x)\n", flags);
status = acpi_pci_osc_control_set(device->handle, &flags,
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
if (ACPI_SUCCESS(status)) {
dev_info(root->bus->bridge,
"ACPI _OSC control (0x%02x) granted\n", flags);
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
/*
* We have ASPM control, but the FADT indicates
* that it's unsupported. Clear it.
*/
pcie_clear_aspm(root->bus);
}
} else {
dev_info(root->bus->bridge,
"ACPI _OSC request failed (%s), "
"returned control mask: 0x%02x\n",
acpi_format_exception(status), flags);
pr_info("ACPI _OSC control for PCIe not granted, "
"disabling ASPM\n");
pcie_no_aspm();
}
} else {
dev_info(root->bus->bridge,
"Unable to request _OSC control "
"(_OSC support mask: 0x%02x)\n", flags);
}
pci_acpi_add_bus_pm_notifier(device, root->bus);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
return 0;
end:
if (!list_empty(&root->node))
list_del(&root->node);
kfree(root);
return result;
}
static int acpi_pci_root_start(struct acpi_device *device)
{
struct acpi_pci_root *root = acpi_driver_data(device);
pci_bus_add_devices(root->bus);
return 0;
}
static int acpi_pci_root_remove(struct acpi_device *device, int type)
{
struct acpi_pci_root *root = acpi_driver_data(device);
device_set_run_wake(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
kfree(root);
return 0;
}
static int __init acpi_pci_root_init(void)
{
acpi_hest_init();
if (acpi_pci_disabled)
return 0;
pci_acpi_crs_quirks();
if (acpi_bus_register_driver(&acpi_pci_root_driver) < 0)
return -ENODEV;
return 0;
}
subsys_initcall(acpi_pci_root_init);
| {
"content_hash": "d29412c3eb68f5bf9c6d7c9427ba0372",
"timestamp": "",
"source": "github",
"line_count": 644,
"max_line_length": 79,
"avg_line_length": 25.619565217391305,
"alnum_prop": 0.6702224377234984,
"repo_name": "WhiteBearSolutions/WBSAirback",
"id": "7aff6312ce7c75a66d731b5ea0a10743e7b34c5f",
"size": "17603",
"binary": false,
"copies": "3748",
"ref": "refs/heads/master",
"path": "packages/wbsairback-kernel-image/wbsairback-kernel-image-3.2.43/drivers/acpi/pci_root.c",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "4780982"
}
],
"symlink_target": ""
} |
package parquet.example.data;
import parquet.Log;
import parquet.example.data.simple.NanoTime;
import parquet.io.api.Binary;
import parquet.io.api.RecordConsumer;
abstract public class Group extends GroupValueSource {
private static final Log logger = Log.getLog(Group.class);
private static final boolean DEBUG = Log.DEBUG;
public void add(String field, int value) {
add(getType().getFieldIndex(field), value);
}
public void add(String field, long value) {
add(getType().getFieldIndex(field), value);
}
public void add(String field, float value) {
add(getType().getFieldIndex(field), value);
}
public void add(String field, double value) {
add(getType().getFieldIndex(field), value);
}
public void add(String field, String value) {
add(getType().getFieldIndex(field), value);
}
public void add(String field, NanoTime value) {
add(getType().getFieldIndex(field), value);
}
public void add(String field, boolean value) {
add(getType().getFieldIndex(field), value);
}
public void add(String field, Binary value) {
add(getType().getFieldIndex(field), value);
}
public void add(String field, Group value) {
add(getType().getFieldIndex(field), value);
}
public Group addGroup(String field) {
if (DEBUG) logger.debug("add group "+field+" to "+getType().getName());
return addGroup(getType().getFieldIndex(field));
}
public Group getGroup(String field, int index) {
return getGroup(getType().getFieldIndex(field), index);
}
abstract public void add(int fieldIndex, int value);
abstract public void add(int fieldIndex, long value);
abstract public void add(int fieldIndex, String value);
abstract public void add(int fieldIndex, boolean value);
abstract public void add(int fieldIndex, NanoTime value);
abstract public void add(int fieldIndex, Binary value);
abstract public void add(int fieldIndex, float value);
abstract public void add(int fieldIndex, double value);
abstract public void add(int fieldIndex, Group value);
abstract public Group addGroup(int fieldIndex);
abstract public Group getGroup(int fieldIndex, int index);
public Group asGroup() {
return this;
}
public Group append(String fieldName, int value) {
add(fieldName, value);
return this;
}
public Group append(String fieldName, float value) {
add(fieldName, value);
return this;
}
public Group append(String fieldName, double value) {
add(fieldName, value);
return this;
}
public Group append(String fieldName, long value) {
add(fieldName, value);
return this;
}
public Group append(String fieldName, NanoTime value) {
add(fieldName, value);
return this;
}
public Group append(String fieldName, String value) {
add(fieldName, Binary.fromString(value));
return this;
}
public Group append(String fieldName, boolean value) {
add(fieldName, value);
return this;
}
public Group append(String fieldName, Binary value) {
add(fieldName, value);
return this;
}
abstract public void writeValue(int field, int index, RecordConsumer recordConsumer);
}
| {
"content_hash": "ee45777f77dfcca3f5d7565a102daca1",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 87,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.7060682680151706,
"repo_name": "danielcweeks/incubator-parquet-mr",
"id": "bb04623a3d64da4dd51d020f07b8992011c3d3f2",
"size": "3757",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "parquet-column/src/main/java/parquet/example/data/Group.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "2329031"
},
{
"name": "Python",
"bytes": "13319"
},
{
"name": "Scala",
"bytes": "5969"
},
{
"name": "Shell",
"bytes": "4660"
}
],
"symlink_target": ""
} |
{% extends "base.html" %}
{% load static %}
{% load url %}
{% load user_profile_link from profiles %}
{% load workstations %}
{% load guardian_tags %}
{% load evaluation_extras %}
{% block breadcrumbs %}
<ol class="breadcrumb">
<li class="breadcrumb-item"><a href="{% url 'workstation-configs:list' %}">Viewer
Configurations</a></li>
<li class="breadcrumb-item active" aria-current="page">{{ object.title }}
</li>
</ol>
{% endblock %}
{% block content %}
{% get_obj_perms request.user for object as "config_perms" %}
<h1>Viewer Configuration {{ object.title }}</h1>
{% if object.description %}
<p>{{ object.description }}</p>
{% endif %}
<table class="table table-hover my-3">
<tr>
<td class="font-weight-bold">Image context</td>
<td>{{ object.get_image_context_display }}</td>
</tr>
<tr>
<td class="font-weight-bold">Window presets</td>
<td>
{% for x in object.window_presets.all %}
<div>{{ x }}</div>
{% endfor %}
</td>
</tr>
<tr>
<td class="font-weight-bold">Default window preset</td>
<td>{{ object.default_window_preset }}</td>
</tr>
<tr>
<td class="font-weight-bold">Default slab thickness (mm)</td>
<td>{{ object.default_slab_thickness_mm }}</td>
</tr>
<tr>
<td class="font-weight-bold">Default slab render method</td>
<td>{{ object.get_default_slab_render_method_display }}</td>
</tr>
<tr>
<td class="font-weight-bold">Default orientation</td>
<td>{{ object.get_default_orientation_display }}</td>
</tr>
<tr>
<td class="font-weight-bold">Default overlay alpha</td>
<td>{{ object.default_overlay_alpha }}</td>
</tr>
<tr>
<td class="font-weight-bold">Overlay lookup tables</td>
<td>
{% for x in object.overlay_luts.all %}
<div>{{ x }}</div>
{% endfor %}
</td>
</tr>
<tr>
<td class="font-weight-bold">Default overlay lookup table</td>
<td>{{ object.default_overlay_lut }}</td>
</tr>
<tr>
<td class="font-weight-bold">Default overlay interpolation</td>
<td>{{ object.get_default_overlay_interpolation_display }}</td>
</tr>
<tr>
<td class="font-weight-bold">Overlay Segments</td>
<td>
<pre>{{ object.overlay_segments|json_dumps }}</pre>
</td>
</tr>
<tr>
<td class="font-weight-bold">Key Bindings</td>
<td>
<pre>{{ object.key_bindings|json_dumps }}</pre>
</td>
</tr>
<tr>
<td class="font-weight-bold">Default zoom scale</td>
<td>{{ object.default_zoom_scale }}</td>
</tr>
<tr>
<td class="font-weight-bold">Image info plugin</td>
<td>
<i class="fas {% if object.show_image_info_plugin %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">Display plugin</td>
<td>
<i class="fas {% if object.show_display_plugin %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">Image switcher plugin</td>
<td>
<i class="fas {% if object.show_image_switcher_plugin %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">Algorithm output plugin</td>
<td>
<i class="fas {% if object.show_algorithm_output_plugin %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">Overlay plugin</td>
<td>
<i class="fas {% if object.show_overlay_plugin %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">Invert tool</td>
<td>
<i class="fas {% if object.show_invert_tool %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">Flip tool</td>
<td>
<i class="fas {% if object.show_flip_tool %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">Window level tool</td>
<td>
<i class="fas {% if object.show_window_level_tool %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">Reset tool</td>
<td>
<i class="fas {% if object.show_reset_tool %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">Overlay selection tool</td>
<td>
<i class="fas {% if object.show_overlay_selection_tool %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">LUT selection tool</td>
<td>
<i class="fas {% if object.show_lut_selection_tool %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">Enable contrast enhancement preprocessing (fundus)</td>
<td>
<i class="fas {% if object.enable_contrast_enhancement %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
<tr>
<td class="font-weight-bold">Jump to center of gravity of first output when viewing algorithm results</td>
<td>
<i class="fas {% if object.auto_jump_center_of_gravity %}fa-check-circle{% else %}fa-times-circle{% endif %}"></i>
</td>
</tr>
</table>
{% if "change_workstationconfig" in config_perms %}
<div class="d-flex justify-content-start align-items-center">
<a class="btn btn-primary mr-1"
href="{% url 'workstation-configs:update' slug=object.slug %}">
<i class="fa fa-edit"></i> Edit this viewer configuration
</a>
<a href="{% url 'workstation-configs:delete' slug=object.slug %}"
class="btn btn-danger">
<i class="fa fa-times"></i> Delete this viewer configuration
</a>
</div>
{% endif %}
{% endblock %}
| {
"content_hash": "607f862fa0a3934d7bce8c424047b231",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 131,
"avg_line_length": 38.03763440860215,
"alnum_prop": 0.49045936395759715,
"repo_name": "comic/comic-django",
"id": "00fb73ec61ae96dd9f6f92ffd13e85aa183fb8f8",
"size": "7075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/grandchallenge/workstation_configs/templates/workstation_configs/workstationconfig_detail.html",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "94300"
},
{
"name": "HTML",
"bytes": "101108"
},
{
"name": "JavaScript",
"bytes": "122734"
},
{
"name": "PHP",
"bytes": "99155"
},
{
"name": "Python",
"bytes": "486219"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
} |
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | {
"content_hash": "e9f6fba202ccd969e9027a5381a36f76",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 31,
"avg_line_length": 9.692307692307692,
"alnum_prop": 0.7063492063492064,
"repo_name": "mdoering/backbone",
"id": "51eb73211e15313aff88ea868161ce929db34d44",
"size": "197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "life/Plantae/Magnoliophyta/Magnoliopsida/Caryophyllales/Cactaceae/Cephalocereus/Cephalocereus chrysacanthus/Cephalocereus chrysacanthus cristatus/README.md",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import test from "ava";
import { Client } from "../client";
import { Job, JobPayload } from "../job";
import { mocked, registerCleaner } from "./_helper";
registerCleaner(test);
test("#new: host defaults to 127.0.0.1", (t) => {
const client = new Client();
t.is(client.connectionFactory.host, "127.0.0.1");
});
test("#new: port defaults to 7419", (t) => {
const client = new Client();
t.is(client.connectionFactory.port, "7419");
});
test("#buildHello: client builds a passwordless ahoy", (t) => {
const client = new Client();
const hello = client.buildHello({ i: 3, s: "123", v: 3 });
t.truthy(hello.hostname, "hostname is present");
});
test("#buildHello: client builds a salty ahoy", (t) => {
const client = new Client({
password: "abcde123",
});
const hello = client.buildHello({ i: 3, s: "123", v: 3 });
t.is(
hello.pwdhash,
"ef646abadf4ffba660d9bbb8de8e45576970de917b4c9da8cad96b49e64636d9"
);
});
test("#buildHello: wid is present in HELLO", (t) => {
const client = new Client({ wid: "workerid" });
const hello = client.buildHello({ v: 2, s: "abc", i: 3 });
t.is(hello.wid, client.wid, "wid in ahoy does not match");
});
test("#buildHello: pid is present when wid is given in ahoy", (t) => {
const client = new Client();
const hello = client.buildHello({ i: 3, s: "123", v: 3 });
t.truthy(!hello.pid, "pid should not be present");
});
test("#buildHello: labels are passed in ahoy", (t) => {
const labels = ["hippo"];
const client = new Client({ labels, wid: "something" });
const hello = client.buildHello({ i: 3, s: "123", v: 3 });
t.deepEqual(hello.labels, labels, "hello does not includes labels correctly");
});
test(".assertVersion: does not throw when version matches supported", (t) => {
t.notThrows(() => {
Client.assertVersion(2);
});
});
test(".assertVersion: throws when version does not match supported", (t) => {
t.throws(() => {
Client.assertVersion(4);
});
});
test("#new: unescapes password in url", (t) => {
const client = new Client({ url: "tcp://:abcd=@somehost:7419" });
t.is(client.password, "abcd=");
});
test("#info: sends info and parses response", async (t) => {
const client = new Client();
const info = await client.info();
t.truthy(info.faktory);
t.truthy(info.server_utc_time);
});
test("#info: client subsequent serial requests", async (t) => {
t.plan(5);
const client = new Client();
for (let i = 5; i > 0; i -= 1) {
t.truthy(await client.info(), `reply for info #${i} not ok`);
}
});
test("#push: pushes serially", async (t) => {
t.plan(4);
const client = new Client();
for (let i = 4; i > 0; i -= 1) {
t.truthy(await client.job("test", i).push());
}
});
test("#push: pushes concurrently", async (t) => {
const client = new Client();
const args = [0, 1, 2, 3, 4];
  await Promise.all(args.map((arg) => client.job("test", arg).push()));
t.pass();
});
test("#push: accepts a Job object", async (t) => {
const client = new Client();
const job = client.job("test");
t.is(await client.push(job), job.jid);
});
test("#fetch: fetches jobs", async (t) => {
const client = new Client();
const job = client.job("test");
await job.push();
const fetched = await client.fetch(job.queue);
if (!fetched) return t.fail("job not fetched");
t.truthy(fetched);
t.is(fetched.jid, job.jid);
t.deepEqual(fetched.args, job.args);
t.is(fetched.jobtype, job.jobtype);
return;
});
test("#beat: sends a heartbeat", async (t) => {
const client = new Client({ wid: "123" });
const resp = await client.beat();
t.is(resp, "OK");
});
test("#beat: returns a signal from the server", async (t) => {
await mocked(async (server, port) => {
server.on("BEAT", mocked.beat("quiet"));
const client = new Client({ port });
const resp = await client.beat();
t.is(resp, "quiet");
});
});
test("#connect: rejects connect when connection cannot be established", async (t) => {
const client = new Client({ url: "tcp://127.0.0.1:1" });
await t.throwsAsync(client.connect(), { message: /ECONNREFUSED/ });
});
test("#connect: rejects if handshake is not successful", async (t) => {
const client = new Client();
client.buildHello = () => {
throw new Error("test");
};
await t.throwsAsync(client.connect(), { message: /test/i });
});
test("#connect: connects explicitly", async (t) => {
t.plan(2);
await mocked(async (server, port) => {
server
.on("HELLO", () => {
t.is(1, 1);
})
.on("END", () => {
t.is(1, 1);
});
const client = new Client({ port });
await client.connect();
return client.close();
});
});
test("#job: returns a Job", (t) => {
const client = new Client();
t.truthy(client.job("test") instanceof Job);
});
test("#ack: ACKs a job", async (t) => {
const client = new Client();
const job = client.job("jobtype");
await job.push();
const fetched = await client.fetch(job.queue);
if (!fetched) return t.fail("job not fetched");
t.is(await client.ack(fetched.jid), "OK");
return;
});
test("#fetch: returns null when queue is empty", async (t) => {
await mocked(async (server, port) => {
server.on("FETCH", ({ socket }) => {
// null bulkstring
socket.write("$-1\r\n");
});
const client = new Client({ port });
const fetched = await client.fetch("default");
t.is(fetched, null);
});
});
test("#push: defaults job payload values according to spec", async (t) => {
let serverJob: JobPayload;
await mocked(async (server, port) => {
server.on("PUSH", ({ data, socket }) => {
serverJob = data;
socket.write("+OK\r\n");
});
const jobtype = "TestJob";
const client = new Client({ port });
const jid = await client.push({ jobtype });
t.deepEqual(serverJob, {
jid,
jobtype: "TestJob",
queue: "default",
args: [],
priority: 5,
retry: 25,
});
});
});
test("#pushBulk: defaults job payload values according to spec", async (t) => {
let serverJob: Array<JobPayload>;
await mocked(async (server, port) => {
server.on("PUSHB", ({ data, socket }) => {
serverJob = data;
socket.write("+{}\r\n");
});
const jobtype = "TestJob";
const jid1 = Job.jid();
const jid2 = Job.jid();
const client = new Client({ port });
await client.pushBulk([
{ jobtype, jid: jid1 },
{ jobtype, jid: jid2 },
]);
t.assert(Array.isArray(serverJob));
t.assert(serverJob.length === 2);
t.deepEqual(serverJob, [
{
jid: jid1,
jobtype: "TestJob",
queue: "default",
args: [],
priority: 5,
retry: 25,
},
{
jid: jid2,
jobtype: "TestJob",
queue: "default",
args: [],
priority: 5,
retry: 25,
},
]);
return;
});
});
test("#pushBulk resolves with the map of failed JIDs to RejectedJobFromPushBulk", async (t) => {
let jid1 = Job.jid();
let jid2 = Job.jid();
await mocked(async (server, port) => {
server.on("PUSHB", ({ data, socket }) => {
socket.write('+{"' + jid1 + '": "Failed"}\r\n');
});
const client = new Client({ port });
const response = await client.pushBulk([
{ jobtype: "MyJob", jid: jid1, args: [3] },
{ jobtype: "MyJob", jid: jid2 },
]);
t.deepEqual(response[jid1].reason, "Failed");
t.deepEqual(response[jid1].payload.args, [3]);
return;
});
});
test("#fail: FAILs a job", async (t) => {
const client = new Client();
const job = client.job("test");
await job.push();
const fetched = await client.fetch(job.queue);
if (!fetched) return t.fail("job not fetched");
t.is(await client.fail(fetched.jid, new Error("EHANGRY")), "OK");
return;
});
test("#fail: FAILs a job without a stack", async (t) => {
// #29
const client = new Client();
const job = client.job("test");
await job.push();
const fetched = await client.fetch(job.queue);
if (!fetched) return t.fail("job not fetched");
const error = new Error("EHANGRY");
delete error.stack;
t.is(await client.fail(fetched.jid, error), "OK");
return;
});
test("#fail: FAILs a job with a non-string error code", async (t) => {
const client = new Client();
const job = client.job("test");
await job.push();
const fetched = await client.fetch(job.queue);
if (!fetched) return t.fail("job not fetched");
class CustomError extends Error {
public readonly code;
constructor(code: number, message: string) {
super(message);
this.code = code;
}
}
const error = new CustomError(1234, "ETOOMANYDIGITS");
t.is(await client.fail(fetched.jid, error), "OK");
return;
});
test("#job: returns a job builder", (t) => {
const client = new Client();
const job = client.job("MyTestJob");
t.truthy(job instanceof Job);
});
test("#job: provides the client to the job", (t) => {
const client = new Client();
const job = client.job("MyTestJob");
t.is(job.client, client);
});
test("#job: provides the args to the job", (t) => {
const client = new Client();
const job = client.job("MyTestJob", 1, 2, 3);
t.deepEqual(job.args, [1, 2, 3]);
});
test("#job: push sends job specification to server", async (t) => {
await mocked(async (server, port) => {
server.on("PUSH", ({ data, socket }) => {
socket.write("+OK\r\n");
const { jobtype, args, custom, retry } = data;
t.is(jobtype, "MyJob");
t.deepEqual(args, [1, 2, 3]);
t.deepEqual(custom, { locale: "en-us" });
t.is(retry, -1);
});
const client = new Client({ port });
const job = client.job("MyJob", 1, 2, 3);
job.retry = -1;
job.custom = { locale: "en-us" };
await job.push();
});
});
test("#job: push resolves with the jid", async (t) => {
await mocked(async (server, port) => {
server.on("PUSH", ({ data, socket }) => {
socket.write("+OK\r\n");
});
const client = new Client({ port });
const jid = await client.job("MyJob").push();
t.truthy(/\w{8}-\w{4}-\w{4}-\w{4}-\w{12}/.test(jid));
});
});
test("#job: SUCCESS, pushBulk resolves with the empty json", async (t) => {
await mocked(async (server, port) => {
server.on("PUSHB", ({ data, socket }) => {
socket.write("+{}\r\n");
});
const client = new Client({ port });
const response = await client.pushBulk([
{ jobtype: "MyJob", jid: Job.jid() },
]);
t.deepEqual(response, JSON.parse("{}"));
return;
});
});
test.skip("shutdown: shuts down before timeout", async (t) => {});
| {
"content_hash": "580fbe86c73f465cde11598af472dfeb",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 96,
"avg_line_length": 25.317422434367543,
"alnum_prop": 0.582579185520362,
"repo_name": "jbielick/faktory_worker_node",
"id": "72a218eab11d8699b256da2750dd455537f2434e",
"size": "10608",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/__tests__/client.test.ts",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4472"
},
{
"name": "Shell",
"bytes": "301"
},
{
"name": "TypeScript",
"bytes": "106644"
}
],
"symlink_target": ""
} |
#import "GPTimelineHeaderView.h"
static void *GPTimelineHeaderScrollViewDidScrollViewContext;
#define AVATAR_LENGTH 72.0f
#define HEADER_HEIGTH 100.0f
#define AVATAR_FRAME CGRectIntegral((CGRect){15.0f, CGRectGetHeight(self.bounds) - AVATAR_LENGTH + 8.0f, AVATAR_LENGTH, AVATAR_LENGTH})
#define ABOUT_ME_FRAME CGRectIntegral((CGRect){CGRectGetMaxX(self.avatarImageView.frame) + 10.0f, CGRectGetMidY(self.avatarImageView.frame) - 12.0, 140.0f, 24.0f})
/* ------------------------------------------------------------------------------------------------------
@implementation GPTimelineHeaderView
------------------------------------------------------------------------------------------------------ */
@implementation GPTimelineHeaderView
@synthesize aboutMeButton = _aboutMeButton;
@synthesize avatarImageView = _avatarImageView;
@synthesize backgroundImageView = _backgroundImageView;
#pragma mark -
#pragma mark Getter
- (UIImageView *)backgroundImageView {
if (_backgroundImageView) {
return _backgroundImageView;
}
UIImageView *imageView = [[UIImageView alloc] init];
imageView.contentMode = UIViewContentModeScaleAspectFill;
imageView.clipsToBounds = YES;
[self addSubview:imageView];
[self sendSubviewToBack:imageView];
_backgroundImageView = imageView;
return _backgroundImageView;
}
- (GPAboutMeButton *)aboutMeButton {
if (_aboutMeButton) {
return _aboutMeButton;
}
GPAboutMeButton *button = [[GPAboutMeButton alloc] init];
[self addSubview:button];
_aboutMeButton = button;
return _aboutMeButton;
}
- (UIImageView *)avatarImageView {
if (_avatarImageView) {
return _avatarImageView;
}
UIImageView *imageView = [[UIImageView alloc] initWithImage:[UIImage imageNamed:@"Avatar"]];
[self addSubview:imageView];
_avatarImageView = imageView;
return _avatarImageView;
}
#pragma mark -
#pragma mark KVO
- (void)observeValueForKeyPath:(NSString *)keyPath ofObject:(UIScrollView *)object change:(NSDictionary *)change context:(void *)context {
if (context == GPTimelineHeaderScrollViewDidScrollViewContext) {
[self _scrollViewDidScroll:[[change valueForKey:NSKeyValueChangeNewKey] CGPointValue]];
}
}
- (void)_scrollViewDidScroll:(CGPoint)contentOffset {
if (contentOffset.y > 0) {
return;
}
self.frame = CGRectIntegral((CGRect){0.0f, contentOffset.y, CGRectGetWidth(self.bounds), contentOffset.y * (-1)});
}
#pragma mark -
#pragma mark Layout
- (CGSize)sizeThatFits:(CGSize)size {
return (CGSize){CGRectGetWidth(self.superview.bounds), HEADER_HEIGTH};
}
- (void)didMoveToSuperview {
NSAssert([self.superview isKindOfClass:[UIScrollView class]], @"Superview must be of class UIScrollView");
UIScrollView *scrollView = (UIScrollView *)self.superview;
UIEdgeInsets newInset = scrollView.contentInset;
newInset.top = HEADER_HEIGTH;
scrollView.contentInset = newInset;
[self.superview addObserver:self
forKeyPath:@"contentOffset"
options:NSKeyValueObservingOptionNew
context:GPTimelineHeaderScrollViewDidScrollViewContext];
[self sizeToFit];
}
- (void)removeFromSuperview {
// The KVO observer was registered on the superview (the scroll view) in
// didMoveToSuperview, so remove it from there while the reference is still valid.
[self.superview removeObserver:self forKeyPath:@"contentOffset" context:GPTimelineHeaderScrollViewDidScrollViewContext];
[super removeFromSuperview];
}
- (void)layoutSubviews {
[super layoutSubviews];
self.backgroundImageView.frame = self.bounds;
self.avatarImageView.frame = AVATAR_FRAME;
self.aboutMeButton.frame = ABOUT_ME_FRAME;
}
@end
| {
"content_hash": "bbb4e0ae671a1eefe6df8fe2d3ccb5a0",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 163,
"avg_line_length": 30.297520661157026,
"alnum_prop": 0.6718494271685761,
"repo_name": "Gi-lo/AboutMe-WWDC",
"id": "5cd8d09fc1bf631451e7ab8143e206bcec67f65c",
"size": "5039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Giulio Petek/Giulio Petek/View/Timeline/GPTimelineHeaderView.m",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-C",
"bytes": "134726"
}
],
"symlink_target": ""
} |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
// gSyncUI handles updating the tools menu
let gSyncUI = {
_obs: ["weave:service:sync:start",
"weave:service:sync:delayed",
"weave:service:quota:remaining",
"weave:service:setup-complete",
"weave:service:login:start",
"weave:service:login:finish",
"weave:service:logout:finish",
"weave:service:start-over",
"weave:ui:login:error",
"weave:ui:sync:error",
"weave:ui:sync:finish",
"weave:ui:clear-error",
],
_unloaded: false,
init: function SUI_init() {
// Proceed to set up the UI if Sync has already started up.
// Otherwise we'll do it when Sync is firing up.
if (Weave.Status.ready) {
this.initUI();
return;
}
Services.obs.addObserver(this, "weave:service:ready", true);
// Remove the observer if the window is closed before the observer
// was triggered.
window.addEventListener("unload", function onUnload() {
gSyncUI._unloaded = true;
window.removeEventListener("unload", onUnload, false);
Services.obs.removeObserver(gSyncUI, "weave:service:ready");
if (Weave.Status.ready) {
gSyncUI._obs.forEach(function(topic) {
Services.obs.removeObserver(gSyncUI, topic);
});
}
}, false);
},
initUI: function SUI_initUI() {
// If this is a browser window, also observe notifications.
if (gBrowser) {
this._obs.push("weave:notification:added");
}
this._obs.forEach(function(topic) {
Services.obs.addObserver(this, topic, true);
}, this);
if (gBrowser && Weave.Notifications.notifications.length) {
this.initNotifications();
}
this.updateUI();
},
initNotifications: function SUI_initNotifications() {
const XULNS = "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul";
let notificationbox = document.createElementNS(XULNS, "notificationbox");
notificationbox.id = "sync-notifications";
notificationbox.setAttribute("flex", "1");
let bottombox = document.getElementById("browser-bottombox");
bottombox.insertBefore(notificationbox, bottombox.firstChild);
// Force a style flush to ensure that our binding is attached.
notificationbox.clientTop;
// notificationbox will listen to observers from now on.
Services.obs.removeObserver(this, "weave:notification:added");
},
_wasDelayed: false,
_needsSetup: function SUI__needsSetup() {
let firstSync = "";
try {
firstSync = Services.prefs.getCharPref("services.sync.firstSync");
} catch (e) { }
return Weave.Status.checkSetup() == Weave.CLIENT_NOT_CONFIGURED ||
firstSync == "notReady";
},
updateUI: function SUI_updateUI() {
let needsSetup = this._needsSetup();
document.getElementById("sync-setup-state").hidden = !needsSetup;
document.getElementById("sync-syncnow-state").hidden = needsSetup;
if (!gBrowser)
return;
let button = document.getElementById("sync-button");
if (!button)
return;
button.removeAttribute("status");
this._updateLastSyncTime();
if (needsSetup)
button.removeAttribute("tooltiptext");
},
// Functions called by observers
onActivityStart: function SUI_onActivityStart() {
if (!gBrowser)
return;
let button = document.getElementById("sync-button");
if (!button)
return;
button.setAttribute("status", "active");
},
onSyncDelay: function SUI_onSyncDelay() {
// basically, we want to just inform users that stuff is going to take a while
let title = this._stringBundle.GetStringFromName("error.sync.no_node_found.title");
let description = this._stringBundle.GetStringFromName("error.sync.no_node_found");
let buttons = [new Weave.NotificationButton(
this._stringBundle.GetStringFromName("error.sync.serverStatusButton.label"),
this._stringBundle.GetStringFromName("error.sync.serverStatusButton.accesskey"),
function() { gSyncUI.openServerStatus(); return true; }
)];
let notification = new Weave.Notification(
title, description, null, Weave.Notifications.PRIORITY_INFO, buttons);
Weave.Notifications.replaceTitle(notification);
this._wasDelayed = true;
},
onLoginFinish: function SUI_onLoginFinish() {
// Clear out any login failure notifications
let title = this._stringBundle.GetStringFromName("error.login.title");
this.clearError(title);
},
onSetupComplete: function SUI_onSetupComplete() {
this.onLoginFinish();
},
onLoginError: function SUI_onLoginError() {
// if login fails, any other notifications are essentially moot
Weave.Notifications.removeAll();
// if we haven't set up the client, don't show errors
if (this._needsSetup()) {
this.updateUI();
return;
}
let title = this._stringBundle.GetStringFromName("error.login.title");
let description;
if (Weave.Status.sync == Weave.PROLONGED_SYNC_FAILURE) {
// Convert to days
let lastSync =
Services.prefs.getIntPref("services.sync.errorhandler.networkFailureReportTimeout") / 86400;
description =
this._stringBundle.formatStringFromName("error.sync.prolonged_failure", [lastSync], 1);
} else {
let reason = Weave.Utils.getErrorString(Weave.Status.login);
description =
this._stringBundle.formatStringFromName("error.sync.description", [reason], 1);
}
let buttons = [];
buttons.push(new Weave.NotificationButton(
this._stringBundle.GetStringFromName("error.login.prefs.label"),
this._stringBundle.GetStringFromName("error.login.prefs.accesskey"),
function() { gSyncUI.openPrefs(); return true; }
));
let notification = new Weave.Notification(title, description, null,
Weave.Notifications.PRIORITY_WARNING, buttons);
Weave.Notifications.replaceTitle(notification);
this.updateUI();
},
onLogout: function SUI_onLogout() {
this.updateUI();
},
onStartOver: function SUI_onStartOver() {
this.clearError();
},
onQuotaNotice: function onQuotaNotice(subject, data) {
let title = this._stringBundle.GetStringFromName("warning.sync.quota.label");
let description = this._stringBundle.GetStringFromName("warning.sync.quota.description");
let buttons = [];
buttons.push(new Weave.NotificationButton(
this._stringBundle.GetStringFromName("error.sync.viewQuotaButton.label"),
this._stringBundle.GetStringFromName("error.sync.viewQuotaButton.accesskey"),
function() { gSyncUI.openQuotaDialog(); return true; }
));
let notification = new Weave.Notification(
title, description, null, Weave.Notifications.PRIORITY_WARNING, buttons);
Weave.Notifications.replaceTitle(notification);
},
openServerStatus: function () {
let statusURL = Services.prefs.getCharPref("services.sync.statusURL");
window.openUILinkIn(statusURL, "tab");
},
// Commands
doSync: function SUI_doSync() {
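    // Note (added for clarity): the line below uses legacy SpiderMonkey
    // expression-closure syntax; in standard JavaScript it would read
    //   setTimeout(function() { Weave.ErrorHandler.syncAndReportErrors(); }, 0);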
setTimeout(function() Weave.ErrorHandler.syncAndReportErrors(), 0);
},
  handleToolbarButton: function SUI_handleToolbarButton() {
if (this._needsSetup())
this.openSetup();
else
this.doSync();
},
//XXXzpao should be part of syncCommon.js - which we might want to make a module...
// To be fixed in a followup (bug 583366)
/**
* Invoke the Sync setup wizard.
*
* @param wizardType
* Indicates type of wizard to launch:
* null -- regular set up wizard
* "pair" -- pair a device first
* "reset" -- reset sync
*/
openSetup: function SUI_openSetup(wizardType) {
let win = Services.wm.getMostRecentWindow("Weave:AccountSetup");
if (win)
win.focus();
else {
window.openDialog("chrome://browser/content/sync/setup.xul",
"weaveSetup", "centerscreen,chrome,resizable=no",
wizardType);
}
},
openAddDevice: function () {
if (!Weave.Utils.ensureMPUnlocked())
return;
let win = Services.wm.getMostRecentWindow("Sync:AddDevice");
if (win)
win.focus();
else
window.openDialog("chrome://browser/content/sync/addDevice.xul",
"syncAddDevice", "centerscreen,chrome,resizable=no");
},
openQuotaDialog: function SUI_openQuotaDialog() {
let win = Services.wm.getMostRecentWindow("Sync:ViewQuota");
if (win)
win.focus();
else
Services.ww.activeWindow.openDialog(
"chrome://browser/content/sync/quota.xul", "",
"centerscreen,chrome,dialog,modal");
},
openPrefs: function SUI_openPrefs() {
openPreferences("paneSync");
},
// Helpers
_updateLastSyncTime: function SUI__updateLastSyncTime() {
if (!gBrowser)
return;
let syncButton = document.getElementById("sync-button");
if (!syncButton)
return;
let lastSync;
try {
lastSync = Services.prefs.getCharPref("services.sync.lastSync");
}
    catch (e) { }
if (!lastSync || this._needsSetup()) {
syncButton.removeAttribute("tooltiptext");
return;
}
// Show the day-of-week and time (HH:MM) of last sync
let lastSyncDate = new Date(lastSync).toLocaleFormat("%a %H:%M");
let lastSyncLabel =
this._stringBundle.formatStringFromName("lastSync2.label", [lastSyncDate], 1);
syncButton.setAttribute("tooltiptext", lastSyncLabel);
},
clearError: function SUI_clearError(errorString) {
Weave.Notifications.removeAll(errorString);
this.updateUI();
},
onSyncFinish: function SUI_onSyncFinish() {
let title = this._stringBundle.GetStringFromName("error.sync.title");
// Clear out sync failures on a successful sync
this.clearError(title);
if (this._wasDelayed && Weave.Status.sync != Weave.NO_SYNC_NODE_FOUND) {
title = this._stringBundle.GetStringFromName("error.sync.no_node_found.title");
this.clearError(title);
this._wasDelayed = false;
}
},
onSyncError: function SUI_onSyncError() {
let title = this._stringBundle.GetStringFromName("error.sync.title");
if (Weave.Status.login != Weave.LOGIN_SUCCEEDED) {
this.onLoginError();
return;
}
let description;
if (Weave.Status.sync == Weave.PROLONGED_SYNC_FAILURE) {
// Convert to days
let lastSync =
Services.prefs.getIntPref("services.sync.errorhandler.networkFailureReportTimeout") / 86400;
description =
this._stringBundle.formatStringFromName("error.sync.prolonged_failure", [lastSync], 1);
} else {
let error = Weave.Utils.getErrorString(Weave.Status.sync);
description =
this._stringBundle.formatStringFromName("error.sync.description", [error], 1);
}
let priority = Weave.Notifications.PRIORITY_WARNING;
let buttons = [];
// Check if the client is outdated in some way
let outdated = Weave.Status.sync == Weave.VERSION_OUT_OF_DATE;
for (let [engine, reason] in Iterator(Weave.Status.engines))
outdated = outdated || reason == Weave.VERSION_OUT_OF_DATE;
if (outdated) {
description = this._stringBundle.GetStringFromName(
"error.sync.needUpdate.description");
buttons.push(new Weave.NotificationButton(
this._stringBundle.GetStringFromName("error.sync.needUpdate.label"),
this._stringBundle.GetStringFromName("error.sync.needUpdate.accesskey"),
function() { window.openUILinkIn("https://services.mozilla.com/update/", "tab"); return true; }
));
}
else if (Weave.Status.sync == Weave.OVER_QUOTA) {
description = this._stringBundle.GetStringFromName(
"error.sync.quota.description");
buttons.push(new Weave.NotificationButton(
this._stringBundle.GetStringFromName(
"error.sync.viewQuotaButton.label"),
this._stringBundle.GetStringFromName(
"error.sync.viewQuotaButton.accesskey"),
function() { gSyncUI.openQuotaDialog(); return true; } )
);
}
else if (Weave.Status.enforceBackoff) {
priority = Weave.Notifications.PRIORITY_INFO;
buttons.push(new Weave.NotificationButton(
this._stringBundle.GetStringFromName("error.sync.serverStatusButton.label"),
this._stringBundle.GetStringFromName("error.sync.serverStatusButton.accesskey"),
function() { gSyncUI.openServerStatus(); return true; }
));
}
else {
priority = Weave.Notifications.PRIORITY_INFO;
buttons.push(new Weave.NotificationButton(
this._stringBundle.GetStringFromName("error.sync.tryAgainButton.label"),
this._stringBundle.GetStringFromName("error.sync.tryAgainButton.accesskey"),
function() { gSyncUI.doSync(); return true; }
));
}
let notification =
new Weave.Notification(title, description, null, priority, buttons);
Weave.Notifications.replaceTitle(notification);
if (this._wasDelayed && Weave.Status.sync != Weave.NO_SYNC_NODE_FOUND) {
title = this._stringBundle.GetStringFromName("error.sync.no_node_found.title");
Weave.Notifications.removeAll(title);
this._wasDelayed = false;
}
this.updateUI();
},
observe: function SUI_observe(subject, topic, data) {
if (this._unloaded) {
Cu.reportError("SyncUI observer called after unload: " + topic);
return;
}
switch (topic) {
case "weave:service:sync:start":
this.onActivityStart();
break;
case "weave:ui:sync:finish":
this.onSyncFinish();
break;
case "weave:ui:sync:error":
this.onSyncError();
break;
case "weave:service:sync:delayed":
this.onSyncDelay();
break;
case "weave:service:quota:remaining":
this.onQuotaNotice();
break;
case "weave:service:setup-complete":
this.onSetupComplete();
break;
case "weave:service:login:start":
this.onActivityStart();
break;
case "weave:service:login:finish":
this.onLoginFinish();
break;
case "weave:ui:login:error":
this.onLoginError();
break;
case "weave:service:logout:finish":
this.onLogout();
break;
case "weave:service:start-over":
this.onStartOver();
break;
case "weave:service:ready":
this.initUI();
break;
case "weave:notification:added":
this.initNotifications();
break;
case "weave:ui:clear-error":
this.clearError();
break;
}
},
QueryInterface: XPCOMUtils.generateQI([
Ci.nsIObserver,
Ci.nsISupportsWeakReference
])
};
XPCOMUtils.defineLazyGetter(gSyncUI, "_stringBundle", function() {
//XXXzpao these strings should probably be moved from /services to /browser... (bug 583381)
// but for now just make it work
return Cc["@mozilla.org/intl/stringbundle;1"].
getService(Ci.nsIStringBundleService).
createBundle("chrome://weave/locale/services/sync.properties");
});
| {
"content_hash": "c3fc40cc2c98b727788793657c65261f",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 103,
"avg_line_length": 32.85775862068966,
"alnum_prop": 0.6582054309327037,
"repo_name": "sergecodd/FireFox-OS",
"id": "1f0f733c22e72e009a2b95fedbb80e080e0bc12a",
"size": "15246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "B2G/gecko/browser/base/content/browser-syncui.js",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "443"
},
{
"name": "ApacheConf",
"bytes": "85"
},
{
"name": "Assembly",
"bytes": "5123438"
},
{
"name": "Awk",
"bytes": "46481"
},
{
"name": "Batchfile",
"bytes": "56250"
},
{
"name": "C",
"bytes": "101720951"
},
{
"name": "C#",
"bytes": "38531"
},
{
"name": "C++",
"bytes": "148896543"
},
{
"name": "CMake",
"bytes": "23541"
},
{
"name": "CSS",
"bytes": "2758664"
},
{
"name": "DIGITAL Command Language",
"bytes": "56757"
},
{
"name": "Emacs Lisp",
"bytes": "12694"
},
{
"name": "Erlang",
"bytes": "889"
},
{
"name": "FLUX",
"bytes": "34449"
},
{
"name": "GLSL",
"bytes": "26344"
},
{
"name": "Gnuplot",
"bytes": "710"
},
{
"name": "Groff",
"bytes": "447012"
},
{
"name": "HTML",
"bytes": "43343468"
},
{
"name": "IDL",
"bytes": "1455122"
},
{
"name": "Java",
"bytes": "43261012"
},
{
"name": "JavaScript",
"bytes": "46646658"
},
{
"name": "Lex",
"bytes": "38358"
},
{
"name": "Logos",
"bytes": "21054"
},
{
"name": "Makefile",
"bytes": "2733844"
},
{
"name": "Matlab",
"bytes": "67316"
},
{
"name": "Max",
"bytes": "3698"
},
{
"name": "NSIS",
"bytes": "421625"
},
{
"name": "Objective-C",
"bytes": "877657"
},
{
"name": "Objective-C++",
"bytes": "737713"
},
{
"name": "PHP",
"bytes": "17415"
},
{
"name": "Pascal",
"bytes": "6780"
},
{
"name": "Perl",
"bytes": "1153180"
},
{
"name": "Perl6",
"bytes": "1255"
},
{
"name": "PostScript",
"bytes": "1139"
},
{
"name": "PowerShell",
"bytes": "8252"
},
{
"name": "Protocol Buffer",
"bytes": "26553"
},
{
"name": "Python",
"bytes": "8453201"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3481"
},
{
"name": "Ruby",
"bytes": "5116"
},
{
"name": "Scilab",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "3383832"
},
{
"name": "SourcePawn",
"bytes": "23661"
},
{
"name": "TeX",
"bytes": "879606"
},
{
"name": "WebIDL",
"bytes": "1902"
},
{
"name": "XSLT",
"bytes": "13134"
},
{
"name": "Yacc",
"bytes": "112744"
}
],
"symlink_target": ""
} |
Translation_Jobs.listing.models.ListingItem = Backbone.Model.extend({
get_type: function () {
return this.get('type');
},
is_string: function () {
return this.get_type() === 'String';
}
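  // Illustrative usage (not from the original source):
  //   var item = new Translation_Jobs.listing.models.ListingItem({ type: 'String' });
  //   item.is_string(); // => true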
}); | {
"content_hash": "758d550719d5baafe1c46c78bfdebb5f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 20.363636363636363,
"alnum_prop": 0.5758928571428571,
"repo_name": "Kilbourne/biosphaera",
"id": "083aa8822c9deba5662377e01a90d8af70bf5107",
"size": "224",
"binary": false,
"copies": "25",
"ref": "refs/heads/master",
"path": "web/app/plugins/wpml-translation-management/res/js/listing/models/ListingItem.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "223361"
},
{
"name": "HTML",
"bytes": "40850"
},
{
"name": "JavaScript",
"bytes": "44809"
},
{
"name": "PHP",
"bytes": "173651"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2005 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- $Rev$ $Date$ -->
<project>
<pomVersion>3</pomVersion>
<extend>../../etc/project.xml</extend>
<id>online-deployer</id>
<name>Online Deployer</name>
<dependencies>
<!-- dependency on deployer -->
<dependency>
<groupId>geronimo</groupId>
<artifactId>geronimo-packaging-plugin</artifactId>
<version>${geronimo_packaging_plugin_version}</version>
<type>plugin</type>
</dependency>
<dependency>
<groupId>geronimo</groupId>
<artifactId>geronimo-deploy-tool</artifactId>
<version>${geronimo_version}</version>
</dependency>
<dependency>
<groupId>mx4j</groupId>
<artifactId>mx4j</artifactId>
<version>${mx4j_version}</version>
</dependency>
<dependency>
<groupId>mx4j</groupId>
<artifactId>mx4j-remote</artifactId>
<version>${mx4j_version}</version>
</dependency>
<!-- Module Dependencies -->
<dependency>
<groupId>org.apache.geronimo.specs</groupId>
<artifactId>geronimo-j2ee-deployment_1.1_spec</artifactId>
<version>${geronimo_spec_j2ee_deployment_version}</version>
</dependency>
<dependency>
<groupId>geronimo</groupId>
<artifactId>geronimo-common</artifactId>
<version>${pom.currentVersion}</version>
</dependency>
<dependency>
<groupId>geronimo</groupId>
<artifactId>geronimo-util</artifactId>
<version>${pom.currentVersion}</version>
</dependency>
<dependency>
<groupId>geronimo</groupId>
<artifactId>geronimo-system</artifactId>
<version>${pom.currentVersion}</version>
</dependency>
<dependency>
<groupId>geronimo</groupId>
<artifactId>geronimo-deployment</artifactId>
<version>${pom.currentVersion}</version>
</dependency>
<dependency>
<groupId>geronimo</groupId>
<artifactId>geronimo-deploy-jsr88</artifactId>
<version>${pom.currentVersion}</version>
</dependency>
<dependency>
<groupId>geronimo</groupId>
<artifactId>geronimo-service-builder</artifactId>
<version>${pom.currentVersion}</version>
</dependency>
</dependencies>
</project>
| {
"content_hash": "ddfa1badca4b1307dc9367094a88ecc0",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 76,
"avg_line_length": 31.46,
"alnum_prop": 0.6004450095359186,
"repo_name": "meetdestiny/geronimo-trader",
"id": "ced0a1761b81e9c6d2562a9f3cb1294e1f0b5e81",
"size": "3146",
"binary": false,
"copies": "2",
"ref": "refs/heads/1.0",
"path": "configs/online-deployer/project.xml",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "47972"
},
{
"name": "Java",
"bytes": "8387339"
},
{
"name": "JavaScript",
"bytes": "906"
},
{
"name": "Shell",
"bytes": "62441"
},
{
"name": "XSLT",
"bytes": "4468"
}
],
"symlink_target": ""
} |
import StaticPrefixer from '../../modules/prefixer/StaticPrefixer'
describe('Statically prefixing styles', () => {
it('should return prefixed styles', () => {
expect(new StaticPrefixer().prefix({ appearance: 'test' })).to.eql({
WebkitAppearance: 'test',
MozAppearance: 'test',
appearance: 'test'
})
})
it('should return all prefixes needed for keyframes', () => {
expect(new StaticPrefixer().getKeyframesPrefix()).to.eql([ '-webkit-', '-moz-', '' ])
})
})
| {
"content_hash": "c88a5f155f97e48ceb24d83fff280498",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 89,
"avg_line_length": 31.0625,
"alnum_prop": 0.6257545271629779,
"repo_name": "rofrischmann/react-look",
"id": "d5495f15b0e4bbd1b44287685139b80b8e68453e",
"size": "497",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "packages/react-look/test/prefixer/StaticPrefixer-test.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "495"
},
{
"name": "JavaScript",
"bytes": "27945"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
/**
* console-log-extend.module.js
*
* @package ConsoleLogExtendModule
* @category Module
* @version 1.0
* @author Ricky Hurtado <[email protected]>
*/
// ConsoleLogExtend Namespace
I.ConsoleLogExtend = {};
define([
'console_log_extend_view',
'console_log_extend_router'
],
function(
ConsoleLogExtendView,
ConsoleLogExtendRouter
){
/**
* Init ConsoleLogExtend properties
*/
I.ConsoleLogExtend.View = new ConsoleLogExtendView;
I.ConsoleLogExtend.Router = new ConsoleLogExtendRouter('router-and-view');
/**
* Init ConsoleLogExtendModule class
*/
var ConsoleLogExtendModule = Backbone.Module.extend(
{
/**
	 * Initialize module
*/
initialize : function()
{
console.log('Backbone.Module.ConsoleLogExtendModule has been initialized.');
// Merge sub routers
I.Message.Router = I.ConsoleLogExtend.Router;
}
});
return ConsoleLogExtendModule;
}); | {
"content_hash": "541813af620eee1125ee07efcd69f63b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 82,
"avg_line_length": 22.066666666666666,
"alnum_prop": 0.6424974823766365,
"repo_name": "rickyhurtado/ironframework",
"id": "66694dd965b1a3630cbb978add7db96040457c16",
"size": "993",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demos/php/basic/assets/js/modules/console-log-extend/dev/console-log-extend.module.dev.js",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "636"
},
{
"name": "JavaScript",
"bytes": "342362"
},
{
"name": "PHP",
"bytes": "18719"
}
],
"symlink_target": ""
} |
<html lang="ta">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
<meta http-equiv="Pragma" content="no-cache">
<meta http-equiv="Expires" content="0">
<style>
@font-face { font-family:"NotoSansTamil"; src:url("https://cdn.jsdelivr.net/gh/googlefonts/noto-fonts@main/unhinted/otf/NotoSansTamil/NotoSansTamil-Regular.otf");
}
@font-face { font-family:"NotoSerifTamil"; src:url("https://cdn.jsdelivr.net/gh/googlefonts/noto-fonts@main/unhinted/otf/NotoSerifTamil/NotoSerifTamil-Regular.otf");
}
@font-face { font-family:"NotoSerifTamilSlanted"; src:url("https://cdn.jsdelivr.net/gh/googlefonts/noto-fonts@main/unhinted/otf/NotoSerifTamilSlanted/NotoSerifTamilSlanted-Regular.otf");
}
@font-face {font-family: AdobeBlank; src:url(AdobeBlank.ttf);}
</style>
</head>
<body>
<a href="https://github.com/googlefonts/noto-fonts/issues/1698">Issue 1698</a>
<p> </p>
<p>NotoSansTamil: <span style="font-family:NotoSansTamil, AdobeBlank; font-weight:400; background-color:#EDEBEA">க𑌼ுகு</span></p>
<p>NotoSerifTamil: <span style="font-family:NotoSerifTamil, AdobeBlank; font-weight:400; background-color:#EDEBEA">க𑌼ுகு</span></p>
<p>NotoSerifTamilSlanted: <span style="font-family:NotoSerifTamilSlanted, AdobeBlank; font-weight:400; background-color:#EDEBEA">க𑌼ுகு</span></p>
<label style="font-size:10px"> Test details:<br>Tested font directory: https://github.com/googlefonts/noto-fonts/tree/main/unhinted/otf/NotoSansTamil/NotoSansTamil-Regular.otf</label><br><label style="font-size:10px">Test created date: </label><label style="font-size:10px" id="date"></label>
<script>
var date_tag = new Date(); document.getElementById("date").innerHTML=date_tag;
</script>
</body>
</html>
| {
"content_hash": "8c24dcdb83fabd9d33c95e1c2038e2b1",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 292,
"avg_line_length": 65.17241379310344,
"alnum_prop": 0.746031746031746,
"repo_name": "googlefonts/noto-source",
"id": "616f0371585f79e3325ca6ced010111ded4abcda",
"size": "1890",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "test/Tamil/1698-NotoSerifTamil.html",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "21014413"
},
{
"name": "Python",
"bytes": "16989"
},
{
"name": "Shell",
"bytes": "34567"
}
],
"symlink_target": ""
} |
package org.apache.geode.internal.cache;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.logging.log4j.Logger;
import org.apache.geode.DataSerializer;
import org.apache.geode.Instantiator;
import org.apache.geode.annotations.internal.MakeNotStatic;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolFactory;
import org.apache.geode.cache.client.PoolManager;
import org.apache.geode.cache.client.internal.PoolImpl;
import org.apache.geode.cache.client.internal.RegisterDataSerializersOp;
import org.apache.geode.cache.client.internal.RegisterInstantiatorsOp;
import org.apache.geode.distributed.internal.InternalDistributedSystem;
import org.apache.geode.internal.InternalDataSerializer;
import org.apache.geode.internal.InternalDataSerializer.SerializerAttributesHolder;
import org.apache.geode.internal.InternalInstantiator;
import org.apache.geode.internal.InternalInstantiator.InstantiatorAttributesHolder;
import org.apache.geode.internal.cache.tier.sockets.ServerConnection;
import org.apache.geode.internal.cache.xmlcache.CacheCreation;
import org.apache.geode.internal.logging.LogService;
/**
* Implementation used by PoolManager.
*
* @since GemFire 5.7
*
*/
public class PoolManagerImpl {
private static final Logger logger = LogService.getLogger();
@MakeNotStatic
private static final PoolManagerImpl impl = new PoolManagerImpl(true);
public static PoolManagerImpl getPMI() {
PoolManagerImpl result = CacheCreation.getCurrentPoolManager();
if (result == null) {
result = impl;
}
return result;
}
private volatile Map<String, Pool> pools = Collections.emptyMap();
private volatile Iterator<Map.Entry<String, Pool>> itrForEmergencyClose = null;
private final Object poolLock = new Object();
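  // Note (added for clarity, not in the original source): 'pools' is managed
  // copy-on-write -- every mutation under 'poolLock' swaps in a fresh
  // unmodifiable map, so readers such as find() can use the volatile field
  // without synchronization.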
/**
* True if this manager is a normal one owned by the PoolManager. False if this is a special one
* owned by a xml CacheCreation.
*/
private final boolean normalManager;
/**
* @param addListener will be true if the is a real manager that needs to register a connect
* listener. False if it is a fake manager used internally by the XML code.
*/
public PoolManagerImpl(boolean addListener) {
normalManager = addListener;
}
/**
* Returns true if this is a normal manager; false if it is a fake one used for xml parsing.
*/
public boolean isNormal() {
return normalManager;
}
/**
* Creates a new {@link PoolFactory pool factory}, which is used to configure and create new
* {@link Pool}s.
*
* @return the new pool factory
*/
public PoolFactory createFactory() {
return new PoolFactoryImpl(this);
}
/**
* Find by name an existing connection pool returning the existing pool or <code>null</code> if it
* does not exist.
*
* @param name the name of the connection pool
* @return the existing connection pool or <code>null</code> if it does not exist.
*/
public Pool find(String name) {
return pools.get(name);
}
/**
* Destroys all created pool in this manager.
*/
public void close(boolean keepAlive) {
// destroying connection pools
boolean foundClientPool = false;
synchronized (poolLock) {
for (Entry<String, Pool> entry : pools.entrySet()) {
PoolImpl pool = (PoolImpl) entry.getValue();
pool.basicDestroy(keepAlive);
foundClientPool = true;
}
pools = Collections.emptyMap();
itrForEmergencyClose = null;
if (foundClientPool) {
// Now that the client has all the pools destroyed free up the pooled comm buffers
ServerConnection.emptyCommBufferPool();
}
}
}
/**
* @return a copy of the Pools Map
*/
public Map<String, Pool> getMap() {
return new HashMap<>(pools);
}
/**
* This is called by {@link PoolImpl#create}
*
* @throws IllegalStateException if a pool with same name is already registered.
*/
public void register(Pool pool) {
synchronized (poolLock) {
Map<String, Pool> copy = new HashMap<>(pools);
String name = pool.getName();
// debugStack("register pool=" + name);
Object old = copy.put(name, pool);
if (old != null) {
throw new IllegalStateException(
String.format("A pool named %s already exists", name));
}
// Boolean specialCase=Boolean.getBoolean("gemfire.SPECIAL_DURABLE");
// if(specialCase && copy.size()>1){
// throw new IllegalStateException("Using SPECIAL_DURABLE system property"
// + " and more than one pool already exists in client.");
// }
pools = Collections.unmodifiableMap(copy);
itrForEmergencyClose = copy.entrySet().iterator();
}
}
/**
* This is called by {@link Pool#destroy(boolean)}
*
* @return true if pool unregistered from cache; false if someone else already did it
*/
public boolean unregister(Pool pool) {
// Continue only if the pool is not currently being used by any region and/or service.
int attachCount = ((PoolImpl) pool).getAttachCount();
if (attachCount > 0) {
throw new IllegalStateException(String.format(
"Pool could not be destroyed because it is still in use by %s regions", attachCount));
}
synchronized (poolLock) {
Map<String, Pool> copy = new HashMap<>(pools);
String name = pool.getName();
// debugStack("unregister pool=" + name);
Object rmPool = copy.remove(name);
if (rmPool == null || rmPool != pool) {
return false;
} else {
pools = Collections.unmodifiableMap(copy);
itrForEmergencyClose = copy.entrySet().iterator();
return true;
}
}
}
@Override
public String toString() {
return super.toString() + "-" + (normalManager ? "normal" : "xml");
}
/**
* @param xmlPoolsOnly if true then only call readyForEvents on pools declared in XML.
*/
public static void readyForEvents(InternalDistributedSystem system, boolean xmlPoolsOnly) {
boolean foundDurablePool = false;
Map<String, Pool> pools = PoolManager.getAll();
for (Pool pool : pools.values()) {
PoolImpl p = (PoolImpl) pool;
if (p.isDurableClient()) {
// TODO - handle an exception and attempt on all pools?
foundDurablePool = true;
if (!xmlPoolsOnly) {
p.readyForEvents(system);
}
}
}
if (pools.size() > 0 && !foundDurablePool) {
throw new IllegalStateException(
"Only durable clients should call readyForEvents()");
}
}
public static void allPoolsRegisterInstantiator(Instantiator instantiator) {
Instantiator[] instantiators = new Instantiator[] {instantiator};
for (Pool pool : PoolManager.getAll().values()) {
PoolImpl next = (PoolImpl) pool;
try {
EventID eventId = InternalInstantiator.generateEventId();
if (eventId != null) {
RegisterInstantiatorsOp.execute(next, instantiators, eventId);
}
} catch (RuntimeException e) {
logger.warn("Error registering instantiator on pool:", e);
}
}
}
public static void allPoolsRegisterInstantiator(InstantiatorAttributesHolder holder) {
InstantiatorAttributesHolder[] holders = new InstantiatorAttributesHolder[] {holder};
for (Pool pool : PoolManager.getAll().values()) {
PoolImpl next = (PoolImpl) pool;
try {
EventID eventId = InternalInstantiator.generateEventId();
if (eventId != null) {
RegisterInstantiatorsOp.execute(next, holders, eventId);
}
} catch (RuntimeException e) {
logger.warn("Error registering instantiator on pool:", e);
}
}
}
public static void allPoolsRegisterDataSerializers(DataSerializer dataSerializer) {
DataSerializer[] dataSerializers = new DataSerializer[] {dataSerializer};
for (Pool pool : PoolManager.getAll().values()) {
PoolImpl next = (PoolImpl) pool;
try {
EventID eventId = (EventID) dataSerializer.getEventId();
if (eventId == null) {
eventId = InternalDataSerializer.generateEventId();
}
if (eventId != null) {
RegisterDataSerializersOp.execute(next, dataSerializers, eventId);
}
} catch (RuntimeException e) {
logger.warn("Error registering instantiator on pool:", e);
}
}
}
public static void allPoolsRegisterDataSerializers(SerializerAttributesHolder holder) {
SerializerAttributesHolder[] holders = new SerializerAttributesHolder[] {holder};
for (Pool pool : PoolManager.getAll().values()) {
PoolImpl next = (PoolImpl) pool;
try {
EventID eventId = holder.getEventId();
if (eventId == null) {
eventId = InternalDataSerializer.generateEventId();
}
if (eventId != null) {
RegisterDataSerializersOp.execute(next, holders, eventId);
}
} catch (RuntimeException e) {
logger.warn("Error registering instantiator on pool:", e);
}
}
}
public static void emergencyClose() {
if (impl == null) {
return;
}
Iterator<Map.Entry<String, Pool>> itr = impl.itrForEmergencyClose;
if (itr == null) {
return;
}
while (itr.hasNext()) {
Entry<String, Pool> next = itr.next();
((PoolImpl) next.getValue()).emergencyClose();
}
}
public static void loadEmergencyClasses() {
PoolImpl.loadEmergencyClasses();
}
public Pool find(Region<?, ?> region) {
return find(region.getAttributes().getPoolName());
}
}
| {
"content_hash": "6117eb25965413ee4a284bc1801f44d2",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 100,
"avg_line_length": 33.17123287671233,
"alnum_prop": 0.6726202766880033,
"repo_name": "PurelyApplied/geode",
"id": "429480349e4a3134cd666a2a8f3afdb50c456e89",
"size": "10475",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "geode-core/src/main/java/org/apache/geode/internal/cache/PoolManagerImpl.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "106708"
},
{
"name": "Dockerfile",
"bytes": "16965"
},
{
"name": "Go",
"bytes": "1205"
},
{
"name": "Groovy",
"bytes": "38270"
},
{
"name": "HTML",
"bytes": "3793466"
},
{
"name": "Java",
"bytes": "30089003"
},
{
"name": "JavaScript",
"bytes": "1781602"
},
{
"name": "Python",
"bytes": "29327"
},
{
"name": "Ruby",
"bytes": "6656"
},
{
"name": "Shell",
"bytes": "136665"
}
],
"symlink_target": ""
} |
package org.wso2.carbon.sp.jobmanager.core.deployment;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.log4j.Logger;
import org.wso2.carbon.sp.jobmanager.core.DeploymentManager;
import org.wso2.carbon.sp.jobmanager.core.allocation.ResourceAllocationAlgorithm;
import org.wso2.carbon.sp.jobmanager.core.ResourcePoolChangeListener;
import org.wso2.carbon.sp.jobmanager.core.SiddhiAppDeployer;
import org.wso2.carbon.sp.jobmanager.core.allocation.RoundRobinAllocationAlgorithm;
import org.wso2.carbon.sp.jobmanager.core.appcreator.DistributedSiddhiQuery;
import org.wso2.carbon.sp.jobmanager.core.appcreator.SiddhiQuery;
import org.wso2.carbon.sp.jobmanager.core.internal.ServiceDataHolder;
import org.wso2.carbon.sp.jobmanager.core.model.ResourceNode;
import org.wso2.carbon.sp.jobmanager.core.model.ResourcePool;
import org.wso2.carbon.sp.jobmanager.core.model.SiddhiAppHolder;
import org.wso2.carbon.stream.processor.core.distribution.DeploymentStatus;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
/**
 * Deploys Siddhi applications to nodes in the resource cluster.
*/
public class DeploymentManagerImpl implements DeploymentManager, ResourcePoolChangeListener {
private static final Logger log = Logger.getLogger(DeploymentManagerImpl.class);
private final Lock lock = new ReentrantLock();
private ResourceAllocationAlgorithm resourceAllocationAlgorithm = ServiceDataHolder.getAllocationAlgorithm();
private ResourceAllocationAlgorithm receiverAllocationAlgorithm = new RoundRobinAllocationAlgorithm();
@Override
public DeploymentStatus deploy(DistributedSiddhiQuery distributedSiddhiQuery) {
Map<String, List<SiddhiAppHolder>> deployedSiddhiAppHoldersMap = ServiceDataHolder
.getResourcePool().getSiddhiAppHoldersMap();
List<SiddhiAppHolder> appsToDeploy = getSiddhiAppHolders(distributedSiddhiQuery);
List<SiddhiAppHolder> deployedApps = new ArrayList<>();
boolean shouldDeploy = true;
lock.lock();
try {
if (deployedSiddhiAppHoldersMap.containsKey(distributedSiddhiQuery.getAppName())) {
List<SiddhiAppHolder> existingApps = deployedSiddhiAppHoldersMap
.get(distributedSiddhiQuery.getAppName());
if (CollectionUtils.isEqualCollection(existingApps, appsToDeploy)) {
boolean waitingToDeploy = false;
for (SiddhiAppHolder app : existingApps) {
if (app.getDeployedNode() == null) {
waitingToDeploy = true;
break;
}
}
if (waitingToDeploy) {
log.info(String.format("Exact Siddhi app with name: %s is already exists in waiting mode. " +
"Hence, trying to re-deploy.", distributedSiddhiQuery
.getAppName()));
rollback(existingApps);
} else {
log.info(String.format("Exact Siddhi app with name: %s is already deployed.",
distributedSiddhiQuery.getAppName()));
shouldDeploy = false;
}
} else {
log.info("Different Siddhi app with name:" + distributedSiddhiQuery.getAppName() + " is already " +
"deployed. Hence, un-deploying existing Siddhi app.");
rollback(deployedSiddhiAppHoldersMap.get(distributedSiddhiQuery.getAppName()));
}
}
boolean isDeployed = true;
if (shouldDeploy) {
for (SiddhiAppHolder appHolder : appsToDeploy) {
ResourceNode deployedNode;
deployedNode = deploy(new SiddhiQuery(appHolder.getAppName(), appHolder.getSiddhiApp(),
appHolder.isReceiverQueryGroup()), 0, appHolder.getParallelism());
if (deployedNode != null) {
appHolder.setDeployedNode(deployedNode);
deployedApps.add(appHolder);
log.info(String.format("Siddhi app %s of %s successfully deployed in %s.",
appHolder.getAppName(), appHolder.getParentAppName(), deployedNode));
} else {
log.warn(String.format("Insufficient resources to deploy Siddhi app %s of %s. Hence, rolling " +
"back.", appHolder.getAppName(), appHolder.getParentAppName()));
isDeployed = false;
break;
}
}
if (isDeployed) {
deployedSiddhiAppHoldersMap.put(distributedSiddhiQuery.getAppName(), deployedApps);
log.info("Siddhi app " + distributedSiddhiQuery.getAppName() + " successfully deployed.");
} else {
rollback(deployedApps);
deployedApps = Collections.emptyList();
deployedSiddhiAppHoldersMap.remove(distributedSiddhiQuery.getAppName());
ServiceDataHolder.getResourcePool().getAppsWaitingForDeploy()
.put(distributedSiddhiQuery.getAppName(), appsToDeploy);
log.info("Siddhi app " + distributedSiddhiQuery.getAppName() + " held back in waiting mode.");
}
} else {
deployedApps = deployedSiddhiAppHoldersMap.get(distributedSiddhiQuery.getAppName());
}
ServiceDataHolder.getResourcePool().persist();
} finally {
lock.unlock();
}
// Returning true as the deployment state, since we might put some apps on wait.
return getDeploymentStatus(true, deployedApps);
}
private DeploymentStatus getDeploymentStatus(boolean isDeployed, List<SiddhiAppHolder> siddhiAppHolders) {
Map<String, List<String>> deploymentDataMap = new HashMap<>();
for (SiddhiAppHolder appHolder : siddhiAppHolders) {
if (appHolder.getDeployedNode() != null && appHolder.getDeployedNode().getHttpsInterface() != null) {
if (deploymentDataMap.containsKey(appHolder.getGroupName())) {
deploymentDataMap.get(appHolder.getGroupName())
.add(appHolder.getDeployedNode().getHttpsInterface().getHost());
} else {
List<String> hosts = new ArrayList<>();
hosts.add(appHolder.getDeployedNode().getHttpsInterface().getHost());
deploymentDataMap.put(appHolder.getGroupName(), hosts);
}
}
}
return new DeploymentStatus(isDeployed, deploymentDataMap);
}
private List<SiddhiAppHolder> getSiddhiAppHolders(DistributedSiddhiQuery distributedSiddhiQuery) {
List<SiddhiAppHolder> siddhiAppHolders = new ArrayList<>();
distributedSiddhiQuery.getQueryGroups().forEach(queryGroup -> {
queryGroup.getSiddhiQueries().forEach(query -> {
siddhiAppHolders.add(new SiddhiAppHolder(distributedSiddhiQuery.getAppName(),
queryGroup.getGroupName(), query.getAppName(), query.getApp(),
null, queryGroup.isReceiverQueryGroup(), queryGroup.getParallelism()));
});
});
return siddhiAppHolders;
}
@Override
public boolean unDeploy(String siddhiAppName) {
boolean unDeployed = false;
Map<String, List<SiddhiAppHolder>> siddhiAppHoldersMap = ServiceDataHolder
.getResourcePool().getSiddhiAppHoldersMap();
Map<String, List<SiddhiAppHolder>> waitingAppList = ServiceDataHolder
.getResourcePool().getAppsWaitingForDeploy();
lock.lock();
try {
if (siddhiAppHoldersMap.containsKey(siddhiAppName) || waitingAppList.containsKey(siddhiAppName)) {
// remove from the deployed apps
rollback(siddhiAppHoldersMap.get(siddhiAppName));
siddhiAppHoldersMap.remove(siddhiAppName);
// remove from the waiting list
                rollback(waitingAppList.get(siddhiAppName));
waitingAppList.remove(siddhiAppName);
unDeployed = true;
log.info("Siddhi app " + siddhiAppName + " un-deployed successfully");
} else {
log.warn("Siddhi app " + siddhiAppName + " is not deployed. Therefore, cannot un-deploy.");
}
ServiceDataHolder.getResourcePool().persist();
} finally {
lock.unlock();
}
return unDeployed;
}
@Override
public boolean isDeployed(String parentSiddhiAppName) {
Map<String, List<SiddhiAppHolder>> siddhiAppHoldersMap = ServiceDataHolder
.getResourcePool().getSiddhiAppHoldersMap();
Map<String, List<SiddhiAppHolder>> waitingAppList = ServiceDataHolder
.getResourcePool().getAppsWaitingForDeploy();
return siddhiAppHoldersMap.containsKey(parentSiddhiAppName)
|| waitingAppList.containsKey(parentSiddhiAppName);
}
@Override
public void resourceAdded(ResourceNode resourceNode) {
Map<String, List<SiddhiAppHolder>> waitingList = ServiceDataHolder.getResourcePool().getAppsWaitingForDeploy();
Set<String> waitingParentAppNames = new HashSet<>(waitingList.keySet());
List<SiddhiAppHolder> partialAppHoldersOfSiddhiApp;
List<SiddhiAppHolder> currentDeployedPartialApps;
boolean deployedCompletely;
lock.lock();
try {
for (String parentSiddhiAppName : waitingParentAppNames) {
partialAppHoldersOfSiddhiApp = waitingList.getOrDefault(parentSiddhiAppName, Collections.emptyList());
if (partialAppHoldersOfSiddhiApp.size() == 0) {
                    // Continue to the next parent app if all of its partial apps are deployed.
continue;
}
deployedCompletely = true;
currentDeployedPartialApps = new ArrayList<>();
for (SiddhiAppHolder partialAppHolder : partialAppHoldersOfSiddhiApp) {
ResourceNode deployedNode = deploy(new SiddhiQuery(partialAppHolder.getAppName(),
partialAppHolder.getSiddhiApp(), partialAppHolder.isReceiverQueryGroup()), 0,
partialAppHolder.getParallelism());
if (deployedNode != null) {
partialAppHolder.setDeployedNode(deployedNode);
currentDeployedPartialApps.add(partialAppHolder);
log.info(String.format("Siddhi app %s of %s successfully deployed in %s.",
partialAppHolder.getAppName(), partialAppHolder.getParentAppName(),
deployedNode));
} else {
deployedCompletely = false;
break;
}
}
if (deployedCompletely) {
ServiceDataHolder.getResourcePool().getSiddhiAppHoldersMap()
.put(parentSiddhiAppName, partialAppHoldersOfSiddhiApp);
waitingList.remove(parentSiddhiAppName);
log.info("Siddhi app " + parentSiddhiAppName + " successfully deployed.");
} else {
log.warn(String.format("Still insufficient resources to deploy %s. Hence, rolling back the " +
"deployment and waiting for additional resources.",
parentSiddhiAppName));
rollback(currentDeployedPartialApps);
}
}
ServiceDataHolder.getResourcePool().persist();
} finally {
lock.unlock();
}
}
@Override
public void resourceRemoved(ResourceNode resourceNode) {
ResourcePool resourcePool = ServiceDataHolder.getResourcePool();
List<SiddhiAppHolder> affectedPartialApps = resourcePool.getNodeAppMapping().get(resourceNode);
lock.lock();
try {
if (affectedPartialApps != null) {
log.info(String.format("Siddhi apps %s were affected by the removal of node %s. Hence, re-deploying "
+ "them in other resource nodes.", affectedPartialApps, resourceNode));
rollback(affectedPartialApps);
affectedPartialApps.forEach(affectedPartialApp -> {
ResourceNode deployedNode = deploy(new SiddhiQuery(affectedPartialApp.getAppName(),
affectedPartialApp.getSiddhiApp(), affectedPartialApp.isReceiverQueryGroup()), 0,
affectedPartialApp.getParallelism());
if (deployedNode != null) {
affectedPartialApp.setDeployedNode(deployedNode);
log.info(String.format("Siddhi app %s of %s successfully deployed in %s.",
affectedPartialApp.getAppName(), affectedPartialApp.getParentAppName(),
deployedNode));
} else {
log.warn(String.format("Insufficient resources to deploy %s. Therefore, cannot re-balance "
+ "Siddhi app %s. Hence, rolling back the deployment and waiting"
+ " for additional resources.", affectedPartialApp.getAppName(),
affectedPartialApp.getParentAppName()));
List<SiddhiAppHolder> appHolders = resourcePool.getSiddhiAppHoldersMap()
.remove(affectedPartialApp.getParentAppName());
if (appHolders != null) {
appHolders.forEach(e -> e.setDeployedNode(null));
rollback(appHolders);
resourcePool.getAppsWaitingForDeploy().put(affectedPartialApp.getParentAppName(),
appHolders);
}
}
});
}
resourcePool.persist();
} finally {
lock.unlock();
}
}
public void reDeployAppsInResourceNode(ResourceNode resourceNode) {
lock.lock();
try {
ResourcePool resourcePool = ServiceDataHolder.getResourcePool();
List<SiddhiAppHolder> deployedAppHolders = resourcePool.getNodeAppMapping().get(resourceNode);
if (resourceNode != null && deployedAppHolders != null) {
deployedAppHolders.forEach(appHolder -> {
String appName = SiddhiAppDeployer.deploy(resourceNode, new SiddhiQuery(appHolder.getAppName(),
appHolder.getSiddhiApp(), appHolder.isReceiverQueryGroup()));
if (appName == null || appName.isEmpty()) {
log.warn(String.format("Couldn't re-deploy partial Siddhi app %s of %s in %s. Therefore, " +
"assuming the %s has left the resource pool.", appHolder
.getAppName(),
appHolder.getParentAppName(), resourceNode, resourceNode));
if (resourceNode.isReceiverNode()) {
resourcePool.removeReceiverNode(resourceNode.getId());
} else {
resourcePool.removeResourceNode(resourceNode.getId());
}
} else {
if (log.isDebugEnabled()) {
log.debug(String.format("Partial Siddhi app %s of %s successfully re-deployed in %s.",
appName, appHolder.getParentAppName(), resourceNode));
}
}
});
}
resourcePool.persist();
} finally {
lock.unlock();
}
}
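    /*
     * Note (added for clarity, not in the original source): deploy() below
     * retries recursively up to the configured heartbeatMaxRetry count,
     * sleeping heartbeatInterval milliseconds between attempts, and returns
     * null once retries are exhausted so callers can park the app in the
     * waiting list.
     */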
private ResourceNode deploy(SiddhiQuery siddhiQuery, int retry, int parallelism) {
ResourcePool resourcePool = ServiceDataHolder.getResourcePool();
Map<String, ResourceNode> nodeMap;
ResourceNode resourceNode;
if (siddhiQuery.isReceiverQuery()) {
nodeMap = resourcePool.getReceiverNodeMap();
resourceNode = receiverAllocationAlgorithm.getNextResourceNode(nodeMap, parallelism);
} else {
nodeMap = resourcePool.getResourceNodeMap();
resourceNode = resourceAllocationAlgorithm.getNextResourceNode(nodeMap,
ServiceDataHolder.getDeploymentConfig().getMinResourceCount());
}
ResourceNode deployedNode = null;
if (resourceNode != null) {
String appName = SiddhiAppDeployer.deploy(resourceNode, siddhiQuery);
if (appName == null || appName.isEmpty()) {
int retryCount = ServiceDataHolder.getDeploymentConfig().getHeartbeatMaxRetry();
int retryInterval = ServiceDataHolder.getDeploymentConfig().getHeartbeatInterval();
if (retry < retryCount) {
log.warn(String.format("Couldn't deploy partial Siddhi app %s in %s. Will retry in %d "
+ "milliseconds.", siddhiQuery.getAppName(), resourceNode,
retryInterval));
try {
Thread.sleep(retryInterval);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
deployedNode = deploy(siddhiQuery, retry + 1, parallelism);
} else {
log.warn(String.format("Couldn't deploy partial Siddhi app %s even after %s attempts.",
siddhiQuery.getAppName(), retry));
}
} else {
if (log.isDebugEnabled()) {
log.debug(String.format("Partial Siddhi app %s successfully deployed in %s.",
appName, resourceNode));
}
deployedNode = resourceNode;
}
}
return deployedNode;
}
/**
* Rollback (un-deploy) already deployed Siddhi apps.
*
     * @param siddhiAppHolders list of Siddhi app holders to be un-deployed.
*/
private void rollback(List<SiddhiAppHolder> siddhiAppHolders) {
if (siddhiAppHolders != null) {
siddhiAppHolders.forEach(appHolder -> {
if (appHolder.getDeployedNode() != null) {
if (!SiddhiAppDeployer.unDeploy(appHolder.getDeployedNode(), appHolder.getAppName())) {
log.warn(String.format("Could not un-deploy Siddhi app %s from %s.",
appHolder.getAppName(), appHolder.getDeployedNode()));
} else {
appHolder.setDeployedNode(null);
if (log.isDebugEnabled()) {
log.debug(String.format("Siddhi app %s un-deployed from %s.",
appHolder.getAppName(), appHolder.getDeployedNode()));
}
}
}
});
}
}
}
| {
"content_hash": "7db454cf3f26f3f4b5f123bee7083c35",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 120,
"avg_line_length": 51.91428571428571,
"alnum_prop": 0.5791764647020563,
"repo_name": "tishan89/carbon-analytics",
"id": "911a51ab6c2147f2d28075e15894f18a8e104b20",
"size": "20660",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "components/org.wso2.carbon.sp.jobmanager.core/src/main/java/org/wso2/carbon/sp/jobmanager/core/deployment/DeploymentManagerImpl.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "40449"
},
{
"name": "Batchfile",
"bytes": "16377"
},
{
"name": "CSS",
"bytes": "660252"
},
{
"name": "Groovy",
"bytes": "5041"
},
{
"name": "HTML",
"bytes": "773783"
},
{
"name": "Java",
"bytes": "4352207"
},
{
"name": "JavaScript",
"bytes": "19018271"
},
{
"name": "PHP",
"bytes": "328"
},
{
"name": "PLSQL",
"bytes": "4761"
},
{
"name": "Shell",
"bytes": "17901"
}
],
"symlink_target": ""
} |
/**
* flexarea - Pretty flexible textareas
* @version v1.3.1
* @link https://github.com/bevacqua/flexarea
* @license MIT
*/
.fa-grip,
.fa-grip:before,
.fa-grip:after {
background-color: #f3f4eb;
border-color: #dedede;
border-style: solid;
  border-width: 0 1px 1px;
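  /* Cursor fallback chain (note added for clarity): browsers keep the last
     declaration they understand, so -webkit-grab wins on WebKit, plain grab
     where the unprefixed value is supported, and s-resize elsewhere. */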
cursor: s-resize;
cursor: grab;
cursor: -webkit-grab;
overflow: hidden;
}
.fa-grip {
height: 9px;
}
.fa-grip:before,
.fa-grip:after {
content: '';
display: block;
height: 3px;
}
.fa-textarea {
display: block;
resize: none;
}
.fa-textarea-resizing {
opacity: 0.75;
cursor: s-resize;
cursor: grabbing;
cursor: -webkit-grabbing;
}
| {
"content_hash": "742f4d39dc581e4691bc7ae92bb1132d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 45,
"avg_line_length": 17.16216216216216,
"alnum_prop": 0.6535433070866141,
"repo_name": "modulexcite/flexarea",
"id": "11c0715377ba878da2dacf14e92bf1e5082a9fb3",
"size": "635",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dist/flexarea.css",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1134"
},
{
"name": "JavaScript",
"bytes": "21192"
}
],
"symlink_target": ""
} |
namespace BoolToStringConverter
{
public class PrintResult
{
public static void Main()
{
BoolToStringConverter.CreateConverter();
}
}
} | {
"content_hash": "a71c6c8011d5be5d9dc4d39bd06f7588",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 52,
"avg_line_length": 18.4,
"alnum_prop": 0.5869565217391305,
"repo_name": "LyubaG/TelerikAcademy",
"id": "38e7801e097a5a6db8a421ef0c5e95089891fe26",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "High-Quality-Code/02.Naming-Identifiers-HW/BooleanToString/PrintResult.cs",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "106"
},
{
"name": "C#",
"bytes": "851195"
},
{
"name": "CSS",
"bytes": "88096"
},
{
"name": "CoffeeScript",
"bytes": "3700"
},
{
"name": "HTML",
"bytes": "197557"
},
{
"name": "JavaScript",
"bytes": "587289"
}
],
"symlink_target": ""
} |
package org.drools.core.spi;
import java.io.Serializable;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import org.drools.core.WorkingMemory;
import org.drools.core.common.InternalFactHandle;
import org.drools.core.rule.Declaration;
import org.kie.internal.security.KiePolicyHelper;
/**
* Accumulator
*
* Created: 04/06/2006
*
* @version $Id$
*/
public interface Accumulator
extends
Invoker {
/**
     * Creates and returns a context object for each working memory instance
*
* @return
*/
public Object createWorkingMemoryContext();
/**
* Creates the context object for an accumulator session.
* The context is passed as a parameter to every subsequent accumulator
* method call in the same session.
*
* @return
*/
public Serializable createContext();
/**
* Executes the initialization block of code
*
     * @param leftTuple the tuple that caused the rule to fire
* @param declarations previous declarations
* @param workingMemory
* @throws Exception
*/
public void init(Object workingMemoryContext,
Object context,
Tuple leftTuple,
Declaration[] declarations,
WorkingMemory workingMemory) throws Exception;
/**
* Executes the accumulate (action) code for the given fact handle
*
* @param leftTuple
* @param handle
* @param declarations
* @param innerDeclarations
* @param workingMemory
* @throws Exception
*/
public void accumulate(Object workingMemoryContext,
Object context,
Tuple leftTuple,
InternalFactHandle handle,
Declaration[] declarations,
Declaration[] innerDeclarations,
WorkingMemory workingMemory) throws Exception;
/**
* Returns true if this accumulator supports operation reversal
*
* @return
*/
public boolean supportsReverse();
/**
* Reverses the accumulate action for the given fact handle
*
* @param context
* @param leftTuple
* @param handle
* @param declarations
* @param innerDeclarations
* @param workingMemory
* @throws Exception
*/
public void reverse(Object workingMemoryContext,
Object context,
Tuple leftTuple,
InternalFactHandle handle,
Declaration[] declarations,
Declaration[] innerDeclarations,
WorkingMemory workingMemory) throws Exception;
/**
     * Gets the result of the accumulation
*
* @param leftTuple
* @param declarations
* @param workingMemory
* @return
* @throws Exception
*/
public Object getResult(Object workingMemoryContext,
Object context,
Tuple leftTuple,
Declaration[] declarations,
WorkingMemory workingMemory) throws Exception;
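    /*
     * Illustrative sketch (added for clarity, not part of the original API
     * docs): a minimal counting Accumulator could use a long[1] as its
     * context -- createContext() returns new long[]{0}, init() resets the
     * slot to 0, accumulate() increments it, reverse() decrements it (with
     * supportsReverse() returning true), and getResult() returns the count.
     */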
/**
* This class is used as a wrapper delegate when a security
* policy is in place.
*/
public static class SafeAccumulator implements Accumulator, Serializable {
private static final long serialVersionUID = -2845820209337318924L;
private Accumulator delegate;
public SafeAccumulator(Accumulator delegate) {
super();
this.delegate = delegate;
}
public Object createWorkingMemoryContext() {
return AccessController.doPrivileged(new PrivilegedAction<Object>() {
@Override
public Object run() {
return delegate.createWorkingMemoryContext();
}
}, KiePolicyHelper.getAccessContext());
}
public Serializable createContext() {
return AccessController.doPrivileged(new PrivilegedAction<Serializable>() {
@Override
public Serializable run() {
return delegate.createContext();
}
}, KiePolicyHelper.getAccessContext());
}
public void init(final Object workingMemoryContext,
final Object context,
final Tuple leftTuple,
final Declaration[] declarations,
final WorkingMemory workingMemory) throws Exception {
AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
delegate.init(workingMemoryContext, context, leftTuple, declarations, workingMemory);
return null;
}
}, KiePolicyHelper.getAccessContext());
}
public void accumulate(final Object workingMemoryContext,
final Object context,
final Tuple leftTuple,
final InternalFactHandle handle,
final Declaration[] declarations,
final Declaration[] innerDeclarations,
final WorkingMemory workingMemory) throws Exception {
AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
delegate.accumulate(workingMemoryContext, context, leftTuple, handle, declarations, innerDeclarations, workingMemory);
return null;
}
}, KiePolicyHelper.getAccessContext());
}
public boolean supportsReverse() {
// we have to secure even this call because it might run untrusted code
return AccessController.doPrivileged(new PrivilegedAction<Boolean>() {
@Override
public Boolean run() {
return delegate.supportsReverse();
}
}, KiePolicyHelper.getAccessContext());
}
public void reverse(final Object workingMemoryContext,
final Object context,
final Tuple leftTuple,
final InternalFactHandle handle,
final Declaration[] declarations,
final Declaration[] innerDeclarations,
final WorkingMemory workingMemory) throws Exception {
AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
delegate.reverse(workingMemoryContext, context, leftTuple, handle, declarations, innerDeclarations, workingMemory);
return null;
}
}, KiePolicyHelper.getAccessContext());
}
public Object getResult(final Object workingMemoryContext,
final Object context,
final Tuple leftTuple,
final Declaration[] declarations,
final WorkingMemory workingMemory) throws Exception {
return AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
return delegate.getResult(workingMemoryContext, context, leftTuple, declarations, workingMemory);
}
}, KiePolicyHelper.getAccessContext());
}
public boolean wrapsCompiledInvoker() {
return delegate instanceof CompiledInvoker;
}
}
public static boolean isCompiledInvoker(final Accumulator accumulator) {
return (accumulator instanceof CompiledInvoker)
|| (accumulator instanceof SafeAccumulator && ((SafeAccumulator) accumulator).wrapsCompiledInvoker());
}
}
| {
"content_hash": "a84cf11b603401f026309fb894dc383d",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 138,
"avg_line_length": 35.703539823008846,
"alnum_prop": 0.5890444912628578,
"repo_name": "romartin/drools",
"id": "542413effda916907027e7b220d7cda8449cfe4b",
"size": "8689",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "drools-core/src/main/java/org/drools/core/spi/Accumulator.java",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "15988"
},
{
"name": "Batchfile",
"bytes": "2554"
},
{
"name": "CSS",
"bytes": "1412"
},
{
"name": "GAP",
"bytes": "197299"
},
{
"name": "HTML",
"bytes": "6271"
},
{
"name": "Java",
"bytes": "34370393"
},
{
"name": "Python",
"bytes": "4555"
},
{
"name": "Ruby",
"bytes": "491"
},
{
"name": "Shell",
"bytes": "1120"
},
{
"name": "XSLT",
"bytes": "24302"
}
],
"symlink_target": ""
} |
package gw.lang.gosuc;
import gw.lang.parser.IToken;
import java.util.ArrayList;
import java.util.List;
/**
*/
public class GosucSdk {
private List<String> _paths;
public GosucSdk( List<String> paths ) {
_paths = paths;
}
public List<String> getPaths() {
return _paths;
}
public String write() {
StringBuilder sb = new StringBuilder()
.append( "sdk {\n" );
for( String path : getPaths() ) {
sb.append( " " ).append( "\"" ).append( path ).append( "\",\n" );
}
sb.append( "}" );
return sb.toString();
}
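  // Illustrative example (added for clarity, not in the original source):
  // for paths ["gosu-core.jar", "tools.jar"], write() emits
  //
  //   sdk {
  //     "gosu-core.jar",
  //     "tools.jar",
  //   }
  //
  // which is the same shape parse() consumes, so the two round-trip.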
public static GosucSdk parse( GosucProjectParser parser ) {
parser.verify( parser.matchWord( "sdk", false ), "Expecting 'sdk' keyword" );
parser.verify( parser.match( null, '{', false ), "Expecting '{' to begin sdk path list" );
List<String> paths = new ArrayList<String>();
for( IToken t = parser.getTokenizer().getCurrentToken(); parser.match( null, '"', false ); t = parser.getTokenizer().getCurrentToken() ) {
paths.add( t.getStringValue() );
if( !parser.match( null, ',', false ) ) {
break;
}
}
parser.verify( parser.match( null, '}', false ), "Expecting '}' to close sdk path list" );
return new GosucSdk( paths );
}
@Override
public boolean equals( Object o ) {
if( this == o ) {
return true;
}
if( o == null || getClass() != o.getClass() ) {
return false;
}
GosucSdk gosucSdk = (GosucSdk)o;
return _paths.equals( gosucSdk._paths );
}
@Override
public int hashCode() {
return _paths.hashCode();
}
}
| {
"content_hash": "5f42dbfc632419b628f907ff23c55990",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 142,
"avg_line_length": 24.369230769230768,
"alnum_prop": 0.5934343434343434,
"repo_name": "tcmoore32/sheer-madness",
"id": "f0dd0507dc000b599b3a593927ef6f84935ab086",
"size": "1633",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gosu-core-api/src/main/java/gw/lang/gosuc/GosucSdk.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7707"
},
{
"name": "GAP",
"bytes": "51837"
},
{
"name": "Gosu",
"bytes": "16343290"
},
{
"name": "Groovy",
"bytes": "1611"
},
{
"name": "HTML",
"bytes": "4998"
},
{
"name": "Java",
"bytes": "12820028"
},
{
"name": "JavaScript",
"bytes": "1060972"
},
{
"name": "Makefile",
"bytes": "6850"
},
{
"name": "Python",
"bytes": "8450"
},
{
"name": "Shell",
"bytes": "1046"
}
],
"symlink_target": ""
} |
package org.apache.kerberos.kerb.spec.common;
import org.apache.haox.asn1.type.Asn1FieldInfo;
import org.apache.haox.asn1.type.Asn1Integer;
import org.apache.haox.asn1.type.Asn1OctetString;
import org.apache.kerberos.kerb.spec.KrbSequenceType;
import java.net.InetAddress;
import java.util.Arrays;
/*
HostAddress ::= SEQUENCE {
addr-type [0] Int32,
address [1] OCTET STRING
}
*/
public class HostAddress extends KrbSequenceType {
private static int ADDR_TYPE = 0;
private static int ADDRESS = 1;
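    // ASN.1 field descriptors mirroring the HostAddress SEQUENCE shown above.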
static Asn1FieldInfo[] fieldInfos = new Asn1FieldInfo[] {
new Asn1FieldInfo(ADDR_TYPE, 0, Asn1Integer.class),
new Asn1FieldInfo(ADDRESS, 1, Asn1OctetString.class)
};
public HostAddress() {
super(fieldInfos);
}
public HostAddress(InetAddress inetAddress) {
this();
setAddrType(HostAddrType.ADDRTYPE_INET);
setAddress(inetAddress.getAddress());
}
public HostAddrType getAddrType() {
Integer value = getFieldAsInteger(ADDR_TYPE);
return HostAddrType.fromValue(value);
}
public void setAddrType(HostAddrType addrType) {
setField(ADDR_TYPE, addrType);
}
public byte[] getAddress() {
return getFieldAsOctets(ADDRESS);
}
public void setAddress(byte[] address) {
setFieldAsOctets(ADDRESS, address);
}
public boolean equalsWith(InetAddress address) {
if (address == null) {
return false;
}
HostAddress that = new HostAddress(address);
return that.equals(this);
}
@Override
public boolean equals(Object other) {
if (other == null) {
return false;
}
if (other == this) {
return true;
} else if (! (other instanceof HostAddress)) {
return false;
}
HostAddress that = (HostAddress) other;
if (getAddrType() == that.getAddrType() &&
Arrays.equals(getAddress(), that.getAddress())) {
return true;
}
return false;
}
@Override
public int hashCode() {
int result = getAddrType().getValue();
if (getAddress() != null) {
            // Arrays.hashCode keeps hashCode consistent with equals(), which
            // compares the address bytes via Arrays.equals.
            result = 31 * result + Arrays.hashCode(getAddress());
}
return result;
}
}
| {
"content_hash": "dcb8a4498482f838e7012bc08be9fbef",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 65,
"avg_line_length": 25.65934065934066,
"alnum_prop": 0.6042826552462527,
"repo_name": "HazelChen/directory-kerberos",
"id": "9606746b01e824fcfddefff7abe4531066494164",
"size": "3167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "haox-kerb/kerb-core/src/main/java/org/apache/kerberos/kerb/spec/common/HostAddress.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "1325633"
}
],
"symlink_target": ""
} |
<!doctype html>
<meta charset=utf-8>
<title>Test that execCommand and related methods never throw exceptions if HTML or XHTML document</title>
<script src=/resources/testharness.js></script>
<script src=/resources/testharnessreport.js></script>
<div contenteditable=""></div>
<script>
"use strict";
const editor = document.querySelector("div[contenteditable]");
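// Each test below feeds an unknown command name to one of the editing APIs
// and asserts that the call reports failure (false or "") instead of throwing.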
test(function execCommand_with_unknown_command() {
editor.innerHTML = "abc";
editor.focus();
try {
let done = document.execCommand("unknown-command", false, "foo");
assert_equals(done, false,
"Should return false without throwing exception");
} catch (e) {
assert_true(false,
"Shouldn't throw exception for unknown command");
}
}, "Testing execCommand with unknown command");
test(function queryCommandEnabled_with_unknown_command() {
editor.innerHTML = "abc";
editor.focus();
try {
let enabled = document.queryCommandEnabled("unknown-command");
assert_equals(enabled, false,
"Should return false without throwing exception");
} catch (e) {
assert_true(false,
"Shouldn't throw exception for unknown command");
}
}, "Testing queryCommandEnabled with unknown command");
test(function queryCommandIndeterm_with_unknown_command() {
editor.innerHTML = "abc";
editor.focus();
try {
let indeterminate = document.queryCommandIndeterm("unknown-command");
assert_equals(indeterminate, false,
"Should return false without throwing exception");
} catch (e) {
assert_true(false,
"Shouldn't throw exception for unknown command");
}
}, "Testing queryCommandIndeterm with unknown command");
test(function queryCommandState_with_unknown_command() {
editor.innerHTML = "abc";
editor.focus();
try {
let state = document.queryCommandState("unknown-command");
assert_equals(state, false,
"Should return false without throwing exception");
} catch (e) {
assert_true(false,
"Shouldn't throw exception for unknown command");
}
}, "Testing queryCommandState with unknown command");
test(function queryCommandSupported_with_unknown_command() {
editor.innerHTML = "abc";
editor.focus();
try {
let supported = document.queryCommandSupported("unknown-command");
assert_equals(supported, false,
"Should return false without throwing exception");
} catch (e) {
assert_true(false,
"Shouldn't throw exception for unknown command");
}
}, "Testing queryCommandSupported with unknown command");
test(function queryCommandValue_with_unknown_command() {
editor.innerHTML = "abc";
editor.focus();
try {
let value = document.queryCommandValue("unknown-command");
assert_equals(value, "",
"Should return empty string without throwing exception");
} catch (e) {
assert_true(false,
"Shouldn't throw exception for unknown command");
}
}, "Testing queryCommandValue with unknown command");
</script>
| {
"content_hash": "5a078ba4cb08fda8a50a3e7a92f82507",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 105,
"avg_line_length": 33.168539325842694,
"alnum_prop": 0.7029132791327913,
"repo_name": "ric2b/Vivaldi-browser",
"id": "1b77b15ab039dabd209507ada217bb6947f9d4e7",
"size": "2952",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "chromium/third_party/blink/web_tests/external/wpt/editing/other/exec-command-never-throw-exceptions.tentative.html",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
<?php
namespace Everyman\Neo4j\Command\Batch;
use Everyman\Neo4j\Client,
Everyman\Neo4j\Node,
Everyman\Neo4j\Command\UpdateNode as SingleUpdateNode;
/**
* Update a node in a batch
*/
class UpdateNode extends Command
{
/**
* Set the operation to drive the command
*
* @param Client $client
* @param Node $node
* @param integer $opId
*/
public function __construct(Client $client, Node $node, $opId)
{
parent::__construct($client, new SingleUpdateNode($client, $node), $opId);
}
/**
* Return the data to pass
*
* @return array
*/
protected function getData()
{
$opData = array(array(
'method' => strtoupper($this->base->getMethod()),
'to' => $this->base->getPath(),
'body' => $this->base->getData(),
'id' => $this->opId,
));
return $opData;
}
}
| {
"content_hash": "66e5a8c245a69433e3cb506299c16f67",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 76,
"avg_line_length": 19.95,
"alnum_prop": 0.6416040100250626,
"repo_name": "karthikbalu/Neo4jApp",
"id": "6eae553220f5ec2f1a7d0873b4ba75ffa587a8f5",
"size": "798",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "htdocs 3/APIS/neo4jphp/lib/Everyman/Neo4j/Command/Batch/UpdateNode.php",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "28571"
},
{
"name": "JavaScript",
"bytes": "267510"
},
{
"name": "PHP",
"bytes": "720051"
},
{
"name": "Python",
"bytes": "13328"
},
{
"name": "Shell",
"bytes": "1242"
},
{
"name": "XSLT",
"bytes": "313"
}
],
"symlink_target": ""
} |
#include <fstream>
#include <sstream>
#include <string>
namespace Tools {
namespace Sources {
std::string read_file(const std::string& filename) {
std::ifstream instream (filename);
std::stringstream text;
if (instream.is_open()) {
std::string line;
while (getline(instream, line)) {
text << line << "\n";
}
}
return text.str();
}
} // namespace Sources
} // namespace Tools
| {
"content_hash": "d997bfd898b3a074ce8ffbd8d229f899",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 56,
"avg_line_length": 23.352941176470587,
"alnum_prop": 0.5541561712846348,
"repo_name": "csrhau/sandpit",
"id": "07e1357ed1092b1824b37f7c501d432fa7c21cef",
"size": "476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cpp/file_io/src/tools/sources.cc",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "120622"
},
{
"name": "C++",
"bytes": "949865"
},
{
"name": "CMake",
"bytes": "6863"
},
{
"name": "Go",
"bytes": "1499"
},
{
"name": "Jupyter Notebook",
"bytes": "103687"
},
{
"name": "Makefile",
"bytes": "15541"
},
{
"name": "Python",
"bytes": "45814"
},
{
"name": "Ruby",
"bytes": "6055"
},
{
"name": "Shell",
"bytes": "15056"
},
{
"name": "TeX",
"bytes": "14628"
},
{
"name": "VHDL",
"bytes": "132109"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="utf-8"?>
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:background="#fff"
android:clipChildren="false"
android:orientation="vertical">
<LinearLayout
android:id="@+id/linearLayout"
android:layout_width="wrap_content"
android:layout_height="300dp"
android:clipChildren="false"
android:orientation="vertical">
<ImageView
android:id="@+id/imageView"
android:layout_width="match_parent"
android:layout_height="200dp"
android:scaleType="centerCrop"
android:transitionName="pic" />
<View
android:layout_width="match_parent"
android:layout_height="100dp"
android:background="#fff" />
</LinearLayout>
<LinearLayout
android:id="@+id/ll_bottom"
android:layout_width="match_parent"
android:layout_height="100dp"
android:clipChildren="false"
android:gravity="center_vertical"
android:orientation="vertical">
<TextView
android:id="@+id/tv_name"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:text="农作物多样性种植提高"
android:textColor="#000"
android:textSize="18sp" />
<TextView
android:id="@+id/tv_cost"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_marginTop="10dp"
android:text="成果所属单位: 中国农业大学" />
</LinearLayout>
<TextView
android:id="@+id/btn_buy"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_alignParentRight="true"
android:background="@drawable/button_bg"
android:paddingBottom="5dp"
android:paddingLeft="30dp"
android:paddingRight="30dp"
android:paddingTop="5dp"
android:text="我要投资"
android:textColor="#fff" />
</RelativeLayout>
| {
"content_hash": "672bb47dc880a0ecb85f1b477128d903",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 74,
"avg_line_length": 31.764705882352942,
"alnum_prop": 0.6055555555555555,
"repo_name": "Clark422392573/TabDemo",
"id": "90d51691122b69cb2eb7ea068dff5805ffd27b4f",
"size": "2212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/src/main/res/layout/item_img.xml",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "245271"
}
],
"symlink_target": ""
} |
/**************************************************************************************************
* @file shared_memory.cpp
* @date 2014-12-13
* @author Omar Stefan Evans
**************************************************************************************************/
#include "shared_memory.h"
#include "MemoryManager.hpp"
void * shared_memory_reserve(size_t n, size_t size) {
return rambler::memory::MemoryManager::default_manager()->reserve_memory(n, size);
}
void shared_memory_release(void * ptr) {
return rambler::memory::MemoryManager::default_manager()->release_memory(ptr);
}
void shared_memory_share(void * ptr) {
return rambler::memory::MemoryManager::default_manager()->share_memory(ptr);
}
| {
"content_hash": "f4cc6963ba2fc603507de305fa62d408",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 100,
"avg_line_length": 36.75,
"alnum_prop": 0.5129251700680272,
"repo_name": "CodeOmar/rambler_memory",
"id": "1b910d3093b832620f97af183653f0cb30988f4f",
"size": "735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source Code/shared_memory.cpp",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2472"
},
{
"name": "C++",
"bytes": "251500"
}
],
"symlink_target": ""
} |
---
title: Clean MS Word Formatting
page_title: Clean MS Word Formatting | RadEditor for ASP.NET AJAX Documentation
description: Clean MS Word Formatting
slug: editor/managing-content/pasting-content/clean-ms-word-formatting-
tags: clean,ms,word,formatting,
published: True
position: 1
---
# Clean MS Word Formatting
This article explains how **RadEditor** handles content pasted from MS Word, and what built-in mechanisms are available to control the pasting behavior.
**RadEditor** provides a number of tools that help users paste formatted text from MS Word; as a result, the pasted content is cleaned of unnecessary tags, comments and MS-Word-specific style formatting attributes.
>note As of IE11, Microsoft has included a built-in mechanism that handles the HTML formatting when pasting MS Word content. Because of that, **RadEditor** has limited control over the MS Word content pasted under Internet Explorer 11. You can find more details about the browser dependency when pasting in the **RadEditor** in the [Pasting in RadEditor]({%slug editor/managing-content/pasting-content/overview%}) article.
>caption Figure 1: MS Word content is pasted as proper HTML markup in the RadEditor.

## Strip MS Word Content via Paste Tools
**RadEditor** exposes two easy-to-use built-in tools that enable end users to get proper HTML from pasted MS Word content: **Paste from Word** (in **Figure 1**) and **Paste from Word, strip font**.
These tools are categorized as built-in clipboard tools. You can find more details about how they behave in the Using the Built-in Clipboard Tools section of the [Overview]({%slug editor/managing-content/pasting-content/overview%}) article.
## Automatic On-paste Content Stripping
>important The **StripFormattingOptions** property replaces the deprecated StripFormattingOnPaste property.
The major **RadEditor** mechanism for on-paste content cleaning and stripping is the **StripFormattingOptions** functionality. It enables you to choose a specific configuration of generic or MS-Word-specific stripping options to be processed during paste. In the lists below you can find all possible options:
Generic options:
* **All** - strips all HTML formatting and pastes plain text.
* **AllExceptNewLines** - clears all tags except <br> and new lines (\n) on paste.
* **Css** - strips CSS styles on paste.
* **Font** - strips Font tags on paste.
* **None** - pastes the clipboard content as is. If MS Word formatting exists, the user is prompted to clean it.
* **NoneSupressCleanMessage** - does not strip anything on paste and does not show the prompt about MS Word content being pasted (see [Overview]({%slug editor/managing-content/pasting-content/overview%}) article).
* **Span** - strips Span tags on paste.
MS Word specific options:
* **ConvertWordLists** - converts Word ordered/unordered lists to HTML tags.
* **MSWord** - strips Word-specific tags on paste, preserving fonts and text sizes.
* **MSWordNoFonts** - strips Word-specific tags on paste, preserving text sizes only.
* **MSWordNoMargins** - strips Word-specific tags and margins, preserving fonts and text sizes.
* **MSWordRemoveAll** - strips Word-specific tags on paste, removing both fonts and text sizes.
>note Enabling the **NoneSupressCleanMessage** option will prevent the client-side [OnClientPasteHtml]({%slug editor/client-side-programming/events/onclientpastehtml%}) event from firing when using the native browser paste options (the browser’s context menu or the Ctrl+V shortcut).
>caption Example 1: How to set multiple values to the **StripFormattingOptions** property.
````ASP.NET
<telerik:RadEditor runat="server" ID="RadEditor1" StripFormattingOptions="MsWord,Span,Css,ConvertWordLists">
</telerik:RadEditor>
````
````C#
protected void Page_Load(object sender, EventArgs e)
{
RadEditor1.StripFormattingOptions = EditorStripFormattingOptions.Span | EditorStripFormattingOptions.MSWordRemoveAll;
}
````
````VB
Protected Sub Page_Load(ByVal sender As Object, ByVal e As EventArgs)
RadEditor1.StripFormattingOptions = EditorStripFormattingOptions.Span Or EditorStripFormattingOptions.MSWordRemoveAll
End Sub
````
>tip Optionally, you can use the client-side [fire]({%slug editor/client-side-programming/methods/fire%}) method to strip the content on submit or on page load. This approach is showcased in the [Clean MS Word Formatting on Page Load and on Submit]({%slug editor/managing-content/pasting-content/clean-ms-word-formatting-on-page-load-and-on-submit%}) article.
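For reference, here is a minimal client-side sketch of that technique. The `FormatStripper` command name, its `"WORD"` value and the `OnClientLoad` wiring are assumptions based on the linked article, so verify them against your RadEditor version:
````JavaScript
<script type="text/javascript">
    // Wired up via <telerik:RadEditor ... OnClientLoad="cleanWordFormatting" />
    function cleanWordFormatting(editor, args) {
        // Fire the format stripper over the freshly loaded content;
        // the "WORD" value strips only the MS-Word-specific markup.
        editor.fire("FormatStripper", { value: "WORD" });
    }
</script>
````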
## See Also
* [Overview]({%slug editor/managing-content/pasting-content/overview%})
* [Demo: Cleaning Word Formatting](http://demos.telerik.com/aspnet-ajax/editor/examples/cleaningwordformatting/defaultcs.aspx)
* [Set Properties]({%slug editor/getting-started/set-properties%})
| {
"content_hash": "438793d3c11704429c5345bafbfa9eef",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 416,
"avg_line_length": 56.83529411764706,
"alnum_prop": 0.7770647898985718,
"repo_name": "somethingforever/ajax-docs",
"id": "fd2c937cd952144f25d380ad2ef49a50a5ab1cce",
"size": "4837",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "controls/editor/managing-content/pasting-content/clean-ms-word-formatting.md",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16473"
},
{
"name": "HTML",
"bytes": "5677"
},
{
"name": "JavaScript",
"bytes": "96453"
},
{
"name": "Ruby",
"bytes": "24913"
}
],
"symlink_target": ""
} |
<?php
namespace PHP_CodeSniffer\Tests\Core;
use PHP_CodeSniffer\Util\Common;
class IsCamelCapsTest extends \PHPUnit_Framework_TestCase
{
/**
* Test valid public function/method names.
*
* @return void
*/
public function testValidNotClassFormatPublic()
{
$this->assertTrue(Common::isCamelCaps('thisIsCamelCaps', false, true, true));
$this->assertTrue(Common::isCamelCaps('thisISCamelCaps', false, true, false));
}//end testValidNotClassFormatPublic()
/**
* Test invalid public function/method names.
*
* @return void
*/
public function testInvalidNotClassFormatPublic()
{
$this->assertFalse(Common::isCamelCaps('_thisIsCamelCaps', false, true, true));
$this->assertFalse(Common::isCamelCaps('thisISCamelCaps', false, true, true));
$this->assertFalse(Common::isCamelCaps('ThisIsCamelCaps', false, true, true));
$this->assertFalse(Common::isCamelCaps('3thisIsCamelCaps', false, true, true));
$this->assertFalse(Common::isCamelCaps('*thisIsCamelCaps', false, true, true));
$this->assertFalse(Common::isCamelCaps('-thisIsCamelCaps', false, true, true));
$this->assertFalse(Common::isCamelCaps('this*IsCamelCaps', false, true, true));
$this->assertFalse(Common::isCamelCaps('this-IsCamelCaps', false, true, true));
$this->assertFalse(Common::isCamelCaps('this_IsCamelCaps', false, true, true));
$this->assertFalse(Common::isCamelCaps('this_is_camel_caps', false, true, true));
}//end testInvalidNotClassFormatPublic()
/**
* Test valid private method names.
*
* @return void
*/
public function testValidNotClassFormatPrivate()
{
$this->assertTrue(Common::isCamelCaps('_thisIsCamelCaps', false, false, true));
$this->assertTrue(Common::isCamelCaps('_thisISCamelCaps', false, false, false));
$this->assertTrue(Common::isCamelCaps('_i18N', false, false, true));
$this->assertTrue(Common::isCamelCaps('_i18n', false, false, true));
}//end testValidNotClassFormatPrivate()
/**
* Test invalid private method names.
*
* @return void
*/
public function testInvalidNotClassFormatPrivate()
{
$this->assertFalse(Common::isCamelCaps('thisIsCamelCaps', false, false, true));
$this->assertFalse(Common::isCamelCaps('_thisISCamelCaps', false, false, true));
$this->assertFalse(Common::isCamelCaps('_ThisIsCamelCaps', false, false, true));
$this->assertFalse(Common::isCamelCaps('__thisIsCamelCaps', false, false, true));
$this->assertFalse(Common::isCamelCaps('__thisISCamelCaps', false, false, false));
$this->assertFalse(Common::isCamelCaps('3thisIsCamelCaps', false, false, true));
$this->assertFalse(Common::isCamelCaps('*thisIsCamelCaps', false, false, true));
$this->assertFalse(Common::isCamelCaps('-thisIsCamelCaps', false, false, true));
$this->assertFalse(Common::isCamelCaps('_this_is_camel_caps', false, false, true));
}//end testInvalidNotClassFormatPrivate()
/**
* Test valid class names.
*
* @return void
*/
public function testValidClassFormatPublic()
{
$this->assertTrue(Common::isCamelCaps('ThisIsCamelCaps', true, true, true));
$this->assertTrue(Common::isCamelCaps('ThisISCamelCaps', true, true, false));
$this->assertTrue(Common::isCamelCaps('This3IsCamelCaps', true, true, false));
}//end testValidClassFormatPublic()
/**
* Test invalid class names.
*
* @return void
*/
public function testInvalidClassFormat()
{
$this->assertFalse(Common::isCamelCaps('thisIsCamelCaps', true));
$this->assertFalse(Common::isCamelCaps('This-IsCamelCaps', true));
$this->assertFalse(Common::isCamelCaps('This_Is_Camel_Caps', true));
}//end testInvalidClassFormat()
/**
* Test invalid class names with the private flag set.
*
* Note that the private flag is ignored if the class format
* flag is set, so these names are all invalid.
*
* @return void
*/
public function testInvalidClassFormatPrivate()
{
$this->assertFalse(Common::isCamelCaps('_ThisIsCamelCaps', true, true));
$this->assertFalse(Common::isCamelCaps('_ThisIsCamelCaps', true, false));
}//end testInvalidClassFormatPrivate()
}//end class
| {
"content_hash": "c9b87880621d181b192f6ca6c1efd7d3",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 91,
"avg_line_length": 34.71875,
"alnum_prop": 0.6588658865886589,
"repo_name": "LCabreroG/opensource",
"id": "9b7a375588258af5169f666c78df37ce68997d6d",
"size": "4725",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vendor/squizlabs/php_codesniffer/tests/Core/IsCamelCapsTest.php",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "727"
},
{
"name": "Batchfile",
"bytes": "835"
},
{
"name": "CSS",
"bytes": "355396"
},
{
"name": "JavaScript",
"bytes": "548966"
},
{
"name": "PHP",
"bytes": "130407"
},
{
"name": "Shell",
"bytes": "3049"
}
],
"symlink_target": ""
} |
ACCEPTED
#### According to
Index Fungorum
#### Published in
Syll. fung. (Abellini) 9: 400 (1891)
#### Original name
Asterina furcata Pat.
### Remarks
null | {
"content_hash": "ebe4899cfae5ca75ee3854b6c17afc5d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 36,
"avg_line_length": 12.153846153846153,
"alnum_prop": 0.6835443037974683,
"repo_name": "mdoering/backbone",
"id": "9ab464130ed4dd7ceab4fb204535df71f5ad6f43",
"size": "212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "life/Fungi/Ascomycota/Dothideomycetes/Pleosporales/Melanommataceae/Asterella/Asterella furcata/README.md",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
package nl.homeserver.energie.verbruikkosten;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.Objects;
import javax.annotation.Nullable;
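/**
 * Wraps a collection of {@link VerbruikKosten} entries (usage/cost pairs) so
 * that the individual verbruik and kosten values can be summed into one total.
 */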
class VerbruikenEnKosten {
private final Collection<VerbruikKosten> all;
VerbruikenEnKosten(final Collection<VerbruikKosten> all) {
this.all = all;
}
@Nullable
private BigDecimal getTotaalVerbruik() {
return all.stream()
.map(VerbruikKosten::getVerbruik)
.filter(Objects::nonNull)
.reduce(BigDecimal::add)
.orElse(null);
}
@Nullable
private BigDecimal getTotaalKosten() {
return all.stream()
.map(VerbruikKosten::getKosten)
.filter(Objects::nonNull)
.reduce(BigDecimal::add)
.orElse(null);
}
VerbruikKosten sumToSingle() {
return new VerbruikKosten(getTotaalVerbruik(), getTotaalKosten());
}
}
| {
"content_hash": "4f8ee99e8b5e6d6c03524f4770ff8cd0",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 75,
"avg_line_length": 25.973684210526315,
"alnum_prop": 0.6119554204660588,
"repo_name": "bassages/home-server",
"id": "5a30ceef09447a4529cc30386d796a3150ffdbee",
"size": "987",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/main/java/nl/homeserver/energie/verbruikkosten/VerbruikenEnKosten.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "366"
},
{
"name": "Java",
"bytes": "401641"
},
{
"name": "Shell",
"bytes": "3768"
}
],
"symlink_target": ""
} |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.9.1"/>
<title>V8 API Reference Guide for node.js v0.12.6: v8::Eternal< T > Class Template Reference</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/javascript">
$(document).ready(function() { init_search(); });
</script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<td style="padding-left: 0.5em;">
<div id="projectname">V8 API Reference Guide for node.js v0.12.6
</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.9.1 -->
<script type="text/javascript">
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
<div id="navrow1" class="tabs">
<ul class="tablist">
<li><a href="index.html"><span>Main Page</span></a></li>
<li><a href="namespaces.html"><span>Namespaces</span></a></li>
<li class="current"><a href="annotated.html"><span>Classes</span></a></li>
<li><a href="files.html"><span>Files</span></a></li>
<li><a href="examples.html"><span>Examples</span></a></li>
<li>
<div id="MSearchBox" class="MSearchBoxInactive">
<span class="left">
<img id="MSearchSelect" src="search/mag_sel.png"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
alt=""/>
<input type="text" id="MSearchField" value="Search" accesskey="S"
onfocus="searchBox.OnSearchFieldFocus(true)"
onblur="searchBox.OnSearchFieldFocus(false)"
onkeyup="searchBox.OnSearchFieldChange(event)"/>
</span><span class="right">
<a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
</span>
</div>
</li>
</ul>
</div>
<div id="navrow2" class="tabs2">
<ul class="tablist">
<li><a href="annotated.html"><span>Class List</span></a></li>
<li><a href="classes.html"><span>Class Index</span></a></li>
<li><a href="hierarchy.html"><span>Class Hierarchy</span></a></li>
<li><a href="functions.html"><span>Class Members</span></a></li>
</ul>
</div>
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div id="nav-path" class="navpath">
<ul>
<li class="navelem"><a class="el" href="namespacev8.html">v8</a></li><li class="navelem"><a class="el" href="classv8_1_1_eternal.html">Eternal</a></li> </ul>
</div>
</div><!-- top -->
<div class="header">
<div class="summary">
<a href="#pub-methods">Public Member Functions</a> |
<a href="classv8_1_1_eternal-members.html">List of all members</a> </div>
<div class="headertitle">
<div class="title">v8::Eternal< T > Class Template Reference</div> </div>
</div><!--header-->
<div class="contents">
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pub-methods"></a>
Public Member Functions</h2></td></tr>
<tr class="memitem:ad7522d8b51e072dcbc4261bc1f155bcb"><td class="memTemplParams" colspan="2"><a class="anchor" id="ad7522d8b51e072dcbc4261bc1f155bcb"></a>
template<class S > </td></tr>
<tr class="memitem:ad7522d8b51e072dcbc4261bc1f155bcb"><td class="memTemplItemLeft" align="right" valign="top">V8_INLINE </td><td class="memTemplItemRight" valign="bottom"><b>Eternal</b> (<a class="el" href="classv8_1_1_isolate.html">Isolate</a> *isolate, <a class="el" href="classv8_1_1_local.html">Local</a>< S > handle)</td></tr>
<tr class="separator:ad7522d8b51e072dcbc4261bc1f155bcb"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:ae9614309d9c93fe484d81926e31ed6b7"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="ae9614309d9c93fe484d81926e31ed6b7"></a>
V8_INLINE <a class="el" href="classv8_1_1_local.html">Local</a>< T > </td><td class="memItemRight" valign="bottom"><b>Get</b> (<a class="el" href="classv8_1_1_isolate.html">Isolate</a> *isolate)</td></tr>
<tr class="separator:ae9614309d9c93fe484d81926e31ed6b7"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a5d77cbfe0662af5fe75172be9a8f1d5d"><td class="memItemLeft" align="right" valign="top"><a class="anchor" id="a5d77cbfe0662af5fe75172be9a8f1d5d"></a>
V8_INLINE bool </td><td class="memItemRight" valign="bottom"><b>IsEmpty</b> ()</td></tr>
<tr class="separator:a5d77cbfe0662af5fe75172be9a8f1d5d"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a75a32f5c428a0d47e13f66dbdeb9adba"><td class="memTemplParams" colspan="2"><a class="anchor" id="a75a32f5c428a0d47e13f66dbdeb9adba"></a>
template<class S > </td></tr>
<tr class="memitem:a75a32f5c428a0d47e13f66dbdeb9adba"><td class="memTemplItemLeft" align="right" valign="top">V8_INLINE void </td><td class="memTemplItemRight" valign="bottom"><b>Set</b> (<a class="el" href="classv8_1_1_isolate.html">Isolate</a> *isolate, <a class="el" href="classv8_1_1_local.html">Local</a>< S > handle)</td></tr>
<tr class="separator:a75a32f5c428a0d47e13f66dbdeb9adba"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a2f9dcec02b2c2f7d4b55aee0d8b9881a"><td class="memTemplParams" colspan="2"><a class="anchor" id="a2f9dcec02b2c2f7d4b55aee0d8b9881a"></a>
template<class S > </td></tr>
<tr class="memitem:a2f9dcec02b2c2f7d4b55aee0d8b9881a"><td class="memTemplItemLeft" align="right" valign="top">void </td><td class="memTemplItemRight" valign="bottom"><b>Set</b> (<a class="el" href="classv8_1_1_isolate.html">Isolate</a> *isolate, <a class="el" href="classv8_1_1_local.html">Local</a>< S > handle)</td></tr>
<tr class="separator:a2f9dcec02b2c2f7d4b55aee0d8b9881a"><td class="memSeparator" colspan="2"> </td></tr>
</table>
<hr/>The documentation for this class was generated from the following file:<ul>
<li>deps/v8/include/<a class="el" href="v8_8h_source.html">v8.h</a></li>
</ul>
</div><!-- contents -->
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated on Tue Aug 11 2015 23:46:31 for V8 API Reference Guide for node.js v0.12.6 by  <a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/>
</a> 1.8.9.1
</small></address>
</body>
</html>
| {
"content_hash": "4f5ca0d65437efa8e04fd9d7db4efc01",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 343,
"avg_line_length": 57.908396946564885,
"alnum_prop": 0.6783548642235697,
"repo_name": "v8-dox/v8-dox.github.io",
"id": "431b3a12638190aa6d91dacc609557e83e3fe259",
"size": "7586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "78b0e30/html/classv8_1_1_eternal.html",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
<?php
require_once "global_class.php";
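// Thin model class that binds the generic GlobalClass helpers to the
// "sections" database table.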
class Section extends GlobalClass{
public function __construct($db){
parent::__construct("sections",$db);
}
}
?> | {
"content_hash": "fdf7b65ccb85f71f7a29c6743f11f3ee",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 39,
"avg_line_length": 21,
"alnum_prop": 0.6428571428571429,
"repo_name": "Birjik/reference-book",
"id": "3015527dd439abc2146cbb8f72bde5b3b0634592",
"size": "168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/section_class.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "871"
},
{
"name": "PHP",
"bytes": "36396"
},
{
"name": "Smarty",
"bytes": "6448"
}
],
"symlink_target": ""
} |
__all__ = ('config', 'logger', )
from hotqueue import HotQueue
from ConfigParser import ConfigParser
from os.path import dirname, join, exists
from redis import Redis
import json
# configuration
config = ConfigParser()
config.add_section('www')
config.set('www', 'baseurl', 'http://android.kivy.org/')
config.set('www', 'secret_key', '')
config.add_section('database')
config.set('database', 'url', 'sqlite:////tmp/test.db')
config.add_section('redis')
config.set('redis', 'host', 'localhost')
config.set('redis', 'port', '6379')
config.set('redis', 'password', '')
# read existing file
config_fn = join(dirname(__file__), '..', 'config.cfg')
if exists(config_fn):
config.read(config_fn)
# write current config if possible
try:
fd = open(config_fn, 'w')
config.write(fd)
fd.close()
except Exception:
pass
# start the queue
qjob = HotQueue(
'jobsubmit',
host=config.get('redis', 'host'),
port=config.getint('redis', 'port'),
password=config.get('redis', 'password'),
db=0)
# Redis database connector
r = Redis(
host=config.get('redis', 'host'),
port=config.getint('redis', 'port'),
password=config.get('redis', 'password'),
db=1)
| {
"content_hash": "958c1d43d65b704e6e6ef8750e103e84",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 56,
"avg_line_length": 23.6,
"alnum_prop": 0.6661016949152543,
"repo_name": "kivy/p4a-cloud",
"id": "8af2cf12151a9f9eafb13f8d0f95f77ec7dded22",
"size": "1180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "master/web/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5449"
},
{
"name": "HTML",
"bytes": "43702"
},
{
"name": "JavaScript",
"bytes": "1356"
},
{
"name": "Python",
"bytes": "33003"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
} |
'use strict';
module.exports = function (math) {
var util = require('../../util/index'),
BigNumber = math.type.BigNumber,
Complex = require('../../type/Complex'),
collection = math.collection,
isCollection = collection.isCollection,
isNumber = util.number.isNumber,
isString = util.string.isString,
isComplex = Complex.isComplex;
/**
* Create a complex value or convert a value to a complex value.
*
* Syntax:
*
* math.complex() // creates a complex value with zero
* // as real and imaginary part.
   * math.complex(re : number, im : number) // creates a complex value with provided
* // values for real and imaginary part.
* math.complex(re : number) // creates a complex value with provided
* // real value and zero imaginary part.
* math.complex(complex : Complex) // clones the provided complex value.
* math.complex(arg : string) // parses a string into a complex value.
* math.complex(array : Array) // converts the elements of the array
* // or matrix element wise into a
* // complex value.
* math.complex({re: number, im: number}) // creates a complex value with provided
* // values for real an imaginary part.
* math.complex({r: number, phi: number}) // creates a complex value with provided
* // polar coordinates
*
* Examples:
*
* var a = math.complex(3, -4); // a = Complex 3 - 4i
* a.re = 5; // a = Complex 5 - 4i
* var i = a.im; // Number -4;
* var b = math.complex('2 + 6i'); // Complex 2 + 6i
* var c = math.complex(); // Complex 0 + 0i
* var d = math.add(a, b); // Complex 5 + 2i
*
* See also:
*
* bignumber, boolean, index, matrix, number, string, unit
*
* @param {* | Array | Matrix} [args]
* Arguments specifying the real and imaginary part of the complex number
* @return {Complex | Array | Matrix} Returns a complex value
*/
math.complex = function complex(args) {
switch (arguments.length) {
case 0:
// no parameters. Set re and im zero
return new Complex(0, 0);
case 1:
// parse string into a complex number
var arg = arguments[0];
if (isNumber(arg)) {
return new Complex(arg, 0);
}
if (arg instanceof BigNumber) {
// convert to Number
return new Complex(arg.toNumber(), 0);
}
if (isComplex(arg)) {
// create a clone
return arg.clone();
}
if (isString(arg)) {
var c = Complex.parse(arg);
if (c) {
return c;
}
else {
throw new SyntaxError('String "' + arg + '" is no valid complex number');
}
}
if (isCollection(arg)) {
return collection.deepMap(arg, complex);
}
if (typeof arg === 'object') {
if('re' in arg && 'im' in arg) {
return new Complex(arg.re, arg.im);
} else if ('r' in arg && 'phi' in arg) {
return Complex.fromPolar(arg.r, arg.phi);
}
}
        throw new TypeError('Two numbers, a single string or a fitting object expected in function complex');
case 2:
// re and im provided
var re = arguments[0],
im = arguments[1];
// convert re to number
if (re instanceof BigNumber) {
re = re.toNumber();
}
// convert im to number
if (im instanceof BigNumber) {
im = im.toNumber();
}
if (isNumber(re) && isNumber(im)) {
return new Complex(re, im);
}
else {
throw new TypeError('Two numbers or a single string expected in function complex');
}
default:
throw new math.error.ArgumentsError('complex', arguments.length, 0, 2);
}
};
};
| {
"content_hash": "033968ab18b1256b2397411b37f2d490",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 108,
"avg_line_length": 34.37795275590551,
"alnum_prop": 0.49198350893266146,
"repo_name": "nikitazu/kali",
"id": "6a563bdffe064849c275e6487b0a9e76a4199015",
"size": "4366",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "bower_components/mathjs/lib/function/construction/complex.js",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "990"
},
{
"name": "HTML",
"bytes": "1370"
},
{
"name": "JavaScript",
"bytes": "739741"
}
],
"symlink_target": ""
} |
// Warning: This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if the
// code is regenerated.
package com.microsoft.windowsazure.scheduler.models;
import java.util.Calendar;
/**
* Specifies service bus brokered message properties.
*/
public class JobServiceBusBrokeredMessageProperties {
private String contentType;
/**
* Optional. Gets or sets the service bus brokered message properties
* ContentType.
* @return The ContentType value.
*/
public String getContentType() {
return this.contentType;
}
/**
* Optional. Gets or sets the service bus brokered message properties
* ContentType.
* @param contentTypeValue The ContentType value.
*/
public void setContentType(final String contentTypeValue) {
this.contentType = contentTypeValue;
}
private String correlationId;
/**
* Optional. Gets or sets the service bus brokered message properties
* CorrelationId.
* @return The CorrelationId value.
*/
public String getCorrelationId() {
return this.correlationId;
}
/**
* Optional. Gets or sets the service bus brokered message properties
* CorrelationId.
* @param correlationIdValue The CorrelationId value.
*/
public void setCorrelationId(final String correlationIdValue) {
this.correlationId = correlationIdValue;
}
private boolean forcePersistence;
/**
* Optional. Gets or sets the service bus brokered message properties
* ForcePersistence.
* @return The ForcePersistence value.
*/
public boolean isForcePersistence() {
return this.forcePersistence;
}
/**
* Optional. Gets or sets the service bus brokered message properties
* ForcePersistence.
* @param forcePersistenceValue The ForcePersistence value.
*/
public void setForcePersistence(final boolean forcePersistenceValue) {
this.forcePersistence = forcePersistenceValue;
}
private String label;
/**
* Optional. Gets or sets the service bus brokered message properties Label.
* @return The Label value.
*/
public String getLabel() {
return this.label;
}
/**
* Optional. Gets or sets the service bus brokered message properties Label.
* @param labelValue The Label value.
*/
public void setLabel(final String labelValue) {
this.label = labelValue;
}
private String messageId;
/**
* Optional. Gets or sets the service bus brokered message properties
* MessageId.
* @return The MessageId value.
*/
public String getMessageId() {
return this.messageId;
}
/**
* Optional. Gets or sets the service bus brokered message properties
* MessageId.
* @param messageIdValue The MessageId value.
*/
public void setMessageId(final String messageIdValue) {
this.messageId = messageIdValue;
}
private String partitionKey;
/**
* Optional. Gets or sets the service bus brokered message properties
* PartitionKey.
* @return The PartitionKey value.
*/
public String getPartitionKey() {
return this.partitionKey;
}
/**
* Optional. Gets or sets the service bus brokered message properties
* PartitionKey.
* @param partitionKeyValue The PartitionKey value.
*/
public void setPartitionKey(final String partitionKeyValue) {
this.partitionKey = partitionKeyValue;
}
private String replyTo;
/**
* Optional. Gets or sets the service bus brokered message properties
* ReplyTo.
* @return The ReplyTo value.
*/
public String getReplyTo() {
return this.replyTo;
}
/**
* Optional. Gets or sets the service bus brokered message properties
* ReplyTo.
* @param replyToValue The ReplyTo value.
*/
public void setReplyTo(final String replyToValue) {
this.replyTo = replyToValue;
}
private String replyToSessionId;
/**
* Optional. Gets or sets the service bus brokered message properties
* ReplyToSessionId.
* @return The ReplyToSessionId value.
*/
public String getReplyToSessionId() {
return this.replyToSessionId;
}
/**
* Optional. Gets or sets the service bus brokered message properties
* ReplyToSessionId.
* @param replyToSessionIdValue The ReplyToSessionId value.
*/
public void setReplyToSessionId(final String replyToSessionIdValue) {
this.replyToSessionId = replyToSessionIdValue;
}
private Calendar scheduledEnqueueTimeUtc;
/**
* Optional. Gets or sets the service bus brokered message properties
* ScheduledEnqueueTimeUtc.
* @return The ScheduledEnqueueTimeUtc value.
*/
public Calendar getScheduledEnqueueTimeUtc() {
return this.scheduledEnqueueTimeUtc;
}
/**
* Optional. Gets or sets the service bus brokered message properties
* ScheduledEnqueueTimeUtc.
* @param scheduledEnqueueTimeUtcValue The ScheduledEnqueueTimeUtc value.
*/
public void setScheduledEnqueueTimeUtc(final Calendar scheduledEnqueueTimeUtcValue) {
this.scheduledEnqueueTimeUtc = scheduledEnqueueTimeUtcValue;
}
private String sessionId;
/**
* Optional. Gets or sets the service bus brokered message properties
* SessionId.
* @return The SessionId value.
*/
public String getSessionId() {
return this.sessionId;
}
/**
* Optional. Gets or sets the service bus brokered message properties
* SessionId.
* @param sessionIdValue The SessionId value.
*/
public void setSessionId(final String sessionIdValue) {
this.sessionId = sessionIdValue;
}
private Calendar timeToLive;
/**
* Optional. Gets or sets the service bus brokered message properties
* TimeToLive.
* @return The TimeToLive value.
*/
public Calendar getTimeToLive() {
return this.timeToLive;
}
/**
* Optional. Gets or sets the service bus brokered message properties
* TimeToLive.
* @param timeToLiveValue The TimeToLive value.
*/
public void setTimeToLive(final Calendar timeToLiveValue) {
this.timeToLive = timeToLiveValue;
}
private String to;
/**
* Optional. Gets or sets the service bus brokered message properties To.
* @return The To value.
*/
public String getTo() {
return this.to;
}
/**
* Optional. Gets or sets the service bus brokered message properties To.
* @param toValue The To value.
*/
public void setTo(final String toValue) {
this.to = toValue;
}
private String viaPartitionKey;
/**
* Optional. Gets or sets the service bus brokered message properties
* ViaPartitionKey.
* @return The ViaPartitionKey value.
*/
public String getViaPartitionKey() {
return this.viaPartitionKey;
}
/**
* Optional. Gets or sets the service bus brokered message properties
* ViaPartitionKey.
* @param viaPartitionKeyValue The ViaPartitionKey value.
*/
public void setViaPartitionKey(final String viaPartitionKeyValue) {
this.viaPartitionKey = viaPartitionKeyValue;
}
}
| {
"content_hash": "b2e07d20deac2bfec0bf4423bbb664b5",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 89,
"avg_line_length": 27.468634686346864,
"alnum_prop": 0.6612036539494895,
"repo_name": "jmspring/azure-sdk-for-java",
"id": "868ee41d1b7faf1a1752fe53b40117da84c40d79",
"size": "8085",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "management-scheduler/src/main/java/com/microsoft/windowsazure/scheduler/models/JobServiceBusBrokeredMessageProperties.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10369"
},
{
"name": "Java",
"bytes": "24006632"
}
],
"symlink_target": ""
} |
FROM balenalib/iot-gate-imx8-ubuntu:focal-build
# remove several traces of debian python
RUN apt-get purge -y python.*
# http://bugs.python.org/issue19846
# > At the moment, setting "LANG=C" on a Linux system *fundamentally breaks Python 3*, and that's not OK.
ENV LANG C.UTF-8
# key 63C7CC90: public key "Simon McVittie <[email protected]>" imported
# key 3372DCFA: public key "Donald Stufft (dstufft) <[email protected]>" imported
RUN gpg --keyserver keyring.debian.org --recv-keys 4DE8FF2A63C7CC90 \
&& gpg --keyserver keyserver.ubuntu.com --recv-key 6E3CBCE93372DCFA \
&& gpg --keyserver keyserver.ubuntu.com --recv-keys 0x52a43a1e4b77b059
ENV PYTHON_VERSION 3.5.10
# if this is called "PIP_VERSION", pip explodes with "ValueError: invalid truth value '<VERSION>'"
ENV PYTHON_PIP_VERSION 21.0.1
ENV SETUPTOOLS_VERSION 56.0.0
RUN set -x \
&& curl -SLO "http://resin-packages.s3.amazonaws.com/python/v$PYTHON_VERSION/Python-$PYTHON_VERSION.linux-aarch64-openssl1.1.tar.gz" \
&& echo "423a72fecef7a888c7121199426c96cda6b0c9c6a6dde4bf156e75ab21713aa9 Python-$PYTHON_VERSION.linux-aarch64-openssl1.1.tar.gz" | sha256sum -c - \
&& tar -xzf "Python-$PYTHON_VERSION.linux-aarch64-openssl1.1.tar.gz" --strip-components=1 \
&& rm -rf "Python-$PYTHON_VERSION.linux-aarch64-openssl1.1.tar.gz" \
&& ldconfig \
&& if [ ! -e /usr/local/bin/pip3 ]; then : \
&& curl -SLO "https://raw.githubusercontent.com/pypa/get-pip/430ba37776ae2ad89f794c7a43b90dc23bac334c/get-pip.py" \
&& echo "19dae841a150c86e2a09d475b5eb0602861f2a5b7761ec268049a662dbd2bd0c get-pip.py" | sha256sum -c - \
&& python3 get-pip.py \
&& rm get-pip.py \
; fi \
&& pip3 install --no-cache-dir --upgrade --force-reinstall pip=="$PYTHON_PIP_VERSION" setuptools=="$SETUPTOOLS_VERSION" \
&& find /usr/local \
\( -type d -a -name test -o -name tests \) \
-o \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \
-exec rm -rf '{}' + \
&& cd / \
&& rm -rf /usr/src/python ~/.cache
# install "virtualenv", since the vast majority of users of this image will want it
RUN pip3 install --no-cache-dir virtualenv
ENV PYTHON_DBUS_VERSION 1.2.8
# install dbus-python dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
libdbus-1-dev \
libdbus-glib-1-dev \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get -y autoremove
# install dbus-python
RUN set -x \
&& mkdir -p /usr/src/dbus-python \
&& curl -SL "http://dbus.freedesktop.org/releases/dbus-python/dbus-python-$PYTHON_DBUS_VERSION.tar.gz" -o dbus-python.tar.gz \
&& curl -SL "http://dbus.freedesktop.org/releases/dbus-python/dbus-python-$PYTHON_DBUS_VERSION.tar.gz.asc" -o dbus-python.tar.gz.asc \
&& gpg --verify dbus-python.tar.gz.asc \
&& tar -xzC /usr/src/dbus-python --strip-components=1 -f dbus-python.tar.gz \
&& rm dbus-python.tar.gz* \
&& cd /usr/src/dbus-python \
&& PYTHON_VERSION=$(expr match "$PYTHON_VERSION" '\([0-9]*\.[0-9]*\)') ./configure \
&& make -j$(nproc) \
&& make install -j$(nproc) \
&& cd / \
&& rm -rf /usr/src/dbus-python
# make some useful symlinks that are expected to exist
RUN cd /usr/local/bin \
&& ln -sf pip3 pip \
&& { [ -e easy_install ] || ln -s easy_install-* easy_install; } \
&& ln -sf idle3 idle \
&& ln -sf pydoc3 pydoc \
&& ln -sf python3 python \
&& ln -sf python3-config python-config
# set PYTHONPATH to point to dist-packages
ENV PYTHONPATH /usr/lib/python3/dist-packages:$PYTHONPATH
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/[email protected]" \
&& echo "Running test-stack@python" \
&& chmod +x [email protected] \
&& bash [email protected] \
&& rm -rf [email protected]
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v8 \nOS: Ubuntu focal \nVariant: build variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nPython v3.5.10, Pip v21.0.1, Setuptools v56.0.0 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh | {
"content_hash": "6f4bc8f987c1f3ae594b96b24167f283",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 710,
"avg_line_length": 50.66315789473684,
"alnum_prop": 0.7039268647413256,
"repo_name": "nghiant2710/base-images",
"id": "4027a980433fb190ed02c949fc2edd5a5b40f4fb",
"size": "4834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "balena-base-images/python/iot-gate-imx8/ubuntu/focal/3.5.10/build/Dockerfile",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "144558581"
},
{
"name": "JavaScript",
"bytes": "16316"
},
{
"name": "Shell",
"bytes": "368690"
}
],
"symlink_target": ""
} |
package main
import (
"flag"
"fmt"
"log"
"time"
pb "github.com/hnakamur/hello_grpc_go/helloworld"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
const (
address = "localhost:50051"
defaultName = "world"
)
func main() {
	addr := flag.String("addr", address, "server address")
name := flag.String("name", defaultName, "name")
loop := flag.Bool("loop", false, "enable loop")
sleep := flag.Duration("sleep", 10*time.Millisecond, "sleep time in loop")
flag.Parse()
log.SetFlags(log.LstdFlags | log.Lmicroseconds)
if *loop {
for {
err := sayHello(*addr, *name)
if err != nil {
log.Print(err)
}
time.Sleep(*sleep)
}
} else {
err := sayHello(*addr, *name)
if err != nil {
log.Fatal(err)
}
}
}
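// sayHello dials the gRPC server at the given address and performs a single
// Greeter.SayHello RPC, logging the greeting that comes back.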
func sayHello(address, name string) error {
// Set up a connection to the server.
conn, err := grpc.Dial(address, grpc.WithInsecure())
if err != nil {
return err
}
defer conn.Close()
c := pb.NewGreeterClient(conn)
r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name})
if err != nil {
return fmt.Errorf("could not greet: %v", err)
}
log.Printf("Greeting: %s", r.Message)
return nil
}
| {
"content_hash": "defa30683d862caad92be1e9d1a84f08",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 19.327868852459016,
"alnum_prop": 0.6395250212044106,
"repo_name": "hnakamur/hello_grpc_go",
"id": "5639742f41c117ee92837aa1fc2aec3213a18c4a",
"size": "2747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "greeter_client/main.go",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "4963"
},
{
"name": "Protocol Buffer",
"bytes": "1998"
}
],
"symlink_target": ""
} |
import logging
from framework.celery_tasks.handlers import enqueue_task
from website import settings
logger = logging.getLogger(__name__)
if settings.SEARCH_ENGINE == 'elastic':
import elastic_search as search_engine
else:
search_engine = None
logger.warn('Elastic search is not set to load')
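# Decorator that turns the wrapped function into a no-op when no search
# engine is configured or a migration is running.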
def requires_search(func):
def wrapped(*args, **kwargs):
if search_engine is not None and not settings.RUNNING_MIGRATION:
return func(*args, **kwargs)
return wrapped
@requires_search
def search(query, index=None, doc_type=None, raw=None):
index = index or settings.ELASTIC_INDEX
return search_engine.search(query, index=index, doc_type=doc_type, raw=raw)
@requires_search
def update_node(node, index=None, bulk=False, async=True, saved_fields=None):
kwargs = {
'index': index,
'bulk': bulk
}
if async:
node_id = node._id
# We need the transaction to be committed before trying to run celery tasks.
# For example, when updating a Node's privacy, is_public must be True in the
# database in order for method that updates the Node's elastic search document
# to run correctly.
if settings.USE_CELERY:
enqueue_task(search_engine.update_node_async.s(node_id=node_id, **kwargs))
else:
search_engine.update_node_async(node_id=node_id, **kwargs)
else:
index = index or settings.ELASTIC_INDEX
return search_engine.update_node(node, **kwargs)
@requires_search
def bulk_update_nodes(serialize, nodes, index=None):
index = index or settings.ELASTIC_INDEX
search_engine.bulk_update_nodes(serialize, nodes, index=index)
@requires_search
def delete_node(node, index=None):
index = index or settings.ELASTIC_INDEX
doc_type = node.project_or_component
if node.is_registration:
doc_type = 'registration'
elif node.is_preprint:
doc_type = 'preprint'
search_engine.delete_doc(node._id, node, index=index, category=doc_type)
@requires_search
def update_contributors_async(user_id):
"""Async version of update_contributors above"""
if settings.USE_CELERY:
enqueue_task(search_engine.update_contributors_async.s(user_id))
else:
search_engine.update_contributors_async(user_id)
@requires_search
def update_user(user, index=None, async=True):
index = index or settings.ELASTIC_INDEX
if async:
user_id = user.id
if settings.USE_CELERY:
enqueue_task(search_engine.update_user_async.s(user_id, index=index))
else:
search_engine.update_user_async(user_id, index=index)
else:
search_engine.update_user(user, index=index)
@requires_search
def update_file(file_, index=None, delete=False):
index = index or settings.ELASTIC_INDEX
search_engine.update_file(file_, index=index, delete=delete)
@requires_search
def update_institution(institution, index=None):
index = index or settings.ELASTIC_INDEX
search_engine.update_institution(institution, index=index)
@requires_search
def update_collected_metadata(cgm_id, collection_id=None, index=None, op='update'):
index = index or settings.ELASTIC_INDEX
if settings.USE_CELERY:
enqueue_task(search_engine.update_cgm_async.s(cgm_id, collection_id=collection_id, op=op, index=index))
else:
search_engine.update_cgm_async(cgm_id, collection_id=collection_id, op=op, index=index)
@requires_search
def bulk_update_collected_metadata(cgms, op='update', index=None):
index = index or settings.ELASTIC_INDEX
search_engine.bulk_update_cgm(cgms, op=op, index=index)
@requires_search
def delete_all():
search_engine.delete_all()
@requires_search
def delete_index(index):
search_engine.delete_index(index)
@requires_search
def create_index(index=None):
index = index or settings.ELASTIC_INDEX
search_engine.create_index(index=index)
@requires_search
def search_contributor(query, page=0, size=10, exclude=None, current_user=None):
exclude = exclude or []
result = search_engine.search_contributor(query=query, page=page, size=size,
exclude=exclude, current_user=current_user)
return result
| {
"content_hash": "f9457fdb6c2febe42ef1487aa847da68",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 111,
"avg_line_length": 33.968,
"alnum_prop": 0.6938294865756006,
"repo_name": "caseyrollins/osf.io",
"id": "b49d2ee4dc90e77956d7b4cc7029aa7a5fa37a48",
"size": "4246",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "website/search/search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93007"
},
{
"name": "Dockerfile",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "296984"
},
{
"name": "JavaScript",
"bytes": "1813961"
},
{
"name": "Mako",
"bytes": "676476"
},
{
"name": "Python",
"bytes": "8712355"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
/*************************************************************************/
/* convex_polygon_shape_2d.h */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2022 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2022 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#ifndef CONVEX_POLYGON_SHAPE_2D_H
#define CONVEX_POLYGON_SHAPE_2D_H
#include "scene/resources/shape_2d.h"
class ConvexPolygonShape2D : public Shape2D {
GDCLASS(ConvexPolygonShape2D, Shape2D);
Vector<Vector2> points;
void _update_shape();
protected:
static void _bind_methods();
public:
virtual bool _edit_is_selected_on_click(const Point2 &p_point, double p_tolerance) const;
void set_point_cloud(const Vector<Vector2> &p_points);
void set_points(const Vector<Vector2> &p_points);
Vector<Vector2> get_points() const;
virtual void draw(const RID &p_to_rid, const Color &p_color);
virtual Rect2 get_rect() const;
virtual real_t get_enclosing_radius() const;
ConvexPolygonShape2D();
};
#endif // CONVEX_POLYGON_SHAPE_2D_H
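// Usage sketch (illustrative comment, not part of the original header; the
// Godot 3.x C++ API is assumed here):
//
//     Ref<ConvexPolygonShape2D> shape;
//     shape.instance();
//     Vector<Vector2> points;
//     points.push_back(Vector2(0, 0));
//     points.push_back(Vector2(10, 0));
//     points.push_back(Vector2(5, 10));
//     shape->set_points(points);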
| {
"content_hash": "33144434e5e046d6b260035a8066f8a6",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 90,
"avg_line_length": 49.91525423728814,
"alnum_prop": 0.5225806451612903,
"repo_name": "ex/godot",
"id": "b9e4c5c1e129f43fd66390fff824360d0ab726ea",
"size": "2945",
"binary": false,
"copies": "1",
"ref": "refs/heads/3.5",
"path": "scene/resources/convex_polygon_shape_2d.h",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AIDL",
"bytes": "1633"
},
{
"name": "Batchfile",
"bytes": "26"
},
{
"name": "C",
"bytes": "1045182"
},
{
"name": "C#",
"bytes": "1061492"
},
{
"name": "C++",
"bytes": "39315087"
},
{
"name": "CMake",
"bytes": "606"
},
{
"name": "GAP",
"bytes": "62"
},
{
"name": "GDScript",
"bytes": "323212"
},
{
"name": "GLSL",
"bytes": "836846"
},
{
"name": "Java",
"bytes": "595274"
},
{
"name": "JavaScript",
"bytes": "194742"
},
{
"name": "Kotlin",
"bytes": "84098"
},
{
"name": "Makefile",
"bytes": "1421"
},
{
"name": "Objective-C",
"bytes": "20550"
},
{
"name": "Objective-C++",
"bytes": "365306"
},
{
"name": "PowerShell",
"bytes": "2713"
},
{
"name": "Python",
"bytes": "475722"
},
{
"name": "Shell",
"bytes": "30899"
}
],
"symlink_target": ""
} |
package org.savarese.vserv.tcpip;
/**
* ICMPPacket extends {@link IPPacket} to handle ICMP packets. The ICMP
* packet structure is described in
* <a href="http://www.ietf.org/rfc/rfc0792.txt?number=792">RFC 792</a>.
*
* @author <a href="http://www.savarese.org/">Daniel F. Savarese</a>
*/
public abstract class ICMPPacket extends IPPacket {
/** Offset into the ICMP packet of the type header value. */
public static final int OFFSET_TYPE = 0;
/** Offset into the ICMP packet of the code header value. */
public static final int OFFSET_CODE = 1;
/** Offset into the ICMP packet of the ICMP checksum. */
public static final int OFFSET_ICMP_CHECKSUM = 2;
/** Offset into the ICMP packet of the identifier header value. */
public static final int OFFSET_IDENTIFIER = 4;
/** Offset into the ICMP packet of the sequence number header value. */
public static final int OFFSET_SEQUENCE = 6;
/** The ICMP type number for an echo request. */
public static final int TYPE_ECHO_REQUEST = 8;
/** The ICMP type number for an echo reply. */
public static final int TYPE_ECHO_REPLY = 0;
/** The byte offset into the IP packet where the ICMP packet begins. */
int _offset;
/**
* Creates a new ICMP packet of a given size.
*
* @param size The number of bytes in the packet.
*/
public ICMPPacket(int size) {
super(size);
_offset = 0;
}
/**
* Creates a new ICMP packet that is a copy of a given packet.
*
* @param packet The packet to replicate.
*/
public ICMPPacket(ICMPPacket packet) {
super(packet.size());
copy(packet);
_offset = packet._offset;
}
/** @return The number of bytes in the ICMP packet header. */
public abstract int getICMPHeaderByteLength();
public void setIPHeaderLength(int length) {
super.setIPHeaderLength(length);
_offset = getIPHeaderByteLength();
}
/**
* @return The total number of bytes in the IP and ICMP headers.
*/
public final int getCombinedHeaderByteLength() {
return _offset + getICMPHeaderByteLength();
}
/**
* Sets the length of the ICMP data payload.
*
* @param length The length of the ICMP data payload in bytes.
*/
public final void setICMPDataByteLength(int length) {
if(length < 0)
length = 0;
setIPPacketLength(getCombinedHeaderByteLength() + length);
}
/**
* @return The number of bytes in the ICMP data payload.
*/
public final int getICMPDataByteLength() {
return getIPPacketLength() - getCombinedHeaderByteLength();
}
/**
* @return The ICMP packet length. This is the size of the IP packet
* minus the size of the IP header.
*/
public final int getICMPPacketByteLength() {
return getIPPacketLength() - _offset;
}
/**
* Copies the contents of an ICMPPacket. If the current data array is
* of insufficient length to store the contents, a new array is
* allocated.
*
   * @param packet The ICMPPacket to copy.
*/
public final void copyData(ICMPPacket packet) {
if(_data_.length < packet._data_.length) {
byte[] data = new byte[packet._data_.length];
System.arraycopy(_data_, 0, data, 0, getCombinedHeaderByteLength());
_data_ = data;
}
int length = packet.getICMPDataByteLength();
System.arraycopy(packet._data_, packet.getCombinedHeaderByteLength(),
_data_, getCombinedHeaderByteLength(), length);
setICMPDataByteLength(length);
}
public void setData(byte[] data) {
super.setData(data);
_offset = getIPHeaderByteLength();
}
/**
* Sets the ICMP type header field.
*
* @param type The new type.
*/
public final void setType(int type) {
_data_[_offset + OFFSET_TYPE] = (byte)(type & 0xff);
}
/**
* @return The ICMP type header field.
*/
public final int getType() {
return (_data_[_offset + OFFSET_TYPE] & 0xff);
}
/**
* Sets the ICMP code header field.
*
   * @param code The new code.
*/
public final void setCode(int code) {
_data_[_offset + OFFSET_CODE] = (byte)(code & 0xff);
}
/**
* @return The ICMP code header field.
*/
public final int getCode() {
return (_data_[_offset + OFFSET_CODE] & 0xff);
}
/**
* @return The ICMP checksum.
*/
public final int getICMPChecksum() {
return (((_data_[_offset + OFFSET_ICMP_CHECKSUM] & 0xff) << 8) |
(_data_[_offset + OFFSET_ICMP_CHECKSUM + 1] & 0xff));
}
/**
* Computes the ICMP checksum, optionally updating the ICMP checksum header.
*
* @param update Specifies whether or not to update the ICMP checksum
* header after computing the checksum. A value of true indicates
* the header should be updated, a value of false indicates it
* should not be updated.
* @return The computed ICMP checksum.
*/
public final int computeICMPChecksum(boolean update) {
return _computeChecksum_(_offset, _offset + OFFSET_ICMP_CHECKSUM,
getIPPacketLength(), 0, update);
}
/**
* Same as <code>computeICMPChecksum(true);</code>
*
* @return The computed ICMP checksum value.
*/
public final int computeICMPChecksum() {
return computeICMPChecksum(true);
}
}
| {
"content_hash": "55c97b42336b1d8b7e577c2bc3ad5362",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 78,
"avg_line_length": 25.396135265700483,
"alnum_prop": 0.6461860376640669,
"repo_name": "sebgod/java-sockets",
"id": "159ce74aa30b9f01619657a555ca762e4a1b4b22",
"size": "5991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vserv-tcpip/src/java/org/savarese/vserv/tcpip/ICMPPacket.java",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "21538"
},
{
"name": "HTML",
"bytes": "106"
},
{
"name": "Java",
"bytes": "93624"
},
{
"name": "Makefile",
"bytes": "2661"
},
{
"name": "Shell",
"bytes": "530"
}
],
"symlink_target": ""
} |
---
title: Settings
page_title: Settings - WinForms RichTextEditor Control
description: XamlFormatProvider allows import of XAML documents and export of the WinForms RichTextEditor content to XAML files.
slug: winforms/richtexteditor/import-export/xaml/settings
tags: import/export
published: True
position: 1
---
# Settings
__XamlFormatProvider__ allows you to import XAML documents and to export the content of RadRichTextEditor to a XAML file. Additionally, the import/export settings provide modification options. This article outlines the available settings.
## Export Settings
__XamlFormatProvider__ exposes __ExportSettings__, which allow you to control the export of the RadRichTextEditor document.
### Export Settings Properties
* __ImageExportMode__: A property of type __ImageExportMode__ that gets or sets how the image should be exported. This property is an enumeration and it allows the following values:
* __None__: Images are not exported.
* __RawData__: Images are exported using their RawData.
* __ImageExportingEvent__: Event is fired on exporting.
* __UriSource__: The UriSource property is used instead of RawData. Bear in mind that this property may not be set on all images.
### Export Settings Events
* __ImageExporting__: This event is fired every time before exporting an Image.
* __InlineUIContainerExporting__: This event is fired every time before exporting an __InlineUIContainer__.
>These events will be called when the __ImageExportMode__ enumeration property is set to __ImageExportingEvent__.
#### Setting the ExportSettings of the XamlFormatProvider
{{source=..\SamplesCS\RichTextEditor\ImportExport\XamlFormatProviderForm.cs region=SetupXamlExportSettings}}
{{source=..\SamplesVB\RichTextEditor\ImportExport\XamlFormatProviderForm.vb region=SetupXamlExportSettings}}
````C#
XamlFormatProvider xamlFormatProvider = new XamlFormatProvider();
XamlExportSettings settings = new XamlExportSettings();
settings.ImageExportMode = ImageExportMode.UriSource;
xamlFormatProvider.ExportSettings = settings;
````
````VB.NET
Dim xamlFormatProvider As XamlFormatProvider = New XamlFormatProvider()
Dim settings As XamlExportSettings = New XamlExportSettings()
settings.ImageExportMode = ImageExportMode.UriSource
xamlFormatProvider.ExportSettings = settings
````
{{endregion}}
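When __ImageExportMode__ is set to __ImageExportingEvent__, the export of each image is delegated to your __ImageExporting__ handler. The snippet below is a minimal sketch only; the handler signature and the members of its event arguments are assumptions modeled on the __ImageImportedEventArgs__ example later in this article, so check the API reference for the exact types.

````C#
XamlFormatProvider xamlFormatProvider = new XamlFormatProvider();
XamlExportSettings settings = new XamlExportSettings();
settings.ImageExportMode = ImageExportMode.ImageExportingEvent;
settings.ImageExporting += (sender, e) =>
{
    // Inspect or replace the image before it is written to the XAML output.
};
xamlFormatProvider.ExportSettings = settings;
````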
## Import Settings
__XamlFormatProvider__ exposes __ImportSettings__, which allow you to control the import of the XAML file.
### Import Settings Events
* __ImageImported__: This event is fired every time when the __Image__ is imported.
* __InlineUIContainerImported__: This event is fired every time when the __InlineUIContainer__ is imported.
#### Setting the ImportSettings of the XamlFormatProvider
{{source=..\SamplesCS\RichTextEditor\ImportExport\XamlFormatProviderForm.cs region=SetupXamlImportSettings}}
{{source=..\SamplesVB\RichTextEditor\ImportExport\XamlFormatProviderForm.vb region=SetupXamlImportSettings}}
````C#
XamlFormatProvider xamlFormatProvider = new XamlFormatProvider();
XamlImportSettings settings = new XamlImportSettings();
xamlFormatProvider.ImportSettings = settings;
settings.ImageImported += XamlImportSettings_ImageImported;
````
````VB.NET
Dim xamlFormatProvider As XamlFormatProvider = New XamlFormatProvider()
Dim settings As XamlImportSettings = New XamlImportSettings()
xamlFormatProvider.ImportSettings = settings
AddHandler settings.ImageImported, AddressOf XamlImportSettings_ImageImported
````
{{endregion}}
#### ImageImported Event
{{source=..\SamplesCS\RichTextEditor\ImportExport\XamlFormatProviderForm.cs region=ImageImportedEvent}}
{{source=..\SamplesVB\RichTextEditor\ImportExport\XamlFormatProviderForm.vb region=ImageImportedEvent}}
````C#
private void XamlImportSettings_ImageImported(object sender, ImageImportedEventArgs e)
{
var img = e.Image;
}
````
````VB.NET
Private Sub XamlImportSettings_ImageImported(ByVal sender As Object, ByVal e As ImageImportedEventArgs)
Dim img = e.Image
End Sub
````
{{endregion}}
## See Also
* [Getting Started]({%slug winforms/richtexteditor-/getting-started%})
* [Using XamlFormatProvider]({%slug winforms/richtexteditor/import-export/xaml/xamlformatprovider%}) | {
"content_hash": "0835fe21bc936df716172046d4c02e25",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 241,
"avg_line_length": 38.81651376146789,
"alnum_prop": 0.7896478373906878,
"repo_name": "telerik/winforms-docs",
"id": "ff5c291570c55ea9fedb65b6a95bca2b604ef9ca",
"size": "4235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controls/richtexteditor/import-export/xaml/settings.md",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "96"
},
{
"name": "CSS",
"bytes": "2296"
},
{
"name": "HTML",
"bytes": "1629"
},
{
"name": "JavaScript",
"bytes": "42129"
},
{
"name": "Ruby",
"bytes": "882"
}
],
"symlink_target": ""
} |
<?php
namespace Core;
class Inflector
{
/**
     * Contains the list of singularization rules.
     *
     * @var array An array of regular expression rules in the form of `'match' => 'replace'`,
     *            which specify the matching and replacing rules for the singularization of words.
*/
protected static $_singular = [];
/**
* Contains the list of pluralization rules.
*
* @var array An array of regular expression rules in the form of `'match' => 'replace'`,
* which specify the matching and replacing rules for the pluralization of words.
*/
protected static $_plural = [];
/**
     * Takes an under_scored word and turns it into a CamelCased word.
     *
     * @param string $word An underscored or slugged word (i.e. `'red_bike'` or `'red-bike'`).
     * @return string Camel cased version of the word (i.e. `'RedBike'`).
*/
public static function camelize($word)
{
$upper = function ($matches) {
return strtoupper($matches[0]);
};
$camelized = str_replace(' ', '', ucwords(str_replace(['_', '-'], ' ', strtolower($word))));
return preg_replace_callback('/(\\\[a-z])/', $upper, $camelized);
}
/**
     * Takes an under_scored word and turns it into a camel-back word.
*
* @param string $word An underscored or slugged word (i.e. `'red_bike'` or `'red-bike'`).
* @param array $on List of characters to camelize on.
* @return string Camel-back version of the word (i.e. `'redBike'`).
*/
public static function camelback($word, $on = ['_', '-', '\\'])
{
return lcfirst(static::camelize($word));
}
/**
* Takes a CamelCased version of a word and turns it into an under_scored one.
*
* @param string $word Camel cased version of a word (i.e. `'RedBike'`).
* @return string Underscored version of the word (i.e. `'red_bike'`).
*/
public static function underscore($word)
{
$underscored = strtr(preg_replace('/(?<=\\w)([A-Z])/', '_\\1', $word), '-', '_');
return strtolower(static::transliterate($underscored));
}
/**
* Replaces underscores with dashes in the string.
*
* @param string $word Underscored string (i.e. `'red_bike'`).
* @return string dashes version of the word (i.e. `'red-bike'`).
*/
public static function dasherize($word)
{
return strtr($word, '_', '-');
}
/**
* Returns a string with all spaces converted to given replacement and non word characters removed.
* Maps special characters to ASCII using `transliterator_transliterate`.
*
* @param string $string An arbitrary string to convert.
* @param string $replacement The replacement to use for spaces.
* @return string The converted string.
*/
public static function slug($string, $replacement = '-')
{
$transliterated = static::transliterate($string);
$spaced = preg_replace('/[^\w\s]/', ' ', $transliterated);
return preg_replace('/\\s+/', $replacement, trim($spaced));
}
/**
* Returns a lowercased string with all spaces converted to given replacement and non word characters removed.
* Maps special characters to ASCII using `transliterator_transliterate`.
*
* @param string $string An arbitrary string to convert.
* @param string $replacement The replacement to use for spaces.
* @return string The converted lowercased string.
*/
public static function parameterize($string, $replacement = '-')
{
        // slug() already transliterates the string, so no separate call is needed here.
        return strtolower(static::slug($string, $replacement));
}
/**
* Takes an under_scored version of a word and turns it into an human- readable form by
* replacing underscores with a space, and by upper casing the initial character of each word.
*
* @param string $word Under_scored version of a word (i.e. `'red_bike'`).
* @param string $separator The separator character used in the initial string.
* @return string Human readable version of the word (i.e. `'Red Bike'`).
*/
public static function titleize($word, $separator = '_')
{
return ucwords(static::humanize($word, $separator));
}
/**
* Takes an under_scored version of a word and turns it into an human- readable form by
* replacing underscores with a space, and by upper casing the initial character of the sentence.
*
* @param string $word Under_scored version of a word (i.e. `'red_bike'`).
* @param string $separator The separator character used in the initial string.
     * @return string Human readable version of the word (i.e. `'Red bike'`).
*/
public static function humanize($word, $separator = '_')
{
return ucfirst(strtr(preg_replace('/_id$/', '', $word), $separator, ' '));
}
/**
* Set a new pluralization rule and its replacement.
*
* @param string $rule A regular expression.
* @param string $replacement The replacement expression.
* @param string $locale The locale where this rule will be applied.
*/
public static function plural($rule, $replacement, $locale = 'default')
{
static::_inflect('_plural', $rule, $replacement, $locale);
}
/**
* Set a new singularization rule and its replacement.
*
* @param string $rule A regular expression.
* @param string $replacement The replacement expression.
* @param string $locale The locale where this rule will be applied.
*/
public static function singular($rule, $replacement, $locale = 'default')
{
static::_inflect('_singular', $rule, $replacement, $locale);
}
/**
* Set a new inflection rule and its replacement.
*
* @param string $type The inflection type.
* @param string $rule A regular expression.
* @param string $replacement The replacement expression.
* @param string $locale The locale where this rule will be applied.
*/
protected static function _inflect($type, $rule, $replacement, $locale)
{
$rules = & static::${$type};
if (!isset($rules[$locale])) {
$rules[$locale] = [];
}
$rules[$locale] = [$rule => $replacement] + $rules[$locale];
}
/**
* Changes the form of a word from singular to plural.
*
* @param string $word Word in singular form.
* @param string $locale The locale to use for rules. Defaults to `'default'`.
* @return string Word in plural form.
*/
public static function pluralize($word, $locale = 'default')
{
$rules = static::$_plural;
return static::_inflectize($rules, $word, $locale);
}
/**
* Changes the form of a word from plural to singular.
*
* @param string $word Word in plural form.
* @param string $locale The locale to use for rules. Defaults to `'default'`.
* @return string Word in plural form.
*/
public static function singularize($word, $locale = 'default')
{
$rules = static::$_singular;
return static::_inflectize($rules, $word, $locale);
}
/**
* Changes the form of a word.
*
* @param string $rules The inflection rules array.
* @param string $word A word.
* @param string $locale The locale to use for rules.
* @return string The inflectized word.
*/
protected static function _inflectize($rules, $word, $locale)
{
if (!$word || !isset($rules[$locale])) {
return $word;
}
$result = $word;
foreach ($rules[$locale] as $rule => $replacement) {
$result = preg_replace($rule, $replacement, $word, -1, $count);
if ($count) {
return $result;
}
}
return $result;
}
/**
* Set a new exception in inflection.
*
* @param string $singular The singular form of the word.
* @param string $plural The plural form of the word.
* @param string $locale The locale where this irregularity will be applied.
*/
public static function irregular($singular, $plural, $locale = 'default')
{
        $len = min(mb_strlen($singular), mb_strlen($plural));
$prefix = '';
$index = 0;
while ($index < $len && ($singular[$index] === $plural[$index])) {
$prefix .= $singular[$index];
$index++;
}
if (!$sSuffix = substr($singular, $index)) {
$sSuffix = '';
}
if (!$pSuffix = substr($plural, $index)) {
$pSuffix = '';
}
static::singular("/({$singular})$/i", "\\1", $locale);
static::singular("/({$prefix}){$pSuffix}$/i", "\\1{$sSuffix}", $locale);
static::plural("/({$plural})$/i", "\\1", $locale);
static::plural("/({$prefix}){$sSuffix}$/i", "\\1{$pSuffix}", $locale);
}
/**
* Replaces non-ASCII characters with an ASCII approximation.
*
* @param string $string
* @param string $transliterator
* @return string
*/
public static function transliterate($string, $transliterator = "Any-Latin; Latin-ASCII; [\u0080-\u7fff] remove;")
{
//return transliterator_transliterate($transliterator, $string);
return $string;
}
/**
* Clears all inflection rules.
*
* @param string|boolean $lang The language name to reset or `true` to reset all even defaults.
*/
public static function reset($lang = null)
{
if (is_string($lang)) {
unset(static::$_singular[$lang]);
unset(static::$_plural[$lang]);
return;
}
static::$_singular = [];
static::$_plural = [];
if ($lang === true) {
return;
}
/**
         * Initialize the class with English inflector rules.
*/
Inflector::singular('/([^s])s$/i', '\1', 'default');
Inflector::plural('/([^s])$/i', '\1s', 'default');
Inflector::singular('/(x|z|s|ss|ch|sh)es$/i', '\1', 'default');
Inflector::plural('/(x|z|ss|ch|sh)$/i', '\1es', 'default');
Inflector::singular('/ies$/i', 'y', 'default');
Inflector::plural('/([^aeiouy]|qu)y$/i', '\1ies', 'default');
Inflector::plural('/(meta|data)$/i', '\1', 'default');
Inflector::irregular('child', 'children', 'default');
Inflector::irregular('equipment', 'equipment', 'default');
Inflector::irregular('information', 'information', 'default');
Inflector::irregular('man', 'men', 'default');
Inflector::irregular('news', 'news', 'default');
Inflector::irregular('person', 'people', 'default');
Inflector::irregular('woman', 'women', 'default');
/**
         * Warning: using an exhaustive list of rules will slow down
         * all singularization/pluralization generation, so it may be
         * preferable to only add the ones you actually need.
         *
         * Below is a list of English exceptions which are not covered by the above rules.
*/
// Inflector::irregular('advice', 'advice', 'default');
// Inflector::irregular('aircraft', 'aircraft', 'default');
// Inflector::irregular('alias', 'aliases', 'default');
// Inflector::irregular('alga', 'algae', 'default');
// Inflector::irregular('alumna', 'alumnae', 'default');
// Inflector::irregular('alumnus', 'alumni', 'default');
// Inflector::irregular('analysis', 'analyses', 'default');
// Inflector::irregular('antenna', 'antennae', 'default');
// Inflector::irregular('automaton', 'automata', 'default');
// Inflector::irregular('axis', 'axes', 'default');
// Inflector::irregular('bacillus', 'bacilli', 'default');
// Inflector::irregular('bacterium', 'bacteria', 'default');
// Inflector::irregular('barracks', 'barracks', 'default');
// Inflector::irregular('basis', 'bases', 'default');
// Inflector::irregular('bellows', 'bellows', 'default');
// Inflector::irregular('buffalo', 'buffaloes', 'default');
// Inflector::irregular('bus', 'buses', 'default');
// Inflector::irregular('bison', 'bison', 'default');
// Inflector::irregular('cactus', 'cacti', 'default');
// Inflector::irregular('cafe', 'cafes', 'default');
// Inflector::irregular('calf', 'calves', 'default');
// Inflector::irregular('cargo', 'cargoes', 'default');
// Inflector::irregular('cattle', 'cattle', 'default');
// Inflector::irregular('child', 'children', 'default');
// Inflector::irregular('congratulations', 'congratulations', 'default');
// Inflector::irregular('corn', 'corn', 'default');
// Inflector::irregular('crisis', 'crises', 'default');
        // Inflector::irregular('criterion', 'criteria', 'default');
// Inflector::irregular('curriculum', 'curricula', 'default');
// Inflector::irregular('datum', 'data', 'default');
// Inflector::irregular('deer', 'deer', 'default');
// Inflector::irregular('die', 'dice', 'default');
// Inflector::irregular('dregs', 'dregs', 'default');
// Inflector::irregular('duck', 'duck', 'default');
// Inflector::irregular('echo', 'echos', 'default');
// Inflector::irregular('elf', 'elves', 'default');
// Inflector::irregular('ellipsis', 'ellipses', 'default');
// Inflector::irregular('embargo', 'embargoes', 'default');
// Inflector::irregular('equipment', 'equipment', 'default');
// Inflector::irregular('erratum', 'errata', 'default');
// Inflector::irregular('evidence', 'evidence', 'default');
// Inflector::irregular('eyeglasses', 'eyeglasses', 'default');
// Inflector::irregular('fish', 'fish', 'default');
// Inflector::irregular('focus', 'foci', 'default');
// Inflector::irregular('foot', 'feet', 'default');
// Inflector::irregular('fungus', 'fungi', 'default');
// Inflector::irregular('gallows', 'gallows', 'default');
// Inflector::irregular('genus', 'genera', 'default');
// Inflector::irregular('goose', 'geese', 'default');
// Inflector::irregular('gold', 'gold', 'default');
// Inflector::irregular('grotto', 'grottoes', 'default');
// Inflector::irregular('gymnasium', 'gymnasia', 'default');
// Inflector::irregular('half', 'halves', 'default');
// Inflector::irregular('headquarters', 'headquarters', 'default');
// Inflector::irregular('hoof', 'hooves', 'default');
// Inflector::irregular('hypothesis', 'hypotheses', 'default');
// Inflector::irregular('information', 'information', 'default');
// Inflector::irregular('graffito', 'graffiti', 'default');
// Inflector::irregular('half', 'halves', 'default');
// Inflector::irregular('hero', 'heroes', 'default');
// Inflector::irregular('jewelry', 'jewelry', 'default');
// Inflector::irregular('kin', 'kin', 'default');
// Inflector::irregular('knife', 'knives', 'default');
// Inflector::irregular('larva', 'larvae', 'default');
// Inflector::irregular('leaf', 'leaves', 'default');
// Inflector::irregular('legislation', 'legislation', 'default');
// Inflector::irregular('life', 'lives', 'default');
// Inflector::irregular('loaf', 'loaves', 'default');
// Inflector::irregular('locus', 'loci', 'default');
// Inflector::irregular('louse', 'lice', 'default');
// Inflector::irregular('luck', 'luck', 'default');
// Inflector::irregular('luggage', 'luggage', 'default');
// Inflector::irregular('man', 'men', 'default');
// Inflector::irregular('mathematics', 'mathematics', 'default');
// Inflector::irregular('matrix', 'matrices', 'default');
// Inflector::irregular('means', 'means', 'default');
// Inflector::irregular('measles', 'measles', 'default');
// Inflector::irregular('medium', 'media', 'default');
// Inflector::irregular('memorandum', 'memoranda', 'default');
// Inflector::irregular('money', 'monies', 'default');
// Inflector::irregular('moose', 'moose', 'default');
// Inflector::irregular('mosquito', 'mosquitoes', 'default');
// Inflector::irregular('motto', 'mottoes', 'default');
// Inflector::irregular('mouse', 'mice', 'default');
// Inflector::irregular('mumps', 'mumps', 'default');
// Inflector::irregular('music', 'music', 'default');
// Inflector::irregular('mythos', 'mythoi', 'default');
// Inflector::irregular('nebula', 'nebulae', 'default');
// Inflector::irregular('neurosis', 'neuroses', 'default');
// Inflector::irregular('news', 'news', 'default');
// Inflector::irregular('nucleus', 'nuclei', 'default');
// Inflector::irregular('numen', 'numina', 'default');
// Inflector::irregular('oasis', 'oases', 'default');
// Inflector::irregular('oats', 'oats', 'default');
// Inflector::irregular('octopus', 'octopuses', 'default');
// Inflector::irregular('offspring', 'offspring', 'default');
// Inflector::irregular('ovum', 'ova', 'default');
// Inflector::irregular('ox', 'oxen', 'default');
// Inflector::irregular('pajamas', 'pajamas', 'default');
// Inflector::irregular('pants', 'pants', 'default');
// Inflector::irregular('paralysis', 'paralyses', 'default');
// Inflector::irregular('parenthesis', 'parentheses', 'default');
// Inflector::irregular('person', 'people', 'default');
// Inflector::irregular('phenomenon', 'phenomena', 'default');
// Inflector::irregular('pike', 'pike', 'default');
// Inflector::irregular('plankton', 'plankton', 'default');
// Inflector::irregular('pliers', 'pliers', 'default');
// Inflector::irregular('polyhedron', 'polyhedra', 'default');
// Inflector::irregular('potato', 'potatoes', 'default');
// Inflector::irregular('quiz', 'quizzes', 'default');
// Inflector::irregular('radius', 'radii', 'default');
// Inflector::irregular('roof', 'roofs', 'default');
// Inflector::irregular('salmon', 'salmon', 'default');
// Inflector::irregular('scarf', 'scarves', 'default');
// Inflector::irregular('scissors', 'scissors', 'default');
// Inflector::irregular('self', 'selves', 'default');
// Inflector::irregular('series', 'series', 'default');
// Inflector::irregular('shears', 'shears', 'default');
// Inflector::irregular('sheep', 'sheep', 'default');
// Inflector::irregular('shelf', 'shelves', 'default');
// Inflector::irregular('shorts', 'shorts', 'default');
// Inflector::irregular('silver', 'silver', 'default');
// Inflector::irregular('species', 'species', 'default');
// Inflector::irregular('squid', 'squid', 'default');
// Inflector::irregular('stimulus', 'stimuli', 'default');
// Inflector::irregular('stratum', 'strata', 'default');
// Inflector::irregular('swine', 'swine', 'default');
// Inflector::irregular('syllabus', 'syllabi', 'default');
// Inflector::irregular('synopsis', 'synopses', 'default');
// Inflector::irregular('synthesis', 'syntheses', 'default');
// Inflector::irregular('tax', 'taxes', 'default');
// Inflector::irregular('terminus', 'termini', 'default');
// Inflector::irregular('thesis', 'theses', 'default');
// Inflector::irregular('thief', 'thieves', 'default');
// Inflector::irregular('tomato', 'tomatoes', 'default');
// Inflector::irregular('tongs', 'tongs', 'default');
// Inflector::irregular('tooth', 'teeth', 'default');
// Inflector::irregular('torpedo', 'torpedoes', 'default');
// Inflector::irregular('torus', 'tori', 'default');
// Inflector::irregular('trousers', 'trousers', 'default');
// Inflector::irregular('trout', 'trout', 'default');
// Inflector::irregular('tweezers', 'tweezers', 'default');
// Inflector::irregular('vertebra', 'vertebrae', 'default');
// Inflector::irregular('vertex', 'vertices', 'default');
// Inflector::irregular('vespers', 'vespers', 'default');
// Inflector::irregular('veto', 'vetoes', 'default');
// Inflector::irregular('volcano', 'volcanoes', 'default');
// Inflector::irregular('vortex', 'vortices', 'default');
// Inflector::irregular('vita', 'vitae', 'default');
// Inflector::irregular('virus', 'viri', 'default');
// Inflector::irregular('wheat', 'wheat', 'default');
// Inflector::irregular('wife', 'wives', 'default');
// Inflector::irregular('wolf', 'wolves', 'default');
// Inflector::irregular('woman', 'women', 'default');
// Inflector::irregular('zero', 'zeros', 'default');
}
}
Inflector::reset();
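// Usage sketch (illustrative comment, not part of the original file):
//
//     Inflector::pluralize('category');    // 'categories' via the y => ies rule
//     Inflector::singularize('children');  // 'child' via the irregular() mapping
//     Inflector::camelize('red_bike');     // 'RedBike'
//     Inflector::slug('Red  Bike!');       // 'Red-Bike'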
| {
"content_hash": "905ebe8e06d4890f4933927c7fb1bffc",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 118,
"avg_line_length": 45.819532908704886,
"alnum_prop": 0.57847180390158,
"repo_name": "lucasnpinheiro/Aulas",
"id": "cc9d737c4ed810945965b667f0634d0feaf13b6a",
"size": "21581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Core/Inflector.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "328"
},
{
"name": "Batchfile",
"bytes": "216"
},
{
"name": "CSS",
"bytes": "881"
},
{
"name": "HTML",
"bytes": "131"
},
{
"name": "JavaScript",
"bytes": "3011"
},
{
"name": "PHP",
"bytes": "604010"
},
{
"name": "Shell",
"bytes": "1071"
}
],
"symlink_target": ""
} |
namespace extensions {
AppSyncData::AppSyncData() {}
AppSyncData::AppSyncData(const syncer::SyncData& sync_data) {
PopulateFromSyncData(sync_data);
}
AppSyncData::AppSyncData(const syncer::SyncChange& sync_change) {
PopulateFromSyncData(sync_change.sync_data());
extension_sync_data_.set_uninstalled(
sync_change.change_type() == syncer::SyncChange::ACTION_DELETE);
}
AppSyncData::AppSyncData(const Extension& extension,
bool enabled,
bool incognito_enabled,
const syncer::StringOrdinal& app_launch_ordinal,
const syncer::StringOrdinal& page_ordinal,
extensions::LaunchType launch_type)
: extension_sync_data_(extension, enabled, incognito_enabled),
app_launch_ordinal_(app_launch_ordinal),
page_ordinal_(page_ordinal),
launch_type_(launch_type) {
}
AppSyncData::~AppSyncData() {}
syncer::SyncData AppSyncData::GetSyncData() const {
sync_pb::EntitySpecifics specifics;
PopulateAppSpecifics(specifics.mutable_app());
return syncer::SyncData::CreateLocalData(extension_sync_data_.id(),
extension_sync_data_.name(),
specifics);
}
syncer::SyncChange AppSyncData::GetSyncChange(
syncer::SyncChange::SyncChangeType change_type) const {
return syncer::SyncChange(FROM_HERE, change_type, GetSyncData());
}
void AppSyncData::PopulateAppSpecifics(sync_pb::AppSpecifics* specifics) const {
DCHECK(specifics);
// Only sync the ordinal values and launch type if they are valid.
if (app_launch_ordinal_.IsValid())
specifics->set_app_launch_ordinal(app_launch_ordinal_.ToInternalValue());
if (page_ordinal_.IsValid())
specifics->set_page_ordinal(page_ordinal_.ToInternalValue());
sync_pb::AppSpecifics::LaunchType sync_launch_type =
static_cast<sync_pb::AppSpecifics::LaunchType>(launch_type_);
// The corresponding validation of this value during processing of an
// AppSyncData is in ExtensionSyncService::ProcessAppSyncData.
if (launch_type_ >= LAUNCH_TYPE_FIRST && launch_type_ < NUM_LAUNCH_TYPES &&
sync_pb::AppSpecifics_LaunchType_IsValid(sync_launch_type)) {
specifics->set_launch_type(sync_launch_type);
}
extension_sync_data_.PopulateExtensionSpecifics(
specifics->mutable_extension());
}
void AppSyncData::PopulateFromAppSpecifics(
const sync_pb::AppSpecifics& specifics) {
extension_sync_data_.PopulateFromExtensionSpecifics(specifics.extension());
app_launch_ordinal_ = syncer::StringOrdinal(specifics.app_launch_ordinal());
page_ordinal_ = syncer::StringOrdinal(specifics.page_ordinal());
launch_type_ = specifics.has_launch_type()
? static_cast<extensions::LaunchType>(specifics.launch_type())
: LAUNCH_TYPE_INVALID;
}
void AppSyncData::PopulateFromSyncData(const syncer::SyncData& sync_data) {
PopulateFromAppSpecifics(sync_data.GetSpecifics().app());
}
} // namespace extensions
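// Usage sketch (illustrative comment, not part of the original file):
//
//   AppSyncData data(*extension, /*enabled=*/true, /*incognito_enabled=*/false,
//                    app_launch_ordinal, page_ordinal,
//                    extensions::LAUNCH_TYPE_REGULAR);
//   syncer::SyncChange change = data.GetSyncChange(syncer::SyncChange::ACTION_ADD);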
| {
"content_hash": "d79aa815ddd1bb6b7aad9f07bbf9efce",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 80,
"avg_line_length": 37.22222222222222,
"alnum_prop": 0.6985074626865672,
"repo_name": "patrickm/chromium.src",
"id": "ac0fb07d6df6c30efb56396df59ac3ea4a578845",
"size": "3392",
"binary": false,
"copies": "3",
"ref": "refs/heads/nw",
"path": "chrome/browser/extensions/app_sync_data.cc",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "40737238"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "207930633"
},
{
"name": "CSS",
"bytes": "939170"
},
{
"name": "Java",
"bytes": "5844934"
},
{
"name": "JavaScript",
"bytes": "17837835"
},
{
"name": "Mercury",
"bytes": "10533"
},
{
"name": "Objective-C",
"bytes": "886228"
},
{
"name": "Objective-C++",
"bytes": "6667789"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "672770"
},
{
"name": "Python",
"bytes": "10857933"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1326032"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
} |
<?xml version="1.0" encoding="UTF-8"?>
<CustomFieldTranslation xmlns="http://soap.sforce.com/2006/04/metadata">
<name>Contact_JSON__c</name>
<label>JSON de contact</label>
</CustomFieldTranslation>
| {
"content_hash": "fee35bde7f87dd33b4095eda3b768e8f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 72,
"avg_line_length": 41.2,
"alnum_prop": 0.7135922330097088,
"repo_name": "SalesforceFoundation/HEDAP",
"id": "3da2959db9015589a84583e0cfadb88e0533da45",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature/234",
"path": "force-app/main/default/objectTranslations/Contact-fr/Contact_JSON__c.fieldTranslation-meta.xml",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Apex",
"bytes": "1599605"
},
{
"name": "CSS",
"bytes": "621"
},
{
"name": "HTML",
"bytes": "145319"
},
{
"name": "JavaScript",
"bytes": "61802"
},
{
"name": "Python",
"bytes": "28442"
},
{
"name": "RobotFramework",
"bytes": "26714"
}
],
"symlink_target": ""
} |
// ReSharper disable InconsistentNaming
using System;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using EasyNetQ.Topology;
using NUnit.Framework;
namespace EasyNetQ.Tests.Integration
{
[TestFixture, Explicit("Requires a RabbitMQ instance on localhost")]
public class AdvancedApiExamples
{
private IAdvancedBus advancedBus;
[SetUp]
public void SetUp()
{
advancedBus = RabbitHutch.CreateBus("host=localhost").Advanced;
}
[TearDown]
public void TearDown()
{
advancedBus.Dispose();
}
[Test, Explicit]
public void DeclareTopology()
{
var queue = advancedBus.QueueDeclare("my_queue");
var exchange = advancedBus.ExchangeDeclare("my_exchange", ExchangeType.Direct);
advancedBus.Bind(exchange, queue, "routing_key");
}
        [Test, Explicit]
public void DeclareTopologyAndCheckPassive()
{
var queue = advancedBus.QueueDeclare("my_queue");
var exchange = advancedBus.ExchangeDeclare("my_exchange", ExchangeType.Direct);
advancedBus.Bind(exchange, queue, "routing_key");
advancedBus.ExchangeDeclare("my_exchange", ExchangeType.Direct, passive: true);
}
[Test, Explicit]
public void DeclareWithTtlAndExpire()
{
advancedBus.QueueDeclare("my_queue", perQueueMessageTtl: 500, expires: 500);
}
[Test, Explicit]
public void DeclareExchangeWithAlternate()
{
const string alternate = "alternate";
const string bindingKey = "the-binding-key";
var alternateExchange = advancedBus.ExchangeDeclare(alternate, ExchangeType.Direct);
var originalExchange = advancedBus.ExchangeDeclare("original", ExchangeType.Direct, alternateExchange: alternate);
var queue = advancedBus.QueueDeclare("my_queue");
advancedBus.Bind(alternateExchange, queue, bindingKey);
var message = Encoding.UTF8.GetBytes("Some message");
advancedBus.Publish(originalExchange, bindingKey, false, new MessageProperties(), message);
}
[Test, Explicit]
public void DeclareDelayedExchange()
{
const string bindingKey = "the-binding-key";
var delayedExchange = advancedBus.ExchangeDeclare("delayed", ExchangeType.Direct, delayed: true);
var queue = advancedBus.QueueDeclare("my_queue");
advancedBus.Bind(delayedExchange, queue, bindingKey);
var message = Encoding.UTF8.GetBytes("Some message");
var messageProperties = new MessageProperties();
messageProperties.Headers.Add("x-delay", 5000);
advancedBus.Publish(delayedExchange, bindingKey, false, messageProperties, message);
}
[Test, Explicit]
public void ConsumeFromAQueue()
{
var queue = new Queue("my_queue", false);
advancedBus.Consume(queue, (body, properties, info) => Task.Factory.StartNew(() =>
{
var message = Encoding.UTF8.GetString(body);
Console.Out.WriteLine("Got message: '{0}'", message);
}));
Thread.Sleep(500);
}
[Test, Explicit]
public void PublishToAnExchange()
{
var exchange = new Exchange("my_exchange");
var body = Encoding.UTF8.GetBytes("Hello World!");
advancedBus.Publish(exchange, "routing_key", false, new MessageProperties(), body);
Thread.Sleep(5000);
}
[Test, Explicit]
public void Should_be_able_to_delete_objects()
{
// declare some objects
var queue = advancedBus.QueueDeclare("my_queue");
var exchange = advancedBus.ExchangeDeclare("my_exchange", ExchangeType.Direct);
var binding = advancedBus.Bind(exchange, queue, "routing_key");
// and then delete them
advancedBus.BindingDelete(binding);
advancedBus.ExchangeDelete(exchange);
advancedBus.QueueDelete(queue);
}
[Test, Explicit]
public void Should_consume_a_message()
{
var queue = advancedBus.QueueDeclare("consume_test");
advancedBus.Consume<MyMessage>(queue, (message, info) =>
Task.Factory.StartNew(() =>
Console.WriteLine("Got message {0}", message.Body.Text)));
advancedBus.Publish(Exchange.GetDefault(), "consume_test", false, new Message<MyMessage>(new MyMessage{ Text = "Wotcha!"}));
Thread.Sleep(1000);
}
[Test, Explicit]
public void Should_be_able_to_get_a_message()
{
var queue = advancedBus.QueueDeclare("get_test");
advancedBus.Publish(Exchange.GetDefault(), "get_test", false, new Message<MyMessage>(new MyMessage { Text = "Oh! Hello!" }));
var getResult = advancedBus.Get<MyMessage>(queue);
if (getResult.MessageAvailable)
{
Console.Out.WriteLine("Got message: {0}", getResult.Message.Body.Text);
}
else
{
Console.Out.WriteLine("Failed to get message!");
}
}
[Test, Explicit]
public void Should_set_MessageAvailable_to_false_when_queue_is_empty()
{
var queue = advancedBus.QueueDeclare("get_empty_queue_test");
var getResult = advancedBus.Get<MyMessage>(queue);
if (!getResult.MessageAvailable)
{
Console.Out.WriteLine("Failed to get message!");
}
}
[Test, Explicit]
public void Should_be_able_to_get_queue_length()
{
var queue = advancedBus.QueueDeclare("count_test");
advancedBus.Publish(Exchange.GetDefault(), "count_test", false, new Message<MyMessage>(new MyMessage { Text = "Oh! Hello!" }));
uint messageCount = advancedBus.MessageCount(queue);
Console.WriteLine("{0} messages in queue", messageCount);
}
}
}
// ReSharper restore InconsistentNaming | {
"content_hash": "00a34edebea2c21854a4ce204c15e2b5",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 139,
"avg_line_length": 35.548022598870055,
"alnum_prop": 0.5969485060394152,
"repo_name": "danbarua/EasyNetQ",
"id": "581a0411d0010d1276f0708cebcac44427cb8a3a",
"size": "6294",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Source/EasyNetQ.Tests/Integration/AdvancedApiExamples.cs",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "82"
},
{
"name": "C#",
"bytes": "1044522"
},
{
"name": "JavaScript",
"bytes": "1022"
},
{
"name": "PLpgSQL",
"bytes": "1830"
}
],
"symlink_target": ""
} |
<html>
<head>
<title>BOOST_PP_RPAREN</title>
<link rel="stylesheet" type="text/css" href="../styles.css">
</head>
<body>
<div style="margin-left: 0px;">
The <b>BOOST_PP_RPAREN</b> macro expands to a right parenthesis.
</div>
<h4>Usage</h4>
<div class="code">
<b>BOOST_PP_RPAREN</b>()
</div>
<h4>Remarks</h4>
<div>
The preprocessor interprets parentheses as delimiters in macro invocations.
Because of this, parentheses require special handling.
</div>
<h4>See Also</h4>
<ul>
<li><a href="lparen.html">BOOST_PP_LPAREN</a></li>
</ul>
<h4>Requirements</h4>
<div>
<b>Header:</b> <a href="../headers/punctuation/paren.html"><boost/preprocessor/punctuation/paren.hpp></a>
</div>
<h4>Sample Code</h4>
<div><pre>
#include <<a href="../headers/facilities/empty.html">boost/preprocessor/facilities/empty.hpp</a>>
#include <<a href="../headers/punctuation/paren.html">boost/preprocessor/punctuation/paren.hpp</a>>
#define X(x) x
#define MACRO(x, p) X ( x p
MACRO(abc, <a href="rparen.html">BOOST_PP_RPAREN</a>()) // expands to abc
#define Y(x)
MACRO(<a href="empty.html">BOOST_PP_EMPTY</a> <a href="rparen.html">BOOST_PP_RPAREN</a>()(), 10) // expands to 10
</pre></div>
<hr size="1">
<div style="margin-left: 0px;">
<i>© Copyright <a href="http://www.housemarque.com" target="_top">Housemarque Oy</a> 2002</i>
  <br><i>&copy; Copyright Paul Mensonides 2002</i>
</div>
<div style="margin-left: 0px;">
<p><small>Distributed under the Boost Software License, Version 1.0. (See
accompanying file <a href="../../../../LICENSE_1_0.txt">LICENSE_1_0.txt</a> or
copy at <a href=
"http://www.boost.org/LICENSE_1_0.txt">www.boost.org/LICENSE_1_0.txt</a>)</small></p>
</div>
</body>
</html>
| {
"content_hash": "cfcca377375d57b876dae58b676cf258",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 120,
"avg_line_length": 34.15094339622642,
"alnum_prop": 0.6359116022099448,
"repo_name": "zjutjsj1004/third",
"id": "483c3acd842c759fd897c46e0bccef98349f3d92",
"size": "1810",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "boost/libs/preprocessor/doc/ref/rparen.html",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "224158"
},
{
"name": "Batchfile",
"bytes": "33175"
},
{
"name": "C",
"bytes": "5576593"
},
{
"name": "C#",
"bytes": "41850"
},
{
"name": "C++",
"bytes": "179595990"
},
{
"name": "CMake",
"bytes": "28348"
},
{
"name": "CSS",
"bytes": "331303"
},
{
"name": "Cuda",
"bytes": "26521"
},
{
"name": "FORTRAN",
"bytes": "1856"
},
{
"name": "Groff",
"bytes": "1305458"
},
{
"name": "HTML",
"bytes": "159660377"
},
{
"name": "IDL",
"bytes": "15"
},
{
"name": "JavaScript",
"bytes": "285786"
},
{
"name": "Lex",
"bytes": "1290"
},
{
"name": "Makefile",
"bytes": "1202020"
},
{
"name": "Max",
"bytes": "37424"
},
{
"name": "Objective-C",
"bytes": "3674"
},
{
"name": "Objective-C++",
"bytes": "651"
},
{
"name": "PHP",
"bytes": "60249"
},
{
"name": "Perl",
"bytes": "37297"
},
{
"name": "Perl6",
"bytes": "2130"
},
{
"name": "Python",
"bytes": "1833677"
},
{
"name": "QML",
"bytes": "613"
},
{
"name": "QMake",
"bytes": "17385"
},
{
"name": "Rebol",
"bytes": "372"
},
{
"name": "Shell",
"bytes": "1144162"
},
{
"name": "Tcl",
"bytes": "1205"
},
{
"name": "TeX",
"bytes": "38313"
},
{
"name": "XSLT",
"bytes": "564356"
},
{
"name": "Yacc",
"bytes": "20341"
}
],
"symlink_target": ""
} |
class AddDogBehaviorRating < ActiveRecord::Migration
def change
add_column :pups, :dog_behavior, :integer
end
end
| {
"content_hash": "c50a2659c7052e2a26759fa60f44ca19",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 52,
"avg_line_length": 24.4,
"alnum_prop": 0.7540983606557377,
"repo_name": "cjzcpsyx/rate-my-pup",
"id": "da8ada4e50964029f4dbf08548b44b1555ff8e3c",
"size": "122",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "db/migrate/20160616072316_add_dog_behavior_rating.rb",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15018"
},
{
"name": "Cucumber",
"bytes": "26072"
},
{
"name": "HTML",
"bytes": "44223"
},
{
"name": "JavaScript",
"bytes": "41379"
},
{
"name": "Ruby",
"bytes": "141866"
},
{
"name": "Shell",
"bytes": "193"
}
],
"symlink_target": ""
} |
<?php
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*
* @author Sergey Bukharov, Karl Diab, Tim Davis, Jonathan Heggen, Kuanysh Boranbayev
*/
class Entity
{
// If this class has a setProp method, use it, else modify the property directly
// Magic setter
public function __set($key, $value) {
// if a set* method exists for this key,
// use that method to insert this value.
// For instance, setName(...) will be invoked by $object->name = ...
// and setLastName(...) for $object->last_name =
$method = 'set' . str_replace(' ', '', ucwords(str_replace(['-', '_'], ' ', $key)));
if (method_exists($this, $method))
{
$this->$method($value);
return $this;
}
// Otherwise, just set the property value directly.
$this->$key = $value;
return $this;
}
    // If this class has a getProp method, use it, else read the property directly
// Magic getter
public function __get($key) {
$method = 'get' . str_replace(' ', '', ucwords(str_replace(['-', '_'], ' ', $key)));
if (method_exists($this, $method))
{
            // Return the value produced by the accessor, not $this.
            return $this->$method();
}
return $this->$key;
}
}
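// Usage sketch (illustrative comment, not part of the original file):
//
//     class User extends Entity
//     {
//         protected $name;
//         public function setName($value) { $this->name = trim($value); }
//         public function getName() { return $this->name; }
//     }
//
//     $user = new User();
//     $user->name = '  Ada  ';  // __set() routes the assignment through setName()
//     echo $user->name;         // 'Ada', returned by getName() through __get()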
?> | {
"content_hash": "39a13c7355667031d07399c07b957bf3",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 92,
"avg_line_length": 30.425531914893618,
"alnum_prop": 0.5503496503496503,
"repo_name": "TempOrg4711/HalfMileHigh",
"id": "7a0d25f5af580b07b92ae3d58a2e77b984dc69d6",
"size": "1434",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "application/models/Entity.php",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5860"
},
{
"name": "HTML",
"bytes": "5632"
},
{
"name": "JavaScript",
"bytes": "14872"
},
{
"name": "PHP",
"bytes": "1871753"
}
],
"symlink_target": ""
} |