repo_name
stringlengths
4
116
path
stringlengths
3
942
size
stringlengths
1
7
content
stringlengths
3
1.05M
license
stringclasses
15 values
KangDroidSMProject/ISL
test_inputs/codegen/cloog/reservoir-lim-lam1.c
231
for (int c0 = -99; c0 <= 100; c0 += 1) { if (c0 >= 1) S2(c0, 1); for (int c1 = max(1, -c0 + 1); c1 <= min(99, -c0 + 100); c1 += 1) { S1(c0 + c1, c1); S2(c0 + c1, c1 + 1); } if (c0 <= 0) S1(c0 + 100, 100); }
mit
wearefractal/smog
public/js/routes/index.js
911
// Generated by CoffeeScript 1.3.3 (function() { define(["smog/server", "smog/notify", "templates/connect"], function(server, notify, templ) { return { show: function() { $('#content').html(templ()); $('#connect-modal').modal({ backdrop: false }); return $('#connect-button').click(function() { var host; host = $('#host').val(); return server.connect(host, function(err, okay) { if (err != null) { if (typeof err === 'object' && Object.keys(err).length === 0) { err = "Server unavailable"; } return notify.error("Connection error: " + (err.err || err)); } else { $('#connect-modal').modal('hide'); return window.location.hash = '#/home'; } }); }); } }; }); }).call(this);
mit
hotchandanisagar/odata.net
test/FunctionalTests/Tests/DataOData/Tests/OData.Reader.Tests/JsonLight/StreamReferenceValueReaderJsonLightTests.cs
23505
//--------------------------------------------------------------------- // <copyright file="StreamReferenceValueReaderJsonLightTests.cs" company="Microsoft"> // Copyright (C) Microsoft Corporation. All rights reserved. See License.txt in the project root for license information. // </copyright> //--------------------------------------------------------------------- namespace Microsoft.Test.Taupo.OData.Reader.Tests.JsonLight { #region Namespaces using System.Collections.Generic; using System.Linq; using Microsoft.Test.Taupo.Astoria.Contracts.OData; using Microsoft.Test.Taupo.Astoria.OData; using Microsoft.Test.Taupo.Common; using Microsoft.Test.Taupo.Contracts.EntityModel; using Microsoft.Test.Taupo.Execution; using Microsoft.Test.Taupo.OData.Common; using Microsoft.Test.Taupo.OData.Contracts; using Microsoft.Test.Taupo.OData.Contracts.Json; using Microsoft.Test.Taupo.OData.JsonLight; using Microsoft.Test.Taupo.OData.Reader.Tests; using Microsoft.VisualStudio.TestTools.UnitTesting; using Microsoft.OData.Edm; using Microsoft.OData.Edm.Library; using TestModels = Microsoft.Test.OData.Utils.Metadata.TestModels; #endregion Namespaces /// <summary> /// Tests reading of various complex value JSON Light payloads. 
/// </summary> [TestClass, TestCase] public class StreamReferenceValueReaderJsonLightTests : ODataReaderTestCase { [InjectDependency] public IPayloadGenerator PayloadGenerator { get; set; } private PayloadReaderTestDescriptor.Settings settings; [InjectDependency] public PayloadReaderTestDescriptor.Settings Settings { get { return this.settings; } set { this.settings = value; this.settings.ExpectedResultSettings.ObjectModelToPayloadElementConverter = new JsonLightObjectModelToPayloadElementConverter(); } } private sealed class StreamPropertyTestCase { public string DebugDescription { get; set; } public string Json { get; set; } public EntityInstance ExpectedEntity { get; set; } public ExpectedException ExpectedException { get; set; } public bool OnlyResponse { get; set; } public IEdmTypeReference OwningEntityType { get; set; } } [TestMethod, TestCategory("Reader.Json"), Variation(Description = "Verifies correct reading of stream properties (stream reference values) with fully specified metadata.")] public void StreamPropertyTest() { IEdmModel model = TestModels.BuildTestModel(); var testCases = new[] { new StreamPropertyTestCase { DebugDescription = "Just edit link", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", "http://odata.org/test/Cities(1)/Skyline", "http://odata.org/streamproperty/editlink", null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaEditLinkAnnotationName) + "\":\"http://odata.org/streamproperty/editlink\"" }, new StreamPropertyTestCase { DebugDescription = "Just read link", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", "http://odata.org/streamproperty/readlink", "http://odata.org/test/Cities(1)/Skyline", null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaReadLinkAnnotationName) + "\":\"http://odata.org/streamproperty/readlink\"" }, new StreamPropertyTestCase { DebugDescription = "Just content 
type", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", "http://odata.org/test/Cities(1)/Skyline", "http://odata.org/test/Cities(1)/Skyline", "streamproperty:contenttype", null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaContentTypeAnnotationName) + "\":\"streamproperty:contenttype\"" }, new StreamPropertyTestCase { DebugDescription = "Just ETag", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", "http://odata.org/test/Cities(1)/Skyline", "http://odata.org/test/Cities(1)/Skyline", null, "streamproperty:etag"), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaETagAnnotationName) + "\":\"streamproperty:etag\"" }, new StreamPropertyTestCase { DebugDescription = "Everything", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", "http://odata.org/streamproperty/readlink", "http://odata.org/streamproperty/editlink", "streamproperty:contenttype", "streamproperty:etag"), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaEditLinkAnnotationName) + "\":\"http://odata.org/streamproperty/editlink\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaReadLinkAnnotationName) + "\":\"http://odata.org/streamproperty/readlink\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaContentTypeAnnotationName) + "\":\"streamproperty:contenttype\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaETagAnnotationName) + "\":\"streamproperty:etag\"" }, new StreamPropertyTestCase { DebugDescription = "Just custom annotation - should report empty stream property", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", "http://odata.org/test/Cities(1)/Skyline", "http://odata.org/test/Cities(1)/Skyline", null, null), Json = "\"" + 
JsonLightUtils.GetPropertyAnnotationName("Skyline", "custom.value") + "\":\"value\"" }, new StreamPropertyTestCase { DebugDescription = "Everything with custom annotation - custom annotations should be ignored", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", "http://odata.org/streamproperty/readlink", "http://odata.org/streamproperty/editlink", "streamproperty:contenttype", "streamproperty:etag"), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaEditLinkAnnotationName) + "\":\"http://odata.org/streamproperty/editlink\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", "custom.value") + "\":\"value\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaReadLinkAnnotationName) + "\":\"http://odata.org/streamproperty/readlink\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaContentTypeAnnotationName) + "\":\"streamproperty:contenttype\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaETagAnnotationName) + "\":\"streamproperty:etag\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", "custom.type") + "\":42" }, new StreamPropertyTestCase { DebugDescription = "With odata.type annotation - should fail", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, null, null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataTypeAnnotationName) + "\":\"Edm.Stream\"", ExpectedException = ODataExpectedExceptions.ODataException("ODataJsonLightEntryAndFeedDeserializer_UnexpectedStreamPropertyAnnotation", "Skyline", JsonLightConstants.ODataTypeAnnotationName) }, new StreamPropertyTestCase { DebugDescription = "Everything with navigation link URL annotation - should fail", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", "http://odata.org/streamproperty/readlink", 
"http://odata.org/streamproperty/editlink", "streamproperty:contenttype", "streamproperty:etag"), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaEditLinkAnnotationName) + "\":\"http://odata.org/streamproperty/editlink\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaReadLinkAnnotationName) + "\":\"http://odata.org/streamproperty/readlink\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataNavigationLinkUrlAnnotationName) + "\":\"http://odata.org/streamproperty/navlink\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaContentTypeAnnotationName) + "\":\"streamproperty:contenttype\"," + "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaETagAnnotationName) + "\":\"streamproperty:etag\"", ExpectedException = ODataExpectedExceptions.ODataException("ODataJsonLightEntryAndFeedDeserializer_UnexpectedStreamPropertyAnnotation", "Skyline", JsonLightConstants.ODataNavigationLinkUrlAnnotationName) }, new StreamPropertyTestCase { DebugDescription = "Invalid edit link - wrong primitive", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, null, null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaEditLinkAnnotationName) + "\":42", ExpectedException = ODataExpectedExceptions.ODataException("JsonReaderExtensions_CannotReadPropertyValueAsString", "42", JsonLightConstants.ODataMediaEditLinkAnnotationName), OnlyResponse = true, }, new StreamPropertyTestCase { DebugDescription = "Invalid edit link - null", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, null, null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaEditLinkAnnotationName) + "\":null", ExpectedException = 
ODataExpectedExceptions.ODataException("ODataJsonLightReaderUtils_AnnotationWithNullValue", JsonLightConstants.ODataMediaEditLinkAnnotationName), OnlyResponse = true, }, new StreamPropertyTestCase { DebugDescription = "Invalid read link - wrong primitive", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, null, null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaReadLinkAnnotationName) + "\":true", ExpectedException = ODataExpectedExceptions.ODataException("JsonReaderExtensions_CannotReadPropertyValueAsString", "True", JsonLightConstants.ODataMediaReadLinkAnnotationName), OnlyResponse = true, }, new StreamPropertyTestCase { DebugDescription = "Invalid read link - null", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, null, null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaReadLinkAnnotationName) + "\":null", ExpectedException = ODataExpectedExceptions.ODataException("ODataJsonLightReaderUtils_AnnotationWithNullValue", JsonLightConstants.ODataMediaReadLinkAnnotationName), OnlyResponse = true, }, new StreamPropertyTestCase { DebugDescription = "Invalid ETag - non primitive", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, null, null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaETagAnnotationName) + "\":[]", ExpectedException = ODataExpectedExceptions.ODataException("JsonReaderExtensions_UnexpectedNodeDetected", "PrimitiveValue", "StartArray"), OnlyResponse = true, }, new StreamPropertyTestCase { DebugDescription = "Invalid ETag - null", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, null, null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaETagAnnotationName) + "\":null", ExpectedException = 
ODataExpectedExceptions.ODataException("ODataJsonLightReaderUtils_AnnotationWithNullValue", JsonLightConstants.ODataMediaETagAnnotationName), OnlyResponse = true, }, new StreamPropertyTestCase { DebugDescription = "Invalid content type - non primitive", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, null, null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaContentTypeAnnotationName) + "\":{}", ExpectedException = ODataExpectedExceptions.ODataException("JsonReaderExtensions_UnexpectedNodeDetected", "PrimitiveValue", "StartObject"), OnlyResponse = true, }, new StreamPropertyTestCase { DebugDescription = "Invalid content type - null", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, null, null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaContentTypeAnnotationName) + "\":null", ExpectedException = ODataExpectedExceptions.ODataException("ODataJsonLightReaderUtils_AnnotationWithNullValue", JsonLightConstants.ODataMediaContentTypeAnnotationName), OnlyResponse = true, }, new StreamPropertyTestCase { DebugDescription = "Open stream property", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("OpenSkyline", null, "http://odata.org/streamproperty/editlink", null, null), OwningEntityType = model.FindDeclaredType("TestModel.CityOpenType").ToTypeReference(), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("OpenSkyline", JsonLightConstants.ODataMediaEditLinkAnnotationName) + "\":\"http://odata.org/streamproperty/editlink\"", ExpectedException = ODataExpectedExceptions.ODataException("ODataJsonLightEntryAndFeedDeserializer_OpenPropertyWithoutValue", "OpenSkyline"), OnlyResponse = true }, new StreamPropertyTestCase { DebugDescription = "Undeclared stream property", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("NewSkyline", null, "http://odata.org/streamproperty/editlink", null, null), Json = "\"" 
+ JsonLightUtils.GetPropertyAnnotationName("NewSkyline", JsonLightConstants.ODataMediaEditLinkAnnotationName) + "\":\"http://odata.org/streamproperty/editlink\"", ExpectedException = ODataExpectedExceptions.ODataException("ValidationUtils_PropertyDoesNotExistOnType", "NewSkyline", "TestModel.CityType"), OnlyResponse = true }, new StreamPropertyTestCase { DebugDescription = "Stream property declared with non-stream type", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Name", null, "http://odata.org/streamproperty/editlink", null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Name", JsonLightConstants.ODataMediaEditLinkAnnotationName) + "\":\"http://odata.org/streamproperty/editlink\"", ExpectedException = ODataExpectedExceptions.ODataException("ODataJsonLightEntryAndFeedDeserializer_PropertyWithoutValueWithWrongType", "Name", "Edm.String"), OnlyResponse = true }, new StreamPropertyTestCase { DebugDescription = "Stream property with value", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, "http://odata.org/streamproperty/editlink", null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaEditLinkAnnotationName) + "\":\"http://odata.org/streamproperty/editlink\"," + "\"Skyline\":\"value\"", ExpectedException = ODataExpectedExceptions.ODataException("ODataJsonLightEntryAndFeedDeserializer_StreamPropertyWithValue", "Skyline"), OnlyResponse = true }, }; this.RunStreamPropertyTest(model, testCases); } [TestMethod, TestCategory("Reader.Json"), Variation(Description = "Verifies correct reading of stream properties (stream reference values) with fully specified metadata.")] public void StreamPropertyTestWithRelativeLinkUris() { IEdmModel model = TestModels.BuildTestModel(); var testCases = new[] { new StreamPropertyTestCase { DebugDescription = "Invalid edit link - non-URL", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, null, null, null), Json = 
"\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaEditLinkAnnotationName) + "\":\"xxx yyy zzz\"", ExpectedException = null, OnlyResponse = true, }, new StreamPropertyTestCase { DebugDescription = "Invalid read link - non-URL", ExpectedEntity = PayloadBuilder.Entity().StreamProperty("Skyline", null, null, null, null), Json = "\"" + JsonLightUtils.GetPropertyAnnotationName("Skyline", JsonLightConstants.ODataMediaReadLinkAnnotationName) + "\":\"xxx yyy zzz\"", ExpectedException = null, OnlyResponse = true, }, }; this.RunStreamPropertyTest(model, testCases); } private void RunStreamPropertyTest(IEdmModel model, IEnumerable<StreamPropertyTestCase> testCases) { var cityType = model.FindDeclaredType("TestModel.CityType").ToTypeReference(); var cities = model.EntityContainer.FindEntitySet("Cities"); IEnumerable<PayloadReaderTestDescriptor> testDescriptors = testCases.Select(testCase => { IEdmTypeReference entityType = testCase.OwningEntityType ?? cityType; EntityInstance entity = PayloadBuilder.Entity(entityType.FullName()).PrimitiveProperty("Id", 1) .JsonRepresentation( "{" + "\"" + JsonLightConstants.ODataPropertyAnnotationSeparator + JsonLightConstants.ODataContextAnnotationName + "\":\"http://odata.org/test/$metadata#TestModel.DefaultContainer.Cities/" + entityType.FullName() + "()/$entity\"," + "\"" + JsonLightConstants.ODataPropertyAnnotationSeparator + JsonLightConstants.ODataTypeAnnotationName + "\":\"" + entityType.FullName() + "\"," + "\"Id\": 1," + testCase.Json + "}") .ExpectedEntityType(entityType, cities); foreach (NamedStreamInstance streamProperty in testCase.ExpectedEntity.Properties.OfType<NamedStreamInstance>()) { entity.Add(streamProperty.DeepCopy()); } return new PayloadReaderTestDescriptor(this.Settings) { DebugDescription = testCase.DebugDescription, PayloadEdmModel = model, PayloadElement = entity, ExpectedException = testCase.ExpectedException, SkipTestConfiguration = tc => testCase.OnlyResponse ? 
tc.IsRequest : false }; }); this.CombinatorialEngineProvider.RunCombinations( testDescriptors, this.ReaderTestConfigurationProvider.JsonLightFormatConfigurations, (testDescriptor, testConfiguration) => { if (testConfiguration.IsRequest) { testDescriptor = new PayloadReaderTestDescriptor(testDescriptor) { ExpectedException = ODataExpectedExceptions.ODataException("ODataJsonLightEntryAndFeedDeserializer_StreamPropertyInRequest") }; } // These descriptors are already tailored specifically for Json Light and // do not require normalization. testDescriptor.TestDescriptorNormalizers.Clear(); var testConfigClone = new ReaderTestConfiguration(testConfiguration); testConfigClone.MessageReaderSettings.BaseUri = null; testDescriptor.RunTest(testConfigClone); }); } } }
mit
bsundsrud/aptly
cmd/repo_include.go
7511
package cmd import ( "bytes" "fmt" "github.com/smira/aptly/aptly" "github.com/smira/aptly/deb" "github.com/smira/aptly/query" "github.com/smira/aptly/utils" "github.com/smira/commander" "github.com/smira/flag" "os" "path/filepath" "text/template" ) func aptlyRepoInclude(cmd *commander.Command, args []string) error { var err error if len(args) < 1 { cmd.Usage() return commander.ErrCommandError } verifier, err := getVerifier(context.Flags()) if err != nil { return fmt.Errorf("unable to initialize GPG verifier: %s", err) } if verifier == nil { verifier = &utils.GpgVerifier{} } forceReplace := context.Flags().Lookup("force-replace").Value.Get().(bool) acceptUnsigned := context.Flags().Lookup("accept-unsigned").Value.Get().(bool) ignoreSignatures := context.Flags().Lookup("ignore-signatures").Value.Get().(bool) noRemoveFiles := context.Flags().Lookup("no-remove-files").Value.Get().(bool) repoTemplate, err := template.New("repo").Parse(context.Flags().Lookup("repo").Value.Get().(string)) if err != nil { return fmt.Errorf("error parsing -repo template: %s", err) } uploaders := (*deb.Uploaders)(nil) uploadersFile := context.Flags().Lookup("uploaders-file").Value.Get().(string) if uploadersFile != "" { uploaders, err = deb.NewUploadersFromFile(uploadersFile) if err != nil { return err } for i := range uploaders.Rules { uploaders.Rules[i].CompiledCondition, err = query.Parse(uploaders.Rules[i].Condition) if err != nil { return fmt.Errorf("error parsing query %s: %s", uploaders.Rules[i].Condition, err) } } } reporter := &aptly.ConsoleResultReporter{Progress: context.Progress()} var changesFiles, failedFiles, processedFiles []string changesFiles, failedFiles = deb.CollectChangesFiles(args, reporter) for _, path := range changesFiles { var changes *deb.Changes changes, err = deb.NewChanges(path) if err != nil { failedFiles = append(failedFiles, path) reporter.Warning("unable to process file %s: %s", path, err) continue } err = changes.VerifyAndParse(acceptUnsigned, 
ignoreSignatures, verifier) if err != nil { failedFiles = append(failedFiles, path) reporter.Warning("unable to process file %s: %s", changes.ChangesName, err) changes.Cleanup() continue } err = changes.Prepare() if err != nil { failedFiles = append(failedFiles, path) reporter.Warning("unable to process file %s: %s", changes.ChangesName, err) changes.Cleanup() continue } repoName := &bytes.Buffer{} err = repoTemplate.Execute(repoName, changes.Stanza) if err != nil { return fmt.Errorf("error applying template to repo: %s", err) } context.Progress().Printf("Loading repository %s for changes file %s...\n", repoName.String(), changes.ChangesName) repo, err := context.CollectionFactory().LocalRepoCollection().ByName(repoName.String()) if err != nil { failedFiles = append(failedFiles, path) reporter.Warning("unable to process file %s: %s", changes.ChangesName, err) changes.Cleanup() continue } currentUploaders := uploaders if repo.Uploaders != nil { currentUploaders = repo.Uploaders for i := range currentUploaders.Rules { currentUploaders.Rules[i].CompiledCondition, err = query.Parse(currentUploaders.Rules[i].Condition) if err != nil { return fmt.Errorf("error parsing query %s: %s", currentUploaders.Rules[i].Condition, err) } } } if currentUploaders != nil { if err = currentUploaders.IsAllowed(changes); err != nil { failedFiles = append(failedFiles, path) reporter.Warning("changes file skipped due to uploaders config: %s, keys %#v: %s", changes.ChangesName, changes.SignatureKeys, err) changes.Cleanup() continue } } err = context.CollectionFactory().LocalRepoCollection().LoadComplete(repo) if err != nil { return fmt.Errorf("unable to load repo: %s", err) } list, err := deb.NewPackageListFromRefList(repo.RefList(), context.CollectionFactory().PackageCollection(), context.Progress()) if err != nil { return fmt.Errorf("unable to load packages: %s", err) } packageFiles, _ := deb.CollectPackageFiles([]string{changes.TempDir}, reporter) var restriction deb.PackageQuery 
restriction, err = changes.PackageQuery() if err != nil { failedFiles = append(failedFiles, path) reporter.Warning("unable to process file %s: %s", changes.ChangesName, err) changes.Cleanup() continue } var processedFiles2, failedFiles2 []string processedFiles2, failedFiles2, err = deb.ImportPackageFiles(list, packageFiles, forceReplace, verifier, context.PackagePool(), context.CollectionFactory().PackageCollection(), reporter, restriction) if err != nil { return fmt.Errorf("unable to import package files: %s", err) } repo.UpdateRefList(deb.NewPackageRefListFromPackageList(list)) err = context.CollectionFactory().LocalRepoCollection().Update(repo) if err != nil { return fmt.Errorf("unable to save: %s", err) } err = changes.Cleanup() if err != nil { return err } for _, file := range failedFiles2 { failedFiles = append(failedFiles, filepath.Join(changes.BasePath, filepath.Base(file))) } for _, file := range processedFiles2 { processedFiles = append(processedFiles, filepath.Join(changes.BasePath, filepath.Base(file))) } processedFiles = append(processedFiles, path) } if !noRemoveFiles { processedFiles = utils.StrSliceDeduplicate(processedFiles) for _, file := range processedFiles { err := os.Remove(file) if err != nil { return fmt.Errorf("unable to remove file: %s", err) } } } if len(failedFiles) > 0 { context.Progress().ColoredPrintf("@y[!]@| @!Some files were skipped due to errors:@|") for _, file := range failedFiles { context.Progress().ColoredPrintf(" %s", file) } return fmt.Errorf("some files failed to be added") } return err } func makeCmdRepoInclude() *commander.Command { cmd := &commander.Command{ Run: aptlyRepoInclude, UsageLine: "include <file.changes>|<directory> ...", Short: "add packages to local repositories based on .changes files", Long: ` Command include looks for .changes files in list of arguments or specified directories. 
Each .changes file is verified, parsed, referenced files are put into separate temporary directory and added into local repository. Successfully imported files are removed by default. Additionally uploads could be restricted with <uploaders.json> file. Rules in this file control uploads based on GPG key ID of .changes file signature and queries on .changes file fields. Example: $ aptly repo include -repo=foo-release incoming/ `, Flag: *flag.NewFlagSet("aptly-repo-include", flag.ExitOnError), } cmd.Flag.Bool("no-remove-files", false, "don't remove files that have been imported successfully into repository") cmd.Flag.Bool("force-replace", false, "when adding package that conflicts with existing package, remove existing package") cmd.Flag.String("repo", "{{.Distribution}}", "which repo should files go to, defaults to Distribution field of .changes file") cmd.Flag.Var(&keyRingsFlag{}, "keyring", "gpg keyring to use when verifying Release file (could be specified multiple times)") cmd.Flag.Bool("ignore-signatures", false, "disable verification of .changes file signature") cmd.Flag.Bool("accept-unsigned", false, "accept unsigned .changes files") cmd.Flag.String("uploaders-file", "", "path to uploaders.json file") return cmd }
mit
daniellienert/flow-development-collection
Neos.FluidAdaptor/Tests/Unit/ViewHelpers/Fixtures/IfThenElseFixture.html
140
{namespace f=Neos\FluidAdaptor\ViewHelpers}<f:if condition="{condition}"> Do not display <f:then>YEP</f:then> <f:else>NOPE</f:else> </f:if>
mit
sinfin/folio
vendor/assets/bower_components/emitter-es6/src/index.js
3401
/** * Creates a new instance of Emitter. * @class * @returns {Object} Returns a new instance of Emitter. * @example * // Creates a new instance of Emitter. * var Emitter = require('emitter'); * * var emitter = new Emitter(); */ class Emitter { /** * Adds a listener to the collection for the specified event. * @memberof! Emitter.prototype * @function * @param {String} event - The event name. * @param {Function} listener - A listener function to add. * @returns {Object} Returns an instance of Emitter. * @example * // Add an event listener to "foo" event. * emitter.on('foo', listener); */ on(event, listener) { // Use the current collection or create it. this._eventCollection = this._eventCollection || {}; // Use the current collection of an event or create it. this._eventCollection[event] = this._eventCollection[event] || []; // Appends the listener into the collection of the given event this._eventCollection[event].push(listener); return this; } /** * Adds a listener to the collection for the specified event that will be called only once. * @memberof! Emitter.prototype * @function * @param {String} event - The event name. * @param {Function} listener - A listener function to add. * @returns {Object} Returns an instance of Emitter. * @example * // Will add an event handler to "foo" event once. * emitter.once('foo', listener); */ once(event, listener) { const self = this; function fn() { self.off(event, fn); listener.apply(this, arguments); } fn.listener = listener; this.on(event, fn); return this; } /** * Removes a listener from the collection for the specified event. * @memberof! Emitter.prototype * @function * @param {String} event - The event name. * @param {Function} listener - A listener function to remove. * @returns {Object} Returns an instance of Emitter. * @example * // Remove a given listener. * emitter.off('foo', listener); */ off(event, listener) { let listeners; // Defines listeners value. 
if (!this._eventCollection || !(listeners = this._eventCollection[event])) { return this; } listeners.forEach((fn, i) => { if (fn === listener || fn.listener === listener) { // Removes the given listener. listeners.splice(i, 1); } }); // Removes an empty event collection. if (listeners.length === 0) { delete this._eventCollection[event]; } return this; } /** * Execute each item in the listener collection in order with the specified data. * @memberof! Emitter.prototype * @function * @param {String} event - The name of the event you want to emit. * @param {...Object} data - Data to pass to the listeners. * @returns {Object} Returns an instance of Emitter. * @example * // Emits the "foo" event with 'param1' and 'param2' as arguments. * emitter.emit('foo', 'param1', 'param2'); */ emit(event, ...args) { let listeners; // Defines listeners value. if (!this._eventCollection || !(listeners = this._eventCollection[event])) { return this; } // Clone listeners listeners = listeners.slice(0); listeners.forEach(fn => fn.apply(this, args)); return this; } } /** * Exports Emitter */ export default Emitter;
mit
JHand93/WebPerformanceTestSuite
webpagetest-charts-api/node_modules/cheerio/test/render.js
2628
var expect = require('expect.js'), defaultOpts = require('..').prototype.options, _ = require('lodash'), parse = require('../lib/parse'), render = require('../lib/render'); var html = function(str, options) { options = _.defaults(options || {}, defaultOpts); var dom = parse(str, options); return render(dom); }; var xml = function(str, options) { options = _.defaults(options || {}, defaultOpts); options.xmlMode = true; var dom = parse(str, options); return render(dom, options); }; describe('render', function() { describe('(html)', function() { it('should render <br /> tags correctly', function() { var str = '<br />'; expect(html(str)).to.equal('<br>'); }); it('should handle double quotes within single quoted attributes properly', function() { var str = '<hr class=\'an "edge" case\' />'; expect(html(str)).to.equal('<hr class="an &#x22;edge&#x22; case">'); }); it('should retain encoded HTML content within attributes', function() { var str = '<hr class="cheerio &amp; node = happy parsing" />'; expect(html(str)).to.equal('<hr class="cheerio &#x26; node = happy parsing">'); }); it('should shorten the "checked" attribute when it contains the value "checked"', function() { var str = '<input checked/>'; expect(html(str)).to.equal('<input checked>'); }); it('should not shorten the "name" attribute when it contains the value "name"', function() { var str = '<input name="name"/>'; expect(html(str)).to.equal('<input name="name">'); }); it('should render comments correctly', function() { var str = '<!-- comment -->'; expect(html(str)).to.equal('<!-- comment -->'); }); it('should render whitespace by default', function() { var str = '<a href="./haha.html">hi</a> <a href="./blah.html">blah</a>'; expect(html(str)).to.equal(str); }); it('should normalize whitespace if specified', function() { var str = '<a href="./haha.html">hi</a> <a href="./blah.html">blah </a>'; expect(html(str, { normalizeWhitespace: true })).to.equal('<a href="./haha.html">hi</a> <a href="./blah.html">blah 
</a>'); }); it('should preserve multiple hyphens in data attributes', function() { var str = '<div data-foo-bar-baz="value"></div>'; expect(html(str)).to.equal('<div data-foo-bar-baz="value"></div>'); }); it('should render CDATA correctly', function() { var str = '<a> <b> <![CDATA[ asdf&asdf ]]> <c/> <![CDATA[ asdf&asdf ]]> </b> </a>'; expect(xml(str)).to.equal(str); }); }); });
mit
pinkowitz/objc-mailgun
Docs/html/Classes/MGMessage.html
54336
<!DOCTYPE HTML> <html> <head> <meta http-equiv="Content-Type" content="html/html; charset=utf-8" /> <title>MGMessage Class Reference</title> <meta id="xcode-display" name="xcode-display" content="render"/> <link rel="stylesheet" type="text/css" href="../css/styles.css" media="all" /> <link rel="stylesheet" type="text/css" media="print" href="../css/stylesPrint.css" /> <meta name="generator" content="appledoc 2.1 (build 858)" /> </head> <body> <header id="top_header"> <div id="library" class="hideInXcode"> <h1><a id="libraryTitle" href="../index.html">Mailgun SDK </a></h1> <a id="developerHome" href="../index.html">Rackspace Hosting</a> </div> <div id="title" role="banner"> <h1 class="hideInXcode">MGMessage Class Reference</h1> </div> <ul id="headerButtons" role="toolbar"> <li id="toc_button"> <button aria-label="Show Table of Contents" role="checkbox" class="open" id="table_of_contents"><span class="disclosure"></span>Table of Contents</button> </li> <li id="jumpto_button" role="navigation"> <select id="jumpTo"> <option value="top">Jump To&#133;</option> <option value="tasks">Tasks</option> <option value="properties">Properties</option> <option value="//api/name/attachments">&nbsp;&nbsp;&nbsp;&nbsp;attachments</option> <option value="//api/name/bcc">&nbsp;&nbsp;&nbsp;&nbsp;bcc</option> <option value="//api/name/campaign">&nbsp;&nbsp;&nbsp;&nbsp;campaign</option> <option value="//api/name/cc">&nbsp;&nbsp;&nbsp;&nbsp;cc</option> <option value="//api/name/deliverAt">&nbsp;&nbsp;&nbsp;&nbsp;deliverAt</option> <option value="//api/name/dkim">&nbsp;&nbsp;&nbsp;&nbsp;dkim</option> <option value="//api/name/from">&nbsp;&nbsp;&nbsp;&nbsp;from</option> <option value="//api/name/headers">&nbsp;&nbsp;&nbsp;&nbsp;headers</option> <option value="//api/name/html">&nbsp;&nbsp;&nbsp;&nbsp;html</option> <option value="//api/name/inlineAttachments">&nbsp;&nbsp;&nbsp;&nbsp;inlineAttachments</option> <option value="//api/name/subject">&nbsp;&nbsp;&nbsp;&nbsp;subject</option> <option 
value="//api/name/tags">&nbsp;&nbsp;&nbsp;&nbsp;tags</option> <option value="//api/name/testing">&nbsp;&nbsp;&nbsp;&nbsp;testing</option> <option value="//api/name/text">&nbsp;&nbsp;&nbsp;&nbsp;text</option> <option value="//api/name/to">&nbsp;&nbsp;&nbsp;&nbsp;to</option> <option value="//api/name/trackClicks">&nbsp;&nbsp;&nbsp;&nbsp;trackClicks</option> <option value="//api/name/trackOpens">&nbsp;&nbsp;&nbsp;&nbsp;trackOpens</option> <option value="//api/name/tracking">&nbsp;&nbsp;&nbsp;&nbsp;tracking</option> <option value="//api/name/variables">&nbsp;&nbsp;&nbsp;&nbsp;variables</option> <option value="class_methods">Class Methods</option> <option value="//api/name/messageFrom:to:subject:body:">&nbsp;&nbsp;&nbsp;&nbsp;+ messageFrom:to:subject:body:</option> <option value="instance_methods">Instance Methods</option> <option value="//api/name/addAttachment:withName:type:">&nbsp;&nbsp;&nbsp;&nbsp;- addAttachment:withName:type:</option> <option value="//api/name/addBcc:">&nbsp;&nbsp;&nbsp;&nbsp;- addBcc:</option> <option value="//api/name/addCc:">&nbsp;&nbsp;&nbsp;&nbsp;- addCc:</option> <option value="//api/name/addHeader:value:">&nbsp;&nbsp;&nbsp;&nbsp;- addHeader:value:</option> <option value="//api/name/addImage:withName:type:">&nbsp;&nbsp;&nbsp;&nbsp;- addImage:withName:type:</option> <option value="//api/name/addImage:withName:type:inline:">&nbsp;&nbsp;&nbsp;&nbsp;- addImage:withName:type:inline:</option> <option value="//api/name/addRecipient:">&nbsp;&nbsp;&nbsp;&nbsp;- addRecipient:</option> <option value="//api/name/addTag:">&nbsp;&nbsp;&nbsp;&nbsp;- addTag:</option> <option value="//api/name/addTags:">&nbsp;&nbsp;&nbsp;&nbsp;- addTags:</option> <option value="//api/name/addVariable:value:">&nbsp;&nbsp;&nbsp;&nbsp;- addVariable:value:</option> <option value="//api/name/dictionary">&nbsp;&nbsp;&nbsp;&nbsp;- dictionary</option> <option value="//api/name/initWithFrom:to:subject:body:">&nbsp;&nbsp;&nbsp;&nbsp;- initWithFrom:to:subject:body:</option> </select> 
</li> </ul> </header> <nav id="tocContainer" class="isShowingTOC"> <ul id="toc" role="tree"> <li role="treeitem" id="task_treeitem"><span class="nodisclosure"></span><span class="sectionName"><a href="#tasks">Tasks</a></span><ul> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#task_Managing Message Setup">Managing Message Setup</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#task_Mailgun Message Configuration">Mailgun Message Configuration</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#task_Creating and Initializing a Mailgun Message">Creating and Initializing a Mailgun Message</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#task_Adding Message Metadata">Adding Message Metadata</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#task_Adding Additional Recipients">Adding Additional Recipients</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#task_Adding Attachments">Adding Attachments</a></span></li> </ul></li> <li role="treeitem" class="children"><span class="disclosure"></span><span class="sectionName"><a href="#properties">Properties</a></span><ul> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/attachments">attachments</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/bcc">bcc</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/campaign">campaign</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/cc">cc</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/deliverAt">deliverAt</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/dkim">dkim</a></span></li> 
<li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/from">from</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/headers">headers</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/html">html</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/inlineAttachments">inlineAttachments</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/subject">subject</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/tags">tags</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/testing">testing</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/text">text</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/to">to</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/trackClicks">trackClicks</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/trackOpens">trackOpens</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/tracking">tracking</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/variables">variables</a></span></li> </ul></li> <li role="treeitem" class="children"><span class="disclosure"></span><span class="sectionName"><a href="#class_methods">Class Methods</a></span><ul> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/messageFrom:to:subject:body:">messageFrom:to:subject:body:</a></span></li> </ul></li> <li role="treeitem" class="children"><span class="disclosure"></span><span class="sectionName"><a 
href="#instance_methods">Instance Methods</a></span><ul> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/addAttachment:withName:type:">addAttachment:withName:type:</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/addBcc:">addBcc:</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/addCc:">addCc:</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/addHeader:value:">addHeader:value:</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/addImage:withName:type:">addImage:withName:type:</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/addImage:withName:type:inline:">addImage:withName:type:inline:</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/addRecipient:">addRecipient:</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/addTag:">addTag:</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/addTags:">addTags:</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/addVariable:value:">addVariable:value:</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/dictionary">dictionary</a></span></li> <li><span class="nodisclosure"></span><span class="sectionName"><a href="#//api/name/initWithFrom:to:subject:body:">initWithFrom:to:subject:body:</a></span></li> </ul></li> </ul> </nav> <article> <div id="contents" class="isShowingTOC" role="main"> <a title="MGMessage Class Reference" name="top"></a> <div class="main-navigation navigation-top"> <ul> <li><a href="../index.html">Index</a></li> <li><a href="../hierarchy.html">Hierarchy</a></li> </ul> </div> <div 
id="header"> <div class="section-header"> <h1 class="title title-header">MGMessage Class Reference</h1> </div> </div> <div id="container"> <div class="section section-specification"><table cellspacing="0"><tbody> <tr> <td class="specification-title">Inherits from</td> <td class="specification-value">NSObject</td> </tr><tr> <td class="specification-title">Declared in</td> <td class="specification-value">MGMessage.h</td> </tr> </tbody></table></div> <div class="section section-tasks"> <a title="Tasks" name="tasks"></a> <h2 class="subtitle subtitle-tasks">Tasks</h2> <a title="Managing Message Setup" name="task_Managing Message Setup"></a> <h3 class="subsubtitle task-title">Managing Message Setup</h3> <ul class="task-list"> <li> <span class="tooltip"> <code><a href="#//api/name/from">&nbsp;&nbsp;from</a></code> <span class="tooltip"><p>Email address for From header</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/to">&nbsp;&nbsp;to</a></code> <span class="tooltip"><p>Email address of the recipient(s). Example: &ldquo;Bob <a href="&#x6d;&#97;&#x69;&#108;&#116;&#x6f;&#58;&#98;&#x6f;&#98;&#x40;&#x68;&#x6f;&#115;&#116;&#x2e;&#x63;&#111;&#109;">&#98;&#x6f;&#x62;&#x40;&#104;&#111;&#x73;&#x74;&#46;&#99;&#x6f;&#109;</a>&rdquo;.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/cc">&nbsp;&nbsp;cc</a></code> <span class="tooltip"><p>Email address of the CC recipient(s). Example: &ldquo;Bob <a href="&#109;&#x61;&#x69;&#108;&#x74;&#111;&#58;&#98;&#x6f;&#98;&#x40;&#104;&#111;&#x73;&#x74;&#x2e;&#99;&#x6f;&#x6d;">&#98;&#111;&#98;&#64;&#104;&#111;&#x73;&#116;&#x2e;&#99;&#x6f;&#x6d;</a>&rdquo;.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/bcc">&nbsp;&nbsp;bcc</a></code> <span class="tooltip"><p>Email address of the BCC recipient(s). 
Example: &ldquo;Bob <a href="&#109;&#x61;&#105;&#108;&#116;&#x6f;&#58;&#98;&#111;&#x62;&#x40;&#x68;&#x6f;&#115;&#116;&#46;&#x63;&#x6f;&#109;">&#x62;&#x6f;&#98;&#x40;&#x68;&#x6f;&#x73;&#x74;&#46;&#x63;&#x6f;&#x6d;</a>&rdquo;.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/subject">&nbsp;&nbsp;subject</a></code> <span class="tooltip"><p>Message subject</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/text">&nbsp;&nbsp;text</a></code> <span class="tooltip"><p>Body of the message, text version</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/html">&nbsp;&nbsp;html</a></code> <span class="tooltip"><p>Body of the message. HTML version</p></span> </span> <span class="task-item-suffix">property</span> </li> </ul> <a title="Mailgun Message Configuration" name="task_Mailgun Message Configuration"></a> <h3 class="subsubtitle task-title">Mailgun Message Configuration</h3> <ul class="task-list"> <li> <span class="tooltip"> <code><a href="#//api/name/campaign">&nbsp;&nbsp;campaign</a></code> <span class="tooltip"><p>ID of the campaign the message belongs <a href="#//api/name/to">to</a>. See <a href="http://documentation.mailgun.net/user_manual.html#um-campaign-analytics">Campaign Analytics</a> for details.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/tags">&nbsp;&nbsp;tags</a></code> <span class="tooltip"><p>An <code>NSArray</code> of tag strings. 
See <a href="http://documentation.mailgun.net/user_manual.html#tagging">Tagging</a> for more information.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/headers">&nbsp;&nbsp;headers</a></code> <span class="tooltip"><p><code>NSMutableDictionary</code> of custom MIME headers <a href="#//api/name/to">to</a> the message. For example, <code>Reply-To</code> <a href="#//api/name/to">to</a> specify a Reply-To address.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/variables">&nbsp;&nbsp;variables</a></code> <span class="tooltip"><p><code>NSMutableDictionary</code> for attaching custom JSON data <a href="#//api/name/to">to</a> the message. See <a href="http://documentation.mailgun.net/user_manual.html#manual-customdata">Attaching Data to Messages</a> for more information.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/attachments">&nbsp;&nbsp;attachments</a></code> <span class="tooltip"><p><code>NSMutableDictionary</code> of attachments <a href="#//api/name/to">to</a> the message.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/inlineAttachments">&nbsp;&nbsp;inlineAttachments</a></code> <span class="tooltip"><p><code>NSMutableDictionary</code> of inline message <a href="#//api/name/attachments">attachments</a>.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/dkim">&nbsp;&nbsp;dkim</a></code> <span class="tooltip"><p>Enables/disables DKIM signatures on per-message basis.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/testing">&nbsp;&nbsp;testing</a></code> <span class="tooltip"><p>Enables sending in test mode. 
See <a href="http://documentation.mailgun.net/user_manual.html#manual-testmode">Sending in Test Mode</a></p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/tracking">&nbsp;&nbsp;tracking</a></code> <span class="tooltip"><p>Toggles tracking on a per-message basis, see <a href="http://documentation.mailgun.net/user_manual.html#tracking-messages">Tracking Messages</a> for details.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/trackOpens">&nbsp;&nbsp;trackOpens</a></code> <span class="tooltip"><p>Toggles opens <a href="#//api/name/tracking">tracking</a> on a per-message basis. Has higher priority than domain-level setting.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/deliverAt">&nbsp;&nbsp;deliverAt</a></code> <span class="tooltip"><p>An <code>NSDate</code> representing the desired time of delivery.</p></span> </span> <span class="task-item-suffix">property</span> </li><li> <span class="tooltip"> <code><a href="#//api/name/trackClicks">&nbsp;&nbsp;trackClicks</a></code> <span class="tooltip"><p>Toggles clicks <a href="#//api/name/tracking">tracking</a> on a per-message basis. 
Has higher priority than domain-level setting.</p></span> </span> <span class="task-item-suffix">property</span> </li> </ul> <a title="Creating and Initializing a Mailgun Message" name="task_Creating and Initializing a Mailgun Message"></a> <h3 class="subsubtitle task-title">Creating and Initializing a Mailgun Message</h3> <ul class="task-list"> <li> <span class="tooltip"> <code><a href="#//api/name/messageFrom:to:subject:body:">+&nbsp;messageFrom:to:subject:body:</a></code> <span class="tooltip"><p>Creates and initializes a message with the provided details.</p></span> </span> </li><li> <span class="tooltip"> <code><a href="#//api/name/initWithFrom:to:subject:body:">&ndash;&nbsp;initWithFrom:to:subject:body:</a></code> <span class="tooltip"><p>The designated initializer <a href="#//api/name/to">to</a> create a message with the provided details.</p></span> </span> </li><li> <span class="tooltip"> <code><a href="#//api/name/dictionary">&ndash;&nbsp;dictionary</a></code> </span> </li> </ul> <a title="Adding Message Metadata" name="task_Adding Message Metadata"></a> <h3 class="subsubtitle task-title">Adding Message Metadata</h3> <ul class="task-list"> <li> <span class="tooltip"> <code><a href="#//api/name/addTag:">&ndash;&nbsp;addTag:</a></code> <span class="tooltip"><p>Adds a single tag <a href="#//api/name/to">to</a> this recevier&rsquo;s metadata.</p></span> </span> </li><li> <span class="tooltip"> <code><a href="#//api/name/addTags:">&ndash;&nbsp;addTags:</a></code> <span class="tooltip"><p>Adds multiple <a href="#//api/name/tags">tags</a> <a href="#//api/name/to">to</a> the recevier&rsquo;s metadata.</p></span> </span> </li><li> <span class="tooltip"> <code><a href="#//api/name/addHeader:value:">&ndash;&nbsp;addHeader:value:</a></code> <span class="tooltip"><p>Adds a header and value <a href="#//api/name/to">to</a> the receiver&rsquo;s metadata.</p></span> </span> </li><li> <span class="tooltip"> <code><a 
href="#//api/name/addVariable:value:">&ndash;&nbsp;addVariable:value:</a></code> <span class="tooltip"><p>Adds a variable and a value <a href="#//api/name/to">to</a> the receiver.</p></span> </span> </li> </ul> <a title="Adding Additional Recipients" name="task_Adding Additional Recipients"></a> <h3 class="subsubtitle task-title">Adding Additional Recipients</h3> <ul class="task-list"> <li> <span class="tooltip"> <code><a href="#//api/name/addRecipient:">&ndash;&nbsp;addRecipient:</a></code> <span class="tooltip"><p>Adds an additional recipient <a href="#//api/name/to">to</a> the receiver.</p></span> </span> </li><li> <span class="tooltip"> <code><a href="#//api/name/addCc:">&ndash;&nbsp;addCc:</a></code> <span class="tooltip"><p>Adds a CC recipient <a href="#//api/name/to">to</a> the receiver.</p></span> </span> </li><li> <span class="tooltip"> <code><a href="#//api/name/addBcc:">&ndash;&nbsp;addBcc:</a></code> <span class="tooltip"><p>Adds a BCC recipient <a href="#//api/name/to">to</a> the receiver.</p></span> </span> </li> </ul> <a title="Adding Attachments" name="task_Adding Attachments"></a> <h3 class="subsubtitle task-title">Adding Attachments</h3> <ul class="task-list"> <li> <span class="tooltip"> <code><a href="#//api/name/addAttachment:withName:type:">&ndash;&nbsp;addAttachment:withName:type:</a></code> <span class="tooltip"><p>Adds an attachment <a href="#//api/name/to">to</a> the receiver.</p></span> </span> </li><li> <span class="tooltip"> <code><a href="#//api/name/addImage:withName:type:">&ndash;&nbsp;addImage:withName:type:</a></code> <span class="tooltip"><p>Adds a <code>UIImage</code> as an attachment <a href="#//api/name/to">to</a> the receiver.</p></span> </span> </li><li> <span class="tooltip"> <code><a href="#//api/name/addImage:withName:type:inline:">&ndash;&nbsp;addImage:withName:type:inline:</a></code> <span class="tooltip"><p>Adds a <code>UIImage</code> as an attachment <a href="#//api/name/to">to</a> the receiver but inline in the message 
body.</p></span> </span> </li> </ul> </div> <div class="section section-methods"> <a title="Properties" name="properties"></a> <h2 class="subtitle subtitle-methods">Properties</h2> <div class="section-method"> <a name="//api/name/attachments" title="attachments"></a> <h3 class="subsubtitle method-title">attachments</h3> <div class="method-subsection brief-description"> <p><code>NSMutableDictionary</code> of attachments <a href="#//api/name/to">to</a> the message.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong, readonly) NSMutableDictionary *attachments</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/bcc" title="bcc"></a> <h3 class="subsubtitle method-title">bcc</h3> <div class="method-subsection brief-description"> <p>Email address of the BCC recipient(s). Example: &ldquo;Bob <a href="&#109;&#x61;&#105;&#108;&#116;&#x6f;&#58;&#98;&#111;&#x62;&#x40;&#x68;&#x6f;&#115;&#116;&#46;&#x63;&#x6f;&#109;">&#x62;&#x6f;&#98;&#x40;&#x68;&#x6f;&#x73;&#x74;&#46;&#x63;&#x6f;&#x6d;</a>&rdquo;.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong) NSArray *bcc</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/campaign" title="campaign"></a> <h3 class="subsubtitle method-title">campaign</h3> <div class="method-subsection brief-description"> <p>ID of the campaign the message belongs <a href="#//api/name/to">to</a>. 
See <a href="http://documentation.mailgun.net/user_manual.html#um-campaign-analytics">Campaign Analytics</a> for details.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong) NSString *campaign</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/cc" title="cc"></a> <h3 class="subsubtitle method-title">cc</h3> <div class="method-subsection brief-description"> <p>Email address of the CC recipient(s). Example: &ldquo;Bob <a href="&#109;&#x61;&#x69;&#108;&#x74;&#111;&#58;&#98;&#x6f;&#98;&#x40;&#104;&#111;&#x73;&#x74;&#x2e;&#99;&#x6f;&#x6d;">&#98;&#111;&#98;&#64;&#104;&#111;&#x73;&#116;&#x2e;&#99;&#x6f;&#x6d;</a>&rdquo;.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong) NSArray *cc</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/deliverAt" title="deliverAt"></a> <h3 class="subsubtitle method-title">deliverAt</h3> <div class="method-subsection brief-description"> <p>An <code>NSDate</code> representing the desired time of delivery.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong) NSDate *deliverAt</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/dkim" title="dkim"></a> <h3 class="subsubtitle method-title">dkim</h3> <div class="method-subsection brief-description"> <p>Enables/disables DKIM signatures on per-message basis.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic) 
BOOL *dkim</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/from" title="from"></a> <h3 class="subsubtitle method-title">from</h3> <div class="method-subsection brief-description"> <p>Email address for From header</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong) NSString *from</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/headers" title="headers"></a> <h3 class="subsubtitle method-title">headers</h3> <div class="method-subsection brief-description"> <p><code>NSMutableDictionary</code> of custom MIME headers <a href="#//api/name/to">to</a> the message. For example, <code>Reply-To</code> <a href="#//api/name/to">to</a> specify a Reply-To address.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong, readonly) NSMutableDictionary *headers</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/html" title="html"></a> <h3 class="subsubtitle method-title">html</h3> <div class="method-subsection brief-description"> <p>Body of the message. 
HTML version</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong) NSString *html</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/inlineAttachments" title="inlineAttachments"></a> <h3 class="subsubtitle method-title">inlineAttachments</h3> <div class="method-subsection brief-description"> <p><code>NSMutableDictionary</code> of inline message <a href="#//api/name/attachments">attachments</a>.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong, readonly) NSMutableDictionary *inlineAttachments</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/subject" title="subject"></a> <h3 class="subsubtitle method-title">subject</h3> <div class="method-subsection brief-description"> <p>Message subject</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong) NSString *subject</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/tags" title="tags"></a> <h3 class="subsubtitle method-title">tags</h3> <div class="method-subsection brief-description"> <p>An <code>NSArray</code> of tag strings. 
See <a href="http://documentation.mailgun.net/user_manual.html#tagging">Tagging</a> for more information.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong, readonly) NSArray *tags</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/testing" title="testing"></a> <h3 class="subsubtitle method-title">testing</h3> <div class="method-subsection brief-description"> <p>Enables sending in test mode. See <a href="http://documentation.mailgun.net/user_manual.html#manual-testmode">Sending in Test Mode</a></p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic) BOOL *testing</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/text" title="text"></a> <h3 class="subsubtitle method-title">text</h3> <div class="method-subsection brief-description"> <p>Body of the message, text version</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong) NSString *text</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/to" title="to"></a> <h3 class="subsubtitle method-title">to</h3> <div class="method-subsection brief-description"> <p>Email address of the recipient(s). 
Example: &ldquo;Bob <a href="&#x6d;&#97;&#x69;&#108;&#116;&#x6f;&#58;&#98;&#x6f;&#98;&#x40;&#x68;&#x6f;&#115;&#116;&#x2e;&#x63;&#111;&#109;">&#98;&#x6f;&#x62;&#x40;&#104;&#111;&#x73;&#x74;&#46;&#99;&#x6f;&#109;</a>&rdquo;.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong) NSArray *to</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/trackClicks" title="trackClicks"></a> <h3 class="subsubtitle method-title">trackClicks</h3> <div class="method-subsection brief-description"> <p>Toggles clicks <a href="#//api/name/tracking">tracking</a> on a per-message basis. Has higher priority than domain-level setting.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic) ClickTrackingType trackClicks</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/trackOpens" title="trackOpens"></a> <h3 class="subsubtitle method-title">trackOpens</h3> <div class="method-subsection brief-description"> <p>Toggles opens <a href="#//api/name/tracking">tracking</a> on a per-message basis. 
Has higher priority than domain-level setting.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic) BOOL *trackOpens</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/tracking" title="tracking"></a> <h3 class="subsubtitle method-title">tracking</h3> <div class="method-subsection brief-description"> <p>Toggles tracking on a per-message basis, see <a href="http://documentation.mailgun.net/user_manual.html#tracking-messages">Tracking Messages</a> for details.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic) BOOL *tracking</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/variables" title="variables"></a> <h3 class="subsubtitle method-title">variables</h3> <div class="method-subsection brief-description"> <p><code>NSMutableDictionary</code> for attaching custom JSON data <a href="#//api/name/to">to</a> the message. 
See <a href="http://documentation.mailgun.net/user_manual.html#manual-customdata">Attaching Data to Messages</a> for more information.</p> </div> <div class="method-subsection method-declaration"><code>@property (nonatomic, strong, readonly) NSMutableDictionary *variables</code></div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> </div> <div class="section section-methods"> <a title="Class Methods" name="class_methods"></a> <h2 class="subtitle subtitle-methods">Class Methods</h2> <div class="section-method"> <a name="//api/name/messageFrom:to:subject:body:" title="messageFrom:to:subject:body:"></a> <h3 class="subsubtitle method-title">messageFrom:to:subject:body:</h3> <div class="method-subsection brief-description"> <p>Creates and initializes a message with the provided details.</p> </div> <div class="method-subsection method-declaration"><code>+ (instancetype)messageFrom:(NSString *)<em>from</em> to:(NSString *)<em>to</em> subject:(NSString *)<em>subject</em> body:(NSString *)<em>body</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>from</em></dt> <dd><p>The message sender. Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>to</em></dt> <dd><p>The message recipient. Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>subject</em></dt> <dd><p>The message <a href="#//api/name/subject">subject</a>. 
Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>body</em></dt> <dd><p>The body of the message.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> </div> <div class="section section-methods"> <a title="Instance Methods" name="instance_methods"></a> <h2 class="subtitle subtitle-methods">Instance Methods</h2> <div class="section-method"> <a name="//api/name/addAttachment:withName:type:" title="addAttachment:withName:type:"></a> <h3 class="subsubtitle method-title">addAttachment:withName:type:</h3> <div class="method-subsection brief-description"> <p>Adds an attachment <a href="#//api/name/to">to</a> the receiver.</p> </div> <div class="method-subsection method-declaration"><code>- (void)addAttachment:(NSData *)<em>data</em> withName:(NSString *)<em>name</em> type:(NSString *)<em>type</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>data</em></dt> <dd><p>The <code>NSData</code> <a href="#//api/name/to">to</a> be attached <a href="#//api/name/to">to</a> the message. Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>name</em></dt> <dd><p>The name used <a href="#//api/name/to">to</a> identify this attachment in the message. Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>type</em></dt> <dd><p>The MIME type used <a href="#//api/name/to">to</a> describe the contents of <code>data</code>. 
Must not be nil.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/addBcc:" title="addBcc:"></a> <h3 class="subsubtitle method-title">addBcc:</h3> <div class="method-subsection brief-description"> <p>Adds a BCC recipient <a href="#//api/name/to">to</a> the receiver.</p> </div> <div class="method-subsection method-declaration"><code>- (void)addBcc:(NSString *)<em>recipient</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>recipient</em></dt> <dd><p>The recipient <a href="#//api/name/to">to</a> add <a href="#//api/name/to">to</a> the BCC field of the message. Must not be nil.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/addCc:" title="addCc:"></a> <h3 class="subsubtitle method-title">addCc:</h3> <div class="method-subsection brief-description"> <p>Adds a CC recipient <a href="#//api/name/to">to</a> the receiver.</p> </div> <div class="method-subsection method-declaration"><code>- (void)addCc:(NSString *)<em>recipient</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>recipient</em></dt> <dd><p>The recipient <a href="#//api/name/to">to</a> add <a href="#//api/name/to">to</a> the CC field of the message. 
Must not be nil.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/addHeader:value:" title="addHeader:value:"></a> <h3 class="subsubtitle method-title">addHeader:value:</h3> <div class="method-subsection brief-description"> <p>Adds a header and value <a href="#//api/name/to">to</a> the receiver&rsquo;s metadata.</p> </div> <div class="method-subsection method-declaration"><code>- (void)addHeader:(NSString *)<em>header</em> value:(NSString *)<em>value</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>header</em></dt> <dd><p>The header identifier <a href="#//api/name/to">to</a> add. Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>value</em></dt> <dd><p>The value for the identifier. 
Must not be nil.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/addImage:withName:type:" title="addImage:withName:type:"></a> <h3 class="subsubtitle method-title">addImage:withName:type:</h3> <div class="method-subsection brief-description"> <p>Adds a <code>UIImage</code> as an attachment <a href="#//api/name/to">to</a> the receiver.</p> </div> <div class="method-subsection method-declaration"><code>- (void)addImage:(UIImage *)<em>image</em> withName:(NSString *)<em>name</em> type:(ImageAttachmentType)<em>type</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>image</em></dt> <dd><p>The <code>UIImage</code> <a href="#//api/name/to">to</a> be attached <a href="#//api/name/to">to</a> the message. Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>name</em></dt> <dd><p>The name used <a href="#//api/name/to">to</a> identify this image attachment in the message. 
Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>type</em></dt> <dd><p>The <code>ImageAttachmentType</code> <a href="#//api/name/to">to</a> identify this image as a JPEG or a PNG.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/addImage:withName:type:inline:" title="addImage:withName:type:inline:"></a> <h3 class="subsubtitle method-title">addImage:withName:type:inline:</h3> <div class="method-subsection brief-description"> <p>Adds a <code>UIImage</code> as an attachment <a href="#//api/name/to">to</a> the receiver but inline in the message body.</p> </div> <div class="method-subsection method-declaration"><code>- (void)addImage:(UIImage *)<em>image</em> withName:(NSString *)<em>name</em> type:(ImageAttachmentType)<em>type</em> inline:(BOOL)<em>inlineAttachment</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>image</em></dt> <dd><p>The <code>UIImage</code> <a href="#//api/name/to">to</a> be attached <a href="#//api/name/to">to</a> the message. Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>name</em></dt> <dd><p>The name used <a href="#//api/name/to">to</a> identify this attachment in the message. 
Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>type</em></dt> <dd><p>The <code>ImageAttachmentType</code> <a href="#//api/name/to">to</a> identify this image as a JPEG or a PNG.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/addRecipient:" title="addRecipient:"></a> <h3 class="subsubtitle method-title">addRecipient:</h3> <div class="method-subsection brief-description"> <p>Adds an additional recipient <a href="#//api/name/to">to</a> the receiver.</p> </div> <div class="method-subsection method-declaration"><code>- (void)addRecipient:(NSString *)<em>recipient</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>recipient</em></dt> <dd><p>The recipient <a href="#//api/name/to">to</a> add <a href="#//api/name/to">to</a> the message. Must not be nil.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/addTag:" title="addTag:"></a> <h3 class="subsubtitle method-title">addTag:</h3> <div class="method-subsection brief-description"> <p>Adds a single tag <a href="#//api/name/to">to</a> this recevier&rsquo;s metadata.</p> </div> <div class="method-subsection method-declaration"><code>- (void)addTag:(NSString *)<em>tag</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>tag</em></dt> <dd><p>The tag <a href="#//api/name/to">to</a> add <a href="#//api/name/to">to</a> this recevier&rsquo;s metadata. 
Must not be nil.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/addTags:" title="addTags:"></a> <h3 class="subsubtitle method-title">addTags:</h3> <div class="method-subsection brief-description"> <p>Adds multiple <a href="#//api/name/tags">tags</a> <a href="#//api/name/to">to</a> the recevier&rsquo;s metadata.</p> </div> <div class="method-subsection method-declaration"><code>- (void)addTags:(NSArray *)<em>tags</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>tags</em></dt> <dd><p>An <code>NSArray</code> containing the <a href="#//api/name/tags">tags</a> <a href="#//api/name/to">to</a> add <a href="#//api/name/to">to</a> this recevier&rsquo;s metadata. Must not be nil.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/addVariable:value:" title="addVariable:value:"></a> <h3 class="subsubtitle method-title">addVariable:value:</h3> <div class="method-subsection brief-description"> <p>Adds a variable and a value <a href="#//api/name/to">to</a> the receiver.</p> </div> <div class="method-subsection method-declaration"><code>- (void)addVariable:(NSString *)<em>var</em> value:(NSString *)<em>value</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>var</em></dt> <dd><p>The variable name. 
Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>value</em></dt> <dd><p>The value of the variable <a href="#//api/name/to">to</a> display in the message.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> <div class="section-method"> <a name="//api/name/dictionary" title="dictionary"></a> <h3 class="subsubtitle method-title">dictionary</h3> <div class="method-subsection method-declaration"><code>- (NSDictionary *)dictionary</code></div> </div> <div class="section-method"> <a name="//api/name/initWithFrom:to:subject:body:" title="initWithFrom:to:subject:body:"></a> <h3 class="subsubtitle method-title">initWithFrom:to:subject:body:</h3> <div class="method-subsection brief-description"> <p>The designated initializer <a href="#//api/name/to">to</a> create a message with the provided details.</p> </div> <div class="method-subsection method-declaration"><code>- (id)initWithFrom:(NSString *)<em>from</em> to:(NSString *)<em>to</em> subject:(NSString *)<em>subject</em> body:(NSString *)<em>body</em></code></div> <div class="method-subsection arguments-section parameters"> <h4 class="method-subtitle parameter-title">Parameters</h4> <dl class="argument-def parameter-def"> <dt><em>from</em></dt> <dd><p>The message sender. Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>to</em></dt> <dd><p>The message recipient. Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>subject</em></dt> <dd><p>The message <a href="#//api/name/subject">subject</a>. 
Must not be nil.</p></dd> </dl> <dl class="argument-def parameter-def"> <dt><em>body</em></dt> <dd><p>The body of the message.</p></dd> </dl> </div> <div class="method-subsection declared-in-section"> <h4 class="method-subtitle">Declared In</h4> <code class="declared-in-ref">MGMessage.h</code><br /> </div> </div> </div> </div> <div class="main-navigation navigation-bottom"> <ul> <li><a href="../index.html">Index</a></li> <li><a href="../hierarchy.html">Hierarchy</a></li> </ul> </div> <div id="footer"> <hr /> <div class="footer-copyright"> <p><span class="copyright">&copy; 2013 Rackspace Hosting. All rights reserved. (Last updated: 2013-03-26)</span><br /> <span class="generator">Generated by <a href="http://appledoc.gentlebytes.com">appledoc 2.1 (build 858)</a>.</span></p> </div> </div> </div> </article> <script type="text/javascript"> function jumpToChange() { window.location.hash = this.options[this.selectedIndex].value; } function toggleTOC() { var contents = document.getElementById('contents'); var tocContainer = document.getElementById('tocContainer'); if (this.getAttribute('class') == 'open') { this.setAttribute('class', ''); contents.setAttribute('class', ''); tocContainer.setAttribute('class', ''); window.name = "hideTOC"; } else { this.setAttribute('class', 'open'); contents.setAttribute('class', 'isShowingTOC'); tocContainer.setAttribute('class', 'isShowingTOC'); window.name = ""; } return false; } function toggleTOCEntryChildren(e) { e.stopPropagation(); var currentClass = this.getAttribute('class'); if (currentClass == 'children') { this.setAttribute('class', 'children open'); } else if (currentClass == 'children open') { this.setAttribute('class', 'children'); } return false; } function tocEntryClick(e) { e.stopPropagation(); return true; } function init() { var selectElement = document.getElementById('jumpTo'); selectElement.addEventListener('change', jumpToChange, false); var tocButton = document.getElementById('table_of_contents'); 
tocButton.addEventListener('click', toggleTOC, false); var taskTreeItem = document.getElementById('task_treeitem'); if (taskTreeItem.getElementsByTagName('li').length > 0) { taskTreeItem.setAttribute('class', 'children'); taskTreeItem.firstChild.setAttribute('class', 'disclosure'); } var tocList = document.getElementById('toc'); var tocEntries = tocList.getElementsByTagName('li'); for (var i = 0; i < tocEntries.length; i++) { tocEntries[i].addEventListener('click', toggleTOCEntryChildren, false); } var tocLinks = tocList.getElementsByTagName('a'); for (var i = 0; i < tocLinks.length; i++) { tocLinks[i].addEventListener('click', tocEntryClick, false); } if (window.name == "hideTOC") { toggleTOC.call(tocButton); } } window.onload = init; // If showing in Xcode, hide the TOC and Header if (navigator.userAgent.match(/xcode/i)) { document.getElementById("contents").className = "hideInXcode" document.getElementById("tocContainer").className = "hideInXcode" document.getElementById("top_header").className = "hideInXcode" } </script> </body> </html>
mit
JonFerrera/angular.js
docs/config/index.js
5289
"use strict"; var path = require('canonical-path'); var packagePath = __dirname; var Package = require('dgeni').Package; // Create and export a new Dgeni package called angularjs. This package depends upon // the ngdoc, nunjucks, and examples packages defined in the dgeni-packages npm module. module.exports = new Package('angularjs', [ require('dgeni-packages/ngdoc'), require('dgeni-packages/nunjucks'), require('dgeni-packages/examples'), require('dgeni-packages/git') ]) .factory(require('./services/errorNamespaceMap')) .factory(require('./services/getMinerrInfo')) .factory(require('./services/getVersion')) .factory(require('./services/deployments/debug')) .factory(require('./services/deployments/default')) .factory(require('./services/deployments/jquery')) .factory(require('./services/deployments/production')) .factory(require('./inline-tag-defs/type')) .processor(require('./processors/error-docs')) .processor(require('./processors/index-page')) .processor(require('./processors/keywords')) .processor(require('./processors/pages-data')) .processor(require('./processors/versions-data')) .config(function(dgeni, log, readFilesProcessor, writeFilesProcessor) { dgeni.stopOnValidationError = true; dgeni.stopOnProcessingError = true; log.level = 'info'; readFilesProcessor.basePath = path.resolve(__dirname,'../..'); readFilesProcessor.sourceFiles = [ { include: 'src/**/*.js', exclude: 'src/angular.bind.js', basePath: 'src' }, { include: 'docs/content/**/*.ngdoc', basePath: 'docs/content' } ]; writeFilesProcessor.outputFolder = 'build/docs'; }) .config(function(parseTagsProcessor) { parseTagsProcessor.tagDefinitions.push(require('./tag-defs/tutorial-step')); parseTagsProcessor.tagDefinitions.push(require('./tag-defs/sortOrder')); }) .config(function(inlineTagProcessor, typeInlineTagDef) { inlineTagProcessor.inlineTagDefinitions.push(typeInlineTagDef); }) .config(function(templateFinder, renderDocsProcessor, gitData) { 
templateFinder.templateFolders.unshift(path.resolve(packagePath, 'templates')); renderDocsProcessor.extraData.git = gitData; }) .config(function(computePathsProcessor, computeIdsProcessor) { computePathsProcessor.pathTemplates.push({ docTypes: ['error'], pathTemplate: 'error/${namespace}/${name}', outputPathTemplate: 'partials/error/${namespace}/${name}.html' }); computePathsProcessor.pathTemplates.push({ docTypes: ['errorNamespace'], pathTemplate: 'error/${name}', outputPathTemplate: 'partials/error/${name}.html' }); computePathsProcessor.pathTemplates.push({ docTypes: ['overview', 'tutorial'], getPath: function(doc) { var docPath = path.dirname(doc.fileInfo.relativePath); if ( doc.fileInfo.baseName !== 'index' ) { docPath = path.join(docPath, doc.fileInfo.baseName); } return docPath; }, outputPathTemplate: 'partials/${path}.html' }); computePathsProcessor.pathTemplates.push({ docTypes: ['e2e-test'], getPath: function() {}, outputPathTemplate: 'ptore2e/${example.id}/${deployment.name}_test.js' }); computePathsProcessor.pathTemplates.push({ docTypes: ['indexPage'], pathTemplate: '.', outputPathTemplate: '${id}.html' }); computePathsProcessor.pathTemplates.push({ docTypes: ['module' ], pathTemplate: '${area}/${name}', outputPathTemplate: 'partials/${area}/${name}.html' }); computePathsProcessor.pathTemplates.push({ docTypes: ['componentGroup' ], pathTemplate: '${area}/${moduleName}/${groupType}', outputPathTemplate: 'partials/${area}/${moduleName}/${groupType}.html' }); computeIdsProcessor.idTemplates.push({ docTypes: ['overview', 'tutorial', 'e2e-test', 'indexPage'], getId: function(doc) { return doc.fileInfo.baseName; }, getAliases: function(doc) { return [doc.id]; } }); computeIdsProcessor.idTemplates.push({ docTypes: ['error'], getId: function(doc) { return 'error:' + doc.namespace + ':' + doc.name; }, getAliases: function(doc) { return [doc.name, doc.namespace + ':' + doc.name, doc.id]; } }, { docTypes: ['errorNamespace'], getId: function(doc) { return 'error:' 
+ doc.name; }, getAliases: function(doc) { return [doc.id]; } } ); }) .config(function(checkAnchorLinksProcessor) { checkAnchorLinksProcessor.base = '/'; // We are only interested in docs that have an area (i.e. they are pages) checkAnchorLinksProcessor.checkDoc = function(doc) { return doc.area; }; }) .config(function( generateIndexPagesProcessor, generateProtractorTestsProcessor, generateExamplesProcessor, debugDeployment, defaultDeployment, jqueryDeployment, productionDeployment) { generateIndexPagesProcessor.deployments = [ debugDeployment, defaultDeployment, jqueryDeployment, productionDeployment ]; generateProtractorTestsProcessor.deployments = [ defaultDeployment, jqueryDeployment ]; generateProtractorTestsProcessor.basePath = 'build/docs/'; generateExamplesProcessor.deployments = [ debugDeployment, defaultDeployment, jqueryDeployment, productionDeployment ]; }) .config(function(generateKeywordsProcessor) { generateKeywordsProcessor.docTypesToIgnore = ['componentGroup']; });
mit
abkmr/odata.net
test/EndToEndTests/Tests/Client/Build.Desktop/CodeGenerationTests/CustomizeNamingTest.cs
24396
//---------------------------------------------------------------------
// <copyright file="CustomizeNamingTest.cs" company="Microsoft">
//      Copyright (C) Microsoft Corporation. All rights reserved. See License.txt in the project root for license information.
// </copyright>
//---------------------------------------------------------------------

namespace Microsoft.Test.OData.Tests.Client.CodeGenerationTests
{
    using System;
    using System.Collections.Generic;
    using System.Collections.ObjectModel;
    using System.Linq;
    using Microsoft.OData.Core;
    using Microsoft.OData.Edm;
    using Microsoft.Spatial;
    using Microsoft.Test.OData.Services.TestServices;
    using Microsoft.Test.OData.Services.TestServices.ODataWCFServiceReferencePlus;
    using Microsoft.Test.OData.Tests.Client.Common;
    using Microsoft.VisualStudio.TestTools.UnitTesting;
    using ODataClient = Microsoft.OData.Client;

    /// <summary>
    /// T4 code generation for operations test cases.
    /// Exercises a client proxy generated with customized naming (every generated
    /// member carries a "Plus" suffix) against the ODataWCFService test service,
    /// verifying that the renamed client types still map onto the server-side
    /// names (e.g. "Details", "MyPaymentInstruments") correctly.
    /// </summary>
    [TestClass]
    public class CustomizeNamingTest : ODataWCFServiceTestsBase<Microsoft.Test.OData.Services.TestServices.ODataWCFServiceReferencePlus.InMemoryEntitiesPlus>
    {
        // Server-side namespace used to build fully-qualified type/operation names
        // for raw ODataEntry payloads and operation URLs.
        private const string ServerSideNameSpacePrefix = "Microsoft.Test.OData.Services.ODataWCFService.";

        public CustomizeNamingTest()
            : base(ServiceDescriptors.ODataWCFServiceDescriptor)
        {
        }

        /// <summary>
        /// Read-only queries through the renamed proxy: entity sets, $expand (both
        /// lambda and property-name overloads), LoadProperty, derived types,
        /// $select projections, $filter, ordering, $count, and multi-part keys.
        /// Expected values match the test service's seeded in-memory data.
        /// </summary>
        [TestMethod]
        public void BasicQuery()
        {
            TestClientContext.MergeOption = Microsoft.OData.Client.MergeOption.OverwriteChanges;

            // Query a entity set
            var products1 = TestClientContext.ProductsPlus.ToList();
            Assert.AreEqual(5, products1.Count);

            // Query with expand (Linq)
            var products2 = TestClientContext.ProductsPlus.Expand(p => p.DetailsPlus).ToList();
            Assert.AreEqual(5, products2.Single(p => p.ProductIDPlus == 5).DetailsPlus.Count);

            // Query with expand (PropertyName) — note the SERVER-side name "Details",
            // not the renamed client property "DetailsPlus".
            var products3 = TestClientContext.ProductsPlus.Expand("Details").ToList();
            Assert.AreEqual(5, products3.Single(p => p.ProductIDPlus == 5).DetailsPlus.Count);

            // Query a individual primitive property
            var product4 = TestClientContext.ProductsPlus.Where(p => p.ProductIDPlus == 5).Single();
            Assert.AreEqual("Cheetos", product4.NamePlus);

            // Query an Navigation Property (again addressed by its server-side name)
            TestClientContext.LoadProperty(product4, "Details");
            Assert.AreEqual(5, product4.DetailsPlus.Count);

            // Query a Derived entity.
            var people5 = TestClientContext.PeoplePlus.Where(p => p.PersonIDPlus == 1).Single();
            // Check the property from the derived type.
            Assert.AreEqual("Tokyo", people5.HomeAddressPlus.CityPlus);
            // Check the derived complex property.
            Assert.AreEqual("Cats", ((HomeAddressPlus)(people5.HomeAddressPlus)).FamilyNamePlus);
            // Check collection of PrimitiveTypes
            Assert.AreEqual(1, people5.EmailsPlus.Count);

            // Query with $select & $expand: only the projected properties should be
            // populated; AccountInfoPlus was not selected so it must come back null.
            var accounts6 = TestClientContext.AccountsPlus
                .Where(a => a.AccountIDPlus == 103)
                .Select(a => new AccountPlus()
                {
                    AccountIDPlus = a.AccountIDPlus,
                    MyGiftCardPlus = a.MyGiftCardPlus,
                    CountryRegionPlus = a.CountryRegionPlus
                });
            var account6 = accounts6.Single();
            Assert.IsNotNull(account6.MyGiftCardPlus);
            Assert.AreEqual(103, account6.AccountIDPlus);
            Assert.IsNull(account6.AccountInfoPlus);

            // Query with $filter by non-key property.
            var accounts7 = TestClientContext.AccountsPlus.Where(a => a.CountryRegionPlus == "CN").ToList();
            Assert.AreEqual(3, accounts7.Count);

            // Query with OrderBy
            var people8 = TestClientContext.PeoplePlus.OrderBy((p) => p.LastNamePlus).First();
            Assert.AreEqual(5, people8.PersonIDPlus);

            // Query with $count
            var count = TestClientContext.AccountsPlus.Count();
            Assert.AreEqual(count, 7);

            // Query with MultiKeys
            var productReview10 = TestClientContext.ProductReviewsPlus.Where(pd =>
                pd.ProductDetailIDPlus == 2
                && pd.ProductIDPlus == 5
                && pd.ReviewTitlePlus == "Special"
                && pd.RevisionIDPlus == 1).First();
            Assert.AreEqual("Andy", productReview10.AuthorPlus);
        }

        /// <summary>
        /// Write operations through the renamed proxy: AddRelatedObject,
        /// UpdateObject, UpdateRelatedObject, inserting a derived entity,
        /// DeleteObject, SetLink (including the documented set-to-null quirk),
        /// AddLink and DeleteLink. Each mutation is saved and then re-queried to
        /// verify the service round-trip.
        /// </summary>
        [TestMethod]
        public void BasicModify()
        {
            TestClientContext.MergeOption = Microsoft.OData.Client.MergeOption.OverwriteChanges;
            TestClientContext.IgnoreMissingProperties = true;

            // AddRelatedObject: create an account, then a payment instrument in its
            // contained "MyPaymentInstruments" set in a single change set.
            AccountPlus newAccount1 = new AccountPlus()
            {
                AccountIDPlus = 110,
                CountryRegionPlus = "CN",
                AccountInfoPlus = new AccountInfoPlus()
                {
                    FirstNamePlus = "New",
                    LastNamePlus = "Boy"
                }
            };
            PaymentInstrumentPlus newPI = new PaymentInstrumentPlus()
            {
                PaymentInstrumentIDPlus = 110901,
                FriendlyNamePlus = "110's first PI",
                CreatedDatePlus = new DateTimeOffset(new DateTime(2012, 12, 10))
            };
            TestClientContext.AddToAccountsPlus(newAccount1);
            TestClientContext.AddRelatedObject(newAccount1, "MyPaymentInstruments", newPI);
            TestClientContext.SaveChanges();

            var r1 = TestClientContext.AccountsPlus.Where(account => account.AccountIDPlus == 110).Single();
            Assert.AreEqual("Boy", r1.AccountInfoPlus.LastNamePlus);
            var r2 = TestClientContext.CreateQuery<PaymentInstrumentPlus>("Accounts(110)/MyPaymentInstruments")
                .Where(pi => pi.PaymentInstrumentIDPlus == 110901).Single();
            Assert.AreEqual("110's first PI", r2.FriendlyNamePlus);

            //UpdateObject
            newAccount1.CountryRegionPlus = "US";
            TestClientContext.UpdateObject(newAccount1);
            TestClientContext.SaveChanges();
            r1 = TestClientContext.AccountsPlus.Where(account => account.AccountIDPlus == 110).Single();
            Assert.AreEqual("US", r1.CountryRegionPlus);

            //UpdateRelatedObject (single-valued containment: MyGiftCard)
            var myGiftCard = new GiftCardPlus()
            {
                GiftCardIDPlus = 11111,
                GiftCardNOPlus = "11111",
                AmountPlus = 20,
                ExperationDatePlus = new DateTimeOffset(2015, 12, 1, 0, 0, 0, new TimeSpan(0))
            };
            TestClientContext.UpdateRelatedObject(newAccount1, "MyGiftCard", myGiftCard);
            TestClientContext.SaveChanges();
            r1 = TestClientContext.AccountsPlus.Expand(account => account.MyGiftCardPlus).Where(account => account.AccountIDPlus == 110).Single();
            Assert.AreEqual(11111, r1.MyGiftCardPlus.GiftCardIDPlus);

            //Add Derived Object (CustomerPlus derives from the People entity type)
            CustomerPlus customerPlus = new CustomerPlus()
            {
                FirstNamePlus = "Nelson",
                MiddleNamePlus = "S.",
                LastNamePlus = "Black",
                NumbersPlus = new ObservableCollection<string> { "111-111-1111" },
                EmailsPlus = new ObservableCollection<string> { "[email protected]" },
                PersonIDPlus = 10001,
                BirthdayPlus = new DateTimeOffset(new DateTime(1957, 4, 3)),
                CityPlus = "London",
                HomePlus = GeographyPoint.Create(32.1, 23.1),
                TimeBetweenLastTwoOrdersPlus = new TimeSpan(1),
                HomeAddressPlus = new HomeAddressPlus()
                {
                    CityPlus = "London",
                    PostalCodePlus = "98052",
                    StreetPlus = "1 Microsoft Way",
                    FamilyNamePlus = "Black's Family"
                },
            };

            var ordersPlus = new ODataClient.DataServiceCollection<OrderPlus>(TestClientContext)
            {
                new OrderPlus()
                {
                    OrderIDPlus = 11111111,
                    OrderDatePlus = new DateTimeOffset(new DateTime(2011, 5, 29, 14, 21, 12)),
                    ShelfLifePlus = new TimeSpan(1),
                    OrderShelfLifesPlus = new ObservableCollection<TimeSpan>(){new TimeSpan(1)}
                }
            };

            TestClientContext.AddToPeoplePlus(customerPlus);
            TestClientContext.SaveChanges();
            var customer1 = TestClientContext.CustomersPlus.Where(c => c.PersonIDPlus == 10001).Single();
            TestClientContext.AddLink(customer1, "Orders", ordersPlus[0]);
            TestClientContext.SaveChanges();

            // Detach so the subsequent query re-materializes the entity from the
            // service rather than returning the tracked local instance.
            TestClientContext.Detach(customerPlus);
            TestClientContext.SaveChanges();
            var customer = TestClientContext.CustomersPlus.Expand(p => (p as CustomerPlus).OrdersPlus).Where(p => p.PersonIDPlus == 10001).SingleOrDefault();
            Assert.AreEqual(((CustomerPlus)customer).CityPlus, "London");
            Assert.AreEqual(((HomeAddressPlus)(customer.HomeAddressPlus)).FamilyNamePlus, "Black's Family");
            Assert.AreEqual(((CustomerPlus)customer).OrdersPlus.Count, 1);
            var order = TestClientContext.OrdersPlus.Where(p => p.OrderIDPlus == 11111111).SingleOrDefault();
            Assert.AreEqual(order.OrderShelfLifesPlus.Count, 1);

            // DeleteObject
            TestClientContext.DeleteObject(newAccount1);
            TestClientContext.SaveChanges();
            var accounts = TestClientContext.AccountsPlus.ToList();
            Assert.IsTrue(!accounts.Any(ac => ac.AccountIDPlus == 110));

            // SetLink
            var person1 = TestClientContext.PeoplePlus.Where((p) => p.PersonIDPlus == 1).Single();
            var person2 = TestClientContext.PeoplePlus.Where((p) => p.PersonIDPlus == 2).Single();
            TestClientContext.SetLink(person1, "Parent", person2);
            TestClientContext.SaveChanges();

            person1 = TestClientContext.PeoplePlus.Expand(d => d.ParentPlus).Where((p) => p.PersonIDPlus == 1).Single();
            Assert.IsNotNull(person1.ParentPlus);
            Assert.IsNotNull(person1.ParentPlus.PersonIDPlus == 2);

            // SetLink : Bug, SetLink to Null will not update the client object,
            // hence the manual person1.ParentPlus = null below before re-querying.
            TestClientContext.SetLink(person1, "Parent", null);
            TestClientContext.SaveChanges();

            person1.ParentPlus = null;
            var person3 = TestClientContext.PeoplePlus.Expand(d => d.ParentPlus).Where((p) => p.PersonIDPlus == 1).Single();
            Assert.IsNull(person3.ParentPlus);

            //AddLink
            var companyPlus = TestClientContext.CompanyPlus.GetValue();
            DepartmentPlus department = new DepartmentPlus()
            {
                DepartmentIDPlus = 100001,
                NamePlus = "ID" + 100001,
            };
            TestClientContext.AddToDepartmentsPlus(department);
            TestClientContext.AddLink(companyPlus, "Departments", department);
            TestClientContext.SaveChanges();

            TestClientContext.LoadProperty(companyPlus, "Departments");
            Assert.IsTrue(companyPlus.DepartmentsPlus.Any(d => d.DepartmentIDPlus == department.DepartmentIDPlus));

            //Delete Link
            TestClientContext.DeleteLink(companyPlus, "Departments", department);
            TestClientContext.SaveChanges();

            TestClientContext.LoadProperty(companyPlus, "Departments");
            Assert.IsTrue(!companyPlus.DepartmentsPlus.Any(d => d.DepartmentIDPlus == department.DepartmentIDPlus));
        }

        /// <summary>
        /// Open complex type round-trip: inserts an account via the typed proxy,
        /// then PATCHes it with a hand-built ODataEntry that includes both a
        /// property known to the client code (MiddleName) and a dynamic property
        /// unknown to it (ShippingAddress), and verifies the client can still
        /// deserialize the result.
        /// </summary>
        [TestMethod]
        public void OpenComplexType()
        {
            //Update entity with open complex type
            AccountPlus account = new AccountPlus()
            {
                AccountIDPlus = 1000000,
                CountryRegionPlus = "CN",
                AccountInfoPlus = new AccountInfoPlus()
                {
                    FirstNamePlus = "Peter",
                    MiddleNamePlus = "White",
                    LastNamePlus = "Andy",
                    IsActivePlus = true
                }
            };
            TestClientContext.AddToAccountsPlus(account);
            TestClientContext.SaveChanges();

            //Check account can be correctly deserialized.
            account = TestClientContext.AccountsPlus.Where(a => a.AccountIDPlus == 1000000).Single();
            Assert.IsNotNull(account);
            Assert.AreEqual(account.AccountInfoPlus.MiddleNamePlus, "White");
            Assert.IsTrue(account.AccountInfoPlus.IsActivePlus);

            //Update entity with open complex type via a raw PATCH request, bypassing
            //the typed proxy so a dynamic property can be included.
            var entry = new ODataEntry() { TypeName = ServerSideNameSpacePrefix + "Account" };
            entry.Properties = new[]
            {
                new ODataProperty { Name = "AccountID", Value = 1000000 },
                new ODataProperty
                {
                    Name = "AccountInfo",
                    Value = new ODataComplexValue
                    {
                        TypeName = ServerSideNameSpacePrefix + "AccountInfo",
                        Properties = new[]
                        {
                            new ODataProperty { Name = "FirstName", Value = "Peter" },
                            new ODataProperty { Name = "LastName", Value = "Andy" },
                            //Property that exists in Customer-Defined client code.
                            new ODataProperty { Name = "MiddleName", Value = "White2" },
                            new ODataProperty { Name = "IsActive", Value = false, },
                            //Property that doesn't exist in Customer-Defined client code.
                            new ODataProperty { Name = "ShippingAddress", Value = "#999, ZiXing Road" }
                        }
                    }
                }
            };
            var settings = new ODataMessageWriterSettings();
            settings.PayloadBaseUri = ServiceBaseUri;

            var accountType = Model.FindDeclaredType(ServerSideNameSpacePrefix + "Account") as IEdmEntityType;
            var accountSet = Model.EntityContainer.FindEntitySet("Accounts");

            var requestMessage = new HttpWebRequestMessage(new Uri(ServiceBaseUri + "Accounts(1000000)"));
            requestMessage.SetHeader("Content-Type", MimeTypes.ApplicationJson);
            requestMessage.SetHeader("Accept", MimeTypes.ApplicationJson);
            requestMessage.Method = "PATCH";
            using (var messageWriter = new ODataMessageWriter(requestMessage, settings))
            {
                var odataWriter = messageWriter.CreateODataEntryWriter(accountSet, accountType);
                odataWriter.WriteStart(entry);
                odataWriter.WriteEnd();
            }

            var responseMessage = requestMessage.GetResponse();

            TestClientContext.MergeOption = Microsoft.OData.Client.MergeOption.OverwriteChanges;
            //Check account can be correctly deserialized.
            account = TestClientContext.AccountsPlus.Where(a => a.AccountIDPlus == 1000000).Single();
            Assert.IsNotNull(account);
            Assert.AreEqual(account.AccountInfoPlus.MiddleNamePlus, "White2");
            Assert.IsTrue(!account.AccountInfoPlus.IsActivePlus);
        }

        /// <summary>
        /// Open entity type round-trip against the PublicCompany singleton:
        /// PATCHes it with a raw ODataEntry containing a dynamic property
        /// (TotalAssets), verifies the 204 response, then updates the same
        /// properties through the typed proxy and re-reads the singleton.
        /// </summary>
        [TestMethod]
        public void OpenEntityType()
        {
            //UpdateOpenTypeSingleton
            var entry = new ODataEntry() { TypeName = ServerSideNameSpacePrefix + "PublicCompany" };
            entry.Properties = new[]
            {
                new ODataProperty { Name = "FullName", Value = "MS Ltd." },
                new ODataProperty { Name = "PhoneNumber", Value = "123-45678" },
                new ODataProperty { Name = "TotalAssets", Value = 500000L, }
            };
            var settings = new ODataMessageWriterSettings();
            settings.PayloadBaseUri = ServiceBaseUri;
            settings.AutoComputePayloadMetadataInJson = true;

            var companyType = Model.FindDeclaredType(ServerSideNameSpacePrefix + "PublicCompany") as IEdmEntityType;
            var companySingleton = Model.EntityContainer.FindSingleton("PublicCompany");

            var requestMessage = new HttpWebRequestMessage(new Uri(ServiceBaseUri + "PublicCompany"));
            requestMessage.SetHeader("Content-Type", MimeTypes.ApplicationJson);
            requestMessage.SetHeader("Accept", MimeTypes.ApplicationJson);
            requestMessage.Method = "PATCH";
            using (var messageWriter = new ODataMessageWriter(requestMessage, settings))
            {
                var odataWriter = messageWriter.CreateODataEntryWriter(companySingleton, companyType);
                odataWriter.WriteStart(entry);
                odataWriter.WriteEnd();
            }

            var responseMessage = requestMessage.GetResponse();
            Assert.AreEqual(204, responseMessage.StatusCode);

            //Check company can be correctly deserialized.
            var company = TestClientContext.PublicCompanyPlus.GetValue();
            Assert.IsNotNull(company);
            Assert.AreEqual("MS Ltd.", company.FullNamePlus);
            Assert.AreEqual(500000, company.TotalAssetsPlus);

            TestClientContext.MergeOption = Microsoft.OData.Client.MergeOption.OverwriteChanges;
            company.FullNamePlus = "MS2 Ltd.";
            company.TotalAssetsPlus = 1000000;
            TestClientContext.UpdateObject(company);
            TestClientContext.SaveChanges();

            // Clear the local copies so the next GetValue provably refreshes from
            // the service (OverwriteChanges) rather than echoing local state.
            company.FullNamePlus = null;
            company.TotalAssetsPlus = 0;
            company = TestClientContext.PublicCompanyPlus.GetValue();
            Assert.IsNotNull(company);
            Assert.AreEqual("MS2 Ltd.", company.FullNamePlus);
            Assert.AreEqual(1000000, company.TotalAssetsPlus);
        }

        /// <summary>
        /// Invokes service operations through the renamed proxy: an unbound
        /// action, functions bound to an entity and to a navigation property,
        /// and actions bound to an entity and to a navigation property.
        /// </summary>
        [TestMethod]
        public void InvokeOperations()
        {
            TestClientContext.MergeOption = Microsoft.OData.Client.MergeOption.OverwriteChanges;

            // Invoke Unbounded Action
            var color1 = TestClientContext.GetDefaultColorPlus().GetValue();
            Assert.AreEqual(color1, ColorPlus.RedPlus);

            // Invoke Bounded Function on single entity
            var account = TestClientContext.AccountsPlus.Where(a => a.AccountIDPlus == 101).Single();
            var r2 = account.GetDefaultPIPlus().GetValue();
            Assert.AreEqual(101901, r2.PaymentInstrumentIDPlus);

            // Invoke bounded Function on Navigation Property
            var account3 = TestClientContext.AccountsPlus.Expand(c => c.MyGiftCardPlus).Where(a => a.AccountIDPlus == 101).Single();
            var result3 = account3.MyGiftCardPlus.GetActualAmountPlus(1).GetValue();
            Assert.AreEqual(39.8, result3);

            // Invoke bounded Action on single entity set
            var product4 = TestClientContext.ProductsPlus.Where(p => p.ProductIDPlus == 7).Single();
            var result = product4.AddAccessRightPlus(AccessLevelPlus.WritePlus).GetValue();
            Assert.AreEqual(AccessLevelPlus.ReadWritePlus, result);

            // Invoke bounded Action on Navigation Property
            var account5 = TestClientContext.AccountsPlus.Where(ac => ac.AccountIDPlus == 101).Single();
            var result5 = account5.RefreshDefaultPIPlus(DateTimeOffset.Now).GetValue();
            Assert.AreEqual(101901, result5.PaymentInstrumentIDPlus);
        }

        /// <summary>
        /// Queries contained entities (PaymentInstruments contained in Accounts)
        /// by raw URI via CreateQuery, with $expand/$filter options, and invokes
        /// a bound function on a contained navigation via Execute.
        /// </summary>
        [TestMethod]
        public void ContainedEntityQuery()
        {
            TestClientContext.MergeOption = Microsoft.OData.Client.MergeOption.OverwriteChanges;

            // Query a single contained entity
            var q1 = TestClientContext.CreateQuery<PaymentInstrumentPlus>("Accounts(103)/MyPaymentInstruments(103902)");
            Assert.IsTrue(q1.RequestUri.OriginalString.EndsWith("Accounts(103)/MyPaymentInstruments(103902)", StringComparison.Ordinal));
            List<PaymentInstrumentPlus> r1 = q1.ToList();
            Assert.AreEqual(1, r1.Count);
            Assert.AreEqual(103902, r1[0].PaymentInstrumentIDPlus);
            Assert.AreEqual("103 second PI", r1[0].FriendlyNamePlus);

            // Query a contained entity set with query option
            var q2 = TestClientContext.CreateQuery<PaymentInstrumentPlus>("Accounts(103)/MyPaymentInstruments").Expand(pi => pi.BillingStatementsPlus).Where(pi => pi.PaymentInstrumentIDPlus == 103901);
            PaymentInstrumentPlus r2 = q2.Single();
            Assert.IsNotNull(r2.BillingStatementsPlus);

            // Invoke a bounded Function (fully-qualified server-side operation name
            // is required in the URL).
            double result = TestClientContext.Execute<double>(new Uri(ServiceBaseUri.AbsoluteUri + "Accounts(101)/MyGiftCard/Microsoft.Test.OData.Services.ODataWCFService.GetActualAmount(bonusRate=0.2)", UriKind.Absolute), "GET", true).Single();
            Assert.AreEqual(23.88, result);
        }

        /// <summary>
        /// Singleton scenarios: bound function and bound action on the Company
        /// singleton, a bound action on the derived PublicCompany singleton, and
        /// an unbound action (ResetBossAddress) verified via the Boss singleton.
        /// (Method name keeps the original "Singlton" spelling; renaming would
        /// change the public test identifier.)
        /// </summary>
        [TestMethod]
        public void SingltonQuery()
        {
            TestClientContext.MergeOption = Microsoft.OData.Client.MergeOption.OverwriteChanges;

            // Invoke a bounded Function
            var company1 = TestClientContext.CompanyPlus.GetValue();
            var result1 = company1.GetEmployeesCountPlus().GetValue();
            Assert.AreEqual(2, result1);

            // Invoke a bounded Action
            var company2 = TestClientContext.CompanyPlus.GetValue();
            var result2 = company2.IncreaseRevenuePlus(1).GetValue();
            Assert.AreEqual(100001, result2);

            // Invoke a bounded Action on derived type
            TestClientContext.MergeOption = Microsoft.OData.Client.MergeOption.OverwriteChanges;
            var publicCompany = TestClientContext.PublicCompanyPlus.GetValue();
            var originalRevenue = publicCompany.RevenuePlus;
            var revenue = publicCompany.IncreaseRevenuePlus(10).GetValue();
            Assert.IsTrue(originalRevenue + 10 == revenue);
            publicCompany = TestClientContext.PublicCompanyPlus.GetValue();
            Assert.IsTrue(revenue == publicCompany.RevenuePlus);

            // Invoke Unbound Action
            TestClientContext.ResetBossAddressPlus(
                new HomeAddressPlus()
                {
                    CityPlus = "Shanghai",
                    StreetPlus = "ZiXing Road",
                    PostalCodePlus = "200100",
                    FamilyNamePlus = "White's Family"
                }).GetValue();
            TestClientContext.SaveChanges();

            var boss = TestClientContext.BossPlus.GetValue();
            Assert.AreEqual(boss.HomeAddressPlus.PostalCodePlus, "200100");
            Assert.AreEqual(((HomeAddressPlus)boss.HomeAddressPlus).FamilyNamePlus, "White's Family");
        }
    }
}
mit
andreufirefly/keystone
lib/core/openDatabaseConnection.js
2539
var debug = require('debug')('keystone:core:openDatabaseConnection');

/**
 * Opens the Mongoose database connection for this keystone instance.
 *
 * Supports the deprecated `mongo replica set` option (building a multi-host
 * connection string from its server list) and otherwise connects using the
 * `mongo` / `mongo options` settings. Wires up `error` and `open` handlers on
 * the connection; `callback` fires once the connection is open (after
 * `applyUpdates` when `auto update` is enabled).
 *
 * Must be called with the keystone instance as `this`.
 *
 * @param {Function} callback invoked when the connection is ready
 * @returns {Object} the keystone instance, for chaining
 */
module.exports = function openDatabaseConnection (callback) {

	var keystone = this;
	// Tracks whether the connection has ever opened; decides how 'error'
	// events are handled below (post-open validation errors vs startup failure).
	var mongoConnectionOpen = false;

	// support replica sets for mongoose (deprecated configuration path)
	if (keystone.get('mongo replica set')) {

		if (keystone.get('logger')) {
			console.log('\nWarning: using the `mongo replica set` option has been deprecated and will be removed in'
				+ ' a future version.\nInstead set the `mongo` connection string with your host details, e.g.'
				+ ' mongodb://username:password@host:port,host:port,host:port/database and set any replica set options'
				+ ' in `mongo options`.\n\nRefer to https://mongodb.github.io/node-mongodb-native/driver-articles/mongoclient.html'
				+ ' for more details on the connection settings.');
		}

		debug('setting up mongo replica set');
		var replicaData = keystone.get('mongo replica set');
		var replica = '';

		// Optional "user:pass@" prefix shared by every host in the set.
		var credentials = (replicaData.username && replicaData.password) ? replicaData.username + ':' + replicaData.password + '@' : '';

		// Build a comma-separated list of mongodb:// URIs, one per server.
		// NOTE(review): this leaves a trailing comma on the final URI — appears
		// to rely on the driver tolerating it; confirm before changing.
		replicaData.db.servers.forEach(function (server) {
			replica += 'mongodb://' + credentials + server.host + ':' + server.port + '/' + replicaData.db.name + ',';
		});

		var options = {
			auth: { authSource: replicaData.authSource },
			replset: {
				rs_name: replicaData.db.replicaSetOptions.rs_name,
				readPreference: replicaData.db.replicaSetOptions.readPreference,
			},
		};

		debug('connecting to replicate set');
		keystone.mongoose.connect(replica, options);

	} else {
		debug('connecting to mongo');
		keystone.mongoose.connect(keystone.get('mongo'), keystone.get('mongo options'));
	}

	keystone.mongoose.connection.on('error', function (err) {

		if (keystone.get('logger')) {
			console.log('------------------------------------------------');
			console.log('Mongo Error:\n');
			console.log(err);
		}

		if (mongoConnectionOpen) {
			// After startup, ValidationErrors are expected (handled per-save);
			// anything else is rethrown.
			if (err.name === 'ValidationError') return;
			throw err;
		} else {
			// Errors before the connection ever opened mean startup failed.
			throw new Error('KeystoneJS (' + keystone.get('name') + ') failed to start - Check that you are running `mongod` in a separate process.');
		}

	}).on('open', function () {

		debug('mongo connection open');
		mongoConnectionOpen = true;

		var connected = function () {
			if (keystone.get('auto update')) {
				debug('applying auto update');
				keystone.applyUpdates(callback);
			} else {
				callback();
			}
		};

		// If a session store is still initialising, wait for it before
		// signalling readiness.
		if (keystone.sessionStorePromise) {
			keystone.sessionStorePromise.then(connected);
		} else {
			connected();
		}

	});

	return this;
};
mit
Alwnikrotikz/numexpr
numexpr/numexpr_object.hpp
1069
#ifndef NUMEXPR_OBJECT_HPP
#define NUMEXPR_OBJECT_HPP
/*********************************************************************
  Numexpr - Fast numerical array expression evaluator for NumPy.

      License: MIT
      Author:  See AUTHORS.txt

  See LICENSE.txt for details about copyright and rights to use.
**********************************************************************/

// Python-level object wrapping one compiled numexpr program: the compiled
// bytecode, its constants/inputs, and the scratch register memory used by
// the interpreter at evaluation time.
struct NumExprObject
{
    PyObject_HEAD
    PyObject *signature;    /* a python string */
    PyObject *tempsig;      /* signature of the temporaries — TODO confirm */
    PyObject *constsig;     /* signature of the constants — TODO confirm */
    PyObject *fullsig;      /* combined/full signature — TODO confirm */
    PyObject *program;      /* a python string */
    PyObject *constants;    /* a tuple of int/float/complex */
    PyObject *input_names;  /* tuple of strings */
    char **mem;             /* pointers to registers */
    char *rawmem;           /* a chunk of raw memory for storing registers */
    npy_intp *memsteps;     /* per-register element strides — presumably; verify against interpreter */
    npy_intp *memsizes;     /* per-register element sizes — presumably; verify against interpreter */
    int rawmemsize;         /* total size in bytes of rawmem */
    int n_inputs;           /* number of input arrays */
    int n_constants;        /* number of constants */
    int n_temps;            /* number of temporary registers */
};

// The Python type object for NumExprObject, defined elsewhere.
extern PyTypeObject NumExprType;

#endif // NUMEXPR_OBJECT_HPP
mit
nongfadai/front_demo
web/src/notuse/app/lib/zepto/zepto.js
69690
// Zepto.js // (c) 2010-2015 Thomas Fuchs // Zepto.js may be freely distributed under the MIT license. var Zepto = (function() { var undefined, key, $, classList, emptyArray = [], concat = emptyArray.concat, filter = emptyArray.filter, slice = emptyArray.slice, document = window.document, elementDisplay = {}, classCache = {}, cssNumber = { 'column-count': 1, 'columns': 1, 'font-weight': 1, 'line-height': 1,'opacity': 1, 'z-index': 1, 'zoom': 1 }, fragmentRE = /^\s*<(\w+|!)[^>]*>/, singleTagRE = /^<(\w+)\s*\/?>(?:<\/\1>|)$/, tagExpanderRE = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig, rootNodeRE = /^(?:body|html)$/i, capitalRE = /([A-Z])/g, // special attributes that should be get/set via method calls methodAttributes = ['val', 'css', 'html', 'text', 'data', 'width', 'height', 'offset'], adjacencyOperators = [ 'after', 'prepend', 'before', 'append' ], table = document.createElement('table'), tableRow = document.createElement('tr'), containers = { 'tr': document.createElement('tbody'), 'tbody': table, 'thead': table, 'tfoot': table, 'td': tableRow, 'th': tableRow, '*': document.createElement('div') }, readyRE = /complete|loaded|interactive/, simpleSelectorRE = /^[\w-]*$/, class2type = {}, toString = class2type.toString, zepto = {}, camelize, uniq, tempParent = document.createElement('div'), propMap = { 'tabindex': 'tabIndex', 'readonly': 'readOnly', 'for': 'htmlFor', 'class': 'className', 'maxlength': 'maxLength', 'cellspacing': 'cellSpacing', 'cellpadding': 'cellPadding', 'rowspan': 'rowSpan', 'colspan': 'colSpan', 'usemap': 'useMap', 'frameborder': 'frameBorder', 'contenteditable': 'contentEditable' }, isArray = Array.isArray || function(object){ return object instanceof Array } zepto.matches = function(element, selector) { if (!selector || !element || element.nodeType !== 1) return false var matchesSelector = element.webkitMatchesSelector || element.mozMatchesSelector || element.oMatchesSelector || element.matchesSelector if 
(matchesSelector) return matchesSelector.call(element, selector) // fall back to performing a selector: var match, parent = element.parentNode, temp = !parent if (temp) (parent = tempParent).appendChild(element) match = ~zepto.qsa(parent, selector).indexOf(element) temp && tempParent.removeChild(element) return match } function type(obj) { return obj == null ? String(obj) : class2type[toString.call(obj)] || "object" } function isFunction(value) { return type(value) == "function" } function isWindow(obj) { return obj != null && obj == obj.window } function isDocument(obj) { return obj != null && obj.nodeType == obj.DOCUMENT_NODE } function isObject(obj) { return type(obj) == "object" } function isPlainObject(obj) { return isObject(obj) && !isWindow(obj) && Object.getPrototypeOf(obj) == Object.prototype } function likeArray(obj) { return typeof obj.length == 'number' } function compact(array) { return filter.call(array, function(item){ return item != null }) } function flatten(array) { return array.length > 0 ? $.fn.concat.apply([], array) : array } camelize = function(str){ return str.replace(/-+(.)?/g, function(match, chr){ return chr ? chr.toUpperCase() : '' }) } function dasherize(str) { return str.replace(/::/g, '/') .replace(/([A-Z]+)([A-Z][a-z])/g, '$1_$2') .replace(/([a-z\d])([A-Z])/g, '$1_$2') .replace(/_/g, '-') .toLowerCase() } uniq = function(array){ return filter.call(array, function(item, idx){ return array.indexOf(item) == idx }) } function classRE(name) { return name in classCache ? classCache[name] : (classCache[name] = new RegExp('(^|\\s)' + name + '(\\s|$)')) } function maybeAddPx(name, value) { return (typeof value == "number" && !cssNumber[dasherize(name)]) ? 
value + "px" : value } function defaultDisplay(nodeName) { var element, display if (!elementDisplay[nodeName]) { element = document.createElement(nodeName) document.body.appendChild(element) display = getComputedStyle(element, '').getPropertyValue("display") element.parentNode.removeChild(element) display == "none" && (display = "block") elementDisplay[nodeName] = display } return elementDisplay[nodeName] } function children(element) { return 'children' in element ? slice.call(element.children) : $.map(element.childNodes, function(node){ if (node.nodeType == 1) return node }) } function Z(dom, selector) { var i, len = dom ? dom.length : 0 for (i = 0; i < len; i++) this[i] = dom[i] this.length = len this.selector = selector || '' } // `$.zepto.fragment` takes a html string and an optional tag name // to generate DOM nodes nodes from the given html string. // The generated DOM nodes are returned as an array. // This function can be overriden in plugins for example to make // it compatible with browsers that don't support the DOM fully. zepto.fragment = function(html, name, properties) { var dom, nodes, container // A special case optimization for a single tag if (singleTagRE.test(html)) dom = $(document.createElement(RegExp.$1)) if (!dom) { if (html.replace) html = html.replace(tagExpanderRE, "<$1></$2>") if (name === undefined) name = fragmentRE.test(html) && RegExp.$1 if (!(name in containers)) name = '*' container = containers[name] container.innerHTML = '' + html dom = $.each(slice.call(container.childNodes), function(){ container.removeChild(this) }) } if (isPlainObject(properties)) { nodes = $(dom) $.each(properties, function(key, value) { if (methodAttributes.indexOf(key) > -1) nodes[key](value) else nodes.attr(key, value) }) } return dom } // `$.zepto.Z` swaps out the prototype of the given `dom` array // of nodes with `$.fn` and thus supplying all the Zepto functions // to the array. This method can be overriden in plugins. 
zepto.Z = function(dom, selector) { return new Z(dom, selector) } // `$.zepto.isZ` should return `true` if the given object is a Zepto // collection. This method can be overriden in plugins. zepto.isZ = function(object) { return object instanceof zepto.Z } // `$.zepto.init` is Zepto's counterpart to jQuery's `$.fn.init` and // takes a CSS selector and an optional context (and handles various // special cases). // This method can be overriden in plugins. zepto.init = function(selector, context) { var dom // If nothing given, return an empty Zepto collection if (!selector) return zepto.Z() // Optimize for string selectors else if (typeof selector == 'string') { selector = selector.trim() // If it's a html fragment, create nodes from it // Note: In both Chrome 21 and Firefox 15, DOM error 12 // is thrown if the fragment doesn't begin with < if (selector[0] == '<' && fragmentRE.test(selector)) dom = zepto.fragment(selector, RegExp.$1, context), selector = null // If there's a context, create a collection on that context first, and select // nodes from there else if (context !== undefined) return $(context).find(selector) // If it's a CSS selector, use it to select nodes. else dom = zepto.qsa(document, selector) } // If a function is given, call it when the DOM is ready else if (isFunction(selector)) return $(document).ready(selector) // If a Zepto collection is given, just return it else if (zepto.isZ(selector)) return selector else { // normalize array if an array of nodes is given if (isArray(selector)) dom = compact(selector) // Wrap DOM nodes. 
else if (isObject(selector)) dom = [selector], selector = null // If it's a html fragment, create nodes from it else if (fragmentRE.test(selector)) dom = zepto.fragment(selector.trim(), RegExp.$1, context), selector = null // If there's a context, create a collection on that context first, and select // nodes from there else if (context !== undefined) return $(context).find(selector) // And last but no least, if it's a CSS selector, use it to select nodes. else dom = zepto.qsa(document, selector) } // create a new Zepto collection from the nodes found return zepto.Z(dom, selector) } // `$` will be the base `Zepto` object. When calling this // function just call `$.zepto.init, which makes the implementation // details of selecting nodes and creating Zepto collections // patchable in plugins. $ = function(selector, context){ return zepto.init(selector, context) } function extend(target, source, deep) { for (key in source) if (deep && (isPlainObject(source[key]) || isArray(source[key]))) { if (isPlainObject(source[key]) && !isPlainObject(target[key])) target[key] = {} if (isArray(source[key]) && !isArray(target[key])) target[key] = [] extend(target[key], source[key], deep) } else if (source[key] !== undefined) target[key] = source[key] } // Copy all but undefined properties from one or more // objects to the `target` object. $.extend = function(target){ var deep, args = slice.call(arguments, 1) if (typeof target == 'boolean') { deep = target target = args.shift() } args.forEach(function(arg){ extend(target, arg, deep) }) return target } // `$.zepto.qsa` is Zepto's CSS selector implementation which // uses `document.querySelectorAll` and optimizes for some special cases, like `#id`. // This method can be overriden in plugins. zepto.qsa = function(element, selector){ var found, maybeID = selector[0] == '#', maybeClass = !maybeID && selector[0] == '.', nameOnly = maybeID || maybeClass ? 
selector.slice(1) : selector, // Ensure that a 1 char tag name still gets checked isSimple = simpleSelectorRE.test(nameOnly) return (element.getElementById && isSimple && maybeID) ? // Safari DocumentFragment doesn't have getElementById ( (found = element.getElementById(nameOnly)) ? [found] : [] ) : (element.nodeType !== 1 && element.nodeType !== 9 && element.nodeType !== 11) ? [] : slice.call( isSimple && !maybeID && element.getElementsByClassName ? // DocumentFragment doesn't have getElementsByClassName/TagName maybeClass ? element.getElementsByClassName(nameOnly) : // If it's simple, it could be a class element.getElementsByTagName(selector) : // Or a tag element.querySelectorAll(selector) // Or it's not simple, and we need to query all ) } function filtered(nodes, selector) { return selector == null ? $(nodes) : $(nodes).filter(selector) } $.contains = document.documentElement.contains ? function(parent, node) { return parent !== node && parent.contains(node) } : function(parent, node) { while (node && (node = node.parentNode)) if (node === parent) return true return false } function funcArg(context, arg, idx, payload) { return isFunction(arg) ? arg.call(context, idx, payload) : arg } function setAttribute(node, name, value) { value == null ? node.removeAttribute(name) : node.setAttribute(name, value) } // access className property while respecting SVGAnimatedString function className(node, value){ var klass = node.className || '', svg = klass && klass.baseVal !== undefined if (value === undefined) return svg ? klass.baseVal : klass svg ? (klass.baseVal = value) : (node.className = value) } // "true" => true // "false" => false // "null" => null // "42" => 42 // "42.5" => 42.5 // "08" => "08" // JSON => parse if valid // String => self function deserializeValue(value) { try { return value ? value == "true" || ( value == "false" ? false : value == "null" ? null : +value + "" == value ? +value : /^[\[\{]/.test(value) ? 
$.parseJSON(value) : value ) : value } catch(e) { return value } } $.type = type $.isFunction = isFunction $.isWindow = isWindow $.isArray = isArray $.isPlainObject = isPlainObject $.isEmptyObject = function(obj) { var name for (name in obj) return false return true } $.inArray = function(elem, array, i){ return emptyArray.indexOf.call(array, elem, i) } $.camelCase = camelize $.trim = function(str) { return str == null ? "" : String.prototype.trim.call(str) } // plugin compatibility $.uuid = 0 $.support = { } $.expr = { } $.noop = function() {} $.map = function(elements, callback){ var value, values = [], i, key if (likeArray(elements)) for (i = 0; i < elements.length; i++) { value = callback(elements[i], i) if (value != null) values.push(value) } else for (key in elements) { value = callback(elements[key], key) if (value != null) values.push(value) } return flatten(values) } $.each = function(elements, callback){ var i, key if (likeArray(elements)) { for (i = 0; i < elements.length; i++) if (callback.call(elements[i], i, elements[i]) === false) return elements } else { for (key in elements) if (callback.call(elements[key], key, elements[key]) === false) return elements } return elements } $.grep = function(elements, callback){ return filter.call(elements, callback) } if (window.JSON) $.parseJSON = JSON.parse // Populate the class2type map $.each("Boolean Number String Function Array Date RegExp Object Error".split(" "), function(i, name) { class2type[ "[object " + name + "]" ] = name.toLowerCase() }) // Define methods that will be available on all // Zepto collections $.fn = { constructor: zepto.Z, length: 0, // Because a collection acts like an array // copy over these useful array functions. 
forEach: emptyArray.forEach, reduce: emptyArray.reduce, push: emptyArray.push, sort: emptyArray.sort, splice: emptyArray.splice, indexOf: emptyArray.indexOf, concat: function(){ var i, value, args = [] for (i = 0; i < arguments.length; i++) { value = arguments[i] args[i] = zepto.isZ(value) ? value.toArray() : value } return concat.apply(zepto.isZ(this) ? this.toArray() : this, args) }, // `map` and `slice` in the jQuery API work differently // from their array counterparts map: function(fn){ return $($.map(this, function(el, i){ return fn.call(el, i, el) })) }, slice: function(){ return $(slice.apply(this, arguments)) }, ready: function(callback){ // need to check if document.body exists for IE as that browser reports // document ready when it hasn't yet created the body element if (readyRE.test(document.readyState) && document.body) callback($) else document.addEventListener('DOMContentLoaded', function(){ callback($) }, false) return this }, get: function(idx){ return idx === undefined ? slice.call(this) : this[idx >= 0 ? idx : idx + this.length] }, toArray: function(){ return this.get() }, size: function(){ return this.length }, remove: function(){ return this.each(function(){ if (this.parentNode != null) this.parentNode.removeChild(this) }) }, each: function(callback){ emptyArray.every.call(this, function(el, idx){ return callback.call(el, idx, el) !== false }) return this }, filter: function(selector){ if (isFunction(selector)) return this.not(this.not(selector)) return $(filter.call(this, function(element){ return zepto.matches(element, selector) })) }, add: function(selector,context){ return $(uniq(this.concat($(selector,context)))) }, is: function(selector){ return this.length > 0 && zepto.matches(this[0], selector) }, not: function(selector){ var nodes=[] if (isFunction(selector) && selector.call !== undefined) this.each(function(idx){ if (!selector.call(this,idx)) nodes.push(this) }) else { var excludes = typeof selector == 'string' ? 
this.filter(selector) : (likeArray(selector) && isFunction(selector.item)) ? slice.call(selector) : $(selector) this.forEach(function(el){ if (excludes.indexOf(el) < 0) nodes.push(el) }) } return $(nodes) }, has: function(selector){ return this.filter(function(){ return isObject(selector) ? $.contains(this, selector) : $(this).find(selector).size() }) }, eq: function(idx){ return idx === -1 ? this.slice(idx) : this.slice(idx, + idx + 1) }, first: function(){ var el = this[0] return el && !isObject(el) ? el : $(el) }, last: function(){ var el = this[this.length - 1] return el && !isObject(el) ? el : $(el) }, find: function(selector){ var result, $this = this if (!selector) result = $() else if (typeof selector == 'object') result = $(selector).filter(function(){ var node = this return emptyArray.some.call($this, function(parent){ return $.contains(parent, node) }) }) else if (this.length == 1) result = $(zepto.qsa(this[0], selector)) else result = this.map(function(){ return zepto.qsa(this, selector) }) return result }, closest: function(selector, context){ var node = this[0], collection = false if (typeof selector == 'object') collection = $(selector) while (node && !(collection ? 
collection.indexOf(node) >= 0 : zepto.matches(node, selector))) node = node !== context && !isDocument(node) && node.parentNode return $(node) }, parents: function(selector){ var ancestors = [], nodes = this while (nodes.length > 0) nodes = $.map(nodes, function(node){ if ((node = node.parentNode) && !isDocument(node) && ancestors.indexOf(node) < 0) { ancestors.push(node) return node } }) return filtered(ancestors, selector) }, parent: function(selector){ return filtered(uniq(this.pluck('parentNode')), selector) }, children: function(selector){ return filtered(this.map(function(){ return children(this) }), selector) }, contents: function() { return this.map(function() { return this.contentDocument || slice.call(this.childNodes) }) }, siblings: function(selector){ return filtered(this.map(function(i, el){ return filter.call(children(el.parentNode), function(child){ return child!==el }) }), selector) }, empty: function(){ return this.each(function(){ this.innerHTML = '' }) }, // `pluck` is borrowed from Prototype.js pluck: function(property){ return $.map(this, function(el){ return el[property] }) }, show: function(){ return this.each(function(){ this.style.display == "none" && (this.style.display = '') if (getComputedStyle(this, '').getPropertyValue("display") == "none") this.style.display = defaultDisplay(this.nodeName) }) }, replaceWith: function(newContent){ return this.before(newContent).remove() }, wrap: function(structure){ var func = isFunction(structure) if (this[0] && !func) var dom = $(structure).get(0), clone = dom.parentNode || this.length > 1 return this.each(function(index){ $(this).wrapAll( func ? structure.call(this, index) : clone ? 
dom.cloneNode(true) : dom ) }) }, wrapAll: function(structure){ if (this[0]) { $(this[0]).before(structure = $(structure)) var children // drill down to the inmost element while ((children = structure.children()).length) structure = children.first() $(structure).append(this) } return this }, wrapInner: function(structure){ var func = isFunction(structure) return this.each(function(index){ var self = $(this), contents = self.contents(), dom = func ? structure.call(this, index) : structure contents.length ? contents.wrapAll(dom) : self.append(dom) }) }, unwrap: function(){ this.parent().each(function(){ $(this).replaceWith($(this).children()) }) return this }, clone: function(){ return this.map(function(){ return this.cloneNode(true) }) }, hide: function(){ return this.css("display", "none") }, toggle: function(setting){ return this.each(function(){ var el = $(this) ;(setting === undefined ? el.css("display") == "none" : setting) ? el.show() : el.hide() }) }, prev: function(selector){ return $(this.pluck('previousElementSibling')).filter(selector || '*') }, next: function(selector){ return $(this.pluck('nextElementSibling')).filter(selector || '*') }, html: function(html){ return 0 in arguments ? this.each(function(idx){ var originHtml = this.innerHTML $(this).empty().append( funcArg(this, html, idx, originHtml) ) }) : (0 in this ? this[0].innerHTML : null) }, text: function(text){ return 0 in arguments ? this.each(function(idx){ var newText = funcArg(this, text, idx, this.textContent) this.textContent = newText == null ? '' : ''+newText }) : (0 in this ? this[0].textContent : null) }, attr: function(name, value){ var result return (typeof name == 'string' && !(1 in arguments)) ? (!this.length || this[0].nodeType !== 1 ? undefined : (!(result = this[0].getAttribute(name)) && name in this[0]) ? 
this[0][name] : result ) : this.each(function(idx){ if (this.nodeType !== 1) return if (isObject(name)) for (key in name) setAttribute(this, key, name[key]) else setAttribute(this, name, funcArg(this, value, idx, this.getAttribute(name))) }) }, removeAttr: function(name){ return this.each(function(){ this.nodeType === 1 && name.split(' ').forEach(function(attribute){ setAttribute(this, attribute) }, this)}) }, prop: function(name, value){ name = propMap[name] || name return (1 in arguments) ? this.each(function(idx){ this[name] = funcArg(this, value, idx, this[name]) }) : (this[0] && this[0][name]) }, data: function(name, value){ var attrName = 'data-' + name.replace(capitalRE, '-$1').toLowerCase() var data = (1 in arguments) ? this.attr(attrName, value) : this.attr(attrName) return data !== null ? deserializeValue(data) : undefined }, val: function(value){ return 0 in arguments ? this.each(function(idx){ this.value = funcArg(this, value, idx, this.value) }) : (this[0] && (this[0].multiple ? 
$(this[0]).find('option').filter(function(){ return this.selected }).pluck('value') : this[0].value) ) }, offset: function(coordinates){ if (coordinates) return this.each(function(index){ var $this = $(this), coords = funcArg(this, coordinates, index, $this.offset()), parentOffset = $this.offsetParent().offset(), props = { top: coords.top - parentOffset.top, left: coords.left - parentOffset.left } if ($this.css('position') == 'static') props['position'] = 'relative' $this.css(props) }) if (!this.length) return null if (!$.contains(document.documentElement, this[0])) return {top: 0, left: 0} var obj = this[0].getBoundingClientRect() return { left: obj.left + window.pageXOffset, top: obj.top + window.pageYOffset, width: Math.round(obj.width), height: Math.round(obj.height) } }, css: function(property, value){ if (arguments.length < 2) { var computedStyle, element = this[0] if(!element) return computedStyle = getComputedStyle(element, '') if (typeof property == 'string') return element.style[camelize(property)] || computedStyle.getPropertyValue(property) else if (isArray(property)) { var props = {} $.each(property, function(_, prop){ props[prop] = (element.style[camelize(prop)] || computedStyle.getPropertyValue(prop)) }) return props } } var css = '' if (type(property) == 'string') { if (!value && value !== 0) this.each(function(){ this.style.removeProperty(dasherize(property)) }) else css = dasherize(property) + ":" + maybeAddPx(property, value) } else { for (key in property) if (!property[key] && property[key] !== 0) this.each(function(){ this.style.removeProperty(dasherize(key)) }) else css += dasherize(key) + ':' + maybeAddPx(key, property[key]) + ';' } return this.each(function(){ this.style.cssText += ';' + css }) }, index: function(element){ return element ? 
this.indexOf($(element)[0]) : this.parent().children().indexOf(this[0]) }, hasClass: function(name){ if (!name) return false return emptyArray.some.call(this, function(el){ return this.test(className(el)) }, classRE(name)) }, addClass: function(name){ if (!name) return this return this.each(function(idx){ if (!('className' in this)) return classList = [] var cls = className(this), newName = funcArg(this, name, idx, cls) newName.split(/\s+/g).forEach(function(klass){ if (!$(this).hasClass(klass)) classList.push(klass) }, this) classList.length && className(this, cls + (cls ? " " : "") + classList.join(" ")) }) }, removeClass: function(name){ return this.each(function(idx){ if (!('className' in this)) return if (name === undefined) return className(this, '') classList = className(this) funcArg(this, name, idx, classList).split(/\s+/g).forEach(function(klass){ classList = classList.replace(classRE(klass), " ") }) className(this, classList.trim()) }) }, toggleClass: function(name, when){ if (!name) return this return this.each(function(idx){ var $this = $(this), names = funcArg(this, name, idx, className(this)) names.split(/\s+/g).forEach(function(klass){ (when === undefined ? !$this.hasClass(klass) : when) ? $this.addClass(klass) : $this.removeClass(klass) }) }) }, scrollTop: function(value){ if (!this.length) return var hasScrollTop = 'scrollTop' in this[0] if (value === undefined) return hasScrollTop ? this[0].scrollTop : this[0].pageYOffset return this.each(hasScrollTop ? function(){ this.scrollTop = value } : function(){ this.scrollTo(this.scrollX, value) }) }, scrollLeft: function(value){ if (!this.length) return var hasScrollLeft = 'scrollLeft' in this[0] if (value === undefined) return hasScrollLeft ? this[0].scrollLeft : this[0].pageXOffset return this.each(hasScrollLeft ? 
function(){ this.scrollLeft = value } : function(){ this.scrollTo(value, this.scrollY) }) }, position: function() { if (!this.length) return var elem = this[0], // Get *real* offsetParent offsetParent = this.offsetParent(), // Get correct offsets offset = this.offset(), parentOffset = rootNodeRE.test(offsetParent[0].nodeName) ? { top: 0, left: 0 } : offsetParent.offset() // Subtract element margins // note: when an element has margin: auto the offsetLeft and marginLeft // are the same in Safari causing offset.left to incorrectly be 0 offset.top -= parseFloat( $(elem).css('margin-top') ) || 0 offset.left -= parseFloat( $(elem).css('margin-left') ) || 0 // Add offsetParent borders parentOffset.top += parseFloat( $(offsetParent[0]).css('border-top-width') ) || 0 parentOffset.left += parseFloat( $(offsetParent[0]).css('border-left-width') ) || 0 // Subtract the two offsets return { top: offset.top - parentOffset.top, left: offset.left - parentOffset.left } }, offsetParent: function() { return this.map(function(){ var parent = this.offsetParent || document.body while (parent && !rootNodeRE.test(parent.nodeName) && $(parent).css("position") == "static") parent = parent.offsetParent return parent }) } } // for now $.fn.detach = $.fn.remove // Generate the `width` and `height` functions ;['width', 'height'].forEach(function(dimension){ var dimensionProperty = dimension.replace(/./, function(m){ return m[0].toUpperCase() }) $.fn[dimension] = function(value){ var offset, el = this[0] if (value === undefined) return isWindow(el) ? el['inner' + dimensionProperty] : isDocument(el) ? 
el.documentElement['scroll' + dimensionProperty] : (offset = this.offset()) && offset[dimension] else return this.each(function(idx){ el = $(this) el.css(dimension, funcArg(this, value, idx, el[dimension]())) }) } }) function traverseNode(node, fun) { fun(node) for (var i = 0, len = node.childNodes.length; i < len; i++) traverseNode(node.childNodes[i], fun) } // Generate the `after`, `prepend`, `before`, `append`, // `insertAfter`, `insertBefore`, `appendTo`, and `prependTo` methods. adjacencyOperators.forEach(function(operator, operatorIndex) { var inside = operatorIndex % 2 //=> prepend, append $.fn[operator] = function(){ // arguments can be nodes, arrays of nodes, Zepto objects and HTML strings var argType, nodes = $.map(arguments, function(arg) { argType = type(arg) return argType == "object" || argType == "array" || arg == null ? arg : zepto.fragment(arg) }), parent, copyByClone = this.length > 1 if (nodes.length < 1) return this return this.each(function(_, target){ parent = inside ? target : target.parentNode // convert all methods to a "before" operation target = operatorIndex == 0 ? target.nextSibling : operatorIndex == 1 ? target.firstChild : operatorIndex == 2 ? target : null var parentInDocument = $.contains(document.documentElement, parent) nodes.forEach(function(node){ if (copyByClone) node = node.cloneNode(true) else if (!parent) return $(node).remove() parent.insertBefore(node, target) if (parentInDocument) traverseNode(node, function(el){ if (el.nodeName != null && el.nodeName.toUpperCase() === 'SCRIPT' && (!el.type || el.type === 'text/javascript') && !el.src) window['eval'].call(window, el.innerHTML) }) }) }) } // after => insertAfter // prepend => prependTo // before => insertBefore // append => appendTo $.fn[inside ? operator+'To' : 'insert'+(operatorIndex ? 
    'Before' : 'After')] = function(html){
    $(html)[operator](this)
    return this
  }
})

zepto.Z.prototype = Z.prototype = $.fn

// Export internal API functions in the `$.zepto` namespace
zepto.uniq = uniq
zepto.deserializeValue = deserializeValue
$.zepto = zepto

return $
})()

// If `$` is not yet defined, point it to `Zepto`
window.Zepto = Zepto
window.$ === undefined && (window.$ = Zepto)

// Zepto.js
// (c) 2010-2015 Thomas Fuchs
// Zepto.js may be freely distributed under the MIT license.

// --- Ajax module: $.ajax, $.ajaxJSONP, $.get/$.post/$.getJSON, $.param, $.fn.load ---
;(function($){
  var jsonpID = 0,
      document = window.document,
      key,
      name,
      // strips <script> blocks out of HTML fetched via $.fn.load
      rscript = /<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,
      scriptTypeRE = /^(?:text|application)\/javascript/i,
      xmlTypeRE = /^(?:text|application)\/xml/i,
      jsonType = 'application/json',
      htmlType = 'text/html',
      blankRE = /^\s*$/,
      // anchor element used to resolve the page's own origin for cross-domain detection
      originAnchor = document.createElement('a')

  originAnchor.href = window.location.href

  // trigger a custom event and return false if it was cancelled
  function triggerAndReturn(context, eventName, data) {
    var event = $.Event(eventName)
    $(context).trigger(event, data)
    return !event.isDefaultPrevented()
  }

  // trigger an Ajax "global" event (only when settings.global is enabled)
  function triggerGlobal(settings, context, eventName, data) {
    if (settings.global) return triggerAndReturn(context || document, eventName, data)
  }

  // Number of active Ajax requests
  $.active = 0

  // fires "ajaxStart" when the first concurrent request begins
  function ajaxStart(settings) {
    if (settings.global && $.active++ === 0) triggerGlobal(settings, null, 'ajaxStart')
  }
  // fires "ajaxStop" when the last concurrent request finishes
  function ajaxStop(settings) {
    if (settings.global && !(--$.active)) triggerGlobal(settings, null, 'ajaxStop')
  }

  // triggers an extra global event "ajaxBeforeSend" that's like "ajaxSend" but cancelable;
  // returns false when either the beforeSend callback or an "ajaxBeforeSend" handler cancels
  function ajaxBeforeSend(xhr, settings) {
    var context = settings.context
    if (settings.beforeSend.call(context, xhr, settings) === false ||
        triggerGlobal(settings, context, 'ajaxBeforeSend', [xhr, settings]) === false)
      return false

    triggerGlobal(settings, context, 'ajaxSend', [xhr, settings])
  }

  // success path: run success callback, resolve deferred, then complete
  function ajaxSuccess(data, xhr, settings, deferred) {
    var context = settings.context, status = 'success'
    settings.success.call(context, data, status, xhr)
    if (deferred) deferred.resolveWith(context, [data, status, xhr])
    triggerGlobal(settings, context, 'ajaxSuccess', [xhr, settings, data])
    ajaxComplete(status, xhr, settings)
  }
  // type: "timeout", "error", "abort", "parsererror"
  function ajaxError(error, type, xhr, settings, deferred) {
    var context = settings.context
    settings.error.call(context, xhr, type, error)
    if (deferred) deferred.rejectWith(context, [xhr, type, error])
    triggerGlobal(settings, context, 'ajaxError', [xhr, settings, error || type])
    ajaxComplete(type, xhr, settings)
  }
  // status: "success", "notmodified", "error", "timeout", "abort", "parsererror"
  function ajaxComplete(status, xhr, settings) {
    var context = settings.context
    settings.complete.call(context, xhr, status)
    triggerGlobal(settings, context, 'ajaxComplete', [xhr, settings])
    ajaxStop(settings)
  }

  // Empty function, used as default callback
  function empty() {}

  // JSONP transport: injects a <script> tag and routes the named global callback
  // back through the regular success/error pipeline. Returns a fake xhr with abort().
  $.ajaxJSONP = function(options, deferred){
    if (!('type' in options)) return $.ajax(options)

    var _callbackName = options.jsonpCallback,
      callbackName = ($.isFunction(_callbackName) ?
        _callbackName() : _callbackName) || ('jsonp' + (++jsonpID)),
      script = document.createElement('script'),
      originalCallback = window[callbackName],
      responseData,
      abort = function(errorType) {
        $(script).triggerHandler('error', errorType || 'abort')
      },
      xhr = { abort: abort }, abortTimeout

    if (deferred) deferred.promise(xhr)

    $(script).on('load error', function(e, errorType){
      clearTimeout(abortTimeout)
      $(script).off().remove()

      if (e.type == 'error' || !responseData) {
        ajaxError(null, errorType || 'error', xhr, options, deferred)
      } else {
        ajaxSuccess(responseData[0], xhr, options, deferred)
      }

      // restore any pre-existing global with the same callback name
      window[callbackName] = originalCallback
      if (responseData && $.isFunction(originalCallback))
        originalCallback(responseData[0])

      originalCallback = responseData = undefined
    })

    if (ajaxBeforeSend(xhr, options) === false) {
      abort('abort')
      return xhr
    }

    window[callbackName] = function(){ responseData = arguments }

    script.src = options.url.replace(/\?(.+)=\?/, '?$1=' + callbackName)
    document.head.appendChild(script)

    if (options.timeout > 0) abortTimeout = setTimeout(function(){
      abort('timeout')
    }, options.timeout)

    return xhr
  }

  $.ajaxSettings = {
    // Default type of request
    type: 'GET',
    // Callback that is executed before request
    beforeSend: empty,
    // Callback that is executed if the request succeeds
    success: empty,
    // Callback that is executed if the request fails
    error: empty,
    // Callback that is executed on request complete (both: error and success)
    complete: empty,
    // The context for the callbacks
    context: null,
    // Whether to trigger "global" Ajax events
    global: true,
    // Transport
    xhr: function () {
      return new window.XMLHttpRequest()
    },
    // MIME types mapping
    // IIS returns Javascript as "application/x-javascript"
    accepts: {
      script: 'text/javascript, application/javascript, application/x-javascript',
      json: jsonType,
      xml: 'application/xml, text/xml',
      html: htmlType,
      text: 'text/plain'
    },
    // Whether the request is to another domain
    crossDomain: false,
    // Default timeout
    timeout: 0,
    // Whether data should be serialized to string
    processData: true,
    // Whether the browser should be allowed to cache GET responses
    cache: true
  }

  // map a Content-Type MIME string onto a dataType keyword; defaults to 'text'
  function mimeToDataType(mime) {
    if (mime) mime = mime.split(';', 2)[0]
    return mime && ( mime == htmlType ? 'html' :
      mime == jsonType ? 'json' :
      scriptTypeRE.test(mime) ? 'script' :
      xmlTypeRE.test(mime) && 'xml' ) || 'text'
  }

  // append a query string to a URL, collapsing a doubled '?'/'&' separator
  function appendQuery(url, query) {
    if (query == '') return url
    return (url + '&' + query).replace(/[&?]{1,2}/, '?')
  }

  // serialize payload and append it to the URL for GET requests
  function serializeData(options) {
    if (options.processData && options.data && $.type(options.data) != "string")
      options.data = $.param(options.data, options.traditional)
    if (options.data && (!options.type || options.type.toUpperCase() == 'GET'))
      options.url = appendQuery(options.url, options.data), options.data = undefined
  }

  // core Ajax entry point; merges options over $.ajaxSettings, dispatches to
  // JSONP when a "callback=?" placeholder is present, otherwise drives an XHR
  $.ajax = function(options){
    var settings = $.extend({}, options || {}),
        deferred = $.Deferred && $.Deferred(),
        urlAnchor, hashIndex
    for (key in $.ajaxSettings) if (settings[key] === undefined) settings[key] = $.ajaxSettings[key]

    ajaxStart(settings)

    // detect cross-domain by comparing the resolved URL's origin with the page's
    if (!settings.crossDomain) {
      urlAnchor = document.createElement('a')
      urlAnchor.href = settings.url
      // NOTE(review): re-assigning href appears intended to normalize the
      // resolved URL properties (browser quirk workaround) — confirm
      urlAnchor.href = urlAnchor.href
      settings.crossDomain = (originAnchor.protocol + '//' + originAnchor.host) !== (urlAnchor.protocol + '//' + urlAnchor.host)
    }

    if (!settings.url) settings.url = window.location.toString()
    if ((hashIndex = settings.url.indexOf('#')) > -1) settings.url = settings.url.slice(0, hashIndex)
    serializeData(settings)

    var dataType = settings.dataType, hasPlaceholder = /\?.+=\?/.test(settings.url)
    if (hasPlaceholder) dataType = 'jsonp'

    // cache-busting "_" timestamp parameter (scripts/jsonp bust by default)
    if (settings.cache === false || (
         (!options || options.cache !== true) &&
         ('script' == dataType || 'jsonp' == dataType)
        ))
      settings.url = appendQuery(settings.url, '_=' + Date.now())

    if ('jsonp' == dataType) {
      if (!hasPlaceholder)
        settings.url = appendQuery(settings.url,
          settings.jsonp ? (settings.jsonp + '=?') : settings.jsonp === false ? '' : 'callback=?')
      return $.ajaxJSONP(settings, deferred)
    }

    var mime = settings.accepts[dataType],
        headers = { },
        setHeader = function(name, value) { headers[name.toLowerCase()] = [name, value] },
        protocol = /^([\w-]+:)\/\//.test(settings.url) ? RegExp.$1 : window.location.protocol,
        xhr = settings.xhr(),
        nativeSetHeader = xhr.setRequestHeader,
        abortTimeout

    if (deferred) deferred.promise(xhr)

    if (!settings.crossDomain) setHeader('X-Requested-With', 'XMLHttpRequest')
    setHeader('Accept', mime || '*/*')
    if (mime = settings.mimeType || mime) {
      if (mime.indexOf(',') > -1) mime = mime.split(',', 2)[0]
      xhr.overrideMimeType && xhr.overrideMimeType(mime)
    }
    if (settings.contentType || (settings.contentType !== false && settings.data && settings.type.toUpperCase() != 'GET'))
      setHeader('Content-Type', settings.contentType || 'application/x-www-form-urlencoded')

    if (settings.headers) for (name in settings.headers) setHeader(name, settings.headers[name])
    // later user calls to setRequestHeader are collected into `headers` too
    xhr.setRequestHeader = setHeader

    xhr.onreadystatechange = function(){
      if (xhr.readyState == 4) {
        xhr.onreadystatechange = empty
        clearTimeout(abortTimeout)
        var result, error = false
        // status 0 + file: protocol counts as success (local file reads)
        if ((xhr.status >= 200 && xhr.status < 300) || xhr.status == 304 || (xhr.status == 0 && protocol == 'file:')) {
          dataType = dataType || mimeToDataType(settings.mimeType || xhr.getResponseHeader('content-type'))
          result = xhr.responseText

          try {
            // http://perfectionkills.com/global-eval-what-are-the-options/
            if (dataType == 'script') (1,eval)(result)
            else if (dataType == 'xml') result = xhr.responseXML
            else if (dataType == 'json') result = blankRE.test(result) ? null : $.parseJSON(result)
          } catch (e) { error = e }

          if (error) ajaxError(error, 'parsererror', xhr, settings, deferred)
          else ajaxSuccess(result, xhr, settings, deferred)
        } else {
          ajaxError(xhr.statusText || null, xhr.status ? 'error' : 'abort', xhr, settings, deferred)
        }
      }
    }

    if (ajaxBeforeSend(xhr, settings) === false) {
      xhr.abort()
      ajaxError(null, 'abort', xhr, settings, deferred)
      return xhr
    }

    if (settings.xhrFields) for (name in settings.xhrFields) xhr[name] = settings.xhrFields[name]

    var async = 'async' in settings ? settings.async : true
    xhr.open(settings.type, settings.url, async, settings.username, settings.password)

    // headers were buffered above; apply them with the native setter
    for (name in headers) nativeSetHeader.apply(xhr, headers[name])

    if (settings.timeout > 0) abortTimeout = setTimeout(function(){
        xhr.onreadystatechange = empty
        xhr.abort()
        ajaxError(null, 'timeout', xhr, settings, deferred)
      }, settings.timeout)

    // avoid sending empty string (#319)
    xhr.send(settings.data ? settings.data : null)
    return xhr
  }

  // handle optional data/success arguments
  function parseArguments(url, data, success, dataType) {
    if ($.isFunction(data)) dataType = success, success = data, data = undefined
    if (!$.isFunction(success)) dataType = success, success = undefined
    return {
      url: url
    , data: data
    , success: success
    , dataType: dataType
    }
  }

  $.get = function(/* url, data, success, dataType */){
    return $.ajax(parseArguments.apply(null, arguments))
  }

  $.post = function(/* url, data, success, dataType */){
    var options = parseArguments.apply(null, arguments)
    options.type = 'POST'
    return $.ajax(options)
  }

  $.getJSON = function(/* url, data, success */){
    var options = parseArguments.apply(null, arguments)
    options.dataType = 'json'
    return $.ajax(options)
  }

  // load remote HTML into each element; "url selector" syntax extracts a fragment
  $.fn.load = function(url, data, success){
    if (!this.length) return this
    var self = this, parts = url.split(/\s/), selector,
        options = parseArguments(url, data, success),
        callback = options.success
    if (parts.length > 1) options.url = parts[0], selector = parts[1]
    options.success = function(response){
      self.html(selector ?
        $('<div>').html(response.replace(rscript, "")).find(selector)
        : response)
      callback && callback.apply(self, arguments)
    }
    $.ajax(options)
    return this
  }

  var escape = encodeURIComponent

  // recursively flatten an object/array into key/value pairs via params.add
  function serialize(params, obj, traditional, scope){
    var type, array = $.isArray(obj), hash = $.isPlainObject(obj)
    $.each(obj, function(key, value) {
      type = $.type(value)
      if (scope) key = traditional ? scope :
        scope + '[' + (hash || type == 'object' || type == 'array' ? key : '') + ']'
      // handle data in serializeArray() format
      if (!scope && array) params.add(value.name, value.value)
      // recurse into nested objects
      else if (type == "array" || (!traditional && type == "object"))
        serialize(params, value, traditional, key)
      else params.add(key, value)
    })
  }

  // URL-encode an object as an application/x-www-form-urlencoded string
  $.param = function(obj, traditional){
    var params = []
    params.add = function(key, value) {
      if ($.isFunction(value)) value = value()
      if (value == null) value = ""
      this.push(escape(key) + '=' + escape(value))
    }
    serialize(params, obj, traditional)
    return params.join('&').replace(/%20/g, '+')
  }
})(Zepto)

// Zepto.js
// (c) 2010-2015 Thomas Fuchs
// Zepto.js may be freely distributed under the MIT license.
// --- Event module: handler registry, delegation, on/off/trigger, $.Event ---
;(function($){
  var _zid = 1, undefined,
      slice = Array.prototype.slice,
      isFunction = $.isFunction,
      isString = function(obj){ return typeof obj == 'string' },
      // handler registry keyed by each element's _zid
      handlers = {},
      specialEvents={},
      focusinSupported = 'onfocusin' in window,
      focus = { focus: 'focusin', blur: 'focusout' },
      hover = { mouseenter: 'mouseover', mouseleave: 'mouseout' }

  specialEvents.click = specialEvents.mousedown = specialEvents.mouseup = specialEvents.mousemove = 'MouseEvents'

  // unique id per element, used as key into the handlers registry
  function zid(element) {
    return element._zid || (element._zid = _zid++)
  }
  // all registered handlers on element matching event type/namespaces/fn/selector
  function findHandlers(element, event, fn, selector) {
    event = parse(event)
    if (event.ns) var matcher = matcherFor(event.ns)
    return (handlers[zid(element)] || []).filter(function(handler) {
      return handler
        && (!event.e  || handler.e == event.e)
        && (!event.ns || matcher.test(handler.ns))
        && (!fn       || zid(handler.fn) === zid(fn))
        && (!selector || handler.sel == selector)
    })
  }
  // split "type.ns1.ns2" into the type and a sorted namespace string
  function parse(event) {
    var parts = ('' + event).split('.')
    return {e: parts[0], ns: parts.slice(1).sort().join(' ')}
  }
  function matcherFor(ns) {
    return new RegExp('(?:^| )' + ns.replace(' ', ' .* ?') + '(?: |$)')
  }

  // delegated focus/blur must listen in the capture phase when
  // focusin/focusout are not supported (those events do not bubble)
  function eventCapture(handler, captureSetting) {
    return handler.del &&
      (!focusinSupported && (handler.e in focus)) ||
      !!captureSetting
  }

  // map logical event names onto the type actually bound on the DOM
  function realEvent(type) {
    return hover[type] || (focusinSupported && focus[type]) || type
  }

  // register a handler (optionally delegated) and attach its proxy to the DOM
  function add(element, events, fn, data, selector, delegator, capture){
    var id = zid(element), set = (handlers[id] || (handlers[id] = []))
    events.split(/\s/).forEach(function(event){
      if (event == 'ready') return $(document).ready(fn)
      var handler = parse(event)
      handler.fn = fn
      handler.sel = selector
      // emulate mouseenter, mouseleave
      if (handler.e in hover) fn = function(e){
        var related = e.relatedTarget
        if (!related || (related !== this && !$.contains(this, related)))
          return handler.fn.apply(this, arguments)
      }
      handler.del = delegator
      var callback = delegator || fn
      // proxy normalizes the event and enforces the `return false` shorthand
      handler.proxy = function(e){
        e = compatible(e)
        if (e.isImmediatePropagationStopped()) return
        e.data = data
        var result = callback.apply(element, e._args == undefined ? [e] : [e].concat(e._args))
        if (result === false) e.preventDefault(), e.stopPropagation()
        return result
      }
      handler.i = set.length
      set.push(handler)
      if ('addEventListener' in element)
        element.addEventListener(realEvent(handler.e), handler.proxy, eventCapture(handler, capture))
    })
  }
  // unregister matching handlers and detach their proxies from the DOM
  function remove(element, events, fn, selector, capture){
    var id = zid(element)
    ;(events || '').split(/\s/).forEach(function(event){
      findHandlers(element, event, fn, selector).forEach(function(handler){
        delete handlers[id][handler.i]
        if ('removeEventListener' in element)
          element.removeEventListener(realEvent(handler.e), handler.proxy, eventCapture(handler, capture))
      })
    })
  }

  $.event = { add: add, remove: remove }

  // bind fn to a context object, or bind a named method of the first argument
  $.proxy = function(fn, context) {
    var args = (2 in arguments) && slice.call(arguments, 2)
    if (isFunction(fn)) {
      var proxyFn = function(){ return fn.apply(context, args ? args.concat(slice.call(arguments)) : arguments) }
      // share the original fn's zid so the proxy can be unbound via the original
      proxyFn._zid = zid(fn)
      return proxyFn
    } else if (isString(context)) {
      if (args) {
        args.unshift(fn[context], fn)
        return $.proxy.apply(null, args)
      } else {
        return $.proxy(fn[context], fn)
      }
    } else {
      throw new TypeError("expected function")
    }
  }

  $.fn.bind = function(event, data, callback){
    return this.on(event, data, callback)
  }
  $.fn.unbind = function(event, callback){
    return this.off(event, callback)
  }
  // one-shot binding: the trailing `1` makes `on` auto-remove after first fire
  $.fn.one = function(event, selector, data, callback){
    return this.on(event, selector, data, callback, 1)
  }

  var returnTrue = function(){return true},
      returnFalse = function(){return false},
      // skip read-only/uppercase native properties when copying events
      ignoreProperties = /^([A-Z]|returnValue$|layer[XY]$)/,
      eventMethods = {
        preventDefault: 'isDefaultPrevented',
        stopImmediatePropagation: 'isImmediatePropagationStopped',
        stopPropagation: 'isPropagationStopped'
      }

  // add the is*Prevented/Stopped predicate methods to an event (or its proxy),
  // forwarding the real calls to `source` when given
  function compatible(event, source) {
    if (source || !event.isDefaultPrevented) {
      source || (source = event)

      $.each(eventMethods, function(name, predicate) {
        var sourceMethod = source[name]
        event[name] = function(){
          this[predicate] = returnTrue
          return sourceMethod && sourceMethod.apply(source, arguments)
        }
        event[predicate] = returnFalse
      })

      if (source.defaultPrevented !== undefined ? source.defaultPrevented :
          'returnValue' in source ? source.returnValue === false :
          source.getPreventDefault && source.getPreventDefault())
        event.isDefaultPrevented = returnTrue
    }
    return event
  }

  // shallow copy of a native event so delegated handlers can override fields
  function createProxy(event) {
    var key, proxy = { originalEvent: event }
    for (key in event)
      if (!ignoreProperties.test(key) && event[key] !== undefined) proxy[key] = event[key]

    return compatible(proxy, event)
  }

  $.fn.delegate = function(selector, event, callback){
    return this.on(event, selector, callback)
  }
  $.fn.undelegate = function(selector, event, callback){
    return this.off(event, selector, callback)
  }

  $.fn.live = function(event, callback){
    $(document.body).delegate(this.selector, event, callback)
    return this
  }
  $.fn.die = function(event, callback){
    $(document.body).undelegate(this.selector, event, callback)
    return this
  }

  // central binding API; supports (event-map), (event, callback),
  // (event, selector, callback) and (event, selector, data, callback)
  $.fn.on = function(event, selector, data, callback, one){
    var autoRemove, delegator, $this = this
    if (event && !isString(event)) {
      $.each(event, function(type, fn){
        $this.on(type, selector, data, fn, one)
      })
      return $this
    }

    // shuffle optional arguments into place
    if (!isString(selector) && !isFunction(callback) && callback !== false)
      callback = data, data = selector, selector = undefined
    if (callback === undefined || data === false)
      callback = data, data = undefined

    if (callback === false) callback = returnFalse

    return $this.each(function(_, element){
      if (one) autoRemove = function(e){
        remove(element, e.type, callback)
        return callback.apply(this, arguments)
      }

      if (selector) delegator = function(e){
        var evt, match = $(e.target).closest(selector, element).get(0)
        if (match && match !== element) {
          evt = $.extend(createProxy(e), {currentTarget: match, liveFired: element})
          return (autoRemove || callback).apply(match, [evt].concat(slice.call(arguments, 1)))
        }
      }

      add(element, event, callback, data, selector, delegator || autoRemove)
    })
  }
  $.fn.off = function(event, selector, callback){
    var $this = this
    if (event && !isString(event)) {
      $.each(event, function(type, fn){
        $this.off(type, selector, fn)
      })
      return $this
    }

    if (!isString(selector) && !isFunction(callback) && callback !== false)
      callback = selector, selector = undefined

    if (callback === false) callback = returnFalse

    return $this.each(function(){
      remove(this, event, callback, selector)
    })
  }

  // dispatch a real (bubbling) event on each element
  $.fn.trigger = function(event, args){
    event = (isString(event) || $.isPlainObject(event)) ? $.Event(event) : compatible(event)
    event._args = args
    return this.each(function(){
      // handle focus(), blur() by calling them directly
      if (event.type in focus && typeof this[event.type] == "function") this[event.type]()
      // items in the collection might not be DOM elements
      else if ('dispatchEvent' in this) this.dispatchEvent(event)
      else $(this).triggerHandler(event, args)
    })
  }

  // triggers event handlers on current element just as if an event occurred,
  // doesn't trigger an actual event, doesn't bubble
  $.fn.triggerHandler = function(event, args){
    var e, result
    this.each(function(i, element){
      e = createProxy(isString(event) ? $.Event(event) : event)
      e._args = args
      e.target = element
      $.each(findHandlers(element, event.type || event), function(i, handler){
        result = handler.proxy(e)
        if (e.isImmediatePropagationStopped()) return false
      })
    })
    return result
  }

  // shortcut methods for `.bind(event, fn)` for each event type
  ;('focusin focusout focus blur load resize scroll unload click dblclick '+
    'mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave '+
    'change select keydown keypress keyup error').split(' ').forEach(function(event) {
    $.fn[event] = function(callback) {
      return (0 in arguments) ?
        this.bind(event, callback) :
        this.trigger(event)
    }
  })

  // construct a synthetic DOM event; bubbles by default unless props says otherwise
  $.Event = function(type, props) {
    if (!isString(type)) props = type, type = props.type
    var event = document.createEvent(specialEvents[type] || 'Events'), bubbles = true
    if (props) for (var name in props) (name == 'bubbles') ? (bubbles = !!props[name]) : (event[name] = props[name])
    event.initEvent(type, bubbles, true)
    return compatible(event)
  }
})(Zepto)

// Zepto.js
// (c) 2010-2015 Thomas Fuchs
// Zepto.js may be freely distributed under the MIT license.

// --- Form module: serializeArray, serialize, submit ---
;(function($){
  // collect "successful" form controls as {name, value} pairs
  $.fn.serializeArray = function() {
    var name, type, result = [],
      // multi-selects yield an array of values; recurse to add each one
      add = function(value) {
        if (value.forEach) return value.forEach(add)
        result.push({ name: name, value: value })
      }
    if (this[0]) $.each(this[0].elements, function(_, field){
      type = field.type, name = field.name
      if (name && field.nodeName.toLowerCase() != 'fieldset' &&
        !field.disabled && type != 'submit' && type != 'reset' && type != 'button' && type != 'file' &&
        ((type != 'radio' && type != 'checkbox') || field.checked))
          add($(field).val())
    })
    return result
  }

  // URL-encoded form payload string
  $.fn.serialize = function(){
    var result = []
    this.serializeArray().forEach(function(elm){
      result.push(encodeURIComponent(elm.name) + '=' + encodeURIComponent(elm.value))
    })
    return result.join('&')
  }

  // with a callback: bind it to "submit"; without: trigger submit on the
  // first element and, if not prevented, submit the form natively
  $.fn.submit = function(callback) {
    if (0 in arguments) this.bind('submit', callback)
    else if (this.length) {
      var event = $.Event('submit')
      this.eq(0).trigger(event)
      if (!event.isDefaultPrevented()) this.get(0).submit()
    }
    return this
  }
})(Zepto)

// Zepto.js
// (c) 2010-2015 Thomas Fuchs
// Zepto.js may be freely distributed under the MIT license.
;(function($, undefined){ var prefix = '', eventPrefix, vendors = { Webkit: 'webkit', Moz: '', O: 'o' }, testEl = document.createElement('div'), supportedTransforms = /^((translate|rotate|scale)(X|Y|Z|3d)?|matrix(3d)?|perspective|skew(X|Y)?)$/i, transform, transitionProperty, transitionDuration, transitionTiming, transitionDelay, animationName, animationDuration, animationTiming, animationDelay, cssReset = {} function dasherize(str) { return str.replace(/([a-z])([A-Z])/, '$1-$2').toLowerCase() } function normalizeEvent(name) { return eventPrefix ? eventPrefix + name : name.toLowerCase() } $.each(vendors, function(vendor, event){ if (testEl.style[vendor + 'TransitionProperty'] !== undefined) { prefix = '-' + vendor.toLowerCase() + '-' eventPrefix = event return false } }) transform = prefix + 'transform' cssReset[transitionProperty = prefix + 'transition-property'] = cssReset[transitionDuration = prefix + 'transition-duration'] = cssReset[transitionDelay = prefix + 'transition-delay'] = cssReset[transitionTiming = prefix + 'transition-timing-function'] = cssReset[animationName = prefix + 'animation-name'] = cssReset[animationDuration = prefix + 'animation-duration'] = cssReset[animationDelay = prefix + 'animation-delay'] = cssReset[animationTiming = prefix + 'animation-timing-function'] = '' $.fx = { off: (eventPrefix === undefined && testEl.style.transitionProperty === undefined), speeds: { _default: 400, fast: 200, slow: 600 }, cssPrefix: prefix, transitionEnd: normalizeEvent('TransitionEnd'), animationEnd: normalizeEvent('AnimationEnd') } $.fn.animate = function(properties, duration, ease, callback, delay){ if ($.isFunction(duration)) callback = duration, ease = undefined, duration = undefined if ($.isFunction(ease)) callback = ease, ease = undefined if ($.isPlainObject(duration)) ease = duration.easing, callback = duration.complete, delay = duration.delay, duration = duration.duration if (duration) duration = (typeof duration == 'number' ? 
duration : ($.fx.speeds[duration] || $.fx.speeds._default)) / 1000 if (delay) delay = parseFloat(delay) / 1000 return this.anim(properties, duration, ease, callback, delay) } $.fn.anim = function(properties, duration, ease, callback, delay){ var key, cssValues = {}, cssProperties, transforms = '', that = this, wrappedCallback, endEvent = $.fx.transitionEnd, fired = false if (duration === undefined) duration = $.fx.speeds._default / 1000 if (delay === undefined) delay = 0 if ($.fx.off) duration = 0 if (typeof properties == 'string') { // keyframe animation cssValues[animationName] = properties cssValues[animationDuration] = duration + 's' cssValues[animationDelay] = delay + 's' cssValues[animationTiming] = (ease || 'linear') endEvent = $.fx.animationEnd } else { cssProperties = [] // CSS transitions for (key in properties) if (supportedTransforms.test(key)) transforms += key + '(' + properties[key] + ') ' else cssValues[key] = properties[key], cssProperties.push(dasherize(key)) if (transforms) cssValues[transform] = transforms, cssProperties.push(transform) if (duration > 0 && typeof properties === 'object') { cssValues[transitionProperty] = cssProperties.join(', ') cssValues[transitionDuration] = duration + 's' cssValues[transitionDelay] = delay + 's' cssValues[transitionTiming] = (ease || 'linear') } } wrappedCallback = function(event){ if (typeof event !== 'undefined') { if (event.target !== event.currentTarget) return // makes sure the event didn't bubble from "below" $(event.target).unbind(endEvent, wrappedCallback) } else $(this).unbind(endEvent, wrappedCallback) // triggered by setTimeout fired = true $(this).css(cssReset) callback && callback.call(this) } if (duration > 0){ this.bind(endEvent, wrappedCallback) // transitionEnd is not always firing on older Android phones // so make sure it gets fired setTimeout(function(){ if (fired) return wrappedCallback.call(that) }, ((duration + delay) * 1000) + 25) } // trigger page reflow so new elements can animate 
this.size() && this.get(0).clientLeft this.css(cssValues) if (duration <= 0) setTimeout(function() { that.each(function(){ wrappedCallback.call(this) }) }, 0) return this } testEl = null })(Zepto) // Zepto.js // (c) 2010-2015 Thomas Fuchs // Zepto.js may be freely distributed under the MIT license. ;(function($, undefined){ var document = window.document, docElem = document.documentElement, origShow = $.fn.show, origHide = $.fn.hide, origToggle = $.fn.toggle function anim(el, speed, opacity, scale, callback) { if (typeof speed == 'function' && !callback) callback = speed, speed = undefined var props = { opacity: opacity } if (scale) { props.scale = scale el.css($.fx.cssPrefix + 'transform-origin', '0 0') } return el.animate(props, speed, null, callback) } function hide(el, speed, scale, callback) { return anim(el, speed, 0, scale, function(){ origHide.call($(this)) callback && callback.call(this) }) } $.fn.show = function(speed, callback) { origShow.call(this) if (speed === undefined) speed = 0 else this.css('opacity', 0) return anim(this, speed, 1, '1,1', callback) } $.fn.hide = function(speed, callback) { if (speed === undefined) return origHide.call(this) else return hide(this, speed, '0,0', callback) } $.fn.toggle = function(speed, callback) { if (speed === undefined || typeof speed == 'boolean') return origToggle.call(this, speed) else return this.each(function(){ var el = $(this) el[el.css('display') == 'none' ? 
'show' : 'hide'](speed, callback) }) } $.fn.fadeTo = function(speed, opacity, callback) { return anim(this, speed, opacity, null, callback) } $.fn.fadeIn = function(speed, callback) { var target = this.css('opacity') if (target > 0) this.css('opacity', 0) else target = 1 return origShow.call(this).fadeTo(speed, target, callback) } $.fn.fadeOut = function(speed, callback) { return hide(this, speed, null, callback) } $.fn.fadeToggle = function(speed, callback) { return this.each(function(){ var el = $(this) el[ (el.css('opacity') == 0 || el.css('display') == 'none') ? 'fadeIn' : 'fadeOut' ](speed, callback) }) } })(Zepto) // Zepto.js // (c) 2010-2015 Thomas Fuchs // Zepto.js may be freely distributed under the MIT license. ;(function(){ // getComputedStyle shouldn't freak out when called // without a valid element as argument try { getComputedStyle(undefined) } catch(e) { var nativeGetComputedStyle = getComputedStyle; window.getComputedStyle = function(element){ try { return nativeGetComputedStyle(element) } catch(e) { return null } } } })() // Zepto.js // (c) 2010-2015 Thomas Fuchs // Zepto.js may be freely distributed under the MIT license. ;(function($){ var touch = {}, touchTimeout, tapTimeout, swipeTimeout, longTapTimeout, longTapDelay = 750, gesture function swipeDirection(x1, x2, y1, y2) { return Math.abs(x1 - x2) >= Math.abs(y1 - y2) ? (x1 - x2 > 0 ? 'Left' : 'Right') : (y1 - y2 > 0 ? 
'Up' : 'Down') } function longTap() { longTapTimeout = null if (touch.last) { touch.el.trigger('longTap') touch = {} } } function cancelLongTap() { if (longTapTimeout) clearTimeout(longTapTimeout) longTapTimeout = null } function cancelAll() { if (touchTimeout) clearTimeout(touchTimeout) if (tapTimeout) clearTimeout(tapTimeout) if (swipeTimeout) clearTimeout(swipeTimeout) if (longTapTimeout) clearTimeout(longTapTimeout) touchTimeout = tapTimeout = swipeTimeout = longTapTimeout = null touch = {} } function isPrimaryTouch(event){ return (event.pointerType == 'touch' || event.pointerType == event.MSPOINTER_TYPE_TOUCH) && event.isPrimary } function isPointerEventType(e, type){ return (e.type == 'pointer'+type || e.type.toLowerCase() == 'mspointer'+type) } $(document).ready(function(){ var now, delta, deltaX = 0, deltaY = 0, firstTouch, _isPointerType if ('MSGesture' in window) { gesture = new MSGesture() gesture.target = document.body } $(document) .bind('MSGestureEnd', function(e){ var swipeDirectionFromVelocity = e.velocityX > 1 ? 'Right' : e.velocityX < -1 ? 'Left' : e.velocityY > 1 ? 'Down' : e.velocityY < -1 ? 'Up' : null; if (swipeDirectionFromVelocity) { touch.el.trigger('swipe') touch.el.trigger('swipe'+ swipeDirectionFromVelocity) } }) .on('touchstart MSPointerDown pointerdown', function(e){ if((_isPointerType = isPointerEventType(e, 'down')) && !isPrimaryTouch(e)) return firstTouch = _isPointerType ? e : e.touches[0] if (e.touches && e.touches.length === 1 && touch.x2) { // Clear out touch movement data if we have it sticking around // This can occur if touchcancel doesn't fire due to preventDefault, etc. touch.x2 = undefined touch.y2 = undefined } now = Date.now() delta = now - (touch.last || now) touch.el = $('tagName' in firstTouch.target ? 
firstTouch.target : firstTouch.target.parentNode) touchTimeout && clearTimeout(touchTimeout) touch.x1 = firstTouch.pageX touch.y1 = firstTouch.pageY if (delta > 0 && delta <= 250) touch.isDoubleTap = true touch.last = now longTapTimeout = setTimeout(longTap, longTapDelay) // adds the current touch contact for IE gesture recognition if (gesture && _isPointerType) gesture.addPointer(e.pointerId); }) .on('touchmove MSPointerMove pointermove', function(e){ if((_isPointerType = isPointerEventType(e, 'move')) && !isPrimaryTouch(e)) return firstTouch = _isPointerType ? e : e.touches[0] cancelLongTap() touch.x2 = firstTouch.pageX touch.y2 = firstTouch.pageY deltaX += Math.abs(touch.x1 - touch.x2) deltaY += Math.abs(touch.y1 - touch.y2) }) .on('touchend MSPointerUp pointerup', function(e){ if((_isPointerType = isPointerEventType(e, 'up')) && !isPrimaryTouch(e)) return cancelLongTap() // swipe if ((touch.x2 && Math.abs(touch.x1 - touch.x2) > 30) || (touch.y2 && Math.abs(touch.y1 - touch.y2) > 30)) swipeTimeout = setTimeout(function() { touch.el.trigger('swipe') touch.el.trigger('swipe' + (swipeDirection(touch.x1, touch.x2, touch.y1, touch.y2))) touch = {} }, 0) // normal tap else if ('last' in touch) // don't fire tap when delta position changed by more than 30 pixels, // for instance when moving to a point and back to origin if (deltaX < 30 && deltaY < 30) { // delay by one tick so we can cancel the 'tap' event if 'scroll' fires // ('tap' fires before 'scroll') tapTimeout = setTimeout(function() { // trigger universal 'tap' with the option to cancelTouch() // (cancelTouch cancels processing of single vs double taps for faster 'tap' response) var event = $.Event('tap') event.cancelTouch = cancelAll touch.el.trigger(event) // trigger double tap immediately if (touch.isDoubleTap) { if (touch.el) touch.el.trigger('doubleTap') touch = {} } // trigger single tap after 250ms of inactivity else { touchTimeout = setTimeout(function(){ touchTimeout = null if (touch.el) 
touch.el.trigger('singleTap') touch = {} }, 250) } }, 0) } else { touch = {} } deltaX = deltaY = 0 }) // when the browser window loses focus, // for example when a modal dialog is shown, // cancel all ongoing events .on('touchcancel MSPointerCancel pointercancel', cancelAll) // scrolling the window indicates intention of the user // to scroll, not tap or swipe, so cancel all ongoing events $(window).on('scroll', cancelAll) }) ;['swipe', 'swipeLeft', 'swipeRight', 'swipeUp', 'swipeDown', 'doubleTap', 'tap', 'singleTap', 'longTap'].forEach(function(eventName){ $.fn[eventName] = function(callback){ return this.on(eventName, callback) } }) })(Zepto)
mit
xinyzhang9/xinyzhang9.github.io
spirits2/node_modules/glsl-token-whitespace-trim/README.md
1349
# glsl-token-whitespace-trim [![stable](http://badges.github.io/stability-badges/dist/stable.svg)](http://github.com/badges/stability-badges) Trim the whitespace within an array of GLSL tokens provided by [glsl-tokenizer](https://github.com/stackgl/glsl-tokenizer). Useful for minimising shader source size, especially after heavy processing steps such as seen in [glslify](http://github.com/stackgl/glslify) or as part of a GLSL minifier. ## Usage [![NPM](https://nodei.co/npm/glsl-token-whitespace-trim.png)](https://www.npmjs.com/package/glsl-token-whitespace-trim) ### `trim(tokens, [all])` Trims the whitespace in an array of GLSL `tokens`. By default, this will trim repeated newlines such that no more than two newlines will appear in a row. If you're more concerned about size than aesthetics, you can pass `true` as the second argument to remove *all* extraneous whitespace (more or less). ``` javascript const tokenize = require('glsl-tokenizer') const string = require('glsl-token-string') const trim = require('glsl-token-whitespace-trim') const fs = require('fs') const src = fs.readFileSync('shader.glsl', 'utf8') const tokens = tokenize(src) trim(tokens, true) const trimmed = string(tokens) ``` ## License MIT, see [LICENSE.md](http://github.com/hughsk/glsl-token-whitespace-trim/blob/master/LICENSE.md) for details.
mit
mwoynarski/KunstmaanBundlesCMS
src/Kunstmaan/AdminBundle/Resources/ui/js/_datepicker.js
4312
var kunstmaanbundles = kunstmaanbundles || {}; kunstmaanbundles.datepicker = (function($, window, undefined) { var init, reInit, _setDefaultDate, _initDatepicker; var _today = window.moment(), _tomorrow = window.moment(_today).add(1, 'days'); var defaultFormat = 'DD-MM-YYYY', defaultCollapse = true, defaultKeepOpen = false, defaultMinDate = false, defaultShowDefaultDate = false, defaultStepping = 1; init = function() { $('.js-datepicker').each(function() { _initDatepicker($(this)); }); }; reInit = function(el) { if (el) { _initDatepicker($(el)); } else { $('.js-datepicker').each(function() { if (!$(this).hasClass('datepicker--enabled')) { _initDatepicker($(this)); } }); } }; _setDefaultDate = function(elMinDate) { if(elMinDate === 'tomorrow') { return _tomorrow; } else { return _today; } }; _initDatepicker = function($el) { // Get Settings var elFormat = $el.data('format'), elCollapse = $el.data('collapse'), elKeepOpen = $el.data('keep-open'), elMinDate = $el.data('min-date'), elShowDefaultDate = $el.data('default-date'), elStepping = $el.data('stepping'); // Set Settings var format = (elFormat !== undefined) ? elFormat : defaultFormat, collapse = (elCollapse !== undefined) ? elCollapse : defaultCollapse, keepOpen = (elKeepOpen !== undefined) ? elKeepOpen : defaultKeepOpen, minDate = (elMinDate === 'tomorrow') ? _tomorrow : (elMinDate === 'today') ? _today : defaultMinDate, defaultDate = (elShowDefaultDate) ? _setDefaultDate(elMinDate) : defaultShowDefaultDate, stepping = (elStepping !== undefined) ? 
elStepping : defaultStepping; // Setup var $input = $el.find('input'), $addon = $el.find('.input-group-addon'), linkedDatepickerID = $el.data('linked-datepicker') || false; if (format.indexOf('HH:mm') === -1) { // Drop time if not necessary if (minDate) { minDate = minDate.clone().startOf('day'); // clone() because otherwise .startOf() mutates the original moment object } if (defaultDate) { defaultDate = defaultDate.clone().startOf('day'); } } $input.datetimepicker({ format: format, collapse: collapse, keepOpen: keepOpen, minDate: minDate, defaultDate: defaultDate, widgetPositioning: { horizontal: 'left', vertical: 'auto' }, widgetParent: $el, icons: { time: 'fa fa-clock', date: 'fa fa-calendar', up: 'fa fa-chevron-up', down: 'fa fa-chevron-down', previous: 'fa fa-arrow-left', next: 'fa fa-arrow-right', today: 'fa fa-crosshairs', clear: 'fa fa-trash' }, stepping: stepping }); $el.addClass('datepicker--enabled'); $addon.on('click', function() { $input.focus(); }); // Linked datepickers - allow future datetime only - (un)publish modal if (linkedDatepickerID) { // set min time only if selected date = today $(document).on('dp.change', linkedDatepickerID, function(e) { if (e.target.value === _today.format('DD-MM-YYYY')) { var selectedTime = window.moment($input.val(), 'HH:mm'); // Force user to select new time, if current time isn't valid anymore selectedTime.isBefore(_today) && $input.data('DateTimePicker').show(); $input.data('DateTimePicker').minDate(_today); } else { $input.data('DateTimePicker').minDate(false); } }); } }; return { init: init, reInit: reInit }; })(jQuery, window);
mit
dwpdigitaltech/healthanddisability
app/views/fha/v5/booking/details/contact.html
1150
{% extends path+"/booking/details/_layout2.html" %} {% block case_content %} <h3 class="heading-medium mt0">Contact details</h3> <h4 class="heading-small">Phone</h4> <p>{{customer.phone}}</p> <h4 class="heading-small">Mobile</h4> <p>{{customer.mobile}}</p> <h4 class="heading-small">Email</h4> <p>{{customer.email}}</p> <h4 class="heading-small">Address</h4> <p class="address-stack"> <span>{{customer.address.street}}</span> <span>{{customer.address.city}}</span> <span>{{customer.address.postcode}}</span> </p> {% if customer.correspondenceAddress %} <h4 class="heading-small">Correspondence Address</h4> <p class="address-stack"> <span>{{customer.correspondenceAddress.street}}</span> <span>{{customer.correspondenceAddress.city}}</span> <span>{{customer.correspondenceAddress.postcode}}</span> </p> {%endif%} {% block page_scripts %} {{ super() }} <script type="text/javascript"> $(document).on('ready',function() { $('.evidence__leftnav') .children(":nth-child(1)") .addClass('active_detail') }); </script> {% endblock %} {% endblock %}
mit
wingyplus/wmii
lib/libstuff/event/event.h
193
/* Event-dispatch declarations for wmii's libstuff.
 * Handlers are plain callbacks taking the raw XEvent. */
#include <stuff/x.h>

/* Callback type for an X11 event handler. */
typedef void (*EventHandler)(XEvent*);

/* Invoke the widget's per-event handler w->handler->fn for event ev,
 * but only if that handler slot is non-null. */
#define handle(w, fn, ev) \
	BLOCK(if((w)->handler->fn) (w)->handler->fn((w), ev))

/* Global dispatch table with one handler slot per X event type
 * (LASTEvent entries, indexed by XEvent.type — presumably filled in
 * by the event loop's setup code; confirm in event.c). */
extern EventHandler event_handler[LASTEvent];
mit
daimajia/EverMemo-EverNote
src/com/evernote/client/android/AsyncLinkedNoteStoreClient.java
9741
/* * Copyright 2012 Evernote Corporation. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package com.evernote.client.android; import com.evernote.edam.error.EDAMNotFoundException; import com.evernote.edam.error.EDAMSystemException; import com.evernote.edam.error.EDAMUserException; import com.evernote.edam.type.LinkedNotebook; import com.evernote.edam.type.Note; import com.evernote.edam.type.Notebook; import com.evernote.edam.type.SharedNotebook; import com.evernote.thrift.TException; import com.evernote.thrift.protocol.TProtocol; import com.evernote.thrift.transport.TTransportException; import java.util.Arrays; import java.util.List; /** * * This is a wrapper/helper class that manages the connection to a linked notestore. 
It maintains two * {@link AsyncLinkedNoteStoreClient} objects, one points to the users personal store and the other to * linked notebooks shard. * * These helper methods make network calls across both shards to return the appropriate data. * * * * @author @tylersmithnet */ public class AsyncLinkedNoteStoreClient { /** * References users main note store */ private AsyncNoteStoreClient mMainNoteStoreClient; private AsyncNoteStoreClient mLinkedStoreClient; private String mAuthToken; private ClientFactory mClientFactory; AsyncLinkedNoteStoreClient(TProtocol iprot, TProtocol oprot, String authenticationToken, ClientFactory clientFactory) throws TTransportException { mLinkedStoreClient = new AsyncNoteStoreClient(iprot, oprot, authenticationToken); mMainNoteStoreClient = EvernoteSession.getOpenSession().getClientFactory().createNoteStoreClient(); mAuthToken = authenticationToken; mClientFactory = clientFactory; } /** * Returns the {@link AsyncNoteStoreClient} object that has been instantiated to the appropriate shard * @return */ public AsyncNoteStoreClient getAsyncClient() { return mLinkedStoreClient; } AsyncNoteStoreClient getAsyncPersonalClient() { return mMainNoteStoreClient; } String getAuthenticationToken() { return mAuthToken; } void setAuthToken(String authenticationToken) { mAuthToken = authenticationToken; } ClientFactory getClientFactory() { return mClientFactory; } /** * Helper method to create a note asynchronously in a linked/business notebook * * @param note * @param linkedNotebook * @param callback */ public void createNoteAsync(final Note note, final LinkedNotebook linkedNotebook, final OnClientCallback<Note> callback) { AsyncReflector.execute(this, callback, "createNote", note, linkedNotebook); } /** * Helper method to create a note synchronously in a linked notebook * * @param note * @param linkedNotebook * @return * @throws com.evernote.edam.error.EDAMUserException * * @throws com.evernote.edam.error.EDAMSystemException * * @throws 
com.evernote.thrift.TException * @throws com.evernote.edam.error.EDAMNotFoundException * */ public Note createNote(Note note, LinkedNotebook linkedNotebook) throws EDAMUserException, EDAMSystemException, TException, EDAMNotFoundException { SharedNotebook sharedNotebook = getAsyncClient().getClient().getSharedNotebookByAuth(getAuthenticationToken()); note.setNotebookGuid(sharedNotebook.getNotebookGuid()); return getAsyncClient().getClient().createNote(getAuthenticationToken(), note); } /** * Helper method to list linked/business notebooks asynchronously * * @see {@link com.evernote.edam.notestore.NoteStore.Client#listLinkedNotebooks(String)} * * @param callback */ public void listNotebooksAsync(final OnClientCallback<List<LinkedNotebook>> callback) { AsyncReflector.execute(getAsyncPersonalClient(), callback, "listNotebooks", getAuthenticationToken()); } /** * Helper method to list linked notebooks synchronously * * @see {@link com.evernote.edam.notestore.NoteStore.Client#listLinkedNotebooks(String)} * */ public List<LinkedNotebook> listNotebooks() throws EDAMUserException, EDAMSystemException, TException, EDAMNotFoundException { return getAsyncPersonalClient().getClient().listLinkedNotebooks(getAsyncPersonalClient().getAuthenticationToken()); } /** * Create Linked Notebook from a Notebook * * Asynchronous call * * @param callback */ public void createNotebookAsync(Notebook notebook, OnClientCallback<LinkedNotebook> callback) { AsyncReflector.execute(this, callback, "createNotebook", notebook); } /** * Create Linked Notebook from a Notebook * * Synchronous call * * @return {@link LinkedNotebook} with guid from server */ public LinkedNotebook createNotebook(Notebook notebook) throws TException, EDAMUserException, EDAMSystemException, EDAMNotFoundException { Notebook originalNotebook = getAsyncClient().getClient().createNotebook(getAuthenticationToken(), notebook); SharedNotebook sharedNotebook = originalNotebook.getSharedNotebooks().get(0); LinkedNotebook 
linkedNotebook = new LinkedNotebook(); linkedNotebook.setShareKey(sharedNotebook.getShareKey()); linkedNotebook.setShareName(originalNotebook.getName()); linkedNotebook.setUsername(EvernoteSession.getOpenSession().getAuthenticationResult().getBusinessUser().getUsername()); linkedNotebook.setShardId(EvernoteSession.getOpenSession().getAuthenticationResult().getBusinessUser().getShardId()); return getAsyncPersonalClient().getClient().createLinkedNotebook(getAsyncPersonalClient().getAuthenticationToken(), linkedNotebook); } /** * Providing a LinkedNotebook referencing a linked/business account, perform a delete * * Asynchronous call * @param callback */ public void deleteNotebookAsync(LinkedNotebook linkedNotebook, OnClientCallback<Integer> callback) { AsyncReflector.execute(this, callback, "deleteNotebook", linkedNotebook); } /** * Providing a LinkedNotebook referencing a linked account, perform a delete * * Synchronous call * * @return guid of notebook deleted */ public int deleteNotebook(LinkedNotebook linkedNotebook) throws TException, EDAMUserException, EDAMSystemException, EDAMNotFoundException { SharedNotebook sharedNotebook = getAsyncClient().getClient().getSharedNotebookByAuth(getAuthenticationToken()); Long[] ids = {sharedNotebook.getId()}; getAsyncClient().getClient().expungeSharedNotebooks(getAuthenticationToken(), Arrays.asList(ids)); return getAsyncPersonalClient().getClient().expungeLinkedNotebook(getAsyncPersonalClient().getAuthenticationToken(), linkedNotebook.getGuid()); } /** * Will return the {@link Notebook} associated with the {@link LinkedNotebook} from the linked/business account * * Asynchronous call * * @param linkedNotebook * @param callback */ public void getCorrespondingNotebookAsync(LinkedNotebook linkedNotebook, OnClientCallback<Notebook> callback) { AsyncReflector.execute(this, callback, "getCorrespondingNotebook", linkedNotebook); } /** * Will return the {@link com.evernote.edam.type.Notebook} associated with the {@link 
com.evernote.edam.type.LinkedNotebook} from the linked account * * Synchronous call * * @param linkedNotebook */ public Notebook getCorrespondingNotebook(LinkedNotebook linkedNotebook) throws TException, EDAMUserException, EDAMSystemException, EDAMNotFoundException { SharedNotebook sharedNotebook = getAsyncClient().getClient().getSharedNotebookByAuth(getAuthenticationToken()); return getAsyncClient().getClient().getNotebook(getAuthenticationToken(), sharedNotebook.getNotebookGuid()); } /** * Checks writable permissions of {@link LinkedNotebook} on Linked/business account * * Asynchronous call * * @param linkedNotebook * @param callback */ public void isNotebookWritableAsync(LinkedNotebook linkedNotebook, OnClientCallback<Boolean> callback) { AsyncReflector.execute(this, callback, "isLinkedNotebookWritable", linkedNotebook); } /** * Checks writable permissions of {@link LinkedNotebook} on Linked account * * Synchronous call * * @param linkedNotebook */ public boolean isNotebookWritable(LinkedNotebook linkedNotebook) throws EDAMUserException, TException, EDAMSystemException, EDAMNotFoundException { Notebook notebook = getCorrespondingNotebook(linkedNotebook); return !notebook.getRestrictions().isNoCreateNotes(); } }
mit
cjwl/cocotron
Foundation/NSSet/NSSet.h
2363
/* Copyright (c) 2006-2007 Christopher J. W. Lloyd Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #import <Foundation/NSObject.h> #import <Foundation/NSEnumerator.h> @class NSArray, NSDictionary, NSString, NSPredicate; @interface NSSet : NSObject <NSCoding, NSCopying, NSMutableCopying, NSFastEnumeration> - initWithObjects:(id *)objects count:(NSUInteger)count; - initWithArray:(NSArray *)array; - initWithSet:(NSSet *)set; - initWithSet:(NSSet *)set copyItems:(BOOL)copyItems; - initWithObjects:first, ...; + set; + setWithArray:(NSArray *)array; + setWithSet:(NSSet *)set; + setWithObject:object; + setWithObjects:first, ...; + setWithObjects:(id *)objects count:(NSUInteger)count; - (NSSet *)setByAddingObject:object; - (NSSet *)setByAddingObjectsFromSet:(NSSet *)other; - (NSSet *)setByAddingObjectsFromArray:(NSArray *)array; - member:object; - (NSUInteger)count; - (NSEnumerator *)objectEnumerator; - (BOOL)isEqualToSet:(NSSet *)set; - (NSArray *)allObjects; - (BOOL)containsObject:object; - (BOOL)isSubsetOfSet:(NSSet *)set; - (BOOL)intersectsSet:(NSSet *)set; - (void)makeObjectsPerformSelector:(SEL)selector; - (void)makeObjectsPerformSelector:(SEL)selector withObject:argument; - anyObject; - (NSString *)descriptionWithLocale:(NSDictionary *)locale; - (NSSet *)filteredSetUsingPredicate:(NSPredicate *)predicate; @end #import <Foundation/NSMutableSet.h>
mit
KrzysztofCwalina/corefxlab
tests/System.Collections.Sequences.Tests/BasicUnitTests.cs
2965
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. using System.Collections.Generic; using Xunit; namespace System.Collections.Sequences.Tests { public class SequenceTests { [Theory] [InlineData(new int[] { })] [InlineData(new int[] { 1 })] [InlineData(new int[] { 1, 2, 3 })] public void ArrayList(int[] array) { ArrayList<int> collection = CreateArrayList(array); SequencePosition position = default; int arrayIndex = 0; while (collection.TryGet(ref position, out int item)) { Assert.Equal(array[arrayIndex++], item); } Assert.Equal(array.Length, arrayIndex); arrayIndex = 0; foreach (int item in collection) { Assert.Equal(array[arrayIndex++], item); } Assert.Equal(array.Length, arrayIndex); } private static ArrayList<int> CreateArrayList(int[] array) { var collection = new ArrayList<int>(); foreach (var arrayItem in array) collection.Add(arrayItem); return collection; } [Theory] [InlineData(new int[] { })] [InlineData(new int[] { 1 })] [InlineData(new int[] { 1, 2, 3 })] public void LinkedContainer(int[] array) { LinkedContainer<int> collection = CreateLinkedContainer(array); SequencePosition position = default; int arrayIndex = array.Length; while (collection.TryGet(ref position, out int item)) { Assert.Equal(array[--arrayIndex], item); } } private static LinkedContainer<int> CreateLinkedContainer(int[] array) { var collection = new LinkedContainer<int>(); foreach (var item in array) collection.Add(item); // this adds to front return collection; } [Theory] [InlineData(new int[] { })] [InlineData(new int[] { 1 })] [InlineData(new int[] { 1, 2, 3 })] public void Hashtable(int[] array) { Hashtable<int, string> collection = CreateHashtable(array); int arrayIndex = 0; SequencePosition position = default; while (collection.TryGet(ref position, out KeyValuePair<int, string> item)) { Assert.Equal(array[arrayIndex++], 
item.Key); } } private static Hashtable<int, string> CreateHashtable(int[] array) { var collection = new Hashtable<int, string>(EqualityComparer<int>.Default); foreach (var item in array) collection.Add(item, item.ToString()); return collection; } } }
mit
guhilling/smart-release-plugin
test-projects/parent-as-sibling/core-utils/src/test/java/de/hilling/maven/release/testprojects/versioninheritor/CalculatorTest.java
369
package de.hilling.maven.release.testprojects.versioninheritor; import org.junit.Assert; import org.junit.Test; public class CalculatorTest { @Test public void testAdd() throws Exception { Assert.assertEquals(3, new Calculator().add(1, 2)); System.out.println("The Calculator Test has run"); // used in a test to assert this has run } }
mit
Dokaponteam/ITF_Project
xampp/perl/lib/unicore/lib/Blk/Cham.pl
421
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!! # This file is machine-generated by mktables from the Unicode # database, Version 6.1.0. Any changes made here will be lost! # !!!!!!! INTERNAL PERL USE ONLY !!!!!!! # This file is for internal use by core Perl only. The format and even the # name or existence of this file are subject to change without notice. Don't # use it directly. return <<'END'; AA00 AA5F END
mit
spitelab/spiteheroes
www/bower_components/pixi/docs/files/src_pixi_utils_Polyk.js.html
14080
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <title>src/pixi/utils/Polyk.js - pixi.js</title> <link rel="stylesheet" href="http://yui.yahooapis.com/3.9.1/build/cssgrids/cssgrids-min.css"> <link rel="stylesheet" href="../assets/vendor/prettify/prettify-min.css"> <link rel="stylesheet" href="../assets/css/main.css" id="site_styles"> <link rel="shortcut icon" type="image/png" href="../assets/favicon.png"> <script src="http://yui.yahooapis.com/combo?3.9.1/build/yui/yui-min.js"></script> </head> <body class="yui3-skin-sam"> <div id="doc"> <div id="hd" class="yui3-g header"> <div class="yui3-u-3-4"> <h1><img src="http://www.goodboydigital.com/pixijs/logo_small.png" title="pixi.js"></h1> </div> <div class="yui3-u-1-4 version"> <em>API Docs for: 1.6.0</em> </div> </div> <div id="bd" class="yui3-g"> <div class="yui3-u-1-4"> <div id="docs-sidebar" class="sidebar apidocs"> <div id="api-list"> <h2 class="off-left">APIs</h2> <div id="api-tabview" class="tabview"> <ul class="tabs"> <li><a href="#api-classes">Classes</a></li> <li><a href="#api-modules">Modules</a></li> </ul> <div id="api-tabview-filter"> <input type="search" id="api-filter" placeholder="Type to filter APIs"> </div> <div id="api-tabview-panel"> <ul id="api-classes" class="apis classes"> <li><a href="../classes/AbstractFilter.html">AbstractFilter</a></li> <li><a href="../classes/AjaxRequest.html">AjaxRequest</a></li> <li><a href="../classes/AlphaMaskFilter.html">AlphaMaskFilter</a></li> <li><a href="../classes/AssetLoader.html">AssetLoader</a></li> <li><a href="../classes/AtlasLoader.html">AtlasLoader</a></li> <li><a href="../classes/autoDetectRenderer.html">autoDetectRenderer</a></li> <li><a href="../classes/BaseTexture.html">BaseTexture</a></li> <li><a href="../classes/BitmapFontLoader.html">BitmapFontLoader</a></li> <li><a href="../classes/BitmapText.html">BitmapText</a></li> <li><a href="../classes/BlurFilter.html">BlurFilter</a></li> <li><a 
href="../classes/CanvasGraphics.html">CanvasGraphics</a></li> <li><a href="../classes/CanvasMaskManager.html">CanvasMaskManager</a></li> <li><a href="../classes/CanvasRenderer.html">CanvasRenderer</a></li> <li><a href="../classes/CanvasTinter.html">CanvasTinter</a></li> <li><a href="../classes/Circle.html">Circle</a></li> <li><a href="../classes/ColorMatrixFilter.html">ColorMatrixFilter</a></li> <li><a href="../classes/ColorStepFilter.html">ColorStepFilter</a></li> <li><a href="../classes/ComplexPrimitiveShader.html">ComplexPrimitiveShader</a></li> <li><a href="../classes/DisplacementFilter.html">DisplacementFilter</a></li> <li><a href="../classes/DisplayObject.html">DisplayObject</a></li> <li><a href="../classes/DisplayObjectContainer.html">DisplayObjectContainer</a></li> <li><a href="../classes/DotScreenFilter.html">DotScreenFilter</a></li> <li><a href="../classes/Ellipse.html">Ellipse</a></li> <li><a href="../classes/EventTarget.html">EventTarget</a></li> <li><a href="../classes/FilterTexture.html">FilterTexture</a></li> <li><a href="../classes/getRecommendedRenderer.html">getRecommendedRenderer</a></li> <li><a href="../classes/Graphics.html">Graphics</a></li> <li><a href="../classes/GrayFilter.html">GrayFilter</a></li> <li><a href="../classes/ImageLoader.html">ImageLoader</a></li> <li><a href="../classes/InteractionData.html">InteractionData</a></li> <li><a href="../classes/InteractionManager.html">InteractionManager</a></li> <li><a href="../classes/InvertFilter.html">InvertFilter</a></li> <li><a href="../classes/JsonLoader.html">JsonLoader</a></li> <li><a href="../classes/Matrix.html">Matrix</a></li> <li><a href="../classes/Matrix2.html">Matrix2</a></li> <li><a href="../classes/MovieClip.html">MovieClip</a></li> <li><a href="../classes/NormalMapFilter.html">NormalMapFilter</a></li> <li><a href="../classes/PixelateFilter.html">PixelateFilter</a></li> <li><a href="../classes/PixiFastShader.html">PixiFastShader</a></li> <li><a 
href="../classes/PixiShader.html">PixiShader</a></li> <li><a href="../classes/Point.html">Point</a></li> <li><a href="../classes/Polygon.html">Polygon</a></li> <li><a href="../classes/PolyK.html">PolyK</a></li> <li><a href="../classes/PrimitiveShader.html">PrimitiveShader</a></li> <li><a href="../classes/Rectangle.html">Rectangle</a></li> <li><a href="../classes/Rope.html">Rope</a></li> <li><a href="../classes/SepiaFilter.html">SepiaFilter</a></li> <li><a href="../classes/Spine.html">Spine</a></li> <li><a href="../classes/Sprite.html">Sprite</a></li> <li><a href="../classes/SpriteBatch.html">SpriteBatch</a></li> <li><a href="../classes/SpriteSheetLoader.html">SpriteSheetLoader</a></li> <li><a href="../classes/Stage.html">Stage</a></li> <li><a href="../classes/Strip.html">Strip</a></li> <li><a href="../classes/Text.html">Text</a></li> <li><a href="../classes/Texture.html">Texture</a></li> <li><a href="../classes/TilingSprite.html">TilingSprite</a></li> <li><a href="../classes/TwistFilter.html">TwistFilter</a></li> <li><a href="../classes/WebGLFilterManager.html">WebGLFilterManager</a></li> <li><a href="../classes/WebGLGraphics.html">WebGLGraphics</a></li> <li><a href="../classes/WebGLMaskManager.html">WebGLMaskManager</a></li> <li><a href="../classes/WebGLRenderer.html">WebGLRenderer</a></li> <li><a href="../classes/WebGLShaderManager.html">WebGLShaderManager</a></li> <li><a href="../classes/WebGLSpriteBatch.html">WebGLSpriteBatch</a></li> <li><a href="../classes/WebGLStencilManager.html">WebGLStencilManager</a></li> </ul> <ul id="api-modules" class="apis modules"> <li><a href="../modules/PIXI.html">PIXI</a></li> </ul> </div> </div> </div> </div> </div> <div class="yui3-u-3-4"> <div id="api-options"> Show: <label for="api-show-inherited"> <input type="checkbox" id="api-show-inherited" checked> Inherited </label> <label for="api-show-protected"> <input type="checkbox" id="api-show-protected"> Protected </label> <label for="api-show-private"> <input type="checkbox" 
id="api-show-private"> Private </label> <label for="api-show-deprecated"> <input type="checkbox" id="api-show-deprecated"> Deprecated </label> </div> <div class="apidocs"> <div id="docs-main"> <div class="content"> <h1 class="file-heading">File: src/pixi/utils/Polyk.js</h1> <div class="file"> <pre class="code prettyprint linenums"> /* PolyK library url: http://polyk.ivank.net Released under MIT licence. Copyright (c) 2012 Ivan Kuckir Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the &quot;Software&quot;), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. This is an amazing lib! slightly modified by Mat Groves (matgroves.com); */ /** * Based on the Polyk library http://polyk.ivank.net released under MIT licence. * This is an amazing lib! 
* slightly modified by Mat Groves (matgroves.com); * @class PolyK * */ PIXI.PolyK = {}; /** * Triangulates shapes for webGL graphic fills * * @method Triangulate * */ PIXI.PolyK.Triangulate = function(p) { var sign = true; var n = p.length &gt;&gt; 1; if(n &lt; 3) return []; var tgs = []; var avl = []; for(var i = 0; i &lt; n; i++) avl.push(i); i = 0; var al = n; while(al &gt; 3) { var i0 = avl[(i+0)%al]; var i1 = avl[(i+1)%al]; var i2 = avl[(i+2)%al]; var ax = p[2*i0], ay = p[2*i0+1]; var bx = p[2*i1], by = p[2*i1+1]; var cx = p[2*i2], cy = p[2*i2+1]; var earFound = false; if(PIXI.PolyK._convex(ax, ay, bx, by, cx, cy, sign)) { earFound = true; for(var j = 0; j &lt; al; j++) { var vi = avl[j]; if(vi === i0 || vi === i1 || vi === i2) continue; if(PIXI.PolyK._PointInTriangle(p[2*vi], p[2*vi+1], ax, ay, bx, by, cx, cy)) { earFound = false; break; } } } if(earFound) { tgs.push(i0, i1, i2); avl.splice((i+1)%al, 1); al--; i = 0; } else if(i++ &gt; 3*al) { // need to flip flip reverse it! // reset! 
if(sign) { tgs = []; avl = []; for(i = 0; i &lt; n; i++) avl.push(i); i = 0; al = n; sign = false; } else { window.console.log(&quot;PIXI Warning: shape too complex to fill&quot;); return []; } } } tgs.push(avl[0], avl[1], avl[2]); return tgs; }; /** * Checks whether a point is within a triangle * * @method _PointInTriangle * @param px {Number} x coordinate of the point to test * @param py {Number} y coordinate of the point to test * @param ax {Number} x coordinate of the a point of the triangle * @param ay {Number} y coordinate of the a point of the triangle * @param bx {Number} x coordinate of the b point of the triangle * @param by {Number} y coordinate of the b point of the triangle * @param cx {Number} x coordinate of the c point of the triangle * @param cy {Number} y coordinate of the c point of the triangle * @private */ PIXI.PolyK._PointInTriangle = function(px, py, ax, ay, bx, by, cx, cy) { var v0x = cx-ax; var v0y = cy-ay; var v1x = bx-ax; var v1y = by-ay; var v2x = px-ax; var v2y = py-ay; var dot00 = v0x*v0x+v0y*v0y; var dot01 = v0x*v1x+v0y*v1y; var dot02 = v0x*v2x+v0y*v2y; var dot11 = v1x*v1x+v1y*v1y; var dot12 = v1x*v2x+v1y*v2y; var invDenom = 1 / (dot00 * dot11 - dot01 * dot01); var u = (dot11 * dot02 - dot01 * dot12) * invDenom; var v = (dot00 * dot12 - dot01 * dot02) * invDenom; // Check if point is in triangle return (u &gt;= 0) &amp;&amp; (v &gt;= 0) &amp;&amp; (u + v &lt; 1); }; /** * Checks whether a shape is convex * * @method _convex * * @private */ PIXI.PolyK._convex = function(ax, ay, bx, by, cx, cy, sign) { return ((ay-by)*(cx-bx) + (bx-ax)*(cy-by) &gt;= 0) === sign; }; </pre> </div> </div> </div> </div> </div> </div> </div> <script src="../assets/vendor/prettify/prettify-min.js"></script> <script>prettyPrint();</script> <script src="../assets/js/yui-prettify.js"></script> <script src="../assets/../api.js"></script> <script src="../assets/js/api-filter.js"></script> <script src="../assets/js/api-list.js"></script> <script 
src="../assets/js/api-search.js"></script> <script src="../assets/js/apidocs.js"></script> </body> </html>
cc0-1.0
yorkulibraries/vufind
web/vendor/php-tmdb/api/lib/Tmdb/Model/Collection/QueryParametersCollection.php
555
<?php /** * This file is part of the Tmdb PHP API created by Michael Roterman. * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. * * @package Tmdb * @author Michael Roterman <[email protected]> * @copyright (c) 2013, Michael Roterman * @version 0.0.1 */ namespace Tmdb\Model\Collection; use Tmdb\Model\Common\GenericCollection; /** * Class QueryParametersCollection * @package Tmdb\Model\Collection */ class QueryParametersCollection extends GenericCollection { }
gpl-2.0
myri/lanai-gcc
gcc/testsuite/g77.f-torture/execute/u77-test.f
12998
*** Some random stuff for testing libU77. Should be done better. It's * hard to test things where you can't guarantee the result. Have a * good squint at what it prints, though detected errors will cause * starred messages. * * Currently not tested: * ALARM * CHDIR (func) * CHMOD (func) * FGET (func/subr) * FGETC (func) * FPUT (func/subr) * FPUTC (func) * FSTAT (subr) * GETCWD (subr) * HOSTNM (subr) * IRAND * KILL * LINK (func) * LSTAT (subr) * RENAME (func/subr) * SIGNAL (subr) * SRAND * STAT (subr) * SYMLNK (func/subr) * UMASK (func) * UNLINK (func) * * NOTE! This is the testsuite version, so it should compile and * execute on all targets, and either run to completion (with * success status) or fail (by calling abort). The *other* version, * which is a bit more interactive and tests a couple of things * this one cannot, should be generally the same, and is in * libf2c/libU77/u77-test.f. Please keep it up-to-date. implicit none external hostnm * intrinsic hostnm integer hostnm integer i, j, k, ltarray (9), idat (3), count, rate, count_max, + pid, mask real tarray1(2), tarray2(2), r1, r2 double precision d1 integer(kind=2) bigi logical issum intrinsic getpid, getuid, getgid, ierrno, gerror, time8, + fnum, isatty, getarg, access, unlink, fstat, iargc, + stat, lstat, getcwd, gmtime, etime, chmod, itime, date, + chdir, fgetc, fputc, system_clock, second, idate, secnds, + time, ctime, fdate, ttynam, date_and_time, mclock, mclock8, + cpu_time, dtime, ftell, abort external lenstr, ctrlc integer lenstr logical l character gerr*80, c*1 character ctim*25, line*80, lognam*20, wd*1000, line2*80, + ddate*8, ttime*10, zone*5, ctim2*25 integer fstatb (13), statb (13) integer *2 i2zero integer values(8) integer(kind=7) sigret i = time () ctim = ctime (i) WRITE (6,'(A/)') '1 GNU libU77 test at: ' // ctim(:lenstr (ctim)) write (6,'(A,I3,'', '',I3)') + ' Logical units 5 and 6 correspond (FNUM) to' + // ' Unix i/o units ', fnum(5), fnum(6) if (lnblnk('foo ').ne.3 .or. 
len_trim('foo ').ne.3) then print *, 'LNBLNK or LEN_TRIM failed' call abort end if bigi = time8 () call ctime (i, ctim2) if (ctim .ne. ctim2) then write (6, *) '*** CALL CTIME disagrees with CTIME(): ', + ctim2(:lenstr (ctim2)), ' vs. ', ctim(:lenstr (ctim)) call doabort end if j = time () if (i .gt. bigi .or. bigi .gt. j) then write (6, *) '*** TIME/TIME8/TIME sequence failures: ', + i, bigi, j call doabort end if print *, 'Command-line arguments: ', iargc () do i = 0, iargc () call getarg (i, line) print *, 'Arg ', i, ' is: ', line(:lenstr (line)) end do l= isatty(6) line2 = ttynam(6) if (l) then line = 'and 6 is a tty device (ISATTY) named '//line2 else line = 'and 6 isn''t a tty device (ISATTY)' end if write (6,'(1X,A)') line(:lenstr(line)) call ttynam (6, line) if (line .ne. line2) then print *, '*** CALL TTYNAM disagrees with TTYNAM: ', + line(:lenstr (line)) call doabort end if * regression test for compiler crash fixed by JCB 1998-08-04 com.c sigret = signal(2, ctrlc) pid = getpid() WRITE (6,'(A,I10)') ' Process id (GETPID): ', pid WRITE (6,'(A,I10)') ' User id (GETUID): ', GETUID () WRITE (6,'(A,I10)') ' Group id (GETGID): ', GETGID () WRITE (6, *) 'If you have the `id'' program, the following call' write (6, *) 'of SYSTEM should agree with the above:' call flush(6) CALL SYSTEM ('echo " " `id`') call flush lognam = 'blahblahblah' call getlog (lognam) write (6,*) 'Login name (GETLOG): ', lognam(:lenstr (lognam)) wd = 'blahblahblah' call getenv ('LOGNAME', wd) write (6,*) 'Login name (GETENV of LOGNAME): ', wd(:lenstr (wd)) call umask(0, mask) write(6,*) 'UMASK returns', mask call umask(mask) ctim = fdate() write (6,*) 'FDATE returns: ', ctim(:lenstr (ctim)) call fdate (ctim) write (6,*) 'CALL FDATE returns: ', ctim(:lenstr (ctim)) j=time() call ltime (j, ltarray) write (6,'(1x,a,9i4)') 'LTIME returns:', ltarray call gmtime (j, ltarray) write (6,'(1x,a,9i4)') 'GMTIME returns:', ltarray call system_clock(count) ! 
omitting optional args call system_clock(count, rate, count_max) write(6,*) 'SYSTEM_CLOCK returns: ', count, rate, count_max call date_and_time(ddate) ! omitting optional args call date_and_time(ddate, ttime, zone, values) write(6, *) 'DATE_AND_TIME returns: ', ddate, ' ', ttime, ' ', + zone, ' ', values write (6,*) 'Sleeping for 1 second (SLEEP) ...' call sleep (1) c consistency-check etime vs. dtime for first call r1 = etime (tarray1) r2 = dtime (tarray2) if (abs (r1-r2).gt.1.0) then write (6,*) + 'Results of ETIME and DTIME differ by more than a second:', + r1, r2 call doabort end if if (.not. issum (r1, tarray1(1), tarray1(2))) then write (6,*) '*** ETIME didn''t return sum of the array: ', + r1, ' /= ', tarray1(1), '+', tarray1(2) call doabort end if if (.not. issum (r2, tarray2(1), tarray2(2))) then write (6,*) '*** DTIME didn''t return sum of the array: ', + r2, ' /= ', tarray2(1), '+', tarray2(2) call doabort end if write (6, '(A,3F10.3)') + ' Elapsed total, user, system time (ETIME): ', + r1, tarray1 c now try to get times to change enough to see in etime/dtime write (6,*) 'Looping until clock ticks at least once...' do i = 1,1000 do j = 1,1000 end do call dtime (tarray2, r2) if (tarray2(1) .ne. 0. .or. tarray2(2) .ne. 0.) exit end do call etime (tarray1, r1) if (.not. issum (r1, tarray1(1), tarray1(2))) then write (6,*) '*** ETIME didn''t return sum of the array: ', + r1, ' /= ', tarray1(1), '+', tarray1(2) call doabort end if if (.not. issum (r2, tarray2(1), tarray2(2))) then write (6,*) '*** DTIME didn''t return sum of the array: ', + r2, ' /= ', tarray2(1), '+', tarray2(2) call doabort end if write (6, '(A,3F10.3)') + ' Differences in total, user, system time (DTIME): ', + r2, tarray2 write (6, '(A,3F10.3)') + ' Elapsed total, user, system time (ETIME): ', + r1, tarray1 write (6, *) '(Clock-tick detected after ', i, ' 1K loops.)' call idate (i,j,k) call idate (idat) write (6,*) 'IDATE (date,month,year): ',idat print *, '... 
and the VXT version (month,date,year): ', i,j,k if (i/=idat(2) .or. j/=idat(1) .or. k/=mod(idat(3),100)) then print *, '*** VXT and U77 versions don''t agree' call doabort end if call date (ctim) write (6,*) 'DATE (dd-mmm-yy): ', ctim(:lenstr (ctim)) call itime (idat) write (6,*) 'ITIME (hour,minutes,seconds): ', idat call time(line(:8)) print *, 'TIME: ', line(:8) write (6,*) 'SECNDS(0.0) returns: ',secnds(0.0) write (6,*) 'SECOND returns: ', second() call dumdum(r1) call second(r1) write (6,*) 'CALL SECOND returns: ', r1 * compiler crash fixed by 1998-10-01 com.c change if (rand(0).lt.0.0 .or. rand(0).gt.1.0) then write (6,*) '*** rand(0) error' call doabort() end if i = getcwd(wd) if (i.ne.0) then call perror ('*** getcwd') call doabort else write (6,*) 'Current directory is "'//wd(:lenstr(wd))//'"' end if call chdir ('.',i) if (i.ne.0) then write (6,*) '***CHDIR to ".": ', i call doabort end if i=hostnm(wd) if(i.ne.0) then call perror ('*** hostnm') call doabort else write (6,*) 'Host name is ', wd(:lenstr(wd)) end if i = access('/dev/null ', 'rw') if (i.ne.0) write (6,*) '***Read/write ACCESS to /dev/null: ', i write (6,*) 'Creating file "foo" for testing...' open (3,file='foo',status='UNKNOWN') rewind 3 call fputc(3, 'c',i) call fputc(3, 'd',j) if (i+j.ne.0) write(6,*) '***FPUTC: ', i C why is it necessary to reopen? (who wrote this?) C the better to test with, my dear! 
(-- burley) close(3) open(3,file='foo',status='old') call fseek(3,0,0,*10) go to 20 10 write(6,*) '***FSEEK failed' call doabort 20 call fgetc(3, c,i) if (i.ne.0) then write(6,*) '***FGETC: ', i call doabort end if if (c.ne.'c') then write(6,*) '***FGETC read the wrong thing: ', ichar(c) call doabort end if i= ftell(3) if (i.ne.1) then write(6,*) '***FTELL offset: ', i call doabort end if call ftell(3, i) if (i.ne.1) then write(6,*) '***CALL FTELL offset: ', i call doabort end if call chmod ('foo', 'a+w',i) if (i.ne.0) then write (6,*) '***CHMOD of "foo": ', i call doabort end if i = fstat (3, fstatb) if (i.ne.0) then write (6,*) '***FSTAT of "foo": ', i call doabort end if i = stat ('foo', statb) if (i.ne.0) then write (6,*) '***STAT of "foo": ', i call doabort end if write (6,*) ' with stat array ', statb if (statb(6) .ne. getgid ()) then write (6,*) 'Note: FSTAT gid wrong (happens on some systems).' end if if (statb(5) .ne. getuid () .or. statb(4) .ne. 1) then write (6,*) '*** FSTAT uid or nlink is wrong' call doabort end if do i=1,13 if (fstatb (i) .ne. statb (i)) then write (6,*) '*** FSTAT and STAT don''t agree on '// ' + array element ', i, ' value ', fstatb (i), statb (i) call abort end if end do i = lstat ('foo', fstatb) do i=1,13 if (fstatb (i) .ne. 
statb (i)) then write (6,*) '*** LSTAT and STAT don''t agree on '// + 'array element ', i, ' value ', fstatb (i), statb (i) call abort end if end do C in case it exists already: call unlink ('bar',i) call link ('foo ', 'bar ',i) if (i.ne.0) then write (6,*) '***LINK "foo" to "bar" failed: ', i call doabort end if call unlink ('foo',i) if (i.ne.0) then write (6,*) '***UNLINK "foo" failed: ', i call doabort end if call unlink ('foo',i) if (i.eq.0) then write (6,*) '***UNLINK "foo" again: ', i call doabort end if call gerror (gerr) i = ierrno() write (6,'(A,I3,A/1X,A)') ' The current error number is: ', + i, + ' and the corresponding message is:', gerr(:lenstr(gerr)) write (6,*) 'This is sent to stderr prefixed by the program name' call getarg (0, line) call perror (line (:lenstr (line))) call unlink ('bar') print *, 'MCLOCK returns ', mclock () print *, 'MCLOCK8 returns ', mclock8 () call cpu_time (d1) print *, 'CPU_TIME returns ', d1 C WRITE (6,*) 'You should see exit status 1' CALL EXIT(0) 99 END * Return length of STR not including trailing blanks, but always > 0. integer function lenstr (str) character*(*) str if (str.eq.' ') then lenstr=1 else lenstr = lnblnk (str) end if end * Just make sure SECOND() doesn't "magically" work the second time. subroutine dumdum(r) r = 3.14159 end * Test whether sum is approximately left+right. logical function issum (sum, left, right) implicit none real sum, left, right real mysum, delta, width mysum = left + right delta = abs (mysum - sum) width = abs (left) + abs (right) issum = (delta .le. .0001 * width) end * Signal handler subroutine ctrlc print *, 'Got ^C' call doabort end * A problem has been noticed, so maybe abort the test. subroutine doabort * For this version, call the ABORT intrinsic. intrinsic abort call abort end * Testsuite version only. * Don't actually reference the HOSTNM intrinsic, because some targets * need -lsocket, which we don't have a mechanism for supplying. 
integer function hostnm(nm) character*(*) nm nm = 'not determined by this version of u77-test.f' hostnm = 0 end
gpl-2.0
gerrit507/Emby
RSSDP/DeviceEventArgs.cs
1125
using System; using System.Collections.Generic; using System.Text; namespace Rssdp { /// <summary> /// Event arguments for the <see cref="SsdpDevice.DeviceAdded"/> and <see cref="SsdpDevice.DeviceRemoved"/> events. /// </summary> public sealed class DeviceEventArgs : EventArgs { #region Fields private readonly SsdpDevice _Device; #endregion #region Constructors /// <summary> /// Constructs a new instance for the specified <see cref="SsdpDevice"/>. /// </summary> /// <param name="device">The <see cref="SsdpDevice"/> associated with the event this argument class is being used for.</param> /// <exception cref="System.ArgumentNullException">Thrown if the <paramref name="device"/> argument is null.</exception> public DeviceEventArgs(SsdpDevice device) { if (device == null) throw new ArgumentNullException("device"); _Device = device; } #endregion #region Public Properties /// <summary> /// Returns the <see cref="SsdpDevice"/> instance the event being raised for. /// </summary> public SsdpDevice Device { get { return _Device; } } #endregion } }
gpl-2.0
Gurgel100/gcc
gcc/testsuite/g++.dg/analyzer/pr93899.C
67
// { dg-do compile { target c++11 } } #include "../abi/mangle55.C"
gpl-2.0
tobiasbuhrer/tobiasb
web/core/tests/Drupal/KernelTests/Core/Datetime/DateFormatterTest.php
6727
<?php namespace Drupal\KernelTests\Core\Datetime; use Drupal\KernelTests\KernelTestBase; use Drupal\language\Entity\ConfigurableLanguage; /** * Tests date formatting. * * @group Common * @coversDefaultClass \Drupal\Core\Datetime\DateFormatter */ class DateFormatterTest extends KernelTestBase { /** * {@inheritdoc} */ protected static $modules = ['language', 'system']; /** * Arbitrary langcode for a custom language. */ const LANGCODE = 'xx'; /** * {@inheritdoc} */ protected function setUp(): void { parent::setUp(); $this->installConfig(['system']); $this->setSetting('locale_custom_strings_' . self::LANGCODE, [ '' => ['Sunday' => 'domingo'], 'Long month name' => ['March' => 'marzo'], ]); $formats = $this->container->get('entity_type.manager') ->getStorage('date_format') ->loadMultiple(['long', 'medium', 'short']); $formats['long']->setPattern('l, j. F Y - G:i')->save(); $formats['medium']->setPattern('j. F Y - G:i')->save(); $formats['short']->setPattern('Y M j - g:ia')->save(); ConfigurableLanguage::createFromLangcode(static::LANGCODE)->save(); } /** * Tests DateFormatter::format(). 
* * @covers ::format */ public function testFormat() { /** @var \Drupal\Core\Datetime\DateFormatterInterface $formatter */ $formatter = $this->container->get('date.formatter'); /** @var \Drupal\Core\Language\LanguageManagerInterface $language_manager */ $language_manager = $this->container->get('language_manager'); $timestamp = strtotime('2007-03-26T00:00:00+00:00'); $this->assertSame('Sunday, 25-Mar-07 17:00:00 PDT', $formatter->format($timestamp, 'custom', 'l, d-M-y H:i:s T', 'America/Los_Angeles', 'en'), 'Test all parameters.'); $this->assertSame('domingo, 25-Mar-07 17:00:00 PDT', $formatter->format($timestamp, 'custom', 'l, d-M-y H:i:s T', 'America/Los_Angeles', self::LANGCODE), 'Test translated format.'); $this->assertSame('l, 25-Mar-07 17:00:00 PDT', $formatter->format($timestamp, 'custom', '\\l, d-M-y H:i:s T', 'America/Los_Angeles', self::LANGCODE), 'Test an escaped format string.'); $this->assertSame('\\domingo, 25-Mar-07 17:00:00 PDT', $formatter->format($timestamp, 'custom', '\\\\l, d-M-y H:i:s T', 'America/Los_Angeles', self::LANGCODE), 'Test format containing backslash character.'); $this->assertSame('\\l, 25-Mar-07 17:00:00 PDT', $formatter->format($timestamp, 'custom', '\\\\\\l, d-M-y H:i:s T', 'America/Los_Angeles', self::LANGCODE), 'Test format containing backslash followed by escaped format string.'); $this->assertSame('Monday, 26-Mar-07 01:00:00 BST', $formatter->format($timestamp, 'custom', 'l, d-M-y H:i:s T', 'Europe/London', 'en'), 'Test a different time zone.'); $this->assertSame('Thu, 01/01/1970 - 00:00', $formatter->format(0, 'custom', '', 'UTC', 'en'), 'Test custom format with empty string.'); // Make sure we didn't change the configuration override language. $this->assertSame('en', $language_manager->getConfigOverrideLanguage()->getId(), 'Configuration override language not disturbed,'); // Test bad format string will use the fallback format. 
$this->assertSame($formatter->format($timestamp, 'fallback'), $formatter->format($timestamp, 'bad_format_string'), 'Test fallback format.'); $this->assertSame('en', $language_manager->getConfigOverrideLanguage()->getId(), 'Configuration override language not disturbed,'); // Change the default language and timezone. $this->config('system.site')->set('default_langcode', static::LANGCODE)->save(); date_default_timezone_set('America/Los_Angeles'); // Reset the language manager so new negotiations attempts will fall back on // on the new language. $language_manager->reset(); $this->assertSame('en', $language_manager->getConfigOverrideLanguage()->getId(), 'Configuration override language not disturbed,'); $this->assertSame('Sunday, 25-Mar-07 17:00:00 PDT', $formatter->format($timestamp, 'custom', 'l, d-M-y H:i:s T', 'America/Los_Angeles', 'en'), 'Test a different language.'); $this->assertSame('Monday, 26-Mar-07 01:00:00 BST', $formatter->format($timestamp, 'custom', 'l, d-M-y H:i:s T', 'Europe/London'), 'Test a different time zone.'); $this->assertSame('domingo, 25-Mar-07 17:00:00 PDT', $formatter->format($timestamp, 'custom', 'l, d-M-y H:i:s T'), 'Test custom date format.'); $this->assertSame('domingo, 25. marzo 2007 - 17:00', $formatter->format($timestamp, 'long'), 'Test long date format.'); $this->assertSame('25. marzo 2007 - 17:00', $formatter->format($timestamp, 'medium'), 'Test medium date format.'); $this->assertSame('2007 Mar 25 - 5:00pm', $formatter->format($timestamp, 'short'), 'Test short date format.'); $this->assertSame('25. marzo 2007 - 17:00', $formatter->format($timestamp), 'Test default date format.'); // Test HTML time element formats. 
$this->assertSame('2007-03-25T17:00:00-0700', $formatter->format($timestamp, 'html_datetime'), 'Test html_datetime date format.'); $this->assertSame('2007-03-25', $formatter->format($timestamp, 'html_date'), 'Test html_date date format.'); $this->assertSame('17:00:00', $formatter->format($timestamp, 'html_time'), 'Test html_time date format.'); $this->assertSame('03-25', $formatter->format($timestamp, 'html_yearless_date'), 'Test html_yearless_date date format.'); $this->assertSame('2007-W12', $formatter->format($timestamp, 'html_week'), 'Test html_week date format.'); $this->assertSame('2007-03', $formatter->format($timestamp, 'html_month'), 'Test html_month date format.'); $this->assertSame('2007', $formatter->format($timestamp, 'html_year'), 'Test html_year date format.'); // Make sure we didn't change the configuration override language. $this->assertSame('en', $language_manager->getConfigOverrideLanguage()->getId(), 'Configuration override language not disturbed,'); // Test bad format string will use the fallback format. $this->assertSame($formatter->format($timestamp, 'fallback'), $formatter->format($timestamp, 'bad_format_string'), 'Test fallback format.'); $this->assertSame('en', $language_manager->getConfigOverrideLanguage()->getId(), 'Configuration override language not disturbed,'); // HTML is not escaped by the date formatter, it must be escaped later. $this->assertSame("<script>alert('2007');</script>", $formatter->format($timestamp, 'custom', '\<\s\c\r\i\p\t\>\a\l\e\r\t\(\'Y\'\)\;\<\/\s\c\r\i\p\t\>'), 'Script tags not removed from dates.'); $this->assertSame('<em>2007</em>', $formatter->format($timestamp, 'custom', '\<\e\m\>Y\<\/\e\m\>'), 'Em tags are not removed from dates.'); } }
gpl-2.0
wnsonsa/destin-foot
vendor/nelmio/cors-bundle/Nelmio/CorsBundle/README.md
3211
# NelmioCorsBundle ## About The NelmioCorsBundle allows you to send [Cross-Origin Resource Sharing](http://enable-cors.org/) headers with ACL-style per-url configuration. If you want to have a global overview of CORS workflow, you can browse this [image](http://www.html5rocks.com/static/images/cors_server_flowchart.png). ## Features * Handles CORS pre-flight OPTIONS requests * Adds CORS headers to your responses ## Configuration The `defaults` are the default values applied to all the `paths` that match, unless overridden in a specific URL configuration. If you want them to apply to everything, you must define a path with `^/`. This example config contains all the possible config values with their default values shown in the `defaults` key. In paths, you see that we allow CORS requests from any origin on `/api/`. One custom header and some HTTP methods are defined as allowed as well. Preflight requests can be cached for 3600 seconds. nelmio_cors: defaults: allow_credentials: false allow_origin: [] allow_headers: [] allow_methods: [] expose_headers: [] max_age: 0 hosts: [] paths: '^/api/': allow_origin: ['*'] allow_headers: ['X-Custom-Auth'] allow_methods: ['POST', 'PUT', 'GET', 'DELETE'] max_age: 3600 '^/': allow_origin: ['*'] allow_headers: ['X-Custom-Auth'] allow_methods: ['POST', 'PUT', 'GET', 'DELETE'] max_age: 3600 hosts: ['^api\.'] `allow_origin` and `allow_headers` can be set to `*` to accept any value, the allowed methods however have to be explicitly listed. `paths` must contain at least one item. > **Note:** If you allow POST methods and have > [http method overriding](http://symfony.com/doc/current/reference/configuration/framework.html#http-method-override) > enabled in the framework, it will enable the API users to perform PUT and DELETE > requests as well. ## Installation (Symfony 2.2+) Require the `nelmio/cors-bundle` package in your composer.json and update your dependencies. 
$ composer require nelmio/cors-bundle:~1.0 Add the NelmioCorsBundle to your application's kernel: public function registerBundles() { $bundles = array( ... new Nelmio\CorsBundle\NelmioCorsBundle(), ... ); ... } ## Installation (Symfony 2.0) Put the NelmioCorsBundle into the `vendor/bundles/Nelmio` directory: $ git clone git://github.com/nelmio/NelmioCorsBundle.git vendor/bundles/Nelmio/CorsBundle Register the `Nelmio` namespace in your project's autoload script (app/autoload.php): $loader->registerNamespaces(array( 'Nelmio' => __DIR__.'/../vendor/bundles', )); Add the NelmioCorsBundle to your application's kernel: public function registerBundles() { $bundles = array( ... new Nelmio\CorsBundle\NelmioCorsBundle(), ... ); ... } ## License Released under the MIT License, see LICENSE.
gpl-2.0
HarveyHunt/linux
mm/hugetlb.c
132730
/* * Generic hugetlb support. * (C) Nadia Yvette Chambers, April 2004 */ #include <linux/list.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/sysctl.h> #include <linux/highmem.h> #include <linux/mmu_notifier.h> #include <linux/nodemask.h> #include <linux/pagemap.h> #include <linux/mempolicy.h> #include <linux/compiler.h> #include <linux/cpuset.h> #include <linux/mutex.h> #include <linux/bootmem.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include <linux/rmap.h> #include <linux/string_helpers.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/jhash.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/tlb.h> #include <linux/io.h> #include <linux/hugetlb.h> #include <linux/hugetlb_cgroup.h> #include <linux/node.h> #include <linux/userfaultfd_k.h> #include <linux/page_owner.h> #include "internal.h" int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; struct hstate hstates[HUGE_MAX_HSTATE]; /* * Minimum page order among possible hugepage sizes, set to a proper value * at boot time. */ static unsigned int minimum_order __read_mostly = UINT_MAX; __initdata LIST_HEAD(huge_boot_pages); /* for command line parsing */ static struct hstate * __initdata parsed_hstate; static unsigned long __initdata default_hstate_max_huge_pages; static unsigned long __initdata default_hstate_size; static bool __initdata parsed_valid_hugepagesz = true; /* * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages, * free_huge_pages, and surplus_huge_pages. */ DEFINE_SPINLOCK(hugetlb_lock); /* * Serializes faults on the same logical page. This is used to * prevent spurious OOMs when the hugepage pool is fully utilized. 
*/ static int num_fault_mutexes; struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp; /* Forward declaration */ static int hugetlb_acct_memory(struct hstate *h, long delta); static inline void unlock_or_release_subpool(struct hugepage_subpool *spool) { bool free = (spool->count == 0) && (spool->used_hpages == 0); spin_unlock(&spool->lock); /* If no pages are used, and no other handles to the subpool * remain, give up any reservations mased on minimum size and * free the subpool */ if (free) { if (spool->min_hpages != -1) hugetlb_acct_memory(spool->hstate, -spool->min_hpages); kfree(spool); } } struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, long min_hpages) { struct hugepage_subpool *spool; spool = kzalloc(sizeof(*spool), GFP_KERNEL); if (!spool) return NULL; spin_lock_init(&spool->lock); spool->count = 1; spool->max_hpages = max_hpages; spool->hstate = h; spool->min_hpages = min_hpages; if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { kfree(spool); return NULL; } spool->rsv_hpages = min_hpages; return spool; } void hugepage_put_subpool(struct hugepage_subpool *spool) { spin_lock(&spool->lock); BUG_ON(!spool->count); spool->count--; unlock_or_release_subpool(spool); } /* * Subpool accounting for allocating and reserving pages. * Return -ENOMEM if there are not enough resources to satisfy the * the request. Otherwise, return the number of pages by which the * global pools must be adjusted (upward). The returned value may * only be different than the passed value (delta) in the case where * a subpool minimum size must be manitained. 
*/ static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, long delta) { long ret = delta; if (!spool) return ret; spin_lock(&spool->lock); if (spool->max_hpages != -1) { /* maximum size accounting */ if ((spool->used_hpages + delta) <= spool->max_hpages) spool->used_hpages += delta; else { ret = -ENOMEM; goto unlock_ret; } } /* minimum size accounting */ if (spool->min_hpages != -1 && spool->rsv_hpages) { if (delta > spool->rsv_hpages) { /* * Asking for more reserves than those already taken on * behalf of subpool. Return difference. */ ret = delta - spool->rsv_hpages; spool->rsv_hpages = 0; } else { ret = 0; /* reserves already accounted for */ spool->rsv_hpages -= delta; } } unlock_ret: spin_unlock(&spool->lock); return ret; } /* * Subpool accounting for freeing and unreserving pages. * Return the number of global page reservations that must be dropped. * The return value may only be different than the passed value (delta) * in the case where a subpool minimum size must be maintained. */ static long hugepage_subpool_put_pages(struct hugepage_subpool *spool, long delta) { long ret = delta; if (!spool) return delta; spin_lock(&spool->lock); if (spool->max_hpages != -1) /* maximum size accounting */ spool->used_hpages -= delta; /* minimum size accounting */ if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) { if (spool->rsv_hpages + delta <= spool->min_hpages) ret = 0; else ret = spool->rsv_hpages + delta - spool->min_hpages; spool->rsv_hpages += delta; if (spool->rsv_hpages > spool->min_hpages) spool->rsv_hpages = spool->min_hpages; } /* * If hugetlbfs_put_super couldn't free spool due to an outstanding * quota reference, free it now. 
*/ unlock_or_release_subpool(spool); return ret; } static inline struct hugepage_subpool *subpool_inode(struct inode *inode) { return HUGETLBFS_SB(inode->i_sb)->spool; } static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) { return subpool_inode(file_inode(vma->vm_file)); } /* * Region tracking -- allows tracking of reservations and instantiated pages * across the pages in a mapping. * * The region data structures are embedded into a resv_map and protected * by a resv_map's lock. The set of regions within the resv_map represent * reservations for huge pages, or huge pages that have already been * instantiated within the map. The from and to elements are huge page * indicies into the associated mapping. from indicates the starting index * of the region. to represents the first index past the end of the region. * * For example, a file region structure with from == 0 and to == 4 represents * four huge pages in a mapping. It is important to note that the to element * represents the first element past the end of the region. This is used in * arithmetic as 4(to) - 0(from) = 4 huge pages in the region. * * Interval notation of the form [from, to) will be used to indicate that * the endpoint from is inclusive and to is exclusive. */ struct file_region { struct list_head link; long from; long to; }; /* * Add the huge page range represented by [f, t) to the reserve * map. In the normal case, existing regions will be expanded * to accommodate the specified range. Sufficient regions should * exist for expansion due to the previous call to region_chg * with the same range. However, it is possible that region_del * could have been called after region_chg and modifed the map * in such a way that no region exists to be expanded. In this * case, pull a region descriptor from the cache associated with * the map and use that for the new range. * * Return the number of new huge pages added to the map. This * number is greater than or equal to zero. 
*/ static long region_add(struct resv_map *resv, long f, long t) { struct list_head *head = &resv->regions; struct file_region *rg, *nrg, *trg; long add = 0; spin_lock(&resv->lock); /* Locate the region we are either in or before. */ list_for_each_entry(rg, head, link) if (f <= rg->to) break; /* * If no region exists which can be expanded to include the * specified range, the list must have been modified by an * interleving call to region_del(). Pull a region descriptor * from the cache and use it for this range. */ if (&rg->link == head || t < rg->from) { VM_BUG_ON(resv->region_cache_count <= 0); resv->region_cache_count--; nrg = list_first_entry(&resv->region_cache, struct file_region, link); list_del(&nrg->link); nrg->from = f; nrg->to = t; list_add(&nrg->link, rg->link.prev); add += t - f; goto out_locked; } /* Round our left edge to the current segment if it encloses us. */ if (f > rg->from) f = rg->from; /* Check for and consume any regions we now overlap with. */ nrg = rg; list_for_each_entry_safe(rg, trg, rg->link.prev, link) { if (&rg->link == head) break; if (rg->from > t) break; /* If this area reaches higher then extend our area to * include it completely. If this is not the first area * which we intend to reuse, free it. */ if (rg->to > t) t = rg->to; if (rg != nrg) { /* Decrement return value by the deleted range. * Another range will span this area so that by * end of routine add will be >= zero */ add -= (rg->to - rg->from); list_del(&rg->link); kfree(rg); } } add += (nrg->from - f); /* Added to beginning of region */ nrg->from = f; add += t - nrg->to; /* Added to end of region */ nrg->to = t; out_locked: resv->adds_in_progress--; spin_unlock(&resv->lock); VM_BUG_ON(add < 0); return add; } /* * Examine the existing reserve map and determine how many * huge pages in the specified range [f, t) are NOT currently * represented. 
This routine is called before a subsequent * call to region_add that will actually modify the reserve * map to add the specified range [f, t). region_chg does * not change the number of huge pages represented by the * map. However, if the existing regions in the map can not * be expanded to represent the new range, a new file_region * structure is added to the map as a placeholder. This is * so that the subsequent region_add call will have all the * regions it needs and will not fail. * * Upon entry, region_chg will also examine the cache of region descriptors * associated with the map. If there are not enough descriptors cached, one * will be allocated for the in progress add operation. * * Returns the number of huge pages that need to be added to the existing * reservation map for the range [f, t). This number is greater or equal to * zero. -ENOMEM is returned if a new file_region structure or cache entry * is needed and can not be allocated. */ static long region_chg(struct resv_map *resv, long f, long t) { struct list_head *head = &resv->regions; struct file_region *rg, *nrg = NULL; long chg = 0; retry: spin_lock(&resv->lock); retry_locked: resv->adds_in_progress++; /* * Check for sufficient descriptors in the cache to accommodate * the number of in progress add operations. */ if (resv->adds_in_progress > resv->region_cache_count) { struct file_region *trg; VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1); /* Must drop lock to allocate a new descriptor. */ resv->adds_in_progress--; spin_unlock(&resv->lock); trg = kmalloc(sizeof(*trg), GFP_KERNEL); if (!trg) { kfree(nrg); return -ENOMEM; } spin_lock(&resv->lock); list_add(&trg->link, &resv->region_cache); resv->region_cache_count++; goto retry_locked; } /* Locate the region we are before or in. */ list_for_each_entry(rg, head, link) if (f <= rg->to) break; /* If we are below the current region then a new region is required. 
* Subtle, allocate a new region at the position but make it zero * size such that we can guarantee to record the reservation. */ if (&rg->link == head || t < rg->from) { if (!nrg) { resv->adds_in_progress--; spin_unlock(&resv->lock); nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); if (!nrg) return -ENOMEM; nrg->from = f; nrg->to = f; INIT_LIST_HEAD(&nrg->link); goto retry; } list_add(&nrg->link, rg->link.prev); chg = t - f; goto out_nrg; } /* Round our left edge to the current segment if it encloses us. */ if (f > rg->from) f = rg->from; chg = t - f; /* Check for and consume any regions we now overlap with. */ list_for_each_entry(rg, rg->link.prev, link) { if (&rg->link == head) break; if (rg->from > t) goto out; /* We overlap with this area, if it extends further than * us then we must extend ourselves. Account for its * existing reservation. */ if (rg->to > t) { chg += rg->to - t; t = rg->to; } chg -= rg->to - rg->from; } out: spin_unlock(&resv->lock); /* We already know we raced and no longer need the new region */ kfree(nrg); return chg; out_nrg: spin_unlock(&resv->lock); return chg; } /* * Abort the in progress add operation. The adds_in_progress field * of the resv_map keeps track of the operations in progress between * calls to region_chg and region_add. Operations are sometimes * aborted after the call to region_chg. In such cases, region_abort * is called to decrement the adds_in_progress counter. * * NOTE: The range arguments [f, t) are not needed or used in this * routine. They are kept to make reading the calling code easier as * arguments will match the associated region_chg call. */ static void region_abort(struct resv_map *resv, long f, long t) { spin_lock(&resv->lock); VM_BUG_ON(!resv->region_cache_count); resv->adds_in_progress--; spin_unlock(&resv->lock); } /* * Delete the specified range [f, t) from the reserve map. If the * t parameter is LONG_MAX, this indicates that ALL regions after f * should be deleted. 
Locate the regions which intersect [f, t) * and either trim, delete or split the existing regions. * * Returns the number of huge pages deleted from the reserve map. * In the normal case, the return value is zero or more. In the * case where a region must be split, a new region descriptor must * be allocated. If the allocation fails, -ENOMEM will be returned. * NOTE: If the parameter t == LONG_MAX, then we will never split * a region and possibly return -ENOMEM. Callers specifying * t == LONG_MAX do not need to check for -ENOMEM error. */ static long region_del(struct resv_map *resv, long f, long t) { struct list_head *head = &resv->regions; struct file_region *rg, *trg; struct file_region *nrg = NULL; long del = 0; retry: spin_lock(&resv->lock); list_for_each_entry_safe(rg, trg, head, link) { /* * Skip regions before the range to be deleted. file_region * ranges are normally of the form [from, to). However, there * may be a "placeholder" entry in the map which is of the form * (from, to) with from == to. Check for placeholder entries * at the beginning of the range to be deleted. */ if (rg->to <= f && (rg->to != rg->from || rg->to != f)) continue; if (rg->from >= t) break; if (f > rg->from && t < rg->to) { /* Must split region */ /* * Check for an entry in the cache before dropping * lock and attempting allocation. 
*/ if (!nrg && resv->region_cache_count > resv->adds_in_progress) { nrg = list_first_entry(&resv->region_cache, struct file_region, link); list_del(&nrg->link); resv->region_cache_count--; } if (!nrg) { spin_unlock(&resv->lock); nrg = kmalloc(sizeof(*nrg), GFP_KERNEL); if (!nrg) return -ENOMEM; goto retry; } del += t - f; /* New entry for end of split region */ nrg->from = t; nrg->to = rg->to; INIT_LIST_HEAD(&nrg->link); /* Original entry is trimmed */ rg->to = f; list_add(&nrg->link, &rg->link); nrg = NULL; break; } if (f <= rg->from && t >= rg->to) { /* Remove entire region */ del += rg->to - rg->from; list_del(&rg->link); kfree(rg); continue; } if (f <= rg->from) { /* Trim beginning of region */ del += t - rg->from; rg->from = t; } else { /* Trim end of region */ del += rg->to - f; rg->to = f; } } spin_unlock(&resv->lock); kfree(nrg); return del; } /* * A rare out of memory error was encountered which prevented removal of * the reserve map region for a page. The huge page itself was free'ed * and removed from the page cache. This routine will adjust the subpool * usage count, and the global reserve count if needed. By incrementing * these counts, the reserve map entry which could not be deleted will * appear as a "reserved" entry instead of simply dangling with incorrect * counts. */ void hugetlb_fix_reserve_counts(struct inode *inode) { struct hugepage_subpool *spool = subpool_inode(inode); long rsv_adjust; rsv_adjust = hugepage_subpool_get_pages(spool, 1); if (rsv_adjust) { struct hstate *h = hstate_inode(inode); hugetlb_acct_memory(h, 1); } } /* * Count and return the number of huge pages in the reserve map * that intersect with the range [f, t). */ static long region_count(struct resv_map *resv, long f, long t) { struct list_head *head = &resv->regions; struct file_region *rg; long chg = 0; spin_lock(&resv->lock); /* Locate each segment we overlap with, and count that overlap. 
*/ list_for_each_entry(rg, head, link) { long seg_from; long seg_to; if (rg->to <= f) continue; if (rg->from >= t) break; seg_from = max(rg->from, f); seg_to = min(rg->to, t); chg += seg_to - seg_from; } spin_unlock(&resv->lock); return chg; } /* * Convert the address within this vma to the page offset within * the mapping, in pagecache page units; huge pages here. */ static pgoff_t vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { return ((address - vma->vm_start) >> huge_page_shift(h)) + (vma->vm_pgoff >> huge_page_order(h)); } pgoff_t linear_hugepage_index(struct vm_area_struct *vma, unsigned long address) { return vma_hugecache_offset(hstate_vma(vma), vma, address); } EXPORT_SYMBOL_GPL(linear_hugepage_index); /* * Return the size of the pages allocated when backing a VMA. In the majority * cases this will be same size as used by the page table entries. */ unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) { struct hstate *hstate; if (!is_vm_hugetlb_page(vma)) return PAGE_SIZE; hstate = hstate_vma(vma); return 1UL << huge_page_shift(hstate); } EXPORT_SYMBOL_GPL(vma_kernel_pagesize); /* * Return the page size being used by the MMU to back a VMA. In the majority * of cases, the page size used by the kernel matches the MMU size. On * architectures where it differs, an architecture-specific version of this * function is required. */ #ifndef vma_mmu_pagesize unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) { return vma_kernel_pagesize(vma); } #endif /* * Flags for MAP_PRIVATE reservations. These are stored in the bottom * bits of the reservation map pointer, which are always clear due to * alignment. */ #define HPAGE_RESV_OWNER (1UL << 0) #define HPAGE_RESV_UNMAPPED (1UL << 1) #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED) /* * These helpers are used to track how many pages are reserved for * faults in a MAP_PRIVATE mapping. 
Only the process that called mmap() * is guaranteed to have their future faults succeed. * * With the exception of reset_vma_resv_huge_pages() which is called at fork(), * the reserve counters are updated with the hugetlb_lock held. It is safe * to reset the VMA at fork() time as it is not in use yet and there is no * chance of the global counters getting corrupted as a result of the values. * * The private mapping reservation is represented in a subtly different * manner to a shared mapping. A shared mapping has a region map associated * with the underlying file, this region map represents the backing file * pages which have ever had a reservation assigned which this persists even * after the page is instantiated. A private mapping has a region map * associated with the original mmap which is attached to all VMAs which * reference it, this region map represents those offsets which have consumed * reservation ie. where pages have been instantiated. */ static unsigned long get_vma_private_data(struct vm_area_struct *vma) { return (unsigned long)vma->vm_private_data; } static void set_vma_private_data(struct vm_area_struct *vma, unsigned long value) { vma->vm_private_data = (void *)value; } struct resv_map *resv_map_alloc(void) { struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL); struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL); if (!resv_map || !rg) { kfree(resv_map); kfree(rg); return NULL; } kref_init(&resv_map->refs); spin_lock_init(&resv_map->lock); INIT_LIST_HEAD(&resv_map->regions); resv_map->adds_in_progress = 0; INIT_LIST_HEAD(&resv_map->region_cache); list_add(&rg->link, &resv_map->region_cache); resv_map->region_cache_count = 1; return resv_map; } void resv_map_release(struct kref *ref) { struct resv_map *resv_map = container_of(ref, struct resv_map, refs); struct list_head *head = &resv_map->region_cache; struct file_region *rg, *trg; /* Clear out any active regions before we release the map. 
*/ region_del(resv_map, 0, LONG_MAX); /* ... and any entries left in the cache */ list_for_each_entry_safe(rg, trg, head, link) { list_del(&rg->link); kfree(rg); } VM_BUG_ON(resv_map->adds_in_progress); kfree(resv_map); } static inline struct resv_map *inode_resv_map(struct inode *inode) { return inode->i_mapping->private_data; } static struct resv_map *vma_resv_map(struct vm_area_struct *vma) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); if (vma->vm_flags & VM_MAYSHARE) { struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; return inode_resv_map(inode); } else { return (struct resv_map *)(get_vma_private_data(vma) & ~HPAGE_RESV_MASK); } } static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_private_data(vma, (get_vma_private_data(vma) & HPAGE_RESV_MASK) | (unsigned long)map); } static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_private_data(vma, get_vma_private_data(vma) | flags); } static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); return (get_vma_private_data(vma) & flag) != 0; } /* Reset counters to 0 and clear all HPAGE_RESV_* flags */ void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); if (!(vma->vm_flags & VM_MAYSHARE)) vma->vm_private_data = (void *)0; } /* Returns true if the VMA has associated reserve pages */ static bool vma_has_reserves(struct vm_area_struct *vma, long chg) { if (vma->vm_flags & VM_NORESERVE) { /* * This address is already reserved by other process(chg == 0), * so, we should decrement reserved count. 
Without decrementing, * reserve count remains after releasing inode, because this * allocated page will go into page cache and is regarded as * coming from reserved pool in releasing step. Currently, we * don't have any other solution to deal with this situation * properly, so add work-around here. */ if (vma->vm_flags & VM_MAYSHARE && chg == 0) return true; else return false; } /* Shared mappings always use reserves */ if (vma->vm_flags & VM_MAYSHARE) { /* * We know VM_NORESERVE is not set. Therefore, there SHOULD * be a region map for all pages. The only situation where * there is no region map is if a hole was punched via * fallocate. In this case, there really are no reverves to * use. This situation is indicated if chg != 0. */ if (chg) return false; else return true; } /* * Only the process that called mmap() has reserves for * private mappings. */ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { /* * Like the shared case above, a hole punch or truncate * could have been performed on the private mapping. * Examine the value of chg to determine if reserves * actually exist or were previously consumed. * Very Subtle - The value of chg comes from a previous * call to vma_needs_reserves(). The reserve map for * private mappings has different (opposite) semantics * than that of shared mappings. vma_needs_reserves() * has already taken this difference in semantics into * account. Therefore, the meaning of chg is the same * as in the shared case above. Code could easily be * combined, but keeping it separate draws attention to * subtle differences. 
*/ if (chg) return false; else return true; } return false; } static void enqueue_huge_page(struct hstate *h, struct page *page) { int nid = page_to_nid(page); list_move(&page->lru, &h->hugepage_freelists[nid]); h->free_huge_pages++; h->free_huge_pages_node[nid]++; } static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) { struct page *page; list_for_each_entry(page, &h->hugepage_freelists[nid], lru) if (!PageHWPoison(page)) break; /* * if 'non-isolated free hugepage' not found on the list, * the allocation fails. */ if (&h->hugepage_freelists[nid] == &page->lru) return NULL; list_move(&page->lru, &h->hugepage_activelist); set_page_refcounted(page); h->free_huge_pages--; h->free_huge_pages_node[nid]--; return page; } static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask) { unsigned int cpuset_mems_cookie; struct zonelist *zonelist; struct zone *zone; struct zoneref *z; int node = -1; zonelist = node_zonelist(nid, gfp_mask); retry_cpuset: cpuset_mems_cookie = read_mems_allowed_begin(); for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) { struct page *page; if (!cpuset_zone_allowed(zone, gfp_mask)) continue; /* * no need to ask again on the same node. Pool is node rather than * zone aware */ if (zone_to_nid(zone) == node) continue; node = zone_to_nid(zone); page = dequeue_huge_page_node_exact(h, node); if (page) return page; } if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie))) goto retry_cpuset; return NULL; } /* Movability of hugepages depends on migration support. 
*/ static inline gfp_t htlb_alloc_mask(struct hstate *h) { if (hugepage_migration_supported(h)) return GFP_HIGHUSER_MOVABLE; else return GFP_HIGHUSER; } static struct page *dequeue_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve, long chg) { struct page *page; struct mempolicy *mpol; gfp_t gfp_mask; nodemask_t *nodemask; int nid; /* * A child process with MAP_PRIVATE mappings created by their parent * have no page reserves. This check ensures that reservations are * not "stolen". The child may still get SIGKILLed */ if (!vma_has_reserves(vma, chg) && h->free_huge_pages - h->resv_huge_pages == 0) goto err; /* If reserves cannot be used, ensure enough pages are in the pool */ if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) goto err; gfp_mask = htlb_alloc_mask(h); nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask); if (page && !avoid_reserve && vma_has_reserves(vma, chg)) { SetPagePrivate(page); h->resv_huge_pages--; } mpol_cond_put(mpol); return page; err: return NULL; } /* * common helper functions for hstate_next_node_to_{alloc|free}. * We may have allocated or freed a huge page based on a different * nodes_allowed previously, so h->next_node_to_{alloc|free} might * be outside of *nodes_allowed. Ensure that we use an allowed * node for alloc or free. */ static int next_node_allowed(int nid, nodemask_t *nodes_allowed) { nid = next_node_in(nid, *nodes_allowed); VM_BUG_ON(nid >= MAX_NUMNODES); return nid; } static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed) { if (!node_isset(nid, *nodes_allowed)) nid = next_node_allowed(nid, nodes_allowed); return nid; } /* * returns the previously saved node ["this node"] from which to * allocate a persistent huge page for the pool and advance the * next node from which to allocate, handling wrap at end of node * mask. 
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * Iterate over the allowed nodes in round-robin order, starting from the
 * hstate's saved "next" node.  The "|| 1" keeps the loop condition true
 * regardless of the node id returned; nr_nodes bounds the iteration count.
 */
#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
/*
 * Undo compound-page setup for a gigantic page: detach every tail page from
 * the head and give each base page back its own reference count, so the
 * range can be returned to the contiguous-range allocator.
 */
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	__ClearPageHead(page);
}

/* Hand a gigantic page's pfn range back to the contiguous-range allocator. */
static void free_gigantic_page(struct page *page, unsigned int order)
{
	free_contig_range(page_to_pfn(page), 1 << order);
}

/* Try to claim [start_pfn, start_pfn + nr_pages) as one contiguous block. */
static int __alloc_gigantic_page(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

/*
 * Check whether the pfn range looks usable for a gigantic page: every pfn
 * must be valid, belong to zone z, and be an unreserved, free (refcount 0),
 * non-huge page.  Called under zone->lock, so this is only a snapshot;
 * the subsequent alloc_contig_range() can still fail.
 */
static bool pfn_range_valid_gigantic(struct zone *z,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		if (!pfn_valid(i))
			return false;

		page = pfn_to_page(i);

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}

	return true;
}

/* Does the candidate range fit entirely inside this zone's pfn span? */
static bool zone_spans_last_pfn(const struct zone *zone,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;
	return zone_spans_pfn(zone, last_pfn);
}

/*
 * Scan the zonelist for a naturally-aligned free pfn range large enough for
 * a gigantic page, and try to allocate it as one contiguous block.
 * Returns the head page on success, NULL on failure.
 */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned int order = huge_page_order(h);
	unsigned long nr_pages = 1 << order;
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return NULL;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);
#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
/* Stubs: gigantic pages are unsupported on this architecture/config. */
static inline bool gigantic_page_supported(void) { return false; }
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask) { return NULL; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif

/*
 * Remove the page from hstate accounting, scrub hugetlb-specific page state
 * and hand the memory back to the page allocator (or the contig-range
 * allocator for gigantic pages).
 */
static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	set_page_refcounted(page);
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

/* Look up the hstate whose huge page size (in bytes) matches; NULL if none. */
struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}

/*
 * Internal hugetlb specific page flag. Do not use outside of the hugetlb
 * code.  The flag is encoded as a sentinel value in the (otherwise unused)
 * mapping field of the third subpage.
 */
static inline bool PageHugeTemporary(struct page *page)
{
	if (!PageHuge(page))
		return false;

	return (unsigned long)page[2].mapping == -1U;
}

static inline void SetPageHugeTemporary(struct page *page)
{
	page[2].mapping = (void *)-1U;
}

static inline void ClearPageHugeTemporary(struct page *page)
{
	page[2].mapping = NULL;
}

/*
 * Compound-page destructor for hugetlb pages: either return the page to the
 * pool free list or, for surplus/temporary pages, free it outright.
 */
void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	set_page_private(page, 0);
	page->mapping = NULL;
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	/*
	 * A return code of zero implies that the subpool will be under its
	 * minimum size if the reservation is not restored after page is free.
	 * Therefore, force restore_reserve operation.
	 */
	if (hugepage_subpool_put_pages(spool, 1) == 0)
		restore_reserve = true;

	spin_lock(&hugetlb_lock);
	clear_page_huge_active(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (PageHugeTemporary(page)) {
		list_del(&page->lru);
		ClearPageHugeTemporary(page);
		update_and_free_page(h, page);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

/* Register a freshly allocated page with the hstate's global counters. */
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
}

/* Build the compound-page structure for a gigantic page (head + tails). */
static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need know if put_page() is needed
		 * after get_user_pages().
 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return get_compound_page_dtor(page_head) == free_huge_page;
}

/*
 * Compute the page-cache index of a subpage in units of the base page size.
 * For huge pages the head's index is scaled by the compound order and the
 * subpage offset within the compound page is added.
 */
pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	/* pages beyond MAX_ORDER may not be contiguous in mem_map */
	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}

/* Allocate one huge page straight from the buddy allocator. */
static struct page *alloc_buddy_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	int order = huge_page_order(h);
	struct page *page;

	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
	if (page)
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return page;
}

/*
 * Common helper to allocate a fresh hugetlb page. All specific allocators
 * should use this function to get new hugetlb pages
 */
static struct page *alloc_fresh_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
	else
		page = alloc_buddy_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	if (hstate_is_gigantic(h))
		prep_compound_gigantic_page(page, huge_page_order(h));
	prep_new_huge_page(h, page, page_to_nid(page));

	return page;
}

/*
 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
 * manner.
 */
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
		if (page)
			break;
	}

	if (!page)
		return 0;

	/* dropping the last reference routes the page into the pool */
	put_page(page); /* free it into the hugepage allocator */

	return 1;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}

/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
 * number of free hugepages would be reduced below the number of reserved
 * hugepages.
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = 0;

	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);
		int nid = page_to_nid(head);
		if (h->free_huge_pages - h->resv_huge_pages == 0) {
			rc = -EBUSY;
			goto out;
		}
		/*
		 * Move PageHWPoison flag from head page to the raw error page,
		 * which makes any subpages rather than the error page reusable.
		 */
		if (PageHWPoison(head) && page != head) {
			SetPageHWPoison(page);
			ClearPageHWPoison(head);
		}
		list_del(&head->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		h->max_huge_pages--;
		update_and_free_page(h, head);
	}
out:
	spin_unlock(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;

	if (!hugepages_supported())
		return rc;

	/* stepping by minimum_order visits every possible huge page head */
	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
		page = pfn_to_page(pfn);
		if (PageHuge(page) && !page_count(page)) {
			rc = dissolve_free_huge_page(page);
			if (rc)
				break;
		}
	}

	return rc;
}

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock(&hugetlb_lock);

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	spin_lock(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommiting the surpluses. Abuse
	 * temporary page to workaround the nasty free_huge_page
	 * codeflow
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		SetPageHugeTemporary(page);
		put_page(page);
		page = NULL;
	} else {
		h->surplus_huge_pages++;
		h->nr_huge_pages_node[page_to_nid(page)]++;
	}

out_unlock:
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * Allocate a huge page for migration.  The page is marked temporary so it
 * is freed outright (not accounted as surplus) on its last put_page().
 */
static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		return NULL;

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference
	 */
	SetPageHugeTemporary(page);

	return page;
}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask = htlb_alloc_mask(h);
	int nid;
	nodemask_t *nodemask;

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);
	struct page *page = NULL;

	if (nid != NUMA_NO_NODE)
		gfp_mask |= __GFP_THISNODE;

	/* prefer a free page from the pool; fall back to a temporary page */
	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0) {
		struct page *page;

		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
		if (page) {
			spin_unlock(&hugetlb_lock);
			return page;
		}
	}
	spin_unlock(&hugetlb_lock);

	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
}

/* mempolicy aware migration callback */
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
		unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = alloc_huge_page_nodemask(h, node, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		/* existing free pages already cover the reservation */
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	/* drop the lock while allocating; recheck 'needed' afterwards */
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
				NUMA_NO_NODE, NULL);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
		cond_resched();
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON_PAGE(page_count(page), page);
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 *
 * Called with hugetlb_lock held.  However, the lock could be dropped (and
 * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
 * we must make sure nobody else can claim pages we are in the process of
 * freeing.  Do this by ensuring resv_huge_page always is greater than the
 * number of huge pages we plan to free when dropping the lock.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Cannot return gigantic pages currently */
	if (hstate_is_gigantic(h))
		goto out;

	/*
	 * Part (or even all) of the reservation could have been backed
	 * by pre-allocated pages. Only free surplus pages.
	 */
	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 *
	 * Note that we decrement resv_huge_pages as we free the pages.  If
	 * we drop the lock, resv_huge_pages will still be sufficiently large
	 * to cover subsequent pages we may free.
	 */
	while (nr_pages--) {
		h->resv_huge_pages--;
		unused_resv_pages--;
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			goto out;
		cond_resched_lock(&hugetlb_lock);
	}

out:
	/* Fully uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;
}


/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation.  If a reservation is
 * needed, the value 1 is returned.  The caller is then responsible for
 * managing the global reservation and subpool usage counts.  After
 * the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map.  If the page allocation fails,
 * the reservation must be ended instead of committed.  vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call.  The only time this
 * is not the case is if a reserve map was changed between calls.  It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 *
 * vma_add_reservation is used in error paths where a reservation must
 * be restored when a newly allocated huge page must be freed.  It is
 * to be called after calling vma_needs_reservation to determine if a
 * reservation exists.
 */
enum vma_resv_mode {
	VMA_NEEDS_RESV,
	VMA_COMMIT_RESV,
	VMA_END_RESV,
	VMA_ADD_RESV,
};
static long __vma_reservation_common(struct hstate *h,
				struct vm_area_struct *vma, unsigned long addr,
				enum vma_resv_mode mode)
{
	struct resv_map *resv;
	pgoff_t idx;
	long ret;

	resv = vma_resv_map(vma);
	if (!resv)
		return 1;

	idx = vma_hugecache_offset(h, vma, addr);
	switch (mode) {
	case VMA_NEEDS_RESV:
		ret = region_chg(resv, idx, idx + 1);
		break;
	case VMA_COMMIT_RESV:
		ret = region_add(resv, idx, idx + 1);
		break;
	case VMA_END_RESV:
		region_abort(resv, idx, idx + 1);
		ret = 0;
		break;
	case VMA_ADD_RESV:
		if (vma->vm_flags & VM_MAYSHARE)
			ret = region_add(resv, idx, idx + 1);
		else {
			region_abort(resv, idx, idx + 1);
			ret = region_del(resv, idx, idx + 1);
		}
		break;
	default:
		BUG();
	}

	if (vma->vm_flags & VM_MAYSHARE)
		return ret;
	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
		/*
		 * In most cases, reserves always exist for private mappings.
		 * However, a file associated with mapping could have been
		 * hole punched or truncated after reserves were consumed.
		 * As subsequent fault on such a range will not use reserves.
		 * Subtle - The reserve map for private mappings has the
		 * opposite meaning than that of shared mappings.  If NO
		 * entry is in the reserve map, it means a reservation exists.
		 * If an entry exists in the reserve map, it means the
		 * reservation has already been consumed.  As a result, the
		 * return value of this routine is the opposite of the
		 * value returned from reserve map manipulation routines above.
		 */
		if (ret)
			return 0;
		else
			return 1;
	}
	else
		return ret < 0 ? ret : 0;
}

static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
}

static long vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
}

static void vma_end_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}

static long vma_add_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}

/*
 * This routine is called to restore a reservation on error paths.  In the
 * specific error paths, a huge page was allocated (via alloc_huge_page)
 * and is about to be freed.  If a reservation for the page existed,
 * alloc_huge_page would have consumed the reservation and set PagePrivate
 * in the newly allocated page.  When the page is freed via free_huge_page,
 * the global reservation count will be incremented if PagePrivate is set.
 * However, free_huge_page can not adjust the reserve map.  Adjust the
 * reserve map here to be consistent with global reserve count adjustments
 * to be made by free_huge_page.
 */
static void restore_reserve_on_error(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address,
			struct page *page)
{
	if (unlikely(PagePrivate(page))) {
		long rc = vma_needs_reservation(h, vma, address);

		if (unlikely(rc < 0)) {
			/*
			 * Rare out of memory condition in reserve map
			 * manipulation.  Clear PagePrivate so that
			 * global reserve count will not be incremented
			 * by free_huge_page.  This will make it appear
			 * as though the reservation for this page was
			 * consumed.  This may prevent the task from
			 * faulting in the page at a later time.  This
			 * is better than inconsistent global huge page
			 * accounting of reserve counts.
 */
			ClearPagePrivate(page);
		} else if (rc) {
			rc = vma_add_reservation(h, vma, address);
			if (unlikely(rc < 0))
				/*
				 * See above comment about rare out of
				 * memory condition.
				 */
				ClearPagePrivate(page);
		} else
			vma_end_reservation(h, vma, address);
	}
}

/*
 * Allocate a huge page for the given VMA/address, charging the subpool and
 * hugetlb cgroup and consulting the reservation map.  Returns an ERR_PTR
 * (-ENOMEM or -ENOSPC) on failure.
 */
struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long map_chg, map_commit;
	long gbl_chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg;

	idx = hstate_index(h);
	/*
	 * Examine the region/reserve map to determine if the process
	 * has a reservation for the page to be allocated.  A return
	 * code of zero indicates a reservation exists (no change).
	 */
	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
	if (map_chg < 0)
		return ERR_PTR(-ENOMEM);

	/*
	 * Processes that did not create the mapping will have no
	 * reserves as indicated by the region/reserve map. Check
	 * that the allocation will not exceed the subpool limit.
	 * Allocations for MAP_NORESERVE mappings also need to be
	 * checked against any subpool limit.
	 */
	if (map_chg || avoid_reserve) {
		gbl_chg = hugepage_subpool_get_pages(spool, 1);
		if (gbl_chg < 0) {
			vma_end_reservation(h, vma, addr);
			return ERR_PTR(-ENOSPC);
		}

		/*
		 * Even though there was no reservation in the region/reserve
		 * map, there could be reservations associated with the
		 * subpool that can be used.  This would be indicated if the
		 * return value of hugepage_subpool_get_pages() is zero.
		 * However, if avoid_reserve is specified we still avoid even
		 * the subpool reservations.
		 */
		if (avoid_reserve)
			gbl_chg = 1;
	}

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_subpool_put;

	spin_lock(&hugetlb_lock);
	/*
	 * glb_chg is passed to indicate whether or not a page must be taken
	 * from the global free pool (global change).  gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
	if (!page) {
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
		if (!page)
			goto out_uncharge_cgroup;
		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
			SetPagePrivate(page);
			h->resv_huge_pages--;
		}
		spin_lock(&hugetlb_lock);
		list_move(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	spin_unlock(&hugetlb_lock);

	set_page_private(page, (unsigned long)spool);

	map_commit = vma_commit_reservation(h, vma, addr);
	if (unlikely(map_chg > map_commit)) {
		/*
		 * The page was added to the reservation map between
		 * vma_needs_reservation and vma_commit_reservation.
		 * This indicates a race with hugetlb_reserve_pages.
		 * Adjust for the subpool count incremented above AND
		 * in hugetlb_reserve_pages for the same page.  Also,
		 * the reservation count added in hugetlb_reserve_pages
		 * no longer applies.
		 */
		long rsv_adjust;

		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
		hugetlb_acct_memory(h, -rsv_adjust);
	}
	return page;

out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_subpool_put:
	if (map_chg || avoid_reserve)
		hugepage_subpool_put_pages(spool, 1);
	vma_end_reservation(h, vma, addr);
	return ERR_PTR(-ENOSPC);
}

int alloc_bootmem_huge_page(struct hstate *h)
	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
/*
 * Carve out one huge page per iteration from bootmem, interleaved over the
 * memory nodes.  Returns 1 on success, 0 when no node could satisfy it.
 */
int __alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		void *addr;

		addr = memblock_virt_alloc_try_nid_nopanic(
				huge_page_size(h), huge_page_size(h),
				0, BOOTMEM_ALLOC_ACCESSIBLE, node);
		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
	}
	return 0;

found:
	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

/* Route to the gigantic or regular compound-page setup by order. */
static void __init prep_compound_huge_page(struct page *page,
		unsigned int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct hstate *h = m->hstate;
		struct page *page;

#ifdef CONFIG_HIGHMEM
		page = pfn_to_page(m->phys >> PAGE_SHIFT);
		memblock_free_late(__pa(m),
				   sizeof(struct huge_bootmem_page));
#else
		page = virt_to_page(m);
#endif
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		WARN_ON(PageReserved(page));
		prep_new_huge_page(h, page, page_to_nid(page));
		put_page(page); /* free it into the hugepage allocator */

		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and another
		 * side-effects, like CommitLimit going negative.
		 */
		if (hstate_is_gigantic(h))
			adjust_managed_page_count(page, 1 << h->order);
	}
}

/* Pre-allocate max_huge_pages pages for this hstate at boot. */
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (hstate_is_gigantic(h)) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_pool_huge_page(h,
					 &node_states[N_MEMORY]))
			break;
		cond_resched();
	}
	if (i < h->max_huge_pages) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
			h->max_huge_pages, buf, i);
		h->max_huge_pages = i;
	}
}

/* Compute minimum_order and preallocate pools for non-gigantic hstates. */
static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (minimum_order > huge_page_order(h))
			minimum_order = huge_page_order(h);

		/* oversize hugepages were init'ed in early boot */
		if (!hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);
	}
	VM_BUG_ON(minimum_order == UINT_MAX);
}

/* Log the registered huge page sizes and preallocated counts. */
static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
			buf, h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
/*
 * When shrinking the pool, prefer to free lowmem pages first so highmem
 * stays available; stop once the pool is down to 'count' pages.
 */
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (hstate_is_gigantic(h))
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int nr_nodes, node;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0) {
		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node])
				goto found;
		}
	} else {
		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node] <
					h->nr_huge_pages_node[node])
				goto found;
		}
	}
	return 0;

found:
	h->surplus_huge_pages += delta;
	h->surplus_huge_pages_node[node] += delta;
	return 1;
}

/* Pages that count toward the persistent (non-surplus) pool size. */
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)

/*
 * Grow or shrink the persistent pool to 'count' pages on the allowed nodes.
 * Returns the resulting persistent pool size.
 */
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_surplus_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);

		/* yield cpu to avoid soft lockup */
		cond_resched();

		ret = alloc_pool_huge_page(h, nodes_allowed);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_surplus_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);
	while (min_count < persistent_huge_pages(h)) {
		if (!free_pool_huge_page(h, nodes_allowed, 0))
			break;
		cond_resched_lock(&hugetlb_lock);
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

/*
 * Map a sysfs kobject back to its hstate.  For the global attributes
 * *nidp is set to NUMA_NO_NODE; per-node lookups are delegated to
 * kobj_to_node_hstate().
 */
static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj) {
			if (nidp)
				*nidp = NUMA_NO_NODE;
			return &hstates[i];
		}

	return kobj_to_node_hstate(kobj, nidp);
}

/* sysfs show: total huge pages, globally or for one node. */
static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", nr_huge_pages);
}

/*
 * sysfs store helper: resize the pool to 'count', restricted to one node or
 * (optionally) the calling task's mempolicy nodes.
 */
static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
					   struct hstate *h, int nid,
					   unsigned long count, size_t len)
{
	int err;
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
		err = -EINVAL;
		goto out;
	}

	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_MEMORY];
		}
	} else if (nodes_allowed) {
		/*
		 * per node hstate attribute: adjust count to global,
		 * but restrict alloc/free to the specified node.
		 */
		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		init_nodemask_of_node(nodes_allowed, nid);
	} else
		nodes_allowed = &node_states[N_MEMORY];

	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);

	if (nodes_allowed != &node_states[N_MEMORY])
		NODEMASK_FREE(nodes_allowed);

	return len;
out:
	NODEMASK_FREE(nodes_allowed);
	return err;
}

static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
					 struct kobject *kobj, const char *buf,
					 size_t len)
{
	struct hstate *h;
	unsigned long count;
	int nid;
	int err;

	err = kstrtoul(buf, 10, &count);
	if (err)
		return err;

	h = kobj_to_hstate(kobj, &nid);
	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(false, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages);

#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t len)
{
	/* obey_mempolicy == true: allocate/free per the task's mempolicy */
	return nr_hugepages_store_common(true, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif


static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	/* gigantic page sizes cannot be overcommitted */
	if (hstate_is_gigantic(h))
		return -EINVAL;

	err = kstrtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

/* Show the free huge page count, globally or for a single node. */
static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

/* Reserved pages are tracked globally only, so no per-node variant. */
static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

/* Show the surplus huge page count, globally or for a single node. */
static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

/* Global (per-hstate) attribute set exposed under /sys/kernel/mm/hugepages. */
static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
	&nr_hugepages_mempolicy_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

/*
 * Create the sysfs directory for @h under @parent and populate it with
 * @hstate_attr_group.  On group-creation failure the kobject is released.
 * Returns 0 on success or a -errno.
 */
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    const struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = hstate_index(h);

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[hi]);

	return retval;
}

/* Create /sys/kernel/mm/hugepages plus one subdirectory per hstate. */
static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					       hstate_kobjs, &hstate_attr_group);
		if (err)
			pr_err("Hugetlb: Unable to add hstate %s", h->name);
	}
}

#ifdef CONFIG_NUMA

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array.  The array
 * index of a node device or _hstate == node id.
 * This is here to avoid any static dependency of the node device driver, in
 * the base kernel, on the hugetlb module.
*/ struct node_hstate { struct kobject *hugepages_kobj; struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; }; static struct node_hstate node_hstates[MAX_NUMNODES]; /* * A subset of global hstate attributes for node devices */ static struct attribute *per_node_hstate_attrs[] = { &nr_hugepages_attr.attr, &free_hugepages_attr.attr, &surplus_hugepages_attr.attr, NULL, }; static const struct attribute_group per_node_hstate_attr_group = { .attrs = per_node_hstate_attrs, }; /* * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. * Returns node id via non-NULL nidp. */ static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) { int nid; for (nid = 0; nid < nr_node_ids; nid++) { struct node_hstate *nhs = &node_hstates[nid]; int i; for (i = 0; i < HUGE_MAX_HSTATE; i++) if (nhs->hstate_kobjs[i] == kobj) { if (nidp) *nidp = nid; return &hstates[i]; } } BUG(); return NULL; } /* * Unregister hstate attributes from a single node device. * No-op if no hstate attributes attached. */ static void hugetlb_unregister_node(struct node *node) { struct hstate *h; struct node_hstate *nhs = &node_hstates[node->dev.id]; if (!nhs->hugepages_kobj) return; /* no hstate attributes */ for_each_hstate(h) { int idx = hstate_index(h); if (nhs->hstate_kobjs[idx]) { kobject_put(nhs->hstate_kobjs[idx]); nhs->hstate_kobjs[idx] = NULL; } } kobject_put(nhs->hugepages_kobj); nhs->hugepages_kobj = NULL; } /* * Register hstate attributes for a single node device. * No-op if attributes already registered. 
 */
static void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];
	int err;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->dev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
				h->name, node->dev.id);
			/* roll back any attributes registered so far */
			hugetlb_unregister_node(node);
			break;
		}
	}
}

/*
 * hugetlb init time: register hstate attributes for all registered node
 * devices of nodes that have memory.  All on-line nodes should have
 * registered their associated device by this time.
 */
static void __init hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		struct node *node = node_devices[nid];

		if (node->dev.id == nid)
			hugetlb_register_node(node);
	}

	/*
	 * Let the node device driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	/* no per-node kobjects exist without NUMA; reaching here is a bug */
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_register_all_nodes(void) { }

#endif

/*
 * Subsystem initialization: settle the default huge page size, allocate
 * any boot-requested pages, and set up sysfs, per-node attributes, the
 * hugetlb cgroup files and the fault mutex table.
 */
static int __init hugetlb_init(void)
{
	int i;

	if (!hugepages_supported())
		return 0;

	if (!size_to_hstate(default_hstate_size)) {
		if (default_hstate_size != 0) {
			pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
			       default_hstate_size, HPAGE_SIZE);
		}

		default_hstate_size = HPAGE_SIZE;
		if (!size_to_hstate(default_hstate_size))
			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	}
	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
	if (default_hstate_max_huge_pages) {
		if (!default_hstate.max_huge_pages)
			default_hstate.max_huge_pages =
				default_hstate_max_huge_pages;
	}

	hugetlb_init_hstates();
	gather_bootmem_prealloc();
	report_hugepages();

	hugetlb_sysfs_init();
	hugetlb_register_all_nodes();
	hugetlb_cgroup_file_init();

#ifdef CONFIG_SMP
	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
#else
	num_fault_mutexes = 1;
#endif
	hugetlb_fault_mutex_table =
		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
	BUG_ON(!hugetlb_fault_mutex_table);

	for (i = 0; i < num_fault_mutexes; i++)
		mutex_init(&hugetlb_fault_mutex_table[i]);
	return 0;
}
subsys_initcall(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_bad_size(void)
{
	parsed_valid_hugepagesz = false;
}

/* Register a new huge page size (2^@order base pages) as an hstate. */
void __init hugetlb_add_hstate(unsigned int order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		pr_warn("hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[hugetlb_max_hstate++];
	h->order = order;
	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
	h->nr_huge_pages = 0;
	h->free_huge_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	INIT_LIST_HEAD(&h->hugepage_activelist);
	h->next_nid_to_alloc = first_memory_node;
	h->next_nid_to_free = first_memory_node;
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	/* remember so a following hugepages= applies to this size */
	parsed_hstate = h;
}

/*
 * "hugepages=" boot parameter: record the requested page count for the
 * most recently parsed hstate (or the default hstate if no hugepagesz=
 * has been seen yet).
 */
static int __init hugetlb_nrpages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	if (!parsed_valid_hugepagesz) {
		pr_warn("hugepages = %s preceded by "
			"an unsupported hugepagesz, ignoring\n", s);
		parsed_valid_hugepagesz = true;
		return 1;
	}
	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
	 * so this hugepages= parameter goes to the "default hstate".
	 */
	else if (!hugetlb_max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
		return 1;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */
	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);

/* "default_hugepagesz=" boot parameter: record the requested default size. */
static int __init hugetlb_default_setup(char *s)
{
	default_hstate_size = memparse(s, &s);
	return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);

/* Sum @array over the nodes allowed by the current task's cpuset. */
static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
/*
 * vm.nr_hugepages{,_mempolicy} sysctl: report or resize the default
 * hstate's persistent pool.  A local copy of the count is handed to
 * proc_doulongvec_minmax() so a failed parse never clobbers the hstate.
 */
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
			 struct ctl_table *table, int write,
			 void __user *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp = h->max_huge_pages;
	int ret;

	if (!hugepages_supported())
		return -EOPNOTSUPP;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		goto out;

	if (write)
		ret = __nr_hugepages_store_common(obey_mempolicy, h,
						  NUMA_NO_NODE, tmp, *length);
out:
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   void __user *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(false, table, write,
						buffer, length, ppos);
}

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *length,
				loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(true, table, write,
						buffer, length, ppos);
}
#endif /* CONFIG_NUMA */

/* vm.nr_overcommit_hugepages sysctl for the default hstate. */
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;
	int ret;

	if (!hugepages_supported())
		return -EOPNOTSUPP;

	tmp = h->nr_overcommit_huge_pages;

	/* gigantic page sizes cannot be overcommitted */
	if (write && hstate_is_gigantic(h))
		return -EINVAL;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		goto out;

	if (write) {
		spin_lock(&hugetlb_lock);
		h->nr_overcommit_huge_pages = tmp;
		spin_unlock(&hugetlb_lock);
	}
out:
	return ret;
}

#endif /* CONFIG_SYSCTL */

/* Emit the hugetlb section of /proc/meminfo (details for default hstate only). */
void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h;
	unsigned long total = 0;

	if (!hugepages_supported())
		return;

	for_each_hstate(h) {
		unsigned long count = h->nr_huge_pages;

		total += (PAGE_SIZE << huge_page_order(h)) * count;

		if (h == &default_hstate)
			seq_printf(m,
				   "HugePages_Total: %5lu\n"
				   "HugePages_Free: %5lu\n"
				   "HugePages_Rsvd: %5lu\n"
				   "HugePages_Surp: %5lu\n"
				   "Hugepagesize: %8lu kB\n",
				   count,
				   h->free_huge_pages,
				   h->resv_huge_pages,
				   h->surplus_huge_pages,
				   (PAGE_SIZE << huge_page_order(h)) / 1024);
	}

	seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
}

/* Per-node counters (default hstate only) for the node meminfo file. */
int hugetlb_report_node_meminfo(int nid, char *buf)
{
	struct hstate *h = &default_hstate;

	if (!hugepages_supported())
		return 0;

	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free: %5u\n"
		"Node %d HugePages_Surp: %5u\n",
		nid, h->nr_huge_pages_node[nid],
		nid, h->free_huge_pages_node[nid],
		nid, h->surplus_huge_pages_node[nid]);
}

/* Dump per-node, per-hstate huge page counters to the kernel log. */
void hugetlb_show_meminfo(void)
{
	struct hstate *h;
	int nid;

	if (!hugepages_supported())
		return;

	for_each_node_state(nid, N_MEMORY)
		for_each_hstate(h)
			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
				nid,
				h->nr_huge_pages_node[nid],
				h->free_huge_pages_node[nid],
				h->surplus_huge_pages_node[nid],
				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}

/* Report a task's hugetlb usage (in kB) for /proc/<pid>/status. */
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
{
	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
}

/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h;
	unsigned long nr_total_pages = 0;

	for_each_hstate(h)
		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
	return nr_total_pages;
}

/*
 * Charge (@delta > 0) or uncharge (@delta < 0) @delta huge pages of
 * reservation against the pool of @h, growing the surplus pool as needed.
 * Returns 0 on success or -ENOMEM.
 */
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. Application can still potentially OOM'ed by kernel
	 * with lack of free htlb page in cpuset that the task is in.
	 * Attempt to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpuset is too fluid that
	 * task or memory node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
*/ if (delta > 0) { if (gather_surplus_pages(h, delta) < 0) goto out; if (delta > cpuset_mems_nr(h->free_huge_pages_node)) { return_unused_surplus_pages(h, delta); goto out; } } ret = 0; if (delta < 0) return_unused_surplus_pages(h, (unsigned long) -delta); out: spin_unlock(&hugetlb_lock); return ret; } static void hugetlb_vm_op_open(struct vm_area_struct *vma) { struct resv_map *resv = vma_resv_map(vma); /* * This new VMA should share its siblings reservation map if present. * The VMA will only ever have a valid reservation map pointer where * it is being copied for another still existing VMA. As that VMA * has a reference to the reservation map it cannot disappear until * after this open call completes. It is therefore safe to take a * new reference here without additional locking. */ if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) kref_get(&resv->refs); } static void hugetlb_vm_op_close(struct vm_area_struct *vma) { struct hstate *h = hstate_vma(vma); struct resv_map *resv = vma_resv_map(vma); struct hugepage_subpool *spool = subpool_vma(vma); unsigned long reserve, start, end; long gbl_reserve; if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) return; start = vma_hugecache_offset(h, vma, vma->vm_start); end = vma_hugecache_offset(h, vma, vma->vm_end); reserve = (end - start) - region_count(resv, start, end); kref_put(&resv->refs, resv_map_release); if (reserve) { /* * Decrement reserve counts. The global reserve count may be * adjusted if the subpool has a minimum size. */ gbl_reserve = hugepage_subpool_put_pages(spool, reserve); hugetlb_acct_memory(h, -gbl_reserve); } } static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) { if (addr & ~(huge_page_mask(hstate_vma(vma)))) return -EINVAL; return 0; } /* * We cannot handle pagefaults against hugetlb pages at all. They cause * handle_mm_fault() to try to instantiate regular-sized pages in the * hugegpage VMA. do_page_fault() is supposed to trap this, so BUG is we get * this far. 
 */
static int hugetlb_vm_op_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}

const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
	.split = hugetlb_vm_op_split,
};

/* Build a huge PTE for @page with protections derived from @vma. */
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
					 vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_huge_pte(page,
					   vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);
	entry = arch_make_huge_pte(entry, vma, page, writable);

	return entry;
}

/* Upgrade an existing huge PTE to writable+dirty, updating the MMU cache. */
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}

/* True if @pte is a (non-present) huge page migration swap entry. */
bool is_hugetlb_entry_migration(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return false;
	swp = pte_to_swp_entry(pte);
	if (non_swap_entry(swp) && is_migration_entry(swp))
		return true;
	else
		return false;
}

/* Non-zero if @pte is a (non-present) hwpoison swap entry. */
static int is_hugetlb_entry_hwpoisoned(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return 0;
	swp = pte_to_swp_entry(pte);
	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
		return 1;
	else
		return 0;
}

/*
 * Duplicate the huge PTEs of @vma from @src into @dst.  For COW mappings
 * (private, MAY_WRITE) the source entries are write-protected as well so
 * both sides fault for a private copy.  Returns 0 or -ENOMEM.
 */
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	int ret = 0;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	mmun_start = vma->vm_start;
	mmun_end = vma->vm_end;
	if (cow)
		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);

	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
		spinlock_t *src_ptl, *dst_ptl;

		src_pte = huge_pte_offset(src, addr, sz);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr, sz);
		if (!dst_pte) {
			ret = -ENOMEM;
			break;
		}

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		dst_ptl = huge_pte_lock(h, dst, dst_pte);
		src_ptl = huge_pte_lockptr(h, src, src_pte);
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
		entry = huge_ptep_get(src_pte);
		if (huge_pte_none(entry)) { /* skip none entry */
			;
		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
				    is_hugetlb_entry_hwpoisoned(entry))) {
			swp_entry_t swp_entry = pte_to_swp_entry(entry);

			if (is_write_migration_entry(swp_entry) && cow) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be set to read.
				 */
				make_migration_entry_read(&swp_entry);
				entry = swp_entry_to_pte(swp_entry);
				set_huge_swap_pte_at(src, addr, src_pte,
						     entry, sz);
			}
			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
		} else {
			if (cow) {
				/*
				 * No need to notify as we are downgrading page
				 * table protection not changing it to point
				 * to a new page.
* * See Documentation/vm/mmu_notifier.txt */ huge_ptep_set_wrprotect(src, addr, src_pte); } entry = huge_ptep_get(src_pte); ptepage = pte_page(entry); get_page(ptepage); page_dup_rmap(ptepage, true); set_huge_pte_at(dst, addr, dst_pte, entry); hugetlb_count_add(pages_per_huge_page(h), dst); } spin_unlock(src_ptl); spin_unlock(dst_ptl); } if (cow) mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end); return ret; } void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) { struct mm_struct *mm = vma->vm_mm; unsigned long address; pte_t *ptep; pte_t pte; spinlock_t *ptl; struct page *page; struct hstate *h = hstate_vma(vma); unsigned long sz = huge_page_size(h); const unsigned long mmun_start = start; /* For mmu_notifiers */ const unsigned long mmun_end = end; /* For mmu_notifiers */ WARN_ON(!is_vm_hugetlb_page(vma)); BUG_ON(start & ~huge_page_mask(h)); BUG_ON(end & ~huge_page_mask(h)); /* * This is a hugetlb vma, all the pte entries should point * to huge page. */ tlb_remove_check_page_size_change(tlb, sz); tlb_start_vma(tlb, vma); mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); address = start; for (; address < end; address += sz) { ptep = huge_pte_offset(mm, address, sz); if (!ptep) continue; ptl = huge_pte_lock(h, mm, ptep); if (huge_pmd_unshare(mm, &address, ptep)) { spin_unlock(ptl); continue; } pte = huge_ptep_get(ptep); if (huge_pte_none(pte)) { spin_unlock(ptl); continue; } /* * Migrating hugepage or HWPoisoned hugepage is already * unmapped and its refcount is dropped, so just clear pte here. */ if (unlikely(!pte_present(pte))) { huge_pte_clear(mm, address, ptep, sz); spin_unlock(ptl); continue; } page = pte_page(pte); /* * If a reference page is supplied, it is because a specific * page is being unmapped, not a range. Ensure the page we * are about to unmap is the actual page of interest. 
*/ if (ref_page) { if (page != ref_page) { spin_unlock(ptl); continue; } /* * Mark the VMA as having unmapped its page so that * future faults in this VMA will fail rather than * looking like data was lost */ set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); } pte = huge_ptep_get_and_clear(mm, address, ptep); tlb_remove_huge_tlb_entry(h, tlb, ptep, address); if (huge_pte_dirty(pte)) set_page_dirty(page); hugetlb_count_sub(pages_per_huge_page(h), mm); page_remove_rmap(page, true); spin_unlock(ptl); tlb_remove_page_size(tlb, page, huge_page_size(h)); /* * Bail out after unmapping reference page if supplied */ if (ref_page) break; } mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); tlb_end_vma(tlb, vma); } void __unmap_hugepage_range_final(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) { __unmap_hugepage_range(tlb, vma, start, end, ref_page); /* * Clear this flag so that x86's huge_pmd_share page_table_shareable * test will fail on a vma being torn down, and not grab a page table * on its way out. We're lucky that the flag has such an appropriate * name, and can in fact be safely cleared here. We could clear it * before the __unmap_hugepage_range above, but all that's necessary * is to clear it before releasing the i_mmap_rwsem. This works * because in the context this is called, the VMA is about to be * destroyed and the i_mmap_rwsem is held. */ vma->vm_flags &= ~VM_MAYSHARE; } void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) { struct mm_struct *mm; struct mmu_gather tlb; mm = vma->vm_mm; tlb_gather_mmu(&tlb, mm, start, end); __unmap_hugepage_range(&tlb, vma, start, end, ref_page); tlb_finish_mmu(&tlb, start, end); } /* * This is called when the original mapper is failing to COW a MAP_PRIVATE * mappping it owns the reserve page for. 
The intention is to unmap the page * from other VMAs and let the children be SIGKILLed if they are faulting the * same region. */ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, unsigned long address) { struct hstate *h = hstate_vma(vma); struct vm_area_struct *iter_vma; struct address_space *mapping; pgoff_t pgoff; /* * vm_pgoff is in PAGE_SIZE units, hence the different calculation * from page cache lookup which is in HPAGE_SIZE units. */ address = address & huge_page_mask(h); pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; mapping = vma->vm_file->f_mapping; /* * Take the mapping lock for the duration of the table walk. As * this mapping should be shared between all the VMAs, * __unmap_hugepage_range() is called as the lock is already held */ i_mmap_lock_write(mapping); vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { /* Do not unmap the current VMA */ if (iter_vma == vma) continue; /* * Shared VMAs have their own reserves and do not affect * MAP_PRIVATE accounting but it is possible that a shared * VMA is using the same page so check and skip such VMAs. */ if (iter_vma->vm_flags & VM_MAYSHARE) continue; /* * Unmap the page from other VMAs without their own reserves. * They get marked to be SIGKILLed if they fault in these * areas. This is because a future no-page fault on this VMA * could insert a zeroed page instead of the data existing * from the time of fork. This would look like data corruption */ if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) unmap_hugepage_range(iter_vma, address, address + huge_page_size(h), page); } i_mmap_unlock_write(mapping); } /* * Hugetlb_cow() should be called with page lock of the original hugepage held. * Called with hugetlb_instantiation_mutex held and pte_page locked so we * cannot race with other handlers or page migration. * Keep the pte_same checks anyway to make transition from the mutex easier. 
*/ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, struct page *pagecache_page, spinlock_t *ptl) { pte_t pte; struct hstate *h = hstate_vma(vma); struct page *old_page, *new_page; int ret = 0, outside_reserve = 0; unsigned long mmun_start; /* For mmu_notifiers */ unsigned long mmun_end; /* For mmu_notifiers */ pte = huge_ptep_get(ptep); old_page = pte_page(pte); retry_avoidcopy: /* If no-one else is actually using this page, avoid the copy * and just make the page writable */ if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { page_move_anon_rmap(old_page, vma); set_huge_ptep_writable(vma, address, ptep); return 0; } /* * If the process that created a MAP_PRIVATE mapping is about to * perform a COW due to a shared page count, attempt to satisfy * the allocation without using the existing reserves. The pagecache * page is used to determine if the reserve at this address was * consumed or not. If reserves were used, a partial faulted mapping * at the time of fork() could consume its reserves on COW instead * of the full address range. */ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && old_page != pagecache_page) outside_reserve = 1; get_page(old_page); /* * Drop page table lock as buddy allocator may be called. It will * be acquired again before returning to the caller, as expected. */ spin_unlock(ptl); new_page = alloc_huge_page(vma, address, outside_reserve); if (IS_ERR(new_page)) { /* * If a process owning a MAP_PRIVATE mapping fails to COW, * it is due to references held by a child and an insufficient * huge page pool. To guarantee the original mappers * reliability, unmap the page from child processes. The child * may get SIGKILLed if it later faults. 
		 */
		if (outside_reserve) {
			put_page(old_page);
			BUG_ON(huge_pte_none(pte));
			unmap_ref_private(mm, vma, old_page, address);
			BUG_ON(huge_pte_none(pte));
			spin_lock(ptl);
			ptep = huge_pte_offset(mm, address & huge_page_mask(h),
					       huge_page_size(h));
			if (likely(ptep &&
				   pte_same(huge_ptep_get(ptep), pte)))
				goto retry_avoidcopy;
			/*
			 * race occurs while re-acquiring page table
			 * lock, and our job is done.
			 */
			return 0;
		}

		ret = (PTR_ERR(new_page) == -ENOMEM) ?
			VM_FAULT_OOM : VM_FAULT_SIGBUS;
		goto out_release_old;
	}

	/*
	 * When the original hugepage is shared one, it does not have
	 * anon_vma prepared.
	 */
	if (unlikely(anon_vma_prepare(vma))) {
		ret = VM_FAULT_OOM;
		goto out_release_all;
	}

	copy_user_huge_page(new_page, old_page, address, vma,
			    pages_per_huge_page(h));
	__SetPageUptodate(new_page);
	set_page_huge_active(new_page);

	mmun_start = address & huge_page_mask(h);
	mmun_end = mmun_start + huge_page_size(h);
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	/*
	 * Retake the page table lock to check for racing updates
	 * before the page tables are altered
	 */
	spin_lock(ptl);
	ptep = huge_pte_offset(mm, address & huge_page_mask(h),
			       huge_page_size(h));
	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
		ClearPagePrivate(new_page);

		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		page_remove_rmap(old_page, true);
		hugepage_add_new_anon_rmap(new_page, vma, address);
		/* Make the old page be freed below */
		new_page = old_page;
	}
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out_release_all:
	restore_reserve_on_error(h, vma, address, new_page);
	put_page(new_page);
out_release_old:
	put_page(old_page);

	spin_lock(ptl); /* Caller expects lock to be held */
	return ret;
}

/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}

/*
 * Return whether there is a pagecache page to back given address within VMA.
 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}

/*
 * Insert @page into @mapping at @idx and charge the inode's block count.
 * Clears PagePrivate so the page no longer carries a restore-reserve hint.
 */
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx)
{
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);

	if (err)
		return err;
	ClearPagePrivate(page);

	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);
	return 0;
}

/*
 * Handle a fault on a not-yet-mapped huge page: find or allocate the
 * backing page, install the PTE, and optionally do the COW immediately
 * for private writes.  Returns a VM_FAULT_* result.
 */
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			   struct address_space *mapping, pgoff_t idx,
			   unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	int ret = VM_FAULT_SIGBUS;
	int anon_rmap = 0;
	unsigned long size;
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be obvious
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
			   current->pid);
		return ret;
	}

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
*/ retry: page = find_lock_page(mapping, idx); if (!page) { size = i_size_read(mapping->host) >> huge_page_shift(h); if (idx >= size) goto out; /* * Check for page in userfault range */ if (userfaultfd_missing(vma)) { u32 hash; struct vm_fault vmf = { .vma = vma, .address = address, .flags = flags, /* * Hard to debug if it ends up being * used by a callee that assumes * something about the other * uninitialized fields... same as in * memory.c */ }; /* * hugetlb_fault_mutex must be dropped before * handling userfault. Reacquire after handling * fault to make calling code simpler. */ hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address); mutex_unlock(&hugetlb_fault_mutex_table[hash]); ret = handle_userfault(&vmf, VM_UFFD_MISSING); mutex_lock(&hugetlb_fault_mutex_table[hash]); goto out; } page = alloc_huge_page(vma, address, 0); if (IS_ERR(page)) { ret = PTR_ERR(page); if (ret == -ENOMEM) ret = VM_FAULT_OOM; else ret = VM_FAULT_SIGBUS; goto out; } clear_huge_page(page, address, pages_per_huge_page(h)); __SetPageUptodate(page); set_page_huge_active(page); if (vma->vm_flags & VM_MAYSHARE) { int err = huge_add_to_page_cache(page, mapping, idx); if (err) { put_page(page); if (err == -EEXIST) goto retry; goto out; } } else { lock_page(page); if (unlikely(anon_vma_prepare(vma))) { ret = VM_FAULT_OOM; goto backout_unlocked; } anon_rmap = 1; } } else { /* * If memory error occurs between mmap() and fault, some process * don't have hwpoisoned swap entry for errored virtual address. * So we need to block hugepage fault by PG_hwpoison bit check. */ if (unlikely(PageHWPoison(page))) { ret = VM_FAULT_HWPOISON | VM_FAULT_SET_HINDEX(hstate_index(h)); goto backout_unlocked; } } /* * If we are going to COW a private mapping later, we examine the * pending reservations for this page now. This will ensure that * any allocations necessary to record that reservation occur outside * the spinlock. 
*/ if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { if (vma_needs_reservation(h, vma, address) < 0) { ret = VM_FAULT_OOM; goto backout_unlocked; } /* Just decrements count, does not deallocate */ vma_end_reservation(h, vma, address); } ptl = huge_pte_lock(h, mm, ptep); size = i_size_read(mapping->host) >> huge_page_shift(h); if (idx >= size) goto backout; ret = 0; if (!huge_pte_none(huge_ptep_get(ptep))) goto backout; if (anon_rmap) { ClearPagePrivate(page); hugepage_add_new_anon_rmap(page, vma, address); } else page_dup_rmap(page, true); new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED))); set_huge_pte_at(mm, address, ptep, new_pte); hugetlb_count_add(pages_per_huge_page(h), mm); if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { /* Optimization, do the COW without a second fault */ ret = hugetlb_cow(mm, vma, address, ptep, page, ptl); } spin_unlock(ptl); unlock_page(page); out: return ret; backout: spin_unlock(ptl); backout_unlocked: unlock_page(page); restore_reserve_on_error(h, vma, address, page); put_page(page); goto out; } #ifdef CONFIG_SMP u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) { unsigned long key[2]; u32 hash; if (vma->vm_flags & VM_SHARED) { key[0] = (unsigned long) mapping; key[1] = idx; } else { key[0] = (unsigned long) mm; key[1] = address >> huge_page_shift(h); } hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0); return hash & (num_fault_mutexes - 1); } #else /* * For uniprocesor systems we always use a single mutex, so just * return 0 and avoid the hashing overhead. 
*/ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) { return 0; } #endif int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) { pte_t *ptep, entry; spinlock_t *ptl; int ret; u32 hash; pgoff_t idx; struct page *page = NULL; struct page *pagecache_page = NULL; struct hstate *h = hstate_vma(vma); struct address_space *mapping; int need_wait_lock = 0; address &= huge_page_mask(h); ptep = huge_pte_offset(mm, address, huge_page_size(h)); if (ptep) { entry = huge_ptep_get(ptep); if (unlikely(is_hugetlb_entry_migration(entry))) { migration_entry_wait_huge(vma, mm, ptep); return 0; } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h)); } else { ptep = huge_pte_alloc(mm, address, huge_page_size(h)); if (!ptep) return VM_FAULT_OOM; } mapping = vma->vm_file->f_mapping; idx = vma_hugecache_offset(h, vma, address); /* * Serialize hugepage allocation and instantiation, so that we don't * get spurious allocation failures if two CPUs race to instantiate * the same page in the page cache. */ hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address); mutex_lock(&hugetlb_fault_mutex_table[hash]); entry = huge_ptep_get(ptep); if (huge_pte_none(entry)) { ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); goto out_mutex; } ret = 0; /* * entry could be a migration/hwpoison entry at this point, so this * check prevents the kernel from going below assuming that we have * a active hugepage in pagecache. This goto expects the 2nd page fault, * and is_hugetlb_entry_(migration|hwpoisoned) check will properly * handle it. */ if (!pte_present(entry)) goto out_mutex; /* * If we are going to COW the mapping later, we examine the pending * reservations for this page now. 
This will ensure that any * allocations necessary to record that reservation occur outside the * spinlock. For private mappings, we also lookup the pagecache * page now as it is used to determine if a reservation has been * consumed. */ if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { if (vma_needs_reservation(h, vma, address) < 0) { ret = VM_FAULT_OOM; goto out_mutex; } /* Just decrements count, does not deallocate */ vma_end_reservation(h, vma, address); if (!(vma->vm_flags & VM_MAYSHARE)) pagecache_page = hugetlbfs_pagecache_page(h, vma, address); } ptl = huge_pte_lock(h, mm, ptep); /* Check for a racing update before calling hugetlb_cow */ if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) goto out_ptl; /* * hugetlb_cow() requires page locks of pte_page(entry) and * pagecache_page, so here we need take the former one * when page != pagecache_page or !pagecache_page. */ page = pte_page(entry); if (page != pagecache_page) if (!trylock_page(page)) { need_wait_lock = 1; goto out_ptl; } get_page(page); if (flags & FAULT_FLAG_WRITE) { if (!huge_pte_write(entry)) { ret = hugetlb_cow(mm, vma, address, ptep, pagecache_page, ptl); goto out_put_page; } entry = huge_pte_mkdirty(entry); } entry = pte_mkyoung(entry); if (huge_ptep_set_access_flags(vma, address, ptep, entry, flags & FAULT_FLAG_WRITE)) update_mmu_cache(vma, address, ptep); out_put_page: if (page != pagecache_page) unlock_page(page); put_page(page); out_ptl: spin_unlock(ptl); if (pagecache_page) { unlock_page(pagecache_page); put_page(pagecache_page); } out_mutex: mutex_unlock(&hugetlb_fault_mutex_table[hash]); /* * Generally it's safe to hold refcount during waiting page lock. But * here we just wait to defer the next page fault to avoid busy loop and * the page is not used after unlocked before returning from the current * page fault. So we are safe from accessing freed page, even if we wait * here without taking refcount. 
*/ if (need_wait_lock) wait_on_page_locked(page); return ret; } /* * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with * modifications for huge pages. */ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, struct page **pagep) { struct address_space *mapping; pgoff_t idx; unsigned long size; int vm_shared = dst_vma->vm_flags & VM_SHARED; struct hstate *h = hstate_vma(dst_vma); pte_t _dst_pte; spinlock_t *ptl; int ret; struct page *page; if (!*pagep) { ret = -ENOMEM; page = alloc_huge_page(dst_vma, dst_addr, 0); if (IS_ERR(page)) goto out; ret = copy_huge_page_from_user(page, (const void __user *) src_addr, pages_per_huge_page(h), false); /* fallback to copy_from_user outside mmap_sem */ if (unlikely(ret)) { ret = -EFAULT; *pagep = page; /* don't free the page */ goto out; } } else { page = *pagep; *pagep = NULL; } /* * The memory barrier inside __SetPageUptodate makes sure that * preceding stores to the page contents become visible before * the set_pte_at() write. */ __SetPageUptodate(page); set_page_huge_active(page); mapping = dst_vma->vm_file->f_mapping; idx = vma_hugecache_offset(h, dst_vma, dst_addr); /* * If shared, add to page cache */ if (vm_shared) { size = i_size_read(mapping->host) >> huge_page_shift(h); ret = -EFAULT; if (idx >= size) goto out_release_nounlock; /* * Serialization between remove_inode_hugepages() and * huge_add_to_page_cache() below happens through the * hugetlb_fault_mutex_table that here must be hold by * the caller. */ ret = huge_add_to_page_cache(page, mapping, idx); if (ret) goto out_release_nounlock; } ptl = huge_pte_lockptr(h, dst_mm, dst_pte); spin_lock(ptl); /* * Recheck the i_size after holding PT lock to make sure not * to leave any page mapped (as page_mapped()) beyond the end * of the i_size (remove_inode_hugepages() is strict about * enforcing that). 
If we bail out here, we'll also leave a * page in the radix tree in the vm_shared case beyond the end * of the i_size, but remove_inode_hugepages() will take care * of it as soon as we drop the hugetlb_fault_mutex_table. */ size = i_size_read(mapping->host) >> huge_page_shift(h); ret = -EFAULT; if (idx >= size) goto out_release_unlock; ret = -EEXIST; if (!huge_pte_none(huge_ptep_get(dst_pte))) goto out_release_unlock; if (vm_shared) { page_dup_rmap(page, true); } else { ClearPagePrivate(page); hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); } _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE); if (dst_vma->vm_flags & VM_WRITE) _dst_pte = huge_pte_mkdirty(_dst_pte); _dst_pte = pte_mkyoung(_dst_pte); set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte, dst_vma->vm_flags & VM_WRITE); hugetlb_count_add(pages_per_huge_page(h), dst_mm); /* No need to invalidate - it was non-present before */ update_mmu_cache(dst_vma, dst_addr, dst_pte); spin_unlock(ptl); if (vm_shared) unlock_page(page); ret = 0; out: return ret; out_release_unlock: spin_unlock(ptl); if (vm_shared) unlock_page(page); out_release_nounlock: put_page(page); goto out; } long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, unsigned long *nr_pages, long i, unsigned int flags, int *nonblocking) { unsigned long pfn_offset; unsigned long vaddr = *position; unsigned long remainder = *nr_pages; struct hstate *h = hstate_vma(vma); int err = -EFAULT; while (vaddr < vma->vm_end && remainder) { pte_t *pte; spinlock_t *ptl = NULL; int absent; struct page *page; /* * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. */ if (unlikely(fatal_signal_pending(current))) { remainder = 0; break; } /* * Some archs (sparc64, sh*) have multiple pte_ts to * each hugepage. 
We have to make sure we get the * first, for the page indexing below to work. * * Note that page table lock is not held when pte is null. */ pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), huge_page_size(h)); if (pte) ptl = huge_pte_lock(h, mm, pte); absent = !pte || huge_pte_none(huge_ptep_get(pte)); /* * When coredumping, it suits get_dump_page if we just return * an error where there's an empty slot with no huge pagecache * to back it. This way, we avoid allocating a hugepage, and * the sparse dumpfile avoids allocating disk blocks, but its * huge holes still show up with zeroes where they need to be. */ if (absent && (flags & FOLL_DUMP) && !hugetlbfs_pagecache_present(h, vma, vaddr)) { if (pte) spin_unlock(ptl); remainder = 0; break; } /* * We need call hugetlb_fault for both hugepages under migration * (in which case hugetlb_fault waits for the migration,) and * hwpoisoned hugepages (in which case we need to prevent the * caller from accessing to them.) In order to do this, we use * here is_swap_pte instead of is_hugetlb_entry_migration and * is_hugetlb_entry_hwpoisoned. This is because it simply covers * both cases, and because we can't follow correct pages * directly from any kind of swap entries. 
*/ if (absent || is_swap_pte(huge_ptep_get(pte)) || ((flags & FOLL_WRITE) && !huge_pte_write(huge_ptep_get(pte)))) { int ret; unsigned int fault_flags = 0; if (pte) spin_unlock(ptl); if (flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (nonblocking) fault_flags |= FAULT_FLAG_ALLOW_RETRY; if (flags & FOLL_NOWAIT) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; if (flags & FOLL_TRIED) { VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY); fault_flags |= FAULT_FLAG_TRIED; } ret = hugetlb_fault(mm, vma, vaddr, fault_flags); if (ret & VM_FAULT_ERROR) { err = vm_fault_to_errno(ret, flags); remainder = 0; break; } if (ret & VM_FAULT_RETRY) { if (nonblocking) *nonblocking = 0; *nr_pages = 0; /* * VM_FAULT_RETRY must not return an * error, it will return zero * instead. * * No need to update "position" as the * caller will not check it after * *nr_pages is set to 0. */ return i; } continue; } pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; page = pte_page(huge_ptep_get(pte)); same_page: if (pages) { pages[i] = mem_map_offset(page, pfn_offset); get_page(pages[i]); } if (vmas) vmas[i] = vma; vaddr += PAGE_SIZE; ++pfn_offset; --remainder; ++i; if (vaddr < vma->vm_end && remainder && pfn_offset < pages_per_huge_page(h)) { /* * We use pfn_offset to avoid touching the pageframes * of this compound page. */ goto same_page; } spin_unlock(ptl); } *nr_pages = remainder; /* * setting position is actually required only if remainder is * not zero but it's faster not to add a "if (remainder)" * branch. */ *position = vaddr; return i ? i : err; } #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE /* * ARCHes with special requirements for evicting HUGETLB backing TLB entries can * implement this. 
*/ #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) #endif unsigned long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) { struct mm_struct *mm = vma->vm_mm; unsigned long start = address; pte_t *ptep; pte_t pte; struct hstate *h = hstate_vma(vma); unsigned long pages = 0; BUG_ON(address >= end); flush_cache_range(vma, address, end); mmu_notifier_invalidate_range_start(mm, start, end); i_mmap_lock_write(vma->vm_file->f_mapping); for (; address < end; address += huge_page_size(h)) { spinlock_t *ptl; ptep = huge_pte_offset(mm, address, huge_page_size(h)); if (!ptep) continue; ptl = huge_pte_lock(h, mm, ptep); if (huge_pmd_unshare(mm, &address, ptep)) { pages++; spin_unlock(ptl); continue; } pte = huge_ptep_get(ptep); if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { spin_unlock(ptl); continue; } if (unlikely(is_hugetlb_entry_migration(pte))) { swp_entry_t entry = pte_to_swp_entry(pte); if (is_write_migration_entry(entry)) { pte_t newpte; make_migration_entry_read(&entry); newpte = swp_entry_to_pte(entry); set_huge_swap_pte_at(mm, address, ptep, newpte, huge_page_size(h)); pages++; } spin_unlock(ptl); continue; } if (!huge_pte_none(pte)) { pte = huge_ptep_get_and_clear(mm, address, ptep); pte = pte_mkhuge(huge_pte_modify(pte, newprot)); pte = arch_make_huge_pte(pte, vma, NULL, 0); set_huge_pte_at(mm, address, ptep, pte); pages++; } spin_unlock(ptl); } /* * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare * may have cleared our pud entry and done put_page on the page table: * once we release i_mmap_rwsem, another task can do the final put_page * and that page table be reused and filled with junk. */ flush_hugetlb_tlb_range(vma, start, end); /* * No need to call mmu_notifier_invalidate_range() we are downgrading * page table protection not changing it to point to a new page. 
* * See Documentation/vm/mmu_notifier.txt */ i_mmap_unlock_write(vma->vm_file->f_mapping); mmu_notifier_invalidate_range_end(mm, start, end); return pages << h->order; } int hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags) { long ret, chg; struct hstate *h = hstate_inode(inode); struct hugepage_subpool *spool = subpool_inode(inode); struct resv_map *resv_map; long gbl_reserve; /* * Only apply hugepage reservation if asked. At fault time, an * attempt will be made for VM_NORESERVE to allocate a page * without using reserves */ if (vm_flags & VM_NORESERVE) return 0; /* * Shared mappings base their reservation on the number of pages that * are already allocated on behalf of the file. Private mappings need * to reserve the full area even if read-only as mprotect() may be * called to make the mapping read-write. Assume !vma is a shm mapping */ if (!vma || vma->vm_flags & VM_MAYSHARE) { resv_map = inode_resv_map(inode); chg = region_chg(resv_map, from, to); } else { resv_map = resv_map_alloc(); if (!resv_map) return -ENOMEM; chg = to - from; set_vma_resv_map(vma, resv_map); set_vma_resv_flags(vma, HPAGE_RESV_OWNER); } if (chg < 0) { ret = chg; goto out_err; } /* * There must be enough pages in the subpool for the mapping. If * the subpool has a minimum size, there may be some global * reservations already in place (gbl_reserve). */ gbl_reserve = hugepage_subpool_get_pages(spool, chg); if (gbl_reserve < 0) { ret = -ENOSPC; goto out_err; } /* * Check enough hugepages are available for the reservation. * Hand the pages back to the subpool if there are not */ ret = hugetlb_acct_memory(h, gbl_reserve); if (ret < 0) { /* put back original number of pages, chg */ (void)hugepage_subpool_put_pages(spool, chg); goto out_err; } /* * Account for the reservations made. Shared mappings record regions * that have reservations as they are shared by multiple VMAs. 
* When the last VMA disappears, the region map says how much * the reservation was and the page cache tells how much of * the reservation was consumed. Private mappings are per-VMA and * only the consumed reservations are tracked. When the VMA * disappears, the original reservation is the VMA size and the * consumed reservations are stored in the map. Hence, nothing * else has to be done for private mappings here */ if (!vma || vma->vm_flags & VM_MAYSHARE) { long add = region_add(resv_map, from, to); if (unlikely(chg > add)) { /* * pages in this range were added to the reserve * map between region_chg and region_add. This * indicates a race with alloc_huge_page. Adjust * the subpool and reserve counts modified above * based on the difference. */ long rsv_adjust; rsv_adjust = hugepage_subpool_put_pages(spool, chg - add); hugetlb_acct_memory(h, -rsv_adjust); } } return 0; out_err: if (!vma || vma->vm_flags & VM_MAYSHARE) /* Don't call region_abort if region_chg failed */ if (chg >= 0) region_abort(resv_map, from, to); if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) kref_put(&resv_map->refs, resv_map_release); return ret; } long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed) { struct hstate *h = hstate_inode(inode); struct resv_map *resv_map = inode_resv_map(inode); long chg = 0; struct hugepage_subpool *spool = subpool_inode(inode); long gbl_reserve; if (resv_map) { chg = region_del(resv_map, start, end); /* * region_del() can fail in the rare case where a region * must be split and another region descriptor can not be * allocated. If end == LONG_MAX, it will not fail. */ if (chg < 0) return chg; } spin_lock(&inode->i_lock); inode->i_blocks -= (blocks_per_huge_page(h) * freed); spin_unlock(&inode->i_lock); /* * If the subpool has a minimum size, the number of global * reservations to be released may be adjusted. 
*/ gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); hugetlb_acct_memory(h, -gbl_reserve); return 0; } #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE static unsigned long page_table_shareable(struct vm_area_struct *svma, struct vm_area_struct *vma, unsigned long addr, pgoff_t idx) { unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + svma->vm_start; unsigned long sbase = saddr & PUD_MASK; unsigned long s_end = sbase + PUD_SIZE; /* Allow segments to share if only one is marked locked */ unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK; /* * match the virtual addresses, permission and the alignment of the * page table page. */ if (pmd_index(addr) != pmd_index(saddr) || vm_flags != svm_flags || sbase < svma->vm_start || svma->vm_end < s_end) return 0; return saddr; } static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) { unsigned long base = addr & PUD_MASK; unsigned long end = base + PUD_SIZE; /* * check on proper vm_flags and page table alignment */ if (vma->vm_flags & VM_MAYSHARE && vma->vm_start <= base && end <= vma->vm_end) return true; return false; } /* * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() * and returns the corresponding pte. While this is not necessary for the * !shared pmd case because we can allocate the pmd later as well, it makes the * code much cleaner. pmd allocation is essential for the shared case because * pud has to be populated inside the same i_mmap_rwsem section - otherwise * racing tasks could either miss the sharing (see huge_pte_offset) or select a * bad pmd for sharing. 
*/ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) { struct vm_area_struct *vma = find_vma(mm, addr); struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; struct vm_area_struct *svma; unsigned long saddr; pte_t *spte = NULL; pte_t *pte; spinlock_t *ptl; if (!vma_shareable(vma, addr)) return (pte_t *)pmd_alloc(mm, pud, addr); i_mmap_lock_write(mapping); vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { if (svma == vma) continue; saddr = page_table_shareable(svma, vma, addr, idx); if (saddr) { spte = huge_pte_offset(svma->vm_mm, saddr, vma_mmu_pagesize(svma)); if (spte) { get_page(virt_to_page(spte)); break; } } } if (!spte) goto out; ptl = huge_pte_lock(hstate_vma(vma), mm, spte); if (pud_none(*pud)) { pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK)); mm_inc_nr_pmds(mm); } else { put_page(virt_to_page(spte)); } spin_unlock(ptl); out: pte = (pte_t *)pmd_alloc(mm, pud, addr); i_mmap_unlock_write(mapping); return pte; } /* * unmap huge page backed by shared pte. * * Hugetlb pte page is ref counted at the time of mapping. If pte is shared * indicated by page_count > 1, unmap is achieved by clearing pud and * decrementing the ref count. If count == 1, the pte page is not shared. * * called with page table lock held. 
* * returns: 1 successfully unmapped a shared pte page * 0 the underlying pte page is not shared, or it is the last user */ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) { pgd_t *pgd = pgd_offset(mm, *addr); p4d_t *p4d = p4d_offset(pgd, *addr); pud_t *pud = pud_offset(p4d, *addr); BUG_ON(page_count(virt_to_page(ptep)) == 0); if (page_count(virt_to_page(ptep)) == 1) return 0; pud_clear(pud); put_page(virt_to_page(ptep)); mm_dec_nr_pmds(mm); *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; return 1; } #define want_pmd_share() (1) #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) { return NULL; } int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) { return 0; } #define want_pmd_share() (0) #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pte_t *pte = NULL; pgd = pgd_offset(mm, addr); p4d = p4d_alloc(mm, pgd, addr); if (!p4d) return NULL; pud = pud_alloc(mm, p4d, addr); if (pud) { if (sz == PUD_SIZE) { pte = (pte_t *)pud; } else { BUG_ON(sz != PMD_SIZE); if (want_pmd_share() && pud_none(*pud)) pte = huge_pmd_share(mm, addr, pud); else pte = (pte_t *)pmd_alloc(mm, pud, addr); } } BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); return pte; } /* * huge_pte_offset() - Walk the page table to resolve the hugepage * entry at address @addr * * Return: Pointer to page table or swap entry (PUD or PMD) for * address @addr, or NULL if a p*d_none() entry is encountered and the * size @sz doesn't match the hugepage size at this level of the page * table. 
*/ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) return NULL; p4d = p4d_offset(pgd, addr); if (!p4d_present(*p4d)) return NULL; pud = pud_offset(p4d, addr); if (sz != PUD_SIZE && pud_none(*pud)) return NULL; /* hugepage or swap? */ if (pud_huge(*pud) || !pud_present(*pud)) return (pte_t *)pud; pmd = pmd_offset(pud, addr); if (sz != PMD_SIZE && pmd_none(*pmd)) return NULL; /* hugepage or swap? */ if (pmd_huge(*pmd) || !pmd_present(*pmd)) return (pte_t *)pmd; return NULL; } #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ /* * These functions are overwritable if your architecture needs its own * behavior. */ struct page * __weak follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) { return ERR_PTR(-EINVAL); } struct page * __weak follow_huge_pd(struct vm_area_struct *vma, unsigned long address, hugepd_t hpd, int flags, int pdshift) { WARN(1, "hugepd follow called with no support for hugepage directory format\n"); return NULL; } struct page * __weak follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int flags) { struct page *page = NULL; spinlock_t *ptl; pte_t pte; retry: ptl = pmd_lockptr(mm, pmd); spin_lock(ptl); /* * make sure that the address range covered by this pmd is not * unmapped from other threads. */ if (!pmd_huge(*pmd)) goto out; pte = huge_ptep_get((pte_t *)pmd); if (pte_present(pte)) { page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); if (flags & FOLL_GET) get_page(page); } else { if (is_hugetlb_entry_migration(pte)) { spin_unlock(ptl); __migration_entry_wait(mm, (pte_t *)pmd, ptl); goto retry; } /* * hwpoisoned entry is treated as no_page_table in * follow_page_mask(). 
*/ } out: spin_unlock(ptl); return page; } struct page * __weak follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int flags) { if (flags & FOLL_GET) return NULL; return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT); } struct page * __weak follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags) { if (flags & FOLL_GET) return NULL; return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT); } bool isolate_huge_page(struct page *page, struct list_head *list) { bool ret = true; VM_BUG_ON_PAGE(!PageHead(page), page); spin_lock(&hugetlb_lock); if (!page_huge_active(page) || !get_page_unless_zero(page)) { ret = false; goto unlock; } clear_page_huge_active(page); list_move_tail(&page->lru, list); unlock: spin_unlock(&hugetlb_lock); return ret; } void putback_active_hugepage(struct page *page) { VM_BUG_ON_PAGE(!PageHead(page), page); spin_lock(&hugetlb_lock); set_page_huge_active(page); list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); spin_unlock(&hugetlb_lock); put_page(page); } void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) { struct hstate *h = page_hstate(oldpage); hugetlb_cgroup_migrate(oldpage, newpage); set_page_owner_migrate_reason(newpage, reason); /* * transfer temporary state of the new huge page. This is * reverse to other transitions because the newpage is going to * be final while the old one will be freed so it takes over * the temporary status. * * Also note that we have to transfer the per-node surplus state * here as well otherwise the global surplus count will not match * the per-node's. 
*/ if (PageHugeTemporary(newpage)) { int old_nid = page_to_nid(oldpage); int new_nid = page_to_nid(newpage); SetPageHugeTemporary(oldpage); ClearPageHugeTemporary(newpage); spin_lock(&hugetlb_lock); if (h->surplus_huge_pages_node[old_nid]) { h->surplus_huge_pages_node[old_nid]--; h->surplus_huge_pages_node[new_nid]++; } spin_unlock(&hugetlb_lock); } }
gpl-2.0
armenrz/adempiere
base/src/org/compiere/model/I_AD_WF_Responsible.java
6101
/******************************************************************************
 * Product: Adempiere ERP & CRM Smart Business Solution                       *
 * Copyright (C) 1999-2007 ComPiere, Inc. All Rights Reserved.                *
 * This program is free software, you can redistribute it and/or modify it    *
 * under the terms version 2 of the GNU General Public License as published   *
 * by the Free Software Foundation. This program is distributed in the hope   *
 * that it will be useful, but WITHOUT ANY WARRANTY, without even the implied *
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.           *
 * See the GNU General Public License for more details.                       *
 * You should have received a copy of the GNU General Public License along    *
 * with this program, if not, write to the Free Software Foundation, Inc.,    *
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.                     *
 * For the text or an alternative of this public license, you may reach us    *
 * ComPiere, Inc., 2620 Augustine Dr. #245, Santa Clara, CA 95054, USA        *
 * or via [email protected] or http://www.compiere.org/license.html           *
 *****************************************************************************/
package org.compiere.model;

import java.math.BigDecimal;
import java.sql.Timestamp;
import org.compiere.util.KeyNamePair;

/** Generated Interface for AD_WF_Responsible.
 *  Do not edit by hand: this file is produced by the ADempiere model
 *  generator from the AD_WF_Responsible data-dictionary table; manual
 *  changes will be lost on the next regeneration.
 *  @author Adempiere (generated)
 *  @version Release 3.8.0
 */
public interface I_AD_WF_Responsible
{

    /** TableName=AD_WF_Responsible */
    public static final String Table_Name = "AD_WF_Responsible";

    /** AD_Table_ID=646 */
    public static final int Table_ID = MTable.getTable_ID(Table_Name);

    /* Interface fields are implicitly public static final. */
    KeyNamePair Model = new KeyNamePair(Table_ID, Table_Name);

    /** AccessLevel = 6 - System - Client */
    BigDecimal accessLevel = BigDecimal.valueOf(6);

    /** Load Meta Data */

    /** Column name AD_Client_ID */
    public static final String COLUMNNAME_AD_Client_ID = "AD_Client_ID";

    /** Get Client.
      * Client/Tenant for this installation.
      */
    public int getAD_Client_ID();

    /** Column name AD_Org_ID */
    public static final String COLUMNNAME_AD_Org_ID = "AD_Org_ID";

    /** Set Organization.
      * Organizational entity within client
      */
    public void setAD_Org_ID (int AD_Org_ID);

    /** Get Organization.
      * Organizational entity within client
      */
    public int getAD_Org_ID();

    /** Column name AD_Role_ID */
    public static final String COLUMNNAME_AD_Role_ID = "AD_Role_ID";

    /** Set Role.
      * Responsibility Role
      */
    public void setAD_Role_ID (int AD_Role_ID);

    /** Get Role.
      * Responsibility Role
      */
    public int getAD_Role_ID();

    public org.compiere.model.I_AD_Role getAD_Role() throws RuntimeException;

    /** Column name AD_User_ID */
    public static final String COLUMNNAME_AD_User_ID = "AD_User_ID";

    /** Set User/Contact.
      * User within the system - Internal or Business Partner Contact
      */
    public void setAD_User_ID (int AD_User_ID);

    /** Get User/Contact.
      * User within the system - Internal or Business Partner Contact
      */
    public int getAD_User_ID();

    public org.compiere.model.I_AD_User getAD_User() throws RuntimeException;

    /** Column name AD_WF_Responsible_ID */
    public static final String COLUMNNAME_AD_WF_Responsible_ID = "AD_WF_Responsible_ID";

    /** Set Workflow Responsible.
      * Responsible for Workflow Execution
      */
    public void setAD_WF_Responsible_ID (int AD_WF_Responsible_ID);

    /** Get Workflow Responsible.
      * Responsible for Workflow Execution
      */
    public int getAD_WF_Responsible_ID();

    /** Column name Created */
    public static final String COLUMNNAME_Created = "Created";

    /** Get Created.
      * Date this record was created
      */
    public Timestamp getCreated();

    /** Column name CreatedBy */
    public static final String COLUMNNAME_CreatedBy = "CreatedBy";

    /** Get Created By.
      * User who created this record
      */
    public int getCreatedBy();

    /** Column name Description */
    public static final String COLUMNNAME_Description = "Description";

    /** Set Description.
      * Optional short description of the record
      */
    public void setDescription (String Description);

    /** Get Description.
      * Optional short description of the record
      */
    public String getDescription();

    /** Column name EntityType */
    public static final String COLUMNNAME_EntityType = "EntityType";

    /** Set Entity Type.
      * Dictionary Entity Type; Determines ownership and synchronization
      */
    public void setEntityType (String EntityType);

    /** Get Entity Type.
      * Dictionary Entity Type; Determines ownership and synchronization
      */
    public String getEntityType();

    /** Column name IsActive */
    public static final String COLUMNNAME_IsActive = "IsActive";

    /** Set Active.
      * The record is active in the system
      */
    public void setIsActive (boolean IsActive);

    /** Get Active.
      * The record is active in the system
      */
    public boolean isActive();

    /** Column name Name */
    public static final String COLUMNNAME_Name = "Name";

    /** Set Name.
      * Alphanumeric identifier of the entity
      */
    public void setName (String Name);

    /** Get Name.
      * Alphanumeric identifier of the entity
      */
    public String getName();

    /** Column name ResponsibleType */
    public static final String COLUMNNAME_ResponsibleType = "ResponsibleType";

    /** Set Responsible Type.
      * Type of the Responsibility for a workflow
      */
    public void setResponsibleType (String ResponsibleType);

    /** Get Responsible Type.
      * Type of the Responsibility for a workflow
      */
    public String getResponsibleType();

    /** Column name Updated */
    public static final String COLUMNNAME_Updated = "Updated";

    /** Get Updated.
      * Date this record was updated
      */
    public Timestamp getUpdated();

    /** Column name UpdatedBy */
    public static final String COLUMNNAME_UpdatedBy = "UpdatedBy";

    /** Get Updated By.
      * User who updated this record
      */
    public int getUpdatedBy();
}
gpl-2.0
z3bu/AH4222
kernel/linux/include/asm-x86_64/mmu_context.h
1817
#ifndef __X86_64_MMU_CONTEXT_H #define __X86_64_MMU_CONTEXT_H #include <linux/config.h> #include <asm/desc.h> #include <asm/atomic.h> #include <asm/pgalloc.h> #include <asm/pda.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> /* * possibly do the LDT unload here? */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm); void destroy_context(struct mm_struct *mm); #ifdef CONFIG_SMP static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { if (read_pda(mmu_state) == TLBSTATE_OK) write_pda(mmu_state, TLBSTATE_LAZY); } #else static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } #endif static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { unsigned cpu = smp_processor_id(); if (likely(prev != next)) { /* stop flush ipis for the previous mm */ clear_bit(cpu, &prev->cpu_vm_mask); #ifdef CONFIG_SMP write_pda(mmu_state, TLBSTATE_OK); write_pda(active_mm, next); #endif set_bit(cpu, &next->cpu_vm_mask); /* Re-load page tables */ *read_pda(level4_pgt) = __pa(next->pgd) | _PAGE_TABLE; __flush_tlb(); if (unlikely(next->context.ldt != prev->context.ldt)) load_LDT_nolock(&next->context, cpu); } #ifdef CONFIG_SMP else { write_pda(mmu_state, TLBSTATE_OK); if (read_pda(active_mm) != next) out_of_line_bug(); if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) { /* We were in lazy tlb mode and leave_mm disabled * tlb flush IPI delivery. We must flush our tlb. */ local_flush_tlb(); load_LDT_nolock(&next->context, cpu); } } #endif } #define deactivate_mm(tsk,mm) do { \ load_gs_index(0); \ asm volatile("movl %0,%%fs"::"r"(0)); \ } while(0) #define activate_mm(prev, next) \ switch_mm((prev),(next),NULL) #endif
gpl-2.0
Golrag/TrinityCore
src/server/scripts/Outland/Auchindoun/ManaTombs/boss_nexusprince_shaffar.cpp
12484
/* * Copyright (C) 2008-2018 TrinityCore <https://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData SDName: Boss_NexusPrince_Shaffar SD%Complete: 80 SDComment: Need more tuning of spell timers, it should not be as linear fight as current. Also should possibly find a better way to deal with his three initial beacons to make sure all aggro. SDCategory: Auchindoun, Mana Tombs EndScriptData */ #include "ScriptMgr.h" #include "mana_tombs.h" #include "MotionMaster.h" #include "ScriptedCreature.h" enum Yells { SAY_INTRO = 0, SAY_AGGRO = 1, SAY_SLAY = 2, SAY_SUMMON = 3, SAY_DEAD = 4 }; enum Spells { SPELL_BLINK = 34605, SPELL_FROSTBOLT = 32364, SPELL_FIREBALL = 32363, SPELL_FROSTNOVA = 32365, SPELL_ETHEREAL_BEACON = 32371, // Summons NPC_BEACON SPELL_ETHEREAL_BEACON_VISUAL = 32368, // Ethereal Beacon SPELL_ARCANE_BOLT = 15254, SPELL_ETHEREAL_APPRENTICE = 32372 // Summon 18430 }; enum Creatures { NPC_BEACON = 18431, NPC_SHAFFAR = 18344 }; enum Misc { NR_INITIAL_BEACONS = 3 }; enum Events { EVENT_BLINK = 1, EVENT_BEACON, EVENT_FIREBALL, EVENT_FROSTBOLT, EVENT_FROST_NOVA }; class boss_nexusprince_shaffar : public CreatureScript { public: boss_nexusprince_shaffar() : CreatureScript("boss_nexusprince_shaffar") { } struct boss_nexusprince_shaffarAI : public BossAI { boss_nexusprince_shaffarAI(Creature* creature) : BossAI(creature, DATA_NEXUSPRINCE_SHAFFAR) { 
_hasTaunted = false; } void Reset() override { _Reset(); float dist = 8.0f; float posX, posY, posZ, angle; me->GetHomePosition(posX, posY, posZ, angle); me->SummonCreature(NPC_BEACON, posX - dist, posY - dist, posZ, angle, TEMPSUMMON_CORPSE_TIMED_DESPAWN, 7200000); me->SummonCreature(NPC_BEACON, posX - dist, posY + dist, posZ, angle, TEMPSUMMON_CORPSE_TIMED_DESPAWN, 7200000); me->SummonCreature(NPC_BEACON, posX + dist, posY, posZ, angle, TEMPSUMMON_CORPSE_TIMED_DESPAWN, 7200000); } void MoveInLineOfSight(Unit* who) override { if (!_hasTaunted && who->GetTypeId() == TYPEID_PLAYER && me->IsWithinDistInMap(who, 100.0f)) { Talk(SAY_INTRO); _hasTaunted = true; } } void EnterCombat(Unit* /*who*/) override { Talk(SAY_AGGRO); _EnterCombat(); events.ScheduleEvent(EVENT_BEACON, 10000); events.ScheduleEvent(EVENT_FIREBALL, 8000); events.ScheduleEvent(EVENT_FROSTBOLT, 4000); events.ScheduleEvent(EVENT_FROST_NOVA, 15000); } void JustSummoned(Creature* summoned) override { if (summoned->GetEntry() == NPC_BEACON) { summoned->CastSpell(summoned, SPELL_ETHEREAL_BEACON_VISUAL, false); if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0)) summoned->AI()->AttackStart(target); } summons.Summon(summoned); } void KilledUnit(Unit* victim) override { if (victim->GetTypeId() == TYPEID_PLAYER) Talk(SAY_SLAY); } void JustDied(Unit* /*killer*/) override { Talk(SAY_DEAD); _JustDied(); } void ExecuteEvent(uint32 eventId) override { switch (eventId) { case EVENT_BLINK: if (me->IsNonMeleeSpellCast(false)) me->InterruptNonMeleeSpells(true); // expire movement, will prevent from running right back to victim after cast // (but should MoveChase be used again at a certain time or should he not move?) 
if (me->GetMotionMaster()->GetCurrentMovementGeneratorType() == CHASE_MOTION_TYPE) me->GetMotionMaster()->MovementExpired(); DoCast(me, SPELL_BLINK); break; case EVENT_BEACON: if (!urand(0, 3)) Talk(SAY_SUMMON); DoCast(me, SPELL_ETHEREAL_BEACON, true); events.ScheduleEvent(EVENT_BEACON, 10000); break; case EVENT_FIREBALL: DoCastVictim(SPELL_FROSTBOLT); events.ScheduleEvent(EVENT_FIREBALL, urand(4500, 6000)); break; case EVENT_FROSTBOLT: DoCastVictim(SPELL_FROSTBOLT); events.ScheduleEvent(EVENT_FROSTBOLT, urand(4500, 6000)); break; case EVENT_FROST_NOVA: DoCast(me, SPELL_FROSTNOVA); events.ScheduleEvent(EVENT_FROST_NOVA, urand(17500, 25000)); events.ScheduleEvent(EVENT_BLINK, 1500); break; default: break; } } private: bool _hasTaunted; }; CreatureAI* GetAI(Creature* creature) const override { return GetManaTombsAI<boss_nexusprince_shaffarAI>(creature); } }; enum EtherealBeacon { EVENT_APPRENTICE = 1, EVENT_ARCANE_BOLT }; class npc_ethereal_beacon : public CreatureScript { public: npc_ethereal_beacon() : CreatureScript("npc_ethereal_beacon") { } struct npc_ethereal_beaconAI : public ScriptedAI { npc_ethereal_beaconAI(Creature* creature) : ScriptedAI(creature) { } void Reset() override { _events.Reset(); } void EnterCombat(Unit* who) override { if (Creature* shaffar = me->FindNearestCreature(NPC_SHAFFAR, 100.0f)) if (!shaffar->IsInCombat()) shaffar->AI()->AttackStart(who); _events.ScheduleEvent(EVENT_APPRENTICE, DUNGEON_MODE(20000, 10000)); _events.ScheduleEvent(EVENT_ARCANE_BOLT, 1000); } void JustSummoned(Creature* summoned) override { summoned->AI()->AttackStart(me->GetVictim()); } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; _events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; while (uint32 eventId = _events.ExecuteEvent()) { switch (eventId) { case EVENT_APPRENTICE: DoCast(me, SPELL_ETHEREAL_APPRENTICE, true); me->DespawnOrUnsummon(); break; case EVENT_ARCANE_BOLT: DoCastVictim(SPELL_ARCANE_BOLT); 
_events.ScheduleEvent(EVENT_ARCANE_BOLT, urand(2000, 4500)); break; default: break; } } } private: EventMap _events; }; CreatureAI* GetAI(Creature* creature) const override { return GetManaTombsAI<npc_ethereal_beaconAI>(creature); } }; enum EtherealApprentice { SPELL_ETHEREAL_APPRENTICE_FIREBOLT = 32369, SPELL_ETHEREAL_APPRENTICE_FROSTBOLT = 32370, EVENT_ETHEREAL_APPRENTICE_FIREBOLT = 1, EVENT_ETHEREAL_APPRENTICE_FROSTBOLT }; class npc_ethereal_apprentice : public CreatureScript { public: npc_ethereal_apprentice() : CreatureScript("npc_ethereal_apprentice") { } struct npc_ethereal_apprenticeAI : public ScriptedAI { npc_ethereal_apprenticeAI(Creature* creature) : ScriptedAI(creature) { } void Reset() override { _events.Reset(); } void EnterCombat(Unit* /*who*/) override { _events.ScheduleEvent(EVENT_ETHEREAL_APPRENTICE_FIREBOLT, 3000); } void UpdateAI(uint32 diff) override { if (!UpdateVictim()) return; _events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; while (uint32 eventId = _events.ExecuteEvent()) { switch (eventId) { case EVENT_ETHEREAL_APPRENTICE_FIREBOLT: DoCastVictim(SPELL_ETHEREAL_APPRENTICE_FIREBOLT, true); _events.ScheduleEvent(EVENT_ETHEREAL_APPRENTICE_FROSTBOLT, 3000); break; case EVENT_ETHEREAL_APPRENTICE_FROSTBOLT: DoCastVictim(SPELL_ETHEREAL_APPRENTICE_FROSTBOLT, true); _events.ScheduleEvent(EVENT_ETHEREAL_APPRENTICE_FIREBOLT, 3000); break; default: break; } } } private: EventMap _events; }; CreatureAI* GetAI(Creature* creature) const override { return GetManaTombsAI<npc_ethereal_apprenticeAI>(creature); } }; enum Yor { SPELL_DOUBLE_BREATH = 38361, EVENT_DOUBLE_BREATH = 1 }; class npc_yor : public CreatureScript { public: npc_yor() : CreatureScript("npc_yor") { } struct npc_yorAI : public ScriptedAI { npc_yorAI(Creature* creature) : ScriptedAI(creature) { } void Reset() override { } void EnterCombat(Unit* /*who*/) override { _events.ScheduleEvent(EVENT_DOUBLE_BREATH, urand(6000,9000)); } void UpdateAI(uint32 diff) override { if 
(!UpdateVictim()) return; _events.Update(diff); while (uint32 eventId = _events.ExecuteEvent()) { switch (eventId) { case EVENT_DOUBLE_BREATH: if (me->IsWithinDist(me->GetVictim(), ATTACK_DISTANCE)) DoCastVictim(SPELL_DOUBLE_BREATH); _events.ScheduleEvent(EVENT_DOUBLE_BREATH, urand(6000,9000)); break; default: break; } } DoMeleeAttackIfReady(); } private: EventMap _events; }; CreatureAI* GetAI(Creature* creature) const override { return GetManaTombsAI<npc_yorAI>(creature); } }; void AddSC_boss_nexusprince_shaffar() { new boss_nexusprince_shaffar(); new npc_ethereal_beacon(); new npc_ethereal_apprentice(); new npc_yor(); }
gpl-2.0
Bloodyaugust/sugarlabcppboilerplate
lib/boost/doc/html/boost/date_time/month_formatter.html
7569
<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=US-ASCII"> <title>Class template month_formatter</title> <link rel="stylesheet" href="../../../../doc/src/boostbook.css" type="text/css"> <meta name="generator" content="DocBook XSL Stylesheets V1.78.1"> <link rel="home" href="../../index.html" title="The Boost C++ Libraries BoostBook Documentation Subset"> <link rel="up" href="../../date_time/doxy.html#header.boost.date_time.date_formatting_hpp" title="Header &lt;boost/date_time/date_formatting.hpp&gt;"> <link rel="prev" href="date_formatter.html" title="Class template date_formatter"> <link rel="next" href="ymd_formatter.html" title="Class template ymd_formatter"> </head> <body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"> <table cellpadding="2" width="100%"><tr> <td valign="top"><img alt="Boost C++ Libraries" width="277" height="86" src="../../../../boost.png"></td> <td align="center"><a href="../../../../index.html">Home</a></td> <td align="center"><a href="../../../../libs/libraries.htm">Libraries</a></td> <td align="center"><a href="http://www.boost.org/users/people.html">People</a></td> <td align="center"><a href="http://www.boost.org/users/faq.html">FAQ</a></td> <td align="center"><a href="../../../../more/index.htm">More</a></td> </tr></table> <hr> <div class="spirit-nav"> <a accesskey="p" href="date_formatter.html"><img src="../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../../date_time/doxy.html#header.boost.date_time.date_formatting_hpp"><img src="../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="ymd_formatter.html"><img src="../../../../doc/src/images/next.png" alt="Next"></a> </div> <div class="refentry"> <a name="boost.date_time.month_formatter"></a><div class="titlepage"></div> <div class="refnamediv"> <h2><span class="refentrytitle">Class 
template month_formatter</span></h2> <p>boost::date_time::month_formatter &#8212; Formats a month as as string into an ostream. </p> </div> <h2 xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" class="refsynopsisdiv-title">Synopsis</h2> <div xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" class="refsynopsisdiv"><pre class="synopsis"><span class="comment">// In header: &lt;<a class="link" href="../../date_time/doxy.html#header.boost.date_time.date_formatting_hpp" title="Header &lt;boost/date_time/date_formatting.hpp&gt;">boost/date_time/date_formatting.hpp</a>&gt; </span><span class="keyword">template</span><span class="special">&lt;</span><span class="keyword">typename</span> month_type<span class="special">,</span> <span class="keyword">typename</span> format_type<span class="special">,</span> <span class="keyword">typename</span> charT <span class="special">=</span> <span class="keyword">char</span><span class="special">&gt;</span> <span class="keyword">class</span> <a class="link" href="month_formatter.html" title="Class template month_formatter">month_formatter</a> <span class="special">{</span> <span class="keyword">public</span><span class="special">:</span> <span class="comment">// <a class="link" href="month_formatter.html#idp90965712-bb">public static functions</a></span> <span class="keyword">static</span> <span class="identifier">ostream_type</span> <span class="special">&amp;</span> <a class="link" href="month_formatter.html#idp100311200-bb"><span class="identifier">format_month</span></a><span class="special">(</span><span class="keyword">const</span> <span class="identifier">month_type</span> <span class="special">&amp;</span><span class="special">,</span> <span class="identifier">ostream_type</span> <span class="special">&amp;</span><span class="special">)</span><span class="special">;</span> <span class="keyword">static</span> <span class="identifier">std</span><span class="special">::</span><span 
class="identifier">ostream</span> <span class="special">&amp;</span> <a class="link" href="month_formatter.html#idp107619264-bb"><span class="identifier">format_month</span></a><span class="special">(</span><span class="keyword">const</span> <span class="identifier">month_type</span> <span class="special">&amp;</span><span class="special">,</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">ostream</span> <span class="special">&amp;</span><span class="special">)</span><span class="special">;</span> <span class="special">}</span><span class="special">;</span></pre></div> <div class="refsect1"> <a name="idp179260704"></a><h2>Description</h2> <div class="refsect2"> <a name="idp179261120"></a><h3> <a name="idp90965712-bb"></a><code class="computeroutput">month_formatter</code> public static functions</h3> <div class="orderedlist"><ol class="orderedlist" type="1"> <li class="listitem"> <pre class="literallayout"><span class="keyword">static</span> <span class="identifier">ostream_type</span> <span class="special">&amp;</span> <a name="idp100311200-bb"></a><span class="identifier">format_month</span><span class="special">(</span><span class="keyword">const</span> <span class="identifier">month_type</span> <span class="special">&amp;</span> month<span class="special">,</span> <span class="identifier">ostream_type</span> <span class="special">&amp;</span> os<span class="special">)</span><span class="special">;</span></pre>Formats a month as as string into an ostream. <p>This function demands that month_type provide functions for converting to short and long strings if that capability is used. 
</p> </li> <li class="listitem"> <pre class="literallayout"><span class="keyword">static</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">ostream</span> <span class="special">&amp;</span> <a name="idp107619264-bb"></a><span class="identifier">format_month</span><span class="special">(</span><span class="keyword">const</span> <span class="identifier">month_type</span> <span class="special">&amp;</span> month<span class="special">,</span> <span class="identifier">std</span><span class="special">::</span><span class="identifier">ostream</span> <span class="special">&amp;</span> os<span class="special">)</span><span class="special">;</span></pre>Formats a month as as string into an ostream. <p>This function demands that month_type provide functions for converting to short and long strings if that capability is used. </p> </li> </ol></div> </div> </div> </div> <table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr> <td align="left"></td> <td align="right"><div class="copyright-footer">Copyright &#169; 2001-2005 CrystalClear Software, Inc<p>Subject to the Boost Software License, Version 1.0. (See accompanying file <code class="filename">LICENSE_1_0.txt</code> or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>)</p> </div></td> </tr></table> <hr> <div class="spirit-nav"> <a accesskey="p" href="date_formatter.html"><img src="../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../../date_time/doxy.html#header.boost.date_time.date_formatting_hpp"><img src="../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="ymd_formatter.html"><img src="../../../../doc/src/images/next.png" alt="Next"></a> </div> </body> </html>
gpl-2.0
JrmyDev/CodenameOne
CodenameOne/src/com/codename1/ui/layouts/mig/LinkHandler.java
6705
package com.codename1.ui.layouts.mig; import com.codename1.ui.Display; import java.util.ArrayList; import java.util.HashMap; /* * License (BSD): * ============== * * Copyright (c) 2004, Mikael Grev, MiG InfoCom AB. (miglayout (at) miginfocom (dot) com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list * of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * Neither the name of the MiG InfoCom AB nor the names of its contributors may be * used to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. 
* * @version 1.0 * @author Mikael Grev, MiG InfoCom AB * Date: 2006-sep-08 */ /** */ public final class LinkHandler { public static final int X = 0; public static final int Y = 1; public static final int WIDTH = 2; public static final int HEIGHT = 3; public static final int X2 = 4; public static final int Y2 = 5; private static final ArrayList<Object> LAYOUTS = new ArrayList<Object>(4); private static final ArrayList<HashMap<String, int[]>> VALUES = new ArrayList<HashMap<String, int[]>>(4); private static final ArrayList<HashMap<String, int[]>> VALUES_TEMP = new ArrayList<HashMap<String, int[]>>(4); private LinkHandler() { } public synchronized static Integer getValue(Object layout, String key, int type) { Integer ret = null; boolean cont = true; for (int i = LAYOUTS.size() - 1; i >= 0; i--) { Object l = Display.getInstance().extractHardRef(LAYOUTS.get(i)); if (ret == null && l == layout) { int[] rect = VALUES_TEMP.get(i).get(key); if (cont && rect != null && rect[type] != LayoutUtil.NOT_SET) { ret = new Integer(rect[type]); } else { rect = VALUES.get(i).get(key); ret = (rect != null && rect[type] != LayoutUtil.NOT_SET) ? new Integer(rect[type]) : null; } cont = false; } if (l == null) { LAYOUTS.remove(i); VALUES.remove(i); VALUES_TEMP.remove(i); } } return ret; } /** Sets a key that can be linked to from any component. * @param layout The MigLayout instance * @param key The key to link to. This is the same as the ID in a component constraint. 
* @param x x * @param y y * @param width Width * @param height Height * @return If the value was changed */ public synchronized static boolean setBounds(Object layout, String key, int x, int y, int width, int height) { return setBounds(layout, key, x, y, width, height, false, false); } synchronized static boolean setBounds(Object layout, String key, int x, int y, int width, int height, boolean temporary, boolean incCur) { for (int i = LAYOUTS.size() - 1; i >= 0; i--) { Object l = Display.getInstance().extractHardRef(LAYOUTS.get(i)); if (l == layout) { HashMap<String, int[]> map = (temporary ? VALUES_TEMP : VALUES).get(i); int[] old = map.get(key); if (old == null || old[X] != x || old[Y] != y || old[WIDTH] != width || old[HEIGHT] != height) { if (old == null || incCur == false) { map.put(key, new int[] {x, y, width, height, x + width, y + height}); return true; } else { boolean changed = false; if (x != LayoutUtil.NOT_SET) { if (old[X] == LayoutUtil.NOT_SET || x < old[X]) { old[X] = x; old[WIDTH] = old[X2] - x; changed = true; } if (width != LayoutUtil.NOT_SET) { int x2 = x + width; if (old[X2] == LayoutUtil.NOT_SET || x2 > old[X2]) { old[X2] = x2; old[WIDTH] = x2 - old[X]; changed = true; } } } if (y != LayoutUtil.NOT_SET) { if (old[Y] == LayoutUtil.NOT_SET || y < old[Y]) { old[Y] = y; old[HEIGHT] = old[Y2] - y; changed = true; } if (height != LayoutUtil.NOT_SET) { int y2 = y + height; if (old[Y2] == LayoutUtil.NOT_SET || y2 > old[Y2]) { old[Y2] = y2; old[HEIGHT] = y2 - old[Y]; changed = true; } } } return changed; } } return false; } } LAYOUTS.add(Display.getInstance().createSoftWeakRef(layout)); int[] bounds = new int[] {x, y, width, height, x + width, y + height}; HashMap<String, int[]> values = new HashMap<String, int[]>(4); if (temporary) values.put(key, bounds); VALUES_TEMP.add(values); values = new HashMap<String, int[]>(4); if (temporary == false) values.put(key, bounds); VALUES.add(values); return true; } /** This method clear any weak references right 
away instead of waiting for the GC. This might be advantageous * if lots of layout are created and disposed of quickly to keep memory consumption down. * @since 3.7.4 */ public synchronized static void clearWeakReferencesNow() { LAYOUTS.clear(); } public synchronized static boolean clearBounds(Object layout, String key) { for (int i = LAYOUTS.size() - 1; i >= 0; i--) { Object l = Display.getInstance().extractHardRef(LAYOUTS.get(i)); if (l == layout) return VALUES.get(i).remove(key) != null; } return false; } synchronized static void clearTemporaryBounds(Object layout) { for (int i = LAYOUTS.size() - 1; i >= 0; i--) { Object l = Display.getInstance().extractHardRef(LAYOUTS.get(i)); if (l == layout) { VALUES_TEMP.get(i).clear(); return; } } } }
gpl-2.0
tectronics/houdini-ocean-toolkit
src/3rdparty/win64/blitz/memblock.h
9384
// -*- C++ -*- /*************************************************************************** * blitz/memblock.h MemoryBlock<T> and MemoryBlockReference<T> * * $Id: memblock.h,v 1.20 2008/02/21 03:21:53 julianc Exp $ * * Copyright (C) 1997-1999 Todd Veldhuizen <[email protected]> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Suggestions: [email protected] * Bugs: [email protected] * * For more information, please see the Blitz++ Home Page: * http://oonumerics.org/blitz/ * ***************************************************************************/ #ifndef BZ_MEMBLOCK_H #define BZ_MEMBLOCK_H #include <blitz/blitz.h> #include <stddef.h> // ptrdiff_t BZ_NAMESPACE(blitz) enum preexistingMemoryPolicy { duplicateData, deleteDataWhenDone, neverDeleteData }; // Forward declaration of MemoryBlockReference template<typename T_type> class MemoryBlockReference; // Class MemoryBlock provides a reference-counted block of memory. This block // may be referred to by multiple vector, matrix and array objects. The memory // is automatically deallocated when the last referring object is destructed. // MemoryBlock may be subclassed to provide special allocators. 
template<typename P_type> class MemoryBlock { friend class MemoryBlockReference<P_type>; public: typedef P_type T_type; protected: MemoryBlock() { length_ = 0; data_ = 0; dataBlockAddress_ = 0; references_ = 0; BZ_MUTEX_INIT(mutex) mutexLocking_ = true; } explicit MemoryBlock(size_t items) { length_ = items; allocate(length_); #ifdef BZ_DEBUG_LOG_ALLOCATIONS cout << "MemoryBlock: allocated " << setw(8) << length_ << " at " << ((void *)dataBlockAddress_) << endl; #endif BZASSERT(dataBlockAddress_ != 0); references_ = 0; BZ_MUTEX_INIT(mutex) mutexLocking_ = true; } MemoryBlock(size_t length, T_type* data) { length_ = length; data_ = data; dataBlockAddress_ = data; references_ = 0; BZ_MUTEX_INIT(mutex) mutexLocking_ = true; } virtual ~MemoryBlock() { if (dataBlockAddress_) { #ifdef BZ_DEBUG_LOG_ALLOCATIONS cout << "MemoryBlock: freed " << setw(8) << length_ << " at " << ((void *)dataBlockAddress_) << endl; #endif deallocate(); } BZ_MUTEX_DESTROY(mutex) } // set mutex locking policy and return true if successful bool doLock(bool lockingPolicy) { if (mutexLocking_ == lockingPolicy) { // already set return true; } else if (references_ <= 1) { // no multiple references, safe to change mutexLocking_ = lockingPolicy; return true; } return false; // unsafe to change } void addReference() { if (mutexLocking_) { BZ_MUTEX_LOCK(mutex) } ++references_; #ifdef BZ_DEBUG_LOG_REFERENCES cout << "MemoryBlock: reffed " << setw(8) << length_ << " at " << ((void *)dataBlockAddress_) << " (r=" << (int)references_ << ")" << endl; #endif if (mutexLocking_) { BZ_MUTEX_UNLOCK(mutex) } } T_type* restrict data() { return data_; } const T_type* restrict data() const { return data_; } T_type*& dataBlockAddress() { return dataBlockAddress_; } size_t length() const { return length_; } int removeReference() { if (mutexLocking_) { BZ_MUTEX_LOCK(mutex) } int refcount = --references_; #ifdef BZ_DEBUG_LOG_REFERENCES cout << "MemoryBlock: dereffed " << setw(8) << length_ << " at " << ((void 
*)dataBlockAddress_) << " (r=" << (int)references_ << ")" << endl; #endif if (mutexLocking_) { BZ_MUTEX_UNLOCK(mutex) } return refcount; } int references() const { if (mutexLocking_) { BZ_MUTEX_LOCK(mutex) } int refcount = references_; if (mutexLocking_) { BZ_MUTEX_UNLOCK(mutex) } return refcount; } protected: inline void allocate(size_t length); void deallocate(); private: // Disabled member functions MemoryBlock(const MemoryBlock<T_type>&) { } void operator=(const MemoryBlock<T_type>&) { } private: // Data members T_type * restrict data_; T_type * dataBlockAddress_; #ifdef BZ_DEBUG_REFERENCE_ROLLOVER volatile unsigned char references_; #else volatile int references_; #endif BZ_MUTEX_DECLARE(mutex) bool mutexLocking_; size_t length_; }; template<typename P_type> class MemoryBlockReference { public: typedef P_type T_type; protected: T_type * restrict data_; private: MemoryBlock<T_type>* block_; public: MemoryBlockReference() { block_ = 0; addReference(); data_ = 0; } MemoryBlockReference(MemoryBlockReference<T_type>& ref, size_t offset=0) { block_ = ref.block_; addReference(); data_ = ref.data_ + offset; } MemoryBlockReference(size_t length, T_type* data, preexistingMemoryPolicy deletionPolicy) { // Create a memory block using already allocated memory. // Note: if the deletionPolicy is duplicateData, this must // be handled by the leaf class. In MemoryBlockReference, // this is treated as neverDeleteData; the leaf class (e.g. Array) // must duplicate the data. 
if ((deletionPolicy == neverDeleteData) || (deletionPolicy == duplicateData)) { // in this case, we do not need a MemoryBlock to ref-count the data block_ = 0; } else if (deletionPolicy == deleteDataWhenDone) { block_ = new MemoryBlock<T_type>(length, data); #ifdef BZ_DEBUG_LOG_ALLOCATIONS cout << "MemoryBlockReference: created MemoryBlock at " << ((void*)block_) << endl; #endif } addReference(); data_ = data; } explicit MemoryBlockReference(size_t items) { block_ = new MemoryBlock<T_type>(items); addReference(); data_ = block_->data(); #ifdef BZ_DEBUG_LOG_ALLOCATIONS cout << "MemoryBlockReference: created MemoryBlock at " << ((void*)block_) << endl; #endif } ~MemoryBlockReference() { blockRemoveReference(); } protected: int numReferences() const { if (block_) return block_->references(); #ifdef BZ_DEBUG_LOG_REFERENCES cout << "Invalid reference count for data at "<< data_ << endl; #endif return -1; } bool lockReferenceCount(bool lockingPolicy) const { if (block_) return block_->doLock(lockingPolicy); // if we have no block, consider request successful #ifdef BZ_DEBUG_LOG_REFERENCES cout << "No reference count locking for data at "<< data_ << endl; #endif return true; } void changeToNullBlock() { blockRemoveReference(); block_ = 0; addReference(); data_ = 0; } void changeBlock(MemoryBlockReference<T_type>& ref, size_t offset=0) { blockRemoveReference(); block_ = ref.block_; addReference(); data_ = ref.data_ + offset; } void newBlock(size_t items) { blockRemoveReference(); block_ = new MemoryBlock<T_type>(items); addReference(); data_ = block_->data(); #ifdef BZ_DEBUG_LOG_ALLOCATIONS cout << "MemoryBlockReference: created MemoryBlock at " << ((void*)block_) << endl; #endif } private: void blockRemoveReference() { int refcount = removeReference(); if (refcount == 0) { #ifdef BZ_DEBUG_LOG_ALLOCATIONS cout << "MemoryBlock: no more refs, delete MemoryBlock object at " << ((void*)block_) << endl; #endif delete block_; } } void addReference() const { if (block_) { 
block_->addReference(); } else { #ifdef BZ_DEBUG_LOG_REFERENCES cout << "Skipping reference count for data at "<< data_ << endl; #endif } }; int removeReference() const { if (block_) return block_->removeReference(); #ifdef BZ_DEBUG_LOG_REFERENCES cout << "Skipping reference count for data at "<< data_ << endl; #endif return -1; }; void operator=(const MemoryBlockReference<T_type>&) { } }; BZ_NAMESPACE_END #include <blitz/memblock.cc> #endif // BZ_MEMBLOCK_H
gpl-2.0
arbuzarbuz/paparazzi
sw/airborne/modules/ins/ahrs_chimu_uart.c
2598
/* C code to connect a CHIMU using uart */ #include <stdbool.h> // Output #include "state.h" // For centripedal corrections #include "subsystems/gps.h" #include "subsystems/ahrs.h" #include "generated/airframe.h" #if CHIMU_DOWNLINK_IMMEDIATE #ifndef DOWNLINK_DEVICE #define DOWNLINK_DEVICE DOWNLINK_AP_DEVICE #endif #include "mcu_periph/uart.h" #include "messages.h" #include "subsystems/datalink/downlink.h" #endif #include "ins_module.h" #include "imu_chimu.h" #include "led.h" CHIMU_PARSER_DATA CHIMU_DATA; INS_FORMAT ins_roll_neutral; INS_FORMAT ins_pitch_neutral; void ahrs_init(void) { ahrs.status = AHRS_UNINIT; uint8_t ping[7] = {CHIMU_STX, CHIMU_STX, 0x01, CHIMU_BROADCAST, MSG00_PING, 0x00, 0xE6 }; uint8_t rate[12] = {CHIMU_STX, CHIMU_STX, 0x06, CHIMU_BROADCAST, MSG10_UARTSETTINGS, 0x05, 0xff, 0x79, 0x00, 0x00, 0x01, 0x76 }; // 50Hz attitude only + SPI uint8_t quaternions[7] = {CHIMU_STX, CHIMU_STX, 0x01, CHIMU_BROADCAST, MSG09_ESTIMATOR, 0x01, 0x39 }; // 25Hz attitude only + SPI // uint8_t rate[12] = {CHIMU_STX, CHIMU_STX, 0x06, CHIMU_BROADCAST, MSG10_UARTSETTINGS, 0x04, 0xff, 0x79, 0x00, 0x00, 0x01, 0xd3 }; // 25Hz attitude only + SPI // uint8_t euler[7] = {CHIMU_STX, CHIMU_STX, 0x01, CHIMU_BROADCAST, MSG09_ESTIMATOR, 0x00, 0xaf }; // 25Hz attitude only + SPI new_ins_attitude = 0; ins_roll_neutral = INS_ROLL_NEUTRAL_DEFAULT; ins_pitch_neutral = INS_PITCH_NEUTRAL_DEFAULT; CHIMU_Init(&CHIMU_DATA); // Request Software version for (int i=0;i<7;i++) { InsUartSend1(ping[i]); } // Quat Filter for (int i=0;i<7;i++) { InsUartSend1(quaternions[i]); } // 50Hz CHIMU_Checksum(rate,12); InsSend(rate,12); } void ahrs_align(void) { ahrs.status = AHRS_RUNNING; } void parse_ins_msg( void ) { while (InsLink(ChAvailable())) { uint8_t ch = InsLink(Getch()); if (CHIMU_Parse(ch, 0, &CHIMU_DATA)) { if(CHIMU_DATA.m_MsgID==0x03) { new_ins_attitude = 1; RunOnceEvery(25, LED_TOGGLE(3) ); if (CHIMU_DATA.m_attitude.euler.phi > M_PI) { CHIMU_DATA.m_attitude.euler.phi -= 2 * M_PI; } struct 
FloatEulers att = { CHIMU_DATA.m_attitude.euler.phi, CHIMU_DATA.m_attitude.euler.theta, CHIMU_DATA.m_attitude.euler.psi }; stateSetNedToBodyEulers_f(&att); #if CHIMU_DOWNLINK_IMMEDIATE DOWNLINK_SEND_AHRS_EULER(DefaultChannel, DefaultDevice, &CHIMU_DATA.m_attitude.euler.phi, &CHIMU_DATA.m_attitude.euler.theta, &CHIMU_DATA.m_attitude.euler.psi); #endif } } } } void ahrs_update_gps( void ) { }
gpl-2.0
Gurgel100/gcc
libgomp/testsuite/libgomp.oacc-fortran/optional-host_data.f90
767
! Test the host_data construct with optional arguments. ! Based on host_data-1.f90. ! { dg-do run } ! { dg-additional-options "-cpp" } program test implicit none integer, target :: i integer, pointer :: ip, iph ! Assign the same targets ip => i iph => i call foo(iph) call foo(iph, ip) contains subroutine foo(iph, ip) integer, pointer :: iph integer, pointer, optional :: ip !$acc data copyin(i) !$acc host_data use_device(ip) ! Test how the pointers compare inside a host_data construct if (present(ip)) then #if ACC_MEM_SHARED if (.not. associated(ip, iph)) STOP 1 #else if (associated(ip, iph)) STOP 2 #endif end if !$acc end host_data !$acc end data end subroutine foo end program test
gpl-2.0
jacques/connector
vendor/gems/soap4r-1.5.5.20061022/test/soap/header/test_authheader.rb
5284
require 'test/unit' require 'soap/rpc/driver' require 'soap/rpc/standaloneServer' require 'soap/header/simplehandler' module SOAP module Header class TestAuthHeader < Test::Unit::TestCase Port = 17171 PortName = 'http://tempuri.org/authHeaderPort' MyHeaderName = XSD::QName.new("http://tempuri.org/authHeader", "auth") DummyHeaderName = XSD::QName.new("http://tempuri.org/authHeader", "dummy") class AuthHeaderPortServer < SOAP::RPC::StandaloneServer class AuthHeaderService def self.create new end def deposit(amt) "deposit #{amt} OK" end def withdrawal(amt) "withdrawal #{amt} OK" end end def initialize(*arg) super add_rpc_servant(AuthHeaderService.new, PortName) ServerAuthHeaderHandler.init add_request_headerhandler(ServerAuthHeaderHandler) end class ServerAuthHeaderHandler < SOAP::Header::SimpleHandler class << self def create new end def init @users = { 'NaHi' => 'passwd', 'HiNa' => 'wspass' } @sessions = {} end def login(userid, passwd) userid and passwd and @users[userid] == passwd end def auth(sessionid) @sessions[sessionid][0] end def create_session(userid) while true key = create_sessionkey break unless @sessions[key] end @sessions[key] = [userid] key end def destroy_session(sessionkey) @sessions.delete(sessionkey) end def sessions @sessions end private def create_sessionkey Time.now.usec.to_s end end def initialize super(MyHeaderName) @userid = @sessionid = nil end def on_simple_outbound { "sessionid" => @sessionid } end def on_simple_inbound(my_header, mu) auth = false userid = my_header["userid"] passwd = my_header["passwd"] if self.class.login(userid, passwd) auth = true elsif sessionid = my_header["sessionid"] if userid = self.class.auth(sessionid) self.class.destroy_session(sessionid) auth = true end end raise RuntimeError.new("authentication failed") unless auth @userid = userid @sessionid = self.class.create_session(userid) end end end class ClientAuthHeaderHandler < SOAP::Header::SimpleHandler def initialize(userid, passwd, mustunderstand) 
super(MyHeaderName) @sessionid = nil @userid = userid @passwd = passwd @mustunderstand = mustunderstand end def on_simple_outbound if @sessionid { "sessionid" => @sessionid } else { "userid" => @userid, "passwd" => @passwd } end end def on_simple_inbound(my_header, mustunderstand) @sessionid = my_header["sessionid"] end def sessionid @sessionid end end class DummyHeaderHandler < SOAP::Header::SimpleHandler def initialize(mustunderstand) super(DummyHeaderName) @mustunderstand = mustunderstand end def on_simple_outbound { XSD::QName.new("foo", "bar") => nil } end def on_simple_inbound(my_header, mustunderstand) end end def setup @endpoint = "http://localhost:#{Port}/" setup_server setup_client end def setup_server @server = AuthHeaderPortServer.new(self.class.name, nil, '0.0.0.0', Port) @server.level = Logger::Severity::ERROR @t = Thread.new { @server.start } end def setup_client @client = SOAP::RPC::Driver.new(@endpoint, PortName) @client.wiredump_dev = STDERR if $DEBUG @client.add_method('deposit', 'amt') @client.add_method('withdrawal', 'amt') end def teardown teardown_server teardown_client end def teardown_server @server.shutdown @t.kill @t.join end def teardown_client @client.reset_stream end def test_success_no_mu h = ClientAuthHeaderHandler.new('NaHi', 'passwd', false) @client.headerhandler << h do_transaction_check(h) end def test_success_mu h = ClientAuthHeaderHandler.new('NaHi', 'passwd', true) @client.headerhandler << h do_transaction_check(h) end def test_no_mu h = ClientAuthHeaderHandler.new('NaHi', 'passwd', true) @client.headerhandler << h @client.headerhandler << DummyHeaderHandler.new(false) do_transaction_check(h) end def test_mu h = ClientAuthHeaderHandler.new('NaHi', 'passwd', true) @client.headerhandler << h @client.headerhandler << (h2 = DummyHeaderHandler.new(true)) assert_raise(SOAP::UnhandledMustUnderstandHeaderError) do assert_equal("deposit 150 OK", @client.deposit(150)) end @client.headerhandler.delete(h2) @client.headerhandler << (h2 = 
DummyHeaderHandler.new(false)) do_transaction_check(h) end def do_transaction_check(h) assert_equal("deposit 150 OK", @client.deposit(150)) serversess = AuthHeaderPortServer::ServerAuthHeaderHandler.sessions[h.sessionid] assert_equal("NaHi", serversess[0]) assert_equal("withdrawal 120 OK", @client.withdrawal(120)) serversess = AuthHeaderPortServer::ServerAuthHeaderHandler.sessions[h.sessionid] assert_equal("NaHi", serversess[0]) end def test_authfailure h = ClientAuthHeaderHandler.new('NaHi', 'pa', false) @client.headerhandler << h assert_raises(RuntimeError) do @client.deposit(150) end end end end end
gpl-2.0
PlanTool/plantool
wrappingPlanners/Deterministic/LAMA/seq-sat-lama/lama/translate/pddl/pretty_print.py
2178
####################################################################### # # Author: Malte Helmert ([email protected]) # (C) Copyright 2003-2004 Malte Helmert # # This file is part of LAMA. # # LAMA is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 3 # of the license, or (at your option) any later version. # # LAMA is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. # ####################################################################### import cStringIO import textwrap __all__ = ["print_nested_list"] def tokenize_list(obj): if isinstance(obj, list): yield "(" for item in obj: for elem in tokenize_list(item): yield elem yield ")" else: yield obj def wrap_lines(lines): for line in lines: indent = " " * (len(line) - len(line.lstrip()) + 4) line = line.replace("-", "_") # textwrap breaks on "-", but not "_" line = textwrap.fill(line, subsequent_indent=indent, break_long_words=False) yield line.replace("_", "-") def print_nested_list(nested_list): stream = cStringIO.StringIO() indent = 0 startofline = True pendingspace = False for token in tokenize_list(nested_list): if token == "(": if not startofline: stream.write("\n") stream.write("%s(" % (" " * indent)) indent += 2 startofline = False pendingspace = False elif token == ")": indent -= 2 stream.write(")") startofline = False pendingspace = False else: if startofline: stream.write(" " * indent) if pendingspace: stream.write(" ") stream.write(token) startofline = False pendingspace = True for line in wrap_lines(stream.getvalue().splitlines()): print line
gpl-2.0
tobiasbuhrer/tobiasb
web/core/tests/Drupal/KernelTests/Core/Extension/ThemeExtensionListTest.php
1189
<?php namespace Drupal\KernelTests\Core\Extension; use Drupal\KernelTests\KernelTestBase; /** * @coversDefaultClass \Drupal\Core\Extension\ThemeExtensionList * @group Extension */ class ThemeExtensionListTest extends KernelTestBase { /** * @covers ::getList */ public function testGetlist() { \Drupal::configFactory()->getEditable('core.extension') ->set('module.testing', 1000) ->set('theme.test_theme', 0) ->save(); // The installation profile is provided by a container parameter. // Saving the configuration doesn't automatically trigger invalidation $this->container->get('kernel')->rebuildContainer(); /** @var \Drupal\Core\Extension\ThemeExtensionList $theme_extension_list */ $theme_extension_list = \Drupal::service('extension.list.theme'); $extensions = $theme_extension_list->getList(); $this->assertArrayHasKey('test_theme', $extensions); } /** * Tests that themes have an empty default version set. */ public function testThemeWithoutVersion() { $theme = \Drupal::service('extension.list.theme')->get('test_theme_settings_features'); $this->assertNull($theme->info['version']); } }
gpl-2.0
berkeley-amsa/amsa
tmp/install_4e730b9d77cb3/admin/classes/category.php
8168
<?php /** * @package com_zoo Component * @file category.php * @version 2.4.9 May 2011 * @author YOOtheme http://www.yootheme.com * @copyright Copyright (C) 2007 - 2011 YOOtheme GmbH * @license http://www.gnu.org/licenses/gpl-2.0.html GNU/GPLv2 only */ /* Class: Category Category related attributes and functions. */ class Category { /* Variable: id Primary key. */ public $id; /* Variable: application_id Related application id. */ public $application_id; /* Variable: name Category name. */ public $name; /* Variable: alias Category alias. */ public $alias; /* Variable: description Category description. */ public $description; /* Variable: parent Categories parent id. */ public $parent; /* Variable: ordering Categories ordering. */ public $ordering; /* Variable: published Category published state. */ public $published; /* Variable: params Category params. */ public $params; /* Variable: item_ids Related category item ids. */ public $item_ids; /* Variable: app App instance. */ public $app; /* Variable: _parent Related category parent object. */ protected $_parent; /* Variable: _children Related category children objects. */ protected $_children = array(); /* Variable: _items Related category item objects. */ protected $_items = array(); /* Variable: _item_count Related category item count. */ public $_item_count; /* Variable: _total_item_count Item count including subcategories. */ protected $_total_item_count = null; public function __construct() { // init vars $app = App::getInstance('zoo'); // decorate data as object $this->params = $app->parameter->create($this->params); // set related item ids $this->item_ids = isset($this->item_ids) ? explode(',', $this->item_ids) : array(); if (!empty($this->item_ids)) { $this->item_ids = array_combine($this->item_ids, $this->item_ids); } } /* Function: getApplication Get related application object. 
Returns: Application - application object */ public function getApplication() { return $this->app->table->application->get($this->application_id); } /* Function: hasChildren Does this category have children. Returns: Bool */ public function hasChildren() { return !empty($this->_children); } /* Function: getChildren Method to get category's children. Parameters: recursive - Recursivly retrieve childrens children. Returns: id - children */ public function getChildren($recursive = false) { if ($recursive) { $children = array(); foreach ($this->_children as $child) { $children[$child->id] = $child; $children += $child->getChildren(true); } return $children; } return $this->_children; } /* Function: setChildren Set children. Returns: Category */ public function setChildren($val) { $this->_children = $val; return $this; } /* Function: addChildren Add children. Returns: Category */ public function addChild($category) { $this->_children[] = $category; return $this; } /* Function: removeChild Remove a child. Returns: Category */ public function removeChild($child) { unset($this->_children[$child->id]); return $this; } /* Function: getParent Method to get category's parent. Returns: id - parent */ public function getParent() { return $this->_parent; } /* Function: setParent Set parent. Returns: Category */ public function setParent($val) { $this->_parent = $val; return $this; } /* Function: getPathway Method to get category's pathway. Returns: Array - Array of parent categories */ public function getPathway() { if ($this->_parent == null) { return array(); } $pathway = $this->_parent->getPathway(); $pathway[$this->id] = $this; return $pathway; } /* Function: isPublished Get published state. Returns: - */ public function isPublished() { return $this->published; } /* Function: setPublished Set category published state and fire event. 
Parameters: $val - State $save - Autosave category before fire event Returns: Category */ public function setPublished($val, $save = false) { if ($this->published != $val) { // set state $old_state = $this->state; $this->published = $val; // autosave category ? if ($save) { $this->app->table->category->save($this); } // fire event $this->app->event->dispatcher->notify($this->app->event->create($this, 'category:stateChanged', compact('old_state'))); } return $this; } /* Function: getPath Method to get the path to this category. Returns: Array - Category path */ public function getPath($path = array()) { $path[] = $this->id; if ($this->_parent != null) { $path = $this->_parent->getPath($path); } return $path; } /* Function: getItems Method to get category's items. Returns: Array */ public function getItems($published = false, $user = null, $orderby = '') { if (empty($this->_items)) { $this->_items = $this->app->table->item->getFromCategory($this->application_id, $this->id, $published, $user, $orderby); } return $this->_items; } /* Function: itemCount Method to count category's items. Returns: Int - Number of items */ public function itemCount() { if (!isset($this->_item_count)) { $this->_item_count = count($this->item_ids); } return $this->_item_count; } /* Function: total_item_count Method to count category's published items including all childrens items. Returns: Int - Number of items */ public function totalItemCount() { if (!isset($this->_total_item_count)) { $this->_total_item_count = count($this->getItemIds(true)); } return $this->_total_item_count; } /* Function: getItemIds Method to get related item ids. Returns: Array - Related item ids */ public function getItemIds($recursive = false) { $item_ids = $this->item_ids; if ($recursive) { foreach($this->getChildren(true) as $child) { $item_ids += $child->item_ids; } } return $item_ids; } /* Function: childrenHaveItems Method to check if children have items. 
Returns: Bool */ public function childrenHaveItems() { foreach ($this->getChildren(true) as $child) { if ($child->itemCount()) { return true; } } return false; } /* Function: getParams Gets category params. Parameters: $for - Get params for a specific use, including overidden values. Returns: Object - AppParameter */ public function getParams($for = null) { // get site params and inherit globals if ($for == 'site') { return $this->app->parameter->create() ->set('config.', $this->getApplication()->getParams()->get('global.config.')) ->set('template.', $this->getApplication()->getParams()->get('global.template.')) ->loadArray($this->params->getData()); } return $this->params; } /* Function: getImage Get image resource info. Parameters: $name - the param name of the image Returns: Array - Image info */ public function getImage($name) { $params = $this->getParams(); if ($image = $params->get($name)) { return $this->app->html->_('zoo.image', $image, $params->get($name . '_width'), $params->get($name . '_height')); } return null; } /* Function: getImage Executes Content Plugins on text. Parameters: $text - the text Returns: text - string */ public function getText($text) { return $this->app->zoo->triggerContentPlugins($text); } } /* Class: CategoryException */ class CategoryException extends AppException {}
gpl-2.0
hackthis02/xbmc
xbmc/input/joysticks/dialogs/GUIDialogNewJoystick.cpp
1952
/* * Copyright (C) 2016 Team Kodi * http://kodi.tv * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this Program; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include "GUIDialogNewJoystick.h" #include "ServiceBroker.h" #include "guilib/GUIWindowManager.h" #include "guilib/WindowIDs.h" #include "messaging/helpers/DialogHelper.h" #include "settings/Settings.h" using namespace JOYSTICK; CGUIDialogNewJoystick::CGUIDialogNewJoystick() : CThread("NewJoystickDlg") { } void CGUIDialogNewJoystick::ShowAsync() { bool bShow = true; if (IsRunning()) bShow = false; else if (!CServiceBroker::GetSettings().GetBool(CSettings::SETTING_INPUT_ASKNEWCONTROLLERS)) bShow = false; else if (g_windowManager.IsWindowActive(WINDOW_DIALOG_GAME_CONTROLLERS, false)) bShow = false; if (bShow) Create(); } void CGUIDialogNewJoystick::Process() { using namespace KODI::MESSAGING::HELPERS; // "New controller detected" // "A new controller has been detected. Configuration can be done at any time in "Settings -> System Settings -> Input". Would you like to configure it now?" if (ShowYesNoDialogText(CVariant{ 35011 }, CVariant{ 35012 }) == DialogResponse::YES) { g_windowManager.ActivateWindow(WINDOW_DIALOG_GAME_CONTROLLERS); } else { CServiceBroker::GetSettings().SetBool(CSettings::SETTING_INPUT_ASKNEWCONTROLLERS, false); } }
gpl-2.0
domino-team/openwrt-cc
package/gli-pub/openwrt-node-packages-master/node/node-v6.9.1/deps/icu-small/source/tools/genrb/errmsg.h
1012
/* ******************************************************************************* * * Copyright (C) 1998-2016, International Business Machines * Corporation and others. All Rights Reserved. * ******************************************************************************* * * File error.h * * Modification History: * * Date Name Description * 05/28/99 stephen Creation. ******************************************************************************* */ #ifndef ERROR_H #define ERROR_H 1 #include "unicode/utypes.h" U_CDECL_BEGIN extern const char *gCurrentFileName; U_CFUNC void error(uint32_t linenumber, const char *msg, ...); U_CFUNC void warning(uint32_t linenumber, const char *msg, ...); /* Show warnings? */ U_CFUNC void setShowWarning(UBool val); U_CFUNC UBool getShowWarning(void); /* strict */ U_CFUNC void setStrict(UBool val); U_CFUNC UBool isStrict(void); /* verbosity */ U_CFUNC void setVerbose(UBool val); U_CFUNC UBool isVerbose(void); U_CDECL_END #endif
gpl-2.0
DarkDefender/coreboot
payloads/libpayload/arch/x86/multiboot.c
3165
/* * This file is part of the libpayload project. * * Copyright (C) 2008 Advanced Micro Devices, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include <libpayload-config.h> #include <libpayload.h> #include <multiboot_tables.h> extern unsigned long loader_eax; extern unsigned long loader_ebx; static void mb_parse_mmap(struct multiboot_header *table, struct sysinfo_t *info) { u8 *start = (u8 *) phys_to_virt(table->mmap_addr); u8 *ptr = start; info->n_memranges = 0; while(ptr < (start + table->mmap_length)) { struct multiboot_mmap *mmap = (struct multiboot_mmap *) ptr; #ifdef CONFIG_MEMMAP_RAM_ONLY /* 1 == normal RAM. 
Ignore everything else for now */ if (mmap->type == 1) { #endif info->memrange[info->n_memranges].base = mmap->addr; info->memrange[info->n_memranges].size = mmap->length; info->memrange[info->n_memranges].type = mmap->type; if (++info->n_memranges == SYSINFO_MAX_MEM_RANGES) return; #ifdef CONFIG_MEMMAP_RAM_ONLY } #endif ptr += (mmap->size + sizeof(mmap->size)); } } static void mb_parse_cmdline(struct multiboot_header *table) { extern int main_argc; extern char *main_argv[]; char *c = phys_to_virt(table->cmdline); while(*c != '\0' && main_argc < MAX_ARGC_COUNT) { main_argv[main_argc++] = c; for( ; *c != '\0' && !isspace(*c); c++); if (*c) { *c = 0; c++; } } } int get_multiboot_info(struct sysinfo_t *info) { struct multiboot_header *table; if (loader_eax != MULTIBOOT_MAGIC) return -1; table = (struct multiboot_header *) phys_to_virt(loader_ebx); info->mbtable = phys_to_virt(loader_ebx); if (table->flags & MULTIBOOT_FLAGS_MMAP) mb_parse_mmap(table, info); if (table->flags & MULTIBOOT_FLAGS_CMDLINE) mb_parse_cmdline(table); return 0; }
gpl-2.0
teeple/pns_server
work/install/Python-2.7.4/Demo/turtle/tdemo_lindenmayer_indian.py
2432
#!/usr/bin/env python """ turtle-example-suite: xtx_lindenmayer_indian.py Each morning women in Tamil Nadu, in southern India, place designs, created by using rice flour and known as kolam on the thresholds of their homes. These can be described by Lindenmayer systems, which can easily be implemented with turtle graphics and Python. Two examples are shown here: (1) the snake kolam (2) anklets of Krishna Taken from Marcia Ascher: Mathematics Elsewhere, An Exploration of Ideas Across Cultures """ ################################ # Mini Lindenmayer tool ############################### from turtle import * def replace( seq, replacementRules, n ): for i in range(n): newseq = "" for element in seq: newseq = newseq + replacementRules.get(element,element) seq = newseq return seq def draw( commands, rules ): for b in commands: try: rules[b]() except TypeError: try: draw(rules[b], rules) except: pass def main(): ################################ # Example 1: Snake kolam ################################ def r(): right(45) def l(): left(45) def f(): forward(7.5) snake_rules = {"-":r, "+":l, "f":f, "b":"f+f+f--f--f+f+f"} snake_replacementRules = {"b": "b+f+b--f--b+f+b"} snake_start = "b--f--b--f" drawing = replace(snake_start, snake_replacementRules, 3) reset() speed(3) tracer(1,0) ht() up() backward(195) down() draw(drawing, snake_rules) from time import sleep sleep(3) ################################ # Example 2: Anklets of Krishna ################################ def A(): color("red") circle(10,90) def B(): from math import sqrt color("black") l = 5/sqrt(2) forward(l) circle(l, 270) forward(l) def F(): color("green") forward(10) krishna_rules = {"a":A, "b":B, "f":F} krishna_replacementRules = {"a" : "afbfa", "b" : "afbfbfbfa" } krishna_start = "fbfbfbfb" reset() speed(0) tracer(3,0) ht() left(45) drawing = replace(krishna_start, krishna_replacementRules, 3) draw(drawing, krishna_rules) tracer(1) return "Done!" if __name__=='__main__': msg = main() print msg mainloop()
gpl-2.0
asavah/xbmc
xbmc/platform/win32/Filesystem.cpp
2486
/* * Copyright (C) 2005-2018 Team Kodi * This file is part of Kodi - https://kodi.tv * * SPDX-License-Identifier: GPL-2.0-or-later * See LICENSES/README.md for more information. */ #include "platform/Filesystem.h" #include "platform/win32/CharsetConverter.h" #include <Windows.h> namespace win = KODI::PLATFORM::WINDOWS; namespace KODI { namespace PLATFORM { namespace FILESYSTEM { space_info space(const std::string& path, std::error_code& ec) { ec.clear(); space_info sp; auto pathW = win::ToW(path); ULARGE_INTEGER capacity; ULARGE_INTEGER available; ULARGE_INTEGER free; auto result = GetDiskFreeSpaceExW(pathW.c_str(), &available, &capacity, &free); if (result == FALSE) { ec.assign(GetLastError(), std::system_category()); sp.available = static_cast<uintmax_t>(-1); sp.capacity = static_cast<uintmax_t>(-1); sp.free = static_cast<uintmax_t>(-1); return sp; } sp.available = static_cast<uintmax_t>(available.QuadPart); sp.capacity = static_cast<uintmax_t>(capacity.QuadPart); sp.free = static_cast<uintmax_t>(free.QuadPart); return sp; } std::string temp_directory_path(std::error_code &ec) { wchar_t lpTempPathBuffer[MAX_PATH + 1]; if (!GetTempPathW(MAX_PATH, lpTempPathBuffer)) { ec.assign(GetLastError(), std::system_category()); return std::string(); } ec.clear(); return win::FromW(lpTempPathBuffer); } std::string create_temp_directory(std::error_code &ec) { wchar_t lpTempPathBuffer[MAX_PATH + 1]; std::wstring xbmcTempPath = win::ToW(temp_directory_path(ec)); if (ec) return std::string(); if (!GetTempFileNameW(xbmcTempPath.c_str(), L"xbm", 0, lpTempPathBuffer)) { ec.assign(GetLastError(), std::system_category()); return std::string(); } DeleteFileW(lpTempPathBuffer); if (!CreateDirectoryW(lpTempPathBuffer, nullptr)) { ec.assign(GetLastError(), std::system_category()); return std::string(); } ec.clear(); return win::FromW(lpTempPathBuffer); } std::string temp_file_path(const std::string&, std::error_code& ec) { wchar_t lpTempPathBuffer[MAX_PATH + 1]; std::wstring xbmcTempPath 
= win::ToW(create_temp_directory(ec)); if (ec) return std::string(); if (!GetTempFileNameW(xbmcTempPath.c_str(), L"xbm", 0, lpTempPathBuffer)) { ec.assign(GetLastError(), std::system_category()); return std::string(); } DeleteFileW(lpTempPathBuffer); ec.clear(); return win::FromW(lpTempPathBuffer); } } } }
gpl-2.0
acuicultor/android_kernel_oneplus_msm8974-1
net/ipv4/tcp_input.c
176266
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * Authors: Ross Biro * Fred N. van Kempen, <[email protected]> * Mark Evans, <[email protected]> * Corey Minyard <[email protected]> * Florian La Roche, <[email protected]> * Charles Hedrick, <[email protected]> * Linus Torvalds, <[email protected]> * Alan Cox, <[email protected]> * Matthew Dillon, <[email protected]> * Arnt Gulbrandsen, <[email protected]> * Jorge Cwik, <[email protected]> */ /* * Changes: * Pedro Roque : Fast Retransmit/Recovery. * Two receive queues. * Retransmit queue handled by TCP. * Better retransmit timer handling. * New congestion avoidance. * Header prediction. * Variable renaming. * * Eric : Fast Retransmit. * Randy Scott : MSS option defines. * Eric Schenk : Fixes to slow start algorithm. * Eric Schenk : Yet another double ACK bug. * Eric Schenk : Delayed ACK bug fixes. * Eric Schenk : Floyd style fast retrans war avoidance. * David S. Miller : Don't allow zero congestion window. * Eric Schenk : Fix retransmitter so that it sends * next packet on ack of previous packet. * Andi Kleen : Moved open_request checking here * and process RSTs for open_requests. * Andi Kleen : Better prune_queue, and other fixes. * Andrey Savochkin: Fix RTT measurements in the presence of * timestamps. * Andrey Savochkin: Check sequence numbers correctly when * removing SACKs due to in sequence incoming * data segments. * Andi Kleen: Make sure we never ack data there is not * enough room for. Also make this condition * a fatal error if it might still happen. * Andi Kleen: Add tcp_measure_rcv_mss to make * connections with MSS<min(MTU,ann. MSS) * work without delayed acks. * Andi Kleen: Process packets with PSH set in the * fast path. 
* J Hadi Salim: ECN support * Andrei Gurtov, * Pasi Sarolahti, * Panu Kuhlberg: Experimental audit of TCP (re)transmission * engine. Lots of bugs are found. * Pasi Sarolahti: F-RTO for dealing with spurious RTOs */ #define pr_fmt(fmt) "TCP: " fmt #include <linux/mm.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/sysctl.h> #include <linux/kernel.h> #include <net/dst.h> #include <net/tcp.h> #include <net/inet_common.h> #include <linux/ipsec.h> #include <asm/unaligned.h> #include <net/netdma.h> int sysctl_tcp_timestamps __read_mostly = 1; int sysctl_tcp_window_scaling __read_mostly = 1; int sysctl_tcp_sack __read_mostly = 1; int sysctl_tcp_fack __read_mostly = 1; int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; EXPORT_SYMBOL(sysctl_tcp_reordering); int sysctl_tcp_ecn __read_mostly = 2; EXPORT_SYMBOL(sysctl_tcp_ecn); int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; int sysctl_tcp_adv_win_scale __read_mostly = 1; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; int sysctl_tcp_max_orphans __read_mostly = NR_FILE; int sysctl_tcp_frto __read_mostly = 2; int sysctl_tcp_frto_response __read_mostly; int sysctl_tcp_nometrics_save __read_mostly; int sysctl_tcp_thin_dupack __read_mostly; int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; int sysctl_tcp_abc __read_mostly; int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND; #define FLAG_DATA 0x01 /* Incoming frame contained data. */ #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ #define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */ #define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */ #define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */ #define FLAG_DATA_SACKED 0x20 /* New SACK. 
*/ #define FLAG_ECE 0x40 /* ECE in this ACK */ #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) #define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED) #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) /* Adapt the MSS value used to make delayed ack decision to the * real world. */ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) { struct inet_connection_sock *icsk = inet_csk(sk); const unsigned int lss = icsk->icsk_ack.last_seg_size; unsigned int len; icsk->icsk_ack.last_seg_size = 0; /* skb->len may jitter because of SACKs, even if peer * sends good full-sized frames. */ len = skb_shinfo(skb)->gso_size ? : skb->len; if (len >= icsk->icsk_ack.rcv_mss) { icsk->icsk_ack.rcv_mss = len; } else { /* Otherwise, we make more careful check taking into account, * that SACKs block is variable. * * "len" is invariant segment length, including TCP header. */ len += skb->data - skb_transport_header(skb); if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) || /* If PSH is not set, packet should be * full sized, provided peer TCP is not badly broken. * This observation (if it is correct 8)) allows * to handle super-low mtu links fairly. 
*/ (len >= TCP_MIN_MSS + sizeof(struct tcphdr) && !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { /* Subtract also invariant (if peer is RFC compliant), * tcp header plus fixed timestamp option length. * Resulting "len" is MSS free of SACK jitter. */ len -= tcp_sk(sk)->tcp_header_len; icsk->icsk_ack.last_seg_size = len; if (len == lss) { icsk->icsk_ack.rcv_mss = len; return; } } if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2; icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; } } static void tcp_incr_quickack(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); if (quickacks == 0) quickacks = 2; if (quickacks > icsk->icsk_ack.quick) icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); } static void tcp_enter_quickack_mode(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); tcp_incr_quickack(sk); icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } /* Send ACKs quickly, if "quick" count is not exhausted * and the session is not interactive. 
 */
/* Quick-ACK mode test: true while the quick-ACK credit (icsk_ack.quick,
 * replenished by tcp_incr_quickack()) is unspent and the connection is not
 * in interactive "pingpong" mode.
 */
static inline int tcp_in_quickack_mode(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}

/* Request that CWR be sent on the next outgoing segment (ECN negotiated). */
static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

/* Peer set CWR in this segment: stop demanding CWR from it. */
static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (tcp_hdr(skb)->cwr)
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

/* Withdraw an outstanding demand for CWR from the peer. */
static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
{
	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

/* Inspect the IP ECN codepoint of an incoming segment and update ecn_flags.
 * No-op unless ECN was negotiated (TCP_ECN_OK).
 */
static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (!(tp->ecn_flags & TCP_ECN_OK))
		return;

	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
	case INET_ECN_NOT_ECT:
		/* Funny extension: if ECT is not set on a segment,
		 * and we already seen ECT on a previous segment,
		 * it is probably a retransmit.
		 */
		if (tp->ecn_flags & TCP_ECN_SEEN)
			tcp_enter_quickack_mode((struct sock *)tp);
		break;
	case INET_ECN_CE:
		/* Congestion Experienced: ask the peer to reduce its cwnd. */
		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		/* fall through */
	default:
		tp->ecn_flags |= TCP_ECN_SEEN;
	}
}

/* SYN-ACK ECN validation: if the reply does not carry a proper ECE-without-CWR
 * echo, the peer did not agree to ECN -- turn it off for this connection.
 */
static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

/* SYN ECN validation: a valid ECN-setup SYN must carry both ECE and CWR. */
static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

/* Return 1 if this ACK carries a genuine ECN-Echo (non-SYN, ECN negotiated). */
static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return 1;
	return 0;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_fixup_sndbuf(struct sock *sk)
{
	/* One maximally-sized segment's true memory cost... */
	int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);

	/* ...times the initial congestion window, capped by tcp_wmem[2];
	 * only ever grows the buffer, never shrinks it.
	 */
	sndmem *= TCP_INIT_CWND;
	if (sk->sk_sndbuf < sndmem)
		sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
}

/* 2.
Tuning advertised window (window_clamp, rcv_ssthresh) * * All tcp_full_space() is split to two parts: "network" buffer, allocated * forward and advertised in receiver window (tp->rcv_wnd) and * "application buffer", required to isolate scheduling/application * latencies from network. * window_clamp is maximal advertised window. It can be less than * tcp_full_space(), in this case tcp_full_space() - window_clamp * is reserved for "application" buffer. The less window_clamp is * the smoother our behaviour from viewpoint of network, but the lower * throughput and the higher sensitivity of the connection to losses. 8) * * rcv_ssthresh is more strict window_clamp used at "slow start" * phase to predict further behaviour of this connection. * It is used for two goals: * - to enforce header prediction at sender, even when application * requires some significant "application buffer". It is check #1. * - to prevent pruning of receive queue because of misprediction * of receiver window. Check #2. * * The scheme does not work when sender sends good segments opening * window and then starts to feed us spaghetti. But it should work * in common situations. Otherwise, we have to rely on queue collapsing. */ /* Slow part of check#2. */ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); /* Optimize this! */ int truesize = tcp_win_from_space(skb->truesize) >> 1; int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1; while (tp->rcv_ssthresh <= window) { if (truesize <= skb->len) return 2 * inet_csk(sk)->icsk_ack.rcv_mss; truesize >>= 1; window >>= 1; } return 0; } static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); /* Check #1 */ if (tp->rcv_ssthresh < tp->window_clamp && (int)tp->rcv_ssthresh < tcp_space(sk) && !sk_under_memory_pressure(sk)) { int incr; /* Check #2. Increase window, if skb with such overhead * will fit to rcvbuf in future. 
*/ if (tcp_win_from_space(skb->truesize) <= skb->len) incr = 2 * tp->advmss; else incr = __tcp_grow_window(sk, skb); if (incr) { incr = max_t(int, incr, 2 * skb->len); tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); inet_csk(sk)->icsk_ack.quick |= 1; } } } /* 3. Tuning rcvbuf, when connection enters established state. */ static void tcp_fixup_rcvbuf(struct sock *sk) { u32 mss = tcp_sk(sk)->advmss; u32 icwnd = sysctl_tcp_default_init_rwnd; int rcvmem; /* Limit to 10 segments if mss <= 1460, * or 14600/mss segments, with a minimum of two segments. */ if (mss > 1460) icwnd = max_t(u32, (1460 * icwnd) / mss, 2); rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER); while (tcp_win_from_space(rcvmem) < mss) rcvmem += 128; rcvmem *= icwnd; if (sk->sk_rcvbuf < rcvmem) sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]); } /* 4. Try to fixup all. It is made immediately after connection enters * established state. */ static void tcp_init_buffer_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int maxwin; if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) tcp_fixup_rcvbuf(sk); if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) tcp_fixup_sndbuf(sk); tp->rcvq_space.space = tp->rcv_wnd; maxwin = tcp_full_space(sk); if (tp->window_clamp >= maxwin) { tp->window_clamp = maxwin; if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss) tp->window_clamp = max(maxwin - (maxwin >> sysctl_tcp_app_win), 4 * tp->advmss); } /* Force reservation of one segment. */ if (sysctl_tcp_app_win && tp->window_clamp > 2 * tp->advmss && tp->window_clamp + tp->advmss > maxwin) tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss); tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); tp->snd_cwnd_stamp = tcp_time_stamp; } /* 5. Recalculate window clamp after socket hit its memory bounds. 
 */
/* Shrink/adjust the receive buffer and rcv_ssthresh after the socket has hit
 * its memory bounds.  Also cancels any outstanding quick-ACK credit, since we
 * are about to become less generous with window advertisement.
 */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick = 0;

	/* Grow sk_rcvbuf up to current usage only if we are below the global
	 * limit, the application has not locked the buffer size, and there is
	 * no memory pressure.
	 */
	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !sk_under_memory_pressure(sk) &&
	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    sysctl_tcp_rmem[2]);
	}
	/* Still over budget: clamp the slow-start receive threshold hard. */
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about MSS used by the peer.
 * We haven't any direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACKing less frequently than needed.
 * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	/* Clamp into [TCP_MIN_MSS, min(rcv_wnd/2, TCP_MSS_DEFAULT)]. */
	hint = min(hint, tp->rcv_wnd / 2);
	hint = min(hint, TCP_MSS_DEFAULT);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
EXPORT_SYMBOL(tcp_initialize_rcv_mss);

/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <http://public.lanl.gov/radiant/pubs.html#DRS>
 *
 * More detail on this code can be found at
 * <http://staff.psc.edu/jheffner/>,
 * though this reference is out of date.  A new paper
 * is pending.
 */
/* Fold one RTT sample (in jiffies) into the receiver-side RTT estimate
 * rcv_rtt_est.rtt, which is kept scaled by 8.  @win_dep selects the
 * windowed-minimum update (nonzero) vs. the EWMA update (zero).
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
	u32 new_sample = tp->rcv_rtt_est.rtt;
	long m = sample;

	if (m == 0)
		m = 1;

	if (new_sample != 0) {
		/* If we sample in larger samples in the non-timestamp
		 * case, we could grossly overestimate the RTT especially
		 * with chatty applications or bulk transfer apps which
		 * are stalled on filesystem I/O.
* * Also, since we are only going for a minimum in the * non-timestamp case, we do not smooth things out * else with timestamps disabled convergence takes too * long. */ if (!win_dep) { m -= (new_sample >> 3); new_sample += m; } else { m <<= 3; if (m < new_sample) new_sample = m; } } else { /* No previous measure. */ new_sample = m << 3; } if (tp->rcv_rtt_est.rtt != new_sample) tp->rcv_rtt_est.rtt = new_sample; } static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) { if (tp->rcv_rtt_est.time == 0) goto new_measure; if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) return; tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1); new_measure: tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; tp->rcv_rtt_est.time = tcp_time_stamp; } static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (tp->rx_opt.rcv_tsecr && (TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); } /* * This function should be called every time data is copied to user space. * It calculates the appropriate TCP receive buffer space. */ void tcp_rcv_space_adjust(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int time; int space; if (tp->rcvq_space.time == 0) goto new_measure; time = tcp_time_stamp - tp->rcvq_space.time; if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0) return; space = 2 * (tp->copied_seq - tp->rcvq_space.seq); space = max(tp->rcvq_space.space, space); if (tp->rcvq_space.space != space) { int rcvmem; tp->rcvq_space.space = space; if (sysctl_tcp_moderate_rcvbuf && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { int new_clamp = space; /* Receive space grows, normalize in order to * take into account packet headers and sk_buff * structure overhead. 
*/ space /= tp->advmss; if (!space) space = 1; rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER); while (tcp_win_from_space(rcvmem) < tp->advmss) rcvmem += 128; space *= rcvmem; space = min(space, sysctl_tcp_rmem[2]); if (space > sk->sk_rcvbuf) { sk->sk_rcvbuf = space; /* Make the window clamp follow along. */ tp->window_clamp = new_clamp; } } } new_measure: tp->rcvq_space.seq = tp->copied_seq; tp->rcvq_space.time = tcp_time_stamp; } /* There is something which you must keep in mind when you analyze the * behavior of the tp->ato delayed ack timeout interval. When a * connection starts up, we want to ack as quickly as possible. The * problem is that "good" TCP's do slow start at the beginning of data * transmission. The means that until we send the first few ACK's the * sender will sit on his end and only queue most of his data, because * he can only send snd_cwnd unacked packets at any given time. For * each ACK we send, he increments snd_cwnd and transmits more of his * queue. -DaveM */ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); u32 now; inet_csk_schedule_ack(sk); tcp_measure_rcv_mss(sk, skb); tcp_rcv_rtt_measure(tp); now = tcp_time_stamp; if (!icsk->icsk_ack.ato) { /* The _first_ data packet received, initialize * delayed ACK engine. */ tcp_incr_quickack(sk); icsk->icsk_ack.ato = TCP_ATO_MIN; } else { int m = now - icsk->icsk_ack.lrcvtime; if (m <= TCP_ATO_MIN / 2) { /* The fastest case is the first. */ icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; } else if (m < icsk->icsk_ack.ato) { icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; if (icsk->icsk_ack.ato > icsk->icsk_rto) icsk->icsk_ack.ato = icsk->icsk_rto; } else if (m > icsk->icsk_rto) { /* Too long gap. Apparently sender failed to * restart window, so that we send ACKs quickly. 
*/ tcp_incr_quickack(sk); sk_mem_reclaim(sk); } } icsk->icsk_ack.lrcvtime = now; TCP_ECN_check_ce(tp, skb); if (skb->len >= 128) tcp_grow_window(sk, skb); } /* Called to compute a smoothed rtt estimate. The data fed to this * routine either comes from timestamps, or from segments that were * known _not_ to have been retransmitted [see Karn/Partridge * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88 * piece by Van Jacobson. * NOTE: the next three routines used to be one big routine. * To save cycles in the RFC 1323 implementation it was better to break * it up into three procedures. -- erics */ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt) { struct tcp_sock *tp = tcp_sk(sk); long m = mrtt; /* RTT */ /* The following amusing code comes from Jacobson's * article in SIGCOMM '88. Note that rtt and mdev * are scaled versions of rtt and mean deviation. * This is designed to be as fast as possible * m stands for "measurement". * * On a 1990 paper the rto value is changed to: * RTO = rtt + 4 * mdev * * Funny. This algorithm seems to be very broken. * These formulae increase RTO, when it should be decreased, increase * too slowly, when it should be increased quickly, decrease too quickly * etc. I guess in BSD RTO takes ONE value, so that it is absolutely * does not matter how to _calculate_ it. Seems, it was trap * that VJ failed to avoid. 8) */ if (m == 0) m = 1; if (tp->srtt != 0) { m -= (tp->srtt >> 3); /* m is now error in rtt est */ tp->srtt += m; /* rtt = 7/8 rtt + 1/8 new */ if (m < 0) { m = -m; /* m is now abs(error) */ m -= (tp->mdev >> 2); /* similar update on mdev */ /* This is similar to one of Eifel findings. * Eifel blocks mdev updates when rtt decreases. * This solution is a bit different: we use finer gain * for mdev in this case (alpha*beta). * Like Eifel it also prevents growth of rto, * but also it limits too fast rto decreases, * happening in pure Eifel. 
*/ if (m > 0) m >>= 3; } else { m -= (tp->mdev >> 2); /* similar update on mdev */ } tp->mdev += m; /* mdev = 3/4 mdev + 1/4 new */ if (tp->mdev > tp->mdev_max) { tp->mdev_max = tp->mdev; if (tp->mdev_max > tp->rttvar) tp->rttvar = tp->mdev_max; } if (after(tp->snd_una, tp->rtt_seq)) { if (tp->mdev_max < tp->rttvar) tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2; tp->rtt_seq = tp->snd_nxt; tp->mdev_max = tcp_rto_min(sk); } } else { /* no previous measure. */ tp->srtt = m << 3; /* take the measured time to be rtt */ tp->mdev = m << 1; /* make sure rto = 3*rtt */ tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); tp->rtt_seq = tp->snd_nxt; } } /* Calculate rto without backoff. This is the second half of Van Jacobson's * routine referred to above. */ static inline void tcp_set_rto(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); /* Old crap is replaced with new one. 8) * * More seriously: * 1. If rtt variance happened to be less 50msec, it is hallucination. * It cannot be less due to utterly erratic ACK generation made * at least by solaris and freebsd. "Erratic ACKs" has _nothing_ * to do with delayed acks, because at cwnd>2 true delack timeout * is invisible. Actually, Linux-2.4 also generates erratic * ACKs in some circumstances. */ inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); /* 2. Fixups made earlier cannot be right. * If we do not estimate RTO correctly without them, * all the algo is pure shit and should be replaced * with correct one. It is exactly, which we pretend to do. */ /* NOTE: clamping at TCP_RTO_MIN is not required, current algo * guarantees that rto is higher. */ tcp_bound_rto(sk); } /* Save metrics learned by this TCP session. This function is called only, when TCP finishes successfully i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE. 
*/ void tcp_update_metrics(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = __sk_dst_get(sk); if (sysctl_tcp_nometrics_save) return; dst_confirm(dst); if (dst && (dst->flags & DST_HOST)) { const struct inet_connection_sock *icsk = inet_csk(sk); int m; unsigned long rtt; if (icsk->icsk_backoff || !tp->srtt) { /* This session failed to estimate rtt. Why? * Probably, no packets returned in time. * Reset our results. */ if (!(dst_metric_locked(dst, RTAX_RTT))) dst_metric_set(dst, RTAX_RTT, 0); return; } rtt = dst_metric_rtt(dst, RTAX_RTT); m = rtt - tp->srtt; /* If newly calculated rtt larger than stored one, * store new one. Otherwise, use EWMA. Remember, * rtt overestimation is always better than underestimation. */ if (!(dst_metric_locked(dst, RTAX_RTT))) { if (m <= 0) set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt); else set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3)); } if (!(dst_metric_locked(dst, RTAX_RTTVAR))) { unsigned long var; if (m < 0) m = -m; /* Scale deviation to rttvar fixed point */ m >>= 1; if (m < tp->mdev) m = tp->mdev; var = dst_metric_rtt(dst, RTAX_RTTVAR); if (m >= var) var = m; else var -= (var - m) >> 2; set_dst_metric_rtt(dst, RTAX_RTTVAR, var); } if (tcp_in_initial_slowstart(tp)) { /* Slow start still did not finish. */ if (dst_metric(dst, RTAX_SSTHRESH) && !dst_metric_locked(dst, RTAX_SSTHRESH) && (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH)) dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1); if (!dst_metric_locked(dst, RTAX_CWND) && tp->snd_cwnd > dst_metric(dst, RTAX_CWND)) dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd); } else if (tp->snd_cwnd > tp->snd_ssthresh && icsk->icsk_ca_state == TCP_CA_Open) { /* Cong. avoidance phase, cwnd is reliable. 
 */
			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
				dst_metric_set(dst, RTAX_SSTHRESH,
					       max(tp->snd_cwnd >> 1,
						   tp->snd_ssthresh));
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst_metric_set(dst, RTAX_CWND,
					       (dst_metric(dst, RTAX_CWND) +
						tp->snd_cwnd) >> 1);
		} else {
			/* Else slow start did not finish, cwnd is non-sense,
			   ssthresh may be also invalid.
			 */
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst_metric_set(dst, RTAX_CWND,
					       (dst_metric(dst, RTAX_CWND) +
						tp->snd_ssthresh) >> 1);
			if (dst_metric(dst, RTAX_SSTHRESH) &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
				dst_metric_set(dst, RTAX_SSTHRESH,
					       tp->snd_ssthresh);
		}

		/* Remember observed reordering in the route metrics, too. */
		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				dst_metric_set(dst, RTAX_REORDERING,
					       tp->reordering);
		}
	}
}

/* Pick the initial congestion window: the route's RTAX_INITCWND metric if
 * set, otherwise TCP_INIT_CWND; always bounded by snd_cwnd_clamp.
 */
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd)
		cwnd = TCP_INIT_CWND;
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

/* Set slow start threshold and cwnd not falling to slow start */
void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	tp->prior_ssthresh = 0;
	tp->bytes_acked = 0;
	/* Only act if we are not already in CWR or a worse (Recovery/Loss)
	 * congestion state.
	 */
	if (icsk->icsk_ca_state < TCP_CA_CWR) {
		tp->undo_marker = 0;
		if (set_ssthresh)
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		/* Drop cwnd to what is actually in flight plus one segment. */
		tp->snd_cwnd = min(tp->snd_cwnd,
				   tcp_packets_in_flight(tp) + 1U);
		tp->snd_cwnd_cnt = 0;
		tp->high_seq = tp->snd_nxt;
		tp->snd_cwnd_stamp = tcp_time_stamp;
		TCP_ECN_queue_cwr(tp);

		tcp_set_ca_state(sk, TCP_CA_CWR);
	}
}

/*
 * Packet counting of FACK is based on in-order assumptions, therefore TCP
 * disables it when reordering is detected
 */
static void tcp_disable_fack(struct tcp_sock *tp)
{
	/* RFC3517 uses different metric in lost marker => reset on change */
	if (tcp_is_fack(tp))
		tp->lost_skb_hint = NULL;
	tp->rx_opt.sack_ok &=
~TCP_FACK_ENABLED; } /* Take a notice that peer is sending D-SACKs */ static void tcp_dsack_seen(struct tcp_sock *tp) { tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; } /* Initialize metrics on socket. */ static void tcp_init_metrics(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = __sk_dst_get(sk); if (dst == NULL) goto reset; dst_confirm(dst); if (dst_metric_locked(dst, RTAX_CWND)) tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND); if (dst_metric(dst, RTAX_SSTHRESH)) { tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH); if (tp->snd_ssthresh > tp->snd_cwnd_clamp) tp->snd_ssthresh = tp->snd_cwnd_clamp; } else { /* ssthresh may have been reduced unnecessarily during. * 3WHS. Restore it back to its initial default. */ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; } if (dst_metric(dst, RTAX_REORDERING) && tp->reordering != dst_metric(dst, RTAX_REORDERING)) { tcp_disable_fack(tp); tp->reordering = dst_metric(dst, RTAX_REORDERING); } if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0) goto reset; /* Initial rtt is determined from SYN,SYN-ACK. * The segment is small and rtt may appear much * less than real one. Use per-dst memory * to make it more realistic. * * A bit of theory. RTT is time passed after "normal" sized packet * is sent until it is ACKed. In normal circumstances sending small * packets force peer to delay ACKs and calculation is correct too. * The algorithm is adaptive and, provided we follow specs, it * NEVER underestimate RTT. BUT! If peer tries to make some clever * tricks sort of "quick acks" for time long enough to decrease RTT * to low value, and then abruptly stops to do it and starts to delay * ACKs, wait for troubles. 
*/ if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) { tp->srtt = dst_metric_rtt(dst, RTAX_RTT); tp->rtt_seq = tp->snd_nxt; } if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) { tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR); tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); } tcp_set_rto(sk); reset: if (tp->srtt == 0) { /* RFC2988bis: We've failed to get a valid RTT sample from * 3WHS. This is most likely due to retransmission, * including spurious one. Reset the RTO back to 3secs * from the more aggressive 1sec to avoid more spurious * retransmission. */ tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK; } /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been * retransmitted. In light of RFC2988bis' more aggressive 1sec * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK * retransmission has occurred. */ if (tp->total_retrans > 1) tp->snd_cwnd = 1; else tp->snd_cwnd = tcp_init_cwnd(tp, dst); tp->snd_cwnd_stamp = tcp_time_stamp; } static void tcp_update_reordering(struct sock *sk, const int metric, const int ts) { struct tcp_sock *tp = tcp_sk(sk); if (metric > tp->reordering) { int mib_idx; tp->reordering = min(TCP_MAX_REORDERING, metric); /* This exciting event is worth to be remembered. 8) */ if (ts) mib_idx = LINUX_MIB_TCPTSREORDER; else if (tcp_is_reno(tp)) mib_idx = LINUX_MIB_TCPRENOREORDER; else if (tcp_is_fack(tp)) mib_idx = LINUX_MIB_TCPFACKREORDER; else mib_idx = LINUX_MIB_TCPSACKREORDER; NET_INC_STATS_BH(sock_net(sk), mib_idx); #if FASTRETRANS_DEBUG > 1 printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n", tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, tp->reordering, tp->fackets_out, tp->sacked_out, tp->undo_marker ? 
		       tp->undo_retrans : 0);
#endif
		/* Reordering observed => FACK's in-order assumption is wrong. */
		tcp_disable_fack(tp);
	}
}

/* This must be called before lost_out is incremented */
/* Keep retransmit_skb_hint pointing at the lowest-seq skb needing
 * retransmission, and push retransmit_high forward to cover @skb.
 */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
	if ((tp->retransmit_skb_hint == NULL) ||
	    before(TCP_SKB_CB(skb)->seq,
		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
		tp->retransmit_skb_hint = skb;

	if (!tp->lost_out ||
	    after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
		tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}

/* Tag @skb lost and account it in lost_out, unless it is already marked
 * lost or has been SACKed.
 */
static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tcp_verify_retransmit_hint(tp, skb);

		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

/* Like tcp_skb_mark_lost(), but always refreshes the retransmit hint first,
 * even when the skb ends up not being (re)marked.
 */
static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
					    struct sk_buff *skb)
{
	tcp_verify_retransmit_hint(tp, skb);

	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag  InFlight	Description
 * 0	1		- orig segment is in flight.
 * S	0		- nothing flies, orig reached receiver.
 * L	0		- nothing flies, orig lost by net.
 * R	2		- both orig and retransmit are in flight.
 * L|R	1		- orig is lost, retransmit is in flight.
 * S|R  1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3.
Loss detection event of two flavors: * A. Scoreboard estimator decided the packet is lost. * A'. Reno "three dupacks" marks head of queue lost. * A''. Its FACK modification, head until snd.fack is lost. * B. SACK arrives sacking SND.NXT at the moment, when the * segment was retransmitted. * 4. D-SACK added new rule: D-SACK changes any tag to S. * * It is pleasant to note, that state diagram turns out to be commutative, * so that we are allowed not to be bothered by order of our actions, * when multiple events arrive simultaneously. (see the function below). * * Reordering detection. * -------------------- * Reordering metric is maximal distance, which a packet can be displaced * in packet stream. With SACKs we can estimate it: * * 1. SACK fills old hole and the corresponding segment was not * ever retransmitted -> reordering. Alas, we cannot use it * when segment was retransmitted. * 2. The last flaw is solved with D-SACK. D-SACK arrives * for retransmitted and already SACKed segment -> reordering.. * Both of these heuristics are not used in Loss state, when we cannot * account for retransmits accurately. * * SACK block validation. * ---------------------- * * SACK block range validation checks that the received SACK block fits to * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT. * Note that SND.UNA is not included to the range though being valid because * it means that the receiver is rather inconsistent with itself reporting * SACK reneging when it should advance SND.UNA. Such SACK block this is * perfectly valid, however, in light of RFC2018 which explicitly states * that "SACK block MUST reflect the newest segment. Even if the newest * segment is going to be discarded ...", not that it looks very clever * in case of head skb. 
Due to potentional receiver driven attacks, we * choose to avoid immediate execution of a walk in write queue due to * reneging and defer head skb's loss recovery to standard loss recovery * procedure that will eventually trigger (nothing forbids us doing this). * * Implements also blockage to start_seq wrap-around. Problem lies in the * fact that though start_seq (s) is before end_seq (i.e., not reversed), * there's no guarantee that it will be before snd_nxt (n). The problem * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt * wrap (s_w): * * <- outs wnd -> <- wrapzone -> * u e n u_w e_w s n_w * | | | | | | | * |<------------+------+----- TCP seqno space --------------+---------->| * ...-- <2^31 ->| |<--------... * ...---- >2^31 ------>| |<--------... * * Current code wouldn't be vulnerable but it's better still to discard such * crazy SACK blocks. Doing this check for start_seq alone closes somewhat * similar case (end_seq after snd_nxt wrap) as earlier reversed check in * snd_nxt wrap -> snd_una region will then become "well defined", i.e., * equal to the ideal case (infinite seqno space without wrap caused issues). * * With D-SACK the lower bound is extended to cover sequence space below * SND.UNA down to undo_marker, which is the last point of interest. Yet * again, D-SACK block must not to go across snd_una (for the same reason as * for the normal SACK blocks, explained above). But there all simplicity * ends, TCP might receive valid D-SACKs below that. As long as they reside * fully below undo_marker they do not affect behavior in anyway and can * therefore be safely ignored. In rare cases (which are more or less * theoretical ones), the D-SACK will nicely cross that boundary due to skb * fragmentation and packet reordering past skb's retransmission. To consider * them correctly, the acceptable range must be extended even more though * the exact amount is rather hard to quantify. 
   However, tp->max_window can
 * be used as an exaggerated estimate.
 */
/* Validate a received SACK block [start_seq, end_seq) against the sequence
 * limits discussed above.  Returns 1 when the block is acceptable (a normal
 * SACK inside the outstanding window, or a D-SACK in the permitted region
 * below snd_una), 0 otherwise.
 */
static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
				  u32 start_seq, u32 end_seq)
{
	/* Too far in future, or reversed (interpretation is ambiguous) */
	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
		return 0;

	/* Nasty start_seq wrap-around check (see comments above) */
	if (!before(start_seq, tp->snd_nxt))
		return 0;

	/* In outstanding window? ...This is valid exit for D-SACKs too.
	 * start_seq == snd_una is non-sensical (see comments above)
	 */
	if (after(start_seq, tp->snd_una))
		return 1;

	if (!is_dsack || !tp->undo_marker)
		return 0;

	/* ...Then it's D-SACK, and must reside below snd_una completely */
	if (after(end_seq, tp->snd_una))
		return 0;

	if (!before(start_seq, tp->undo_marker))
		return 1;

	/* Too old */
	if (!after(end_seq, tp->undo_marker))
		return 0;

	/* Undo_marker boundary crossing (overestimates a lot). Known already:
	 *   start_seq < undo_marker and end_seq >= undo_marker.
	 */
	return !before(start_seq, end_seq - tp->max_window);
}

/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
 * Event "B". Later note: FACK people cheated me again 8), we have to account
 * for reordering! Ugly, but should help.
 *
 * Search retransmitted skbs from write_queue that were sent when snd_nxt was
 * less than what is now known to be received by the other end (derived from
 * highest SACK block). Also calculate the lowest snd_nxt among the remaining
 * retransmitted skbs to avoid some costly processing per ACKs.
*/ static void tcp_mark_lost_retrans(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int cnt = 0; u32 new_low_seq = tp->snd_nxt; u32 received_upto = tcp_highest_sack_seq(tp); if (!tcp_is_fack(tp) || !tp->retrans_out || !after(received_upto, tp->lost_retrans_low) || icsk->icsk_ca_state != TCP_CA_Recovery) return; tcp_for_write_queue(skb, sk) { u32 ack_seq = TCP_SKB_CB(skb)->ack_seq; if (skb == tcp_send_head(sk)) break; if (cnt == tp->retrans_out) break; if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) continue; if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) continue; /* TODO: We would like to get rid of tcp_is_fack(tp) only * constraint here (see above) but figuring out that at * least tp->reordering SACK blocks reside between ack_seq * and received_upto is not easy task to do cheaply with * the available datastructures. * * Whether FACK should check here for tp->reordering segs * in-between one could argue for either way (it would be * rather simple to implement as we could count fack_count * during the walk and do tp->fackets_out - fack_count). 
 */
		if (after(received_upto, ack_seq)) {
			/* Highest SACK proves the rexmit was lost: strip the
			 * RETRANS tag and mark the segment lost.
			 */
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
			tp->retrans_out -= tcp_skb_pcount(skb);

			tcp_skb_mark_lost_uncond_verify(tp, skb);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPLOSTRETRANSMIT);
		} else {
			if (before(ack_seq, new_low_seq))
				new_low_seq = ack_seq;
			cnt += tcp_skb_pcount(skb);
		}
	}

	if (tp->retrans_out)
		tp->lost_retrans_low = new_low_seq;
}

/* Detect whether the incoming SACK option carries a D-SACK (duplicate-SACK,
 * RFC 2883): either the first block is below the cumulative ACK, or it is
 * fully contained in the second block.  Returns 1 on D-SACK, 0 otherwise,
 * and decrements undo_retrans for D-SACKs covering already-forgotten data.
 */
static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
			   struct tcp_sack_block_wire *sp, int num_sacks,
			   u32 prior_snd_una)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
	int dup_sack = 0;

	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
		dup_sack = 1;
		tcp_dsack_seen(tp);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
	} else if (num_sacks > 1) {
		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);

		if (!after(end_seq_0, end_seq_1) &&
		    !before(start_seq_0, start_seq_1)) {
			dup_sack = 1;
			tcp_dsack_seen(tp);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPDSACKOFORECV);
		}
	}

	/* D-SACK for already forgotten data... Do dumb counting. */
	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
	    !after(end_seq_0, prior_snd_una) &&
	    after(end_seq_0, tp->undo_marker))
		tp->undo_retrans--;

	return dup_sack;
}

/* Scratch state threaded through one SACK-tagging pass. */
struct tcp_sacktag_state {
	int reord;	/* lowest fack_count at which reordering was seen */
	int fack_count;	/* running forward-ACK segment count */
	int flag;	/* FLAG_* bits accumulated for the caller */
};

/* Check if skb is fully within the SACK block. In presence of GSO skbs,
 * the incoming SACK may not exactly match but we can find smaller MSS
 * aligned portion of it that matches. Therefore we might need to fragment
 * which may fail and creates some hassle (caller must handle error case
 * returns).
 *
 * FIXME: this could be merged to shift decision code
 */
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
				 u32 start_seq, u32 end_seq)
{
	int in_sack, err;
	unsigned int pkt_len;
	unsigned int mss;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	/* Partial overlap on a multi-segment (GSO) skb: fragment so that
	 * the SACKed portion becomes its own skb.
	 */
	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
		mss = tcp_skb_mss(skb);
		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);

		if (!in_sack) {
			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				pkt_len = mss;
		} else {
			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				return -EINVAL;
		}

		/* Round if necessary so that SACKs cover only full MSSes
		 * and/or the remaining small portion (if present)
		 */
		if (pkt_len > mss) {
			unsigned int new_len = (pkt_len / mss) * mss;
			if (!in_sack && new_len < pkt_len) {
				new_len += mss;
				if (new_len > skb->len)
					return 0;
			}
			pkt_len = new_len;
		}
		err = tcp_fragment(sk, skb, pkt_len, mss);
		if (err < 0)
			return err;
	}

	return in_sack;
}

/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
static u8 tcp_sacktag_one(struct sock *sk,
			  struct tcp_sacktag_state *state, u8 sacked,
			  u32 start_seq, u32 end_seq,
			  int dup_sack, int pcount)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int fack_count = state->fack_count;

	/* Account D-SACK for retransmitted packet. */
	if (dup_sack && (sacked & TCPCB_RETRANS)) {
		if (tp->undo_marker && tp->undo_retrans &&
		    after(end_seq, tp->undo_marker))
			tp->undo_retrans--;
		if (sacked & TCPCB_SACKED_ACKED)
			state->reord = min(fack_count, state->reord);
	}

	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
	if (!after(end_seq, tp->snd_una))
		return sacked;

	if (!(sacked & TCPCB_SACKED_ACKED)) {
		if (sacked & TCPCB_SACKED_RETRANS) {
			/* If the segment is not tagged as lost,
			 * we do not clear RETRANS, believing
			 * that retransmission is still in flight.
			 */
			if (sacked & TCPCB_LOST) {
				sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
				tp->lost_out -= pcount;
				tp->retrans_out -= pcount;
			}
		} else {
			if (!(sacked & TCPCB_RETRANS)) {
				/* New sack for not retransmitted frame,
				 * which was in hole. It is reordering.
				 */
				if (before(start_seq,
					   tcp_highest_sack_seq(tp)))
					state->reord = min(fack_count,
							   state->reord);

				/* SACK enhanced F-RTO (RFC4138; Appendix B) */
				if (!after(end_seq, tp->frto_highmark))
					state->flag |= FLAG_ONLY_ORIG_SACKED;
			}

			if (sacked & TCPCB_LOST) {
				sacked &= ~TCPCB_LOST;
				tp->lost_out -= pcount;
			}
		}

		sacked |= TCPCB_SACKED_ACKED;
		state->flag |= FLAG_DATA_SACKED;
		tp->sacked_out += pcount;

		fack_count += pcount;

		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
		if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
			tp->lost_cnt_hint += pcount;

		if (fack_count > tp->fackets_out)
			tp->fackets_out = fack_count;
	}

	/* D-SACK. We can detect redundant retransmission in S|R and plain R
	 * frames and clear it. undo_retrans is decreased above, L|R frames
	 * are accounted above as well.
	 */
	if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
		sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= pcount;
	}

	return sacked;
}

/* Shift newly-SACKed bytes from this skb to the immediately previous
 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
 */
static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
			   struct tcp_sacktag_state *state,
			   unsigned int pcount, int shifted, int mss,
			   int dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */

	BUG_ON(!pcount);

	/* Adjust counters and hints for the newly sacked sequence
	 * range but discard the return value since prev is already
	 * marked. We must tag the range first because the seq
	 * advancement below implicitly advances
	 * tcp_highest_sack_seq() when skb is highest_sack.
	 */
	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
			start_seq, end_seq, dup_sack, pcount);

	if (skb == tp->lost_skb_hint)
		tp->lost_cnt_hint += pcount;

	/* Move the shifted range's sequence space into prev. */
	TCP_SKB_CB(prev)->end_seq += shifted;
	TCP_SKB_CB(skb)->seq += shifted;

	skb_shinfo(prev)->gso_segs += pcount;
	BUG_ON(skb_shinfo(skb)->gso_segs < pcount);
	skb_shinfo(skb)->gso_segs -= pcount;

	/* When we're adding to gso_segs == 1, gso_size will be zero,
	 * in theory this shouldn't be necessary but as long as DSACK
	 * code can come after this skb later on it's better to keep
	 * setting gso_size to something.
	 */
	if (!skb_shinfo(prev)->gso_size) {
		skb_shinfo(prev)->gso_size = mss;
		skb_shinfo(prev)->gso_type = sk->sk_gso_type;
	}

	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
	if (skb_shinfo(skb)->gso_segs <= 1) {
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	}

	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);

	if (skb->len > 0) {
		BUG_ON(!tcp_skb_pcount(skb));
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
		return 0;
	}

	/* Whole SKB was eaten :-) */

	if (skb == tp->retransmit_skb_hint)
		tp->retransmit_skb_hint = prev;
	if (skb == tp->scoreboard_skb_hint)
		tp->scoreboard_skb_hint = prev;
	if (skb == tp->lost_skb_hint) {
		tp->lost_skb_hint = prev;
		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
	}

	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
	if (skb == tcp_highest_sack(sk))
		tcp_advance_highest_sack(sk, skb);

	tcp_unlink_write_queue(skb, sk);
	sk_wmem_free_skb(sk, skb);

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);

	return 1;
}

/* I wish gso_size would have a bit more sane initialization than
 * something-or-zero which complicates things
 */
static int tcp_skb_seglen(const struct sk_buff *skb)
{
	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
}

/* Shifting pages past head area doesn't work */
static int skb_can_shift(const struct sk_buff *skb)
{
	return !skb_headlen(skb) && skb_is_nonlinear(skb);
}

/* Try collapsing SACK blocks spanning across multiple skbs to a single
 * skb.
 */
static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
					  struct tcp_sacktag_state *state,
					  u32 start_seq, u32 end_seq,
					  int dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *prev;
	int mss;
	int pcount = 0;
	int len;
	int in_sack;

	if (!sk_can_gso(sk))
		goto fallback;

	/* Normally R but no L won't result in plain S */
	if (!dup_sack &&
	    (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
		goto fallback;
	if (!skb_can_shift(skb))
		goto fallback;
	/* This frame is about to be dropped (was ACKed). */
	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
		goto fallback;

	/* Can only happen with delayed DSACK + discard craziness */
	if (unlikely(skb == tcp_write_queue_head(sk)))
		goto fallback;
	prev = tcp_write_queue_prev(sk, skb);

	/* Can only merge into an skb that is purely SACKed. */
	if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
		goto fallback;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (in_sack) {
		len = skb->len;
		pcount = tcp_skb_pcount(skb);
		mss = tcp_skb_seglen(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;
	} else {
		if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
			goto noop;
		/* CHECKME: This is non-MSS split case only?, this will
		 * cause skipped skbs due to advancing loop btw, original
		 * has that feature too
		 */
		if (tcp_skb_pcount(skb) <= 1)
			goto noop;

		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
		if (!in_sack) {
			/* TODO: head merge to next could be attempted here
			 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
			 * though it might not be worth of the additional hassle
			 *
			 * ...we can probably just fallback to what was done
			 * previously. We could try merging non-SACKed ones
			 * as well but it probably isn't going to buy off
			 * because later SACKs might again split them, and
			 * it would make skb timestamp tracking considerably
			 * harder problem.
			 */
			goto fallback;
		}

		len = end_seq - TCP_SKB_CB(skb)->seq;
		BUG_ON(len < 0);
		BUG_ON(len > skb->len);

		/* MSS boundaries should be honoured or else pcount will
		 * severely break even though it makes things bit trickier.
		 * Optimize common case to avoid most of the divides
		 */
		mss = tcp_skb_mss(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;

		if (len == mss) {
			pcount = 1;
		} else if (len < mss) {
			goto noop;
		} else {
			pcount = len / mss;
			len = pcount * mss;
		}
	}

	/* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
		goto fallback;

	if (!skb_shift(prev, skb, len))
		goto fallback;
	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
		goto out;

	/* Hole filled allows collapsing with the next as well, this is very
	 * useful when hole on every nth skb pattern happens
	 */
	if (prev == tcp_write_queue_tail(sk))
		goto out;
	skb = tcp_write_queue_next(sk, prev);

	if (!skb_can_shift(skb) ||
	    (skb == tcp_send_head(sk)) ||
	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
	    (mss != tcp_skb_seglen(skb)))
		goto out;

	len = skb->len;
	if (skb_shift(prev, skb, len)) {
		pcount += tcp_skb_pcount(skb);
		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
	}

out:
	state->fack_count += pcount;
	return prev;

noop:
	return skb;

fallback:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
	return NULL;
}

/* Walk the retransmit queue from @skb, tagging every skb overlapping
 * [start_seq, end_seq) as SACKed (via tcp_sacktag_one()), and trying
 * first to coalesce newly-SACKed data into the previous already-SACKed
 * skb (tcp_shift_skb_data()). Returns the skb where the walk stopped.
 */
static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
					struct tcp_sack_block *next_dup,
					struct tcp_sacktag_state *state,
					u32 start_seq, u32 end_seq,
					int dup_sack_in)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *tmp;

	tcp_for_write_queue_from(skb, sk) {
		int in_sack = 0;
		int dup_sack = dup_sack_in;

		if (skb == tcp_send_head(sk))
			break;

		/* queue is in-order => we can short-circuit the walk early */
		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
			break;

		if ((next_dup != NULL) &&
		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
			in_sack = tcp_match_skb_to_sack(sk, skb,
							next_dup->start_seq,
							next_dup->end_seq);
			if (in_sack > 0)
				dup_sack = 1;
		}

		/* skb reference here is a bit tricky to get right, since
		 * shifting can eat and free both this skb and the next,
		 * so not even _safe variant of the loop is enough.
		 */
		if (in_sack <= 0) {
			tmp = tcp_shift_skb_data(sk, skb, state,
						 start_seq, end_seq, dup_sack);
			if (tmp != NULL) {
				if (tmp != skb) {
					skb = tmp;
					continue;
				}

				in_sack = 0;
			} else {
				in_sack = tcp_match_skb_to_sack(sk, skb,
								start_seq,
								end_seq);
			}
		}

		if (unlikely(in_sack < 0))
			break;

		if (in_sack) {
			TCP_SKB_CB(skb)->sacked =
				tcp_sacktag_one(sk,
						state,
						TCP_SKB_CB(skb)->sacked,
						TCP_SKB_CB(skb)->seq,
						TCP_SKB_CB(skb)->end_seq,
						dup_sack,
						tcp_skb_pcount(skb));

			if (!before(TCP_SKB_CB(skb)->seq,
				    tcp_highest_sack_seq(tp)))
				tcp_advance_highest_sack(sk, skb);
		}

		state->fack_count += tcp_skb_pcount(skb);
	}
	return skb;
}

/* Avoid all extra work that is being done by sacktag while walking in
 * a normal way
 */
static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
					struct tcp_sacktag_state *state,
					u32 skip_to_seq)
{
	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;

		if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
			break;

		state->fack_count += tcp_skb_pcount(skb);
	}
	return skb;
}

/* If the pending D-SACK block starts before skip_to_seq, process it
 * first so that the skip does not jump over it.
 */
static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
						struct sock *sk,
						struct tcp_sack_block *next_dup,
						struct tcp_sacktag_state *state,
						u32 skip_to_seq)
{
	if (next_dup == NULL)
		return skb;

	if (before(next_dup->start_seq, skip_to_seq)) {
		skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
		skb = tcp_sacktag_walk(skb, sk, NULL, state,
				       next_dup->start_seq, next_dup->end_seq,
				       1);
	}

	return skb;
}

/* True while @cache still points inside tp->recv_sack_cache[]. */
static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
{
	return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
}

/* Process the SACK option of an incoming ACK: detect D-SACK, validate
 * and sort the SACK blocks, tag the covered skbs in the write queue
 * (consulting tp->recv_sack_cache to skip ranges already handled by
 * previous ACKs), then update lost-retransmit and reordering state.
 * Returns the accumulated FLAG_* bits.
 */
static int
tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
			u32 prior_snd_una)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	const unsigned char *ptr = (skb_transport_header(ack_skb) +
				    TCP_SKB_CB(ack_skb)->sacked);
	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
	struct tcp_sack_block sp[TCP_NUM_SACKS];
	struct tcp_sack_block *cache;
	struct tcp_sacktag_state state;
	struct sk_buff *skb;
	int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
	int used_sacks;
	int found_dup_sack = 0;
	int i, j;
	int first_sack_index;

	state.flag = 0;
	state.reord = tp->packets_out;

	if (!tp->sacked_out) {
		if (WARN_ON(tp->fackets_out))
			tp->fackets_out = 0;
		tcp_highest_sack_reset(sk);
	}

	found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
					 num_sacks, prior_snd_una);
	if (found_dup_sack)
		state.flag |= FLAG_DSACKING_ACK;

	/* Eliminate too old ACKs, but take into
	 * account more or less fresh ones, they can
	 * contain valid SACK info.
	 */
	if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
		return 0;

	if (!tp->packets_out)
		goto out;

	used_sacks = 0;
	first_sack_index = 0;
	for (i = 0; i < num_sacks; i++) {
		int dup_sack = !i && found_dup_sack;

		sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
		sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);

		if (!tcp_is_sackblock_valid(tp, dup_sack,
					    sp[used_sacks].start_seq,
					    sp[used_sacks].end_seq)) {
			int mib_idx;

			if (dup_sack) {
				if (!tp->undo_marker)
					mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
				else
					mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
			} else {
				/* Don't count olds caused by ACK reordering */
				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
				    !after(sp[used_sacks].end_seq, tp->snd_una))
					continue;
				mib_idx = LINUX_MIB_TCPSACKDISCARD;
			}

			NET_INC_STATS_BH(sock_net(sk), mib_idx);
			if (i == 0)
				first_sack_index = -1;
			continue;
		}

		/* Ignore very old stuff early */
		if (!after(sp[used_sacks].end_seq, prior_snd_una))
			continue;

		used_sacks++;
	}

	/* order SACK blocks to allow in order walk of the retrans queue */
	for (i = used_sacks - 1; i > 0; i--) {
		for (j = 0; j < i; j++) {
			if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
				swap(sp[j], sp[j + 1]);

				/* Track where the first SACK block goes to */
				if (j == first_sack_index)
					first_sack_index = j + 1;
			}
		}
	}

	skb = tcp_write_queue_head(sk);
	state.fack_count = 0;
	i = 0;

	if (!tp->sacked_out) {
		/* It's already past, so skip checking against it */
		cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
	} else {
		cache = tp->recv_sack_cache;
		/* Skip empty blocks in at head of the cache */
		while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
		       !cache->end_seq)
			cache++;
	}

	while (i < used_sacks) {
		u32 start_seq = sp[i].start_seq;
		u32 end_seq = sp[i].end_seq;
		int dup_sack = (found_dup_sack && (i == first_sack_index));
		struct tcp_sack_block *next_dup = NULL;

		if (found_dup_sack && ((i + 1) == first_sack_index))
			next_dup = &sp[i + 1];

		/* Skip too early cached blocks */
		while (tcp_sack_cache_ok(tp, cache) &&
		       !before(start_seq, cache->end_seq))
			cache++;

		/* Can skip some work by looking recv_sack_cache? */
		if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
		    after(end_seq, cache->start_seq)) {

			/* Head todo? */
			if (before(start_seq, cache->start_seq)) {
				skb = tcp_sacktag_skip(skb, sk, &state,
						       start_seq);
				skb = tcp_sacktag_walk(skb, sk, next_dup,
						       &state,
						       start_seq,
						       cache->start_seq,
						       dup_sack);
			}

			/* Rest of the block already fully processed? */
			if (!after(end_seq, cache->end_seq))
				goto advance_sp;

			skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
						       &state,
						       cache->end_seq);

			/* ...tail remains todo... */
			if (tcp_highest_sack_seq(tp) == cache->end_seq) {
				/* ...but better entrypoint exists! */
				skb = tcp_highest_sack(sk);
				if (skb == NULL)
					break;
				state.fack_count = tp->fackets_out;
				cache++;
				goto walk;
			}

			skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
			/* Check overlap against next cached too (past this one already) */
			cache++;
			continue;
		}

		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
			skb = tcp_highest_sack(sk);
			if (skb == NULL)
				break;
			state.fack_count = tp->fackets_out;
		}
		skb = tcp_sacktag_skip(skb, sk, &state, start_seq);

walk:
		skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
				       start_seq, end_seq, dup_sack);

advance_sp:
		/* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
		 * due to in-order walk
		 */
		if (after(end_seq, tp->frto_highmark))
			state.flag &= ~FLAG_ONLY_ORIG_SACKED;

		i++;
	}

	/* Clear the head of the cache sack blocks so we can skip it next time */
	for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
		tp->recv_sack_cache[i].start_seq = 0;
		tp->recv_sack_cache[i].end_seq = 0;
	}
	for (j = 0; j < used_sacks; j++)
		tp->recv_sack_cache[i++] = sp[j];

	tcp_mark_lost_retrans(sk);

	tcp_verify_left_out(tp);

	if ((state.reord < tp->fackets_out) &&
	    ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) &&
	    (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
		tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);

out:

#if FASTRETRANS_DEBUG > 0
	WARN_ON((int)tp->sacked_out < 0);
	WARN_ON((int)tp->lost_out < 0);
	WARN_ON((int)tp->retrans_out < 0);
	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
	return state.flag;
}

/* Limits sacked_out so that sum with lost_out isn't ever larger than
 * packets_out. Returns zero if sacked_out adjustement wasn't necessary.
 */
static int tcp_limit_reno_sacked(struct tcp_sock *tp)
{
	u32 holes;

	holes = max(tp->lost_out, 1U);
	holes = min(holes, tp->packets_out);

	if ((tp->sacked_out + holes) > tp->packets_out) {
		tp->sacked_out = tp->packets_out - holes;
		return 1;
	}
	return 0;
}

/* If we receive more dupacks than we expected counting segments
 * in assumption of absent reordering, interpret this as reordering.
 * The only another reason could be bug in receiver TCP.
 */
static void tcp_check_reno_reordering(struct sock *sk, const int addend)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tcp_limit_reno_sacked(tp))
		tcp_update_reordering(sk, tp->packets_out + addend, 0);
}

/* Emulate SACKs for SACKless connection: account for a new dupack. */

static void tcp_add_reno_sack(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	tp->sacked_out++;
	tcp_check_reno_reordering(sk, 0);
	tcp_verify_left_out(tp);
}

/* Account for ACK, ACKing some data in Reno Recovery phase. */

static void tcp_remove_reno_sacks(struct sock *sk, int acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (acked > 0) {
		/* One ACK acked hole. The rest eat duplicate ACKs. */
		if (acked - 1 >= tp->sacked_out)
			tp->sacked_out = 0;
		else
			tp->sacked_out -= acked - 1;
	}
	tcp_check_reno_reordering(sk, acked);
	tcp_verify_left_out(tp);
}

/* Forget all emulated (Reno) SACKs. */
static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
{
	tp->sacked_out = 0;
}

/* SACK-enhanced F-RTO is selected by sysctl_tcp_frto == 0x2 and
 * requires SACK to be enabled on the connection.
 */
static int tcp_is_sackfrto(const struct tcp_sock *tp)
{
	return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp);
}

/* F-RTO can only be used if TCP has never retransmitted anything other than
 * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
 */
int tcp_use_frto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb;

	if (!sysctl_tcp_frto)
		return 0;

	/* MTU probe and F-RTO won't really play nicely along currently */
	if (icsk->icsk_mtup.probe_size)
		return 0;

	if (tcp_is_sackfrto(tp))
		return 1;

	/* Avoid expensive walking of rexmit queue if possible */
	if (tp->retrans_out > 1)
		return 0;

	skb = tcp_write_queue_head(sk);
	if (tcp_skb_is_last(sk, skb))
		return 1;
	skb = tcp_write_queue_next(sk, skb);	/* Skips head */
	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
			return 0;
		/* Short-circuit when first non-SACKed skb has been checked */
		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
			break;
	}
	return 1;
}

/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
 * recovery a bit and use heuristics in tcp_process_frto() to detect if
 * the RTO was spurious. Only clear SACKED_RETRANS of the head here to
 * keep retrans_out counting accurate (with SACK F-RTO, other than head
 * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS
 * bits are handled if the Loss state is really to be entered (in
 * tcp_enter_frto_loss).
 *
 * Do like tcp_enter_loss() would; when RTO expires the second time it
 * does:
 *  "Reduce ssthresh if it has not yet been made inside this window."
 */
void tcp_enter_frto(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) ||
	    tp->snd_una == tp->high_seq ||
	    ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
	     !icsk->icsk_retransmits)) {
		tp->prior_ssthresh = tcp_current_ssthresh(sk);
		/* Our state is too optimistic in ssthresh() call because cwnd
		 * is not reduced until tcp_enter_frto_loss() when previous F-RTO
		 * recovery has not yet completed. Pattern would be this: RTO,
		 * Cumulative ACK, RTO (2xRTO for the same segment does not end
		 * up here twice).
		 * RFC4138 should be more specific on what to do, even though
		 * RTO is quite unlikely to occur after the first Cumulative ACK
		 * due to back-off and complexity of triggering events ...
		 */
		if (tp->frto_counter) {
			u32 stored_cwnd;
			stored_cwnd = tp->snd_cwnd;
			tp->snd_cwnd = 2;
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
			tp->snd_cwnd = stored_cwnd;
		} else {
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		}
		/* ... in theory, cong.control module could do "any tricks" in
		 * ssthresh(), which means that ca_state, lost bits and lost_out
		 * counter would have to be faked before the call occurs. We
		 * consider that too expensive, unlikely and hacky, so modules
		 * using these in ssthresh() must deal these incompatibility
		 * issues if they receives CA_EVENT_FRTO and frto_counter != 0
		 */
		tcp_ca_event(sk, CA_EVENT_FRTO);
	}

	tp->undo_marker = tp->snd_una;
	tp->undo_retrans = 0;

	/* Only the head's SACKED_RETRANS is cleared here; the rest is
	 * handled by tcp_enter_frto_loss() if Loss is really entered
	 * (see the header comment above).
	 */
	skb = tcp_write_queue_head(sk);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
		tp->undo_marker = 0;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
	}
	tcp_verify_left_out(tp);

	/* Too bad if TCP was application limited */
	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);

	/* Earlier loss recovery underway (see RFC4138; Appendix B).
	 * The last condition is necessary at least in tp->frto_counter case.
	 */
	if (tcp_is_sackfrto(tp) && (tp->frto_counter ||
	    ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
	    after(tp->high_seq, tp->snd_una)) {
		tp->frto_highmark = tp->high_seq;
	} else {
		tp->frto_highmark = tp->snd_nxt;
	}
	tcp_set_ca_state(sk, TCP_CA_Disorder);
	tp->high_seq = tp->snd_nxt;
	tp->frto_counter = 1;
}

/* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
 * which indicates that we should follow the traditional RTO recovery,
 * i.e. mark everything lost and do go-back-N retransmission.
 */
static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	tp->lost_out = 0;
	tp->retrans_out = 0;
	if (tcp_is_reno(tp))
		tcp_reset_reno_sack(tp);

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;

		TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
		/*
		 * Count the retransmission made on RTO correctly (only when
		 * waiting for the first ACK and did not get it)...
		 */
		if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) {
			/* For some reason this R-bit might get cleared? */
			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
				tp->retrans_out += tcp_skb_pcount(skb);
			/* ...enter this if branch just for the first segment */
			flag |= FLAG_DATA_ACKED;
		} else {
			if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
				tp->undo_marker = 0;
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		}

		/* Marking forward transmissions that were made after RTO lost
		 * can cause unnecessary retransmissions in some scenarios,
		 * SACK blocks will mitigate that in some but not in all cases.
		 * We used to not mark them but it was causing break-ups with
		 * receivers that do only in-order receival.
		 *
		 * TODO: we could detect presence of such receiver and select
		 * different behavior per flow.
		 */
		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
			tp->lost_out += tcp_skb_pcount(skb);
			tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
		}
	}
	tcp_verify_left_out(tp);

	tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments;
	tp->snd_cwnd_cnt = 0;
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->frto_counter = 0;
	tp->bytes_acked = 0;

	tp->reordering = min_t(unsigned int, tp->reordering,
			       sysctl_tcp_reordering);
	tcp_set_ca_state(sk, TCP_CA_Loss);
	tp->high_seq = tp->snd_nxt;
	TCP_ECN_queue_cwr(tp);

	tcp_clear_all_retrans_hints(tp);
}

/* Reset retransmission/loss accounting and the undo markers. */
static void tcp_clear_retrans_partial(struct tcp_sock *tp)
{
	tp->retrans_out = 0;
	tp->lost_out = 0;

	tp->undo_marker = 0;
	tp->undo_retrans = 0;
}

/* Full reset: also forgets SACK/FACK accounting. */
void tcp_clear_retrans(struct tcp_sock *tp)
{
	tcp_clear_retrans_partial(tp);

	tp->fackets_out = 0;
	tp->sacked_out = 0;
}

/* Enter Loss state. If "how" is not zero, forget all SACK information
 * and reset tags completely, otherwise preserve SACKs. If receiver
 * dropped its ofo queue, we will know this due to reneging detection.
 */
void tcp_enter_loss(struct sock *sk, int how)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* Reduce ssthresh if it has not yet been made inside this window. */
	if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
		tp->prior_ssthresh = tcp_current_ssthresh(sk);
		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		tcp_ca_event(sk, CA_EVENT_LOSS);
	}
	tp->snd_cwnd = 1;
	tp->snd_cwnd_cnt = 0;
	tp->snd_cwnd_stamp = tcp_time_stamp;

	tp->bytes_acked = 0;
	tcp_clear_retrans_partial(tp);

	if (tcp_is_reno(tp))
		tcp_reset_reno_sack(tp);

	if (!how) {
		/* Push undo marker, if it was plain RTO and nothing
		 * was retransmitted. */
		tp->undo_marker = tp->snd_una;
	} else {
		tp->sacked_out = 0;
		tp->fackets_out = 0;
	}
	tcp_clear_all_retrans_hints(tp);

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
			tp->undo_marker = 0;
		/* Strip all tag bits except SACKED_ACKED, which is then
		 * cleared too unless SACKs are being preserved (!how).
		 */
		TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
			tp->lost_out += tcp_skb_pcount(skb);
			tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
		}
	}
	tcp_verify_left_out(tp);

	tp->reordering = min_t(unsigned int, tp->reordering,
			       sysctl_tcp_reordering);
	tcp_set_ca_state(sk, TCP_CA_Loss);
	tp->high_seq = tp->snd_nxt;
	TCP_ECN_queue_cwr(tp);
	/* Abort F-RTO algorithm if one is in progress */
	tp->frto_counter = 0;
}

/* If ACK arrived pointing to a remembered SACK, it means that our
 * remembered SACKs do not reflect real state of receiver i.e.
 * receiver _host_ is heavily congested (or buggy).
 *
 * Do processing similar to RTO timeout.
 */
static int tcp_check_sack_reneging(struct sock *sk, int flag)
{
	if (flag & FLAG_SACK_RENEGING) {
		struct inet_connection_sock *icsk = inet_csk(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);

		tcp_enter_loss(sk, 1);
		icsk->icsk_retransmits++;
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  icsk->icsk_rto, TCP_RTO_MAX);
		return 1;
	}
	return 0;
}

/* fackets_out estimate with Reno emulation: SACKless flows count
 * sacked_out (emulated dupacks) plus one for the hole.
 */
static inline int tcp_fackets_out(const struct tcp_sock *tp)
{
	return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
}

/* Heurestics to calculate number of duplicate ACKs. There's no dupACKs
 * counter when SACK is enabled (without SACK, sacked_out is used for
 * that purpose).
 *
 * Instead, with FACK TCP uses fackets_out that includes both SACKed
 * segments up to the highest received SACK block so far and holes in
 * between them.
* * With reordering, holes may still be in flight, so RFC3517 recovery * uses pure sacked_out (total number of SACKed segments) even though * it violates the RFC that uses duplicate ACKs, often these are equal * but when e.g. out-of-window ACKs or packet duplication occurs, * they differ. Since neither occurs due to loss, TCP should really * ignore them. */ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) { return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; } static inline int tcp_skb_timedout(const struct sock *sk, const struct sk_buff *skb) { return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto; } static inline int tcp_head_timedout(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); return tp->packets_out && tcp_skb_timedout(sk, tcp_write_queue_head(sk)); } /* Linux NewReno/SACK/FACK/ECN state machine. * -------------------------------------- * * "Open" Normal state, no dubious events, fast path. * "Disorder" In all the respects it is "Open", * but requires a bit more attention. It is entered when * we see some SACKs or dupacks. It is split of "Open" * mainly to move some processing from fast path to slow one. * "CWR" CWND was reduced due to some Congestion Notification event. * It can be ECN, ICMP source quench, local device congestion. * "Recovery" CWND was reduced, we are fast-retransmitting. * "Loss" CWND was reduced due to RTO timeout or SACK reneging. * * tcp_fastretrans_alert() is entered: * - each incoming ACK, if state is not "Open" * - when arrived ACK is unusual, namely: * * SACK * * Duplicate ACK. * * ECN ECE. * * Counting packets in flight is pretty simple. * * in_flight = packets_out - left_out + retrans_out * * packets_out is SND.NXT-SND.UNA counted in packets. * * retrans_out is number of retransmitted segments. * * left_out is number of segments left network, but not ACKed yet. 
* * left_out = sacked_out + lost_out * * sacked_out: Packets, which arrived to receiver out of order * and hence not ACKed. With SACKs this number is simply * amount of SACKed data. Even without SACKs * it is easy to give pretty reliable estimate of this number, * counting duplicate ACKs. * * lost_out: Packets lost by network. TCP has no explicit * "loss notification" feedback from network (for now). * It means that this number can be only _guessed_. * Actually, it is the heuristics to predict lossage that * distinguishes different algorithms. * * F.e. after RTO, when all the queue is considered as lost, * lost_out = packets_out and in_flight = retrans_out. * * Essentially, we have now two algorithms counting * lost packets. * * FACK: It is the simplest heuristics. As soon as we decided * that something is lost, we decide that _all_ not SACKed * packets until the most forward SACK are lost. I.e. * lost_out = fackets_out - sacked_out and left_out = fackets_out. * It is absolutely correct estimate, if network does not reorder * packets. And it loses any connection to reality when reordering * takes place. We use FACK by default until reordering * is suspected on the path to this destination. * * NewReno: when Recovery is entered, we assume that one segment * is lost (classic Reno). While we are in Recovery and * a partial ACK arrives, we assume that one more packet * is lost (NewReno). This heuristics are the same in NewReno * and SACK. * * Imagine, that's all! Forget about all this shamanism about CWND inflation * deflation etc. CWND is real congestion window, never inflated, changes * only according to classic VJ rules. * * Really tricky (and requiring careful tuning) part of algorithm * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue(). * The first determines the moment _when_ we should reduce CWND and, * hence, slow down forward transmission. 
In fact, it determines the moment
 * when we decide that hole is caused by loss, rather than by a reorder.
 *
 * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
 * holes, caused by lost packets.
 *
 * And the most logically complicated part of algorithm is undo
 * heuristics. We detect false retransmits due to both too early
 * fast retransmit (reordering) and underestimated RTO, analyzing
 * timestamps and D-SACKs. When we detect that some segments were
 * retransmitted by mistake and CWND reduction was wrong, we undo
 * window reduction and abort recovery phase. This logic is hidden
 * inside several functions named tcp_try_undo_<something>.
 */

/* This function decides, when we should leave Disordered state
 * and enter Recovery phase, reducing congestion window.
 *
 * Main question: may we further continue forward transmission
 * with the same cwnd?
 *
 * Returns non-zero when it is time to enter Recovery.
 */
static int tcp_time_to_recover(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 packets_out;

	/* Do not perform any recovery during F-RTO algorithm */
	if (tp->frto_counter)
		return 0;

	/* Trick#1: The loss is proven. */
	if (tp->lost_out)
		return 1;

	/* Not-A-Trick#2 : Classic rule... */
	if (tcp_dupack_heuristics(tp) > tp->reordering)
		return 1;

	/* Trick#3 : when we use RFC2988 timer restart, fast
	 * retransmit can be triggered by timeout of queue head.
	 */
	if (tcp_is_fack(tp) && tcp_head_timedout(sk))
		return 1;

	/* Trick#4: It is still not OK... But will it be useful to delay
	 * recovery more?
	 */
	packets_out = tp->packets_out;
	if (packets_out <= tp->reordering &&
	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
	    !tcp_may_send_now(sk)) {
		/* We have nothing to send. This connection is limited
		 * either by receiver window or by application.
		 */
		return 1;
	}

	/* If a thin stream is detected, retransmit after first
	 * received dupack. Employ only if SACK is supported in order
	 * to avoid possible corner-case series of spurious retransmissions
	 * Use only if there are no unsent data.
	 */
	if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
	    tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
	    tcp_is_sack(tp) && !tcp_send_head(sk))
		return 1;

	return 0;
}

/* New heuristics: it is possible only after we switched to restart timer
 * each time when something is ACKed. Hence, we can detect timed out packets
 * during fast retransmit without falling to slow start.
 *
 * Usefulness of this as is very questionable, since we should know which of
 * the segments is the next to timeout which is relatively expensive to find
 * in general case unless we add some data structure just for that. The
 * current approach certainly won't find the right one too often and when it
 * finally does find _something_ it usually marks large part of the window
 * right away (because a retransmission with a larger timestamp blocks the
 * loop from advancing). -ij
 */
static void tcp_timeout_skbs(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* Only meaningful for FACK and when the queue head has timed out. */
	if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
		return;

	/* Resume the scan from the cached hint when available. */
	skb = tp->scoreboard_skb_hint;
	if (tp->scoreboard_skb_hint == NULL)
		skb = tcp_write_queue_head(sk);

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		if (!tcp_skb_timedout(sk, skb))
			break;

		tcp_skb_mark_lost(tp, skb);
	}

	/* Remember where we stopped so the next call can continue here. */
	tp->scoreboard_skb_hint = skb;

	tcp_verify_left_out(tp);
}

/* Detect loss in event "A" above by marking head of queue up as lost.
 * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
 * are considered lost. For RFC3517 SACK, a segment is considered lost if it
 * has at least tp->reordering SACKed segments above it; "packets" refers to
 * the maximum SACKed segments to pass before reaching this limit.
 */
static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt, oldcnt;
	int err;
	unsigned int mss;
	/* Use SACK to deduce losses of new sequences sent during recovery */
	const u32 loss_high = tcp_is_sack(tp) ?  tp->snd_nxt : tp->high_seq;

	WARN_ON(packets > tp->packets_out);
	if (tp->lost_skb_hint) {
		skb = tp->lost_skb_hint;
		cnt = tp->lost_cnt_hint;
		/* Head already handled? */
		if (mark_head && skb != tcp_write_queue_head(sk))
			return;
	} else {
		skb = tcp_write_queue_head(sk);
		cnt = 0;
	}

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		/* TODO: do this better */
		/* this is not the most efficient way to do this... */
		tp->lost_skb_hint = skb;
		tp->lost_cnt_hint = cnt;

		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
			break;

		oldcnt = cnt;
		if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
			cnt += tcp_skb_pcount(skb);

		if (cnt > packets) {
			if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
			    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
			    (oldcnt >= packets))
				break;

			/* Split a TSO skb so that exactly "packets" worth of
			 * segments get marked; the remainder stays unmarked.
			 */
			mss = skb_shinfo(skb)->gso_size;
			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
			if (err < 0)
				break;
			cnt = packets;
		}

		tcp_skb_mark_lost(tp, skb);

		if (mark_head)
			break;
	}
	tcp_verify_left_out(tp);
}

/* Account newly detected lost packet(s) */
static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_is_reno(tp)) {
		tcp_mark_head_lost(sk, 1, 1);
	} else if (tcp_is_fack(tp)) {
		/* FACK: everything below the highest SACK minus the
		 * reordering window is considered lost.
		 */
		int lost = tp->fackets_out - tp->reordering;
		if (lost <= 0)
			lost = 1;
		tcp_mark_head_lost(sk, lost, 0);
	} else {
		int sacked_upto = tp->sacked_out - tp->reordering;
		if (sacked_upto >= 0)
			tcp_mark_head_lost(sk, sacked_upto, 0);
		else if (fast_rexmit)
			tcp_mark_head_lost(sk, 1, 1);
	}

	tcp_timeout_skbs(sk);
}

/* CWND moderation, preventing bursts due to too big ACKs
 * in dubious situations.
 */
static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* Lower bound on congestion window is slow start threshold
 * unless congestion avoidance choice decides to override it.
 */
static inline u32 tcp_cwnd_min(const struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;

	/* Let the congestion control module override the floor if it wants. */
	return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
}

/* Decrease cwnd each second ack. */
static void tcp_cwnd_down(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int decr = tp->snd_cwnd_cnt + 1;

	if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
	    (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
		/* Keep the odd bit as the running counter; the halved value
		 * is how much we shrink cwnd this time.
		 */
		tp->snd_cwnd_cnt = decr & 1;
		decr >>= 1;

		if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
			tp->snd_cwnd -= decr;

		tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
		tp->snd_cwnd_stamp = tcp_time_stamp;
	}
}

/* Nothing was retransmitted or returned timestamp is less
 * than timestamp of the first retransmission.
 */
static inline int tcp_packet_delayed(const struct tcp_sock *tp)
{
	return !tp->retrans_stamp ||
		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
		 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
}

/* Undo procedures. */
#if FASTRETRANS_DEBUG > 1
static void DBGUNDO(struct sock *sk, const char *msg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);

	if (sk->sk_family == AF_INET) {
		printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
		       msg,
		       &inet->inet_daddr, ntohs(inet->inet_dport),
		       tp->snd_cwnd, tcp_left_out(tp),
		       tp->snd_ssthresh, tp->prior_ssthresh,
		       tp->packets_out);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);
		printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
		       msg,
		       &np->daddr, ntohs(inet->inet_dport),
		       tp->snd_cwnd, tcp_left_out(tp),
		       tp->snd_ssthresh, tp->prior_ssthresh,
		       tp->packets_out);
	}
#endif
}
#else
#define DBGUNDO(x...) do { } while (0)
#endif

/* Restore cwnd (and, when undo_ssthresh is set, ssthresh too) that was
 * saved before the last reduction turned out to be unnecessary.
 */
static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->prior_ssthresh) {
		const struct inet_connection_sock *icsk = inet_csk(sk);

		if (icsk->icsk_ca_ops->undo_cwnd)
			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
		else
			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);

		if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) {
			tp->snd_ssthresh = tp->prior_ssthresh;
			TCP_ECN_withdraw_cwr(tp);
		}
	} else {
		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
	}
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* True when the undo marker is set and either nothing marked was
 * retransmitted or timestamps prove the retransmissions were spurious.
 */
static inline int tcp_may_undo(const struct tcp_sock *tp)
{
	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}

/* People celebrate: "We love our President!" */
static int tcp_try_undo_recovery(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_may_undo(tp)) {
		int mib_idx;

		/* Happy end! We did not retransmit anything
		 * or our original transmission succeeded.
		 */
		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
		tcp_undo_cwr(sk, true);
		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
			mib_idx = LINUX_MIB_TCPLOSSUNDO;
		else
			mib_idx = LINUX_MIB_TCPFULLUNDO;

		NET_INC_STATS_BH(sock_net(sk), mib_idx);
		tp->undo_marker = 0;
	}
	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
		/* Hold old state until something *above* high_seq
		 * is ACKed. For Reno it is MUST to prevent false
		 * fast retransmits (RFC2582). SACK TCP is safe. */
		tcp_moderate_cwnd(tp);
		return 1;
	}
	tcp_set_ca_state(sk, TCP_CA_Open);
	return 0;
}

/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
static void tcp_try_undo_dsack(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->undo_marker && !tp->undo_retrans) {
		DBGUNDO(sk, "D-SACK");
		tcp_undo_cwr(sk, true);
		tp->undo_marker = 0;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
	}
}

/* We can clear retrans_stamp when there are no retransmissions in the
 * window.
 It would seem that it is trivially available for us in
 * tp->retrans_out, however, that kind of assumptions doesn't consider
 * what will happen if errors occur when sending retransmission for the
 * second time. ...It could be that such segment has only
 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
 * the head skb is enough except for some reneging corner cases that
 * are not worth the effort.
 *
 * Main reason for all this complexity is the fact that connection dying
 * time now depends on the validity of the retrans_stamp, in particular,
 * that successive retransmissions of a segment must not advance
 * retrans_stamp under any conditions.
 */
static int tcp_any_retrans_done(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (tp->retrans_out)
		return 1;

	skb = tcp_write_queue_head(sk);
	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
		return 1;

	return 0;
}

/* Undo during fast recovery after partial ACK.
 * Returns non-zero when Hoe's retransmit should still be forced.
 */
static int tcp_try_undo_partial(struct sock *sk, int acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Partial ACK arrived. Force Hoe's retransmit. */
	int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);

	if (tcp_may_undo(tp)) {
		/* Plain luck! Hole is filled with delayed
		 * packet, rather than with a retransmit.
		 */
		if (!tcp_any_retrans_done(sk))
			tp->retrans_stamp = 0;

		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);

		DBGUNDO(sk, "Hoe");
		tcp_undo_cwr(sk, false);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);

		/* So... Do not make Hoe's retransmit yet.
		 * If the first packet was delayed, the rest
		 * ones are most probably delayed as well.
		 */
		failed = 0;
	}
	return failed;
}

/* Undo during loss recovery after partial ACK. */
static int tcp_try_undo_loss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_may_undo(tp)) {
		struct sk_buff *skb;
		/* Drop the LOST mark from every outstanding segment. */
		tcp_for_write_queue(skb, sk) {
			if (skb == tcp_send_head(sk))
				break;
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
		}

		tcp_clear_all_retrans_hints(tp);

		DBGUNDO(sk, "partial loss");
		tp->lost_out = 0;
		tcp_undo_cwr(sk, true);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
		inet_csk(sk)->icsk_retransmits = 0;
		tp->undo_marker = 0;
		if (tcp_is_sack(tp))
			tcp_set_ca_state(sk, TCP_CA_Open);
		return 1;
	}
	return 0;
}

/* Finish a CWND reduction episode and notify the congestion module. */
static inline void tcp_complete_cwr(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Do not moderate cwnd if it's already undone in cwr or recovery. */
	if (tp->undo_marker) {
		if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
			tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
			tp->snd_cwnd_stamp = tcp_time_stamp;
		} else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
			/* PRR algorithm. */
			tp->snd_cwnd = tp->snd_ssthresh;
			tp->snd_cwnd_stamp = tcp_time_stamp;
		}
	}
	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
}

/* Move to Open, or to Disorder while sacked/retransmitted data remains. */
static void tcp_try_keep_open(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int state = TCP_CA_Open;

	if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
		state = TCP_CA_Disorder;

	if (inet_csk(sk)->icsk_ca_state != state) {
		tcp_set_ca_state(sk, state);
		tp->high_seq = tp->snd_nxt;
	}
}

static void tcp_try_to_open(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_verify_left_out(tp);

	if (!tp->frto_counter && !tcp_any_retrans_done(sk))
		tp->retrans_stamp = 0;

	if (flag & FLAG_ECE)
		tcp_enter_cwr(sk, 1);

	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
		tcp_try_keep_open(sk);
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
			tcp_moderate_cwnd(tp);
	} else {
		tcp_cwnd_down(sk, flag);
	}
}

/* MTU probe failed: lower the search ceiling below the failed probe size. */
static void tcp_mtup_probe_failed(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
	icsk->icsk_mtup.probe_size = 0;
}

static void tcp_mtup_probe_success(struct sock
 *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* FIXME: breaks with very large cwnd */
	tp->prior_ssthresh = tcp_current_ssthresh(sk);
	/* Rescale cwnd so the amount of in-flight *bytes* stays the same
	 * under the new, larger MSS.
	 */
	tp->snd_cwnd = tp->snd_cwnd *
		       tcp_mss_to_mtu(sk, tp->mss_cache) /
		       icsk->icsk_mtup.probe_size;
	tp->snd_cwnd_cnt = 0;
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_ssthresh = tcp_current_ssthresh(sk);

	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
	icsk->icsk_mtup.probe_size = 0;
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

/* Do a simple retransmit without using the backoff mechanisms in
 * tcp_timer. This is used for path mtu discovery.
 * The socket is already locked here.
 */
void tcp_simple_retransmit(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int mss = tcp_current_mss(sk);
	u32 prior_lost = tp->lost_out;

	/* Mark every un-SACKed segment larger than the current MSS as lost;
	 * those are the ones that cannot traverse the shrunken path MTU.
	 */
	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		if (tcp_skb_seglen(skb) > mss &&
		    !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
				tp->retrans_out -= tcp_skb_pcount(skb);
			}
			tcp_skb_mark_lost_uncond_verify(tp, skb);
		}
	}

	tcp_clear_retrans_hints_partial(tp);

	if (prior_lost == tp->lost_out)
		return;

	if (tcp_is_reno(tp))
		tcp_limit_reno_sacked(tp);

	tcp_verify_left_out(tp);

	/* Don't muck with the congestion window here.
	 * Reason is that we do not increase amount of _data_
	 * in network, but units changed and effective
	 * cwnd/ssthresh really reduced now.
	 */
	if (icsk->icsk_ca_state != TCP_CA_Loss) {
		tp->high_seq = tp->snd_nxt;
		tp->snd_ssthresh = tcp_current_ssthresh(sk);
		tp->prior_ssthresh = 0;
		tp->undo_marker = 0;
		tcp_set_ca_state(sk, TCP_CA_Loss);
	}
	tcp_xmit_retransmit_queue(sk);
}
EXPORT_SYMBOL(tcp_simple_retransmit);

/* This function implements the PRR algorithm, specifically the PRR-SSRB
 * (proportional rate reduction with slow start reduction bound) as described in
 * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
 * It computes the number of packets to send (sndcnt) based on packets newly
 * delivered:
 *   1) If the packets in flight is larger than ssthresh, PRR spreads the
 *	cwnd reductions across a full RTT.
 *   2) If packets in flight is lower than ssthresh (such as due to excess
 *	losses and/or application stalls), do not perform any further cwnd
 *	reductions, but instead slow start up to ssthresh.
 */
static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
					int fast_rexmit, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int sndcnt = 0;
	int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);

	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
		/* Proportional reduction: send in proportion to what was
		 * delivered, scaled by ssthresh/prior_cwnd (rounded up).
		 */
		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
			       tp->prior_cwnd - 1;
		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
	} else {
		/* Slow start back up to ssthresh (reduction bound). */
		sndcnt = min_t(int, delta,
			       max_t(int, tp->prr_delivered - tp->prr_out,
				     newly_acked_sacked) + 1);
	}

	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
}

/* Process an event, which can update packets-in-flight not trivially.
 * Main goal of this function is to calculate new estimate for left_out,
 * taking into account both packets sitting in receiver's buffer and
 * packets lost by network.
 *
 * Besides that it does CWND reduction, when packet loss is detected
 * and changes state of machine.
 *
 * It does _not_ decide what to send, it is made in function
 * tcp_xmit_retransmit_queue().
 */
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
				  int newly_acked_sacked, bool is_dupack,
				  int flag)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
				    (tcp_fackets_out(tp) > tp->reordering));
	int fast_rexmit = 0, mib_idx;

	if (WARN_ON(!tp->packets_out && tp->sacked_out))
		tp->sacked_out = 0;
	if (WARN_ON(!tp->sacked_out && tp->fackets_out))
		tp->fackets_out = 0;

	/* Now state machine starts.
	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
	if (flag & FLAG_ECE)
		tp->prior_ssthresh = 0;

	/* B. In all the states check for reneging SACKs. */
	if (tcp_check_sack_reneging(sk, flag))
		return;

	/* C. Check consistency of the current state. */
	tcp_verify_left_out(tp);

	/* D. Check state exit conditions. State can be terminated
	 *    when high_seq is ACKed. */
	if (icsk->icsk_ca_state == TCP_CA_Open) {
		WARN_ON(tp->retrans_out != 0);
		tp->retrans_stamp = 0;
	} else if (!before(tp->snd_una, tp->high_seq)) {
		switch (icsk->icsk_ca_state) {
		case TCP_CA_Loss:
			icsk->icsk_retransmits = 0;
			if (tcp_try_undo_recovery(sk))
				return;
			break;

		case TCP_CA_CWR:
			/* CWR is to be held until something *above* high_seq
			 * is ACKed for CWR bit to reach receiver. */
			if (tp->snd_una != tp->high_seq) {
				tcp_complete_cwr(sk);
				tcp_set_ca_state(sk, TCP_CA_Open);
			}
			break;

		case TCP_CA_Recovery:
			if (tcp_is_reno(tp))
				tcp_reset_reno_sack(tp);
			if (tcp_try_undo_recovery(sk))
				return;
			tcp_complete_cwr(sk);
			break;
		}
	}

	/* E. Process state. */
	switch (icsk->icsk_ca_state) {
	case TCP_CA_Recovery:
		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
			if (tcp_is_reno(tp) && is_dupack)
				tcp_add_reno_sack(sk);
		} else
			do_lost = tcp_try_undo_partial(sk, pkts_acked);
		break;
	case TCP_CA_Loss:
		if (flag & FLAG_DATA_ACKED)
			icsk->icsk_retransmits = 0;
		if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED)
			tcp_reset_reno_sack(tp);
		if (!tcp_try_undo_loss(sk)) {
			tcp_moderate_cwnd(tp);
			tcp_xmit_retransmit_queue(sk);
			return;
		}
		if (icsk->icsk_ca_state != TCP_CA_Open)
			return;
		/* Loss is undone; fall through to processing in Open state. */
	default:
		if (tcp_is_reno(tp)) {
			if (flag & FLAG_SND_UNA_ADVANCED)
				tcp_reset_reno_sack(tp);
			if (is_dupack)
				tcp_add_reno_sack(sk);
		}

		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
			tcp_try_undo_dsack(sk);

		if (!tcp_time_to_recover(sk)) {
			tcp_try_to_open(sk, flag);
			return;
		}

		/* MTU probe failure: don't reduce cwnd */
		if (icsk->icsk_ca_state < TCP_CA_CWR &&
		    icsk->icsk_mtup.probe_size &&
		    tp->snd_una == tp->mtu_probe.probe_seq_start) {
			tcp_mtup_probe_failed(sk);
			/* Restores the reduction we did in tcp_mtup_probe() */
			tp->snd_cwnd++;
			tcp_simple_retransmit(sk);
			return;
		}

		/* Otherwise enter Recovery state */
		if (tcp_is_reno(tp))
			mib_idx = LINUX_MIB_TCPRENORECOVERY;
		else
			mib_idx = LINUX_MIB_TCPSACKRECOVERY;

		NET_INC_STATS_BH(sock_net(sk), mib_idx);

		tp->high_seq = tp->snd_nxt;
		tp->prior_ssthresh = 0;
		tp->undo_marker = tp->snd_una;
		tp->undo_retrans = tp->retrans_out;

		if (icsk->icsk_ca_state < TCP_CA_CWR) {
			if (!(flag & FLAG_ECE))
				tp->prior_ssthresh = tcp_current_ssthresh(sk);
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
			TCP_ECN_queue_cwr(tp);
		}

		tp->bytes_acked = 0;
		tp->snd_cwnd_cnt = 0;
		/* Initialize PRR bookkeeping for this recovery episode. */
		tp->prior_cwnd = tp->snd_cwnd;
		tp->prr_delivered = 0;
		tp->prr_out = 0;
		tcp_set_ca_state(sk, TCP_CA_Recovery);
		fast_rexmit = 1;
	}

	if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
		tcp_update_scoreboard(sk, fast_rexmit);
	tp->prr_delivered += newly_acked_sacked;
	tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit,
				    flag);
	tcp_xmit_retransmit_queue(sk);
}

/* Feed a valid RTT sample into the estimator, recompute RTO and
 * clear any exponential backoff.
 */
void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
{
	tcp_rtt_estimator(sk, seq_rtt);
	tcp_set_rto(sk);
	inet_csk(sk)->icsk_backoff = 0;
}
EXPORT_SYMBOL(tcp_valid_rtt_meas);

/* Read draft-ietf-tcplw-high-performance before mucking
 * with this code. (Supersedes RFC1323)
 */
static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
{
	/* RTTM Rule: A TSecr value received in a segment is used to
	 * update the averaged RTT measurement only if the segment
	 * acknowledges some new data, i.e., only if it advances the
	 * left edge of the send window.
	 *
	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
	 * 1998/04/10 Andrey V. Savochkin <[email protected]>
	 *
	 * Changed: reset backoff as soon as we see the first valid sample.
	 * If we do not, we get strongly overestimated rto. With timestamps
	 * samples are accepted even from very old segments: f.e., when rtt=1
	 * increases to 8, we retransmit 5 times and after 8 seconds delayed
	 * answer arrives rto becomes 120 seconds! If at least one of segments
	 * in window is lost... Voila. --ANK (010210)
	 */
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
}

static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
{
	/* We don't have a timestamp. Can only use
	 * packets that are not retransmitted to determine
	 * rtt estimates. Also, we must not reset the
	 * backoff for rto until we get a non-retransmitted
	 * packet. This allows us to deal with a situation
	 * where the network delay has increased suddenly.
	 * I.e. Karn's algorithm. (SIGCOMM '87, p5.)
	 */
	if (flag & FLAG_RETRANS_DATA_ACKED)
		return;

	tcp_valid_rtt_meas(sk, seq_rtt);
}

/* Prefer the timestamp-derived RTT sample; fall back to the sequence-based
 * one (seq_rtt >= 0) when timestamps are unavailable.
 */
static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
				      const s32 seq_rtt)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
		tcp_ack_saw_tstamp(sk, flag);
	else if (seq_rtt >= 0)
		tcp_ack_no_tstamp(sk, seq_rtt, flag);
}

/* Delegate cwnd growth to the pluggable congestion avoidance hook. */
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight);
	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
}

/* Restart timer after forward progress on connection.
 * RFC2988 recommends to restart timer to now+rto.
 */
static void tcp_rearm_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->packets_out) {
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
	} else {
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
	}
}

/* If we get here, the whole TSO packet has not been acked. */
static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 packets_acked;

	BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));

	/* Trim the acked prefix off the skb; the difference in pcount is
	 * how many segments this ACK covered.
	 */
	packets_acked = tcp_skb_pcount(skb);
	if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
		return 0;
	packets_acked -= tcp_skb_pcount(skb);

	if (packets_acked) {
		BUG_ON(tcp_skb_pcount(skb) == 0);
		BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
	}

	return packets_acked;
}

/* Remove acknowledged frames from the retransmission queue. If our packet
 * is before the ack sequence we can discard it as it's confirmed to have
 * arrived at the other end.
 */
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
			       u32 prior_snd_una)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb;
	u32 now = tcp_time_stamp;
	int fully_acked = 1;
	int flag = 0;
	u32 pkts_acked = 0;
	u32 reord = tp->packets_out;
	u32 prior_sacked = tp->sacked_out;
	s32 seq_rtt = -1;
	s32 ca_seq_rtt = -1;
	ktime_t last_ackt = net_invalid_timestamp();

	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		u32 acked_pcount;
		u8 sacked = scb->sacked;

		/* Determine how many packets and what bytes were acked, tso and else */
		if (after(scb->end_seq, tp->snd_una)) {
			if (tcp_skb_pcount(skb) == 1 ||
			    !after(tp->snd_una, scb->seq))
				break;

			acked_pcount = tcp_tso_acked(sk, skb);
			if (!acked_pcount)
				break;

			fully_acked = 0;
		} else {
			acked_pcount = tcp_skb_pcount(skb);
		}

		if (sacked & TCPCB_RETRANS) {
			if (sacked & TCPCB_SACKED_RETRANS)
				tp->retrans_out -= acked_pcount;
			flag |= FLAG_RETRANS_DATA_ACKED;
			/* Retransmitted data gives ambiguous RTT samples
			 * (Karn's algorithm) -- discard them.
			 */
			ca_seq_rtt = -1;
			seq_rtt = -1;
			if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1))
				flag |= FLAG_NONHEAD_RETRANS_ACKED;
		} else {
			ca_seq_rtt = now - scb->when;
			last_ackt = skb->tstamp;
			if (seq_rtt < 0) {
				seq_rtt = ca_seq_rtt;
			}
			if (!(sacked & TCPCB_SACKED_ACKED))
				reord = min(pkts_acked, reord);
		}

		if (sacked & TCPCB_SACKED_ACKED)
			tp->sacked_out -= acked_pcount;
		if (sacked & TCPCB_LOST)
			tp->lost_out -= acked_pcount;

		tp->packets_out -= acked_pcount;
		pkts_acked += acked_pcount;

		/* Initial outgoing SYN's get put onto the write_queue
		 * just like anything else we transmit.  It is not
		 * true data, and if we misinform our callers that
		 * this ACK acks real data, we will erroneously exit
		 * connection startup slow start one packet too
		 * quickly.  This is severely frowned upon behavior.
		 */
		if (!(scb->tcp_flags & TCPHDR_SYN)) {
			flag |= FLAG_DATA_ACKED;
		} else {
			flag |= FLAG_SYN_ACKED;
			tp->retrans_stamp = 0;
		}

		if (!fully_acked)
			break;

		tcp_unlink_write_queue(skb, sk);
		sk_wmem_free_skb(sk, skb);
		tp->scoreboard_skb_hint = NULL;
		if (skb == tp->retransmit_skb_hint)
			tp->retransmit_skb_hint = NULL;
		if (skb == tp->lost_skb_hint)
			tp->lost_skb_hint = NULL;
	}

	if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
		tp->snd_up = tp->snd_una;

	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
		flag |= FLAG_SACK_RENEGING;

	if (flag & FLAG_ACKED) {
		const struct tcp_congestion_ops *ca_ops
			= inet_csk(sk)->icsk_ca_ops;

		if (unlikely(icsk->icsk_mtup.probe_size &&
			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
			tcp_mtup_probe_success(sk);
		}

		tcp_ack_update_rtt(sk, flag, seq_rtt);
		tcp_rearm_rto(sk);

		if (tcp_is_reno(tp)) {
			tcp_remove_reno_sacks(sk, pkts_acked);
		} else {
			int delta;

			/* Non-retransmitted hole got filled? That's reordering */
			if (reord < prior_fackets)
				tcp_update_reordering(sk, tp->fackets_out - reord, 0);

			delta = tcp_is_fack(tp) ? pkts_acked :
						  prior_sacked - tp->sacked_out;
			tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
		}

		tp->fackets_out -= min(pkts_acked, tp->fackets_out);

		if (ca_ops->pkts_acked) {
			s32 rtt_us = -1;

			/* Is the ACK triggering packet unambiguous? */
			if (!(flag & FLAG_RETRANS_DATA_ACKED)) {
				/* High resolution needed and available? */
				if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
				    !ktime_equal(last_ackt,
						 net_invalid_timestamp()))
					rtt_us = ktime_us_delta(ktime_get_real(),
								last_ackt);
				else if (ca_seq_rtt >= 0)
					rtt_us = jiffies_to_usecs(ca_seq_rtt);
			}

			ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
		}
	}

#if FASTRETRANS_DEBUG > 0
	WARN_ON((int)tp->sacked_out < 0);
	WARN_ON((int)tp->lost_out < 0);
	WARN_ON((int)tp->retrans_out < 0);
	if (!tp->packets_out && tcp_is_sack(tp)) {
		icsk = inet_csk(sk);
		if (tp->lost_out) {
			printk(KERN_DEBUG "Leak l=%u %d\n",
			       tp->lost_out, icsk->icsk_ca_state);
			tp->lost_out = 0;
		}
		if (tp->sacked_out) {
			printk(KERN_DEBUG "Leak s=%u %d\n",
			       tp->sacked_out, icsk->icsk_ca_state);
			tp->sacked_out = 0;
		}
		if (tp->retrans_out) {
			printk(KERN_DEBUG "Leak r=%u %d\n",
			       tp->retrans_out, icsk->icsk_ca_state);
			tp->retrans_out = 0;
		}
	}
#endif
	return flag;
}

/* Handle an ACK received while the zero-window probe timer is pending. */
static void tcp_ack_probe(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Was it a usable window open? */

	if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) {
		icsk->icsk_backoff = 0;
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
		/* Socket must be waked up by subsequent tcp_data_snd_check().
		 * This function is not for random using!
		 */
	} else {
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
					  TCP_RTO_MAX);
	}
}

/* An ACK is "dubious" if it is a duplicate, carries a CA alert, or
 * arrives while we are not in the Open state.
 */
static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
{
	return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
		inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}

/* cwnd may grow on a dubious ACK only outside Recovery/CWR, and not past
 * ssthresh when ECE was signalled.
 */
static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
		!((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
}

/* Check that window update is acceptable.
 * The function assumes that snd_una<=ack<=snd_next.
 */
static inline int tcp_may_update_window(const struct tcp_sock *tp,
					const u32 ack, const u32 ack_seq,
					const u32 nwin)
{
	return	after(ack, tp->snd_una) ||
		after(ack_seq, tp->snd_wl1) ||
		(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
}

/* Update our send window.
 *
 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
 * and in FreeBSD. NetBSD's one is even worse.) is wrong.
 */
static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
				 u32 ack_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int flag = 0;
	u32 nwin = ntohs(tcp_hdr(skb)->window);

	/* The window field of a SYN segment is never scaled (RFC1323). */
	if (likely(!tcp_hdr(skb)->syn))
		nwin <<= tp->rx_opt.snd_wscale;

	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
		flag |= FLAG_WIN_UPDATE;
		tcp_update_wl(tp, ack_seq);

		if (tp->snd_wnd != nwin) {
			tp->snd_wnd = nwin;

			/* Note, it is the only place, where
			 * fast path is recovered for sending TCP.
			 */
			tp->pred_flags = 0;
			tcp_fast_path_check(sk);

			if (nwin > tp->max_window) {
				tp->max_window = nwin;
				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
			}
		}
	}

	tp->snd_una = ack;

	return flag;
}

/* A very conservative spurious RTO response algorithm: reduce cwnd and
 * continue in congestion avoidance.
 */
static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
	tp->snd_cwnd_cnt = 0;
	tp->bytes_acked = 0;
	TCP_ECN_queue_cwr(tp);
	tcp_moderate_cwnd(tp);
}

/* A conservative spurious RTO response algorithm: reduce cwnd using
 * rate halving and continue in congestion avoidance.
 */
static void tcp_ratehalving_spur_to_response(struct sock *sk)
{
	tcp_enter_cwr(sk, 0);
}

/* Undo the RTO-driven cwnd reduction entirely, unless ECE forces
 * the rate-halving response instead.
 */
static void tcp_undo_spur_to_response(struct sock *sk, int flag)
{
	if (flag & FLAG_ECE)
		tcp_ratehalving_spur_to_response(sk);
	else
		tcp_undo_cwr(sk, true);
}

/* F-RTO spurious RTO detection algorithm (RFC4138)
 *
 * F-RTO affects during two new ACKs following RTO (well, almost, see inline
 * comments). State (ACK number) is kept in frto_counter.
 When ACK advances
 * window (but not to or beyond highest sequence sent before RTO):
 *   On First ACK,  send two new segments out.
 *   On Second ACK, RTO was likely spurious. Do spurious response (response
 *		    algorithm is not part of the F-RTO detection algorithm
 *		    given in RFC4138 but can be selected separately).
 * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss
 * and TCP falls back to conventional RTO recovery. F-RTO allows overriding
 * of Nagle, this is done using frto_counter states 2 and 3, when a new data
 * segment of any size sent during F-RTO, state 2 is upgraded to 3.
 *
 * Rationale: if the RTO was spurious, new ACKs should arrive from the
 * original window even after we transmit two new data segments.
 *
 * SACK version:
 *   on first step, wait until first cumulative ACK arrives, then move to
 *   the second step. In second step, the next ACK decides.
 *
 * F-RTO is implemented (mainly) in four functions:
 *   - tcp_use_frto() is used to determine if TCP can use F-RTO
 *   - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is
 *     called when tcp_use_frto() showed green light
 *   - tcp_process_frto() handles incoming ACKs during F-RTO algorithm
 *   - tcp_enter_frto_loss() is called if there is not enough evidence
 *     to prove that the RTO is indeed spurious. It transfers the control
 *     from F-RTO to the conventional RTO recovery
 *
 * Returns non-zero when the caller should skip normal cwnd growth.
 */
static int tcp_process_frto(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_verify_left_out(tp);

	/* Duplicate the behavior from Loss state (fastretrans_alert) */
	if (flag & FLAG_DATA_ACKED)
		inet_csk(sk)->icsk_retransmits = 0;

	if ((flag & FLAG_NONHEAD_RETRANS_ACKED) ||
	    ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED)))
		tp->undo_marker = 0;

	if (!before(tp->snd_una, tp->frto_highmark)) {
		tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
		return 1;
	}

	if (!tcp_is_sackfrto(tp)) {
		/* RFC4138 shortcoming in step 2; should also have case c):
		 * ACK isn't duplicate nor advances window, e.g., opposite dir
		 * data, winupdate
		 */
		if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
			return 1;

		if (!(flag & FLAG_DATA_ACKED)) {
			tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
					    flag);
			return 1;
		}
	} else {
		if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
			/* Prevent sending of new data. */
			tp->snd_cwnd = min(tp->snd_cwnd,
					   tcp_packets_in_flight(tp));
			return 1;
		}

		if ((tp->frto_counter >= 2) &&
		    (!(flag & FLAG_FORWARD_PROGRESS) ||
		     ((flag & FLAG_DATA_SACKED) &&
		      !(flag & FLAG_ONLY_ORIG_SACKED)))) {
			/* RFC4138 shortcoming (see comment above) */
			if (!(flag & FLAG_FORWARD_PROGRESS) &&
			    (flag & FLAG_NOT_DUP))
				return 1;

			tcp_enter_frto_loss(sk, 3, flag);
			return 1;
		}
	}

	if (tp->frto_counter == 1) {
		/* tcp_may_send_now needs to see updated state */
		tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
		tp->frto_counter = 2;

		if (!tcp_may_send_now(sk))
			tcp_enter_frto_loss(sk, 2, flag);

		return 1;
	} else {
		/* Second new ACK: RTO was spurious; apply the configured
		 * response (sysctl_tcp_frto_response) and leave F-RTO.
		 */
		switch (sysctl_tcp_frto_response) {
		case 2:
			tcp_undo_spur_to_response(sk, flag);
			break;
		case 1:
			tcp_conservative_spur_to_response(tp);
			break;
		default:
			tcp_ratehalving_spur_to_response(sk);
			break;
		}
		tp->frto_counter = 0;
		tp->undo_marker = 0;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
	}
	return 0;
}

/* This routine deals with incoming acks, but not outgoing ones. */
static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 prior_snd_una = tp->snd_una;
	u32 ack_seq = TCP_SKB_CB(skb)->seq;
	u32 ack = TCP_SKB_CB(skb)->ack_seq;
	bool is_dupack = false;
	u32 prior_in_flight;
	u32 prior_fackets;
	int prior_packets;
	int prior_sacked = tp->sacked_out;
	int pkts_acked = 0;
	int newly_acked_sacked = 0;
	int frto_cwnd = 0;

	/* If the ack is older than previous acks
	 * then we can probably ignore it.
*/ if (before(ack, prior_snd_una)) goto old_ack; /* If the ack includes data we haven't sent yet, discard * this segment (RFC793 Section 3.9). */ if (after(ack, tp->snd_nxt)) goto invalid_ack; if (after(ack, prior_snd_una)) flag |= FLAG_SND_UNA_ADVANCED; if (sysctl_tcp_abc) { if (icsk->icsk_ca_state < TCP_CA_CWR) tp->bytes_acked += ack - prior_snd_una; else if (icsk->icsk_ca_state == TCP_CA_Loss) /* we assume just one segment left network */ tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache); } prior_fackets = tp->fackets_out; prior_in_flight = tcp_packets_in_flight(tp); if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. * No more checks are required. * Note, we use the fact that SND.UNA>=SND.WL2. */ tcp_update_wl(tp, ack_seq); tp->snd_una = ack; flag |= FLAG_WIN_UPDATE; tcp_ca_event(sk, CA_EVENT_FAST_ACK); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); } else { if (ack_seq != TCP_SKB_CB(skb)->end_seq) flag |= FLAG_DATA; else NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); if (TCP_SKB_CB(skb)->sacked) flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) flag |= FLAG_ECE; tcp_ca_event(sk, CA_EVENT_SLOW_ACK); } /* We passed data and got it acked, remove any soft error * log. Something worked... */ sk->sk_err_soft = 0; icsk->icsk_probes_out = 0; tp->rcv_tstamp = tcp_time_stamp; prior_packets = tp->packets_out; if (!prior_packets) goto no_queue; /* See if we can take anything off of the retransmit queue. 
*/ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); pkts_acked = prior_packets - tp->packets_out; newly_acked_sacked = (prior_packets - prior_sacked) - (tp->packets_out - tp->sacked_out); if (tp->frto_counter) frto_cwnd = tcp_process_frto(sk, flag); /* Guarantee sacktag reordering detection against wrap-arounds */ if (before(tp->frto_highmark, tp->snd_una)) tp->frto_highmark = 0; if (tcp_ack_is_dubious(sk, flag)) { /* Advance CWND, if state allows this. */ if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, is_dupack, flag); } else { if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) tcp_cong_avoid(sk, ack, prior_in_flight); } if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) dst_confirm(__sk_dst_get(sk)); return 1; no_queue: /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, is_dupack, flag); /* If this ack opens up a zero window, clear backoff. It was * being used to time the probes, and is probably far higher than * it needs to be for normal retransmission. */ if (tcp_send_head(sk)) tcp_ack_probe(sk); return 1; invalid_ack: SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); return -1; old_ack: /* If data was SACKed, tag it and see if we should send more data. * If data was DSACKed, see if we can undo a cwnd reduction. */ if (TCP_SKB_CB(skb)->sacked) { flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); newly_acked_sacked = tp->sacked_out - prior_sacked; tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, is_dupack, flag); } SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); return 0; } /* Look for tcp options. Normally only called on SYN and SYNACK packets. 
 * But, this can also be called on packets in the established flow when
 * the fast version below fails.
 *
 * Parsed results are written into *opt_rx; *hvpp is pointed at the cookie
 * payload when a TCPOPT_COOKIE option is seen. estab != 0 means the
 * connection is established, so SYN-only options are ignored.
 */
void tcp_parse_options(const struct sk_buff *skb,
                       struct tcp_options_received *opt_rx, const u8 **hvpp,
                       int estab)
{
    const unsigned char *ptr;
    const struct tcphdr *th = tcp_hdr(skb);
    int length = (th->doff * 4) - sizeof(struct tcphdr);

    ptr = (const unsigned char *)(th + 1);
    opt_rx->saw_tstamp = 0;

    while (length > 0) {
        int opcode = *ptr++;
        int opsize;

        switch (opcode) {
        case TCPOPT_EOL:
            return;
        case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
            length--;
            continue;
        default:
            opsize = *ptr++;
            if (opsize < 2) /* "silly options" */
                return;
            if (opsize > length)
                return; /* don't parse partial options */
            switch (opcode) {
            case TCPOPT_MSS:
                if (opsize == TCPOLEN_MSS && th->syn && !estab) {
                    u16 in_mss = get_unaligned_be16(ptr);
                    if (in_mss) {
                        /* Clamp peer MSS to any user-configured limit. */
                        if (opt_rx->user_mss &&
                            opt_rx->user_mss < in_mss)
                            in_mss = opt_rx->user_mss;
                        opt_rx->mss_clamp = in_mss;
                    }
                }
                break;
            case TCPOPT_WINDOW:
                if (opsize == TCPOLEN_WINDOW && th->syn &&
                    !estab && sysctl_tcp_window_scaling) {
                    __u8 snd_wscale = *(__u8 *)ptr;
                    opt_rx->wscale_ok = 1;
                    /* RFC 1323 limits the shift count to 14. */
                    if (snd_wscale > 14) {
                        if (net_ratelimit())
                            pr_info("%s: Illegal window scaling value %d >14 received\n",
                                    __func__,
                                    snd_wscale);
                        snd_wscale = 14;
                    }
                    opt_rx->snd_wscale = snd_wscale;
                }
                break;
            case TCPOPT_TIMESTAMP:
                if ((opsize == TCPOLEN_TIMESTAMP) &&
                    ((estab && opt_rx->tstamp_ok) ||
                     (!estab && sysctl_tcp_timestamps))) {
                    opt_rx->saw_tstamp = 1;
                    opt_rx->rcv_tsval = get_unaligned_be32(ptr);
                    opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
                }
                break;
            case TCPOPT_SACK_PERM:
                if (opsize == TCPOLEN_SACK_PERM && th->syn &&
                    !estab && sysctl_tcp_sack) {
                    opt_rx->sack_ok = TCP_SACK_SEEN;
                    tcp_sack_reset(opt_rx);
                }
                break;

            case TCPOPT_SACK:
                /* Record the offset of the SACK blocks within the
                 * header for later processing; they are not parsed here.
                 */
                if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
                   !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
                   opt_rx->sack_ok) {
                    TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
                }
                break;
#ifdef CONFIG_TCP_MD5SIG
            case TCPOPT_MD5SIG:
                /*
                 * The MD5 Hash has already been
                 * checked (see tcp_v{4,6}_do_rcv()).
                 */
                break;
#endif
            case TCPOPT_COOKIE:
                /* This option is variable length. */
                switch (opsize) {
                case TCPOLEN_COOKIE_BASE:
                    /* not yet implemented */
                    break;
                case TCPOLEN_COOKIE_PAIR:
                    /* not yet implemented */
                    break;
                case TCPOLEN_COOKIE_MIN+0:
                case TCPOLEN_COOKIE_MIN+2:
                case TCPOLEN_COOKIE_MIN+4:
                case TCPOLEN_COOKIE_MIN+6:
                case TCPOLEN_COOKIE_MAX:
                    /* 16-bit multiple */
                    opt_rx->cookie_plus = opsize;
                    *hvpp = ptr;
                    break;

                default:
                    /* ignore option */
                    break;
                }
                break;
            }

            ptr += opsize-2;
            length -= opsize;
        }
    }
}
EXPORT_SYMBOL(tcp_parse_options);

/* Recognize the exact 12-byte aligned timestamp option layout
 * (NOP NOP TIMESTAMP len) that well-behaved stacks emit; returns 1 and
 * fills tp->rx_opt on a match, 0 otherwise.
 */
static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
{
    const __be32 *ptr = (const __be32 *)(th + 1);

    if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
                      | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
        tp->rx_opt.saw_tstamp = 1;
        ++ptr;
        tp->rx_opt.rcv_tsval = ntohl(*ptr);
        ++ptr;
        tp->rx_opt.rcv_tsecr = ntohl(*ptr);
        return 1;
    }
    return 0;
}

/* Fast parse options. This hopes to only see timestamps.
 * If it is wrong it falls back on tcp_parse_options().
 */
static int tcp_fast_parse_options(const struct sk_buff *skb,
                                  const struct tcphdr *th,
                                  struct tcp_sock *tp, const u8 **hvpp)
{
    /* In the spirit of fast parsing, compare doff directly to constant
     * values.  Because equality is used, short doff can be ignored here.
     */
    if (th->doff == (sizeof(*th) / 4)) {
        /* No options at all. */
        tp->rx_opt.saw_tstamp = 0;
        return 0;
    } else if (tp->rx_opt.tstamp_ok &&
               th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
        if (tcp_parse_aligned_timestamp(tp, th))
            return 1;
    }
    tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
    return 1;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * Parse MD5 Signature option
 *
 * Returns a pointer to the 16-byte hash inside the header, or NULL when
 * no well-formed MD5SIG option is present.
 */
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
{
    int length = (th->doff << 2) - sizeof(*th);
    const u8 *ptr = (const u8 *)(th + 1);

    /* If the TCP option is too short, we can short cut */
    if (length < TCPOLEN_MD5SIG)
        return NULL;

    while (length > 0) {
        int opcode = *ptr++;
        int opsize;

        switch(opcode) {
        case TCPOPT_EOL:
            return NULL;
        case TCPOPT_NOP:
            length--;
            continue;
        default:
            opsize = *ptr++;
            if (opsize < 2 || opsize > length)
                return NULL;
            if (opcode == TCPOPT_MD5SIG)
                return opsize == TCPOLEN_MD5SIG ? ptr : NULL;
        }
        ptr += opsize - 2;
        length -= opsize;
    }
    return NULL;
}
EXPORT_SYMBOL(tcp_parse_md5sig_option);
#endif

/* Remember the most recently seen peer timestamp for PAWS (RFC 1323). */
static inline void tcp_store_ts_recent(struct tcp_sock *tp)
{
    tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
    tp->rx_opt.ts_recent_stamp = get_seconds();
}

static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
{
    if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
        /* PAWS bug workaround wrt. ACK frames, the PAWS discard
         * extra check below makes sure this can only happen
         * for pure ACK frames.  -DaveM
         *
         * Not only, also it occurs for expired timestamps.
         */

        if (tcp_paws_check(&tp->rx_opt, 0))
            tcp_store_ts_recent(tp);
    }
}

/* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
 *
 * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
 * it can pass through stack. So, the following predicate verifies that
 * this segment is not used for anything but congestion avoidance or
 * fast retransmit. Moreover, we even are able to eliminate most of such
 * second order effects, if we apply some small "replay" window (~RTO)
 * to timestamp space.
 *
 * All these measures still do not guarantee that we reject wrapped ACKs
 * on networks with high bandwidth, when sequence space is recycled fastly,
 * but it guarantees that such events will be very rare and do not affect
 * connection seriously. This doesn't look nice, but alas, PAWS is really
 * buggy extension.
 *
 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC
 * states that events when retransmit arrives after original data are rare.
 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
 * the biggest problem on large power networks even with minor reordering.
 * OK, let's give it small replay window. If peer clock is even 1hz, it is safe
 * up to bandwidth of 18Gigabit/sec. 8) ]
 */

static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
{
    const struct tcp_sock *tp = tcp_sk(sk);
    const struct tcphdr *th = tcp_hdr(skb);
    u32 seq = TCP_SKB_CB(skb)->seq;
    u32 ack = TCP_SKB_CB(skb)->ack_seq;

    return (/* 1. Pure ACK with correct sequence number. */
            (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&

            /* 2. ... and duplicate ACK. */
            ack == tp->snd_una &&

            /* 3. ... and does not update window. */
            !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&

            /* 4. ... and sits in replay window. */
            (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
}

/* A segment fails PAWS only when its timestamp is stale AND it is not a
 * harmless disordered ACK as classified above.
 */
static inline int tcp_paws_discard(const struct sock *sk,
                                   const struct sk_buff *skb)
{
    const struct tcp_sock *tp = tcp_sk(sk);

    return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
           !tcp_disordered_ack(sk, skb);
}

/* Check segment sequence number for validity.
 *
 * Segment controls are considered valid, if the segment
 * fits to the window after truncation to the window. Acceptability
 * of data (and SYN, FIN, of course) is checked separately.
 * See tcp_data_queue(), for example.
 *
 * Also, controls (RST is main one) are accepted using RCV.WUP instead
 * of RCV.NXT.
 Peer still did not advance his SND.UNA when we
 * delayed ACK, so that his SND.UNA <= our RCV.WUP.
 * (borrowed from freebsd)
 */

static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
{
    return  !before(end_seq, tp->rcv_wup) &&
            !after(seq, tp->rcv_nxt + tcp_receive_window(tp));
}

/* When we get a reset we do this. */
static void tcp_reset(struct sock *sk)
{
    /* We want the right error as BSD sees it (and indeed as we do). */
    switch (sk->sk_state) {
    case TCP_SYN_SENT:
        sk->sk_err = ECONNREFUSED;
        break;
    case TCP_CLOSE_WAIT:
        sk->sk_err = EPIPE;
        break;
    case TCP_CLOSE:
        return;
    default:
        sk->sk_err = ECONNRESET;
    }
    /* This barrier is coupled with smp_rmb() in tcp_poll() */
    smp_wmb();

    if (!sock_flag(sk, SOCK_DEAD))
        sk->sk_error_report(sk);

    tcp_done(sk);
}

/*
 *  Process the FIN bit. This now behaves as it is supposed to work
 *  and the FIN takes effect when it is validly part of sequence
 *  space. Not before when we get holes.
 *
 *  If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
 *  (and thence onto LAST-ACK and finally, CLOSE, we never enter
 *  TIME-WAIT)
 *
 *  If we are in FINWAIT-1, a received FIN indicates simultaneous
 *  close and we go into CLOSING (and later onto TIME-WAIT)
 *
 *  If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
 */
static void tcp_fin(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);

    inet_csk_schedule_ack(sk);

    sk->sk_shutdown |= RCV_SHUTDOWN;
    sock_set_flag(sk, SOCK_DONE);

    switch (sk->sk_state) {
    case TCP_SYN_RECV:
    case TCP_ESTABLISHED:
        /* Move to CLOSE_WAIT */
        tcp_set_state(sk, TCP_CLOSE_WAIT);
        inet_csk(sk)->icsk_ack.pingpong = 1;
        break;

    case TCP_CLOSE_WAIT:
    case TCP_CLOSING:
        /* Received a retransmission of the FIN, do
         * nothing.
         */
        break;
    case TCP_LAST_ACK:
        /* RFC793: Remain in the LAST-ACK state. */
        break;

    case TCP_FIN_WAIT1:
        /* This case occurs when a simultaneous close
         * happens, we must ack the received FIN and
         * enter the CLOSING state.
         */
        tcp_send_ack(sk);
        tcp_set_state(sk, TCP_CLOSING);
        break;
    case TCP_FIN_WAIT2:
        /* Received a FIN -- send ACK and enter TIME_WAIT. */
        tcp_send_ack(sk);
        tcp_time_wait(sk, TCP_TIME_WAIT, 0);
        break;
    default:
        /* Only TCP_LISTEN and TCP_CLOSE are left, in these
         * cases we should never reach this piece of code.
         */
        pr_err("%s: Impossible, sk->sk_state=%d\n",
               __func__, sk->sk_state);
        break;
    }

    /* It _is_ possible, that we have something out-of-order _after_ FIN.
     * Probably, we should reset in this case. For now drop them.
     */
    __skb_queue_purge(&tp->out_of_order_queue);
    if (tcp_is_sack(tp))
        tcp_sack_reset(&tp->rx_opt);
    sk_mem_reclaim(sk);

    if (!sock_flag(sk, SOCK_DEAD)) {
        sk->sk_state_change(sk);

        /* Do not send POLL_HUP for half duplex close. */
        if (sk->sk_shutdown == SHUTDOWN_MASK ||
            sk->sk_state == TCP_CLOSE)
            sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
        else
            sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
    }
}

/* Grow SACK block *sp to also cover [seq, end_seq] when the two ranges
 * touch or overlap; returns 1 if merged, 0 if disjoint.
 */
static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
                                  u32 end_seq)
{
    if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
        if (before(seq, sp->start_seq))
            sp->start_seq = seq;
        if (after(end_seq, sp->end_seq))
            sp->end_seq = end_seq;
        return 1;
    }
    return 0;
}

/* Arm a D-SACK block [seq, end_seq) to be sent on the next ACK. */
static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
{
    struct tcp_sock *tp = tcp_sk(sk);

    if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
        int mib_idx;

        if (before(seq, tp->rcv_nxt))
            mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
        else
            mib_idx = LINUX_MIB_TCPDSACKOFOSENT;

        NET_INC_STATS_BH(sock_net(sk), mib_idx);

        tp->rx_opt.dsack = 1;
        tp->duplicate_sack[0].start_seq = seq;
        tp->duplicate_sack[0].end_seq = end_seq;
    }
}

static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
{
    struct tcp_sock *tp = tcp_sk(sk);

    if (!tp->rx_opt.dsack)
        tcp_dsack_set(sk, seq, end_seq);
    else
        tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
}

/* Answer a segment that duplicates already-received data: record a D-SACK
 * for the duplicated range (when enabled) and ACK immediately.
 */
static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
{
    struct tcp_sock *tp = tcp_sk(sk);

    if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
        before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
        tcp_enter_quickack_mode(sk);

        if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
            u32 end_seq = TCP_SKB_CB(skb)->end_seq;

            if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
                end_seq = tp->rcv_nxt;
            tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
        }
    }

    tcp_send_ack(sk);
}

/* These routines update the SACK block as out-of-order packets arrive or
 * in-order packets close up the sequence space.
 */
static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
{
    int this_sack;
    struct tcp_sack_block *sp = &tp->selective_acks[0];
    struct tcp_sack_block *swalk = sp + 1;

    /* See if the recent change to the first SACK eats into
     * or hits the sequence space of other SACK blocks, if so coalesce.
     */
    for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) {
        if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
            int i;

            /* Zap SWALK, by moving every further SACK up by one slot.
             * Decrease num_sacks.
             */
            tp->rx_opt.num_sacks--;
            for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
                sp[i] = sp[i + 1];
            continue;
        }
        this_sack++, swalk++;
    }
}

/* Fold a newly queued out-of-order range [seq, end_seq) into the SACK
 * block list, keeping the most recent block first (RFC 2018).
 */
static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct tcp_sack_block *sp = &tp->selective_acks[0];
    int cur_sacks = tp->rx_opt.num_sacks;
    int this_sack;

    if (!cur_sacks)
        goto new_sack;

    for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
        if (tcp_sack_extend(sp, seq, end_seq)) {
            /* Rotate this_sack to the first one. */
            for (; this_sack > 0; this_sack--, sp--)
                swap(*sp, *(sp - 1));
            if (cur_sacks > 1)
                tcp_sack_maybe_coalesce(tp);
            return;
        }
    }

    /* Could not find an adjacent existing SACK, build a new one,
     * put it at the front, and shift everyone else down.  We
     * always know there is at least one SACK present already here.
     *
     * If the sack array is full, forget about the last one.
     */
    if (this_sack >= TCP_NUM_SACKS) {
        this_sack--;
        tp->rx_opt.num_sacks--;
        sp--;
    }
    for (; this_sack > 0; this_sack--, sp--)
        *sp = *(sp - 1);

new_sack:
    /* Build the new head SACK, and we're done. */
    sp->start_seq = seq;
    sp->end_seq = end_seq;
    tp->rx_opt.num_sacks++;
}

/* RCV.NXT advances, some SACKs should be eaten. */

static void tcp_sack_remove(struct tcp_sock *tp)
{
    struct tcp_sack_block *sp = &tp->selective_acks[0];
    int num_sacks = tp->rx_opt.num_sacks;
    int this_sack;

    /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
    if (skb_queue_empty(&tp->out_of_order_queue)) {
        tp->rx_opt.num_sacks = 0;
        return;
    }

    for (this_sack = 0; this_sack < num_sacks;) {
        /* Check if the start of the sack is covered by RCV.NXT. */
        if (!before(tp->rcv_nxt, sp->start_seq)) {
            int i;

            /* RCV.NXT must cover all the block! */
            WARN_ON(before(tp->rcv_nxt, sp->end_seq));

            /* Zap this SACK, by moving forward any other SACKS. */
            for (i=this_sack+1; i < num_sacks; i++)
                tp->selective_acks[i-1] = tp->selective_acks[i];
            num_sacks--;
            continue;
        }
        this_sack++;
        sp++;
    }
    tp->rx_opt.num_sacks = num_sacks;
}

/* This one checks to see if we can put data from the
 * out_of_order queue into the receive_queue.
 */
static void tcp_ofo_queue(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);
    __u32 dsack_high = tp->rcv_nxt;
    struct sk_buff *skb;

    while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
        /* Stop at the first gap: this skb is still out of order. */
        if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
            break;

        if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
            __u32 dsack = dsack_high;

            if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
                dsack_high = TCP_SKB_CB(skb)->end_seq;
            tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
        }

        if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
            SOCK_DEBUG(sk, "ofo packet was already received\n");
            __skb_unlink(skb, &tp->out_of_order_queue);
            __kfree_skb(skb);
            continue;
        }
        SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
                   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
                   TCP_SKB_CB(skb)->end_seq);

        __skb_unlink(skb, &tp->out_of_order_queue);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        if (tcp_hdr(skb)->fin)
            tcp_fin(sk);
    }
}

static int tcp_prune_ofo_queue(struct sock *sk);
static int tcp_prune_queue(struct sock *sk);

/* Try to make room for an incoming skb of "size" bytes; prunes the
 * receive and ofo queues as needed. Returns 0 on success, -1 when the
 * segment must be dropped.
 */
static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
{
    if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
        !sk_rmem_schedule(sk, size)) {

        if (tcp_prune_queue(sk) < 0)
            return -1;

        if (!sk_rmem_schedule(sk, size)) {
            if (!tcp_prune_ofo_queue(sk))
                return -1;

            if (!sk_rmem_schedule(sk, size))
                return -1;
        }
    }
    return 0;
}

/* Queue an out-of-order segment, keeping the ofo queue sorted by sequence
 * number, coalescing where possible, and updating the SACK blocks.
 */
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct sk_buff *skb1;
    u32 seq, end_seq;

    TCP_ECN_check_ce(tp, skb);

    if (tcp_try_rmem_schedule(sk, skb->truesize)) {
        /* TODO: should increment a counter */
        __kfree_skb(skb);
        return;
    }

    /* Disable header prediction. */
    tp->pred_flags = 0;
    inet_csk_schedule_ack(sk);

    SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
               tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);

    skb1 = skb_peek_tail(&tp->out_of_order_queue);
    if (!skb1) {
        /* Initial out of order segment, build 1 SACK. */
        if (tcp_is_sack(tp)) {
            tp->rx_opt.num_sacks = 1;
            tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
            tp->selective_acks[0].end_seq =
                                        TCP_SKB_CB(skb)->end_seq;
        }
        __skb_queue_head(&tp->out_of_order_queue, skb);
        goto end;
    }

    seq = TCP_SKB_CB(skb)->seq;
    end_seq = TCP_SKB_CB(skb)->end_seq;

    if (seq == TCP_SKB_CB(skb1)->end_seq) {
        /* Packets in ofo can stay in queue a long time.
         * Better try to coalesce them right now
         * to avoid future tcp_collapse_ofo_queue(),
         * probably the most expensive function in tcp stack.
         */
        if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) {
            NET_INC_STATS_BH(sock_net(sk),
                             LINUX_MIB_TCPRCVCOALESCE);
            BUG_ON(skb_copy_bits(skb, 0,
                                 skb_put(skb1, skb->len),
                                 skb->len));
            TCP_SKB_CB(skb1)->end_seq = end_seq;
            TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
            __kfree_skb(skb);
            skb = NULL;
        } else {
            __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
        }

        if (!tp->rx_opt.num_sacks ||
            tp->selective_acks[0].end_seq != seq)
            goto add_sack;

        /* Common case: data arrive in order after hole. */
        tp->selective_acks[0].end_seq = end_seq;
        goto end;
    }

    /* Find place to insert this segment. */
    while (1) {
        if (!after(TCP_SKB_CB(skb1)->seq, seq))
            break;
        if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
            skb1 = NULL;
            break;
        }
        skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
    }

    /* Do skb overlap to previous one? */
    if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
        if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
            /* All the bits are present. Drop. */
            __kfree_skb(skb);
            skb = NULL;
            tcp_dsack_set(sk, seq, end_seq);
            goto add_sack;
        }
        if (after(seq, TCP_SKB_CB(skb1)->seq)) {
            /* Partial overlap. */
            tcp_dsack_set(sk, seq,
                          TCP_SKB_CB(skb1)->end_seq);
        } else {
            if (skb_queue_is_first(&tp->out_of_order_queue,
                                   skb1))
                skb1 = NULL;
            else
                skb1 = skb_queue_prev(
                        &tp->out_of_order_queue,
                        skb1);
        }
    }
    if (!skb1)
        __skb_queue_head(&tp->out_of_order_queue, skb);
    else
        __skb_queue_after(&tp->out_of_order_queue, skb1, skb);

    /* And clean segments covered by new one as whole. */
    while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
        skb1 = skb_queue_next(&tp->out_of_order_queue, skb);

        if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
            break;
        if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
            tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                             end_seq);
            break;
        }
        __skb_unlink(skb1, &tp->out_of_order_queue);
        tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                         TCP_SKB_CB(skb1)->end_seq);
        __kfree_skb(skb1);
    }

add_sack:
    if (tcp_is_sack(tp))
        tcp_sack_new_ofo_skb(sk, seq, end_seq);
end:
    if (skb)
        skb_set_owner_r(skb, sk);
}

static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
    const struct tcphdr *th = tcp_hdr(skb);
    struct tcp_sock *tp = tcp_sk(sk);
    int eaten = -1;

    if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
        goto drop;

    skb_dst_drop(skb);
    __skb_pull(skb, th->doff * 4);

    TCP_ECN_accept_cwr(tp, skb);

    tp->rx_opt.dsack = 0;

    /*  Queue data for delivery to the user.
     *  Packets in sequence go to the receive queue.
     *  Out of sequence packets to the out_of_order_queue.
     */
    if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
        if (tcp_receive_window(tp) == 0)
            goto out_of_window;

        /* Ok. In sequence. In window. */
        if (tp->ucopy.task == current &&
            tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
            sock_owned_by_user(sk) && !tp->urg_data) {
            int chunk = min_t(unsigned int, skb->len,
                              tp->ucopy.len);

            /* Copy straight to the waiting reader's iovec,
             * bypassing the receive queue.
             */
            __set_current_state(TASK_RUNNING);

            local_bh_enable();
            if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
                tp->ucopy.len -= chunk;
                tp->copied_seq += chunk;
                eaten = (chunk == skb->len);
                tcp_rcv_space_adjust(sk);
            }
            local_bh_disable();
        }

        if (eaten <= 0) {
queue_and_out:
            if (eaten < 0 &&
                tcp_try_rmem_schedule(sk, skb->truesize))
                goto drop;

            skb_set_owner_r(skb, sk);
            __skb_queue_tail(&sk->sk_receive_queue, skb);
        }
        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        if (skb->len)
            tcp_event_data_recv(sk, skb);
        if (th->fin)
            tcp_fin(sk);

        if (!skb_queue_empty(&tp->out_of_order_queue)) {
            tcp_ofo_queue(sk);

            /* RFC2581. 4.2. SHOULD send immediate ACK, when
             * gap in queue is filled.
             */
            if (skb_queue_empty(&tp->out_of_order_queue))
                inet_csk(sk)->icsk_ack.pingpong = 0;
        }

        if (tp->rx_opt.num_sacks)
            tcp_sack_remove(tp);

        tcp_fast_path_check(sk);

        if (eaten > 0)
            __kfree_skb(skb);
        else if (!sock_flag(sk, SOCK_DEAD))
            sk->sk_data_ready(sk, 0);
        return;
    }

    if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
        /* A retransmit, 2nd most common case.  Force an immediate ack. */
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
        tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);

out_of_window:
        tcp_enter_quickack_mode(sk);
        inet_csk_schedule_ack(sk);
drop:
        __kfree_skb(skb);
        return;
    }

    /* Out of window. F.e. zero window probe. */
    if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
        goto out_of_window;

    tcp_enter_quickack_mode(sk);

    if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
        /* Partial packet, seq < rcv_next < end_seq */
        SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
                   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
                   TCP_SKB_CB(skb)->end_seq);

        tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);

        /* If window is closed, drop tail of packet. But after
         * remembering D-SACK for its head made in previous line.
         */
        if (!tcp_receive_window(tp))
            goto out_of_window;
        goto queue_and_out;
    }

    tcp_data_queue_ofo(sk, skb);
}

/* Unlink and free skb from "list"; returns the next skb, or NULL when skb
 * was the last element.
 */
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
                                        struct sk_buff_head *list)
{
    struct sk_buff *next = NULL;

    if (!skb_queue_is_last(list, skb))
        next = skb_queue_next(list, skb);

    __skb_unlink(skb, list);
    __kfree_skb(skb);
    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);

    return next;
}

/* Collapse contiguous sequence of skbs head..tail with
 * sequence numbers start..end.
 *
 * If tail is NULL, this means until the end of the list.
 *
 * Segments with FIN/SYN are not collapsed (only because this
 * simplifies code)
 */
static void
tcp_collapse(struct sock *sk, struct sk_buff_head *list,
             struct sk_buff *head, struct sk_buff *tail,
             u32 start, u32 end)
{
    struct sk_buff *skb, *n;
    bool end_of_skbs;

    /* First, check that queue is collapsible and find
     * the point where collapsing can be useful.
     */
    skb = head;
restart:
    end_of_skbs = true;
    skb_queue_walk_from_safe(list, skb, n) {
        if (skb == tail)
            break;
        /* No new bits? It is possible on ofo queue. */
        if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
            skb = tcp_collapse_one(sk, skb, list);
            if (!skb)
                break;
            goto restart;
        }

        /* The first skb to collapse is:
         * - not SYN/FIN and
         * - bloated or contains data before "start" or
         *   overlaps to the next one.
         */
        if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
            (tcp_win_from_space(skb->truesize) > skb->len ||
             before(TCP_SKB_CB(skb)->seq, start))) {
            end_of_skbs = false;
            break;
        }

        if (!skb_queue_is_last(list, skb)) {
            struct sk_buff *next = skb_queue_next(list, skb);
            if (next != tail &&
                TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
                end_of_skbs = false;
                break;
            }
        }

        /* Decided to skip this, advance start seq. */
        start = TCP_SKB_CB(skb)->end_seq;
    }
    if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
        return;

    /* Second pass: copy data into tightly-packed replacement skbs,
     * freeing the bloated originals as they are drained.
     */
    while (before(start, end)) {
        struct sk_buff *nskb;
        unsigned int header = skb_headroom(skb);
        int copy = SKB_MAX_ORDER(header, 0);

        /* Too big header? This can happen with IPv6. */
        if (copy < 0)
            return;
        if (end - start < copy)
            copy = end - start;
        nskb = alloc_skb(copy + header, GFP_ATOMIC);
        if (!nskb)
            return;

        skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head);
        skb_set_network_header(nskb, (skb_network_header(skb) -
                                      skb->head));
        skb_set_transport_header(nskb, (skb_transport_header(skb) -
                                        skb->head));
        skb_reserve(nskb, header);
        memcpy(nskb->head, skb->head, header);
        memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
        TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
        __skb_queue_before(list, skb, nskb);
        skb_set_owner_r(nskb, sk);

        /* Copy data, releasing collapsed skbs. */
        while (copy > 0) {
            int offset = start - TCP_SKB_CB(skb)->seq;
            int size = TCP_SKB_CB(skb)->end_seq - start;

            BUG_ON(offset < 0);
            if (size > 0) {
                size = min(copy, size);
                if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
                    BUG();
                TCP_SKB_CB(nskb)->end_seq += size;
                copy -= size;
                start += size;
            }
            if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
                skb = tcp_collapse_one(sk, skb, list);
                if (!skb ||
                    skb == tail ||
                    tcp_hdr(skb)->syn ||
                    tcp_hdr(skb)->fin)
                    return;
            }
        }
    }
}

/* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
 * and tcp_collapse() them until all the queue is collapsed.
 */
static void tcp_collapse_ofo_queue(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);
    struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
    struct sk_buff *head;
    u32 start, end;

    if (skb == NULL)
        return;

    start = TCP_SKB_CB(skb)->seq;
    end = TCP_SKB_CB(skb)->end_seq;
    head = skb;

    for (;;) {
        struct sk_buff *next = NULL;

        if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
            next = skb_queue_next(&tp->out_of_order_queue, skb);
        skb = next;

        /* Segment is terminated when we see gap or when
         * we are at the end of all the queue. */
        if (!skb ||
            after(TCP_SKB_CB(skb)->seq, end) ||
            before(TCP_SKB_CB(skb)->end_seq, start)) {
            tcp_collapse(sk, &tp->out_of_order_queue,
                         head, skb, start, end);
            head = skb;
            if (!skb)
                break;
            /* Start new segment */
            start = TCP_SKB_CB(skb)->seq;
            end = TCP_SKB_CB(skb)->end_seq;
        } else {
            if (before(TCP_SKB_CB(skb)->seq, start))
                start = TCP_SKB_CB(skb)->seq;
            if (after(TCP_SKB_CB(skb)->end_seq, end))
                end = TCP_SKB_CB(skb)->end_seq;
        }
    }
}

/*
 * Purge the out-of-order queue.
 * Return true if queue was pruned.
 */
static int tcp_prune_ofo_queue(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);
    int res = 0;

    if (!skb_queue_empty(&tp->out_of_order_queue)) {
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
        __skb_queue_purge(&tp->out_of_order_queue);

        /* Reset SACK state.  A conforming SACK implementation will
         * do the same at a timeout based retransmit.  When a connection
         * is in a sad state like this, we care only about integrity
         * of the connection not performance.
         */
        if (tp->rx_opt.sack_ok)
            tcp_sack_reset(&tp->rx_opt);
        sk_mem_reclaim(sk);
        res = 1;
    }
    return res;
}

/* Reduce allocated memory if we can, trying to get
 * the socket within its memory limits again.
 *
 * Return less than zero if we should start dropping frames
 * until the socket owning process reads some of the data
 * to stabilize the situation.
 */
static int tcp_prune_queue(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);

    SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);

    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);

    if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
        tcp_clamp_window(sk);
    else if (sk_under_memory_pressure(sk))
        tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);

    tcp_collapse_ofo_queue(sk);
    if (!skb_queue_empty(&sk->sk_receive_queue))
        tcp_collapse(sk, &sk->sk_receive_queue,
                     skb_peek(&sk->sk_receive_queue),
                     NULL,
                     tp->copied_seq, tp->rcv_nxt);
    sk_mem_reclaim(sk);

    if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
        return 0;

    /* Collapsing did not help, destructive actions follow.
     * This must not ever occur. */

    tcp_prune_ofo_queue(sk);

    if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
        return 0;

    /* If we are really being abused, tell the caller to silently
     * drop receive data on the floor.  It will get retransmitted
     * and hopefully then we'll have sufficient space.
     */
    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);

    /* Massive buffer overcommit. */
    tp->pred_flags = 0;
    return -1;
}

/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
 * As additional protections, we do not touch cwnd in retransmission phases,
 * and if application hit its sndbuf limit recently.
 */
void tcp_cwnd_application_limited(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);

    if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
        sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
        /* Limited by application or receiver window. */
        u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
        u32 win_used = max(tp->snd_cwnd_used, init_win);
        if (win_used < tp->snd_cwnd) {
            tp->snd_ssthresh = tcp_current_ssthresh(sk);
            tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
        }
        tp->snd_cwnd_used = 0;
    }
    tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* Returns 1 when it is safe to grow sk_sndbuf, 0 otherwise. */
static int tcp_should_expand_sndbuf(const struct sock *sk)
{
    const struct tcp_sock *tp = tcp_sk(sk);

    /* If the user specified a specific send buffer setting, do
     * not modify it.
     */
    if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
        return 0;

    /* If we are under global TCP memory pressure, do not expand.  */
    if (sk_under_memory_pressure(sk))
        return 0;

    /* If we are under soft global TCP memory pressure, do not expand.  */
    if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
        return 0;

    /* If we filled the congestion window, do not expand.  */
    if (tp->packets_out >= tp->snd_cwnd)
        return 0;

    return 1;
}

/* When incoming ACK allowed to free some skb from write_queue,
 * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket
 * on the exit from tcp input handler.
 *
 * PROBLEM: sndbuf expansion does not work well with largesend.
 */
static void tcp_new_space(struct sock *sk)
{
    struct tcp_sock *tp = tcp_sk(sk);

    if (tcp_should_expand_sndbuf(sk)) {
        int sndmem = SKB_TRUESIZE(max_t(u32,
                                        tp->rx_opt.mss_clamp,
                                        tp->mss_cache) +
                                  MAX_TCP_HEADER);
        int demanded = max_t(unsigned int, tp->snd_cwnd,
                             tp->reordering + 1);
        sndmem *= 2 * demanded;
        if (sndmem > sk->sk_sndbuf)
            sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
        tp->snd_cwnd_stamp = tcp_time_stamp;
    }

    sk->sk_write_space(sk);
}

static void tcp_check_space(struct sock *sk)
{
    if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
        sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
        if (sk->sk_socket &&
            test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
            tcp_new_space(sk);
    }
}

static inline void tcp_data_snd_check(struct sock *sk)
{
    tcp_push_pending_frames(sk);
    tcp_check_space(sk);
}

/*
 * Check if sending an ack is needed.
*/ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) { struct tcp_sock *tp = tcp_sk(sk); /* More than one full frame received... */ if (((tp->rcv_nxt - tp->rcv_wup) > (inet_csk(sk)->icsk_ack.rcv_mss) * sysctl_tcp_delack_seg && /* ... and right edge of window advances far enough. * (tcp_recvmsg() will send ACK otherwise). Or... */ __tcp_select_window(sk) >= tp->rcv_wnd) || /* We ACK each frame or... */ tcp_in_quickack_mode(sk) || /* We have out of order data. */ (ofo_possible && skb_peek(&tp->out_of_order_queue))) { /* Then ack it now */ tcp_send_ack(sk); } else { /* Else, send delayed ack. */ tcp_send_delayed_ack(sk); } } static inline void tcp_ack_snd_check(struct sock *sk) { if (!inet_csk_ack_scheduled(sk)) { /* We sent a data segment already. */ return; } __tcp_ack_snd_check(sk, 1); } /* * This routine is only called when we have urgent data * signaled. Its the 'slow' part of tcp_urg. It could be * moved inline now as tcp_urg is only called from one * place. We handle URGent data wrong. We have to - as * BSD still doesn't use the correction from RFC961. * For 1003.1g we should support a new option TCP_STDURG to permit * either form (or just set the sysctl tcp_stdurg). */ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) { struct tcp_sock *tp = tcp_sk(sk); u32 ptr = ntohs(th->urg_ptr); if (ptr && !sysctl_tcp_stdurg) ptr--; ptr += ntohl(th->seq); /* Ignore urgent data that we've already seen and read. */ if (after(tp->copied_seq, ptr)) return; /* Do not replay urg ptr. * * NOTE: interesting situation not covered by specs. * Misbehaving sender may send urg ptr, pointing to segment, * which we already have in ofo queue. We are not able to fetch * such data and will stay in TCP_URG_NOTYET until will be eaten * by recvmsg(). Seems, we are not obliged to handle such wicked * situations. But it is worth to think about possibility of some * DoSes using some hypothetical application level deadlock. 
*/ if (before(ptr, tp->rcv_nxt)) return; /* Do we already have a newer (or duplicate) urgent pointer? */ if (tp->urg_data && !after(ptr, tp->urg_seq)) return; /* Tell the world about our new urgent pointer. */ sk_send_sigurg(sk); /* We may be adding urgent data when the last byte read was * urgent. To do this requires some care. We cannot just ignore * tp->copied_seq since we would read the last urgent byte again * as data, nor can we alter copied_seq until this data arrives * or we break the semantics of SIOCATMARK (and thus sockatmark()) * * NOTE. Double Dutch. Rendering to plain English: author of comment * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB); * and expect that both A and B disappear from stream. This is _wrong_. * Though this happens in BSD with high probability, this is occasional. * Any application relying on this is buggy. Note also, that fix "works" * only in this artificial test. Insert some normal data between A and B and we will * decline of BSD again. Verdict: it is better to remove to trap * buggy users. */ if (tp->urg_seq == tp->copied_seq && tp->urg_data && !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); tp->copied_seq++; if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); } } tp->urg_data = TCP_URG_NOTYET; tp->urg_seq = ptr; /* Disable header prediction. */ tp->pred_flags = 0; } /* This is the 'fast' part of urgent handling. */ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) { struct tcp_sock *tp = tcp_sk(sk); /* Check if we get a new urgent pointer - normally not. */ if (th->urg) tcp_check_urg(sk, th); /* Do we wait for any urgent data? - normally not... */ if (tp->urg_data == TCP_URG_NOTYET) { u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - th->syn; /* Is the urgent pointer pointing into this packet? 
*/ if (ptr < skb->len) { u8 tmp; if (skb_copy_bits(skb, ptr, &tmp, 1)) BUG(); tp->urg_data = TCP_URG_VALID | tmp; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, 0); } } } static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) { struct tcp_sock *tp = tcp_sk(sk); int chunk = skb->len - hlen; int err; local_bh_enable(); if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk); else err = skb_copy_and_csum_datagram_iovec(skb, hlen, tp->ucopy.iov); if (!err) { tp->ucopy.len -= chunk; tp->copied_seq += chunk; tcp_rcv_space_adjust(sk); } local_bh_disable(); return err; } static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { __sum16 result; if (sock_owned_by_user(sk)) { local_bh_enable(); result = __tcp_checksum_complete(skb); local_bh_disable(); } else { result = __tcp_checksum_complete(skb); } return result; } static inline int tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { return !skb_csum_unnecessary(skb) && __tcp_checksum_complete_user(sk, skb); } #ifdef CONFIG_NET_DMA static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen) { struct tcp_sock *tp = tcp_sk(sk); int chunk = skb->len - hlen; int dma_cookie; int copied_early = 0; if (tp->ucopy.wakeup) return 0; if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) tp->ucopy.dma_chan = net_dma_find_channel(); if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, skb, hlen, tp->ucopy.iov, chunk, tp->ucopy.pinned_list); if (dma_cookie < 0) goto out; tp->ucopy.dma_cookie = dma_cookie; copied_early = 1; tp->ucopy.len -= chunk; tp->copied_seq += chunk; tcp_rcv_space_adjust(sk); if ((tp->ucopy.len == 0) || (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { tp->ucopy.wakeup = 1; sk->sk_data_ready(sk, 0); } } else if (chunk > 0) { tp->ucopy.wakeup = 1; sk->sk_data_ready(sk, 0); } 
out: return copied_early; } #endif /* CONFIG_NET_DMA */ /* Does PAWS and seqno based validation of an incoming segment, flags will * play significant role here. */ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, int syn_inerr) { const u8 *hash_location; struct tcp_sock *tp = tcp_sk(sk); /* RFC1323: H1. Apply PAWS check first. */ if (tcp_fast_parse_options(skb, th, tp, &hash_location) && tp->rx_opt.saw_tstamp && tcp_paws_discard(sk, skb)) { if (!th->rst) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); tcp_send_dupack(sk, skb); goto discard; } /* Reset is accepted even if it did not pass PAWS. */ } /* Step 1: check sequence number */ if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { /* RFC793, page 37: "In all states except SYN-SENT, all reset * (RST) segments are validated by checking their SEQ-fields." * And page 69: "If an incoming segment is not acceptable, * an acknowledgment should be sent in reply (unless the RST * bit is set, if so drop the segment and return)". */ if (!th->rst) tcp_send_dupack(sk, skb); goto discard; } /* Step 2: check RST bit */ if (th->rst) { tcp_reset(sk); goto discard; } /* ts_recent update must be made after we are sure that the packet * is in window. */ tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); /* step 3: check security and precedence [ignored] */ /* step 4: Check for a SYN in window. */ if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { if (syn_inerr) TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); tcp_reset(sk); return -1; } return 1; discard: __kfree_skb(skb); return 0; } /* * TCP receive function for the ESTABLISHED state. * * It is split into a fast path and a slow path. The fast path is * disabled when: * - A zero window was announced from us - zero window probing * is only handled properly in the slow path. * - Out of order segments arrived. * - Urgent data is expected. 
* - There is no buffer space left * - Unexpected TCP flags/window values/header lengths are received * (detected by checking the TCP header against pred_flags) * - Data is sent in both directions. Fast path only supports pure senders * or pure receivers (this means either the sequence number or the ack * value must stay constant) * - Unexpected TCP option. * * When these conditions are not satisfied it drops into a standard * receive procedure patterned after RFC793 to handle all cases. * The first three cases are guaranteed by proper pred_flags setting, * the rest is checked inline. Fast processing is turned on in * tcp_data_queue when everything is OK. */ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { struct tcp_sock *tp = tcp_sk(sk); int res; /* * Header prediction. * The code loosely follows the one in the famous * "30 instruction TCP receive" Van Jacobson mail. * * Van's trick is to deposit buffers into socket queue * on a device interrupt, to call tcp_recv function * on the receive process context and checksum and copy * the buffer to user space. smart... * * Our current scheme is not silly either but we take the * extra cost of the net_bh soft interrupt processing... * We do checksum and copy also but from device to kernel. */ tp->rx_opt.saw_tstamp = 0; /* pred_flags is 0xS?10 << 16 + snd_wnd * if header_prediction is to be made * 'S' will always be tp->tcp_header_len >> 2 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to * turn it off (when there are holes in the receive * space for instance) * PSH flag is ignored. */ if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && TCP_SKB_CB(skb)->seq == tp->rcv_nxt && !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { int tcp_header_len = tp->tcp_header_len; /* Timestamp header prediction: tcp_header_len * is automatically equal to th->doff*4 due to pred_flags * match. 
*/ /* Check timestamp */ if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { /* No? Slow path! */ if (!tcp_parse_aligned_timestamp(tp, th)) goto slow_path; /* If PAWS failed, check it more carefully in slow path */ if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) goto slow_path; /* DO NOT update ts_recent here, if checksum fails * and timestamp was corrupted part, it will result * in a hung connection since we will drop all * future packets due to the PAWS test. */ } if (len <= tcp_header_len) { /* Bulk data transfer: sender */ if (len == tcp_header_len) { /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); /* We know that such packets are checksummed * on entry. */ tcp_ack(sk, skb, 0); __kfree_skb(skb); tcp_data_snd_check(sk); return 0; } else { /* Header too small */ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); goto discard; } } else { int eaten = 0; int copied_early = 0; if (tp->copied_seq == tp->rcv_nxt && len - tcp_header_len <= tp->ucopy.len) { #ifdef CONFIG_NET_DMA if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) { copied_early = 1; eaten = 1; } #endif if (tp->ucopy.task == current && sock_owned_by_user(sk) && !copied_early) { __set_current_state(TASK_RUNNING); if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) eaten = 1; } if (eaten) { /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. 
* Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); tcp_rcv_rtt_measure_ts(sk, skb); __skb_pull(skb, tcp_header_len); tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); } if (copied_early) tcp_cleanup_rbuf(sk, skb->len); } if (!eaten) { if (tcp_checksum_complete_user(sk, skb)) goto csum_error; /* Predicted packet is in window by definition. * seq == rcv_nxt and rcv_wup <= rcv_nxt. * Hence, check seq<=rcv_wup reduces to: */ if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) tcp_store_ts_recent(tp); tcp_rcv_rtt_measure_ts(sk, skb); if ((int)skb->truesize > sk->sk_forward_alloc) goto step5; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ __skb_pull(skb, tcp_header_len); __skb_queue_tail(&sk->sk_receive_queue, skb); skb_set_owner_r(skb, sk); tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; } tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { /* Well, only one small jumplet in fast path... */ tcp_ack(sk, skb, FLAG_DATA); tcp_data_snd_check(sk); if (!inet_csk_ack_scheduled(sk)) goto no_ack; } if (!copied_early || tp->rcv_nxt != tp->rcv_wup) __tcp_ack_snd_check(sk, 0); no_ack: #ifdef CONFIG_NET_DMA if (copied_early) __skb_queue_tail(&sk->sk_async_wait_queue, skb); else #endif if (eaten) __kfree_skb(skb); else sk->sk_data_ready(sk, 0); return 0; } } slow_path: if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) goto csum_error; /* * Standard slow path. */ res = tcp_validate_incoming(sk, skb, th, 1); if (res <= 0) return -res; step5: if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) goto discard; tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. 
*/ tcp_urg(sk, skb, th); /* step 7: process the segment text */ tcp_data_queue(sk, skb); tcp_data_snd_check(sk); tcp_ack_snd_check(sk); return 0; csum_error: TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); discard: __kfree_skb(skb); return 0; } EXPORT_SYMBOL(tcp_rcv_established); static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { const u8 *hash_location; struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_cookie_values *cvp = tp->cookie_values; int saved_clamp = tp->rx_opt.mss_clamp; tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0); if (th->ack) { /* rfc793: * "If the state is SYN-SENT then * first check the ACK bit * If the ACK bit is set * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send * a reset (unless the RST bit is set, if so drop * the segment and return)" * * We do not send data with SYN, so that RFC-correct * test reduces to: */ if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt) goto reset_and_undo; if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, tcp_time_stamp)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); goto reset_and_undo; } /* Now ACK is acceptable. * * "If the RST bit is set * If the ACK was acceptable then signal the user "error: * connection reset", drop the segment, enter CLOSED state, * delete TCB, and return." */ if (th->rst) { tcp_reset(sk); goto discard; } /* rfc793: * "fifth, if neither of the SYN or RST bits is set then * drop the segment and return." * * See note below! * --ANK(990513) */ if (!th->syn) goto discard_and_undo; /* rfc793: * "If the SYN bit is on ... * are acceptable then ... * (our SYN has been ACKed), change the connection * state to ESTABLISHED..." */ TCP_ECN_rcv_synack(tp, th); tp->snd_wl1 = TCP_SKB_CB(skb)->seq; tcp_ack(sk, skb, FLAG_SLOWPATH); /* Ok.. it's good. Set up sequence numbers and * move to established. 
*/ tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. */ tp->snd_wnd = ntohs(th->window); tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); if (!tp->rx_opt.wscale_ok) { tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; tp->window_clamp = min(tp->window_clamp, 65535U); } if (tp->rx_opt.saw_tstamp) { tp->rx_opt.tstamp_ok = 1; tp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; tcp_store_ts_recent(tp); } else { tp->tcp_header_len = sizeof(struct tcphdr); } if (tcp_is_sack(tp) && sysctl_tcp_fack) tcp_enable_fack(tp); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); /* Remember, tcp_poll() does not lock socket! * Change state from SYN-SENT only after copied_seq * is initialized. */ tp->copied_seq = tp->rcv_nxt; if (cvp != NULL && cvp->cookie_pair_size > 0 && tp->rx_opt.cookie_plus > 0) { int cookie_size = tp->rx_opt.cookie_plus - TCPOLEN_COOKIE_BASE; int cookie_pair_size = cookie_size + cvp->cookie_desired; /* A cookie extension option was sent and returned. * Note that each incoming SYNACK replaces the * Responder cookie. The initial exchange is most * fragile, as protection against spoofing relies * entirely upon the sequence and timestamp (above). * This replacement strategy allows the correct pair to * pass through, while any others will be filtered via * Responder verification later. */ if (sizeof(cvp->cookie_pair) >= cookie_pair_size) { memcpy(&cvp->cookie_pair[cvp->cookie_desired], hash_location, cookie_size); cvp->cookie_pair_size = cookie_pair_size; } } smp_mb(); tcp_set_state(sk, TCP_ESTABLISHED); security_inet_conn_established(sk, skb); /* Make sure socket is routed, for correct metrics. */ icsk->icsk_af_ops->rebuild_header(sk); tcp_init_metrics(sk); tcp_init_congestion_control(sk); /* Prevent spurious tcp_cwnd_restart() on first data * packet. 
*/ tp->lsndtime = tcp_time_stamp; tcp_init_buffer_space(sk); if (sock_flag(sk, SOCK_KEEPOPEN)) inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); if (!tp->rx_opt.snd_wscale) __tcp_fast_path_on(tp, tp->snd_wnd); else tp->pred_flags = 0; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); } if (sk->sk_write_pending || icsk->icsk_accept_queue.rskq_defer_accept || icsk->icsk_ack.pingpong) { /* Save one ACK. Data will be ready after * several ticks, if write_pending is set. * * It may be deleted, but with this feature tcpdumps * look so _wonderfully_ clever, that I was not able * to stand against the temptation 8) --ANK */ inet_csk_schedule_ack(sk); icsk->icsk_ack.lrcvtime = tcp_time_stamp; icsk->icsk_ack.ato = TCP_ATO_MIN; tcp_incr_quickack(sk); tcp_enter_quickack_mode(sk); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX); discard: __kfree_skb(skb); return 0; } else { tcp_send_ack(sk); } return -1; } /* No ACK in the segment */ if (th->rst) { /* rfc793: * "If the RST bit is set * * Otherwise (no ACK) drop the segment and return." */ goto discard_and_undo; } /* PAWS check. */ if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && tcp_paws_reject(&tp->rx_opt, 0)) goto discard_and_undo; if (th->syn) { /* We see SYN without ACK. It is attempt of * simultaneous connect with crossed SYNs. * Particularly, it can be connect to self. */ tcp_set_state(sk, TCP_SYN_RECV); if (tp->rx_opt.saw_tstamp) { tp->rx_opt.tstamp_ok = 1; tcp_store_ts_recent(tp); tp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; } else { tp->tcp_header_len = sizeof(struct tcphdr); } tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. 
*/ tp->snd_wnd = ntohs(th->window); tp->snd_wl1 = TCP_SKB_CB(skb)->seq; tp->max_window = tp->snd_wnd; TCP_ECN_rcv_syn(tp, th); tcp_mtup_init(sk); tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); tcp_send_synack(sk); #if 0 /* Note, we could accept data and URG from this segment. * There are no obstacles to make this. * * However, if we ignore data in ACKless segments sometimes, * we have no reasons to accept it sometimes. * Also, seems the code doing it in step6 of tcp_rcv_state_process * is not flawless. So, discard packet for sanity. * Uncomment this return to process the data. */ return -1; #else goto discard; #endif } /* "fifth, if neither of the SYN or RST bits is set then * drop the segment and return." */ discard_and_undo: tcp_clear_options(&tp->rx_opt); tp->rx_opt.mss_clamp = saved_clamp; goto discard; reset_and_undo: tcp_clear_options(&tp->rx_opt); tp->rx_opt.mss_clamp = saved_clamp; return 1; } /* * This function implements the receiving procedure of RFC 793 for * all states except ESTABLISHED and TIME_WAIT. * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be * address independent. */ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th, unsigned int len) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); int queued = 0; int res; tp->rx_opt.saw_tstamp = 0; switch (sk->sk_state) { case TCP_CLOSE: goto discard; case TCP_LISTEN: if (th->ack) return 1; if (th->rst) goto discard; if (th->syn) { if (th->fin) goto discard; if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) return 1; /* Now we have several options: In theory there is * nothing else in the frame. KA9Q has an option to * send data with the syn, BSD accepts data with the * syn up to the [to be] advertised window and * Solaris 2.1 gives you a protocol error. For now * we just ignore it, that fits the spec precisely * and avoids incompatibilities. 
It would be nice in * future to drop through and process the data. * * Now that TTCP is starting to be used we ought to * queue this data. * But, this leaves one open to an easy denial of * service attack, and SYN cookies can't defend * against this problem. So, we drop the data * in the interest of security over speed unless * it's still in use. */ kfree_skb(skb); return 0; } goto discard; case TCP_SYN_SENT: queued = tcp_rcv_synsent_state_process(sk, skb, th, len); if (queued >= 0) return queued; /* Do step6 onward by hand. */ tcp_urg(sk, skb, th); __kfree_skb(skb); tcp_data_snd_check(sk); return 0; } res = tcp_validate_incoming(sk, skb, th, 0); if (res <= 0) return -res; /* step 5: check the ACK field */ if (th->ack) { int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; switch (sk->sk_state) { case TCP_SYN_RECV: if (acceptable) { tp->copied_seq = tp->rcv_nxt; smp_mb(); tcp_set_state(sk, TCP_ESTABLISHED); sk->sk_state_change(sk); /* Note, that this wakeup is only for marginal * crossed SYN case. Passively open sockets * are not waked up, because sk->sk_sleep == * NULL and sk->sk_socket == NULL. */ if (sk->sk_socket) sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); tp->snd_una = TCP_SKB_CB(skb)->ack_seq; tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; /* Make sure socket is routed, for * correct metrics. */ icsk->icsk_af_ops->rebuild_header(sk); tcp_init_metrics(sk); tcp_init_congestion_control(sk); /* Prevent spurious tcp_cwnd_restart() on * first data packet. 
*/ tp->lsndtime = tcp_time_stamp; tcp_mtup_init(sk); tcp_initialize_rcv_mss(sk); tcp_init_buffer_space(sk); tcp_fast_path_on(tp); } else { return 1; } break; case TCP_FIN_WAIT1: if (tp->snd_una == tp->write_seq) { tcp_set_state(sk, TCP_FIN_WAIT2); sk->sk_shutdown |= SEND_SHUTDOWN; dst_confirm(__sk_dst_get(sk)); if (!sock_flag(sk, SOCK_DEAD)) /* Wake up lingering close() */ sk->sk_state_change(sk); else { int tmo; if (tp->linger2 < 0 || (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { tcp_done(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); return 1; } tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); } else if (th->fin || sock_owned_by_user(sk)) { /* Bad case. We could lose such FIN otherwise. * It is not a big problem, but it looks confusing * and not so rare event. We still can lose it now, * if it spins in bh_lock_sock(), but it is really * marginal case. */ inet_csk_reset_keepalive_timer(sk, tmo); } else { tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); goto discard; } } } break; case TCP_CLOSING: if (tp->snd_una == tp->write_seq) { tcp_time_wait(sk, TCP_TIME_WAIT, 0); goto discard; } break; case TCP_LAST_ACK: if (tp->snd_una == tp->write_seq) { tcp_update_metrics(sk); tcp_done(sk); goto discard; } break; } } else goto discard; /* step 6: check the URG bit */ tcp_urg(sk, skb, th); /* step 7: process the segment text */ switch (sk->sk_state) { case TCP_CLOSE_WAIT: case TCP_CLOSING: case TCP_LAST_ACK: if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) break; case TCP_FIN_WAIT1: case TCP_FIN_WAIT2: /* RFC 793 says to queue data in these states, * RFC 1122 says we MUST send a reset. * BSD 4.4 also does reset. 
*/ if (sk->sk_shutdown & RCV_SHUTDOWN) { if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_reset(sk); return 1; } } /* Fall through */ case TCP_ESTABLISHED: tcp_data_queue(sk, skb); queued = 1; break; } /* tcp_data could move socket to TIME-WAIT */ if (sk->sk_state != TCP_CLOSE) { tcp_data_snd_check(sk); tcp_ack_snd_check(sk); } if (!queued) { discard: __kfree_skb(skb); } return 0; } EXPORT_SYMBOL(tcp_rcv_state_process);
gpl-2.0
HirdayGupta/teammates
src/test/resources/pages/instructorCoursesUnarchiveSuccessful.html
10787
<div class="container" id="mainContent"> <div id="topOfPage"> </div> <h1> Add New Course </h1> <br> <div class="panel panel-primary"> <div class="panel-body fill-plain"> <form action="/page/instructorCourseAdd" class="form form-horizontal" method="get" name="form_addcourse"> <input id="instructorid" name="instructorid" type="hidden" value="CCAddUiTest.instructor"> <input name="token" type="hidden" value="${sessionToken}"> <input name="user" type="hidden" value="CCAddUiTest.instructor"> <div class="form-group"> <label class="col-sm-3 control-label"> Course ID: </label> <div class="col-sm-3"> <input class="form-control" data-original-title="Enter the identifier of the course, e.g.CS3215-2013Semester1." data-placement="top" data-toggle="tooltip" id="courseid" maxlength="40" name="courseid" placeholder="e.g. CS3215-2013Semester1" tabindex="1" title="" type="text" value=""> </div> </div> <div class="form-group"> <label class="col-sm-3 control-label"> Course Name: </label> <div class="col-sm-9"> <input class="form-control" data-original-title="Enter the name of the course, e.g. Software Engineering." data-placement="top" data-toggle="tooltip" id="coursename" maxlength="64" name="coursename" placeholder="e.g. Software Engineering" tabindex="2" title="" type="text" value=""> </div> </div> <div class="form-group"> <label class="col-sm-3 control-label"> Time Zone: </label> <div class="col-sm-9"> ${timezone.options} <div class="alert alert-info time-zone-info-box"> <span class="glyphicon glyphicon-info-sign"> </span> Time zone is auto-detected based on your device settings. 
</div> </div> </div> <div class="form-group"> <div class="col-sm-offset-3 col-sm-9"> <input class="btn btn-primary" id="btnAddCourse" tabindex="3" type="submit" value="Add Course"> </div> </div> </form> </div> </div> <form action="/page/instructorCoursesPage" class="ajaxForCoursesForm" id="ajaxForCourses" style="display:none;"> <input name="user" type="hidden" value="CCAddUiTest.instructor"> <input name="isusingAjax" type="hidden" value="on"> </form> <br> <div id="statusMessagesToUser"> <div class="overflow-auto alert alert-success icon-success statusMessage"> The course CCAddUiTest.CS1101 has been unarchived. </div> </div> <script defer="" src="/js/statusMessage.js" type="text/javascript"> </script> <br> <div class="" id="coursesList"> <h2> Active courses </h2> <table class="table table-bordered table-striped" id="tableActiveCourses"> <thead class="fill-primary"> <tr> <th class="button-sort-none toggle-sort button-sort-ascending" id="button_sortcourseid"> Course ID <span class="icon-sort sorted-ascending"> </span> </th> <th class="button-sort-none toggle-sort" id="button_sortcoursename"> Course Name <span class="icon-sort unsorted"> </span> </th> <th class="button-sort-none toggle-sort" data-toggle-sort-comparator="sortDate" data-toggle-sort-extractor="dateStampExtractor" id="button_sortcoursecreateddate"> Creation Date <span class="icon-sort unsorted"> </span> </th> <th> Sections </th> <th> Teams </th> <th> Total Students </th> <th> Total Unregistered </th> <th class="align-center no-print"> Action(s) </th> </tr> </thead> <tbody> <tr> <td id="courseid0"> CCAddUiTest.CS1101 </td> <td id="coursename0"> Programming Methodology </td> <td data-date-stamp="${datetime.now.iso8601utc}" data-original-title="${datetime.now}" data-toggle="tooltip" id="coursecreateddate0"> ${datetime.now.courses} </td> <td id="course-stats-sectionNum-0"> <a class="course-stats-link-0" href="/page/courseStatsPage?courseid=CCAddUiTest.CS1101&user=CCAddUiTest.instructor" oncontextmenu="return 
false;"> Show </a> </td> <td id="course-stats-teamNum-0"> <a class="course-stats-link-0" href="/page/courseStatsPage?courseid=CCAddUiTest.CS1101&user=CCAddUiTest.instructor" oncontextmenu="return false;"> Show </a> </td> <td id="course-stats-totalStudentNum-0"> <a class="course-stats-link-0" href="/page/courseStatsPage?courseid=CCAddUiTest.CS1101&user=CCAddUiTest.instructor" oncontextmenu="return false;"> Show </a> </td> <td id="course-stats-unregisteredStudentNum-0"> <a class="course-stats-link-0" href="/page/courseStatsPage?courseid=CCAddUiTest.CS1101&user=CCAddUiTest.instructor" oncontextmenu="return false;"> Show </a> </td> <td class="align-center no-print"> <a class="btn btn-default btn-xs t_course_enroll0" data-original-title="Enroll student into the course" data-placement="top" data-toggle="tooltip" disabled="" href="/page/instructorCourseEnrollPage?courseid=CCAddUiTest.CS1101&user=CCAddUiTest.instructor" title=""> Enroll </a> <a class="btn btn-default btn-xs t_course_view0" data-original-title="View, edit and send invitation emails to the students in the course" data-placement="top" data-toggle="tooltip" href="/page/instructorCourseDetailsPage?courseid=CCAddUiTest.CS1101&user=CCAddUiTest.instructor" title=""> View </a> <a class="btn btn-default btn-xs t_course_edit0" data-original-title="Edit Course information and instructor list" data-placement="top" data-toggle="tooltip" href="/page/instructorCourseEditPage?courseid=CCAddUiTest.CS1101&user=CCAddUiTest.instructor" title=""> Edit </a> <a class="btn btn-default btn-xs t_course_archive0" data-original-title="Archive the course so that it will not be shown in the home page any more (you can still access it from the 'Courses' tab)" data-placement="top" data-toggle="tooltip" href="/page/instructorCourseArchive?courseid=CCAddUiTest.CS1101&archive=true&next=%2Fpage%2FinstructorCoursesPage&user=CCAddUiTest.instructor&token=${sessionToken}" title=""> Archive </a> <a class="btn btn-default btn-xs course-delete-link 
t_course_delete0" data-course-id="CCAddUiTest.CS1101" data-original-title="Delete the course and its corresponding students and sessions" data-placement="top" data-toggle="tooltip" disabled="" href="/page/instructorCourseDelete?courseid=CCAddUiTest.CS1101&next=%2Fpage%2FinstructorCoursesPage&user=CCAddUiTest.instructor&token=${sessionToken}" title=""> Delete </a> </td> </tr> <tr> <td id="courseid1"> CCAddUiTest.CS2104 </td> <td id="coursename1"> Programming Language Concept </td> <td data-date-stamp="${datetime.now.iso8601utc}" data-original-title="${datetime.now}" data-toggle="tooltip" id="coursecreateddate1"> ${datetime.now.courses} </td> <td id="course-stats-sectionNum-1"> <a class="course-stats-link-1" href="/page/courseStatsPage?courseid=CCAddUiTest.CS2104&user=CCAddUiTest.instructor" oncontextmenu="return false;"> Show </a> </td> <td id="course-stats-teamNum-1"> <a class="course-stats-link-1" href="/page/courseStatsPage?courseid=CCAddUiTest.CS2104&user=CCAddUiTest.instructor" oncontextmenu="return false;"> Show </a> </td> <td id="course-stats-totalStudentNum-1"> <a class="course-stats-link-1" href="/page/courseStatsPage?courseid=CCAddUiTest.CS2104&user=CCAddUiTest.instructor" oncontextmenu="return false;"> Show </a> </td> <td id="course-stats-unregisteredStudentNum-1"> <a class="course-stats-link-1" href="/page/courseStatsPage?courseid=CCAddUiTest.CS2104&user=CCAddUiTest.instructor" oncontextmenu="return false;"> Show </a> </td> <td class="align-center no-print"> <a class="btn btn-default btn-xs t_course_enroll1" data-original-title="Enroll student into the course" data-placement="top" data-toggle="tooltip" href="/page/instructorCourseEnrollPage?courseid=CCAddUiTest.CS2104&user=CCAddUiTest.instructor" title=""> Enroll </a> <a class="btn btn-default btn-xs t_course_view1" data-original-title="View, edit and send invitation emails to the students in the course" data-placement="top" data-toggle="tooltip" 
href="/page/instructorCourseDetailsPage?courseid=CCAddUiTest.CS2104&user=CCAddUiTest.instructor" title=""> View </a> <a class="btn btn-default btn-xs t_course_edit1" data-original-title="Edit Course information and instructor list" data-placement="top" data-toggle="tooltip" href="/page/instructorCourseEditPage?courseid=CCAddUiTest.CS2104&user=CCAddUiTest.instructor" title=""> Edit </a> <a class="btn btn-default btn-xs t_course_archive1" data-original-title="Archive the course so that it will not be shown in the home page any more (you can still access it from the 'Courses' tab)" data-placement="top" data-toggle="tooltip" href="/page/instructorCourseArchive?courseid=CCAddUiTest.CS2104&archive=true&next=%2Fpage%2FinstructorCoursesPage&user=CCAddUiTest.instructor&token=${sessionToken}" title=""> Archive </a> <a class="btn btn-default btn-xs course-delete-link t_course_delete1" data-course-id="CCAddUiTest.CS2104" data-original-title="Delete the course and its corresponding students and sessions" data-placement="top" data-toggle="tooltip" href="/page/instructorCourseDelete?courseid=CCAddUiTest.CS2104&next=%2Fpage%2FinstructorCoursesPage&user=CCAddUiTest.instructor&token=${sessionToken}" title=""> Delete </a> </td> </tr> </tbody> </table> <br> <br> <br> <br> </div> </div>
gpl-2.0
LAGonauta/dolphin
Source/Core/VideoCommon/Fifo.cpp
18046
// Copyright 2008 Dolphin Emulator Project // Licensed under GPLv2+ // Refer to the license.txt file included. #include "VideoCommon/Fifo.h" #include <atomic> #include <cstring> #include "Common/Assert.h" #include "Common/Atomic.h" #include "Common/BlockingLoop.h" #include "Common/ChunkFile.h" #include "Common/Event.h" #include "Common/FPURoundMode.h" #include "Common/MemoryUtil.h" #include "Common/MsgHandler.h" #include "Core/ConfigManager.h" #include "Core/CoreTiming.h" #include "Core/HW/Memmap.h" #include "Core/Host.h" #include "VideoCommon/AsyncRequests.h" #include "VideoCommon/CPMemory.h" #include "VideoCommon/CommandProcessor.h" #include "VideoCommon/DataReader.h" #include "VideoCommon/OpcodeDecoding.h" #include "VideoCommon/VertexLoaderManager.h" #include "VideoCommon/VertexManagerBase.h" #include "VideoCommon/VideoBackendBase.h" namespace Fifo { static constexpr u32 FIFO_SIZE = 2 * 1024 * 1024; static constexpr int GPU_TIME_SLOT_SIZE = 1000; static Common::BlockingLoop s_gpu_mainloop; static Common::Flag s_emu_running_state; // Most of this array is unlikely to be faulted in... static u8 s_fifo_aux_data[FIFO_SIZE]; static u8* s_fifo_aux_write_ptr; static u8* s_fifo_aux_read_ptr; // This could be in SConfig, but it depends on multiple settings // and can change at runtime. static bool s_use_deterministic_gpu_thread; static CoreTiming::EventType* s_event_sync_gpu; // STATE_TO_SAVE static u8* s_video_buffer; static u8* s_video_buffer_read_ptr; static std::atomic<u8*> s_video_buffer_write_ptr; static std::atomic<u8*> s_video_buffer_seen_ptr; static u8* s_video_buffer_pp_read_ptr; // The read_ptr is always owned by the GPU thread. In normal mode, so is the // write_ptr, despite it being atomic. 
In deterministic GPU thread mode, // things get a bit more complicated: // - The seen_ptr is written by the GPU thread, and points to what it's already // processed as much of as possible - in the case of a partial command which // caused it to stop, not the same as the read ptr. It's written by the GPU, // under the lock, and updating the cond. // - The write_ptr is written by the CPU thread after it copies data from the // FIFO. Maybe someday it will be under the lock. For now, because RunGpuLoop // polls, it's just atomic. // - The pp_read_ptr is the CPU preprocessing version of the read_ptr. static std::atomic<int> s_sync_ticks; static bool s_syncing_suspended; static Common::Event s_sync_wakeup_event; void DoState(PointerWrap& p) { p.DoArray(s_video_buffer, FIFO_SIZE); u8* write_ptr = s_video_buffer_write_ptr; p.DoPointer(write_ptr, s_video_buffer); s_video_buffer_write_ptr = write_ptr; p.DoPointer(s_video_buffer_read_ptr, s_video_buffer); if (p.mode == PointerWrap::MODE_READ && s_use_deterministic_gpu_thread) { // We're good and paused, right? 
s_video_buffer_seen_ptr = s_video_buffer_pp_read_ptr = s_video_buffer_read_ptr; } p.Do(s_sync_ticks); p.Do(s_syncing_suspended); } void PauseAndLock(bool doLock, bool unpauseOnUnlock) { if (doLock) { SyncGPU(SyncGPUReason::Other); EmulatorState(false); const SConfig& param = SConfig::GetInstance(); if (!param.bCPUThread || s_use_deterministic_gpu_thread) return; s_gpu_mainloop.WaitYield(std::chrono::milliseconds(100), Host_YieldToUI); } else { if (unpauseOnUnlock) EmulatorState(true); } } void Init() { // Padded so that SIMD overreads in the vertex loader are safe s_video_buffer = static_cast<u8*>(Common::AllocateMemoryPages(FIFO_SIZE + 4)); ResetVideoBuffer(); if (SConfig::GetInstance().bCPUThread) s_gpu_mainloop.Prepare(); s_sync_ticks.store(0); } void Shutdown() { if (s_gpu_mainloop.IsRunning()) PanicAlert("Fifo shutting down while active"); Common::FreeMemoryPages(s_video_buffer, FIFO_SIZE + 4); s_video_buffer = nullptr; s_video_buffer_write_ptr = nullptr; s_video_buffer_pp_read_ptr = nullptr; s_video_buffer_read_ptr = nullptr; s_video_buffer_seen_ptr = nullptr; s_fifo_aux_write_ptr = nullptr; s_fifo_aux_read_ptr = nullptr; } // May be executed from any thread, even the graphics thread. // Created to allow for self shutdown. void ExitGpuLoop() { // This should break the wait loop in CPU thread CommandProcessor::fifo.bFF_GPReadEnable = false; FlushGpu(); // Terminate GPU thread loop s_emu_running_state.Set(); s_gpu_mainloop.Stop(s_gpu_mainloop.kNonBlock); } void EmulatorState(bool running) { s_emu_running_state.Set(running); if (running) s_gpu_mainloop.Wakeup(); else s_gpu_mainloop.AllowSleep(); } void SyncGPU(SyncGPUReason reason, bool may_move_read_ptr) { if (s_use_deterministic_gpu_thread) { s_gpu_mainloop.Wait(); if (!s_gpu_mainloop.IsRunning()) return; // Opportunistically reset FIFOs so we don't wrap around. 
if (may_move_read_ptr && s_fifo_aux_write_ptr != s_fifo_aux_read_ptr) PanicAlert("aux fifo not synced (%p, %p)", s_fifo_aux_write_ptr, s_fifo_aux_read_ptr); memmove(s_fifo_aux_data, s_fifo_aux_read_ptr, s_fifo_aux_write_ptr - s_fifo_aux_read_ptr); s_fifo_aux_write_ptr -= (s_fifo_aux_read_ptr - s_fifo_aux_data); s_fifo_aux_read_ptr = s_fifo_aux_data; if (may_move_read_ptr) { u8* write_ptr = s_video_buffer_write_ptr; // what's left over in the buffer size_t size = write_ptr - s_video_buffer_pp_read_ptr; memmove(s_video_buffer, s_video_buffer_pp_read_ptr, size); // This change always decreases the pointers. We write seen_ptr // after write_ptr here, and read it before in RunGpuLoop, so // 'write_ptr > seen_ptr' there cannot become spuriously true. s_video_buffer_write_ptr = write_ptr = s_video_buffer + size; s_video_buffer_pp_read_ptr = s_video_buffer; s_video_buffer_read_ptr = s_video_buffer; s_video_buffer_seen_ptr = write_ptr; } } } void PushFifoAuxBuffer(const void* ptr, size_t size) { if (size > (size_t)(s_fifo_aux_data + FIFO_SIZE - s_fifo_aux_write_ptr)) { SyncGPU(SyncGPUReason::AuxSpace, /* may_move_read_ptr */ false); if (!s_gpu_mainloop.IsRunning()) { // GPU is shutting down return; } if (size > (size_t)(s_fifo_aux_data + FIFO_SIZE - s_fifo_aux_write_ptr)) { // That will sync us up to the last 32 bytes, so this short region // of FIFO would have to point to a 2MB display list or something. PanicAlert("absurdly large aux buffer"); return; } } memcpy(s_fifo_aux_write_ptr, ptr, size); s_fifo_aux_write_ptr += size; } void* PopFifoAuxBuffer(size_t size) { void* ret = s_fifo_aux_read_ptr; s_fifo_aux_read_ptr += size; return ret; } // Description: RunGpuLoop() sends data through this function. 
static void ReadDataFromFifo(u32 readPtr) { size_t len = 32; if (len > (size_t)(s_video_buffer + FIFO_SIZE - s_video_buffer_write_ptr)) { size_t existing_len = s_video_buffer_write_ptr - s_video_buffer_read_ptr; if (len > (size_t)(FIFO_SIZE - existing_len)) { PanicAlert("FIFO out of bounds (existing %zu + new %zu > %u)", existing_len, len, FIFO_SIZE); return; } memmove(s_video_buffer, s_video_buffer_read_ptr, existing_len); s_video_buffer_write_ptr = s_video_buffer + existing_len; s_video_buffer_read_ptr = s_video_buffer; } // Copy new video instructions to s_video_buffer for future use in rendering the new picture Memory::CopyFromEmu(s_video_buffer_write_ptr, readPtr, len); s_video_buffer_write_ptr += len; } // The deterministic_gpu_thread version. static void ReadDataFromFifoOnCPU(u32 readPtr) { size_t len = 32; u8* write_ptr = s_video_buffer_write_ptr; if (len > (size_t)(s_video_buffer + FIFO_SIZE - write_ptr)) { // We can't wrap around while the GPU is working on the data. // This should be very rare due to the reset in SyncGPU. SyncGPU(SyncGPUReason::Wraparound); if (!s_gpu_mainloop.IsRunning()) { // GPU is shutting down, so the next asserts may fail return; } if (s_video_buffer_pp_read_ptr != s_video_buffer_read_ptr) { PanicAlert("desynced read pointers"); return; } write_ptr = s_video_buffer_write_ptr; size_t existing_len = write_ptr - s_video_buffer_pp_read_ptr; if (len > (size_t)(FIFO_SIZE - existing_len)) { PanicAlert("FIFO out of bounds (existing %zu + new %zu > %u)", existing_len, len, FIFO_SIZE); return; } } Memory::CopyFromEmu(s_video_buffer_write_ptr, readPtr, len); s_video_buffer_pp_read_ptr = OpcodeDecoder::Run<true>( DataReader(s_video_buffer_pp_read_ptr, write_ptr + len), nullptr, false); // This would have to be locked if the GPU thread didn't spin. 
s_video_buffer_write_ptr = write_ptr + len; } void ResetVideoBuffer() { s_video_buffer_read_ptr = s_video_buffer; s_video_buffer_write_ptr = s_video_buffer; s_video_buffer_seen_ptr = s_video_buffer; s_video_buffer_pp_read_ptr = s_video_buffer; s_fifo_aux_write_ptr = s_fifo_aux_data; s_fifo_aux_read_ptr = s_fifo_aux_data; } // Description: Main FIFO update loop // Purpose: Keep the Core HW updated about the CPU-GPU distance void RunGpuLoop() { AsyncRequests::GetInstance()->SetEnable(true); AsyncRequests::GetInstance()->SetPassthrough(false); s_gpu_mainloop.Run( [] { const SConfig& param = SConfig::GetInstance(); // Run events from the CPU thread. AsyncRequests::GetInstance()->PullEvents(); // Do nothing while paused if (!s_emu_running_state.IsSet()) return; if (s_use_deterministic_gpu_thread) { // All the fifo/CP stuff is on the CPU. We just need to run the opcode decoder. u8* seen_ptr = s_video_buffer_seen_ptr; u8* write_ptr = s_video_buffer_write_ptr; // See comment in SyncGPU if (write_ptr > seen_ptr) { s_video_buffer_read_ptr = OpcodeDecoder::Run(DataReader(s_video_buffer_read_ptr, write_ptr), nullptr, false); s_video_buffer_seen_ptr = write_ptr; } } else { CommandProcessor::SCPFifoStruct& fifo = CommandProcessor::fifo; CommandProcessor::SetCPStatusFromGPU(); // check if we are able to run this buffer while (!CommandProcessor::IsInterruptWaiting() && fifo.bFF_GPReadEnable && fifo.CPReadWriteDistance && !AtBreakpoint()) { if (param.bSyncGPU && s_sync_ticks.load() < param.iSyncGpuMinDistance) break; u32 cyclesExecuted = 0; u32 readPtr = fifo.CPReadPointer; ReadDataFromFifo(readPtr); if (readPtr == fifo.CPEnd) readPtr = fifo.CPBase; else readPtr += 32; ASSERT_MSG(COMMANDPROCESSOR, (s32)fifo.CPReadWriteDistance - 32 >= 0, "Negative fifo.CPReadWriteDistance = %i in FIFO Loop !\nThat can produce " "instability in the game. 
Please report it.", fifo.CPReadWriteDistance - 32); u8* write_ptr = s_video_buffer_write_ptr; s_video_buffer_read_ptr = OpcodeDecoder::Run( DataReader(s_video_buffer_read_ptr, write_ptr), &cyclesExecuted, false); Common::AtomicStore(fifo.CPReadPointer, readPtr); Common::AtomicAdd(fifo.CPReadWriteDistance, static_cast<u32>(-32)); if ((write_ptr - s_video_buffer_read_ptr) == 0) Common::AtomicStore(fifo.SafeCPReadPointer, fifo.CPReadPointer); CommandProcessor::SetCPStatusFromGPU(); if (param.bSyncGPU) { cyclesExecuted = (int)(cyclesExecuted / param.fSyncGpuOverclock); int old = s_sync_ticks.fetch_sub(cyclesExecuted); if (old >= param.iSyncGpuMaxDistance && old - (int)cyclesExecuted < param.iSyncGpuMaxDistance) s_sync_wakeup_event.Set(); } // This call is pretty important in DualCore mode and must be called in the FIFO Loop. // If we don't, s_swapRequested or s_efbAccessRequested won't be set to false // leading the CPU thread to wait in Video_BeginField or Video_AccessEFB thus slowing // things down. AsyncRequests::GetInstance()->PullEvents(); } // fast skip remaining GPU time if fifo is empty if (s_sync_ticks.load() > 0) { int old = s_sync_ticks.exchange(0); if (old >= param.iSyncGpuMaxDistance) s_sync_wakeup_event.Set(); } // The fifo is empty and it's unlikely we will get any more work in the near future. // Make sure VertexManager finishes drawing any primitives it has stored in it's buffer. 
g_vertex_manager->Flush(); } }, 100); AsyncRequests::GetInstance()->SetEnable(false); AsyncRequests::GetInstance()->SetPassthrough(true); } void FlushGpu() { const SConfig& param = SConfig::GetInstance(); if (!param.bCPUThread || s_use_deterministic_gpu_thread) return; s_gpu_mainloop.Wait(); } void GpuMaySleep() { s_gpu_mainloop.AllowSleep(); } bool AtBreakpoint() { CommandProcessor::SCPFifoStruct& fifo = CommandProcessor::fifo; return fifo.bFF_BPEnable && (fifo.CPReadPointer == fifo.CPBreakpoint); } void RunGpu() { const SConfig& param = SConfig::GetInstance(); // wake up GPU thread if (param.bCPUThread && !s_use_deterministic_gpu_thread) { s_gpu_mainloop.Wakeup(); } // if the sync GPU callback is suspended, wake it up. if (!SConfig::GetInstance().bCPUThread || s_use_deterministic_gpu_thread || SConfig::GetInstance().bSyncGPU) { if (s_syncing_suspended) { s_syncing_suspended = false; CoreTiming::ScheduleEvent(GPU_TIME_SLOT_SIZE, s_event_sync_gpu, GPU_TIME_SLOT_SIZE); } } } static int RunGpuOnCpu(int ticks) { CommandProcessor::SCPFifoStruct& fifo = CommandProcessor::fifo; bool reset_simd_state = false; int available_ticks = int(ticks * SConfig::GetInstance().fSyncGpuOverclock) + s_sync_ticks.load(); while (fifo.bFF_GPReadEnable && fifo.CPReadWriteDistance && !AtBreakpoint() && available_ticks >= 0) { if (s_use_deterministic_gpu_thread) { ReadDataFromFifoOnCPU(fifo.CPReadPointer); s_gpu_mainloop.Wakeup(); } else { if (!reset_simd_state) { FPURoundMode::SaveSIMDState(); FPURoundMode::LoadDefaultSIMDState(); reset_simd_state = true; } ReadDataFromFifo(fifo.CPReadPointer); u32 cycles = 0; s_video_buffer_read_ptr = OpcodeDecoder::Run( DataReader(s_video_buffer_read_ptr, s_video_buffer_write_ptr), &cycles, false); available_ticks -= cycles; } if (fifo.CPReadPointer == fifo.CPEnd) fifo.CPReadPointer = fifo.CPBase; else fifo.CPReadPointer += 32; fifo.CPReadWriteDistance -= 32; } CommandProcessor::SetCPStatusFromGPU(); if (reset_simd_state) { FPURoundMode::LoadSIMDState(); 
} // Discard all available ticks as there is nothing to do any more. s_sync_ticks.store(std::min(available_ticks, 0)); // If the GPU is idle, drop the handler. if (available_ticks >= 0) return -1; // Always wait at least for GPU_TIME_SLOT_SIZE cycles. return -available_ticks + GPU_TIME_SLOT_SIZE; } void UpdateWantDeterminism(bool want) { // We are paused (or not running at all yet), so // it should be safe to change this. const SConfig& param = SConfig::GetInstance(); bool gpu_thread = false; switch (param.m_GPUDeterminismMode) { case GPUDeterminismMode::Auto: gpu_thread = want; break; case GPUDeterminismMode::Disabled: gpu_thread = false; break; case GPUDeterminismMode::FakeCompletion: gpu_thread = true; break; } gpu_thread = gpu_thread && param.bCPUThread; if (s_use_deterministic_gpu_thread != gpu_thread) { s_use_deterministic_gpu_thread = gpu_thread; if (gpu_thread) { // These haven't been updated in non-deterministic mode. s_video_buffer_seen_ptr = s_video_buffer_pp_read_ptr = s_video_buffer_read_ptr; CopyPreprocessCPStateFromMain(); VertexLoaderManager::MarkAllDirty(); } } } bool UseDeterministicGPUThread() { return s_use_deterministic_gpu_thread; } /* This function checks the emulated CPU - GPU distance and may wake up the GPU, * or block the CPU if required. It should be called by the CPU thread regularly. * @ticks The gone emulated CPU time. * @return A good time to call WaitForGpuThread() next. */ static int WaitForGpuThread(int ticks) { const SConfig& param = SConfig::GetInstance(); int old = s_sync_ticks.fetch_add(ticks); int now = old + ticks; // GPU is idle, so stop polling. 
if (old >= 0 && s_gpu_mainloop.IsDone()) return -1; // Wakeup GPU if (old < param.iSyncGpuMinDistance && now >= param.iSyncGpuMinDistance) RunGpu(); // If the GPU is still sleeping, wait for a longer time if (now < param.iSyncGpuMinDistance) return GPU_TIME_SLOT_SIZE + param.iSyncGpuMinDistance - now; // Wait for GPU if (now >= param.iSyncGpuMaxDistance) s_sync_wakeup_event.Wait(); return GPU_TIME_SLOT_SIZE; } static void SyncGPUCallback(u64 ticks, s64 cyclesLate) { ticks += cyclesLate; int next = -1; if (!SConfig::GetInstance().bCPUThread || s_use_deterministic_gpu_thread) { next = RunGpuOnCpu((int)ticks); } else if (SConfig::GetInstance().bSyncGPU) { next = WaitForGpuThread((int)ticks); } s_syncing_suspended = next < 0; if (!s_syncing_suspended) CoreTiming::ScheduleEvent(next, s_event_sync_gpu, next); } // Initialize GPU - CPU thread syncing, this gives us a deterministic way to start the GPU thread. void Prepare() { s_event_sync_gpu = CoreTiming::RegisterEvent("SyncGPUCallback", SyncGPUCallback); s_syncing_suspended = true; } } // namespace Fifo
gpl-2.0
amyevans/davmschool
wp-content/mu-plugins/gd-system-plugin/class-gd-system-plugin-config.php
1813
<?php /** * Copyright 2013 Go Daddy Operating Company, LLC. All Rights Reserved. */ // Make sure it's wordpress if ( !defined( 'ABSPATH' ) ) die( 'Forbidden' ); /** * Class GD_System_Plugin_Config * Handle reading system and reseller configurations * @version 1.0 * @author Kurt Payne <[email protected]> */ class GD_System_Plugin_Config { /** * Config items * @var array */ var $config = array(); /** * Is this account missing a gd-config.php file? * @var bool */ var $missing_gd_config = false; // @codeCoverageIgnoreStart /** * Constructor */ public function __construct() { if ( !defined( 'GD_RESELLER') && !defined( 'GD_VARNISH_SERVERS' ) && !function_exists( 'is_mobile_user_agent' ) ) { if ( file_exists( ABSPATH . 'gd-config.php' ) && is_readable( ABSPATH . 'gd-config.php' ) ) { require_once( ABSPATH . 'gd-config.php' ); } else { $this->missing_gd_config = true; } } } // @codeCoverageIgnoreEnd /** * Get config * * @return array */ public function get_config() { if ( ! empty( $this->config ) ) { return $this->config; } $defaults = $this->_get_config( '/web/conf/gd-wordpress.conf' ); $resellers = $this->_get_config( '/web/conf/gd-resellers.conf' ); $reseller = ( defined( 'GD_RESELLER' ) && isset( $resellers[ GD_RESELLER ] ) ) ? $resellers[ GD_RESELLER ] : array(); $this->config = array_merge( (array) $defaults, (array) $reseller ); return $this->config; } /** * Read a config file * @param string $path * @return array */ protected function _get_config( $path ) { $conf = array(); if ( file_exists( $path ) && is_readable( $path ) && is_file( $path ) ) { $conf = @parse_ini_file( $path, true ); if ( false === $conf ) { $conf = array(); } } return $conf; } }
gpl-2.0
keegan2149/magento
sites/default/app/code/core/Mage/Core/Model/Website.php
13740
<?php /** * Magento * * NOTICE OF LICENSE * * This source file is subject to the Open Software License (OSL 3.0) * that is bundled with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://opensource.org/licenses/osl-3.0.php * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to [email protected] so we can send you a copy immediately. * * DISCLAIMER * * Do not edit or add to this file if you wish to upgrade Magento to newer * versions in the future. If you wish to customize Magento for your * needs please refer to http://www.magentocommerce.com for more information. * * @category Mage * @package Mage_Core * @copyright Copyright (c) 2012 Magento Inc. (http://www.magentocommerce.com) * @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0) */ /** * Core Website model * * @method Mage_Core_Model_Resource_Website _getResource() * @method Mage_Core_Model_Resource_Website getResource() * @method Mage_Core_Model_Website setCode(string $value) * @method string getName() * @method Mage_Core_Model_Website setName(string $value) * @method int getSortOrder() * @method Mage_Core_Model_Website setSortOrder(int $value) * @method Mage_Core_Model_Website setDefaultGroupId(int $value) * @method int getIsDefault() * @method Mage_Core_Model_Website setIsDefault(int $value) * * @category Mage * @package Mage_Core * @author Magento Core Team <[email protected]> */ class Mage_Core_Model_Website extends Mage_Core_Model_Abstract { const ENTITY = 'core_website'; const CACHE_TAG = 'website'; protected $_cacheTag = true; /** * @var string */ protected $_eventPrefix = 'website'; /** * @var string */ protected $_eventObject = 'website'; /** * Cache configuration array * * @var array */ protected $_configCache = array(); /** * Website Group Coleection array * * @var array */ protected $_groups; /** * Website group ids array * * @var 
array */ protected $_groupIds = array(); /** * The number of groups in a website * * @var int */ protected $_groupsCount; /** * Website Store collection array * * @var array */ protected $_stores; /** * Website store ids array * * @var array */ protected $_storeIds = array(); /** * Website store codes array * * @var array */ protected $_storeCodes = array(); /** * The number of stores in a website * * @var int */ protected $_storesCount = 0; /** * Website default group * * @var Mage_Core_Model_Store_Group */ protected $_defaultGroup; /** * Website default store * * @var Mage_Core_Model_Store */ protected $_defaultStore; /** * is can delete website * * @var bool */ protected $_isCanDelete; /** * @var bool */ private $_isReadOnly = false; /** * init model * */ protected function _construct() { $this->_init('core/website'); } /** * Custom load * * @param int|string $id * @param string $field * @return Mage_Core_Model_Website */ public function load($id, $field = null) { if (!is_numeric($id) && is_null($field)) { $this->_getResource()->load($this, $id, 'code'); return $this; } return parent::load($id, $field); } /** * Load website configuration * * @param string $code * @return Mage_Core_Model_Website */ public function loadConfig($code) { if (!Mage::getConfig()->getNode('websites')) { return $this; } if (is_numeric($code)) { foreach (Mage::getConfig()->getNode('websites')->children() as $websiteCode=>$website) { if ((int)$website->system->website->id==$code) { $code = $websiteCode; break; } } } else { $website = Mage::getConfig()->getNode('websites/'.$code); } if (!empty($website)) { $this->setCode($code); $id = (int)$website->system->website->id; $this->setId($id)->setStoreId($id); } return $this; } /** * Get website config data * * @param string $path * @return mixed */ public function getConfig($path) { if (!isset($this->_configCache[$path])) { $config = Mage::getConfig()->getNode('websites/'.$this->getCode().'/'.$path); if (!$config) { return false; #throw 
Mage::exception('Mage_Core', Mage::helper('core')->__('Invalid website\'s configuration path: %s', $path)); } if ($config->hasChildren()) { $value = array(); foreach ($config->children() as $k=>$v) { $value[$k] = $v; } } else { $value = (string)$config; } $this->_configCache[$path] = $value; } return $this->_configCache[$path]; } /** * Load group collection and set internal data * */ protected function _loadGroups() { $this->_groups = array(); $this->_groupsCount = 0; foreach ($this->getGroupCollection() as $group) { $this->_groups[$group->getId()] = $group; $this->_groupIds[$group->getId()] = $group->getId(); if ($this->getDefaultGroupId() == $group->getId()) { $this->_defaultGroup = $group; } $this->_groupsCount ++; } } /** * Set website groups * * @param array $groups */ public function setGroups($groups) { $this->_groups = array(); $this->_groupsCount = 0; foreach ($groups as $group) { $this->_groups[$group->getId()] = $group; $this->_groupIds[$group->getId()] = $group->getId(); if ($this->getDefaultGroupId() == $group->getId()) { $this->_defaultGroup = $group; } $this->_groupsCount ++; } return $this; } /** * Retrieve new (not loaded) Group collection object with website filter * * @return Mage_Core_Model_Mysql4_Store_Group_Collection */ public function getGroupCollection() { return Mage::getModel('core/store_group') ->getCollection() ->addWebsiteFilter($this->getId()); } /** * Retrieve website groups * * @return array */ public function getGroups() { if (is_null($this->_groups)) { $this->_loadGroups(); } return $this->_groups; } /** * Retrieve website group ids * * @return array */ public function getGroupIds() { if (is_null($this->_groups)) { $this->_loadGroups(); } return $this->_groupIds; } /** * Retrieve number groups in a website * * @return int */ public function getGroupsCount() { if (is_null($this->_groups)) { $this->_loadGroups(); } return $this->_groupsCount; } /** * Retrieve default group model * * @return Mage_Core_Model_Store_Group */ public 
function getDefaultGroup() { if (!$this->hasDefaultGroupId()) { return false; } if (is_null($this->_groups)) { $this->_loadGroups(); } return $this->_defaultGroup; } /** * Load store collection and set internal data * */ protected function _loadStores() { $this->_stores = array(); $this->_storesCount = 0; foreach ($this->getStoreCollection() as $store) { $this->_stores[$store->getId()] = $store; $this->_storeIds[$store->getId()] = $store->getId(); $this->_storeCodes[$store->getId()] = $store->getCode(); if ($this->getDefaultGroup() && $this->getDefaultGroup()->getDefaultStoreId() == $store->getId()) { $this->_defaultStore = $store; } $this->_storesCount ++; } } /** * Set website stores * * @param array $stores */ public function setStores($stores) { $this->_stores = array(); $this->_storesCount = 0; foreach ($stores as $store) { $this->_stores[$store->getId()] = $store; $this->_storeIds[$store->getId()] = $store->getId(); $this->_storeCodes[$store->getId()] = $store->getCode(); if ($this->getDefaultGroup() && $this->getDefaultGroup()->getDefaultStoreId() == $store->getId()) { $this->_defaultStore = $store; } $this->_storesCount ++; } } /** * Retrieve new (not loaded) Store collection object with website filter * * @return Mage_Core_Model_Mysql4_Store_Collection */ public function getStoreCollection() { return Mage::getModel('core/store') ->getCollection() ->addWebsiteFilter($this->getId()); } /** * Retrieve wersite store objects * * @return array */ public function getStores() { if (is_null($this->_stores)) { $this->_loadStores(); } return $this->_stores; } /** * Retrieve website store ids * * @return array */ public function getStoreIds() { if (is_null($this->_stores)) { $this->_loadStores(); } return $this->_storeIds; } /** * Retrieve website store codes * * @return array */ public function getStoreCodes() { if (is_null($this->_stores)) { $this->_loadStores(); } return $this->_storeCodes; } /** * Retrieve number stores in a website * * @return int */ public 
function getStoresCount() { if (is_null($this->_stores)) { $this->_loadStores(); } return $this->_storesCount; } /** * is can delete website * * @return bool */ public function isCanDelete() { if ($this->_isReadOnly || !$this->getId()) { return false; } if (is_null($this->_isCanDelete)) { $this->_isCanDelete = (Mage::getModel('core/website')->getCollection()->getSize() > 2) && !$this->getIsDefault(); } return $this->_isCanDelete; } /** * Retrieve unique website-group-store key for collection with groups and stores * * @return string */ public function getWebsiteGroupStore() { return join('-', array($this->getWebsiteId(), $this->getGroupId(), $this->getStoreId())); } public function getDefaultGroupId() { return $this->_getData('default_group_id'); } public function getCode() { return $this->_getData('code'); } protected function _beforeDelete() { $this->_protectFromNonAdmin(); return parent::_beforeDelete(); } /** * rewrite in order to clear configuration cache * * @return Mage_Core_Model_Website */ protected function _afterDelete() { Mage::app()->clearWebsiteCache($this->getId()); parent::_afterDelete(); Mage::getConfig()->removeCache(); return $this; } /** * Retrieve website base currency code * * @return string */ public function getBaseCurrencyCode() { if ($this->getConfig(Mage_Core_Model_Store::XML_PATH_PRICE_SCOPE) == Mage_Core_Model_Store::PRICE_SCOPE_GLOBAL ) { return Mage::app()->getBaseCurrencyCode(); } else { return $this->getConfig(Mage_Directory_Model_Currency::XML_PATH_CURRENCY_BASE); } } /** * Retrieve website base currency * * @return Mage_Directory_Model_Currency */ public function getBaseCurrency() { $currency = $this->getData('base_currency'); if (is_null($currency)) { $currency = Mage::getModel('directory/currency')->load($this->getBaseCurrencyCode()); $this->setData('base_currency', $currency); } return $currency; } /** * Retrieve Default Website Store or null * * @return Mage_Core_Model_Store */ public function getDefaultStore() { // init 
stores if not loaded $this->getStores(); return $this->_defaultStore; } /** * Retrieve default stores select object * Select fields website_id, store_id * * @param $withDefault include/exclude default admin website * @return Varien_Db_Select */ public function getDefaultStoresSelect($withDefault = false) { return $this->getResource()->getDefaultStoresSelect($withDefault); } /** * Get/Set isReadOnly flag * * @param bool $value * @return bool */ public function isReadOnly($value = null) { if (null !== $value) { $this->_isReadOnly = (bool)$value; } return $this->_isReadOnly; } }
gpl-2.0
nslu2/Build-gcc-3.2.1
gcc/testsuite/g++.old-deja/g++.robertl/eb69.C
373
// Test that g++ complains about referring to a builtin type in a // mem-initializer. // Contributed by Kevin Buhr <[email protected]> int r = 0; struct foo { // ERROR - candidate foo(int x) { r = 1; } // ERROR - candidate }; struct bar : foo { typedef int an_int; bar() : bar::an_int(3) {} // ERROR - not a base }; int main() { bar b; return r; }
gpl-2.0
keegan2149/magento
sites/default/app/code/core/Mage/Adminhtml/Model/System/Config/Backend/Design/Package.php
1419
<?php /** * Magento * * NOTICE OF LICENSE * * This source file is subject to the Open Software License (OSL 3.0) * that is bundled with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://opensource.org/licenses/osl-3.0.php * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to [email protected] so we can send you a copy immediately. * * DISCLAIMER * * Do not edit or add to this file if you wish to upgrade Magento to newer * versions in the future. If you wish to customize Magento for your * needs please refer to http://www.magentocommerce.com for more information. * * @category Mage * @package Mage_Adminhtml * @copyright Copyright (c) 2012 Magento Inc. (http://www.magentocommerce.com) * @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0) */ class Mage_Adminhtml_Model_System_Config_Backend_Design_Package extends Mage_Core_Model_Config_Data { protected function _beforeSave() { $value = $this->getValue(); if (empty($value)) { throw new Exception('package name is empty.'); } if (!Mage::getDesign()->designPackageExists($value)) { throw new Exception('package with this name does not exist and cannot be set.'); } } }
gpl-2.0
SurajAnil/KernelVirtualMachine
qemu/hw/timer/ds1338.c
6373
/* * MAXIM DS1338 I2C RTC+NVRAM * * Copyright (c) 2009 CodeSourcery. * Written by Paul Brook * * This code is licensed under the GNU GPL v2. * * Contributions after 2012-01-13 are licensed under the terms of the * GNU GPL, version 2 or (at your option) any later version. */ #include "qemu/osdep.h" #include "qemu-common.h" #include "hw/i2c/i2c.h" #include "qemu/bcd.h" /* Size of NVRAM including both the user-accessible area and the * secondary register area. */ #define NVRAM_SIZE 64 /* Flags definitions */ #define SECONDS_CH 0x80 #define HOURS_12 0x40 #define HOURS_PM 0x20 #define CTRL_OSF 0x20 #define TYPE_DS1338 "ds1338" #define DS1338(obj) OBJECT_CHECK(DS1338State, (obj), TYPE_DS1338) typedef struct DS1338State { I2CSlave parent_obj; int64_t offset; uint8_t wday_offset; uint8_t nvram[NVRAM_SIZE]; int32_t ptr; bool addr_byte; } DS1338State; static const VMStateDescription vmstate_ds1338 = { .name = "ds1338", .version_id = 2, .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_I2C_SLAVE(parent_obj, DS1338State), VMSTATE_INT64(offset, DS1338State), VMSTATE_UINT8_V(wday_offset, DS1338State, 2), VMSTATE_UINT8_ARRAY(nvram, DS1338State, NVRAM_SIZE), VMSTATE_INT32(ptr, DS1338State), VMSTATE_BOOL(addr_byte, DS1338State), VMSTATE_END_OF_LIST() } }; static void capture_current_time(DS1338State *s) { /* Capture the current time into the secondary registers * which will be actually read by the data transfer operation. 
*/ struct tm now; qemu_get_timedate(&now, s->offset); s->nvram[0] = to_bcd(now.tm_sec); s->nvram[1] = to_bcd(now.tm_min); if (s->nvram[2] & HOURS_12) { int tmp = now.tm_hour; if (tmp % 12 == 0) { tmp += 12; } if (tmp <= 12) { s->nvram[2] = HOURS_12 | to_bcd(tmp); } else { s->nvram[2] = HOURS_12 | HOURS_PM | to_bcd(tmp - 12); } } else { s->nvram[2] = to_bcd(now.tm_hour); } s->nvram[3] = (now.tm_wday + s->wday_offset) % 7 + 1; s->nvram[4] = to_bcd(now.tm_mday); s->nvram[5] = to_bcd(now.tm_mon + 1); s->nvram[6] = to_bcd(now.tm_year - 100); } static void inc_regptr(DS1338State *s) { /* The register pointer wraps around after 0x3F; wraparound * causes the current time/date to be retransferred into * the secondary registers. */ s->ptr = (s->ptr + 1) & (NVRAM_SIZE - 1); if (!s->ptr) { capture_current_time(s); } } static void ds1338_event(I2CSlave *i2c, enum i2c_event event) { DS1338State *s = DS1338(i2c); switch (event) { case I2C_START_RECV: /* In h/w, capture happens on any START condition, not just a * START_RECV, but there is no need to actually capture on * START_SEND, because the guest can't get at that data * without going through a START_RECV which would overwrite it. */ capture_current_time(s); break; case I2C_START_SEND: s->addr_byte = true; break; default: break; } } static int ds1338_recv(I2CSlave *i2c) { DS1338State *s = DS1338(i2c); uint8_t res; res = s->nvram[s->ptr]; inc_regptr(s); return res; } static int ds1338_send(I2CSlave *i2c, uint8_t data) { DS1338State *s = DS1338(i2c); if (s->addr_byte) { s->ptr = data & (NVRAM_SIZE - 1); s->addr_byte = false; return 0; } if (s->ptr < 7) { /* Time register. */ struct tm now; qemu_get_timedate(&now, s->offset); switch(s->ptr) { case 0: /* TODO: Implement CH (stop) bit. 
*/ now.tm_sec = from_bcd(data & 0x7f); break; case 1: now.tm_min = from_bcd(data & 0x7f); break; case 2: if (data & HOURS_12) { int tmp = from_bcd(data & (HOURS_PM - 1)); if (data & HOURS_PM) { tmp += 12; } if (tmp % 12 == 0) { tmp -= 12; } now.tm_hour = tmp; } else { now.tm_hour = from_bcd(data & (HOURS_12 - 1)); } break; case 3: { /* The day field is supposed to contain a value in the range 1-7. Otherwise behavior is undefined. */ int user_wday = (data & 7) - 1; s->wday_offset = (user_wday - now.tm_wday + 7) % 7; } break; case 4: now.tm_mday = from_bcd(data & 0x3f); break; case 5: now.tm_mon = from_bcd(data & 0x1f) - 1; break; case 6: now.tm_year = from_bcd(data) + 100; break; } s->offset = qemu_timedate_diff(&now); } else if (s->ptr == 7) { /* Control register. */ /* Ensure bits 2, 3 and 6 will read back as zero. */ data &= 0xB3; /* Attempting to write the OSF flag to logic 1 leaves the value unchanged. */ data = (data & ~CTRL_OSF) | (data & s->nvram[s->ptr] & CTRL_OSF); s->nvram[s->ptr] = data; } else { s->nvram[s->ptr] = data; } inc_regptr(s); return 0; } static int ds1338_init(I2CSlave *i2c) { return 0; } static void ds1338_reset(DeviceState *dev) { DS1338State *s = DS1338(dev); /* The clock is running and synchronized with the host */ s->offset = 0; s->wday_offset = 0; memset(s->nvram, 0, NVRAM_SIZE); s->ptr = 0; s->addr_byte = false; } static void ds1338_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *k = I2C_SLAVE_CLASS(klass); k->init = ds1338_init; k->event = ds1338_event; k->recv = ds1338_recv; k->send = ds1338_send; dc->reset = ds1338_reset; dc->vmsd = &vmstate_ds1338; } static const TypeInfo ds1338_info = { .name = TYPE_DS1338, .parent = TYPE_I2C_SLAVE, .instance_size = sizeof(DS1338State), .class_init = ds1338_class_init, }; static void ds1338_register_types(void) { type_register_static(&ds1338_info); } type_init(ds1338_register_types)
gpl-3.0
carthy/beard.gmp
mpn/arm/mod_34lsub1.asm
2299
dnl ARM mpn_mod_34lsub1 -- remainder modulo 2^24-1. dnl Copyright 2012 Free Software Foundation, Inc. dnl This file is part of the GNU MP Library. dnl The GNU MP Library is free software; you can redistribute it and/or modify dnl it under the terms of the GNU Lesser General Public License as published dnl by the Free Software Foundation; either version 3 of the License, or (at dnl your option) any later version. dnl The GNU MP Library is distributed in the hope that it will be useful, but dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public dnl License for more details. dnl You should have received a copy of the GNU Lesser General Public License dnl along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. include(`../config.m4') C cycles/limb C StrongARM ? C XScale ? C Cortex-A8 ? C Cortex-A9 1.33 C Cortex-A15 ? define(`ap', r0) define(`n', r1) C mp_limb_t mpn_mod_34lsub1 (mp_srcptr up, mp_size_t n) C TODO C * Write cleverer summation code. C * Consider loading 6 64-bit aligned registers at a time, to approach 1 c/l. 
ASM_START() TEXT ALIGN(32) PROLOGUE(mpn_mod_34lsub1) push { r4, r5, r6, r7 } subs n, n, #3 mov r7, #0 blt L(le2) C n <= 2 ldmia ap!, { r2, r3, r12 } subs n, n, #3 blt L(sum) C n <= 5 adds r0, r0, #0 C clear carry sub n, n, #3 b L(mid) L(top): adcs r2, r2, r4 adcs r3, r3, r5 adcs r12, r12, r6 L(mid): ldmia ap!, { r4, r5, r6 } tst n, n sub n, n, #3 bpl L(top) add n, n, #3 adcs r2, r2, r4 adcs r3, r3, r5 adcs r12, r12, r6 movcs r7, #1 C r7 <= 1 L(sum): cmn n, #2 movlo r4, #0 ldrhs r4, [ap], #4 movls r5, #0 ldrhi r5, [ap], #4 adds r2, r2, r4 adcs r3, r3, r5 adcs r12, r12, #0 adc r7, r7, #0 C r7 <= 2 L(sum2): bic r0, r2, #0xff000000 add r0, r0, r2, lsr #24 add r0, r0, r7 lsl r7, r3, #8 bic r1, r7, #0xff000000 add r0, r0, r1 add r0, r0, r3, lsr #16 lsl r7, r12, #16 bic r1, r7, #0xff000000 add r0, r0, r1 add r0, r0, r12, lsr #8 pop { r4, r5, r6, r7 } bx lr L(le2): cmn n, #1 bne L(1) ldmia ap!, { r2, r3 } mov r12, #0 b L(sum2) L(1): ldr r2, [ap] bic r0, r2, #0xff000000 add r0, r0, r2, lsr #24 pop { r4, r5, r6, r7 } bx lr EPILOGUE()
gpl-3.0
grnet/synnefo
snf-astakos-app/astakos/im/migrations/old/0043_uninitialized_projects.py
25194
# encoding: utf-8 import datetime from south.db import db from south.v2 import DataMigration from django.db import models class Migration(DataMigration): def forwards(self, orm): objs = orm.ProjectApplication.objects apps = objs.filter(chain__chained_project=None).order_by( 'chain', '-id') checked_chain = None projs = [] for app in apps: chain = app.chain if chain.pk != checked_chain: checked_chain = chain.pk projs.append(orm.Project(id=chain, application=app, state=1)) orm.Project.objects.bulk_create(projs) def backwards(self, orm): "Write your backwards methods here." models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': 
('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'im.additionalmail': { 'Meta': {'object_name': 'AdditionalMail'}, 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']"}) }, 'im.approvalterms': { 'Meta': {'object_name': 'ApprovalTerms'}, 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'im.astakosuser': { 'Meta': {'object_name': 'AstakosUser', '_ormbases': ['auth.User']}, 
'accepted_email': ('django.db.models.fields.EmailField', [], {'default': 'None', 'max_length': '75', 'null': 'True', 'blank': 'True'}), 'accepted_policy': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}), 'activation_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'auth_token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'auth_token_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'auth_token_expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'date_signed_terms': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'deactivated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'deactivated_reason': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}), 'disturbed_quota': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}), 'email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'has_credits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'has_signed_terms': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'invitations': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'is_rejected': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'level': ('django.db.models.fields.IntegerField', [], {'default': '4'}), 'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'moderated_at': 
('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'moderated_data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'policy': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['im.Resource']", 'null': 'True', 'through': "orm['im.AstakosUserQuota']", 'symmetrical': 'False'}), 'rejected_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {}), 'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}), 'uuid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}), 'verification_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}), 'verified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}) }, 'im.astakosuserauthprovider': { 'Meta': {'unique_together': "(('identifier', 'module', 'user'),)", 'object_name': 'AstakosUserAuthProvider'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'affiliation': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}), 'auth_backend': ('django.db.models.fields.CharField', [], {'default': "'astakos'", 'max_length': '255'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'info_data': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}), 'module': ('django.db.models.fields.CharField', [], {'default': "'local'", 'max_length': '255'}), 'user': 
('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_providers'", 'to': "orm['im.AstakosUser']"}) }, 'im.astakosuserquota': { 'Meta': {'unique_together': "(('resource', 'user'),)", 'object_name': 'AstakosUserQuota'}, 'capacity': ('snf_django.lib.db.fields.IntDecimalField', [], {'max_digits': '38', 'decimal_places': '0'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Resource']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']"}) }, 'im.authproviderpolicyprofile': { 'Meta': {'object_name': 'AuthProviderPolicyProfile'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'authpolicy_profiles'", 'symmetrical': 'False', 'to': "orm['auth.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_exclusive': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'policy_add': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'policy_automoderate': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'policy_create': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'policy_limit': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'}), 'policy_login': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'policy_remove': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'policy_required': ('django.db.models.fields.NullBooleanField', [], 
{'default': 'None', 'null': 'True', 'blank': 'True'}), 'policy_switch': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'priority': ('django.db.models.fields.IntegerField', [], {'default': '1'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'authpolicy_profiles'", 'symmetrical': 'False', 'to': "orm['im.AstakosUser']"}) }, 'im.chain': { 'Meta': {'object_name': 'Chain'}, 'chain': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'im.component': { 'Meta': {'object_name': 'Component'}, 'auth_token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'auth_token_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'auth_token_expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}), 'url': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}) }, 'im.emailchange': { 'Meta': {'object_name': 'EmailChange'}, 'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'requested_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emailchanges'", 'unique': 'True', 'to': "orm['im.AstakosUser']"}) }, 'im.endpoint': { 'Meta': {'object_name': 'Endpoint'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'service': 
('django.db.models.fields.related.ForeignKey', [], {'related_name': "'endpoints'", 'to': "orm['im.Service']"}) }, 'im.endpointdata': { 'Meta': {'unique_together': "(('endpoint', 'key'),)", 'object_name': 'EndpointData'}, 'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['im.Endpoint']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '1024'}) }, 'im.invitation': { 'Meta': {'object_name': 'Invitation'}, 'code': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}), 'consumed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inviter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invitations_sent'", 'null': 'True', 'to': "orm['im.AstakosUser']"}), 'is_consumed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'realname': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, 'im.pendingthirdpartyuser': { 'Meta': {'unique_together': "(('provider', 'third_party_identifier'),)", 'object_name': 'PendingThirdPartyUser'}, 'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}), 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'info': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}), 'provider': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'third_party_identifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'im.project': { 'Meta': {'object_name': 'Project'}, 'application': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'project'", 'unique': 'True', 'to': "orm['im.ProjectApplication']"}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'deactivation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'deactivation_reason': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'id': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'chained_project'", 'unique': 'True', 'primary_key': 'True', 'db_column': "'id'", 'to': "orm['im.Chain']"}), 'last_approval_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['im.AstakosUser']", 'through': "orm['im.ProjectMembership']", 'symmetrical': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True', 'null': 'True', 'db_index': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}) }, 'im.projectapplication': { 'Meta': {'unique_together': "(('chain', 'id'),)", 'object_name': 'ProjectApplication'}, 
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects_applied'", 'to': "orm['im.AstakosUser']"}), 'chain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'chained_apps'", 'db_column': "'chain'", 'to': "orm['im.Chain']"}), 'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'end_date': ('django.db.models.fields.DateTimeField', [], {}), 'homepage': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'issue_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'limit_on_members_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'member_join_policy': ('django.db.models.fields.IntegerField', [], {}), 'member_leave_policy': ('django.db.models.fields.IntegerField', [], {}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects_owned'", 'to': "orm['im.AstakosUser']"}), 'precursor_application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.ProjectApplication']", 'null': 'True', 'blank': 'True'}), 'resource_grants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['im.Resource']", 'null': 'True', 'through': "orm['im.ProjectResourceGrant']", 'blank': 'True'}), 'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'response_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}) }, 
'im.projectmembership': { 'Meta': {'unique_together': "(('person', 'project'),)", 'object_name': 'ProjectMembership'}, 'acceptance_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'leave_request_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']"}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Project']"}), 'request_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'state': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}) }, 'im.projectmembershiphistory': { 'Meta': {'object_name': 'ProjectMembershipHistory'}, 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'person': ('django.db.models.fields.BigIntegerField', [], {}), 'project': ('django.db.models.fields.BigIntegerField', [], {}), 'reason': ('django.db.models.fields.IntegerField', [], {}), 'serial': ('django.db.models.fields.BigIntegerField', [], {}) }, 'im.projectresourcegrant': { 'Meta': {'unique_together': "(('resource', 'project_application'),)", 'object_name': 'ProjectResourceGrant'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'member_capacity': ('snf_django.lib.db.fields.IntDecimalField', [], {'default': '0', 'max_digits': '38', 'decimal_places': '0'}), 'project_application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.ProjectApplication']", 'null': 'True'}), 'project_capacity': ('snf_django.lib.db.fields.IntDecimalField', [], {'null': 'True', 'max_digits': '38', 'decimal_places': '0'}), 'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Resource']"}) }, 'im.resource': { 
'Meta': {'object_name': 'Resource'}, 'allow_in_projects': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}), 'desc': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'service_origin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'service_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'unit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'uplimit': ('snf_django.lib.db.fields.IntDecimalField', [], {'default': '0', 'max_digits': '38', 'decimal_places': '0'}) }, 'im.serial': { 'Meta': {'object_name': 'Serial'}, 'serial': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'im.service': { 'Meta': {'object_name': 'Service'}, 'component': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Component']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'im.sessioncatalog': { 'Meta': {'object_name': 'SessionCatalog'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'null': 'True', 'to': "orm['im.AstakosUser']"}) }, 'im.usersetting': { 'Meta': {'unique_together': "(('user', 'setting'),)", 'object_name': 'UserSetting'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'setting': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['im.AstakosUser']"}), 'value': ('django.db.models.fields.IntegerField', [], {}) } } complete_apps = ['im']
gpl-3.0
wimnat/ansible-modules-core
cloud/amazon/ec2.py
61472
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: ec2 short_description: create, terminate, start or stop an instance in ec2 description: - Creates or terminates ec2 instances. - C(state=restarted) was added in 2.2 version_added: "0.9" options: key_name: description: - key pair to use on the instance required: false default: null aliases: ['keypair'] id: version_added: "1.1" description: - identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). required: false default: null aliases: [] group: description: - security group (or list of groups) to use with the instance required: false default: null aliases: [ 'groups' ] group_id: version_added: "1.1" description: - security group id (or list of ids) to use with the instance required: false default: null aliases: [] region: version_added: "1.2" description: - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. 
See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region) required: false default: null aliases: [ 'aws_region', 'ec2_region' ] zone: version_added: "1.2" description: - AWS availability zone in which to launch the instance required: false default: null aliases: [ 'aws_zone', 'ec2_zone' ] instance_type: description: - instance type to use for the instance, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) required: true default: null aliases: [] tenancy: version_added: "1.9" description: - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. required: false default: default choices: [ "default", "dedicated" ] aliases: [] spot_price: version_added: "1.5" description: - Maximum spot price to bid, If not set a regular on-demand instance is requested. A spot request is made with this maximum bid. When it is filled, the instance is started. required: false default: null aliases: [] spot_type: version_added: "2.0" description: - Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied. required: false default: "one-time" choices: [ "one-time", "persistent" ] aliases: [] image: description: - I(ami) ID to use for the instance required: true default: null aliases: [] kernel: description: - kernel I(eki) to use for the instance required: false default: null aliases: [] ramdisk: description: - ramdisk I(eri) to use for the instance required: false default: null aliases: [] wait: description: - wait for the instance to be 'running' before returning. Does not wait for SSH, see 'wait_for' example for details. 
required: false default: "no" choices: [ "yes", "no" ] aliases: [] wait_timeout: description: - how long before wait gives up, in seconds default: 300 aliases: [] spot_wait_timeout: version_added: "1.5" description: - how long to wait for the spot instance request to be fulfilled default: 600 aliases: [] count: description: - number of instances to launch required: False default: 1 aliases: [] monitoring: version_added: "1.1" description: - enable detailed monitoring (CloudWatch) for instance required: false default: null choices: [ "yes", "no" ] aliases: [] user_data: version_added: "0.9" description: - opaque blob of data which is made available to the ec2 instance required: false default: null aliases: [] instance_tags: version_added: "1.0" description: - a hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}' required: false default: null aliases: [] placement_group: version_added: "1.3" description: - placement group for the instance when using EC2 Clustered Compute required: false default: null aliases: [] vpc_subnet_id: version_added: "1.1" description: - the subnet ID in which to launch the instance (VPC) required: false default: null aliases: [] assign_public_ip: version_added: "1.5" description: - when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+ required: false default: null choices: [ "yes", "no" ] aliases: [] private_ip: version_added: "1.2" description: - the private ip address to assign the instance (from the vpc subnet) required: false default: null aliases: [] instance_profile_name: version_added: "1.3" description: - Name of the IAM instance profile to use. 
Boto library must be 2.5.0+ required: false default: null aliases: [] instance_ids: version_added: "1.3" description: - "list of instance ids, currently used for states: absent, running, stopped" required: false default: null aliases: ['instance_id'] source_dest_check: version_added: "1.6" description: - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers) required: false default: yes choices: [ "yes", "no" ] termination_protection: version_added: "2.0" description: - Enable or Disable the Termination Protection required: false default: no choices: [ "yes", "no" ] instance_initiated_shutdown_behavior: version_added: "2.2" description: - Set whether AWS will Stop or Terminate an instance on shutdown required: false default: 'stop' choices: [ "stop", "terminate" ] state: version_added: "1.3" description: - create or terminate instances required: false default: 'present' aliases: [] choices: ['present', 'absent', 'running', 'restarted', 'stopped'] volumes: version_added: "1.5" description: - a list of hash/dictionaries of volumes to add to the new instance; '[{"key":"value", "key":"value"}]'; keys allowed are - device_name (str; required), delete_on_termination (bool; False), device_type (deprecated), ephemeral (str), encrypted (bool; False), snapshot (str), volume_type (str), iops (int) - device_type is deprecated use volume_type, iops must be set when volume_type='io1', ephemeral and snapshot are mutually exclusive. required: false default: null aliases: [] ebs_optimized: version_added: "1.6" description: - whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) required: false default: 'false' exact_count: version_added: "1.5" description: - An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value. 
required: false default: null aliases: [] count_tag: version_added: "1.5" description: - Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver". The specified tag must already exist or be passed in as the 'instance_tags' option. required: false default: null aliases: [] network_interfaces: version_added: "2.0" description: - A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces, none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are for creating a new network interface at launch.) required: false default: null aliases: ['network_interface'] spot_launch_group: version_added: "2.1" description: - Launch group for spot request, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group) required: false default: null author: - "Tim Gerla (@tgerla)" - "Lester Wade (@lwade)" - "Seth Vidal" extends_documentation_fragment: aws ''' EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
# Basic provisioning example - ec2: key_name: mykey instance_type: t2.micro image: ami-123456 wait: yes group: webserver count: 3 vpc_subnet_id: subnet-29e63245 assign_public_ip: yes # Advanced example with tagging and CloudWatch - ec2: key_name: mykey group: databases instance_type: t2.micro image: ami-123456 wait: yes wait_timeout: 500 count: 5 instance_tags: db: postgres monitoring: yes vpc_subnet_id: subnet-29e63245 assign_public_ip: yes # Single instance with additional IOPS volume from snapshot and volume delete on termination - ec2: key_name: mykey group: webserver instance_type: c3.medium image: ami-123456 wait: yes wait_timeout: 500 volumes: - device_name: /dev/sdb snapshot: snap-abcdef12 volume_type: io1 iops: 1000 volume_size: 100 delete_on_termination: true monitoring: yes vpc_subnet_id: subnet-29e63245 assign_public_ip: yes # Single instance with ssd gp2 root volume - ec2: key_name: mykey group: webserver instance_type: c3.medium image: ami-123456 wait: yes wait_timeout: 500 volumes: - device_name: /dev/xvda volume_type: gp2 volume_size: 8 vpc_subnet_id: subnet-29e63245 assign_public_ip: yes exact_count: 1 # Multiple groups example - ec2: key_name: mykey group: ['databases', 'internal-services', 'sshable', 'and-so-forth'] instance_type: m1.large image: ami-6e649707 wait: yes wait_timeout: 500 count: 5 instance_tags: db: postgres monitoring: yes vpc_subnet_id: subnet-29e63245 assign_public_ip: yes # Multiple instances with additional volume from snapshot - ec2: key_name: mykey group: webserver instance_type: m1.large image: ami-6e649707 wait: yes wait_timeout: 500 count: 5 volumes: - device_name: /dev/sdb snapshot: snap-abcdef12 volume_size: 10 monitoring: yes vpc_subnet_id: subnet-29e63245 assign_public_ip: yes # Dedicated tenancy example - local_action: module: ec2 assign_public_ip: yes group_id: sg-1dc53f72 key_name: mykey image: ami-6e649707 instance_type: m1.small tenancy: dedicated vpc_subnet_id: subnet-29e63245 wait: yes # Spot instance example - 
ec2: spot_price: 0.24 spot_wait_timeout: 600 keypair: mykey group_id: sg-1dc53f72 instance_type: m1.small image: ami-6e649707 wait: yes vpc_subnet_id: subnet-29e63245 assign_public_ip: yes spot_launch_group: report_generators # Examples using pre-existing network interfaces - ec2: key_name: mykey instance_type: t2.small image: ami-f005ba11 network_interface: eni-deadbeef - ec2: key_name: mykey instance_type: t2.small image: ami-f005ba11 network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e'] # Launch instances, runs some tasks # and then terminate them - name: Create a sandbox instance hosts: localhost gather_facts: False vars: key_name: my_keypair instance_type: m1.small security_group: my_securitygroup image: my_ami_id region: us-east-1 tasks: - name: Launch instance ec2: key_name: "{{ keypair }}" group: "{{ security_group }}" instance_type: "{{ instance_type }}" image: "{{ image }}" wait: true region: "{{ region }}" vpc_subnet_id: subnet-29e63245 assign_public_ip: yes register: ec2 - name: Add new instance to host group add_host: hostname: "{{ item.public_ip }}" groupname: launched with_items: "{{ ec2.instances }}" - name: Wait for SSH to come up wait_for: host: "{{ item.public_dns_name }}" port: 22 delay: 60 timeout: 320 state: started with_items: "{{ ec2.instances }}" - name: Configure instance(s) hosts: launched become: True gather_facts: True roles: - my_awesome_role - my_awesome_test - name: Terminate instances hosts: localhost connection: local tasks: - name: Terminate instances that were previously launched ec2: state: 'absent' instance_ids: '{{ ec2.instance_ids }}' # Start a few existing instances, run some tasks # and stop the instances - name: Start sandbox instances hosts: localhost gather_facts: false connection: local vars: instance_ids: - 'i-xxxxxx' - 'i-xxxxxx' - 'i-xxxxxx' region: us-east-1 tasks: - name: Start the sandbox instances ec2: instance_ids: '{{ instance_ids }}' region: '{{ region }}' state: running wait: True vpc_subnet_id: 
subnet-29e63245 assign_public_ip: yes roles: - do_neat_stuff - do_more_neat_stuff - name: Stop sandbox instances hosts: localhost gather_facts: false connection: local vars: instance_ids: - 'i-xxxxxx' - 'i-xxxxxx' - 'i-xxxxxx' region: us-east-1 tasks: - name: Stop the sandbox instances ec2: instance_ids: '{{ instance_ids }}' region: '{{ region }}' state: stopped wait: True vpc_subnet_id: subnet-29e63245 assign_public_ip: yes # # Start stopped instances specified by tag # - local_action: module: ec2 instance_tags: Name: ExtraPower state: running # # Restart instances specified by tag # - local_action: module: ec2 instance_tags: Name: ExtraPower state: restarted # # Enforce that 5 instances with a tag "foo" are running # (Highly recommended!) # - ec2: key_name: mykey instance_type: c1.medium image: ami-40603AD1 wait: yes group: webserver instance_tags: foo: bar exact_count: 5 count_tag: foo vpc_subnet_id: subnet-29e63245 assign_public_ip: yes # # Enforce that 5 running instances named "database" with a "dbtype" of "postgres" # - ec2: key_name: mykey instance_type: c1.medium image: ami-40603AD1 wait: yes group: webserver instance_tags: Name: database dbtype: postgres exact_count: 5 count_tag: Name: database dbtype: postgres vpc_subnet_id: subnet-29e63245 assign_public_ip: yes # # count_tag complex argument examples # # instances with tag foo count_tag: foo: # instances with tag foo=bar count_tag: foo: bar # instances with tags foo=bar & baz count_tag: foo: bar baz: # instances with tags foo & bar & baz=bang count_tag: - foo - bar - baz: bang ''' import time from ast import literal_eval from ansible.module_utils.six import iteritems from ansible.module_utils.six import get_function_code try: import boto.ec2 from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping from boto.exception import EC2ResponseError from boto.vpc import VPCConnection HAS_BOTO = True except ImportError: HAS_BOTO = False def find_running_instances_by_count_tag(module, ec2, 
count_tag, zone=None): # get reservations for instances that match tag(s) and are running reservations = get_reservations(module, ec2, tags=count_tag, state="running", zone=zone) instances = [] for res in reservations: if hasattr(res, 'instances'): for inst in res.instances: instances.append(inst) return reservations, instances def _set_none_to_blank(dictionary): result = dictionary for k in result: if isinstance(result[k], dict): result[k] = _set_none_to_blank(result[k]) elif not result[k]: result[k] = "" return result def get_reservations(module, ec2, tags=None, state=None, zone=None): # TODO: filters do not work with tags that have underscores filters = dict() if tags is not None: if isinstance(tags, str): try: tags = literal_eval(tags) except: pass # if string, we only care that a tag of that name exists if isinstance(tags, str): filters.update({"tag-key": tags}) # if list, append each item to filters if isinstance(tags, list): for x in tags: if isinstance(x, dict): x = _set_none_to_blank(x) filters.update(dict(("tag:"+tn, tv) for (tn,tv) in iteritems(x))) else: filters.update({"tag-key": x}) # if dict, add the key and value to the filter if isinstance(tags, dict): tags = _set_none_to_blank(tags) filters.update(dict(("tag:"+tn, tv) for (tn,tv) in iteritems(tags))) if state: # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api filters.update({'instance-state-name': state}) if zone: filters.update({'availability-zone': zone}) results = ec2.get_all_instances(filters=filters) return results def get_instance_info(inst): """ Retrieves instance information from an instance ID and returns it as a dictionary """ instance_info = {'id': inst.id, 'ami_launch_index': inst.ami_launch_index, 'private_ip': inst.private_ip_address, 'private_dns_name': inst.private_dns_name, 'public_ip': inst.ip_address, 'dns_name': inst.dns_name, 'public_dns_name': inst.public_dns_name, 'state_code': inst.state_code, 'architecture': 
inst.architecture, 'image_id': inst.image_id, 'key_name': inst.key_name, 'placement': inst.placement, 'region': inst.placement[:-1], 'kernel': inst.kernel, 'ramdisk': inst.ramdisk, 'launch_time': inst.launch_time, 'instance_type': inst.instance_type, 'root_device_type': inst.root_device_type, 'root_device_name': inst.root_device_name, 'state': inst.state, 'hypervisor': inst.hypervisor, 'tags': inst.tags, 'groups': dict((group.id, group.name) for group in inst.groups), } try: instance_info['virtualization_type'] = getattr(inst,'virtualization_type') except AttributeError: instance_info['virtualization_type'] = None try: instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized') except AttributeError: instance_info['ebs_optimized'] = False try: bdm_dict = {} bdm = getattr(inst, 'block_device_mapping') for device_name in bdm.keys(): bdm_dict[device_name] = { 'status': bdm[device_name].status, 'volume_id': bdm[device_name].volume_id, 'delete_on_termination': bdm[device_name].delete_on_termination } instance_info['block_device_mapping'] = bdm_dict except AttributeError: instance_info['block_device_mapping'] = False try: instance_info['tenancy'] = getattr(inst, 'placement_tenancy') except AttributeError: instance_info['tenancy'] = 'default' return instance_info def boto_supports_associate_public_ip_address(ec2): """ Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification class. Added in Boto 2.13.0 ec2: authenticated ec2 connection object Returns: True if Boto library accepts associate_public_ip_address argument, else false """ try: network_interface = boto.ec2.networkinterface.NetworkInterfaceSpecification() getattr(network_interface, "associate_public_ip_address") return True except AttributeError: return False def boto_supports_profile_name_arg(ec2): """ Check if Boto library has instance_profile_name argument. 
instance_profile_name has been added in Boto 2.5.0 ec2: authenticated ec2 connection object Returns: True if Boto library accept instance_profile_name argument, else false """ run_instances_method = getattr(ec2, 'run_instances') return 'instance_profile_name' in get_function_code(run_instances_method).co_varnames def create_block_device(module, ec2, volume): # Not aware of a way to determine this programatically # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/ MAX_IOPS_TO_SIZE_RATIO = 30 # device_type has been used historically to represent volume_type, # however ec2_vol uses volume_type, as does the BlockDeviceType, so # we add handling for either/or but not both if all(key in volume for key in ['device_type','volume_type']): module.fail_json(msg = 'device_type is a deprecated name for volume_type. Do not use both device_type and volume_type') # get whichever one is set, or NoneType if neither are set volume_type = volume.get('device_type') or volume.get('volume_type') if 'snapshot' not in volume and 'ephemeral' not in volume: if 'volume_size' not in volume: module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume') if 'snapshot' in volume: if volume_type == 'io1' and 'iops' not in volume: module.fail_json(msg = 'io1 volumes must have an iops value set') if 'iops' in volume: snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0] size = volume.get('volume_size', snapshot.volume_size) if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size: module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO) if 'encrypted' in volume: module.fail_json(msg = 'You can not set encryption when creating a volume from a snapshot') if 'ephemeral' in volume: if 'snapshot' in volume: module.fail_json(msg = 'Cannot set both ephemeral and snapshot') return BlockDeviceType(snapshot_id=volume.get('snapshot'), 
ephemeral_name=volume.get('ephemeral'), size=volume.get('volume_size'), volume_type=volume_type, delete_on_termination=volume.get('delete_on_termination', False), iops=volume.get('iops'), encrypted=volume.get('encrypted', None)) def boto_supports_param_in_spot_request(ec2, param): """ Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0. ec2: authenticated ec2 connection object Returns: True if boto library has the named param as an argument on the request_spot_instances method, else False """ method = getattr(ec2, 'request_spot_instances') return param in get_function_code(method).co_varnames def await_spot_requests(module, ec2, spot_requests, count): """ Wait for a group of spot requests to be fulfilled, or fail. module: Ansible module object ec2: authenticated ec2 connection object spot_requests: boto.ec2.spotinstancerequest.SpotInstanceRequest object returned by ec2.request_spot_instances count: Total number of instances to be created by the spot requests Returns: list of instance ID's created by the spot request(s) """ spot_wait_timeout = int(module.params.get('spot_wait_timeout')) wait_complete = time.time() + spot_wait_timeout spot_req_inst_ids = dict() while time.time() < wait_complete: reqs = ec2.get_all_spot_instance_requests() for sirb in spot_requests: if sirb.id in spot_req_inst_ids: continue for sir in reqs: if sir.id != sirb.id: continue # this is not our spot instance if sir.instance_id is not None: spot_req_inst_ids[sirb.id] = sir.instance_id elif sir.state == 'open': continue # still waiting, nothing to do here elif sir.state == 'active': continue # Instance is created already, nothing to do here elif sir.state == 'failed': module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % ( sir.id, sir.status.code, sir.fault.code, sir.fault.message)) elif sir.state == 'cancelled': module.fail_json(msg="Spot instance request %s was 
cancelled before it could be fulfilled." % sir.id)
                elif sir.state == 'closed':
                    # instance is terminating or marked for termination
                    # this may be intentional on the part of the operator,
                    # or it may have been terminated by AWS due to capacity,
                    # price, or group constraints in this case, we'll fail
                    # the module if the reason for the state is anything
                    # other than termination by user. Codes are documented at
                    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
                    if sir.status.code == 'instance-terminated-by-user':
                        # do nothing, since the user likely did this on purpose
                        pass
                    else:
                        spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
                        module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))

        if len(spot_req_inst_ids) < count:
            # Not every request fulfilled yet -- poll again shortly.
            time.sleep(5)
        else:
            # NOTE(review): on Python 3 .values() is a dict view, not the
            # list the docstring promises -- confirm callers cope.
            return spot_req_inst_ids.values()

    module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())


def enforce_count(module, ec2, vpc):
    # Reconcile the number of running instances that match count_tag with
    # exact_count: create the shortfall or terminate the surplus.
    exact_count = module.params.get('exact_count')
    count_tag = module.params.get('count_tag')
    zone = module.params.get('zone')

    # fail here if the exact count was specified without filtering
    # on a tag, as this may lead to an undesired removal of instances
    if exact_count and count_tag is None:
        module.fail_json(msg="you must use the 'count_tag' option with exact_count")

    reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone)

    changed = None
    checkmode = False  # NOTE(review): always False -- check mode is not implemented in this path
    instance_dict_array = []
    changed_instance_ids = None

    if len(instances) == exact_count:
        changed = False
    elif len(instances) < exact_count:
        # Too few: launch the difference via create_instances().
        changed = True
        to_create = exact_count - len(instances)
        if not checkmode:
            (instance_dict_array, changed_instance_ids, changed) \
                = create_instances(module, ec2, vpc, override_count=to_create)
            for inst in instance_dict_array:
                instances.append(inst)
    elif len(instances) > exact_count:
        # Too many: terminate the oldest-sorted surplus ids.
        changed = True
        to_remove = len(instances) - exact_count
        if not checkmode:
            all_instance_ids = 
sorted([ x.id for x in instances ])
            remove_ids = all_instance_ids[0:to_remove]

            instances = [ x for x in instances if x.id not in remove_ids]

            (changed, instance_dict_array, changed_instance_ids) \
                = terminate_instances(module, ec2, remove_ids)

            # Report the removed instances with their final state.
            terminated_list = []
            for inst in instance_dict_array:
                inst['state'] = "terminated"
                terminated_list.append(inst)
            instance_dict_array = terminated_list

    # ensure all instances are dictionaries
    all_instances = []
    for inst in instances:
        if not isinstance(inst, dict):
            inst = get_instance_info(inst)
        all_instances.append(inst)

    return (all_instances, instance_dict_array, changed_instance_ids, changed)


def create_instances(module, ec2, vpc, override_count=None):
    """
    Creates new instances

    module : AnsibleModule object
    ec2: authenticated ec2 connection object

    Returns:
        A list of dictionaries with instance information
        about the instances that were launched
    """
    key_name = module.params.get('key_name')
    # NOTE(review): "id" shadows the Python builtin; it is the EC2
    # client-token used for idempotent launches.
    id = module.params.get('id')
    group_name = module.params.get('group')
    group_id = module.params.get('group_id')
    zone = module.params.get('zone')
    instance_type = module.params.get('instance_type')
    tenancy = module.params.get('tenancy')
    spot_price = module.params.get('spot_price')
    spot_type = module.params.get('spot_type')
    image = module.params.get('image')
    if override_count:
        count = override_count
    else:
        count = module.params.get('count')
    monitoring = module.params.get('monitoring')
    kernel = module.params.get('kernel')
    ramdisk = module.params.get('ramdisk')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
    placement_group = module.params.get('placement_group')
    user_data = module.params.get('user_data')
    instance_tags = module.params.get('instance_tags')
    vpc_subnet_id = module.params.get('vpc_subnet_id')
    assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
    private_ip = module.params.get('private_ip')
    instance_profile_name = module.params.get('instance_profile_name')
    volumes = module.params.get('volumes')
    ebs_optimized = module.params.get('ebs_optimized')
    exact_count = module.params.get('exact_count')
    count_tag = module.params.get('count_tag')
    source_dest_check = module.boolean(module.params.get('source_dest_check'))
    termination_protection = module.boolean(module.params.get('termination_protection'))
    network_interfaces = module.params.get('network_interfaces')
    spot_launch_group = module.params.get('spot_launch_group')
    instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')

    # group_id and group_name are exclusive of each other
    if group_id and group_name:
        module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))

    vpc_id = None
    if vpc_subnet_id:
        if not vpc:
            module.fail_json(msg="region must be specified")
        else:
            vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
    else:
        vpc_id = None

    try:
        # Here we try to lookup the group id from the security group name - if group is set.
        if group_name:
            if vpc_id:
                grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
            else:
                grp_details = ec2.get_all_security_groups()
            if isinstance(group_name, basestring):
                group_name = [group_name]
            unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
            if len(unmatched) > 0:
                module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
            group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
        # Now we try to lookup the group id testing if group exists.
        elif group_id:
            #wrap the group_id in a list if it's not one already
            if isinstance(group_id, basestring):
                group_id = [group_id]
            grp_details = ec2.get_all_security_groups(group_ids=group_id)
            group_name = [grp_item.name for grp_item in grp_details]
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg = str(e))

    # Lookup any instances that match our run id.
    running_instances = []
    count_remaining = int(count)

    if id != None:
        # Idempotency: instances previously launched with the same
        # client-token count toward the requested total.
        filter_dict = {'client-token':id, 'instance-state-name' : 'running'}
        previous_reservations = ec2.get_all_instances(None, filter_dict)
        for res in previous_reservations:
            for prev_instance in res.instances:
                running_instances.append(prev_instance)
        count_remaining = count_remaining - len(running_instances)

    # Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want.

    if count_remaining == 0:
        changed = False
    else:
        changed = True
        try:
            params = {'image_id': image,
                      'key_name': key_name,
                      'monitoring_enabled': monitoring,
                      'placement': zone,
                      'instance_type': instance_type,
                      'kernel_id': kernel,
                      'ramdisk_id': ramdisk,
                      'user_data': user_data}

            if ebs_optimized:
                params['ebs_optimized'] = ebs_optimized

            # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
            if not spot_price:
                params['tenancy'] = tenancy

            if boto_supports_profile_name_arg(ec2):
                params['instance_profile_name'] = instance_profile_name
            else:
                if instance_profile_name is not None:
                    module.fail_json(
                        msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")

            if assign_public_ip:
                if not boto_supports_associate_public_ip_address(ec2):
                    module.fail_json(
                        msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
                elif not vpc_subnet_id:
                    module.fail_json(
                        msg="assign_public_ip only available with vpc_subnet_id")
                else:
                    # Public IP can only be requested through a new network
                    # interface specification, not as a plain launch param.
                    if private_ip:
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            subnet_id=vpc_subnet_id,
                            private_ip_address=private_ip,
                            groups=group_id,
                            associate_public_ip_address=assign_public_ip)
                    else:
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            subnet_id=vpc_subnet_id,
                            groups=group_id,
                            associate_public_ip_address=assign_public_ip)
                    interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
                    params['network_interfaces'] = interfaces
            else:
                if network_interfaces:
                    # Attach pre-existing ENIs in the order given.
                    if isinstance(network_interfaces, basestring):
                        network_interfaces = [network_interfaces]
                    interfaces = []
                    for i, network_interface_id in enumerate(network_interfaces):
                        interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                            network_interface_id=network_interface_id,
                            device_index=i)
                        interfaces.append(interface)
                    params['network_interfaces'] = \
                        boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
                else:
                    # NOTE(review): subnet_id is assigned even when
                    # vpc_subnet_id is None -- confirm boto tolerates this.
                    params['subnet_id'] = vpc_subnet_id
                    if vpc_subnet_id:
                        params['security_group_ids'] = group_id
                    else:
                        params['security_groups'] = group_name

            if volumes:
                bdm = BlockDeviceMapping()
                for volume in volumes:
                    if 'device_name' not in volume:
                        module.fail_json(msg = 'Device name must be set for volume')
                    # Minimum volume size is 1GB. We'll use volume size explicitly set to 0
                    # to be a signal not to create this volume
                    if 'volume_size' not in volume or int(volume['volume_size']) > 0:
                        bdm[volume['device_name']] = create_block_device(module, ec2, volume)

                params['block_device_map'] = bdm

            # check to see if we're using spot pricing first before starting instances
            if not spot_price:
                if assign_public_ip and private_ip:
                    params.update(dict(
                        min_count          = count_remaining,
                        max_count          = count_remaining,
                        client_token       = id,
                        placement_group    = placement_group,
                    ))
                else:
                    params.update(dict(
                        min_count          = count_remaining,
                        max_count          = count_remaining,
                        client_token       = id,
                        placement_group    = placement_group,
                        private_ip_address = private_ip,
                    ))

                # For ordinary (not spot) instances, we can select 'stop'
                # (the default) or 'terminate' here.
                params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'

                res = ec2.run_instances(**params)
                instids = [ i.id for i in res.instances ]
                while True:
                    try:
                        ec2.get_all_instances(instids)
                        break
                    except boto.exception.EC2ResponseError as e:
                        if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
                            # there's a race between start and get an instance
                            continue
                        else:
                            module.fail_json(msg = str(e))

                # The instances returned through ec2.run_instances above can be in
                # terminated state due to idempotency. See commit 7f11c3d for a complete
                # explanation.
                terminated_instances = [
                    str(instance.id) for instance in res.instances if instance.state == 'terminated'
                ]
                if terminated_instances:
                    module.fail_json(msg = "Instances with id(s) %s " % terminated_instances +
                                           "were created previously but have since been terminated - " +
                                           "use a (possibly different) 'instanceid' parameter")

            else:
                if private_ip:
                    module.fail_json(
                        msg='private_ip only available with on-demand (non-spot) instances')

                if boto_supports_param_in_spot_request(ec2, 'placement_group'):
                    params['placement_group'] = placement_group
                elif placement_group :
                    module.fail_json(
                        msg="placement_group parameter requires Boto version 2.3.0 or higher.")

                # You can't tell spot instances to 'stop'; they will always be
                # 'terminate'd. For convenience, we'll ignore the latter value.
                if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
                    module.fail_json(
                        msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")

                if spot_launch_group and isinstance(spot_launch_group, basestring):
                    params['launch_group'] = spot_launch_group

                params.update(dict(
                    count = count_remaining,
                    type = spot_type,
                ))
                res = ec2.request_spot_instances(spot_price, **params)

                # Now we have to do the intermediate waiting
                # NOTE(review): if wait is false on this spot path, "instids"
                # is never assigned and the wait loop below raises NameError
                # -- confirm; likely needs an "else: instids = []".
                if wait:
                    instids = await_spot_requests(module, ec2, res, count)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))

        # wait here until the instances are up
        num_running = 0
        wait_timeout = time.time() + wait_timeout
        while wait_timeout > time.time() and num_running < len(instids):
            try:
                res_list = ec2.get_all_instances(instids)
            except boto.exception.BotoServerError as e:
                if e.error_code == 'InvalidInstanceID.NotFound':
                    time.sleep(1)
                    continue
                else:
                    raise

            num_running = 0
            for res in res_list:
                num_running += len([ i for i in res.instances if i.state=='running' ])
            if len(res_list) <= 0:
                # got a bad response of some sort, possibly due to
                # stale/cached data. Wait a second and then try again
                time.sleep(1)
                continue
            if wait and num_running < len(instids):
                time.sleep(5)
            else:
                break

        if wait and wait_timeout <= time.time():
            # waiting took too long
            module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())

        #We do this after the loop ends so that we end up with one list
        for res in res_list:
            running_instances.extend(res.instances)

        # Enabled by default by AWS
        if source_dest_check is False:
            for inst in res.instances:
                inst.modify_attribute('sourceDestCheck', False)

        # Disabled by default by AWS
        if termination_protection is True:
            for inst in res.instances:
                inst.modify_attribute('disableApiTermination', True)

        # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
        if instance_tags:
            try:
                ec2.create_tags(instids, instance_tags)
            except boto.exception.EC2ResponseError as e:
                module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message))

    # Collect info for every instance we consider launched (including the
    # pre-existing ones matched by client-token above).
    instance_dict_array = []
    created_instance_ids = []
    for inst in running_instances:
        inst.update()
        d = get_instance_info(inst)
        created_instance_ids.append(inst.id)
        instance_dict_array.append(d)

    return (instance_dict_array, created_instance_ids, changed)


def terminate_instances(module, ec2, instance_ids):
    """
    Terminates a list of instances

    module: Ansible module object
    ec2: authenticated ec2 connection object
    termination_list: a list of instances to terminate in the form of
      [ {id: <inst-id>}, ..]

    Returns a dictionary of instance information
    about the instances terminated.

    If the instance to be terminated is running
    "changed" will be set to False.
    """

    # Whether to wait for termination to complete before returning
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    changed = False
    instance_dict_array = []

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        module.fail_json(msg='instance_ids should be a list of instances, aborting')

    # Terminate each instance that is currently running or stopped;
    # anything already terminating/terminated is skipped.
    terminated_instance_ids = []
    for res in ec2.get_all_instances(instance_ids):
        for inst in res.instances:
            if inst.state == 'running' or inst.state == 'stopped':
                terminated_instance_ids.append(inst.id)
                instance_dict_array.append(get_instance_info(inst))
                try:
                    ec2.terminate_instances([inst.id])
                except EC2ResponseError as e:
                    module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
                changed = True

    # wait here until the instances are 'terminated'
    if wait:
        num_terminated = 0
        wait_timeout = time.time() + wait_timeout
        while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
            response = ec2.get_all_instances( \
                instance_ids=terminated_instance_ids, \
                filters={'instance-state-name':'terminated'})
            try:
                num_terminated = sum([len(res.instances) for res in response])
            # NOTE(review): very broad catch ("e" is unused) -- any error in
            # the response is treated as transient and retried below.
            except Exception as e:
                # got a bad response of some sort, possibly due to
                # stale/cached data.
 Wait a second and then try again
                time.sleep(1)
                continue

            if num_terminated < len(terminated_instance_ids):
                time.sleep(5)

        # waiting took too long
        if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
            module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime())

        #Lets get the current state of the instances after terminating - issue600
        instance_dict_array = []
        for res in ec2.get_all_instances(instance_ids=terminated_instance_ids,\
                                            filters={'instance-state-name':'terminated'}):
            for inst in res.instances:
                instance_dict_array.append(get_instance_info(inst))

    return (changed, instance_dict_array, terminated_instance_ids)


def startstop_instances(module, ec2, instance_ids, state, instance_tags):
    """
    Starts or stops a list of existing instances

    module: Ansible module object
    ec2: authenticated ec2 connection object
    instance_ids: The list of instances to start in the form of
      [ {id: <inst-id>}, ..]
    instance_tags: A dict of tag keys and values in the form of
      {key: value, ... }
    state: Intended state ("running" or "stopped")

    Returns a dictionary of instance information
    about the instances started/stopped.

    If the instance was not able to change state,
    "changed" will be set to False.

    Note that if instance_ids and instance_tags are both non-empty,
    this method will process the intersection of the two
    """

    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    source_dest_check = module.params.get('source_dest_check')
    termination_protection = module.params.get('termination_protection')
    changed = False
    instance_dict_array = []

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        # Fail unless the user defined instance tags
        if not instance_tags:
            module.fail_json(msg='instance_ids should be a list of instances, aborting')

    # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
    # An empty filter does no filtering, so it's safe to pass it to the
    # get_all_instances method even if the user did not specify instance_tags
    filters = {}
    if instance_tags:
        for key, value in instance_tags.items():
            filters["tag:" + key] = value

    # Check that our instances are not in the state we want to take

    # Check (and eventually change) instances attributes and instances state
    existing_instances_array = []
    for res in ec2.get_all_instances(instance_ids, filters=filters):
        for inst in res.instances:

            # Check "source_dest_check" attribute
            try:
                if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
                    inst.modify_attribute('sourceDestCheck', source_dest_check)
                    changed = True
            except boto.exception.EC2ResponseError as exc:
                # instances with more than one Elastic Network Interface will
                # fail, because they have the sourceDestCheck attribute defined
                # per-interface
                if exc.code == 'InvalidInstanceID':
                    for interface in inst.interfaces:
                        if interface.source_dest_check != source_dest_check:
                            ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
                            changed = True
                else:
                    # NOTE(review): "traceback" is not in this module's
                    # visible imports -- this path would raise NameError;
                    # also format_exc() takes no exception argument. Confirm
                    # and fix (see restart_instances for the corrected form).
                    module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
                                     exception=traceback.format_exc(exc))

            # Check "termination_protection" attribute
            if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and
                termination_protection is not None):
                inst.modify_attribute('disableApiTermination', termination_protection)
                changed = True

            # Check instance state
            if inst.state != state:
                instance_dict_array.append(get_instance_info(inst))
                try:
                    if state == 'running':
                        inst.start()
                    else:
                        inst.stop()
                except EC2ResponseError as e:
                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
                changed = True
            existing_instances_array.append(inst.id)

    instance_ids = list(set(existing_instances_array + (instance_ids or [])))
    ## Wait
for all the instances to finish starting or stopping wait_timeout = time.time() + wait_timeout while wait and wait_timeout > time.time(): instance_dict_array = [] matched_instances = [] for res in ec2.get_all_instances(instance_ids): for i in res.instances: if i.state == state: instance_dict_array.append(get_instance_info(i)) matched_instances.append(i) if len(matched_instances) < len(instance_ids): time.sleep(5) else: break if wait and wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime()) return (changed, instance_dict_array, instance_ids) def restart_instances(module, ec2, instance_ids, state, instance_tags): """ Restarts a list of existing instances module: Ansible module object ec2: authenticated ec2 connection object instance_ids: The list of instances to start in the form of [ {id: <inst-id>}, ..] instance_tags: A dict of tag keys and values in the form of {key: value, ... } state: Intended state ("restarted") Returns a dictionary of instance information about the instances. If the instance was not able to change state, "changed" will be set to False. Wait will not apply here as this is a OS level operation. Note that if instance_ids and instance_tags are both non-empty, this method will process the intersection of the two. """ source_dest_check = module.params.get('source_dest_check') termination_protection = module.params.get('termination_protection') changed = False instance_dict_array = [] if not isinstance(instance_ids, list) or len(instance_ids) < 1: # Fail unless the user defined instance tags if not instance_tags: module.fail_json(msg='instance_ids should be a list of instances, aborting') # To make an EC2 tag filter, we need to prepend 'tag:' to each key. 
# An empty filter does no filtering, so it's safe to pass it to the # get_all_instances method even if the user did not specify instance_tags filters = {} if instance_tags: for key, value in instance_tags.items(): filters["tag:" + key] = value # Check that our instances are not in the state we want to take # Check (and eventually change) instances attributes and instances state for res in ec2.get_all_instances(instance_ids, filters=filters): for inst in res.instances: # Check "source_dest_check" attribute try: if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check: inst.modify_attribute('sourceDestCheck', source_dest_check) changed = True except boto.exception.EC2ResponseError as exc: # instances with more than one Elastic Network Interface will # fail, because they have the sourceDestCheck attribute defined # per-interface if exc.code == 'InvalidInstanceID': for interface in inst.interfaces: if interface.source_dest_check != source_dest_check: ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check) changed = True else: module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc), exception=traceback.format_exc(exc)) # Check "termination_protection" attribute if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection and termination_protection is not None): inst.modify_attribute('disableApiTermination', termination_protection) changed = True # Check instance state if inst.state != state: instance_dict_array.append(get_instance_info(inst)) try: inst.reboot() except EC2ResponseError as e: module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) changed = True return (changed, instance_dict_array, instance_ids) def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( key_name = dict(aliases = ['keypair']), id = dict(), group = 
dict(type='list', aliases=['groups']), group_id = dict(type='list'), zone = dict(aliases=['aws_zone', 'ec2_zone']), instance_type = dict(aliases=['type']), spot_price = dict(), spot_type = dict(default='one-time', choices=["one-time", "persistent"]), spot_launch_group = dict(), image = dict(), kernel = dict(), count = dict(type='int', default='1'), monitoring = dict(type='bool', default=False), ramdisk = dict(), wait = dict(type='bool', default=False), wait_timeout = dict(default=300), spot_wait_timeout = dict(default=600), placement_group = dict(), user_data = dict(), instance_tags = dict(type='dict'), vpc_subnet_id = dict(), assign_public_ip = dict(type='bool', default=False), private_ip = dict(), instance_profile_name = dict(), instance_ids = dict(type='list', aliases=['instance_id']), source_dest_check = dict(type='bool', default=True), termination_protection = dict(type='bool', default=None), state = dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']), instance_initiated_shutdown_behavior=dict(default=None, choices=['stop', 'terminate']), exact_count = dict(type='int', default=None), count_tag = dict(), volumes = dict(type='list'), ebs_optimized = dict(type='bool', default=False), tenancy = dict(default='default'), network_interfaces = dict(type='list', aliases=['network_interface']) ) ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive = [ ['exact_count', 'count'], ['exact_count', 'state'], ['exact_count', 'instance_ids'], ['network_interfaces', 'assign_public_ip'], ['network_interfaces', 'group'], ['network_interfaces', 'group_id'], ['network_interfaces', 'private_ip'], ['network_interfaces', 'vpc_subnet_id'], ], ) if not HAS_BOTO: module.fail_json(msg='boto required for this module') ec2 = ec2_connect(module) region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) if region: try: vpc = connect_to_aws(boto.vpc, region, **aws_connect_kwargs) except boto.exception.NoAuthHandlerFound as 
e: module.fail_json(msg = str(e)) else: vpc = None tagged_instances = [] state = module.params['state'] if state == 'absent': instance_ids = module.params['instance_ids'] if not instance_ids: module.fail_json(msg='instance_ids list is required for absent state') (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids) elif state in ('running', 'stopped'): instance_ids = module.params.get('instance_ids') instance_tags = module.params.get('instance_tags') if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)): module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids) (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags) elif state in ('restarted'): instance_ids = module.params.get('instance_ids') instance_tags = module.params.get('instance_tags') if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)): module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids) (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags) elif state == 'present': # Changed is always set to true when provisioning new instances if not module.params.get('image'): module.fail_json(msg='image parameter is required for new instance') if module.params.get('exact_count') is None: (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc) else: (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc) module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * main()
gpl-3.0
edgardoh/darktable
src/common/grouping.h
1375
/* This file is part of darktable, copyright (c) 2011 tobias ellinghaus. darktable is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. darktable is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with darktable. If not, see <http://www.gnu.org/licenses/>. */ #pragma once /** add an image to a group */ void dt_grouping_add_to_group(int group_id, int image_id); /** remove an image from a group. returns the new group_id of the other images. */ int dt_grouping_remove_from_group(int image_id); /** make an image the representative of the group it is in. returns the new group_id. */ int dt_grouping_change_representative(int image_id); // modelines: These editor modelines have been set for all relevant files by tools/update_modelines.sh // vim: shiftwidth=2 expandtab tabstop=2 cindent // kate: tab-indents: off; indent-width 2; replace-tabs on; indent-mode cstyle; remove-trailing-spaces modified;
gpl-3.0
gguruss/mixerp
src/Libraries/Web API/Transactions/Tests/GetPurchaseTests.cs
720
// ReSharper disable All using System; using System.Diagnostics; using System.Linq; using MixERP.Net.Api.Transactions.Fakes; using MixERP.Net.ApplicationState.Cache; using Xunit; namespace MixERP.Net.Api.Transactions.Tests { public class GetPurchaseTests { public static GetPurchaseController Fixture() { GetPurchaseController controller = new GetPurchaseController(new GetPurchaseRepository(), "", new LoginView()); return controller; } [Fact] [Conditional("Debug")] public void Execute() { var actual = Fixture().Execute(new GetPurchaseController.Annotation()); Assert.Equal(1, actual); } } }
gpl-3.0
mixerp/mixerp
src/FrontEnd/Modules/Sales.Data/Transactions/GlTransaction.cs
4220
using System; using System.Collections.ObjectModel; using System.Globalization; using System.Linq; using MixERP.Net.Common; using MixERP.Net.Core.Modules.Sales.Data.Data; using MixERP.Net.DbFactory; using MixERP.Net.Entities.Core; using MixERP.Net.Entities.Transactions.Models; using Npgsql; namespace MixERP.Net.Core.Modules.Sales.Data.Transactions { internal static class GlTransaction { public static long Add(string catalog, string bookName, DateTime valueDate, int officeId, int userId, long loginId, int costCenterId, string referenceNumber, string statementReference, StockMaster stockMaster, Collection<StockDetail> details, Collection<Attachment> attachments, bool nonTaxable, Collection<long> tranIds) { if (stockMaster == null) { return 0; } if (details == null) { return 0; } if (details.Count.Equals(0)) { return 0; } string detail = StockMasterDetailHelper.CreateStockMasterDetailParameter(details); string attachment = AttachmentHelper.CreateAttachmentModelParameter(attachments); string ids = "NULL::bigint"; if (tranIds != null && tranIds.Count > 0) { ids = string.Join(",", tranIds); } string sql = string.Format(CultureInfo.InvariantCulture, "SELECT * FROM transactions.post_sales(@BookName::national character varying(48), @OfficeId::integer, @UserId::integer, @LoginId::bigint, @ValueDate::date, @CostCenterId::integer, @ReferenceNumber::national character varying(24), @StatementReference::text, @IsCredit::boolean, @PaymentTermId::integer, @PartyCode::national character varying(12), @PriceTypeId::integer, @SalespersonId::integer, @ShipperId::integer, @ShippingAddressCode::national character varying(12), @StoreId::integer, @NonTaxable::boolean, ARRAY[{0}], ARRAY[{1}], ARRAY[{2}])", detail, attachment, ids); using (NpgsqlCommand command = new NpgsqlCommand(sql)) { command.Parameters.AddWithValue("@BookName", bookName); command.Parameters.AddWithValue("@OfficeId", officeId); command.Parameters.AddWithValue("@UserId", userId); command.Parameters.AddWithValue("@LoginId", 
loginId); command.Parameters.AddWithValue("@ValueDate", valueDate); command.Parameters.AddWithValue("@CostCenterId", costCenterId); command.Parameters.AddWithValue("@ReferenceNumber", referenceNumber); command.Parameters.AddWithValue("@StatementReference", statementReference); command.Parameters.AddWithValue("@IsCredit", stockMaster.IsCredit); if (stockMaster.PaymentTermId.Equals(0)) { command.Parameters.AddWithValue("@PaymentTermId", DBNull.Value); } else { command.Parameters.AddWithValue("@PaymentTermId", stockMaster.PaymentTermId); } command.Parameters.AddWithValue("@PartyCode", stockMaster.PartyCode); command.Parameters.AddWithValue("@PriceTypeId", stockMaster.PriceTypeId); command.Parameters.AddWithValue("@SalespersonId", stockMaster.SalespersonId); command.Parameters.AddWithValue("@ShipperId", stockMaster.ShipperId); command.Parameters.AddWithValue("@ShippingAddressCode", stockMaster.ShippingAddressCode); command.Parameters.AddWithValue("@StoreId", stockMaster.StoreId); command.Parameters.AddWithValue("@NonTaxable", nonTaxable); command.Parameters.AddRange(StockMasterDetailHelper.AddStockMasterDetailParameter(details).ToArray()); command.Parameters.AddRange(AttachmentHelper.AddAttachmentParameter(attachments).ToArray()); long tranId = Conversion.TryCastLong(DbOperation.GetScalarValue(catalog, command)); return tranId; } } } }
gpl-3.0
lemio/w-esp
w-esp-node-red/red/runtime/nodes/flows/Flow.js
16004
/** * Copyright 2015 IBM Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. **/ var when = require("when"); var clone = require("clone"); var typeRegistry = require("../registry"); var Log = require("../../log"); var redUtil = require("../../util"); var flowUtil = require("./util"); function Flow(global,flow) { if (typeof flow === 'undefined') { flow = global; } var activeNodes = {}; var subflowInstanceNodes = {}; var catchNodeMap = {}; var statusNodeMap = {}; this.start = function(diff) { var node; var newNode; var id; catchNodeMap = {}; statusNodeMap = {}; for (id in flow.configs) { if (flow.configs.hasOwnProperty(id)) { node = flow.configs[id]; if (!activeNodes[id]) { newNode = createNode(node.type,node); if (newNode) { activeNodes[id] = newNode; } } } } if (diff && diff.rewired) { for (var j=0;j<diff.rewired.length;j++) { var rewireNode = activeNodes[diff.rewired[j]]; if (rewireNode) { rewireNode.updateWires(flow.nodes[rewireNode.id].wires); } } } for (id in flow.nodes) { if (flow.nodes.hasOwnProperty(id)) { node = flow.nodes[id]; if (!node.subflow) { if (!activeNodes[id]) { newNode = createNode(node.type,node); if (newNode) { activeNodes[id] = newNode; } } } else { if (!subflowInstanceNodes[id]) { try { var nodes = createSubflow(flow.subflows[node.subflow]||global.subflows[node.subflow],node,flow.subflows,global.subflows,activeNodes); subflowInstanceNodes[id] = nodes.map(function(n) { return n.id}); for (var i=0;i<nodes.length;i++) { if (nodes[i]) { 
activeNodes[nodes[i].id] = nodes[i]; } } } catch(err) { console.log(err.stack) } } } } } for (id in activeNodes) { if (activeNodes.hasOwnProperty(id)) { node = activeNodes[id]; if (node.type === "catch") { catchNodeMap[node.z] = catchNodeMap[node.z] || []; catchNodeMap[node.z].push(node); } else if (node.type === "status") { statusNodeMap[node.z] = statusNodeMap[node.z] || []; statusNodeMap[node.z].push(node); } } } } this.stop = function(stopList) { return when.promise(function(resolve) { var i; if (stopList) { for (i=0;i<stopList.length;i++) { if (subflowInstanceNodes[stopList[i]]) { // The first in the list is the instance node we already // know about stopList = stopList.concat(subflowInstanceNodes[stopList[i]].slice(1)) } } } else { stopList = Object.keys(activeNodes); } var promises = []; for (i=0;i<stopList.length;i++) { var node = activeNodes[stopList[i]]; if (node) { delete activeNodes[stopList[i]]; if (subflowInstanceNodes[stopList[i]]) { delete subflowInstanceNodes[stopList[i]]; } try { var p = node.close(); if (p) { promises.push(p); } } catch(err) { node.error(err); } } } when.settle(promises).then(function() { resolve(); }); }); } this.update = function(_global,_flow) { global = _global; flow = _flow; } this.getNode = function(id) { return activeNodes[id]; } this.getActiveNodes = function() { return activeNodes; } this.handleStatus = function(node,statusMessage) { var targetStatusNodes = null; var reportingNode = node; var handled = false; while(reportingNode && !handled) { targetStatusNodes = statusNodeMap[reportingNode.z]; if (targetStatusNodes) { targetStatusNodes.forEach(function(targetStatusNode) { if (targetStatusNode.scope && targetStatusNode.scope.indexOf(node.id) === -1) { return; } var message = { status: { text: "", source: { id: node.id, type: node.type, name: node.name } } }; if (statusMessage.text) { message.status.text = statusMessage.text; } targetStatusNode.receive(message); handled = true; }); } if (!handled) { reportingNode = 
activeNodes[reportingNode.z]; } } } this.handleError = function(node,logMessage,msg) { var count = 1; if (msg && msg.hasOwnProperty("error")) { if (msg.error.hasOwnProperty("source")) { if (msg.error.source.id === node.id) { count = msg.error.source.count+1; if (count === 10) { node.warn(Log._("nodes.flow.error-loop")); return; } } } } var targetCatchNodes = null; var throwingNode = node; var handled = false; while (throwingNode && !handled) { targetCatchNodes = catchNodeMap[throwingNode.z]; if (targetCatchNodes) { targetCatchNodes.forEach(function(targetCatchNode) { if (targetCatchNode.scope && targetCatchNode.scope.indexOf(throwingNode.id) === -1) { return; } var errorMessage; if (msg) { errorMessage = redUtil.cloneMessage(msg); } else { errorMessage = {}; } if (errorMessage.hasOwnProperty("error")) { errorMessage._error = errorMessage.error; } errorMessage.error = { message: logMessage.toString(), source: { id: node.id, type: node.type, name: node.name, count: count } }; targetCatchNode.receive(errorMessage); handled = true; }); } if (!handled) { throwingNode = activeNodes[throwingNode.z]; } } } } var EnvVarPropertyRE = /^\$\((\S+)\)$/; function mapEnvVarProperties(obj,prop) { if (Buffer.isBuffer(obj[prop])) { return; } else if (Array.isArray(obj[prop])) { for (var i=0;i<obj[prop].length;i++) { mapEnvVarProperties(obj[prop],i); } } else if (typeof obj[prop] === 'string') { var m; if ( (m = EnvVarPropertyRE.exec(obj[prop])) !== null) { if (process.env.hasOwnProperty(m[1])) { obj[prop] = process.env[m[1]]; } } } else { for (var p in obj[prop]) { if (obj[prop].hasOwnProperty) { mapEnvVarProperties(obj[prop],p); } } } } function createNode(type,config) { var nn = null; var nt = typeRegistry.get(type); if (nt) { var conf = clone(config); delete conf.credentials; for (var p in conf) { if (conf.hasOwnProperty(p)) { mapEnvVarProperties(conf,p); } } try { nn = new nt(conf); } catch (err) { Log.log({ level: Log.ERROR, id:conf.id, type: type, msg: err }); } } else { 
Log.error(Log._("nodes.flow.unknown-type", {type:type})); } return nn; } function createSubflow(sf,sfn,subflows,globalSubflows,activeNodes) { //console.log("CREATE SUBFLOW",sf.id,sfn.id); var nodes = []; var node_map = {}; var newNodes = []; var node; var wires; var i,j,k; var createNodeInSubflow = function(def) { node = clone(def); var nid = redUtil.generateId(); node_map[node.id] = node; node._alias = node.id; node.id = nid; node.z = sfn.id; newNodes.push(node); } // Clone all of the subflow node definitions and give them new IDs for (i in sf.configs) { if (sf.configs.hasOwnProperty(i)) { createNodeInSubflow(sf.configs[i]); } } // Clone all of the subflow node definitions and give them new IDs for (i in sf.nodes) { if (sf.nodes.hasOwnProperty(i)) { createNodeInSubflow(sf.nodes[i]); } } // Look for any catch/status nodes and update their scope ids // Update all subflow interior wiring to reflect new node IDs for (i=0;i<newNodes.length;i++) { node = newNodes[i]; if (node.wires) { var outputs = node.wires; for (j=0;j<outputs.length;j++) { wires = outputs[j]; for (k=0;k<wires.length;k++) { outputs[j][k] = node_map[outputs[j][k]].id } } if ((node.type === 'catch' || node.type === 'status') && node.scope) { node.scope = node.scope.map(function(id) { return node_map[id]?node_map[id].id:"" }) } else { for (var prop in node) { if (node.hasOwnProperty(prop) && prop !== '_alias') { if (node_map[node[prop]]) { //console.log("Mapped",node.type,node.id,prop,node_map[node[prop]].id); node[prop] = node_map[node[prop]].id; } } } } } } // Create a subflow node to accept inbound messages and route appropriately var Node = require("../Node"); var subflowInstance = { id: sfn.id, type: sfn.type, z: sfn.z, name: sfn.name, wires: [] } if (sf.in) { subflowInstance.wires = sf.in.map(function(n) { return n.wires.map(function(w) { return node_map[w.id].id;})}) subflowInstance._originalWires = clone(subflowInstance.wires); } var subflowNode = new Node(subflowInstance); 
subflowNode.on("input", function(msg) { this.send(msg);}); subflowNode._updateWires = subflowNode.updateWires; subflowNode.updateWires = function(newWires) { // Wire the subflow outputs if (sf.out) { var node,wires,i,j; // Restore the original wiring to the internal nodes subflowInstance.wires = clone(subflowInstance._originalWires); for (i=0;i<sf.out.length;i++) { wires = sf.out[i].wires; for (j=0;j<wires.length;j++) { if (wires[j].id != sf.id) { node = node_map[wires[j].id]; if (node._originalWires) { node.wires = clone(node._originalWires); } } } } var modifiedNodes = {}; var subflowInstanceModified = false; for (i=0;i<sf.out.length;i++) { wires = sf.out[i].wires; for (j=0;j<wires.length;j++) { if (wires[j].id === sf.id) { subflowInstance.wires[wires[j].port] = subflowInstance.wires[wires[j].port].concat(newWires[i]); subflowInstanceModified = true; } else { node = node_map[wires[j].id]; node.wires[wires[j].port] = node.wires[wires[j].port].concat(newWires[i]); modifiedNodes[node.id] = node; } } } Object.keys(modifiedNodes).forEach(function(id) { var node = modifiedNodes[id]; subflowNode.instanceNodes[id].updateWires(node.wires); }); if (subflowInstanceModified) { subflowNode._updateWires(subflowInstance.wires); } } } nodes.push(subflowNode); // Wire the subflow outputs if (sf.out) { var modifiedNodes = {}; for (i=0;i<sf.out.length;i++) { wires = sf.out[i].wires; for (j=0;j<wires.length;j++) { if (wires[j].id === sf.id) { // A subflow input wired straight to a subflow output subflowInstance.wires[wires[j].port] = subflowInstance.wires[wires[j].port].concat(sfn.wires[i]) subflowNode._updateWires(subflowInstance.wires); } else { node = node_map[wires[j].id]; modifiedNodes[node.id] = node; if (!node._originalWires) { node._originalWires = clone(node.wires); } node.wires[wires[j].port] = (node.wires[wires[j].port]||[]).concat(sfn.wires[i]); } } } } // Instantiate the nodes for (i=0;i<newNodes.length;i++) { node = newNodes[i]; var type = node.type; var m = 
/^subflow:(.+)$/.exec(type); if (!m) { var newNode = createNode(type,node); if (newNode) { activeNodes[node.id] = newNode; nodes.push(newNode); } } else { var subflowId = m[1]; nodes = nodes.concat(createSubflow(subflows[subflowId]||globalSubflows[subflowId],node,subflows,globalSubflows,activeNodes)); } } subflowNode.instanceNodes = {}; nodes.forEach(function(node) { subflowNode.instanceNodes[node.id] = node; }); return nodes; } module.exports = { create: function(global,conf) { return new Flow(global,conf); } }
gpl-3.0
gmuro/dolibarr
htdocs/includes/ckeditor/ckeditor/_source/plugins/removeformat/lang/en-gb.js
227
/* Copyright (c) 2003-2014, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.md or http://ckeditor.com/license */ CKEDITOR.plugins.setLang( 'removeformat', 'en-gb', { toolbar: 'Remove Format' } );
gpl-3.0
gmuro/dolibarr
htdocs/includes/ckeditor/ckeditor/_source/plugins/flash/lang/ku.js
1731
/* Copyright (c) 2003-2014, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.md or http://ckeditor.com/license */ CKEDITOR.plugins.setLang( 'flash', 'ku', { access: 'دەستپێگەیشتنی نووسراو', accessAlways: 'هەمیشه', accessNever: 'هەرگیز', accessSameDomain: 'هەمان دۆمەین', alignAbsBottom: 'له ژێرەوه', alignAbsMiddle: 'لەناوەند', alignBaseline: 'هێڵەبنەڕەت', alignTextTop: 'دەق لەسەرەوه', bgcolor: 'ڕەنگی پاشبنەما', chkFull: 'ڕێپێدان بە پڕی شاشه', chkLoop: 'گرێ', chkMenu: 'چالاککردنی لیستەی فلاش', chkPlay: 'پێکردنی یان لێدانی خۆکار', flashvars: 'گۆڕاوەکان بۆ فلاش', hSpace: 'بۆشایی ئاسۆیی', properties: 'خاسیەتی فلاش', propertiesTab: 'خاسیەت', quality: 'جۆرایەتی', qualityAutoHigh: 'بەرزی خۆکار', qualityAutoLow: 'نزمی خۆکار', qualityBest: 'باشترین', qualityHigh: 'بەرزی', qualityLow: 'نزم', qualityMedium: 'مامناوەند', scale: 'پێوانه', scaleAll: 'نیشاندانی هەموو', scaleFit: 'بەوردی بگونجێت', scaleNoBorder: 'بێ پەراوێز', title: 'خاسیەتی فلاش', vSpace: 'بۆشایی ئەستونی', validateHSpace: 'بۆشایی ئاسۆیی دەبێت ژمارە بێت.', validateSrc: 'ناونیشانی بەستەر نابێت خاڵی بێت', validateVSpace: 'بۆشایی ئەستونی دەبێت ژماره بێت.', windowMode: 'شێوازی پەنجەره', windowModeOpaque: 'ناڕوون', windowModeTransparent: 'ڕۆشن', windowModeWindow: 'پەنجەره' } );
gpl-3.0
xushiwei/fibjs
vender/src/v8/src/gdb-jit.cc
63323
// Copyright 2010 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifdef ENABLE_GDB_JIT_INTERFACE #include "src/v8.h" #include "src/base/platform/platform.h" #include "src/bootstrapper.h" #include "src/compiler.h" #include "src/frames-inl.h" #include "src/frames.h" #include "src/gdb-jit.h" #include "src/global-handles.h" #include "src/messages.h" #include "src/natives.h" #include "src/ostreams.h" #include "src/scopes.h" namespace v8 { namespace internal { #ifdef __APPLE__ #define __MACH_O class MachO; class MachOSection; typedef MachO DebugObject; typedef MachOSection DebugSection; #else #define __ELF class ELF; class ELFSection; typedef ELF DebugObject; typedef ELFSection DebugSection; #endif class Writer BASE_EMBEDDED { public: explicit Writer(DebugObject* debug_object) : debug_object_(debug_object), position_(0), capacity_(1024), buffer_(reinterpret_cast<byte*>(malloc(capacity_))) { } ~Writer() { free(buffer_); } uintptr_t position() const { return position_; } template<typename T> class Slot { public: Slot(Writer* w, uintptr_t offset) : w_(w), offset_(offset) { } T* operator-> () { return w_->RawSlotAt<T>(offset_); } void set(const T& value) { *w_->RawSlotAt<T>(offset_) = value; } Slot<T> at(int i) { return Slot<T>(w_, offset_ + sizeof(T) * i); } private: Writer* w_; uintptr_t offset_; }; template<typename T> void Write(const T& val) { Ensure(position_ + sizeof(T)); *RawSlotAt<T>(position_) = val; position_ += sizeof(T); } template<typename T> Slot<T> SlotAt(uintptr_t offset) { Ensure(offset + sizeof(T)); return Slot<T>(this, offset); } template<typename T> Slot<T> CreateSlotHere() { return CreateSlotsHere<T>(1); } template<typename T> Slot<T> CreateSlotsHere(uint32_t count) { uintptr_t slot_position = position_; position_ += sizeof(T) * count; Ensure(position_); return SlotAt<T>(slot_position); } void Ensure(uintptr_t pos) { if (capacity_ < pos) { while 
(capacity_ < pos) capacity_ *= 2; buffer_ = reinterpret_cast<byte*>(realloc(buffer_, capacity_)); } } DebugObject* debug_object() { return debug_object_; } byte* buffer() { return buffer_; } void Align(uintptr_t align) { uintptr_t delta = position_ % align; if (delta == 0) return; uintptr_t padding = align - delta; Ensure(position_ += padding); DCHECK((position_ % align) == 0); } void WriteULEB128(uintptr_t value) { do { uint8_t byte = value & 0x7F; value >>= 7; if (value != 0) byte |= 0x80; Write<uint8_t>(byte); } while (value != 0); } void WriteSLEB128(intptr_t value) { bool more = true; while (more) { int8_t byte = value & 0x7F; bool byte_sign = byte & 0x40; value >>= 7; if ((value == 0 && !byte_sign) || (value == -1 && byte_sign)) { more = false; } else { byte |= 0x80; } Write<int8_t>(byte); } } void WriteString(const char* str) { do { Write<char>(*str); } while (*str++); } private: template<typename T> friend class Slot; template<typename T> T* RawSlotAt(uintptr_t offset) { DCHECK(offset < capacity_ && offset + sizeof(T) <= capacity_); return reinterpret_cast<T*>(&buffer_[offset]); } DebugObject* debug_object_; uintptr_t position_; uintptr_t capacity_; byte* buffer_; }; class ELFStringTable; template<typename THeader> class DebugSectionBase : public ZoneObject { public: virtual ~DebugSectionBase() { } virtual void WriteBody(Writer::Slot<THeader> header, Writer* writer) { uintptr_t start = writer->position(); if (WriteBodyInternal(writer)) { uintptr_t end = writer->position(); header->offset = start; #if defined(__MACH_O) header->addr = 0; #endif header->size = end - start; } } virtual bool WriteBodyInternal(Writer* writer) { return false; } typedef THeader Header; }; struct MachOSectionHeader { char sectname[16]; char segname[16]; #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87 uint32_t addr; uint32_t size; #else uint64_t addr; uint64_t size; #endif uint32_t offset; uint32_t align; uint32_t reloff; uint32_t nreloc; uint32_t flags; uint32_t reserved1; uint32_t 
reserved2; }; class MachOSection : public DebugSectionBase<MachOSectionHeader> { public: enum Type { S_REGULAR = 0x0u, S_ATTR_COALESCED = 0xbu, S_ATTR_SOME_INSTRUCTIONS = 0x400u, S_ATTR_DEBUG = 0x02000000u, S_ATTR_PURE_INSTRUCTIONS = 0x80000000u }; MachOSection(const char* name, const char* segment, uintptr_t align, uint32_t flags) : name_(name), segment_(segment), align_(align), flags_(flags) { if (align_ != 0) { DCHECK(IsPowerOf2(align)); align_ = WhichPowerOf2(align_); } } virtual ~MachOSection() { } virtual void PopulateHeader(Writer::Slot<Header> header) { header->addr = 0; header->size = 0; header->offset = 0; header->align = align_; header->reloff = 0; header->nreloc = 0; header->flags = flags_; header->reserved1 = 0; header->reserved2 = 0; memset(header->sectname, 0, sizeof(header->sectname)); memset(header->segname, 0, sizeof(header->segname)); DCHECK(strlen(name_) < sizeof(header->sectname)); DCHECK(strlen(segment_) < sizeof(header->segname)); strncpy(header->sectname, name_, sizeof(header->sectname)); strncpy(header->segname, segment_, sizeof(header->segname)); } private: const char* name_; const char* segment_; uintptr_t align_; uint32_t flags_; }; struct ELFSectionHeader { uint32_t name; uint32_t type; uintptr_t flags; uintptr_t address; uintptr_t offset; uintptr_t size; uint32_t link; uint32_t info; uintptr_t alignment; uintptr_t entry_size; }; #if defined(__ELF) class ELFSection : public DebugSectionBase<ELFSectionHeader> { public: enum Type { TYPE_NULL = 0, TYPE_PROGBITS = 1, TYPE_SYMTAB = 2, TYPE_STRTAB = 3, TYPE_RELA = 4, TYPE_HASH = 5, TYPE_DYNAMIC = 6, TYPE_NOTE = 7, TYPE_NOBITS = 8, TYPE_REL = 9, TYPE_SHLIB = 10, TYPE_DYNSYM = 11, TYPE_LOPROC = 0x70000000, TYPE_X86_64_UNWIND = 0x70000001, TYPE_HIPROC = 0x7fffffff, TYPE_LOUSER = 0x80000000, TYPE_HIUSER = 0xffffffff }; enum Flags { FLAG_WRITE = 1, FLAG_ALLOC = 2, FLAG_EXEC = 4 }; enum SpecialIndexes { INDEX_ABSOLUTE = 0xfff1 }; ELFSection(const char* name, Type type, uintptr_t align) : 
name_(name), type_(type), align_(align) { } virtual ~ELFSection() { } void PopulateHeader(Writer::Slot<Header> header, ELFStringTable* strtab); virtual void WriteBody(Writer::Slot<Header> header, Writer* w) { uintptr_t start = w->position(); if (WriteBodyInternal(w)) { uintptr_t end = w->position(); header->offset = start; header->size = end - start; } } virtual bool WriteBodyInternal(Writer* w) { return false; } uint16_t index() const { return index_; } void set_index(uint16_t index) { index_ = index; } protected: virtual void PopulateHeader(Writer::Slot<Header> header) { header->flags = 0; header->address = 0; header->offset = 0; header->size = 0; header->link = 0; header->info = 0; header->entry_size = 0; } private: const char* name_; Type type_; uintptr_t align_; uint16_t index_; }; #endif // defined(__ELF) #if defined(__MACH_O) class MachOTextSection : public MachOSection { public: MachOTextSection(uintptr_t align, uintptr_t addr, uintptr_t size) : MachOSection("__text", "__TEXT", align, MachOSection::S_REGULAR | MachOSection::S_ATTR_SOME_INSTRUCTIONS | MachOSection::S_ATTR_PURE_INSTRUCTIONS), addr_(addr), size_(size) { } protected: virtual void PopulateHeader(Writer::Slot<Header> header) { MachOSection::PopulateHeader(header); header->addr = addr_; header->size = size_; } private: uintptr_t addr_; uintptr_t size_; }; #endif // defined(__MACH_O) #if defined(__ELF) class FullHeaderELFSection : public ELFSection { public: FullHeaderELFSection(const char* name, Type type, uintptr_t align, uintptr_t addr, uintptr_t offset, uintptr_t size, uintptr_t flags) : ELFSection(name, type, align), addr_(addr), offset_(offset), size_(size), flags_(flags) { } protected: virtual void PopulateHeader(Writer::Slot<Header> header) { ELFSection::PopulateHeader(header); header->address = addr_; header->offset = offset_; header->size = size_; header->flags = flags_; } private: uintptr_t addr_; uintptr_t offset_; uintptr_t size_; uintptr_t flags_; }; class ELFStringTable : public 
    ELFSection {
 public:
  explicit ELFStringTable(const char* name)
      : ELFSection(name, TYPE_STRTAB, 1),
        writer_(NULL),
        offset_(0),
        size_(0) {
  }

  // Interns |str| and returns its offset within this string table.
  // Offset 0 is reserved for the empty string.
  uintptr_t Add(const char* str) {
    if (*str == '\0') return 0;

    uintptr_t offset = size_;
    WriteString(str);
    return offset;
  }

  void AttachWriter(Writer* w) {
    writer_ = w;
    offset_ = writer_->position();

    // First entry in the string table should be an empty string.
    WriteString("");
  }

  void DetachWriter() {
    writer_ = NULL;
  }

  // Body bytes were already emitted through Add(); only record offset/size.
  virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
    DCHECK(writer_ == NULL);
    header->offset = offset_;
    header->size = size_;
  }

 private:
  // Writes |str| including its terminating NUL and accounts for its size.
  void WriteString(const char* str) {
    uintptr_t written = 0;
    do {
      writer_->Write(*str);
      written++;
    } while (*str++);
    size_ += written;
  }

  Writer* writer_;

  uintptr_t offset_;
  uintptr_t size_;
};


void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
                                ELFStringTable* strtab) {
  header->name = strtab->Add(name_);
  header->type = type_;
  header->alignment = align_;
  PopulateHeader(header);
}
#endif  // defined(__ELF)


#if defined(__MACH_O)
// Builder for a minimal MH_OBJECT Mach-O image: header, one segment load
// command, and the registered sections.
class MachO BASE_EMBEDDED {
 public:
  explicit MachO(Zone* zone) : zone_(zone), sections_(6, zone) { }

  uint32_t AddSection(MachOSection* section) {
    sections_.Add(section, zone_);
    return sections_.length() - 1;
  }

  void Write(Writer* w, uintptr_t code_start, uintptr_t code_size) {
    Writer::Slot<MachOHeader> header = WriteHeader(w);
    uintptr_t load_command_start = w->position();
    Writer::Slot<MachOSegmentCommand> cmd = WriteSegmentCommand(w,
                                                               code_start,
                                                               code_size);
    WriteSections(w, cmd, header, load_command_start);
  }

 private:
  struct MachOHeader {
    uint32_t magic;
    uint32_t cputype;
    uint32_t cpusubtype;
    uint32_t filetype;
    uint32_t ncmds;
    uint32_t sizeofcmds;
    uint32_t flags;
#if V8_TARGET_ARCH_X64
    uint32_t reserved;
#endif
  };

  struct MachOSegmentCommand {
    uint32_t cmd;
    uint32_t cmdsize;
    char segname[16];
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
    uint32_t vmaddr;
    uint32_t vmsize;
    uint32_t fileoff;
    uint32_t filesize;
#else
    uint64_t vmaddr;
    uint64_t vmsize;
    uint64_t fileoff;
    uint64_t filesize;
#endif
    uint32_t maxprot;
    uint32_t initprot;
    uint32_t nsects;
    uint32_t flags;
  };

  enum MachOLoadCommandCmd {
    LC_SEGMENT_32 = 0x00000001u,
    LC_SEGMENT_64 = 0x00000019u
  };

  // Emits the mach_header(_64); ncmds is fixed at 1 (single segment command).
  Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
    DCHECK(w->position() == 0);
    Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
    header->magic = 0xFEEDFACEu;
    header->cputype = 7;  // i386
    header->cpusubtype = 3;  // CPU_SUBTYPE_I386_ALL
#elif V8_TARGET_ARCH_X64
    header->magic = 0xFEEDFACFu;
    header->cputype = 7 | 0x01000000;  // i386 | 64-bit ABI
    header->cpusubtype = 3;  // CPU_SUBTYPE_I386_ALL
    header->reserved = 0;
#else
#error Unsupported target architecture.
#endif
    header->filetype = 0x1;  // MH_OBJECT
    header->ncmds = 1;
    header->sizeofcmds = 0;
    header->flags = 0;
    return header;
  }

  // Emits the single LC_SEGMENT load command covering the code region;
  // fileoff/filesize are patched later in WriteSections().
  Writer::Slot<MachOSegmentCommand> WriteSegmentCommand(Writer* w,
                                                        uintptr_t code_start,
                                                        uintptr_t code_size) {
    Writer::Slot<MachOSegmentCommand> cmd =
        w->CreateSlotHere<MachOSegmentCommand>();
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
    cmd->cmd = LC_SEGMENT_32;
#else
    cmd->cmd = LC_SEGMENT_64;
#endif
    cmd->vmaddr = code_start;
    cmd->vmsize = code_size;
    cmd->fileoff = 0;
    cmd->filesize = 0;
    // 7 == VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE -- TODO confirm.
    cmd->maxprot = 7;
    cmd->initprot = 7;
    cmd->flags = 0;
    cmd->nsects = sections_.length();
    memset(cmd->segname, 0, 16);
    cmd->cmdsize = sizeof(MachOSegmentCommand) + sizeof(MachOSection::Header) *
        cmd->nsects;
    return cmd;
  }

  // Emits section headers then bodies, back-patching segment file extents
  // and the header's total load command size.
  void WriteSections(Writer* w,
                     Writer::Slot<MachOSegmentCommand> cmd,
                     Writer::Slot<MachOHeader> header,
                     uintptr_t load_command_start) {
    Writer::Slot<MachOSection::Header> headers =
        w->CreateSlotsHere<MachOSection::Header>(sections_.length());
    cmd->fileoff = w->position();
    header->sizeofcmds = w->position() - load_command_start;
    for (int section = 0; section < sections_.length(); ++section) {
      sections_[section]->PopulateHeader(headers.at(section));
      sections_[section]->WriteBody(headers.at(section), w);
    }
    cmd->filesize = w->position() - (uintptr_t)cmd->fileoff;
  }

  Zone* zone_;
  ZoneList<MachOSection*> sections_;
};
#endif  // defined(__MACH_O)


#if defined(__ELF)
// Builder for a minimal relocatable ELF image. Section 0 is the mandatory
// null section; section 1 is the section-name string table (.shstrtab).
class ELF BASE_EMBEDDED {
 public:
  explicit ELF(Zone* zone) : zone_(zone), sections_(6, zone) {
    sections_.Add(new(zone) ELFSection("", ELFSection::TYPE_NULL, 0), zone);
    sections_.Add(new(zone) ELFStringTable(".shstrtab"), zone);
  }

  void Write(Writer* w) {
    WriteHeader(w);
    WriteSectionTable(w);
    WriteSections(w);
  }

  ELFSection* SectionAt(uint32_t index) {
    return sections_[index];
  }

  uint32_t AddSection(ELFSection* section) {
    sections_.Add(section, zone_);
    section->set_index(sections_.length() - 1);
    return sections_.length() - 1;
  }

 private:
  struct ELFHeader {
    uint8_t ident[16];
    uint16_t type;
    uint16_t machine;
    uint32_t version;
    uintptr_t entry;
    uintptr_t pht_offset;
    uintptr_t sht_offset;
    uint32_t flags;
    uint16_t header_size;
    uint16_t pht_entry_size;
    uint16_t pht_entry_num;
    uint16_t sht_entry_size;
    uint16_t sht_entry_num;
    uint16_t sht_strtab_index;
  };

  void WriteHeader(Writer* w) {
    DCHECK(w->position() == 0);
    Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
     (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
    // EI_CLASS = ELFCLASS32, little-endian, ELF version 1.
    const uint8_t ident[16] =
        { 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT
    // EI_CLASS = ELFCLASS64, little-endian, ELF version 1.
    const uint8_t ident[16] =
        { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#else
#error Unsupported target architecture.
#endif
    memcpy(header->ident, ident, 16);
    header->type = 1;  // ET_REL: relocatable object file.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
    header->machine = 3;  // EM_386.
#elif V8_TARGET_ARCH_X64
    // Processor identification value for x64 is 62 as defined in
    // System V ABI, AMD64 Supplement
    // http://www.x86-64.org/documentation/abi.pdf
    header->machine = 62;
#elif V8_TARGET_ARCH_ARM
    // Set to EM_ARM, defined as 40, in "ARM ELF File Format" at
    // infocenter.arm.com/help/topic/com.arm.doc.dui0101a/DUI0101A_Elf.pdf
    header->machine = 40;
#else
#error Unsupported target architecture.
#endif
    header->version = 1;
    header->entry = 0;
    header->pht_offset = 0;  // No program header table.
    header->sht_offset = sizeof(ELFHeader);  // Section table follows header.
    header->flags = 0;
    header->header_size = sizeof(ELFHeader);
    header->pht_entry_size = 0;
    header->pht_entry_num = 0;
    header->sht_entry_size = sizeof(ELFSection::Header);
    header->sht_entry_num = sections_.length();
    header->sht_strtab_index = 1;  // .shstrtab added second in the ctor.
  }

  void WriteSectionTable(Writer* w) {
    // Section headers table immediately follows file header.
    DCHECK(w->position() == sizeof(ELFHeader));

    Writer::Slot<ELFSection::Header> headers =
        w->CreateSlotsHere<ELFSection::Header>(sections_.length());

    // String table for section table is the first section.
    ELFStringTable* strtab = static_cast<ELFStringTable*>(SectionAt(1));
    strtab->AttachWriter(w);
    for (int i = 0, length = sections_.length();
         i < length;
         i++) {
      sections_[i]->PopulateHeader(headers.at(i), strtab);
    }
    strtab->DetachWriter();
  }

  int SectionHeaderPosition(uint32_t section_index) {
    return sizeof(ELFHeader) + sizeof(ELFSection::Header) * section_index;
  }

  // Writes all section bodies, back-patching each header in place.
  void WriteSections(Writer* w) {
    Writer::Slot<ELFSection::Header> headers =
        w->SlotAt<ELFSection::Header>(sizeof(ELFHeader));

    for (int i = 0, length = sections_.length();
         i < length;
         i++) {
      sections_[i]->WriteBody(headers.at(i), w);
    }
  }

  Zone* zone_;
  ZoneList<ELFSection*> sections_;
};


// In-memory representation of one ELF symbol; serialized via the
// arch-dependent SerializedLayout below.
class ELFSymbol BASE_EMBEDDED {
 public:
  enum Type {
    TYPE_NOTYPE = 0,
    TYPE_OBJECT = 1,
    TYPE_FUNC = 2,
    TYPE_SECTION = 3,
    TYPE_FILE = 4,
    TYPE_LOPROC = 13,
    TYPE_HIPROC = 15
  };

  enum Binding {
    BIND_LOCAL = 0,
    BIND_GLOBAL = 1,
    BIND_WEAK = 2,
    BIND_LOPROC = 13,
    BIND_HIPROC = 15
  };

  ELFSymbol(const char* name,
            uintptr_t value,
            uintptr_t size,
            Binding binding,
            Type type,
            uint16_t section)
      : name(name),
        value(value),
        size(size),
        info((binding << 4) | type),  // st_info packs binding and type.
        other(0),
        section(section) { }

  Binding binding() const {
    return static_cast<Binding>(info >> 4);
  }

  // Elf32_Sym and Elf64_Sym order their fields differently.
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
     (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
  struct SerializedLayout {
    SerializedLayout(uint32_t name,
                     uintptr_t value,
                     uintptr_t size,
                     Binding binding,
                     Type type,
                     uint16_t section)
        : name(name),
          value(value),
          size(size),
          info((binding << 4) | type),
          other(0),
          section(section) { }

    uint32_t name;
    uintptr_t value;
    uintptr_t size;
    uint8_t info;
    uint8_t other;
    uint16_t section;
  };
#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT
  struct SerializedLayout {
    SerializedLayout(uint32_t name,
                     uintptr_t value,
                     uintptr_t size,
                     Binding binding,
                     Type type,
                     uint16_t section)
        : name(name),
          info((binding << 4) | type),
          other(0),
          section(section),
          value(value),
          size(size) { }

    uint32_t name;
    uint8_t info;
    uint8_t other;
    uint16_t
        section;
    uintptr_t value;
    uintptr_t size;
  };
#endif

  void Write(Writer::Slot<SerializedLayout> s, ELFStringTable* t) {
    // Convert symbol names from strings to indexes in the string table.
    s->name = t->Add(name);
    s->value = value;
    s->size = size;
    s->info = info;
    s->other = other;
    s->section = section;
  }

 private:
  const char* name;
  uintptr_t value;
  uintptr_t size;
  uint8_t info;
  uint8_t other;
  uint16_t section;
};


// .symtab section: local symbols first, then globals, as ELF requires.
class ELFSymbolTable : public ELFSection {
 public:
  ELFSymbolTable(const char* name, Zone* zone)
      : ELFSection(name, TYPE_SYMTAB, sizeof(uintptr_t)),
        locals_(1, zone),
        globals_(1, zone) {
  }

  virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
    w->Align(header->alignment);
    // +1 for the mandatory undefined symbol at index 0.
    int total_symbols = locals_.length() + globals_.length() + 1;
    header->offset = w->position();

    Writer::Slot<ELFSymbol::SerializedLayout> symbols =
        w->CreateSlotsHere<ELFSymbol::SerializedLayout>(total_symbols);

    header->size = w->position() - header->offset;

    // String table for this symbol table should follow it in the section
    // table.
    ELFStringTable* strtab =
        static_cast<ELFStringTable*>(w->debug_object()->SectionAt(index() + 1));
    strtab->AttachWriter(w);
    symbols.at(0).set(ELFSymbol::SerializedLayout(0,
                                                  0,
                                                  0,
                                                  ELFSymbol::BIND_LOCAL,
                                                  ELFSymbol::TYPE_NOTYPE,
                                                  0));
    WriteSymbolsList(&locals_, symbols.at(1), strtab);
    WriteSymbolsList(&globals_, symbols.at(locals_.length() + 1), strtab);
    strtab->DetachWriter();
  }

  void Add(const ELFSymbol& symbol, Zone* zone) {
    if (symbol.binding() == ELFSymbol::BIND_LOCAL) {
      locals_.Add(symbol, zone);
    } else {
      globals_.Add(symbol, zone);
    }
  }

 protected:
  virtual void PopulateHeader(Writer::Slot<Header> header) {
    ELFSection::PopulateHeader(header);
    // We are assuming that string table will follow symbol table.
    // sh_link = linked string table; sh_info = index of first global symbol.
    header->link = index() + 1;
    header->info = locals_.length() + 1;
    header->entry_size = sizeof(ELFSymbol::SerializedLayout);
  }

 private:
  void WriteSymbolsList(const ZoneList<ELFSymbol>* src,
                        Writer::Slot<ELFSymbol::SerializedLayout> dst,
                        ELFStringTable* strtab) {
    for (int i = 0, len = src->length();
         i < len;
         i++) {
      src->at(i).Write(dst.at(i), strtab);
    }
  }

  ZoneList<ELFSymbol> locals_;
  ZoneList<ELFSymbol> globals_;
};
#endif  // defined(__ELF)


// Per-code-object line information: (pc, source position, is_statement)
// triples collected while the code was generated.
class LineInfo : public Malloced {
 public:
  LineInfo() : pc_info_(10) {}

  void SetPosition(intptr_t pc, int pos, bool is_statement) {
    AddPCInfo(PCInfo(pc, pos, is_statement));
  }

  struct PCInfo {
    PCInfo(intptr_t pc, int pos, bool is_statement)
        : pc_(pc), pos_(pos), is_statement_(is_statement) {}

    intptr_t pc_;
    int pos_;
    bool is_statement_;
  };

  List<PCInfo>* pc_info() { return &pc_info_; }

 private:
  void AddPCInfo(const PCInfo& pc_info) { pc_info_.Add(pc_info); }

  List<PCInfo> pc_info_;
};


// Bundles everything the section writers need to know about one piece of
// JIT-ed code: name, code object, script, line info and compilation info.
class CodeDescription BASE_EMBEDDED {
 public:
#if V8_TARGET_ARCH_X64
  // Prologue/epilogue checkpoints used by the unwind info writer.
  enum StackState {
    POST_RBP_PUSH,
    POST_RBP_SET,
    POST_RBP_POP,
    STACK_STATE_MAX
  };
#endif

  CodeDescription(const char* name,
                  Code* code,
                  Handle<Script> script,
                  LineInfo* lineinfo,
                  GDBJITInterface::CodeTag tag,
                  CompilationInfo* info)
      : name_(name),
        code_(code),
        script_(script),
        lineinfo_(lineinfo),
        tag_(tag),
        info_(info) {
  }

  const char* name() const {
    return name_;
  }

  LineInfo* lineinfo() const { return lineinfo_; }

  GDBJITInterface::CodeTag tag() const { return tag_; }

  CompilationInfo* info() const { return info_; }

  bool IsInfoAvailable() const {
    return info_ != NULL;
  }

  uintptr_t CodeStart() const {
    return reinterpret_cast<uintptr_t>(code_->instruction_start());
  }

  uintptr_t CodeEnd() const {
    return reinterpret_cast<uintptr_t>(code_->instruction_end());
  }

  uintptr_t CodeSize() const {
    return CodeEnd() - CodeStart();
  }

  bool IsLineInfoAvailable() {
    return !script_.is_null() &&
        script_->source()->IsString() &&
        script_->HasValidSource() &&
        script_->name()->IsString() &&
        lineinfo_ != NULL;
  }

#if V8_TARGET_ARCH_X64
  uintptr_t GetStackStateStartAddress(StackState state) const {
    DCHECK(state < STACK_STATE_MAX);
    return stack_state_start_addresses_[state];
  }

  void SetStackStateStartAddress(StackState state, uintptr_t addr) {
    DCHECK(state < STACK_STATE_MAX);
    stack_state_start_addresses_[state] = addr;
  }
#endif

  SmartArrayPointer<char> GetFilename() {
    return String::cast(script_->name())->ToCString();
  }

  // Script line numbers are 0-based; DWARF line numbers are 1-based.
  int GetScriptLineNumber(int pos) {
    return script_->GetLineNumber(pos) + 1;
  }


 private:
  const char* name_;
  Code* code_;
  Handle<Script> script_;
  LineInfo* lineinfo_;
  GDBJITInterface::CodeTag tag_;
  CompilationInfo* info_;
#if V8_TARGET_ARCH_X64
  uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
#endif
};


#if defined(__ELF)
// Adds .symtab/.strtab describing the code object as one global FUNC symbol.
static void CreateSymbolsTable(CodeDescription* desc,
                               Zone* zone,
                               ELF* elf,
                               int text_section_index) {
  ELFSymbolTable* symtab = new(zone) ELFSymbolTable(".symtab", zone);
  ELFStringTable* strtab = new(zone) ELFStringTable(".strtab");

  // Symbol table should be followed by the linked string table.
  elf->AddSection(symtab);
  elf->AddSection(strtab);

  symtab->Add(ELFSymbol("V8 Code",
                        0,
                        0,
                        ELFSymbol::BIND_LOCAL,
                        ELFSymbol::TYPE_FILE,
                        ELFSection::INDEX_ABSOLUTE),
              zone);

  symtab->Add(ELFSymbol(desc->name(),
                        0,
                        desc->CodeSize(),
                        ELFSymbol::BIND_GLOBAL,
                        ELFSymbol::TYPE_FUNC,
                        text_section_index),
              zone);
}
#endif  // defined(__ELF)


// Emits the DWARF .debug_info compile unit: one subprogram DIE for the
// JIT-ed function plus DIEs for its parameters, slots and locals.
class DebugInfoSection : public DebugSection {
 public:
  explicit DebugInfoSection(CodeDescription* desc)
#if defined(__ELF)
      : ELFSection(".debug_info", TYPE_PROGBITS, 1),
#else
      : MachOSection("__debug_info",
                     "__DWARF",
                     1,
                     MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
#endif
        desc_(desc) { }

  // DWARF2 standard
  enum DWARF2LocationOp {
    DW_OP_reg0 = 0x50,
    DW_OP_reg1 = 0x51,
    DW_OP_reg2 = 0x52,
    DW_OP_reg3 = 0x53,
    DW_OP_reg4 = 0x54,
    DW_OP_reg5 = 0x55,
    DW_OP_reg6 = 0x56,
    DW_OP_reg7 = 0x57,
    DW_OP_fbreg = 0x91  // 1 param: SLEB128 offset
  };

  enum DWARF2Encoding {
    DW_ATE_ADDRESS = 0x1,
    DW_ATE_SIGNED = 0x5
  };

  bool WriteBodyInternal(Writer* w) {
    uintptr_t cu_start = w->position();
    // Compile unit length, back-patched at the end.
    Writer::Slot<uint32_t> size = w->CreateSlotHere<uint32_t>();
    uintptr_t start = w->position();

    w->Write<uint16_t>(2);  // DWARF version.
    w->Write<uint32_t>(0);  // Abbreviation table offset.
    w->Write<uint8_t>(sizeof(intptr_t));

    w->WriteULEB128(1);  // Abbreviation code.
    // Compile unit attributes: name, low_pc, high_pc, stmt_list offset.
    w->WriteString(desc_->GetFilename().get());
    w->Write<intptr_t>(desc_->CodeStart());
    w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
    w->Write<uint32_t>(0);

    // Base type DIE for tagged V8 values; referenced by DW_AT_TYPE below.
    uint32_t ty_offset = static_cast<uint32_t>(w->position() - cu_start);
    w->WriteULEB128(3);
    w->Write<uint8_t>(kPointerSize);
    w->WriteString("v8value");

    if (desc_->IsInfoAvailable()) {
      Scope* scope = desc_->info()->scope();
      // Subprogram DIE.
      w->WriteULEB128(2);
      w->WriteString(desc_->name());
      w->Write<intptr_t>(desc_->CodeStart());
      w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
      // DW_AT_frame_base expression block: the frame pointer register.
      Writer::Slot<uint32_t> fb_block_size = w->CreateSlotHere<uint32_t>();
      uintptr_t fb_block_start = w->position();
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
      w->Write<uint8_t>(DW_OP_reg5);  // The frame pointer's here on ia32
#elif V8_TARGET_ARCH_X64
      w->Write<uint8_t>(DW_OP_reg6);  // and here on x64.
#elif V8_TARGET_ARCH_ARM
      UNIMPLEMENTED();
#elif V8_TARGET_ARCH_MIPS
      UNIMPLEMENTED();
#elif V8_TARGET_ARCH_MIPS64
      UNIMPLEMENTED();
#else
#error Unsupported target architecture.
#endif
      fb_block_size.set(static_cast<uint32_t>(w->position() - fb_block_start));

      int params = scope->num_parameters();
      int slots = scope->num_stack_slots();
      int context_slots = scope->ContextLocalCount();
      // The real slot ID is internal_slots + context_slot_id.
      int internal_slots = Context::MIN_CONTEXT_SLOTS;
      int locals = scope->StackLocalCount();
      // Abbreviation codes 1-3 are taken by CU, subprogram and base type;
      // variable DIEs start at 4 and must match DebugAbbrevSection's order.
      int current_abbreviation = 4;

      // One DW_TAG_formal_parameter DIE per parameter, located relative to
      // the frame base.
      for (int param = 0; param < params; ++param) {
        w->WriteULEB128(current_abbreviation++);
        w->WriteString(
            scope->parameter(param)->name()->ToCString(DISALLOW_NULLS).get());
        w->Write<uint32_t>(ty_offset);
        Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
        uintptr_t block_start = w->position();
        w->Write<uint8_t>(DW_OP_fbreg);
        w->WriteSLEB128(
            JavaScriptFrameConstants::kLastParameterOffset +
                kPointerSize * (params - param - 1));
        block_size.set(static_cast<uint32_t>(w->position() - block_start));
      }

      EmbeddedVector<char, 256> buffer;
      StringBuilder builder(buffer.start(), buffer.length());

      // Nameless stack slots get synthetic "slotN" names.
      for (int slot = 0; slot < slots; ++slot) {
        w->WriteULEB128(current_abbreviation++);
        builder.Reset();
        builder.AddFormatted("slot%d", slot);
        w->WriteString(builder.Finalize());
      }

      // See contexts.h for more information.
      DCHECK(Context::MIN_CONTEXT_SLOTS == 4);
      DCHECK(Context::CLOSURE_INDEX == 0);
      DCHECK(Context::PREVIOUS_INDEX == 1);
      DCHECK(Context::EXTENSION_INDEX == 2);
      DCHECK(Context::GLOBAL_OBJECT_INDEX == 3);
      w->WriteULEB128(current_abbreviation++);
      w->WriteString(".closure");
      w->WriteULEB128(current_abbreviation++);
      w->WriteString(".previous");
      w->WriteULEB128(current_abbreviation++);
      w->WriteString(".extension");
      w->WriteULEB128(current_abbreviation++);
      w->WriteString(".global");

      for (int context_slot = 0;
           context_slot < context_slots;
           ++context_slot) {
        w->WriteULEB128(current_abbreviation++);
        builder.Reset();
        builder.AddFormatted("context_slot%d", context_slot + internal_slots);
        w->WriteString(builder.Finalize());
      }

      ZoneList<Variable*> stack_locals(locals, scope->zone());
      ZoneList<Variable*> context_locals(context_slots, scope->zone());
      scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
      for (int local = 0; local < locals; ++local) {
        w->WriteULEB128(current_abbreviation++);
        w->WriteString(
            stack_locals[local]->name()->ToCString(DISALLOW_NULLS).get());
        w->Write<uint32_t>(ty_offset);
        Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
        uintptr_t block_start = w->position();
        w->Write<uint8_t>(DW_OP_fbreg);
        w->WriteSLEB128(
            JavaScriptFrameConstants::kLocal0Offset -
                kPointerSize * local);
        block_size.set(static_cast<uint32_t>(w->position() - block_start));
      }

      // Synthetic "__function" variable: the JSFunction frame slot.
      {
        w->WriteULEB128(current_abbreviation++);
        w->WriteString("__function");
        w->Write<uint32_t>(ty_offset);
        Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
        uintptr_t block_start = w->position();
        w->Write<uint8_t>(DW_OP_fbreg);
        w->WriteSLEB128(JavaScriptFrameConstants::kFunctionOffset);
        block_size.set(static_cast<uint32_t>(w->position() - block_start));
      }

      // Synthetic "__context" variable: the context frame slot.
      {
        w->WriteULEB128(current_abbreviation++);
        w->WriteString("__context");
        w->Write<uint32_t>(ty_offset);
        Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
        uintptr_t block_start = w->position();
        w->Write<uint8_t>(DW_OP_fbreg);
        w->WriteSLEB128(StandardFrameConstants::kContextOffset);
        block_size.set(static_cast<uint32_t>(w->position() - block_start));
      }

      w->WriteULEB128(0);  // Terminate the sub program.
    }

    w->WriteULEB128(0);  // Terminate the compile unit.
    size.set(static_cast<uint32_t>(w->position() - start));
    return true;
  }

 private:
  CodeDescription* desc_;
};


// Emits the DWARF .debug_abbrev table; its abbreviation codes must be
// declared in exactly the order DebugInfoSection consumes them.
class DebugAbbrevSection : public DebugSection {
 public:
  explicit DebugAbbrevSection(CodeDescription* desc)
#ifdef __ELF
      : ELFSection(".debug_abbrev", TYPE_PROGBITS, 1),
#else
      : MachOSection("__debug_abbrev",
                     "__DWARF",
                     1,
                     MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
#endif
        desc_(desc) { }

  // DWARF2 standard, figure 14.
  enum DWARF2Tags {
    DW_TAG_FORMAL_PARAMETER = 0x05,
    DW_TAG_POINTER_TYPE = 0xf,
    DW_TAG_COMPILE_UNIT = 0x11,
    DW_TAG_STRUCTURE_TYPE = 0x13,
    DW_TAG_BASE_TYPE = 0x24,
    DW_TAG_SUBPROGRAM = 0x2e,
    DW_TAG_VARIABLE = 0x34
  };

  // DWARF2 standard, figure 16.
enum DWARF2ChildrenDetermination { DW_CHILDREN_NO = 0, DW_CHILDREN_YES = 1 }; // DWARF standard, figure 17. enum DWARF2Attribute { DW_AT_LOCATION = 0x2, DW_AT_NAME = 0x3, DW_AT_BYTE_SIZE = 0xb, DW_AT_STMT_LIST = 0x10, DW_AT_LOW_PC = 0x11, DW_AT_HIGH_PC = 0x12, DW_AT_ENCODING = 0x3e, DW_AT_FRAME_BASE = 0x40, DW_AT_TYPE = 0x49 }; // DWARF2 standard, figure 19. enum DWARF2AttributeForm { DW_FORM_ADDR = 0x1, DW_FORM_BLOCK4 = 0x4, DW_FORM_STRING = 0x8, DW_FORM_DATA4 = 0x6, DW_FORM_BLOCK = 0x9, DW_FORM_DATA1 = 0xb, DW_FORM_FLAG = 0xc, DW_FORM_REF4 = 0x13 }; void WriteVariableAbbreviation(Writer* w, int abbreviation_code, bool has_value, bool is_parameter) { w->WriteULEB128(abbreviation_code); w->WriteULEB128(is_parameter ? DW_TAG_FORMAL_PARAMETER : DW_TAG_VARIABLE); w->Write<uint8_t>(DW_CHILDREN_NO); w->WriteULEB128(DW_AT_NAME); w->WriteULEB128(DW_FORM_STRING); if (has_value) { w->WriteULEB128(DW_AT_TYPE); w->WriteULEB128(DW_FORM_REF4); w->WriteULEB128(DW_AT_LOCATION); w->WriteULEB128(DW_FORM_BLOCK4); } w->WriteULEB128(0); w->WriteULEB128(0); } bool WriteBodyInternal(Writer* w) { int current_abbreviation = 1; bool extra_info = desc_->IsInfoAvailable(); DCHECK(desc_->IsLineInfoAvailable()); w->WriteULEB128(current_abbreviation++); w->WriteULEB128(DW_TAG_COMPILE_UNIT); w->Write<uint8_t>(extra_info ? DW_CHILDREN_YES : DW_CHILDREN_NO); w->WriteULEB128(DW_AT_NAME); w->WriteULEB128(DW_FORM_STRING); w->WriteULEB128(DW_AT_LOW_PC); w->WriteULEB128(DW_FORM_ADDR); w->WriteULEB128(DW_AT_HIGH_PC); w->WriteULEB128(DW_FORM_ADDR); w->WriteULEB128(DW_AT_STMT_LIST); w->WriteULEB128(DW_FORM_DATA4); w->WriteULEB128(0); w->WriteULEB128(0); if (extra_info) { Scope* scope = desc_->info()->scope(); int params = scope->num_parameters(); int slots = scope->num_stack_slots(); int context_slots = scope->ContextLocalCount(); // The real slot ID is internal_slots + context_slot_id. 
      int internal_slots = Context::MIN_CONTEXT_SLOTS;
      int locals = scope->StackLocalCount();
      // Total children is params + slots + context_slots + internal_slots +
      // locals + 2 (__function and __context).

      // The extra duplication below seems to be necessary to keep
      // gdb from getting upset on OSX.
      w->WriteULEB128(current_abbreviation++);  // Abbreviation code.
      w->WriteULEB128(DW_TAG_SUBPROGRAM);
      w->Write<uint8_t>(DW_CHILDREN_YES);
      w->WriteULEB128(DW_AT_NAME);
      w->WriteULEB128(DW_FORM_STRING);
      w->WriteULEB128(DW_AT_LOW_PC);
      w->WriteULEB128(DW_FORM_ADDR);
      w->WriteULEB128(DW_AT_HIGH_PC);
      w->WriteULEB128(DW_FORM_ADDR);
      w->WriteULEB128(DW_AT_FRAME_BASE);
      w->WriteULEB128(DW_FORM_BLOCK4);
      w->WriteULEB128(0);
      w->WriteULEB128(0);

      // Abbreviation 3: the "v8value" base type.
      w->WriteULEB128(current_abbreviation++);
      w->WriteULEB128(DW_TAG_STRUCTURE_TYPE);
      w->Write<uint8_t>(DW_CHILDREN_NO);
      w->WriteULEB128(DW_AT_BYTE_SIZE);
      w->WriteULEB128(DW_FORM_DATA1);
      w->WriteULEB128(DW_AT_NAME);
      w->WriteULEB128(DW_FORM_STRING);
      w->WriteULEB128(0);
      w->WriteULEB128(0);

      // One abbreviation per DIE, in the same order DebugInfoSection
      // writes them (codes 4 and up).
      for (int param = 0; param < params; ++param) {
        WriteVariableAbbreviation(w, current_abbreviation++, true, true);
      }

      for (int slot = 0; slot < slots; ++slot) {
        WriteVariableAbbreviation(w, current_abbreviation++, false, false);
      }

      for (int internal_slot = 0;
           internal_slot < internal_slots;
           ++internal_slot) {
        WriteVariableAbbreviation(w, current_abbreviation++, false, false);
      }

      for (int context_slot = 0;
           context_slot < context_slots;
           ++context_slot) {
        WriteVariableAbbreviation(w, current_abbreviation++, false, false);
      }

      for (int local = 0; local < locals; ++local) {
        WriteVariableAbbreviation(w, current_abbreviation++, true, false);
      }

      // The function.
      WriteVariableAbbreviation(w, current_abbreviation++, true, false);

      // The context.
      WriteVariableAbbreviation(w, current_abbreviation++, true, false);

      w->WriteULEB128(0);  // Terminate the sibling list.
    }

    w->WriteULEB128(0);  // Terminate the table.
    return true;
  }

 private:
  CodeDescription* desc_;
};


// Emits the DWARF .debug_line program mapping code addresses to source lines.
class DebugLineSection : public DebugSection {
 public:
  explicit DebugLineSection(CodeDescription* desc)
#ifdef __ELF
      : ELFSection(".debug_line", TYPE_PROGBITS, 1),
#else
      : MachOSection("__debug_line",
                     "__DWARF",
                     1,
                     MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
#endif
        desc_(desc) { }

  // DWARF2 standard, figure 34.
  enum DWARF2Opcodes {
    DW_LNS_COPY = 1,
    DW_LNS_ADVANCE_PC = 2,
    DW_LNS_ADVANCE_LINE = 3,
    DW_LNS_SET_FILE = 4,
    DW_LNS_SET_COLUMN = 5,
    DW_LNS_NEGATE_STMT = 6
  };

  // DWARF2 standard, figure 35.
  enum DWARF2ExtendedOpcode {
    DW_LNE_END_SEQUENCE = 1,
    DW_LNE_SET_ADDRESS = 2,
    DW_LNE_DEFINE_FILE = 3
  };

  bool WriteBodyInternal(Writer* w) {
    // Write prologue.
    Writer::Slot<uint32_t> total_length = w->CreateSlotHere<uint32_t>();
    uintptr_t start = w->position();

    // Used for special opcodes
    const int8_t line_base = 1;
    const uint8_t line_range = 7;
    const int8_t max_line_incr = (line_base + line_range - 1);
    const uint8_t opcode_base = DW_LNS_NEGATE_STMT + 1;

    w->Write<uint16_t>(2);  // Field version.
    Writer::Slot<uint32_t> prologue_length = w->CreateSlotHere<uint32_t>();
    uintptr_t prologue_start = w->position();
    w->Write<uint8_t>(1);  // Field minimum_instruction_length.
    w->Write<uint8_t>(1);  // Field default_is_stmt.
    w->Write<int8_t>(line_base);  // Field line_base.
    w->Write<uint8_t>(line_range);  // Field line_range.
    w->Write<uint8_t>(opcode_base);  // Field opcode_base.
    w->Write<uint8_t>(0);  // DW_LNS_COPY operands count.
    w->Write<uint8_t>(1);  // DW_LNS_ADVANCE_PC operands count.
    w->Write<uint8_t>(1);  // DW_LNS_ADVANCE_LINE operands count.
    w->Write<uint8_t>(1);  // DW_LNS_SET_FILE operands count.
    w->Write<uint8_t>(1);  // DW_LNS_SET_COLUMN operands count.
    w->Write<uint8_t>(0);  // DW_LNS_NEGATE_STMT operands count.
    w->Write<uint8_t>(0);  // Empty include_directories sequence.
    w->WriteString(desc_->GetFilename().get());  // File name.
    w->WriteULEB128(0);  // Current directory.
    w->WriteULEB128(0);  // Unknown modification time.
    w->WriteULEB128(0);  // Unknown file size.
    w->Write<uint8_t>(0);
    prologue_length.set(static_cast<uint32_t>(w->position() - prologue_start));

    // Start the state machine at the beginning of the code.
    WriteExtendedOpcode(w, DW_LNE_SET_ADDRESS, sizeof(intptr_t));
    w->Write<intptr_t>(desc_->CodeStart());
    w->Write<uint8_t>(DW_LNS_COPY);

    intptr_t pc = 0;
    intptr_t line = 1;
    bool is_statement = true;

    List<LineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info();
    pc_info->Sort(&ComparePCInfo);

    int pc_info_length = pc_info->length();
    for (int i = 0; i < pc_info_length; i++) {
      LineInfo::PCInfo* info = &pc_info->at(i);
      DCHECK(info->pc_ >= pc);

      // Reduce bloating in the debug line table by removing duplicate line
      // entries (per DWARF2 standard).
      intptr_t new_line = desc_->GetScriptLineNumber(info->pos_);
      if (new_line == line) {
        continue;
      }

      // Mark statement boundaries.  For a better debugging experience, mark
      // the last pc address in the function as a statement (e.g. "}"), so that
      // a user can see the result of the last line executed in the function,
      // should control reach the end.
      if ((i+1) == pc_info_length) {
        if (!is_statement) {
          w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
        }
      } else if (is_statement != info->is_statement_) {
        w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
        is_statement = !is_statement;
      }

      // Generate special opcodes, if possible.  This results in more compact
      // debug line tables.  See the DWARF 2.0 standard to learn more about
      // special opcodes.
      uintptr_t pc_diff = info->pc_ - pc;
      intptr_t line_diff = new_line - line;

      // Compute special opcode (see DWARF 2.0 standard)
      intptr_t special_opcode = (line_diff - line_base) +
                                (line_range * pc_diff) +
                                opcode_base;

      // If special_opcode is less than or equal to 255, it can be used as a
      // special opcode.  If line_diff is larger than the max line increment
      // allowed for a special opcode, or if line_diff is less than the minimum
      // line that can be added to the line register (i.e. line_base), then
      // special_opcode can't be used.
      if ((special_opcode >= opcode_base) &&
          (special_opcode <= 255) &&
          (line_diff <= max_line_incr) &&
          (line_diff >= line_base)) {
        w->Write<uint8_t>(special_opcode);
      } else {
        // NOTE(review): DWARF specifies a ULEB128 operand for
        // DW_LNS_advance_pc; WriteSLEB128 produces the same encoding only
        // while pc_diff is small -- confirm against the Writer API.
        w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
        w->WriteSLEB128(pc_diff);
        w->Write<uint8_t>(DW_LNS_ADVANCE_LINE);
        w->WriteSLEB128(line_diff);
        w->Write<uint8_t>(DW_LNS_COPY);
      }

      // Increment the pc and line operands.
      pc += pc_diff;
      line += line_diff;
    }
    // Advance the pc to the end of the routine, since the end sequence opcode
    // requires this.
    w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
    w->WriteSLEB128(desc_->CodeSize() - pc);
    WriteExtendedOpcode(w, DW_LNE_END_SEQUENCE, 0);
    total_length.set(static_cast<uint32_t>(w->position() - start));
    return true;
  }

 private:
  // Emits an extended opcode: escape byte 0, payload length, opcode.
  void WriteExtendedOpcode(Writer* w,
                           DWARF2ExtendedOpcode op,
                           size_t operands_size) {
    w->Write<uint8_t>(0);
    w->WriteULEB128(operands_size + 1);
    w->Write<uint8_t>(op);
  }

  // Orders PCInfo entries by pc; statements sort before non-statements at
  // the same pc.
  static int ComparePCInfo(const LineInfo::PCInfo* a,
                           const LineInfo::PCInfo* b) {
    if (a->pc_ == b->pc_) {
      if (a->is_statement_ != b->is_statement_) {
        return b->is_statement_ ?
            +1 : -1;
      }
      return 0;
    } else if (a->pc_ > b->pc_) {
      return +1;
    } else {
      return -1;
    }
  }

  CodeDescription* desc_;
};


#if V8_TARGET_ARCH_X64

// Emits .eh_frame unwind information (one CIE plus one FDE) describing how
// to unwind through each prologue/epilogue stack state of the function.
class UnwindInfoSection : public DebugSection {
 public:
  explicit UnwindInfoSection(CodeDescription* desc);
  virtual bool WriteBodyInternal(Writer* w);

  int WriteCIE(Writer* w);
  void WriteFDE(Writer* w, int);

  void WriteFDEStateOnEntry(Writer* w);
  void WriteFDEStateAfterRBPPush(Writer* w);
  void WriteFDEStateAfterRBPSet(Writer* w);
  void WriteFDEStateAfterRBPPop(Writer* w);

  // Pads with DW_CFA_NOPs to pointer alignment and back-patches the length.
  void WriteLength(Writer* w,
                   Writer::Slot<uint32_t>* length_slot,
                   int initial_position);

 private:
  CodeDescription* desc_;

  // DWARF3 Specification, Table 7.23
  enum CFIInstructions {
    DW_CFA_ADVANCE_LOC = 0x40,
    DW_CFA_OFFSET = 0x80,
    DW_CFA_RESTORE = 0xC0,
    DW_CFA_NOP = 0x00,
    DW_CFA_SET_LOC = 0x01,
    DW_CFA_ADVANCE_LOC1 = 0x02,
    DW_CFA_ADVANCE_LOC2 = 0x03,
    DW_CFA_ADVANCE_LOC4 = 0x04,
    DW_CFA_OFFSET_EXTENDED = 0x05,
    DW_CFA_RESTORE_EXTENDED = 0x06,
    DW_CFA_UNDEFINED = 0x07,
    DW_CFA_SAME_VALUE = 0x08,
    DW_CFA_REGISTER = 0x09,
    DW_CFA_REMEMBER_STATE = 0x0A,
    DW_CFA_RESTORE_STATE = 0x0B,
    DW_CFA_DEF_CFA = 0x0C,
    DW_CFA_DEF_CFA_REGISTER = 0x0D,
    DW_CFA_DEF_CFA_OFFSET = 0x0E,
    DW_CFA_DEF_CFA_EXPRESSION = 0x0F,
    DW_CFA_EXPRESSION = 0x10,
    DW_CFA_OFFSET_EXTENDED_SF = 0x11,
    DW_CFA_DEF_CFA_SF = 0x12,
    DW_CFA_DEF_CFA_OFFSET_SF = 0x13,
    DW_CFA_VAL_OFFSET = 0x14,
    DW_CFA_VAL_OFFSET_SF = 0x15,
    DW_CFA_VAL_EXPRESSION = 0x16
  };

  // System V ABI, AMD64 Supplement, Version 0.99.5, Figure 3.36
  enum RegisterMapping {
    // Only the relevant ones have been added to reduce clutter.
    AMD64_RBP = 6,
    AMD64_RSP = 7,
    AMD64_RA = 16
  };

  enum CFIConstants {
    CIE_ID = 0,
    CIE_VERSION = 1,
    CODE_ALIGN_FACTOR = 1,
    DATA_ALIGN_FACTOR = 1,
    RETURN_ADDRESS_REGISTER = AMD64_RA
  };
};


void UnwindInfoSection::WriteLength(Writer* w,
                                    Writer::Slot<uint32_t>* length_slot,
                                    int initial_position) {
  uint32_t align = (w->position() - initial_position) % kPointerSize;

  if (align != 0) {
    for (uint32_t i = 0; i < (kPointerSize - align); i++) {
      w->Write<uint8_t>(DW_CFA_NOP);
    }
  }

  DCHECK((w->position() - initial_position) % kPointerSize == 0);
  length_slot->set(w->position() - initial_position);
}


UnwindInfoSection::UnwindInfoSection(CodeDescription* desc)
#ifdef __ELF
    : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
#else
    : MachOSection("__eh_frame", "__TEXT", sizeof(uintptr_t),
                   MachOSection::S_REGULAR),
#endif
      desc_(desc) { }

// Returns the position of the CIE body (just past its length field), which
// WriteFDE uses to compute the FDE's backwards CIE pointer.
int UnwindInfoSection::WriteCIE(Writer* w) {
  Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
  uint32_t cie_position = w->position();

  // Write out the CIE header. Currently no 'common instructions' are
  // emitted onto the CIE; every FDE has its own set of instructions.
  w->Write<uint32_t>(CIE_ID);
  w->Write<uint8_t>(CIE_VERSION);
  w->Write<uint8_t>(0);  // Null augmentation string.
  w->WriteSLEB128(CODE_ALIGN_FACTOR);
  w->WriteSLEB128(DATA_ALIGN_FACTOR);
  w->Write<uint8_t>(RETURN_ADDRESS_REGISTER);
  WriteLength(w, &cie_length_slot, cie_position);

  return cie_position;
}


void UnwindInfoSection::WriteFDE(Writer* w, int cie_position) {
  // The only FDE for this function. The CFA is the current RBP.
  Writer::Slot<uint32_t> fde_length_slot = w->CreateSlotHere<uint32_t>();
  int fde_position = w->position();
  // Backwards distance from this field to the CIE, including the CIE's
  // 4-byte length field (hence the +4).
  w->Write<int32_t>(fde_position - cie_position + 4);

  w->Write<uintptr_t>(desc_->CodeStart());
  w->Write<uintptr_t>(desc_->CodeSize());

  WriteFDEStateOnEntry(w);
  WriteFDEStateAfterRBPPush(w);
  WriteFDEStateAfterRBPSet(w);
  WriteFDEStateAfterRBPPop(w);

  WriteLength(w, &fde_length_slot, fde_position);
}


void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) {
  // The first state, just after the control has been transferred to the the
  // function.

  // RBP for this function will be the value of RSP after pushing the RBP
  // for the previous function. The previous RBP has not been pushed yet.
  w->Write<uint8_t>(DW_CFA_DEF_CFA_SF);
  w->WriteULEB128(AMD64_RSP);
  w->WriteSLEB128(-kPointerSize);

  // The RA is stored at location CFA + kCallerPCOffset. This is an invariant,
  // and hence omitted from the next states.
  w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
  w->WriteULEB128(AMD64_RA);
  w->WriteSLEB128(StandardFrameConstants::kCallerPCOffset);

  // The RBP of the previous function is still in RBP.
  w->Write<uint8_t>(DW_CFA_SAME_VALUE);
  w->WriteULEB128(AMD64_RBP);

  // Last location described by this entry.
  w->Write<uint8_t>(DW_CFA_SET_LOC);
  w->Write<uint64_t>(
      desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_PUSH));
}


void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer* w) {
  // The second state, just after RBP has been pushed.

  // RBP / CFA for this function is now the current RSP, so just set the
  // offset from the previous rule (from -8) to 0.
  w->Write<uint8_t>(DW_CFA_DEF_CFA_OFFSET);
  w->WriteULEB128(0);

  // The previous RBP is stored at CFA + kCallerFPOffset. This is an invariant
  // in this and the next state, and hence omitted in the next state.
  w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED);
  w->WriteULEB128(AMD64_RBP);
  w->WriteSLEB128(StandardFrameConstants::kCallerFPOffset);

  // Last location described by this entry.
w->Write<uint8_t>(DW_CFA_SET_LOC); w->Write<uint64_t>( desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_SET)); } void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer* w) { // The third state, after the RBP has been set. // The CFA can now directly be set to RBP. w->Write<uint8_t>(DW_CFA_DEF_CFA); w->WriteULEB128(AMD64_RBP); w->WriteULEB128(0); // Last location described by this entry. w->Write<uint8_t>(DW_CFA_SET_LOC); w->Write<uint64_t>( desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_POP)); } void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) { // The fourth (final) state. The RBP has been popped (just before issuing a // return). // The CFA can is now calculated in the same way as in the first state. w->Write<uint8_t>(DW_CFA_DEF_CFA_SF); w->WriteULEB128(AMD64_RSP); w->WriteSLEB128(-kPointerSize); // The RBP w->Write<uint8_t>(DW_CFA_OFFSET_EXTENDED); w->WriteULEB128(AMD64_RBP); w->WriteSLEB128(StandardFrameConstants::kCallerFPOffset); // Last location described by this entry. 
w->Write<uint8_t>(DW_CFA_SET_LOC); w->Write<uint64_t>(desc_->CodeEnd()); } bool UnwindInfoSection::WriteBodyInternal(Writer* w) { uint32_t cie_position = WriteCIE(w); WriteFDE(w, cie_position); return true; } #endif // V8_TARGET_ARCH_X64 static void CreateDWARFSections(CodeDescription* desc, Zone* zone, DebugObject* obj) { if (desc->IsLineInfoAvailable()) { obj->AddSection(new(zone) DebugInfoSection(desc)); obj->AddSection(new(zone) DebugAbbrevSection(desc)); obj->AddSection(new(zone) DebugLineSection(desc)); } #if V8_TARGET_ARCH_X64 obj->AddSection(new(zone) UnwindInfoSection(desc)); #endif } // ------------------------------------------------------------------- // Binary GDB JIT Interface as described in // http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html extern "C" { typedef enum { JIT_NOACTION = 0, JIT_REGISTER_FN, JIT_UNREGISTER_FN } JITAction; struct JITCodeEntry { JITCodeEntry* next_; JITCodeEntry* prev_; Address symfile_addr_; uint64_t symfile_size_; }; struct JITDescriptor { uint32_t version_; uint32_t action_flag_; JITCodeEntry* relevant_entry_; JITCodeEntry* first_entry_; }; // GDB will place breakpoint into this function. // To prevent GCC from inlining or removing it we place noinline attribute // and inline assembler statement inside. void __attribute__((noinline)) __jit_debug_register_code() { __asm__(""); } // GDB will inspect contents of this descriptor. // Static initialization is necessary to prevent GDB from seeing // uninitialized descriptor. 
JITDescriptor __jit_debug_descriptor = { 1, 0, 0, 0 }; #ifdef OBJECT_PRINT void __gdb_print_v8_object(Object* object) { OFStream os(stdout); object->Print(os); os << flush; } #endif } static JITCodeEntry* CreateCodeEntry(Address symfile_addr, uintptr_t symfile_size) { JITCodeEntry* entry = static_cast<JITCodeEntry*>( malloc(sizeof(JITCodeEntry) + symfile_size)); entry->symfile_addr_ = reinterpret_cast<Address>(entry + 1); entry->symfile_size_ = symfile_size; MemCopy(entry->symfile_addr_, symfile_addr, symfile_size); entry->prev_ = entry->next_ = NULL; return entry; } static void DestroyCodeEntry(JITCodeEntry* entry) { free(entry); } static void RegisterCodeEntry(JITCodeEntry* entry, bool dump_if_enabled, const char* name_hint) { #if defined(DEBUG) && !V8_OS_WIN static int file_num = 0; if (FLAG_gdbjit_dump && dump_if_enabled) { static const int kMaxFileNameSize = 64; static const char* kElfFilePrefix = "/tmp/elfdump"; static const char* kObjFileExt = ".o"; char file_name[64]; SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "%s%s%d%s", kElfFilePrefix, (name_hint != NULL) ? 
name_hint : "", file_num++, kObjFileExt); WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_); } #endif entry->next_ = __jit_debug_descriptor.first_entry_; if (entry->next_ != NULL) entry->next_->prev_ = entry; __jit_debug_descriptor.first_entry_ = __jit_debug_descriptor.relevant_entry_ = entry; __jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN; __jit_debug_register_code(); } static void UnregisterCodeEntry(JITCodeEntry* entry) { if (entry->prev_ != NULL) { entry->prev_->next_ = entry->next_; } else { __jit_debug_descriptor.first_entry_ = entry->next_; } if (entry->next_ != NULL) { entry->next_->prev_ = entry->prev_; } __jit_debug_descriptor.relevant_entry_ = entry; __jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN; __jit_debug_register_code(); } static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) { #ifdef __MACH_O Zone zone(isolate); MachO mach_o(&zone); Writer w(&mach_o); mach_o.AddSection(new(&zone) MachOTextSection(kCodeAlignment, desc->CodeStart(), desc->CodeSize())); CreateDWARFSections(desc, &zone, &mach_o); mach_o.Write(&w, desc->CodeStart(), desc->CodeSize()); #else Zone zone(isolate); ELF elf(&zone); Writer w(&elf); int text_section_index = elf.AddSection( new(&zone) FullHeaderELFSection( ".text", ELFSection::TYPE_NOBITS, kCodeAlignment, desc->CodeStart(), 0, desc->CodeSize(), ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC)); CreateSymbolsTable(desc, &zone, &elf, text_section_index); CreateDWARFSections(desc, &zone, &elf); elf.Write(&w); #endif return CreateCodeEntry(w.buffer(), w.position()); } static bool SameCodeObjects(void* key1, void* key2) { return key1 == key2; } static HashMap* GetEntries() { static HashMap* entries = NULL; if (entries == NULL) { entries = new HashMap(&SameCodeObjects); } return entries; } static uint32_t HashForCodeObject(Code* code) { static const uintptr_t kGoldenRatio = 2654435761u; uintptr_t hash = reinterpret_cast<uintptr_t>(code->address()); return 
static_cast<uint32_t>((hash >> kCodeAlignmentBits) * kGoldenRatio); } static const intptr_t kLineInfoTag = 0x1; static bool IsLineInfoTagged(void* ptr) { return 0 != (reinterpret_cast<intptr_t>(ptr) & kLineInfoTag); } static void* TagLineInfo(LineInfo* ptr) { return reinterpret_cast<void*>( reinterpret_cast<intptr_t>(ptr) | kLineInfoTag); } static LineInfo* UntagLineInfo(void* ptr) { return reinterpret_cast<LineInfo*>(reinterpret_cast<intptr_t>(ptr) & ~kLineInfoTag); } void GDBJITInterface::AddCode(Handle<Name> name, Handle<Script> script, Handle<Code> code, CompilationInfo* info) { if (!FLAG_gdbjit) return; Script::InitLineEnds(script); if (!name.is_null() && name->IsString()) { SmartArrayPointer<char> name_cstring = Handle<String>::cast(name)->ToCString(DISALLOW_NULLS); AddCode(name_cstring.get(), *code, GDBJITInterface::FUNCTION, *script, info); } else { AddCode("", *code, GDBJITInterface::FUNCTION, *script, info); } } static void AddUnwindInfo(CodeDescription* desc) { #if V8_TARGET_ARCH_X64 if (desc->tag() == GDBJITInterface::FUNCTION) { // To avoid propagating unwinding information through // compilation pipeline we use an approximation. // For most use cases this should not affect usability. 
static const int kFramePointerPushOffset = 1; static const int kFramePointerSetOffset = 4; static const int kFramePointerPopOffset = -3; uintptr_t frame_pointer_push_address = desc->CodeStart() + kFramePointerPushOffset; uintptr_t frame_pointer_set_address = desc->CodeStart() + kFramePointerSetOffset; uintptr_t frame_pointer_pop_address = desc->CodeEnd() + kFramePointerPopOffset; desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH, frame_pointer_push_address); desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET, frame_pointer_set_address); desc->SetStackStateStartAddress(CodeDescription::POST_RBP_POP, frame_pointer_pop_address); } else { desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH, desc->CodeStart()); desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET, desc->CodeStart()); desc->SetStackStateStartAddress(CodeDescription::POST_RBP_POP, desc->CodeEnd()); } #endif // V8_TARGET_ARCH_X64 } static base::LazyMutex mutex = LAZY_MUTEX_INITIALIZER; void GDBJITInterface::AddCode(const char* name, Code* code, GDBJITInterface::CodeTag tag, Script* script, CompilationInfo* info) { base::LockGuard<base::Mutex> lock_guard(mutex.Pointer()); DisallowHeapAllocation no_gc; HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true); if (e->value != NULL && !IsLineInfoTagged(e->value)) return; LineInfo* lineinfo = UntagLineInfo(e->value); CodeDescription code_desc(name, code, script != NULL ? 
Handle<Script>(script) : Handle<Script>(), lineinfo, tag, info); if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) { delete lineinfo; GetEntries()->Remove(code, HashForCodeObject(code)); return; } AddUnwindInfo(&code_desc); Isolate* isolate = code->GetIsolate(); JITCodeEntry* entry = CreateELFObject(&code_desc, isolate); DCHECK(!IsLineInfoTagged(entry)); delete lineinfo; e->value = entry; const char* name_hint = NULL; bool should_dump = false; if (FLAG_gdbjit_dump) { if (strlen(FLAG_gdbjit_dump_filter) == 0) { name_hint = name; should_dump = true; } else if (name != NULL) { name_hint = strstr(name, FLAG_gdbjit_dump_filter); should_dump = (name_hint != NULL); } } RegisterCodeEntry(entry, should_dump, name_hint); } void GDBJITInterface::RemoveCode(Code* code) { if (!FLAG_gdbjit) return; base::LockGuard<base::Mutex> lock_guard(mutex.Pointer()); HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), false); if (e == NULL) return; if (IsLineInfoTagged(e->value)) { delete UntagLineInfo(e->value); } else { JITCodeEntry* entry = static_cast<JITCodeEntry*>(e->value); UnregisterCodeEntry(entry); DestroyCodeEntry(entry); } e->value = NULL; GetEntries()->Remove(code, HashForCodeObject(code)); } void GDBJITInterface::RemoveCodeRange(Address start, Address end) { HashMap* entries = GetEntries(); Zone zone(Isolate::Current()); ZoneList<Code*> dead_codes(1, &zone); for (HashMap::Entry* e = entries->Start(); e != NULL; e = entries->Next(e)) { Code* code = reinterpret_cast<Code*>(e->key); if (code->address() >= start && code->address() < end) { dead_codes.Add(code, &zone); } } for (int i = 0; i < dead_codes.length(); i++) { RemoveCode(dead_codes.at(i)); } } static void RegisterDetailedLineInfo(Code* code, LineInfo* line_info) { base::LockGuard<base::Mutex> lock_guard(mutex.Pointer()); DCHECK(!IsLineInfoTagged(line_info)); HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true); DCHECK(e->value == NULL); e->value = 
TagLineInfo(line_info); } void GDBJITInterface::EventHandler(const v8::JitCodeEvent* event) { if (!FLAG_gdbjit) return; switch (event->type) { case v8::JitCodeEvent::CODE_ADDED: { Code* code = Code::GetCodeFromTargetAddress( reinterpret_cast<Address>(event->code_start)); if (code->kind() == Code::OPTIMIZED_FUNCTION || code->kind() == Code::FUNCTION) { break; } EmbeddedVector<char, 256> buffer; StringBuilder builder(buffer.start(), buffer.length()); builder.AddSubstring(event->name.str, static_cast<int>(event->name.len)); AddCode(builder.Finalize(), code, NON_FUNCTION, NULL, NULL); break; } case v8::JitCodeEvent::CODE_MOVED: break; case v8::JitCodeEvent::CODE_REMOVED: { Code* code = Code::GetCodeFromTargetAddress( reinterpret_cast<Address>(event->code_start)); RemoveCode(code); break; } case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO: { LineInfo* line_info = reinterpret_cast<LineInfo*>(event->user_data); line_info->SetPosition(static_cast<intptr_t>(event->line_info.offset), static_cast<int>(event->line_info.pos), event->line_info.position_type == v8::JitCodeEvent::STATEMENT_POSITION); break; } case v8::JitCodeEvent::CODE_START_LINE_INFO_RECORDING: { v8::JitCodeEvent* mutable_event = const_cast<v8::JitCodeEvent*>(event); mutable_event->user_data = new LineInfo(); break; } case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: { LineInfo* line_info = reinterpret_cast<LineInfo*>(event->user_data); Code* code = Code::GetCodeFromTargetAddress( reinterpret_cast<Address>(event->code_start)); RegisterDetailedLineInfo(code, line_info); break; } } } } } // namespace v8::internal #endif
gpl-3.0
MaxiReglisse/georchestra
mapfishapp/src/main/webapp/lib/geoext.ux/ux/GeoNamesSearchCombo/tests/lib/GeoExt.ux/GeoNamesSearchCombo.html
1694
<!DOCTYPE html> <html debug="true"> <head> <script type="text/javascript" src="http://www.openlayers.org/api/2.9/OpenLayers.js"></script> <script type="text/javascript" src="http://extjs.cachefly.net/ext-3.2.0/adapter/ext/ext-base.js"></script> <script type="text/javascript" src="http://extjs.cachefly.net/ext-3.2.0/ext-all.js"></script> <script type="text/javascript" src="../../../../../../geoext/lib/GeoExt.js"></script> <script type="text/javascript" src="../../../lib/GeoExt.ux/GeoNamesSearchCombo.js"></script> <script type="text/javascript"> function test_ctor(t) { t.plan(5); // set up var c, map = {}; // test c = new GeoExt.ux.GeoNamesSearchCombo({ renderTo: "GeoNamesSearch", map: map }); t.ok(c instanceof GeoExt.ux.GeoNamesSearchCombo, "ctor creates a GeoExt.ux.GeoNamesSearchCombo object"); t.ok(c instanceof Ext.form.ComboBox, "ctor creates an Ext.form.ComboBox object"); t.ok(c.hasListener("select"), "ctor registers a \"select\" listener"); t.eq(c.map, map, "ctor sets map in instance"); c.destroy(); c = new GeoExt.ux.GeoNamesSearchCombo({ renderTo: "GeoNamesSearch", zoom: -1 }); t.ok(!c.hasListener("select"), "ctor does not register a \"select\" listener if the " + "zoom value is negative"); c.destroy(); } </script> <body> <div id="GeoNamesSearch"></div> </body> </html>
gpl-3.0
diocles/gnash
gui/pythonmod/gnash-test.c
1556
// gnash-window.c: Gtk canvas widget for gnash // // Copyright (C) 2009, 2010 Free Software Foundation, Inc. // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation; either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA // #include <gtk/gtk.h> #include "gnash-view.h" static void destroy( GtkWidget *widget, gpointer data ) { gtk_main_quit (); } int main( int argc, char *argv[] ) { GtkWidget *window; GtkWidget *view; gtk_init (&argc, &argv); window = gtk_window_new (GTK_WINDOW_TOPLEVEL); g_signal_connect (G_OBJECT (window), "destroy", G_CALLBACK (destroy), NULL); view = gnash_view_new (); gtk_container_add (GTK_CONTAINER (window), view); gtk_widget_show (view); gtk_widget_show (window); gnash_view_load_movie(GNASH_VIEW(view), argv[1]); gnash_view_start(GNASH_VIEW(view)); gtk_main (); return 0; }
gpl-3.0
mzemel/kpsu.org
vendor/gems/ruby/1.8/gems/activerecord-3.0.3/lib/active_record.rb
3626
#-- # Copyright (c) 2004-2010 David Heinemeier Hansson # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
#++ activesupport_path = File.expand_path('../../../activesupport/lib', __FILE__) $:.unshift(activesupport_path) if File.directory?(activesupport_path) && !$:.include?(activesupport_path) activemodel_path = File.expand_path('../../../activemodel/lib', __FILE__) $:.unshift(activemodel_path) if File.directory?(activemodel_path) && !$:.include?(activemodel_path) require 'active_support' require 'active_support/i18n' require 'active_model' require 'arel' require 'active_record/version' module ActiveRecord extend ActiveSupport::Autoload eager_autoload do autoload :ActiveRecordError, 'active_record/errors' autoload :ConnectionNotEstablished, 'active_record/errors' autoload :Aggregations autoload :AssociationPreload autoload :Associations autoload :AttributeMethods autoload :AutosaveAssociation autoload :Relation autoload_under 'relation' do autoload :QueryMethods autoload :FinderMethods autoload :Calculations autoload :PredicateBuilder autoload :SpawnMethods autoload :Batches end autoload :Base autoload :Callbacks autoload :CounterCache autoload :DynamicFinderMatch autoload :DynamicScopeMatch autoload :Migration autoload :Migrator, 'active_record/migration' autoload :NamedScope autoload :NestedAttributes autoload :Observer autoload :Persistence autoload :QueryCache autoload :Reflection autoload :Schema autoload :SchemaDumper autoload :Serialization autoload :SessionStore autoload :Timestamp autoload :Transactions autoload :Validations end module AttributeMethods extend ActiveSupport::Autoload eager_autoload do autoload :BeforeTypeCast autoload :Dirty autoload :PrimaryKey autoload :Query autoload :Read autoload :TimeZoneConversion autoload :Write end end module Locking extend ActiveSupport::Autoload eager_autoload do autoload :Optimistic autoload :Pessimistic end end module ConnectionAdapters extend ActiveSupport::Autoload eager_autoload do autoload :AbstractAdapter autoload :ConnectionManagement, "active_record/connection_adapters/abstract/connection_pool" end end 
autoload :TestCase autoload :TestFixtures, 'active_record/fixtures' end ActiveSupport.on_load(:active_record) do Arel::Table.engine = self end I18n.load_path << File.dirname(__FILE__) + '/active_record/locale/en.yml'
gpl-3.0
IBobko/signal
src/org/thoughtcrime/securesms/push/TextSecureCommunicationFactory.java
1107
package org.thoughtcrime.securesms.push; import android.content.Context; import org.thoughtcrime.securesms.BuildConfig; import org.thoughtcrime.securesms.util.TextSecurePreferences; import org.whispersystems.signalservice.api.SignalServiceAccountManager; public class TextSecureCommunicationFactory { public static SignalServiceAccountManager createManager(Context context) { return new SignalServiceAccountManager(BuildConfig.TEXTSECURE_URL, new TextSecurePushTrustStore(context), TextSecurePreferences.getLocalNumber(context), TextSecurePreferences.getPushServerPassword(context), BuildConfig.USER_AGENT); } public static SignalServiceAccountManager createManager(Context context, String number, String password) { return new SignalServiceAccountManager(BuildConfig.TEXTSECURE_URL, new TextSecurePushTrustStore(context), number, password, BuildConfig.USER_AGENT); } }
gpl-3.0
darkwing/debugger.html
test/mochitest/examples/sourcemapped/builds/webpack4-babel7/index.js
2374
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at <http://mozilla.org/MPL/2.0/>. */ const path = require("path"); const util = require("util"); const _ = require("lodash"); const webpack = require("webpack"); const TARGET_NAME = "webpack4-babel7"; module.exports = exports = async function(tests, dirname) { const fixtures = []; for (const [name, input] of tests) { if (/typescript-/.test(name)) { continue; } const testFnName = _.camelCase(`${TARGET_NAME}-${name}`); const evalMaps = name.match(/-eval/); const babelEnv = !name.match(/-es6/); const babelModules = name.match(/-cjs/); console.log(`Building ${TARGET_NAME} test ${name}`); const scriptPath = path.join(dirname, "output", TARGET_NAME, `${name}.js`); const result = await util.promisify(webpack)({ mode: "development", context: path.dirname(input), entry: `./${path.basename(input)}`, output: { path: path.dirname(scriptPath), filename: path.basename(scriptPath), devtoolModuleFilenameTemplate: `${TARGET_NAME}://./${name}/[resource-path]`, libraryTarget: "var", library: testFnName, libraryExport: "default" }, devtool: evalMaps ? "eval-source-map" : "source-map", module: { rules: [ { test: /\.js$/, exclude: /node_modules/, loader: require.resolve("babel-loader"), options: { babelrc: false, plugins: [ require.resolve("@babel/plugin-proposal-class-properties") ], presets: [ require.resolve("@babel/preset-flow"), babelEnv ? [ require.resolve("@babel/preset-env"), { modules: babelModules ? "commonjs" : false } ] : null ].filter(Boolean) } } ].filter(Boolean) } }); fixtures.push({ name, testFnName: testFnName, scriptPath, assets: [scriptPath, evalMaps ? null : `${scriptPath}.map`].filter( Boolean ) }); } return { target: TARGET_NAME, fixtures }; };
mpl-2.0
madd15/snipe-it
resources/lang/nl/admin/depreciations/general.php
528
<?php return array( 'about_asset_depreciations' => 'Over afschrijving van materiaal', 'about_depreciations' => 'U kan de materiaalafschrijving instellen om materiaal af te schrijven op basis van lineaire afschrijving.', 'asset_depreciations' => 'Materiaalafschrijvingen', 'create' => 'Afschrijving aanmaken', 'depreciation_name' => 'Afschrijvingsnaam', 'number_of_months' => 'Aantal maanden', 'update' => 'Afschrijving bijwerken', );
agpl-3.0
GiantSteps/essentia
src/examples/python/streaming_extractor/level.py
4852
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ #! /usr/bin/env python import sys, os import essentia, essentia.standard, essentia.streaming from essentia.streaming import * from numpy import argmax, log10, mean, tanh dynamicFrameSize = 88200 dynamicHopSize = 44100 analysisSampleRate = 44100.0 # expects the audio source to already be equal-loudness filtered class LevelExtractor(essentia.streaming.CompositeBase): #"""describes the dynamics of an audio signal""" def __init__(self, frameSize=dynamicFrameSize, hopSize=dynamicHopSize): super(LevelExtractor, self).__init__() fc = FrameCutter(frameSize=frameSize, hopSize=hopSize, startFromZero=True, silentFrames='noise') dy = Loudness() fc.frame >> dy.signal # define inputs: self.inputs['signal'] = fc.signal # define outputs: self.outputs['loudness'] = dy.loudness def squeezeRange(x, x1, x2): return 0.5 + 0.5 * tanh(-1.0 + 2.0 * (x - x1) / (x2 - x1)) def levelAverage(pool, namespace=''): epsilon = 1e-4 threshold = 1e-4 # -80dB if namespace: namespace += '.lowlevel.' else: namespace = 'lowlevel.' 
loudness = pool[namespace + 'loudness'] pool.remove(namespace + 'loudness') maxValue = loudness[argmax(loudness)] if maxValue <= epsilon: maxValue = epsilon # normalization of the maximum: def f(x): x /= float(maxValue) if x <= threshold : return threshold return x loudness = map(f, loudness) # average level: levelAverage = 10.0*log10(mean(loudness)) # Re-scaling and range-control # This yields in numbers between # 0 for signals with large dynamic variace and thus low dynamic average # 1 for signal with little dynamic range and thus # a dynamic average close to the maximum x1 = -5.0 x2 = -2.0 levelAverageSqueezed = squeezeRange(levelAverage, x1, x2) pool.set(namespace + 'average_loudness', levelAverageSqueezed) usage = 'level.py [options] <inputfilename> <outputfilename>' def parse_args(): import numpy essentia_version = '%s\n'\ 'python version: %s\n'\ 'numpy version: %s' % (essentia.__version__, # full version sys.version.split()[0], # python major version numpy.__version__) # numpy version from optparse import OptionParser parser = OptionParser(usage=usage, version=essentia_version) parser.add_option("-c","--cpp", action="store_true", dest="generate_cpp", help="generate cpp code from CompositeBase algorithm") parser.add_option("-d", "--dot", action="store_true", dest="generate_dot", help="generate dot and cpp code from CompositeBase algorithm") (options, args) = parser.parse_args() return options, args if __name__ == '__main__': opts, args = parse_args() if len(args) != 2: cmd = './'+os.path.basename(sys.argv[0])+ ' -h' os.system(cmd) sys.exit(1) if opts.generate_dot: essentia.translate(LevelExtractor, 'streaming_extractorlevel', dot_graph=True) elif opts.generate_cpp: essentia.translate(LevelExtractor, 'streaming_extractorlevel', dot_graph=False) # find out replay gain: loader = EqloudLoader(filename=args[0], sampleRate=analysisSampleRate, downmix='mix') rgain = ReplayGain(applyEqloud=False) pool = essentia.Pool() loader.audio >> rgain.signal rgain.replayGain >> 
(pool, 'replay_gain') essentia.run(loader) # get average level: loader = EqloudLoader(filename=args[0], replayGain=pool['replay_gain'], sampleRate=analysisSampleRate, downmix='mix') levelExtractor = LevelExtractor() loader.audio >> levelExtractor.signal levelExtractor.loudness >> (pool, 'lowlevel.loudness') essentia.run(loader) levelAverage(pool) essentia.standard.YamlOutput(filename=args[1])(pool)
agpl-3.0
sanjupolus/kc-coeus-1508.3
coeus-db/coeus-db-sql/src/main/resources/co/kuali/coeus/data/migration/sql/oracle/kc/bootstrap/V300_213__KC_DML_01_TRAINING.sql
2489
-- -- Kuali Coeus, a comprehensive research administration system for higher education. -- -- Copyright 2005-2015 Kuali, Inc. -- -- This program is free software: you can redistribute it and/or modify -- it under the terms of the GNU Affero General Public License as -- published by the Free Software Foundation, either version 3 of the -- License, or (at your option) any later version. -- -- This program is distributed in the hope that it will be useful, -- but WITHOUT ANY WARRANTY; without even the implied warranty of -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -- GNU Affero General Public License for more details. -- -- You should have received a copy of the GNU Affero General Public License -- along with this program. If not, see <http://www.gnu.org/licenses/>. -- TRUNCATE TABLE TRAINING DROP STORAGE; INSERT INTO TRAINING (TRAINING_CODE,DESCRIPTION,UPDATE_USER,UPDATE_TIMESTAMP,OBJ_ID,VER_NBR) VALUES (1,'Rochester Training Booklet - Human Subjects','admin',SYSDATE,SYS_GUID(),1); INSERT INTO TRAINING (TRAINING_CODE,DESCRIPTION,UPDATE_USER,UPDATE_TIMESTAMP,OBJ_ID,VER_NBR) VALUES (2,'MIT Human Subjects Training via Traincasters','admin',SYSDATE,SYS_GUID(),1); INSERT INTO TRAINING (TRAINING_CODE,DESCRIPTION,UPDATE_USER,UPDATE_TIMESTAMP,OBJ_ID,VER_NBR) VALUES (3,'NIH Training - Human Subjects','admin',SYSDATE,SYS_GUID(),1); INSERT INTO TRAINING (TRAINING_CODE,DESCRIPTION,UPDATE_USER,UPDATE_TIMESTAMP,OBJ_ID,VER_NBR) VALUES (4,'Johns Hopkins - Human Subjects','admin',SYSDATE,SYS_GUID(),1); INSERT INTO TRAINING (TRAINING_CODE,DESCRIPTION,UPDATE_USER,UPDATE_TIMESTAMP,OBJ_ID,VER_NBR) VALUES (5,'Massachusetts General Hospital - Human Subjects','admin',SYSDATE,SYS_GUID(),1); INSERT INTO TRAINING (TRAINING_CODE,DESCRIPTION,UPDATE_USER,UPDATE_TIMESTAMP,OBJ_ID,VER_NBR) VALUES (6,'Stanford - Human Subjects','admin',SYSDATE,SYS_GUID(),1); INSERT INTO TRAINING (TRAINING_CODE,DESCRIPTION,UPDATE_USER,UPDATE_TIMESTAMP,OBJ_ID,VER_NBR) VALUES (7,'Beth Israel 
Deaconess Medical Center - Human Subjects','admin',SYSDATE,SYS_GUID(),1); INSERT INTO TRAINING (TRAINING_CODE,DESCRIPTION,UPDATE_USER,UPDATE_TIMESTAMP,OBJ_ID,VER_NBR) VALUES (8,'Massachusetts Eye & Ear Infirmary - Human Subjects','admin',SYSDATE,SYS_GUID(),1); INSERT INTO TRAINING (TRAINING_CODE,DESCRIPTION,UPDATE_USER,UPDATE_TIMESTAMP,OBJ_ID,VER_NBR) VALUES (9,'University of Miami CITI Ethics Training program','admin',SYSDATE,SYS_GUID(),1);
agpl-3.0
SerCom-KC/mastodon
config/application.rb
2782
require_relative 'boot' require 'rails/all' # Require the gems listed in Gemfile, including any gems # you've limited to :test, :development, or :production. Bundler.require(*Rails.groups) require_relative '../app/lib/exceptions' require_relative '../lib/paperclip/lazy_thumbnail' require_relative '../lib/paperclip/gif_transcoder' require_relative '../lib/paperclip/video_transcoder' require_relative '../lib/mastodon/snowflake' require_relative '../lib/mastodon/version' require_relative '../lib/devise/ldap_authenticatable' Dotenv::Railtie.load Bundler.require(:pam_authentication) if ENV['PAM_ENABLED'] == 'true' require_relative '../lib/mastodon/redis_config' module Mastodon class Application < Rails::Application # Initialize configuration defaults for originally generated Rails version. config.load_defaults 5.2 # Settings in config/environments/* take precedence over those specified here. # Application configuration should go into files in config/initializers # -- all .rb files in that directory are automatically loaded. # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone. # Run "rake -D time" for a list of tasks for finding time zone names. Default is UTC. # config.time_zone = 'Central Time (US & Canada)' # All translations from config/locales/*.rb,yml are auto loaded. 
# config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s] config.i18n.available_locales = [ :en, :ar, :ast, :bg, :ca, :co, :cs, :cy, :da, :de, :el, :eo, :es, :eu, :fa, :fi, :fr, :gl, :he, :hr, :hu, :hy, :id, :io, :it, :ja, :ka, :ko, :nl, :no, :oc, :pl, :pt, :'pt-BR', :ro, :ru, :sk, :sl, :sr, :'sr-Latn', :sv, :ta, :te, :th, :tr, :uk, :'zh-CN', :'zh-HK', :'zh-TW', ] config.i18n.default_locale = ENV['DEFAULT_LOCALE']&.to_sym unless config.i18n.available_locales.include?(config.i18n.default_locale) config.i18n.default_locale = :en end # config.paths.add File.join('app', 'api'), glob: File.join('**', '*.rb') # config.autoload_paths += Dir[Rails.root.join('app', 'api', '*')] config.active_job.queue_adapter = :sidekiq config.middleware.use Rack::Attack config.middleware.use Rack::Deflater config.to_prepare do Doorkeeper::AuthorizationsController.layout 'modal' Doorkeeper::AuthorizedApplicationsController.layout 'admin' Doorkeeper::Application.send :include, ApplicationExtension end end end
agpl-3.0
fionakim/biojava
biojava-structure/src/main/java/org/biojava/nbio/structure/io/mmcif/model/PdbxNonPolyScheme.java
2607
/* * BioJava development code * * This code may be freely distributed and modified under the * terms of the GNU Lesser General Public Licence. This should * be distributed with the code. If you do not have a copy, * see: * * http://www.gnu.org/copyleft/lesser.html * * Copyright for this code is held jointly by the individual * authors. These should be listed in @author doc comments. * * For more information on the BioJava project and its aims, * or to join the biojava-l mailing list, visit the home page * at: * * http://www.biojava.org/ * */ package org.biojava.nbio.structure.io.mmcif.model; /** A bean for the PDBX_NONPOLY_SCHEME category, which provides residue level nomenclature * mapping for non-polymer entities. * @author Andreas Prlic * @since 1.7 */ public class PdbxNonPolyScheme { String asym_id; String entity_id; String seq_id; String mon_id; String ndb_seq_num; String pdb_seq_num ; String auth_seq_num ; String pdb_mon_id; String auth_mon_id; String pdb_strand_id; String pdb_ins_code; public String getAsym_id() { return asym_id; } public void setAsym_id(String asym_id) { this.asym_id = asym_id; } public String getEntity_id() { return entity_id; } public void setEntity_id(String entity_id) { this.entity_id = entity_id; } public String getSeq_id() { return seq_id; } public void setSeq_id(String seq_id) { this.seq_id = seq_id; } public String getMon_id() { return mon_id; } public void setMon_id(String mon_id) { this.mon_id = mon_id; } public String getNdb_seq_num() { return ndb_seq_num; } public void setNdb_seq_num(String ndb_seq_num) { this.ndb_seq_num = ndb_seq_num; } public String getPdb_seq_num() { return pdb_seq_num; } public void setPdb_seq_num(String pdb_seq_num) { this.pdb_seq_num = pdb_seq_num; } public String getAuth_seq_num() { return auth_seq_num; } public void setAuth_seq_num(String auth_seq_num) { this.auth_seq_num = auth_seq_num; } public String getPdb_mon_id() { return pdb_mon_id; } public void setPdb_mon_id(String pdb_mon_id) { this.pdb_mon_id 
= pdb_mon_id; } public String getAuth_mon_id() { return auth_mon_id; } public void setAuth_mon_id(String auth_mon_id) { this.auth_mon_id = auth_mon_id; } public String getPdb_strand_id() { return pdb_strand_id; } public void setPdb_strand_id(String pdb_strand_id) { this.pdb_strand_id = pdb_strand_id; } public String getPdb_ins_code() { return pdb_ins_code; } public void setPdb_ins_code(String pdb_ins_code) { this.pdb_ins_code = pdb_ins_code; } }
lgpl-2.1
jpakkane/gstreamer-plugins-good
ext/jpeg/gstjpegdec.c
41389
/* GStreamer * Copyright (C) <1999> Erik Walthinsen <[email protected]> * Copyright (C) <2009> Tim-Philipp Müller <tim centricular net> * Copyright (C) 2012 Collabora Ltd. * Author : Edward Hervey <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, * Boston, MA 02110-1301, USA. */ /** * SECTION:element-jpegdec * * Decodes jpeg images. * * <refsect2> * <title>Example launch line</title> * |[ * gst-launch-1.0 -v filesrc location=mjpeg.avi ! avidemux ! queue ! jpegdec ! videoconvert ! videoscale ! autovideosink * ]| The above pipeline decode the mjpeg stream and renders it to the screen. 
* </refsect2> */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <string.h> #include "gstjpegdec.h" #include "gstjpeg.h" #include <gst/video/video.h> #include <gst/video/gstvideometa.h> #include <gst/video/gstvideopool.h> #include "gst/gst-i18n-plugin.h" #include <jerror.h> #define MIN_WIDTH 1 #define MAX_WIDTH 65535 #define MIN_HEIGHT 1 #define MAX_HEIGHT 65535 #define CINFO_GET_JPEGDEC(cinfo_ptr) \ (((struct GstJpegDecSourceMgr*)((cinfo_ptr)->src))->dec) #define JPEG_DEFAULT_IDCT_METHOD JDCT_FASTEST #define JPEG_DEFAULT_MAX_ERRORS 0 enum { PROP_0, PROP_IDCT_METHOD, PROP_MAX_ERRORS }; /* *INDENT-OFF* */ static GstStaticPadTemplate gst_jpeg_dec_src_pad_template = GST_STATIC_PAD_TEMPLATE ("src", GST_PAD_SRC, GST_PAD_ALWAYS, GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("{ I420, RGB, BGR, RGBx, xRGB, BGRx, xBGR, GRAY8 }")) ); /* *INDENT-ON* */ /* FIXME: sof-marker is for IJG libjpeg 8, should be different for 6.2 */ /* FIXME: add back "sof-marker = (int) { 0, 1, 2, 5, 6, 7, 9, 10, 13, 14 }" * once we have a parser and/or demuxer set caps properly */ static GstStaticPadTemplate gst_jpeg_dec_sink_pad_template = GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, GST_STATIC_CAPS ("image/jpeg") ); GST_DEBUG_CATEGORY_STATIC (jpeg_dec_debug); #define GST_CAT_DEFAULT jpeg_dec_debug GST_DEBUG_CATEGORY_STATIC (GST_CAT_PERFORMANCE); static void gst_jpeg_dec_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec); static void gst_jpeg_dec_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec); static gboolean gst_jpeg_dec_set_format (GstVideoDecoder * dec, GstVideoCodecState * state); static gboolean gst_jpeg_dec_start (GstVideoDecoder * bdec); static gboolean gst_jpeg_dec_stop (GstVideoDecoder * bdec); static gboolean gst_jpeg_dec_flush (GstVideoDecoder * bdec); static GstFlowReturn gst_jpeg_dec_parse (GstVideoDecoder * bdec, GstVideoCodecFrame * frame, GstAdapter * adapter, gboolean at_eos); static 
GstFlowReturn gst_jpeg_dec_handle_frame (GstVideoDecoder * bdec, GstVideoCodecFrame * frame); static gboolean gst_jpeg_dec_decide_allocation (GstVideoDecoder * bdec, GstQuery * query); #define gst_jpeg_dec_parent_class parent_class G_DEFINE_TYPE (GstJpegDec, gst_jpeg_dec, GST_TYPE_VIDEO_DECODER); static void gst_jpeg_dec_finalize (GObject * object) { GstJpegDec *dec = GST_JPEG_DEC (object); jpeg_destroy_decompress (&dec->cinfo); if (dec->input_state) gst_video_codec_state_unref (dec->input_state); G_OBJECT_CLASS (parent_class)->finalize (object); } static void gst_jpeg_dec_class_init (GstJpegDecClass * klass) { GObjectClass *gobject_class; GstElementClass *element_class; GstVideoDecoderClass *vdec_class; gobject_class = (GObjectClass *) klass; element_class = (GstElementClass *) klass; vdec_class = (GstVideoDecoderClass *) klass; parent_class = g_type_class_peek_parent (klass); gobject_class->finalize = gst_jpeg_dec_finalize; gobject_class->set_property = gst_jpeg_dec_set_property; gobject_class->get_property = gst_jpeg_dec_get_property; g_object_class_install_property (gobject_class, PROP_IDCT_METHOD, g_param_spec_enum ("idct-method", "IDCT Method", "The IDCT algorithm to use", GST_TYPE_IDCT_METHOD, JPEG_DEFAULT_IDCT_METHOD, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); /** * GstJpegDec:max-errors: * * Error out after receiving N consecutive decoding errors * (-1 = never error out, 0 = automatic, 1 = fail on first error, etc.) 
* * Deprecated: 1.3.1: Property wasn't used internally */ g_object_class_install_property (gobject_class, PROP_MAX_ERRORS, g_param_spec_int ("max-errors", "Maximum Consecutive Decoding Errors", "(Deprecated) Error out after receiving N consecutive decoding errors" " (-1 = never fail, 0 = automatic, 1 = fail on first error)", -1, G_MAXINT, JPEG_DEFAULT_MAX_ERRORS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)); gst_element_class_add_pad_template (element_class, gst_static_pad_template_get (&gst_jpeg_dec_src_pad_template)); gst_element_class_add_pad_template (element_class, gst_static_pad_template_get (&gst_jpeg_dec_sink_pad_template)); gst_element_class_set_static_metadata (element_class, "JPEG image decoder", "Codec/Decoder/Image", "Decode images from JPEG format", "Wim Taymans <[email protected]>"); vdec_class->start = gst_jpeg_dec_start; vdec_class->stop = gst_jpeg_dec_stop; vdec_class->flush = gst_jpeg_dec_flush; vdec_class->parse = gst_jpeg_dec_parse; vdec_class->set_format = gst_jpeg_dec_set_format; vdec_class->handle_frame = gst_jpeg_dec_handle_frame; vdec_class->decide_allocation = gst_jpeg_dec_decide_allocation; GST_DEBUG_CATEGORY_INIT (jpeg_dec_debug, "jpegdec", 0, "JPEG decoder"); GST_DEBUG_CATEGORY_GET (GST_CAT_PERFORMANCE, "GST_PERFORMANCE"); } static boolean gst_jpeg_dec_fill_input_buffer (j_decompress_ptr cinfo) { GstJpegDec *dec; dec = CINFO_GET_JPEGDEC (cinfo); g_return_val_if_fail (dec != NULL, FALSE); g_return_val_if_fail (dec->current_frame != NULL, FALSE); g_return_val_if_fail (dec->current_frame_map.data != NULL, FALSE); cinfo->src->next_input_byte = dec->current_frame_map.data; cinfo->src->bytes_in_buffer = dec->current_frame_map.size; return TRUE; } static void gst_jpeg_dec_init_source (j_decompress_ptr cinfo) { GST_LOG_OBJECT (CINFO_GET_JPEGDEC (cinfo), "init_source"); } static void gst_jpeg_dec_skip_input_data (j_decompress_ptr cinfo, glong num_bytes) { GstJpegDec *dec = CINFO_GET_JPEGDEC (cinfo); GST_DEBUG_OBJECT (dec, "skip %ld bytes", 
num_bytes); if (num_bytes > 0 && cinfo->src->bytes_in_buffer >= num_bytes) { cinfo->src->next_input_byte += (size_t) num_bytes; cinfo->src->bytes_in_buffer -= (size_t) num_bytes; } } static boolean gst_jpeg_dec_resync_to_restart (j_decompress_ptr cinfo, gint desired) { GST_LOG_OBJECT (CINFO_GET_JPEGDEC (cinfo), "resync_to_start"); return TRUE; } static void gst_jpeg_dec_term_source (j_decompress_ptr cinfo) { GST_LOG_OBJECT (CINFO_GET_JPEGDEC (cinfo), "term_source"); return; } METHODDEF (void) gst_jpeg_dec_my_output_message (j_common_ptr cinfo) { return; /* do nothing */ } METHODDEF (void) gst_jpeg_dec_my_emit_message (j_common_ptr cinfo, int msg_level) { /* GST_LOG_OBJECT (CINFO_GET_JPEGDEC (&cinfo), "msg_level=%d", msg_level); */ return; } METHODDEF (void) gst_jpeg_dec_my_error_exit (j_common_ptr cinfo) { struct GstJpegDecErrorMgr *err_mgr = (struct GstJpegDecErrorMgr *) cinfo->err; (*cinfo->err->output_message) (cinfo); longjmp (err_mgr->setjmp_buffer, 1); } static void gst_jpeg_dec_init (GstJpegDec * dec) { GST_DEBUG ("initializing"); /* setup jpeglib */ memset (&dec->cinfo, 0, sizeof (dec->cinfo)); memset (&dec->jerr, 0, sizeof (dec->jerr)); dec->cinfo.err = jpeg_std_error (&dec->jerr.pub); dec->jerr.pub.output_message = gst_jpeg_dec_my_output_message; dec->jerr.pub.emit_message = gst_jpeg_dec_my_emit_message; dec->jerr.pub.error_exit = gst_jpeg_dec_my_error_exit; jpeg_create_decompress (&dec->cinfo); dec->cinfo.src = (struct jpeg_source_mgr *) &dec->jsrc; dec->cinfo.src->init_source = gst_jpeg_dec_init_source; dec->cinfo.src->fill_input_buffer = gst_jpeg_dec_fill_input_buffer; dec->cinfo.src->skip_input_data = gst_jpeg_dec_skip_input_data; dec->cinfo.src->resync_to_restart = gst_jpeg_dec_resync_to_restart; dec->cinfo.src->term_source = gst_jpeg_dec_term_source; dec->jsrc.dec = dec; /* init properties */ dec->idct_method = JPEG_DEFAULT_IDCT_METHOD; dec->max_errors = JPEG_DEFAULT_MAX_ERRORS; } static inline gboolean gst_jpeg_dec_parse_tag_has_entropy_segment 
(guint8 tag) { if (tag == 0xda || (tag >= 0xd0 && tag <= 0xd7)) return TRUE; return FALSE; } static GstFlowReturn gst_jpeg_dec_parse (GstVideoDecoder * bdec, GstVideoCodecFrame * frame, GstAdapter * adapter, gboolean at_eos) { guint size; gint toadd = 0; gboolean resync; gint offset = 0, noffset; GstJpegDec *dec = (GstJpegDec *) bdec; GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame); /* FIXME : The overhead of using scan_uint32 is massive */ size = gst_adapter_available (adapter); GST_DEBUG ("Parsing jpeg image data (%u bytes)", size); if (at_eos) { GST_DEBUG ("Flushing all data out"); toadd = size; /* If we have leftover data, throw it away */ if (!dec->saw_header) goto drop_frame; goto have_full_frame; } if (size < 8) goto need_more_data; if (!dec->saw_header) { gint ret; /* we expect at least 4 bytes, first of which start marker */ ret = gst_adapter_masked_scan_uint32 (adapter, 0xffff0000, 0xffd80000, 0, size - 4); GST_DEBUG ("ret:%d", ret); if (ret < 0) goto need_more_data; if (ret) { gst_adapter_flush (adapter, ret); size -= ret; } dec->saw_header = TRUE; } while (1) { guint frame_len; guint32 value; GST_DEBUG ("offset:%d, size:%d", offset, size); noffset = gst_adapter_masked_scan_uint32_peek (adapter, 0x0000ff00, 0x0000ff00, offset, size - offset, &value); /* lost sync if 0xff marker not where expected */ if ((resync = (noffset != offset))) { GST_DEBUG ("Lost sync at 0x%08x, resyncing", offset + 2); } /* may have marker, but could have been resyncng */ resync = resync || dec->parse_resync; /* Skip over extra 0xff */ while ((noffset >= 0) && ((value & 0xff) == 0xff)) { noffset++; noffset = gst_adapter_masked_scan_uint32_peek (adapter, 0x0000ff00, 0x0000ff00, noffset, size - noffset, &value); } /* enough bytes left for marker? 
(we need 0xNN after the 0xff) */ if (noffset < 0) { GST_DEBUG ("at end of input and no EOI marker found, need more data"); goto need_more_data; } /* now lock on the marker we found */ offset = noffset; value = value & 0xff; if (value == 0xd9) { GST_DEBUG ("0x%08x: EOI marker", offset + 2); /* clear parse state */ dec->saw_header = FALSE; dec->parse_resync = FALSE; toadd = offset + 4; goto have_full_frame; } if (value == 0xd8) { /* Skip this frame if we found another SOI marker */ GST_DEBUG ("0x%08x: SOI marker before EOI, skipping", offset + 2); dec->parse_resync = FALSE; size = offset + 2; goto drop_frame; } if (value >= 0xd0 && value <= 0xd7) frame_len = 0; else { /* peek tag and subsequent length */ if (offset + 2 + 4 > size) goto need_more_data; else gst_adapter_masked_scan_uint32_peek (adapter, 0x0, 0x0, offset + 2, 4, &frame_len); frame_len = frame_len & 0xffff; } GST_DEBUG ("0x%08x: tag %02x, frame_len=%u", offset + 2, value, frame_len); /* the frame length includes the 2 bytes for the length; here we want at * least 2 more bytes at the end for an end marker */ if (offset + 2 + 2 + frame_len + 2 > size) { goto need_more_data; } if (gst_jpeg_dec_parse_tag_has_entropy_segment (value)) { guint eseglen = dec->parse_entropy_len; GST_DEBUG ("0x%08x: finding entropy segment length (eseglen:%d)", offset + 2, eseglen); if (size < offset + 2 + frame_len + eseglen) goto need_more_data; noffset = offset + 2 + frame_len + dec->parse_entropy_len; while (1) { GST_DEBUG ("noffset:%d, size:%d, size - noffset:%d", noffset, size, size - noffset); noffset = gst_adapter_masked_scan_uint32_peek (adapter, 0x0000ff00, 0x0000ff00, noffset, size - noffset, &value); if (noffset < 0) { /* need more data */ dec->parse_entropy_len = size - offset - 4 - frame_len - 2; goto need_more_data; } if ((value & 0xff) != 0x00) { eseglen = noffset - offset - frame_len - 2; break; } noffset++; } dec->parse_entropy_len = 0; frame_len += eseglen; GST_DEBUG ("entropy segment length=%u => frame_len=%u", 
eseglen, frame_len); } if (resync) { /* check if we will still be in sync if we interpret * this as a sync point and skip this frame */ noffset = offset + frame_len + 2; noffset = gst_adapter_masked_scan_uint32 (adapter, 0x0000ff00, 0x0000ff00, noffset, 4); if (noffset < 0) { /* ignore and continue resyncing until we hit the end * of our data or find a sync point that looks okay */ offset++; continue; } GST_DEBUG ("found sync at 0x%x", offset + 2); } /* Add current data to output buffer */ toadd += frame_len + 2; offset += frame_len + 2; } need_more_data: if (toadd) gst_video_decoder_add_to_frame (bdec, toadd); return GST_VIDEO_DECODER_FLOW_NEED_DATA; have_full_frame: if (toadd) gst_video_decoder_add_to_frame (bdec, toadd); GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame); return gst_video_decoder_have_frame (bdec); drop_frame: gst_adapter_flush (adapter, size); return GST_FLOW_OK; } /* shamelessly ripped from jpegutils.c in mjpegtools */ static void add_huff_table (j_decompress_ptr dinfo, JHUFF_TBL ** htblptr, const UINT8 * bits, const UINT8 * val) /* Define a Huffman table */ { int nsymbols, len; if (*htblptr == NULL) *htblptr = jpeg_alloc_huff_table ((j_common_ptr) dinfo); g_assert (*htblptr); /* Copy the number-of-symbols-of-each-code-length counts */ memcpy ((*htblptr)->bits, bits, sizeof ((*htblptr)->bits)); /* Validate the counts. We do this here mainly so we can copy the right * number of symbols from the val[] array, without risking marching off * the end of memory. jchuff.c will do a more thorough test later. */ nsymbols = 0; for (len = 1; len <= 16; len++) nsymbols += bits[len]; if (nsymbols < 1 || nsymbols > 256) g_error ("jpegutils.c: add_huff_table failed badly. "); memcpy ((*htblptr)->huffval, val, nsymbols * sizeof (UINT8)); } static void std_huff_tables (j_decompress_ptr dinfo) /* Set up the standard Huffman tables (cf. JPEG standard section K.3) */ /* IMPORTANT: these are only valid for 8-bit data precision! 
*/ { static const UINT8 bits_dc_luminance[17] = { /* 0-base */ 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 }; static const UINT8 val_dc_luminance[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }; static const UINT8 bits_dc_chrominance[17] = { /* 0-base */ 0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 }; static const UINT8 val_dc_chrominance[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }; static const UINT8 bits_ac_luminance[17] = { /* 0-base */ 0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d }; static const UINT8 val_ac_luminance[] = { 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08, 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa }; static const UINT8 bits_ac_chrominance[17] = { /* 0-base */ 0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 }; static const UINT8 val_ac_chrominance[] = { 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0, 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34, 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26, 0x27, 0x28, 0x29, 0x2a, 
0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa }; add_huff_table (dinfo, &dinfo->dc_huff_tbl_ptrs[0], bits_dc_luminance, val_dc_luminance); add_huff_table (dinfo, &dinfo->ac_huff_tbl_ptrs[0], bits_ac_luminance, val_ac_luminance); add_huff_table (dinfo, &dinfo->dc_huff_tbl_ptrs[1], bits_dc_chrominance, val_dc_chrominance); add_huff_table (dinfo, &dinfo->ac_huff_tbl_ptrs[1], bits_ac_chrominance, val_ac_chrominance); } static void guarantee_huff_tables (j_decompress_ptr dinfo) { if ((dinfo->dc_huff_tbl_ptrs[0] == NULL) && (dinfo->dc_huff_tbl_ptrs[1] == NULL) && (dinfo->ac_huff_tbl_ptrs[0] == NULL) && (dinfo->ac_huff_tbl_ptrs[1] == NULL)) { GST_DEBUG ("Generating standard Huffman tables for this frame."); std_huff_tables (dinfo); } } static gboolean gst_jpeg_dec_set_format (GstVideoDecoder * dec, GstVideoCodecState * state) { GstJpegDec *jpeg = GST_JPEG_DEC (dec); GstVideoInfo *info = &state->info; /* FIXME : previously jpegdec would handled input as packetized * if the framerate was present. 
Here we consider it packetized if * the fps is != 1/1 */ if (GST_VIDEO_INFO_FPS_N (info) != 1 && GST_VIDEO_INFO_FPS_D (info) != 1) gst_video_decoder_set_packetized (dec, TRUE); else gst_video_decoder_set_packetized (dec, FALSE); if (jpeg->input_state) gst_video_codec_state_unref (jpeg->input_state); jpeg->input_state = gst_video_codec_state_ref (state); return TRUE; } /* yuk */ static void hresamplecpy1 (guint8 * dest, const guint8 * src, guint len) { gint i; for (i = 0; i < len; ++i) { /* equivalent to: dest[i] = src[i << 1] */ *dest = *src; ++dest; ++src; ++src; } } static void gst_jpeg_dec_free_buffers (GstJpegDec * dec) { gint i; for (i = 0; i < 16; i++) { g_free (dec->idr_y[i]); g_free (dec->idr_u[i]); g_free (dec->idr_v[i]); dec->idr_y[i] = NULL; dec->idr_u[i] = NULL; dec->idr_v[i] = NULL; } dec->idr_width_allocated = 0; } static inline gboolean gst_jpeg_dec_ensure_buffers (GstJpegDec * dec, guint maxrowbytes) { gint i; if (G_LIKELY (dec->idr_width_allocated == maxrowbytes)) return TRUE; /* FIXME: maybe just alloc one or three blocks altogether? 
*/ for (i = 0; i < 16; i++) { dec->idr_y[i] = g_try_realloc (dec->idr_y[i], maxrowbytes); dec->idr_u[i] = g_try_realloc (dec->idr_u[i], maxrowbytes); dec->idr_v[i] = g_try_realloc (dec->idr_v[i], maxrowbytes); if (G_UNLIKELY (!dec->idr_y[i] || !dec->idr_u[i] || !dec->idr_v[i])) { GST_WARNING_OBJECT (dec, "out of memory, i=%d, bytes=%u", i, maxrowbytes); return FALSE; } } dec->idr_width_allocated = maxrowbytes; GST_LOG_OBJECT (dec, "allocated temp memory, %u bytes/row", maxrowbytes); return TRUE; } static void gst_jpeg_dec_decode_grayscale (GstJpegDec * dec, GstVideoFrame * frame) { guchar *rows[16]; guchar **scanarray[1] = { rows }; gint i, j, k; gint lines; guint8 *base[1]; gint width, height; gint pstride, rstride; GST_DEBUG_OBJECT (dec, "indirect decoding of grayscale"); width = GST_VIDEO_FRAME_WIDTH (frame); height = GST_VIDEO_FRAME_HEIGHT (frame); if (G_UNLIKELY (!gst_jpeg_dec_ensure_buffers (dec, GST_ROUND_UP_32 (width)))) return; base[0] = GST_VIDEO_FRAME_COMP_DATA (frame, 0); pstride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0); rstride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0); memcpy (rows, dec->idr_y, 16 * sizeof (gpointer)); i = 0; while (i < height) { lines = jpeg_read_raw_data (&dec->cinfo, scanarray, DCTSIZE); if (G_LIKELY (lines > 0)) { for (j = 0; (j < DCTSIZE) && (i < height); j++, i++) { gint p; p = 0; for (k = 0; k < width; k++) { base[0][p] = rows[j][k]; p += pstride; } base[0] += rstride; } } else { GST_INFO_OBJECT (dec, "jpeg_read_raw_data() returned 0"); } } } static void gst_jpeg_dec_decode_rgb (GstJpegDec * dec, GstVideoFrame * frame) { guchar *r_rows[16], *g_rows[16], *b_rows[16]; guchar **scanarray[3] = { r_rows, g_rows, b_rows }; gint i, j, k; gint lines; guint8 *base[3]; guint pstride, rstride; gint width, height; GST_DEBUG_OBJECT (dec, "indirect decoding of RGB"); width = GST_VIDEO_FRAME_WIDTH (frame); height = GST_VIDEO_FRAME_HEIGHT (frame); if (G_UNLIKELY (!gst_jpeg_dec_ensure_buffers (dec, GST_ROUND_UP_32 (width)))) return; for (i = 0; 
i < 3; i++) base[i] = GST_VIDEO_FRAME_COMP_DATA (frame, i); pstride = GST_VIDEO_FRAME_COMP_PSTRIDE (frame, 0); rstride = GST_VIDEO_FRAME_COMP_STRIDE (frame, 0); memcpy (r_rows, dec->idr_y, 16 * sizeof (gpointer)); memcpy (g_rows, dec->idr_u, 16 * sizeof (gpointer)); memcpy (b_rows, dec->idr_v, 16 * sizeof (gpointer)); i = 0; while (i < height) { lines = jpeg_read_raw_data (&dec->cinfo, scanarray, DCTSIZE); if (G_LIKELY (lines > 0)) { for (j = 0; (j < DCTSIZE) && (i < height); j++, i++) { gint p; p = 0; for (k = 0; k < width; k++) { base[0][p] = r_rows[j][k]; base[1][p] = g_rows[j][k]; base[2][p] = b_rows[j][k]; p += pstride; } base[0] += rstride; base[1] += rstride; base[2] += rstride; } } else { GST_INFO_OBJECT (dec, "jpeg_read_raw_data() returned 0"); } } } static void gst_jpeg_dec_decode_indirect (GstJpegDec * dec, GstVideoFrame * frame, gint r_v, gint r_h, gint comp) { guchar *y_rows[16], *u_rows[16], *v_rows[16]; guchar **scanarray[3] = { y_rows, u_rows, v_rows }; gint i, j, k; gint lines; guchar *base[3], *last[3]; gint stride[3]; gint width, height; GST_DEBUG_OBJECT (dec, "unadvantageous width or r_h, taking slow route involving memcpy"); width = GST_VIDEO_FRAME_WIDTH (frame); height = GST_VIDEO_FRAME_HEIGHT (frame); if (G_UNLIKELY (!gst_jpeg_dec_ensure_buffers (dec, GST_ROUND_UP_32 (width)))) return; for (i = 0; i < 3; i++) { base[i] = GST_VIDEO_FRAME_COMP_DATA (frame, i); stride[i] = GST_VIDEO_FRAME_COMP_STRIDE (frame, i); /* make sure we don't make jpeglib write beyond our buffer, * which might happen if (height % (r_v*DCTSIZE)) != 0 */ last[i] = base[i] + (GST_VIDEO_FRAME_COMP_STRIDE (frame, i) * (GST_VIDEO_FRAME_COMP_HEIGHT (frame, i) - 1)); } memcpy (y_rows, dec->idr_y, 16 * sizeof (gpointer)); memcpy (u_rows, dec->idr_u, 16 * sizeof (gpointer)); memcpy (v_rows, dec->idr_v, 16 * sizeof (gpointer)); /* fill chroma components for grayscale */ if (comp == 1) { GST_DEBUG_OBJECT (dec, "grayscale, filling chroma"); for (i = 0; i < 16; i++) { memset 
(u_rows[i], GST_ROUND_UP_32 (width), 0x80); memset (v_rows[i], GST_ROUND_UP_32 (width), 0x80); } } for (i = 0; i < height; i += r_v * DCTSIZE) { lines = jpeg_read_raw_data (&dec->cinfo, scanarray, r_v * DCTSIZE); if (G_LIKELY (lines > 0)) { for (j = 0, k = 0; j < (r_v * DCTSIZE); j += r_v, k++) { if (G_LIKELY (base[0] <= last[0])) { memcpy (base[0], y_rows[j], stride[0]); base[0] += stride[0]; } if (r_v == 2) { if (G_LIKELY (base[0] <= last[0])) { memcpy (base[0], y_rows[j + 1], stride[0]); base[0] += stride[0]; } } if (G_LIKELY (base[1] <= last[1] && base[2] <= last[2])) { if (r_h == 2) { memcpy (base[1], u_rows[k], stride[1]); memcpy (base[2], v_rows[k], stride[2]); } else if (r_h == 1) { hresamplecpy1 (base[1], u_rows[k], stride[1]); hresamplecpy1 (base[2], v_rows[k], stride[2]); } else { /* FIXME: implement (at least we avoid crashing by doing nothing) */ } } if (r_v == 2 || (k & 1) != 0) { base[1] += stride[1]; base[2] += stride[2]; } } } else { GST_INFO_OBJECT (dec, "jpeg_read_raw_data() returned 0"); } } } static GstFlowReturn gst_jpeg_dec_decode_direct (GstJpegDec * dec, GstVideoFrame * frame) { guchar **line[3]; /* the jpeg line buffer */ guchar *y[4 * DCTSIZE] = { NULL, }; /* alloc enough for the lines */ guchar *u[4 * DCTSIZE] = { NULL, }; /* r_v will be <4 */ guchar *v[4 * DCTSIZE] = { NULL, }; gint i, j; gint lines, v_samp[3]; guchar *base[3], *last[3]; gint stride[3]; guint height; line[0] = y; line[1] = u; line[2] = v; v_samp[0] = dec->cinfo.comp_info[0].v_samp_factor; v_samp[1] = dec->cinfo.comp_info[1].v_samp_factor; v_samp[2] = dec->cinfo.comp_info[2].v_samp_factor; if (G_UNLIKELY (v_samp[0] > 2 || v_samp[1] > 2 || v_samp[2] > 2)) goto format_not_supported; height = GST_VIDEO_FRAME_HEIGHT (frame); for (i = 0; i < 3; i++) { base[i] = GST_VIDEO_FRAME_COMP_DATA (frame, i); stride[i] = GST_VIDEO_FRAME_COMP_STRIDE (frame, i); /* make sure we don't make jpeglib write beyond our buffer, * which might happen if (height % (r_v*DCTSIZE)) != 0 */ last[i] = 
base[i] + (GST_VIDEO_FRAME_COMP_STRIDE (frame, i) * (GST_VIDEO_FRAME_COMP_HEIGHT (frame, i) - 1)); } /* let jpeglib decode directly into our final buffer */ GST_DEBUG_OBJECT (dec, "decoding directly into output buffer"); for (i = 0; i < height; i += v_samp[0] * DCTSIZE) { for (j = 0; j < (v_samp[0] * DCTSIZE); ++j) { /* Y */ line[0][j] = base[0] + (i + j) * stride[0]; if (G_UNLIKELY (line[0][j] > last[0])) line[0][j] = last[0]; /* U */ if (v_samp[1] == v_samp[0]) { line[1][j] = base[1] + ((i + j) / 2) * stride[1]; } else if (j < (v_samp[1] * DCTSIZE)) { line[1][j] = base[1] + ((i / 2) + j) * stride[1]; } if (G_UNLIKELY (line[1][j] > last[1])) line[1][j] = last[1]; /* V */ if (v_samp[2] == v_samp[0]) { line[2][j] = base[2] + ((i + j) / 2) * stride[2]; } else if (j < (v_samp[2] * DCTSIZE)) { line[2][j] = base[2] + ((i / 2) + j) * stride[2]; } if (G_UNLIKELY (line[2][j] > last[2])) line[2][j] = last[2]; } lines = jpeg_read_raw_data (&dec->cinfo, line, v_samp[0] * DCTSIZE); if (G_UNLIKELY (!lines)) { GST_INFO_OBJECT (dec, "jpeg_read_raw_data() returned 0"); } } return GST_FLOW_OK; format_not_supported: { gboolean ret = GST_FLOW_OK; GST_VIDEO_DECODER_ERROR (dec, 1, STREAM, DECODE, (_("Failed to decode JPEG image")), ("Unsupported subsampling schema: v_samp factors: %u %u %u", v_samp[0], v_samp[1], v_samp[2]), ret); return ret; } } static void gst_jpeg_dec_negotiate (GstJpegDec * dec, gint width, gint height, gint clrspc) { GstVideoCodecState *outstate; GstVideoInfo *info; GstVideoFormat format; switch (clrspc) { case JCS_RGB: format = GST_VIDEO_FORMAT_RGB; break; case JCS_GRAYSCALE: format = GST_VIDEO_FORMAT_GRAY8; break; default: format = GST_VIDEO_FORMAT_I420; break; } /* Compare to currently configured output state */ outstate = gst_video_decoder_get_output_state (GST_VIDEO_DECODER (dec)); if (outstate) { info = &outstate->info; if (width == GST_VIDEO_INFO_WIDTH (info) && height == GST_VIDEO_INFO_HEIGHT (info) && format == GST_VIDEO_INFO_FORMAT (info)) { 
gst_video_codec_state_unref (outstate); return; } gst_video_codec_state_unref (outstate); } outstate = gst_video_decoder_set_output_state (GST_VIDEO_DECODER (dec), format, width, height, dec->input_state); switch (clrspc) { case JCS_RGB: case JCS_GRAYSCALE: break; default: outstate->info.colorimetry.range = GST_VIDEO_COLOR_RANGE_0_255; outstate->info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_BT601; outstate->info.colorimetry.transfer = GST_VIDEO_TRANSFER_UNKNOWN; outstate->info.colorimetry.primaries = GST_VIDEO_COLOR_PRIMARIES_UNKNOWN; break; } gst_video_codec_state_unref (outstate); gst_video_decoder_negotiate (GST_VIDEO_DECODER (dec)); GST_DEBUG_OBJECT (dec, "max_v_samp_factor=%d", dec->cinfo.max_v_samp_factor); GST_DEBUG_OBJECT (dec, "max_h_samp_factor=%d", dec->cinfo.max_h_samp_factor); } static GstFlowReturn gst_jpeg_dec_handle_frame (GstVideoDecoder * bdec, GstVideoCodecFrame * frame) { GstFlowReturn ret = GST_FLOW_OK; GstJpegDec *dec = (GstJpegDec *) bdec; GstVideoFrame vframe; gint width, height; gint r_h, r_v; guint code, hdr_ok; gboolean need_unmap = TRUE; GstVideoCodecState *state = NULL; dec->current_frame = frame; gst_buffer_map (frame->input_buffer, &dec->current_frame_map, GST_MAP_READ); gst_jpeg_dec_fill_input_buffer (&dec->cinfo); if (setjmp (dec->jerr.setjmp_buffer)) { code = dec->jerr.pub.msg_code; if (code == JERR_INPUT_EOF) { GST_DEBUG ("jpeg input EOF error, we probably need more data"); goto need_more_data; } goto decode_error; } /* read header */ hdr_ok = jpeg_read_header (&dec->cinfo, TRUE); if (G_UNLIKELY (hdr_ok != JPEG_HEADER_OK)) { GST_WARNING_OBJECT (dec, "reading the header failed, %d", hdr_ok); } GST_LOG_OBJECT (dec, "num_components=%d", dec->cinfo.num_components); GST_LOG_OBJECT (dec, "jpeg_color_space=%d", dec->cinfo.jpeg_color_space); if (!dec->cinfo.num_components || !dec->cinfo.comp_info) goto components_not_supported; r_h = dec->cinfo.comp_info[0].h_samp_factor; r_v = dec->cinfo.comp_info[0].v_samp_factor; GST_LOG_OBJECT 
(dec, "r_h = %d, r_v = %d", r_h, r_v); if (dec->cinfo.num_components > 3) goto components_not_supported; /* verify color space expectation to avoid going *boom* or bogus output */ if (dec->cinfo.jpeg_color_space != JCS_YCbCr && dec->cinfo.jpeg_color_space != JCS_GRAYSCALE && dec->cinfo.jpeg_color_space != JCS_RGB) goto unsupported_colorspace; #ifndef GST_DISABLE_GST_DEBUG { gint i; for (i = 0; i < dec->cinfo.num_components; ++i) { GST_LOG_OBJECT (dec, "[%d] h_samp_factor=%d, v_samp_factor=%d, cid=%d", i, dec->cinfo.comp_info[i].h_samp_factor, dec->cinfo.comp_info[i].v_samp_factor, dec->cinfo.comp_info[i].component_id); } } #endif /* prepare for raw output */ dec->cinfo.do_fancy_upsampling = FALSE; dec->cinfo.do_block_smoothing = FALSE; dec->cinfo.out_color_space = dec->cinfo.jpeg_color_space; dec->cinfo.dct_method = dec->idct_method; dec->cinfo.raw_data_out = TRUE; GST_LOG_OBJECT (dec, "starting decompress"); guarantee_huff_tables (&dec->cinfo); if (!jpeg_start_decompress (&dec->cinfo)) { GST_WARNING_OBJECT (dec, "failed to start decompression cycle"); } /* sanity checks to get safe and reasonable output */ switch (dec->cinfo.jpeg_color_space) { case JCS_GRAYSCALE: if (dec->cinfo.num_components != 1) goto invalid_yuvrgbgrayscale; break; case JCS_RGB: if (dec->cinfo.num_components != 3 || dec->cinfo.max_v_samp_factor > 1 || dec->cinfo.max_h_samp_factor > 1) goto invalid_yuvrgbgrayscale; break; case JCS_YCbCr: if (dec->cinfo.num_components != 3 || r_v > 2 || r_v < dec->cinfo.comp_info[0].v_samp_factor || r_v < dec->cinfo.comp_info[1].v_samp_factor || r_h < dec->cinfo.comp_info[0].h_samp_factor || r_h < dec->cinfo.comp_info[1].h_samp_factor) goto invalid_yuvrgbgrayscale; break; default: g_assert_not_reached (); break; } width = dec->cinfo.output_width; height = dec->cinfo.output_height; if (G_UNLIKELY (width < MIN_WIDTH || width > MAX_WIDTH || height < MIN_HEIGHT || height > MAX_HEIGHT)) goto wrong_size; gst_jpeg_dec_negotiate (dec, width, height, 
dec->cinfo.jpeg_color_space); state = gst_video_decoder_get_output_state (bdec); ret = gst_video_decoder_allocate_output_frame (bdec, frame); if (G_UNLIKELY (ret != GST_FLOW_OK)) goto alloc_failed; if (!gst_video_frame_map (&vframe, &state->info, frame->output_buffer, GST_MAP_READWRITE)) goto alloc_failed; GST_LOG_OBJECT (dec, "width %d, height %d", width, height); if (dec->cinfo.jpeg_color_space == JCS_RGB) { gst_jpeg_dec_decode_rgb (dec, &vframe); } else if (dec->cinfo.jpeg_color_space == JCS_GRAYSCALE) { gst_jpeg_dec_decode_grayscale (dec, &vframe); } else { GST_LOG_OBJECT (dec, "decompressing (reqired scanline buffer height = %u)", dec->cinfo.rec_outbuf_height); /* For some widths jpeglib requires more horizontal padding than I420 * provides. In those cases we need to decode into separate buffers and then * copy over the data into our final picture buffer, otherwise jpeglib might * write over the end of a line into the beginning of the next line, * resulting in blocky artifacts on the left side of the picture. 
*/ if (G_UNLIKELY (width % (dec->cinfo.max_h_samp_factor * DCTSIZE) != 0 || dec->cinfo.comp_info[0].h_samp_factor != 2 || dec->cinfo.comp_info[1].h_samp_factor != 1 || dec->cinfo.comp_info[2].h_samp_factor != 1)) { GST_CAT_LOG_OBJECT (GST_CAT_PERFORMANCE, dec, "indirect decoding using extra buffer copy"); gst_jpeg_dec_decode_indirect (dec, &vframe, r_v, r_h, dec->cinfo.num_components); } else { ret = gst_jpeg_dec_decode_direct (dec, &vframe); if (G_UNLIKELY (ret != GST_FLOW_OK)) goto decode_direct_failed; } } gst_video_frame_unmap (&vframe); GST_LOG_OBJECT (dec, "decompressing finished"); jpeg_finish_decompress (&dec->cinfo); gst_buffer_unmap (frame->input_buffer, &dec->current_frame_map); ret = gst_video_decoder_finish_frame (bdec, frame); need_unmap = FALSE; done: exit: if (need_unmap) gst_buffer_unmap (frame->input_buffer, &dec->current_frame_map); if (state) gst_video_codec_state_unref (state); return ret; /* special cases */ need_more_data: { GST_LOG_OBJECT (dec, "we need more data"); ret = GST_FLOW_OK; goto exit; } /* ERRORS */ wrong_size: { GST_VIDEO_DECODER_ERROR (dec, 1, STREAM, DECODE, (_("Failed to decode JPEG image")), ("Picture is too small or too big (%ux%u)", width, height), ret); ret = GST_FLOW_ERROR; goto done; } decode_error: { gchar err_msg[JMSG_LENGTH_MAX]; dec->jerr.pub.format_message ((j_common_ptr) (&dec->cinfo), err_msg); GST_VIDEO_DECODER_ERROR (dec, 1, STREAM, DECODE, (_("Failed to decode JPEG image")), ("Decode error #%u: %s", code, err_msg), ret); gst_buffer_unmap (frame->input_buffer, &dec->current_frame_map); gst_video_decoder_drop_frame (bdec, frame); need_unmap = FALSE; jpeg_abort_decompress (&dec->cinfo); goto done; } decode_direct_failed: { /* already posted an error message */ jpeg_abort_decompress (&dec->cinfo); goto done; } alloc_failed: { const gchar *reason; reason = gst_flow_get_name (ret); GST_DEBUG_OBJECT (dec, "failed to alloc buffer, reason %s", reason); /* Reset for next time */ jpeg_abort_decompress (&dec->cinfo); if 
(ret != GST_FLOW_EOS && ret != GST_FLOW_FLUSHING && ret != GST_FLOW_NOT_LINKED) { GST_VIDEO_DECODER_ERROR (dec, 1, STREAM, DECODE, (_("Failed to decode JPEG image")), ("Buffer allocation failed, reason: %s", reason), ret); jpeg_abort_decompress (&dec->cinfo); } goto exit; } components_not_supported: { GST_VIDEO_DECODER_ERROR (dec, 1, STREAM, DECODE, (_("Failed to decode JPEG image")), ("number of components not supported: %d (max 3)", dec->cinfo.num_components), ret); jpeg_abort_decompress (&dec->cinfo); goto done; } unsupported_colorspace: { GST_VIDEO_DECODER_ERROR (dec, 1, STREAM, DECODE, (_("Failed to decode JPEG image")), ("Picture has unknown or unsupported colourspace"), ret); jpeg_abort_decompress (&dec->cinfo); goto done; } invalid_yuvrgbgrayscale: { GST_VIDEO_DECODER_ERROR (dec, 1, STREAM, DECODE, (_("Failed to decode JPEG image")), ("Picture is corrupt or unhandled YUV/RGB/grayscale layout"), ret); jpeg_abort_decompress (&dec->cinfo); goto done; } } static gboolean gst_jpeg_dec_decide_allocation (GstVideoDecoder * bdec, GstQuery * query) { GstBufferPool *pool = NULL; GstStructure *config; if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (bdec, query)) return FALSE; if (gst_query_get_n_allocation_pools (query) > 0) gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL); if (pool == NULL) return FALSE; config = gst_buffer_pool_get_config (pool); if (gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL)) { gst_buffer_pool_config_add_option (config, GST_BUFFER_POOL_OPTION_VIDEO_META); } gst_buffer_pool_set_config (pool, config); gst_object_unref (pool); return TRUE; } static gboolean gst_jpeg_dec_start (GstVideoDecoder * bdec) { GstJpegDec *dec = (GstJpegDec *) bdec; dec->saw_header = FALSE; dec->parse_entropy_len = 0; dec->parse_resync = FALSE; gst_video_decoder_set_packetized (bdec, FALSE); return TRUE; } static gboolean gst_jpeg_dec_flush (GstVideoDecoder * bdec) { GstJpegDec *dec = (GstJpegDec *) bdec; 
jpeg_abort_decompress (&dec->cinfo); dec->parse_entropy_len = 0; dec->parse_resync = FALSE; dec->saw_header = FALSE; return TRUE; } static void gst_jpeg_dec_set_property (GObject * object, guint prop_id, const GValue * value, GParamSpec * pspec) { GstJpegDec *dec; dec = GST_JPEG_DEC (object); switch (prop_id) { case PROP_IDCT_METHOD: dec->idct_method = g_value_get_enum (value); break; case PROP_MAX_ERRORS: g_atomic_int_set (&dec->max_errors, g_value_get_int (value)); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } static void gst_jpeg_dec_get_property (GObject * object, guint prop_id, GValue * value, GParamSpec * pspec) { GstJpegDec *dec; dec = GST_JPEG_DEC (object); switch (prop_id) { case PROP_IDCT_METHOD: g_value_set_enum (value, dec->idct_method); break; case PROP_MAX_ERRORS: g_value_set_int (value, g_atomic_int_get (&dec->max_errors)); break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec); break; } } static gboolean gst_jpeg_dec_stop (GstVideoDecoder * bdec) { GstJpegDec *dec = (GstJpegDec *) bdec; gst_jpeg_dec_free_buffers (dec); return TRUE; }
lgpl-2.1
xasx/wildfly
iiop-openjdk/src/main/java/org/wildfly/iiop/openjdk/rmi/ir/LocalIDLType.java
1319
/*
 * JBoss, Home of Professional Open Source.
 * Copyright 2008, Red Hat Middleware LLC, and individual contributors
 * as indicated by the @author tags. See the copyright.txt file in the
 * distribution for a full listing of individual contributors.
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This software is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this software; if not, write to the Free
 * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA, or see the FSF site: http://www.fsf.org.
 */
package org.wildfly.iiop.openjdk.rmi.ir;

import org.omg.CORBA.IDLTypeOperations;

/**
 * Marker interface for IDL types held locally in the interface repository.
 * <p>
 * Declares no members of its own; it simply combines
 * {@link IDLTypeOperations} (CORBA-defined operations of an IDL type) with
 * {@code LocalIRObject} (the local interface-repository object contract), so
 * implementations can be treated uniformly under either view.
 * <p>
 * Package-private by design: only IR implementation classes in this package
 * are expected to implement it.
 *
 * @author <a href="mailto:[email protected]">Ole Husgaard</a>
 * @version $Revision: 81018 $
 */
interface LocalIDLType extends IDLTypeOperations, LocalIRObject {
}
lgpl-2.1
john-tornblom/mc
model/com.mentor.nucleus.bp.core/src50/ooaofooa_SM_STATE_class.h
2991
/*----------------------------------------------------------------------------
 * File:  ooaofooa_SM_STATE_class.h
 *
 * Class:      State Machine State  (SM_STATE)
 * Component:  ooaofooa
 *
 * your copyright statement can go here (from te_copyright.body)
 *
 * NOTE(review): this header appears to be generated by a model compiler
 * (the te_copyright.body reference and the uniform naming suggest
 * BridgePoint/MC output) — edit the model, not this file; confirm before
 * hand-modifying.
 *--------------------------------------------------------------------------*/

#ifndef OOAOFOOA_SM_STATE_CLASS_H
#define OOAOFOOA_SM_STATE_CLASS_H

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Structural representation of application analysis class:
 * State Machine State  (SM_STATE)
 */
struct ooaofooa_SM_STATE {
  /* application analysis class attributes */
  Escher_UniqueID_t SMstt_ID;   /* identifying attribute (part of instance identifier) */
  Escher_UniqueID_t SM_ID;      /* identifying attribute (part of instance identifier) */
  Escher_UniqueID_t SMspd_ID;
  c_t * Name;
  i_t Numb;
  i_t Final;

  /* relationship storage — pointers for one-to-one navigation,
   * Escher_ObjectSet_s for one-to-many navigation */
  ooaofooa_SM_SM * SM_SM_R501;
  Escher_ObjectSet_s SM_SEME_R503;
  Escher_ObjectSet_s SM_TXN_R506_is_destination_of;
  Escher_ObjectSet_s SM_NETXN_R508_is_origination_of;
  ooaofooa_SM_MOAH * SM_MOAH_R511;
  ooaofooa_SM_SUPDT * SM_SUPDT_R521_receives_asynchronous_data_via;
  ooaofooa_TE_STATE * TE_STATE_R2037;
  Escher_ObjectSet_s I_INS_R2915_defines_state_of;
  ooaofooa_CSME_CIS * CSME_CIS_R2932;
  ooaofooa_BP_ST * BP_ST_R3104_has_set;
};

/* instance persistence: dump/load a single instance, then batch-resolve
 * relationship pointers after all instances are loaded */
void ooaofooa_SM_STATE_instancedumper( Escher_iHandle_t );
Escher_iHandle_t ooaofooa_SM_STATE_instanceloader( Escher_iHandle_t, const c_t * [] );
void ooaofooa_SM_STATE_batch_relate( Escher_iHandle_t );

/*
 * Where clause instance selection against identifying attribute set(s).
 */
ooaofooa_SM_STATE * ooaofooa_SM_STATE_AnyWhere1( Escher_UniqueID_t, Escher_UniqueID_t );
ooaofooa_SM_STATE * ooaofooa_SM_STATE_AnyWhere2( Escher_UniqueID_t, Escher_UniqueID_t, Escher_UniqueID_t );

/* R501 link/unlink: connect/disconnect this state with its owning state machine */
void ooaofooa_SM_STATE_R501_Link_is_decomposed_into( ooaofooa_SM_SM *, ooaofooa_SM_STATE * );
void ooaofooa_SM_STATE_R501_Unlink_is_decomposed_into( ooaofooa_SM_SM *, ooaofooa_SM_STATE * );

/* set accessors for to-many relationships (return the embedded object set) */
#define ooaofooa_SM_SEME_R503_From_SM_STATE( SM_STATE ) ( &((SM_STATE)->SM_SEME_R503) )
#define ooaofooa_SM_TXN_R506_From_SM_STATE_is_destination_of( SM_STATE ) ( &((SM_STATE)->SM_TXN_R506_is_destination_of) )
/* Note: SM_STATE->SM_TXN[R506] not navigated */
#define ooaofooa_SM_NETXN_R508_From_SM_STATE_is_origination_of( SM_STATE ) ( &((SM_STATE)->SM_NETXN_R508_is_origination_of) )
/* Note: SM_STATE->SM_NETXN[R508] not navigated */
void ooaofooa_SM_STATE_R521_Link_is_delivered_by_received_event_to( ooaofooa_SM_SUPDT *, ooaofooa_SM_STATE * );
void ooaofooa_SM_STATE_R521_Unlink_is_delivered_by_received_event_to( ooaofooa_SM_SUPDT *, ooaofooa_SM_STATE * );
#define ooaofooa_I_INS_R2915_From_SM_STATE_defines_state_of( SM_STATE ) ( &((SM_STATE)->I_INS_R2915_defines_state_of) )
/* Note: SM_STATE->I_INS[R2915] not navigated */

/* extent: statically sized pool of instances for this class */
#define ooaofooa_SM_STATE_MAX_EXTENT_SIZE 10
extern Escher_Extent_t pG_ooaofooa_SM_STATE_extent;

#ifdef __cplusplus
}
#endif

#endif  /* OOAOFOOA_SM_STATE_CLASS_H */
apache-2.0
siosio/intellij-community
platform/code-style-api/src/com/intellij/formatting/FormattingModelBuilder.java
3839
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.formatting;

import com.intellij.lang.ASTNode;
import com.intellij.openapi.util.TextRange;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiFile;
import com.intellij.psi.codeStyle.CodeStyleSettings;
import org.jetbrains.annotations.ApiStatus;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

/**
 * Allows a custom language plugin to build a formatting model for a file in the language, or
 * for a portion of a file.
 * A formatting model defines how a file is broken into non-whitespace blocks and different
 * types of whitespace (alignment, indents and wraps) between them.
 * <p>For certain aspects of the custom formatting to work properly, it is recommended to use TokenType.WHITE_SPACE
 * as the language's whitespace tokens. See {@link com.intellij.lang.ParserDefinition}
 *
 * @apiNote in case you are getting a {@link StackOverflowError} with your builder, most likely you haven't implemented any model building
 * methods: the default implementations delegate to each other in a cycle
 * ({@code createModel(FormattingContext)} → 4-arg → 3-arg → 2-arg → back to
 * {@code createModel(FormattingContext)}), so at least one override is required.
 * Please implement {@link #createModel(FormattingContext)}.
 * @see com.intellij.lang.LanguageFormatting
 * @see FormattingModelProvider#createFormattingModelForPsiFile(PsiFile, Block, CodeStyleSettings)
 */
public interface FormattingModelBuilder {

  /**
   * Requests building the formatting model for a section of the file containing
   * the specified PSI element and its children.
   *
   * @return the formatting model for the file.
   * @see FormattingContext
   */
  default @NotNull FormattingModel createModel(@NotNull FormattingContext formattingContext) {
    // Default bridges to the deprecated 4-arg overload so legacy implementations keep working;
    // new implementations must override this method (see @apiNote on the interface).
    return createModel(formattingContext.getPsiElement(),
                       formattingContext.getFormattingRange(),
                       formattingContext.getCodeStyleSettings(),
                       formattingContext.getFormattingMode());
  }

  /**
   * Returns the TextRange which should be processed by the formatter in order to detect proper indent options.
   *
   * @param file            the file in which the line break is inserted.
   * @param offset          the line break offset.
   * @param elementAtOffset the element at {@code offset}
   * @return the range to reformat, or null if the default range should be used
   */
  default @Nullable TextRange getRangeAffectingIndent(PsiFile file, int offset, ASTNode elementAtOffset) {
    return null;
  }

  /**
   * @deprecated use {@link #createModel(FormattingContext)}
   */
  @Deprecated
  @ApiStatus.ScheduledForRemoval(inVersion = "2021.1")
  default @NotNull FormattingModel createModel(final @NotNull PsiElement element,
                                              final @NotNull TextRange range,
                                              final @NotNull CodeStyleSettings settings,
                                              final @NotNull FormattingMode mode) {
    return createModel(element, settings, mode); // just for compatibility with old implementations
  }

  /**
   * @deprecated use {@link #createModel(FormattingContext)}
   */
  @Deprecated
  @ApiStatus.ScheduledForRemoval(inVersion = "2021.1")
  default @NotNull FormattingModel createModel(final @NotNull PsiElement element,
                                              final @NotNull CodeStyleSettings settings,
                                              @NotNull FormattingMode mode) {
    return createModel(element, settings);
  }

  /**
   * @deprecated use {@link #createModel(FormattingContext)}
   */
  @Deprecated
  @ApiStatus.ScheduledForRemoval(inVersion = "2021.1")
  default @NotNull FormattingModel createModel(final PsiElement element, final CodeStyleSettings settings) {
    // Closes the delegation cycle — if nothing was overridden this recurses back
    // into createModel(FormattingContext) and eventually overflows the stack.
    return createModel(FormattingContext.create(element, settings));
  }
}
apache-2.0
jasonchaffee/apiman
manager/api/beans/src/main/java/io/apiman/manager/api/beans/clients/ClientStatus.java
790
/* * Copyright 2014 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.apiman.manager.api.beans.clients; /** * The various client statuses. * * @author [email protected] */ public enum ClientStatus { Created, Ready, Registered, Retired }
apache-2.0
maxim-rabinovich/epic
src/test/scala/epic/dense/SigmoidTransformTest.scala
1952
package epic.dense import org.scalatest.FunSuite import breeze.optimize.{GradientTester, DiffFunction} import breeze.linalg.{sum, argmax, max, DenseVector} /** * * * @author dlwh class SigmoidTransformTest extends FunSuite { test("chain rule") { val index = new SigmoidTransform(12, 10, true) val dv = DenseVector.rand(10) val objective = new DiffFunction[DenseVector[Double]] { def calculate(x: DenseVector[Double]): (Double, DenseVector[Double]) = { val layer = index.extractLayer(x) val acts = layer.activations(dv) val obj = sum(acts) val deriv = DenseVector.zeros[Double](x.length) layer.tallyDerivative(deriv, DenseVector.ones[Double](acts.length), dv) obj -> deriv } } val weights: DenseVector[Double] = DenseVector.rand[Double](index.index.size) - 0.5 val diffs = GradientTester.test[Int, DenseVector[Double]](objective, weights, randFraction = 1.0) assert(max(diffs) < 1E-3, s"${max(diffs)} was bigger than expected!!") } test("layered chain rule") { val index = new SigmoidTransform(new AffineTransform(20, 12, new SigmoidTransform(12, 10, true))) val dv = DenseVector.rand(10) val objective = new DiffFunction[DenseVector[Double]] { def calculate(x: DenseVector[Double]): (Double, DenseVector[Double]) = { val layer = index.extractLayer(x) val acts = layer.activations(dv) val obj = sum(acts) val deriv = DenseVector.zeros[Double](x.length) layer.tallyDerivative(deriv, DenseVector.ones[Double](acts.length), dv) obj -> deriv } } val weights: DenseVector[Double] = DenseVector.rand[Double](index.index.size)/2.0 - 0.25 val diffs = GradientTester.test[Int, DenseVector[Double]](objective, weights, randFraction = 1.0) assert(max(diffs) < 1E-2, s"${max(diffs)} was bigger than expected!! ${argmax(diffs)} ${index.index.get(argmax(diffs))}") } } */
apache-2.0
VinodKumarS-Huawei/ietf96yang
core/api/src/main/java/org/onosproject/ui/table/TableRequestHandler.java
5111
/* * Copyright 2015-present Open Networking Laboratory * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.onosproject.ui.table; import com.fasterxml.jackson.databind.node.ObjectNode; import org.onosproject.ui.JsonUtils; import org.onosproject.ui.RequestHandler; import static org.onosproject.ui.table.TableModel.sortDir; /** * Message handler specifically for table views. */ public abstract class TableRequestHandler extends RequestHandler { private static final String FIRST_COL = "firstCol"; private static final String FIRST_DIR = "firstDir"; private static final String SECOND_COL = "secondCol"; private static final String SECOND_DIR = "secondDir"; private static final String ASC = "asc"; private static final String ANNOTS = "annots"; private static final String NO_ROWS_MSG_KEY = "no_rows_msg"; private final String respType; private final String nodeName; /** * Constructs a table request handler for a specific table view. When * table requests come in, the handler will generate the appropriate * table rows, sort them according the the request sort parameters, and * send back the response to the client. 
* * @param reqType type of the request event * @param respType type of the response event * @param nodeName name of JSON node holding row data */ public TableRequestHandler(String reqType, String respType, String nodeName) { super(reqType); this.respType = respType; this.nodeName = nodeName; } @Override public void process(long sid, ObjectNode payload) { TableModel tm = createTableModel(); populateTable(tm, payload); String firstCol = JsonUtils.string(payload, FIRST_COL, defaultColumnId()); String firstDir = JsonUtils.string(payload, FIRST_DIR, ASC); String secondCol = JsonUtils.string(payload, SECOND_COL, null); String secondDir = JsonUtils.string(payload, SECOND_DIR, null); tm.sort(firstCol, sortDir(firstDir), secondCol, sortDir(secondDir)); addTableConfigAnnotations(tm, payload); ObjectNode rootNode = MAPPER.createObjectNode(); rootNode.set(nodeName, TableUtils.generateRowArrayNode(tm)); rootNode.set(ANNOTS, TableUtils.generateAnnotObjectNode(tm)); sendMessage(respType, 0, rootNode); } /** * Creates the table model (devoid of data) using {@link #getColumnIds()} * to initialize it, ready to be populated. * <p> * This default implementation returns a table model with default * formatters and comparators for all columns. * * @return an empty table model */ protected TableModel createTableModel() { return new TableModel(getColumnIds()); } /** * Adds table configuration specific annotations to table model. * * @param tm a table model * @param payload the event payload from the client */ protected void addTableConfigAnnotations(TableModel tm, ObjectNode payload) { tm.addAnnotation(NO_ROWS_MSG_KEY, noRowsMessage(payload)); } /** * Returns the default column ID to be used when one is not supplied in * the payload as the column on which to sort. * <p> * This default implementation returns "id". 
* * @return default sort column identifier */ protected String defaultColumnId() { return "id"; } /** * Subclasses should return the array of column IDs with which * to initialize their table model. * * @return the column IDs */ protected abstract String[] getColumnIds(); /** * Subclasses should return the message to display in the table when there * are no rows to display. For example, a host table might return * "No hosts found". * * @param payload request payload * @return the message */ protected abstract String noRowsMessage(ObjectNode payload); /** * Subclasses should populate the table model by adding * {@link TableModel.Row rows}. * <pre> * tm.addRow() * .cell(COL_ONE, ...) * .cell(COL_TWO, ...) * ... ; * </pre> * The request payload is provided in case there are request filtering * parameters (other than sort column and sort direction) that are required * to generate the appropriate data. * * @param tm the table model * @param payload request payload */ protected abstract void populateTable(TableModel tm, ObjectNode payload); }
apache-2.0
SupunS/carbon-identity
components/identity/org.wso2.carbon.identity.entitlement.ui/src/main/java/org/wso2/carbon/identity/entitlement/ui/PolicyEditorConstants.java
8025
/* * Copyright (c) WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * WSO2 Inc. licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except * in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.wso2.carbon.identity.entitlement.ui; /** * Policy editor related constants */ public class PolicyEditorConstants { public static final String ATTRIBUTE_SEPARATOR = ","; public static final String TARGET_ELEMENT = "Target"; public static final String ANY_OF_ELEMENT = "AnyOf"; public static final String ALL_OF_ELEMENT = "AllOf"; public static final String COMBINE_FUNCTION_AND = "AND"; public static final String COMBINE_FUNCTION_OR = "OR"; public static final String COMBINE_FUNCTION_END = "END"; public static final String MATCH_ELEMENT = "Match"; public static final String MATCH_ID = "MatchId"; public static final String ATTRIBUTE_ID = "AttributeId"; public static final String CATEGORY = "Category"; public static final String DATA_TYPE = "DataType"; public static final String ISSUER = "Issuer"; public static final String SOA_CATEGORY_USER = "Subject"; public static final String SOA_CATEGORY_SUBJECT = "Subject"; public static final String SOA_CATEGORY_RESOURCE = "Resource"; public static final String SOA_CATEGORY_ACTION = "Action"; public static final String SOA_CATEGORY_ENVIRONMENT = "Environment"; public static final String MUST_BE_PRESENT = "MustBePresent"; public static final String ATTRIBUTE_DESIGNATOR = "AttributeDesignator"; public static final class PreFunctions { public static final String PRE_FUNCTION_IS = "is"; 
public static final String PRE_FUNCTION_IS_NOT = "is-not"; public static final String PRE_FUNCTION_ARE = "are"; public static final String PRE_FUNCTION_ARE_NOT = "are-not"; public static final String CAN_DO = "can"; public static final String CAN_NOT_DO = "can not"; } public static final class TargetPreFunctions { public static final String PRE_FUNCTION_IS = "is"; } public static final class TargetFunctions { public static final String FUNCTION_EQUAL = "equal"; } public static final String RULE_EFFECT_PERMIT = "Permit"; public static final String RULE_EFFECT_DENY = "Deny"; public static final class DataType { public static final String DAY_TIME_DURATION = "http://www.w3.org/2001/XMLSchema#dayTimeDuration"; public static final String YEAR_MONTH_DURATION = "http://www.w3.org/2001/XMLSchema#yearMonthDuration"; public static final String STRING = "http://www.w3.org/2001/XMLSchema#string"; public static final String TIME = "http://www.w3.org/2001/XMLSchema#time"; public static final String IP_ADDRESS = "urn:oasis:names:tc:xacml:2.0:data-type:ipAddress"; public static final String DATE_TIME = "http://www.w3.org/2001/XMLSchema#dateTime"; public static final String DATE = "http://www.w3.org/2001/XMLSchema#date"; public static final String DOUBLE = "http://www.w3.org/2001/XMLSchema#double"; public static final String INT = "http://www.w3.org/2001/XMLSchema#integer"; } public static final class CombiningAlog { public static final String DENY_OVERRIDE_ID = "deny-overrides"; public static final String PERMIT_OVERRIDE_ID = "permit-overrides"; public static final String FIRST_APPLICABLE_ID = "first-applicable"; public static final String ORDER_PERMIT_OVERRIDE_ID = "ordered-permit-overrides"; public static final String ORDER_DENY_OVERRIDE_ID = "ordered-deny-overrides"; public static final String DENY_UNLESS_PERMIT_ID = "deny-unless-permit"; public static final String PERMIT_UNLESS_DENY_ID = "permit-unless-deny"; public static final String ONLY_ONE_APPLICABLE_ID = 
"only-one-applicable"; } public static final String RULE_ALGORITHM_IDENTIFIER_1 = "urn:oasis:names:tc:xacml:1.0:" + "rule-combining-algorithm:"; public static final String RULE_ALGORITHM_IDENTIFIER_3 = "urn:oasis:names:tc:xacml:3.0:" + "rule-combining-algorithm:"; public static final String POLICY_ALGORITHM_IDENTIFIER_1 = "urn:oasis:names:tc:xacml:1.0:" + "policy-combining-algorithm:"; public static final String POLICY_ALGORITHM_IDENTIFIER_3 = "urn:oasis:names:tc:xacml:3.0:" + "policy-combining-algorithm:"; public static final String POLICY_EDITOR_SEPARATOR = "|"; public static final int POLICY_EDITOR_ROW_DATA = 7; public static final String DYNAMIC_SELECTOR_CATEGORY = "Category"; public static final String DYNAMIC_SELECTOR_FUNCTION = "Function"; public static final String SUBJECT_ID_DEFAULT= "urn:oasis:names:tc:xacml:1.0:subject:subject-id"; public static final String SUBJECT_ID_ROLE= "http://wso2.org/claims/role"; public static final String RESOURCE_ID_DEFAULT = "urn:oasis:names:tc:xacml:1.0:resource:resource-id"; public static final String ACTION_ID_DEFAULT = "urn:oasis:names:tc:xacml:1.0:action:action-id"; public static final String ENVIRONMENT_ID_DEFAULT = "urn:oasis:names:tc:xacml:1.0:environment:environment-id"; public static final String RESOURCE_CATEGORY_URI = "urn:oasis:names:tc:xacml:3.0:" + "attribute-category:resource"; public static final String SUBJECT_CATEGORY_URI = "urn:oasis:names:tc:xacml:1.0:" + "subject-category:access-subject"; public static final String ACTION_CATEGORY_URI = "urn:oasis:names:tc:xacml:3.0:" + "attribute-category:action"; public static final String ENVIRONMENT_CATEGORY_URI = "urn:oasis:names:tc:xacml:3.0:" + "attribute-category:environment"; public static final String ENVIRONMENT_CURRENT_DATE = "urn:oasis:names:tc:xacml:1.0:environment:current-date"; public static final String ENVIRONMENT_CURRENT_TIME = "urn:oasis:names:tc:xacml:1.0:environment:current-time"; public static final String ENVIRONMENT_CURRENT_DATETIME = 
"urn:oasis:names:tc:xacml:1.0:environment:current-dateTime"; public static final String SOA_POLICY_EDITOR = "SOA"; public static class FunctionIdentifier { public static final String ANY = "*"; public static final String EQUAL_RANGE = "["; public static final String EQUAL_RANGE_CLOSE = "]"; public static final String RANGE = "("; public static final String RANGE_CLOSE = ")"; public static final String GREATER = ">"; public static final String GREATER_EQUAL = ">="; public static final String LESS = "<"; public static final String LESS_EQUAL = "<="; public static final String REGEX = "{"; public static final String AND = "&"; public static final String OR = "|"; } public static final class AttributeId { public static final String ENV_DOMAIN = "Domain"; public static final String ENV_DATE = "Date"; public static final String ENV_DATE_TIME = "DateTime"; public static final String ENV_IP = "IP"; public static final String ENV_TIME = "Time"; public static final String USER_AGE = "Age"; } }
apache-2.0
dneuman64/traffic_control
traffic_monitor/src/test/java/health/CacheStatisticsClientTest.java
1971
package health;

import com.comcast.cdn.traffic_control.traffic_monitor.config.Cache;
import com.comcast.cdn.traffic_control.traffic_monitor.health.CacheStateUpdater;
import com.comcast.cdn.traffic_control.traffic_monitor.health.CacheStatisticsClient;
import com.ning.http.client.AsyncHttpClient;
import com.ning.http.client.ListenableFuture;
import com.ning.http.client.ProxyServer;
import com.ning.http.client.Request;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;

import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.powermock.api.mockito.PowerMockito.whenNew;

/**
 * Unit test for {@link CacheStatisticsClient}, verifying that fetching cache
 * statistics issues an asynchronous HTTP request and hands the resulting
 * future to the {@link CacheStateUpdater}.
 */
@PrepareForTest({CacheStatisticsClient.class, AsyncHttpClient.class, ProxyServer.class})
@RunWith(PowerMockRunner.class)
public class CacheStatisticsClientTest {

    @Test
    public void itExecutesAsynchronousRequest() throws Exception {
        // Spy on a real AsyncHttpClient so executeRequest can be stubbed to
        // return our future instead of performing network I/O.
        ListenableFuture listenableFuture = mock(ListenableFuture.class);
        AsyncHttpClient asyncHttpClient = spy(new AsyncHttpClient());
        doReturn(listenableFuture).when(asyncHttpClient).executeRequest(any(Request.class), any(CacheStateUpdater.class));

        // Order matters: whenNew must be registered before CacheStatisticsClient
        // constructs its AsyncHttpClient, so the stubbed spy is injected.
        whenNew(AsyncHttpClient.class).withNoArguments().thenReturn(asyncHttpClient);

        // Minimal cache stub with just the fields the client reads.
        Cache cache = mock(Cache.class);
        when(cache.getQueryIp()).thenReturn("192.168.99.100");
        when(cache.getQueryPort()).thenReturn(0);
        when(cache.getStatisticsUrl()).thenReturn("http://cache1.example.com/astats");

        CacheStateUpdater cacheStateUpdater = mock(CacheStateUpdater.class);

        CacheStatisticsClient cacheStatisticsClient = new CacheStatisticsClient();
        cacheStatisticsClient.fetchCacheStatistics(cache, cacheStateUpdater);

        // The updater must receive the future of the in-flight request so it
        // can later cancel or observe completion.
        verify(cacheStateUpdater).setFuture(listenableFuture);
    }
}
apache-2.0
ivan-fedorov/intellij-community
python/src/com/jetbrains/python/psi/impl/PyFunctionImpl.java
26495
/* * Copyright 2000-2014 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.jetbrains.python.psi.impl; import com.intellij.lang.ASTNode; import com.intellij.navigation.ItemPresentation; import com.intellij.openapi.extensions.Extensions; import com.intellij.openapi.util.Pair; import com.intellij.openapi.util.Ref; import com.intellij.openapi.util.text.StringUtil; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.psi.PsiElement; import com.intellij.psi.PsiReference; import com.intellij.psi.StubBasedPsiElement; import com.intellij.psi.search.LocalSearchScope; import com.intellij.psi.search.SearchScope; import com.intellij.psi.stubs.IStubElementType; import com.intellij.psi.stubs.StubElement; import com.intellij.psi.util.CachedValueProvider; import com.intellij.psi.util.CachedValuesManager; import com.intellij.psi.util.PsiTreeUtil; import com.intellij.psi.util.QualifiedName; import com.intellij.util.IncorrectOperationException; import com.intellij.util.PlatformIcons; import com.jetbrains.python.PyElementTypes; import com.jetbrains.python.PyNames; import com.jetbrains.python.PyTokenTypes; import com.jetbrains.python.codeInsight.controlflow.ControlFlowCache; import com.jetbrains.python.codeInsight.controlflow.ScopeOwner; import com.jetbrains.python.codeInsight.dataflow.scope.ScopeUtil; import com.jetbrains.python.documentation.docstrings.DocStringUtil; import com.jetbrains.python.psi.*; import com.jetbrains.python.psi.resolve.PyResolveContext; 
import com.jetbrains.python.psi.resolve.QualifiedNameFinder; import com.jetbrains.python.psi.stubs.PyClassStub; import com.jetbrains.python.psi.stubs.PyFunctionStub; import com.jetbrains.python.psi.stubs.PyTargetExpressionStub; import com.jetbrains.python.psi.types.*; import com.jetbrains.python.sdk.PythonSdkType; import icons.PythonIcons; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.swing.*; import java.util.*; import static com.intellij.openapi.util.text.StringUtil.notNullize; import static com.jetbrains.python.psi.PyFunction.Modifier.CLASSMETHOD; import static com.jetbrains.python.psi.PyFunction.Modifier.STATICMETHOD; import static com.jetbrains.python.psi.impl.PyCallExpressionHelper.interpretAsModifierWrappingCall; /** * Implements PyFunction. */ public class PyFunctionImpl extends PyBaseElementImpl<PyFunctionStub> implements PyFunction { public PyFunctionImpl(ASTNode astNode) { super(astNode); } public PyFunctionImpl(final PyFunctionStub stub) { this(stub, PyElementTypes.FUNCTION_DECLARATION); } public PyFunctionImpl(PyFunctionStub stub, IStubElementType nodeType) { super(stub, nodeType); } private class CachedStructuredDocStringProvider implements CachedValueProvider<StructuredDocString> { @Nullable @Override public Result<StructuredDocString> compute() { final PyFunctionImpl f = PyFunctionImpl.this; return Result.create(DocStringUtil.getStructuredDocString(f), f); } } private CachedStructuredDocStringProvider myCachedStructuredDocStringProvider = new CachedStructuredDocStringProvider(); @Nullable @Override public String getName() { final PyFunctionStub stub = getStub(); if (stub != null) { return stub.getName(); } ASTNode node = getNameNode(); return node != null ? node.getText() : null; } public PsiElement getNameIdentifier() { final ASTNode nameNode = getNameNode(); return nameNode != null ? 
nameNode.getPsi() : null; } public PsiElement setName(@NotNull String name) throws IncorrectOperationException { final ASTNode nameElement = PyUtil.createNewName(this, name); final ASTNode nameNode = getNameNode(); if (nameNode != null) { getNode().replaceChild(nameNode, nameElement); } return this; } @Override public Icon getIcon(int flags) { if (isValid()) { final Property property = getProperty(); if (property != null) { if (property.getGetter().valueOrNull() == this) { return PythonIcons.Python.PropertyGetter; } if (property.getSetter().valueOrNull() == this) { return PythonIcons.Python.PropertySetter; } if (property.getDeleter().valueOrNull() == this) { return PythonIcons.Python.PropertyDeleter; } return PlatformIcons.PROPERTY_ICON; } if (getContainingClass() != null) { return PlatformIcons.METHOD_ICON; } } return PythonIcons.Python.Function; } @Nullable public ASTNode getNameNode() { return getNode().findChildByType(PyTokenTypes.IDENTIFIER); } @NotNull public PyParameterList getParameterList() { return getRequiredStubOrPsiChild(PyElementTypes.PARAMETER_LIST); } @Override @NotNull public PyStatementList getStatementList() { final PyStatementList statementList = childToPsi(PyElementTypes.STATEMENT_LIST); assert statementList != null : "Statement list missing for function " + getText(); return statementList; } public PyClass getContainingClass() { final PyFunctionStub stub = getStub(); if (stub != null) { final StubElement parentStub = stub.getParentStub(); if (parentStub instanceof PyClassStub) { return ((PyClassStub)parentStub).getPsi(); } return null; } final PsiElement parent = PsiTreeUtil.getParentOfType(this, StubBasedPsiElement.class); if (parent instanceof PyClass) { return (PyClass)parent; } return null; } @Nullable public PyDecoratorList getDecoratorList() { return getStubOrPsiChild(PyElementTypes.DECORATOR_LIST); // PsiTreeUtil.getChildOfType(this, PyDecoratorList.class); } @Nullable @Override public PyType getReturnType(@NotNull TypeEvalContext 
context, @NotNull TypeEvalContext.Key key) { final PyType type = getReturnType(context); return isAsync() ? createCoroutineType(type) : type; } @Nullable private PyType getReturnType(@NotNull TypeEvalContext context) { for (PyTypeProvider typeProvider : Extensions.getExtensions(PyTypeProvider.EP_NAME)) { final Ref<PyType> returnTypeRef = typeProvider.getReturnType(this, context); if (returnTypeRef != null) { final PyType returnType = returnTypeRef.get(); if (returnType != null) { returnType.assertValid(typeProvider.toString()); } return returnType; } } final PyType docStringType = getReturnTypeFromDocString(); if (docStringType != null) { docStringType.assertValid("from docstring"); return docStringType; } if (context.allowReturnTypes(this)) { final Ref<? extends PyType> yieldTypeRef = getYieldStatementType(context); if (yieldTypeRef != null) { return yieldTypeRef.get(); } return getReturnStatementType(context); } return null; } @Nullable @Override public PyType getCallType(@NotNull TypeEvalContext context, @NotNull PyCallSiteExpression callSite) { for (PyTypeProvider typeProvider : Extensions.getExtensions(PyTypeProvider.EP_NAME)) { final PyType type = typeProvider.getCallType(this, callSite, context); if (type != null) { type.assertValid(typeProvider.toString()); return type; } } final PyExpression receiver = PyTypeChecker.getReceiver(callSite, this); final List<PyExpression> arguments = PyTypeChecker.getArguments(callSite, this); final List<PyParameter> parameters = PyUtil.getParameters(this, context); final PyResolveContext resolveContext = PyResolveContext.noImplicits().withTypeEvalContext(context); final List<PyParameter> explicitParameters = PyTypeChecker.filterExplicitParameters(parameters, this, callSite, resolveContext); final Map<PyExpression, PyNamedParameter> mapping = PyCallExpressionHelper.mapArguments(arguments, explicitParameters); return getCallType(receiver, mapping, context); } @Nullable @Override public PyType getCallType(@Nullable PyExpression 
receiver, @NotNull Map<PyExpression, PyNamedParameter> parameters, @NotNull TypeEvalContext context) { return analyzeCallType(context.getReturnType(this), receiver, parameters, context); } @Nullable private PyType analyzeCallType(@Nullable PyType type, @Nullable PyExpression receiver, @NotNull Map<PyExpression, PyNamedParameter> parameters, @NotNull TypeEvalContext context) { if (PyTypeChecker.hasGenerics(type, context)) { final Map<PyGenericType, PyType> substitutions = PyTypeChecker.unifyGenericCall(receiver, parameters, context); if (substitutions != null) { type = PyTypeChecker.substitute(type, substitutions, context); } else { type = null; } } if (receiver != null) { type = replaceSelf(type, receiver, context); } if (type != null && isDynamicallyEvaluated(parameters.values(), context)) { type = PyUnionType.createWeakType(type); } return type; } @Override public ItemPresentation getPresentation() { return new PyElementPresentation(this) { @Nullable @Override public String getPresentableText() { return notNullize(getName(), PyNames.UNNAMED_ELEMENT) + getParameterList().getPresentableText(true); } @Nullable @Override public String getLocationString() { final PyClass containingClass = getContainingClass(); if (containingClass != null) { return "(" + containingClass.getName() + " in " + getPackageForFile(getContainingFile()) + ")"; } return super.getLocationString(); } }; } @Nullable private PyType replaceSelf(@Nullable PyType returnType, @Nullable PyExpression receiver, @NotNull TypeEvalContext context) { if (receiver != null) { // TODO: Currently we substitute only simple subclass types, but we could handle union and collection types as well if (returnType instanceof PyClassType) { final PyClassType returnClassType = (PyClassType)returnType; if (returnClassType.getPyClass() == getContainingClass()) { final PyType receiverType = context.getType(receiver); if (receiverType instanceof PyClassType && PyTypeChecker.match(returnType, receiverType, context)) { return 
returnClassType.isDefinition() ? receiverType : ((PyClassType)receiverType).toInstance(); } } } } return returnType; } private static boolean isDynamicallyEvaluated(@NotNull Collection<PyNamedParameter> parameters, @NotNull TypeEvalContext context) { for (PyNamedParameter parameter : parameters) { final PyType type = context.getType(parameter); if (type instanceof PyDynamicallyEvaluatedType) { return true; } } return false; } @Nullable private Ref<? extends PyType> getYieldStatementType(@NotNull final TypeEvalContext context) { Ref<PyType> elementType = null; final PyBuiltinCache cache = PyBuiltinCache.getInstance(this); final PyStatementList statements = getStatementList(); final Set<PyType> types = new LinkedHashSet<PyType>(); statements.accept(new PyRecursiveElementVisitor() { @Override public void visitPyYieldExpression(PyYieldExpression node) { final PyType type = context.getType(node); if (node.isDelegating() && type instanceof PyCollectionType) { final PyCollectionType collectionType = (PyCollectionType)type; // TODO: Select the parameter types that matches T in Iterable[T] final List<PyType> elementTypes = collectionType.getElementTypes(context); types.add(elementTypes.isEmpty() ? 
null : elementTypes.get(0)); } else { types.add(type); } } @Override public void visitPyFunction(PyFunction node) { // Ignore nested functions } }); final int n = types.size(); if (n == 1) { elementType = Ref.create(types.iterator().next()); } else if (n > 0) { elementType = Ref.create(PyUnionType.union(types)); } if (elementType != null) { final PyClass generator = cache.getClass(PyNames.FAKE_GENERATOR); if (generator != null) { final List<PyType> parameters = Arrays.asList(elementType.get(), null, getReturnStatementType(context)); return Ref.create(new PyCollectionTypeImpl(generator, false, parameters)); } } if (!types.isEmpty()) { return Ref.create(null); } return null; } @Nullable public PyType getReturnStatementType(TypeEvalContext typeEvalContext) { final ReturnVisitor visitor = new ReturnVisitor(this, typeEvalContext); final PyStatementList statements = getStatementList(); statements.accept(visitor); if (isGeneratedStub() && !visitor.myHasReturns) { if (PyNames.INIT.equals(getName())) { return PyNoneType.INSTANCE; } return null; } return visitor.result(); } @Nullable private PyType createCoroutineType(@Nullable PyType returnType) { final PyBuiltinCache cache = PyBuiltinCache.getInstance(this); if (returnType instanceof PyClassLikeType && PyNames.FAKE_COROUTINE.equals(((PyClassLikeType)returnType).getClassQName())) { return returnType; } final PyClass generator = cache.getClass(PyNames.FAKE_COROUTINE); return generator != null ? new PyCollectionTypeImpl(generator, false, Collections.singletonList(returnType)) : null; } public PyFunction asMethod() { if (getContainingClass() != null) { return this; } else { return null; } } @Nullable @Override public PyType getReturnTypeFromDocString() { final String typeName = extractReturnType(); return typeName != null ? 
PyTypeParser.getTypeByName(this, typeName) : null; } @Nullable @Override public String getDeprecationMessage() { PyFunctionStub stub = getStub(); if (stub != null) { return stub.getDeprecationMessage(); } return extractDeprecationMessage(); } @Nullable public String extractDeprecationMessage() { PyStatementList statementList = getStatementList(); return extractDeprecationMessage(Arrays.asList(statementList.getStatements())); } @Override public PyType getType(@NotNull TypeEvalContext context, @NotNull TypeEvalContext.Key key) { for (PyTypeProvider provider : Extensions.getExtensions(PyTypeProvider.EP_NAME)) { final PyType type = provider.getCallableType(this, context); if (type != null) { return type; } } final PyFunctionTypeImpl type = new PyFunctionTypeImpl(this); if (PyKnownDecoratorUtil.hasUnknownDecorator(this, context) && getProperty() == null) { return PyUnionType.createWeakType(type); } return type; } @Nullable public static String extractDeprecationMessage(List<PyStatement> statements) { for (PyStatement statement : statements) { if (statement instanceof PyExpressionStatement) { PyExpressionStatement expressionStatement = (PyExpressionStatement)statement; if (expressionStatement.getExpression() instanceof PyCallExpression) { PyCallExpression callExpression = (PyCallExpression)expressionStatement.getExpression(); if (callExpression.isCalleeText(PyNames.WARN)) { PyReferenceExpression warningClass = callExpression.getArgument(1, PyReferenceExpression.class); if (warningClass != null && (PyNames.DEPRECATION_WARNING.equals(warningClass.getReferencedName()) || PyNames.PENDING_DEPRECATION_WARNING.equals(warningClass.getReferencedName()))) { return PyPsiUtils.strValue(callExpression.getArguments()[0]); } } } } } return null; } @Override public String getDocStringValue() { final PyFunctionStub stub = getStub(); if (stub != null) { return stub.getDocString(); } return DocStringUtil.getDocStringValue(this); } @Nullable @Override public StructuredDocString 
getStructuredDocString() { return CachedValuesManager.getCachedValue(this, myCachedStructuredDocStringProvider); } private boolean isGeneratedStub() { VirtualFile vFile = getContainingFile().getVirtualFile(); if (vFile != null) { vFile = vFile.getParent(); if (vFile != null) { vFile = vFile.getParent(); if (vFile != null && vFile.getName().equals(PythonSdkType.SKELETON_DIR_NAME)) { return true; } } } return false; } @Nullable private String extractReturnType() { final String ARROW = "->"; final StructuredDocString structuredDocString = getStructuredDocString(); if (structuredDocString != null) { return structuredDocString.getReturnType(); } final String docString = getDocStringValue(); if (docString != null && docString.contains(ARROW)) { final List<String> lines = StringUtil.split(docString, "\n"); while (lines.size() > 0 && lines.get(0).trim().length() == 0) { lines.remove(0); } if (lines.size() > 1 && lines.get(1).trim().length() == 0) { String firstLine = lines.get(0); int pos = firstLine.lastIndexOf(ARROW); if (pos >= 0) { return firstLine.substring(pos + 2).trim(); } } } return null; } private static class ReturnVisitor extends PyRecursiveElementVisitor { private final PyFunction myFunction; private final TypeEvalContext myContext; private PyType myResult = null; private boolean myHasReturns = false; private boolean myHasRaises = false; public ReturnVisitor(PyFunction function, final TypeEvalContext context) { myFunction = function; myContext = context; } @Override public void visitPyReturnStatement(PyReturnStatement node) { if (PsiTreeUtil.getParentOfType(node, ScopeOwner.class, true) == myFunction) { final PyExpression expr = node.getExpression(); PyType returnType; returnType = expr == null ? 
PyNoneType.INSTANCE : myContext.getType(expr); if (!myHasReturns) { myResult = returnType; myHasReturns = true; } else { myResult = PyUnionType.union(myResult, returnType); } } } @Override public void visitPyRaiseStatement(PyRaiseStatement node) { myHasRaises = true; } @Nullable PyType result() { return myHasReturns || myHasRaises ? myResult : PyNoneType.INSTANCE; } } @Override protected void acceptPyVisitor(PyElementVisitor pyVisitor) { pyVisitor.visitPyFunction(this); } public int getTextOffset() { final ASTNode name = getNameNode(); return name != null ? name.getStartOffset() : super.getTextOffset(); } public PyStringLiteralExpression getDocStringExpression() { final PyStatementList stmtList = getStatementList(); return DocStringUtil.findDocStringExpression(stmtList); } @NotNull public Iterable<PyElement> iterateNames() { return Collections.<PyElement>singleton(this); } public PyElement getElementNamed(final String the_name) { return the_name.equals(getName()) ? this : null; } public boolean mustResolveOutside() { return false; } @Override public String toString() { return super.toString() + "('" + getName() + "')"; } public void subtreeChanged() { super.subtreeChanged(); ControlFlowCache.clear(this); } public Property getProperty() { final PyClass containingClass = getContainingClass(); if (containingClass != null) { return containingClass.findPropertyByCallable(this); } return null; } @Override public PyAnnotation getAnnotation() { return getStubOrPsiChild(PyElementTypes.ANNOTATION); } @NotNull @Override public SearchScope getUseScope() { final ScopeOwner scopeOwner = ScopeUtil.getScopeOwner(this); if (scopeOwner instanceof PyFunction) { return new LocalSearchScope(scopeOwner); } return super.getUseScope(); } /** * Looks for two standard decorators to a function, or a wrapping assignment that closely follows it. * * @return a flag describing what was detected. 
*/ @Nullable public Modifier getModifier() { String deconame = getClassOrStaticMethodDecorator(); if (PyNames.CLASSMETHOD.equals(deconame)) { return CLASSMETHOD; } else if (PyNames.STATICMETHOD.equals(deconame)) { return STATICMETHOD; } // implicit staticmethod __new__ PyClass cls = getContainingClass(); if (cls != null && PyNames.NEW.equals(getName()) && cls.isNewStyleClass(null)) { return STATICMETHOD; } // if (getStub() != null) { return getWrappersFromStub(); } String func_name = getName(); if (func_name != null) { PyAssignmentStatement assignment = PsiTreeUtil.getNextSiblingOfType(this, PyAssignmentStatement.class); if (assignment != null) { for (Pair<PyExpression, PyExpression> pair : assignment.getTargetsToValuesMapping()) { PyExpression value = pair.getSecond(); if (value instanceof PyCallExpression) { PyExpression target = pair.getFirst(); if (target instanceof PyTargetExpression && func_name.equals(target.getName())) { Pair<String, PyFunction> interpreted = interpretAsModifierWrappingCall((PyCallExpression)value, this); if (interpreted != null) { PyFunction original = interpreted.getSecond(); if (original == this) { String wrapper_name = interpreted.getFirst(); if (PyNames.CLASSMETHOD.equals(wrapper_name)) { return CLASSMETHOD; } else if (PyNames.STATICMETHOD.equals(wrapper_name)) { return STATICMETHOD; } } } } } } } } return null; } @Override public boolean isAsync() { final PyFunctionStub stub = getStub(); if (stub != null) { return stub.isAsync(); } return getNode().findChildByType(PyTokenTypes.ASYNC_KEYWORD) != null; } @Nullable private Modifier getWrappersFromStub() { final StubElement parentStub = getStub().getParentStub(); final List childrenStubs = parentStub.getChildrenStubs(); int index = childrenStubs.indexOf(getStub()); if (index >= 0 && index < childrenStubs.size() - 1) { StubElement nextStub = (StubElement)childrenStubs.get(index + 1); if (nextStub instanceof PyTargetExpressionStub) { final PyTargetExpressionStub targetExpressionStub = 
(PyTargetExpressionStub)nextStub; if (targetExpressionStub.getInitializerType() == PyTargetExpressionStub.InitializerType.CallExpression) { final QualifiedName qualifiedName = targetExpressionStub.getInitializer(); if (QualifiedName.fromComponents(PyNames.CLASSMETHOD).equals(qualifiedName)) { return CLASSMETHOD; } if (QualifiedName.fromComponents(PyNames.STATICMETHOD).equals(qualifiedName)) { return STATICMETHOD; } } } } return null; } /** * When a function is decorated many decorators, finds the deepest builtin decorator: * <pre> * &#x40;foo * &#x40;classmethod <b># &lt;-- that's it</b> * &#x40;bar * def moo(cls): * &nbsp;&nbsp;pass * </pre> * * @return name of the built-in decorator, or null (even if there are non-built-in decorators). */ @Nullable private String getClassOrStaticMethodDecorator() { PyDecoratorList decolist = getDecoratorList(); if (decolist != null) { PyDecorator[] decos = decolist.getDecorators(); if (decos.length > 0) { for (int i = decos.length - 1; i >= 0; i -= 1) { PyDecorator deco = decos[i]; String deconame = deco.getName(); if (PyNames.CLASSMETHOD.equals(deconame) || PyNames.STATICMETHOD.equals(deconame)) { return deconame; } for (PyKnownDecoratorProvider provider : PyUtil.KnownDecoratorProviderHolder.KNOWN_DECORATOR_PROVIDERS) { String name = provider.toKnownDecorator(deconame); if (name != null) { return name; } } } } } return null; } @Nullable @Override public String getQualifiedName() { return QualifiedNameFinder.getQualifiedName(this); } @NotNull @Override public List<PyAssignmentStatement> findAttributes() { final List<PyAssignmentStatement> result = new ArrayList<PyAssignmentStatement>(); for (final PyAssignmentStatement statement : new PsiQuery(this).siblings(PyAssignmentStatement.class).getElements()) { for (final PyQualifiedExpression targetExpression : new PsiQuery(statement.getTargets()).filter(PyQualifiedExpression.class) .getElements()) { final PyExpression qualifier = targetExpression.getQualifier(); if (qualifier == null) 
{ continue; } final PsiReference qualifierReference = qualifier.getReference(); if (qualifierReference == null) { continue; } if (qualifierReference.isReferenceTo(this)) { result.add(statement); } } } return result; } @NotNull @Override public ProtectionLevel getProtectionLevel() { final int underscoreLevels = PyUtil.getInitialUnderscores(getName()); for (final ProtectionLevel level : ProtectionLevel.values()) { if (level.getUnderscoreLevel() == underscoreLevels) { return level; } } return ProtectionLevel.PRIVATE; } }
apache-2.0
GlenRSmith/elasticsearch
server/src/main/java/org/elasticsearch/common/settings/ConsistentSettingsService.java
13526
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License * 2.0 and the Server Side Public License, v 1; you may not use this file except * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ package org.elasticsearch.common.settings; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.LocalNodeMasterListener; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.hash.MessageDigests; import java.nio.charset.StandardCharsets; import java.security.NoSuchAlgorithmException; import java.security.spec.InvalidKeySpecException; import java.util.Arrays; import java.util.Base64; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import javax.crypto.SecretKey; import javax.crypto.SecretKeyFactory; import javax.crypto.spec.PBEKeySpec; /** * Used to publish secure setting hashes in the cluster state and to validate those hashes against the local values of those same settings. * This is colloquially referred to as the secure setting consistency check. It will publish and verify hashes only for the collection * of settings passed in the constructor. The settings have to have the {@link Setting.Property#Consistent} property. 
*/ public final class ConsistentSettingsService { private static final Logger logger = LogManager.getLogger(ConsistentSettingsService.class); private final Settings settings; private final ClusterService clusterService; private final Collection<Setting<?>> secureSettingsCollection; private final SecretKeyFactory pbkdf2KeyFactory; public ConsistentSettingsService(Settings settings, ClusterService clusterService, Collection<Setting<?>> secureSettingsCollection) { this.settings = settings; this.clusterService = clusterService; this.secureSettingsCollection = secureSettingsCollection; // this is used to compute the PBKDF2 hash (the published one) try { this.pbkdf2KeyFactory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512"); } catch (NoSuchAlgorithmException e) { throw new RuntimeException("The \"PBKDF2WithHmacSHA512\" algorithm is required for consistent secure settings' hashes", e); } } /** * Returns a {@link LocalNodeMasterListener} that will publish hashes of all the settings passed in the constructor. These hashes are * published by the master node only. Note that this is not designed for {@link SecureSettings} implementations that are mutable. */ public LocalNodeMasterListener newHashPublisher() { // eagerly compute hashes to be published final Map<String, String> computedHashesOfConsistentSettings = computeHashesOfConsistentSecureSettings(); return new HashesPublisher(computedHashesOfConsistentSettings, clusterService); } /** * Verifies that the hashes of consistent secure settings in the latest {@code ClusterState} verify for the values of those same * settings on the local node. The settings to be checked are passed in the constructor. Also, validates that a missing local * value is also missing in the published set, and vice-versa. 
*/ public boolean areAllConsistent() { final ClusterState state = clusterService.state(); final Map<String, String> publishedHashesOfConsistentSettings = state.metadata().hashesOfConsistentSettings(); final Set<String> publishedSettingKeysToVerify = new HashSet<>(); publishedSettingKeysToVerify.addAll(publishedHashesOfConsistentSettings.keySet()); final AtomicBoolean allConsistent = new AtomicBoolean(true); forEachConcreteSecureSettingDo(concreteSecureSetting -> { final String publishedSaltAndHash = publishedHashesOfConsistentSettings.get(concreteSecureSetting.getKey()); final byte[] localHash = concreteSecureSetting.getSecretDigest(settings); if (publishedSaltAndHash == null && localHash == null) { // consistency of missing logger.debug( "no published hash for the consistent secure setting [{}] but it also does NOT exist on the local node", concreteSecureSetting.getKey() ); } else if (publishedSaltAndHash == null && localHash != null) { // setting missing on master but present locally logger.warn( "no published hash for the consistent secure setting [{}] but it exists on the local node", concreteSecureSetting.getKey() ); if (state.nodes().isLocalNodeElectedMaster()) { throw new IllegalStateException( "Master node cannot validate consistent setting. No published hash for [" + concreteSecureSetting.getKey() + "] but setting exists." 
); } allConsistent.set(false); } else if (publishedSaltAndHash != null && localHash == null) { // setting missing locally but present on master logger.warn( "the consistent secure setting [{}] does not exist on the local node but there is a published hash for it", concreteSecureSetting.getKey() ); allConsistent.set(false); } else { assert publishedSaltAndHash != null; assert localHash != null; final String[] parts = publishedSaltAndHash.split(":"); if (parts == null || parts.length != 2) { throw new IllegalArgumentException( "published hash [" + publishedSaltAndHash + " ] for secure setting [" + concreteSecureSetting.getKey() + "] is invalid" ); } final String publishedSalt = parts[0]; final String publishedHash = parts[1]; final byte[] computedSaltedHashBytes = computeSaltedPBKDF2Hash(localHash, publishedSalt.getBytes(StandardCharsets.UTF_8)); final String computedSaltedHash = new String(Base64.getEncoder().encode(computedSaltedHashBytes), StandardCharsets.UTF_8); if (false == publishedHash.equals(computedSaltedHash)) { logger.warn( "the published hash [{}] of the consistent secure setting [{}] differs from the locally computed one [{}]", publishedHash, concreteSecureSetting.getKey(), computedSaltedHash ); if (state.nodes().isLocalNodeElectedMaster()) { throw new IllegalStateException( "Master node cannot validate consistent setting. The published hash [" + publishedHash + "] of the consistent secure setting [" + concreteSecureSetting.getKey() + "] differs from the locally computed one [" + computedSaltedHash + "]." 
); } allConsistent.set(false); } } publishedSettingKeysToVerify.remove(concreteSecureSetting.getKey()); }); // another case of settings missing locally, when group settings have not expanded to all the keys published for (String publishedSettingKey : publishedSettingKeysToVerify) { for (Setting<?> setting : secureSettingsCollection) { if (setting.match(publishedSettingKey)) { // setting missing locally but present on master logger.warn( "the consistent secure setting [{}] does not exist on the local node but there is a published hash for it", publishedSettingKey ); allConsistent.set(false); } } } return allConsistent.get(); } /** * Iterate over the passed in secure settings, expanding {@link Setting.AffixSetting} to concrete settings, in the scope of the local * settings. */ private void forEachConcreteSecureSettingDo(Consumer<SecureSetting<?>> secureSettingConsumer) { for (Setting<?> setting : secureSettingsCollection) { assert setting.isConsistent() : "[" + setting.getKey() + "] is not a consistent setting"; if (setting instanceof Setting.AffixSetting<?>) { ((Setting.AffixSetting<?>) setting).getAllConcreteSettings(settings).forEach(concreteSetting -> { assert concreteSetting instanceof SecureSetting<?> : "[" + concreteSetting.getKey() + "] is not a secure setting"; secureSettingConsumer.accept((SecureSetting<?>) concreteSetting); }); } else if (setting instanceof SecureSetting<?>) { secureSettingConsumer.accept((SecureSetting<?>) setting); } else { assert false : "Unrecognized consistent secure setting [" + setting.getKey() + "]"; } } } private Map<String, String> computeHashesOfConsistentSecureSettings() { final Map<String, String> hashesBySettingKey = new HashMap<>(); forEachConcreteSecureSettingDo(concreteSecureSetting -> { final byte[] localHash = concreteSecureSetting.getSecretDigest(settings); if (localHash != null) { final String salt = UUIDs.randomBase64UUID(); final byte[] publicHash = computeSaltedPBKDF2Hash(localHash, 
salt.getBytes(StandardCharsets.UTF_8)); final String encodedPublicHash = new String(Base64.getEncoder().encode(publicHash), StandardCharsets.UTF_8); hashesBySettingKey.put(concreteSecureSetting.getKey(), salt + ":" + encodedPublicHash); } }); return hashesBySettingKey; } private byte[] computeSaltedPBKDF2Hash(byte[] bytes, byte[] salt) { final int iterations = 5000; final int keyLength = 512; char[] value = null; try { value = MessageDigests.toHexCharArray(bytes); final PBEKeySpec spec = new PBEKeySpec(value, salt, iterations, keyLength); final SecretKey key = pbkdf2KeyFactory.generateSecret(spec); return key.getEncoded(); } catch (InvalidKeySpecException e) { throw new RuntimeException("Unexpected exception when computing PBKDF2 hash", e); } finally { if (value != null) { Arrays.fill(value, '0'); } } } static final class HashesPublisher implements LocalNodeMasterListener { // eagerly compute hashes to be published final Map<String, String> computedHashesOfConsistentSettings; final ClusterService clusterService; HashesPublisher(Map<String, String> computedHashesOfConsistentSettings, ClusterService clusterService) { this.computedHashesOfConsistentSettings = Map.copyOf(computedHashesOfConsistentSettings); this.clusterService = clusterService; } @Override public void onMaster() { clusterService.submitStateUpdateTask("publish-secure-settings-hashes", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { final Map<String, String> publishedHashesOfConsistentSettings = currentState.metadata().hashesOfConsistentSettings(); if (computedHashesOfConsistentSettings.equals(publishedHashesOfConsistentSettings)) { logger.debug("Nothing to publish. 
What is already published matches this node's view."); return currentState; } else { return ClusterState.builder(currentState) .metadata( Metadata.builder(currentState.metadata()).hashesOfConsistentSettings(computedHashesOfConsistentSettings) ) .build(); } } @Override public void onFailure(String source, Exception e) { logger.error("unable to publish secure settings hashes", e); } }); } @Override public void offMaster() { logger.trace("I am no longer master, nothing to do"); } } }
apache-2.0
google/llvm-propeller
compiler-rt/test/builtins/Unit/fixunssfsi_test.c
2325
// RUN: %clang_builtins %s %librt -o %t && %run %t // REQUIRES: librt_has_fixunssfsi #include "int_lib.h" #include <stdio.h> // Returns: convert a to a unsigned int, rounding toward zero. // Negative values all become zero. // Assumption: float is a IEEE 32 bit floating point type // su_int is a 32 bit integral type // value in float is representable in su_int or is negative // (no range checking performed) // seee eeee emmm mmmm mmmm mmmm mmmm mmmm COMPILER_RT_ABI su_int __fixunssfsi(float a); int test__fixunssfsi(float a, su_int expected) { su_int x = __fixunssfsi(a); if (x != expected) printf("error in __fixunssfsi(%A) = %X, expected %X\n", a, x, expected); return x != expected; } char assumption_2[sizeof(su_int)*CHAR_BIT == 32] = {0}; char assumption_3[sizeof(float)*CHAR_BIT == 32] = {0}; int main() { if (test__fixunssfsi(0.0F, 0)) return 1; if (test__fixunssfsi(0.5F, 0)) return 1; if (test__fixunssfsi(0.99F, 0)) return 1; if (test__fixunssfsi(1.0F, 1)) return 1; if (test__fixunssfsi(1.5F, 1)) return 1; if (test__fixunssfsi(1.99F, 1)) return 1; if (test__fixunssfsi(2.0F, 2)) return 1; if (test__fixunssfsi(2.01F, 2)) return 1; if (test__fixunssfsi(-0.5F, 0)) return 1; if (test__fixunssfsi(-0.99F, 0)) return 1; #if !TARGET_LIBGCC if (test__fixunssfsi(-1.0F, 0)) // libgcc ignores "returns 0 for negative input" spec return 1; if (test__fixunssfsi(-1.5F, 0)) return 1; if (test__fixunssfsi(-1.99F, 0)) return 1; if (test__fixunssfsi(-2.0F, 0)) return 1; if (test__fixunssfsi(-2.01F, 0)) return 1; #endif if (test__fixunssfsi(0x1.000000p+31F, 0x80000000)) return 1; if (test__fixunssfsi(0x1.000000p+32F, 0xFFFFFFFF)) return 1; if (test__fixunssfsi(0x1.FFFFFEp+31F, 0xFFFFFF00)) return 1; if (test__fixunssfsi(0x1.FFFFFEp+30F, 0x7FFFFF80)) return 1; if (test__fixunssfsi(0x1.FFFFFCp+30F, 0x7FFFFF00)) return 1; #if !TARGET_LIBGCC if (test__fixunssfsi(-0x1.FFFFFEp+30F, 0)) return 1; if (test__fixunssfsi(-0x1.FFFFFCp+30F, 0)) return 1; #endif return 0; }
apache-2.0
JesseQin/Open-XML-SDK
src/ofapi/Validation/SemanticValidation/SemanticConstraint/RelationshipExistConstraint.cs
1905
// Copyright (c) Microsoft Open Technologies, Inc. All rights reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using DocumentFormat.OpenXml.Validation;
using System.Diagnostics;
using System.Xml;

namespace DocumentFormat.OpenXml.Internal.SemanticValidation
{
    /// <summary>
    /// Semantic constraint verifying that the relationship id stored in a
    /// given attribute refers to a relationship that actually exists on the
    /// containing package part.
    /// </summary>
    internal class RelationshipExistConstraint : SemanticConstraint
    {
        // Index of the attribute that carries the relationship id (r:id).
        private byte _rIdAttribute;

        public RelationshipExistConstraint(byte rIdAttribute)
            : base(SemanticValidationLevel.Part)
        {
            _rIdAttribute = rIdAttribute;
        }

        /// <summary>
        /// Checks whether the relationship id held by the configured attribute
        /// exists on the current part's package part.
        /// </summary>
        /// <param name="context">Validation context providing the element under
        /// validation and the part it belongs to.</param>
        /// <returns>Null when the attribute is omitted/empty or the relationship
        /// exists; otherwise a semantic <see cref="ValidationErrorInfo"/>
        /// describing the invalid relationship id.</returns>
        public override ValidationErrorInfo Validate(ValidationContext context)
        {
            OpenXmlSimpleType attributeValue = context.Element.Attributes[_rIdAttribute];

            // If the attribute is omitted (or blank), semantic validation does nothing.
            if (attributeValue == null || string.IsNullOrEmpty(attributeValue.InnerText))
            {
                return null;
            }

            if (context.Part.PackagePart.RelationshipExists(attributeValue.InnerText))
            {
                return null;
            }
            else
            {
                // Build a localized description naming the bad id and the
                // qualified name of the attribute that held it.
                string errorDescription = string.Format(System.Globalization.CultureInfo.CurrentUICulture,
                                                        ValidationResources.Sem_InvalidRelationshipId,
                                                        attributeValue,
                                                        GetAttributeQualifiedName(context.Element, _rIdAttribute));

                return new ValidationErrorInfo()
                {
                    Id = "Sem_InvalidRelationshipId",
                    ErrorType = ValidationErrorType.Semantic,
                    Node = context.Element,
                    Description = errorDescription
                };
            }
        }
    }
}
apache-2.0
HybridF5/nova
nova/tests/unit/virt/test_block_device.py
45741
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_serialization import jsonutils import six from nova import block_device from nova import context from nova import exception from nova import objects from nova.objects import fields from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import matchers from nova.virt import block_device as driver_block_device from nova.virt import driver from nova.volume import cinder from nova.volume import encryptors class TestDriverBlockDevice(test.NoDBTestCase): driver_classes = { 'swap': driver_block_device.DriverSwapBlockDevice, 'ephemeral': driver_block_device.DriverEphemeralBlockDevice, 'volume': driver_block_device.DriverVolumeBlockDevice, 'snapshot': driver_block_device.DriverSnapshotBlockDevice, 'image': driver_block_device.DriverImageBlockDevice, 'blank': driver_block_device.DriverBlankBlockDevice } swap_bdm_dict = block_device.BlockDeviceDict( {'id': 1, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sdb1', 'source_type': 'blank', 'destination_type': 'local', 'delete_on_termination': True, 'guest_format': 'swap', 'disk_bus': 'scsi', 'volume_size': 2, 'boot_index': -1}) swap_driver_bdm = { 'device_name': '/dev/sdb1', 'swap_size': 2, 'disk_bus': 'scsi'} swap_legacy_driver_bdm = { 'device_name': '/dev/sdb1', 'swap_size': 2} ephemeral_bdm_dict = block_device.BlockDeviceDict( {'id': 2, 'instance_uuid': 
'fake-instance', 'device_name': '/dev/sdc1', 'source_type': 'blank', 'destination_type': 'local', 'disk_bus': 'scsi', 'device_type': 'disk', 'volume_size': 4, 'guest_format': 'ext4', 'delete_on_termination': True, 'boot_index': -1}) ephemeral_driver_bdm = { 'device_name': '/dev/sdc1', 'size': 4, 'device_type': 'disk', 'guest_format': 'ext4', 'disk_bus': 'scsi'} ephemeral_legacy_driver_bdm = { 'device_name': '/dev/sdc1', 'size': 4, 'virtual_name': 'ephemeral0', 'num': 0} volume_bdm_dict = block_device.BlockDeviceDict( {'id': 3, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda1', 'source_type': 'volume', 'disk_bus': 'scsi', 'device_type': 'disk', 'volume_size': 8, 'destination_type': 'volume', 'volume_id': 'fake-volume-id-1', 'guest_format': 'ext4', 'connection_info': '{"fake": "connection_info"}', 'delete_on_termination': False, 'boot_index': 0}) volume_driver_bdm = { 'mount_device': '/dev/sda1', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': False, 'disk_bus': 'scsi', 'device_type': 'disk', 'guest_format': 'ext4', 'boot_index': 0} volume_legacy_driver_bdm = { 'mount_device': '/dev/sda1', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': False} snapshot_bdm_dict = block_device.BlockDeviceDict( {'id': 4, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'delete_on_termination': True, 'volume_size': 3, 'disk_bus': 'scsi', 'device_type': 'disk', 'source_type': 'snapshot', 'destination_type': 'volume', 'connection_info': '{"fake": "connection_info"}', 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}) snapshot_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True, 'disk_bus': 'scsi', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1} snapshot_legacy_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True} image_bdm_dict 
= block_device.BlockDeviceDict( {'id': 5, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'delete_on_termination': True, 'volume_size': 1, 'disk_bus': 'scsi', 'device_type': 'disk', 'source_type': 'image', 'destination_type': 'volume', 'connection_info': '{"fake": "connection_info"}', 'image_id': 'fake-image-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}) image_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True, 'disk_bus': 'scsi', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1} image_legacy_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True} blank_bdm_dict = block_device.BlockDeviceDict( {'id': 6, 'instance_uuid': 'fake-instance', 'device_name': '/dev/sda2', 'delete_on_termination': True, 'volume_size': 3, 'disk_bus': 'scsi', 'device_type': 'disk', 'source_type': 'blank', 'destination_type': 'volume', 'connection_info': '{"fake": "connection_info"}', 'snapshot_id': 'fake-snapshot-id-1', 'volume_id': 'fake-volume-id-2', 'boot_index': -1}) blank_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True, 'disk_bus': 'scsi', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1} blank_legacy_driver_bdm = { 'mount_device': '/dev/sda2', 'connection_info': {"fake": "connection_info"}, 'delete_on_termination': True} def setUp(self): super(TestDriverBlockDevice, self).setUp() self.volume_api = self.mox.CreateMock(cinder.API) self.virt_driver = self.mox.CreateMock(driver.ComputeDriver) self.context = context.RequestContext('fake_user', 'fake_project') # create bdm objects for testing self.swap_bdm = fake_block_device.fake_bdm_object( self.context, self.swap_bdm_dict) self.ephemeral_bdm = fake_block_device.fake_bdm_object( self.context, self.ephemeral_bdm_dict) self.volume_bdm = fake_block_device.fake_bdm_object( 
self.context, self.volume_bdm_dict) self.snapshot_bdm = fake_block_device.fake_bdm_object( self.context, self.snapshot_bdm_dict) self.image_bdm = fake_block_device.fake_bdm_object( self.context, self.image_bdm_dict) self.blank_bdm = fake_block_device.fake_bdm_object( self.context, self.blank_bdm_dict) def test_no_device_raises(self): for name, cls in self.driver_classes.items(): bdm = fake_block_device.fake_bdm_object( self.context, {'no_device': True}) self.assertRaises(driver_block_device._NotTransformable, cls, bdm) def _test_driver_device(self, name): db_bdm = getattr(self, "%s_bdm" % name) test_bdm = self.driver_classes[name](db_bdm) self.assertThat(test_bdm, matchers.DictMatches( getattr(self, "%s_driver_bdm" % name))) for k, v in six.iteritems(db_bdm): field_val = getattr(test_bdm._bdm_obj, k) if isinstance(field_val, bool): v = bool(v) self.assertEqual(field_val, v) self.assertThat(test_bdm.legacy(), matchers.DictMatches( getattr(self, "%s_legacy_driver_bdm" % name))) # Test passthru attributes for passthru in test_bdm._proxy_as_attr: self.assertEqual(getattr(test_bdm, passthru), getattr(test_bdm._bdm_obj, passthru)) # Make sure that all others raise _invalidType for other_name, cls in six.iteritems(self.driver_classes): if other_name == name: continue self.assertRaises(driver_block_device._InvalidType, cls, getattr(self, '%s_bdm' % name)) # Test the save method with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock: for fld, alias in six.iteritems(test_bdm._update_on_save): # We can't set fake values on enums, like device_type, # so skip those. 
if not isinstance(test_bdm._bdm_obj.fields[fld], fields.BaseEnumField): test_bdm[alias or fld] = 'fake_changed_value' test_bdm.save() for fld, alias in six.iteritems(test_bdm._update_on_save): self.assertEqual(test_bdm[alias or fld], getattr(test_bdm._bdm_obj, fld)) save_mock.assert_called_once_with() def check_save(): self.assertEqual(set([]), test_bdm._bdm_obj.obj_what_changed()) # Test that nothing is set on the object if there are no actual changes test_bdm._bdm_obj.obj_reset_changes() with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock: save_mock.side_effect = check_save test_bdm.save() def _test_driver_default_size(self, name): size = 'swap_size' if name == 'swap' else 'size' no_size_bdm = getattr(self, "%s_bdm_dict" % name).copy() no_size_bdm['volume_size'] = None driver_bdm = self.driver_classes[name]( fake_block_device.fake_bdm_object(self.context, no_size_bdm)) self.assertEqual(driver_bdm[size], 0) del no_size_bdm['volume_size'] driver_bdm = self.driver_classes[name]( fake_block_device.fake_bdm_object(self.context, no_size_bdm)) self.assertEqual(driver_bdm[size], 0) def test_driver_swap_block_device(self): self._test_driver_device("swap") def test_driver_swap_default_size(self): self._test_driver_default_size('swap') def test_driver_ephemeral_block_device(self): self._test_driver_device("ephemeral") def test_driver_ephemeral_default_size(self): self._test_driver_default_size('ephemeral') def test_driver_volume_block_device(self): self._test_driver_device("volume") test_bdm = self.driver_classes['volume']( self.volume_bdm) self.assertEqual(test_bdm['connection_info'], jsonutils.loads(test_bdm._bdm_obj.connection_info)) self.assertEqual(test_bdm._bdm_obj.id, 3) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1') self.assertEqual(test_bdm.volume_size, 8) def test_driver_snapshot_block_device(self): self._test_driver_device("snapshot") test_bdm = self.driver_classes['snapshot']( self.snapshot_bdm) self.assertEqual(test_bdm._bdm_obj.id, 4) 
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1') self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') self.assertEqual(test_bdm.volume_size, 3) def test_driver_image_block_device(self): self._test_driver_device('image') test_bdm = self.driver_classes['image']( self.image_bdm) self.assertEqual(test_bdm._bdm_obj.id, 5) self.assertEqual(test_bdm.image_id, 'fake-image-id-1') self.assertEqual(test_bdm.volume_size, 1) def test_driver_image_block_device_destination_local(self): self._test_driver_device('image') bdm = self.image_bdm_dict.copy() bdm['destination_type'] = 'local' self.assertRaises(driver_block_device._InvalidType, self.driver_classes['image'], fake_block_device.fake_bdm_object(self.context, bdm)) def test_driver_blank_block_device(self): self._test_driver_device('blank') test_bdm = self.driver_classes['blank']( self.blank_bdm) self.assertEqual(6, test_bdm._bdm_obj.id) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) self.assertEqual(3, test_bdm.volume_size) def _test_call_wait_func(self, delete_on_termination, delete_fail=False): test_bdm = self.driver_classes['volume'](self.volume_bdm) test_bdm['delete_on_termination'] = delete_on_termination with mock.patch.object(self.volume_api, 'delete') as vol_delete: wait_func = mock.MagicMock() mock_exception = exception.VolumeNotCreated(volume_id='fake-id', seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception if delete_on_termination and delete_fail: vol_delete.side_effect = Exception() self.assertRaises(exception.VolumeNotCreated, test_bdm._call_wait_func, context=self.context, wait_func=wait_func, volume_api=self.volume_api, volume_id='fake-id') self.assertEqual(delete_on_termination, vol_delete.called) def test_call_wait_delete_volume(self): self._test_call_wait_func(True) def test_call_wait_delete_volume_fail(self): self._test_call_wait_func(True, True) def test_call_wait_no_delete_volume(self): self._test_call_wait_func(False) def _test_volume_attach(self, 
driver_bdm, bdm_dict, fake_volume, check_attach=True, fail_check_attach=False, driver_attach=False, fail_driver_attach=False, volume_attach=True, fail_volume_attach=False, access_mode='rw', availability_zone=None): elevated_context = self.context.elevated() self.stubs.Set(self.context, 'elevated', lambda: elevated_context) self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save') self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata') instance_detail = {'id': '123', 'uuid': 'fake_uuid', 'availability_zone': availability_zone} instance = fake_instance.fake_instance_obj(self.context, **instance_detail) connector = {'ip': 'fake_ip', 'host': 'fake_host'} connection_info = {'data': {'access_mode': access_mode}} expected_conn_info = {'data': {'access_mode': access_mode}, 'serial': fake_volume['id']} enc_data = {'fake': 'enc_data'} self.volume_api.get(self.context, fake_volume['id']).AndReturn(fake_volume) if check_attach: if not fail_check_attach: self.volume_api.check_attach(self.context, fake_volume, instance=instance).AndReturn(None) else: self.volume_api.check_attach(self.context, fake_volume, instance=instance).AndRaise( test.TestingException) driver_bdm._bdm_obj.save().AndReturn(None) return instance, expected_conn_info self.virt_driver.get_volume_connector(instance).AndReturn(connector) self.volume_api.initialize_connection( elevated_context, fake_volume['id'], connector).AndReturn(connection_info) if driver_attach: encryptors.get_encryption_metadata( elevated_context, self.volume_api, fake_volume['id'], connection_info).AndReturn(enc_data) if not fail_driver_attach: self.virt_driver.attach_volume( elevated_context, expected_conn_info, instance, bdm_dict['device_name'], disk_bus=bdm_dict['disk_bus'], device_type=bdm_dict['device_type'], encryption=enc_data).AndReturn(None) else: self.virt_driver.attach_volume( elevated_context, expected_conn_info, instance, bdm_dict['device_name'], disk_bus=bdm_dict['disk_bus'], device_type=bdm_dict['device_type'], 
encryption=enc_data).AndRaise(test.TestingException) self.volume_api.terminate_connection( elevated_context, fake_volume['id'], connector).AndReturn(None) driver_bdm._bdm_obj.save().AndReturn(None) return instance, expected_conn_info if volume_attach: driver_bdm._bdm_obj.save().AndReturn(None) if not fail_volume_attach: self.volume_api.attach(elevated_context, fake_volume['id'], 'fake_uuid', bdm_dict['device_name'], mode=access_mode).AndReturn(None) else: self.volume_api.attach(elevated_context, fake_volume['id'], 'fake_uuid', bdm_dict['device_name'], mode=access_mode).AndRaise( test.TestingException) if driver_attach: self.virt_driver.detach_volume( expected_conn_info, instance, bdm_dict['device_name'], encryption=enc_data).AndReturn(None) self.volume_api.terminate_connection( elevated_context, fake_volume['id'], connector).AndReturn(None) self.volume_api.detach(elevated_context, fake_volume['id']).AndReturn(None) driver_bdm._bdm_obj.save().AndReturn(None) return instance, expected_conn_info def test_volume_attach(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_ro(self): test_bdm = self.driver_classes['volume'](self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume, access_mode='ro') self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_update_size(self): test_bdm = self.driver_classes['volume'](self.volume_bdm) 
test_bdm.volume_size = None volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached', 'size': 42} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertEqual(expected_conn_info, test_bdm['connection_info']) self.assertEqual(42, test_bdm.volume_size) def test_volume_attach_check_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1'} instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, fail_check_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver) def test_volume_no_volume_attach(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume, check_attach=False, driver_attach=False) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=False, do_driver_attach=False) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_no_check_driver_attach(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, expected_conn_info = self._test_volume_attach( test_bdm, self.volume_bdm, volume, check_attach=False, driver_attach=True) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=False, do_driver_attach=True) self.assertThat(test_bdm['connection_info'], matchers.DictMatches(expected_conn_info)) def test_volume_attach_driver_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1'} 
instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, driver_attach=True, fail_driver_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver, do_driver_attach=True) def test_volume_attach_volume_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, driver_attach=True, fail_volume_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver, do_driver_attach=True) def test_volume_attach_no_driver_attach_volume_attach_fails(self): test_bdm = self.driver_classes['volume']( self.volume_bdm) volume = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} instance, _ = self._test_volume_attach( test_bdm, self.volume_bdm, volume, fail_volume_attach=True) self.mox.ReplayAll() self.assertRaises(test.TestingException, test_bdm.attach, self.context, instance, self.volume_api, self.virt_driver, do_driver_attach=False) def test_refresh_connection(self): test_bdm = self.driver_classes['snapshot']( self.snapshot_bdm) instance = {'id': 'fake_id', 'uuid': 'fake_uuid'} connector = {'ip': 'fake_ip', 'host': 'fake_host'} connection_info = {'data': {'multipath_id': 'fake_multipath_id'}} expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'}, 'serial': 'fake-volume-id-2'} self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save') self.virt_driver.get_volume_connector(instance).AndReturn(connector) self.volume_api.initialize_connection( self.context, test_bdm.volume_id, connector).AndReturn(connection_info) test_bdm._bdm_obj.save().AndReturn(None) self.mox.ReplayAll() test_bdm.refresh_connection_info(self.context, instance, self.volume_api, self.virt_driver) self.assertThat(test_bdm['connection_info'], 
matchers.DictMatches(expected_conn_info)) def test_snapshot_attach_no_volume(self): no_volume_snapshot = self.snapshot_bdm_dict.copy() no_volume_snapshot['volume_id'] = None test_bdm = self.driver_classes['snapshot']( fake_block_device.fake_bdm_object( self.context, no_volume_snapshot)) snapshot = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.get_snapshot(self.context, 'fake-snapshot-id-1').AndReturn(snapshot) self.volume_api.create(self.context, 3, '', '', snapshot, availability_zone=None).AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( test_bdm, no_volume_snapshot, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_snapshot_attach_no_volume_cinder_cross_az_attach_false(self): # Tests that the volume created from the snapshot has the same AZ as # the instance. 
self.flags(cross_az_attach=False, group='cinder') no_volume_snapshot = self.snapshot_bdm_dict.copy() no_volume_snapshot['volume_id'] = None test_bdm = self.driver_classes['snapshot']( fake_block_device.fake_bdm_object( self.context, no_volume_snapshot)) snapshot = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.get_snapshot(self.context, 'fake-snapshot-id-1').AndReturn(snapshot) self.volume_api.create(self.context, 3, '', '', snapshot, availability_zone='test-az').AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( test_bdm, no_volume_snapshot, volume, availability_zone='test-az') self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_snapshot_attach_fail_volume(self): fail_volume_snapshot = self.snapshot_bdm_dict.copy() fail_volume_snapshot['volume_id'] = None test_bdm = self.driver_classes['snapshot']( fake_block_device.fake_bdm_object( self.context, fail_volume_snapshot)) snapshot = {'id': 'fake-volume-id-1', 'attach_status': 'detached'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) with test.nested( mock.patch.object(self.volume_api, 'get_snapshot', return_value=snapshot), mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(self.volume_api, 'delete'), ) as (vol_get_snap, vol_create, vol_delete): wait_func = mock.MagicMock() mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception self.assertRaises(exception.VolumeNotCreated, test_bdm.attach, context=self.context, instance=instance, 
volume_api=self.volume_api, virt_driver=self.virt_driver, wait_func=wait_func) vol_get_snap.assert_called_once_with( self.context, 'fake-snapshot-id-1') vol_create.assert_called_once_with( self.context, 3, '', '', snapshot, availability_zone=None) vol_delete.assert_called_once_with(self.context, volume['id']) def test_snapshot_attach_volume(self): test_bdm = self.driver_classes['snapshot']( self.snapshot_bdm) instance = {'id': 'fake_id', 'uuid': 'fake_uuid'} volume_class = self.driver_classes['volume'] self.mox.StubOutWithMock(volume_class, 'attach') # Make sure theses are not called self.mox.StubOutWithMock(self.volume_api, 'get_snapshot') self.mox.StubOutWithMock(self.volume_api, 'create') volume_class.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True ).AndReturn(None) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_image_attach_no_volume(self): no_volume_image = self.image_bdm_dict.copy() no_volume_image['volume_id'] = None test_bdm = self.driver_classes['image']( fake_block_device.fake_bdm_object( self.context, no_volume_image)) image = {'id': 'fake-image-id-1'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.create(self.context, 1, '', '', image_id=image['id'], availability_zone=None).AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( test_bdm, no_volume_image, volume) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_image_attach_no_volume_cinder_cross_az_attach_false(self): # Tests that the volume created from the image has the same AZ as the # instance. 
self.flags(cross_az_attach=False, group='cinder') no_volume_image = self.image_bdm_dict.copy() no_volume_image['volume_id'] = None test_bdm = self.driver_classes['image']( fake_block_device.fake_bdm_object( self.context, no_volume_image)) image = {'id': 'fake-image-id-1'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} wait_func = self.mox.CreateMockAnything() self.volume_api.create(self.context, 1, '', '', image_id=image['id'], availability_zone='test-az').AndReturn(volume) wait_func(self.context, 'fake-volume-id-2').AndReturn(None) instance, expected_conn_info = self._test_volume_attach( test_bdm, no_volume_image, volume, availability_zone='test-az') self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver, wait_func) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_image_attach_fail_volume(self): fail_volume_image = self.image_bdm_dict.copy() fail_volume_image['volume_id'] = None test_bdm = self.driver_classes['image']( fake_block_device.fake_bdm_object( self.context, fail_volume_image)) image = {'id': 'fake-image-id-1'} volume = {'id': 'fake-volume-id-2', 'attach_status': 'detached'} instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) with test.nested( mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(self.volume_api, 'delete'), ) as (vol_create, vol_delete): wait_func = mock.MagicMock() mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception self.assertRaises(exception.VolumeNotCreated, test_bdm.attach, context=self.context, instance=instance, volume_api=self.volume_api, virt_driver=self.virt_driver, wait_func=wait_func) vol_create.assert_called_once_with( self.context, 1, '', '', image_id=image['id'], availability_zone=None) vol_delete.assert_called_once_with(self.context, volume['id']) def test_image_attach_volume(self): 
test_bdm = self.driver_classes['image']( self.image_bdm) instance = {'id': 'fake_id', 'uuid': 'fake_uuid'} volume_class = self.driver_classes['volume'] self.mox.StubOutWithMock(volume_class, 'attach') # Make sure theses are not called self.mox.StubOutWithMock(self.volume_api, 'get_snapshot') self.mox.StubOutWithMock(self.volume_api, 'create') volume_class.attach(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True ).AndReturn(None) self.mox.ReplayAll() test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2') def test_blank_attach_fail_volume(self): no_blank_volume = self.blank_bdm_dict.copy() no_blank_volume['volume_id'] = None test_bdm = self.driver_classes['blank']( fake_block_device.fake_bdm_object( self.context, no_blank_volume)) instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) volume = {'id': 'fake-volume-id-2', 'display_name': 'fake-uuid-blank-vol'} with test.nested( mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(self.volume_api, 'delete'), ) as (vol_create, vol_delete): wait_func = mock.MagicMock() mock_exception = exception.VolumeNotCreated(volume_id=volume['id'], seconds=1, attempts=1, volume_status='error') wait_func.side_effect = mock_exception self.assertRaises(exception.VolumeNotCreated, test_bdm.attach, context=self.context, instance=instance, volume_api=self.volume_api, virt_driver=self.virt_driver, wait_func=wait_func) vol_create.assert_called_once_with( self.context, test_bdm.volume_size, 'fake-uuid-blank-vol', '', availability_zone=None) vol_delete.assert_called_once_with( self.context, volume['id']) def test_blank_attach_volume(self): no_blank_volume = self.blank_bdm_dict.copy() no_blank_volume['volume_id'] = None test_bdm = self.driver_classes['blank']( fake_block_device.fake_bdm_object( self.context, no_blank_volume)) instance = 
fake_instance.fake_instance_obj(mock.sentinel.ctx, **{'uuid': 'fake-uuid'}) volume_class = self.driver_classes['volume'] volume = {'id': 'fake-volume-id-2', 'display_name': 'fake-uuid-blank-vol'} with test.nested( mock.patch.object(self.volume_api, 'create', return_value=volume), mock.patch.object(volume_class, 'attach') ) as (vol_create, vol_attach): test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) vol_create.assert_called_once_with( self.context, test_bdm.volume_size, 'fake-uuid-blank-vol', '', availability_zone=None) vol_attach.assert_called_once_with(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_blank_attach_volume_cinder_cross_az_attach_false(self): # Tests that the blank volume created is in the same availability zone # as the instance. self.flags(cross_az_attach=False, group='cinder') no_blank_volume = self.blank_bdm_dict.copy() no_blank_volume['volume_id'] = None test_bdm = self.driver_classes['blank']( fake_block_device.fake_bdm_object( self.context, no_blank_volume)) updates = {'uuid': 'fake-uuid', 'availability_zone': 'test-az'} instance = fake_instance.fake_instance_obj(mock.sentinel.ctx, **updates) volume_class = self.driver_classes['volume'] volume = {'id': 'fake-volume-id-2', 'display_name': 'fake-uuid-blank-vol'} with mock.patch.object(self.volume_api, 'create', return_value=volume) as vol_create: with mock.patch.object(volume_class, 'attach') as vol_attach: test_bdm.attach(self.context, instance, self.volume_api, self.virt_driver) vol_create.assert_called_once_with( self.context, test_bdm.volume_size, 'fake-uuid-blank-vol', '', availability_zone='test-az') vol_attach.assert_called_once_with(self.context, instance, self.volume_api, self.virt_driver, do_check_attach=True) self.assertEqual('fake-volume-id-2', test_bdm.volume_id) def test_convert_block_devices(self): bdms = objects.BlockDeviceMappingList( 
objects=[self.volume_bdm, self.ephemeral_bdm]) converted = driver_block_device._convert_block_devices( self.driver_classes['volume'], bdms) self.assertEqual(converted, [self.volume_driver_bdm]) def test_convert_all_volumes(self): converted = driver_block_device.convert_all_volumes() self.assertEqual([], converted) converted = driver_block_device.convert_all_volumes( self.volume_bdm, self.ephemeral_bdm, self.image_bdm, self.blank_bdm, self.snapshot_bdm) self.assertEqual(converted, [self.volume_driver_bdm, self.image_driver_bdm, self.blank_driver_bdm, self.snapshot_driver_bdm]) def test_convert_volume(self): self.assertIsNone(driver_block_device.convert_volume(self.swap_bdm)) self.assertEqual(self.volume_driver_bdm, driver_block_device.convert_volume(self.volume_bdm)) self.assertEqual(self.snapshot_driver_bdm, driver_block_device.convert_volume(self.snapshot_bdm)) def test_legacy_block_devices(self): test_snapshot = self.driver_classes['snapshot']( self.snapshot_bdm) block_device_mapping = [test_snapshot, test_snapshot] legacy_bdm = driver_block_device.legacy_block_devices( block_device_mapping) self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm, self.snapshot_legacy_driver_bdm]) # Test that the ephemerals work as expected test_ephemerals = [self.driver_classes['ephemeral']( self.ephemeral_bdm) for _ in range(2)] expected = [self.ephemeral_legacy_driver_bdm.copy() for _ in range(2)] expected[0]['virtual_name'] = 'ephemeral0' expected[0]['num'] = 0 expected[1]['virtual_name'] = 'ephemeral1' expected[1]['num'] = 1 legacy_ephemerals = driver_block_device.legacy_block_devices( test_ephemerals) self.assertEqual(expected, legacy_ephemerals) def test_get_swap(self): swap = [self.swap_driver_bdm] legacy_swap = [self.swap_legacy_driver_bdm] no_swap = [self.volume_driver_bdm] self.assertEqual(swap[0], driver_block_device.get_swap(swap)) self.assertEqual(legacy_swap[0], driver_block_device.get_swap(legacy_swap)) 
self.assertIsNone(driver_block_device.get_swap(no_swap)) self.assertIsNone(driver_block_device.get_swap([])) def test_is_implemented(self): for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm, self.ephemeral_bdm, self.snapshot_bdm): self.assertTrue(driver_block_device.is_implemented(bdm)) local_image = self.image_bdm_dict.copy() local_image['destination_type'] = 'local' self.assertFalse(driver_block_device.is_implemented( fake_block_device.fake_bdm_object(self.context, local_image))) def test_is_block_device_mapping(self): test_swap = self.driver_classes['swap'](self.swap_bdm) test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm) test_image = self.driver_classes['image'](self.image_bdm) test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm) test_volume = self.driver_classes['volume'](self.volume_bdm) test_blank = self.driver_classes['blank'](self.blank_bdm) for bdm in (test_image, test_snapshot, test_volume, test_blank): self.assertTrue(driver_block_device.is_block_device_mapping( bdm._bdm_obj)) for bdm in (test_swap, test_ephemeral): self.assertFalse(driver_block_device.is_block_device_mapping( bdm._bdm_obj)) def test_get_volume_create_az_cinder_cross_az_attach_true(self): # Tests that we get None back if cinder.cross_az_attach=True even if # the instance has an AZ assigned. Note that since cross_az_attach # defaults to True we don't need to set a flag explicitly for the test. updates = {'availability_zone': 'test-az'} instance = fake_instance.fake_instance_obj(self.context, **updates) self.assertIsNone( driver_block_device._get_volume_create_az_value(instance))
apache-2.0
taotetek/rsyslog-doc
README.md
2371
rsyslog-docs ============ Documentation for the rsyslog project ------------------------------------- This is a work in progress. We are currently migrating over to a new document generation framework. The process of this work will be done as follows: 1. Complete v5-stable documentation 2. Merge v5-stable into v7-stable branch 3. Update v7-stable branch with all new documentation and materials specific for that version 4. Repeat 2 and 3 merging current repo with next highest until Master is merged and updated. Current Status - * v5-stable - (In Development) * v7-stable - (In Development) * v8-devel - (In Development) ## Learning the doc tools If you are new to rst and Sphinx, visit the Sphinx doc to get started: http://sphinx-doc.org/contents.html ## Importing missing content For the time being, occasionally a page from the v7 or v8 doc branches seems to be missing in rsyslog-doc. To recover it, check out the respective version (v8.1.6 is the latest v8 with html doc) and use this too to convert to rst: $ pandoc -f html -t rst <html_file> -o <output_file> ## Instructions These assume default installs of Python for Windows and Linux ### Generate HTML Documentation on Linux 1. Download the pip installer from here: https://raw.github.com/pypa/pip/master/contrib/get-pip.py 2. Run: python ./get-pip.py 3. Run: pip install sphinx 4. Checkout Branch in Repo – 1. Run: git clone https://github.com/rsyslog/rsyslog-doc.git 2. Run: cd rsyslog-doc 3. Run: git checkout v5-stable 5. Run: sphinx-build -b html source build 6. open rsyslog-doc/build/index.html in a browser ###Generate HTML Documentation on Windows 1. Download the pip installer from here: https://raw.github.com/pypa/pip/master/contrib/get-pip.py 2. Download and install Git for windows if you don’t already have Git: 1. https://code.google.com/p/msysgit/downloads/list?can=3&q=full+installer+official+git&colspec=Filename+Summary+Uploaded+ReleaseDate+Size+DownloadCount 2. Install Git for Windows. 3. 
Run: c:\python27\python get-pip.py 4. Run: c:\python27\scripts\pip install sphinx 5. Checkout Branch in Repo – 1. Run: git clone https://github.com/rsyslog/rsyslog-doc.git 2. Run: cd rsyslog-doc 3. Run: git checkout v5-stable 6. Run: c:\python27\scripts\sphinx-build -b html source build 7. open rsyslog-doc/build/index.html in a browser
apache-2.0
irudyak/ignite
examples/src/test/java/org/apache/ignite/examples/MemcacheRestExamplesMultiNodeSelfTest.java
1462
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.examples; //import org.apache.ignite.examples.misc.client.memcache.*; /** * MemcacheRestExample multi-node self test. */ public class MemcacheRestExamplesMultiNodeSelfTest extends MemcacheRestExamplesSelfTest { // TODO: IGNITE-711 next example(s) should be implemented for java 8 // or testing method(s) should be removed if example(s) does not applicable for java 8. /** {@inheritDoc} */ // @Override protected void beforeTest() throws Exception { // for (int i = 0; i < RMT_NODES_CNT; i++) // startGrid("memcache-rest-examples-" + i, MemcacheRestExampleNodeStartup.configuration()); // } }
apache-2.0
jeltz/rust-debian-package
src/llvm/tools/clang/test/Parser/cxx-template-decl.cpp
4541
// RUN: %clang_cc1 -fsyntax-only -verify %s // Errors export class foo { }; // expected-error {{expected template}} template x; // expected-error {{C++ requires a type specifier for all declarations}} \ // expected-error {{does not refer}} export template x; // expected-error {{expected '<' after 'template'}} export template<class T> class x0; // expected-warning {{exported templates are unsupported}} template < ; // expected-error {{expected template parameter}} \ // expected-error{{expected ',' or '>' in template-parameter-list}} \ // expected-warning {{declaration does not declare anything}} template <int +> struct x1; // expected-error {{expected ',' or '>' in template-parameter-list}} // verifies that we only walk to the ',' & still produce errors on the rest of the template parameters template <int +, T> struct x2; // expected-error {{expected ',' or '>' in template-parameter-list}} \ expected-error {{expected unqualified-id}} template<template<int+>> struct x3; // expected-error {{expected ',' or '>' in template-parameter-list}} \ expected-error {{template template parameter requires 'class' after the parameter list}} template <template X> struct Err1; // expected-error {{expected '<' after 'template'}} \ // expected-error{{extraneous}} template <template <typename> > struct Err2; // expected-error {{template template parameter requires 'class' after the parameter list}} template <template <typename> Foo> struct Err3; // expected-error {{template template parameter requires 'class' after the parameter list}} // Template function declarations template <typename T> void foo(); template <typename T, typename U> void foo(); // Template function definitions. template <typename T> void foo() { } // Template class (forward) declarations template <typename T> struct A; template <typename T, typename U> struct b; template <typename> struct C; template <typename, typename> struct D; // Forward declarations with default parameters? 
template <typename T = int> class X1; template <typename = int> class X2; // Forward declarations w/template template parameters template <template <typename> class T> class TTP1; template <template <typename> class> class TTP2; template <template <typename> class T = foo> class TTP3; // expected-error{{must be a class template}} template <template <typename> class = foo> class TTP3; // expected-error{{must be a class template}} template <template <typename X, typename Y> class T> class TTP5; // Forward declarations with non-type params template <int> class NTP0; template <int N> class NTP1; template <int N = 5> class NTP2; template <int = 10> class NTP3; template <unsigned int N = 12u> class NTP4; template <unsigned int = 12u> class NTP5; template <unsigned = 15u> class NTP6; template <typename T, T Obj> class NTP7; // Template class declarations template <typename T> struct A { }; template <typename T, typename U> struct B { }; // Template parameter shadowing template<typename T, // expected-note{{template parameter is declared here}} typename T> // expected-error{{declaration of 'T' shadows template parameter}} void shadow1(); template<typename T> // expected-note{{template parameter is declared here}} void shadow2(int T); // expected-error{{declaration of 'T' shadows template parameter}} template<typename T> // expected-note{{template parameter is declared here}} class T { // expected-error{{declaration of 'T' shadows template parameter}} }; template<int Size> // expected-note{{template parameter is declared here}} void shadow3(int Size); // expected-error{{declaration of 'Size' shadows template parameter}} // <rdar://problem/6952203> template<typename T> // expected-note{{here}} struct shadow4 { int T; // expected-error{{shadows}} }; template<typename T> // expected-note{{here}} struct shadow5 { int T(int, float); // expected-error{{shadows}} }; // Non-type template parameters in scope template<int Size> void f(int& i) { i = Size; Size = i; // 
expected-error{{expression is not assignable}} } template<typename T> const T& min(const T&, const T&); void f2() { int x; A< typeof(x>1) > a; } // PR3844 template <> struct S<int> { }; // expected-error{{explicit specialization of non-template struct 'S'}} namespace PR6184 { namespace N { template <typename T> void bar(typename T::x); } template <typename T> void N::bar(typename T::x) { } }
apache-2.0
terryturner/VRPinGMapFx
jsprit-master/jsprit-io/src/main/java/com/graphhopper/jsprit/io/algorithm/AlgorithmConfig.java
1141
/* * Licensed to GraphHopper GmbH under one or more contributor * license agreements. See the NOTICE file distributed with this work for * additional information regarding copyright ownership. * * GraphHopper GmbH licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.graphhopper.jsprit.io.algorithm; import org.apache.commons.configuration.XMLConfiguration; public class AlgorithmConfig { private XMLConfiguration xmlConfig; public AlgorithmConfig() { xmlConfig = new XMLConfiguration(); } public XMLConfiguration getXMLConfiguration() { return xmlConfig; } }
apache-2.0
zhimin711/nova
nova/tests/unit/virt/vmwareapi/test_network_util.py
9191
# Copyright (c) 2014 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import mock from oslo_vmware import vim_util from nova import exception from nova import test from nova.tests.unit.virt.vmwareapi import fake from nova.tests.unit.virt.vmwareapi import stubs from nova.virt.vmwareapi import driver from nova.virt.vmwareapi import network_util from nova.virt.vmwareapi import vm_util ResultSet = collections.namedtuple('ResultSet', ['objects']) ObjectContent = collections.namedtuple('ObjectContent', ['obj', 'propSet']) DynamicProperty = collections.namedtuple('DynamicProperty', ['name', 'val']) class GetNetworkWithTheNameTestCase(test.NoDBTestCase): def setUp(self): super(GetNetworkWithTheNameTestCase, self).setUp() fake.reset() self.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession.vim', stubs.fake_vim_prop) self.stub_out('nova.virt.vmwareapi.driver.' 'VMwareAPISession.is_vim_object', stubs.fake_is_vim_object) self._session = driver.VMwareAPISession() def _build_cluster_networks(self, networks): """Returns a set of results for a cluster network lookup. 
This is an example: (ObjectContent){ obj = (obj){ value = "domain-c7" _type = "ClusterComputeResource" } propSet[] = (DynamicProperty){ name = "network" val = (ArrayOfManagedObjectReference){ ManagedObjectReference[] = (ManagedObjectReference){ value = "network-54" _type = "Network" }, (ManagedObjectReference){ value = "dvportgroup-14" _type = "DistributedVirtualPortgroup" }, } }, }] """ objects = [] obj = ObjectContent(obj=vim_util.get_moref("domain-c7", "ClusterComputeResource"), propSet=[]) value = fake.DataObject() value.ManagedObjectReference = [] for network in networks: value.ManagedObjectReference.append(network) obj.propSet.append( DynamicProperty(name='network', val=value)) objects.append(obj) return ResultSet(objects=objects) def test_get_network_no_match(self): net_morefs = [vim_util.get_moref("dvportgroup-135", "DistributedVirtualPortgroup"), vim_util.get_moref("dvportgroup-136", "DistributedVirtualPortgroup")] networks = self._build_cluster_networks(net_morefs) self._continue_retrieval_called = False def mock_call_method(module, method, *args, **kwargs): if method == 'get_object_properties': return networks if method == 'get_object_property': result = fake.DataObject() result.name = 'no-match' return result if method == 'continue_retrieval': self._continue_retrieval_called = True with mock.patch.object(self._session, '_call_method', mock_call_method): res = network_util.get_network_with_the_name(self._session, 'fake_net', 'fake_cluster') self.assertTrue(self._continue_retrieval_called) self.assertIsNone(res) def _get_network_dvs_match(self, name, token=False): net_morefs = [vim_util.get_moref("dvportgroup-135", "DistributedVirtualPortgroup")] networks = self._build_cluster_networks(net_morefs) def mock_call_method(module, method, *args, **kwargs): if method == 'get_object_properties': return networks if method == 'get_object_property': result = fake.DataObject() if not token or self._continue_retrieval_called: result.name = name else: result.name = 
'fake_name' result.key = 'fake_key' result.distributedVirtualSwitch = 'fake_dvs' return result if method == 'continue_retrieval': if token: self._continue_retrieval_called = True return networks if method == 'cancel_retrieval': self._cancel_retrieval_called = True with mock.patch.object(self._session, '_call_method', mock_call_method): res = network_util.get_network_with_the_name(self._session, 'fake_net', 'fake_cluster') self.assertIsNotNone(res) def test_get_network_dvs_exact_match(self): self._cancel_retrieval_called = False self._get_network_dvs_match('fake_net') self.assertTrue(self._cancel_retrieval_called) def test_get_network_dvs_match(self): self._cancel_retrieval_called = False self._get_network_dvs_match('dvs_7-virtualwire-7-fake_net') self.assertTrue(self._cancel_retrieval_called) def test_get_network_dvs_match_with_token(self): self._continue_retrieval_called = False self._cancel_retrieval_called = False self._get_network_dvs_match('dvs_7-virtualwire-7-fake_net', token=True) self.assertTrue(self._continue_retrieval_called) self.assertTrue(self._cancel_retrieval_called) def test_get_network_network_match(self): net_morefs = [vim_util.get_moref("network-54", "Network")] networks = self._build_cluster_networks(net_morefs) def mock_call_method(module, method, *args, **kwargs): if method == 'get_object_properties': return networks if method == 'get_object_property': return 'fake_net' with mock.patch.object(self._session, '_call_method', mock_call_method): res = network_util.get_network_with_the_name(self._session, 'fake_net', 'fake_cluster') self.assertIsNotNone(res) class GetVlanIdAndVswitchForPortgroupTestCase(test.NoDBTestCase): @mock.patch.object(vm_util, 'get_host_ref') def test_no_port_groups(self, mock_get_host_ref): session = mock.Mock() session._call_method.return_value = None self.assertRaises( exception.NovaException, network_util.get_vlanid_and_vswitch_for_portgroup, session, 'port_group_name', 'fake_cluster' ) @mock.patch.object(vm_util, 
'get_host_ref') def test_valid_port_group(self, mock_get_host_ref): session = mock.Mock() session._call_method.return_value = self._fake_port_groups() vlanid, vswitch = network_util.get_vlanid_and_vswitch_for_portgroup( session, 'port_group_name', 'fake_cluster' ) self.assertEqual(vlanid, 100) self.assertEqual(vswitch, 'vswitch_name') @mock.patch.object(vm_util, 'get_host_ref') def test_unknown_port_group(self, mock_get_host_ref): session = mock.Mock() session._call_method.return_value = self._fake_port_groups() vlanid, vswitch = network_util.get_vlanid_and_vswitch_for_portgroup( session, 'unknown_port_group', 'fake_cluster' ) self.assertIsNone(vlanid) self.assertIsNone(vswitch) def _fake_port_groups(self): port_group_spec = fake.DataObject() port_group_spec.name = 'port_group_name' port_group_spec.vlanId = 100 port_group = fake.DataObject() port_group.vswitch = 'vswitch_name' port_group.spec = port_group_spec response = fake.DataObject() response.HostPortGroup = [port_group] return response
apache-2.0
luna1x/chef-server
vendor/ruby/1.9.1/gems/fog-1.15.0/lib/fog/libvirt/compute.rb
3975
require 'fog/libvirt' require 'fog/compute' require 'fog/libvirt/models/compute/util/util' require 'fog/libvirt/models/compute/util/uri' module Fog module Compute class Libvirt < Fog::Service requires :libvirt_uri recognizes :libvirt_username, :libvirt_password recognizes :libvirt_ip_command model_path 'fog/libvirt/models/compute' model :server collection :servers model :network collection :networks model :interface collection :interfaces model :volume collection :volumes model :pool collection :pools model :node collection :nodes model :nic collection :nics request_path 'fog/libvirt/requests/compute' request :list_domains request :create_domain request :define_domain request :vm_action request :list_pools request :list_pool_volumes request :define_pool request :pool_action request :list_volumes request :volume_action request :create_volume request :list_networks request :destroy_network request :list_interfaces request :destroy_interface request :get_node_info request :update_display module Shared include Fog::Compute::LibvirtUtil end class Mock include Shared def initialize(options={}) # libvirt is part of the gem => ruby-libvirt require 'libvirt' end private def client return @client if defined?(@client) end #read mocks xml def read_xml(file_name) file_path = File.join(File.dirname(__FILE__),"requests","compute","mock_files",file_name) File.read(file_path) end end class Real include Shared attr_reader :client attr_reader :uri attr_reader :ip_command def initialize(options={}) @uri = ::Fog::Compute::LibvirtUtil::URI.new(enhance_uri(options[:libvirt_uri])) @ip_command = options[:libvirt_ip_command] # libvirt is part of the gem => ruby-libvirt begin require 'libvirt' rescue LoadError => e retry if require('rubygems') raise e.message end begin if options[:libvirt_username] and options[:libvirt_password] @client = ::Libvirt::open_auth(uri.uri, [::Libvirt::CRED_AUTHNAME, ::Libvirt::CRED_PASSPHRASE]) do |cred| case cred['type'] when ::Libvirt::CRED_AUTHNAME 
options[:libvirt_username] when ::Libvirt::CRED_PASSPHRASE options[:libvirt_password] end end else @client = ::Libvirt::open(uri.uri) end rescue ::Libvirt::ConnectionError raise Fog::Errors::Error.new("Error making a connection to libvirt URI #{uri.uri}:\n#{$!}") end end def terminate @client.close if @client and [email protected]? end def enhance_uri(uri) require 'cgi' append="" # on macosx, chances are we are using libvirt through homebrew # the client will default to a socket location based on it's own location (/opt) # we conveniently point it to /var/run/libvirt/libvirt-sock # if no socket option has been specified explicitly if RUBY_PLATFORM =~ /darwin/ querystring=::URI.parse(uri).query if querystring.nil? append="?socket=/var/run/libvirt/libvirt-sock" else if !::CGI.parse(querystring).has_key?("socket") append="&socket=/var/run/libvirt/libvirt-sock" end end end uri+append end end end end end
apache-2.0
dagolden/opscode-cookbooks
database/recipes/snapshot.rb
2156
# # Author:: AJ Christensen (<[email protected]>) # Cookbook Name:: database # Recipe:: snapshot # # Copyright 2009-2010, Opscode, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # include_recipe "aws" include_recipe "xfs" %w{ebs_vol_dev db_role app_environment username password aws_access_key_id aws_secret_access_key snapshots_to_keep volume_id}.collect do |key| Chef::Application.fatal!("Required db_snapshot configuration #{key} not found.", -47) unless node.db_snapshot.has_key? key end connection_info = {:host => localhost, :username => node.db_snapshot.username, :password => node.db_snapshot.password} mysql_database "locking tables for #{node.db_snapshot.app_environment}" do connection connection_info sql "flush tables with read lock" action :query end execute "xfs freeze" do command "xfs_freeze -f #{node.db_snapshot.ebs_vol_dev}" end aws_ebs_volume "#{node.db_snapshot.db_role.first}_#{node.db_snapshot.app_environment}" do aws_access_key node.db_snapshot.aws_access_key_id aws_secret_access_key node.db_snapshot.aws_secret_access_key size 50 device node.db_snapshot.ebs_vol_dev snapshots_to_keep node.db_snapshot.snapshots_to_keep action :snapshot volume_id node.db_snapshot.volume_id ignore_failure true # if this fails, continue to unfreeze and unlock end execute "xfs unfreeze" do command "xfs_freeze -u #{node.db_snapshot.ebs_vol_dev}" end mysql_database "unflushing tables for #{node.db_snapshot.app_environment}" do connection connection_info sql "unlock tables" action :query 
end aws_ebs_volume "#{node.db_snapshot.db_role.first}_#{node.db_snapshot.app_environment}" do action :prune end
apache-2.0
lukecwik/incubator-beam
runners/flink/src/main/java/org/apache/beam/runners/flink/translation/wrappers/streaming/stableinput/NonKeyedBufferingElementsHandler.java
2224
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.beam.runners.flink.translation.wrappers.streaming.stableinput; import static org.apache.beam.vendor.guava.v26_0_jre.com.google.common.base.Preconditions.checkNotNull; import java.util.stream.Stream; import java.util.stream.StreamSupport; import org.apache.flink.api.common.state.ListState; /** A non-keyed implementation of a {@link BufferingElementsHandler}. 
*/ public class NonKeyedBufferingElementsHandler<T> implements BufferingElementsHandler { static <T> NonKeyedBufferingElementsHandler<T> create(ListState<BufferedElement> elementState) { return new NonKeyedBufferingElementsHandler<>(elementState); } private final ListState<BufferedElement> elementState; private NonKeyedBufferingElementsHandler(ListState<BufferedElement> elementState) { this.elementState = checkNotNull(elementState); } @Override public Stream<BufferedElement> getElements() { try { return StreamSupport.stream(elementState.get().spliterator(), false); } catch (Exception e) { throw new RuntimeException("Failed to retrieve buffered element from state backend.", e); } } @Override public void buffer(BufferedElement element) { try { elementState.add(element); } catch (Exception e) { throw new RuntimeException("Failed to buffer element in state backend.", e); } } @Override public void clear() { elementState.clear(); } }
apache-2.0
nicoben/pentaho-kettle
engine/src/org/pentaho/di/job/entries/simpleeval/JobEntrySimpleEval.java
46419
/*! ****************************************************************************** * * Pentaho Data Integration * * Copyright (C) 2002-2016 by Pentaho : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.di.job.entries.simpleeval; import java.text.SimpleDateFormat; import java.util.Date; import java.util.List; import java.util.regex.Pattern; import org.pentaho.di.cluster.SlaveServer; import org.pentaho.di.core.Const; import org.pentaho.di.core.util.Utils; import org.pentaho.di.core.Result; import org.pentaho.di.core.RowMetaAndData; import org.pentaho.di.core.database.DatabaseMeta; import org.pentaho.di.core.exception.KettleDatabaseException; import org.pentaho.di.core.exception.KettleException; import org.pentaho.di.core.exception.KettleXMLException; import org.pentaho.di.core.row.value.ValueMetaString; import org.pentaho.di.core.util.StringUtil; import org.pentaho.di.core.xml.XMLHandler; import org.pentaho.di.i18n.BaseMessages; import org.pentaho.di.job.entry.JobEntryBase; import org.pentaho.di.job.entry.JobEntryInterface; import org.pentaho.di.repository.ObjectId; import org.pentaho.di.repository.Repository; import org.pentaho.metastore.api.IMetaStore; import org.w3c.dom.Node; /** * This defines a 'simple evaluation' job entry. 
* * @author Samatar Hassan * @since 01-01-2009 */ public class JobEntrySimpleEval extends JobEntryBase implements Cloneable, JobEntryInterface { private static Class<?> PKG = JobEntrySimpleEval.class; // for i18n purposes, needed by Translator2!! public static final String[] valueTypeDesc = new String[] { BaseMessages.getString( PKG, "JobSimpleEval.EvalPreviousField.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.EvalVariable.Label" ), }; public static final String[] valueTypeCode = new String[] { "field", "variable" }; public static final int VALUE_TYPE_FIELD = 0; public static final int VALUE_TYPE_VARIABLE = 1; public int valuetype; public static final String[] successConditionDesc = new String[] { BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenEqual.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenDifferent.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenContains.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenNotContains.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenStartWith.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenNotStartWith.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenEndWith.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenNotEndWith.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenRegExp.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenInList.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenNotInList.Label" ) }; public static final String[] successConditionCode = new String[] { "equal", "different", "contains", "notcontains", "startswith", "notstatwith", "endswith", "notendwith", "regexp", "inlist", "notinlist" }; public static final int SUCCESS_CONDITION_EQUAL = 0; public static final int SUCCESS_CONDITION_DIFFERENT = 1; public static final int SUCCESS_CONDITION_CONTAINS = 2; public static final int SUCCESS_CONDITION_NOT_CONTAINS = 3; public static final int 
SUCCESS_CONDITION_START_WITH = 4; public static final int SUCCESS_CONDITION_NOT_START_WITH = 5; public static final int SUCCESS_CONDITION_END_WITH = 6; public static final int SUCCESS_CONDITION_NOT_END_WITH = 7; public static final int SUCCESS_CONDITION_REGEX = 8; public static final int SUCCESS_CONDITION_IN_LIST = 9; public static final int SUCCESS_CONDITION_NOT_IN_LIST = 10; public int successcondition; public static final String[] fieldTypeDesc = new String[] { BaseMessages.getString( PKG, "JobSimpleEval.FieldTypeString.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.FieldTypeNumber.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.FieldTypeDateTime.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.FieldTypeBoolean.Label" ), }; public static final String[] fieldTypeCode = new String[] { "string", "number", "datetime", "boolean" }; public static final int FIELD_TYPE_STRING = 0; public static final int FIELD_TYPE_NUMBER = 1; public static final int FIELD_TYPE_DATE_TIME = 2; public static final int FIELD_TYPE_BOOLEAN = 3; public int fieldtype; public static final String[] successNumberConditionDesc = new String[] { BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenEqual.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenDifferent.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenSmallThan.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenSmallOrEqualThan.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenGreaterThan.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenGreaterOrEqualThan.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessBetween.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenInList.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenNotInList.Label" ), }; public static final String[] successNumberConditionCode = new String[] { "equal", "different", "smaller", "smallequal", "greater", "greaterequal", "between", "inlist", 
"notinlist" }; public static final int SUCCESS_NUMBER_CONDITION_EQUAL = 0; public static final int SUCCESS_NUMBER_CONDITION_DIFFERENT = 1; public static final int SUCCESS_NUMBER_CONDITION_SMALLER = 2; public static final int SUCCESS_NUMBER_CONDITION_SMALLER_EQUAL = 3; public static final int SUCCESS_NUMBER_CONDITION_GREATER = 4; public static final int SUCCESS_NUMBER_CONDITION_GREATER_EQUAL = 5; public static final int SUCCESS_NUMBER_CONDITION_BETWEEN = 6; public static final int SUCCESS_NUMBER_CONDITION_IN_LIST = 7; public static final int SUCCESS_NUMBER_CONDITION_NOT_IN_LIST = 8; public int successnumbercondition; public static final String[] successBooleanConditionDesc = new String[] { BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenTrue.Label" ), BaseMessages.getString( PKG, "JobSimpleEval.SuccessWhenFalse.Label" ) }; public static final String[] successBooleanConditionCode = new String[] { "true", "false" }; public static final int SUCCESS_BOOLEAN_CONDITION_TRUE = 0; public static final int SUCCESS_BOOLEAN_CONDITION_FALSE = 1; public int successbooleancondition; private String fieldname; private String variablename; private String mask; private String comparevalue; private String minvalue; private String maxvalue; private boolean successwhenvarset; public JobEntrySimpleEval( String n ) { super( n, "" ); valuetype = VALUE_TYPE_FIELD; successcondition = SUCCESS_CONDITION_EQUAL; successnumbercondition = SUCCESS_NUMBER_CONDITION_EQUAL; successbooleancondition = SUCCESS_BOOLEAN_CONDITION_FALSE; minvalue = null; maxvalue = null; comparevalue = null; fieldname = null; variablename = null; fieldtype = FIELD_TYPE_STRING; mask = null; successwhenvarset = false; } public JobEntrySimpleEval() { this( "" ); } @Override public Object clone() { JobEntrySimpleEval je = (JobEntrySimpleEval) super.clone(); return je; } private static String getValueTypeCode( int i ) { if ( i < 0 || i >= valueTypeCode.length ) { return valueTypeCode[0]; } return valueTypeCode[i]; } 
private static String getFieldTypeCode( int i ) { if ( i < 0 || i >= fieldTypeCode.length ) { return fieldTypeCode[0]; } return fieldTypeCode[i]; } private static String getSuccessConditionCode( int i ) { if ( i < 0 || i >= successConditionCode.length ) { return successConditionCode[0]; } return successConditionCode[i]; } public static String getSuccessNumberConditionCode( int i ) { if ( i < 0 || i >= successNumberConditionCode.length ) { return successNumberConditionCode[0]; } return successNumberConditionCode[i]; } private static String getSuccessBooleanConditionCode( int i ) { if ( i < 0 || i >= successBooleanConditionCode.length ) { return successBooleanConditionCode[0]; } return successBooleanConditionCode[i]; } @Override public String getXML() { StringBuilder retval = new StringBuilder( 300 ); retval.append( super.getXML() ); retval.append( " " ).append( XMLHandler.addTagValue( "valuetype", getValueTypeCode( valuetype ) ) ); retval.append( " " ).append( XMLHandler.addTagValue( "fieldname", fieldname ) ); retval.append( " " ).append( XMLHandler.addTagValue( "variablename", variablename ) ); retval.append( " " ).append( XMLHandler.addTagValue( "fieldtype", getFieldTypeCode( fieldtype ) ) ); retval.append( " " ).append( XMLHandler.addTagValue( "mask", mask ) ); retval.append( " " ).append( XMLHandler.addTagValue( "comparevalue", comparevalue ) ); retval.append( " " ).append( XMLHandler.addTagValue( "minvalue", minvalue ) ); retval.append( " " ).append( XMLHandler.addTagValue( "maxvalue", maxvalue ) ); retval.append( " " ).append( XMLHandler.addTagValue( "successcondition", getSuccessConditionCode( successcondition ) ) ); retval .append( " " ).append( XMLHandler.addTagValue( "successnumbercondition", getSuccessNumberConditionCode( successnumbercondition ) ) ); retval.append( " " ).append( XMLHandler.addTagValue( "successbooleancondition", getSuccessBooleanConditionCode( successbooleancondition ) ) ); retval.append( " " ).append( XMLHandler.addTagValue( 
"successwhenvarset", successwhenvarset ) ); return retval.toString(); } private static int getValueTypeByCode( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < valueTypeCode.length; i++ ) { if ( valueTypeCode[i].equalsIgnoreCase( tt ) ) { return i; } } return 0; } private static int getSuccessNumberByCode( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < successNumberConditionCode.length; i++ ) { if ( successNumberConditionCode[i].equalsIgnoreCase( tt ) ) { return i; } } return 0; } private static int getSuccessBooleanByCode( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < successBooleanConditionCode.length; i++ ) { if ( successBooleanConditionCode[i].equalsIgnoreCase( tt ) ) { return i; } } return 0; } private static int getFieldTypeByCode( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < fieldTypeCode.length; i++ ) { if ( fieldTypeCode[i].equalsIgnoreCase( tt ) ) { return i; } } return 0; } private static int getSuccessConditionByCode( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < successConditionCode.length; i++ ) { if ( successConditionCode[i].equalsIgnoreCase( tt ) ) { return i; } } return 0; } public void setSuccessWhenVarSet( boolean successwhenvarset ) { this.successwhenvarset = successwhenvarset; } public boolean isSuccessWhenVarSet() { return this.successwhenvarset; } public static int getSuccessNumberConditionByCode( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < successNumberConditionCode.length; i++ ) { if ( successNumberConditionCode[i].equalsIgnoreCase( tt ) ) { return i; } } return 0; } private static int getSuccessBooleanConditionByCode( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < successBooleanConditionCode.length; i++ ) { if ( successBooleanConditionCode[i].equalsIgnoreCase( tt ) ) { return i; } } return 0; } @Override public void loadXML( Node entrynode, List<DatabaseMeta> databases, List<SlaveServer> 
slaveServers, Repository rep, IMetaStore metaStore ) throws KettleXMLException { try { super.loadXML( entrynode, databases, slaveServers ); valuetype = getValueTypeByCode( Const.NVL( XMLHandler.getTagValue( entrynode, "valuetype" ), "" ) ); fieldname = XMLHandler.getTagValue( entrynode, "fieldname" ); fieldtype = getFieldTypeByCode( Const.NVL( XMLHandler.getTagValue( entrynode, "fieldtype" ), "" ) ); variablename = XMLHandler.getTagValue( entrynode, "variablename" ); mask = XMLHandler.getTagValue( entrynode, "mask" ); comparevalue = XMLHandler.getTagValue( entrynode, "comparevalue" ); minvalue = XMLHandler.getTagValue( entrynode, "minvalue" ); maxvalue = XMLHandler.getTagValue( entrynode, "maxvalue" ); successcondition = getSuccessConditionByCode( Const.NVL( XMLHandler.getTagValue( entrynode, "successcondition" ), "" ) ); successnumbercondition = getSuccessNumberConditionByCode( Const.NVL( XMLHandler.getTagValue( entrynode, "successnumbercondition" ), "" ) ); successbooleancondition = getSuccessBooleanConditionByCode( Const.NVL( XMLHandler.getTagValue( entrynode, "successbooleancondition" ), "" ) ); successwhenvarset = "Y".equalsIgnoreCase( XMLHandler.getTagValue( entrynode, "successwhenvarset" ) ); } catch ( KettleXMLException xe ) { throw new KettleXMLException( BaseMessages.getString( PKG, "JobEntrySimple.Error.Exception.UnableLoadXML" ), xe ); } } @Override public void loadRep( Repository rep, IMetaStore metaStore, ObjectId id_jobentry, List<DatabaseMeta> databases, List<SlaveServer> slaveServers ) throws KettleException { try { valuetype = getValueTypeByCode( Const.NVL( rep.getJobEntryAttributeString( id_jobentry, "valuetype" ), "" ) ); fieldname = rep.getJobEntryAttributeString( id_jobentry, "fieldname" ); variablename = rep.getJobEntryAttributeString( id_jobentry, "variablename" ); fieldtype = getFieldTypeByCode( Const.NVL( rep.getJobEntryAttributeString( id_jobentry, "fieldtype" ), "" ) ); mask = rep.getJobEntryAttributeString( id_jobentry, "mask" ); 
comparevalue = rep.getJobEntryAttributeString( id_jobentry, "comparevalue" ); minvalue = rep.getJobEntryAttributeString( id_jobentry, "minvalue" ); maxvalue = rep.getJobEntryAttributeString( id_jobentry, "maxvalue" ); successcondition = getSuccessConditionByCode( Const.NVL( rep.getJobEntryAttributeString( id_jobentry, "successcondition" ), "" ) ); successnumbercondition = getSuccessNumberConditionByCode( Const.NVL( rep.getJobEntryAttributeString( id_jobentry, "successnumbercondition" ), "" ) ); successbooleancondition = getSuccessBooleanConditionByCode( Const.NVL( rep.getJobEntryAttributeString( id_jobentry, "successbooleancondition" ), "" ) ); successwhenvarset = rep.getJobEntryAttributeBoolean( id_jobentry, "successwhenvarset" ); } catch ( KettleException dbe ) { throw new KettleException( BaseMessages.getString( PKG, "JobEntrySimple.Error.Exception.UnableLoadRep" ) + id_jobentry, dbe ); } } @Override public void saveRep( Repository rep, IMetaStore metaStore, ObjectId id_job ) throws KettleException { try { rep.saveJobEntryAttribute( id_job, getObjectId(), "valuetype", getValueTypeCode( valuetype ) ); rep.saveJobEntryAttribute( id_job, getObjectId(), "fieldname", fieldname ); rep.saveJobEntryAttribute( id_job, getObjectId(), "variablename", variablename ); rep.saveJobEntryAttribute( id_job, getObjectId(), "fieldtype", getFieldTypeCode( fieldtype ) ); rep.saveJobEntryAttribute( id_job, getObjectId(), "mask", mask ); rep.saveJobEntryAttribute( id_job, getObjectId(), "comparevalue", comparevalue ); rep.saveJobEntryAttribute( id_job, getObjectId(), "minvalue", minvalue ); rep.saveJobEntryAttribute( id_job, getObjectId(), "maxvalue", maxvalue ); rep.saveJobEntryAttribute( id_job, getObjectId(), "successcondition", getSuccessConditionCode( successcondition ) ); rep .saveJobEntryAttribute( id_job, getObjectId(), "successnumbercondition", getSuccessNumberConditionCode( successnumbercondition ) ); rep.saveJobEntryAttribute( id_job, getObjectId(), 
"successbooleancondition", getSuccessBooleanConditionCode( successbooleancondition ) ); rep.saveJobEntryAttribute( id_job, getObjectId(), "successwhenvarset", successwhenvarset ); } catch ( KettleDatabaseException dbe ) { throw new KettleException( BaseMessages.getString( PKG, "JobEntrySimple.Error.Exception.UnableSaveRep" ) + id_job, dbe ); } } @Override public Result execute( Result previousResult, int nr ) throws KettleException { Result result = previousResult; result.setNrErrors( 1 ); result.setResult( false ); String sourcevalue = null; switch ( valuetype ) { case VALUE_TYPE_FIELD: List<RowMetaAndData> rows = result.getRows(); RowMetaAndData resultRow = null; if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "JobEntrySimpleEval.Log.ArgFromPrevious.Found", ( rows != null ? rows.size() : 0 ) + "" ) ); } if ( rows.size() == 0 ) { rows = null; logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.NoRows" ) ); return result; } // get first row resultRow = rows.get( 0 ); String realfieldname = environmentSubstitute( fieldname ); int indexOfField = -1; indexOfField = resultRow.getRowMeta().indexOfValue( realfieldname ); if ( indexOfField == -1 ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.FieldNotExist", realfieldname ) ); resultRow = null; rows = null; return result; } sourcevalue = resultRow.getString( indexOfField, null ); if ( sourcevalue == null ) { sourcevalue = ""; } resultRow = null; rows = null; break; case VALUE_TYPE_VARIABLE: if ( Utils.isEmpty( variablename ) ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.VariableMissing" ) ); return result; } if ( isSuccessWhenVarSet() ) { // return variable name // remove specifications if needed String variableName = StringUtil.getVariableName( Const.NVL( getVariableName(), "" ) ); // Get value, if the variable is not set, Null will be returned String value = getVariable( variableName ); if ( value != null ) { if ( isDetailed() ) { logDetailed( 
BaseMessages.getString( PKG, "JobEntrySimpleEval.VariableSet", variableName ) ); } result.setResult( true ); result.setNrErrors( 0 ); return result; } else { if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "JobEntrySimpleEval.VariableNotSet", variableName ) ); } // PDI-6943: this job entry does not set errors upon evaluation, independently of the outcome of the check result.setNrErrors( 0 ); return result; } } sourcevalue = environmentSubstitute( getVariableWithSpec() ); break; default: break; } if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "JobSimpleEval.Log.ValueToevaluate", sourcevalue ) ); } boolean success = false; String realCompareValue = environmentSubstitute( comparevalue ); if ( realCompareValue == null ) { realCompareValue = ""; } String realMinValue = environmentSubstitute( minvalue ); String realMaxValue = environmentSubstitute( maxvalue ); switch ( fieldtype ) { case FIELD_TYPE_STRING: switch ( successcondition ) { case SUCCESS_CONDITION_EQUAL: // equal if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } success = ( sourcevalue.equals( realCompareValue ) ); if ( valuetype == VALUE_TYPE_VARIABLE && !success ) { // make the empty value evaluate to true when compared to a not set variable if ( Utils.isEmpty( realCompareValue ) ) { String variableName = StringUtil.getVariableName( variablename ); if ( System.getProperty( variableName ) == null ) { success = true; } } } break; case SUCCESS_CONDITION_DIFFERENT: // different if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } success = ( !sourcevalue.equals( realCompareValue ) ); break; case SUCCESS_CONDITION_CONTAINS: // contains if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } success = ( sourcevalue.contains( realCompareValue ) ); 
break; case SUCCESS_CONDITION_NOT_CONTAINS: // not contains if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } success = ( !sourcevalue.contains( realCompareValue ) ); break; case SUCCESS_CONDITION_START_WITH: // starts with if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } success = ( sourcevalue.startsWith( realCompareValue ) ); break; case SUCCESS_CONDITION_NOT_START_WITH: // not start with if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } success = ( !sourcevalue.startsWith( realCompareValue ) ); break; case SUCCESS_CONDITION_END_WITH: // ends with if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } success = ( sourcevalue.endsWith( realCompareValue ) ); break; case SUCCESS_CONDITION_NOT_END_WITH: // not ends with if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } success = ( !sourcevalue.endsWith( realCompareValue ) ); break; case SUCCESS_CONDITION_REGEX: // regexp if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } success = ( Pattern.compile( realCompareValue ).matcher( sourcevalue ).matches() ); break; case SUCCESS_CONDITION_IN_LIST: // in list if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } realCompareValue = Const.NVL( realCompareValue, "" ); String[] parts = realCompareValue.split( "," ); for ( int i = 0; i < parts.length && !success; i++ ) { success = ( sourcevalue.equals( parts[i].trim() ) ); } break; case SUCCESS_CONDITION_NOT_IN_LIST: // not in list if ( isDebug() ) { logDebug( 
BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } realCompareValue = Const.NVL( realCompareValue, "" ); parts = realCompareValue.split( "," ); success = true; for ( int i = 0; i < parts.length && success; i++ ) { success = !( sourcevalue.equals( parts[i].trim() ) ); } break; default: break; } break; case FIELD_TYPE_NUMBER: double valuenumber; try { valuenumber = Double.parseDouble( sourcevalue ); } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableNumber", sourcevalue, e .getMessage() ) ); return result; } double valuecompare; switch ( successnumbercondition ) { case SUCCESS_NUMBER_CONDITION_EQUAL: // equal if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } try { valuecompare = Double.parseDouble( realCompareValue ); } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableNumber", realCompareValue, e.getMessage() ) ); return result; } success = ( valuenumber == valuecompare ); break; case SUCCESS_NUMBER_CONDITION_DIFFERENT: // different if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } try { valuecompare = Double.parseDouble( realCompareValue ); } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableNumber", realCompareValue, e.getMessage() ) ); return result; } success = ( valuenumber != valuecompare ); break; case SUCCESS_NUMBER_CONDITION_SMALLER: // smaller if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } try { valuecompare = Double.parseDouble( realCompareValue ); } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableNumber", realCompareValue, e.getMessage() ) ); return result; } 
success = ( valuenumber < valuecompare ); break; case SUCCESS_NUMBER_CONDITION_SMALLER_EQUAL: // smaller or equal if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } try { valuecompare = Double.parseDouble( realCompareValue ); } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableNumber", realCompareValue, e.getMessage() ) ); return result; } success = ( valuenumber <= valuecompare ); break; case SUCCESS_NUMBER_CONDITION_GREATER: // greater try { valuecompare = Double.parseDouble( realCompareValue ); } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableNumber", realCompareValue, e.getMessage() ) ); return result; } success = ( valuenumber > valuecompare ); break; case SUCCESS_NUMBER_CONDITION_GREATER_EQUAL: // greater or equal if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } try { valuecompare = Double.parseDouble( realCompareValue ); } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableNumber", realCompareValue, e.getMessage() ) ); return result; } success = ( valuenumber >= valuecompare ); break; case SUCCESS_NUMBER_CONDITION_BETWEEN: // between min and max if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValues", realMinValue, realMaxValue ) ); } double valuemin; try { valuemin = Double.parseDouble( realMinValue ); } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableNumber", realMinValue, e .getMessage() ) ); return result; } double valuemax; try { valuemax = Double.parseDouble( realMaxValue ); } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableNumber", realMaxValue, e .getMessage() ) ); return result; } if ( valuemin >= 
valuemax ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.IncorrectNumbers", realMinValue, realMaxValue ) ); return result; } success = ( valuenumber >= valuemin && valuenumber <= valuemax ); break; case SUCCESS_NUMBER_CONDITION_IN_LIST: // in list if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } String[] parts = realCompareValue.split( "," ); for ( int i = 0; i < parts.length && !success; i++ ) { try { valuecompare = Double.parseDouble( parts[i] ); } catch ( Exception e ) { logError( toString(), BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableNumber", parts[i], e.getMessage() ) ); return result; } success = ( valuenumber == valuecompare ); } break; case SUCCESS_NUMBER_CONDITION_NOT_IN_LIST: // not in list if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } realCompareValue = Const.NVL( realCompareValue, "" ); parts = realCompareValue.split( "," ); success = true; for ( int i = 0; i < parts.length && success; i++ ) { try { valuecompare = Double.parseDouble( parts[i] ); } catch ( Exception e ) { logError( toString(), BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableNumber", parts[i], e.getMessage() ) ); return result; } success = ( valuenumber != valuecompare ); } break; default: break; } break; case FIELD_TYPE_DATE_TIME: String realMask = environmentSubstitute( mask ); SimpleDateFormat df = new SimpleDateFormat(); if ( !Utils.isEmpty( realMask ) ) { df.applyPattern( realMask ); } Date datevalue = null; try { datevalue = convertToDate( sourcevalue, realMask, df ); } catch ( Exception e ) { logError( e.getMessage() ); return result; } Date datecompare; switch ( successnumbercondition ) { case SUCCESS_NUMBER_CONDITION_EQUAL: // equal if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, 
realCompareValue ) ); } try { datecompare = convertToDate( realCompareValue, realMask, df ); } catch ( Exception e ) { logError( e.getMessage() ); return result; } success = ( datevalue.equals( datecompare ) ); break; case SUCCESS_NUMBER_CONDITION_DIFFERENT: // different if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } try { datecompare = convertToDate( realCompareValue, realMask, df ); } catch ( Exception e ) { logError( e.getMessage() ); return result; } success = ( !datevalue.equals( datecompare ) ); break; case SUCCESS_NUMBER_CONDITION_SMALLER: // smaller if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } try { datecompare = convertToDate( realCompareValue, realMask, df ); } catch ( Exception e ) { logError( e.getMessage() ); return result; } success = ( datevalue.before( datecompare ) ); break; case SUCCESS_NUMBER_CONDITION_SMALLER_EQUAL: // smaller or equal if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } try { datecompare = convertToDate( realCompareValue, realMask, df ); } catch ( Exception e ) { logError( e.getMessage() ); return result; } success = ( datevalue.before( datecompare ) || datevalue.equals( datecompare ) ); break; case SUCCESS_NUMBER_CONDITION_GREATER: // greater if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } try { datecompare = convertToDate( realCompareValue, realMask, df ); } catch ( Exception e ) { logError( e.getMessage() ); return result; } success = ( datevalue.after( datecompare ) ); break; case SUCCESS_NUMBER_CONDITION_GREATER_EQUAL: // greater or equal if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } try { datecompare = 
convertToDate( realCompareValue, realMask, df ); } catch ( Exception e ) { logError( e.getMessage() ); return result; } success = ( datevalue.after( datecompare ) || datevalue.equals( datecompare ) ); break; case SUCCESS_NUMBER_CONDITION_BETWEEN: // between min and max if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValues", realMinValue, realMaxValue ) ); } Date datemin; try { datemin = convertToDate( realMinValue, realMask, df ); } catch ( Exception e ) { logError( e.getMessage() ); return result; } Date datemax; try { datemax = convertToDate( realMaxValue, realMask, df ); } catch ( Exception e ) { logError( e.getMessage() ); return result; } if ( datemin.after( datemax ) || datemin.equals( datemax ) ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.IncorrectDates", realMinValue, realMaxValue ) ); return result; } success = ( ( datevalue.after( datemin ) || datevalue.equals( datemin ) ) && ( datevalue.before( datemax ) || datevalue.equals( datemax ) ) ); break; case SUCCESS_NUMBER_CONDITION_IN_LIST: // in list if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } String[] parts = realCompareValue.split( "," ); for ( int i = 0; i < parts.length && !success; i++ ) { try { datecompare = convertToDate( realCompareValue, realMask, df ); } catch ( Exception e ) { logError( toString(), e.getMessage() ); return result; } success = ( datevalue.equals( datecompare ) ); } break; case SUCCESS_NUMBER_CONDITION_NOT_IN_LIST: // not in list if ( isDebug() ) { logDebug( BaseMessages.getString( PKG, "JobSimpleEval.Log.CompareWithValue", sourcevalue, realCompareValue ) ); } realCompareValue = Const.NVL( realCompareValue, "" ); parts = realCompareValue.split( "," ); success = true; for ( int i = 0; i < parts.length && success; i++ ) { try { datecompare = convertToDate( realCompareValue, realMask, df ); } catch ( Exception e ) { logError( 
toString(), e.getMessage() ); return result; } success = ( !datevalue.equals( datecompare ) ); } break; default: break; } df = null; break; case FIELD_TYPE_BOOLEAN: boolean valuebool; try { valuebool = ValueMetaString.convertStringToBoolean( sourcevalue ); } catch ( Exception e ) { logError( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableBoolean", sourcevalue, e .getMessage() ) ); return result; } switch ( successbooleancondition ) { case SUCCESS_BOOLEAN_CONDITION_FALSE: // false success = ( !valuebool ); break; case SUCCESS_BOOLEAN_CONDITION_TRUE: // true success = ( valuebool ); break; default: break; } break; default: break; } result.setResult( success ); // PDI-6943: this job entry does not set errors upon evaluation, independently of the outcome of the check result.setNrErrors( 0 ); return result; } /* * Returns variable with specifications */ private String getVariableWithSpec() { String variable = getVariableName(); if ( ( !variable.contains( StringUtil.UNIX_OPEN ) && !variable.contains( StringUtil.WINDOWS_OPEN ) && !variable .contains( StringUtil.HEX_OPEN ) ) && ( ( !variable.contains( StringUtil.UNIX_CLOSE ) && !variable.contains( StringUtil.WINDOWS_CLOSE ) && !variable .contains( StringUtil.HEX_CLOSE ) ) ) ) { // Add specifications to variable variable = StringUtil.UNIX_OPEN + variable + StringUtil.UNIX_CLOSE; if ( isDetailed() ) { logDetailed( BaseMessages.getString( PKG, "JobEntrySimpleEval.CheckingVariable", variable ) ); } } return variable; } private Date convertToDate( String valueString, String mask, SimpleDateFormat df ) throws KettleException { Date datevalue = null; try { datevalue = df.parse( valueString ); } catch ( Exception e ) { throw new KettleException( BaseMessages.getString( PKG, "JobEntrySimpleEval.Error.UnparsableDate", valueString ) ); } return datevalue; } public static String getValueTypeDesc( int i ) { if ( i < 0 || i >= valueTypeDesc.length ) { return valueTypeDesc[0]; } return valueTypeDesc[i]; } public static 
String getFieldTypeDesc( int i ) { if ( i < 0 || i >= fieldTypeDesc.length ) { return fieldTypeDesc[0]; } return fieldTypeDesc[i]; } public static String getSuccessConditionDesc( int i ) { if ( i < 0 || i >= successConditionDesc.length ) { return successConditionDesc[0]; } return successConditionDesc[i]; } public static String getSuccessNumberConditionDesc( int i ) { if ( i < 0 || i >= successNumberConditionDesc.length ) { return successNumberConditionDesc[0]; } return successNumberConditionDesc[i]; } public static String getSuccessBooleanConditionDesc( int i ) { if ( i < 0 || i >= successBooleanConditionDesc.length ) { return successBooleanConditionDesc[0]; } return successBooleanConditionDesc[i]; } public static int getValueTypeByDesc( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < valueTypeDesc.length; i++ ) { if ( valueTypeDesc[i].equalsIgnoreCase( tt ) ) { return i; } } // If this fails, try to match using the code. return getValueTypeByCode( tt ); } public static int getFieldTypeByDesc( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < fieldTypeDesc.length; i++ ) { if ( fieldTypeDesc[i].equalsIgnoreCase( tt ) ) { return i; } } // If this fails, try to match using the code. return getFieldTypeByCode( tt ); } public static int getSuccessConditionByDesc( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < successConditionDesc.length; i++ ) { if ( successConditionDesc[i].equalsIgnoreCase( tt ) ) { return i; } } // If this fails, try to match using the code. return getSuccessConditionByCode( tt ); } public static int getSuccessNumberConditionByDesc( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < successNumberConditionDesc.length; i++ ) { if ( successNumberConditionDesc[i].equalsIgnoreCase( tt ) ) { return i; } } // If this fails, try to match using the code. 
return getSuccessNumberByCode( tt ); } public static int getSuccessBooleanConditionByDesc( String tt ) { if ( tt == null ) { return 0; } for ( int i = 0; i < successBooleanConditionDesc.length; i++ ) { if ( successBooleanConditionDesc[i].equalsIgnoreCase( tt ) ) { return i; } } // If this fails, try to match using the code. return getSuccessBooleanByCode( tt ); } public void setMinValue( String minvalue ) { this.minvalue = minvalue; } public String getMinValue() { return minvalue; } public void setCompareValue( String comparevalue ) { this.comparevalue = comparevalue; } public String getMask() { return mask; } public void setMask( String mask ) { this.mask = mask; } public String getFieldName() { return fieldname; } public void setFieldName( String fieldname ) { this.fieldname = fieldname; } public String getVariableName() { return variablename; } public void setVariableName( String variablename ) { this.variablename = variablename; } public String getCompareValue() { return comparevalue; } public void setMaxValue( String maxvalue ) { this.maxvalue = maxvalue; } public String getMaxValue() { return maxvalue; } @Override public boolean evaluates() { return true; } }
apache-2.0
janeen666/mi-instrument
mi/platform/rsn/simulator/oms_values.py
7128
#!/usr/bin/env python

"""
@package ion.agents.platform.rsn.simulator.oms_values
@file ion/agents/platform/rsn/simulator/oms_values.py
@author Carlos Rueda
@brief Platform attribute value generators for the RSN OMS simulator.
"""

__author__ = 'Carlos Rueda'
__license__ = 'Apache 2.0'

import math
import time

import ntplib

# time begins a few secs ago from now for purposes of reporting
_START_TIME = ntplib.system_to_ntp_time(time.time() - 30)

# maximum value array size for a single generation call
_MAX_RESULT_SIZE = 1000

# next value for generators created by _create_simple_generator
_next_value = 990000


def _create_simple_generator(gen_period):
    """
    Returns a simple generator that reports incremental values every given
    time period.

    @param gen_period discretize the time axis by this period in secs
    @retval A function to be called with parameters (from_time, to_time) where
            from_time and to_time are the lower and upper limits (both
            inclusive) of desired time window (NTP).
    """
    def _gen(from_time, to_time):
        global _next_value
        if from_time < _START_TIME:
            from_time = _START_TIME

        # t: initial abscissa coordinate within the time window.
        # int() replaces the Python-2-only long() builtin, and the explicit
        # floor division (//) preserves the quantization to multiples of
        # gen_period that Python 2's integer "/" performed here.
        t = float(int(from_time - 2 * gen_period) // gen_period * gen_period)
        while t < from_time:
            t += gen_period

        values = []
        while t <= to_time:
            val = _next_value
            _next_value += 1

            timestamp = t
            values.append((val, timestamp))
            t += gen_period
            # Cap the size of a single response.
            if len(values) == _MAX_RESULT_SIZE:
                break

        return values

    return _gen


def _create_sine_generator(sine_period, gen_period, min_val, max_val):
    """
    Returns a sine stream fluctuating between min_val and max_val.

    @param sine_period Sine period in secs
    @param gen_period discretize the time axis by this period in secs
    @param min_val min value
    @param max_val max value
    @retval A function to be called with parameters (from_time, to_time) where
            from_time and to_time are the lower and upper limits (both
            inclusive) of desired time window (NTP).
    """
    twopi = 2 * math.pi

    def _gen(from_time, to_time):
        if from_time < _START_TIME:
            from_time = _START_TIME

        # t: initial abscissa coordinate within the time window.
        # Same int()//-based quantization as in _create_simple_generator.
        t = float(int(from_time - 2 * gen_period) // gen_period * gen_period)
        while t < from_time:
            t += gen_period

        range2 = (max_val - min_val) / 2
        values = []
        while t <= to_time:
            s = math.sin(t / sine_period * twopi)
            val = s * range2 + (max_val + min_val) / 2

            timestamp = t
            values.append((val, timestamp))
            t += gen_period
            # Cap the size of a single response.
            if len(values) == _MAX_RESULT_SIZE:
                break

        return values

    return _gen


# generators per platform-ID/attribute-name:
_plat_attr_generators = {
    # we used to have a couple here, but now none for the moment.
    # An example would be:
    # ('LJ01D', 'input_voltage'): _create_sine_generator(sine_period=30,
    #                                                    gen_period=2.5,
    #                                                    min_val=-500,
    #                                                    max_val=+500),
}

# generators per attribute name:
_attribute_generators = {
    'input_voltage':
        _create_sine_generator(sine_period=30,
                               gen_period=2.5,
                               min_val=-500,
                               max_val=+500),

    'input_bus_current':
        _create_sine_generator(sine_period=50,
                               gen_period=5,
                               min_val=-300,
                               max_val=+300),

    'MVPC_temperature':
        _create_sine_generator(sine_period=20,
                               gen_period=4,
                               min_val=-200,
                               max_val=+200),

    'MVPC_pressure_1':
        _create_sine_generator(sine_period=20,
                               gen_period=4,
                               min_val=-100,
                               max_val=+100),
}

# fallback when neither lookup above matches:
_default_generator = _create_simple_generator(gen_period=5)


def generate_values(platform_id, attr_id, from_time, to_time):
    """
    Generates synthetic values within a given time window (both ends are
    inclusive). Times are NTP.

    @param platform_id Platform ID
    @param attr_id Attribute ID. Only the name part is considered. See OOIION-1551.
    @param from_time lower limit of desired time window
    @param to_time upper limit of desired time window
    """
    # get the attribute name from the given ID (strip anything after the
    # last '|' separator, if present):
    separator = attr_id.rfind('|')
    attr_name = attr_id[:separator] if separator >= 0 else attr_id

    # try by platform/attribute:
    if (platform_id, attr_name) in _plat_attr_generators:
        gen = _plat_attr_generators[(platform_id, attr_name)]

    # else: try by the attribute only:
    elif attr_name in _attribute_generators:
        gen = _attribute_generators[attr_name]

    else:
        gen = _default_generator

    return gen(from_time, to_time)


if __name__ == "__main__":  # pragma: no cover
    # do not restrict the absolute from_time for this demo program:
    _START_TIME = 0

    import sys

    if len(sys.argv) < 5:
        print("""
USAGE:
   oms_values.py platform_id attr_id delta_from delta_to
   Generates values in window [curr_time + delta_from, curr_time + delta_to]
Example:
   oms_values.py Node1A input_voltage -35 0
""")
        # sys.exit is the idiomatic form of the former bare exit() call
        sys.exit()

    cur_time = ntplib.system_to_ntp_time(time.time())

    platform_id = sys.argv[1]
    attr_id = sys.argv[2]
    delta_from = float(sys.argv[3])
    delta_to = float(sys.argv[4])

    from_time = cur_time + delta_from
    to_time = cur_time + delta_to
    values = generate_values(platform_id, attr_id, from_time, to_time)
    print("Generated %d values in time window [%s, %s]:" % (
        len(values), from_time, to_time))
    for n, (val, t) in enumerate(values):
        print("\t%2d: %5.2f -> %+4.3f" % (n, t, val))

"""
$ bin/python ion/agents/platform/rsn/simulator/oms_values.py Node1A other_attr -35 0
Generated 7 values in time window [3561992754.4, 3561992789.4]:
	 0: 3561992755.00 -> +990000.000
	 1: 3561992760.00 -> +990001.000
	 2: 3561992765.00 -> +990002.000
	 3: 3561992770.00 -> +990003.000
	 4: 3561992775.00 -> +990004.000
	 5: 3561992780.00 -> +990005.000
	 6: 3561992785.00 -> +990006.000

$ bin/python ion/agents/platform/rsn/simulator/oms_values.py Node1A input_voltage -35 0
Generated 7 values in time window [3561992757.86, 3561992792.86]:
	 0: 3561992760.00 -> -0.000
	 1: 3561992765.00 -> +433.013
	 2: 3561992770.00 -> +433.013
	 3: 3561992775.00 -> +0.000
	 4: 3561992780.00 -> -433.013
	 5: 3561992785.00 -> -433.013
	 6: 3561992790.00 -> -0.000
"""
bsd-2-clause
fw1121/uthash
tests/test72.c
1905
#include <stdlib.h> #include <stdio.h> #include "utlist.h" typedef struct el { int id; struct el *next, *prev; } el; int main(int argc, char *argv[]) { int i; el els[20], *e, *tmp, *tmp2; el *headA = NULL; el *headB = NULL; for(i=0; i<20; i++) { els[i].id=(int)'a'+i; } /* test CDL macros */ printf("CDL replace elem\n"); CDL_PREPEND(headA,&els[3]); CDL_PREPEND(headA,&els[2]); CDL_PREPEND(headA,&els[1]); CDL_PREPEND(headA,&els[0]); CDL_FOREACH(headA,e) { printf("%c ", e->id); } printf("\n"); /* replace head elem */ CDL_REPLACE_ELEM(headA, &els[0], &els[4]); CDL_FOREACH(headA,e) { printf("%c ", e->id); } printf("\n"); CDL_REPLACE_ELEM(headA, &els[4], &els[5]); CDL_FOREACH(headA,e) { printf("%c ", e->id); } printf("\n"); /* replace last elem */ CDL_REPLACE_ELEM(headA, &els[3], &els[6]); CDL_FOREACH(headA,e) { printf("%c ", e->id); } printf("\n"); CDL_REPLACE_ELEM(headA, &els[6], &els[7]); CDL_FOREACH(headA,e) { printf("%c ", e->id); } printf("\n"); /* replace middle elem */ CDL_REPLACE_ELEM(headA, &els[1], &els[8]); CDL_REPLACE_ELEM(headA, &els[2], &els[9]); CDL_FOREACH(headA,e) { printf("%c ", e->id); } printf("\n"); /* replace all just to be sure the list is intact... */ i = 10; CDL_FOREACH_SAFE(headA, e, tmp, tmp2) { CDL_REPLACE_ELEM(headA, e, &els[i]); i++; } CDL_FOREACH(headA,e) { printf("%c ", e->id); } printf("\n"); /* single elem */ CDL_PREPEND(headB, &els[18]); CDL_FOREACH(headB,e) { printf("%c ", e->id); } printf("\n"); CDL_REPLACE_ELEM(headB, &els[18], &els[19]); CDL_FOREACH(headB,e) { printf("%c ", e->id); } printf("\n"); return 0; }
bsd-2-clause