repo_name (string, 4–116 chars) | path (string, 3–942 chars) | size (string, 1–7 chars) | content (string, 3–1.05M chars) | license (string, 15 classes)
---|---|---|---|---|
cklxmu/aliyun-openapi-go-sdk
|
rds/2014-08-15/ModifyDBInstanceNetworkType_test.go
|
461
|
package rds
import (
"fmt"
"testing"
)
func TestModifyDBInstanceNetworkType(t *testing.T) {
var req ModifyDBInstanceNetworkTypeRequest
req.Init()
req.SetFormat("JSON")
req.SetRegionId("cn-shenzhen")
var accessId = "Ie65kUInu5GeAsma"
var accessSecret = "8cCqoxdYU9zKUihwXFXiN1HEACBDwB"
resp, err := ModifyDBInstanceNetworkType(&req, accessId, accessSecret)
if err != nil {
t.Fatalf("Error: %s", err.Error())
}
fmt.Printf("Success: %v\n", resp)
}
|
apache-2.0
|
jentfoo/aws-sdk-java
|
aws-java-sdk-kinesisvideo/src/main/java/com/amazonaws/services/kinesisvideo/AmazonKinesisVideoPutMedia.java
|
10008
|
/*
* Copyright 2012-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.kinesisvideo;
import com.amazonaws.services.kinesisvideo.model.ClientLimitExceededException;
import com.amazonaws.services.kinesisvideo.model.ConnectionLimitExceededException;
import com.amazonaws.services.kinesisvideo.model.InvalidArgumentException;
import com.amazonaws.services.kinesisvideo.model.InvalidEndpointException;
import com.amazonaws.services.kinesisvideo.model.NotAuthorizedException;
import com.amazonaws.services.kinesisvideo.model.PutMediaRequest;
import com.amazonaws.services.kinesisvideo.model.ResourceNotFoundException;
import java.io.Closeable;
/**
 * Interface for accessing Amazon Kinesis Video's PutMedia operation. This is a special, asynchronous operation that is not supported
 * in the normal client ({@link AWSKinesisVideoMediaClient}).
 * <p>
 * <b>Note:</b> Do not directly implement this interface; new methods are added to it regularly. Extend
 * {@link AbstractAmazonKinesisVideoPutMedia} instead.
* </p>
*/
// TODO service docs when available.
public interface AmazonKinesisVideoPutMedia extends Closeable {
/**
* <p>
* Use this API to send media data to a Kinesis video stream.
* </p>
* <note>
* <p>
* Before using this API, you must call the <code>GetDataEndpoint</code> API to get an endpoint. You then specify
* the endpoint in your <code>PutMedia</code> request.
* </p>
* </note>
* <p>
* In the request, you use the HTTP headers to provide parameter information, for example, stream name, time stamp,
* and whether the time stamp value is absolute or relative to when the producer started recording. You use the
* request body to send the media data. Kinesis Video Streams supports only the Matroska (MKV) container format for
* sending media data using this API.
* </p>
* <p>
* You have the following options for sending data using this API:
* </p>
* <ul>
* <li>
* <p>
* Send media data in real time: For example, a security camera can send frames in real time as it generates them.
* This approach minimizes the latency between the video recording and data sent on the wire. This is referred to as
* a continuous producer. In this case, a consumer application can read the stream in real time or when needed.
* </p>
* </li>
* <li>
* <p>
* Send media data offline (in batches): For example, a body camera might record video for hours and store it on the
* device. Later, when you connect the camera to the docking port, the camera can start a <code>PutMedia</code>
* session to send data to a Kinesis video stream. In this scenario, latency is not an issue.
* </p>
* </li>
* </ul>
* <p>
* When using this API, note the following considerations:
* </p>
* <ul>
* <li>
* <p>
* You must specify either <code>streamName</code> or <code>streamARN</code>, but not both.
* </p>
* </li>
* <li>
* <p>
 * You might find it easier to use a single long-running <code>PutMedia</code> session and send a large number of
 * media data fragments in the payload. Note that for each fragment received, Kinesis Video Streams sends one or
 * more acknowledgements. Network conditions might prevent you from receiving all of these acknowledgements as
 * they are generated.
* </p>
* </li>
* <li>
* <p>
* You might choose multiple consecutive <code>PutMedia</code> sessions, each with fewer fragments to ensure that
* you get all acknowledgements from the service in real time.
* </p>
* </li>
* </ul>
* <note>
* <p>
* If you send data to the same stream on multiple simultaneous <code>PutMedia</code> sessions, the media fragments
* get interleaved on the stream. You should make sure that this is OK in your application scenario.
* </p>
* </note>
* <p>
* The following limits apply when using the <code>PutMedia</code> API:
* </p>
* <ul>
* <li>
* <p>
* A client can call <code>PutMedia</code> up to five times per second per stream.
* </p>
* </li>
* <li>
* <p>
* A client can send up to five fragments per second per stream.
* </p>
* </li>
* <li>
* <p>
* Kinesis Video Streams reads media data at a rate of up to 12.5 MB/second, or 100 Mbps during a
* <code>PutMedia</code> session.
* </p>
* </li>
* </ul>
* <p>
* Note the following constraints. In these cases, Kinesis Video Streams sends the Error acknowledgement in the
* response.
* </p>
* <ul>
* <li>
* <p>
* Fragments that have time codes spanning longer than 10 seconds and that contain more than 50 megabytes of data
* are not allowed.
* </p>
* </li>
* <li>
* <p>
* An MKV stream containing more than one MKV segment or containing disallowed MKV elements (like
* <code>track*</code>) also results in the Error acknowledgement.
* </p>
* </li>
* </ul>
* <p>
* Kinesis Video Streams stores each incoming fragment and related metadata in what is called a "chunk." The
* fragment metadata includes the following:
* </p>
* <ul>
* <li>
* <p>
* The MKV headers provided at the start of the <code>PutMedia</code> request
* </p>
* </li>
* <li>
* <p>
* The following Kinesis Video Streams-specific metadata for the fragment:
* </p>
* <ul>
* <li>
* <p>
* <code>server_timestamp</code> - Time stamp when Kinesis Video Streams started receiving the fragment.
* </p>
* </li>
* <li>
* <p>
 * <code>producer_timestamp</code> - Time stamp when the producer started recording the fragment. Kinesis Video
* Streams uses three pieces of information received in the request to calculate this value.
* </p>
* <ul>
* <li>
* <p>
* The fragment timecode value received in the request body along with the fragment.
* </p>
* </li>
* <li>
* <p>
* Two request headers: <code>producerStartTimestamp</code> (when the producer started recording) and
* <code>fragmentTimeCodeType</code> (whether the fragment timecode in the payload is absolute or relative).
* </p>
* </li>
* </ul>
* <p>
* Kinesis Video Streams then computes the <code>producer_timestamp</code> for the fragment as follows:
* </p>
* <p>
* If <code>fragmentTimeCodeType</code> is relative, then
* </p>
* <p>
 * <code>producer_timestamp</code> = <code>producerStartTimestamp</code> + fragment timecode
* </p>
* <p>
* If <code>fragmentTimeCodeType</code> is absolute, then
* </p>
* <p>
* <code>producer_timestamp</code> = fragment timecode (converted to milliseconds)
* </p>
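 * <p>
 * For example (illustrative values only, not from the service documentation): with
 * <code>fragmentTimeCodeType</code> relative, a <code>producerStartTimestamp</code> of 1500000000000
 * (milliseconds since the epoch) and a fragment timecode of 5000 yield
 * <code>producer_timestamp</code> = 1500000000000 + 5000 = 1500000005000.
 * </p>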
* </li>
* <li>
* <p>
* Unique fragment number assigned by Kinesis Video Streams.
* </p>
* </li>
* </ul>
* <p/></li>
* </ul>
* <note>
* <p>
* When you make the <code>GetMedia</code> request, Kinesis Video Streams returns a stream of these chunks. The
* client can process the metadata as needed.
* </p>
* </note> <note>
* <p>
* This operation is only available for the AWS SDK for Java. It is not supported in AWS SDKs for other languages.
* </p>
* </note>
*
* @param request Represents the input of a <code>PutMedia</code> operation
 * @param responseHandler Handler to asynchronously process each {@link com.amazonaws.services.kinesisvideo.model.AckEvent}
 *        received from the service.
* @return Result of the PutMedia operation returned by the service.
* @throws ResourceNotFoundException
* Status Code: 404, The stream with the given name does not exist.
* @throws NotAuthorizedException
* Status Code: 403, The caller is not authorized to perform an operation on the given stream, or the token
* has expired.
* @throws InvalidEndpointException
* Status Code: 400, Caller used wrong endpoint to write data to a stream. On receiving such an exception,
* the user must call <code>GetDataEndpoint</code> with <code>AccessMode</code> set to "READ" and use the
* endpoint Kinesis Video returns in the next <code>GetMedia</code> call.
* @throws InvalidArgumentException
* The value for this input parameter is invalid.
* @throws ClientLimitExceededException
* Kinesis Video Streams has throttled the request because you have exceeded the limit of allowed client
* calls. Try making the call later.
* @throws ConnectionLimitExceededException
* Kinesis Video Streams has throttled the request because you have exceeded the limit of allowed client
* connections.
* @sample AmazonKinesisVideoMedia.PutMedia
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/kinesis-video-media-2017-09-30/PutMedia" target="_top">AWS
* API Documentation</a>
*/
void putMedia(PutMediaRequest request, PutMediaAckResponseHandler responseHandler);
/**
* Closes the client and releases all resources like connection pools and threads.
*/
@Override
void close();
}
|
apache-2.0
|
torakiki/sambox
|
src/test/java/org/sejda/sambox/pdmodel/graphics/optionalcontent/TestOptionalContentGroups.java
|
18662
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sejda.sambox.pdmodel.graphics.optionalcontent;
import java.awt.Color;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferInt;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import javax.imageio.ImageIO;
import org.junit.Assert;
import org.sejda.io.SeekableSources;
import org.sejda.sambox.cos.COSName;
import org.sejda.sambox.input.PDFParser;
import org.sejda.sambox.pdmodel.PDDocument;
import org.sejda.sambox.pdmodel.PDDocumentCatalog;
import org.sejda.sambox.pdmodel.PDPage;
import org.sejda.sambox.pdmodel.PDPageContentStream;
import org.sejda.sambox.pdmodel.PDPageContentStream.AppendMode;
import org.sejda.sambox.pdmodel.PDResources;
import org.sejda.sambox.pdmodel.PageMode;
import org.sejda.sambox.pdmodel.font.PDFont;
import org.sejda.sambox.pdmodel.font.PDType1Font;
import org.sejda.sambox.pdmodel.graphics.optionalcontent.PDOptionalContentProperties.BaseState;
import org.sejda.sambox.rendering.PDFRenderer;
import org.sejda.sambox.util.SpecVersionUtils;
import junit.framework.TestCase;
/**
* Tests optional content group functionality (also called layers).
*/
public class TestOptionalContentGroups extends TestCase
{
private final File testResultsDir = new File("target/test-output");
@Override
protected void setUp() throws Exception
{
super.setUp();
testResultsDir.mkdirs();
}
/**
* Tests OCG generation.
*
* @throws Exception if an error occurs
*/
public void testOCGGeneration() throws Exception
{
PDDocument doc = new PDDocument();
try
{
// Create new page
PDPage page = new PDPage();
doc.addPage(page);
PDResources resources = page.getResources();
if (resources == null)
{
resources = new PDResources();
page.setResources(resources);
}
// Prepare OCG functionality
PDOptionalContentProperties ocprops = new PDOptionalContentProperties();
doc.getDocumentCatalog().setOCProperties(ocprops);
// ocprops.setBaseState(BaseState.ON); //ON=default
// Create OCG for background
PDOptionalContentGroup background = new PDOptionalContentGroup("background");
ocprops.addGroup(background);
assertTrue(ocprops.isGroupEnabled("background"));
// Create OCG for enabled
PDOptionalContentGroup enabled = new PDOptionalContentGroup("enabled");
ocprops.addGroup(enabled);
assertFalse(ocprops.setGroupEnabled("enabled", true));
assertTrue(ocprops.isGroupEnabled("enabled"));
// Create OCG for disabled
PDOptionalContentGroup disabled = new PDOptionalContentGroup("disabled");
ocprops.addGroup(disabled);
assertFalse(ocprops.setGroupEnabled("disabled", true));
assertTrue(ocprops.isGroupEnabled("disabled"));
assertTrue(ocprops.setGroupEnabled("disabled", false));
assertFalse(ocprops.isGroupEnabled("disabled"));
// Setup page content stream and paint background/title
PDPageContentStream contentStream = new PDPageContentStream(doc, page,
AppendMode.OVERWRITE, false);
PDFont font = PDType1Font.HELVETICA_BOLD;
contentStream.beginMarkedContent(COSName.OC, background);
contentStream.beginText();
contentStream.setFont(font, 14);
contentStream.newLineAtOffset(80, 700);
contentStream.showText("PDF 1.5: Optional Content Groups");
contentStream.endText();
font = PDType1Font.HELVETICA;
contentStream.beginText();
contentStream.setFont(font, 12);
contentStream.newLineAtOffset(80, 680);
contentStream.showText("You should see a green textline, but no red text line.");
contentStream.endText();
contentStream.endMarkedContent();
// Paint enabled layer
contentStream.beginMarkedContent(COSName.OC, enabled);
contentStream.setNonStrokingColor(Color.GREEN);
contentStream.beginText();
contentStream.setFont(font, 12);
contentStream.newLineAtOffset(80, 600);
contentStream.showText("This is from an enabled layer. If you see this, that's good.");
contentStream.endText();
contentStream.endMarkedContent();
// Paint disabled layer
contentStream.beginMarkedContent(COSName.OC, disabled);
contentStream.setNonStrokingColor(Color.RED);
contentStream.beginText();
contentStream.setFont(font, 12);
contentStream.newLineAtOffset(80, 500);
contentStream
.showText("This is from a disabled layer. If you see this, that's NOT good!");
contentStream.endText();
contentStream.endMarkedContent();
contentStream.close();
File targetFile = new File(testResultsDir, "ocg-generation.pdf");
doc.writeTo(targetFile);
}
finally
{
doc.close();
}
}
/**
* Tests OCG functions on a loaded PDF.
*
* @throws Exception if an error occurs
*/
public void testOCGConsumption() throws Exception
{
File pdfFile = new File(testResultsDir, "ocg-generation.pdf");
if (!pdfFile.exists())
{
testOCGGeneration();
}
try (PDDocument doc = PDFParser.parse(SeekableSources.seekableSourceFrom(pdfFile)))
{
assertEquals(SpecVersionUtils.V1_5, doc.getVersion());
PDDocumentCatalog catalog = doc.getDocumentCatalog();
PDPage page = doc.getPage(0);
PDResources resources = page.getResources();
COSName mc0 = COSName.getPDFName("oc1");
PDOptionalContentGroup ocg = (PDOptionalContentGroup) resources.getProperties(mc0);
assertNotNull(ocg);
assertEquals("background", ocg.getName());
assertNull(resources.getProperties(COSName.getPDFName("inexistent")));
PDOptionalContentProperties ocgs = catalog.getOCProperties();
assertEquals(BaseState.ON, ocgs.getBaseState());
Set<String> names = new HashSet<>(Arrays.asList(ocgs.getGroupNames()));
assertEquals(3, names.size());
assertTrue(names.contains("background"));
assertTrue(ocgs.isGroupEnabled("background"));
assertTrue(ocgs.isGroupEnabled("enabled"));
assertFalse(ocgs.isGroupEnabled("disabled"));
ocgs.setGroupEnabled("background", false);
assertFalse(ocgs.isGroupEnabled("background"));
PDOptionalContentGroup background = ocgs.getGroup("background");
assertEquals(ocg.getName(), background.getName());
assertNull(ocgs.getGroup("inexistent"));
Collection<PDOptionalContentGroup> coll = ocgs.getOptionalContentGroups();
assertEquals(3, coll.size());
Set<String> nameSet = new HashSet<>();
for (PDOptionalContentGroup ocg2 : coll)
{
nameSet.add(ocg2.getName());
}
assertTrue(nameSet.contains("background"));
assertTrue(nameSet.contains("enabled"));
assertTrue(nameSet.contains("disabled"));
}
}
public void testOCGsWithSameNameCanHaveDifferentVisibility() throws Exception
{
PDDocument doc = new PDDocument();
try
{
// Create new page
PDPage page = new PDPage();
doc.addPage(page);
PDResources resources = page.getResources();
if (resources == null)
{
resources = new PDResources();
page.setResources(resources);
}
// Prepare OCG functionality
PDOptionalContentProperties ocprops = new PDOptionalContentProperties();
doc.getDocumentCatalog().setOCProperties(ocprops);
// ocprops.setBaseState(BaseState.ON); //ON=default
// Create visible OCG
PDOptionalContentGroup visible = new PDOptionalContentGroup("layer");
ocprops.addGroup(visible);
assertTrue(ocprops.isGroupEnabled(visible));
// Create invisible OCG
PDOptionalContentGroup invisible = new PDOptionalContentGroup("layer");
ocprops.addGroup(invisible);
assertFalse(ocprops.setGroupEnabled(invisible, false));
assertFalse(ocprops.isGroupEnabled(invisible));
// Check that visible layer is still visible
assertTrue(ocprops.isGroupEnabled(visible));
// Setup page content stream and paint background/title
PDPageContentStream contentStream = new PDPageContentStream(doc, page,
AppendMode.OVERWRITE, false);
PDFont font = PDType1Font.HELVETICA_BOLD;
contentStream.beginMarkedContent(COSName.OC, visible);
contentStream.beginText();
contentStream.setFont(font, 14);
contentStream.newLineAtOffset(80, 700);
contentStream.showText("PDF 1.5: Optional Content Groups");
contentStream.endText();
font = PDType1Font.HELVETICA;
contentStream.beginText();
contentStream.setFont(font, 12);
contentStream.newLineAtOffset(80, 680);
contentStream.showText("You should see this text, but no red text line.");
contentStream.endText();
contentStream.endMarkedContent();
// Paint disabled layer
contentStream.beginMarkedContent(COSName.OC, invisible);
contentStream.setNonStrokingColor(Color.RED);
contentStream.beginText();
contentStream.setFont(font, 12);
contentStream.newLineAtOffset(80, 500);
contentStream
.showText("This is from a disabled layer. If you see this, that's NOT good!");
contentStream.endText();
contentStream.endMarkedContent();
contentStream.close();
File targetFile = new File(testResultsDir, "ocg-generation-same-name.pdf");
doc.writeTo(targetFile);
}
finally
{
doc.close();
}
}
/**
 * PDFBOX-4496: setGroupEnabled(String, boolean) must affect all OCGs with the given name, even when several
 * OCGs share that name.
*
* @throws IOException
*/
public void testOCGGenerationSameNameCanHaveSameVisibilityOff() throws IOException
{
BufferedImage expectedImage;
BufferedImage actualImage;
try (PDDocument doc = new PDDocument())
{
// Create new page
PDPage page = new PDPage();
doc.addPage(page);
PDResources resources = page.getResources();
if (resources == null)
{
resources = new PDResources();
page.setResources(resources);
}
// Prepare OCG functionality
PDOptionalContentProperties ocprops = new PDOptionalContentProperties();
doc.getDocumentCatalog().setOCProperties(ocprops);
// ocprops.setBaseState(BaseState.ON); //ON=default
// Create OCG for background
PDOptionalContentGroup background = new PDOptionalContentGroup("background");
ocprops.addGroup(background);
assertTrue(ocprops.isGroupEnabled("background"));
// Create OCG for enabled
PDOptionalContentGroup enabled = new PDOptionalContentGroup("science");
ocprops.addGroup(enabled);
assertFalse(ocprops.setGroupEnabled("science", true));
assertTrue(ocprops.isGroupEnabled("science"));
// Create OCG for disabled1
PDOptionalContentGroup disabled1 = new PDOptionalContentGroup("alternative");
ocprops.addGroup(disabled1);
// Create OCG for disabled2 with same name as disabled1
PDOptionalContentGroup disabled2 = new PDOptionalContentGroup("alternative");
ocprops.addGroup(disabled2);
assertFalse(ocprops.setGroupEnabled("alternative", false));
assertFalse(ocprops.isGroupEnabled("alternative"));
// Setup page content stream and paint background/title
PDPageContentStream contentStream = new PDPageContentStream(doc, page,
AppendMode.OVERWRITE, false);
PDFont font = PDType1Font.HELVETICA_BOLD;
contentStream.beginMarkedContent(COSName.OC, background);
contentStream.beginText();
contentStream.setFont(font, 14);
contentStream.newLineAtOffset(80, 700);
contentStream.showText("PDF 1.5: Optional Content Groups");
contentStream.endText();
contentStream.endMarkedContent();
font = PDType1Font.HELVETICA;
// Paint enabled layer
contentStream.beginMarkedContent(COSName.OC, enabled);
contentStream.setNonStrokingColor(Color.GREEN);
contentStream.beginText();
contentStream.setFont(font, 12);
contentStream.newLineAtOffset(80, 600);
contentStream.showText("The earth is a sphere");
contentStream.endText();
contentStream.endMarkedContent();
// Paint disabled layer1
contentStream.beginMarkedContent(COSName.OC, disabled1);
contentStream.setNonStrokingColor(Color.RED);
contentStream.beginText();
contentStream.setFont(font, 12);
contentStream.newLineAtOffset(80, 500);
contentStream.showText("Alternative 1: The earth is a flat circle");
contentStream.endText();
contentStream.endMarkedContent();
// Paint disabled layer2
contentStream.beginMarkedContent(COSName.OC, disabled2);
contentStream.setNonStrokingColor(Color.BLUE);
contentStream.beginText();
contentStream.setFont(font, 12);
contentStream.newLineAtOffset(80, 450);
contentStream.showText("Alternative 2: The earth is a flat parallelogram");
contentStream.endText();
contentStream.endMarkedContent();
contentStream.close();
doc.getDocumentCatalog().setPageMode(PageMode.USE_OPTIONAL_CONTENT);
File targetFile = new File(testResultsDir, "ocg-generation-same-name-off.pdf");
doc.writeTo(targetFile.getAbsolutePath());
}
// render PDF with science disabled and alternatives with same name enabled
try (PDDocument doc = PDDocument
.load(new File(testResultsDir, "ocg-generation-same-name-off.pdf")))
{
doc.getDocumentCatalog().getOCProperties().setGroupEnabled("background", false);
doc.getDocumentCatalog().getOCProperties().setGroupEnabled("science", false);
doc.getDocumentCatalog().getOCProperties().setGroupEnabled("alternative", true);
actualImage = new PDFRenderer(doc).renderImage(0, 2);
ImageIO.write(actualImage, "png",
new File(testResultsDir, "ocg-generation-same-name-off-actual.png"));
}
// create PDF without OCGs to create the expected rendering
try (PDDocument doc2 = new PDDocument())
{
// Create new page
PDPage page = new PDPage();
doc2.addPage(page);
PDResources resources = page.getResources();
if (resources == null)
{
resources = new PDResources();
page.setResources(resources);
}
try (PDPageContentStream contentStream = new PDPageContentStream(doc2, page,
AppendMode.OVERWRITE, false))
{
PDFont font = PDType1Font.HELVETICA;
contentStream.setNonStrokingColor(Color.RED);
contentStream.beginText();
contentStream.setFont(font, 12);
contentStream.newLineAtOffset(80, 500);
contentStream.showText("Alternative 1: The earth is a flat circle");
contentStream.endText();
contentStream.setNonStrokingColor(Color.BLUE);
contentStream.beginText();
contentStream.setFont(font, 12);
contentStream.newLineAtOffset(80, 450);
contentStream.showText("Alternative 2: The earth is a flat parallelogram");
contentStream.endText();
}
File targetFile = new File(testResultsDir, "ocg-generation-same-name-off-expected.pdf");
doc2.writeTo(targetFile.getAbsolutePath());
}
try (PDDocument doc = PDDocument
.load(new File(testResultsDir, "ocg-generation-same-name-off-expected.pdf")))
{
expectedImage = new PDFRenderer(doc).renderImage(0, 2);
ImageIO.write(expectedImage, "png",
new File(testResultsDir, "ocg-generation-same-name-off-expected.png"));
}
// compare images
DataBufferInt expectedData = (DataBufferInt) expectedImage.getRaster().getDataBuffer();
DataBufferInt actualData = (DataBufferInt) actualImage.getRaster().getDataBuffer();
Assert.assertArrayEquals(expectedData.getData(), actualData.getData());
}
}
|
apache-2.0
|
rfoltyns/log4j2-elasticsearch
|
log4j2-elasticsearch-core/src/test/java/org/appenders/log4j2/elasticsearch/AsyncBatchDeliveryPluginTest.java
|
11724
|
package org.appenders.log4j2.elasticsearch;
/*-
* #%L
* log4j2-elasticsearch
* %%
* Copyright (C) 2020 Rafal Foltynski
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import org.apache.logging.log4j.core.config.ConfigurationException;
import org.appenders.log4j2.elasticsearch.spi.BatchEmitterServiceProvider;
import org.appenders.log4j2.elasticsearch.spi.TestBatchEmitterFactory;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import java.util.Arrays;
import java.util.Random;
import java.util.UUID;
import java.util.function.Function;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.IsEqual.equalTo;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class AsyncBatchDeliveryPluginTest {
private static final int TEST_BATCH_SIZE = 100;
private static final int TEST_DELIVERY_INTERVAL = 100;
public static final String TEST_SERVER_URIS = "http://localhost:9200";
public static TestHttpObjectFactory.Builder createTestObjectFactoryBuilder() {
return TestHttpObjectFactory.newBuilder()
.withServerUris(TEST_SERVER_URIS);
}
public static AsyncBatchDeliveryPlugin.Builder createTestBatchDeliveryBuilder() {
return spy(AsyncBatchDeliveryPlugin.newBuilder()
.withBatchSize(TEST_BATCH_SIZE)
.withDeliveryInterval(TEST_DELIVERY_INTERVAL)
.withClientObjectFactory(createTestObjectFactoryBuilder().build()))
.withFailoverPolicy(new NoopFailoverPolicy());
}
/* Invoke the plugin factory directly so that these tests break if its signature changes */
private BatchDelivery<String> invokePluginFactory(AsyncBatchDelivery.Builder builder) {
return AsyncBatchDeliveryPlugin.createAsyncBatchDelivery(
builder.clientObjectFactory,
builder.deliveryInterval,
builder.batchSize,
builder.failoverPolicy,
builder.shutdownDelayMillis,
builder.setupOpSources);
}
@Test
public void pluginFactoryReturnsNonNullObject() {
// given
AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder();
// when
BatchDelivery<String> delivery = invokePluginFactory(batchDeliveryBuilder);
// then
assertNotNull(delivery);
}
@Test
public void pluginFactoryFailsWhenClientObjectFactoryIsNull() {
// given
AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder();
batchDeliveryBuilder.withClientObjectFactory(null);
// when
final ConfigurationException exception = assertThrows(ConfigurationException.class, () -> invokePluginFactory(batchDeliveryBuilder));
// then
assertThat(exception.getMessage(),
equalTo("No Elasticsearch client factory [HCHttp|JestHttp|ElasticsearchBulkProcessor] provided for " + AsyncBatchDelivery.class.getSimpleName()));
}
@Test
public void pluginFactoryFallsBackToDefaults() {
// given
Function<BulkEmitterTest.TestBatch, Boolean> listener = mock(Function.class);
TestHttpObjectFactory objectFactory = spy(createTestObjectFactoryBuilder().build());
when(objectFactory.createBatchListener(any())).thenReturn(listener);
AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder()
.withClientObjectFactory(objectFactory)
.withBatchSize(0)
.withDeliveryInterval(0)
.withShutdownDelayMillis(-1)
.withFailoverPolicy(null)
.withSetupOpSources();
// when
AsyncBatchDelivery batchDelivery = (AsyncBatchDelivery) invokePluginFactory(batchDeliveryBuilder);
int expectedBatches = 10;
for (int i = 0; i < AsyncBatchDelivery.Builder.DEFAULT_BATCH_SIZE * expectedBatches; i++) {
batchDelivery.add(NoopIndexNameFormatterTest.TEST_INDEX_NAME, "test");
}
// then
assertEquals(AsyncBatchDelivery.Builder.DEFAULT_FAILOVER_POLICY, batchDelivery.failoverPolicy);
assertEquals(Arrays.asList(AsyncBatchDelivery.Builder.DEFAULT_OP_SOURCES), batchDelivery.setupOpSources);
assertEquals(AsyncBatchDelivery.Builder.DEFAULT_SHUTDOWN_DELAY, batchDelivery.shutdownDelayMillis);
verify(listener, times(expectedBatches)).apply(any());
}
@Test
public void builderConfiguresShutdownDelayMillis() {
// given
long expectedShutdownDelayMillis = 10 + new Random().nextInt(100);
FailoverPolicy failoverPolicy = spy(new TestFailoverPolicy());
AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder()
.withFailoverPolicy(failoverPolicy)
.withShutdownDelayMillis(expectedShutdownDelayMillis);
BatchDelivery<String> asyncBatchDelivery = invokePluginFactory(batchDeliveryBuilder);
asyncBatchDelivery.start();
// when
asyncBatchDelivery.stop();
// then
ArgumentCaptor<Long> captor = ArgumentCaptor.forClass(Long.class);
verify(LifeCycle.of(failoverPolicy)).stop(captor.capture(), anyBoolean());
assertEquals((Long) expectedShutdownDelayMillis, captor.getValue());
}
@Test
public void builderConfiguresSetupOpSources() {
// given
ClientObjectFactory clientObjectFactory = spy(createTestObjectFactoryBuilder().build());
OperationFactory operationFactory = mock(OperationFactory.class);
when(clientObjectFactory.setupOperationFactory()).thenReturn(operationFactory);
IndexTemplate indexTemplate = mock(IndexTemplate.class);
AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder()
.withSetupOpSources(indexTemplate)
.withClientObjectFactory(clientObjectFactory);
BatchDelivery<String> asyncBatchDelivery = invokePluginFactory(batchDeliveryBuilder);
// when
asyncBatchDelivery.start();
// then
verify(operationFactory).create(eq(indexTemplate));
verify(clientObjectFactory).addOperation(any());
}
@Test
public void batchDeliveryAddObjectDelegatesToProvidedBatchOperationsObjectApi() {
// given
AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder();
ClientObjectFactory clientObjectFactory = spy(createTestObjectFactoryBuilder().build());
BatchOperations batchOperations = spy(clientObjectFactory.createBatchOperations());
when(clientObjectFactory.createBatchOperations()).thenReturn(batchOperations);
batchDeliveryBuilder.withClientObjectFactory(clientObjectFactory);
BatchDelivery<String> batchDelivery = invokePluginFactory(batchDeliveryBuilder);
String indexName = UUID.randomUUID().toString();
String logObject = UUID.randomUUID().toString();
// when
batchDelivery.add(indexName, logObject);
// then
verify(batchOperations).createBatchItem(eq(indexName), eq(logObject));
}
@Test
public void batchDeliveryAddItemSourceDelegatesToProvidedBatchOperationsItemSourceApi() {
// given
AsyncBatchDeliveryPlugin.Builder batchDeliveryBuilder = createTestBatchDeliveryBuilder();
ClientObjectFactory clientObjectFactory = spy(createTestObjectFactoryBuilder().build());
BatchOperations batchOperations = spy(clientObjectFactory.createBatchOperations());
when(clientObjectFactory.createBatchOperations()).thenReturn(batchOperations);
batchDeliveryBuilder.withClientObjectFactory(clientObjectFactory);
BatchDelivery batchDelivery = invokePluginFactory(batchDeliveryBuilder);
String indexName = UUID.randomUUID().toString();
ItemSource itemSource = mock(ItemSource.class);
// when
batchDelivery.add(indexName, itemSource);
// then
verify(batchOperations).createBatchItem(eq(indexName), eq(itemSource));
}
@Test
public void deliveryAddsBatchItemToBatchEmitter() {
// given
TestHttpObjectFactory objectFactory = createTestObjectFactoryBuilder().build();
TestBatchEmitterFactory batchEmitterFactory = spy(new TestBatchEmitterFactory());
BatchEmitter emitter = batchEmitterFactory.createInstance(TEST_BATCH_SIZE, TEST_DELIVERY_INTERVAL, objectFactory, new NoopFailoverPolicy());
TestAsyncBatchDelivery delivery = spy(new TestAsyncBatchDelivery(createTestBatchDeliveryBuilder()
.withBatchSize(1)
.withDeliveryInterval(TEST_DELIVERY_INTERVAL)
.withClientObjectFactory(objectFactory)
.withFailoverPolicy(new NoopFailoverPolicy())
.withSetupOpSources()) {
@Override
protected BatchEmitterServiceProvider createBatchEmitterServiceProvider() {
return batchEmitterFactory;
}
});
String testMessage = "test message";
// when
delivery.add("testIndexName", testMessage);
// then
verify(batchEmitterFactory).createInstance(eq(1), eq(TEST_DELIVERY_INTERVAL), eq(objectFactory), any());
ArgumentCaptor<BulkEmitterTest.TestBatchItem> captor = ArgumentCaptor.forClass(BulkEmitterTest.TestBatchItem.class);
verify(emitter, times(1)).add(captor.capture());
assertEquals(testMessage, captor.getValue().getData(null));
}
public static class TestAsyncBatchDelivery extends AsyncBatchDeliveryPlugin {
public TestAsyncBatchDelivery(Builder builder) {
super(builder);
}
@Override
protected BatchEmitterServiceProvider createBatchEmitterServiceProvider() {
return null;
}
}
private static class TestFailoverPolicy implements FailoverPolicy, LifeCycle {
private State state = State.STOPPED;
@Override
public void deliver(Object failedPayload) {
}
@Override
public void start() {
state = State.STARTED;
}
@Override
public void stop() {
state = State.STOPPED;
}
@Override
public LifeCycle stop(long timeout, boolean runInBackground) {
state = State.STOPPED;
return this;
}
@Override
public boolean isStarted() {
return state == State.STARTED;
}
@Override
public boolean isStopped() {
return state == State.STOPPED;
}
}
}
|
apache-2.0
|
smartsheet-platform/smartsheet-ruby-sdk
|
test/unit/smartsheet/endpoints/sheets/comments_test.rb
|
1724
|
require_relative '../../../../test_helper'
require_relative '../endpoint_test_helper'
class CommentsTest < Minitest::Test
extend Smartsheet::Test::EndpointHelper
attr_accessor :mock_client
attr_accessor :smartsheet_client
def category
smartsheet_client.sheets.comments
end
def self.endpoints
[
{
symbol: :add,
method: :post,
url: ['sheets', :sheet_id, 'discussions', :discussion_id, 'comments'],
args: {sheet_id: 123, discussion_id: 234, body: {}},
has_params: false,
headers: nil
},
# TODO: Add this!
# {
# symbol: :add_with_file,
# method: :post,
# url: ['sheets', :sheet_id, 'rows', :row_id, 'discussions'],
# args: {sheet_id: 123, row_id: 234, body: {}},
# has_params: false,
# headers: nil
# },
{
symbol: :update,
method: :put,
url: ['sheets', :sheet_id, 'comments', :comment_id],
args: {sheet_id: 123, comment_id: 234, body: {}},
has_params: false,
headers: nil
},
{
symbol: :delete,
method: :delete,
url: ['sheets', :sheet_id, 'comments', :comment_id],
args: {sheet_id: 123, comment_id: 234},
has_params: false,
headers: nil
},
{
symbol: :get,
method: :get,
url: ['sheets', :sheet_id, 'comments', :comment_id],
args: {sheet_id: 123, comment_id: 234},
has_params: false,
headers: nil
},
]
end
define_setup
define_endpoints_tests
end
|
apache-2.0
|
QBNemo/spring-mvc-showcase
|
src/main/java/org/springframework/web/servlet/view/RedirectView.java
|
22944
|
/*
* Copyright 2002-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.web.servlet.view;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.Array;
import java.net.URLEncoder;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.beans.BeanUtils;
import org.springframework.http.HttpStatus;
import org.springframework.util.CollectionUtils;
import org.springframework.util.ObjectUtils;
import org.springframework.util.StringUtils;
import org.springframework.web.context.WebApplicationContext;
import org.springframework.web.servlet.FlashMap;
import org.springframework.web.servlet.FlashMapManager;
import org.springframework.web.servlet.HandlerMapping;
import org.springframework.web.servlet.SmartView;
import org.springframework.web.servlet.View;
import org.springframework.web.servlet.support.RequestContextUtils;
import org.springframework.web.servlet.support.RequestDataValueProcessor;
import org.springframework.web.util.UriComponents;
import org.springframework.web.util.UriComponentsBuilder;
import org.springframework.web.util.UriUtils;
import org.springframework.web.util.WebUtils;
/**
* View that redirects to an absolute, context relative, or current request
* relative URL. The URL may be a URI template in which case the URI template
* variables will be replaced with values available in the model. By default
* all primitive model attributes (or collections thereof) are exposed as HTTP
* query parameters (assuming they've not been used as URI template variables),
* but this behavior can be changed by overriding the
* {@link #isEligibleProperty(String, Object)} method.
*
 * <p>A URL for this view is supposed to be an HTTP redirect URL, i.e.
* suitable for HttpServletResponse's {@code sendRedirect} method, which
* is what actually does the redirect if the HTTP 1.0 flag is on, or via sending
* back an HTTP 303 code - if the HTTP 1.0 compatibility flag is off.
*
* <p>Note that while the default value for the "contextRelative" flag is off,
* you will probably want to almost always set it to true. With the flag off,
* URLs starting with "/" are considered relative to the web server root, while
* with the flag on, they are considered relative to the web application root.
* Since most web applications will never know or care what their context path
* actually is, they are much better off setting this flag to true, and submitting
* paths which are to be considered relative to the web application root.
*
* <p><b>NOTE when using this redirect view in a Portlet environment:</b> Make sure
* that your controller respects the Portlet {@code sendRedirect} constraints.
*
* @author Rod Johnson
* @author Juergen Hoeller
* @author Colin Sampaleanu
* @author Sam Brannen
* @author Arjen Poutsma
* @author Rossen Stoyanchev
* @see #setContextRelative
* @see #setHttp10Compatible
* @see #setExposeModelAttributes
* @see javax.servlet.http.HttpServletResponse#sendRedirect
*/
public class RedirectView extends AbstractUrlBasedView implements SmartView {
private static final Pattern URI_TEMPLATE_VARIABLE_PATTERN = Pattern.compile("\\{([^/]+?)\\}");
private boolean contextRelative = false;
private boolean http10Compatible = true;
private boolean exposeModelAttributes = true;
private String encodingScheme;
private HttpStatus statusCode;
private boolean expandUriTemplateVariables = true;
private boolean propagateQueryParams = false;
/**
* Constructor for use as a bean.
*/
public RedirectView() {
setExposePathVariables(false);
}
/**
* Create a new RedirectView with the given URL.
* <p>The given URL will be considered as relative to the web server,
* not as relative to the current ServletContext.
* @param url the URL to redirect to
* @see #RedirectView(String, boolean)
*/
public RedirectView(String url) {
super(url);
setExposePathVariables(false);
}
/**
* Create a new RedirectView with the given URL.
* @param url the URL to redirect to
* @param contextRelative whether to interpret the given URL as
* relative to the current ServletContext
*/
public RedirectView(String url, boolean contextRelative) {
super(url);
this.contextRelative = contextRelative;
setExposePathVariables(false);
}
/**
* Create a new RedirectView with the given URL.
* @param url the URL to redirect to
* @param contextRelative whether to interpret the given URL as
* relative to the current ServletContext
* @param http10Compatible whether to stay compatible with HTTP 1.0 clients
*/
public RedirectView(String url, boolean contextRelative, boolean http10Compatible) {
super(url);
this.contextRelative = contextRelative;
this.http10Compatible = http10Compatible;
setExposePathVariables(false);
}
/**
* Create a new RedirectView with the given URL.
* @param url the URL to redirect to
* @param contextRelative whether to interpret the given URL as
* relative to the current ServletContext
* @param http10Compatible whether to stay compatible with HTTP 1.0 clients
* @param exposeModelAttributes whether or not model attributes should be
* exposed as query parameters
*/
public RedirectView(String url, boolean contextRelative, boolean http10Compatible, boolean exposeModelAttributes) {
super(url);
this.contextRelative = contextRelative;
this.http10Compatible = http10Compatible;
this.exposeModelAttributes = exposeModelAttributes;
setExposePathVariables(false);
}
/**
* Set whether to interpret a given URL that starts with a slash ("/")
* as relative to the current ServletContext, i.e. as relative to the
* web application root.
* <p>Default is "false": A URL that starts with a slash will be interpreted
* as absolute, i.e. taken as-is. If "true", the context path will be
* prepended to the URL in such a case.
* @see javax.servlet.http.HttpServletRequest#getContextPath
*/
public void setContextRelative(boolean contextRelative) {
this.contextRelative = contextRelative;
}
/**
* Set whether to stay compatible with HTTP 1.0 clients.
* <p>In the default implementation, this will enforce HTTP status code 302
* in any case, i.e. delegate to {@code HttpServletResponse.sendRedirect}.
* Turning this off will send HTTP status code 303, which is the correct
* code for HTTP 1.1 clients, but not understood by HTTP 1.0 clients.
* <p>Many HTTP 1.1 clients treat 302 just like 303, not making any
* difference. However, some clients depend on 303 when redirecting
* after a POST request; turn this flag off in such a scenario.
* @see javax.servlet.http.HttpServletResponse#sendRedirect
*/
public void setHttp10Compatible(boolean http10Compatible) {
this.http10Compatible = http10Compatible;
}
/**
* Set the {@code exposeModelAttributes} flag which denotes whether
* or not model attributes should be exposed as HTTP query parameters.
* <p>Defaults to {@code true}.
*/
public void setExposeModelAttributes(final boolean exposeModelAttributes) {
this.exposeModelAttributes = exposeModelAttributes;
}
/**
* Set the encoding scheme for this view.
* <p>Default is the request's encoding scheme
* (which is ISO-8859-1 if not specified otherwise).
*/
public void setEncodingScheme(String encodingScheme) {
this.encodingScheme = encodingScheme;
}
/**
* Set the status code for this view.
* <p>Default is to send 302/303, depending on the value of the
* {@link #setHttp10Compatible(boolean) http10Compatible} flag.
*/
public void setStatusCode(HttpStatus statusCode) {
this.statusCode = statusCode;
}
/**
* Whether to treat the redirect URL as a URI template.
* Set this flag to {@code false} if the redirect URL contains open
* and close curly braces "{", "}" and you don't want them interpreted
* as URI variables.
* <p>Defaults to {@code true}.
*/
public void setExpandUriTemplateVariables(boolean expandUriTemplateVariables) {
this.expandUriTemplateVariables = expandUriTemplateVariables;
}
/**
* When set to {@code true} the query string of the current URL is appended
* and thus propagated through to the redirected URL.
* <p>Defaults to {@code false}.
* @since 4.1
*/
public void setPropagateQueryParams(boolean propagateQueryParams) {
this.propagateQueryParams = propagateQueryParams;
}
/**
* Whether to propagate the query params of the current URL.
* @since 4.1
*/
public boolean isPropagateQueryProperties() {
return this.propagateQueryParams;
}
/**
* Returns "true" indicating this view performs a redirect.
*/
@Override
public boolean isRedirectView() {
return true;
}
/**
* An ApplicationContext is not strictly required for RedirectView.
*/
@Override
protected boolean isContextRequired() {
return false;
}
/**
* Convert model to request parameters and redirect to the given URL.
* @see #appendQueryProperties
* @see #sendRedirect
*/
@Override
protected void renderMergedOutputModel(Map<String, Object> model, HttpServletRequest request,
HttpServletResponse response) throws IOException {
String targetUrl = createTargetUrl(model, request);
targetUrl = updateTargetUrl(targetUrl, model, request, response);
FlashMap flashMap = RequestContextUtils.getOutputFlashMap(request);
if (!CollectionUtils.isEmpty(flashMap)) {
UriComponents uriComponents = UriComponentsBuilder.fromUriString(targetUrl).build();
flashMap.setTargetRequestPath(uriComponents.getPath());
flashMap.addTargetRequestParams(uriComponents.getQueryParams());
FlashMapManager flashMapManager = RequestContextUtils.getFlashMapManager(request);
if (flashMapManager == null) {
throw new IllegalStateException("FlashMapManager not found despite output FlashMap having been set");
}
flashMapManager.saveOutputFlashMap(flashMap, request, response);
}
sendRedirect(request, response, targetUrl, this.http10Compatible);
}
/**
* Create the target URL by checking if the redirect string is a URI template first,
* expanding it with the given model, and then optionally appending simple type model
* attributes as query String parameters.
*/
protected final String createTargetUrl(Map<String, Object> model, HttpServletRequest request)
throws UnsupportedEncodingException {
// Prepare target URL.
StringBuilder targetUrl = new StringBuilder();
if (this.contextRelative && getUrl().startsWith("/")) {
// Do not apply context path to relative URLs.
targetUrl.append(request.getContextPath());
}
targetUrl.append(getUrl());
String enc = this.encodingScheme;
if (enc == null) {
enc = request.getCharacterEncoding();
}
if (enc == null) {
enc = WebUtils.DEFAULT_CHARACTER_ENCODING;
}
if (this.expandUriTemplateVariables && StringUtils.hasText(targetUrl)) {
Map<String, String> variables = getCurrentRequestUriVariables(request);
targetUrl = replaceUriTemplateVariables(targetUrl.toString(), model, variables, enc);
}
if (isPropagateQueryProperties()) {
appendCurrentQueryParams(targetUrl, request);
}
if (this.exposeModelAttributes) {
appendQueryProperties(targetUrl, model, enc);
}
return targetUrl.toString();
}
/**
* Replace URI template variables in the target URL with encoded model
* attributes or URI variables from the current request. Model attributes
* referenced in the URL are removed from the model.
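	 * <p>For example (illustrative values): a target URL of "/users/{id}" with a model containing
	 * <code>id=42</code> expands to "/users/42", and <code>id</code> is then removed from the model.</p>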
* @param targetUrl the redirect URL
* @param model Map that contains model attributes
* @param currentUriVariables current request URI variables to use
* @param encodingScheme the encoding scheme to use
* @throws UnsupportedEncodingException if string encoding failed
*/
protected StringBuilder replaceUriTemplateVariables(
String targetUrl, Map<String, Object> model, Map<String, String> currentUriVariables, String encodingScheme)
throws UnsupportedEncodingException {
StringBuilder result = new StringBuilder();
Matcher matcher = URI_TEMPLATE_VARIABLE_PATTERN.matcher(targetUrl);
int endLastMatch = 0;
while (matcher.find()) {
String name = matcher.group(1);
Object value = (model.containsKey(name) ? model.remove(name) : currentUriVariables.get(name));
if (value == null) {
throw new IllegalArgumentException("Model has no value for key '" + name + "'");
}
result.append(targetUrl.substring(endLastMatch, matcher.start()));
result.append(UriUtils.encodePathSegment(value.toString(), encodingScheme));
endLastMatch = matcher.end();
}
result.append(targetUrl.substring(endLastMatch, targetUrl.length()));
return result;
}
@SuppressWarnings("unchecked")
private Map<String, String> getCurrentRequestUriVariables(HttpServletRequest request) {
String name = HandlerMapping.URI_TEMPLATE_VARIABLES_ATTRIBUTE;
Map<String, String> uriVars = (Map<String, String>) request.getAttribute(name);
return (uriVars != null) ? uriVars : Collections.<String, String> emptyMap();
}
/**
* Append the query string of the current request to the target redirect URL.
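	 * <p>For example (illustrative values): appending the current query string "a=1" to a target URL of
	 * "/next#frag" yields "/next?a=1#frag"; the anchor fragment is kept at the end.</p>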
* @param targetUrl the StringBuilder to append the properties to
* @param request the current request
* @since 4.1
*/
protected void appendCurrentQueryParams(StringBuilder targetUrl, HttpServletRequest request) {
String query = request.getQueryString();
if (StringUtils.hasText(query)) {
// Extract anchor fragment, if any.
String fragment = null;
int anchorIndex = targetUrl.indexOf("#");
if (anchorIndex > -1) {
fragment = targetUrl.substring(anchorIndex);
targetUrl.delete(anchorIndex, targetUrl.length());
}
if (targetUrl.toString().indexOf('?') < 0) {
targetUrl.append('?').append(query);
}
else {
targetUrl.append('&').append(query);
}
// Append anchor fragment, if any, to end of URL.
if (fragment != null) {
targetUrl.append(fragment);
}
}
}
/**
* Append query properties to the redirect URL.
* Stringifies, URL-encodes and formats model attributes as query properties.
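	 * <p>For example (illustrative values): a model containing <code>name="foo"</code> and an array
	 * <code>ids={1, 2}</code> appended to "/redirect" yields "/redirect?name=foo&amp;ids=1&amp;ids=2".</p>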
* @param targetUrl the StringBuilder to append the properties to
* @param model Map that contains model attributes
* @param encodingScheme the encoding scheme to use
* @throws UnsupportedEncodingException if string encoding failed
* @see #queryProperties
*/
@SuppressWarnings("unchecked")
protected void appendQueryProperties(StringBuilder targetUrl, Map<String, Object> model, String encodingScheme)
throws UnsupportedEncodingException {
// Extract anchor fragment, if any.
String fragment = null;
int anchorIndex = targetUrl.indexOf("#");
if (anchorIndex > -1) {
fragment = targetUrl.substring(anchorIndex);
targetUrl.delete(anchorIndex, targetUrl.length());
}
// If there aren't already some parameters, we need a "?".
boolean first = (targetUrl.toString().indexOf('?') < 0);
for (Map.Entry<String, Object> entry : queryProperties(model).entrySet()) {
Object rawValue = entry.getValue();
Iterator<Object> valueIter;
if (rawValue != null && rawValue.getClass().isArray()) {
valueIter = Arrays.asList(ObjectUtils.toObjectArray(rawValue)).iterator();
}
else if (rawValue instanceof Collection) {
valueIter = ((Collection<Object>) rawValue).iterator();
}
else {
valueIter = Collections.singleton(rawValue).iterator();
}
while (valueIter.hasNext()) {
Object value = valueIter.next();
if (first) {
targetUrl.append('?');
first = false;
}
else {
targetUrl.append('&');
}
String encodedKey = urlEncode(entry.getKey(), encodingScheme);
String encodedValue = (value != null ? urlEncode(value.toString(), encodingScheme) : "");
targetUrl.append(encodedKey).append('=').append(encodedValue);
}
}
// Append anchor fragment, if any, to end of URL.
if (fragment != null) {
targetUrl.append(fragment);
}
}
/**
* Determine name-value pairs for query strings, which will be stringified,
* URL-encoded and formatted by {@link #appendQueryProperties}.
* <p>This implementation filters the model through checking
* {@link #isEligibleProperty(String, Object)} for each element,
* by default accepting Strings, primitives and primitive wrappers only.
* @param model the original model Map
* @return the filtered Map of eligible query properties
* @see #isEligibleProperty(String, Object)
*/
protected Map<String, Object> queryProperties(Map<String, Object> model) {
Map<String, Object> result = new LinkedHashMap<String, Object>();
for (Map.Entry<String, Object> entry : model.entrySet()) {
if (isEligibleProperty(entry.getKey(), entry.getValue())) {
result.put(entry.getKey(), entry.getValue());
}
}
return result;
}
/**
* Determine whether the given model element should be exposed
* as a query property.
* <p>The default implementation considers Strings and primitives
* as eligible, and also arrays and Collections/Iterables with
* corresponding elements. This can be overridden in subclasses.
* @param key the key of the model element
* @param value the value of the model element
* @return whether the element is eligible as query property
*/
protected boolean isEligibleProperty(String key, Object value) {
if (value == null) {
return false;
}
if (isEligibleValue(value)) {
return true;
}
if (value.getClass().isArray()) {
int length = Array.getLength(value);
if (length == 0) {
return false;
}
for (int i = 0; i < length; i++) {
Object element = Array.get(value, i);
if (!isEligibleValue(element)) {
return false;
}
}
return true;
}
if (value instanceof Collection) {
Collection<?> coll = (Collection<?>) value;
if (coll.isEmpty()) {
return false;
}
for (Object element : coll) {
if (!isEligibleValue(element)) {
return false;
}
}
return true;
}
return false;
}
/**
* Determine whether the given model element value is eligible for exposure.
* <p>The default implementation considers primitives, Strings, Numbers, Dates,
* URIs, URLs and Locale objects as eligible. This can be overridden in subclasses.
* @param value the model element value
* @return whether the element value is eligible
* @see BeanUtils#isSimpleValueType
*/
protected boolean isEligibleValue(Object value) {
return (value != null && BeanUtils.isSimpleValueType(value.getClass()));
}
/**
* URL-encode the given input String with the given encoding scheme.
* <p>The default implementation uses {@code URLEncoder.encode(input, enc)}.
* @param input the unencoded input String
* @param encodingScheme the encoding scheme
* @return the encoded output String
* @throws UnsupportedEncodingException if thrown by the JDK URLEncoder
* @see java.net.URLEncoder#encode(String, String)
* @see java.net.URLEncoder#encode(String)
*/
protected String urlEncode(String input, String encodingScheme) throws UnsupportedEncodingException {
return (input != null ? URLEncoder.encode(input, encodingScheme) : null);
}
/**
* Find the registered {@link RequestDataValueProcessor}, if any, and allow
* it to update the redirect target URL.
* @param targetUrl the given redirect URL
 * @return the updated URL, or the same URL as the one passed in
*/
protected String updateTargetUrl(String targetUrl, Map<String, Object> model,
HttpServletRequest request, HttpServletResponse response) {
WebApplicationContext wac = getWebApplicationContext();
if (wac == null) {
wac = RequestContextUtils.findWebApplicationContext(request, getServletContext());
}
if (wac != null && wac.containsBean(RequestContextUtils.REQUEST_DATA_VALUE_PROCESSOR_BEAN_NAME)) {
RequestDataValueProcessor processor = wac.getBean(
RequestContextUtils.REQUEST_DATA_VALUE_PROCESSOR_BEAN_NAME, RequestDataValueProcessor.class);
return processor.processUrl(request, targetUrl);
}
return targetUrl;
}
/**
* Send a redirect back to the HTTP client
* @param request current HTTP request (allows for reacting to request method)
* @param response current HTTP response (for sending response headers)
* @param targetUrl the target URL to redirect to
* @param http10Compatible whether to stay compatible with HTTP 1.0 clients
* @throws IOException if thrown by response methods
*/
protected void sendRedirect(HttpServletRequest request, HttpServletResponse response,
String targetUrl, boolean http10Compatible) throws IOException {
String encodedRedirectURL = response.encodeRedirectURL(targetUrl);
if (http10Compatible) {
HttpStatus attributeStatusCode = (HttpStatus) request.getAttribute(View.RESPONSE_STATUS_ATTRIBUTE);
if (this.statusCode != null) {
response.setStatus(this.statusCode.value());
response.setHeader("Location", encodedRedirectURL);
}
else if (attributeStatusCode != null) {
response.setStatus(attributeStatusCode.value());
response.setHeader("Location", encodedRedirectURL);
}
else {
// Send status code 302 by default.
response.sendRedirect(encodedRedirectURL);
}
}
else {
HttpStatus statusCode = getHttp11StatusCode(request, response, targetUrl);
response.setStatus(statusCode.value());
response.setHeader("Location", encodedRedirectURL);
}
}
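	/*
	 * Behavioral sketch of sendRedirect above; each outcome follows directly
	 * from the branches in the code, no extra configuration is assumed:
	 *
	 *   http10Compatible = true,  no status set                  -> response.sendRedirect(...) (302)
	 *   http10Compatible = true,  statusCode = MOVED_PERMANENTLY -> 301 + "Location" header
	 *   http10Compatible = false, nothing set                    -> 303 SEE_OTHER + "Location" header
	 */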
/**
* Determines the status code to use for HTTP 1.1 compatible requests.
* <p>The default implementation returns the {@link #setStatusCode(HttpStatus) statusCode}
* property if set, or the value of the {@link #RESPONSE_STATUS_ATTRIBUTE} attribute.
* If neither are set, it defaults to {@link HttpStatus#SEE_OTHER} (303).
* @param request the request to inspect
* @param response the servlet response
* @param targetUrl the target URL
* @return the response status
*/
protected HttpStatus getHttp11StatusCode(
HttpServletRequest request, HttpServletResponse response, String targetUrl) {
if (this.statusCode != null) {
return this.statusCode;
}
HttpStatus attributeStatusCode = (HttpStatus) request.getAttribute(View.RESPONSE_STATUS_ATTRIBUTE);
if (attributeStatusCode != null) {
return attributeStatusCode;
}
return HttpStatus.SEE_OTHER;
}
}
|
apache-2.0
|
vmayoral/freeDDS
|
src/publication/publisher.c
|
959
|
/* OMG DDS Publisher implementation */
#include "publication/publisher.h"
#include <stdlib.h>
Publisher * new_Publisher(QoSPolicy* qos, DomainParticipant* dmp)
{
Publisher *p = (Publisher *) malloc(sizeof(Publisher));
p->qos = qos;
p->domain_participant = dmp;
return p;
}
void delete_Publisher(Publisher *self)
{
// TODO: delete the writers one by one once they are kept in a queue
delete_DataWritter(self->writters);
free(self);
}
void set_qos_Publisher(struct Publisher *self, QoSPolicy* qos_policy)
{
self->qos = qos_policy;
}
QoSPolicy* get_qos_Publisher(struct Publisher *self)
{
return self->qos;
}
/*
QoSPolicy instance pointer is passed because different DataWriters might have
different timing constraints
*/
DataWriter* create_datawriter_Publisher(struct Publisher *self, QoSPolicy* qos, Topic* topic)
{
//ToDo queue it up and return a pointer to it
self->writters = new_DataWritter(qos, self->domain_participant, topic);
return self->writters;
}
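/*
 * Minimal usage sketch (hedged: the qos/participant/topic variables below are
 * hypothetical and assumed to be created elsewhere in freeDDS):
 *
 *   Publisher *pub = new_Publisher(default_qos, participant);
 *   DataWriter *dw_fast = create_datawriter_Publisher(pub, fast_qos, topic_a);
 *   DataWriter *dw_slow = create_datawriter_Publisher(pub, slow_qos, topic_b);
 *   ...
 *   delete_Publisher(pub);
 *
 * Note that, as written, the Publisher keeps only the most recent writer, so
 * the second create_datawriter_Publisher call overwrites the first (see TODO).
 */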
|
apache-2.0
|
shwetasshinde24/Panoply
|
case-studies/h2o/src/H2oEnclave/IO/sgx_fcntl.cpp
|
2173
|
#include <sgx_fcntl_util.h>
#include <sgx_ocall_util.h>
#include <sgx_thread.h>
INIT_LOCK(ocall_open2);
INIT_LOCK(ocall_creat);
INIT_LOCK(ocall_openat2);
INIT_LOCK(ocall_fcntl1);
INIT_LOCK(ocall_fcntl2);
INIT_LOCK(ocall_fcntl3);
int sgx_wrapper_open(const char *pathname, int flags, ...)
{
va_list ap;
mode_t mode = 0;
va_start(ap, flags);
if (flags & O_CREAT)
mode = va_arg(ap, mode_t);
else
mode = 0777;
va_end(ap);
int retval;
sgx_status_t status = SAFE_INVOKE(ocall_open2, &retval, pathname, flags, mode);
CHECK_STATUS(status);
return retval;
}
int sgx_wrapper_creat(const char *pathname, unsigned int mode)
{
int retval;
sgx_status_t status = SAFE_INVOKE(ocall_creat, &retval, pathname, mode);
CHECK_STATUS(status);
return retval;
}
int sgx_wrapper_openat(int dirfd, const char *pathname, int flags, ...)
{
va_list ap;
mode_t mode = 0;
va_start(ap, flags);
if (flags & O_CREAT)
mode = va_arg(ap, mode_t);
else
mode = 0777;
va_end(ap);
int retval;
sgx_status_t status = SAFE_INVOKE(ocall_openat2, &retval, dirfd, pathname, flags, mode);
CHECK_STATUS(status);
return retval;
}
int sgx_wrapper_fcntl(int fd, int cmd, ...)
{
sgx_status_t status;
va_list ap;
int retval;
va_start(ap, cmd);
long larg = -1;
struct flock *flarg = NULL;
	// FIXME: consult the Linux kernel's fcntl(2) handling to cover every command correctly
switch(cmd) {
case F_GETFD:
case F_GETFL:
case F_GETOWN:
va_end(ap);
status = SAFE_INVOKE(ocall_fcntl1, &retval, fd, cmd);
CHECK_STATUS(status);
return retval;
case F_DUPFD:
case F_DUPFD_CLOEXEC:
case F_SETFD:
case F_SETFL:
case F_SETOWN:
		larg = va_arg(ap, long);
		va_end(ap);
		// fprintf(stderr, "fcntl setfd or setfl with flag: %ld \n", larg);
status = SAFE_INVOKE(ocall_fcntl2, &retval, fd, cmd, larg);
CHECK_STATUS(status);
return retval;
case F_SETLK:
case F_GETLK:
case F_SETLKW:
		flarg = va_arg(ap, struct flock *);
		va_end(ap);
status = SAFE_INVOKE(ocall_fcntl3, &retval, fd, cmd, flarg, sizeof(struct flock));
CHECK_STATUS(status);
return retval;
default:
va_end(ap);
return -1;
};
return -1;
}
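/*
 * Dispatch sketch for sgx_wrapper_fcntl above (fd is a hypothetical file
 * descriptor; the mapping follows directly from the switch statement):
 *
 *   sgx_wrapper_fcntl(fd, F_GETFL);                    // no argument    -> ocall_fcntl1
 *   sgx_wrapper_fcntl(fd, F_SETFL, (long) O_NONBLOCK); // long argument  -> ocall_fcntl2
 *   struct flock fl;                                   // filled in by the caller
 *   sgx_wrapper_fcntl(fd, F_SETLK, &fl);               // struct flock * -> ocall_fcntl3
 */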
|
apache-2.0
|
aws/aws-sdk-java
|
aws-java-sdk-ssmincidents/src/main/java/com/amazonaws/services/ssmincidents/model/ListTimelineEventsResult.java
|
6834
|
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.ssmincidents.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/ssm-incidents-2018-05-10/ListTimelineEvents" target="_top">AWS
* API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ListTimelineEventsResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {
/**
* <p>
* Details about each event that occurred during the incident.
* </p>
*/
private java.util.List<EventSummary> eventSummaries;
/**
* <p>
* The pagination token to continue to the next page of results.
* </p>
*/
private String nextToken;
/**
* <p>
* Details about each event that occurred during the incident.
* </p>
*
* @return Details about each event that occurred during the incident.
*/
public java.util.List<EventSummary> getEventSummaries() {
return eventSummaries;
}
/**
* <p>
* Details about each event that occurred during the incident.
* </p>
*
* @param eventSummaries
* Details about each event that occurred during the incident.
*/
public void setEventSummaries(java.util.Collection<EventSummary> eventSummaries) {
if (eventSummaries == null) {
this.eventSummaries = null;
return;
}
this.eventSummaries = new java.util.ArrayList<EventSummary>(eventSummaries);
}
/**
* <p>
* Details about each event that occurred during the incident.
* </p>
* <p>
* <b>NOTE:</b> This method appends the values to the existing list (if any). Use
* {@link #setEventSummaries(java.util.Collection)} or {@link #withEventSummaries(java.util.Collection)} if you want
* to override the existing values.
* </p>
*
* @param eventSummaries
* Details about each event that occurred during the incident.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ListTimelineEventsResult withEventSummaries(EventSummary... eventSummaries) {
if (this.eventSummaries == null) {
setEventSummaries(new java.util.ArrayList<EventSummary>(eventSummaries.length));
}
for (EventSummary ele : eventSummaries) {
this.eventSummaries.add(ele);
}
return this;
}
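    /*
     * Usage sketch (illustrative, per the NOTE above): the varargs overload
     * appends, so chained calls accumulate entries rather than replacing them.
     *
     *   result.withEventSummaries(e1).withEventSummaries(e2);
     *   // result.getEventSummaries() now contains [e1, e2]
     *   result.setEventSummaries(java.util.Arrays.asList(e3));
     *   // the list is replaced and now contains [e3]
     */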
/**
* <p>
* Details about each event that occurred during the incident.
* </p>
*
* @param eventSummaries
* Details about each event that occurred during the incident.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ListTimelineEventsResult withEventSummaries(java.util.Collection<EventSummary> eventSummaries) {
setEventSummaries(eventSummaries);
return this;
}
/**
* <p>
* The pagination token to continue to the next page of results.
* </p>
*
* @param nextToken
* The pagination token to continue to the next page of results.
*/
public void setNextToken(String nextToken) {
this.nextToken = nextToken;
}
/**
* <p>
* The pagination token to continue to the next page of results.
* </p>
*
* @return The pagination token to continue to the next page of results.
*/
public String getNextToken() {
return this.nextToken;
}
/**
* <p>
* The pagination token to continue to the next page of results.
* </p>
*
* @param nextToken
* The pagination token to continue to the next page of results.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public ListTimelineEventsResult withNextToken(String nextToken) {
setNextToken(nextToken);
return this;
}
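    /*
     * Pagination sketch (hedged: the client variable and the request's
     * withIncidentRecordArn setter are assumed from the usual AWS SDK for Java
     * conventions, not confirmed by this file):
     *
     *   ListTimelineEventsRequest req = new ListTimelineEventsRequest()
     *           .withIncidentRecordArn(incidentArn);
     *   ListTimelineEventsResult res;
     *   do {
     *       res = client.listTimelineEvents(req);
     *       res.getEventSummaries().forEach(summary -> process(summary));
     *       req.setNextToken(res.getNextToken());
     *   } while (res.getNextToken() != null);
     */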
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getEventSummaries() != null)
sb.append("EventSummaries: ").append(getEventSummaries()).append(",");
if (getNextToken() != null)
sb.append("NextToken: ").append(getNextToken());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof ListTimelineEventsResult == false)
return false;
ListTimelineEventsResult other = (ListTimelineEventsResult) obj;
if (other.getEventSummaries() == null ^ this.getEventSummaries() == null)
return false;
if (other.getEventSummaries() != null && other.getEventSummaries().equals(this.getEventSummaries()) == false)
return false;
if (other.getNextToken() == null ^ this.getNextToken() == null)
return false;
if (other.getNextToken() != null && other.getNextToken().equals(this.getNextToken()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getEventSummaries() == null) ? 0 : getEventSummaries().hashCode());
hashCode = prime * hashCode + ((getNextToken() == null) ? 0 : getNextToken().hashCode());
return hashCode;
}
@Override
public ListTimelineEventsResult clone() {
try {
return (ListTimelineEventsResult) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
}
|
apache-2.0
|
OSEHRA/ISAAC
|
core/api/src/main/java/sh/isaac/api/Util.java
|
3905
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
*
* You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*
 * Contributions from 2013-2017 were performed either by US government
* employees, or under US Veterans Health Administration contracts.
*
* US Veterans Health Administration contributions by government employees
* are work of the U.S. Government and are not subject to copyright
* protection in the United States. Portions contributed by government
* employees are USGovWork (17USC §105). Not subject to copyright.
*
* Contribution by contractors to the US Veterans Health Administration
* during this period are contractually contributed under the
* Apache License, Version 2.0.
*
* See: https://www.usa.gov/government-works
*
* Contributions prior to 2013:
*
* Copyright (C) International Health Terminology Standards Development Organisation.
* Licensed under the Apache License, Version 2.0.
*
*/
package sh.isaac.api;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import javafx.concurrent.Task;
import sh.isaac.api.chronicle.Chronology;
import sh.isaac.api.component.concept.ConceptChronology;
import sh.isaac.api.component.semantic.SemanticChronology;
import sh.isaac.api.externalizable.IsaacObjectType;
/**
* The Class Util.
*
* @author kec
*/
public class Util {
private static final Logger LOG = LogManager.getLogger(Util.class);
/**
* Adds the to task set and wait till done.
*
* @param <T> the generic type
* @param task the task
* @return the t
* @throws InterruptedException the interrupted exception
* @throws ExecutionException the execution exception
*/
public static <T> T addToTaskSetAndWaitTillDone(Task<T> task)
throws InterruptedException, ExecutionException {
Get.activeTasks().add(task);
try {
final T returnValue = task.get();
return returnValue;
} finally {
Get.activeTasks().remove(task);
}
}
/**
* String array to path array.
*
* @param strings the strings
* @return the path[]
*/
public static Path[] stringArrayToPathArray(String... strings) {
final Path[] paths = new Path[strings.length];
for (int i = 0; i < paths.length; i++) {
paths[i] = Paths.get(strings[i]);
}
return paths;
}
/**
* Convenience method to find the nearest concept related to a semantic. Recursively walks referenced components until it finds a concept.
    * @param nid the nid of the component to start from
* @return the nearest concept nid, or empty, if no concept can be found.
*/
public static Optional<Integer> getNearestConcept(int nid) {
Optional<? extends Chronology> c = Get.identifiedObjectService().getChronology(nid);
if (c.isPresent()) {
if (c.get().getIsaacObjectType() == IsaacObjectType.SEMANTIC) {
return getNearestConcept(((SemanticChronology)c.get()).getReferencedComponentNid());
}
else if (c.get().getIsaacObjectType() == IsaacObjectType.CONCEPT) {
return Optional.of(((ConceptChronology)c.get()).getNid());
}
else {
LOG.warn("Unexpected object type: " + c.get().getIsaacObjectType());
}
}
return Optional.empty();
}
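   /*
    * Sketch with hypothetical nids: for a semantic S attached to semantic T,
    * which is attached to concept C, getNearestConcept(nidOf(S)) recurses
    * S -> T -> C and returns Optional.of(nidOf(C)); a nid that resolves to
    * neither a semantic nor a concept yields Optional.empty().
    */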
}
|
apache-2.0
|
agolovatjuk/alexander4j
|
README.md
|
544
|
# alexander4j
[](https://travis-ci.org/agolovatjuk/alexander4j)
[](https://codecov.io/gh/agolovatjuk/alexander4j)
http://job4j.ru/
My goal is to master Java at a professional level and, in line with that, to change my line of work.
The deadline I have set for myself is September 2017.
|
apache-2.0
|
garethahealy/jboss-fuse-examples
|
mbeans-expose/src/test/java/com/garethahealy/mbeans/expose/routes/CamelContextTest.java
|
923
|
/*
* #%L
* GarethHealy :: JBoss Fuse Examples :: MBeans Expose
* %%
* Copyright (C) 2013 - 2018 Gareth Healy
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package com.garethahealy.mbeans.expose.routes;
import org.junit.Test;
public class CamelContextTest extends BaseCamelBlueprintTestSupport {
@Test
public void camelContextIsNotNull() {
assertNotNull(context);
}
}
|
apache-2.0
|
googleads/google-ads-java
|
google-ads-stubs-v10/src/main/java/com/google/ads/googleads/v10/resources/SharedCriterionOrBuilder.java
|
8879
|
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/ads/googleads/v10/resources/shared_criterion.proto
package com.google.ads.googleads.v10.resources;
public interface SharedCriterionOrBuilder extends
// @@protoc_insertion_point(interface_extends:google.ads.googleads.v10.resources.SharedCriterion)
com.google.protobuf.MessageOrBuilder {
/**
* <pre>
* Immutable. The resource name of the shared criterion.
* Shared set resource names have the form:
* `customers/{customer_id}/sharedCriteria/{shared_set_id}~{criterion_id}`
* </pre>
*
* <code>string resource_name = 1 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... }</code>
* @return The resourceName.
*/
java.lang.String getResourceName();
/**
* <pre>
* Immutable. The resource name of the shared criterion.
* Shared set resource names have the form:
* `customers/{customer_id}/sharedCriteria/{shared_set_id}~{criterion_id}`
* </pre>
*
* <code>string resource_name = 1 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... }</code>
* @return The bytes for resourceName.
*/
com.google.protobuf.ByteString
getResourceNameBytes();
/**
* <pre>
* Immutable. The shared set to which the shared criterion belongs.
* </pre>
*
* <code>optional string shared_set = 10 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... }</code>
* @return Whether the sharedSet field is set.
*/
boolean hasSharedSet();
/**
* <pre>
* Immutable. The shared set to which the shared criterion belongs.
* </pre>
*
* <code>optional string shared_set = 10 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... }</code>
* @return The sharedSet.
*/
java.lang.String getSharedSet();
/**
* <pre>
* Immutable. The shared set to which the shared criterion belongs.
* </pre>
*
* <code>optional string shared_set = 10 [(.google.api.field_behavior) = IMMUTABLE, (.google.api.resource_reference) = { ... }</code>
* @return The bytes for sharedSet.
*/
com.google.protobuf.ByteString
getSharedSetBytes();
/**
* <pre>
* Output only. The ID of the criterion.
* This field is ignored for mutates.
* </pre>
*
* <code>optional int64 criterion_id = 11 [(.google.api.field_behavior) = OUTPUT_ONLY];</code>
* @return Whether the criterionId field is set.
*/
boolean hasCriterionId();
/**
* <pre>
* Output only. The ID of the criterion.
* This field is ignored for mutates.
* </pre>
*
* <code>optional int64 criterion_id = 11 [(.google.api.field_behavior) = OUTPUT_ONLY];</code>
* @return The criterionId.
*/
long getCriterionId();
/**
* <pre>
* Output only. The type of the criterion.
* </pre>
*
* <code>.google.ads.googleads.v10.enums.CriterionTypeEnum.CriterionType type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY];</code>
* @return The enum numeric value on the wire for type.
*/
int getTypeValue();
/**
* <pre>
* Output only. The type of the criterion.
* </pre>
*
* <code>.google.ads.googleads.v10.enums.CriterionTypeEnum.CriterionType type = 4 [(.google.api.field_behavior) = OUTPUT_ONLY];</code>
* @return The type.
*/
com.google.ads.googleads.v10.enums.CriterionTypeEnum.CriterionType getType();
/**
* <pre>
* Immutable. Keyword.
* </pre>
*
* <code>.google.ads.googleads.v10.common.KeywordInfo keyword = 3 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the keyword field is set.
*/
boolean hasKeyword();
/**
* <pre>
* Immutable. Keyword.
* </pre>
*
* <code>.google.ads.googleads.v10.common.KeywordInfo keyword = 3 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The keyword.
*/
com.google.ads.googleads.v10.common.KeywordInfo getKeyword();
/**
* <pre>
* Immutable. Keyword.
* </pre>
*
* <code>.google.ads.googleads.v10.common.KeywordInfo keyword = 3 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.KeywordInfoOrBuilder getKeywordOrBuilder();
/**
* <pre>
* Immutable. YouTube Video.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeVideoInfo youtube_video = 5 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the youtubeVideo field is set.
*/
boolean hasYoutubeVideo();
/**
* <pre>
* Immutable. YouTube Video.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeVideoInfo youtube_video = 5 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The youtubeVideo.
*/
com.google.ads.googleads.v10.common.YouTubeVideoInfo getYoutubeVideo();
/**
* <pre>
* Immutable. YouTube Video.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeVideoInfo youtube_video = 5 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.YouTubeVideoInfoOrBuilder getYoutubeVideoOrBuilder();
/**
* <pre>
* Immutable. YouTube Channel.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeChannelInfo youtube_channel = 6 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the youtubeChannel field is set.
*/
boolean hasYoutubeChannel();
/**
* <pre>
* Immutable. YouTube Channel.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeChannelInfo youtube_channel = 6 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The youtubeChannel.
*/
com.google.ads.googleads.v10.common.YouTubeChannelInfo getYoutubeChannel();
/**
* <pre>
* Immutable. YouTube Channel.
* </pre>
*
* <code>.google.ads.googleads.v10.common.YouTubeChannelInfo youtube_channel = 6 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.YouTubeChannelInfoOrBuilder getYoutubeChannelOrBuilder();
/**
* <pre>
* Immutable. Placement.
* </pre>
*
* <code>.google.ads.googleads.v10.common.PlacementInfo placement = 7 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the placement field is set.
*/
boolean hasPlacement();
/**
* <pre>
* Immutable. Placement.
* </pre>
*
* <code>.google.ads.googleads.v10.common.PlacementInfo placement = 7 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The placement.
*/
com.google.ads.googleads.v10.common.PlacementInfo getPlacement();
/**
* <pre>
* Immutable. Placement.
* </pre>
*
* <code>.google.ads.googleads.v10.common.PlacementInfo placement = 7 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.PlacementInfoOrBuilder getPlacementOrBuilder();
/**
* <pre>
* Immutable. Mobile App Category.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileAppCategoryInfo mobile_app_category = 8 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the mobileAppCategory field is set.
*/
boolean hasMobileAppCategory();
/**
* <pre>
* Immutable. Mobile App Category.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileAppCategoryInfo mobile_app_category = 8 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The mobileAppCategory.
*/
com.google.ads.googleads.v10.common.MobileAppCategoryInfo getMobileAppCategory();
/**
* <pre>
* Immutable. Mobile App Category.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileAppCategoryInfo mobile_app_category = 8 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.MobileAppCategoryInfoOrBuilder getMobileAppCategoryOrBuilder();
/**
* <pre>
* Immutable. Mobile application.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileApplicationInfo mobile_application = 9 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return Whether the mobileApplication field is set.
*/
boolean hasMobileApplication();
/**
* <pre>
* Immutable. Mobile application.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileApplicationInfo mobile_application = 9 [(.google.api.field_behavior) = IMMUTABLE];</code>
* @return The mobileApplication.
*/
com.google.ads.googleads.v10.common.MobileApplicationInfo getMobileApplication();
/**
* <pre>
* Immutable. Mobile application.
* </pre>
*
* <code>.google.ads.googleads.v10.common.MobileApplicationInfo mobile_application = 9 [(.google.api.field_behavior) = IMMUTABLE];</code>
*/
com.google.ads.googleads.v10.common.MobileApplicationInfoOrBuilder getMobileApplicationOrBuilder();
public com.google.ads.googleads.v10.resources.SharedCriterion.CriterionCase getCriterionCase();
}
|
apache-2.0
|
hilmiat/NF_android_complete
|
Pertemuan2/pertemuan_dua/src/main/java/com/example/DemoConditional.java
|
707
|
package com.example;
/**
* Created by hilmiat on 7/29/17.
*/
public class DemoConditional {
public static char getGrade(int nilai){
char grade = 'D';
if(nilai > 85){
grade = 'A';
}else if(nilai > 69){
grade = 'B';
}else if(nilai >= 60){
grade = 'C';
}
return grade;
}
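    /*
     * Worked example for the sample scores used in main below:
     *   78 -> 'B' (78 > 69), 90 -> 'A', 89 -> 'A', 68 -> 'C' (68 >= 60), 77 -> 'B'
     */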
public static void main(String[] args) {
int[] nilai_siswa = {78,90,89,68,77};
OperasiArray.cetakArray(nilai_siswa);
        //86-100 A, 70-85 B, 60-69 C, 0-59 D
        //a D means fail; A, B and C mean pass
for(int n:nilai_siswa){
System.out.println("Nilai "+n+",garade-nya:"+getGrade(n));
}
}
}
|
apache-2.0
|
resin-io-library/base-images
|
balena-base-images/node/beaglebone-green-wifi/fedora/34/17.6.0/build/Dockerfile
|
2760
|
# AUTOGENERATED FILE
FROM balenalib/beaglebone-green-wifi-fedora:34-build
ENV NODE_VERSION 17.6.0
ENV YARN_VERSION 1.22.4
RUN for key in \
6A010C5166006599AA17F08146C2130DFD2497F5 \
; do \
gpg --keyserver pgp.mit.edu --recv-keys "$key" || \
gpg --keyserver keyserver.pgp.com --recv-keys "$key" || \
gpg --keyserver keyserver.ubuntu.com --recv-keys "$key" ; \
done \
&& curl -SLO "http://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION-linux-armv7l.tar.gz" \
&& echo "31786cf6387c85a34f1eb85be5838facaad40f50f61030557e42a4af4bb31294 node-v$NODE_VERSION-linux-armv7l.tar.gz" | sha256sum -c - \
&& tar -xzf "node-v$NODE_VERSION-linux-armv7l.tar.gz" -C /usr/local --strip-components=1 \
&& rm "node-v$NODE_VERSION-linux-armv7l.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz.asc" \
&& gpg --batch --verify yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& mkdir -p /opt/yarn \
&& tar -xzf yarn-v$YARN_VERSION.tar.gz -C /opt/yarn --strip-components=1 \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarn \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarnpkg \
&& rm yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& npm config set unsafe-perm true -g --unsafe-perm \
&& rm -rf /tmp/*
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/[email protected]" \
&& echo "Running test-stack@node" \
&& chmod +x [email protected] \
&& bash [email protected] \
&& rm -rf [email protected]
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo $'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v7 \nOS: Fedora 34 \nVariant: build variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nNode.js v17.6.0, Yarn v1.22.4 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo $'#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh
|
apache-2.0
|
sijie/bookkeeper
|
bookkeeper-server/src/test/java/org/apache/bookkeeper/client/BookKeeperTest.java
|
49996
|
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.client;
import static org.apache.bookkeeper.client.BookKeeperClientStats.WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS;
import static org.apache.bookkeeper.client.BookKeeperClientStats.WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS;
import static org.apache.bookkeeper.common.concurrent.FutureUtils.result;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import io.netty.util.IllegalReferenceCountException;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
import org.apache.bookkeeper.client.AsyncCallback.ReadCallback;
import org.apache.bookkeeper.client.BKException.BKBookieHandleNotAvailableException;
import org.apache.bookkeeper.client.BKException.BKIllegalOpException;
import org.apache.bookkeeper.client.BookKeeper.DigestType;
import org.apache.bookkeeper.client.api.WriteFlag;
import org.apache.bookkeeper.client.api.WriteHandle;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.net.BookieSocketAddress;
import org.apache.bookkeeper.proto.BookieServer;
import org.apache.bookkeeper.stats.NullStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.test.BookKeeperClusterTestCase;
import org.apache.bookkeeper.test.TestStatsProvider;
import org.apache.bookkeeper.zookeeper.BoundExponentialBackoffRetryPolicy;
import org.apache.bookkeeper.zookeeper.ZooKeeperClient;
import org.apache.bookkeeper.zookeeper.ZooKeeperWatcherBase;
import org.apache.zookeeper.AsyncCallback.StringCallback;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.ConnectionLossException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.Watcher.Event.KeeperState;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;
import org.apache.zookeeper.data.ACL;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Tests of the main BookKeeper client.
*/
public class BookKeeperTest extends BookKeeperClusterTestCase {
private static final Logger LOG = LoggerFactory.getLogger(BookKeeperTest.class);
private static final long INVALID_LEDGERID = -1L;
private final DigestType digestType;
public BookKeeperTest() {
super(4);
this.digestType = DigestType.CRC32;
}
@Test
public void testConstructionZkDelay() throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri())
.setZkTimeout(20000);
CountDownLatch l = new CountDownLatch(1);
zkUtil.sleepCluster(200, TimeUnit.MILLISECONDS, l);
l.await();
BookKeeper bkc = new BookKeeper(conf);
bkc.createLedger(digestType, "testPasswd".getBytes()).close();
bkc.close();
}
@Test
public void testConstructionNotConnectedExplicitZk() throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri())
.setZkTimeout(20000);
CountDownLatch l = new CountDownLatch(1);
zkUtil.sleepCluster(200, TimeUnit.MILLISECONDS, l);
l.await();
ZooKeeper zk = new ZooKeeper(
zkUtil.getZooKeeperConnectString(),
50,
event -> {});
assertFalse("ZK shouldn't have connected yet", zk.getState().isConnected());
try {
BookKeeper bkc = new BookKeeper(conf, zk);
fail("Shouldn't be able to construct with unconnected zk");
} catch (IOException cle) {
// correct behaviour
assertTrue(cle.getCause() instanceof ConnectionLossException);
}
}
/**
* Test that bookkeeper is not able to open ledgers if
* it provides the wrong password or wrong digest.
*/
@Test
public void testBookkeeperDigestPasswordWithAutoDetection() throws Exception {
testBookkeeperDigestPassword(true);
}
@Test
public void testBookkeeperDigestPasswordWithoutAutoDetection() throws Exception {
testBookkeeperDigestPassword(false);
}
void testBookkeeperDigestPassword(boolean autodetection) throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
conf.setEnableDigestTypeAutodetection(autodetection);
BookKeeper bkc = new BookKeeper(conf);
DigestType digestCorrect = digestType;
byte[] passwdCorrect = "AAAAAAA".getBytes();
DigestType digestBad = digestType == DigestType.MAC ? DigestType.CRC32 : DigestType.MAC;
byte[] passwdBad = "BBBBBBB".getBytes();
LedgerHandle lh = null;
try {
lh = bkc.createLedger(digestCorrect, passwdCorrect);
long id = lh.getId();
for (int i = 0; i < 100; i++) {
lh.addEntry("foobar".getBytes());
}
lh.close();
// try open with bad passwd
try {
bkc.openLedger(id, digestCorrect, passwdBad);
fail("Shouldn't be able to open with bad passwd");
} catch (BKException.BKUnauthorizedAccessException bke) {
// correct behaviour
}
// try open with bad digest
try {
bkc.openLedger(id, digestBad, passwdCorrect);
if (!autodetection) {
fail("Shouldn't be able to open with bad digest");
}
} catch (BKException.BKDigestMatchException bke) {
// correct behaviour
if (autodetection) {
fail("Should not throw digest match exception if `autodetection` is enabled");
}
}
// try open with both bad
try {
bkc.openLedger(id, digestBad, passwdBad);
fail("Shouldn't be able to open with bad passwd and digest");
} catch (BKException.BKUnauthorizedAccessException bke) {
// correct behaviour
}
// try open with both correct
bkc.openLedger(id, digestCorrect, passwdCorrect).close();
} finally {
if (lh != null) {
lh.close();
}
bkc.close();
}
}
/**
* Tests that when trying to use a closed BK client object we get
* a callback error and not an InterruptedException.
* @throws Exception
*/
@Test
public void testAsyncReadWithError() throws Exception {
LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "testPasswd".getBytes());
bkc.close();
final AtomicInteger result = new AtomicInteger(0);
final CountDownLatch counter = new CountDownLatch(1);
        // Try to write; we should get an error callback but not an exception
lh.asyncAddEntry("test".getBytes(), new AddCallback() {
public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
result.set(rc);
counter.countDown();
}
}, null);
counter.await();
assertTrue(result.get() != 0);
}
/**
* Test that bookkeeper will close cleanly if close is issued
* while another operation is in progress.
*/
@Test
public void testCloseDuringOp() throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
for (int i = 0; i < 10; i++) {
final BookKeeper client = new BookKeeper(conf);
final CountDownLatch l = new CountDownLatch(1);
final AtomicBoolean success = new AtomicBoolean(false);
Thread t = new Thread() {
public void run() {
try {
LedgerHandle lh = client.createLedger(3, 3, digestType, "testPasswd".getBytes());
startNewBookie();
killBookie(0);
lh.asyncAddEntry("test".getBytes(), new AddCallback() {
@Override
public void addComplete(int rc, LedgerHandle lh, long entryId, Object ctx) {
// noop, we don't care if this completes
}
}, null);
client.close();
success.set(true);
l.countDown();
} catch (Exception e) {
LOG.error("Error running test", e);
success.set(false);
l.countDown();
}
}
};
t.start();
assertTrue("Close never completed", l.await(10, TimeUnit.SECONDS));
assertTrue("Close was not successful", success.get());
}
}
@Test
public void testIsClosed() throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
BookKeeper bkc = new BookKeeper(conf);
LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes());
Long lId = lh.getId();
lh.addEntry("000".getBytes());
boolean result = bkc.isClosed(lId);
assertTrue("Ledger shouldn't be flagged as closed!", !result);
lh.close();
result = bkc.isClosed(lId);
assertTrue("Ledger should be flagged as closed!", result);
bkc.close();
}
@Test
public void testReadFailureCallback() throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
BookKeeper bkc = new BookKeeper(conf);
LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes());
final int numEntries = 10;
for (int i = 0; i < numEntries; i++) {
lh.addEntry(("entry-" + i).getBytes());
}
stopBKCluster();
try {
lh.readEntries(0, numEntries - 1);
fail("Read operation should have failed");
} catch (BKBookieHandleNotAvailableException e) {
// expected
}
final CountDownLatch counter = new CountDownLatch(1);
final AtomicInteger receivedResponses = new AtomicInteger(0);
final AtomicInteger returnCode = new AtomicInteger();
lh.asyncReadEntries(0, numEntries - 1, new ReadCallback() {
@Override
public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx) {
returnCode.set(rc);
receivedResponses.incrementAndGet();
counter.countDown();
}
}, null);
counter.await();
// Wait extra time to ensure no extra responses received
Thread.sleep(1000);
assertEquals(1, receivedResponses.get());
assertEquals(BKException.Code.BookieHandleNotAvailableException, returnCode.get());
bkc.close();
}
@Test
public void testAutoCloseableBookKeeper() throws Exception {
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
BookKeeper bkc2;
try (BookKeeper bkc = new BookKeeper(conf)) {
bkc2 = bkc;
long ledgerId;
try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) {
ledgerId = lh.getId();
for (int i = 0; i < 100; i++) {
lh.addEntry("foobar".getBytes());
}
}
assertTrue("Ledger should be closed!", bkc.isClosed(ledgerId));
}
assertTrue("BookKeeper should be closed!", bkc2.closed);
}
@Test
public void testReadAfterLastAddConfirmed() throws Exception {
ClientConfiguration clientConfiguration = new ClientConfiguration();
clientConfiguration.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkWriter = new BookKeeper(clientConfiguration)) {
LedgerHandle writeLh = bkWriter.createLedger(digestType, "testPasswd".getBytes());
long ledgerId = writeLh.getId();
int numOfEntries = 5;
for (int i = 0; i < numOfEntries; i++) {
writeLh.addEntry(("foobar" + i).getBytes());
}
try (BookKeeper bkReader = new BookKeeper(clientConfiguration);
LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) {
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
assertFalse(writeLh.isClosed());
// with readUnconfirmedEntries we are able to read all of the entries
Enumeration<LedgerEntry> entries = rlh.readUnconfirmedEntries(0, numOfEntries - 1);
int entryId = 0;
while (entries.hasMoreElements()) {
LedgerEntry entry = entries.nextElement();
String entryString = new String(entry.getEntry());
assertTrue("Expected entry String: " + ("foobar" + entryId)
+ " actual entry String: " + entryString,
entryString.equals("foobar" + entryId));
entryId++;
}
}
try (BookKeeper bkReader = new BookKeeper(clientConfiguration);
LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) {
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
assertFalse(writeLh.isClosed());
// without readUnconfirmedEntries we are not able to read all of the entries
try {
rlh.readEntries(0, numOfEntries - 1);
fail("shoud not be able to read up to " + (numOfEntries - 1) + " with readEntries");
} catch (BKException.BKReadException expected) {
}
// read all entries within the 0..LastAddConfirmed range with readEntries
assertEquals(rlh.getLastAddConfirmed() + 1,
Collections.list(rlh.readEntries(0, rlh.getLastAddConfirmed())).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
// read all entries within the 0..LastAddConfirmed range with readUnconfirmedEntries
assertEquals(rlh.getLastAddConfirmed() + 1,
Collections.list(rlh.readUnconfirmedEntries(0, rlh.getLastAddConfirmed())).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
// read all entries within the LastAddConfirmed..numOfEntries - 1 range with readUnconfirmedEntries
assertEquals(numOfEntries - rlh.getLastAddConfirmed(),
Collections.list(rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries - 1)).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
try {
// read all entries within the LastAddConfirmed..numOfEntries range with readUnconfirmedEntries
// this is an error, we are going outside the range of existing entries
rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries);
fail("the read tried to access data for unexisting entry id " + numOfEntries);
} catch (BKException.BKNoSuchEntryException expected) {
// expecting a BKNoSuchEntryException, as the entry does not exist on bookies
}
try {
// read all entries within the LastAddConfirmed..numOfEntries range with readEntries
// this is an error, we are going outside the range of existing entries
rlh.readEntries(rlh.getLastAddConfirmed(), numOfEntries);
fail("the read tries to access data for unexisting entry id " + numOfEntries);
} catch (BKException.BKReadException expected) {
// expecting a BKReadException, as the client rejected the request to access entries
// after local LastAddConfirmed
}
}
// ensure that after restarting every bookie entries are not lost
// even entries after the LastAddConfirmed
restartBookies();
try (BookKeeper bkReader = new BookKeeper(clientConfiguration);
LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) {
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
assertFalse(writeLh.isClosed());
// with readUnconfirmedEntries we are able to read all of the entries
Enumeration<LedgerEntry> entries = rlh.readUnconfirmedEntries(0, numOfEntries - 1);
int entryId = 0;
while (entries.hasMoreElements()) {
LedgerEntry entry = entries.nextElement();
String entryString = new String(entry.getEntry());
assertTrue("Expected entry String: " + ("foobar" + entryId)
+ " actual entry String: " + entryString,
entryString.equals("foobar" + entryId));
entryId++;
}
}
try (BookKeeper bkReader = new BookKeeper(clientConfiguration);
LedgerHandle rlh = bkReader.openLedgerNoRecovery(ledgerId, digestType, "testPasswd".getBytes())) {
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
assertFalse(writeLh.isClosed());
// without readUnconfirmedEntries we are not able to read all of the entries
try {
rlh.readEntries(0, numOfEntries - 1);
fail("shoud not be able to read up to " + (numOfEntries - 1) + " with readEntries");
} catch (BKException.BKReadException expected) {
}
// read all entries within the 0..LastAddConfirmed range with readEntries
assertEquals(rlh.getLastAddConfirmed() + 1,
Collections.list(rlh.readEntries(0, rlh.getLastAddConfirmed())).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
// read all entries within the 0..LastAddConfirmed range with readUnconfirmedEntries
assertEquals(rlh.getLastAddConfirmed() + 1,
Collections.list(rlh.readUnconfirmedEntries(0, rlh.getLastAddConfirmed())).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
// read all entries within the LastAddConfirmed..numOfEntries - 1 range with readUnconfirmedEntries
assertEquals(numOfEntries - rlh.getLastAddConfirmed(),
Collections.list(rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries - 1)).size());
// assert local LAC does not change after reads
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 2) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 2)));
try {
// read all entries within the LastAddConfirmed..numOfEntries range with readUnconfirmedEntries
// this is an error, we are going outside the range of existing entries
rlh.readUnconfirmedEntries(rlh.getLastAddConfirmed(), numOfEntries);
fail("the read tried to access data for unexisting entry id " + numOfEntries);
} catch (BKException.BKNoSuchEntryException expected) {
// expecting a BKNoSuchEntryException, as the entry does not exist on bookies
}
try {
// read all entries within the LastAddConfirmed..numOfEntries range with readEntries
// this is an error, we are going outside the range of existing entries
rlh.readEntries(rlh.getLastAddConfirmed(), numOfEntries);
fail("the read tries to access data for unexisting entry id " + numOfEntries);
} catch (BKException.BKReadException expected) {
// expecting a BKReadException, as the client rejected the request to access entries
// after local LastAddConfirmed
}
}
// open ledger with fencing, this will repair the ledger and make the last entry readable
try (BookKeeper bkReader = new BookKeeper(clientConfiguration);
LedgerHandle rlh = bkReader.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertTrue(
"Expected LAC of rlh: " + (numOfEntries - 1) + " actual LAC of rlh: " + rlh.getLastAddConfirmed(),
(rlh.getLastAddConfirmed() == (numOfEntries - 1)));
assertFalse(writeLh.isClosed());
                // after fencing, plain readEntries can read all of the entries
Enumeration<LedgerEntry> entries = rlh.readEntries(0, numOfEntries - 1);
int entryId = 0;
while (entries.hasMoreElements()) {
LedgerEntry entry = entries.nextElement();
String entryString = new String(entry.getEntry());
assertTrue("Expected entry String: " + ("foobar" + entryId)
+ " actual entry String: " + entryString,
entryString.equals("foobar" + entryId));
entryId++;
}
}
// should still be able to close as long as recovery closed the ledger
// with the same last entryId and length as in the write handle.
writeLh.close();
}
}
@Test
public void testReadWriteWithV2WireProtocol() throws Exception {
ClientConfiguration conf = new ClientConfiguration().setUseV2WireProtocol(true);
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
int numEntries = 100;
byte[] data = "foobar".getBytes();
try (BookKeeper bkc = new BookKeeper(conf)) {
// basic read/write
{
long ledgerId;
try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) {
ledgerId = lh.getId();
for (int i = 0; i < numEntries; i++) {
lh.addEntry(data);
}
}
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
assertArrayEquals(data, entry.getEntry());
}
}
}
// basic fencing
{
long ledgerId;
try (LedgerHandle lh2 = bkc.createLedger(digestType, "testPasswd".getBytes())) {
ledgerId = lh2.getId();
lh2.addEntry(data);
try (LedgerHandle lh2Fence = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
}
try {
lh2.addEntry(data);
fail("ledger should be fenced");
} catch (BKException.BKLedgerFencedException ex){
}
}
}
}
}
@SuppressWarnings("deprecation")
@Test
public void testReadEntryReleaseByteBufs() throws Exception {
ClientConfiguration confWriter = new ClientConfiguration();
confWriter.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
int numEntries = 10;
byte[] data = "foobar".getBytes();
long ledgerId;
try (BookKeeper bkc = new BookKeeper(confWriter)) {
try (LedgerHandle lh = bkc.createLedger(digestType, "testPasswd".getBytes())) {
ledgerId = lh.getId();
for (int i = 0; i < numEntries; i++) {
lh.addEntry(data);
}
}
}
// v2 protocol, using pooled buffers
ClientConfiguration confReader1 = new ClientConfiguration()
.setUseV2WireProtocol(true)
.setNettyUsePooledBuffers(true)
.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkc = new BookKeeper(confReader1)) {
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
try {
entry.data.release();
} catch (IllegalReferenceCountException ok) {
fail("ByteBuf already released");
}
}
}
}
// v2 protocol, not using pooled buffers
ClientConfiguration confReader2 = new ClientConfiguration()
.setUseV2WireProtocol(true)
.setNettyUsePooledBuffers(false);
confReader2.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkc = new BookKeeper(confReader2)) {
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
try {
entry.data.release();
} catch (IllegalReferenceCountException e) {
fail("ByteBuf already released");
}
}
}
}
// v3 protocol, not using pooled buffers
ClientConfiguration confReader3 = new ClientConfiguration()
.setUseV2WireProtocol(false)
.setNettyUsePooledBuffers(false)
.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkc = new BookKeeper(confReader3)) {
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
assertTrue("Can't release entry " + entry.getEntryId() + ": ref = " + entry.data.refCnt(),
entry.data.release());
try {
assertFalse(entry.data.release());
fail("ByteBuf already released");
} catch (IllegalReferenceCountException ok) {
}
}
}
}
// v3 protocol, using pooled buffers
// v3 protocol from 4.5 always "wraps" buffers returned by protobuf
ClientConfiguration confReader4 = new ClientConfiguration()
.setUseV2WireProtocol(false)
.setNettyUsePooledBuffers(true)
.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkc = new BookKeeper(confReader4)) {
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
                    // ByteBufs are reference counted; releasing twice must fail
assertTrue("Can't release entry " + entry.getEntryId() + ": ref = " + entry.data.refCnt(),
entry.data.release());
try {
assertFalse(entry.data.release());
fail("ByteBuf already released");
} catch (IllegalReferenceCountException ok) {
}
}
}
}
// cannot read twice an entry
ClientConfiguration confReader5 = new ClientConfiguration();
confReader5.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
try (BookKeeper bkc = new BookKeeper(confReader5)) {
try (LedgerHandle lh = bkc.openLedger(ledgerId, digestType, "testPasswd".getBytes())) {
assertEquals(numEntries - 1, lh.readLastConfirmed());
for (Enumeration<LedgerEntry> readEntries = lh.readEntries(0, numEntries - 1);
readEntries.hasMoreElements();) {
LedgerEntry entry = readEntries.nextElement();
entry.getEntry();
try {
entry.getEntry();
fail("entry data accessed twice");
} catch (IllegalStateException ok){
}
try {
entry.getEntryInputStream();
fail("entry data accessed twice");
} catch (IllegalStateException ok){
}
}
}
}
}
/**
* Tests that issuing multiple reads for the same entry at the same time works as expected.
*
* @throws Exception
*/
@Test
public void testDoubleRead() throws Exception {
LedgerHandle lh = bkc.createLedger(digestType, "".getBytes());
lh.addEntry("test".getBytes());
// Read the same entry more times asynchronously
final int n = 10;
final CountDownLatch latch = new CountDownLatch(n);
for (int i = 0; i < n; i++) {
lh.asyncReadEntries(0, 0, new ReadCallback() {
public void readComplete(int rc, LedgerHandle lh,
Enumeration<LedgerEntry> seq, Object ctx) {
if (rc == BKException.Code.OK) {
latch.countDown();
} else {
fail("Read fail");
}
}
}, null);
}
latch.await();
}
/**
* Tests that issuing multiple reads for the same entry at the same time works as expected.
*
* @throws Exception
*/
@Test
public void testDoubleReadWithV2Protocol() throws Exception {
ClientConfiguration conf = new ClientConfiguration(baseClientConf);
conf.setUseV2WireProtocol(true);
BookKeeperTestClient bkc = new BookKeeperTestClient(conf);
LedgerHandle lh = bkc.createLedger(digestType, "".getBytes());
lh.addEntry("test".getBytes());
// Read the same entry more times asynchronously
final int n = 10;
final CountDownLatch latch = new CountDownLatch(n);
for (int i = 0; i < n; i++) {
lh.asyncReadEntries(0, 0, new ReadCallback() {
public void readComplete(int rc, LedgerHandle lh,
Enumeration<LedgerEntry> seq, Object ctx) {
if (rc == BKException.Code.OK) {
latch.countDown();
} else {
fail("Read fail");
}
}
}, null);
}
latch.await();
bkc.close();
}
@Test(expected = BKIllegalOpException.class)
public void testCannotUseWriteFlagsOnV2Protocol() throws Exception {
ClientConfiguration conf = new ClientConfiguration(baseClientConf);
conf.setUseV2WireProtocol(true);
try (BookKeeperTestClient bkc = new BookKeeperTestClient(conf)) {
try (WriteHandle wh = result(bkc.newCreateLedgerOp()
.withEnsembleSize(3)
.withWriteQuorumSize(3)
.withAckQuorumSize(2)
.withPassword("".getBytes())
.withWriteFlags(WriteFlag.DEFERRED_SYNC)
.execute())) {
result(wh.appendAsync("test".getBytes()));
}
}
}
@Test(expected = BKIllegalOpException.class)
public void testCannotUseForceOnV2Protocol() throws Exception {
ClientConfiguration conf = new ClientConfiguration(baseClientConf);
conf.setUseV2WireProtocol(true);
try (BookKeeperTestClient bkc = new BookKeeperTestClient(conf)) {
try (WriteHandle wh = result(bkc.newCreateLedgerOp()
.withEnsembleSize(3)
.withWriteQuorumSize(3)
.withAckQuorumSize(2)
.withPassword("".getBytes())
.withWriteFlags(WriteFlag.NONE)
.execute())) {
result(wh.appendAsync("".getBytes()));
result(wh.force());
}
}
}
class MockZooKeeperClient extends ZooKeeperClient {
class MockZooKeeper extends ZooKeeper {
public MockZooKeeper(String connectString, int sessionTimeout, Watcher watcher, boolean canBeReadOnly)
throws IOException {
super(connectString, sessionTimeout, watcher, canBeReadOnly);
}
@Override
public void create(final String path, byte[] data, List<ACL> acl, CreateMode createMode, StringCallback cb,
Object ctx) {
StringCallback injectedCallback = new StringCallback() {
@Override
public void processResult(int rc, String path, Object ctx, String name) {
                    /**
                     * If ledgerIdToInjectFailure matches the path of the
                     * node, reset it to INVALID_LEDGERID and return a
                     * CONNECTIONLOSS error to the callback.
                     */
if (path.contains(ledgerIdToInjectFailure.toString())) {
ledgerIdToInjectFailure.set(INVALID_LEDGERID);
cb.processResult(KeeperException.Code.CONNECTIONLOSS.intValue(), path, ctx, name);
} else {
cb.processResult(rc, path, ctx, name);
}
}
};
super.create(path, data, acl, createMode, injectedCallback, ctx);
}
}
private final String connectString;
private final int sessionTimeoutMs;
private final ZooKeeperWatcherBase watcherManager;
private final AtomicLong ledgerIdToInjectFailure;
MockZooKeeperClient(String connectString, int sessionTimeoutMs, ZooKeeperWatcherBase watcher,
AtomicLong ledgerIdToInjectFailure) throws IOException {
/*
         * in the operation retry policy, maxRetries is > 0, so any
         * RecoverableException scenario will be retried.
*/
super(connectString, sessionTimeoutMs, watcher,
new BoundExponentialBackoffRetryPolicy(sessionTimeoutMs, sessionTimeoutMs, Integer.MAX_VALUE),
new BoundExponentialBackoffRetryPolicy(sessionTimeoutMs, sessionTimeoutMs, 3),
NullStatsLogger.INSTANCE, 1, 0, false);
this.connectString = connectString;
this.sessionTimeoutMs = sessionTimeoutMs;
this.watcherManager = watcher;
this.ledgerIdToInjectFailure = ledgerIdToInjectFailure;
}
@Override
protected ZooKeeper createZooKeeper() throws IOException {
return new MockZooKeeper(this.connectString, this.sessionTimeoutMs, this.watcherManager, false);
}
}
@Test
public void testZKConnectionLossForLedgerCreation() throws Exception {
int zkSessionTimeOut = 10000;
AtomicLong ledgerIdToInjectFailure = new AtomicLong(INVALID_LEDGERID);
ZooKeeperWatcherBase zooKeeperWatcherBase = new ZooKeeperWatcherBase(zkSessionTimeOut,
NullStatsLogger.INSTANCE);
MockZooKeeperClient zkFaultInjectionWrapper = new MockZooKeeperClient(zkUtil.getZooKeeperConnectString(),
zkSessionTimeOut, zooKeeperWatcherBase, ledgerIdToInjectFailure);
zkFaultInjectionWrapper.waitForConnection();
assertEquals("zkFaultInjectionWrapper should be in connected state", States.CONNECTED,
zkFaultInjectionWrapper.getState());
BookKeeper bk = new BookKeeper(baseClientConf, zkFaultInjectionWrapper);
long oldZkInstanceSessionId = zkFaultInjectionWrapper.getSessionId();
long ledgerId = 567L;
LedgerHandle lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null);
lh.close();
        /*
         * Trigger an Expired event so that MockZooKeeperClient runs
         * 'clientCreator' and creates a new ZK handle; in this case it
         * creates a MockZooKeeper.
         */
zooKeeperWatcherBase.process(new WatchedEvent(EventType.None, KeeperState.Expired, ""));
zkFaultInjectionWrapper.waitForConnection();
for (int i = 0; i < 10; i++) {
if (zkFaultInjectionWrapper.getState() == States.CONNECTED) {
break;
}
Thread.sleep(200);
}
assertEquals("zkFaultInjectionWrapper should be in connected state", States.CONNECTED,
zkFaultInjectionWrapper.getState());
assertNotEquals("Session Id of old and new ZK instance should be different", oldZkInstanceSessionId,
zkFaultInjectionWrapper.getSessionId());
ledgerId++;
ledgerIdToInjectFailure.set(ledgerId);
        /**
         * ledgerIdToInjectFailure is set to 'ledgerId', so zookeeper.create
         * returns a CONNECTIONLOSS error the first time and, when it is
         * retried, returns a NODEEXISTS error as expected.
         *
         * AbstractZkLedgerManager.createLedgerMetadata should deal with this
         * scenario appropriately.
         */
lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null);
lh.close();
assertEquals("injectZnodeCreationNoNodeFailure should have been reset it to INVALID_LEDGERID", INVALID_LEDGERID,
ledgerIdToInjectFailure.get());
lh = bk.openLedger(ledgerId, DigestType.CRC32, "".getBytes());
lh.close();
ledgerId++;
lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null);
lh.close();
bk.close();
}
@Test
public void testLedgerDeletionIdempotency() throws Exception {
BookKeeper bk = new BookKeeper(baseClientConf);
long ledgerId = 789L;
LedgerHandle lh = bk.createLedgerAdv(ledgerId, 1, 1, 1, DigestType.CRC32, "".getBytes(), null);
lh.close();
bk.deleteLedger(ledgerId);
bk.deleteLedger(ledgerId);
bk.close();
}
    /**
     * Mock of RackawareEnsemblePlacementPolicy. Overrides areAckedBookiesAdheringToPlacementPolicy to return true
     * only when ackedBookies contains writeQuorumSizeToUseForTesting bookies.
     */
public static class MockRackawareEnsemblePlacementPolicy extends RackawareEnsemblePlacementPolicy {
private int writeQuorumSizeToUseForTesting;
private CountDownLatch conditionFirstInvocationLatch;
void setWriteQuorumSizeToUseForTesting(int writeQuorumSizeToUseForTesting) {
this.writeQuorumSizeToUseForTesting = writeQuorumSizeToUseForTesting;
}
void setConditionFirstInvocationLatch(CountDownLatch conditionFirstInvocationLatch) {
this.conditionFirstInvocationLatch = conditionFirstInvocationLatch;
}
@Override
public boolean areAckedBookiesAdheringToPlacementPolicy(Set<BookieSocketAddress> ackedBookies,
int writeQuorumSize,
int ackQuorumSize) {
conditionFirstInvocationLatch.countDown();
return ackedBookies.size() == writeQuorumSizeToUseForTesting;
}
}
    /**
     * Test to verify that PendingAddOp waits for a success condition from areAckedBookiesAdheringToPlacementPolicy
     * before returning success to the client. Also tests the WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS and
     * WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS counters.
     */
@Test
public void testEnforceMinNumFaultDomainsForWrite() throws Exception {
byte[] data = "foobar".getBytes();
byte[] password = "testPasswd".getBytes();
startNewBookie();
startNewBookie();
ClientConfiguration conf = new ClientConfiguration();
conf.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
conf.setEnsemblePlacementPolicy(MockRackawareEnsemblePlacementPolicy.class);
conf.setAddEntryTimeout(2);
conf.setAddEntryQuorumTimeout(4);
conf.setEnforceMinNumFaultDomainsForWrite(true);
TestStatsProvider statsProvider = new TestStatsProvider();
// Abnormal values for testing to prevent timeouts
BookKeeperTestClient bk = new BookKeeperTestClient(conf, statsProvider);
StatsLogger statsLogger = bk.getStatsLogger();
int ensembleSize = 3;
int writeQuorumSize = 3;
int ackQuorumSize = 2;
CountDownLatch countDownLatch = new CountDownLatch(1);
MockRackawareEnsemblePlacementPolicy currPlacementPolicy =
(MockRackawareEnsemblePlacementPolicy) bk.getPlacementPolicy();
currPlacementPolicy.setConditionFirstInvocationLatch(countDownLatch);
currPlacementPolicy.setWriteQuorumSizeToUseForTesting(writeQuorumSize);
BookieSocketAddress bookieToSleep;
try (LedgerHandle lh = bk.createLedger(ensembleSize, writeQuorumSize, ackQuorumSize, digestType, password)) {
CountDownLatch sleepLatchCase1 = new CountDownLatch(1);
CountDownLatch sleepLatchCase2 = new CountDownLatch(1);
// Put all non ensemble bookies to sleep
LOG.info("Putting all non ensemble bookies to sleep.");
for (BookieServer bookieServer : bs) {
try {
if (!lh.getCurrentEnsemble().contains(bookieServer.getLocalAddress())) {
sleepBookie(bookieServer.getLocalAddress(), sleepLatchCase2);
}
} catch (UnknownHostException ignored) {}
}
Thread writeToLedger = new Thread(() -> {
try {
LOG.info("Initiating write for entry");
long entryId = lh.addEntry(data);
LOG.info("Wrote entry with entryId = {}", entryId);
} catch (InterruptedException | BKException ignored) {
}
});
bookieToSleep = lh.getCurrentEnsemble().get(0);
LOG.info("Putting picked bookie to sleep");
sleepBookie(bookieToSleep, sleepLatchCase1);
assertEquals(statsLogger
.getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS)
.get()
.longValue(), 0);
// Trying to write entry
writeToLedger.start();
// Waiting and checking to make sure that write has not succeeded
countDownLatch.await(conf.getAddEntryTimeout(), TimeUnit.SECONDS);
assertEquals("Write succeeded but should not have", -1, lh.lastAddConfirmed);
// Wake the bookie
sleepLatchCase1.countDown();
// Waiting and checking to make sure that write has succeeded
writeToLedger.join(conf.getAddEntryTimeout() * 1000);
assertEquals("Write did not succeed but should have", 0, lh.lastAddConfirmed);
assertEquals(statsLogger
.getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS)
.get()
.longValue(), 1);
// AddEntry thread for second scenario
Thread writeToLedger2 = new Thread(() -> {
try {
LOG.info("Initiating write for entry");
long entryId = lh.addEntry(data);
LOG.info("Wrote entry with entryId = {}", entryId);
} catch (InterruptedException | BKException ignored) {
}
});
bookieToSleep = lh.getCurrentEnsemble().get(1);
LOG.info("Putting picked bookie to sleep");
sleepBookie(bookieToSleep, sleepLatchCase2);
// Trying to write entry
writeToLedger2.start();
// Waiting and checking to make sure that write has failed
writeToLedger2.join((conf.getAddEntryQuorumTimeout() + 2) * 1000);
assertEquals("Write succeeded but should not have", 0, lh.lastAddConfirmed);
sleepLatchCase2.countDown();
assertEquals(statsLogger.getCounter(WRITE_DELAYED_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS).get().longValue(),
2);
assertEquals(statsLogger.getCounter(WRITE_TIMED_OUT_DUE_TO_NOT_ENOUGH_FAULT_DOMAINS).get().longValue(),
1);
}
}
}
|
apache-2.0
|
ben-ng/swift
|
tools/SourceKit/lib/SwiftLang/CodeCompletion.h
|
8445
|
//===--- CodeCompletion.h - -------------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SOURCEKIT_LIB_SWIFTLANG_CODECOMPLETION_H
#define LLVM_SOURCEKIT_LIB_SWIFTLANG_CODECOMPLETION_H
#include "SourceKit/Core/LLVM.h"
#include "swift/IDE/CodeCompletion.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
namespace SourceKit {
namespace CodeCompletion {
using CodeCompletionDeclKind = swift::ide::CodeCompletionDeclKind;
using CodeCompletionKeywordKind = swift::ide::CodeCompletionKeywordKind;
using CodeCompletionLiteralKind = swift::ide::CodeCompletionLiteralKind;
using SemanticContextKind = swift::ide::SemanticContextKind;
using CodeCompletionString = swift::ide::CodeCompletionString;
using SwiftResult = swift::ide::CodeCompletionResult;
using CompletionKind = swift::ide::CompletionKind;
struct Group;
class CodeCompletionOrganizer;
class CompletionBuilder;
/// A representation of the 'popularity' of a code completion result.
struct PopularityFactor {
/// Raw popularity score in the range [-1, 1], where higher values are more
/// popular and 0.0 indicates an unknown popularity.
double rawValue = 0.0;
bool isPopular() const { return rawValue > 0.0; }
bool isUnpopular() const { return rawValue < 0.0; }
PopularityFactor() = default;
explicit PopularityFactor(double value) : rawValue(value) {}
};
struct NameStyle {
enum WordDelimiter : uint8_t {
Unknown,
Lowercase, // lowercase
Uppercase, // UPPERCASE
UpperCamelCase, // UpperCamelCase
LowerCamelCase, // lowerCamelCase
LowercaseWithUnderscores, // lowercase_with_underscores
UppercaseWithUnderscores, // UPPERCASE_WITH_UNDERSCORES
};
WordDelimiter wordDelimiter = Unknown;
uint8_t leadingUnderscores : 2;
uint8_t trailingUnderscores : 2;
explicit NameStyle(StringRef name);
bool possiblyLowerCamelCase() const {
return wordDelimiter == Lowercase || wordDelimiter == LowerCamelCase;
}
bool possiblyUpperCamelCase() const {
return wordDelimiter == Uppercase || wordDelimiter == UpperCamelCase;
}
};
/// Code completion result type for SourceKit::SwiftLangSupport.
///
/// Extends a \c swift::ide::CodeCompletionResult with extra fields that are
/// filled in by SourceKit. Generally stored in an \c CompletionSink.
class Completion : public SwiftResult {
void *opaqueCustomKind = nullptr;
Optional<uint8_t> moduleImportDepth;
PopularityFactor popularityFactor;
StringRef name;
StringRef description;
friend class CompletionBuilder;
public:
static constexpr unsigned numSemanticContexts = 8;
static constexpr unsigned maxModuleImportDepth = 10;
/// Wraps \p base with an \c Completion. The \p name and \p description
/// should outlive the result, generally by being stored in the same
/// \c CompletionSink.
Completion(SwiftResult base, StringRef name, StringRef description)
: SwiftResult(base), name(name), description(description) {}
bool hasCustomKind() const { return opaqueCustomKind; }
void *getCustomKind() const { return opaqueCustomKind; }
StringRef getName() const { return name; }
StringRef getDescription() const { return description; }
Optional<uint8_t> getModuleImportDepth() const { return moduleImportDepth; }
  /// A popularity factor in the range [-1, 1]. The higher the value, the more
/// 'popular' this result is. 0 indicates unknown.
PopularityFactor getPopularityFactor() const { return popularityFactor; }
};
/// Storage sink for \c Completion objects.
///
/// In addition to allocating the results themselves, uses \c swiftSink to keep
/// the storage for the underlying swift results alive.
struct CompletionSink {
swift::ide::CodeCompletionResultSink swiftSink;
llvm::BumpPtrAllocator allocator;
/// Adds references to a swift sink's allocators to keep its storage alive.
void adoptSwiftSink(swift::ide::CodeCompletionResultSink &sink) {
swiftSink.ForeignAllocators.insert(swiftSink.ForeignAllocators.end(),
sink.ForeignAllocators.begin(),
sink.ForeignAllocators.end());
swiftSink.ForeignAllocators.push_back(sink.Allocator);
}
};
class CompletionBuilder {
CompletionSink &sink;
  SwiftResult &current;
bool modified = false;
bool isNotRecommended;
Completion::NotRecommendedReason notRecommendedReason;
SemanticContextKind semanticContext;
CodeCompletionString *completionString;
llvm::SmallVector<char, 64> originalName;
void *customKind = nullptr;
Optional<uint8_t> moduleImportDepth;
PopularityFactor popularityFactor;
public:
static void getFilterName(CodeCompletionString *str, raw_ostream &OS);
static void getDescription(SwiftResult *result, raw_ostream &OS,
bool leadingPunctuation);
public:
CompletionBuilder(CompletionSink &sink, SwiftResult &base);
void setCustomKind(void *opaqueCustomKind) { customKind = opaqueCustomKind; }
void setModuleImportDepth(Optional<uint8_t> value) {
assert(!value || *value <= Completion::maxModuleImportDepth);
moduleImportDepth = value;
}
void setNotRecommended(Completion::NotRecommendedReason Reason) {
modified = true;
notRecommendedReason = Reason;
if (Reason != Completion::NoReason)
isNotRecommended = true;
}
void setSemanticContext(SemanticContextKind kind) {
modified = true;
semanticContext = kind;
}
void setPopularityFactor(PopularityFactor val) { popularityFactor = val; }
void setPrefix(CodeCompletionString *prefix);
StringRef getOriginalName() const {
return StringRef(originalName.begin(), originalName.size());
}
Completion *finish();
};
/// Immutable view of code completion results.
///
/// Provides a possibly filtered view of code completion results
/// (\c Completion) organized into groups. Clients walk the tree using
/// CodeCompletionView::Walker. The \c Completion objects are not owned
/// by the view and must outlive it.
class CodeCompletionView {
const Group *rootGroup = nullptr; ///< Owned by the view.
friend class CodeCompletionOrganizer;
friend class LimitedResultView;
CodeCompletionView(const CodeCompletionView &) = delete;
void operator=(const CodeCompletionView &) = delete;
public:
CodeCompletionView() = default;
CodeCompletionView(CodeCompletionView &&) = default;
virtual ~CodeCompletionView();
struct Walker;
virtual bool walk(Walker &walker) const;
};
/// Interface implemented by clients of \c CodeCompletionView.
struct CodeCompletionView::Walker {
virtual ~Walker() {}
virtual bool handleResult(Completion *result) = 0;
virtual void startGroup(StringRef name) = 0;
virtual void endGroup() = 0;
};
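/// An illustrative sketch (not part of this API): a Walker implementation
/// that counts the results contained in a view.
///
/// \code
///   struct CountingWalker : CodeCompletionView::Walker {
///     unsigned count = 0;
///     bool handleResult(Completion *result) override { ++count; return true; }
///     void startGroup(StringRef name) override {}
///     void endGroup() override {}
///   };
///   CountingWalker walker;
///   view.walk(walker);
/// \endcode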
using CodeCompletionViewRef = std::shared_ptr<const CodeCompletionView>;
class LimitedResultView : public CodeCompletionView {
const CodeCompletionView &baseView;
mutable unsigned start;
unsigned maxResults;
public:
LimitedResultView(const CodeCompletionView &baseView, unsigned start,
unsigned maxResults)
: baseView(baseView), start(start), maxResults(maxResults) {}
unsigned getNextOffset() const;
bool walk(Walker &walker) const override;
};
struct FilterRules {
bool hideAll = false;
bool hideAllValueLiterals = false;
llvm::SmallDenseMap<CodeCompletionLiteralKind, bool, 8> hideValueLiteral;
bool hideAllKeywords = false;
llvm::DenseMap<CodeCompletionKeywordKind, bool> hideKeyword;
bool hideCustomCompletions = false;
// FIXME: hide individual custom completions
llvm::StringMap<bool> hideModule;
llvm::StringMap<bool> hideByName;
bool hideCompletion(Completion *completion) const;
bool hideCompletion(SwiftResult *completion,
StringRef name,
void *customKind = nullptr) const;
bool hideName(StringRef name) const;
};
} // end namespace CodeCompletion
} // end namespace SourceKit
#endif // LLVM_SOURCEKIT_LIB_SWIFTLANG_CODECOMPLETION_H
|
apache-2.0
|
efetepe/magicdocumentation
|
framework/php/Template.php
|
666
|
<?php
class Template
{
protected $template;
protected $variables = array();
public function __construct($template)
{
$this->template = $template;
}
    public function __get($key)
    {
        // Avoid an "undefined index" notice when the variable was never set.
        return isset($this->variables[$key]) ? $this->variables[$key] : null;
    }
public function __set($key, $value)
{
$this->variables[$key] = $value;
}
public function __toString()
{
        $oldDir = getcwd();
        extract($this->variables);
        //Utils::debug($this->template);
        chdir(dirname($this->template));
        ob_start();
        include basename($this->template);
        chdir($oldDir);
return ob_get_clean();
}
}
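/*
 * Usage sketch (illustrative only; 'views/page.php' and $title are
 * hypothetical names, not part of this framework):
 *
 *     $tpl = new Template('views/page.php');
 *     $tpl->title = 'Hello';
 *     echo $tpl; // renders the template with $title extracted into scope
 */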
|
apache-2.0
|
thbonk/electron-openui5-boilerplate
|
libs/openui5-runtime/resources/sap/uxap/ObjectPageLayout.designtime-dbg.js
|
1307
|
/*!
* UI development toolkit for HTML5 (OpenUI5)
* (c) Copyright 2009-2017 SAP SE or an SAP affiliate company.
* Licensed under the Apache License, Version 2.0 - see LICENSE.txt.
*/
// Provides the Design Time Metadata for the sap.uxap.ObjectPageLayout control
sap.ui.define([],
function() {
"use strict";
return {
name : {
singular : function(){
return sap.uxap.i18nModel.getResourceBundle().getText("LAYOUT_CONTROL_NAME");
},
plural : function(){
return sap.uxap.i18nModel.getResourceBundle().getText("LAYOUT_CONTROL__PLURAL");
}
},
aggregations : {
sections : {
domRef : function(oElement) {
return oElement.$("sectionsContainer").get(0);
},
childNames : {
singular : function(){
return sap.uxap.i18nModel.getResourceBundle().getText("SECTION_CONTROL_NAME");
},
plural : function(){
return sap.uxap.i18nModel.getResourceBundle().getText("SECTION_CONTROL_NAME_PLURAL");
}
},
actions : {
move : "moveControls"
}
}
},
scrollContainers : [{
domRef : "> .sapUxAPObjectPageWrapper",
aggregations : ["sections", "headerContent"]
}, {
domRef : function(oElement) {
return oElement.$("vertSB-sb").get(0);
}
}],
cloneDomRef : ":sap-domref > header"
};
}, /* bExport= */ false);
|
apache-2.0
|
googleapis/gapic-generator-ruby
|
shared/output/cloud/showcase/test/google/showcase/v1beta1/echo_operations_test.rb
|
10911
|
# frozen_string_literal: true
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "minitest/autorun"
require "gapic/grpc/service_stub"
require "google/showcase/v1beta1/echo_pb"
require "google/showcase/v1beta1/echo_services_pb"
require "google/showcase/v1beta1/echo"
class Google::Showcase::V1beta1::Echo::OperationsTest < Minitest::Test
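  # Minimal stand-in for Gapic::ServiceStub: records every call_rpc
  # invocation and replays the canned response/operation pair so the tests
  # below can assert on the requests built by the client.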
class ClientStub
attr_accessor :call_rpc_count, :requests
def initialize response, operation, &block
@response = response
@operation = operation
@block = block
@call_rpc_count = 0
@requests = []
end
def call_rpc *args
@call_rpc_count += 1
@requests << @block&.call(*args)
yield @response, @operation if block_given?
@response
end
end
def test_list_operations
# Create GRPC objects.
grpc_response = Google::Longrunning::ListOperationsResponse.new
grpc_operation = GRPC::ActiveCall::Operation.new nil
grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
grpc_options = {}
# Create request parameters for a unary method.
name = "hello world"
filter = "hello world"
page_size = 42
page_token = "hello world"
list_operations_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
assert_equal :list_operations, name
assert_kind_of Google::Longrunning::ListOperationsRequest, request
assert_equal "hello world", request.name
assert_equal "hello world", request.filter
assert_equal 42, request.page_size
assert_equal "hello world", request.page_token
refute_nil options
end
Gapic::ServiceStub.stub :new, list_operations_client_stub do
# Create client
client = Google::Showcase::V1beta1::Echo::Operations.new do |config|
config.credentials = grpc_channel
end
# Use hash object
client.list_operations({ name: name, filter: filter, page_size: page_size, page_token: page_token }) do |response, operation|
assert_kind_of Gapic::PagedEnumerable, response
assert_equal grpc_response, response.response
assert_equal grpc_operation, operation
end
# Use named arguments
client.list_operations name: name, filter: filter, page_size: page_size, page_token: page_token do |response, operation|
assert_kind_of Gapic::PagedEnumerable, response
assert_equal grpc_response, response.response
assert_equal grpc_operation, operation
end
# Use protobuf object
client.list_operations Google::Longrunning::ListOperationsRequest.new(name: name, filter: filter, page_size: page_size, page_token: page_token) do |response, operation|
assert_kind_of Gapic::PagedEnumerable, response
assert_equal grpc_response, response.response
assert_equal grpc_operation, operation
end
# Use hash object with options
client.list_operations({ name: name, filter: filter, page_size: page_size, page_token: page_token }, grpc_options) do |response, operation|
assert_kind_of Gapic::PagedEnumerable, response
assert_equal grpc_response, response.response
assert_equal grpc_operation, operation
end
# Use protobuf object with options
client.list_operations Google::Longrunning::ListOperationsRequest.new(name: name, filter: filter, page_size: page_size, page_token: page_token), grpc_options do |response, operation|
assert_kind_of Gapic::PagedEnumerable, response
assert_equal grpc_response, response.response
assert_equal grpc_operation, operation
end
# Verify method calls
assert_equal 5, list_operations_client_stub.call_rpc_count
end
end
def test_get_operation
# Create GRPC objects.
grpc_response = Google::Longrunning::Operation.new
grpc_operation = GRPC::ActiveCall::Operation.new nil
grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
grpc_options = {}
# Create request parameters for a unary method.
name = "hello world"
get_operation_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
assert_equal :get_operation, name
assert_kind_of Google::Longrunning::GetOperationRequest, request
assert_equal "hello world", request.name
refute_nil options
end
Gapic::ServiceStub.stub :new, get_operation_client_stub do
# Create client
client = Google::Showcase::V1beta1::Echo::Operations.new do |config|
config.credentials = grpc_channel
end
# Use hash object
client.get_operation({ name: name }) do |response, operation|
assert_kind_of Gapic::Operation, response
assert_equal grpc_response, response.grpc_op
assert_equal grpc_operation, operation
end
# Use named arguments
client.get_operation name: name do |response, operation|
assert_kind_of Gapic::Operation, response
assert_equal grpc_response, response.grpc_op
assert_equal grpc_operation, operation
end
# Use protobuf object
client.get_operation Google::Longrunning::GetOperationRequest.new(name: name) do |response, operation|
assert_kind_of Gapic::Operation, response
assert_equal grpc_response, response.grpc_op
assert_equal grpc_operation, operation
end
# Use hash object with options
client.get_operation({ name: name }, grpc_options) do |response, operation|
assert_kind_of Gapic::Operation, response
assert_equal grpc_response, response.grpc_op
assert_equal grpc_operation, operation
end
# Use protobuf object with options
client.get_operation Google::Longrunning::GetOperationRequest.new(name: name), grpc_options do |response, operation|
assert_kind_of Gapic::Operation, response
assert_equal grpc_response, response.grpc_op
assert_equal grpc_operation, operation
end
# Verify method calls
assert_equal 5, get_operation_client_stub.call_rpc_count
end
end
def test_delete_operation
# Create GRPC objects.
grpc_response = Google::Protobuf::Empty.new
grpc_operation = GRPC::ActiveCall::Operation.new nil
grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
grpc_options = {}
# Create request parameters for a unary method.
name = "hello world"
delete_operation_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
assert_equal :delete_operation, name
assert_kind_of Google::Longrunning::DeleteOperationRequest, request
assert_equal "hello world", request.name
refute_nil options
end
Gapic::ServiceStub.stub :new, delete_operation_client_stub do
# Create client
client = Google::Showcase::V1beta1::Echo::Operations.new do |config|
config.credentials = grpc_channel
end
# Use hash object
client.delete_operation({ name: name }) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use named arguments
client.delete_operation name: name do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use protobuf object
client.delete_operation Google::Longrunning::DeleteOperationRequest.new(name: name) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use hash object with options
client.delete_operation({ name: name }, grpc_options) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use protobuf object with options
client.delete_operation Google::Longrunning::DeleteOperationRequest.new(name: name), grpc_options do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Verify method calls
assert_equal 5, delete_operation_client_stub.call_rpc_count
end
end
def test_cancel_operation
# Create GRPC objects.
grpc_response = Google::Protobuf::Empty.new
grpc_operation = GRPC::ActiveCall::Operation.new nil
grpc_channel = GRPC::Core::Channel.new "localhost:8888", nil, :this_channel_is_insecure
grpc_options = {}
# Create request parameters for a unary method.
name = "hello world"
cancel_operation_client_stub = ClientStub.new grpc_response, grpc_operation do |name, request, options:|
assert_equal :cancel_operation, name
assert_kind_of Google::Longrunning::CancelOperationRequest, request
assert_equal "hello world", request.name
refute_nil options
end
Gapic::ServiceStub.stub :new, cancel_operation_client_stub do
# Create client
client = Google::Showcase::V1beta1::Echo::Operations.new do |config|
config.credentials = grpc_channel
end
# Use hash object
client.cancel_operation({ name: name }) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use named arguments
client.cancel_operation name: name do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use protobuf object
client.cancel_operation Google::Longrunning::CancelOperationRequest.new(name: name) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use hash object with options
client.cancel_operation({ name: name }, grpc_options) do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Use protobuf object with options
client.cancel_operation Google::Longrunning::CancelOperationRequest.new(name: name), grpc_options do |response, operation|
assert_equal grpc_response, response
assert_equal grpc_operation, operation
end
# Verify method calls
assert_equal 5, cancel_operation_client_stub.call_rpc_count
end
end
end
|
apache-2.0
|
SeNDA-UAB/aDTN-platform
|
bundleAgent/executor/include/worker.h
|
1490
|
/*
* Copyright (c) 2014 SeNDA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef H_WORKER_INC
#define H_WORKER_INC
#include <pthread.h>
#include <stdint.h>
#include "modules/include/hash.h"
#include "common/include/bundle.h"
typedef struct _worker_params {
int thread_num;
int main_socket;
uint8_t *respawn_child;
pthread_mutex_t **respawn_child_mutex;
pthread_mutex_t *preparing_exec;
} worker_params;
enum exec_state {
RECV_PETITION,
LOAD_CODE,
RM_BUNDLE,
RM_OK,
RM_ERROR,
NOTIFY_CHILD_RESPAWN_AND_RESPAWN,
RESPAWN_CHILD,
EXEC_CODE,
EXEC_ERROR,
EXEC_OK,
SEND_RESULT,
END
};
struct _child_exec_petition {
char bundle_id[NAME_MAX];
code_type_e code_type;
/* code_type == ROUTING */
char prev_hop[MAX_ID_LEN];
char dest[MAX_ID_LEN];
routing_dl_s *routing_dl;
/**/
/* code_type == LIFETIME || code_type == PRIO*/
prio_dl_s *prio_dl;
life_dl_s *life_dl;
/**/
};
void worker_thread(worker_params *params);
int clean_all_bundle_dl(void);
#endif
|
apache-2.0
|
cyborg314/Borg_Daniel_Project4
|
build/iphone/headers/JavaScriptCore/TiExport.h
|
7001
|
/**
* Borg_Daniel_Project4 Borg_Daniel_Project4 License
* This source code and all modifications done by Borg_Daniel_Project4
* are licensed under the Apache Public License (version 2) and
* are Copyright (c) 2009-2014 by Borg_Daniel_Project4, Inc.
*/
/*
* Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#import <JavaScriptCore/TiCore.h>
#if JSC_OBJC_API_ENABLED
/*!
@protocol
@abstract TiExport provides a declarative way to export Objective-C instance methods,
class methods, and properties to JavaScript code.
@discussion When a JavaScript value is created from an instance of an Objective-C class
 for which no copying conversion is specified, a JavaScript wrapper object will
 be created.
In JavaScript, inheritance is supported via a chain of prototype objects, and
for each Objective-C class (and per TiContext) an object appropriate for use
as a prototype will be provided. For the class NSObject the prototype object
will be the JavaScript context's Object Prototype. For all other Objective-C
classes a Prototype object will be created. The Prototype object for a given
Objective-C class will have its internal [Prototype] property set to point to
the Prototype object of the Objective-C class's superclass. As such the
prototype chain for a JavaScript wrapper object will reflect the wrapped
Objective-C type's inheritance hierarchy.
In addition to the Prototype object a JavaScript Constructor object will also
be produced for each Objective-C class. The Constructor object has a property
named 'prototype' that references the Prototype object, and the Prototype
object has a property named 'constructor' that references the Constructor.
The Constructor object is not callable.
By default no methods or properties of the Objective-C class will be exposed
to JavaScript; however methods and properties may explicitly be exported.
For each protocol that a class conforms to, if the protocol incorporates the
protocol TiExport, then the protocol will be interpreted as a list of methods
and properties to be exported to JavaScript.
For each instance method being exported a corresponding JavaScript function
will be assigned as a property of the Prototype object. For each Objective-C
property being exported a JavaScript accessor property will be created on the
Prototype. For each class method exported a JavaScript function will be
created on the Constructor object. For example:
<pre>
@textblock
@protocol MyClassJavaScriptMethods <TiExport>
- (void)foo;
@end
@interface MyClass : NSObject <MyClassJavaScriptMethods>
- (void)foo;
- (void)bar;
@end
@/textblock
</pre>
Data properties that are created on the prototype or constructor objects have
the attributes: <code>writable:true</code>, <code>enumerable:false</code>, <code>configurable:true</code>.
Accessor properties have the attributes: <code>enumerable:false</code> and <code>configurable:true</code>.
If an instance of <code>MyClass</code> is converted to a JavaScript value, the resulting
wrapper object will (via its prototype) export the method <code>foo</code> to JavaScript,
since the class conforms to the <code>MyClassJavaScriptMethods</code> protocol, and this
protocol incorporates <code>TiExport</code>. <code>bar</code> will not be exported.
Properties, arguments, and return values of the following types are
supported:
Primitive numbers: signed values of up to 32-bits are converted in a manner
consistent with valueWithInt32/toInt32, unsigned values of up to 32-bits
are converted in a manner consistent with valueWithUInt32/toUInt32, all
other numeric values are converted consistently with valueWithDouble/
toDouble.
BOOL: values are converted consistently with valueWithBool/toBool.
id: values are converted consistently with valueWithObject/toObject.
Objective-C Class: - where the type is a pointer to a specified Objective-C
class, conversion is consistent with valueWithObjectOfClass/toObject.
struct types: C struct types are supported, where TiValue provides support
for the given type. Support is built in for CGPoint, NSRange, CGRect, and
CGSize.
 block types: Blocks can only be passed if they have previously been converted
 successfully by valueWithObject/toObject.
For any interface that conforms to TiExport the normal copying conversion for
built in types will be inhibited - so, for example, if an instance that
derives from NSString but conforms to TiExport is passed to valueWithObject:
then a wrapper object for the Objective-C object will be returned rather than
a JavaScript string primitive.
*/
@protocol TiExport
@end
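/*
 A usage sketch (illustrative only; MyPoint, MyPointExports, and the context
 setup below are hypothetical, not part of this header):
 @protocol MyPointExports <TiExport>
 @property double x;
 - (NSString *)describe;
 @end
 @interface MyPoint : NSObject <MyPointExports>
 @end
 Assigning an instance to a TiContext (for example, context[@"p"] = point;)
 makes p.x and p.describe() available to JavaScript via the wrapper's
 prototype, as described above.
 */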
/*!
@define
@abstract Rename a selector when it's exported to JavaScript.
@discussion When a selector that takes one or more arguments is converted to a JavaScript
property name, by default a property name will be generated by performing the
following conversion:
- All colons are removed from the selector
- Any lowercase letter that had followed a colon will be capitalized.
Under the default conversion a selector <code>doFoo:withBar:</code> will be exported as
 <code>doFooWithBar</code>. The default conversion may be overridden using the TiExportAs
macro, for example to export a method <code>doFoo:withBar:</code> as <code>doFoo</code>:
<pre>
@textblock
@protocol MyClassJavaScriptMethods <TiExport>
TiExportAs(doFoo,
- (void)doFoo:(id)foo withBar:(id)bar
);
@end
@/textblock
</pre>
 Note that the TiExportAs macro may only be applied to a selector that takes one
 or more arguments.
*/
#define TiExportAs(PropertyName, Selector) \
@optional Selector __JS_EXPORT_AS__##PropertyName:(id)argument; @required Selector
#endif
|
apache-2.0
|
akiellor/selenium
|
java/server/test/org/openqa/selenium/server/browserlaunchers/SafariLauncherIntegrationTest.java
|
1913
|
package org.openqa.selenium.server.browserlaunchers;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.openqa.jetty.log.LogFactory;
import org.openqa.selenium.browserlaunchers.AsyncExecute;
import org.openqa.selenium.server.RemoteControlConfiguration;
/**
* {@link org.openqa.selenium.server.browserlaunchers.SafariCustomProfileLauncher} integration test class.
*/
public class SafariLauncherIntegrationTest extends TestCase {
private static final Log LOGGER = LogFactory.getLog(SafariLauncherIntegrationTest.class);
private static final int SECONDS = 1000;
private static final int WAIT_TIME = 15 * SECONDS;
public void testLauncherWithDefaultConfiguration() throws Exception {
final SafariCustomProfileLauncher launcher;
launcher = new SafariCustomProfileLauncher(BrowserOptions.newBrowserOptions(), new RemoteControlConfiguration(), "CUST", null);
launcher.launch("http://www.google.com");
int seconds = 15;
LOGGER.info("Killing browser in " + Integer.toString(seconds) + " seconds");
AsyncExecute.sleepTight(WAIT_TIME);
launcher.close();
LOGGER.info("He's dead now, right?");
}
public void testLauncherWithHonorSystemProxyEnabled() throws Exception {
final SafariCustomProfileLauncher launcher;
final RemoteControlConfiguration configuration;
configuration = new RemoteControlConfiguration();
configuration.setHonorSystemProxy(true);
launcher = new SafariCustomProfileLauncher(BrowserOptions.newBrowserOptions(), configuration, "CUST", null);
launcher.launch("http://www.google.com");
int seconds = 15;
LOGGER.info("Killing browser in " + Integer.toString(seconds) + " seconds");
AsyncExecute.sleepTight(WAIT_TIME);
launcher.close();
LOGGER.info("He's dead now, right?");
}
}
|
apache-2.0
|
aemino/discord.js
|
src/structures/Guild.js
|
34442
|
const Long = require('long');
const User = require('./User');
const Role = require('./Role');
const Emoji = require('./Emoji');
const Presence = require('./Presence').Presence;
const GuildMember = require('./GuildMember');
const Constants = require('../util/Constants');
const Collection = require('../util/Collection');
const Util = require('../util/Util');
const Snowflake = require('../util/Snowflake');
/**
* Represents a guild (or a server) on Discord.
* <info>It's recommended to see if a guild is available before performing operations or reading data from it. You can
* check this with `guild.available`.</info>
*/
class Guild {
constructor(client, data) {
/**
     * The client that created the instance of the guild
* @name Guild#client
* @type {Client}
* @readonly
*/
Object.defineProperty(this, 'client', { value: client });
/**
* A collection of members that are in this guild. The key is the member's ID, the value is the member
* @type {Collection<Snowflake, GuildMember>}
*/
this.members = new Collection();
/**
* A collection of channels that are in this guild. The key is the channel's ID, the value is the channel
* @type {Collection<Snowflake, GuildChannel>}
*/
this.channels = new Collection();
/**
* A collection of roles that are in this guild. The key is the role's ID, the value is the role
* @type {Collection<Snowflake, Role>}
*/
this.roles = new Collection();
/**
* A collection of presences in this guild
* @type {Collection<Snowflake, Presence>}
*/
this.presences = new Collection();
if (!data) return;
if (data.unavailable) {
/**
* Whether the guild is available to access. If it is not available, it indicates a server outage
* @type {boolean}
*/
this.available = false;
/**
       * The unique ID of the guild, useful for comparisons
* @type {Snowflake}
*/
this.id = data.id;
} else {
this.available = true;
this.setup(data);
}
}
/**
* Sets up the guild.
* @param {*} data The raw data of the guild
* @private
*/
setup(data) {
/**
* The name of the guild
* @type {string}
*/
this.name = data.name;
/**
* The hash of the guild icon
* @type {?string}
*/
this.icon = data.icon;
/**
* The hash of the guild splash image (VIP only)
* @type {?string}
*/
this.splash = data.splash;
/**
* The region the guild is located in
* @type {string}
*/
this.region = data.region;
/**
* The full amount of members in this guild as of `READY`
* @type {number}
*/
this.memberCount = data.member_count || this.memberCount;
/**
* Whether the guild is "large" (has more than 250 members)
* @type {boolean}
*/
this.large = Boolean('large' in data ? data.large : this.large);
/**
* An array of guild features
     * @type {string[]}
*/
this.features = data.features;
/**
* The ID of the application that created this guild (if applicable)
* @type {?Snowflake}
*/
this.applicationID = data.application_id;
/**
* The time in seconds before a user is counted as "away from keyboard"
* @type {?number}
*/
this.afkTimeout = data.afk_timeout;
/**
* The ID of the voice channel where AFK members are moved
* @type {?string}
*/
this.afkChannelID = data.afk_channel_id;
/**
* Whether embedded images are enabled on this guild
* @type {boolean}
*/
this.embedEnabled = data.embed_enabled;
/**
* The verification level of the guild
* @type {number}
*/
this.verificationLevel = data.verification_level;
/**
* The explicit content filter level of the guild
* @type {number}
*/
this.explicitContentFilter = data.explicit_content_filter;
/**
* The timestamp the client user joined the guild at
* @type {number}
*/
this.joinedTimestamp = data.joined_at ? new Date(data.joined_at).getTime() : this.joinedTimestamp;
this.id = data.id;
this.available = !data.unavailable;
this.features = data.features || this.features || [];
if (data.members) {
this.members.clear();
for (const guildUser of data.members) this._addMember(guildUser, false);
}
if (data.owner_id) {
/**
* The user ID of this guild's owner
* @type {Snowflake}
*/
this.ownerID = data.owner_id;
}
if (data.channels) {
this.channels.clear();
for (const channel of data.channels) this.client.dataManager.newChannel(channel, this);
}
if (data.roles) {
this.roles.clear();
for (const role of data.roles) {
const newRole = new Role(this, role);
this.roles.set(newRole.id, newRole);
}
}
if (data.presences) {
for (const presence of data.presences) {
this._setPresence(presence.user.id, presence);
}
}
this._rawVoiceStates = new Collection();
if (data.voice_states) {
for (const voiceState of data.voice_states) {
this._rawVoiceStates.set(voiceState.user_id, voiceState);
const member = this.members.get(voiceState.user_id);
if (member) {
member.serverMute = voiceState.mute;
member.serverDeaf = voiceState.deaf;
member.selfMute = voiceState.self_mute;
member.selfDeaf = voiceState.self_deaf;
member.voiceSessionID = voiceState.session_id;
member.voiceChannelID = voiceState.channel_id;
this.channels.get(voiceState.channel_id).members.set(member.user.id, member);
}
}
}
if (!this.emojis) {
/**
* A collection of emojis that are in this guild. The key is the emoji's ID, the value is the emoji.
* @type {Collection<Snowflake, Emoji>}
*/
this.emojis = new Collection();
for (const emoji of data.emojis) this.emojis.set(emoji.id, new Emoji(this, emoji));
} else {
this.client.actions.GuildEmojisUpdate.handle({
guild_id: this.id,
emojis: data.emojis,
});
}
}
/**
* The timestamp the guild was created at
* @type {number}
* @readonly
*/
get createdTimestamp() {
return Snowflake.deconstruct(this.id).timestamp;
}
/**
* The time the guild was created
* @type {Date}
* @readonly
*/
get createdAt() {
return new Date(this.createdTimestamp);
}
/**
* The time the client user joined the guild
* @type {Date}
* @readonly
*/
get joinedAt() {
return new Date(this.joinedTimestamp);
}
/**
* The URL to this guild's icon
* @type {?string}
* @readonly
*/
get iconURL() {
if (!this.icon) return null;
return Constants.Endpoints.Guild(this).Icon(this.client.options.http.cdn, this.icon);
}
/**
* The URL to this guild's splash
* @type {?string}
* @readonly
*/
get splashURL() {
if (!this.splash) return null;
return Constants.Endpoints.Guild(this).Splash(this.client.options.http.cdn, this.splash);
}
/**
* The owner of the guild
* @type {GuildMember}
* @readonly
*/
get owner() {
return this.members.get(this.ownerID);
}
/**
* If the client is connected to any voice channel in this guild, this will be the relevant VoiceConnection
* @type {?VoiceConnection}
* @readonly
*/
get voiceConnection() {
if (this.client.browser) return null;
return this.client.voice.connections.get(this.id) || null;
}
/**
* The `#general` TextChannel of the guild
* @type {TextChannel}
* @readonly
*/
get defaultChannel() {
return this.channels.get(this.id);
}
/**
* The position of this guild
* <warn>This is only available when using a user account.</warn>
* @type {?number}
*/
get position() {
if (this.client.user.bot) return null;
if (!this.client.user.settings.guildPositions) return null;
return this.client.user.settings.guildPositions.indexOf(this.id);
}
/**
* The `@everyone` role of the guild
* @type {Role}
* @readonly
*/
get defaultRole() {
return this.roles.get(this.id);
}
/**
* The client user as a GuildMember of this guild
* @type {?GuildMember}
* @readonly
*/
get me() {
return this.members.get(this.client.user.id);
}
/**
* Fetches a collection of roles in the current guild sorted by position
* @type {Collection<Snowflake, Role>}
* @readonly
* @private
*/
get _sortedRoles() {
return this._sortPositionWithID(this.roles);
}
/**
* Returns the GuildMember form of a User object, if the user is present in the guild.
* @param {UserResolvable} user The user that you want to obtain the GuildMember of
* @returns {?GuildMember}
* @example
* // Get the guild member of a user
* const member = guild.member(message.author);
*/
member(user) {
return this.client.resolver.resolveGuildMember(this, user);
}
/**
* Fetch a collection of banned users in this guild.
* @returns {Promise<Collection<Snowflake, User>>}
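   * @example
   * // Log how many users are banned in this guild (a minimal sketch)
   * guild.fetchBans()
   *  .then(bans => console.log(`${bans.size} users are banned`))
   *  .catch(console.error);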
*/
fetchBans() {
return this.client.rest.methods.getGuildBans(this)
// This entire re-mapping can be removed in the next major release
.then(bans => {
const users = new Collection();
for (const ban of bans.values()) users.set(ban.user.id, ban.user);
return users;
});
}
/**
* Fetch a collection of invites to this guild. Resolves with a collection mapping invites by their codes.
* @returns {Promise<Collection<string, Invite>>}
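   * @example
   * // Log the number of invites to this guild (a minimal sketch)
   * guild.fetchInvites()
   *  .then(invites => console.log(`Fetched ${invites.size} invites`))
   *  .catch(console.error);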
*/
fetchInvites() {
return this.client.rest.methods.getGuildInvites(this);
}
/**
* Fetch all webhooks for the guild.
   * @returns {Promise<Collection<Snowflake, Webhook>>}
*/
fetchWebhooks() {
return this.client.rest.methods.getGuildWebhooks(this);
}
/**
* Fetch available voice regions.
   * @returns {Promise<Collection<string, VoiceRegion>>}
*/
fetchVoiceRegions() {
return this.client.rest.methods.fetchVoiceRegions(this.id);
}
/**
* Fetch audit logs for this guild.
* @param {Object} [options={}] Options for fetching audit logs
* @param {Snowflake|GuildAuditLogsEntry} [options.before] Limit to entries from before specified entry
* @param {Snowflake|GuildAuditLogsEntry} [options.after] Limit to entries from after specified entry
* @param {number} [options.limit] Limit number of entries
* @param {UserResolvable} [options.user] Only show entries involving this user
* @param {string|number} [options.type] Only show entries involving this action type
* @returns {Promise<GuildAuditLogs>}
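   * @example
   * // Output the most recent audit log entry (a minimal sketch)
   * guild.fetchAuditLogs({ limit: 1 })
   *  .then(audit => console.log(audit.entries.first()))
   *  .catch(console.error);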
*/
fetchAuditLogs(options) {
return this.client.rest.methods.getGuildAuditLogs(this, options);
}
/**
* Adds a user to the guild using OAuth2. Requires the `CREATE_INSTANT_INVITE` permission.
* @param {UserResolvable} user User to add to the guild
* @param {Object} options Options for the addition
* @param {string} options.accessToken An OAuth2 access token for the user with the `guilds.join` scope granted to the
* bot's application
* @param {string} [options.nick] Nickname to give the member (requires `MANAGE_NICKNAMES`)
* @param {Collection<Snowflake, Role>|Role[]|Snowflake[]} [options.roles] Roles to add to the member
* (requires `MANAGE_ROLES`)
* @param {boolean} [options.mute] Whether the member should be muted (requires `MUTE_MEMBERS`)
* @param {boolean} [options.deaf] Whether the member should be deafened (requires `DEAFEN_MEMBERS`)
* @returns {Promise<GuildMember>}
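   * @example
   * // Add a user with an OAuth2 access token (accessToken is a hypothetical
   * // variable holding a token granted with the guilds.join scope)
   * guild.addMember(user, { accessToken })
   *  .then(member => console.log(`Added ${member.user.username} to the guild`))
   *  .catch(console.error);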
*/
addMember(user, options) {
if (this.members.has(user.id)) return Promise.resolve(this.members.get(user.id));
return this.client.rest.methods.putGuildMember(this, user, options);
}
/**
* Fetch a single guild member from a user.
* @param {UserResolvable} user The user to fetch the member for
* @param {boolean} [cache=true] Insert the user into the users cache
* @returns {Promise<GuildMember>}
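   * @example
   * // Fetch the guild member for a message's author (a minimal sketch)
   * guild.fetchMember(message.author)
   *  .then(member => console.log(`Fetched ${member.user.username}`))
   *  .catch(console.error);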
*/
fetchMember(user, cache = true) {
user = this.client.resolver.resolveUser(user);
if (!user) return Promise.reject(new Error('User is not cached. Use Client.fetchUser first.'));
if (this.members.has(user.id)) return Promise.resolve(this.members.get(user.id));
return this.client.rest.methods.getGuildMember(this, user, cache);
}
/**
   * Fetches all the members in the guild, even if they are offline. If the guild has fewer than 250 members,
* this should not be necessary.
* @param {string} [query=''] Limit fetch to members with similar usernames
* @param {number} [limit=0] Maximum number of members to request
* @returns {Promise<Guild>}
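   * @example
   * // Fetch all members so the member cache is complete (a minimal sketch)
   * guild.fetchMembers()
   *  .then(guild => console.log(`Now caching ${guild.members.size} members`))
   *  .catch(console.error);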
*/
fetchMembers(query = '', limit = 0) {
return new Promise((resolve, reject) => {
if (this.memberCount === this.members.size) {
// Uncomment in v12
// resolve(this.members)
resolve(this);
return;
}
this.client.ws.send({
op: Constants.OPCodes.REQUEST_GUILD_MEMBERS,
d: {
guild_id: this.id,
query,
limit,
},
});
const handler = (members, guild) => {
if (guild.id !== this.id) return;
if (this.memberCount === this.members.size || members.length < 1000) {
this.client.removeListener(Constants.Events.GUILD_MEMBERS_CHUNK, handler);
// Uncomment in v12
// resolve(this.members)
resolve(this);
}
};
this.client.on(Constants.Events.GUILD_MEMBERS_CHUNK, handler);
this.client.setTimeout(() => reject(new Error('Members didn\'t arrive in time.')), 120 * 1000);
});
}
/**
* Performs a search within the entire guild.
* <warn>This is only available when using a user account.</warn>
* @param {MessageSearchOptions} [options={}] Options to pass to the search
* @returns {Promise<Array<Message[]>>}
* An array containing arrays of messages. Each inner array is a search context cluster.
* The message which has triggered the result will have the `hit` property set to `true`.
* @example
* guild.search({
* content: 'discord.js',
* before: '2016-11-17'
* }).then(res => {
* const hit = res.messages[0].find(m => m.hit).content;
* console.log(`I found: **${hit}**, total results: ${res.totalResults}`);
* }).catch(console.error);
*/
search(options = {}) {
return this.client.rest.methods.search(this, options);
}
/**
* The data for editing a guild.
* @typedef {Object} GuildEditData
* @property {string} [name] The name of the guild
* @property {string} [region] The region of the guild
* @property {number} [verificationLevel] The verification level of the guild
* @property {ChannelResolvable} [afkChannel] The AFK channel of the guild
* @property {number} [afkTimeout] The AFK timeout of the guild
* @property {Base64Resolvable} [icon] The icon of the guild
* @property {GuildMemberResolvable} [owner] The owner of the guild
* @property {Base64Resolvable} [splash] The splash screen of the guild
*/
/**
* Updates the guild with new information - e.g. a new name.
* @param {GuildEditData} data The data to update the guild with
* @returns {Promise<Guild>}
* @example
* // Set the guild name and region
* guild.edit({
* name: 'Discord Guild',
* region: 'london',
* })
* .then(updated => console.log(`New guild name ${updated.name} in region ${updated.region}`))
* .catch(console.error);
*/
edit(data) {
return this.client.rest.methods.updateGuild(this, data);
}
/**
* Edit the name of the guild.
* @param {string} name The new name of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild name
* guild.setName('Discord Guild')
* .then(updated => console.log(`Updated guild name to ${guild.name}`))
* .catch(console.error);
*/
setName(name) {
return this.edit({ name });
}
/**
* Edit the region of the guild.
* @param {string} region The new region of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild region
* guild.setRegion('london')
* .then(updated => console.log(`Updated guild region to ${guild.region}`))
* .catch(console.error);
*/
setRegion(region) {
return this.edit({ region });
}
/**
* Edit the verification level of the guild.
* @param {number} verificationLevel The new verification level of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild verification level
* guild.setVerificationLevel(1)
* .then(updated => console.log(`Updated guild verification level to ${guild.verificationLevel}`))
* .catch(console.error);
*/
setVerificationLevel(verificationLevel) {
return this.edit({ verificationLevel });
}
/**
* Edit the AFK channel of the guild.
* @param {ChannelResolvable} afkChannel The new AFK channel
* @returns {Promise<Guild>}
* @example
* // Edit the guild AFK channel
* guild.setAFKChannel(channel)
* .then(updated => console.log(`Updated guild AFK channel to ${guild.afkChannel}`))
* .catch(console.error);
*/
setAFKChannel(afkChannel) {
return this.edit({ afkChannel });
}
/**
* Edit the AFK timeout of the guild.
* @param {number} afkTimeout The time in seconds that a user must be idle to be considered AFK
* @returns {Promise<Guild>}
* @example
* // Edit the guild AFK channel
* guild.setAFKTimeout(60)
* .then(updated => console.log(`Updated guild AFK timeout to ${guild.afkTimeout}`))
* .catch(console.error);
*/
setAFKTimeout(afkTimeout) {
return this.edit({ afkTimeout });
}
/**
* Set a new guild icon.
* @param {Base64Resolvable} icon The new icon of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild icon
* guild.setIcon(fs.readFileSync('./icon.png'))
* .then(updated => console.log('Updated the guild icon'))
* .catch(console.error);
*/
setIcon(icon) {
return this.edit({ icon });
}
/**
* Sets a new owner of the guild.
* @param {GuildMemberResolvable} owner The new owner of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild owner
* guild.setOwner(guild.members.first())
* .then(updated => console.log(`Updated the guild owner to ${updated.owner.username}`))
* .catch(console.error);
*/
setOwner(owner) {
return this.edit({ owner });
}
/**
* Set a new guild splash screen.
* @param {Base64Resolvable} splash The new splash screen of the guild
* @returns {Promise<Guild>}
* @example
* // Edit the guild splash
   * guild.setSplash(fs.readFileSync('./splash.png'))
* .then(updated => console.log('Updated the guild splash'))
* .catch(console.error);
*/
setSplash(splash) {
return this.edit({ splash });
}
  /**
   * Sets the position of this guild in the client user's guild list.
   * <warn>This is only available when using a user account.</warn>
   * @param {number} position Absolute or relative position
   * @param {boolean} [relative=false] Whether to position relatively or absolutely
   * @returns {Promise<Guild>}
   */
setPosition(position, relative) {
if (this.client.user.bot) {
return Promise.reject(new Error('Setting guild position is only available for user accounts'));
}
return this.client.user.settings.setGuildPosition(this, position, relative);
}
/**
* Marks all messages in this guild as read.
* <warn>This is only available when using a user account.</warn>
* @returns {Promise<Guild>} This guild
*/
acknowledge() {
return this.client.rest.methods.ackGuild(this);
}
/**
* Allow direct messages from guild members.
* @param {boolean} allow Whether to allow direct messages
* @returns {Promise<Guild>}
*/
allowDMs(allow) {
const settings = this.client.user.settings;
if (allow) return settings.removeRestrictedGuild(this);
else return settings.addRestrictedGuild(this);
}
/**
* Bans a user from the guild.
* @param {UserResolvable} user The user to ban
* @param {Object} [options] Ban options.
* @param {number} [options.days=0] Number of days of messages to delete
* @param {string} [options.reason] Reason for banning
* @returns {Promise<GuildMember|User|string>} Result object will be resolved as specifically as possible.
* If the GuildMember cannot be resolved, the User will instead be attempted to be resolved. If that also cannot
* be resolved, the user ID will be the result.
* @example
* // Ban a user by ID (or with a user/guild member object)
* guild.ban('some user ID')
* .then(user => console.log(`Banned ${user.username || user.id || user} from ${guild.name}`))
* .catch(console.error);
*/
ban(user, options = {}) {
if (typeof options === 'number') {
options = { reason: null, days: options };
} else if (typeof options === 'string') {
options = { reason: options, days: 0 };
}
return this.client.rest.methods.banGuildMember(this, user, options);
}
/**
* Unbans a user from the guild.
* @param {UserResolvable} user The user to unban
* @returns {Promise<User>}
* @example
* // Unban a user by ID (or with a user/guild member object)
* guild.unban('some user ID')
* .then(user => console.log(`Unbanned ${user.username} from ${guild.name}`))
* .catch(console.error);
*/
unban(user) {
return this.client.rest.methods.unbanGuildMember(this, user);
}
/**
* Prunes members from the guild based on how long they have been inactive.
* @param {number} days Number of days of inactivity required to kick
* @param {boolean} [dry=false] If true, will return number of users that will be kicked, without actually doing it
* @returns {Promise<number>} The number of members that were/will be kicked
* @example
* // See how many members will be pruned
* guild.pruneMembers(12, true)
* .then(pruned => console.log(`This will prune ${pruned} people!`))
* .catch(console.error);
* @example
* // Actually prune the members
* guild.pruneMembers(12)
* .then(pruned => console.log(`I just pruned ${pruned} people!`))
* .catch(console.error);
*/
pruneMembers(days, dry = false) {
if (typeof days !== 'number') throw new TypeError('Days must be a number.');
return this.client.rest.methods.pruneGuildMembers(this, days, dry);
}
/**
* Syncs this guild (already done automatically every 30 seconds).
* <warn>This is only available when using a user account.</warn>
*/
sync() {
if (!this.client.user.bot) this.client.syncGuilds([this]);
}
/**
* Creates a new channel in the guild.
* @param {string} name The name of the new channel
* @param {string} type The type of the new channel, either `text` or `voice`
* @param {Array<PermissionOverwrites|Object>} overwrites Permission overwrites to apply to the new channel
* @returns {Promise<TextChannel|VoiceChannel>}
* @example
* // Create a new text channel
* guild.createChannel('new-general', 'text')
* .then(channel => console.log(`Created new channel ${channel}`))
* .catch(console.error);
*/
createChannel(name, type, overwrites) {
return this.client.rest.methods.createChannel(this, name, type, overwrites);
}
/**
* The data needed for updating a channel's position.
* @typedef {Object} ChannelPosition
* @property {ChannelResolvable} channel Channel to update
* @property {number} position New position for the channel
*/
/**
* Batch-updates the guild's channels' positions.
* @param {ChannelPosition[]} channelPositions Channel positions to update
* @returns {Promise<Guild>}
* @example
* guild.updateChannels([{ channel: channelID, position: newChannelIndex }])
* .then(guild => console.log(`Updated channel positions for ${guild.id}`))
* .catch(console.error);
*/
setChannelPositions(channelPositions) {
return this.client.rest.methods.updateChannelPositions(this.id, channelPositions);
}
/**
* Creates a new role in the guild with given information
* @param {RoleData} [data] The data to update the role with
* @returns {Promise<Role>}
* @example
* // Create a new role
* guild.createRole()
* .then(role => console.log(`Created role ${role}`))
* .catch(console.error);
* @example
* // Create a new role with data
* guild.createRole({
* name: 'Super Cool People',
* color: 'BLUE',
* })
* .then(role => console.log(`Created role ${role}`))
* .catch(console.error)
*/
createRole(data = {}) {
return this.client.rest.methods.createGuildRole(this, data);
}
/**
* Creates a new custom emoji in the guild.
* @param {BufferResolvable|Base64Resolvable} attachment The image for the emoji
* @param {string} name The name for the emoji
* @param {Collection<Snowflake, Role>|Role[]} [roles] Roles to limit the emoji to
* @returns {Promise<Emoji>} The created emoji
* @example
* // Create a new emoji from a url
* guild.createEmoji('https://i.imgur.com/w3duR07.png', 'rip')
* .then(emoji => console.log(`Created new emoji with name ${emoji.name}!`))
* .catch(console.error);
* @example
* // Create a new emoji from a file on your computer
* guild.createEmoji('./memes/banana.png', 'banana')
* .then(emoji => console.log(`Created new emoji with name ${emoji.name}!`))
* .catch(console.error);
*/
createEmoji(attachment, name, roles) {
return new Promise(resolve => {
if (typeof attachment === 'string' && attachment.startsWith('data:')) {
resolve(this.client.rest.methods.createEmoji(this, attachment, name, roles));
} else {
this.client.resolver.resolveBuffer(attachment).then(data => {
const dataURI = this.client.resolver.resolveBase64(data);
resolve(this.client.rest.methods.createEmoji(this, dataURI, name, roles));
});
}
});
}
/**
* Delete an emoji.
* @param {Emoji|string} emoji The emoji to delete
* @returns {Promise}
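* @example
* // Delete an emoji by ID (illustrative sketch; the ID is a placeholder)
* guild.deleteEmoji('123456789012345678')
* .then(() => console.log('Deleted the emoji'))
* .catch(console.error);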
*/
deleteEmoji(emoji) {
if (!(emoji instanceof Emoji)) emoji = this.emojis.get(emoji);
return this.client.rest.methods.deleteEmoji(emoji);
}
/**
* Causes the client to leave the guild.
* @returns {Promise<Guild>}
* @example
* // Leave a guild
* guild.leave()
* .then(g => console.log(`Left the guild ${g}`))
* .catch(console.error);
*/
leave() {
return this.client.rest.methods.leaveGuild(this);
}
/**
* Causes the client to delete the guild.
* @returns {Promise<Guild>}
* @example
* // Delete a guild
* guild.delete()
* .then(g => console.log(`Deleted the guild ${g}`))
* .catch(console.error);
*/
delete() {
return this.client.rest.methods.deleteGuild(this);
}
/**
* Whether this guild equals another guild. It compares all properties, so for most operations
* it is advisable to just compare `guild.id === guild2.id` as it is much faster and is often
* what most users need.
* @param {Guild} guild The guild to compare with
* @returns {boolean}
*/
equals(guild) {
let equal =
guild &&
this.id === guild.id &&
this.available === !guild.unavailable &&
this.splash === guild.splash &&
this.region === guild.region &&
this.name === guild.name &&
this.memberCount === guild.member_count &&
this.large === guild.large &&
this.icon === guild.icon &&
Util.arraysEqual(this.features, guild.features) &&
this.ownerID === guild.owner_id &&
this.verificationLevel === guild.verification_level &&
this.embedEnabled === guild.embed_enabled;
if (equal) {
if (this.embedChannel) {
if (this.embedChannel.id !== guild.embed_channel_id) equal = false;
} else if (guild.embed_channel_id) {
equal = false;
}
}
return equal;
}
/**
* When concatenated with a string, this automatically concatenates the guild's name instead of the guild object.
* @returns {string}
* @example
* // Logs: Hello from My Guild!
* console.log(`Hello from ${guild}!`);
* @example
* // Logs: Hello from My Guild!
* console.log('Hello from ' + guild + '!');
*/
toString() {
return this.name;
}
_addMember(guildUser, emitEvent = true) {
const existing = this.members.has(guildUser.user.id);
if (!(guildUser.user instanceof User)) guildUser.user = this.client.dataManager.newUser(guildUser.user);
guildUser.joined_at = guildUser.joined_at || 0;
const member = new GuildMember(this, guildUser);
this.members.set(member.id, member);
if (this._rawVoiceStates && this._rawVoiceStates.has(member.user.id)) {
const voiceState = this._rawVoiceStates.get(member.user.id);
member.serverMute = voiceState.mute;
member.serverDeaf = voiceState.deaf;
member.selfMute = voiceState.self_mute;
member.selfDeaf = voiceState.self_deaf;
member.voiceSessionID = voiceState.session_id;
member.voiceChannelID = voiceState.channel_id;
if (this.client.channels.has(voiceState.channel_id)) {
this.client.channels.get(voiceState.channel_id).members.set(member.user.id, member);
} else {
this.client.emit('warn', `Member ${member.id} added in guild ${this.id} with an uncached voice channel`);
}
}
/**
* Emitted whenever a user joins a guild.
* @event Client#guildMemberAdd
* @param {GuildMember} member The member that has joined a guild
*/
if (this.client.ws.connection.status === Constants.Status.READY && emitEvent && !existing) {
this.client.emit(Constants.Events.GUILD_MEMBER_ADD, member);
}
return member;
}
_updateMember(member, data) {
const oldMember = Util.cloneObject(member);
if (data.roles) member._roles = data.roles;
if (typeof data.nick !== 'undefined') member.nickname = data.nick;
const notSame = member.nickname !== oldMember.nickname || !Util.arraysEqual(member._roles, oldMember._roles);
if (this.client.ws.connection.status === Constants.Status.READY && notSame) {
/**
* Emitted whenever a guild member changes, e.g. a new role, a removed role, or a new nickname.
* @event Client#guildMemberUpdate
* @param {GuildMember} oldMember The member before the update
* @param {GuildMember} newMember The member after the update
*/
this.client.emit(Constants.Events.GUILD_MEMBER_UPDATE, oldMember, member);
}
return {
old: oldMember,
mem: member,
};
}
_removeMember(guildMember) {
this.members.delete(guildMember.id);
}
_memberSpeakUpdate(user, speaking) {
const member = this.members.get(user);
if (member && member.speaking !== speaking) {
member.speaking = speaking;
/**
* Emitted once a guild member starts/stops speaking.
* @event Client#guildMemberSpeaking
* @param {GuildMember} member The member that started/stopped speaking
* @param {boolean} speaking Whether or not the member is speaking
*/
this.client.emit(Constants.Events.GUILD_MEMBER_SPEAKING, member, speaking);
}
}
_setPresence(id, presence) {
if (this.presences.get(id)) {
this.presences.get(id).update(presence);
return;
}
this.presences.set(id, new Presence(presence));
}
/**
* Set the position of a role in this guild.
* @param {string|Role} role The role to edit, can be a role object or a role ID
* @param {number} position The new position of the role
* @param {boolean} [relative=false] Whether to move the role relative to its current position
* @returns {Promise<Guild>}
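* @example
* // Move a role down one slot from its current position (illustrative sketch; `role` is a Role of this guild)
* guild.setRolePosition(role, 1, true)
* .then(() => console.log('Moved the role'))
* .catch(console.error);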
*/
setRolePosition(role, position, relative = false) {
if (typeof role === 'string') {
role = this.roles.get(role);
if (!role) return Promise.reject(new Error('Supplied role is not a role or snowflake.'));
}
position = Number(position);
if (isNaN(position)) return Promise.reject(new Error('Supplied position is not a number.'));
let updatedRoles = this._sortedRoles.array();
Util.moveElementInArray(updatedRoles, role, position, relative);
updatedRoles = updatedRoles.map((r, i) => ({ id: r.id, position: i }));
return this.client.rest.methods.setRolePositions(this.id, updatedRoles);
}
/**
* Set the position of a channel in this guild.
* @param {string|GuildChannel} channel The channel to edit, can be a channel object or a channel ID
* @param {number} position The new position of the channel
* @param {boolean} [relative=false] Whether to move the channel relative to its current position
* @returns {Promise<Guild>}
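* @example
* // Move a channel to the top of its list (illustrative sketch; `channel` is a channel of this guild)
* guild.setChannelPosition(channel, 0)
* .then(() => console.log('Moved the channel'))
* .catch(console.error);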
*/
setChannelPosition(channel, position, relative = false) {
if (typeof channel === 'string') {
channel = this.channels.get(channel);
if (!channel) return Promise.reject(new Error('Supplied channel is not a channel or snowflake.'));
}
position = Number(position);
if (isNaN(position)) return Promise.reject(new Error('Supplied position is not a number.'));
let updatedChannels = this._sortedChannels(channel.type).array();
Util.moveElementInArray(updatedChannels, channel, position, relative);
updatedChannels = updatedChannels.map((r, i) => ({ id: r.id, position: i }));
return this.client.rest.methods.setChannelPositions(this.id, updatedChannels);
}
/**
* Returns a collection of the guild's channels of the given type, sorted by position.
* @param {string} type The channel type
* @returns {Collection<Snowflake, GuildChannel>}
* @private
*/
_sortedChannels(type) {
// Voice channels are ordered among themselves; all other channel types share one ordering.
return this._sortPositionWithID(this.channels.filter(c =>
type === 'voice' ? c.type === 'voice' : c.type !== 'voice'
));
}
/**
* Sorts a collection by object position or ID if the positions are equivalent.
* Intended to be identical to Discord's sorting method.
* @param {Collection} collection The collection to sort
* @returns {Collection}
* @private
*/
_sortPositionWithID(collection) {
return collection.sort((a, b) =>
a.position !== b.position ?
a.position - b.position :
Long.fromString(a.id).sub(Long.fromString(b.id)).toNumber()
);
}
}
module.exports = Guild;
|
apache-2.0
|
zimingd/synapsePythonClient
|
tests/integration/test_command_line_client.py
|
36272
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import six
import logging
import filecmp
import os
import re
import sys
import uuid
import json
import time
from nose.plugins.attrib import attr
from nose.tools import assert_raises, assert_equals, assert_less
from nose import SkipTest
import tempfile
import shutil
from mock import patch
import synapseclient
import synapseclient.client as client
import synapseclient.utils as utils
import synapseclient.__main__ as cmdline
from synapseclient.evaluation import Evaluation
import integration
from integration import schedule_for_cleanup, QUERY_TIMEOUT_SEC
if six.PY2:
from StringIO import StringIO
else:
from io import StringIO
def setup_module(module):
module.syn = integration.syn
module.project = integration.project
module.parser = cmdline.build_parser()
#used for --description and --descriptionFile tests
module.upload_filename = _create_temp_file_with_cleanup()
module.description_text = "'some description text'"
module.desc_filename = _create_temp_file_with_cleanup(module.description_text)
module.update_description_text = "'SOMEBODY ONCE TOLD ME THE WORLD WAS GONNA ROLL ME I AINT THE SHARPEST TOOL IN THE SHED'"
module.other_user = integration.other_user
def run(*command, **kwargs):
"""
Sends the given command list to the command line client.
:returns: The STDOUT output of the command.
"""
old_stdout = sys.stdout
capturedSTDOUT = StringIO()
syn_client = kwargs.get('syn', syn)
stream_handler = logging.StreamHandler(capturedSTDOUT)
try:
sys.stdout = capturedSTDOUT
syn_client.logger.addHandler(stream_handler)
sys.argv = [item for item in command]
args = parser.parse_args()
args.debug = True
cmdline.perform_main(args, syn_client)
except SystemExit:
pass # Prevent the test from quitting prematurely
finally:
sys.stdout = old_stdout
syn_client.logger.handlers.remove(stream_handler)
capturedSTDOUT = capturedSTDOUT.getvalue()
return capturedSTDOUT
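# Example (illustrative):
# output = run('synapse', '--skip-checks', 'show', 'syn123')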
def parse(regex, output):
    """Return the first captured group of regex in output; raise if there is no match."""
    m = re.search(regex, output)
    if m and len(m.groups()) > 0:
        return m.group(1).strip()
    raise Exception('ERROR parsing output: "' + str(output) + '"')
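# Example (illustrative):
# parse(r'Created entity:\s+(syn\d+)\s+', 'Created entity: syn123 ...')  # -> 'syn123'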
def test_command_line_client():
# Create a Project
output = run('synapse',
'--skip-checks',
'create',
'-name',
str(uuid.uuid4()),
'-description',
'test of command line client',
'Project')
project_id = parse(r'Created entity:\s+(syn\d+)\s+', output)
schedule_for_cleanup(project_id)
# Create a File
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'add',
'-name',
'BogusFileEntity',
'-description',
'Bogus data to test file upload',
'-parentid',
project_id,
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we stored the file in Synapse
f1 = syn.get(file_entity_id)
fh = syn._getFileHandle(f1.dataFileHandleId)
assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'
# Get File from the command line
output = run('synapse',
'--skip-checks',
'get',
file_entity_id)
downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
schedule_for_cleanup(downloaded_filename)
assert os.path.exists(downloaded_filename)
assert filecmp.cmp(filename, downloaded_filename)
# Update the File
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'store',
'--id',
file_entity_id,
filename)
updated_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)', output)
# Get the File again
output = run('synapse',
'--skip-checks',
'get',
file_entity_id)
downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
schedule_for_cleanup(downloaded_filename)
assert os.path.exists(downloaded_filename)
assert filecmp.cmp(filename, downloaded_filename)
# Test query
output = ""
start_time = time.time()
while not ('BogusFileEntity' in output and file_entity_id in output):
assert_less(time.time() - start_time, QUERY_TIMEOUT_SEC)
output = run('synapse',
'--skip-checks',
'query',
'select id, name from entity where parentId=="%s"' % project_id)
# Move the file to new folder
folder = syn.store(synapseclient.Folder(parentId=project_id))
output = run('synapse',
'mv',
'--id',
file_entity_id,
'--parentid',
folder.id)
downloaded_filename = parse(r'Moved\s+(.*)', output)
movedFile = syn.get(file_entity_id, downloadFile=False)
assert movedFile.parentId == folder.id
# Test Provenance
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
output = run('synapse',
'--skip-checks',
'set-provenance',
'-id',
file_entity_id,
'-name',
'TestActivity',
'-description',
'A very excellent provenance',
'-used',
file_entity_id,
'-executed',
repo_url)
activity_id = parse(r'Set provenance record (\d+) on entity syn\d+', output)
output = run('synapse',
'--skip-checks',
'get-provenance',
'--id',
file_entity_id)
activity = json.loads(output)
assert activity['name'] == 'TestActivity'
assert activity['description'] == 'A very excellent provenance'
used = utils._find_used(activity, lambda used: 'reference' in used)
assert used['reference']['targetId'] == file_entity_id
used = utils._find_used(activity, lambda used: 'url' in used)
assert used['url'] == repo_url
assert used['wasExecuted'] == True
# Note: Tests shouldn't have external dependencies
# but this is a pretty picture of Singapore
singapore_url = 'http://upload.wikimedia.org/wikipedia/commons/' \
'thumb/3/3e/1_singapore_city_skyline_dusk_panorama_2011.jpg' \
'/1280px-1_singapore_city_skyline_dusk_panorama_2011.jpg'
# Test external file handle
output = run('synapse',
'--skip-checks',
'add',
'-name',
'Singapore',
'-description',
'A nice picture of Singapore',
'-parentid',
project_id,
singapore_url)
external_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we created an external file handle
f2 = syn.get(external_entity_id)
fh = syn._getFileHandle(f2.dataFileHandleId)
assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle'
output = run('synapse',
'--skip-checks',
'get',
external_entity_id)
downloaded_filename = parse(r'Downloaded file:\s+(.*)', output)
schedule_for_cleanup(downloaded_filename)
assert os.path.exists(downloaded_filename)
# Delete the Project
output = run('synapse',
'--skip-checks',
'delete',
project_id)
def test_command_line_client_annotations():
# Create a Project
output = run('synapse',
'--skip-checks',
'create',
'-name',
str(uuid.uuid4()),
'-description',
'test of command line client',
'Project')
project_id = parse(r'Created entity:\s+(syn\d+)\s+', output)
schedule_for_cleanup(project_id)
# Create a File
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'add',
'-name',
'BogusFileEntity',
'-description',
'Bogus data to test file upload',
'-parentid',
project_id,
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Test setting annotations
output = run('synapse',
'--skip-checks',
'set-annotations',
'--id',
file_entity_id,
'--annotations',
'{"foo": 1, "bar": "1", "baz": [1, 2, 3]}',
)
# Test getting annotations
# check that the three things set are correct
# This test should be adjusted to check for equality of the
# whole annotation dictionary once the issue of other
# attributes (creationDate, eTag, id, uri) being returned is resolved
# See: https://sagebionetworks.jira.com/browse/SYNPY-175
output = run('synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id
)
annotations = json.loads(output)
assert annotations['foo'] == [1]
assert annotations['bar'] == [u"1"]
assert annotations['baz'] == [1, 2, 3]
# Test setting annotations by replacing existing ones.
output = run('synapse',
'--skip-checks',
'set-annotations',
'--id',
file_entity_id,
'--annotations',
'{"foo": 2}',
'--replace'
)
# Test that the annotation was updated
output = run('synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id
)
annotations = json.loads(output)
assert annotations['foo'] == [2]
# Since this replaces the existing annotations, previous values
# Should not be available.
assert_raises(KeyError, lambda key: annotations[key], 'bar')
assert_raises(KeyError, lambda key: annotations[key], 'baz')
# Test running add command to set annotations on a new object
filename2 = utils.make_bogus_data_file()
schedule_for_cleanup(filename2)
output = run('synapse',
'--skip-checks',
'add',
'-name',
'BogusData2',
'-description',
'Bogus data to test file upload with add and add annotations',
'-parentid',
project_id,
'--annotations',
'{"foo": 123}',
filename2)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Test that the annotation was updated
output = run('synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id
)
annotations = json.loads(output)
assert annotations['foo'] == [123]
# Test running store command to set annotations on a new object
filename3 = utils.make_bogus_data_file()
schedule_for_cleanup(filename3)
output = run('synapse',
'--skip-checks',
'store',
'--name',
'BogusData3',
'--description',
'\"Bogus data to test file upload with store and add annotations\"',
'--parentid',
project_id,
'--annotations',
'{"foo": 456}',
filename3)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Test that the annotation was updated
output = run('synapse',
'--skip-checks',
'get-annotations',
'--id',
file_entity_id
)
annotations = json.loads(output)
assert annotations['foo'] == [456]
def test_command_line_store_and_submit():
# Create a Project
output = run('synapse',
'--skip-checks',
'store',
'--name',
str(uuid.uuid4()),
'--description',
'test of store command',
'--type',
'Project')
project_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
schedule_for_cleanup(project_id)
# Create and upload a file
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'store',
'--description',
'Bogus data to test file upload',
'--parentid',
project_id,
'--file',
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we stored the file in Synapse
f1 = syn.get(file_entity_id)
fh = syn._getFileHandle(f1.dataFileHandleId)
assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'
# Test that entity is named after the file it contains
assert f1.name == os.path.basename(filename)
# Create an Evaluation to submit to
eval = Evaluation(name=str(uuid.uuid4()), contentSource=project_id)
eval = syn.store(eval)
schedule_for_cleanup(eval)
# Submit a bogus file
output = run('synapse',
'--skip-checks',
'submit',
'--evaluation',
eval.id,
'--name',
'Some random name',
'--entity',
file_entity_id)
submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)
# Testing different command line options for submitting to an evaluation
# Submitting to an evaluation by evaluation ID
output = run('synapse',
'--skip-checks',
'submit',
'--evalID',
eval.id,
'--name',
'Some random name',
'--alias',
'My Team',
'--entity',
file_entity_id)
submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)
# Update the file
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'store',
'--id',
file_entity_id,
'--file',
filename)
updated_entity_id = parse(r'Updated entity:\s+(syn\d+)', output)
schedule_for_cleanup(updated_entity_id)
# Submit an updated bogus file and this time by evaluation name
output = run('synapse',
'--skip-checks',
'submit',
'--evaluationName',
eval.name,
'--entity',
file_entity_id)
submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)
# Tests shouldn't have external dependencies, but here it's required
ducky_url = 'https://www.synapse.org/Portal/clear.cache.gif'
# Test external file handle
output = run('synapse',
'--skip-checks',
'store',
'--name',
'Rubber Ducky',
'--description',
'I like rubber duckies',
'--parentid',
project_id,
'--file',
ducky_url)
external_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
schedule_for_cleanup(external_entity_id)
# Verify that we created an external file handle
f2 = syn.get(external_entity_id)
fh = syn._getFileHandle(f2.dataFileHandleId)
assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.ExternalFileHandle'
#submit an external file to an evaluation and use provenance
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
output = run('synapse',
'--skip-checks',
'submit',
'--evalID',
eval.id,
'--file',
filename,
'--parent',
project_id,
'--used',
external_entity_id,
'--executed',
repo_url
)
submission_id = parse(r'Submitted \(id: (\d+)\) entity:\s+', output)
# Delete project
output = run('synapse',
'--skip-checks',
'delete',
project_id)
def test_command_get_recursive_and_query():
"""Tests the 'synapse get -r' and 'synapse get -q' functions"""
project_entity = project
# Create Folders in Project
folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()),
parent=project_entity))
folder_entity2 = syn.store(synapseclient.Folder(name=str(uuid.uuid4()),
parent=folder_entity))
# Create and upload two files in sub-Folder
uploaded_paths = []
file_entities = []
for i in range(2):
f = utils.make_bogus_data_file()
uploaded_paths.append(f)
schedule_for_cleanup(f)
file_entity = synapseclient.File(f, parent=folder_entity2)
file_entity = syn.store(file_entity)
file_entities.append(file_entity)
schedule_for_cleanup(f)
#Add a file in the Folder as well
f = utils.make_bogus_data_file()
uploaded_paths.append(f)
schedule_for_cleanup(f)
file_entity = synapseclient.File(f, parent=folder_entity)
file_entity = syn.store(file_entity)
file_entities.append(file_entity)
time.sleep(2) # get -r uses syncFromSynapse() which uses getChildren(), which is not immediately consistent, but faster than chunked queries.
### Test recursive get
output = run('synapse', '--skip-checks',
'get', '-r',
folder_entity.id)
#Verify that we downloaded files:
new_paths = [os.path.join('.', folder_entity2.name, os.path.basename(f)) for f in uploaded_paths[:-1]]
new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1])))
schedule_for_cleanup(folder_entity.name)
for downloaded, uploaded in zip(new_paths, uploaded_paths):
assert os.path.exists(downloaded)
assert filecmp.cmp(downloaded, uploaded)
schedule_for_cleanup(downloaded)
time.sleep(3) # get -q uses chunkedQuery, which is eventually consistent
### Test query get
### Note: We're not querying on annotations because tests can fail if there
### are lots of jobs queued as happens when staging is syncing
output = run('synapse', '--skip-checks',
'get', '-q', "select id from file where parentId=='%s'" %
folder_entity2.id)
#Verify that we downloaded files from folder_entity2
new_paths = [os.path.join('.', os.path.basename(f)) for f in uploaded_paths[:-1]]
for downloaded, uploaded in zip(new_paths, uploaded_paths[:-1]):
assert os.path.exists(downloaded)
assert filecmp.cmp(downloaded, uploaded)
schedule_for_cleanup(downloaded)
schedule_for_cleanup(new_paths[0])
### Test query get using a Table with an entity column
### This should be replaced when Table File Views are implemented in the client
cols = []
cols.append(synapseclient.Column(name='id', columnType='ENTITYID'))
schema1 = syn.store(synapseclient.Schema(name='Foo Table', columns=cols, parent=project_entity))
schedule_for_cleanup(schema1.id)
data1 =[[x.id] for x in file_entities]
row_reference_set1 = syn.store(synapseclient.RowSet(schema=schema1,
rows=[synapseclient.Row(r) for r in data1]))
time.sleep(3) # get -q uses chunkedQuery, which is eventually consistent
### Test Table/View query get
output = run('synapse', '--skip-checks', 'get', '-q',
"select id from %s" % schema1.id)
#Verify that we downloaded files:
new_paths = [os.path.join('.', os.path.basename(f)) for f in uploaded_paths[:-1]]
new_paths.append(os.path.join('.', os.path.basename(uploaded_paths[-1])))
schedule_for_cleanup(folder_entity.name)
for downloaded, uploaded in zip(new_paths, uploaded_paths):
assert os.path.exists(downloaded)
assert filecmp.cmp(downloaded, uploaded)
schedule_for_cleanup(downloaded)
schedule_for_cleanup(new_paths[0])
def test_command_copy():
"""Tests the 'synapse cp' function"""
# Create a Project
project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4())))
schedule_for_cleanup(project_entity.id)
# Create a Folder in Project
folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()),
parent=project_entity))
schedule_for_cleanup(folder_entity.id)
# Create and upload a file in Folder
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
annots = {'test':['hello_world']}
# Create, upload, and set annotations on a file in Folder
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
file_entity = syn.store(synapseclient.File(filename, parent=folder_entity))
externalURL_entity = syn.store(synapseclient.File(repo_url,name='rand',parent=folder_entity,synapseStore=False))
syn.setAnnotations(file_entity,annots)
syn.setAnnotations(externalURL_entity,annots)
schedule_for_cleanup(file_entity.id)
schedule_for_cleanup(externalURL_entity.id)
### Test cp function
output = run('synapse', '--skip-checks',
'cp',file_entity.id,
'--destinationId',project_entity.id)
output_URL = run('synapse', '--skip-checks',
'cp',externalURL_entity.id,
'--destinationId',project_entity.id)
copied_id = parse(r'Copied syn\d+ to (syn\d+)',output)
copied_URL_id = parse(r'Copied syn\d+ to (syn\d+)',output_URL)
#Verify that our copied files are identical
copied_ent = syn.get(copied_id)
copied_URL_ent = syn.get(copied_URL_id,downloadFile=False)
schedule_for_cleanup(copied_id)
schedule_for_cleanup(copied_URL_id)
copied_ent_annot = syn.getAnnotations(copied_id)
copied_url_annot = syn.getAnnotations(copied_URL_id)
copied_prov = syn.getProvenance(copied_id)['used'][0]['reference']['targetId']
copied_url_prov = syn.getProvenance(copied_URL_id)['used'][0]['reference']['targetId']
#Make sure copied files are the same
assert copied_prov == file_entity.id
assert copied_ent_annot == annots
assert copied_ent.properties.dataFileHandleId == file_entity.properties.dataFileHandleId
#Make sure copied URLs are the same
assert copied_url_prov == externalURL_entity.id
assert copied_url_annot == annots
assert copied_URL_ent.externalURL == repo_url
assert copied_URL_ent.name == 'rand'
assert copied_URL_ent.properties.dataFileHandleId == externalURL_entity.properties.dataFileHandleId
#Verify that errors are being thrown when a
#file is copied to a folder/project that has a file with the same filename
assert_raises(ValueError,run, 'synapse', '--debug', '--skip-checks',
'cp',file_entity.id,
'--destinationId',project_entity.id)
def test_command_line_using_paths():
# Create a Project
project_entity = syn.store(synapseclient.Project(name=str(uuid.uuid4())))
schedule_for_cleanup(project_entity.id)
# Create a Folder in Project
folder_entity = syn.store(synapseclient.Folder(name=str(uuid.uuid4()), parent=project_entity))
# Create and upload a file in Folder
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
file_entity = syn.store(synapseclient.File(filename, parent=folder_entity))
# Verify that we can use show with a filename
output = run('synapse', '--skip-checks', 'show', filename)
id = parse(r'File: %s\s+\((syn\d+)\)\s+' %os.path.split(filename)[1], output)
assert file_entity.id == id
# Verify that limitSearch works by making sure we get the file entity
# that's inside the folder
file_entity2 = syn.store(synapseclient.File(filename, parent=project_entity))
output = run('synapse', '--skip-checks', 'get',
'--limitSearch', folder_entity.id,
filename)
id = parse(r'Associated file: .* with synapse ID (syn\d+)', output)
name = parse(r'Associated file: (.*) with synapse ID syn\d+', output)
assert_equals(file_entity.id, id)
assert utils.equal_paths(name, filename)
#Verify that set-provenance works with filepath
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
output = run('synapse', '--skip-checks', 'set-provenance',
'-id', file_entity2.id,
'-name', 'TestActivity',
'-description', 'A very excellent provenance',
'-used', filename,
'-executed', repo_url,
'-limitSearch', folder_entity.id)
activity_id = parse(r'Set provenance record (\d+) on entity syn\d+', output)
output = run('synapse', '--skip-checks', 'get-provenance',
'-id', file_entity2.id)
activity = json.loads(output)
assert activity['name'] == 'TestActivity'
assert activity['description'] == 'A very excellent provenance'
#Verify that store works with provenance specified with filepath
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
filename2 = utils.make_bogus_data_file()
schedule_for_cleanup(filename2)
output = run('synapse', '--skip-checks', 'add', filename2,
'-parentid', project_entity.id,
'-used', filename,
'-executed', '%s %s' %(repo_url, filename))
entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
output = run('synapse', '--skip-checks', 'get-provenance',
'-id', entity_id)
activity = json.loads(output)
a = [a for a in activity['used'] if a['wasExecuted']==False]
assert a[0]['reference']['targetId'] in [file_entity.id, file_entity2.id]
#Test associate command
#I have two files in Synapse filename and filename2
path = tempfile.mkdtemp()
schedule_for_cleanup(path)
shutil.copy(filename, path)
shutil.copy(filename2, path)
output = run('synapse', '--skip-checks', 'associate', path, '-r')
output = run('synapse', '--skip-checks', 'show', filename)
def test_table_query():
"""Test command line ability to do table query.
"""
cols = []
cols.append(synapseclient.Column(name='name', columnType='STRING', maximumSize=1000))
cols.append(synapseclient.Column(name='foo', columnType='STRING', enumValues=['foo', 'bar', 'bat']))
cols.append(synapseclient.Column(name='x', columnType='DOUBLE'))
cols.append(synapseclient.Column(name='age', columnType='INTEGER'))
cols.append(synapseclient.Column(name='cartoon', columnType='BOOLEAN'))
project_entity = project
schema1 = syn.store(synapseclient.Schema(name=str(uuid.uuid4()), columns=cols, parent=project_entity))
schedule_for_cleanup(schema1.id)
data1 =[['Chris', 'bar', 11.23, 45, False],
['Jen', 'bat', 14.56, 40, False],
['Jane', 'bat', 17.89, 6, False],
['Henry', 'bar', 10.12, 1, False]]
row_reference_set1 = syn.store(synapseclient.RowSet(schema=schema1,
rows=[synapseclient.Row(r) for r in data1]))
# Test query
output = run('synapse', '--skip-checks', 'query',
'select * from %s' % schema1.id)
output_rows = output.rstrip("\n").split("\n")
# Check the length of the output
assert len(output_rows) == 5, "got %s rows" % (len(output_rows),)
# Check that headers are correct.
# Should be column names in schema plus the ROW_ID and ROW_VERSION
my_headers_set = output_rows[0].split("\t")
expected_headers_set = ["ROW_ID", "ROW_VERSION"] + list(map(lambda x: x.name, cols))
assert my_headers_set == expected_headers_set, "%r != %r" % (my_headers_set, expected_headers_set)
def test_login():
if not other_user['username']:
raise SkipTest("Skipping test for login command: No [test-authentication] in %s" % client.CONFIG_FILE)
alt_syn = synapseclient.Synapse()
with patch.object(alt_syn, "login") as mock_login, patch.object(alt_syn, "getUserProfile", return_value={"userName":"test_user","ownerId":"ownerId"}) as mock_get_user_profile:
output = run('synapse', '--skip-checks', 'login',
'-u', other_user['username'],
'-p', other_user['password'],
'--rememberMe',
syn=alt_syn)
mock_login.assert_called_once_with(other_user['username'], other_user['password'], forced=True, rememberMe=True, silent=False)
mock_get_user_profile.assert_called_once_with()
def test_configPath():
"""Test using a user-specified configPath for Synapse configuration file.
"""
tmp_config_file = tempfile.NamedTemporaryFile(suffix='.synapseConfig', delete=False)
shutil.copyfile(synapseclient.client.CONFIG_FILE, tmp_config_file.name)
# Create a File
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
output = run('synapse',
'--skip-checks',
'--configPath',
tmp_config_file.name,
'add',
'-name',
'BogusFileEntityTwo',
'-description',
'Bogus data to test file upload',
'-parentid',
project.id,
filename)
file_entity_id = parse(r'Created/Updated entity:\s+(syn\d+)\s+', output)
# Verify that we stored the file in Synapse
f1 = syn.get(file_entity_id)
fh = syn._getFileHandle(f1.dataFileHandleId)
assert fh['concreteType'] == 'org.sagebionetworks.repo.model.file.S3FileHandle'
def _description_wiki_check(run_output, expected_description):
entity_id = parse(r'Created.* entity:\s+(syn\d+)\s+', run_output)
wiki = syn.getWiki(entity_id)
assert_equals(expected_description, wiki.markdown)
def _create_temp_file_with_cleanup(specific_file_text = None):
if specific_file_text:
with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as file:
file.write(specific_file_text)
filename = file.name
else:
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
return filename
def test_create__with_description():
output = run('synapse',
'create',
'Folder',
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--description',
description_text
)
_description_wiki_check(output, description_text)
def test_store__with_description():
output = run('synapse',
'store',
upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--description',
description_text
)
_description_wiki_check(output, description_text)
def test_add__with_description():
output = run('synapse',
'add',
upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--description',
description_text
)
_description_wiki_check(output, description_text)
def test_create__with_descriptionFile():
output = run('synapse',
'create',
'Folder',
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
def test_store__with_descriptionFile():
output = run('synapse',
'store',
upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
def test_add__with_descriptionFile():
output = run('synapse',
'add',
upload_filename,
'-name',
str(uuid.uuid4()),
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
def test_create__update_description():
name = str(uuid.uuid4())
output = run('synapse',
'create',
'Folder',
'-name',
name,
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
output = run('synapse',
'create',
'Folder',
'-name',
name,
'-parentid',
project.id,
'--description',
update_description_text
)
_description_wiki_check(output, update_description_text)
def test_store__update_description():
name = str(uuid.uuid4())
output = run('synapse',
'store',
upload_filename,
'-name',
name,
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
output = run('synapse',
'store',
upload_filename,
'-name',
name,
'-parentid',
project.id,
'--description',
update_description_text
)
_description_wiki_check(output, update_description_text)
def test_add__update_description():
name = str(uuid.uuid4())
output = run('synapse',
'add',
upload_filename,
'-name',
name,
'-parentid',
project.id,
'--descriptionFile',
desc_filename
)
_description_wiki_check(output, description_text)
output = run('synapse',
'add',
upload_filename,
'-name',
name,
'-parentid',
project.id,
'--description',
update_description_text
)
_description_wiki_check(output, update_description_text)
|
apache-2.0
|
g8os/home
|
Previous Releases.md
|
2665
|
# Previous releases
- October 11, 2017: **[1.1.0-alpha-8](release_notes/1.1.0-alpha-8.md)**:
[0-Initramfs Builder](https://github.com/zero-os/0-initramfs/tree/1.1.0-alpha-8), [0-Core](https://github.com/zero-os/0-core/tree/1.1.0-alpha-8), [0-FS](https://github.com/zero-os/0-fs/tree/1.1.0-alpha-8), [Zero-OS Orchestrator](https://github.com/zero-os/0-orchestrator/tree/1.1.0-alpha-8), [Zero-os 0-Disk](https://github.com/zero-os/0-disk/tree/1.1.0-alpha-8)
- August 09, 2017: This release is a combination of the milestones **[1.1.0-alpha-6 and 1.1.0-alpha-7](release_notes/1.1.0-alpha-7.md)**:
[0-Initramfs Builder](https://github.com/zero-os/0-initramfs/tree/1.1.0-alpha-7), [0-Core](https://github.com/zero-os/0-core/tree/1.1.0-alpha-7), [0-FS](https://github.com/zero-os/0-fs/tree/1.1.0-alpha-7), [Zero-OS Orchestrator](https://github.com/zero-os/0-orchestrator/tree/1.1.0-alpha-7)
- July 7, 2017: **[v1.1.0-alpha-5](release_notes/1.1.0-alpha-5.md)**:
[0-Initramfs Builder](https://github.com/zero-os/0-initramfs/tree/1.1.0-alpha-5), [0-Core](https://github.com/zero-os/0-core/tree/1.1.0-alpha-5), [0-FS](https://github.com/zero-os/0-fs/tree/1.1.0-alpha-5), [Zero-OS Orchestrator](https://github.com/zero-os/0-orchestrator/tree/1.1.0-alpha-5)
- June 16, 2017: **[v1.1.0-alpha-4](release_notes/1.1.0-alpha-4.md)**: [0-Initramfs Builder](https://github.com/zero-os/0-initramfs/tree/1.1.0-alpha-4), [0-Core](https://github.com/zero-os/0-core/tree/1.1.0-alpha-4), [0-FS](https://github.com/zero-os/0-fs/tree/1.1.0-alpha-4), [Zero-OS Orchestrator](https://github.com/zero-os/0-orchestrator/tree/1.1.0-alpha-4)
- Introduces: Redundant vDisks, vDisk rollback, Security
- June 6, 2017: **[v1.1.0-alpha-3](release_notes/1.1.0-alpha-3.md)** : [0-Initramfs Builder](https://github.com/zero-os/0-initramfs/releases/tag/v1.1.0-alpha-3), [0-Core](https://github.com/zero-os/0-core/releases/tag/v1.1.0-alpha-3), [0-FS](https://github.com/zero-os/0-fs/releases/tag/v1.1.0-alpha-3), [Zero-OS Orchestrator](https://github.com/zero-os/0-orchestrator/releases/tag/v1.1.0-alpha-3)
- Introduces: Basic integration with OpenvCloud, Zero-OS Gateway, JumpScale 9
- May 12, 2017: **v1.1.0-alpha-2**: [0-Initramfs Builder](https://github.com/zero-os/0-initramfs/releases/tag/v1.1.0-alpha-2), [0-Core](https://github.com/zero-os/0-core/releases/tag/v1.1.0-alpha-2), [0-FS](https://github.com/zero-os/0-fs/releases/tag/v1.1.0-alpha-2), [Zero-OS Rest API](https://github.com/zero-os/0-rest-api/releases/tag/v1.1.0-alpha-2)
- Introduces: Hub, Rest API, 0-disk
- Dec 7, 2016: [v0.9.0](https://github.com/zero-os/0-core/releases/tag/v0.9.0)
- First usable beta version of the Zero-OS
|
apache-2.0
|
lisencn11/lisencn11.github.io
|
_posts/2016-08-20-Leetcode-256-summary.md
|
1654
|
---
layout: post
title: Leetcode Problem 256 Summary
date: 2016-08-20
categories: blog
tags: [study]
---
# Problem
There is a row of n houses; each house can be painted with one of three colors: red, blue or green. The cost of painting each house with a certain color is different. You have to paint all the houses such that no two adjacent houses have the same color.
The cost of painting each house with a certain color is represented by a n x 3 cost matrix. For example, costs[0][0] is the cost of painting house 0 with color red; costs[1][2] is the cost of painting house 1 with color green, and so on... Find the minimum cost to paint all houses.
Note:
All costs are positive integers.
# My Algorithm
This is a dynamic programming problem. Let minCosts[i][j] denote the minimum total cost of painting houses 0 through i, given that house i is painted with color j. Each entry depends only on the previous house's two other colors, and the final answer is the minimum of minCosts[n - 1][j] over the three colors.
# Code
{% highlight java %}
public class Solution {
public int minCost(int[][] costs) {
if (costs == null || costs.length == 0) return 0;
int len = costs.length;
int[][] minCosts = new int[len][costs[0].length];
for (int i = 0; i < costs[0].length; i++) minCosts[0][i] = costs[0][i];
for (int i = 1; i < len; i++) {
minCosts[i][0] = Math.min(minCosts[i - 1][1], minCosts[i - 1][2]) + costs[i][0];
minCosts[i][1] = Math.min(minCosts[i - 1][0], minCosts[i - 1][2]) + costs[i][1];
minCosts[i][2] = Math.min(minCosts[i - 1][0], minCosts[i - 1][1]) + costs[i][2];
}
return Math.min(Math.min(minCosts[len - 1][0], minCosts[len - 1][1]), minCosts[len - 1][2]);
}
}
{% endhighlight %}
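As a side note (my own addition, not part of the original solution): since each row of minCosts only reads the previous row, the whole table can be collapsed into three running variables, cutting the extra space from O(n) to O(1) while keeping O(n) time:
{% highlight java %}
public class Solution {
    public int minCost(int[][] costs) {
        if (costs == null || costs.length == 0) return 0;
        int red = costs[0][0], blue = costs[0][1], green = costs[0][2];
        for (int i = 1; i < costs.length; i++) {
            // Each new value only needs the previous house's other two colors.
            int newRed = Math.min(blue, green) + costs[i][0];
            int newBlue = Math.min(red, green) + costs[i][1];
            int newGreen = Math.min(red, blue) + costs[i][2];
            red = newRed;
            blue = newBlue;
            green = newGreen;
        }
        return Math.min(red, Math.min(blue, green));
    }
}
{% endhighlight %}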
|
apache-2.0
|
keith-turner/accumulo
|
core/src/main/java/org/apache/accumulo/core/metadata/TabletFileUtil.java
|
1705
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.core.metadata;
import org.apache.hadoop.fs.Path;
/**
* Utility class for validation of metadata tablet files.
*/
public class TabletFileUtil {
/**
* Validate if string is a valid path. Return normalized string or throw exception if not valid.
* This was added to facilitate more use of TabletFile over String but this puts the validation in
* one location in the case where TabletFile can't be used.
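*
* <p>
* Illustrative example (not from the original source): {@code validate("hdfs://nn:8020/accumulo/tables/t1/f1.rf")}
* returns the normalized path string, while {@code validate("/accumulo/tables/t1/f1.rf")} throws an
* {@link IllegalArgumentException} because no scheme is present.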
*/
public static String validate(String path) {
Path p = new Path(path);
if (p.toUri().getScheme() == null) {
throw new IllegalArgumentException("Invalid path provided, no scheme in " + path);
}
return p.toString();
}
public static Path validate(Path path) {
if (path.toUri().getScheme() == null) {
throw new IllegalArgumentException("Invalid path provided, no scheme in " + path);
}
return path;
}
}
|
apache-2.0
|
istio/istio.io
|
content/en/docs/examples/microservices-istio/setup-kubernetes-cluster/index.md
|
9160
|
---
title: Setup a Kubernetes Cluster
overview: Set up your Kubernetes cluster for the tutorial.
weight: 2
owner: istio/wg-docs-maintainers
test: no
---
{{< boilerplate work-in-progress >}}
In this module, you set up a Kubernetes cluster that has Istio installed and a
namespace to use throughout the tutorial.
{{< warning >}}
If you are in a workshop and the instructors provide a cluster for you,
proceed to [setting up your local computer](/docs/examples/microservices-istio/setup-local-computer).
{{</ warning >}}
1. Ensure you have access to a
[Kubernetes cluster](https://kubernetes.io/docs/tutorials/kubernetes-basics/).
You can use the
[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/docs/quickstart)
or the
[IBM Cloud Kubernetes Service](https://cloud.ibm.com/docs/containers?topic=containers-getting-started).
1. Create an environment variable to store the name
of a namespace that you will use when you run the tutorial commands.
You can use any name, for example `tutorial`.
{{< text bash >}}
$ export NAMESPACE=tutorial
{{< /text >}}
1. Create the namespace:
{{< text bash >}}
$ kubectl create namespace $NAMESPACE
{{< /text >}}
{{< tip >}}
If you are an instructor, you should allocate a separate namespace for each
participant. The tutorial supports multiple participants working in their own
namespaces simultaneously.
{{< /tip >}}
1. [Install Istio](/docs/setup/getting-started/) using the `demo` profile.
1. The [Kiali](/docs/ops/integrations/kiali/) and [Prometheus](/docs/ops/integrations/prometheus/) addons are used in this example and need to be installed. All addons are installed using:
{{< text bash >}}
$ kubectl apply -f @samples/addons@
{{< /text >}}
{{< tip >}}
If there are errors trying to install the addons, try running the command again. There may
be some timing issues which will be resolved when the command is run again.
{{< /tip >}}
1. Create a Kubernetes Ingress resource for these common Istio services using
the `kubectl` command shown. It is not necessary to be familiar with each of
these services at this point in the tutorial.
- [Grafana](https://grafana.com/docs/guides/getting_started/)
- [Jaeger](https://www.jaegertracing.io/docs/1.13/getting-started/)
- [Prometheus](https://prometheus.io/docs/prometheus/latest/getting_started/)
- [Kiali](https://www.kiali.io/documentation/getting-started/)
The `kubectl` command can accept an in-line configuration to create the
Ingress resources for each service:
{{< text bash >}}
$ kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: istio-system
namespace: istio-system
annotations:
kubernetes.io/ingress.class: istio
spec:
rules:
- host: my-istio-dashboard.io
http:
paths:
- path: /
pathType: Prefix
backend:
serviceName: grafana
servicePort: 3000
- host: my-istio-tracing.io
http:
paths:
- path: /
pathType: Prefix
backend:
serviceName: tracing
servicePort: 9411
- host: my-istio-logs-database.io
http:
paths:
- path: /
pathType: Prefix
backend:
serviceName: prometheus
servicePort: 9090
- host: my-kiali.io
http:
paths:
- path: /
pathType: Prefix
backend:
serviceName: kiali
servicePort: 20001
EOF
{{< /text >}}
1. Create a role to provide read access to the `istio-system` namespace. This
role is required to limit permissions of the participants in the steps
below.
{{< text bash >}}
$ kubectl apply -f - <<EOF
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: istio-system-access
namespace: istio-system
rules:
- apiGroups: ["", "extensions", "apps"]
resources: ["*"]
verbs: ["get", "list"]
EOF
{{< /text >}}
1. Create a service account for each participant:
{{< text bash >}}
$ kubectl apply -f - <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: ${NAMESPACE}-user
namespace: $NAMESPACE
EOF
{{< /text >}}
1. Limit each participant's permissions. During the tutorial, participants only
need to create resources in their own namespace and to read resources from the
`istio-system` namespace. Even when using your own cluster, it is good practice
to avoid interfering with other namespaces.
Create a role to allow read-write access to each participant's namespace.
Bind the participant's service account to this role and to the role for
reading resources from `istio-system`:
{{< text bash >}}
$ kubectl apply -f - <<EOF
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: ${NAMESPACE}-access
namespace: $NAMESPACE
rules:
- apiGroups: ["", "extensions", "apps", "networking.k8s.io", "networking.istio.io", "authentication.istio.io",
"rbac.istio.io", "config.istio.io", "security.istio.io"]
resources: ["*"]
verbs: ["*"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: ${NAMESPACE}-access
namespace: $NAMESPACE
subjects:
- kind: ServiceAccount
name: ${NAMESPACE}-user
namespace: $NAMESPACE
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ${NAMESPACE}-access
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: ${NAMESPACE}-istio-system-access
namespace: istio-system
subjects:
- kind: ServiceAccount
name: ${NAMESPACE}-user
namespace: $NAMESPACE
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: istio-system-access
EOF
{{< /text >}}
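Optionally (this verification step is not part of the original instructions), you can
confirm the bindings took effect with `kubectl auth can-i`, impersonating the
participant's service account:
{{< text bash >}}
$ kubectl auth can-i list pods -n istio-system --as=system:serviceaccount:${NAMESPACE}:${NAMESPACE}-user
yes
{{< /text >}}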
1. Each participant needs to use their own Kubernetes configuration file. This configuration file specifies
the cluster details, the service account, the credentials and the namespace of the participant.
The `kubectl` command uses the configuration file to operate on the cluster.
Generate a Kubernetes configuration file for each participant:
{{< tip >}}
This command assumes your cluster is named `tutorial-cluster`. If your cluster is named differently, replace all references with the name of your cluster.
{{</ tip >}}
{{< text bash >}}
$ cat <<EOF > ./${NAMESPACE}-user-config.yaml
apiVersion: v1
kind: Config
preferences: {}
clusters:
- cluster:
certificate-authority-data: $(kubectl get secret $(kubectl get sa ${NAMESPACE}-user -n $NAMESPACE -o jsonpath={.secrets..name}) -n $NAMESPACE -o jsonpath='{.data.ca\.crt}')
server: $(kubectl config view -o jsonpath="{.clusters[?(.name==\"$(kubectl config view -o jsonpath="{.contexts[?(.name==\"$(kubectl config current-context)\")].context.cluster}")\")].cluster.server}")
name: ${NAMESPACE}-cluster
users:
- name: ${NAMESPACE}-user
user:
as-user-extra: {}
client-key-data: $(kubectl get secret $(kubectl get sa ${NAMESPACE}-user -n $NAMESPACE -o jsonpath={.secrets..name}) -n $NAMESPACE -o jsonpath='{.data.ca\.crt}')
token: $(kubectl get secret $(kubectl get sa ${NAMESPACE}-user -n $NAMESPACE -o jsonpath={.secrets..name}) -n $NAMESPACE -o jsonpath={.data.token} | base64 --decode)
contexts:
- context:
cluster: ${NAMESPACE}-cluster
namespace: ${NAMESPACE}
user: ${NAMESPACE}-user
name: ${NAMESPACE}
current-context: ${NAMESPACE}
EOF
{{< /text >}}
1. Set the `KUBECONFIG` environment variable for the `${NAMESPACE}-user-config.yaml`
configuration file:
{{< text bash >}}
$ export KUBECONFIG=$PWD/${NAMESPACE}-user-config.yaml
{{< /text >}}
1. Verify that the configuration took effect by printing the current namespace:
{{< text bash >}}
$ kubectl config view -o jsonpath="{.contexts[?(@.name==\"$(kubectl config current-context)\")].context.namespace}"
tutorial
{{< /text >}}
You should see the name of your namespace in the output.
1. If you are setting up the cluster for yourself, copy the
`${NAMESPACE}-user-config.yaml` file mentioned in the previous steps to your
local computer, where `${NAMESPACE}` is the name of the namespace you
provided in the previous steps. For example, `tutorial-user-config.yaml`.
You will need this file later in the tutorial.
If you are an instructor, send the generated configuration files to each
participant. The participants must copy their configuration file to their local computer.
Congratulations, you configured your cluster for the tutorial!
You are ready to [setup a local computer](/docs/examples/microservices-istio/setup-local-computer).
|
apache-2.0
|
scarrupt/Capstone-Project
|
app/src/main/java/com/codefactoring/android/backlogtracker/sync/fetchers/UserDataFetcher.java
|
3378
|
package com.codefactoring.android.backlogtracker.sync.fetchers;
import android.util.Log;
import com.codefactoring.android.backlogapi.BacklogApiClient;
import com.codefactoring.android.backlogapi.models.User;
import com.codefactoring.android.backlogtracker.sync.models.BacklogImage;
import com.codefactoring.android.backlogtracker.sync.models.UserDto;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import okhttp3.ResponseBody;
import rx.Observable;
import rx.functions.Func1;
public class UserDataFetcher {
public final String LOG_TAG = UserDataFetcher.class.getSimpleName();
private final BacklogApiClient mBacklogApiClient;
public UserDataFetcher(BacklogApiClient backlogApiClient) {
mBacklogApiClient = backlogApiClient;
}
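/**
* Retrieves every Backlog user and converts each one into a {@link UserDto},
* fetching each user's profile icon along the way. API errors are logged and
* yield an empty list; the call blocks until the complete list is assembled.
*/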
public List<UserDto> getUserList() {
return mBacklogApiClient.getUserOperations().getUserList()
.onErrorReturn(new Func1<Throwable, List<User>>() {
@Override
public List<User> call(Throwable throwable) {
Log.e(LOG_TAG, "Error on getUserList", throwable);
return new ArrayList<>();
}
})
.flatMapIterable(new Func1<List<User>, Iterable<User>>() {
@Override
public Iterable<User> call(List<User> users) {
return users;
}
})
.flatMap(new Func1<User, Observable<UserDto>>() {
@Override
public Observable<UserDto> call(User user) {
final UserDto userDto = new UserDto();
userDto.setId(user.getId());
userDto.setUserId(user.getUserId());
userDto.setName(user.getName());
userDto.setImage(getBacklogImage(user.getId()));
return Observable.just(userDto);
}
})
.toList()
.toBlocking()
.first();
}
private BacklogImage getBacklogImage(final long id) {
return mBacklogApiClient.getUserOperations().getUserIcon(id)
.flatMap(new Func1<ResponseBody, Observable<BacklogImage>>() {
@Override
public Observable<BacklogImage> call(ResponseBody response) {
final String subtype = response.contentType().subtype();
final byte[] bytes;
try {
bytes = response.bytes();
} catch (IOException ex) {
Log.e(LOG_TAG, "Error on reading image", ex);
// Emit an explicit null item instead of returning a null Observable from
// flatMap, which would surface as a NullPointerException downstream.
return Observable.just((BacklogImage) null);
}
return Observable.just(new BacklogImage(id + "." + subtype, bytes));
}
})
.onErrorReturn(new Func1<Throwable, BacklogImage>() {
@Override
public BacklogImage call(Throwable throwable) {
Log.e(LOG_TAG, "Error on get Project Icon", throwable);
return null;
}
})
.toBlocking()
.first();
}
}
|
apache-2.0
|
zeit/now-cli
|
examples/hugo/themes/ananke/exampleSite/content/about/_index.md
|
862
|
---
title: 'About'
description: 'A few years ago, while visiting or, rather, rummaging about Notre-Dame, the author of this book found, in an obscure nook of one of the towers, the following word, engraved by hand upon the wall: —ANANKE.'
featured_image: ''
---
{{< figure src="/images/Victor_Hugo-Hunchback.jpg" title="Illustration from Victor Hugo et son temps (1881)" >}}
_The Hunchback of Notre-Dame_ (French: _Notre-Dame de Paris_) is a French Romantic/Gothic novel by Victor Hugo, published in 1831. The original French title refers to Notre Dame Cathedral, on which the story is centered. English translator Frederic Shoberl named the novel The Hunchback of Notre Dame in 1833 because at the time, Gothic novels were more popular than Romance novels in England. The story is set in Paris, France in the Late Middle Ages, during the reign of Louis XI.
|
apache-2.0
|
thobbs/cassandra-dtest
|
materialized_views_test.py
|
61309
|
import collections
import re
import sys
import time
import traceback
from functools import partial
from multiprocessing import Process, Queue
from unittest import skipIf
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
# TODO add in requirements.txt
from enum import Enum # Remove when switching to py3
from assertions import (assert_all, assert_crc_check_chance_equal,
assert_invalid, assert_none, assert_one,
assert_unavailable)
from dtest import Tester, debug
from nose.plugins.attrib import attr
from tools import known_failure, new_node, require, since
# CASSANDRA-10978. Migration wait (in seconds) to use in bootstrapping tests. Needed to handle
# pathological case of flushing schema keyspace for multiple data directories. See CASSANDRA-6696
# for multiple data directory changes and CASSANDRA-10421 for compaction logging that must be
# written.
MIGRATION_WAIT = 5
@since('3.0')
class TestMaterializedViews(Tester):
"""
Test materialized views implementation.
@jira_ticket CASSANDRA-6477
@since 3.0
"""
def prepare(self, user_table=False, rf=1, options=None, nodes=3):
cluster = self.cluster
cluster.populate([nodes, 0])
if options:
cluster.set_configuration_options(values=options)
cluster.start()
node1 = cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
self.create_ks(session, 'ks', rf)
if user_table:
session.execute(
("CREATE TABLE users (username varchar, password varchar, gender varchar, "
"session_token varchar, state varchar, birth_year bigint, "
"PRIMARY KEY (username));")
)
# create a materialized view
session.execute(("CREATE MATERIALIZED VIEW users_by_state AS "
"SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
"PRIMARY KEY (state, username)"))
return session
def _insert_data(self, session):
# insert data
insert_stmt = "INSERT INTO users (username, password, gender, state, birth_year) VALUES "
session.execute(insert_stmt + "('user1', 'ch@ngem3a', 'f', 'TX', 1968);")
session.execute(insert_stmt + "('user2', 'ch@ngem3b', 'm', 'CA', 1971);")
session.execute(insert_stmt + "('user3', 'ch@ngem3c', 'f', 'FL', 1978);")
session.execute(insert_stmt + "('user4', 'ch@ngem3d', 'm', 'TX', 1974);")
def _replay_batchlogs(self):
debug("Replaying batchlog on all nodes")
for node in self.cluster.nodelist():
if node.is_running():
node.nodetool("replaybatchlog")
def create_test(self):
"""Test the materialized view creation"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(len(result), 1, "Expecting 1 materialized view, got" + str(result))
def test_gcgs_validation(self):
"""Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
session = self.prepare(user_table=True)
# Shouldn't be able to alter the gc_grace_seconds of the base table to 0
assert_invalid(session,
"ALTER TABLE users WITH gc_grace_seconds = 0",
"Cannot alter gc_grace_seconds of the base table of a materialized view "
"to 0, since this value is used to TTL undelivered updates. Setting "
"gc_grace_seconds too low might cause undelivered updates to expire "
"before being replayed.")
# But we can alter the gc_grace_seconds of the base table to a value != 0
session.execute("ALTER TABLE users WITH gc_grace_seconds = 10")
# Shouldn't be able to alter the gc_grace_seconds of the MV to 0
assert_invalid(session,
"ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0",
"Cannot alter gc_grace_seconds of a materialized view to 0, since "
"this value is used to TTL undelivered updates. Setting gc_grace_seconds "
"too low might cause undelivered updates to expire before being replayed.")
# Now let's drop MV
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
# Now we should be able to set the gc_grace_seconds of the base table to 0
session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")
# Now we shouldn't be able to create a new MV on this table
assert_invalid(session,
"CREATE MATERIALIZED VIEW users_by_state AS "
"SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
"PRIMARY KEY (state, username)",
"Cannot create materialized view 'users_by_state' for base table 'users' "
"with gc_grace_seconds of 0, since this value is used to TTL undelivered "
"updates. Setting gc_grace_seconds too low might cause undelivered updates"
" to expire before being replayed.")
def insert_test(self):
"""Test basic insertions"""
session = self.prepare(user_table=True)
self._insert_data(session)
result = list(session.execute("SELECT * FROM users;"))
self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result)))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='TX';"))
self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='CA';"))
self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='MA';"))
self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result)))
def populate_mv_after_insert_test(self):
"""Test that a view is OK when created with existing data"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
for i in xrange(1000):
session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i))
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id)"))
debug("wait that all batchlogs are replayed")
self._replay_batchlogs()
for i in xrange(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i])
def crc_check_chance_test(self):
"""Test that crc_check_chance parameter is properly populated after mv creation and update"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5"))
assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True)
session.execute("ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3")
assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True)
def prepared_statement_test(self):
"""Test basic insertions with prepared statement"""
session = self.prepare(user_table=True)
insertPrepared = session.prepare(
"INSERT INTO users (username, password, gender, state, birth_year) VALUES (?, ?, ?, ?, ?);"
)
selectPrepared = session.prepare(
"SELECT state, password, session_token FROM users_by_state WHERE state=?;"
)
# insert data
session.execute(insertPrepared.bind(('user1', 'ch@ngem3a', 'f', 'TX', 1968)))
session.execute(insertPrepared.bind(('user2', 'ch@ngem3b', 'm', 'CA', 1971)))
session.execute(insertPrepared.bind(('user3', 'ch@ngem3c', 'f', 'FL', 1978)))
session.execute(insertPrepared.bind(('user4', 'ch@ngem3d', 'm', 'TX', 1974)))
result = list(session.execute("SELECT * FROM users;"))
self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result)))
result = list(session.execute(selectPrepared.bind(['TX'])))
self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
result = list(session.execute(selectPrepared.bind(['CA'])))
self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
result = list(session.execute(selectPrepared.bind(['MA'])))
self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result)))
def immutable_test(self):
"""Test that a materialized view is immutable"""
session = self.prepare(user_table=True)
# cannot insert
assert_invalid(session, "INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');",
"Cannot directly modify a materialized view")
# cannot update
assert_invalid(session, "UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';",
"Cannot directly modify a materialized view")
# cannot delete a row
assert_invalid(session, "DELETE from users_by_state where state='TX';",
"Cannot directly modify a materialized view")
# cannot delete a cell
assert_invalid(session, "DELETE session_token from users_by_state where state='TX';",
"Cannot directly modify a materialized view")
# cannot alter a table
assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar",
"Cannot use ALTER TABLE on Materialized View")
def drop_mv_test(self):
"""Test that we can drop a view properly"""
session = self.prepare(user_table=True)
# create another materialized view
session.execute(("CREATE MATERIALIZED VIEW users_by_birth_year AS "
"SELECT * FROM users WHERE birth_year IS NOT NULL AND "
"username IS NOT NULL PRIMARY KEY (birth_year, username)"))
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(len(result), 2, "Expecting {} materialized view, got {}".format(2, len(result)))
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result)))
def drop_column_test(self):
"""Test that we cannot drop a column if it is used by a MV"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result)))
assert_invalid(
session,
"ALTER TABLE ks.users DROP state;",
"Cannot drop column state, depended on by materialized views"
)
def drop_table_test(self):
"""Test that we cannot drop a table without deleting its MVs first"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(
len(result), 1,
"Expecting {} materialized view, got {}".format(1, len(result))
)
assert_invalid(
session,
"DROP TABLE ks.users;",
"Cannot drop table when materialized views still depend on it"
)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(
len(result), 1,
"Expecting {} materialized view, got {}".format(1, len(result))
)
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
session.execute("DROP TABLE ks.users;")
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(
len(result), 0,
"Expecting {} materialized view, got {}".format(1, len(result))
)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12225',
flaky=True)
def clustering_column_test(self):
"""Test that we can use clustering columns as primary key for a materialized view"""
session = self.prepare()
session.default_consistency_level = ConsistencyLevel.QUORUM
session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, "
"session_token varchar, state varchar, birth_year bigint, "
"PRIMARY KEY (username, state, birth_year));"))
# create a materialized view that use a compound key
session.execute(("CREATE MATERIALIZED VIEW users_by_state_birth_year "
"AS SELECT * FROM users WHERE state IS NOT NULL AND birth_year IS NOT NULL "
"AND username IS NOT NULL PRIMARY KEY (state, birth_year, username)"))
session.cluster.control_connection.wait_for_schema_agreement()
self._insert_data(session)
result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'"))
self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968"))
self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
def _add_dc_after_mv_test(self, rf):
"""
@jira_ticket CASSANDRA-10978
Add datacenter with configurable replication.
"""
session = self.prepare(rf=rf)
debug("Creating schema")
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
debug("Writing 1k to base")
for i in xrange(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
debug("Reading 1k from view")
for i in xrange(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
debug("Reading 1k from base")
for i in xrange(1000):
assert_one(session, "SELECT * FROM t WHERE id = {}".format(i), [i, -i])
debug("Bootstrapping new node in another dc")
node4 = new_node(self.cluster, data_center='dc2')
node4.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
debug("Bootstrapping new node in another dc")
node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')
node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
session2 = self.patient_exclusive_cql_connection(node4)
debug("Verifying data from new node in view")
for i in xrange(1000):
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])
debug("Inserting 100 into base")
for i in xrange(1000, 1100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
debug("Verify 100 in view")
for i in xrange(1000, 1100):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140',
flaky=True)
def add_dc_after_mv_simple_replication_test(self):
"""
@jira_ticket CASSANDRA-10634
Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
"""
self._add_dc_after_mv_test(1)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140',
flaky=True)
def add_dc_after_mv_network_replication_test(self):
"""
@jira_ticket CASSANDRA-10634
Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
"""
self._add_dc_after_mv_test({'dc1': 1, 'dc2': 1})
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140',
flaky=True)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12446',
flaky=True)
def add_node_after_mv_test(self):
"""
@jira_ticket CASSANDRA-10978
Test that materialized views work as expected when adding a node.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in xrange(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in xrange(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
node4 = new_node(self.cluster)
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
session2 = self.patient_exclusive_cql_connection(node4)
for i in xrange(1000):
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])
for i in xrange(1000, 1100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in xrange(1000, 1100):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12140',
flaky=True)
def add_write_survey_node_after_mv_test(self):
"""
@jira_ticket CASSANDRA-10621
@jira_ticket CASSANDRA-10978
Test that materialized views work as expected when adding a node in write survey mode.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in xrange(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in xrange(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
node4 = new_node(self.cluster)
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
for i in xrange(1000, 1100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in xrange(1100):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def allow_filtering_test(self):
"""Test that allow filtering works as usual for a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))
for i in xrange(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
for i in xrange(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=i), [i, i, 'a', 3.0])
rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'"))
self.assertEqual(len(rows), 1000, "Expected 1000 rows but got {}".format(len(rows)))
assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'")
assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1")
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(i),
[i, i, 'a', 3.0]
)
assert_one(
session,
"SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING".format(i),
['a', i, i, 3.0]
)
def secondary_index_test(self):
"""Test that secondary indexes cannot be created on a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
assert_invalid(session, "CREATE INDEX ON t_by_v (v2)",
"Secondary indexes are not supported on materialized views")
def ttl_test(self):
"""
Test that TTL works as expected for a materialized view
@expected_result The TTL is propagated properly between tables.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))
for i in xrange(100):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=i))
for i in xrange(100):
assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(i), [i, i, i, i])
time.sleep(20)
rows = list(session.execute("SELECT * FROM t_by_v2"))
self.assertEqual(len(rows), 0, "Expected 0 rows but got {}".format(len(rows)))
def query_all_new_column_test(self):
"""
Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
@expected_result The new column is present in the view.
"""
session = self.prepare(user_table=True)
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
)
session.execute("ALTER TABLE users ADD first_name varchar;")
results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'"))
self.assertEqual(len(results), 1)
self.assertTrue(hasattr(results[0], 'first_name'), 'Column "first_name" not found')
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]
)
def query_new_column_test(self):
"""
Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
@expected_result The new column is not present in the view.
"""
session = self.prepare(user_table=True)
session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT username FROM users "
"WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)"))
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1']
)
session.execute("ALTER TABLE users ADD first_name varchar;")
results = list(session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'"))
self.assertEqual(len(results), 1)
self.assertFalse(hasattr(results[0], 'first_name'), 'Column "first_name" found in view')
assert_one(
session,
"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1']
)
def lwt_test(self):
"""Test that lightweight transaction behave properly with a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
debug("Inserting initial data using IF NOT EXISTS")
for i in xrange(1000):
session.execute(
"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
)
self._replay_batchlogs()
debug("All rows should have been inserted")
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
debug("Tyring to UpInsert data with a different value using IF NOT EXISTS")
for i in xrange(1000):
v = i * 2
session.execute(
"INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS".format(id=i, v=v)
)
self._replay_batchlogs()
debug("No rows should have changed")
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
debug("Update the 10 first rows with a different value")
for i in xrange(1000):
v = i + 2000
session.execute(
"UPDATE t SET v={v} WHERE id = {id} IF v < 10".format(id=i, v=v)
)
self._replay_batchlogs()
debug("Verify that only the 10 first rows changed.")
results = list(session.execute("SELECT * FROM t_by_v;"))
self.assertEqual(len(results), 1000)
for i in xrange(1000):
v = i + 2000 if i < 10 else i
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(v),
[v, i, 'a', 3.0]
)
debug("Deleting the first 10 rows")
for i in xrange(1000):
v = i + 2000
session.execute(
"DELETE FROM t WHERE id = {id} IF v = {v} ".format(id=i, v=v)
)
self._replay_batchlogs()
debug("Verify that only the 10 first rows have been deleted.")
results = list(session.execute("SELECT * FROM t_by_v;"))
self.assertEqual(len(results), 990)
for i in xrange(10, 1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
@known_failure(failure_source='test',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11612',
flaky=True,
notes='flaps on Windows')
def interrupt_build_process_test(self):
"""Test that an interupted MV build process is resumed as it should"""
session = self.prepare(options={'hinted_handoff_enabled': False})
node1, node2, node3 = self.cluster.nodelist()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
debug("Inserting initial data")
for i in xrange(10000):
session.execute(
"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
)
debug("Create a MV")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
debug("Stop the cluster. Interrupt the MV build process.")
self.cluster.stop()
debug("Restart the cluster")
self.cluster.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node1)
session.execute("USE ks")
debug("MV shouldn't be built yet.")
assert_none(session, "SELECT * FROM t_by_v WHERE v=10000;")
debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
start = time.time()
while True:
try:
result = list(session.execute("SELECT count(*) FROM t_by_v;"))
self.assertNotEqual(result[0].count, 10000)
except AssertionError:
debug("MV build process is finished")
break
elapsed = (time.time() - start) / 60
if elapsed > 2:
break
time.sleep(5)
debug("Verify all data")
result = list(session.execute("SELECT count(*) FROM t_by_v;"))
self.assertEqual(result[0].count, 10000)
for i in xrange(10000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0],
cl=ConsistencyLevel.ALL
)
def view_tombstone_test(self):
"""
Test that a materialized view properly tombstones
@jira_ticket CASSANDRA-10261
@jira_ticket CASSANDRA-10910
"""
self.prepare(rf=3, options={'hinted_handoff_enabled': False})
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_exclusive_cql_connection(node1)
session.max_trace_wait = 120
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
session.cluster.control_connection.wait_for_schema_agreement()
# Set initial values TS=0, verify
session.execute(SimpleStatement("INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0",
consistency_level=ConsistencyLevel.ALL))
self._replay_batchlogs()
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'a', 3.0]
)
session.execute(SimpleStatement("INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1",
consistency_level=ConsistencyLevel.ALL))
self._replay_batchlogs()
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'b', 3.0]
)
# change v's value and TS=3, tombstones v=1 and adds v=0 record
session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1",
consistency_level=ConsistencyLevel.ALL))
self._replay_batchlogs()
assert_none(session, "SELECT * FROM t_by_v WHERE v = 1")
debug('Shutdown node2')
node2.stop(wait_other_notice=True)
session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1",
consistency_level=ConsistencyLevel.QUORUM))
self._replay_batchlogs()
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'b', 3.0]
)
node2.start(wait_other_notice=True, wait_for_binary_proto=True)
# We should get a digest mismatch
query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1",
consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
# We should not get a digest mismatch the second time
query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
# Verify values one last time
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'b', 3.0],
cl=ConsistencyLevel.ALL
)
def check_trace_events(self, trace, expect_digest):
# we should see multiple requests get enqueued prior to index scan
# execution happening
# Look for messages like:
# Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey
regex = r"Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey"
for event in trace.events:
desc = event.description
match = re.match(regex, desc)
if match:
if expect_digest:
break
else:
self.fail("Encountered digest mismatch when we shouldn't")
else:
if expect_digest:
self.fail("Didn't find digest mismatch")
def simple_repair_test(self):
"""
Test that a materialized view is consistent after a simple repair.
"""
session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
node1, node2, node3 = self.cluster.nodelist()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
debug('Shutdown node2')
node2.stop(wait_other_notice=True)
for i in xrange(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
self._replay_batchlogs()
debug('Verify the data in the MV with CL=ONE')
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
for i in xrange(1000):
statement = SimpleStatement(
"SELECT * FROM t_by_v WHERE v = {}".format(i),
consistency_level=ConsistencyLevel.ALL
)
assert_unavailable(
session.execute,
statement
)
debug('Start node2, and repair')
node2.start(wait_other_notice=True, wait_for_binary_proto=True)
node1.repair()
debug('Verify the data in the MV with CL=ONE. All should be available now.')
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0],
cl=ConsistencyLevel.ONE
)
def base_replica_repair_test(self):
"""
Test that a materialized view is consistent after the repair of the base replica.
"""
self.prepare(rf=3)
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
debug('Write initial data')
for i in xrange(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
self._replay_batchlogs()
debug('Verify the data in the MV with CL=ALL')
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0],
cl=ConsistencyLevel.ALL
)
debug('Shutdown node1')
node1.stop(wait_other_notice=True)
debug('Delete node1 data')
node1.clear(clear_all=True)
debug('Restarting node1')
node1.start(wait_other_notice=True, wait_for_binary_proto=True)
debug('Shutdown node2 and node3')
node2.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
debug('Verify that there is no data on node1')
for i in xrange(1000):
assert_none(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i)
)
debug('Restarting node2 and node3')
node2.start(wait_other_notice=True, wait_for_binary_proto=True)
node3.start(wait_other_notice=True, wait_for_binary_proto=True)
# Just repair the base replica
node1.nodetool("repair ks t")
debug('Verify data with cl=ALL')
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
@attr("resource-intensive")
def complex_repair_test(self):
"""
Test that a materialized view is consistent after a more complex repair.
"""
session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
node1, node2, node3, node4, node5 = self.cluster.nodelist()
# we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds
session.execute("CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)"
"WITH gc_grace_seconds = 5")
session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
debug('Shutdown node2 and node3')
node2.stop()
node3.stop(wait_other_notice=True)
debug('Write initial data to node1 (will be replicated to node4 and node5)')
for i in xrange(1000):
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
debug('Verify the data in the MV on node1 with CL=ONE')
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
debug('Close connection to node1')
session.cluster.shutdown()
debug('Shutdown node1, node4 and node5')
node1.stop()
node4.stop()
node5.stop()
debug('Start nodes 2 and 3')
node2.start()
node3.start(wait_other_notice=True, wait_for_binary_proto=True)
session2 = self.patient_cql_connection(node2)
debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
for i in xrange(1000):
assert_none(
session2,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(i)
)
debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
for i in xrange(1000):
# we write i*2 as value, instead of i
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2))
debug('Verify the new data in the MV on node2 with CL=ONE')
for i in xrange(1000):
v = i * 2
assert_one(
session2,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
[v, v, 'a', 3.0]
)
debug('Wait for batchlogs to expire from node2 and node3')
time.sleep(5)
debug('Start remaining nodes')
node1.start(wait_other_notice=True, wait_for_binary_proto=True)
node4.start(wait_other_notice=True, wait_for_binary_proto=True)
node5.start(wait_other_notice=True, wait_for_binary_proto=True)
session = self.patient_cql_connection(node1)
debug('Read data from MV at QUORUM (old data should be returned)')
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0],
cl=ConsistencyLevel.QUORUM
)
debug('Run global repair on node1')
node1.repair()
debug('Read data from MV at quorum (new data should be returned after repair)')
for i in xrange(1000):
v = i * 2
assert_one(
session,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
[v, v, 'a', 3.0],
cl=ConsistencyLevel.QUORUM
)
def really_complex_repair_test(self):
"""
Test that a materialized view is consistent after a more complex repair.
"""
session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
node1, node2, node3, node4, node5 = self.cluster.nodelist()
# we create the base table with gc_grace_seconds=1 so batchlog entries expire quickly
session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))"
"WITH gc_grace_seconds = 1")
session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND "
"v2 IS NOT NULL PRIMARY KEY (v2, v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
debug('Shutdown node2 and node3')
node2.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
self._replay_batchlogs()
debug('Verify the data in the MV on node1 with CL=ONE')
assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
self._replay_batchlogs()
debug('Verify the data in the MV on node1 with CL=ONE')
assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])
session.shutdown()
debug('Shutdown node1, node4 and node5')
node1.stop()
node4.stop()
node5.stop()
debug('Start nodes 2 and 3')
node2.start()
node3.start(wait_other_notice=True, wait_for_binary_proto=True)
session2 = self.patient_cql_connection(node2)
session2.execute('USE ks')
debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")
debug('Write new data in node2 that overlap those in node1')
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
self._replay_batchlogs()
assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)")
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)")
self._replay_batchlogs()
assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])
debug("Composite delete of everything")
session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")
self._replay_batchlogs()
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")
debug('Wait for batchlogs to expire from node2 and node3')
time.sleep(5)
debug('Start remaining nodes')
node1.start(wait_other_notice=True, wait_for_binary_proto=True)
node4.start(wait_other_notice=True, wait_for_binary_proto=True)
node5.start(wait_other_notice=True, wait_for_binary_proto=True)
# at this point the data isn't repaired, so we observe the inconsistency:
# the stale 'a' rows are still returned (after repair they should be gone)
assert_all(
session2,
"SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],
cl=ConsistencyLevel.QUORUM
)
debug('Run global repair on node1')
node1.repair()
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM)
def complex_mv_select_statements_test(self):
"""
Test complex MV select statements
@jira_ticket CASSANDRA-9664
"""
cluster = self.cluster
cluster.populate(3).start()
node1 = cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
session.default_consistency_level = ConsistencyLevel.QUORUM
debug("Creating keyspace")
session.execute("CREATE KEYSPACE mvtest WITH replication = "
"{'class': 'SimpleStrategy', 'replication_factor': '3'}")
session.execute('USE mvtest')
mv_primary_keys = ["((a, b), c)",
"((b, a), c)",
"(a, b, c)",
"(c, b, a)",
"((c, a), b)"]
for mv_primary_key in mv_primary_keys:
session.execute("CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))")
insert_stmt = session.prepare("INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)")
update_stmt = session.prepare("UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?")
delete_stmt1 = session.prepare("DELETE FROM test WHERE a = ? AND b = ? AND c = ?")
delete_stmt2 = session.prepare("DELETE FROM test WHERE a = ?")
session.cluster.control_connection.wait_for_schema_agreement()
rows = [(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 1, 0, 0),
(0, 1, 1, 0),
(1, 0, 0, 0),
(1, 0, 1, 0),
(1, 1, -1, 0),
(1, 1, 0, 0),
(1, 1, 1, 0)]
for row in rows:
session.execute(insert_stmt, row)
debug("Testing MV primary key: {}".format(mv_primary_key))
session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE "
"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key))
time.sleep(3)
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# insert new rows that do not match the filter
session.execute(insert_stmt, (0, 0, 1, 0))
session.execute(insert_stmt, (1, 1, 0, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# insert new row that does match the filter
session.execute(insert_stmt, (1, 2, 1, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# update rows that do not match the filter
session.execute(update_stmt, (1, 1, -1, 0))
session.execute(update_stmt, (0, 1, 1, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# update a row that does match the filter
session.execute(update_stmt, (2, 1, 1, 1))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# delete rows that do not match the filter
session.execute(delete_stmt1, (1, 1, -1))
session.execute(delete_stmt1, (2, 0, 1))
session.execute(delete_stmt2, (0,))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# delete a row that does match the filter
session.execute(delete_stmt1, (1, 1, 1))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# delete a partition that matches the filter
session.execute(delete_stmt2, (1,))
assert_all(session, "SELECT a, b, c, d FROM mv", [], cl=ConsistencyLevel.QUORUM)
# Cleanup
session.execute("DROP MATERIALIZED VIEW mv")
session.execute("DROP TABLE test")
# For read verification
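# Each read below is classified against the expected base-table state:
# match - the view row exists and carries the latest value written for (a, b)
# extra - the view row exists but holds a stale value that should have been replaced
# missing - the latest value should be visible in the view but no row came back
# excluded - a stale value is correctly absent from the view
# unknown - anything else (should never happen)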
class MutationPresence(Enum):
match = 1
extra = 2
missing = 3
excluded = 4
unknown = 5
class MM(object):
mp = None
def out(self):
pass
class Match(MM):
def __init__(self):
self.mp = MutationPresence.match
def out(self):
return None
class Extra(MM):
expecting = None
value = None
row = None
def __init__(self, expecting, value, row):
self.mp = MutationPresence.extra
self.expecting = expecting
self.value = value
self.row = row
def out(self):
return "Extra. Expected {} instead of {}; row: {}".format(self.expecting, self.value, self.row)
class Missing(MM):
value = None
row = None
def __init__(self, value, row):
self.mp = MutationPresence.missing
self.value = value
self.row = row
def out(self):
return "Missing. At {}".format(self.row)
class Excluded(MM):
def __init__(self):
self.mp = MutationPresence.excluded
def out(self):
return None
class Unknown(MM):
def __init__(self):
self.mp = MutationPresence.unknown
def out(self):
return None
readConsistency = ConsistencyLevel.QUORUM
writeConsistency = ConsistencyLevel.QUORUM
SimpleRow = collections.namedtuple('SimpleRow', 'a b c d')
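# Derive a deterministic row from a sequence number: 'a' cycles through the
# partition keys, 'b' spreads rows within a partition, and 'c'/'d' keep the raw
# sequence number so later writes to the same (a, b) overwrite earlier ones.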
def row_generate(i, num_partitions):
return SimpleRow(a=i % num_partitions, b=(i % 400) / num_partitions, c=i, d=i)
# Create a threaded session and execute queries from a Queue
def thread_session(ip, queue, start, end, rows, num_partitions):
def execute_query(session, select_gi, i):
row = row_generate(i, num_partitions)
if (row.a, row.b) in rows:
base = rows[(row.a, row.b)]
else:
base = -1
gi = list(session.execute(select_gi, [row.c, row.a]))
if base == i and len(gi) == 1:
return Match()
elif base != i and len(gi) == 1:
return Extra(base, i, (gi[0][0], gi[0][1], gi[0][2], gi[0][3]))
elif base == i and len(gi) == 0:
return Missing(base, i)
elif base != i and len(gi) == 0:
return Excluded()
else:
return Unknown()
try:
cluster = Cluster([ip])
session = cluster.connect()
select_gi = session.prepare("SELECT * FROM mvtest.mv1 WHERE c = ? AND a = ?")
select_gi.consistency_level = readConsistency
for i in range(start, end):
ret = execute_query(session, select_gi, i)
queue.put_nowait(ret)
except Exception as e:
print str(e)
queue.close()
@since('3.0')
@skipIf(sys.platform == 'win32', 'Bug in python on Windows: https://bugs.python.org/issue10128')
class TestMaterializedViewsConsistency(Tester):
def prepare(self, user_table=False):
cluster = self.cluster
cluster.populate(3).start()
node2 = cluster.nodelist()[1]
# Keep the status of async requests
self.exception_type = collections.Counter()
self.num_request_done = 0
self.counts = {}
for mp in MutationPresence:
self.counts[mp] = 0
self.rows = {}
self.update_stats_every = 100
debug("Set to talk to node 2")
self.session = self.patient_cql_connection(node2)
return self.session
def _print_write_status(self, row):
output = "\r{}".format(row)
for key in self.exception_type.keys():
output = "{} ({}: {})".format(output, key, self.exception_type[key])
sys.stdout.write(output)
sys.stdout.flush()
def _print_read_status(self, row):
if self.counts[MutationPresence.unknown] == 0:
sys.stdout.write(
"\rOn {}; match: {}; extra: {}; missing: {}".format(
row,
self.counts[MutationPresence.match],
self.counts[MutationPresence.extra],
self.counts[MutationPresence.missing])
)
else:
sys.stdout.write(
"\rOn {}; match: {}; extra: {}; missing: {}; WTF: {}".format(
row,
self.counts[MutationPresence.match],
self.counts[MutationPresence.extra],
self.counts[MutationPresence.missing],
self.counts[MutationPresence.unknown])
)
sys.stdout.flush()
def _do_row(self, insert_stmt, i, num_partitions):
# Error callback for async requests
def handle_errors(row, exc):
self.num_request_done += 1
try:
name = type(exc).__name__
self.exception_type[name] += 1
except Exception as e:
print traceback.format_exception_only(type(e), e)
# Success callback for async requests
def success_callback(row):
self.num_request_done += 1
if i % self.update_stats_every == 0:
self._print_write_status(i)
row = row_generate(i, num_partitions)
# 'async' becomes a reserved word in Python 3.7+, so name the handle 'future'
future = self.session.execute_async(insert_stmt, row)
errors = partial(handle_errors, row)
future.add_callbacks(success_callback, errors)
def _populate_rows(self):
statement = SimpleStatement(
"SELECT a, b, c FROM mvtest.test1",
consistency_level=readConsistency
)
data = self.session.execute(statement)
for row in data:
self.rows[(row.a, row.b)] = row.c
@known_failure(failure_source='cassandra',
jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11290',
flaky=True)
@require(11290)
def single_partition_consistent_reads_after_write_test(self):
"""
Tests consistency of multiple writes to a single partition
@jira_ticket CASSANDRA-10981
"""
self._consistent_reads_after_write_test(1)
def multi_partition_consistent_reads_after_write_test(self):
"""
Tests consistency of multiple writes to multiple partitions
@jira_ticket CASSANDRA-10981
"""
self._consistent_reads_after_write_test(20)
def _consistent_reads_after_write_test(self, num_partitions):
session = self.prepare()
node1, node2, node3 = self.cluster.nodelist()
# Test config
lower = 0
upper = 100000
processes = 4
queues = [None] * processes
eachProcess = (upper - lower) / processes
debug("Creating schema")
session.execute(
("CREATE KEYSPACE IF NOT EXISTS mvtest WITH replication = "
"{'class': 'SimpleStrategy', 'replication_factor': '3'}")
)
session.execute(
"CREATE TABLE mvtest.test1 (a int, b int, c int, d int, PRIMARY KEY (a,b))"
)
session.cluster.control_connection.wait_for_schema_agreement()
insert1 = session.prepare("INSERT INTO mvtest.test1 (a,b,c,d) VALUES (?,?,?,?)")
insert1.consistency_level = writeConsistency
debug("Writing data to base table")
for i in range(upper / 10):
self._do_row(insert1, i, num_partitions)
debug("Creating materialized view")
session.execute(
('CREATE MATERIALIZED VIEW mvtest.mv1 AS '
'SELECT a,b,c,d FROM mvtest.test1 WHERE a IS NOT NULL AND b IS NOT NULL AND '
'c IS NOT NULL PRIMARY KEY (c,a,b)')
)
session.cluster.control_connection.wait_for_schema_agreement()
debug("Writing more data to base table")
for i in range(upper / 10, upper):
self._do_row(insert1, i, num_partitions)
# Wait until all requests are done
while self.num_request_done < upper:
time.sleep(1)
debug("Making sure all batchlogs are replayed on node1")
node1.nodetool("replaybatchlog")
debug("Making sure all batchlogs are replayed on node2")
node2.nodetool("replaybatchlog")
debug("Making sure all batchlogs are replayed on node3")
node3.nodetool("replaybatchlog")
debug("Finished writes, now verifying reads")
self._populate_rows()
for i in range(processes):
start = lower + (eachProcess * i)
if i == processes - 1:
end = upper
else:
end = lower + (eachProcess * (i + 1))
q = Queue()
node_ip = self.get_ip_from_node(node2)
p = Process(target=thread_session, args=(node_ip, q, start, end, self.rows, num_partitions))
p.start()
queues[i] = q
for i in range(lower, upper):
if i % 100 == 0:
self._print_read_status(i)
mm = queues[i % processes].get()
if mm.out() is not None:
sys.stdout.write("\r{}\n" .format(mm.out()))
self.counts[mm.mp] += 1
self._print_read_status(upper)
sys.stdout.write("\n")
sys.stdout.flush()
|
apache-2.0
|
apetresc/aws-sdk-for-java-on-gae
|
src/main/java/com/amazonaws/services/autoscaling/model/transform/LimitExceededExceptionUnmarshaller.java
|
1518
|
/*
* Copyright 2010-2011 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.autoscaling.model.transform;
import org.w3c.dom.Node;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.util.XpathUtils;
import com.amazonaws.transform.StandardErrorUnmarshaller;
import com.amazonaws.services.autoscaling.model.LimitExceededException;
public class LimitExceededExceptionUnmarshaller extends StandardErrorUnmarshaller {
public LimitExceededExceptionUnmarshaller() {
super(LimitExceededException.class);
}
public AmazonServiceException unmarshall(Node node) throws Exception {
// Bail out if this isn't the right error code that this
// marshaller understands.
String errorCode = parseErrorCode(node);
if (errorCode == null || !errorCode.equals("LimitExceeded"))
return null;
LimitExceededException e = (LimitExceededException)super.unmarshall(node);
return e;
}
}
|
apache-2.0
|
MartinLoeper/KAMP-DSL
|
edu.kit.ipd.sdq.kamp.ruledsl/src/edu/kit/ipd/sdq/kamp/ruledsl/runtime/rule/StopwatchRule.java
|
2567
|
package edu.kit.ipd.sdq.kamp.ruledsl.runtime.rule;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;
import edu.kit.ipd.sdq.kamp.architecture.AbstractArchitectureVersion;
import edu.kit.ipd.sdq.kamp.propagation.AbstractChangePropagationAnalysis;
import edu.kit.ipd.sdq.kamp.ruledsl.support.ChangePropagationStepRegistry;
import edu.kit.ipd.sdq.kamp.ruledsl.support.IRule;
/**
* This standard (helper) rule is used to measure the execution time of a given rule.
*
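* A minimal usage sketch (assuming some concrete {@code IRule} implementation,
* here called {@code MyRule}, which is not part of this class):
* <pre>
* StopwatchRule timed = new StopwatchRule(new MyRule(), 10);
* timed.apply(version, registry);
* long nanosPerIteration = timed.getElapsedTimePerIteration(TimeUnit.NANOSECONDS);
* </pre>
*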
* @author Martin Löper
*
*/
public class StopwatchRule implements IRule {
private final Stopwatch stopwatch;
private final IRule rule;
private final long iterations;
/**
* Creates a Stopwatch (wrapper) rule for the given {@code rule}.
* @param rule the rule which will be observed
*/
public StopwatchRule(IRule rule) {
this(rule, 1);
}
/**
* Creates a Stopwatch (wrapper) rule for the given {@code rule}.
* @param rule the rule which will be observed
* @param iterations the number of times the {@link IRule#apply(AbstractArchitectureVersion, ChangePropagationStepRegistry)} method of {@code rule} is called
*/
public StopwatchRule(IRule rule, long iterations) {
this.stopwatch = Stopwatch.createUnstarted();
this.rule = rule;
this.iterations = iterations;
}
@Override
public void apply(AbstractArchitectureVersion version, ChangePropagationStepRegistry registry) {
this.stopwatch.start();
for(long i=0; i < this.iterations; i++) {
this.rule.apply(version, registry);
}
this.stopwatch.stop();
}
/**
* Returns the elapsed time in the given time format.
* @see Stopwatch#elapsed(TimeUnit)
* @param timeUnit the time unit which is used to express the elapsed time
* @return the elapsed time in the given time unit
*/
public long getElapsedTime(TimeUnit timeUnit) {
return this.stopwatch.elapsed(timeUnit);
}
/**
* Returns the elapsed time per iteration in the given time format.
* This essentially divides the total time by the number of iterations.
* @param timeUnit the time unit which is used to express the elapsed time
* @return the elapsed time per iteration in the given time unit
*/
public long getElapsedTimePerIteration(TimeUnit timeUnit) {
return this.stopwatch.elapsed(timeUnit) / this.iterations;
}
/**
* Returns the elapsed time in a human readable format.
* @see Stopwatch#toString()
* @return the elapsed time (human readable)
*/
public String getElapsedTimeAsString() {
return this.stopwatch.toString();
}
}
|
apache-2.0
|
Ubicall/node-red
|
red/ubicall/plist/utils/index.js
|
3754
|
var when = require('when');
var request = require('request');
var settings = require("../../../../settings");
var log = require("../../../log");
// view components
var view_start = require('../nodes/view/start');
var view_choice = require('../nodes/view/choice');
var view_form = require('../nodes/view/form');
var view_grid = require('../nodes/view/grid');
var view_info = require('../nodes/view/info');
var view_url = require('../nodes/view/url');
var view_zendesk_ticket = require('../nodes/view/zendesk-ticket');
var submit_call = require('../nodes/view/call');
var zopim_chat = require('../nodes/view/zopim_chat');
// action components
var action_submit_email = require('../nodes/action/submit-email');
var action_submit_zendesk_ticket = require('../nodes/action/submit-zendesk-ticket');
var PLIST_DEPLOY = settings.staticPlistSubmittingService;
var PLIST_HOST = settings.staticPlistHostingUrl || "https://designer.ubicall.com/plist/";
if (!PLIST_DEPLOY) {
throw new Error("ws.ubicall.com is abslote use new configuration i.e. config_version=20150920")
}
function extractFlow(flow) {
return when.promise(function(resolve, reject) {
// initialize flow with content of start node
var __flow = view_start.createStart(flow);
for (var i = 0; i < flow.Nodes.length; i++) {
var node = flow.Nodes[i];
switch (node.type) {
case "view-choice":
__flow[node.id] = view_choice.createChoice(node);
break;
case "view-form":
__flow[node.id] = view_form.createForm(node);
break;
case "view-grid":
__flow[node.id] = view_grid.createGrid(node);
break;
case "view-info":
__flow[node.id] = view_info.createInfo(node);
break;
case "view-url":
__flow[node.id] = view_url.createURL(node);
break;
case "view-zendesk-ticket-form":
__flow[node.id] = view_zendesk_ticket.createZendeskForm(node);
break;
case "view-submit-call":
__flow[node.id] = submit_call.createViewCall(node);
break;
case "view-zopim-chat":
__flow[node.id] = zopim_chat.createZopimChat(node);
break;
// action components
case "action-submit-email":
__flow[node.id] = action_submit_email.createActionEmail(node);
break;
case "action-submit-zendesk-ticket":
__flow[node.id] = action_submit_zendesk_ticket.createActionZendeskTicket(node);
break;
// do nothing nodes
case "view-zendesk-help-center":
break;
case "tab":
break;
default:
if (node.type !== "start") { // it aleardy handle outside switch statment
log.info("unknown node " + JSON.stringify(node));
}
}
}
return resolve(__flow);
});
}
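// POST the given version to the static plist submitting service, forwarding the
// caller's Authorization header when present. Resolves with the response body on
// HTTP 200 and rejects with the error or status code otherwise.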
function deployFlowOnline(authorization_header, version) {
return when.promise(function(resolve, reject) {
var options = {
url: PLIST_DEPLOY + version,
method: 'POST'
};
if (authorization_header) {
options.headers = options.headers || {};
options.headers['Authorization'] = authorization_header;
}
if (process.env.node_env !== "production") {
log.warn("This info appear because you are not start with production flag");
log.warn(JSON.stringify(options, null, 4));
}
request(options, function(err, response, body) {
if (err || response.statusCode !== 200) {
log.error(err || response.statusCode);
return reject(err || response.statusCode);
} else {
return resolve(body);
}
});
});
}
module.exports = {
extractFlow: extractFlow,
deployFlowOnline: deployFlowOnline
}
|
apache-2.0
|
picklesrus/blockly
|
demos/code/code.js
|
17281
|
/**
* @license
* Copyright 2012 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview JavaScript for Blockly's Code demo.
* @author [email protected] (Neil Fraser)
*/
'use strict';
/**
* Create a namespace for the application.
*/
var Code = {};
/**
* Lookup for names of supported languages. Keys should be in ISO 639 format.
*/
Code.LANGUAGE_NAME = {
'ar': 'العربية',
'be-tarask': 'Taraškievica',
'br': 'Brezhoneg',
'ca': 'Català',
'cs': 'Česky',
'da': 'Dansk',
'de': 'Deutsch',
'el': 'Ελληνικά',
'en': 'English',
'es': 'Español',
'et': 'Eesti',
'fa': 'فارسی',
'fr': 'Français',
'he': 'עברית',
'hrx': 'Hunsrik',
'hu': 'Magyar',
'ia': 'Interlingua',
'is': 'Íslenska',
'it': 'Italiano',
'ja': '日本語',
'kab': 'Kabyle',
'ko': '한국어',
'mk': 'Македонски',
'ms': 'Bahasa Melayu',
'nb': 'Norsk Bokmål',
'nl': 'Nederlands, Vlaams',
'oc': 'Lenga d\'òc',
'pl': 'Polski',
'pms': 'Piemontèis',
'pt-br': 'Português Brasileiro',
'ro': 'Română',
'ru': 'Русский',
'sc': 'Sardu',
'sk': 'Slovenčina',
'sr': 'Српски',
'sv': 'Svenska',
'ta': 'தமிழ்',
'th': 'ภาษาไทย',
'tlh': 'tlhIngan Hol',
'tr': 'Türkçe',
'uk': 'Українська',
'vi': 'Tiếng Việt',
'zh-hans': '简体中文',
'zh-hant': '正體中文'
};
/**
* List of RTL languages.
*/
Code.LANGUAGE_RTL = ['ar', 'fa', 'he', 'lki'];
/**
* Blockly's main workspace.
* @type {Blockly.WorkspaceSvg}
*/
Code.workspace = null;
/**
* Extracts a parameter from the URL.
 * If the parameter is absent, defaultValue is returned.
* @param {string} name The name of the parameter.
* @param {string} defaultValue Value to return if parameter not found.
* @return {string} The parameter value or the default value if not found.
*/
Code.getStringParamFromUrl = function(name, defaultValue) {
var val = location.search.match(new RegExp('[?&]' + name + '=([^&]+)'));
return val ? decodeURIComponent(val[1].replace(/\+/g, '%20')) : defaultValue;
};
/**
* Get the language of this user from the URL.
* @return {string} User's language.
*/
Code.getLang = function() {
var lang = Code.getStringParamFromUrl('lang', '');
if (Code.LANGUAGE_NAME[lang] === undefined) {
// Default to English.
lang = 'en';
}
return lang;
};
/**
* Is the current language (Code.LANG) an RTL language?
* @return {boolean} True if RTL, false if LTR.
*/
Code.isRtl = function() {
return Code.LANGUAGE_RTL.indexOf(Code.LANG) != -1;
};
/**
* Load blocks saved on App Engine Storage or in session/local storage.
* @param {string} defaultXml Text representation of default blocks.
*/
Code.loadBlocks = function(defaultXml) {
try {
var loadOnce = window.sessionStorage.loadOnceBlocks;
} catch(e) {
// Firefox sometimes throws a SecurityError when accessing sessionStorage.
// Restarting Firefox fixes this, so it looks like a bug.
var loadOnce = null;
}
if ('BlocklyStorage' in window && window.location.hash.length > 1) {
    // An href with #key triggers an AJAX call to retrieve saved blocks.
BlocklyStorage.retrieveXml(window.location.hash.substring(1));
} else if (loadOnce) {
// Language switching stores the blocks during the reload.
delete window.sessionStorage.loadOnceBlocks;
var xml = Blockly.Xml.textToDom(loadOnce);
Blockly.Xml.domToWorkspace(xml, Code.workspace);
} else if (defaultXml) {
// Load the editor with default starting blocks.
var xml = Blockly.Xml.textToDom(defaultXml);
Blockly.Xml.domToWorkspace(xml, Code.workspace);
} else if ('BlocklyStorage' in window) {
// Restore saved blocks in a separate thread so that subsequent
    // initialization is not affected by a failed load.
window.setTimeout(BlocklyStorage.restoreBlocks, 0);
}
};
/**
* Save the blocks and reload with a different language.
*/
Code.changeLanguage = function() {
// Store the blocks for the duration of the reload.
// MSIE 11 does not support sessionStorage on file:// URLs.
if (window.sessionStorage) {
var xml = Blockly.Xml.workspaceToDom(Code.workspace);
var text = Blockly.Xml.domToText(xml);
window.sessionStorage.loadOnceBlocks = text;
}
var languageMenu = document.getElementById('languageMenu');
var newLang = encodeURIComponent(
languageMenu.options[languageMenu.selectedIndex].value);
var search = window.location.search;
if (search.length <= 1) {
search = '?lang=' + newLang;
} else if (search.match(/[?&]lang=[^&]*/)) {
search = search.replace(/([?&]lang=)[^&]*/, '$1' + newLang);
} else {
search = search.replace(/\?/, '?lang=' + newLang + '&');
}
window.location = window.location.protocol + '//' +
window.location.host + window.location.pathname + search;
};
/**
* Bind a function to a button's click event.
* On touch enabled browsers, ontouchend is treated as equivalent to onclick.
* @param {!Element|string} el Button element or ID thereof.
* @param {!Function} func Event handler to bind.
*/
Code.bindClick = function(el, func) {
if (typeof el == 'string') {
el = document.getElementById(el);
}
el.addEventListener('click', func, true);
el.addEventListener('touchend', func, true);
};
/**
* Load the Prettify CSS and JavaScript.
*/
Code.importPrettify = function() {
var script = document.createElement('script');
script.setAttribute('src', 'https://cdn.rawgit.com/google/code-prettify/master/loader/run_prettify.js');
document.head.appendChild(script);
};
/**
* Compute the absolute coordinates and dimensions of an HTML element.
* @param {!Element} element Element to match.
* @return {!Object} Contains height, width, x, and y properties.
* @private
*/
Code.getBBox_ = function(element) {
var height = element.offsetHeight;
var width = element.offsetWidth;
var x = 0;
var y = 0;
do {
x += element.offsetLeft;
y += element.offsetTop;
element = element.offsetParent;
} while (element);
return {
height: height,
width: width,
x: x,
y: y
};
};
/**
* User's language (e.g. "en").
* @type {string}
*/
Code.LANG = Code.getLang();
/**
* List of tab names.
* @private
*/
Code.TABS_ = ['blocks', 'javascript', 'php', 'python', 'dart', 'lua', 'xml'];
Code.selected = 'blocks';
/**
* Switch the visible pane when a tab is clicked.
* @param {string} clickedName Name of tab clicked.
*/
Code.tabClick = function(clickedName) {
// If the XML tab was open, save and render the content.
if (document.getElementById('tab_xml').className == 'tabon') {
var xmlTextarea = document.getElementById('content_xml');
var xmlText = xmlTextarea.value;
var xmlDom = null;
try {
xmlDom = Blockly.Xml.textToDom(xmlText);
} catch (e) {
var q =
window.confirm(MSG['badXml'].replace('%1', e));
if (!q) {
// Leave the user on the XML tab.
return;
}
}
if (xmlDom) {
Code.workspace.clear();
Blockly.Xml.domToWorkspace(xmlDom, Code.workspace);
}
}
if (document.getElementById('tab_blocks').className == 'tabon') {
Code.workspace.setVisible(false);
}
// Deselect all tabs and hide all panes.
for (var i = 0; i < Code.TABS_.length; i++) {
var name = Code.TABS_[i];
document.getElementById('tab_' + name).className = 'taboff';
document.getElementById('content_' + name).style.visibility = 'hidden';
}
// Select the active tab.
Code.selected = clickedName;
document.getElementById('tab_' + clickedName).className = 'tabon';
// Show the selected pane.
document.getElementById('content_' + clickedName).style.visibility =
'visible';
Code.renderContent();
if (clickedName == 'blocks') {
Code.workspace.setVisible(true);
}
Blockly.svgResize(Code.workspace);
};
/**
* Populate the currently selected pane with content generated from the blocks.
*/
Code.renderContent = function() {
var content = document.getElementById('content_' + Code.selected);
// Initialize the pane.
if (content.id == 'content_xml') {
var xmlTextarea = document.getElementById('content_xml');
var xmlDom = Blockly.Xml.workspaceToDom(Code.workspace);
var xmlText = Blockly.Xml.domToPrettyText(xmlDom);
xmlTextarea.value = xmlText;
xmlTextarea.focus();
} else if (content.id == 'content_javascript') {
Code.attemptCodeGeneration(Blockly.JavaScript);
} else if (content.id == 'content_python') {
Code.attemptCodeGeneration(Blockly.Python);
} else if (content.id == 'content_php') {
Code.attemptCodeGeneration(Blockly.PHP);
} else if (content.id == 'content_dart') {
Code.attemptCodeGeneration(Blockly.Dart);
} else if (content.id == 'content_lua') {
Code.attemptCodeGeneration(Blockly.Lua);
}
if (typeof PR == 'object') {
PR.prettyPrint();
}
};
/**
* Attempt to generate the code and display it in the UI, pretty printed.
* @param generator {!Blockly.Generator} The generator to use.
*/
Code.attemptCodeGeneration = function(generator) {
var content = document.getElementById('content_' + Code.selected);
content.textContent = '';
if (Code.checkAllGeneratorFunctionsDefined(generator)) {
var code = generator.workspaceToCode(Code.workspace);
content.textContent = code;
// Remove the 'prettyprinted' class, so that Prettify will recalculate.
content.className = content.className.replace('prettyprinted', '');
}
};
/**
* Check whether all blocks in use have generator functions.
* @param generator {!Blockly.Generator} The generator to use.
*/
Code.checkAllGeneratorFunctionsDefined = function(generator) {
var blocks = Code.workspace.getAllBlocks(false);
var missingBlockGenerators = [];
for (var i = 0; i < blocks.length; i++) {
var blockType = blocks[i].type;
if (!generator[blockType]) {
if (missingBlockGenerators.indexOf(blockType) == -1) {
missingBlockGenerators.push(blockType);
}
}
}
var valid = missingBlockGenerators.length == 0;
if (!valid) {
    var msg = 'The generator code for the following blocks is not specified for ' +
generator.name_ + ':\n - ' + missingBlockGenerators.join('\n - ');
Blockly.alert(msg); // Assuming synchronous. No callback.
}
return valid;
};
/**
* Initialize Blockly. Called on page load.
*/
Code.init = function() {
Code.initLanguage();
var rtl = Code.isRtl();
var container = document.getElementById('content_area');
var onresize = function(e) {
var bBox = Code.getBBox_(container);
for (var i = 0; i < Code.TABS_.length; i++) {
var el = document.getElementById('content_' + Code.TABS_[i]);
el.style.top = bBox.y + 'px';
el.style.left = bBox.x + 'px';
// Height and width need to be set, read back, then set again to
// compensate for scrollbars.
el.style.height = bBox.height + 'px';
el.style.height = (2 * bBox.height - el.offsetHeight) + 'px';
el.style.width = bBox.width + 'px';
el.style.width = (2 * bBox.width - el.offsetWidth) + 'px';
}
// Make the 'Blocks' tab line up with the toolbox.
if (Code.workspace && Code.workspace.toolbox_.width) {
document.getElementById('tab_blocks').style.minWidth =
(Code.workspace.toolbox_.width - 38) + 'px';
      // Account for the 19 pixel margin on each side.
}
};
window.addEventListener('resize', onresize, false);
// The toolbox XML specifies each category name using Blockly's messaging
  // format (e.g. `<category name="%{BKY_CATLOGIC}">`).
// These message keys need to be defined in `Blockly.Msg` in order to
// be decoded by the library. Therefore, we'll use the `MSG` dictionary that's
// been defined for each language to import each category name message
// into `Blockly.Msg`.
// TODO: Clean up the message files so this is done explicitly instead of
// through this for-loop.
for (var messageKey in MSG) {
if (messageKey.indexOf('cat') == 0) {
Blockly.Msg[messageKey.toUpperCase()] = MSG[messageKey];
}
}
// Construct the toolbox XML, replacing translated variable names.
var toolboxText = document.getElementById('toolbox').outerHTML;
toolboxText = toolboxText.replace(/(^|[^%]){(\w+)}/g,
function(m, p1, p2) {return p1 + MSG[p2];});
var toolboxXml = Blockly.Xml.textToDom(toolboxText);
Code.workspace = Blockly.inject('content_blocks',
{grid:
{spacing: 25,
length: 3,
colour: '#ccc',
snap: true},
media: '../../media/',
rtl: rtl,
toolbox: toolboxXml,
zoom:
{controls: true,
wheel: true}
});
// Add to reserved word list: Local variables in execution environment (runJS)
// and the infinite loop detection function.
Blockly.JavaScript.addReservedWords('code,timeouts,checkTimeout');
Code.loadBlocks('');
if ('BlocklyStorage' in window) {
// Hook a save function onto unload.
BlocklyStorage.backupOnUnload(Code.workspace);
}
Code.tabClick(Code.selected);
Code.bindClick('trashButton',
function() {Code.discard(); Code.renderContent();});
Code.bindClick('runButton', Code.runJS);
// Disable the link button if page isn't backed by App Engine storage.
var linkButton = document.getElementById('linkButton');
if ('BlocklyStorage' in window) {
BlocklyStorage['HTTPREQUEST_ERROR'] = MSG['httpRequestError'];
BlocklyStorage['LINK_ALERT'] = MSG['linkAlert'];
BlocklyStorage['HASH_ERROR'] = MSG['hashError'];
BlocklyStorage['XML_ERROR'] = MSG['xmlError'];
Code.bindClick(linkButton,
function() {BlocklyStorage.link(Code.workspace);});
} else if (linkButton) {
linkButton.className = 'disabled';
}
for (var i = 0; i < Code.TABS_.length; i++) {
var name = Code.TABS_[i];
Code.bindClick('tab_' + name,
function(name_) {return function() {Code.tabClick(name_);};}(name));
}
onresize();
Blockly.svgResize(Code.workspace);
// Lazy-load the syntax-highlighting.
window.setTimeout(Code.importPrettify, 1);
};
/**
* Initialize the page language.
*/
Code.initLanguage = function() {
// Set the HTML's language and direction.
var rtl = Code.isRtl();
document.dir = rtl ? 'rtl' : 'ltr';
document.head.parentElement.setAttribute('lang', Code.LANG);
// Sort languages alphabetically.
var languages = [];
for (var lang in Code.LANGUAGE_NAME) {
languages.push([Code.LANGUAGE_NAME[lang], lang]);
}
var comp = function(a, b) {
// Sort based on first argument ('English', 'Русский', '简体字', etc).
if (a[0] > b[0]) return 1;
if (a[0] < b[0]) return -1;
return 0;
};
languages.sort(comp);
// Populate the language selection menu.
var languageMenu = document.getElementById('languageMenu');
languageMenu.options.length = 0;
for (var i = 0; i < languages.length; i++) {
var tuple = languages[i];
var lang = tuple[tuple.length - 1];
var option = new Option(tuple[0], lang);
if (lang == Code.LANG) {
option.selected = true;
}
languageMenu.options.add(option);
}
languageMenu.addEventListener('change', Code.changeLanguage, true);
// Inject language strings.
document.title += ' ' + MSG['title'];
document.getElementById('title').textContent = MSG['title'];
document.getElementById('tab_blocks').textContent = MSG['blocks'];
document.getElementById('linkButton').title = MSG['linkTooltip'];
document.getElementById('runButton').title = MSG['runTooltip'];
document.getElementById('trashButton').title = MSG['trashTooltip'];
};
/**
* Execute the user's code.
* Just a quick and dirty eval. Catch infinite loops.
*/
Code.runJS = function() {
Blockly.JavaScript.INFINITE_LOOP_TRAP = 'checkTimeout();\n';
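  // The generator injects this snippet into every loop body, so checkTimeout
  // runs on each iteration and aborts runaway programs after ~1,000,000 passes.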
var timeouts = 0;
var checkTimeout = function() {
if (timeouts++ > 1000000) {
throw MSG['timeout'];
}
};
var code = Blockly.JavaScript.workspaceToCode(Code.workspace);
Blockly.JavaScript.INFINITE_LOOP_TRAP = null;
try {
eval(code);
} catch (e) {
alert(MSG['badCode'].replace('%1', e));
}
};
/**
* Discard all blocks from the workspace.
*/
Code.discard = function() {
var count = Code.workspace.getAllBlocks(false).length;
if (count < 2 ||
window.confirm(Blockly.Msg['DELETE_ALL_BLOCKS'].replace('%1', count))) {
Code.workspace.clear();
if (window.location.hash) {
window.location.hash = '';
}
}
};
// Load the Code demo's language strings.
document.write('<script src="msg/' + Code.LANG + '.js"></script>\n');
// Load Blockly's language strings.
document.write('<script src="../../msg/js/' + Code.LANG + '.js"></script>\n');
window.addEventListener('load', Code.init);
|
apache-2.0
|
dyf102/blogrepo
|
blog/source/_posts/About-Me.md
|
529
|
title: README
date: 2015-11-24 13:28:06
tags:
---
# /* About
I am a graduating Computer Science student from the University of Alberta, looking for a Software Engineering position in North America. This is my [resume](https://www.dropbox.com/s/5r85kpjiy0ngbzg/Resume_20151130.pdf?dl=0).
## Extracurricular Activities:
* [MIT 6.824: Distributed Systems](http://nil.csail.mit.edu/6.824/2015/index.html)
* [kattis OJ](https://open.kattis.com/submit)
# Interests
* Distributed System/Database
* JVM
* Compiler Optimization
# */
|
apache-2.0
|
idumancic/bookify
|
web/src/main/java/com/bookify/web/models/PaymentViewModel.java
|
1184
|
package com.bookify.web.models;
import com.bookify.core.Bill;
import com.bookify.core.BillItem;
import java.util.ArrayList;
/**
* Created by idumancic on 11/07/2017.
*/
public class PaymentViewModel {
private Bill bill;
private String username;
private ArrayList<ItemViewModel> billItems;
private String paymentMethod;
private int totalCost;
public Bill getBill() {
return bill;
}
public void setBill(Bill bill) {
this.bill = bill;
}
public ArrayList<ItemViewModel> getBillItems() {
return billItems;
}
public void setBillItems(ArrayList<ItemViewModel> billItems) {
this.billItems = billItems;
}
public String getPaymentMethod() {
return paymentMethod;
}
public void setPaymentMethod(String paymentMethod) {
this.paymentMethod = paymentMethod;
}
public int getTotalCost() {
return totalCost;
}
public void setTotalCost(int totalCost) {
this.totalCost = totalCost;
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
}
|
apache-2.0
|
spinnaker/halyard
|
halyard-cli/src/main/java/com/netflix/spinnaker/halyard/cli/command/v1/config/deploy/ha/AbstractHaServiceEnableDisableCommand.java
|
2298
|
/*
* Copyright 2018 Google, Inc.
*
 * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package com.netflix.spinnaker.halyard.cli.command.v1.config.deploy.ha;
import com.beust.jcommander.Parameters;
import com.netflix.spinnaker.halyard.cli.command.v1.NestableCommand;
import com.netflix.spinnaker.halyard.cli.services.v1.Daemon;
import com.netflix.spinnaker.halyard.cli.services.v1.OperationHandler;
import java.util.HashMap;
import java.util.Map;
import lombok.AccessLevel;
import lombok.Getter;
@Parameters(separators = "=")
public abstract class AbstractHaServiceEnableDisableCommand extends AbstractHaServiceCommand {
@Override
public String getCommandName() {
return isEnable() ? "enable" : "disable";
}
private String subjunctivePerfectAction() {
return isEnable() ? "enabled" : "disabled";
}
private String indicativePastPerfectAction() {
return isEnable() ? "enabled" : "disabled";
}
protected abstract boolean isEnable();
@Getter(AccessLevel.PROTECTED)
private Map<String, NestableCommand> subcommands = new HashMap<>();
@Override
public String getShortDescription() {
return "Set the "
+ getServiceName()
+ " high availability service as "
+ subjunctivePerfectAction();
}
@Override
protected void executeThis() {
String currentDeployment = getCurrentDeployment();
String serviceName = getServiceName();
boolean enable = isEnable();
new OperationHandler<Void>()
.setSuccessMessage("Successfully " + indicativePastPerfectAction() + " " + serviceName)
.setFailureMesssage("Failed to " + getCommandName() + " " + serviceName)
.setOperation(
Daemon.setHaServiceEnableDisable(currentDeployment, serviceName, !noValidate, enable))
.get();
}
}
|
apache-2.0
|
camunda/feel-scala
|
src/main/scala/org/camunda/feel/impl/builtin/ListBuiltinFunctions.scala
|
13252
|
package org.camunda.feel.impl.builtin
import org.camunda.feel.impl.builtin.BuiltinFunction.builtinFunction
import org.camunda.feel.{Number, logger}
import org.camunda.feel.syntaxtree.{
Val,
ValBoolean,
ValError,
ValFunction,
ValList,
ValNull,
ValNumber,
ValString
}
import scala.annotation.tailrec
object ListBuiltinFunctions {
def functions = Map(
"list contains" -> List(listContainsFunction),
"count" -> List(countFunction),
"min" -> List(minFunction),
"max" -> List(maxFunction),
"sum" -> List(sumFunction),
"product" -> List(productFunction),
"mean" -> List(meanFunction),
"median" -> List(medianFunction),
"stddev" -> List(stddevFunction),
"mode" -> List(modeFunction),
"and" -> List(andFunction),
"all" -> List(andFunction),
"or" -> List(orFunction),
"any" -> List(orFunction),
"sublist" -> List(sublistFunction, sublistFunction3),
"append" -> List(appendFunction),
"concatenate" -> List(concatenateFunction),
"insert before" -> List(insertBeforeFunction),
"remove" -> List(removeFunction),
"reverse" -> List(reverseFunction),
"index of" -> List(indexOfFunction),
"union" -> List(unionFunction),
"distinct values" -> List(distinctValuesFunction),
"flatten" -> List(flattenFunction),
"sort" -> List(sortFunction),
"string join" -> List(joinFunction,
joinWithDelimiterFunction,
joinWithDelimiterAndPrefixAndSuffixFunction)
)
private def listContainsFunction =
builtinFunction(params = List("list", "element"), invoke = {
case List(ValList(list), element) => ValBoolean(list.contains(element))
})
private def countFunction =
builtinFunction(params = List("list"), invoke = {
case List(ValList(list)) => ValNumber(list.size)
})
private def minFunction = builtinFunction(
params = List("list"),
invoke = {
case List(l @ ValList(list)) =>
list match {
case Nil => ValNull
case _ if (l.isComparable) => list.min
case _ => logger.warn(s"$l is not comparable"); ValNull
}
},
hasVarArgs = true
)
private def maxFunction = builtinFunction(
params = List("list"),
invoke = {
case List(l @ ValList(list)) =>
list match {
case Nil => ValNull
case _ if (l.isComparable) => list.max
case _ => logger.warn(s"$l is not comparable"); ValNull
}
},
hasVarArgs = true
)
private def sumFunction = builtinFunction(
params = List("list"),
invoke = {
case List(ValList(list)) if list.isEmpty => ValNull
case List(ValList(list)) =>
withListOfNumbers(list, numbers => ValNumber(numbers.sum))
},
hasVarArgs = true
)
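  // Apply `f` to the numeric values, or return the first ValError if any
  // element of the list is not a number.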
private def withListOfNumbers(list: List[Val],
f: List[Number] => Val): Val = {
list
.map(_ match {
case n: ValNumber => n
case x => ValError(s"expected number but found '$x'")
})
.find(_.isInstanceOf[ValError]) match {
case Some(e) => e
case None => f(list.asInstanceOf[List[ValNumber]].map(_.value))
}
}
private def productFunction = builtinFunction(
params = List("list"),
invoke = {
case List(ValList(list)) if list.isEmpty => ValNull
case List(ValList(list)) =>
withListOfNumbers(list, numbers => ValNumber(numbers.product))
},
hasVarArgs = true
)
private def meanFunction = builtinFunction(
params = List("list"),
invoke = {
case List(ValList(list)) =>
list match {
case Nil => ValNull
case l =>
withListOfNumbers(list,
numbers => ValNumber(numbers.sum / numbers.size))
}
},
hasVarArgs = true
)
private def medianFunction = builtinFunction(
params = List("list"),
invoke = {
case List(ValList(list)) if list.isEmpty => ValNull
case List(ValList(list)) =>
withListOfNumbers(
list,
numbers => {
val sortedList = numbers.sorted
if (list.size % 2 == 1) {
ValNumber(sortedList(list.size / 2))
} else {
val i = list.size / 2
val x = sortedList(i - 1)
val y = sortedList(i)
ValNumber((x + y) / 2)
}
}
)
},
hasVarArgs = true
)
private def stddevFunction = builtinFunction(
params = List("list"),
invoke = {
case List(ValList(list)) if list.isEmpty => ValNull
case List(ValList(list)) =>
withListOfNumbers(
list,
numbers => {
val sum = numbers.sum
val mean = sum / numbers.size
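          // sample standard deviation: sqrt(sum((n - mean)^2) / (size - 1));
          // `/:` is foldLeft seeded with 0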
val d = ((0: Number) /: numbers) {
case (dev, n) => dev + (n - mean).pow(2)
}
val stddev = Math.sqrt((d / (numbers.size - 1)).toDouble)
ValNumber(stddev)
}
)
},
hasVarArgs = true
)
private def modeFunction = builtinFunction(
params = List("list"),
invoke = {
case List(ValList(list)) if list.isEmpty => ValList(List.empty)
case List(ValList(list)) =>
withListOfNumbers(
list,
numbers => {
val sortedList = numbers
.groupBy(n => n)
.map { case (n, ns) => n -> ns.size }
.toList
.sortBy { case (n, count) => count }
.reverse
val maxCount = sortedList.head._2
val modeElements = sortedList
.takeWhile { case (n, count) => count == maxCount }
.map(_._1)
.sorted
ValList(modeElements.map(ValNumber))
}
)
},
hasVarArgs = true
)
private def andFunction =
builtinFunction(params = List("list"), invoke = {
case List(ValList(list)) => all(list)
}, hasVarArgs = true)
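  // Ternary logic per FEEL: a null element yields null unless the result is
  // already decided, e.g. all([false, null]) = false but all([true, null]) = null.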
private def all(items: List[Val]): Val = {
items.foldLeft[Val](ValBoolean(true)) {
case (ValBoolean(false), _) => ValBoolean(false)
case (ValBoolean(true), item: ValBoolean) => item
case (ValNull, ValBoolean(false)) => ValBoolean(false)
case (_, _) => ValNull
}
}
private def orFunction =
builtinFunction(params = List("list"), invoke = {
case List(ValList(list)) => atLeastOne(list)
}, hasVarArgs = true)
private def atLeastOne(items: List[Val]): Val = {
items.foldLeft[Val](ValBoolean(false)) {
case (ValBoolean(true), _) => ValBoolean(true)
case (ValBoolean(false), item: ValBoolean) => item
case (ValNull, ValBoolean(true)) => ValBoolean(true)
case (_, _) => ValNull
}
}
private def sublistFunction =
builtinFunction(params = List("list", "start"), invoke = {
case List(ValList(list), ValNumber(start)) =>
ValList(list.slice(listIndex(list, start.intValue), list.length))
})
private def sublistFunction3 = builtinFunction(
params = List("list", "start", "length"),
invoke = {
case List(ValList(list), ValNumber(start), ValNumber(length)) =>
ValList(
list.slice(listIndex(list, start.intValue),
listIndex(list, start.intValue) + length.intValue))
}
)
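  // FEEL list positions are 1-based; a non-positive index counts back from the
  // end, e.g. sublist([1,2,3], 2) = [2,3] and sublist([1,2,3], -1) = [3].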
private def listIndex(list: List[_], index: Int) =
if (index > 0) {
index - 1
} else {
list.size + index
}
private def appendFunction =
builtinFunction(params = List("list", "items"), invoke = {
case List(ValList(list), ValList(items)) => ValList(list ++ items)
}, hasVarArgs = true)
private def concatenateFunction = builtinFunction(
params = List("lists"),
invoke = {
case List(ValList(lists)) =>
ValList(
lists
.flatMap(_ match {
case ValList(list) => list
case v => List(v)
})
.toList)
},
hasVarArgs = true
)
private def insertBeforeFunction = builtinFunction(
params = List("list", "position", "newItem"),
invoke = {
case List(ValList(list), ValNumber(position), newItem: Val) =>
ValList(list
.take(listIndex(list, position.intValue)) ++ (newItem :: Nil) ++ list
.drop(listIndex(list, position.intValue)))
}
)
private def removeFunction = builtinFunction(
params = List("list", "position"),
invoke = {
case List(ValList(list), ValNumber(position)) =>
ValList(
list.take(listIndex(list, position.intValue)) ++ list.drop(
listIndex(list, position.intValue + 1)))
}
)
private def reverseFunction =
builtinFunction(params = List("list"), invoke = {
case List(ValList(list)) => ValList(list.reverse)
})
private def indexOfFunction =
builtinFunction(params = List("list", "match"), invoke = {
case List(ValList(list), m: Val) =>
ValList(indexOfList(list, m) map (ValNumber(_)))
})
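  // Collect every 1-based position of `item` in `list`.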
@tailrec
private def indexOfList(list: List[Val],
item: Val,
from: Int = 0,
indexList: List[Int] = List()): List[Int] = {
val index = list.indexOf(item, from)
if (index >= 0) {
indexOfList(list, item, index + 1, indexList ++ List(index + 1))
} else {
indexList
}
}
private def unionFunction = builtinFunction(
params = List("lists"),
invoke = {
case List(ValList(lists)) =>
ValList(
lists
.flatMap(_ match {
case ValList(list) => list
case v => List(v)
})
.toList
.distinct)
},
hasVarArgs = true
)
private def distinctValuesFunction =
builtinFunction(params = List("list"), invoke = {
case List(ValList(list)) => ValList(list.distinct)
})
private def flattenFunction =
builtinFunction(params = List("list"), invoke = {
case List(ValList(list)) => ValList(flatten(list))
})
private def flatten(list: List[Val]): List[Val] = {
list.flatten {
case ValList(items) => flatten(items)
case item => List(item)
}
}
private def sortFunction = builtinFunction(
params = List("list", "precedes"),
invoke = {
case List(ValList(list), ValFunction(params, f, _))
if (params.size == 2) => {
try {
ValList(list.sortWith {
case (x, y) =>
f(List(x, y)) match {
case ValBoolean(isMet) => isMet
case e =>
throw new RuntimeException(s"expected boolean but found '$e'")
}
})
} catch {
case e: Throwable =>
ValError(
s"fail to sort list by given precedes function: ${e.getMessage}")
}
}
case List(ValList(list), ValFunction(params, _, _)) =>
ValError(
s"expect boolean function with 2 arguments, but found '${params.size}'")
}
)
private def joinFunction = builtinFunction(
params = List("list"),
invoke = {
case List(ValList(list)) => joinStringList(list = list)
}
)
private def joinWithDelimiterFunction = builtinFunction(
params = List("list", "delimiter"),
invoke = {
case List(ValList(list), ValString(delimiter)) =>
joinStringList(list = list, delimiter = delimiter)
case List(ValList(list), ValNull) => joinStringList(list = list)
}
)
private def joinWithDelimiterAndPrefixAndSuffixFunction = builtinFunction(
params = List("list", "delimiter", "prefix", "suffix"),
invoke = {
case List(ValList(list),
ValString(delimiter),
ValString(prefix),
ValString(suffix)) =>
joinStringList(list = list,
delimiter = delimiter,
prefix = prefix,
suffix = suffix)
case List(ValList(list), ValNull, ValString(prefix), ValString(suffix)) =>
joinStringList(list = list, prefix = prefix, suffix = suffix)
case List(ValList(list), ValString(delimiter), ValNull, ValNull) =>
joinStringList(list = list, delimiter = delimiter)
case List(ValList(list), ValNull, ValNull, ValNull) =>
joinStringList(list = list)
}
)
private def joinStringList(list: List[Val],
delimiter: String = "",
prefix: String = "",
suffix: String = ""): Val = {
val isStringList = list.forall {
case _: ValString => true
case ValNull => true
case _ => false
}
if (!isStringList) {
ValError(s"expected a list of strings but found '$list'")
} else {
val stringList = list
.filterNot(_ == ValNull)
.map { case ValString(x) => x }
ValString(
stringList.mkString(start = prefix, sep = delimiter, end = suffix))
}
}
}
|
apache-2.0
|
Intel-EPID-SDK/epid-sdk
|
ext/ipp-crypto/sources/ippcp/asm_intel64/pcpsha256l9as.asm
|
18846
|
;===============================================================================
; Copyright 2017-2020 Intel Corporation
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;===============================================================================
;
;
; Purpose: Cryptography Primitive.
; Message block processing according to SHA256
;
; Content:
; UpdateSHA256
;
%include "asmdefs.inc"
%include "ia_32e.inc"
%include "ia_32e_regs.inc"
%include "pcpvariant.inc"
%if (_ENABLE_ALG_SHA256_)
%if (_SHA_NI_ENABLING_ == _FEATURE_OFF_) || (_SHA_NI_ENABLING_ == _FEATURE_TICKTOCK_)
%if (_IPP32E >= _IPP32E_L9 )
;;
;; assignments
;;
%xdefine hA eax ;; hash values into GPR registers
%xdefine hB ebx
%xdefine hC ecx
%xdefine hD edx
%xdefine hE r8d
%xdefine hF r9d
%xdefine hG r10d
%xdefine hH r11d
%xdefine T1 r12d ;; scratch
%xdefine T2 r13d
%xdefine T3 r14d
%xdefine T4 r15d
%xdefine T5 edi
%xdefine W0 ymm0 ;; W values into YMM registers
%xdefine W1 ymm1
%xdefine W2 ymm2
%xdefine W3 ymm3
%xdefine yT1 ymm4 ;; scratch
%xdefine yT2 ymm5
%xdefine yT3 ymm6
%xdefine yT4 ymm7
%xdefine W0L xmm0
%xdefine W1L xmm1
%xdefine W2L xmm2
%xdefine W3L xmm3
%xdefine YMM_zzBA ymm8 ;; byte swap constant
%xdefine YMM_DCzz ymm9 ;; byte swap constant
%xdefine YMM_SHUFB_BSWAP ymm10 ;; byte swap constant
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; textual rotation of W args
;;
%macro ROTATE_W 0.nolist
%xdefine %%_X W0
%xdefine W0 W1
%xdefine W1 W2
%xdefine W2 W3
%xdefine W3 %%_X
%endmacro
;;
;; textual rotation of HASH arguments
;;
%macro ROTATE_H 0.nolist
%xdefine %%_X hH
%xdefine hH hG
%xdefine hG hF
%xdefine hF hE
%xdefine hE hD
%xdefine hD hC
%xdefine hC hB
%xdefine hB hA
%xdefine hA %%_X
%endmacro
%macro ROTATE_T4_T5 0.nolist
%xdefine %%T T4
%xdefine T4 T5
%xdefine T5 %%T
%endmacro
;;
;; compute next 4 W[t], W[t+1], W[t+2] and W[t+3], t=16,...63
;; (see pcpsha256e9as.asm for details)
%macro UPDATE_W 5.nolist
%xdefine %%nr %1
%xdefine %%W0 %2
%xdefine %%W1 %3
%xdefine %%W2 %4
%xdefine %%W3 %5
%assign %%W_AHEAD 16
vpalignr yT3,%%W1,%%W0,4
vpalignr yT2,%%W3,%%W2,4
vpsrld yT1,yT3,7
vpaddd %%W0,%%W0,yT2
vpsrld yT2,yT3,3
vpslld yT4,yT3,14
vpxor yT3,yT2,yT1
vpshufd yT2,%%W3,250
vpsrld yT1,yT1,11
vpxor yT3,yT3,yT4
vpslld yT4,yT4,11
vpxor yT3,yT3,yT1
vpsrld yT1,yT2,10
vpxor yT3,yT3,yT4
vpsrlq yT2,yT2,17
vpaddd %%W0,%%W0,yT3
vpxor yT1,yT1,yT2
vpsrlq yT2,yT2,2
vpxor yT1,yT1,yT2
vpshufb yT1,yT1,YMM_zzBA
vpaddd %%W0,%%W0,yT1
vpshufd yT2,%%W0,80
vpsrld yT1,yT2,10
vpsrlq yT2,yT2,17
vpxor yT1,yT1,yT2
vpsrlq yT2,yT2,2
vpxor yT1,yT1,yT2
vpshufb yT1,yT1,YMM_DCzz
vpaddd %%W0,%%W0,yT1
vpaddd yT1,%%W0,YMMWORD [rbp+(%%nr/4)*sizeof(ymmword)]
vmovdqa YMMWORD [rsp+(%%W_AHEAD/4)*sizeof(ymmword)+(%%nr/4)*sizeof(ymmword)],yT1
%endmacro
;;
;; regular round (i):
;;
;; T1 = h + Sum1(e) + Ch(e,f,g) + K[i] + W[i]
;; T2 = Sum0(a) + Maj(a,b,c)
;; h = g
;; g = f
;; f = e
;; e = d + T1
;; d = c
;; c = b
;; b = a
;; a = T1+T2
;;
;; sum1(e) = (e>>>25)^(e>>>11)^(e>>>6)
;; sum0(a) = (a>>>13)^(a>>>22)^(a>>>2)
;; ch(e,f,g) = (e&f)^(~e&g)
;; maj(a,b,c) = (a&b)^(a&c)^(b&c)
;;
;; note:
;; 1) U + ch(e,f,g) = U + (e&f) + (~e&g)
;; 2) maj(a,b,c)= (a&b)^(a&c)^(b&c) = (a^b)&(b^c) ^b
;; to make sure both are correct - use GF(2) arith instead of logic
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; or
;; X = sum0(a[i-1]) computed on prev round
;; a[i] += X
;; h[i] += (K[i]+W[i]) + sum1(e[i]) + ch(e[i],f[i],g[i]) or
;; h[i] += (K[i]+W[i]) + sum1(e[i]) + (e[i]&f[i]) + (~e[i]&g[i]) -- helps break dependencies
;; d[i] += h[i]
;; h[i] += maj(a[i],b[i],c[i])
;; and following textual shift
;; {a[i+1],b[i+1],c[i+1],d[i+1],e[i+1],f[i+1],g[i+1],h[i+1]} <= {h[i],a[i],b[i],c[i],d[i],e[i],f[i],g[i]}
;;
;; on entry:
;; - T1 = f
;; - T3 = sum0{a[i-1])
;; - T5 = b&c
%macro SHA256_ROUND 9.nolist
%xdefine %%nr %1
%xdefine %%hA %2
%xdefine %%hB %3
%xdefine %%hC %4
%xdefine %%hD %5
%xdefine %%hE %6
%xdefine %%hF %7
%xdefine %%hG %8
%xdefine %%hH %9
add %%hH, dword [rsp+(%%nr/4)*sizeof(ymmword)+(%%nr & 3)*sizeof(dword)] ;; h += (k[t]+w[t])
and T1, %%hE ;; ch(e,f,g): (f&e)
rorx T2, %%hE, 25 ;; sum1(e): e>>>25
rorx T4, %%hE, 11 ;; sum1(e): e>>>11
add %%hA, T3 ;; complete computation a += sum0(a[t-1])
add %%hH, T1 ;; h += (k[t]+w[t]) + (f&e)
andn T1, %%hE, %%hG ;; ch(e,f,g): (~e&g)
xor T2, T4 ;; sum1(e): (e>>>25)^(e>>>11)
rorx T3, %%hE, 6 ;; sum1(e): e>>>6
add %%hH, T1 ;; h += (k[t]+w[t]) + (f&e) + (~e&g)
xor T2, T3 ;; sum1(e) = (e>>>25)^(e>>>11)^(e>>>6)
mov T4, %%hA ;; maj(a,b,c): a
rorx T1, %%hA, 22 ;; sum0(a): a>>>22
add %%hH, T2 ;; h += (k[t]+w[t]) +(f&e) +(~e&g) +sig1(e)
xor T4, %%hB ;; maj(a,b,c): (a^b)
rorx T3, %%hA, 13 ;; sum0(a): a>>>13
rorx T2, %%hA, 2 ;; sum0(a): a>>>2
add %%hD, %%hH ;; d += h
and T5, T4 ;; maj(a,b,c): (b^c)&(a^b)
xor T3, T1 ;; sum0(a): (a>>>13)^(a>>>22)
xor T5, %%hB ;; maj(a,b,c) = (b^c)&(a^b)^b = (a&b)^(a&c)^(b&c)
xor T3, T2 ;; sum0(a): = (a>>>13)^(a>>>22)^(a>>>2)
add %%hH, T5 ;; h += (k[t]+w[t]) +(f&e) +(~e&g) +sig1(e) +maj(a,b,c)
mov T1, %%hE ;; T1 = f (next round)
ROTATE_T4_T5 ;; T5 = (b^c) (next round)
%endmacro
;;
;; does 4 regular rounds and computes next 4 W values
;; (just 4 instances of SHA256_ROUND merged together with UPDATE_W)
;;
%macro SHA256_4ROUND_SHED 1.nolist
%xdefine %%round %1
%assign %%W_AHEAD 16
vpalignr yT3,W1,W0,4
%assign %%nr %%round
add hH, dword [rsp+(%%nr/4)*sizeof(ymmword)+(%%nr & 3)*sizeof(dword)]
and T1, hE
rorx T2, hE, 25
vpalignr yT2,W3,W2,4
rorx T4, hE, 11
add hA, T3
add hH, T1
vpsrld yT1,yT3,7
andn T1, hE, hG
xor T2, T4
rorx T3, hE, 6
vpaddd W0,W0,yT2
add hH, T1
xor T2, T3
mov T4, hA
vpsrld yT2,yT3,3
rorx T1, hA, 22
add hH, T2
xor T4, hB
vpslld yT4,yT3,14
rorx T3, hA, 13
rorx T2, hA, 2
add hD, hH
vpxor yT3,yT2,yT1
and T5, T4
xor T3, T1
xor T5, hB
vpshufd yT2,W3,250
xor T3, T2
add hH, T5
mov T1, hE
ROTATE_T4_T5
ROTATE_H
vpsrld yT1,yT1,11
%assign %%nr %%nr+1
add hH, dword [rsp+(%%nr/4)*sizeof(ymmword)+(%%nr & 3)*sizeof(dword)]
and T1, hE
rorx T2, hE, 25
vpxor yT3,yT3,yT4
rorx T4, hE, 11
add hA, T3
add hH, T1
vpslld yT4,yT4,11
andn T1, hE, hG
xor T2, T4
rorx T3, hE, 6
vpxor yT3,yT3,yT1
add hH, T1
xor T2, T3
mov T4, hA
vpsrld yT1,yT2,10
rorx T1, hA, 22
add hH, T2
xor T4, hB
vpxor yT3,yT3,yT4
rorx T3, hA, 13
rorx T2, hA, 2
add hD, hH
vpsrlq yT2,yT2,17
and T5, T4
xor T3, T1
xor T5, hB
vpaddd W0,W0,yT3
xor T3, T2
add hH, T5
mov T1, hE
ROTATE_T4_T5
ROTATE_H
vpxor yT1,yT1,yT2
%assign %%nr %%nr+1
add hH, dword [rsp+(%%nr/4)*sizeof(ymmword)+(%%nr & 3)*sizeof(dword)]
and T1, hE
rorx T2, hE, 25
vpsrlq yT2,yT2,2
rorx T4, hE, 11
add hA, T3
add hH, T1
vpxor yT1,yT1,yT2
andn T1, hE, hG
xor T2, T4
rorx T3, hE, 6
vpshufb yT1,yT1,YMM_zzBA
add hH, T1
xor T2, T3
mov T4, hA
vpaddd W0,W0,yT1
rorx T1, hA, 22
add hH, T2
xor T4, hB
vpshufd yT2,W0,80
rorx T3, hA, 13
rorx T2, hA, 2
add hD, hH
vpsrld yT1,yT2,10
and T5, T4
xor T3, T1
xor T5, hB
vpsrlq yT2,yT2,17
xor T3, T2
add hH, T5
mov T1, hE
ROTATE_T4_T5
ROTATE_H
vpxor yT1,yT1,yT2
%assign %%nr %%nr+1
add hH, dword [rsp+(%%nr/4)*sizeof(ymmword)+(%%nr & 3)*sizeof(dword)]
and T1, hE
rorx T2, hE, 25
vpsrlq yT2,yT2,2
rorx T4, hE, 11
add hA, T3
add hH, T1
vpxor yT1,yT1,yT2
andn T1, hE, hG
xor T2, T4
rorx T3, hE, 6
vpshufb yT1,yT1,YMM_DCzz
add hH, T1
xor T2, T3
mov T4, hA
vpaddd W0,W0,yT1
rorx T1, hA, 22
add hH, T2
xor T4, hB
vpaddd yT1,W0,YMMWORD [rbp+(%%nr/4)*sizeof(ymmword)]
rorx T3, hA, 13
rorx T2, hA, 2
add hD, hH
and T5, T4
xor T3, T1
xor T5, hB
vmovdqa YMMWORD [rsp+(%%W_AHEAD/4)*sizeof(ymmword)+(%%round/4)*sizeof(ymmword)],yT1
xor T3, T2
add hH, T5
mov T1, hE
ROTATE_T4_T5
ROTATE_H
ROTATE_W
%endmacro
;;
;; update hash
;;
%macro UPDATE_HASH 2.nolist
%xdefine %%hashMem %1
%xdefine %%hash %2
add %%hash, %%hashMem
mov %%hashMem, %%hash
%endmacro
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
segment .text align=IPP_ALIGN_FACTOR
align IPP_ALIGN_FACTOR
SHA256_YMM_K dd 0428a2f98h, 071374491h, 0b5c0fbcfh, 0e9b5dba5h, 0428a2f98h, 071374491h, 0b5c0fbcfh, 0e9b5dba5h
dd 03956c25bh, 059f111f1h, 0923f82a4h, 0ab1c5ed5h, 03956c25bh, 059f111f1h, 0923f82a4h, 0ab1c5ed5h
dd 0d807aa98h, 012835b01h, 0243185beh, 0550c7dc3h, 0d807aa98h, 012835b01h, 0243185beh, 0550c7dc3h
dd 072be5d74h, 080deb1feh, 09bdc06a7h, 0c19bf174h, 072be5d74h, 080deb1feh, 09bdc06a7h, 0c19bf174h
dd 0e49b69c1h, 0efbe4786h, 00fc19dc6h, 0240ca1cch, 0e49b69c1h, 0efbe4786h, 00fc19dc6h, 0240ca1cch
dd 02de92c6fh, 04a7484aah, 05cb0a9dch, 076f988dah, 02de92c6fh, 04a7484aah, 05cb0a9dch, 076f988dah
dd 0983e5152h, 0a831c66dh, 0b00327c8h, 0bf597fc7h, 0983e5152h, 0a831c66dh, 0b00327c8h, 0bf597fc7h
dd 0c6e00bf3h, 0d5a79147h, 006ca6351h, 014292967h, 0c6e00bf3h, 0d5a79147h, 006ca6351h, 014292967h
dd 027b70a85h, 02e1b2138h, 04d2c6dfch, 053380d13h, 027b70a85h, 02e1b2138h, 04d2c6dfch, 053380d13h
dd 0650a7354h, 0766a0abbh, 081c2c92eh, 092722c85h, 0650a7354h, 0766a0abbh, 081c2c92eh, 092722c85h
dd 0a2bfe8a1h, 0a81a664bh, 0c24b8b70h, 0c76c51a3h, 0a2bfe8a1h, 0a81a664bh, 0c24b8b70h, 0c76c51a3h
dd 0d192e819h, 0d6990624h, 0f40e3585h, 0106aa070h, 0d192e819h, 0d6990624h, 0f40e3585h, 0106aa070h
dd 019a4c116h, 01e376c08h, 02748774ch, 034b0bcb5h, 019a4c116h, 01e376c08h, 02748774ch, 034b0bcb5h
dd 0391c0cb3h, 04ed8aa4ah, 05b9cca4fh, 0682e6ff3h, 0391c0cb3h, 04ed8aa4ah, 05b9cca4fh, 0682e6ff3h
dd 0748f82eeh, 078a5636fh, 084c87814h, 08cc70208h, 0748f82eeh, 078a5636fh, 084c87814h, 08cc70208h
dd 090befffah, 0a4506cebh, 0bef9a3f7h, 0c67178f2h, 090befffah, 0a4506cebh, 0bef9a3f7h, 0c67178f2h
SHA256_YMM_BF dd 000010203h, 004050607h, 008090a0bh, 00c0d0e0fh, 000010203h, 004050607h, 008090a0bh, 00c0d0e0fh
SHA256_DCzz db 0ffh,0ffh,0ffh,0ffh,0ffh,0ffh,0ffh,0ffh, 0,1,2,3, 8,9,10,11
db 0ffh,0ffh,0ffh,0ffh,0ffh,0ffh,0ffh,0ffh, 0,1,2,3, 8,9,10,11
SHA256_zzBA db 0,1,2,3, 8,9,10,11, 0ffh,0ffh,0ffh,0ffh,0ffh,0ffh,0ffh,0ffh
db 0,1,2,3, 8,9,10,11, 0ffh,0ffh,0ffh,0ffh,0ffh,0ffh,0ffh,0ffh
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; UpdateSHA256(Ipp32u digest[], Ipp8u dataBlock[], int datalen, Ipp32u K_256[])
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
align IPP_ALIGN_FACTOR
IPPASM UpdateSHA256,PUBLIC
%assign LOCAL_FRAME (sizeof(qword)*4 + sizeof(dword)*64*2)
USES_GPR rbx,rsi,rdi,rbp,rbx,r12,r13,r14,r15
USES_XMM_AVX xmm6,xmm7,xmm8,xmm9,xmm10
COMP_ABI 4
;;
;; rdi = pointer to the updated hash
;; rsi = pointer to the data block
;; rdx = data block length
;; rcx = pointer to the SHA_256 constant (ignored)
;;
%xdefine MBS_SHA256 (64)
;;
;; stack structure:
;;
%assign _reserve 0 ;; reserved
%assign _hash _reserve+sizeof(qword) ;; hash address
%assign _len _hash+sizeof(qword) ;; rest of processed data
%assign _frame _len+sizeof(qword) ;; rsp before alignment
%assign _dataW _frame+sizeof(qword) ;; W[t] values
   mov r15, rsp ; store original rsp
and rsp, -IPP_ALIGN_FACTOR ; 32-byte aligned stack
movsxd r14, edx ; input length in bytes
mov qword [rsp+_hash], rdi ; store hash address
mov qword [rsp+_len], r14 ; store length
mov qword [rsp+_frame], r15 ; store rsp
lea rsp, qword [rsp+_dataW] ; set up rsp
mov hA, dword [rdi] ; load initial hash value
mov hB, dword [rdi+1*sizeof(dword)]
mov hC, dword [rdi+2*sizeof(dword)]
mov hD, dword [rdi+3*sizeof(dword)]
mov hE, dword [rdi+4*sizeof(dword)]
mov hF, dword [rdi+5*sizeof(dword)]
mov hG, dword [rdi+6*sizeof(dword)]
mov hH, dword [rdi+7*sizeof(dword)]
vmovdqa YMM_SHUFB_BSWAP, ymmword [rel SHA256_YMM_BF] ; load byte shuffler
vmovdqa YMM_zzBA, ymmword [rel SHA256_zzBA] ; load byte shuffler (zzBA)
vmovdqa YMM_DCzz, ymmword [rel SHA256_DCzz] ; load byte shuffler (DCzz)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; process the next 2 data blocks
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
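;; note: the message schedules of both blocks are computed SIMD-wise -- each
;; ymm register keeps W[t..t+3] of the 1-st block in its low 128 bits and of
;; the 2-nd block in its high 128 bits, while the rounds below run per block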
align IPP_ALIGN_FACTOR
.sha256_block2_loop:
lea r12, [rsi+MBS_SHA256] ; next block
   cmp r14, MBS_SHA256 ; if only a single block remains
cmovbe r12, rsi ; use the same data block address
lea rbp, ymmword [rel SHA256_YMM_K] ; to SHA256 consts
vmovdqu W0L, xmmword [rsi] ; load data block
vmovdqu W1L, xmmword [rsi+1*sizeof(xmmword)]
vmovdqu W2L, xmmword [rsi+2*sizeof(xmmword)]
vmovdqu W3L, xmmword [rsi+3*sizeof(xmmword)]
vinserti128 W0, W0, xmmword [r12], 1 ; merge next data block
vinserti128 W1, W1, xmmword [r12+1*sizeof(xmmword)], 1
vinserti128 W2, W2, xmmword [r12+2*sizeof(xmmword)], 1
vinserti128 W3, W3, xmmword [r12+3*sizeof(xmmword)], 1
vpshufb W0, W0, YMM_SHUFB_BSWAP
vpshufb W1, W1, YMM_SHUFB_BSWAP
vpshufb W2, W2, YMM_SHUFB_BSWAP
vpshufb W3, W3, YMM_SHUFB_BSWAP
vpaddd yT1, W0, ymmword [rbp]
vpaddd yT2, W1, ymmword [rbp+1*sizeof(ymmword)]
vpaddd yT3, W2, ymmword [rbp+2*sizeof(ymmword)]
vpaddd yT4, W3, ymmword [rbp+3*sizeof(ymmword)]
add rbp, 4*sizeof(ymmword)
vmovdqa ymmword [rsp], yT1
vmovdqa ymmword [rsp+1*sizeof(ymmword)], yT2
vmovdqa ymmword [rsp+2*sizeof(ymmword)], yT3
vmovdqa ymmword [rsp+3*sizeof(ymmword)], yT4
mov T5, hB ; T5 = b^c
xor T3, T3 ; T3 = 0
mov T1, hF ; T1 = f
xor T5, hC
align IPP_ALIGN_FACTOR
.block1_shed_proc:
SHA256_4ROUND_SHED 0
SHA256_4ROUND_SHED 4
SHA256_4ROUND_SHED 8
SHA256_4ROUND_SHED 12
add rsp, 4*sizeof(ymmword)
add rbp, 4*sizeof(ymmword)
;; and repeat
cmp dword [rbp-sizeof(dword)],0c67178f2h
jne .block1_shed_proc
;; the remaining 16 rounds
SHA256_ROUND 0, hA,hB,hC,hD,hE,hF,hG,hH
SHA256_ROUND 1, hH,hA,hB,hC,hD,hE,hF,hG
SHA256_ROUND 2, hG,hH,hA,hB,hC,hD,hE,hF
SHA256_ROUND 3, hF,hG,hH,hA,hB,hC,hD,hE
SHA256_ROUND 4, hE,hF,hG,hH,hA,hB,hC,hD
SHA256_ROUND 5, hD,hE,hF,hG,hH,hA,hB,hC
SHA256_ROUND 6, hC,hD,hE,hF,hG,hH,hA,hB
SHA256_ROUND 7, hB,hC,hD,hE,hF,hG,hH,hA
SHA256_ROUND 8, hA,hB,hC,hD,hE,hF,hG,hH
SHA256_ROUND 9, hH,hA,hB,hC,hD,hE,hF,hG
SHA256_ROUND 10, hG,hH,hA,hB,hC,hD,hE,hF
SHA256_ROUND 11, hF,hG,hH,hA,hB,hC,hD,hE
SHA256_ROUND 12, hE,hF,hG,hH,hA,hB,hC,hD
SHA256_ROUND 13, hD,hE,hF,hG,hH,hA,hB,hC
SHA256_ROUND 14, hC,hD,hE,hF,hG,hH,hA,hB
SHA256_ROUND 15, hB,hC,hD,hE,hF,hG,hH,hA
add hA, T3
sub rsp, (16-4)*sizeof(ymmword) ; restore stack to W
mov rdi, qword [rsp+_hash-_dataW] ; restore hash pointer
mov r14, qword [rsp+_len-_dataW] ; restore data length
;; update hash values by 1-st data block
UPDATE_HASH dword [rdi], hA
UPDATE_HASH dword [rdi+1*sizeof(dword)], hB
UPDATE_HASH dword [rdi+2*sizeof(dword)], hC
UPDATE_HASH dword [rdi+3*sizeof(dword)], hD
UPDATE_HASH dword [rdi+4*sizeof(dword)], hE
UPDATE_HASH dword [rdi+5*sizeof(dword)], hF
UPDATE_HASH dword [rdi+6*sizeof(dword)], hG
UPDATE_HASH dword [rdi+7*sizeof(dword)], hH
cmp r14, MBS_SHA256*2
jl .done
;; do 64 rounds for the next block
add rsp, 4*sizeof(dword) ; restore stack to next block W
lea rbp, [rsp+16*sizeof(ymmword)] ; use rbp for loop limiter
mov T5, hB ; T5 = b^c
xor T3, T3 ; T3 = 0
mov T1, hF ; T1 = f
xor T5, hC
align IPP_ALIGN_FACTOR
.block2_proc:
SHA256_ROUND 0, hA,hB,hC,hD,hE,hF,hG,hH
SHA256_ROUND 1, hH,hA,hB,hC,hD,hE,hF,hG
SHA256_ROUND 2, hG,hH,hA,hB,hC,hD,hE,hF
SHA256_ROUND 3, hF,hG,hH,hA,hB,hC,hD,hE
SHA256_ROUND 4, hE,hF,hG,hH,hA,hB,hC,hD
SHA256_ROUND 5, hD,hE,hF,hG,hH,hA,hB,hC
SHA256_ROUND 6, hC,hD,hE,hF,hG,hH,hA,hB
SHA256_ROUND 7, hB,hC,hD,hE,hF,hG,hH,hA
add rsp, 2*sizeof(ymmword)
cmp rsp, rbp
jb .block2_proc
add hA, T3
sub rsp, 16*sizeof(ymmword)+4*sizeof(dword) ; restore stack
mov rdi, qword [rsp+_hash-_dataW] ; restore hash pointer
mov r14, qword [rsp+_len-_dataW] ; restore data length
;; update hash values by 2-nd data block
UPDATE_HASH dword [rdi], hA
UPDATE_HASH dword [rdi+1*sizeof(dword)], hB
UPDATE_HASH dword [rdi+2*sizeof(dword)], hC
UPDATE_HASH dword [rdi+3*sizeof(dword)], hD
UPDATE_HASH dword [rdi+4*sizeof(dword)], hE
UPDATE_HASH dword [rdi+5*sizeof(dword)], hF
UPDATE_HASH dword [rdi+6*sizeof(dword)], hG
UPDATE_HASH dword [rdi+7*sizeof(dword)], hH
add rsi, MBS_SHA256*2 ; move data pointer
sub r14, MBS_SHA256*2 ; update data length
mov qword [rsp+_len-_dataW], r14
jg .sha256_block2_loop
.done:
mov rsp, qword [rsp+_frame-_dataW]
REST_XMM_AVX
REST_GPR
ret
ENDFUNC UpdateSHA256
%endif ;; _IPP32E_L9 and above
%endif ;; _FEATURE_OFF_ / _FEATURE_TICKTOCK_
%endif ;; _ENABLE_ALG_SHA256_
|
apache-2.0
|
hochanh/hochanh.github.io
|
rtk/rtk3-remain/2815.md
|
1221
|
---
layout: kanji-remain
v4: 2815
kanji: 鮭
keyword: salmon
strokes: 17
on-yomi: カイ
permalink: /rtk/鮭/
---
## Koohii stories:
1) [<a href="http://kanji.koohii.com/profile/Megaqwerty">Megaqwerty</a>] 17-7-2007(30): <strong>Salmon</strong> are <em>fish</em> that go over <em>ivy</em> trying to get back to their breeding grounds, killing bears along the way.
2) [<a href="http://kanji.koohii.com/profile/oregum">oregum</a>] 11-6-2010(13): <strong>SALMON</strong> swim upstream like they're climbing IVY.
3) [<a href="http://kanji.koohii.com/profile/synewave">synewave</a>] 26-4-2007(9): <strong>さけ, salmon</strong> are <em>fish</em> that go to higher <em>ground</em> (<em>ground on ground</em>) to spawn.
4) [<a href="http://kanji.koohii.com/profile/mantixen">mantixen</a>] 30-7-2009(5): <strong>Salmon</strong> is a <em>fish</em> that can keep your brain working well enough to get into the <em>Ivy</em> League.
5) [<a href="http://kanji.koohii.com/profile/blannk">blannk</a>] 9-3-2010(2): <strong>Salman</strong> Rushdie speaks at an <em>Ivy</em> League college & afterward is treated to a <em>fish</em> dinner,<strong> salmon</strong>, of course, with the board of trustees.
|
apache-2.0
|
tuxology/bcc
|
tests/cc/test_usdt_probes.cc
|
12086
|
/*
* Copyright (c) 2016 GitHub, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "catch.hpp"
#include "usdt.h"
#include "api/BPF.h"
/* required to insert USDT probes on this very executable --
* we're gonna be testing them live! */
#include "folly/tracing/StaticTracepoint.h"
static int a_probed_function() {
int an_int = 23 + getpid();
void *a_pointer = malloc(4);
FOLLY_SDT(libbcc_test, sample_probe_1, an_int, a_pointer);
free(a_pointer);
return an_int;
}
extern "C" int lib_probed_function();
int call_shared_lib_func() {
return lib_probed_function();
}
TEST_CASE("test finding a probe in our own process", "[usdt]") {
USDT::Context ctx(getpid());
REQUIRE(ctx.num_probes() >= 1);
SECTION("our test probe") {
auto probe = ctx.get("sample_probe_1");
REQUIRE(probe);
if(probe->in_shared_object(probe->bin_path()))
return;
REQUIRE(probe->name() == "sample_probe_1");
REQUIRE(probe->provider() == "libbcc_test");
REQUIRE(probe->bin_path().find("/test_libbcc") != std::string::npos);
REQUIRE(probe->num_locations() == 1);
REQUIRE(probe->num_arguments() == 2);
REQUIRE(probe->need_enable() == false);
REQUIRE(a_probed_function() != 0);
}
}
TEST_CASE("test probe's attributes with C++ API", "[usdt]") {
const ebpf::USDT u("/proc/self/exe", "libbcc_test", "sample_probe_1", "on_event");
REQUIRE(u.binary_path() == "/proc/self/exe");
REQUIRE(u.pid() == -1);
REQUIRE(u.provider() == "libbcc_test");
REQUIRE(u.name() == "sample_probe_1");
REQUIRE(u.probe_func() == "on_event");
}
TEST_CASE("test fine a probe in our own binary with C++ API", "[usdt]") {
ebpf::BPF bpf;
ebpf::USDT u("/proc/self/exe", "libbcc_test", "sample_probe_1", "on_event");
auto res = bpf.init("int on_event() { return 0; }", {}, {u});
REQUIRE(res.code() == 0);
res = bpf.attach_usdt(u);
REQUIRE(res.code() == 0);
res = bpf.detach_usdt(u);
REQUIRE(res.code() == 0);
}
TEST_CASE("test fine probes in our own binary with C++ API", "[usdt]") {
ebpf::BPF bpf;
ebpf::USDT u("/proc/self/exe", "libbcc_test", "sample_probe_1", "on_event");
auto res = bpf.init("int on_event() { return 0; }", {}, {u});
REQUIRE(res.ok());
res = bpf.attach_usdt_all();
REQUIRE(res.ok());
res = bpf.detach_usdt_all();
REQUIRE(res.ok());
}
TEST_CASE("test fine a probe in our Process with C++ API", "[usdt]") {
ebpf::BPF bpf;
ebpf::USDT u(::getpid(), "libbcc_test", "sample_probe_1", "on_event");
auto res = bpf.init("int on_event() { return 0; }", {}, {u});
REQUIRE(res.code() == 0);
res = bpf.attach_usdt(u);
REQUIRE(res.code() == 0);
res = bpf.detach_usdt(u);
REQUIRE(res.code() == 0);
}
TEST_CASE("test find a probe in our process' shared libs with c++ API", "[usdt]") {
ebpf::BPF bpf;
ebpf::USDT u(::getpid(), "libbcc_test", "sample_lib_probe_1", "on_event");
auto res = bpf.init("int on_event() { return 0; }", {}, {u});
REQUIRE(res.msg() == "");
REQUIRE(res.code() == 0);
}
TEST_CASE("test usdt partial init w/ fail init_usdt", "[usdt]") {
ebpf::BPF bpf;
ebpf::USDT u(::getpid(), "libbcc_test", "sample_lib_probe_nonexistent", "on_event");
ebpf::USDT p(::getpid(), "libbcc_test", "sample_lib_probe_1", "on_event");
// We should be able to fail initialization and subsequently do bpf.init w/o USDT
// successfully
auto res = bpf.init_usdt(u);
REQUIRE(res.msg() != "");
REQUIRE(res.code() != 0);
// Shouldn't be necessary to re-init bpf object either after failure to init w/
// bad USDT
res = bpf.init("int on_event() { return 0; }", {}, {u});
REQUIRE(res.msg() != "");
REQUIRE(res.code() != 0);
res = bpf.init_usdt(p);
REQUIRE(res.msg() == "");
REQUIRE(res.code() == 0);
res = bpf.init("int on_event() { return 0; }", {}, {});
REQUIRE(res.msg() == "");
REQUIRE(res.code() == 0);
}
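// Fork/exec helper: spawns the given command, sleeps briefly and verifies the
// child is still alive; the destructor SIGKILLs and reaps it.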
class ChildProcess {
pid_t pid_;
public:
ChildProcess(const char *name, char *const argv[]) {
pid_ = fork();
if (pid_ == 0) {
execvp(name, argv);
exit(0);
}
if (spawned()) {
usleep(250000);
if (kill(pid_, 0) < 0)
pid_ = -1;
}
}
~ChildProcess() {
if (spawned()) {
int status;
kill(pid_, SIGKILL);
if (waitpid(pid_, &status, 0) != pid_)
abort();
}
}
bool spawned() const { return pid_ > 0; }
pid_t pid() const { return pid_; }
};
extern int cmd_scanf(const char *cmd, const char *fmt, ...);
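// Independently count a probe's locations by grepping readelf's stapsdt notes,
// so USDT::Context results can be cross-checked against the ELF binary.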
static int probe_num_locations(const char *bin_path, const char *func_name) {
int num_locations;
char cmd[512];
const char *cmdfmt = "readelf -n %s | grep -c \"Name: %s$\"";
sprintf(cmd, cmdfmt, bin_path, func_name);
if (cmd_scanf(cmd, "%d", &num_locations) != 0) {
return -1;
}
return num_locations;
}
static int probe_num_arguments(const char *bin_path, const char *func_name) {
int num_arguments;
char cmd[512];
const char *cmdfmt = "readelf -n %s | grep -m 1 -A 2 \" %s$\" | " \
"tail -1 | cut -d \" \" -f 6- | wc -w";
sprintf(cmd, cmdfmt, bin_path, func_name);
if (cmd_scanf(cmd, "%d", &num_arguments) != 0) {
return -1;
}
return num_arguments;
}
// Unsharing the pid namespace requires forking: this uses pgrep to find the
// child process by searching for a process that has the unshare as its parent.
static int unshared_child_pid(const int ppid) {
int child_pid;
char cmd[512];
const char *cmdfmt = "pgrep -P %d";
sprintf(cmd, cmdfmt, ppid);
if (cmd_scanf(cmd, "%d", &child_pid) != 0) {
return -1;
}
return child_pid;
}
// FIXME This seems like a legitimate bug with probing ruby where the
// ruby symbols are in libruby.so?
TEST_CASE("test listing all USDT probes in Ruby/MRI", "[usdt][!mayfail]") {
size_t mri_probe_count = 0;
SECTION("without a running Ruby process") {
USDT::Context ctx("ruby");
if (!ctx.loaded())
return;
REQUIRE(ctx.num_probes() > 10);
mri_probe_count = ctx.num_probes();
SECTION("GC static probe") {
auto name = "gc__mark__begin";
auto probe = ctx.get(name);
REQUIRE(probe);
REQUIRE(probe->in_shared_object(probe->bin_path()) == true);
REQUIRE(probe->name() == name);
REQUIRE(probe->provider() == "ruby");
auto bin_path = probe->bin_path();
bool bin_path_match =
(bin_path.find("/ruby") != std::string::npos) ||
(bin_path.find("/libruby") != std::string::npos);
REQUIRE(bin_path_match);
int exp_locations, exp_arguments;
exp_locations = probe_num_locations(bin_path.c_str(), name);
exp_arguments = probe_num_arguments(bin_path.c_str(), name);
REQUIRE(probe->num_locations() == exp_locations);
REQUIRE(probe->num_arguments() == exp_arguments);
REQUIRE(probe->need_enable() == true);
}
SECTION("object creation probe") {
auto name = "object__create";
auto probe = ctx.get(name);
REQUIRE(probe);
REQUIRE(probe->in_shared_object(probe->bin_path()) == true);
REQUIRE(probe->name() == name);
REQUIRE(probe->provider() == "ruby");
auto bin_path = probe->bin_path();
bool bin_path_match =
(bin_path.find("/ruby") != std::string::npos) ||
(bin_path.find("/libruby") != std::string::npos);
REQUIRE(bin_path_match);
int exp_locations, exp_arguments;
exp_locations = probe_num_locations(bin_path.c_str(), name);
exp_arguments = probe_num_arguments(bin_path.c_str(), name);
REQUIRE(probe->num_locations() == exp_locations);
REQUIRE(probe->num_arguments() == exp_arguments);
REQUIRE(probe->need_enable() == true);
}
SECTION("array creation probe") {
auto name = "array__create";
auto probe = ctx.get(name);
REQUIRE(probe);
REQUIRE(probe->name() == name);
auto bin_path = probe->bin_path().c_str();
int exp_locations, exp_arguments;
exp_locations = probe_num_locations(bin_path, name);
exp_arguments = probe_num_arguments(bin_path, name);
REQUIRE(probe->num_locations() == exp_locations);
REQUIRE(probe->num_arguments() == exp_arguments);
REQUIRE(probe->need_enable() == true);
}
}
SECTION("with a running Ruby process") {
static char _ruby[] = "ruby";
char *const argv[2] = {_ruby, NULL};
ChildProcess ruby(argv[0], argv);
if (!ruby.spawned())
return;
USDT::Context ctx(ruby.pid());
REQUIRE(ctx.num_probes() >= mri_probe_count);
SECTION("get probe in running process") {
auto name = "gc__mark__begin";
auto probe = ctx.get(name);
REQUIRE(probe);
REQUIRE(probe->in_shared_object(probe->bin_path()) == true);
REQUIRE(probe->name() == name);
REQUIRE(probe->provider() == "ruby");
auto bin_path = probe->bin_path();
bool bin_path_match =
(bin_path.find("/ruby") != std::string::npos) ||
(bin_path.find("/libruby") != std::string::npos);
REQUIRE(bin_path_match);
int exp_locations, exp_arguments;
exp_locations = probe_num_locations(bin_path.c_str(), name);
exp_arguments = probe_num_arguments(bin_path.c_str(), name);
REQUIRE(probe->num_locations() == exp_locations);
REQUIRE(probe->num_arguments() == exp_arguments);
REQUIRE(probe->need_enable() == true);
}
}
}
// These tests are expected to fail if there is no Ruby with dtrace probes
TEST_CASE("test probing running Ruby process in namespaces",
"[usdt][!mayfail]") {
SECTION("in separate mount namespace") {
static char _unshare[] = "unshare";
const char *const argv[4] = {_unshare, "--mount", "ruby", NULL};
ChildProcess unshare(argv[0], (char **const)argv);
if (!unshare.spawned())
return;
int ruby_pid = unshare.pid();
ebpf::BPF bpf;
ebpf::USDT u(ruby_pid, "ruby", "gc__mark__begin", "on_event");
u.set_probe_matching_kludge(1); // Also required for overlayfs...
auto res = bpf.init("int on_event() { return 0; }", {}, {u});
REQUIRE(res.msg() == "");
REQUIRE(res.code() == 0);
res = bpf.attach_usdt(u, ruby_pid);
REQUIRE(res.code() == 0);
res = bpf.detach_usdt(u, ruby_pid);
REQUIRE(res.code() == 0);
}
SECTION("in separate mount namespace and separate PID namespace") {
static char _unshare[] = "unshare";
const char *const argv[8] = {_unshare, "--fork", "--kill-child",
"--mount", "--pid", "--mount-proc",
"ruby", NULL};
ChildProcess unshare(argv[0], (char **const)argv);
if (!unshare.spawned())
return;
int ruby_pid = unshared_child_pid(unshare.pid());
ebpf::BPF bpf;
ebpf::USDT u(ruby_pid, "ruby", "gc__mark__begin", "on_event");
u.set_probe_matching_kludge(1); // Also required for overlayfs...
auto res = bpf.init("int on_event() { return 0; }", {}, {u});
REQUIRE(res.msg() == "");
REQUIRE(res.code() == 0);
res = bpf.attach_usdt(u, ruby_pid);
REQUIRE(res.code() == 0);
res = bpf.detach_usdt(u, ruby_pid);
REQUIRE(res.code() == 0);
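    // Resolving a symbol via the target's /proc/<pid>/root view should not
    // return a module path with that root prefix duplicated.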
struct bcc_symbol sym;
    std::string pid_root = "/proc/" + std::to_string(ruby_pid) + "/root/";
std::string module = pid_root + "usr/local/bin/ruby";
REQUIRE(bcc_resolve_symname(module.c_str(), "rb_gc_mark", 0x0, ruby_pid, nullptr, &sym) == 0);
REQUIRE(std::string(sym.module).find(pid_root, 1) == std::string::npos);
}
}
|
apache-2.0
|
alexstuart/ukftools
|
geteduGAIN.sh
|
538
|
#!/bin/bash
#
# Downloads the eduGAIN metadata aggregate
#
# NB: a HEAD request gets a "HTTP/1.1 405 Method Not Allowed"
# Tomasz suggested http://mds.edugain.org/feed-sha256.xml
#
# Configuration options:
DIRECTORY='/home/astuart4/eduGAIN/'
eduGAIN='http://mds.edugain.org/feed-sha256.xml'
eduGAINtest='http://mds-test.edugain.org'
# End of configuration options
DATE=$(/bin/date -u '+%Y-%m-%dT%H:%M:%SZ')
FILE="eduGAIN.xml.${DATE}"
echo "downloading $eduGAIN and storing in $FILE"
/usr/bin/curl "$eduGAIN" > "${DIRECTORY}${FILE}" 2>/dev/null
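# A minimal sanity check, sketched here as an assumed addition: abort when
# curl fails or the downloaded aggregate is empty.
if [ $? -ne 0 ] || [ ! -s "${DIRECTORY}${FILE}" ]; then
    echo "download of $eduGAIN failed" >&2
    exit 1
fi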
|
apache-2.0
|
blueboxgroup/neutron
|
neutron/tests/unit/test_l3_agent.py
|
100926
|
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import eventlet
import mock
import netaddr
from oslo.config import cfg
from oslo import messaging
from testtools import matchers
from neutron.agent.common import config as agent_config
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import config as l3_config
from neutron.agent.l3 import dvr
from neutron.agent.l3 import dvr_router
from neutron.agent.l3 import ha
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import router_info as l3router
from neutron.agent.linux import interface
from neutron.agent.linux import ra
from neutron.agent.metadata import driver as metadata_driver
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils as common_utils
from neutron.i18n import _LE
from neutron.openstack.common import log
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as p_const
from neutron.tests import base
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_ID = _uuid()
FAKE_ID_2 = _uuid()
FIP_PRI = 32768
class FakeDev(object):
def __init__(self, name):
self.name = name
def router_append_interface(router, count=1, ip_version=4, ra_mode=None,
addr_mode=None):
if ip_version == 4:
ip_pool = '35.4.%i.4'
cidr_pool = '35.4.%i.0/24'
gw_pool = '35.4.%i.1'
elif ip_version == 6:
ip_pool = 'fd01:%x::6'
cidr_pool = 'fd01:%x::/64'
gw_pool = 'fd01:%x::1'
else:
raise ValueError("Invalid ip_version: %s" % ip_version)
interfaces = router[l3_constants.INTERFACE_KEY]
current = sum(
[netaddr.IPNetwork(p['subnet']['cidr']).version == ip_version
for p in interfaces])
mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
mac_address.dialect = netaddr.mac_unix
for i in range(current, current + count):
interfaces.append(
{'id': _uuid(),
'network_id': _uuid(),
'admin_state_up': True,
'fixed_ips': [{'ip_address': ip_pool % i,
'subnet_id': _uuid()}],
'mac_address': str(mac_address),
'subnet': {'cidr': cidr_pool % i,
'gateway_ip': gw_pool % i,
'ipv6_ra_mode': ra_mode,
'ipv6_address_mode': addr_mode}})
mac_address.value += 1
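# Example: append two SLAAC-mode IPv6 interfaces to a router fixture:
#   router_append_interface(router, count=2, ip_version=6,
#                           ra_mode=l3_constants.IPV6_SLAAC)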
def prepare_router_data(ip_version=4, enable_snat=None, num_internal_ports=1,
enable_floating_ip=False, enable_ha=False,
extra_routes=False):
if ip_version == 4:
ip_addr = '19.4.4.4'
cidr = '19.4.4.0/24'
gateway_ip = '19.4.4.1'
elif ip_version == 6:
ip_addr = 'fd00::4'
cidr = 'fd00::/64'
gateway_ip = 'fd00::1'
else:
raise ValueError("Invalid ip_version: %s" % ip_version)
router_id = _uuid()
ex_gw_port = {'id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ee',
'network_id': _uuid(),
'fixed_ips': [{'ip_address': ip_addr,
'subnet_id': _uuid()}],
'subnet': {'cidr': cidr,
'gateway_ip': gateway_ip}}
routes = []
if extra_routes:
routes = [{'destination': '8.8.8.0/24', 'nexthop': ip_addr}]
router = {
'id': router_id,
'distributed': False,
l3_constants.INTERFACE_KEY: [],
'routes': routes,
'gw_port': ex_gw_port}
if enable_floating_ip:
router[l3_constants.FLOATINGIP_KEY] = [{
'id': _uuid(),
'port_id': _uuid(),
'floating_ip_address': '19.4.4.2',
'fixed_ip_address': '10.0.0.1'}]
router_append_interface(router, count=num_internal_ports,
ip_version=ip_version)
if enable_ha:
router['ha'] = True
router['ha_vr_id'] = 1
router[l3_constants.HA_INTERFACE_KEY] = get_ha_interface()
if enable_snat is not None:
router['enable_snat'] = enable_snat
return router
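# Typical use in the tests below:
#   router = prepare_router_data(enable_snat=True, num_internal_ports=2)
#   ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
#                            router=router)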
def _get_subnet_id(port):
return port['fixed_ips'][0]['subnet_id']
#TODO(jschwarz): This function is shared between the unit tests and
# the functional tests, and should be moved elsewhere (probably
# neutron/tests/common/).
def get_ha_interface(ip='169.254.192.1', mac='12:34:56:78:2b:5d'):
return {'admin_state_up': True,
'device_id': _uuid(),
'device_owner': 'network:router_ha_interface',
'fixed_ips': [{'ip_address': ip,
'subnet_id': _uuid()}],
'id': _uuid(),
'mac_address': mac,
'name': u'L3 HA Admin port 0',
'network_id': _uuid(),
'status': u'ACTIVE',
'subnet': {'cidr': '169.254.192.0/18',
'gateway_ip': '169.254.255.254',
'id': _uuid()},
'tenant_id': '',
'agent_id': _uuid(),
'agent_host': 'aaa',
'priority': 1}
class TestBasicRouterOperations(base.BaseTestCase):
def setUp(self):
super(TestBasicRouterOperations, self).setUp()
self.conf = agent_config.setup_conf()
self.conf.register_opts(base_config.core_opts)
self.conf.register_cli_opts(log.common_cli_opts)
self.conf.register_cli_opts(log.logging_cli_opts)
self.conf.register_opts(l3_config.OPTS)
self.conf.register_opts(ha.OPTS)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_use_namespaces_opts_helper(self.conf)
agent_config.register_root_helper(self.conf)
self.conf.register_opts(interface.OPTS)
self.conf.set_override('router_id', 'fake_id')
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.set_override('send_arp_for_ha', 1)
self.conf.set_override('state_path', '')
self.conf.root_helper = 'sudo'
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
mock.patch('neutron.agent.l3.ha.AgentMixin'
'._init_ha_conf_path').start()
mock.patch('neutron.agent.linux.keepalived.KeepalivedNotifierMixin'
'._get_full_config_file_path').start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.utils_replace_file_p = mock.patch(
'neutron.agent.linux.utils.replace_file')
self.utils_replace_file = self.utils_replace_file_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.send_arp_p = mock.patch(
'neutron.agent.linux.ip_lib.send_gratuitous_arp')
self.send_arp = self.send_arp_p.start()
self.send_arp_proxyarp_p = mock.patch(
'neutron.agent.linux.ip_lib.send_garp_for_proxyarp')
self.send_arp_proxyarp = self.send_arp_proxyarp_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
ip_rule = mock.patch('neutron.agent.linux.ip_lib.IpRule').start()
self.mock_rule = mock.MagicMock()
ip_rule.return_value = self.mock_rule
ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
self.mock_ip_dev = mock.MagicMock()
ip_dev.return_value = self.mock_ip_dev
self.l3pluginApi_cls_p = mock.patch(
'neutron.agent.l3.agent.L3PluginApi')
l3pluginApi_cls = self.l3pluginApi_cls_p.start()
self.plugin_api = mock.MagicMock()
l3pluginApi_cls.return_value = self.plugin_api
self.looping_call_p = mock.patch(
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
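        # Fixture: two centralized-SNAT ports, consumed by the DVR
        # gateway tests below.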
self.snat_ports = [{'subnet': {'cidr': '152.2.0.0/16',
'gateway_ip': '152.2.0.1',
'id': _uuid()},
'network_id': _uuid(),
'device_owner': 'network:router_centralized_snat',
'ip_cidr': '152.2.0.13/16',
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': _uuid(),
'ip_address': '152.2.0.13'}],
'id': _uuid(), 'device_id': _uuid()},
{'subnet': {'cidr': '152.10.0.0/16',
'gateway_ip': '152.10.0.1',
'id': _uuid()},
'network_id': _uuid(),
'device_owner': 'network:router_centralized_snat',
'ip_cidr': '152.10.0.13/16',
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': _uuid(),
'ip_address': '152.10.0.13'}],
'id': _uuid(), 'device_id': _uuid()}]
def _prepare_internal_network_data(self):
        port_id = _uuid()
        network_id = _uuid()
        router = prepare_router_data(num_internal_ports=2)
        router_id = router['id']
ri = l3router.RouterInfo(router_id, self.conf.root_helper,
router=router)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
cidr = '99.0.1.9/24'
mac = 'ca:fe:de:ad:be:ef'
port = {'network_id': network_id,
'id': port_id, 'ip_cidr': cidr,
'mac_address': mac}
return agent, ri, port
def test_periodic_sync_routers_task_raise_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_routers.side_effect = ValueError()
with mock.patch.object(agent, '_cleanup_namespaces') as f:
self.assertRaises(ValueError, agent.periodic_sync_routers_task,
agent.context)
self.assertTrue(agent.fullsync)
self.assertFalse(f.called)
def test_l3_initial_full_sync_done(self):
with mock.patch.object(l3_agent.L3NATAgent,
'periodic_sync_routers_task') as router_sync:
with mock.patch.object(eventlet, 'spawn_n'):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.after_start()
router_sync.assert_called_once_with(agent.context)
def test_periodic_sync_routers_task_call_clean_stale_namespaces(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_routers.return_value = []
with mock.patch.object(agent, '_cleanup_namespaces') as f:
agent.periodic_sync_routers_task(agent.context)
self.assertTrue(f.called)
def test_router_info_create(self):
        router_id = _uuid()
        ns = "ns-" + router_id
        ri = l3router.RouterInfo(router_id, self.conf.root_helper, {},
                                 ns_name=ns)
        self.assertTrue(ri.ns_name.endswith(router_id))
def test_router_info_create_with_router(self):
        router_id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'subnet_id': _uuid()}],
'subnet': {'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}}
router = {
'id': _uuid(),
'enable_snat': True,
'routes': [],
'gw_port': ex_gw_port}
ns = "ns-" + id
ri = l3router.RouterInfo(id, self.conf.root_helper, router, ns_name=ns)
self.assertTrue(ri.ns_name.endswith(id))
self.assertEqual(ri.router, router)
def test_agent_create(self):
l3_agent.L3NATAgent(HOSTNAME, self.conf)
def _test_internal_network_action(self, action):
agent, ri, port = self._prepare_internal_network_data()
interface_name = agent.get_internal_device_name(port['id'])
if action == 'add':
self.device_exists.return_value = False
agent.internal_network_added(ri, port)
self.assertEqual(self.mock_driver.plug.call_count, 1)
self.assertEqual(self.mock_driver.init_l3.call_count, 1)
self.send_arp.assert_called_once_with(ri.ns_name, interface_name,
'99.0.1.9',
mock.ANY, mock.ANY)
elif action == 'remove':
self.device_exists.return_value = True
agent.internal_network_removed(ri, port)
self.assertEqual(self.mock_driver.unplug.call_count, 1)
else:
raise Exception("Invalid action %s" % action)
def _test_internal_network_action_dist(self, action):
agent, ri, port = self._prepare_internal_network_data()
ri.router['distributed'] = True
ri.router['gw_port_host'] = HOSTNAME
agent.host = HOSTNAME
agent.conf.agent_mode = 'dvr_snat'
sn_port = {'fixed_ips': [{'ip_address': '20.0.0.31',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.31/24'}
if action == 'add':
self.device_exists.return_value = False
agent._map_internal_interfaces = mock.Mock(return_value=sn_port)
agent._snat_redirect_add = mock.Mock()
agent._set_subnet_info = mock.Mock()
agent._internal_network_added = mock.Mock()
agent.internal_network_added(ri, port)
self.assertEqual(agent._snat_redirect_add.call_count, 1)
self.assertEqual(agent._set_subnet_info.call_count, 1)
self.assertEqual(agent._internal_network_added.call_count, 2)
agent._internal_network_added.assert_called_with(
agent.get_snat_ns_name(ri.router['id']),
sn_port['network_id'],
sn_port['id'],
sn_port['ip_cidr'],
sn_port['mac_address'],
agent.get_snat_int_device_name(sn_port['id']),
dvr.SNAT_INT_DEV_PREFIX)
def test_agent_add_internal_network(self):
self._test_internal_network_action('add')
def test_agent_add_internal_network_dist(self):
self._test_internal_network_action_dist('add')
def test_agent_remove_internal_network(self):
self._test_internal_network_action('remove')
def _test_external_gateway_action(self, action, router):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router,
ns_name=agent.get_ns_name(router['id']))
# Special setup for dvr routers
if router.get('distributed'):
agent.conf.agent_mode = 'dvr_snat'
agent.host = HOSTNAME
agent._create_dvr_gateway = mock.Mock()
agent.get_snat_interfaces = mock.Mock(return_value=self.snat_ports)
ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
interface_name = agent.get_external_device_name(ex_gw_port['id'])
if action == 'add':
self.device_exists.return_value = False
fake_fip = {'floatingips': [{'id': _uuid(),
'floating_ip_address': '192.168.1.34',
'fixed_ip_address': '192.168.0.1',
'port_id': _uuid()}]}
router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
agent.external_gateway_added(ri, ex_gw_port, interface_name)
if not router.get('distributed'):
self.assertEqual(self.mock_driver.plug.call_count, 1)
self.assertEqual(self.mock_driver.init_l3.call_count, 1)
self.send_arp.assert_called_once_with(ri.ns_name,
interface_name,
'20.0.0.30',
mock.ANY, mock.ANY)
kwargs = {'preserve_ips': ['192.168.1.34/32'],
'namespace': 'qrouter-' + router['id'],
'gateway': '20.0.0.1',
'extra_subnets': [{'cidr': '172.16.0.0/24'}]}
self.mock_driver.init_l3.assert_called_with(interface_name,
['20.0.0.30/24'],
**kwargs)
else:
agent._create_dvr_gateway.assert_called_once_with(
ri, ex_gw_port, interface_name,
self.snat_ports)
elif action == 'remove':
self.device_exists.return_value = True
agent.external_gateway_removed(ri, ex_gw_port, interface_name)
self.assertEqual(self.mock_driver.unplug.call_count, 1)
else:
raise Exception("Invalid action %s" % action)
def _prepare_ext_gw_test(self, agent):
ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
interface_name = agent.get_external_device_name(ex_gw_port['id'])
self.device_exists.return_value = True
return interface_name, ex_gw_port
def test_external_gateway_updated(self):
router = prepare_router_data(num_internal_ports=2)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router,
ns_name=agent.get_ns_name(router['id']))
interface_name, ex_gw_port = self._prepare_ext_gw_test(agent)
fake_fip = {'floatingips': [{'id': _uuid(),
'floating_ip_address': '192.168.1.34',
'fixed_ip_address': '192.168.0.1',
'port_id': _uuid()}]}
router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
agent.external_gateway_updated(ri, ex_gw_port,
interface_name)
self.assertEqual(self.mock_driver.plug.call_count, 0)
self.assertEqual(self.mock_driver.init_l3.call_count, 1)
self.send_arp.assert_called_once_with(ri.ns_name, interface_name,
'20.0.0.30', mock.ANY, mock.ANY)
kwargs = {'preserve_ips': ['192.168.1.34/32'],
'namespace': 'qrouter-' + router['id'],
'gateway': '20.0.0.1',
'extra_subnets': [{'cidr': '172.16.0.0/24'}]}
self.mock_driver.init_l3.assert_called_with(interface_name,
['20.0.0.30/24'],
**kwargs)
def _test_ext_gw_updated_dvr_agent_mode(self, host,
agent_mode, expected_call_count):
router = prepare_router_data(num_internal_ports=2)
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
interface_name, ex_gw_port = self._prepare_ext_gw_test(agent)
agent._external_gateway_added = mock.Mock()
        # simulate a distributed router with the given agent mode and host
router['distributed'] = True
router['gw_port_host'] = host
agent.conf.agent_mode = agent_mode
agent.external_gateway_updated(ri, ex_gw_port,
interface_name)
        # verify the number of gateway adds matches expectations
self.assertEqual(expected_call_count,
agent._external_gateway_added.call_count)
def test_ext_gw_updated_dvr_agent_mode(self):
# no gateway should be added on dvr node
self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr', 0)
def test_ext_gw_updated_dvr_snat_agent_mode_no_host(self):
# no gateway should be added on dvr_snat node without host match
self._test_ext_gw_updated_dvr_agent_mode('any-foo', 'dvr_snat', 0)
def test_ext_gw_updated_dvr_snat_agent_mode_host(self):
# gateway should be added on dvr_snat node
self._test_ext_gw_updated_dvr_agent_mode(self.conf.host,
'dvr_snat', 1)
def test_agent_add_external_gateway(self):
router = prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('add', router)
def test_agent_add_external_gateway_dist(self):
router = prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('add', router)
def test_agent_remove_external_gateway(self):
router = prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('remove', router)
def test_agent_remove_external_gateway_dist(self):
router = prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('remove', router)
def _check_agent_method_called(self, agent, calls, namespace):
self.mock_ip.netns.execute.assert_has_calls(
[mock.call(call, check_exit_code=False) for call in calls],
any_order=True)
def _test_routing_table_update(self, namespace):
if not namespace:
self.conf.set_override('use_namespaces', False)
router_id = _uuid()
ri = l3router.RouterInfo(router_id, self.conf.root_helper, {})
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fake_route1 = {'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}
fake_route2 = {'destination': '135.207.111.111/32',
'nexthop': '1.2.3.4'}
agent._update_routing_table(ri, 'replace', fake_route1)
expected = [['ip', 'route', 'replace', 'to', '135.207.0.0/16',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
agent._update_routing_table(ri, 'delete', fake_route1)
expected = [['ip', 'route', 'delete', 'to', '135.207.0.0/16',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
agent._update_routing_table(ri, 'replace', fake_route2)
expected = [['ip', 'route', 'replace', 'to', '135.207.111.111/32',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
agent._update_routing_table(ri, 'delete', fake_route2)
expected = [['ip', 'route', 'delete', 'to', '135.207.111.111/32',
'via', '1.2.3.4']]
self._check_agent_method_called(agent, expected, namespace)
def test_agent_routing_table_updated(self):
self._test_routing_table_update(namespace=True)
def test_agent_routing_table_updated_no_namespace(self):
self._test_routing_table_update(namespace=False)
def test_routes_updated(self):
self._test_routes_updated(namespace=True)
def test_routes_updated_no_namespace(self):
self._test_routes_updated(namespace=False)
def _test_routes_updated(self, namespace=True):
if not namespace:
self.conf.set_override('use_namespaces', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
ri = l3router.RouterInfo(router_id, self.conf.root_helper, {})
ri.router = {}
fake_old_routes = []
fake_new_routes = [{'destination': "110.100.31.0/24",
'nexthop': "10.100.10.30"},
{'destination': "110.100.30.0/24",
'nexthop': "10.100.10.30"}]
ri.routes = fake_old_routes
ri.router['routes'] = fake_new_routes
agent.routes_updated(ri)
expected = [['ip', 'route', 'replace', 'to', '110.100.30.0/24',
'via', '10.100.10.30'],
['ip', 'route', 'replace', 'to', '110.100.31.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(agent, expected, namespace)
fake_new_routes = [{'destination': "110.100.30.0/24",
'nexthop': "10.100.10.30"}]
ri.router['routes'] = fake_new_routes
agent.routes_updated(ri)
expected = [['ip', 'route', 'delete', 'to', '110.100.31.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(agent, expected, namespace)
fake_new_routes = []
ri.router['routes'] = fake_new_routes
agent.routes_updated(ri)
expected = [['ip', 'route', 'delete', 'to', '110.100.30.0/24',
'via', '10.100.10.30']]
self._check_agent_method_called(agent, expected, namespace)
def _verify_snat_rules(self, rules, router, negate=False):
interfaces = router[l3_constants.INTERFACE_KEY]
source_cidrs = []
for iface in interfaces:
prefix = iface['subnet']['cidr'].split('/')[1]
source_cidr = "%s/%s" % (iface['fixed_ips'][0]['ip_address'],
prefix)
source_cidrs.append(source_cidr)
source_nat_ip = router['gw_port']['fixed_ips'][0]['ip_address']
interface_name = ('qg-%s' % router['gw_port']['id'])[:14]
expected_rules = [
'! -i %s ! -o %s -m conntrack ! --ctstate DNAT -j ACCEPT' %
(interface_name, interface_name),
'-o %s -j SNAT --to-source %s' % (interface_name, source_nat_ip)]
for r in rules:
if negate:
self.assertNotIn(r.rule, expected_rules)
else:
self.assertIn(r.rule, expected_rules)
def test__get_snat_idx_ipv4(self):
ip_cidr = '101.12.13.00/24'
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
snat_idx = agent._get_snat_idx(ip_cidr)
# 0x650C0D00 is numerical value of 101.12.13.00
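        # (byte by byte: 101 -> 0x65, 12 -> 0x0C, 13 -> 0x0D, 00 -> 0x00)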
self.assertEqual(0x650C0D00, snat_idx)
def test__get_snat_idx_ipv6(self):
ip_cidr = '2620:0:a03:e100::/64'
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
snat_idx = agent._get_snat_idx(ip_cidr)
# 0x3D345705 is 30 bit xor folded crc32 of the ip_cidr
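        # (the fold is presumably (crc >> 30) ^ (crc & 0x3FFFFFFF), keeping
        # the index within the 30-bit range used for route table ids)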
self.assertEqual(0x3D345705, snat_idx)
def test__get_snat_idx_ipv6_below_32768(self):
ip_cidr = 'd488::/30'
# crc32 of this ip_cidr is 0x1BD7
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
snat_idx = agent._get_snat_idx(ip_cidr)
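        # folded values below 32768 would collide with system-generated
        # route table ids, so they are presumably extended by 0x3FFFFFFF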
# 0x1BD7 + 0x3FFFFFFF = 0x40001BD6
self.assertEqual(0x40001BD6, snat_idx)
def test__map_internal_interfaces(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data(num_internal_ports=4)
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
test_port = {
'mac_address': '00:12:23:34:45:56',
'fixed_ips': [{'subnet_id': _get_subnet_id(
router[l3_constants.INTERFACE_KEY][0]),
'ip_address': '101.12.13.14'}]}
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
# test valid case
res_port = agent._map_internal_interfaces(ri,
internal_ports[0],
[test_port])
self.assertEqual(test_port, res_port)
# test invalid case
test_port['fixed_ips'][0]['subnet_id'] = 1234
res_ip = agent._map_internal_interfaces(ri,
internal_ports[0],
[test_port])
self.assertNotEqual(test_port, res_ip)
self.assertIsNone(res_ip)
def test_get_internal_port(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data(num_internal_ports=4)
subnet_ids = [_get_subnet_id(port) for port in
router[l3_constants.INTERFACE_KEY]]
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
# Test Basic cases
port = agent.get_internal_port(ri, subnet_ids[0])
fips = port.get('fixed_ips', [])
subnet_id = fips[0]['subnet_id']
self.assertEqual(subnet_ids[0], subnet_id)
port = agent.get_internal_port(ri, subnet_ids[1])
fips = port.get('fixed_ips', [])
subnet_id = fips[0]['subnet_id']
self.assertEqual(subnet_ids[1], subnet_id)
port = agent.get_internal_port(ri, subnet_ids[3])
fips = port.get('fixed_ips', [])
subnet_id = fips[0]['subnet_id']
self.assertEqual(subnet_ids[3], subnet_id)
# Test miss cases
no_port = agent.get_internal_port(ri, FAKE_ID)
self.assertIsNone(no_port)
port = agent.get_internal_port(ri, subnet_ids[0])
fips = port.get('fixed_ips', [])
subnet_id = fips[0]['subnet_id']
self.assertNotEqual(subnet_ids[3], subnet_id)
def test__set_subnet_arp_info(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data(num_internal_ports=2)
router['distributed'] = True
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
test_ports = [{'mac_address': '00:11:22:33:44:55',
'device_owner': 'network:dhcp',
'subnet_id': _get_subnet_id(ports[0]),
'fixed_ips': [{'ip_address': '1.2.3.4'}]}]
self.plugin_api.get_ports_by_subnet.return_value = test_ports
# Test basic case
ports[0]['subnet']['id'] = _get_subnet_id(ports[0])
agent._set_subnet_arp_info(ri, ports[0])
self.mock_ip_dev.neigh.add.assert_called_once_with(
4, '1.2.3.4', '00:11:22:33:44:55')
# Test negative case
router['distributed'] = False
agent._set_subnet_arp_info(ri, ports[0])
        self.assertEqual(1, self.mock_ip_dev.neigh.add.call_count)
def test_add_arp_entry(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data(num_internal_ports=2)
subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.7.23.11',
'mac_address': '00:11:22:33:44:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent._router_added(router['id'], router)
agent.add_arp_entry(None, payload)
agent.router_deleted(None, router['id'])
self.mock_ip_dev.neigh.add.assert_called_once_with(
4, '1.7.23.11', '00:11:22:33:44:55')
def test_add_arp_entry_no_routerinfo(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data(num_internal_ports=2)
subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.7.23.11',
'mac_address': '00:11:22:33:44:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent._update_arp_entry = mock.Mock()
agent.add_arp_entry(None, payload)
self.assertFalse(agent._update_arp_entry.called)
def test__update_arp_entry_with_no_subnet(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(
'foo_router_id', mock.ANY,
{'distributed': True, 'gw_port_host': HOSTNAME})
with mock.patch.object(l3_agent.ip_lib, 'IPDevice') as f:
agent._update_arp_entry(ri, mock.ANY, mock.ANY,
'foo_subnet_id', 'add')
self.assertFalse(f.call_count)
def test_del_arp_entry(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data(num_internal_ports=2)
subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0])
arp_table = {'ip_address': '1.5.25.15',
'mac_address': '00:44:33:22:11:55',
'subnet_id': subnet_id}
payload = {'arp_table': arp_table, 'router_id': router['id']}
agent._router_added(router['id'], router)
# first add the entry
agent.add_arp_entry(None, payload)
# now delete it
agent.del_arp_entry(None, payload)
self.mock_ip_dev.neigh.delete.assert_called_once_with(
4, '1.5.25.15', '00:44:33:22:11:55')
agent.router_deleted(None, router['id'])
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def _test_scan_fip_ports(self, ri, ip_list, IPDevice):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.device_exists.return_value = True
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = ip_list
agent.scan_fip_ports(ri)
def test_scan_fip_ports_restart_fips(self):
router = prepare_router_data()
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
ri.router['distributed'] = True
ip_list = [{'cidr': '111.2.3.4/32'}, {'cidr': '111.2.3.5/32'}]
self._test_scan_fip_ports(ri, ip_list)
self.assertEqual(ri.dist_fip_count, 2)
def test_scan_fip_ports_restart_none(self):
router = prepare_router_data()
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
ri.router['distributed'] = True
ip_list = []
self._test_scan_fip_ports(ri, ip_list)
self.assertEqual(ri.dist_fip_count, 0)
def test_scan_fip_ports_restart_zero(self):
router = prepare_router_data()
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
ri.router['distributed'] = True
ri.dist_fip_count = 0
ip_list = None
self._test_scan_fip_ports(ri, ip_list)
self.assertEqual(ri.dist_fip_count, 0)
def test_process_cent_router(self):
router = prepare_router_data()
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
self._test_process_router(ri)
def test_process_dist_router(self):
router = prepare_router_data()
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
subnet_id = _get_subnet_id(router[l3_constants.INTERFACE_KEY][0])
ri.router['distributed'] = True
ri.router['_snat_router_interfaces'] = [{
'fixed_ips': [{'subnet_id': subnet_id,
'ip_address': '1.2.3.4'}]}]
ri.router['gw_port_host'] = None
self._test_process_router(ri)
def _test_process_router(self, ri):
router = ri.router
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.host = HOSTNAME
fake_fip_id = 'fake_fip_id'
agent.create_dvr_fip_interfaces = mock.Mock()
agent.process_router_floating_ip_addresses = mock.Mock()
agent.process_router_floating_ip_nat_rules = mock.Mock()
agent.process_router_floating_ip_addresses.return_value = {
fake_fip_id: 'ACTIVE'}
agent.external_gateway_added = mock.Mock()
agent.external_gateway_updated = mock.Mock()
fake_floatingips1 = {'floatingips': [
{'id': fake_fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': _uuid(),
'host': HOSTNAME}]}
agent.process_router(ri)
ex_gw_port = agent._get_ex_gw_port(ri)
agent.process_router_floating_ip_addresses.assert_called_with(
ri, ex_gw_port)
agent.process_router_floating_ip_addresses.reset_mock()
agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
agent.process_router_floating_ip_nat_rules.reset_mock()
agent.external_gateway_added.reset_mock()
# remap floating IP to a new fixed ip
fake_floatingips2 = copy.deepcopy(fake_floatingips1)
fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
agent.process_router(ri)
ex_gw_port = agent._get_ex_gw_port(ri)
agent.process_router_floating_ip_addresses.assert_called_with(
ri, ex_gw_port)
agent.process_router_floating_ip_addresses.reset_mock()
agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
agent.process_router_floating_ip_nat_rules.reset_mock()
self.assertEqual(agent.external_gateway_added.call_count, 0)
self.assertEqual(agent.external_gateway_updated.call_count, 0)
agent.external_gateway_added.reset_mock()
agent.external_gateway_updated.reset_mock()
# change the ex_gw_port a bit to test gateway update
new_gw_port = copy.deepcopy(ri.router['gw_port'])
ri.router['gw_port'] = new_gw_port
old_ip = (netaddr.IPAddress(ri.router['gw_port']
['fixed_ips'][0]['ip_address']))
ri.router['gw_port']['fixed_ips'][0]['ip_address'] = str(old_ip + 1)
agent.process_router(ri)
ex_gw_port = agent._get_ex_gw_port(ri)
agent.process_router_floating_ip_addresses.reset_mock()
agent.process_router_floating_ip_nat_rules.reset_mock()
self.assertEqual(agent.external_gateway_added.call_count, 0)
self.assertEqual(agent.external_gateway_updated.call_count, 1)
# remove just the floating ips
del router[l3_constants.FLOATINGIP_KEY]
agent.process_router(ri)
ex_gw_port = agent._get_ex_gw_port(ri)
agent.process_router_floating_ip_addresses.assert_called_with(
ri, ex_gw_port)
agent.process_router_floating_ip_addresses.reset_mock()
agent.process_router_floating_ip_nat_rules.assert_called_with(ri)
agent.process_router_floating_ip_nat_rules.reset_mock()
# now no ports so state is torn down
del router[l3_constants.INTERFACE_KEY]
del router['gw_port']
agent.process_router(ri)
self.assertEqual(self.send_arp.call_count, 1)
distributed = ri.router.get('distributed', False)
self.assertEqual(agent.process_router_floating_ip_addresses.called,
distributed)
self.assertEqual(agent.process_router_floating_ip_nat_rules.called,
distributed)
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def _test_process_router_floating_ip_addresses_add(self, ri,
agent, IPDevice):
floating_ips = agent.get_floating_ips(ri)
fip_id = floating_ips[0]['id']
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = []
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
ex_gw_port = {'id': _uuid()}
with mock.patch.object(lla.LinkLocalAllocator, '_write'):
if ri.router['distributed']:
agent.create_dvr_fip_interfaces(ri, ex_gw_port)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, ex_gw_port)
self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
fip_statuses)
device.addr.add.assert_called_once_with(4, '15.1.2.3/32', '15.1.2.3')
def test_process_router_floating_ip_nat_rules_add(self):
fip = {
'id': _uuid(), 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1'
}
ri = mock.MagicMock()
ri.router['distributed'].__nonzero__ = lambda self: False
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.get_floating_ips = mock.Mock(return_value=[fip])
agent.process_router_floating_ip_nat_rules(ri)
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag.assert_called_once_with('floating_ip')
rules = agent.floating_forward_rules('15.1.2.3', '192.168.0.1')
for chain, rule in rules:
nat.add_rule.assert_any_call(chain, rule, tag='floating_ip')
def test_process_router_cent_floating_ip_add(self):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
router = prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._test_process_router_floating_ip_addresses_add(ri, agent)
def test_process_router_dist_floating_ip_add(self):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'host': HOSTNAME,
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid()},
{'id': _uuid(),
'host': 'some-other-host',
'floating_ip_address': '15.1.2.4',
'fixed_ip_address': '192.168.0.10',
'floating_network_id': _uuid(),
'port_id': _uuid()}]}
router = prepare_router_data(enable_snat=True)
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router['distributed'] = True
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
ri.dist_fip_count = 0
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.host = HOSTNAME
agent.agent_gateway_port = (
{'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
)
self._test_process_router_floating_ip_addresses_add(ri, agent)
def test_get_router_cidrs_returns_cidrs(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = mock.MagicMock()
ri.is_ha = False
addresses = ['15.1.2.2/24', '15.1.2.3/32']
device = mock.MagicMock()
device.addr.list.return_value = [{'cidr': addresses[0]},
{'cidr': addresses[1]}]
self.assertEqual(set(addresses), agent._get_router_cidrs(ri, device))
def test_get_router_cidrs_returns_ha_cidrs(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = mock.MagicMock()
ri.is_ha = True
device = mock.MagicMock()
device.name.return_value = 'eth2'
addresses = ['15.1.2.2/24', '15.1.2.3/32']
agent._ha_get_existing_cidrs = mock.MagicMock()
agent._ha_get_existing_cidrs.return_value = addresses
self.assertEqual(set(addresses), agent._get_router_cidrs(ri, device))
# TODO(mrsmith): refactor for DVR cases
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_addresses_remove(self, IPDevice):
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
ri = mock.MagicMock()
ri.router.get.return_value = []
type(ri).is_ha = mock.PropertyMock(return_value=False)
ri.router['distributed'].__nonzero__ = lambda self: False
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertEqual({}, fip_statuses)
device.addr.delete.assert_called_once_with(4, '15.1.2.3/32')
self.mock_driver.delete_conntrack_state.assert_called_once_with(
root_helper=self.conf.root_helper,
namespace=ri.ns_name,
ip='15.1.2.3/32')
def test_process_router_floating_ip_nat_rules_remove(self):
ri = mock.MagicMock()
ri.router.get.return_value = []
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.process_router_floating_ip_nat_rules(ri)
        nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag.assert_called_once_with('floating_ip')
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_addresses_remap(self, IPDevice):
fip_id = _uuid()
fip = {
'id': fip_id, 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.2'
}
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}]
ri = mock.MagicMock()
ri.router['distributed'].__nonzero__ = lambda self: False
type(ri).is_ha = mock.PropertyMock(return_value=False)
ri.router.get.return_value = [fip]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE},
fip_statuses)
self.assertFalse(device.addr.add.called)
self.assertFalse(device.addr.delete.called)
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_with_disabled_floating_ip(self, IPDevice):
fip_id = _uuid()
fip = {
'id': fip_id, 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.2'
}
ri = mock.MagicMock()
ri.floating_ips = [fip]
ri.router.get.return_value = []
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertIsNone(fip_statuses.get(fip_id))
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
def test_process_router_floating_ip_with_device_add_error(self, IPDevice):
IPDevice.return_value = device = mock.Mock()
device.addr.add.side_effect = RuntimeError()
device.addr.list.return_value = []
fip_id = _uuid()
fip = {
'id': fip_id, 'port_id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.2'
}
ri = mock.MagicMock()
type(ri).is_ha = mock.PropertyMock(return_value=False)
ri.router.get.return_value = [fip]
ri.router['distributed'].__nonzero__ = lambda self: False
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
fip_statuses = agent.process_router_floating_ip_addresses(
ri, {'id': _uuid()})
self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ERROR},
fip_statuses)
def test_process_router_snat_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data(enable_snat=True)
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
# Process with NAT
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Reprocess without NAT
router['enable_snat'] = False
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
        # Set difference does not work with IpTablesRule instances
        # (likely __eq__ without a matching __hash__), so use a list
        # comprehension
nat_rules_delta = [r for r in orig_nat_rules
if r not in ri.iptables_manager.ipv4['nat'].rules]
self.assertEqual(len(nat_rules_delta), 2)
self._verify_snat_rules(nat_rules_delta, router)
self.assertEqual(self.send_arp.call_count, 1)
def test_process_router_snat_enabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data(enable_snat=False)
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
# Process without NAT
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Reprocess with NAT
router['enable_snat'] = True
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
        # Set difference does not work with IpTablesRule instances
        # (likely __eq__ without a matching __hash__), so use a list
        # comprehension
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertEqual(len(nat_rules_delta), 2)
self._verify_snat_rules(nat_rules_delta, router)
self.assertEqual(self.send_arp.call_count, 1)
def test_process_router_interface_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
# Process with NAT
agent.process_router(ri)
# Add an interface and reprocess
router_append_interface(router)
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
# send_arp is called both times process_router is called
self.assertEqual(self.send_arp.call_count, 2)
def test_process_ipv6_only_gw(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data(ip_version=6)
# Get NAT rules without the gw_port
gw_port = router['gw_port']
router['gw_port'] = None
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Get NAT rules with the gw_port
router['gw_port'] = gw_port
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
with mock.patch.object(
agent,
'external_gateway_nat_rules') as external_gateway_nat_rules:
agent.process_router(ri)
new_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# There should be no change with the NAT rules
self.assertFalse(external_gateway_nat_rules.called)
self.assertEqual(orig_nat_rules, new_nat_rules)
def _process_router_ipv6_interface_added(
self, router, ra_mode=None, addr_mode=None):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
# Process with NAT
agent.process_router(ri)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Add an IPv6 interface and reprocess
router_append_interface(router, count=1, ip_version=6, ra_mode=ra_mode,
addr_mode=addr_mode)
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
# IPv4 NAT rules should not be changed by adding an IPv6 interface
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertFalse(nat_rules_delta)
return ri
def _expected_call_lookup_ri_process(self, ri, process):
"""Expected call if a process is looked up in a router instance."""
return [mock.call(cfg.CONF,
ri.router['id'],
self.conf.root_helper,
ri.ns_name,
process)]
def _assert_ri_process_enabled(self, ri, process):
"""Verify that process was enabled for a router instance."""
expected_calls = self._expected_call_lookup_ri_process(ri, process)
expected_calls.append(mock.call().enable(mock.ANY, True))
self.assertEqual(expected_calls, self.external_process.mock_calls)
def _assert_ri_process_disabled(self, ri, process):
"""Verify that process was disabled for a router instance."""
expected_calls = self._expected_call_lookup_ri_process(ri, process)
expected_calls.append(mock.call().disable())
self.assertEqual(expected_calls, self.external_process.mock_calls)
def test_process_router_ipv6_interface_added(self):
router = prepare_router_data()
ri = self._process_router_ipv6_interface_added(router)
self._assert_ri_process_enabled(ri, 'radvd')
# Expect radvd configured without prefix
self.assertNotIn('prefix',
self.utils_replace_file.call_args[0][1].split())
def test_process_router_ipv6_slaac_interface_added(self):
router = prepare_router_data()
ri = self._process_router_ipv6_interface_added(
router, ra_mode=l3_constants.IPV6_SLAAC)
self._assert_ri_process_enabled(ri, 'radvd')
# Expect radvd configured with prefix
self.assertIn('prefix',
self.utils_replace_file.call_args[0][1].split())
def test_process_router_ipv6v4_interface_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
# Process with NAT
agent.process_router(ri)
# Add an IPv4 and IPv6 interface and reprocess
router_append_interface(router, count=1, ip_version=4)
router_append_interface(router, count=1, ip_version=6)
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
self._assert_ri_process_enabled(ri, 'radvd')
def test_process_router_interface_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data(num_internal_ports=2)
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
# Process with NAT
agent.process_router(ri)
        # Remove an interface and reprocess
del router[l3_constants.INTERFACE_KEY][1]
# Reassign the router object to RouterInfo
ri.router = router
agent.process_router(ri)
# send_arp is called both times process_router is called
self.assertEqual(self.send_arp.call_count, 2)
def test_process_router_ipv6_interface_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
ri.router = router
agent.process_router(ri)
# Add an IPv6 interface and reprocess
router_append_interface(router, count=1, ip_version=6)
agent.process_router(ri)
self._assert_ri_process_enabled(ri, 'radvd')
# Reset the calls so we can check for disable radvd
self.external_process.reset_mock()
# Remove the IPv6 interface and reprocess
del router[l3_constants.INTERFACE_KEY][1]
agent.process_router(ri)
self._assert_ri_process_disabled(ri, 'radvd')
def test_process_router_internal_network_added_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
with mock.patch.object(
l3_agent.L3NATAgent,
'internal_network_added') as internal_network_added:
# raise RuntimeError to simulate that an unexpected exception
# occurs
internal_network_added.side_effect = RuntimeError
self.assertRaises(RuntimeError, agent.process_router, ri)
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
# The unexpected exception has been fixed manually
internal_network_added.side_effect = None
            # periodic_sync_routers_task finds out that _rpc_loop failed to
            # process the router last time; it will retry in the next run.
agent.process_router(ri)
# We were able to add the port to ri.internal_ports
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_internal_network_removed_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
# add an internal port
agent.process_router(ri)
with mock.patch.object(
l3_agent.L3NATAgent,
'internal_network_removed') as internal_net_removed:
# raise RuntimeError to simulate that an unexpected exception
# occurs
internal_net_removed.side_effect = RuntimeError
ri.internal_ports[0]['admin_state_up'] = False
# The above port is set to down state, remove it.
self.assertRaises(RuntimeError, agent.process_router, ri)
self.assertIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
# The unexpected exception has been fixed manually
internal_net_removed.side_effect = None
            # periodic_sync_routers_task finds out that _rpc_loop failed to
            # process the router last time; it will retry in the next run.
agent.process_router(ri)
# We were able to remove the port from ri.internal_ports
self.assertNotIn(
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_floatingip_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
agent.process_router(ri)
            # Assert the call setting the floating IP to ACTIVE was made
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE})
mock_update_fip_status.reset_mock()
# Process the router again, this time without floating IPs
router[l3_constants.FLOATINGIP_KEY] = []
ri.router = router
agent.process_router(ri)
            # Assert the call setting the floating IP to DOWN was made
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_DOWN})
def test_process_router_floatingip_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.process_router_floating_ip_addresses = mock.Mock()
agent.process_router_floating_ip_addresses.side_effect = RuntimeError
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = prepare_router_data(num_internal_ports=1)
router[l3_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}]
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
agent.external_gateway_added = mock.Mock()
agent.process_router(ri)
            # Assert the call setting the floating IP to ERROR was made
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: l3_constants.FLOATINGIP_STATUS_ERROR})
def test_handle_router_snat_rules_distributed_without_snat_manager(self):
ri = dvr_router.DvrRouter(
'foo_router_id', mock.ANY, {'distributed': True})
ri.iptables_manager = mock.Mock()
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(l3_agent.LOG, 'debug') as log_debug:
agent._handle_router_snat_rules(
ri, mock.ANY, mock.ANY, mock.ANY)
self.assertIsNone(ri.snat_iptables_manager)
self.assertFalse(ri.iptables_manager.called)
self.assertTrue(log_debug.called)
def test_handle_router_snat_rules_add_back_jump(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = mock.MagicMock()
port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
ri.router = {'distributed': False}
agent._handle_router_snat_rules(ri, port, "iface", "add_rules")
nat = ri.iptables_manager.ipv4['nat']
nat.empty_chain.assert_any_call('snat')
nat.add_rule.assert_any_call('snat', '-j $float-snat')
for call in nat.mock_calls:
name, args, kwargs = call
if name == 'add_rule':
self.assertEqual(args, ('snat', '-j $float-snat'))
self.assertEqual(kwargs, {})
break
def test_handle_router_snat_rules_add_rules(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(_uuid(), self.conf.root_helper, {})
ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
ri.router = {'distributed': False}
agent._handle_router_snat_rules(ri, ex_gw_port,
"iface", "add_rules")
nat_rules = map(str, ri.iptables_manager.ipv4['nat'].rules)
wrap_name = ri.iptables_manager.wrap_name
jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name,
wrap_name)
snat_rule = ("-A %s-snat -o iface -j SNAT --to-source %s") % (
wrap_name, ex_gw_port['fixed_ips'][0]['ip_address'])
self.assertIn(jump_float_rule, nat_rules)
self.assertIn(snat_rule, nat_rules)
self.assertThat(nat_rules.index(jump_float_rule),
matchers.LessThan(nat_rules.index(snat_rule)))
def test_process_router_delete_stale_internal_devices(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [FakeDev('qr-a1b2c3d4-e5'),
FakeDev('qr-b2c3d4e5-f6')]
stale_devnames = [dev.name for dev in stale_devlist]
get_devices_return = []
get_devices_return.extend(stale_devlist)
self.mock_ip.get_devices.return_value = get_devices_return
router = prepare_router_data(enable_snat=True, num_internal_ports=1)
ri = l3router.RouterInfo(router['id'],
self.conf.root_helper,
router=router)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
self.assertEqual(len(internal_ports), 1)
internal_port = internal_ports[0]
with contextlib.nested(mock.patch.object(l3_agent.L3NATAgent,
'internal_network_removed'),
mock.patch.object(l3_agent.L3NATAgent,
'internal_network_added'),
mock.patch.object(l3_agent.L3NATAgent,
'external_gateway_removed'),
mock.patch.object(l3_agent.L3NATAgent,
'external_gateway_added')
) as (internal_network_removed,
internal_network_added,
external_gateway_removed,
external_gateway_added):
agent.process_router(ri)
self.assertEqual(external_gateway_added.call_count, 1)
self.assertFalse(external_gateway_removed.called)
self.assertFalse(internal_network_removed.called)
internal_network_added.assert_called_once_with(
ri, internal_port)
self.assertEqual(self.mock_driver.unplug.call_count,
len(stale_devnames))
calls = [mock.call(stale_devname,
namespace=ri.ns_name,
prefix=l3_agent.INTERNAL_DEV_PREFIX)
for stale_devname in stale_devnames]
self.mock_driver.unplug.assert_has_calls(calls, any_order=True)
def test_process_router_delete_stale_external_devices(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [FakeDev('qg-a1b2c3d4-e5')]
stale_devnames = [dev.name for dev in stale_devlist]
router = prepare_router_data(enable_snat=True, num_internal_ports=1)
del router['gw_port']
ri = l3router.RouterInfo(router['id'],
self.conf.root_helper,
router=router)
self.mock_ip.get_devices.return_value = stale_devlist
agent.process_router(ri)
self.mock_driver.unplug.assert_called_with(
stale_devnames[0],
bridge="br-ex",
namespace=ri.ns_name,
prefix=l3_agent.EXTERNAL_DEV_PREFIX)
def test_router_deleted(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_deleted(None, FAKE_ID)
self.assertEqual(1, agent._queue.add.call_count)
def test_routers_updated(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.routers_updated(None, [FAKE_ID])
self.assertEqual(1, agent._queue.add.call_count)
def test_removed_from_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_removed_from_agent(None, {'router_id': FAKE_ID})
self.assertEqual(1, agent._queue.add.call_count)
def test_added_to_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_added_to_agent(None, [FAKE_ID])
self.assertEqual(1, agent._queue.add.call_count)
def test_destroy_fip_namespace(self):
namespaces = ['qrouter-foo', 'qrouter-bar']
self.mock_ip.get_namespaces.return_value = namespaces
self.mock_ip.get_devices.return_value = [FakeDev('fpr-aaaa'),
FakeDev('fg-aaaa')]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._destroy_fip_namespace(namespaces[0])
self.mock_driver.unplug.assert_called_once_with('fg-aaaa',
bridge='br-ex',
prefix='fg-',
namespace='qrouter'
'-foo')
self.mock_ip.del_veth.assert_called_once_with('fpr-aaaa')
def test_destroy_namespace(self):
namespace = 'qrouter-bar'
self.mock_ip.get_namespaces.return_value = [namespace]
self.mock_ip.get_devices.return_value = [FakeDev('qr-aaaa'),
FakeDev('rfp-aaaa')]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._destroy_namespace(namespace)
self.mock_driver.unplug.assert_called_once_with('qr-aaaa',
prefix='qr-',
namespace='qrouter'
'-bar')
self.mock_ip.del_veth.assert_called_once_with('rfp-aaaa')
def test_destroy_router_namespace_skips_ns_removal(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._destroy_router_namespace("fakens")
self.assertEqual(self.mock_ip.netns.delete.call_count, 0)
def test_destroy_router_namespace_removes_ns(self):
self.conf.set_override('router_delete_namespaces', True)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._destroy_router_namespace("fakens")
self.mock_ip.netns.delete.assert_called_once_with("fakens")
def _configure_metadata_proxy(self, enableflag=True):
if not enableflag:
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
router = {'id': router_id,
'external_gateway_info': {},
'routes': [],
'distributed': False}
driver = metadata_driver.MetadataDriver
with mock.patch.object(
driver, '_destroy_metadata_proxy') as destroy_proxy:
with mock.patch.object(
driver, '_spawn_metadata_proxy') as spawn_proxy:
agent._process_added_router(router)
if enableflag:
spawn_proxy.assert_called_with(router_id,
mock.ANY,
mock.ANY)
else:
self.assertFalse(spawn_proxy.call_count)
agent._router_removed(router_id)
if enableflag:
destroy_proxy.assert_called_with(router_id,
mock.ANY,
mock.ANY)
else:
self.assertFalse(destroy_proxy.call_count)
def test_enable_metadata_proxy(self):
self._configure_metadata_proxy()
def test_disable_metadata_proxy_spawn(self):
self._configure_metadata_proxy(enableflag=False)
def test_router_id_specified_in_conf(self):
self.conf.set_override('use_namespaces', False)
self.conf.set_override('router_id', '')
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
self.conf.set_override('router_id', '1234')
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual('1234', agent.conf.router_id)
self.assertFalse(agent._clean_stale_namespaces)
def test_process_router_if_compatible_with_no_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_router_if_compatible_with_cached_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.assertFalse(self.plugin_api.get_external_network_id.called)
def test_process_router_if_compatible_with_stale_cached_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'bbb'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_router_if_compatible_w_no_ext_net_and_2_net_plugin(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent.router_info = {}
self.plugin_api.get_external_network_id.side_effect = (
n_exc.TooManyExternalNetworks())
self.assertRaises(n_exc.TooManyExternalNetworks,
agent._process_router_if_compatible,
router)
self.assertNotIn(router['id'], agent.router_info)
def test_process_router_if_compatible_with_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'bbb'}}
agent.router_info = {}
self.conf.set_override('gateway_external_network_id', 'aaa')
self.assertRaises(n_exc.RouterNotCompatibleWithAgent,
agent._process_router_if_compatible,
router)
self.assertNotIn(router['id'], agent.router_info)
def test_process_router_if_compatible_with_no_bridge_no_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent.router_info = {}
self.conf.set_override('external_network_bridge', '')
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
def test_nonexistent_interface_driver(self):
self.conf.set_override('interface_driver', None)
with mock.patch.object(l3_agent, 'LOG') as log:
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
msg = 'An interface driver must be specified'
log.error.assert_called_once_with(msg)
self.conf.set_override('interface_driver', 'wrong_driver')
with mock.patch.object(l3_agent, 'LOG') as log:
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
msg = _LE("Error importing interface driver '%s'")
log.error.assert_called_once_with(msg, 'wrong_driver')
def _cleanup_namespace_test(self,
stale_namespace_list,
router_list,
other_namespaces):
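        # Build the namespace listing the agent will see: stale namespaces
        # that should be deleted, namespaces of known routers that must be
        # kept, and unrelated namespaces that must be ignored.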
self.conf.set_override('router_delete_namespaces', True)
good_namespace_list = [l3_agent.NS_PREFIX + r['id']
for r in router_list]
good_namespace_list += [dvr.SNAT_NS_PREFIX + r['id']
for r in router_list]
self.mock_ip.get_namespaces.return_value = (stale_namespace_list +
good_namespace_list +
other_namespaces)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertTrue(agent._clean_stale_namespaces)
pm = self.external_process.return_value
pm.reset_mock()
agent._destroy_router_namespace = mock.MagicMock()
agent._destroy_snat_namespace = mock.MagicMock()
ns_list = agent._list_namespaces()
agent._cleanup_namespaces(ns_list, [r['id'] for r in router_list])
        # Each stale qrouter namespace should be destroyed individually;
        # the remaining stale namespaces are snat namespaces.
qrouters = [n for n in stale_namespace_list
if n.startswith(l3_agent.NS_PREFIX)]
self.assertEqual(agent._destroy_router_namespace.call_count,
len(qrouters))
self.assertEqual(agent._destroy_snat_namespace.call_count,
len(stale_namespace_list) - len(qrouters))
expected_args = [mock.call(ns) for ns in qrouters]
agent._destroy_router_namespace.assert_has_calls(expected_args,
any_order=True)
self.assertFalse(agent._clean_stale_namespaces)
def test_cleanup_namespace(self):
self.conf.set_override('router_id', None)
stale_namespaces = [l3_agent.NS_PREFIX + 'foo',
l3_agent.NS_PREFIX + 'bar',
dvr.SNAT_NS_PREFIX + 'foo']
other_namespaces = ['unknown']
self._cleanup_namespace_test(stale_namespaces,
[],
other_namespaces)
def test_cleanup_namespace_with_registered_router_ids(self):
self.conf.set_override('router_id', None)
stale_namespaces = [l3_agent.NS_PREFIX + 'cccc',
l3_agent.NS_PREFIX + 'eeeee',
dvr.SNAT_NS_PREFIX + 'fffff']
router_list = [{'id': 'foo', 'distributed': False},
{'id': 'aaaa', 'distributed': False}]
other_namespaces = ['qdhcp-aabbcc', 'unknown']
self._cleanup_namespace_test(stale_namespaces,
router_list,
other_namespaces)
def test_cleanup_namespace_with_conf_router_id(self):
self.conf.set_override('router_id', 'bbbbb')
stale_namespaces = [l3_agent.NS_PREFIX + 'cccc',
l3_agent.NS_PREFIX + 'eeeee',
l3_agent.NS_PREFIX + self.conf.router_id]
router_list = [{'id': 'foo', 'distributed': False},
{'id': 'aaaa', 'distributed': False}]
other_namespaces = ['qdhcp-aabbcc', 'unknown']
self._cleanup_namespace_test(stale_namespaces,
router_list,
other_namespaces)
def test_create_dvr_gateway(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
ri = l3router.RouterInfo(router['id'], self.conf.root_helper,
router=router)
port_id = _uuid()
dvr_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': port_id,
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
interface_name = agent.get_snat_int_device_name(port_id)
self.device_exists.return_value = False
agent._create_dvr_gateway(ri, dvr_gw_port, interface_name,
self.snat_ports)
# check 2 internal ports are plugged
# check 1 ext-gw-port is plugged
self.assertEqual(self.mock_driver.plug.call_count, 3)
self.assertEqual(self.mock_driver.init_l3.call_count, 3)
def test_agent_gateway_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
network_id = _uuid()
port_id = _uuid()
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': port_id,
'network_id': network_id,
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
fip_ns_name = (
agent.get_fip_ns_name(str(network_id)))
interface_name = (
agent.get_fip_ext_device_name(port_id))
self.device_exists.return_value = False
agent.agent_gateway_added(fip_ns_name, agent_gw_port,
interface_name)
self.assertEqual(self.mock_driver.plug.call_count, 1)
self.assertEqual(self.mock_driver.init_l3.call_count, 1)
if self.conf.use_namespaces:
self.send_arp.assert_called_once_with(fip_ns_name, interface_name,
'20.0.0.30',
mock.ANY, mock.ANY)
else:
self.utils_exec.assert_any_call(
check_exit_code=True, root_helper=self.conf.root_helper)
def test_create_rtr_2_fip_link(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
fip = {'id': _uuid(),
'host': HOSTNAME,
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid()}
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
rtr_2_fip_name = agent.get_rtr_int_device_name(ri.router_id)
fip_2_rtr_name = agent.get_fip_int_device_name(ri.router_id)
fip_ns_name = agent.get_fip_ns_name(str(fip['floating_network_id']))
with mock.patch.object(lla.LinkLocalAllocator, '_write'):
self.device_exists.return_value = False
agent.create_rtr_2_fip_link(ri, fip['floating_network_id'])
self.mock_ip.add_veth.assert_called_with(rtr_2_fip_name,
fip_2_rtr_name, fip_ns_name)
        # TODO(mrsmith): add more asserts
self.mock_ip_dev.route.add_gateway.assert_called_once_with(
'169.254.31.29', table=16)
# TODO(mrsmith): test _create_agent_gateway_port
def test_create_rtr_2_fip_link_already_exists(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
self.device_exists.return_value = True
with mock.patch.object(lla.LinkLocalAllocator, '_write'):
agent.create_rtr_2_fip_link(ri, {})
self.assertFalse(self.mock_ip.add_veth.called)
def test_floating_ip_added_dist(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
fip = {'id': _uuid(),
'host': HOSTNAME,
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid()}
agent.agent_gateway_port = agent_gw_port
ri.rtr_fip_subnet = lla.LinkLocalAddressPair('169.254.30.42/31')
ri.dist_fip_count = 0
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
agent.floating_ip_added_dist(ri, fip, ip_cidr)
self.mock_rule.add_rule_from.assert_called_with('192.168.0.1',
16, FIP_PRI)
# TODO(mrsmith): add more asserts
@mock.patch.object(l3_agent.L3NATAgent, '_fip_ns_unsubscribe')
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_floating_ip_removed_dist(self, write, unsubscribe):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = prepare_router_data()
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
fip_cidr = '11.22.33.44/24'
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper,
router=router)
ri.dist_fip_count = 2
agent.fip_ns_subscribers.add(ri.router_id)
ri.floating_ips_dict['11.22.33.44'] = FIP_PRI
ri.fip_2_rtr = '11.22.33.42'
ri.rtr_2_fip = '11.22.33.40'
agent.agent_gateway_port = agent_gw_port
s = lla.LinkLocalAddressPair('169.254.30.42/31')
ri.rtr_fip_subnet = s
agent.floating_ip_removed_dist(ri, fip_cidr)
self.mock_rule.delete_rule_priority.assert_called_with(FIP_PRI)
self.mock_ip_dev.route.delete_route.assert_called_with(fip_cidr,
str(s.ip))
self.assertFalse(unsubscribe.called, '_fip_ns_unsubscribe called!')
with mock.patch.object(agent, '_destroy_fip_namespace') as f:
ri.dist_fip_count = 1
fip_ns_name = agent.get_fip_ns_name(
str(agent._fetch_external_net_id()))
ri.rtr_fip_subnet = agent.local_subnets.allocate(ri.router_id)
_, fip_to_rtr = ri.rtr_fip_subnet.get_pair()
agent.floating_ip_removed_dist(ri, fip_cidr)
self.mock_ip.del_veth.assert_called_once_with(
agent.get_fip_int_device_name(router['id']))
self.mock_ip_dev.route.delete_gateway.assert_called_once_with(
str(fip_to_rtr.ip), table=16)
f.assert_called_once_with(fip_ns_name)
unsubscribe.assert_called_once_with(ri.router_id)
def test_get_service_plugin_list(self):
service_plugins = [p_const.L3_ROUTER_NAT]
self.plugin_api.get_service_plugin_list.return_value = service_plugins
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual(service_plugins, agent.neutron_service_plugins)
self.assertTrue(self.plugin_api.get_service_plugin_list.called)
def test_get_service_plugin_list_failed(self):
raise_rpc = messaging.RemoteError()
self.plugin_api.get_service_plugin_list.side_effect = raise_rpc
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertIsNone(agent.neutron_service_plugins)
self.assertTrue(self.plugin_api.get_service_plugin_list.called)
def test_get_service_plugin_list_retried(self):
raise_timeout = messaging.MessagingTimeout()
# Raise a timeout the first 2 times it calls
# get_service_plugin_list then return a empty tuple
self.plugin_api.get_service_plugin_list.side_effect = (
raise_timeout, raise_timeout, tuple()
)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual(agent.neutron_service_plugins, tuple())
def test_get_service_plugin_list_retried_max(self):
raise_timeout = messaging.MessagingTimeout()
# Raise a timeout 5 times
self.plugin_api.get_service_plugin_list.side_effect = (
(raise_timeout, ) * 5
)
self.assertRaises(messaging.MessagingTimeout, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
def test__fip_ns_subscribe_is_first_true(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
is_first = agent._fip_ns_subscribe(router_id)
self.assertTrue(is_first)
self.assertEqual(len(agent.fip_ns_subscribers), 1)
def test__fip_ns_subscribe_is_first_false(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
router2_id = _uuid()
agent._fip_ns_subscribe(router_id)
is_first = agent._fip_ns_subscribe(router2_id)
self.assertFalse(is_first)
self.assertEqual(len(agent.fip_ns_subscribers), 2)
def test__fip_ns_unsubscribe_is_last_true(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
agent.fip_ns_subscribers.add(router_id)
is_last = agent._fip_ns_unsubscribe(router_id)
self.assertTrue(is_last)
self.assertEqual(len(agent.fip_ns_subscribers), 0)
def test__fip_ns_unsubscribe_is_last_false(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
router2_id = _uuid()
agent.fip_ns_subscribers.add(router_id)
agent.fip_ns_subscribers.add(router2_id)
is_last = agent._fip_ns_unsubscribe(router_id)
self.assertFalse(is_last)
self.assertEqual(len(agent.fip_ns_subscribers), 1)
def test_external_gateway_removed_ext_gw_port_and_fip(self):
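        # Simulate a DVR agent removing its external gateway while a floating
        # IP is still configured: the FIP namespace, its veth pair and the
        # floating-ip NAT rules should all be torn down.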
self.conf.set_override('state_path', '/tmp')
self.conf.set_override('router_delete_namespaces', True)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = 'dvr'
agent.agent_gateway_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'subnet_id': _uuid()}],
'subnet': {'gateway_ip': '20.0.0.1'},
'id': _uuid(),
'network_id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'ip_cidr': '20.0.0.30/24'}
external_net_id = _uuid()
agent._fetch_external_net_id = mock.Mock(return_value=external_net_id)
router = prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
ri = dvr_router.DvrRouter(router['id'], self.conf.root_helper, router)
vm_floating_ip = '19.4.4.2'
ri.floating_ips_dict[vm_floating_ip] = FIP_PRI
ri.dist_fip_count = 1
ri.ex_gw_port = ri.router['gw_port']
del ri.router['gw_port']
ri.rtr_fip_subnet = agent.local_subnets.allocate(ri.router_id)
_, fip_to_rtr = ri.rtr_fip_subnet.get_pair()
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag = mock.Mock()
nat.add_rule = mock.Mock()
self.mock_ip.get_devices.return_value = [
FakeDev(agent.get_fip_ext_device_name(_uuid()))]
self.mock_ip_dev.addr.list.return_value = [
{'cidr': vm_floating_ip + '/32'},
{'cidr': '19.4.4.1/24'}]
self.device_exists.return_value = True
agent.external_gateway_removed(
ri, ri.ex_gw_port,
agent.get_external_device_name(ri.ex_gw_port['id']))
self.mock_ip.del_veth.assert_called_once_with(
agent.get_fip_int_device_name(ri.router['id']))
self.mock_ip_dev.route.delete_gateway.assert_called_once_with(
str(fip_to_rtr.ip), table=dvr.FIP_RT_TBL)
self.assertEqual(ri.dist_fip_count, 0)
self.assertEqual(len(agent.fip_ns_subscribers), 0)
self.assertEqual(self.mock_driver.unplug.call_count, 1)
self.assertIsNone(agent.agent_gateway_port)
self.mock_ip.netns.delete.assert_called_once_with(
agent.get_fip_ns_name(external_net_id))
self.assertFalse(nat.add_rule.called)
nat.clear_rules_by_tag.assert_called_once_with('floating_ip')
def test_spawn_radvd(self):
router = prepare_router_data()
conffile = '/fake/radvd.conf'
pidfile = '/fake/radvd.pid'
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        # we don't want the whole process manager to be mocked, so that
        # we are able to catch execute() calls
self.external_process_p.stop()
self.ip_cls_p.stop()
get_pid_file_name = ('neutron.agent.linux.external_process.'
'ProcessManager.get_pid_file_name')
with mock.patch('neutron.agent.linux.utils.execute') as execute:
with mock.patch(get_pid_file_name) as get_pid:
get_pid.return_value = pidfile
ra._spawn_radvd(router['id'],
conffile,
agent.get_ns_name(router['id']),
self.conf.root_helper)
cmd = execute.call_args[0][0]
self.assertIn('radvd', cmd)
_join = lambda *args: ' '.join(args)
cmd = _join(*cmd)
self.assertIn(_join('-C', conffile), cmd)
self.assertIn(_join('-p', pidfile), cmd)
self.assertIn(_join('-m', 'syslog'), cmd)
def test_generate_radvd_conf_other_flag(self):
        # we don't check the other flag for stateful mode since it's
        # redundant in that mode and can be ignored by clients, as per
        # RFC 4861
expected = {l3_constants.IPV6_SLAAC: False,
l3_constants.DHCPV6_STATELESS: True}
for ra_mode, flag_set in expected.iteritems():
router = prepare_router_data()
ri = self._process_router_ipv6_interface_added(router,
ra_mode=ra_mode)
ra._generate_radvd_conf(ri.router['id'],
router[l3_constants.INTERFACE_KEY],
mock.Mock())
asserter = self.assertIn if flag_set else self.assertNotIn
asserter('AdvOtherConfigFlag on;',
self.utils_replace_file.call_args[0][1])
def test__put_fips_in_error_state(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = mock.Mock()
ri.router.get.return_value = [{'id': mock.sentinel.id1},
{'id': mock.sentinel.id2}]
statuses = agent._put_fips_in_error_state(ri)
        expected = {mock.sentinel.id1: l3_constants.FLOATINGIP_STATUS_ERROR,
                    mock.sentinel.id2: l3_constants.FLOATINGIP_STATUS_ERROR}
        self.assertEqual(expected, statuses)
def test__process_snat_dnat_for_fip(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.process_router_floating_ip_nat_rules = mock.Mock(
side_effect=Exception)
self.assertRaises(n_exc.FloatingIpSetupException,
agent._process_snat_dnat_for_fip,
mock.sentinel.ri)
agent.process_router_floating_ip_nat_rules.assert_called_with(
mock.sentinel.ri)
def test__configure_fip_addresses(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.process_router_floating_ip_addresses = mock.Mock(
side_effect=Exception)
self.assertRaises(n_exc.FloatingIpSetupException,
agent._configure_fip_addresses,
mock.sentinel.ri,
mock.sentinel.ex_gw_port)
agent.process_router_floating_ip_addresses.assert_called_with(
mock.sentinel.ri,
mock.sentinel.ex_gw_port)
|
apache-2.0
|
Wellington-Junior/ingressos
|
src/app/pages/evento/evento-editar/evento-editar.component.ts
|
3826
|
import { Component, ElementRef, NgZone, OnInit, ViewChild} from '@angular/core';
import { Router, ActivatedRoute } from '@angular/router';
import { EventoModel } from './../../../model/evento.model';
import { TipoIngressoModel } from './../../../model/tipoingresso.model';
import { EventoService } from './../../../service/evento.service';
import * as firebase from 'firebase';
import { } from 'googlemaps';
import { MapsAPILoader } from '@agm/core';
@Component({
selector: 'app-evento-editar',
templateUrl: './evento-editar.component.html'
})
export class EventoEditarComponent implements OnInit {
  imagemUrl: string;
  idevento: number;
  progress: string;
  storageRef: any;
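  // Input mask for "DD/MM/YYYY HH:MM" date-time fields.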
public dateMask = [/\d/, /\d/, '/', /\d/, /\d/, '/', /\d/, /\d/, /\d/, /\d/,' ',/\d/,/\d/,':',/\d/,/\d/];
@ViewChild("search")
public searchElementRef: ElementRef;
constructor(
public actroute: ActivatedRoute,
public eventoService:EventoService,
public evento:EventoModel,
public router:Router,
public mapsAPILoader: MapsAPILoader,
public ngZone: NgZone
)
{
this.storageRef = firebase.storage().ref();
}
ngOnInit() {
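    // Load the event identified by the route param, along with its
    // ticket types, and remember the current image URL.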
this.actroute.params.subscribe(
(params: any) => {
this.evento.idevento = params['id'];
this.eventoService.getEvento(this.evento)
.subscribe((evento)=>{
this.evento = evento.json().data[0];
console.log(this.evento);
this.imagemUrl = this.evento.imagem;
})
this.eventoService.getAllTipoIngressos(this.evento)
.subscribe((tipos)=>{
this.evento.tipos = tipos;
console.log(this.evento.tipos);
})
}
);
this.mapsAPILoader.load().then(() => {
let autocomplete = new google.maps.places.Autocomplete(this.searchElementRef.nativeElement, {
types: ["address"]
});
autocomplete.addListener("place_changed", () => {
this.ngZone.run(() => {
//get the place result
let place: google.maps.places.PlaceResult = autocomplete.getPlace();
console.log(autocomplete.getPlace())
//verify result
if (place.geometry === undefined || place.geometry === null) {
return;
}
//set latitude, longitude and zoom
this.evento.latitude = place.geometry.location.lat();
this.evento.longitude = place.geometry.location.lng();
});
});
});
}
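  // Uploads the selected image to Firebase Storage, reporting progress as a
  // percentage and keeping the resulting download URL for the event.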
uploadImagem($event){
//this.deleteImgStorage();
let files = $event.target.files || $event.srcElement.files;
let file = files[0];
let uploadTask = this.storageRef.child(file.name).put(file);
uploadTask.on(firebase.storage.TaskEvent.STATE_CHANGED,
(snapshot)=>{
let vlrPorcent = (snapshot.bytesTransferred / snapshot.totalBytes) * 100;
this.progress = vlrPorcent.toFixed(2);
},(error)=>{
switch (error.code) {
case 'storage/unauthorized':
// User doesn't have permission to access the object
break;
case 'storage/canceled':
// User canceled the upload
break;
case 'storage/unknown':
// Unknown error occurred, inspect error.serverResponse
break;
}
},()=>{
this.imagemUrl = uploadTask.snapshot.downloadURL;
});
}
atualizarDados(evento){
this.evento.imagem = this.imagemUrl;
console.log(this.evento.data);
this.eventoService.atualizarEvento(this.evento.idevento,evento);
}
cancelar(){
this.router.navigate(['/eventos/listar']);
}
novotipo(){
this.evento.tipos.push(new TipoIngressoModel());
}
}
|
apache-2.0
|
ryantenney/metrics-spring
|
src/main/java/com/ryantenney/metrics/spring/reporter/AbstractReporterFactoryBean.java
|
5530
|
/**
* Copyright (C) 2012 Ryan W Tenney ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ryantenney.metrics.spring.reporter;
import java.util.Map;
import java.util.regex.Pattern;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.FactoryBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.config.ConfigurableBeanFactory;
import org.springframework.core.convert.ConversionService;
import org.springframework.core.convert.TypeDescriptor;
import org.springframework.core.convert.support.DefaultConversionService;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
public abstract class AbstractReporterFactoryBean<T> implements FactoryBean<T>, InitializingBean, BeanFactoryAware {
protected static final String FILTER_PATTERN = "filter";
protected static final String FILTER_REF = "filter-ref";
protected static final String PREFIX = "prefix";
protected static final String PREFIX_SUPPLIER_REF = "prefix-supplier-ref";
private MetricRegistry metricRegistry;
private BeanFactory beanFactory;
private ConversionService conversionService;
private Map<String, String> properties;
private T instance;
private boolean enabled = true;
private boolean initialized = false;
@Override
public abstract Class<? extends T> getObjectType();
@Override
public boolean isSingleton() {
return true;
}
@Override
public T getObject() {
if (!this.enabled) {
return null;
}
if (!this.initialized) {
throw new IllegalStateException("Singleton instance not initialized yet");
}
return this.instance;
}
@Override
public void afterPropertiesSet() throws Exception {
this.instance = createInstance();
this.initialized = true;
}
protected abstract T createInstance() throws Exception;
@Override
public void setBeanFactory(final BeanFactory beanFactory) {
this.beanFactory = beanFactory;
if (beanFactory instanceof ConfigurableBeanFactory) {
this.conversionService = ((ConfigurableBeanFactory) beanFactory).getConversionService();
}
}
public BeanFactory getBeanFactory() {
return this.beanFactory;
}
public ConversionService getConversionService() {
if (this.conversionService == null) {
this.conversionService = new DefaultConversionService();
}
return this.conversionService;
}
public void setMetricRegistry(final MetricRegistry metricRegistry) {
this.metricRegistry = metricRegistry;
}
public MetricRegistry getMetricRegistry() {
return metricRegistry;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
public boolean isEnabled() {
return this.enabled;
}
public void setProperties(final Map<String, String> properties) {
this.properties = properties;
}
public Map<String, String> getProperties() {
return properties;
}
protected boolean hasProperty(String key) {
return getProperty(key) != null;
}
protected String getProperty(String key) {
return this.properties.get(key);
}
protected String getProperty(String key, String defaultValue) {
final String value = this.properties.get(key);
if (value == null) {
return defaultValue;
}
return value;
}
protected <V> V getProperty(String key, Class<V> requiredType) {
return getProperty(key, requiredType, null);
}
@SuppressWarnings("unchecked")
protected <V> V getProperty(String key, Class<V> requiredType, V defaultValue) {
final String value = this.properties.get(key);
if (value == null) {
return defaultValue;
}
return (V) getConversionService().convert(value, TypeDescriptor.forObject(value), TypeDescriptor.valueOf(requiredType));
}
protected Object getPropertyRef(String key) {
return getPropertyRef(key, null);
}
protected <V> V getPropertyRef(String key, Class<V> requiredType) {
final String value = this.properties.get(key);
if (value == null) {
return null;
}
return this.beanFactory.getBean(value, requiredType);
}
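	/**
	 * Resolves the metric filter: an inline regex ("filter") takes precedence
	 * over a bean reference ("filter-ref"); if neither is set, all metrics
	 * pass. For example, filter="com\\.example\\..*" keeps only metrics in
	 * that package.
	 */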
protected MetricFilter getMetricFilter() {
if (hasProperty(FILTER_PATTERN)) {
return metricFilterPattern(getProperty(FILTER_PATTERN));
}
else if (hasProperty(FILTER_REF)) {
return getPropertyRef(FILTER_REF, MetricFilter.class);
}
return MetricFilter.ALL;
}
protected String getPrefix() {
if (hasProperty(PREFIX)) {
return getProperty(PREFIX);
}
else if (hasProperty(PREFIX_SUPPLIER_REF)) {
return getPropertyRef(PREFIX_SUPPLIER_REF, MetricPrefixSupplier.class).getPrefix();
}
return null;
}
protected MetricFilter metricFilterPattern(String pattern) {
final Pattern filter = Pattern.compile(pattern);
return new MetricFilter() {
@Override
public boolean matches(String name, Metric metric) {
return filter.matcher(name).matches();
}
@Override
public String toString() {
return "[MetricFilter regex=" + filter.pattern() + "]";
}
};
}
}
|
apache-2.0
|
mminella/composed-task-runner
|
spring-cloud-starter-task-composedtaskrunner/src/test/java/org/springframework/cloud/task/app/composedtaskrunner/configuration/ComposedRunnerVisitorConfiguration.java
|
6001
|
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cloud.task.app.composedtaskrunner.configuration;
import org.springframework.batch.core.ExitStatus;
import org.springframework.batch.core.Step;
import org.springframework.batch.core.StepContribution;
import org.springframework.batch.core.StepExecution;
import org.springframework.batch.core.StepExecutionListener;
import org.springframework.batch.core.configuration.annotation.EnableBatchProcessing;
import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
import org.springframework.batch.core.scope.context.ChunkContext;
import org.springframework.batch.core.step.tasklet.Tasklet;
import org.springframework.batch.repeat.RepeatStatus;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.task.app.composedtaskrunner.ComposedRunnerJobFactory;
import org.springframework.cloud.task.app.composedtaskrunner.ComposedRunnerVisitor;
import org.springframework.cloud.task.app.composedtaskrunner.properties.ComposedTaskProperties;
import org.springframework.cloud.task.configuration.EnableTask;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.task.TaskExecutor;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.transaction.annotation.Isolation;
import org.springframework.transaction.interceptor.DefaultTransactionAttribute;
import org.springframework.transaction.interceptor.TransactionAttribute;
/**
* @author Glenn Renfro
*/
@Configuration
@EnableBatchProcessing
@EnableTask
@EnableConfigurationProperties(ComposedTaskProperties.class)
public class ComposedRunnerVisitorConfiguration {
@Autowired
private StepBuilderFactory steps;
@Autowired
private ComposedTaskProperties composedTaskProperties;
@Bean
public ComposedRunnerJobFactory job() {
return new ComposedRunnerJobFactory(this.composedTaskProperties.getGraph());
}
@Bean
public ComposedRunnerVisitor composedRunnerStack() {
return new ComposedRunnerVisitor();
}
@Bean
public Step AAA_0() {
return createTaskletStep("AAA_0");
}
@Bean
public Step AAA_1() {
return createTaskletStep("AAA_1");
}
@Bean
public Step AAA_2() {
return createTaskletStep("AAA_2");
}
@Bean
public Step BBB_0() {
return createTaskletStep("BBB_0");
}
@Bean
public Step BBB_1() {
return createTaskletStep("BBB_1");
}
@Bean
public Step CCC_0() {
return createTaskletStep("CCC_0");
}
@Bean
public Step DDD_0() {
return createTaskletStep("DDD_0");
}
@Bean
public Step EEE_0() {
return createTaskletStep("EEE_0");
}
@Bean
public Step FFF_0() {
return createTaskletStep("FFF_0");
}
@Bean
public Step LABELA() {
return createTaskletStep("LABELA");
}
@Bean
public Step failedStep_0() {
return createTaskletStepWithListener("failedStep_0",
failedStepExecutionListener());
}
@Bean
public Step successStep() {
return createTaskletStepWithListener("successStep",
successStepExecutionListener());
}
@Bean
public StepExecutionListener failedStepExecutionListener() {
return new StepExecutionListener() {
@Override
public void beforeStep(StepExecution stepExecution) {
}
@Override
public ExitStatus afterStep(StepExecution stepExecution) {
return ExitStatus.FAILED;
}
};
}
@Bean
public StepExecutionListener successStepExecutionListener() {
return new StepExecutionListener() {
@Override
public void beforeStep(StepExecution stepExecution) {
}
@Override
public ExitStatus afterStep(StepExecution stepExecution) {
return ExitStatus.COMPLETED;
}
};
}
@Bean
public TaskExecutor taskExecutor() {
return new ThreadPoolTaskExecutor();
}
private Step createTaskletStepWithListener(final String taskName,
StepExecutionListener stepExecutionListener) {
return this.steps.get(taskName)
.tasklet(new Tasklet() {
@Override
public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) throws Exception {
return RepeatStatus.FINISHED;
}
})
.transactionAttribute(getTransactionAttribute())
.listener(stepExecutionListener)
.build();
}
private Step createTaskletStep(final String taskName) {
return this.steps.get(taskName)
.tasklet(new Tasklet() {
@Override
public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) throws Exception {
return RepeatStatus.FINISHED;
}
})
.transactionAttribute(getTransactionAttribute())
.build();
}
	/**
	 * Using the default transaction attribute for the job would cause the
	 * TaskLauncher to see only what is in its own transaction rather than
	 * the latest state in the database. Setting the isolation level to
	 * READ_COMMITTED lets the task launcher see the latest state of the db,
	 * since the changes to the task execution are made by the tasks
	 * themselves.
	 * @return DefaultTransactionAttribute with isolation set to READ_COMMITTED.
	 */
private TransactionAttribute getTransactionAttribute() {
DefaultTransactionAttribute defaultTransactionAttribute =
new DefaultTransactionAttribute();
defaultTransactionAttribute.setIsolationLevel(
Isolation.READ_COMMITTED.value());
return defaultTransactionAttribute;
}
}
|
apache-2.0
|
ridoo/IlwisCore
|
ilwiscoreui/propertyeditors/mapinformationattributesetter.cpp
|
1233
|
#include "kernel.h"
#include "ilwisdata.h"
#include "datadefinition.h"
#include "columndefinition.h"
#include "table.h"
#include "visualattributemodel.h"
#include "mapinformationattributesetter.h"
REGISTER_PROPERTYEDITOR("mapinfopropertyeditor",MapInformationPropertySetter)
MapInformationPropertySetter::MapInformationPropertySetter(QObject *parent) :
VisualAttributeEditor("mapinfopropertyeditor",TR("Mouse over Info"),QUrl("MapinfoProperties.qml"), parent)
{
}
MapInformationPropertySetter::~MapInformationPropertySetter()
{
}
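// This editor applies only to valid coverage objects, and only at the
// layer level rather than for individual attributes.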
bool MapInformationPropertySetter::canUse(const IIlwisObject& obj, const QString& name ) const
{
if (!obj.isValid())
return false;
if(!hasType(obj->ilwisType(), itCOVERAGE))
return false;
return name == VisualAttributeModel::LAYER_ONLY;
}
VisualAttributeEditor *MapInformationPropertySetter::create()
{
return new MapInformationPropertySetter();
}
bool MapInformationPropertySetter::showInfo() const
{
if ( attribute()->layer())
return attribute()->layer()->showInfo();
return true;
}
void MapInformationPropertySetter::setShowInfo(bool yesno)
{
if (!attribute()->layer())
return;
attribute()->layer()->showInfo(yesno);
}
|
apache-2.0
|
knative/pkg
|
test/upgrade/testing_operations_test.go
|
7868
|
/*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrade_test
import (
"bytes"
"fmt"
"testing"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"knative.dev/pkg/test/upgrade"
)
const (
failureTestingMessage = "This error is expected to be seen. Upgrade suite should fail."
)
func newConfig(t *testing.T) (upgrade.Configuration, fmt.Stringer) {
var buf bytes.Buffer
cfg := zap.NewDevelopmentConfig()
cfg.EncoderConfig.TimeKey = ""
cfg.EncoderConfig.CallerKey = ""
syncedBuf := zapcore.AddSync(&buf)
c := upgrade.Configuration{
T: t,
LogConfig: upgrade.LogConfig{
Config: cfg,
Options: []zap.Option{
zap.WrapCore(func(core zapcore.Core) zapcore.Core {
return zapcore.NewCore(
zapcore.NewConsoleEncoder(cfg.EncoderConfig),
zapcore.NewMultiWriteSyncer(syncedBuf), cfg.Level)
}),
zap.ErrorOutput(syncedBuf),
},
},
}
return c, &buf
}
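// createSteps lays out the suite phases in execution order: base install,
// pre-upgrade tests, continual-test startup, upgrade, post-upgrade tests,
// downgrade, post-downgrade tests, and continual-test verification.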
func createSteps(s upgrade.Suite) []*step {
continualTestsGeneralized := generalizeOpsFromBg(s.Tests.Continual)
return []*step{{
messages: messageFormatters.baseInstall,
ops: generalizeOps(s.Installations.Base),
updateSuite: func(ops operations, s *upgrade.Suite) {
s.Installations.Base = ops.asOperations()
},
}, {
messages: messageFormatters.preUpgrade,
ops: generalizeOps(s.Tests.PreUpgrade),
updateSuite: func(ops operations, s *upgrade.Suite) {
s.Tests.PreUpgrade = ops.asOperations()
},
}, {
messages: messageFormatters.startContinual,
ops: continualTestsGeneralized,
updateSuite: func(ops operations, s *upgrade.Suite) {
s.Tests.Continual = ops.asBackgroundOperation()
},
}, {
messages: messageFormatters.upgrade,
ops: generalizeOps(s.Installations.UpgradeWith),
updateSuite: func(ops operations, s *upgrade.Suite) {
s.Installations.UpgradeWith = ops.asOperations()
},
}, {
messages: messageFormatters.postUpgrade,
ops: generalizeOps(s.Tests.PostUpgrade),
updateSuite: func(ops operations, s *upgrade.Suite) {
s.Tests.PostUpgrade = ops.asOperations()
},
}, {
messages: messageFormatters.downgrade,
ops: generalizeOps(s.Installations.DowngradeWith),
updateSuite: func(ops operations, s *upgrade.Suite) {
s.Installations.DowngradeWith = ops.asOperations()
},
}, {
messages: messageFormatters.postDowngrade,
ops: generalizeOps(s.Tests.PostDowngrade),
updateSuite: func(ops operations, s *upgrade.Suite) {
s.Tests.PostDowngrade = ops.asOperations()
},
}, {
messages: messageFormatters.verifyContinual,
ops: continualTestsGeneralized,
updateSuite: func(ops operations, s *upgrade.Suite) {
s.Tests.Continual = ops.asBackgroundOperation()
},
}}
}
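// expectedTexts collects the log lines expected up to and including the given
// failure point, marking steps without operations as skipped.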
func expectedTexts(s upgrade.Suite, fp failurePoint) texts {
steps := createSteps(s)
tt := texts{elms: nil}
for i, st := range steps {
stepIdx := i + 1
if st.ops.length() == 0 {
tt.append(st.skipped(stepIdx))
} else {
tt.append(st.starting(stepIdx, st.ops.length()))
for j, op := range st.ops.ops {
elemIdx := j + 1
tt.append(st.element(stepIdx, elemIdx, op.Name()))
if fp.step == stepIdx && fp.element == elemIdx {
return tt
}
}
}
}
return tt
}
func generalizeOps(ops []upgrade.Operation) operations {
gen := make([]*operation, len(ops))
for idx, op := range ops {
gen[idx] = &operation{op: op}
}
return operations{ops: gen}
}
func generalizeOpsFromBg(ops []upgrade.BackgroundOperation) operations {
gen := make([]*operation, len(ops))
for idx, op := range ops {
gen[idx] = &operation{bg: op}
}
return operations{ops: gen}
}
func createMessages(mf formats) messages {
return messages{
skipped: func(args ...interface{}) string {
empty := ""
if mf.skipped == empty {
return empty
}
return fmt.Sprintf(mf.skipped, args...)
},
starting: func(args ...interface{}) string {
return fmt.Sprintf(mf.starting, args...)
},
element: func(args ...interface{}) string {
return fmt.Sprintf(mf.element, args...)
},
}
}
func (tt *texts) append(messages ...string) {
for _, msg := range messages {
if msg == "" {
continue
}
tt.elms = append(tt.elms, msg)
}
}
func completeSuiteExample(fp failurePoint) upgrade.Suite {
serving := servingComponent()
eventing := eventingComponent()
suite := upgrade.Suite{
Tests: upgrade.Tests{
PreUpgrade: []upgrade.Operation{
serving.tests.preUpgrade, eventing.tests.preUpgrade,
},
PostUpgrade: []upgrade.Operation{
serving.tests.postUpgrade, eventing.tests.postUpgrade,
},
PostDowngrade: []upgrade.Operation{
serving.tests.postDowngrade, eventing.tests.postDowngrade,
},
Continual: []upgrade.BackgroundOperation{
serving.tests.continual, eventing.tests.continual,
},
},
Installations: upgrade.Installations{
Base: []upgrade.Operation{
serving.installs.stable, eventing.installs.stable,
},
UpgradeWith: []upgrade.Operation{
serving.installs.head, eventing.installs.head,
},
DowngradeWith: []upgrade.Operation{
serving.installs.stable, eventing.installs.stable,
},
},
}
return enrichSuiteWithFailures(suite, fp)
}
func emptySuiteExample() upgrade.Suite {
return upgrade.Suite{
Tests: upgrade.Tests{},
Installations: upgrade.Installations{},
}
}
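// enrichSuiteWithFailures wraps the operation at the given failure point so
// that it reports an error, then rebuilds the suite from the modified steps.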
func enrichSuiteWithFailures(suite upgrade.Suite, fp failurePoint) upgrade.Suite {
steps := createSteps(suite)
for i, st := range steps {
for j, op := range st.ops.ops {
if fp.step == i+1 && fp.element == j+1 {
op.fail(fp.step == 3)
}
}
}
return recreateSuite(steps)
}
func recreateSuite(steps []*step) upgrade.Suite {
suite := &upgrade.Suite{
Tests: upgrade.Tests{},
Installations: upgrade.Installations{},
}
for _, st := range steps {
st.updateSuite(st.ops, suite)
}
return *suite
}
func (o operation) Name() string {
if o.op != nil {
return o.op.Name()
}
return o.bg.Name()
}
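// fail wraps the operation so that it reports failureTestingMessage: regular
// operations always fail, while background operations fail either during
// setup (when setupFail is true) or during verification on stop.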
func (o *operation) fail(setupFail bool) {
testName := fmt.Sprintf("FailingOf%s", o.Name())
if o.op != nil {
prev := o.op
o.op = upgrade.NewOperation(testName, func(c upgrade.Context) {
handler := prev.Handler()
handler(c)
c.T.Error(failureTestingMessage)
c.Log.Error(failureTestingMessage)
})
} else {
prev := o.bg
o.bg = upgrade.NewBackgroundOperation(testName, func(c upgrade.Context) {
handler := prev.Setup()
handler(c)
if setupFail {
c.T.Error(failureTestingMessage)
c.Log.Error(failureTestingMessage)
}
}, func(bc upgrade.BackgroundContext) {
upgrade.WaitForStopEvent(bc, upgrade.WaitForStopEventConfiguration{
Name: testName,
OnStop: func(event upgrade.StopEvent) {
if !setupFail {
event.T.Error(failureTestingMessage)
bc.Log.Error(failureTestingMessage)
}
},
OnWait: func(bc upgrade.BackgroundContext, self upgrade.WaitForStopEventConfiguration) {
bc.Log.Debugf("%s - probing functionality...", self.Name)
},
WaitTime: shortWait,
})
})
}
}
func (o operations) length() int {
return len(o.ops)
}
func (o operations) asOperations() []upgrade.Operation {
ops := make([]upgrade.Operation, o.length())
for i, op := range o.ops {
ops[i] = op.op
}
return ops
}
func (o operations) asBackgroundOperation() []upgrade.BackgroundOperation {
ops := make([]upgrade.BackgroundOperation, o.length())
for i, op := range o.ops {
ops[i] = op.bg
}
return ops
}
|
apache-2.0
|
kelltrick/roslyn
|
src/Features/Core/Portable/ConvertToInterpolatedString/AbstractConvertPlaceholderToInterpolatedStringRefactoringProvider.cs
|
13484
|
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Immutable;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.CodeAnalysis.CodeActions;
using Microsoft.CodeAnalysis.PooledObjects;
using Microsoft.CodeAnalysis.Text;
using Microsoft.CodeAnalysis.LanguageServices;
using Microsoft.CodeAnalysis.Shared.Extensions;
using Microsoft.CodeAnalysis.Editing;
using Microsoft.CodeAnalysis.Simplification;
using Microsoft.CodeAnalysis.Formatting;
using Microsoft.CodeAnalysis.CodeRefactorings;
using System.Collections.Generic;
namespace Microsoft.CodeAnalysis.ConvertToInterpolatedString
{
internal abstract class AbstractConvertPlaceholderToInterpolatedStringRefactoringProvider<TInvocationExpressionSyntax, TExpressionSyntax, TArgumentSyntax, TLiteralExpressionSyntax> : CodeRefactoringProvider
where TExpressionSyntax : SyntaxNode
where TInvocationExpressionSyntax : TExpressionSyntax
where TArgumentSyntax : SyntaxNode
where TLiteralExpressionSyntax : SyntaxNode
{
protected abstract SyntaxNode GetInterpolatedString(string text);
public override async Task ComputeRefactoringsAsync(CodeRefactoringContext context)
{
var semanticModel = await context.Document.GetSemanticModelAsync(context.CancellationToken).ConfigureAwait(false);
var stringType = semanticModel.Compilation.GetSpecialType(SpecialType.System_String);
if (stringType == null)
{
return;
}
var formatMethods = stringType
.GetMembers(nameof(string.Format))
.OfType<IMethodSymbol>()
.Where(ShouldIncludeFormatMethod)
.ToImmutableArray();
if (formatMethods.Length == 0)
{
return;
}
var syntaxFactsService = context.Document.GetLanguageService<ISyntaxFactsService>();
if (syntaxFactsService == null)
{
return;
}
var root = await context.Document.GetSyntaxRootAsync(context.CancellationToken).ConfigureAwait(false);
if (TryFindInvocation(context.Span, root, semanticModel, formatMethods, syntaxFactsService, context.CancellationToken, out var invocation, out var invocationSymbol) &&
IsArgumentListCorrect(syntaxFactsService.GetArgumentsOfInvocationExpression(invocation), invocationSymbol, formatMethods, semanticModel, syntaxFactsService, context.CancellationToken))
{
context.RegisterRefactoring(
new ConvertToInterpolatedStringCodeAction(
FeaturesResources.Convert_to_interpolated_string,
c => CreateInterpolatedString(invocation, context.Document, syntaxFactsService, c)));
}
}
private bool TryFindInvocation(
TextSpan span,
SyntaxNode root,
SemanticModel semanticModel,
ImmutableArray<IMethodSymbol> formatMethods,
ISyntaxFactsService syntaxFactsService,
CancellationToken cancellationToken,
out TInvocationExpressionSyntax invocation,
out ISymbol invocationSymbol)
{
invocationSymbol = null;
invocation = root.FindNode(span, getInnermostNodeForTie: true)?.FirstAncestorOrSelf<TInvocationExpressionSyntax>();
while (invocation != null)
{
var arguments = syntaxFactsService.GetArgumentsOfInvocationExpression(invocation);
if (arguments.Count >= 2)
{
var firstArgumentExpression = syntaxFactsService.GetExpressionOfArgument(GetFormatArgument(arguments, syntaxFactsService)) as TLiteralExpressionSyntax;
if (firstArgumentExpression != null && syntaxFactsService.IsStringLiteral(firstArgumentExpression.GetFirstToken()))
{
invocationSymbol = semanticModel.GetSymbolInfo(invocation, cancellationToken).Symbol;
if (formatMethods.Contains(invocationSymbol))
{
break;
}
}
}
invocation = invocation.Parent?.FirstAncestorOrSelf<TInvocationExpressionSyntax>();
}
return invocation != null;
}
private bool IsArgumentListCorrect(
SeparatedSyntaxList<TArgumentSyntax>? nullableArguments,
ISymbol invocationSymbol,
ImmutableArray<IMethodSymbol> formatMethods,
SemanticModel semanticModel,
ISyntaxFactsService syntaxFactsService,
CancellationToken cancellationToken)
{
var arguments = nullableArguments.Value;
var firstExpression = syntaxFactsService.GetExpressionOfArgument(GetFormatArgument(arguments, syntaxFactsService)) as TLiteralExpressionSyntax;
if (arguments.Count >= 2 &&
firstExpression != null &&
syntaxFactsService.IsStringLiteral(firstExpression.GetFirstToken()))
{
                // We do not want to substitute the expression if it is being passed to a params array argument.
// Example:
// string[] args;
// String.Format("{0}{1}{2}", args);
return IsArgumentListNotPassingArrayToParams(
syntaxFactsService.GetExpressionOfArgument(GetParamsArgument(arguments, syntaxFactsService)),
invocationSymbol,
formatMethods,
semanticModel,
cancellationToken);
}
return false;
}
private async Task<Document> CreateInterpolatedString(
TInvocationExpressionSyntax invocation,
Document document,
ISyntaxFactsService syntaxFactsService,
CancellationToken cancellationToken)
{
var semanticModel = await document.GetSemanticModelAsync(cancellationToken).ConfigureAwait(false);
var arguments = syntaxFactsService.GetArgumentsOfInvocationExpression(invocation);
var literalExpression = syntaxFactsService.GetExpressionOfArgument(GetFormatArgument(arguments, syntaxFactsService)) as TLiteralExpressionSyntax;
var text = literalExpression.GetFirstToken().ToString();
var syntaxGenerator = document.Project.LanguageServices.GetService<SyntaxGenerator>();
var expandedArguments = GetExpandedArguments(semanticModel, arguments, syntaxGenerator, syntaxFactsService);
var interpolatedString = GetInterpolatedString(text);
var newInterpolatedString = VisitArguments(expandedArguments, interpolatedString, syntaxFactsService);
var root = await document.GetSyntaxRootAsync(cancellationToken).ConfigureAwait(false);
var newRoot = root.ReplaceNode(invocation, newInterpolatedString.WithTriviaFrom(invocation));
return document.WithSyntaxRoot(newRoot);
}
private string GetArgumentName(TArgumentSyntax argument, ISyntaxFactsService syntaxFactsService)
=> syntaxFactsService.GetNameForArgument(argument);
        private SyntaxNode GetParamsArgument(SeparatedSyntaxList<TArgumentSyntax> arguments, ISyntaxFactsService syntaxFactsService)
            => arguments.FirstOrDefault(argument => string.Equals(GetArgumentName(argument, syntaxFactsService), StringFormatArguments.ArgsArgumentName, StringComparison.OrdinalIgnoreCase)) ?? arguments[1];
private TArgumentSyntax GetFormatArgument(SeparatedSyntaxList<TArgumentSyntax> arguments, ISyntaxFactsService syntaxFactsService)
=> arguments.FirstOrDefault(argument => string.Equals(GetArgumentName(argument, syntaxFactsService), StringFormatArguments.FormatArgumentName, StringComparison.OrdinalIgnoreCase)) ?? arguments[0];
private TArgumentSyntax GetArgument(SeparatedSyntaxList<TArgumentSyntax> arguments, int index, ISyntaxFactsService syntaxFactsService)
{
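            // string.Format names its individual arguments arg0..arg2 only up
            // to three formatting arguments (e.g. Format(format, arg0, arg1));
            // with more than four arguments in total the params-array overload
            // applies, so a positional lookup is always correct.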
if (arguments.Count > 4)
{
return arguments[index];
}
return arguments.FirstOrDefault(
argument => string.Equals(GetArgumentName(argument, syntaxFactsService), StringFormatArguments.ParamsArgumentNames[index], StringComparison.OrdinalIgnoreCase))
?? arguments[index];
}
private ImmutableArray<TExpressionSyntax> GetExpandedArguments(
SemanticModel semanticModel,
SeparatedSyntaxList<TArgumentSyntax> arguments,
SyntaxGenerator syntaxGenerator,
ISyntaxFactsService syntaxFactsService)
{
var builder = ArrayBuilder<TExpressionSyntax>.GetInstance();
for (int i = 1; i < arguments.Count; i++)
{
var argumentExpression = syntaxFactsService.GetExpressionOfArgument(GetArgument(arguments, i, syntaxFactsService));
var convertedType = semanticModel.GetTypeInfo(argumentExpression).ConvertedType;
if (convertedType == null)
{
builder.Add(syntaxFactsService.Parenthesize(argumentExpression) as TExpressionSyntax);
}
else
{
var castExpression = syntaxGenerator.CastExpression(convertedType, syntaxFactsService.Parenthesize(argumentExpression)).WithAdditionalAnnotations(Simplifier.Annotation);
builder.Add(castExpression as TExpressionSyntax);
}
}
var expandedArguments = builder.ToImmutableAndFree();
return expandedArguments;
}
private SyntaxNode VisitArguments(
ImmutableArray<TExpressionSyntax> expandedArguments,
SyntaxNode interpolatedString,
ISyntaxFactsService syntaxFactsService)
{
return interpolatedString.ReplaceNodes(syntaxFactsService.GetContentsOfInterpolatedString(interpolatedString), (oldNode, newNode) =>
{
var interpolationSyntaxNode = newNode;
if (interpolationSyntaxNode != null)
{
var literalExpression = syntaxFactsService.GetExpressionOfInterpolation(interpolationSyntaxNode) as TLiteralExpressionSyntax;
if (literalExpression != null && syntaxFactsService.IsNumericLiteralExpression(literalExpression))
{
if (int.TryParse(literalExpression.GetFirstToken().ValueText, out var index))
{
if (index >= 0 && index < expandedArguments.Length)
{
return interpolationSyntaxNode.ReplaceNode(
syntaxFactsService.GetExpressionOfInterpolation(interpolationSyntaxNode),
syntaxFactsService.ConvertToSingleLine(expandedArguments[index], useElasticTrivia: true).WithAdditionalAnnotations(Formatter.Annotation));
}
}
}
}
return newNode;
});
}
private static bool ShouldIncludeFormatMethod(IMethodSymbol methodSymbol)
{
if (!methodSymbol.IsStatic)
{
return false;
}
if (methodSymbol.Parameters.Length == 0)
{
return false;
}
var firstParameter = methodSymbol.Parameters[0];
if (firstParameter?.Name != StringFormatArguments.FormatArgumentName)
{
return false;
}
return true;
}
private static bool IsArgumentListNotPassingArrayToParams(
SyntaxNode expression,
ISymbol invocationSymbol,
ImmutableArray<IMethodSymbol> formatMethods,
SemanticModel semanticModel,
CancellationToken cancellationToken)
{
var formatMethodsAcceptingParamsArray = formatMethods
.Where(x => x.Parameters.Length > 1 && x.Parameters[1].Type.Kind == SymbolKind.ArrayType);
if (formatMethodsAcceptingParamsArray.Contains(invocationSymbol))
{
return semanticModel.GetTypeInfo(expression, cancellationToken).Type?.Kind != SymbolKind.ArrayType;
}
return true;
}
private class ConvertToInterpolatedStringCodeAction : CodeAction.DocumentChangeAction
{
public ConvertToInterpolatedStringCodeAction(string title, Func<CancellationToken, Task<Document>> createChangedDocument) :
base(title, createChangedDocument)
{
}
}
private static class StringFormatArguments
{
public const string FormatArgumentName = "format";
public const string ArgsArgumentName = "args";
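            // Index 0 corresponds to the "format" argument itself; arg0..arg2
            // occupy positions 1-3 in the non-params string.Format overloads.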
public static readonly ImmutableArray<string> ParamsArgumentNames =
ImmutableArray.Create("", "arg0", "arg1", "arg2");
}
}
}
|
apache-2.0
|
aws/aws-sdk-cpp
|
aws-cpp-sdk-migration-hub-refactor-spaces/include/aws/migration-hub-refactor-spaces/model/GetResourcePolicyRequest.h
|
3039
|
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/migration-hub-refactor-spaces/MigrationHubRefactorSpaces_EXPORTS.h>
#include <aws/migration-hub-refactor-spaces/MigrationHubRefactorSpacesRequest.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace MigrationHubRefactorSpaces
{
namespace Model
{
/**
*/
class AWS_MIGRATIONHUBREFACTORSPACES_API GetResourcePolicyRequest : public MigrationHubRefactorSpacesRequest
{
public:
GetResourcePolicyRequest();
    // The service request name is the operation name that will send this request out.
    // Each operation should have a unique request name, so that we can get the operation's name from the request.
    // Note: this is not true for responses; multiple operations may have the same response name,
    // so we cannot get an operation's name from a response.
inline virtual const char* GetServiceRequestName() const override { return "GetResourcePolicy"; }
Aws::String SerializePayload() const override;
/**
* <p>The Amazon Resource Name (ARN) of the resource associated with the policy.
* </p>
*/
inline const Aws::String& GetIdentifier() const{ return m_identifier; }
/**
* <p>The Amazon Resource Name (ARN) of the resource associated with the policy.
* </p>
*/
inline bool IdentifierHasBeenSet() const { return m_identifierHasBeenSet; }
/**
* <p>The Amazon Resource Name (ARN) of the resource associated with the policy.
* </p>
*/
inline void SetIdentifier(const Aws::String& value) { m_identifierHasBeenSet = true; m_identifier = value; }
/**
* <p>The Amazon Resource Name (ARN) of the resource associated with the policy.
* </p>
*/
inline void SetIdentifier(Aws::String&& value) { m_identifierHasBeenSet = true; m_identifier = std::move(value); }
/**
* <p>The Amazon Resource Name (ARN) of the resource associated with the policy.
* </p>
*/
inline void SetIdentifier(const char* value) { m_identifierHasBeenSet = true; m_identifier.assign(value); }
/**
* <p>The Amazon Resource Name (ARN) of the resource associated with the policy.
* </p>
*/
inline GetResourcePolicyRequest& WithIdentifier(const Aws::String& value) { SetIdentifier(value); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the resource associated with the policy.
* </p>
*/
inline GetResourcePolicyRequest& WithIdentifier(Aws::String&& value) { SetIdentifier(std::move(value)); return *this;}
/**
* <p>The Amazon Resource Name (ARN) of the resource associated with the policy.
* </p>
*/
inline GetResourcePolicyRequest& WithIdentifier(const char* value) { SetIdentifier(value); return *this;}
private:
Aws::String m_identifier;
bool m_identifierHasBeenSet;
};
} // namespace Model
} // namespace MigrationHubRefactorSpaces
} // namespace Aws
|
apache-2.0
|
heinousjay/JibbrJabbr
|
kernel/src/test/resources/jj/css/test/replacement.css
|
602
|
@import "test.css";
@import "http://example.com/something.css";
.box-icon {
background-image: url(images/box-icon.png);
}
.rox-icon {
background-image: url("images/other/rox-icon.png") no-repeat 1% 25%;
}
.sox-icon {
background-image: url('images/./sox-icon.png');
}
.sox-icon2 {
background-image: url('/images/sox-icon.png');
}
.warning-icon {
background-image: url(not-found-thing.jpg);
}
.fqdn-icon {
background-image: url(http://example.com/absolute/path);
}
body { background: url('https://www.example.com/pinkish.png') }
li { list-style: url("//www.example.com/redball.png") disc }
|
apache-2.0
|
Selventa/model-builder
|
tools/groovy/doc/html/gapi/org/codehaus/groovy/package-frame.html
|
1461
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<HTML>
<HEAD>
<TITLE>
org.codehaus.groovy
</TITLE>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<META NAME="keywords" CONTENT="org/codehaus/groovy package">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../stylesheet.css" TITLE="Style">
</HEAD>
<BODY BGCOLOR="white">
<FONT size="+1" CLASS="FrameTitleFont">
<A HREF="package-summary.html" target="classFrame">org.codehaus.groovy</A></FONT>
<TABLE BORDER="0" WIDTH="100%" SUMMARY="">
<TR>
<TD NOWRAP><FONT size="+1" CLASS="FrameHeadingFont">
Interfaces</FONT>
<FONT CLASS="FrameItemFont"><BR><A HREF="GroovyExceptionInterface.html" title="interface in org.codehaus.groovy" target="classFrame"><I>GroovyExceptionInterface</I></A></FONT></TD>
</TR>
</TABLE>
<TABLE BORDER="0" WIDTH="100%" SUMMARY="">
<TR>
<TD NOWRAP><FONT size="+1" CLASS="FrameHeadingFont">
Exceptions</FONT>
<FONT CLASS="FrameItemFont"><BR><A HREF="GroovyException.html" title="exception in org.codehaus.groovy" target="classFrame">GroovyException</A></FONT></TD>
</TR>
</TABLE>
<TABLE BORDER="0" WIDTH="100%" SUMMARY="">
<TR>
<TD NOWRAP><FONT size="+1" CLASS="FrameHeadingFont">
Errors</FONT>
<FONT CLASS="FrameItemFont"><BR><A HREF="GroovyBugError.html" title="error in org.codehaus.groovy" target="classFrame">GroovyBugError</A></FONT></TD>
</TR>
</TABLE>
</BODY>
</HTML>
|
apache-2.0
|
aol/cyclops
|
cyclops/src/test/java/cyclops/control/trytests/TryTest.java
|
8892
|
package cyclops.control.trytests;
import cyclops.control.Either;
import cyclops.control.Future;
import cyclops.control.Ior;
import cyclops.control.Maybe;
import cyclops.control.Option;
import cyclops.control.Trampoline;
import cyclops.control.Try;
import cyclops.function.Monoid;
import cyclops.companion.Semigroups;
import com.oath.cyclops.util.box.Mutable;
import cyclops.companion.Streams;
import org.junit.Before;
import org.junit.Test;
import java.util.Arrays;
import java.util.Optional;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.*;
public class TryTest {
Try<Integer,RuntimeException> just;
Try<Integer,RuntimeException> none;
RuntimeException exception = new RuntimeException();
@Before
public void setUp() throws Exception {
just = Try.success(10);
none = Try.failure(exception);
just.toEither(-5000).mapLeft(x-> new Exception()).toTry(Exception.class);
}
@Test
public void infinite() {
Try<String, Exception> result = Option.<String>none().toTry(new Exception("asdf"));
for (int i = 0; i < 10; i++) {
result = result.recoverFlatMap(e -> Try.failure(new Exception("asdf")));
}
System.out.println(result.toString());
}
@Test
public void recover(){
final String result = Try.withCatch(() -> "takeOne", RuntimeException.class)
.recoverFlatMap(__ -> Try.<String,RuntimeException>success("ignored"))
.orElse("boo!");
Try.withCatch(() -> "hello", RuntimeException.class)
.recover(()->"world");
}
@Test
public void coFlatMap(){
assertThat(just.coflatMap(m-> m.isPresent()? m.toOptional().get() : 50),equalTo(just));
assertThat(none.coflatMap(m-> m.isPresent()? m.toOptional().get() : 50),equalTo(Try.success(50)));
}
@Test
public void testToMaybe() {
assertThat(just.toMaybe(),equalTo(Maybe.of(10)));
assertThat(none.toMaybe(),equalTo(Maybe.nothing()));
}
private int add1(int i){
return i+1;
}
@Test
public void testOfT() {
assertThat(Ior.right(1),equalTo(Ior.right(1)));
}
@Test
public void testUnitT() {
assertThat(just.unit(20),equalTo(Try.success(20)));
}
@Test
public void testisPrimary() {
assertTrue(just.isSuccess());
assertFalse(none.isSuccess());
}
@Test
public void testMapFunctionOfQsuperTQextendsR() {
assertThat(just.map(i->i+5),equalTo(Try.success(15)));
assertThat(none.map(i->i+5).toEither(),equalTo(Either.left(exception)));
}
@Test
public void testFlatMap() {
assertThat(just.flatMap(i->Try.success(i+5)),equalTo(Try.success(15)));
assertThat(none.flatMap(i->Try.success(i+5)),equalTo(Try.failure(exception)));
}
@Test
public void testWhenFunctionOfQsuperTQextendsRSupplierOfQextendsR() {
assertThat(just.fold(i->i+1,()->20),equalTo(11));
assertThat(none.fold(i->i+1,()->20),equalTo(20));
}
@Test
public void testStream() {
assertThat(just.stream().toList(),equalTo(Arrays.asList(10)));
assertThat(none.stream().toList(),equalTo(Arrays.asList()));
}
@Test
public void testOfSupplierOfT() {
}
@Test
public void testConvertTo() {
Stream<Integer> toStream = just.fold(m->Stream.of(m),()->Stream.of());
assertThat(toStream.collect(Collectors.toList()),equalTo(Arrays.asList(10)));
}
@Test
public void testConvertToAsync() {
Future<Stream<Integer>> async = Future.of(()->just.fold(f->Stream.of((int)f),()->Stream.of()));
assertThat(async.orElse(Stream.empty()).collect(Collectors.toList()),equalTo(Arrays.asList(10)));
}
@Test
public void testIterate() {
assertThat(just.asSupplier(-100).iterate(i->i+1).limit(10).sumInt(i->i),equalTo(145));
}
@Test
public void testGenerate() {
assertThat(just.asSupplier(-100).generate().limit(10).sumInt(i->i),equalTo(100));
}
@Test
public void testToXor() {
assertThat(just.toEither(-5000),equalTo(Either.right(10)));
}
@Test
public void testToXorNone(){
Either<RuntimeException,Integer> xor = none.toEither();
assertTrue(xor.isLeft());
assertThat(xor,equalTo(Either.left(exception)));
}
@Test
public void testToXorSecondary() {
assertThat(just.toEither(-5000).swap(),equalTo(Either.left(10)));
}
@Test
public void testToXorSecondaryNone(){
Either<Integer,RuntimeException> xorNone = none.toEither().swap();
assertThat(xorNone,equalTo(Either.right(exception)));
}
@Test
public void testToTry() {
assertTrue(none.toTry().isFailure());
assertThat(just.toTry(),equalTo(Try.success(10)));
}
@Test
public void testToTryClassOfXArray() {
assertTrue(none.toTry(Throwable.class).isFailure());
}
@Test
public void testToIor() {
assertThat(just.toIor(),equalTo(Ior.right(10)));
}
@Test
public void testToIorNone(){
Ior<RuntimeException,Integer> ior = none.toIor();
assertTrue(ior.isLeft());
assertThat(ior,equalTo(Ior.left(exception)));
}
@Test
public void testToIorSecondary() {
assertThat(just.toIor().swap(),equalTo(Ior.left(10)));
}
@Test
public void testToIorSecondaryNone(){
Ior<Integer,RuntimeException> ior = none.toIor().swap();
assertTrue(ior.isRight());
assertThat(ior,equalTo(Ior.right(exception)));
}
@Test
public void testMkString() {
assertThat(just.mkString(),equalTo("Success[10]"));
assertThat(none.mkString(),equalTo("Failure["+exception+"]"));
}
@Test
public void testGet() {
assertThat(just.get(),equalTo(Option.some(10)));
}
@Test
public void testFilter() {
assertFalse(just.filter(i->i<5).isPresent());
assertTrue(just.filter(i->i>5).isPresent());
assertFalse(none.filter(i->i<5).isPresent());
assertFalse(none.filter(i->i>5).isPresent());
}
@Test
public void testOfType() {
assertFalse(just.ofType(String.class).isPresent());
assertTrue(just.ofType(Integer.class).isPresent());
assertFalse(none.ofType(String.class).isPresent());
assertFalse(none.ofType(Integer.class).isPresent());
}
@Test
public void testFilterNot() {
assertTrue(just.filterNot(i->i<5).isPresent());
assertFalse(just.filterNot(i->i>5).isPresent());
assertFalse(none.filterNot(i->i<5).isPresent());
assertFalse(none.filterNot(i->i>5).isPresent());
}
@Test
public void testNotNull() {
assertTrue(just.notNull().isPresent());
assertFalse(none.notNull().isPresent());
}
private int add(int a, int b){
return a+b;
}
private int add3(int a, int b, int c){
return a+b+c;
}
private int add4(int a, int b, int c,int d){
return a+b+c+d;
}
private int add5(int a, int b, int c,int d,int e){
return a+b+c+d+e;
}
@Test
public void testFoldRightMonoidOfT() {
assertThat(just.fold(Monoid.of(1,Semigroups.intMult)),equalTo(10));
}
@Test
public void testWhenFunctionOfQsuperMaybeOfTQextendsR() {
assertThat(just.fold(s->"hello", ()->"world"),equalTo("hello"));
assertThat(none.fold(s->"hello", ()->"world"),equalTo("world"));
}
@Test
public void testOrElseGet() {
assertThat(none.orElseGet(()->2),equalTo(2));
assertThat(just.orElseGet(()->2),equalTo(10));
}
@Test
public void testToOptional() {
assertFalse(none.toOptional().isPresent());
assertTrue(just.toOptional().isPresent());
assertThat(just.toOptional(),equalTo(Optional.of(10)));
}
@Test
public void testToStream() {
assertThat(none.stream().collect(Collectors.toList()).size(),equalTo(0));
assertThat(just.stream().collect(Collectors.toList()).size(),equalTo(1));
}
@Test
public void testOrElse() {
assertThat(none.orElse(20),equalTo(20));
assertThat(just.orElse(20),equalTo(10));
}
Executor exec = Executors.newFixedThreadPool(1);
@Test
public void testIterator1() {
assertThat(Streams.stream(just.iterator()).collect(Collectors.toList()),
equalTo(Arrays.asList(10)));
}
@Test
public void testForEach() {
Mutable<Integer> capture = Mutable.of(null);
none.forEach(c->capture.set(c));
assertNull(capture.get());
just.forEach(c->capture.set(c));
assertThat(capture.get(),equalTo(10));
}
@Test
public void testSpliterator() {
assertThat(StreamSupport.stream(just.spliterator(),false).collect(Collectors.toList()),
equalTo(Arrays.asList(10)));
}
@Test
public void testMapFunctionOfQsuperTQextendsR1() {
assertThat(just.map(i->i+5),equalTo(Try.success(15)));
}
@Test
public void testPeek() {
Mutable<Integer> capture = Mutable.of(null);
just = just.peek(c->capture.set(c));
assertThat(capture.get(),equalTo(10));
}
private Trampoline<Integer> sum(int times, int sum){
return times ==0 ? Trampoline.done(sum) : Trampoline.more(()->sum(times-1,sum+times));
}
@Test
public void testUnitT1() {
assertThat(none.unit(10),equalTo(just));
}
}
|
apache-2.0
|
mdoering/backbone
|
life/Plantae/Ivonia/Ivonia amplexicaulis/README.md
|
176
|
# Ivonia amplexicaulis Vell. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
apache-2.0
|
BrentDouglas/chainlink
|
core/src/main/java/io/machinecode/chainlink/core/jsl/fluent/task/FluentCheckpointAlgorithm.java
|
1211
|
/*
* Copyright 2015 Brent Douglas and other contributors
* as indicated by the @author tags. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.machinecode.chainlink.core.jsl.fluent.task;
import io.machinecode.chainlink.core.jsl.fluent.FluentPropertyReference;
import io.machinecode.chainlink.spi.jsl.task.CheckpointAlgorithm;
/**
* @author <a href="mailto:[email protected]">Brent Douglas</a>
* @since 1.0
*/
public class FluentCheckpointAlgorithm extends FluentPropertyReference<FluentCheckpointAlgorithm> implements CheckpointAlgorithm {
@Override
public FluentCheckpointAlgorithm copy() {
return copy(new FluentCheckpointAlgorithm());
}
}
|
apache-2.0
|
classmethod-sandbox/sparrow
|
src/main/java/jp/classmethod/sparrow/model/LineMessageEntityRepository.java
|
1590
|
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jp.classmethod.sparrow.model;
import java.util.List;
import org.springframework.data.repository.query.Param;
import jp.xet.sparwings.spring.data.repository.CreatableRepository;
/**
* Created by mochizukimasao on 2017/04/11.
*
* Calculator repository interface.
*
* @author mochizukimasao
* @since version
*/
public interface LineMessageEntityRepository extends CreatableRepository<LineMessageEntity, String> {
/**
 * Returns at most the number of elements specified by limit, matching the given userId, starting from the zero-based offset position.
 *
 * @param userId user ID
 * @param offset number of rows to skip
 * @param limit maximum number of rows to fetch
 * @return at most limit elements starting from offset; returns an empty collection if no matching data exists.
*/
List<LineMessageEntity> findByUser(
@Param("userId") String userId, @Param("offset") int offset, @Param("size") int limit);
}
|
apache-2.0
|
nanorepublica/wedding
|
c4628be130c468a984728868d0f261680de56220.html
|
40
|
24d35e46ec4bd24f6cb2cfacf41fb8dfaa666fc8
|
apache-2.0
|
mdoering/backbone
|
life/Plantae/Magnoliophyta/Magnoliopsida/Ericales/Primulaceae/Tinus/Tinus wallichii/README.md
|
172
|
# Tinus wallichii Kuntze SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
apache-2.0
|
nghiant2710/base-images
|
balena-base-images/node/nitrogen6xq2g/debian/sid/15.7.0/run/Dockerfile
|
2937
|
# AUTOGENERATED FILE
FROM balenalib/nitrogen6xq2g-debian:sid-run
ENV NODE_VERSION 15.7.0
ENV YARN_VERSION 1.22.4
RUN buildDeps='curl libatomic1' \
&& set -x \
&& for key in \
6A010C5166006599AA17F08146C2130DFD2497F5 \
; do \
gpg --batch --keyserver pgp.mit.edu --recv-keys "$key" || \
gpg --batch --keyserver keyserver.pgp.com --recv-keys "$key" || \
gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "$key" ; \
done \
&& apt-get update && apt-get install -y $buildDeps --no-install-recommends \
&& rm -rf /var/lib/apt/lists/* \
&& curl -SLO "http://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION-linux-armv7l.tar.gz" \
&& echo "aa65f287bfe060321ee5e0b4f7134bd17690abb911c6fc1173ddbedddbf2c060 node-v$NODE_VERSION-linux-armv7l.tar.gz" | sha256sum -c - \
&& tar -xzf "node-v$NODE_VERSION-linux-armv7l.tar.gz" -C /usr/local --strip-components=1 \
&& rm "node-v$NODE_VERSION-linux-armv7l.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz" \
&& curl -fSLO --compressed "https://yarnpkg.com/downloads/$YARN_VERSION/yarn-v$YARN_VERSION.tar.gz.asc" \
&& gpg --batch --verify yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& mkdir -p /opt/yarn \
&& tar -xzf yarn-v$YARN_VERSION.tar.gz -C /opt/yarn --strip-components=1 \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarn \
&& ln -s /opt/yarn/bin/yarn /usr/local/bin/yarnpkg \
&& rm yarn-v$YARN_VERSION.tar.gz.asc yarn-v$YARN_VERSION.tar.gz \
&& npm config set unsafe-perm true -g --unsafe-perm \
&& rm -rf /tmp/*
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/[email protected]" \
&& echo "Running test-stack@node" \
&& chmod +x [email protected] \
&& bash [email protected] \
&& rm -rf [email protected]
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v7 \nOS: Debian Sid \nVariant: run variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nNode.js v15.7.0, Yarn v1.22.4 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh
|
apache-2.0
|
nghiant2710/base-images
|
balena-base-images/golang/n510-tx2/fedora/32/1.15.8/run/Dockerfile
|
2069
|
# AUTOGENERATED FILE
FROM balenalib/n510-tx2-fedora:32-run
ENV GO_VERSION 1.15.8
# gcc for cgo
RUN dnf install -y \
gcc-c++ \
gcc \
git \
&& dnf clean all
RUN mkdir -p /usr/local/go \
&& curl -SLO "https://storage.googleapis.com/golang/go$GO_VERSION.linux-arm64.tar.gz" \
&& echo "0e31ea4bf53496b0f0809730520dee98c0ae5c530f3701a19df0ba0a327bf3d2 go$GO_VERSION.linux-arm64.tar.gz" | sha256sum -c - \
&& tar -xzf "go$GO_VERSION.linux-arm64.tar.gz" -C /usr/local/go --strip-components=1 \
&& rm -f go$GO_VERSION.linux-arm64.tar.gz
ENV GOROOT /usr/local/go
ENV GOPATH /go
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
WORKDIR $GOPATH
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/test-stack@golang.sh" \
  && echo "Running test-stack@golang" \
  && chmod +x test-stack@golang.sh \
  && bash test-stack@golang.sh \
  && rm -rf test-stack@golang.sh
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo $'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v8 \nOS: Fedora 32 \nVariant: run variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nGo v1.15.8 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo $'#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh
|
apache-2.0
|
paulswithers/Key-Dates
|
documentation/xpages-java/com/timtripcony/package-tree.html
|
6298
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_45) on Tue Aug 11 18:46:50 BST 2015 -->
<TITLE>
com.timtripcony Class Hierarchy
</TITLE>
<META NAME="date" CONTENT="2015-08-11">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="com.timtripcony Class Hierarchy";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Class</FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Use</FONT> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Tree</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
<A HREF="../../uk/co/intec/beans/package-tree.html"><B>NEXT</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../index.html?com/timtripcony/package-tree.html" target="_top"><B>FRAMES</B></A>
<A HREF="package-tree.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
Hierarchy For Package com.timtripcony
</H2>
</CENTER>
<DL>
<DT><B>Package Hierarchies:</B><DD><A HREF="../../overview-tree.html">All Packages</A></DL>
<HR>
<H2>
Class Hierarchy
</H2>
<UL>
<LI TYPE="circle">java.lang.Object<UL>
<LI TYPE="circle">com.timtripcony.<A HREF="../../com/timtripcony/AbstractMapModel.html" title="class in com.timtripcony"><B>AbstractMapModel</B></A> (implements java.io.Serializable)
<UL>
<LI TYPE="circle">com.timtripcony.<A HREF="../../com/timtripcony/AbstractDocumentMapModel.html" title="class in com.timtripcony"><B>AbstractDocumentMapModel</B></A><UL>
<LI TYPE="circle">com.timtripcony.<A HREF="../../com/timtripcony/AbstractSmartDocumentModel.html" title="class in com.timtripcony"><B>AbstractSmartDocumentModel</B></A></UL>
</UL>
</UL>
</UL>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Class</FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Use</FONT> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Tree</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../index-files/index-1.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
<A HREF="../../uk/co/intec/beans/package-tree.html"><B>NEXT</B></A></FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../index.html?com/timtripcony/package-tree.html" target="_top"><B>FRAMES</B></A>
<A HREF="package-tree.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
</BODY>
</HTML>
|
apache-2.0
|
dagnir/aws-sdk-java
|
aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/transform/InstancePatchStateMarshaller.java
|
5993
|
/*
* Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.simplesystemsmanagement.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.simplesystemsmanagement.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;
/**
* InstancePatchStateMarshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class InstancePatchStateMarshaller {
private static final MarshallingInfo<String> INSTANCEID_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("InstanceId").build();
private static final MarshallingInfo<String> PATCHGROUP_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("PatchGroup").build();
private static final MarshallingInfo<String> BASELINEID_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("BaselineId").build();
private static final MarshallingInfo<String> SNAPSHOTID_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("SnapshotId").build();
private static final MarshallingInfo<String> OWNERINFORMATION_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("OwnerInformation").build();
private static final MarshallingInfo<Integer> INSTALLEDCOUNT_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("InstalledCount").build();
private static final MarshallingInfo<Integer> INSTALLEDOTHERCOUNT_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("InstalledOtherCount").build();
private static final MarshallingInfo<Integer> MISSINGCOUNT_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("MissingCount").build();
private static final MarshallingInfo<Integer> FAILEDCOUNT_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("FailedCount").build();
private static final MarshallingInfo<Integer> NOTAPPLICABLECOUNT_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("NotApplicableCount").build();
private static final MarshallingInfo<java.util.Date> OPERATIONSTARTTIME_BINDING = MarshallingInfo.builder(MarshallingType.DATE)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("OperationStartTime").build();
private static final MarshallingInfo<java.util.Date> OPERATIONENDTIME_BINDING = MarshallingInfo.builder(MarshallingType.DATE)
.marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("OperationEndTime").build();
private static final MarshallingInfo<String> OPERATION_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PAYLOAD)
.marshallLocationName("Operation").build();
private static final InstancePatchStateMarshaller instance = new InstancePatchStateMarshaller();
public static InstancePatchStateMarshaller getInstance() {
return instance;
}
/**
* Marshall the given parameter object.
*/
public void marshall(InstancePatchState instancePatchState, ProtocolMarshaller protocolMarshaller) {
if (instancePatchState == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(instancePatchState.getInstanceId(), INSTANCEID_BINDING);
protocolMarshaller.marshall(instancePatchState.getPatchGroup(), PATCHGROUP_BINDING);
protocolMarshaller.marshall(instancePatchState.getBaselineId(), BASELINEID_BINDING);
protocolMarshaller.marshall(instancePatchState.getSnapshotId(), SNAPSHOTID_BINDING);
protocolMarshaller.marshall(instancePatchState.getOwnerInformation(), OWNERINFORMATION_BINDING);
protocolMarshaller.marshall(instancePatchState.getInstalledCount(), INSTALLEDCOUNT_BINDING);
protocolMarshaller.marshall(instancePatchState.getInstalledOtherCount(), INSTALLEDOTHERCOUNT_BINDING);
protocolMarshaller.marshall(instancePatchState.getMissingCount(), MISSINGCOUNT_BINDING);
protocolMarshaller.marshall(instancePatchState.getFailedCount(), FAILEDCOUNT_BINDING);
protocolMarshaller.marshall(instancePatchState.getNotApplicableCount(), NOTAPPLICABLECOUNT_BINDING);
protocolMarshaller.marshall(instancePatchState.getOperationStartTime(), OPERATIONSTARTTIME_BINDING);
protocolMarshaller.marshall(instancePatchState.getOperationEndTime(), OPERATIONENDTIME_BINDING);
protocolMarshaller.marshall(instancePatchState.getOperation(), OPERATION_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
}
|
apache-2.0
|
common-workflow-language/cwljava
|
sdk-and-javadoc-generation/org/commonwl/lang/CommandOutputParameter.java
|
14693
|
/*****************************************************************************************************
*
* Authors:
*
* <b> Java SDK for CWL </b>
*
* @author Paul Grosu ([email protected]), Northeastern University
* @version 0.20
* @since April 28, 2016
*
* <p> Alternate SDK (via Avro):
*
* Denis Yuen ([email protected])
*
* CWL Draft:
*
* Peter Amstutz ([email protected]), Curoverse
* Nebojsa Tijanic ([email protected]), Seven Bridges Genomics
*
* Contributors:
*
* Luka Stojanovic ([email protected]), Seven Bridges Genomics
* John Chilton ([email protected]), Galaxy Project, Pennsylvania State University
* Michael R. Crusoe ([email protected]), University of California, Davis
* Herve Menager ([email protected]), Institut Pasteur
* Maxim Mikheev ([email protected]), BioDatomics
* Stian Soiland-Reyes ([email protected]), University of Manchester
*
*****************************************************************************************************/
package org.commonwl.lang;
/*****************************************************************************************************
*
* An output parameter for a CommandLineTool.
*/
public class CommandOutputParameter extends OutputParameter {
/*****************************************************************************************************
*
* Specify valid types of data that may be assigned to this parameter.
*/
Object type = null;
/*****************************************************************************************************
*
* Describes how to handle the outputs of a process.
*/
CommandOutputBinding outputBinding = null;
/*****************************************************************************************************
*
* The unique identifier for this parameter object.
*/
String id = null;
/*****************************************************************************************************
*
* Only valid when `type: File` or is an array of `items: File`. A value of `true` indicates that the file is read or written sequentially without seeking. An implementation may use this flag to indicate whether it is valid to stream file contents using a named pipe. Default: `false`.
*/
Boolean streamable = null;
/*****************************************************************************************************
*
 * Only valid when `type: File` or is an array of `items: File`. For input parameters, this must be one or more IRIs of concept nodes that represent file formats which are allowed as input to this parameter, preferably defined within an ontology. If no ontology is available, file formats may be tested by exact match. For output parameters, this is the file format that will be assigned to the output parameter.
*/
Object format = null;
/*****************************************************************************************************
*
* A documentation string for this type, or an array of strings which should be concatenated.
*/
Object doc = null;
/*****************************************************************************************************
*
* Only valid when `type: File` or is an array of `items: File`. Describes files that must be included alongside the primary file(s). If the value is an expression, the value of `self` in the expression must be the primary input or output File to which this binding applies. If the value is a string, it specifies that the following pattern should be applied to the primary file: 1. If string begins with one or more caret `^` characters, for each caret, remove the last file extension from the path (the last period `.` and all following characters). If there are no file extensions, the path is unchanged. 2. Append the remainder of the string to the end of the file path.
*/
Object secondaryFiles = null;
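    // Worked example of the caret pattern described above (illustrative values, not from the source):
    // with a primary file "sample.bam", the pattern ".bai" yields "sample.bam.bai",
    // while "^.bai" first removes the ".bam" extension and then appends ".bai",
    // yielding "sample.bai"; "^^.idx" on "reads.fastq.gz" would strip both extensions
    // before appending, yielding "reads.idx".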
/*****************************************************************************************************
*
* A short, human-readable label of this object.
*/
String label = null;
public CommandOutputParameter() { super(); }
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a CommandOutputArraySchema type.
*
*/
public void settype( CommandOutputArraySchema value ) {
type = value;
}
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a stderr type.
*
*/
public void settype( stderr value ) {
type = value;
}
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a String array.
*
*/
public void settype( String [] value ) {
type = value;
}
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a CommandOutputArraySchema array.
*
*/
public void settype( CommandOutputArraySchema [] value ) {
type = value;
}
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a CWLType array.
*
*/
public void settype( CWLType [] value ) {
type = value;
}
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a CommandOutputRecordSchema array.
*
*/
public void settype( CommandOutputRecordSchema [] value ) {
type = value;
}
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a CWLType type.
*
*/
public void settype( CWLType value ) {
type = value;
}
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a stdout type.
*
*/
public void settype( stdout value ) {
type = value;
}
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a CommandOutputEnumSchema type.
*
*/
public void settype( CommandOutputEnumSchema value ) {
type = value;
}
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a String type.
*
*/
public void settype( String value ) {
type = value;
}
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a CommandOutputRecordSchema type.
*
*/
public void settype( CommandOutputRecordSchema value ) {
type = value;
}
/*****************************************************************************************************
*
* This method sets the value of type.
*
* @param value will update type, which is a CommandOutputEnumSchema array.
*
*/
public void settype( CommandOutputEnumSchema [] value ) {
type = value;
}
/*****************************************************************************************************
*
* This method returns the value of type.
*
* @return This method will return the value of type, which is a Object type.
*
*/
public Object gettype() {
return type;
}
/*****************************************************************************************************
*
* This method sets the value of outputBinding.
*
* @param value will update outputBinding, which is a CommandOutputBinding type.
*
*/
public void setoutputBinding( CommandOutputBinding value ) {
outputBinding = value;
}
/*****************************************************************************************************
*
* This method returns the value of outputBinding.
*
* @return This method will return the value of outputBinding, which is a CommandOutputBinding type.
*
*/
public CommandOutputBinding getoutputBinding() {
return outputBinding;
}
/*****************************************************************************************************
*
* This method sets the value of id.
*
* @param value will update id, which is a String type.
*
*/
public void setid( String value ) {
id = value;
}
/*****************************************************************************************************
*
* This method returns the value of id.
*
* @return This method will return the value of id, which is a String type.
*
*/
public String getid() {
return id;
}
/*****************************************************************************************************
*
* This method sets the value of streamable.
*
* @param value will update streamable, which is a Boolean type.
*
*/
public void setstreamable( Boolean value ) {
streamable = value;
}
/*****************************************************************************************************
*
* This method returns the value of streamable.
*
* @return This method will return the value of streamable, which is a Boolean type.
*
*/
public Boolean getstreamable() {
return streamable;
}
/*****************************************************************************************************
*
* This method sets the value of format.
*
* @param value will update format, which is a Expression array.
*
*/
public void setformat( Expression [] value ) {
format = value;
}
/*****************************************************************************************************
*
* This method sets the value of format.
*
* @param value will update format, which is a String type.
*
*/
public void setformat( String value ) {
format = value;
}
/*****************************************************************************************************
*
* This method sets the value of format.
*
* @param value will update format, which is a String array.
*
*/
public void setformat( String [] value ) {
format = value;
}
/*****************************************************************************************************
*
* This method returns the value of format.
*
* @return This method will return the value of format, which is a Object type.
*
*/
public Object getformat() {
return format;
}
/*****************************************************************************************************
*
* This method sets the value of doc.
*
* @param value will update doc, which is a String type.
*
*/
public void setdoc( String value ) {
doc = value;
}
/*****************************************************************************************************
*
* This method sets the value of doc.
*
* @param value will update doc, which is a String array.
*
*/
public void setdoc( String [] value ) {
doc = value;
}
/*****************************************************************************************************
*
* This method returns the value of doc.
*
* @return This method will return the value of doc, which is a Object type.
*
*/
public Object getdoc() {
return doc;
}
/*****************************************************************************************************
*
* This method sets the value of secondaryFiles.
*
* @param value will update secondaryFiles, which is a Expression array.
*
*/
public void setsecondaryFiles( Expression [] value ) {
secondaryFiles = value;
}
/*****************************************************************************************************
*
* This method sets the value of secondaryFiles.
*
* @param value will update secondaryFiles, which is a String type.
*
*/
public void setsecondaryFiles( String value ) {
secondaryFiles = value;
}
/*****************************************************************************************************
*
* This method sets the value of secondaryFiles.
*
* @param value will update secondaryFiles, which is a String array.
*
*/
public void setsecondaryFiles( String [] value ) {
secondaryFiles = value;
}
/*****************************************************************************************************
*
* This method sets the value of secondaryFiles.
*
* @param value will update secondaryFiles, which is a Expression type.
*
*/
public void setsecondaryFiles( Expression value ) {
secondaryFiles = value;
}
/*****************************************************************************************************
*
* This method returns the value of secondaryFiles.
*
* @return This method will return the value of secondaryFiles, which is a Object type.
*
*/
public Object getsecondaryFiles() {
return secondaryFiles;
}
/*****************************************************************************************************
*
* This method sets the value of label.
*
* @param value will update label, which is a String type.
*
*/
public void setlabel( String value ) {
label = value;
}
/*****************************************************************************************************
*
* This method returns the value of label.
*
* @return This method will return the value of label, which is a String type.
*
*/
public String getlabel() {
return label;
}
}
|
apache-2.0
|
Bersaelor/D2
|
iOS_app/Classes/Native/mscorlib_System_Collections_Generic_Dictionary_2_V2024483409.h
|
1320
|
#pragma once
#include "il2cpp-config.h"
#ifndef _MSC_VER
# include <alloca.h>
#else
# include <malloc.h>
#endif
#include <stdint.h>
// System.Collections.Generic.Dictionary`2<System.Object,System.Int32>
struct Dictionary_2_t3323877696;
#include "mscorlib_System_Object4170816371.h"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
#pragma clang diagnostic ignored "-Wunused-variable"
#endif
// System.Collections.Generic.Dictionary`2/ValueCollection<System.Object,System.Int32>
struct ValueCollection_t2024483409 : public Il2CppObject
{
public:
// System.Collections.Generic.Dictionary`2<TKey,TValue> System.Collections.Generic.Dictionary`2/ValueCollection::dictionary
Dictionary_2_t3323877696 * ___dictionary_0;
public:
inline static int32_t get_offset_of_dictionary_0() { return static_cast<int32_t>(offsetof(ValueCollection_t2024483409, ___dictionary_0)); }
inline Dictionary_2_t3323877696 * get_dictionary_0() const { return ___dictionary_0; }
inline Dictionary_2_t3323877696 ** get_address_of_dictionary_0() { return &___dictionary_0; }
inline void set_dictionary_0(Dictionary_2_t3323877696 * value)
{
___dictionary_0 = value;
Il2CppCodeGenWriteBarrier(&___dictionary_0, value);
}
};
#ifdef __clang__
#pragma clang diagnostic pop
#endif
|
apache-2.0
|
nghiant2710/base-images
|
balena-base-images/golang/odroid-u3+/debian/stretch/1.15.8/build/Dockerfile
|
2030
|
# AUTOGENERATED FILE
FROM balenalib/odroid-u3+-debian:stretch-build
ENV GO_VERSION 1.15.8
RUN mkdir -p /usr/local/go \
&& curl -SLO "http://resin-packages.s3.amazonaws.com/golang/v$GO_VERSION/go$GO_VERSION.linux-armv7hf.tar.gz" \
&& echo "bde22202576c3920ff5646fb1d19877cedc19501939d6ccd7b16ff89071abd0a go$GO_VERSION.linux-armv7hf.tar.gz" | sha256sum -c - \
&& tar -xzf "go$GO_VERSION.linux-armv7hf.tar.gz" -C /usr/local/go --strip-components=1 \
&& rm -f go$GO_VERSION.linux-armv7hf.tar.gz
ENV GOROOT /usr/local/go
ENV GOPATH /go
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
WORKDIR $GOPATH
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/test-stack@golang.sh" \
  && echo "Running test-stack@golang" \
  && chmod +x test-stack@golang.sh \
  && bash test-stack@golang.sh \
  && rm -rf test-stack@golang.sh
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v7 \nOS: Debian Stretch \nVariant: build variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nGo v1.15.8 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh
|
apache-2.0
|
quarkusio/quarkus
|
test-framework/maven/src/main/java/io/quarkus/maven/it/assertions/SetupVerifier.java
|
5917
|
package io.quarkus.maven.it.assertions;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import java.io.File;
import java.io.FileInputStream;
import java.util.Optional;
import java.util.Properties;
import org.apache.maven.model.Model;
import org.apache.maven.model.Plugin;
import org.apache.maven.model.Profile;
import org.apache.maven.model.io.xpp3.MavenXpp3Reader;
import org.apache.maven.project.MavenProject;
import org.codehaus.plexus.util.xml.Xpp3Dom;
import io.quarkus.devtools.project.QuarkusProjectHelper;
import io.quarkus.devtools.testing.RegistryClientTestHelper;
import io.quarkus.maven.utilities.MojoUtils;
import io.quarkus.platform.tools.ToolsConstants;
import io.quarkus.registry.catalog.ExtensionCatalog;
public class SetupVerifier {
public static void assertThatJarExists(File archive) throws Exception {
JarVerifier jarVerifier = new JarVerifier(archive);
jarVerifier.assertThatJarIsCreated();
jarVerifier.assertThatJarHasManifest();
}
public static void assertThatJarContainsFile(File archive, String file) throws Exception {
JarVerifier jarVerifier = new JarVerifier(archive);
jarVerifier.assertThatFileIsContained(file);
}
public static void assertThatJarDoesNotContainFile(File archive, String file) throws Exception {
JarVerifier jarVerifier = new JarVerifier(archive);
jarVerifier.assertThatFileIsNotContained(file);
}
public static void assertThatJarContainsFileWithContent(File archive, String path, String... lines) throws Exception {
JarVerifier jarVerifier = new JarVerifier(archive);
jarVerifier.assertThatFileContains(path, lines);
}
public static void verifySetup(File pomFile) throws Exception {
assertNotNull(pomFile, "Unable to find pom.xml");
MavenXpp3Reader xpp3Reader = new MavenXpp3Reader();
Model model = xpp3Reader.read(new FileInputStream(pomFile));
MavenProject project = new MavenProject(model);
Optional<Plugin> maybe = hasPlugin(project, ToolsConstants.IO_QUARKUS + ":" + ToolsConstants.QUARKUS_MAVEN_PLUGIN);
assertThat(maybe).isNotEmpty();
//Check if the properties have been set correctly
Properties properties = model.getProperties();
assertThat(properties.containsKey(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLATFORM_GROUP_ID_NAME)).isTrue();
assertThat(properties.containsKey(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLATFORM_ARTIFACT_ID_NAME)).isTrue();
assertThat(properties.containsKey(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLATFORM_VERSION_NAME)).isTrue();
assertThat(properties.containsKey(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLUGIN_VERSION_NAME)).isTrue();
// Check plugin is set
Plugin plugin = maybe.orElseThrow(() -> new AssertionError("Plugin expected"));
assertThat(plugin).isNotNull().satisfies(p -> {
assertThat(p.getArtifactId()).isEqualTo(ToolsConstants.QUARKUS_MAVEN_PLUGIN);
assertThat(p.getGroupId()).isEqualTo(ToolsConstants.IO_QUARKUS);
assertThat(p.getVersion()).isEqualTo(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLUGIN_VERSION_VALUE);
});
// Check build execution Configuration
assertThat(plugin.getExecutions()).hasSize(1).allSatisfy(execution -> {
assertThat(execution.getGoals()).containsExactly("build");
assertThat(execution.getConfiguration()).isNull();
});
// Check profile
assertThat(model.getProfiles()).hasSize(1);
Profile profile = model.getProfiles().get(0);
assertThat(profile.getId()).isEqualTo("native");
Plugin actual = profile.getBuild().getPluginsAsMap()
.get(ToolsConstants.IO_QUARKUS + ":" + ToolsConstants.QUARKUS_MAVEN_PLUGIN);
assertThat(actual).isNotNull();
assertThat(actual.getExecutions()).hasSize(1).allSatisfy(exec -> {
assertThat(exec.getGoals()).containsExactly("native-image");
assertThat(exec.getConfiguration()).isInstanceOf(Xpp3Dom.class)
.satisfies(o -> assertThat(o.toString()).contains("enableHttpUrlHandler"));
});
}
public static Optional<Plugin> hasPlugin(MavenProject project, String pluginKey) {
Optional<Plugin> optPlugin = project.getBuildPlugins().stream()
.filter(plugin -> pluginKey.equals(plugin.getKey()))
.findFirst();
if (!optPlugin.isPresent() && project.getPluginManagement() != null) {
optPlugin = project.getPluginManagement().getPlugins().stream()
.filter(plugin -> pluginKey.equals(plugin.getKey()))
.findFirst();
}
return optPlugin;
}
public static void verifySetupWithVersion(File pomFile) throws Exception {
MavenXpp3Reader xpp3Reader = new MavenXpp3Reader();
Model model = xpp3Reader.read(new FileInputStream(pomFile));
MavenProject project = new MavenProject(model);
Properties projectProps = project.getProperties();
assertNotNull(projectProps);
assertFalse(projectProps.isEmpty());
final String quarkusVersion = getPlatformDescriptor().getQuarkusCoreVersion();
assertEquals(quarkusVersion, projectProps.getProperty(MojoUtils.TEMPLATE_PROPERTY_QUARKUS_PLUGIN_VERSION_NAME));
}
private static ExtensionCatalog getPlatformDescriptor() throws Exception {
RegistryClientTestHelper.enableRegistryClientTestConfig();
try {
return QuarkusProjectHelper.getCatalogResolver().resolveExtensionCatalog();
} finally {
RegistryClientTestHelper.disableRegistryClientTestConfig();
}
}
}
|
apache-2.0
|
emmartins/wildfly-server-migration
|
servers/wildfly23.0/src/main/java/org/jboss/migration/wfly/task/subsystem/microprofile/WildFly23_0AddMicroprofileJwtSmallryeSubsystem.java
|
2097
|
/*
* Copyright 2021 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.migration.wfly.task.subsystem.microprofile;
import org.jboss.migration.core.jboss.JBossExtensionNames;
import org.jboss.migration.core.jboss.JBossSubsystemNames;
import org.jboss.migration.wfly10.config.management.ProfileResource;
import org.jboss.migration.wfly10.config.task.management.subsystem.AddSubsystemResourceSubtaskBuilder;
import org.jboss.migration.wfly10.config.task.management.subsystem.AddSubsystemResources;
/**
* @author emmartins
*/
public class WildFly23_0AddMicroprofileJwtSmallryeSubsystem<S> extends AddSubsystemResources<S> {
public WildFly23_0AddMicroprofileJwtSmallryeSubsystem() {
super(JBossExtensionNames.MICROPROFILE_JWT_SMALLRYE, new SubtaskBuilder<>());
// do not add subsystem config to "standalone-load-balancer.xml" config
skipPolicyBuilders(getSkipPolicyBuilder(),
buildParameters -> context -> buildParameters.getServerConfiguration().getConfigurationPath().getPath().endsWith("standalone-load-balancer.xml"));
}
static class SubtaskBuilder<S> extends AddSubsystemResourceSubtaskBuilder<S> {
SubtaskBuilder() {
super(JBossSubsystemNames.MICROPROFILE_JWT_SMALLRYE);
// do not add subsystem config to profile "load-balancer"
skipPolicyBuilder(buildParameters -> context -> buildParameters.getResource().getResourceType() == ProfileResource.RESOURCE_TYPE && buildParameters.getResource().getResourceName().equals("load-balancer"));
}
}
}
|
apache-2.0
|
baishuai/leetcode
|
algorithms/p321/321_test.go
|
565
|
package p321
import (
"github.com/stretchr/testify/assert"
"testing"
)
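// These tests exercise LeetCode 321 ("Create Maximum Number"); presumably
// maxOneArray(nums, k) picks the lexicographically largest length-k subsequence
// of a single array, and maxNumber(nums1, nums2, k) merges picks from both
// arrays into the largest possible k-digit number. The implementations live
// alongside this test file.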
func Test0(t *testing.T) {
assert.Equal(t, []int{5, 4}, maxOneArray([]int{5, 2, 3, 4, 1}, 2))
assert.Equal(t, []int{6}, maxOneArray([]int{2, 4, 6, 5}, 1))
assert.Equal(t, []int{6, 5}, maxOneArray([]int{2, 4, 6, 5}, 2))
assert.Equal(t, []int{4, 6, 5}, maxOneArray([]int{2, 4, 6, 5}, 3))
assert.Equal(t, []int{2, 4, 6, 5}, maxOneArray([]int{2, 4, 6, 5}, 4))
}
func Test1(t *testing.T) {
assert.Equal(t, []int{9, 8, 6, 5, 3}, maxNumber([]int{3, 4, 6, 5}, []int{9, 1, 2, 5, 8, 3}, 5))
}
|
apache-2.0
|
lapcat/vienna-rss
|
Vienna/Sources/Fetching/RefreshManager.h
|
2093
|
//
// RefreshManager.h
// Vienna
//
// Created by Steve on 7/19/05.
// Copyright (c) 2004-2018 Steve Palmer and Vienna contributors (see menu item 'About Vienna' for list of contributors). All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
@import Foundation;
@class Database;
@class FeedCredentials;
@class Folder;
@interface RefreshManager : NSObject <NSURLSessionDelegate> {
NSUInteger countOfNewArticles;
NSMutableArray * authQueue;
FeedCredentials * credentialsController;
BOOL hasStarted;
NSString * statusMessageDuringRefresh;
NSOperationQueue *networkQueue;
dispatch_queue_t _queue;
NSURLSession * session;
}
+(RefreshManager *)sharedManager;
@property (readonly, copy) NSString *statusMessage;
@property (nonatomic, getter=isConnecting, readonly) BOOL connecting;
@property (nonatomic, readonly) NSUInteger countOfNewArticles;
-(void)refreshFolderIconCacheForSubscriptions:(NSArray *)foldersArray;
-(void)refreshSubscriptions:(NSArray *)foldersArray ignoringSubscriptionStatus:(BOOL)ignoreSubStatus;
-(void)forceRefreshSubscriptionForFolders:(NSArray*)foldersArray;
-(void)cancelAll;
-(void)refreshFavIconForFolder:(Folder *)folder;
-(NSOperation *)addConnection:(NSURLRequest *)conn completionHandler:(void (^)(NSData *data, NSURLResponse *response, NSError *error))completionHandler;
-(void)suspendConnectionsQueue;
-(void)resumeConnectionsQueue;
@end
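// Example (illustrative; call shape assumed from the declarations above): a
// typical call site would refresh a set of folders via the shared instance:
//   [[RefreshManager sharedManager] refreshSubscriptions:folders
//                            ignoringSubscriptionStatus:NO];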
// Refresh types
typedef NS_ENUM(int, RefreshTypes) {
MA_Refresh_NilType = -1,
MA_Refresh_Feed,
MA_Refresh_FavIcon,
MA_Refresh_GoogleFeed,
MA_ForceRefresh_Google_Feed
};
|
apache-2.0
|
salv-orlando/MyRepo
|
nova/tests/api/openstack/contrib/test_volume_types.py
|
5886
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import stubout
import webob
from nova import exception
from nova import context
from nova import test
from nova import log as logging
from nova.api.openstack.contrib import volumetypes
from nova.volume import volume_types
from nova.tests.api.openstack import fakes
LOG = logging.getLogger('nova.tests.api.openstack.test_volume_types')
last_param = {}
def stub_volume_type(id):
specs = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs)
def return_volume_types_get_all_types(context):
return dict(vol_type_1=stub_volume_type(1),
vol_type_2=stub_volume_type(2),
vol_type_3=stub_volume_type(3))
def return_empty_volume_types_get_all_types(context):
return {}
def return_volume_types_get_volume_type(context, id):
if id == "777":
raise exception.VolumeTypeNotFound(volume_type_id=id)
return stub_volume_type(int(id))
def return_volume_types_destroy(context, name):
if name == "777":
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
pass
def return_volume_types_create(context, name, specs):
pass
def return_volume_types_get_by_name(context, name):
if name == "777":
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
return stub_volume_type(int(name.split("_")[2]))
class VolumeTypesApiTest(test.TestCase):
def setUp(self):
super(VolumeTypesApiTest, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
self.controller = volumetypes.VolumeTypesController()
def tearDown(self):
self.stubs.UnsetAll()
super(VolumeTypesApiTest, self).tearDown()
def test_volume_types_index(self):
self.stubs.Set(volume_types, 'get_all_types',
return_volume_types_get_all_types)
req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
res_dict = self.controller.index(req)
self.assertEqual(3, len(res_dict))
for name in ['vol_type_1', 'vol_type_2', 'vol_type_3']:
self.assertEqual(name, res_dict[name]['name'])
self.assertEqual('value1', res_dict[name]['extra_specs']['key1'])
def test_volume_types_index_no_data(self):
self.stubs.Set(volume_types, 'get_all_types',
return_empty_volume_types_get_all_types)
req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
res_dict = self.controller.index(req)
self.assertEqual(0, len(res_dict))
def test_volume_types_show(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(1, len(res_dict))
self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
def test_volume_types_show_not_found(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/777')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, '777')
def test_volume_types_delete(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
self.stubs.Set(volume_types, 'destroy',
return_volume_types_destroy)
req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/1')
self.controller.delete(req, 1)
def test_volume_types_delete_not_found(self):
self.stubs.Set(volume_types, 'get_volume_type',
return_volume_types_get_volume_type)
self.stubs.Set(volume_types, 'destroy',
return_volume_types_destroy)
req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types/777')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, '777')
def test_create(self):
self.stubs.Set(volume_types, 'create',
return_volume_types_create)
self.stubs.Set(volume_types, 'get_volume_type_by_name',
return_volume_types_get_by_name)
body = {"volume_type": {"name": "vol_type_1",
"extra_specs": {"key1": "value1"}}}
req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
res_dict = self.controller.create(req, body)
self.assertEqual(1, len(res_dict))
self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
def test_create_empty_body(self):
self.stubs.Set(volume_types, 'create',
return_volume_types_create)
self.stubs.Set(volume_types, 'get_volume_type_by_name',
return_volume_types_get_by_name)
req = fakes.HTTPRequest.blank('/v1.1/123/os-volume-types')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, '')
|
apache-2.0
|
GPflow/GPflow
|
tests/gpflow/conditionals/test_multioutput.py
|
31704
|
from typing import Callable, List, Sequence, Tuple
import numpy as np
import pytest
import scipy
import tensorflow as tf
from _pytest.fixtures import SubRequest
import gpflow
import gpflow.inducing_variables.multioutput as mf
import gpflow.kernels.multioutput as mk
from gpflow import set_trainable
from gpflow.base import AnyNDArray, RegressionData
from gpflow.conditionals import sample_conditional
from gpflow.conditionals.util import (
fully_correlated_conditional,
fully_correlated_conditional_repeat,
independent_interdomain_conditional,
sample_mvn,
)
from gpflow.config import default_float, default_jitter
from gpflow.inducing_variables import InducingPoints
from gpflow.kernels import SquaredExponential
from gpflow.likelihoods import Gaussian
from gpflow.models import SVGP
float_type = default_float()
rng = np.random.RandomState(99201)
# ------------------------------------------
# Helpers
# ------------------------------------------
def predict_all(
models: Sequence[SVGP], Xnew: tf.Tensor, full_cov: bool, full_output_cov: bool
) -> Tuple[List[tf.Tensor], List[tf.Tensor]]:
"""
Returns the mean and variance of f(Xnew) for each model in `models`.
"""
ms, vs = [], []
for model in models:
m, v = model.predict_f(Xnew, full_cov=full_cov, full_output_cov=full_output_cov)
ms.append(m)
vs.append(v)
return ms, vs
def assert_all_array_elements_almost_equal(arr: Sequence[tf.Tensor]) -> None:
"""
Check if consecutive elements of `arr` are almost equal.
"""
for i in range(len(arr) - 1):
np.testing.assert_allclose(arr[i], arr[i + 1], atol=1e-5)
def check_equality_predictions(
data: RegressionData, models: Sequence[SVGP], decimal: int = 3
) -> None:
"""
Executes a couple of checks to compare the equality of predictions
of different models. The models should be configured with the same
training data (X, Y). The following checks are done:
- check if elbo is (almost) equal for all models
- check if predicted mean is (almost) equal
- check if predicted variance is (almost) equal.
All possible variances over the inputs and outputs are calculated
and equality is checked.
- check if variances within a model are consistent. Parts of the covariance
matrices should overlap, and this is tested.
"""
elbos = [m.elbo(data) for m in models]
# Check equality of log likelihood
assert_all_array_elements_almost_equal(elbos)
# Predict: full_cov = True and full_output_cov = True
means_tt, vars_tt = predict_all(models, Data.Xs, full_cov=True, full_output_cov=True)
# Predict: full_cov = True and full_output_cov = False
means_tf, vars_tf = predict_all(models, Data.Xs, full_cov=True, full_output_cov=False)
# Predict: full_cov = False and full_output_cov = True
means_ft, vars_ft = predict_all(models, Data.Xs, full_cov=False, full_output_cov=True)
# Predict: full_cov = False and full_output_cov = False
means_ff, vars_ff = predict_all(models, Data.Xs, full_cov=False, full_output_cov=False)
# check equality of all the means
all_means = means_tt + means_tf + means_ft + means_ff
assert_all_array_elements_almost_equal(all_means)
# check equality of all the variances within a category
# (e.g. full_cov=True and full_output_cov=False)
for var in [vars_tt, vars_tf, vars_ft, vars_ff]:
assert_all_array_elements_almost_equal(var)
# Here we check that the variance in different categories are equal
# after transforming to the right shape.
var_tt = vars_tt[0] # N x P x N x P
var_tf = vars_tf[0] # P x N x N
var_ft = vars_ft[0] # N x P x P
var_ff = vars_ff[0] # N x P
np.testing.assert_almost_equal(
np.diagonal(var_tt, axis1=1, axis2=3),
np.transpose(var_tf, [1, 2, 0]),
decimal=decimal,
)
np.testing.assert_almost_equal(
np.diagonal(var_tt, axis1=0, axis2=2),
np.transpose(var_ft, [1, 2, 0]),
decimal=decimal,
)
np.testing.assert_almost_equal(
np.diagonal(np.diagonal(var_tt, axis1=0, axis2=2)), var_ff, decimal=decimal
)
def expand_cov(q_sqrt: tf.Tensor, W: tf.Tensor) -> tf.Tensor:
"""
:param q_sqrt: cholesky of covariance matrices, L x M x M
:param W: mixing matrix (square), L x L
:return: cholesky of 1 x LM x LM covariance matrix
"""
q_cov = np.matmul(q_sqrt, q_sqrt.transpose([0, 2, 1])) # [L, M, M]
q_cov_expanded = scipy.linalg.block_diag(*q_cov) # [LM, LM]
q_sqrt_expanded = np.linalg.cholesky(q_cov_expanded) # [LM, LM]
return q_sqrt_expanded[None, ...]
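# Note (added for clarity): if q_sqrt stacks Cholesky factors S_l, the factor L
# returned above satisfies L @ L.T == block_diag(S_1 @ S_1.T, ..., S_L @ S_L.T),
# i.e. a single [1, LM, LM] covariance in which the L latent GPs are uncorrelated.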
def create_q_sqrt(M: int, L: int) -> AnyNDArray:
""" returns an array of L lower triangular matrices of size M x M """
return np.array([np.tril(rng.randn(M, M)) for _ in range(L)]) # [L, M, M]
# ------------------------------------------
# Data classes: storing constants
# ------------------------------------------
class Data:
N, Ntest = 20, 5
D = 1 # input dimension
M = 3 # inducing points
L = 2 # latent gps
P = 3 # output dimension
MAXITER = int(15e2)
X = tf.random.normal((N,), dtype=tf.float64)[:, None] * 10 - 5
G = np.hstack((0.5 * np.sin(3 * X) + X, 3.0 * np.cos(X) - X))
Ptrue = np.array([[0.5, -0.3, 1.5], [-0.4, 0.43, 0.0]]) # [L, P]
Y = tf.convert_to_tensor(G @ Ptrue)
G = tf.convert_to_tensor(np.hstack((0.5 * np.sin(3 * X) + X, 3.0 * np.cos(X) - X)))
Ptrue = tf.convert_to_tensor(np.array([[0.5, -0.3, 1.5], [-0.4, 0.43, 0.0]])) # [L, P]
Y += tf.random.normal(Y.shape, dtype=tf.float64) * [0.2, 0.2, 0.2]
Xs = tf.convert_to_tensor(np.linspace(-6, 6, Ntest)[:, None])
data = (X, Y)
class DataMixedKernelWithEye(Data):
""" Note in this class L == P """
M, L = 4, 3
W = np.eye(L)
G = np.hstack(
[0.5 * np.sin(3 * Data.X) + Data.X, 3.0 * np.cos(Data.X) - Data.X, 1.0 + Data.X]
) # [N, P]
mu_data = tf.random.uniform((M, L), dtype=tf.float64) # [M, L]
sqrt_data = create_q_sqrt(M, L) # [L, M, M]
mu_data_full = tf.reshape(mu_data @ W, [-1, 1]) # [L, 1]
sqrt_data_full = expand_cov(sqrt_data, W) # [1, LM, LM]
Y = tf.convert_to_tensor(G @ W)
G = tf.convert_to_tensor(G)
W = tf.convert_to_tensor(W)
sqrt_data = tf.convert_to_tensor(sqrt_data)
sqrt_data_full = tf.convert_to_tensor(sqrt_data_full)
Y += tf.random.normal(Y.shape, dtype=tf.float64) * tf.ones((L,), dtype=tf.float64) * 0.2
data = (Data.X, Y)
class DataMixedKernel(Data):
M = 5
L = 2
P = 3
W = rng.randn(P, L)
G = np.hstack([0.5 * np.sin(3 * Data.X) + Data.X, 3.0 * np.cos(Data.X) - Data.X]) # [N, L]
mu_data = tf.random.normal((M, L), dtype=tf.float64) # [M, L]
sqrt_data = create_q_sqrt(M, L) # [L, M, M]
Y = tf.convert_to_tensor(G @ W.T)
G = tf.convert_to_tensor(G)
W = tf.convert_to_tensor(W)
sqrt_data = tf.convert_to_tensor(sqrt_data)
Y += tf.random.normal(Y.shape, dtype=tf.float64) * tf.ones((P,), dtype=tf.float64) * 0.1
data = (Data.X, Y)
# ------------------------------------------
# Test sample conditional
# ------------------------------------------
def test_sample_mvn(full_cov: bool) -> None:
"""
Draws 10,000 samples from a distribution
with known mean and covariance. The test checks
if the mean and covariance of the samples is
close to the true mean and covariance.
"""
N, D = 10000, 2
means = tf.ones((N, D), dtype=float_type)
if full_cov:
covs = tf.eye(D, batch_shape=[N], dtype=float_type)
else:
covs = tf.ones((N, D), dtype=float_type)
samples = sample_mvn(means, covs, full_cov)
samples_mean = np.mean(samples, axis=0)
samples_cov = np.cov(samples, rowvar=False)
np.testing.assert_array_almost_equal(samples_mean, [1.0, 1.0], decimal=1)
np.testing.assert_array_almost_equal(samples_cov, [[1.0, 0.0], [0.0, 1.0]], decimal=1)
def test_sample_conditional(whiten: bool, full_cov: bool, full_output_cov: bool) -> None:
if full_cov and full_output_cov:
return
q_mu = tf.random.uniform((Data.M, Data.P), dtype=tf.float64) # [M, P]
q_sqrt = tf.convert_to_tensor(
[np.tril(tf.random.uniform((Data.M, Data.M), dtype=tf.float64)) for _ in range(Data.P)]
) # [P, M, M]
Z = Data.X[: Data.M, ...] # [M, D]
Xs: AnyNDArray = np.ones((Data.N, Data.D), dtype=float_type)
inducing_variable = InducingPoints(Z)
kernel = SquaredExponential()
# Path 1
value_f, mean_f, var_f = sample_conditional(
Xs,
inducing_variable,
kernel,
q_mu,
q_sqrt=q_sqrt,
white=whiten,
full_cov=full_cov,
full_output_cov=full_output_cov,
num_samples=int(1e5),
)
value_f = value_f.numpy().reshape((-1,) + value_f.numpy().shape[2:])
# Path 2
if full_output_cov:
pytest.skip(
"sample_conditional with X instead of inducing_variable does not support full_output_cov"
)
value_x, mean_x, var_x = sample_conditional(
Xs,
Z,
kernel,
q_mu,
q_sqrt=q_sqrt,
white=whiten,
full_cov=full_cov,
full_output_cov=full_output_cov,
num_samples=int(1e5),
)
value_x = value_x.numpy().reshape((-1,) + value_x.numpy().shape[2:])
# check if mean and covariance of samples are similar
np.testing.assert_array_almost_equal(
np.mean(value_x, axis=0), np.mean(value_f, axis=0), decimal=1
)
np.testing.assert_array_almost_equal(
np.cov(value_x, rowvar=False), np.cov(value_f, rowvar=False), decimal=1
)
np.testing.assert_allclose(mean_x, mean_f)
np.testing.assert_allclose(var_x, var_f)
def test_sample_conditional_mixedkernel() -> None:
q_mu = tf.random.uniform((Data.M, Data.L), dtype=tf.float64) # M x L
q_sqrt = tf.convert_to_tensor(
[np.tril(tf.random.uniform((Data.M, Data.M), dtype=tf.float64)) for _ in range(Data.L)]
) # L x M x M
Z = Data.X[: Data.M, ...] # M x D
N = int(10e5)
Xs: AnyNDArray = np.ones((N, Data.D), dtype=float_type)
# Path 1: mixed kernel: most efficient route
W = np.random.randn(Data.P, Data.L)
mixed_kernel = mk.LinearCoregionalization([SquaredExponential() for _ in range(Data.L)], W)
optimal_inducing_variable = mf.SharedIndependentInducingVariables(InducingPoints(Z))
value, mean, var = sample_conditional(
Xs, optimal_inducing_variable, mixed_kernel, q_mu, q_sqrt=q_sqrt, white=True
)
# Path 2: independent kernels, mixed later
separate_kernel = mk.SeparateIndependent([SquaredExponential() for _ in range(Data.L)])
fallback_inducing_variable = mf.SharedIndependentInducingVariables(InducingPoints(Z))
value2, mean2, var2 = sample_conditional(
Xs, fallback_inducing_variable, separate_kernel, q_mu, q_sqrt=q_sqrt, white=True
)
value2 = np.matmul(value2, W.T)
# check if mean and covariance of samples are similar
np.testing.assert_array_almost_equal(np.mean(value, axis=0), np.mean(value2, axis=0), decimal=1)
np.testing.assert_array_almost_equal(
np.cov(value, rowvar=False), np.cov(value2, rowvar=False), decimal=1
)
QSqrtFactory = Callable[[tf.Tensor, int], tf.Tensor]
@pytest.fixture(
name="fully_correlated_q_sqrt_factory",
params=[lambda _, __: None, lambda LM, R: tf.eye(LM, batch_shape=(R,))],
)
def _q_sqrt_factory_fixture(request: SubRequest) -> QSqrtFactory:
return request.param
@pytest.mark.parametrize("R", [1, 2, 5])
def test_fully_correlated_conditional_repeat_shapes_fc_and_foc(
R: int,
fully_correlated_q_sqrt_factory: QSqrtFactory,
full_cov: bool,
full_output_cov: bool,
whiten: bool,
) -> None:
L, M, N, P = Data.L, Data.M, Data.N, Data.P
Kmm = tf.ones((L * M, L * M)) + default_jitter() * tf.eye(L * M)
Kmn = tf.ones((L * M, N, P))
if full_cov and full_output_cov:
Knn = tf.ones((N, P, N, P))
expected_v_shape = [R, N, P, N, P]
elif not full_cov and full_output_cov:
Knn = tf.ones((N, P, P))
expected_v_shape = [R, N, P, P]
elif full_cov and not full_output_cov:
Knn = tf.ones((P, N, N))
expected_v_shape = [R, P, N, N]
else:
Knn = tf.ones((N, P))
expected_v_shape = [R, N, P]
f = tf.ones((L * M, R))
q_sqrt = fully_correlated_q_sqrt_factory(L * M, R)
m, v = fully_correlated_conditional_repeat(
Kmn,
Kmm,
Knn,
f,
full_cov=full_cov,
full_output_cov=full_output_cov,
q_sqrt=q_sqrt,
white=whiten,
)
assert m.shape.as_list() == [R, N, P]
assert v.shape.as_list() == expected_v_shape
def test_fully_correlated_conditional_repeat_whiten(whiten: bool) -> None:
"""
This test checks the effect of the `white` flag, which changes the projection matrix `A`.
The impact of the flag on the value of `A` can be easily verified by its effect on the
predicted mean. While the predicted covariance is also a function of `A` this test does not
inspect that value.
"""
N, P = Data.N, Data.P
Lm = np.random.randn(1, 1).astype(np.float32) ** 2
Kmm = Lm * Lm + default_jitter()
Kmn = tf.ones((1, N, P))
Knn = tf.ones((N, P))
f = np.random.randn(1, 1).astype(np.float32)
mean, _ = fully_correlated_conditional_repeat(
Kmn,
Kmm,
Knn,
f,
white=whiten,
)
if whiten:
expected_mean = (f * Kmn) / Lm
else:
expected_mean = (f * Kmn) / Kmm
np.testing.assert_allclose(mean, expected_mean, rtol=1e-3)
def test_fully_correlated_conditional_shapes_fc_and_foc(
fully_correlated_q_sqrt_factory: QSqrtFactory,
full_cov: bool,
full_output_cov: bool,
whiten: bool,
) -> None:
L, M, N, P = Data.L, Data.M, Data.N, Data.P
Kmm = tf.ones((L * M, L * M)) + default_jitter() * tf.eye(L * M)
Kmn = tf.ones((L * M, N, P))
if full_cov and full_output_cov:
Knn = tf.ones((N, P, N, P))
expected_v_shape = [N, P, N, P]
elif not full_cov and full_output_cov:
Knn = tf.ones((N, P, P))
expected_v_shape = [N, P, P]
elif full_cov and not full_output_cov:
Knn = tf.ones((P, N, N))
expected_v_shape = [P, N, N]
else:
Knn = tf.ones((N, P))
expected_v_shape = [N, P]
f = tf.ones((L * M, 1))
q_sqrt = fully_correlated_q_sqrt_factory(L * M, 1)
m, v = fully_correlated_conditional(
Kmn,
Kmm,
Knn,
f,
full_cov=full_cov,
full_output_cov=full_output_cov,
q_sqrt=q_sqrt,
white=whiten,
)
assert m.shape.as_list() == [N, P]
assert v.shape.as_list() == expected_v_shape
# ------------------------------------------
# Test Mok Output Dims
# ------------------------------------------
def test_shapes_of_mok() -> None:
data = DataMixedKernel
kern_list = [SquaredExponential() for _ in range(data.L)]
k1 = mk.LinearCoregionalization(kern_list, W=data.W)
assert k1.num_latent_gps == data.L
k2 = mk.SeparateIndependent(kern_list)
assert k2.num_latent_gps == data.L
dims = 5
k3 = mk.SharedIndependent(SquaredExponential(), dims)
assert k3.num_latent_gps == dims
# ------------------------------------------
# Test Mixed Mok Kgg
# ------------------------------------------
def test_MixedMok_Kgg() -> None:
data = DataMixedKernel
kern_list = [SquaredExponential() for _ in range(data.L)]
kernel = mk.LinearCoregionalization(kern_list, W=data.W)
Kgg = kernel.Kgg(Data.X, Data.X) # L x N x N
Kff = kernel.K(Data.X, Data.X) # N x P x N x P
# Kff = W @ Kgg @ W^T
Kff_inferred = np.einsum("lnm,pl,ql->npmq", Kgg, data.W, data.W)
np.testing.assert_array_almost_equal(Kff, Kff_inferred, decimal=5)
# ------------------------------------------
# Integration tests
# ------------------------------------------
def test_shared_independent_mok() -> None:
"""
In this test we use the same kernel and the same inducing variables
for each of the outputs. The outputs are considered to be uncorrelated.
This is how GPflow handled multiple outputs before the multioutput framework was added.
We compare three models here:
1) an inefficient one, where we use a SharedIndependentMok with InducingPoints.
This combination uses a Kff of size N x P x N x P and a Kfu of size N x P x M x P,
which is extremely inefficient as most of the elements are zero.
2) efficient: SharedIndependentMok and SharedIndependentMof.
This combination uses the most efficient form of matrices.
3) the old, efficient way: using Kernel and InducingPoints.
Models 2) and 3) follow more or less the same code path.
"""
np.random.seed(0)
# Model 1
q_mu_1 = np.random.randn(Data.M * Data.P, 1) # MP x 1
q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...] # 1 x MP x MP
kernel_1 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P)
inducing_variable = InducingPoints(Data.X[: Data.M, ...])
model_1 = SVGP(
kernel_1,
Gaussian(),
inducing_variable,
q_mu=q_mu_1,
q_sqrt=q_sqrt_1,
num_latent_gps=Data.Y.shape[-1],
)
set_trainable(model_1, False)
set_trainable(model_1.q_sqrt, True)
gpflow.optimizers.Scipy().minimize(
model_1.training_loss_closure(Data.data),
variables=model_1.trainable_variables,
options=dict(maxiter=500),
method="BFGS",
compile=True,
)
# Model 2
q_mu_2 = np.reshape(q_mu_1, [Data.M, Data.P]) # M x P
q_sqrt_2 = np.array(
[np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)]
) # P x M x M
kernel_2 = SquaredExponential(variance=0.5, lengthscales=1.2)
inducing_variable_2 = InducingPoints(Data.X[: Data.M, ...])
model_2 = SVGP(
kernel_2,
Gaussian(),
inducing_variable_2,
num_latent_gps=Data.P,
q_mu=q_mu_2,
q_sqrt=q_sqrt_2,
)
set_trainable(model_2, False)
set_trainable(model_2.q_sqrt, True)
gpflow.optimizers.Scipy().minimize(
model_2.training_loss_closure(Data.data),
variables=model_2.trainable_variables,
options=dict(maxiter=500),
method="BFGS",
compile=True,
)
# Model 3
q_mu_3 = np.reshape(q_mu_1, [Data.M, Data.P]) # M x P
q_sqrt_3 = np.array(
[np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)]
) # P x M x M
kernel_3 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P)
inducing_variable_3 = mf.SharedIndependentInducingVariables(
InducingPoints(Data.X[: Data.M, ...])
)
model_3 = SVGP(
kernel_3,
Gaussian(),
inducing_variable_3,
num_latent_gps=Data.P,
q_mu=q_mu_3,
q_sqrt=q_sqrt_3,
)
set_trainable(model_3, False)
set_trainable(model_3.q_sqrt, True)
gpflow.optimizers.Scipy().minimize(
model_3.training_loss_closure(Data.data),
variables=model_3.trainable_variables,
options=dict(maxiter=500),
method="BFGS",
compile=True,
)
check_equality_predictions(Data.data, [model_1, model_2, model_3])
def test_separate_independent_mok() -> None:
"""
We use different independent kernels for each of the output dimensions.
We can achieve this in two ways:
1) efficient: SeparateIndependentMok with Shared/SeparateIndependentMof
2) inefficient: SeparateIndependentMok with InducingPoints
However, both methods should return the same conditional,
and after optimization return the same log likelihood.
"""
# Model 1 (Inefficient)
q_mu_1 = np.random.randn(Data.M * Data.P, 1)
q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...] # 1 x MP x MP
kern_list_1 = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(Data.P)]
kernel_1 = mk.SeparateIndependent(kern_list_1)
inducing_variable_1 = InducingPoints(Data.X[: Data.M, ...])
model_1 = SVGP(
kernel_1,
Gaussian(),
inducing_variable_1,
num_latent_gps=1,
q_mu=q_mu_1,
q_sqrt=q_sqrt_1,
)
set_trainable(model_1, False)
set_trainable(model_1.q_sqrt, True)
set_trainable(model_1.q_mu, True)
gpflow.optimizers.Scipy().minimize(
model_1.training_loss_closure(Data.data),
variables=model_1.trainable_variables,
method="BFGS",
compile=True,
)
# Model 2 (efficient)
q_mu_2 = np.random.randn(Data.M, Data.P)
q_sqrt_2 = np.array(
[np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)]
) # P x M x M
kern_list_2 = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(Data.P)]
kernel_2 = mk.SeparateIndependent(kern_list_2)
inducing_variable_2 = mf.SharedIndependentInducingVariables(
InducingPoints(Data.X[: Data.M, ...])
)
model_2 = SVGP(
kernel_2,
Gaussian(),
inducing_variable_2,
num_latent_gps=Data.P,
q_mu=q_mu_2,
q_sqrt=q_sqrt_2,
)
set_trainable(model_2, False)
set_trainable(model_2.q_sqrt, True)
set_trainable(model_2.q_mu, True)
gpflow.optimizers.Scipy().minimize(
model_2.training_loss_closure(Data.data),
variables=model_2.trainable_variables,
method="BFGS",
compile=True,
)
check_equality_predictions(Data.data, [model_1, model_2])
def test_separate_independent_mof() -> None:
"""
Same test as above, but we use different (i.e. separate) inducing variables
for each of the output dimensions.
"""
np.random.seed(0)
# Model 1 (Inefficient)
q_mu_1 = np.random.randn(Data.M * Data.P, 1)
q_sqrt_1 = np.tril(np.random.randn(Data.M * Data.P, Data.M * Data.P))[None, ...] # 1 x MP x MP
kernel_1 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P)
inducing_variable_1 = InducingPoints(Data.X[: Data.M, ...])
model_1 = SVGP(kernel_1, Gaussian(), inducing_variable_1, q_mu=q_mu_1, q_sqrt=q_sqrt_1)
set_trainable(model_1, False)
set_trainable(model_1.q_sqrt, True)
set_trainable(model_1.q_mu, True)
gpflow.optimizers.Scipy().minimize(
model_1.training_loss_closure(Data.data),
variables=model_1.trainable_variables,
method="BFGS",
compile=True,
)
# Model 2 (efficient)
q_mu_2 = np.random.randn(Data.M, Data.P)
q_sqrt_2 = np.array(
[np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)]
) # P x M x M
kernel_2 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), Data.P)
inducing_variable_list_2 = [InducingPoints(Data.X[: Data.M, ...]) for _ in range(Data.P)]
inducing_variable_2 = mf.SeparateIndependentInducingVariables(inducing_variable_list_2)
model_2 = SVGP(kernel_2, Gaussian(), inducing_variable_2, q_mu=q_mu_2, q_sqrt=q_sqrt_2)
set_trainable(model_2, False)
set_trainable(model_2.q_sqrt, True)
set_trainable(model_2.q_mu, True)
gpflow.optimizers.Scipy().minimize(
model_2.training_loss_closure(Data.data),
variables=model_2.trainable_variables,
method="BFGS",
compile=True,
)
# Model 3 (Inefficient): an identical inducing variable is used P times,
# and treated as a separate one.
q_mu_3 = np.random.randn(Data.M, Data.P)
q_sqrt_3 = np.array(
[np.tril(np.random.randn(Data.M, Data.M)) for _ in range(Data.P)]
) # P x M x M
kern_list = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(Data.P)]
kernel_3 = mk.SeparateIndependent(kern_list)
inducing_variable_list_3 = [InducingPoints(Data.X[: Data.M, ...]) for _ in range(Data.P)]
inducing_variable_3 = mf.SeparateIndependentInducingVariables(inducing_variable_list_3)
model_3 = SVGP(kernel_3, Gaussian(), inducing_variable_3, q_mu=q_mu_3, q_sqrt=q_sqrt_3)
set_trainable(model_3, False)
set_trainable(model_3.q_sqrt, True)
set_trainable(model_3.q_mu, True)
gpflow.optimizers.Scipy().minimize(
model_3.training_loss_closure(Data.data),
variables=model_3.trainable_variables,
method="BFGS",
compile=True,
)
check_equality_predictions(Data.data, [model_1, model_2, model_3])
def test_mixed_mok_with_Id_vs_independent_mok() -> None:
data = DataMixedKernelWithEye
# Independent model
k1 = mk.SharedIndependent(SquaredExponential(variance=0.5, lengthscales=1.2), data.L)
f1 = InducingPoints(data.X[: data.M, ...])
model_1 = SVGP(k1, Gaussian(), f1, q_mu=data.mu_data_full, q_sqrt=data.sqrt_data_full)
set_trainable(model_1, False)
set_trainable(model_1.q_sqrt, True)
gpflow.optimizers.Scipy().minimize(
model_1.training_loss_closure(Data.data),
variables=model_1.trainable_variables,
method="BFGS",
compile=True,
)
# Mixed Model
kern_list = [SquaredExponential(variance=0.5, lengthscales=1.2) for _ in range(data.L)]
k2 = mk.LinearCoregionalization(kern_list, data.W)
f2 = InducingPoints(data.X[: data.M, ...])
model_2 = SVGP(k2, Gaussian(), f2, q_mu=data.mu_data_full, q_sqrt=data.sqrt_data_full)
set_trainable(model_2, False)
set_trainable(model_2.q_sqrt, True)
gpflow.optimizers.Scipy().minimize(
model_2.training_loss_closure(Data.data),
variables=model_2.trainable_variables,
method="BFGS",
compile=True,
)
check_equality_predictions(Data.data, [model_1, model_2])
def test_compare_mixed_kernel() -> None:
data = DataMixedKernel
kern_list = [SquaredExponential() for _ in range(data.L)]
k1 = mk.LinearCoregionalization(kern_list, W=data.W)
f1 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...]))
model_1 = SVGP(k1, Gaussian(), inducing_variable=f1, q_mu=data.mu_data, q_sqrt=data.sqrt_data)
kern_list = [SquaredExponential() for _ in range(data.L)]
k2 = mk.LinearCoregionalization(kern_list, W=data.W)
f2 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...]))
model_2 = SVGP(k2, Gaussian(), inducing_variable=f2, q_mu=data.mu_data, q_sqrt=data.sqrt_data)
check_equality_predictions(Data.data, [model_1, model_2])
def test_multioutput_with_diag_q_sqrt() -> None:
data = DataMixedKernel
q_sqrt_diag = np.ones((data.M, data.L)) * 2
q_sqrt = np.repeat(np.eye(data.M)[None, ...], data.L, axis=0) * 2 # L x M x M
kern_list = [SquaredExponential() for _ in range(data.L)]
k1 = mk.LinearCoregionalization(kern_list, W=data.W)
f1 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...]))
model_1 = SVGP(
k1,
Gaussian(),
inducing_variable=f1,
q_mu=data.mu_data,
q_sqrt=q_sqrt_diag,
q_diag=True,
)
kern_list = [SquaredExponential() for _ in range(data.L)]
k2 = mk.LinearCoregionalization(kern_list, W=data.W)
f2 = mf.SharedIndependentInducingVariables(InducingPoints(data.X[: data.M, ...]))
model_2 = SVGP(
k2,
Gaussian(),
inducing_variable=f2,
q_mu=data.mu_data,
q_sqrt=q_sqrt,
q_diag=False,
)
check_equality_predictions(Data.data, [model_1, model_2])
def test_MixedKernelSeparateMof() -> None:
data = DataMixedKernel
kern_list = [SquaredExponential() for _ in range(data.L)]
inducing_variable_list = [InducingPoints(data.X[: data.M, ...]) for _ in range(data.L)]
k1 = mk.LinearCoregionalization(kern_list, W=data.W)
f1 = mf.SeparateIndependentInducingVariables(inducing_variable_list)
model_1 = SVGP(k1, Gaussian(), inducing_variable=f1, q_mu=data.mu_data, q_sqrt=data.sqrt_data)
kern_list = [SquaredExponential() for _ in range(data.L)]
inducing_variable_list = [InducingPoints(data.X[: data.M, ...]) for _ in range(data.L)]
k2 = mk.LinearCoregionalization(kern_list, W=data.W)
f2 = mf.SeparateIndependentInducingVariables(inducing_variable_list)
model_2 = SVGP(k2, Gaussian(), inducing_variable=f2, q_mu=data.mu_data, q_sqrt=data.sqrt_data)
check_equality_predictions(Data.data, [model_1, model_2])
def test_separate_independent_conditional_with_q_sqrt_none() -> None:
"""
In response to bug #1523, this test checks that separate_independent_conditional
does not fail when q_sqrt=None.
"""
q_sqrt = None
data = DataMixedKernel
kern_list = [SquaredExponential() for _ in range(data.L)]
kernel = gpflow.kernels.SeparateIndependent(kern_list)
inducing_variable_list = [InducingPoints(data.X[: data.M, ...]) for _ in range(data.L)]
inducing_variable = mf.SeparateIndependentInducingVariables(inducing_variable_list)
mu_1, var_1 = gpflow.conditionals.conditional(
data.X,
inducing_variable,
kernel,
data.mu_data,
full_cov=False,
full_output_cov=False,
q_sqrt=q_sqrt,
white=True,
)
def test_independent_interdomain_conditional_bug_regression() -> None:
"""
Regression test for https://github.com/GPflow/GPflow/issues/818
Not an exhaustive test
"""
M = 31
N = 11
D_lat = 5
D_inp = D_lat * 7
L = 2
P = 3
X = np.random.randn(N, D_inp)
Zs = [np.random.randn(M, D_lat) for _ in range(L)]
k = gpflow.kernels.SquaredExponential(lengthscales=np.ones(D_lat))
def compute_Kmn(Z: tf.Tensor, X: tf.Tensor) -> tf.Tensor:
return tf.stack([k(Z, X[:, i * D_lat : (i + 1) * D_lat]) for i in range(P)])
def compute_Knn(X: tf.Tensor) -> tf.Tensor:
return tf.stack([k(X[:, i * D_lat : (i + 1) * D_lat], full_cov=False) for i in range(P)])
Kmm = tf.stack([k(Z) for Z in Zs]) # L x M x M
Kmn = tf.stack([compute_Kmn(Z, X) for Z in Zs]) # L x P x M x N
Kmn = tf.transpose(Kmn, [2, 0, 3, 1]) # -> M x L x N x P
Knn = tf.transpose(compute_Knn(X)) # N x P
q_mu = tf.convert_to_tensor(np.zeros((M, L)))
q_sqrt = tf.convert_to_tensor(np.stack([np.eye(M) for _ in range(L)]))
tf.debugging.assert_shapes(
[
(Kmm, ["L", "M", "M"]),
(Kmn, ["M", "L", "N", "P"]),
(Knn, ["N", "P"]),
(q_mu, ["M", "L"]),
(q_sqrt, ["L", "M", "M"]),
]
)
_, _ = independent_interdomain_conditional(
Kmn, Kmm, Knn, q_mu, q_sqrt=q_sqrt, full_cov=False, full_output_cov=False
)
def test_independent_interdomain_conditional_whiten(whiten: bool) -> None:
"""
This test checks the effect of the `white` flag, which changes the projection matrix `A`.
The impact of the flag on the value of `A` can be easily verified by its effect on the
predicted mean. While the predicted covariance is also a function of `A` this test does not
inspect that value.
"""
N, P = Data.N, Data.P
Lm = np.random.randn(1, 1, 1).astype(np.float32) ** 2
Kmm = Lm * Lm + default_jitter()
Kmn = tf.ones((1, 1, N, P))
Knn = tf.ones((N, P))
f = np.random.randn(1, 1).astype(np.float32)
mean, _ = independent_interdomain_conditional(
Kmn,
Kmm,
Knn,
f,
white=whiten,
)
if whiten:
expected_mean = (f * Kmn) / Lm
else:
expected_mean = (f * Kmn) / Kmm
np.testing.assert_allclose(mean, expected_mean[0][0], rtol=1e-2)
|
apache-2.0
|
googleads/google-ads-perl
|
lib/Google/Ads/GoogleAds/V8/Services/ConversionCustomVariableService/MutateConversionCustomVariablesResponse.pm
|
1150
|
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V8::Services::ConversionCustomVariableService::MutateConversionCustomVariablesResponse;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
sub new {
my ($class, $args) = @_;
my $self = {
partialFailureError => $args->{partialFailureError},
results => $args->{results}};
# Delete the unassigned fields in this object for a more concise JSON payload
remove_unassigned_fields($self, $args);
bless $self, $class;
return $self;
}
1;
|
apache-2.0
|
mongodb/mongo-ruby-driver
|
lib/mongo/operation/aggregate/op_msg.rb
|
983
|
# frozen_string_literal: true
# encoding: utf-8
# Copyright (C) 2018-2020 MongoDB Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
module Mongo
module Operation
class Aggregate
# A MongoDB aggregate operation sent as an op message.
#
# @api private
#
# @since 2.5.2
class OpMsg < OpMsgBase
include CausalConsistencySupported
include ExecutableTransactionLabel
include PolymorphicResult
end
end
end
end
|
apache-2.0
|
emag/codereading-undertow
|
core/src/test/java/io/undertow/server/WriteTimeoutTestCase.java
|
4935
|
/*
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.undertow.server;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channel;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import io.undertow.testutils.AjpIgnore;
import io.undertow.testutils.DefaultServer;
import io.undertow.testutils.SpdyIgnore;
import io.undertow.testutils.TestHttpClient;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.xnio.ChannelListener;
import org.xnio.Options;
import org.xnio.channels.StreamSinkChannel;
import org.xnio.channels.WriteTimeoutException;
/**
* Tests write timeout with a client that is slow to read the response
*
* @author Stuart Douglas
*/
@RunWith(DefaultServer.class)
@AjpIgnore
@SpdyIgnore
@Ignore("This test fails intermittently")
public class WriteTimeoutTestCase {
private volatile Exception exception;
private static final CountDownLatch errorLatch = new CountDownLatch(1);
@Test
public void testWriteTimeout() throws IOException, InterruptedException {
DefaultServer.setRootHandler(new HttpHandler() {
@Override
public void handleRequest(final HttpServerExchange exchange) throws Exception {
final StreamSinkChannel response = exchange.getResponseChannel();
try {
response.setOption(Options.WRITE_TIMEOUT, 10);
} catch (IOException e) {
throw new RuntimeException(e);
}
final int capacity = 1 * 1024 * 1024; //1mb
final ByteBuffer originalBuffer = ByteBuffer.allocateDirect(capacity);
for (int i = 0; i < capacity; ++i) {
originalBuffer.put((byte) '*');
}
originalBuffer.flip();
response.getWriteSetter().set(new ChannelListener<Channel>() {
private ByteBuffer buffer = originalBuffer.duplicate();
int count = 0;
@Override
public void handleEvent(final Channel channel) {
do {
try {
int res = response.write(buffer);
if (res == 0) {
return;
}
} catch (IOException e) {
exception = e;
errorLatch.countDown();
}
if(!buffer.hasRemaining()) {
count++;
buffer = originalBuffer.duplicate();
}
} while (count < 1000);
exchange.endExchange();
}
});
response.wakeupWrites();
}
});
final TestHttpClient client = new TestHttpClient();
try {
HttpGet get = new HttpGet(DefaultServer.getDefaultServerURL());
try {
HttpResponse result = client.execute(get);
InputStream content = result.getEntity().getContent();
byte[] buffer = new byte[512];
int r = 0;
while ((r = content.read(buffer)) > 0) {
Thread.sleep(200);
if (exception != null) {
Assert.assertEquals(WriteTimeoutException.class, exception.getClass());
return;
}
}
Assert.fail("Write did not time out");
} catch (IOException e) {
if (errorLatch.await(5, TimeUnit.SECONDS)) {
Assert.assertEquals(WriteTimeoutException.class, exception.getClass());
} else {
Assert.fail("Write did not time out");
}
}
} finally {
client.getConnectionManager().shutdown();
}
}
}
|
apache-2.0
|
mdoering/backbone
|
life/Fungi/Basidiomycota/Agaricomycetes/Hymenochaetales/Hymenochaetaceae/Inonotus/Inonotus euphoriae/ Syn. Xanthochrous euphoriae/README.md
|
197
|
# Xanthochrous euphoriae (Pat.) Pat., 1900 SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
#### Remarks
null
|
apache-2.0
|
NetApp/trident
|
cli/main.go
|
282
|
// Copyright 2018 NetApp, Inc. All Rights Reserved.
package main
import (
"os"
"github.com/netapp/trident/cli/cmd"
)
func main() {
cmd.ExitCode = cmd.ExitCodeSuccess
if err := cmd.RootCmd.Execute(); err != nil {
cmd.SetExitCodeFromError(err)
}
os.Exit(cmd.ExitCode)
}
|
apache-2.0
|
ldaniels528/shocktrade-server
|
app-server/app/com/shocktrade/models/quote/StockQuotes.scala
|
7187
|
package com.shocktrade.models.quote
import akka.actor.Props
import akka.pattern.ask
import akka.routing.RoundRobinPool
import akka.util.Timeout
import com.ldaniels528.commons.helpers.OptionHelper._
import com.shocktrade.actors.QuoteMessages._
import com.shocktrade.actors.WebSockets.QuoteUpdated
import com.shocktrade.actors.{DBaseQuoteActor, RealTimeQuoteActor, WebSockets}
import com.shocktrade.controllers.Application._
import com.shocktrade.util.BSONHelper._
import com.shocktrade.util.{ConcurrentCache, DateUtil}
import org.joda.time.DateTime
import play.api.Logger
import play.api.libs.json.Json.{obj => JS}
import play.api.libs.json.{JsArray, JsObject}
import play.libs.Akka
import play.modules.reactivemongo.json.collection.JSONCollection
import reactivemongo.api.collections.default.BSONCollection
import reactivemongo.bson.{BSONArray, BSONDocument => BS}
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
/**
* Stock Quote Proxy
* @author [email protected]
*/
object StockQuotes {
private val realTimeCache = ConcurrentCache[String, JsObject](1.minute)
private val diskCache = ConcurrentCache[String, JsObject](4.hours)
private val system = Akka.system
private val quoteActor = system.actorOf(Props[RealTimeQuoteActor].withRouter(RoundRobinPool(nrOfInstances = 50)), name = "QuoteRealTime")
private val mongoReader = system.actorOf(Props[DBaseQuoteActor].withRouter(RoundRobinPool(nrOfInstances = 50)), name = "QuoteReader")
private val mongoWriter = system.actorOf(Props[DBaseQuoteActor], name = "QuoteWriter")
private lazy val mcQBS = db.collection[BSONCollection]("Stocks")
implicit val timeout: Timeout = 45.seconds
lazy val mcQ = db.collection[JSONCollection]("Stocks")
import system.dispatcher
def init(fields: JsObject): Unit = {
mcQ.find(JS("active" -> true), fields).cursor[JsObject].collect[Seq]() foreach { objects =>
Logger.info(s"Pre-loaded ${objects.length} quote(s)")
objects foreach { jo =>
for {
symbol <- (jo \ "symbol").asOpt[String]
} {
diskCache.put(symbol, jo)
}
}
}
}
def findQuotes(filter: QuoteFilter): Future[Seq[JsObject]] = {
(mongoReader ? FindQuotes(filter)) map {
case e: Throwable => throw new IllegalStateException(e)
case response => response.asInstanceOf[Seq[JsObject]]
}
}
/**
* Retrieves a real-time quote for the given symbol
* @param symbol the given symbol (e.g. 'AAPL')
* @param ec the given [[ExecutionContext]]
* @return a promise of an option of a [[JsObject quote]]
*/
def findRealTimeQuote(symbol: String)(implicit ec: ExecutionContext): Future[Option[JsObject]] = {
def relayQuote(task: Future[Option[JsObject]]) = {
task.foreach(_ foreach { quote =>
realTimeCache.put(symbol, quote, if (DateUtil.isTradingActive) 1.minute else 15.minute)
WebSockets ! QuoteUpdated(quote)
mongoWriter ! SaveQuote(symbol, quote)
})
task
}
val mySymbol = symbol.toUpperCase.trim
if (DateUtil.isTradingActive) relayQuote(findRealTimeQuoteFromService(mySymbol))
else
realTimeCache.get(mySymbol) match {
case quote@Some(_) => Future.successful(quote)
case None =>
relayQuote(findRealTimeQuoteFromService(mySymbol))
}
}
def findRealTimeQuotes(symbols: Seq[String])(implicit ec: ExecutionContext): Future[Seq[JsObject]] = {
val quotes = Future.sequence(symbols map findRealTimeQuote)
quotes.map(_.flatten)
}
def findRealTimeQuoteFromService(symbol: String)(implicit ec: ExecutionContext): Future[Option[JsObject]] = {
(quoteActor ? GetQuote(symbol)).mapTo[Option[JsObject]]
}
/**
* Retrieves a database quote for the given symbol
* @param symbol the given symbol (e.g. 'AAPL')
* @param ec the given [[ExecutionContext]]
* @return a promise of an option of a [[JsObject quote]]
*/
def findDBaseQuote(symbol: String)(implicit ec: ExecutionContext): Future[Option[JsObject]] = {
val mySymbol = symbol.toUpperCase.trim
diskCache.get(mySymbol) match {
case quote@Some(_) => Future.successful(quote)
case None =>
val quote = (mongoReader ? GetQuote(mySymbol)).mapTo[Option[JsObject]]
quote.foreach(_ foreach (diskCache.put(mySymbol, _)))
quote
}
}
def findDBaseFullQuote(symbol: String)(implicit ec: ExecutionContext): Future[Option[JsObject]] = {
(mongoReader ? GetFullQuote(symbol)).mapTo[Option[JsObject]]
}
/**
* Retrieves database quotes for the given symbols
* @param symbols the given collection of symbols (e.g. 'AAPL', 'AMD')
* @param ec the given [[ExecutionContext]]
* @return a promise of a [[JsArray]] of quotes
*/
def findDBaseQuotes(symbols: Seq[String])(implicit ec: ExecutionContext): Future[JsArray] = {
// first, get as many of the quotes from the cache as we can
val cachedQuotes = symbols flatMap diskCache.get
val remainingSymbols = symbols.filterNot(diskCache.contains)
if (remainingSymbols.isEmpty) Future.successful(JsArray(cachedQuotes))
else {
// query any remaining quotes from disk
val task = (mongoReader ? GetQuotes(remainingSymbols)).mapTo[JsArray]
task.foreach { case JsArray(values) =>
values foreach { js =>
(js \ "symbol").asOpt[String].foreach(diskCache.put(_, js.asInstanceOf[JsObject]))
}
}
task
}
}
/**
* Retrieves a complete quote; the composition of a real-time quote and a disk-based quote
* @param symbol the given ticker symbol
* @param ec the given [[ExecutionContext]]
* @return the [[Future promise]] of an option of a [[JsObject quote]]
*/
def findFullQuote(symbol: String)(implicit ec: ExecutionContext): Future[Option[JsObject]] = {
val mySymbol = symbol.toUpperCase.trim
val rtQuoteFuture = findRealTimeQuote(mySymbol)
val dbQuoteFuture = findDBaseFullQuote(mySymbol)
for {
rtQuote <- rtQuoteFuture
dbQuote <- dbQuoteFuture
} yield rtQuote.map(q => dbQuote.getOrElse(JS()) ++ q) ?? dbQuote
}
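// Example (illustrative; symbol and logging are assumptions): composing a
// full quote for a ticker from application code.
//   import scala.concurrent.ExecutionContext.Implicits.global
//   StockQuotes.findFullQuote("AAPL") foreach {
//     case Some(quote) => Logger.info(s"AAPL: $quote")
//     case None => Logger.warn("No quote found for AAPL")
//   }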
def findQuotes(symbols: Seq[String])(fields: String*)(implicit ec: ExecutionContext): Future[Seq[JsObject]] = {
mcQ.find(JS("symbol" -> JS("$in" -> symbols)), fields.toJsonFields).cursor[JsObject].collect[Seq]()
}
def getSymbolsForCsvUpdate(implicit ec: ExecutionContext): Future[Seq[BS]] = {
mcQBS.find(BS("active" -> true, "$or" -> BSONArray(Seq(
BS("yfDynLastUpdated" -> BS("$exists" -> false)),
BS("yfDynLastUpdated" -> BS("$lte" -> new DateTime().minusMinutes(15)))
))), BS("symbol" -> 1))
.cursor[BS]
.collect[Seq]()
}
def getSymbolsForKeyStatisticsUpdate(implicit ec: ExecutionContext): Future[Seq[BS]] = {
mcQBS.find(BS("active" -> true, "$or" -> BSONArray(Seq(
BS("yfKeyStatsLastUpdated" -> BS("$exists" -> false)),
BS("yfKeyStatsLastUpdated" -> BS("$lte" -> new DateTime().minusDays(2)))
))), BS("symbol" -> 1))
.cursor[BS]
.collect[Seq]()
}
def updateQuote(symbol: String, doc: BS): Unit = {
mcQBS.update(BS("symbol" -> symbol), BS("$set" -> doc))
}
}
|
apache-2.0
|
martyanova/java_pft
|
mantis-tests/src/test/java/ru/stqa/pft/mantis/appmanager/ApplicationManager.java
|
1475
|
package ru.stqa.pft.mantis.appmanager;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.firefox.FirefoxDriver;
import org.openqa.selenium.ie.InternetExplorerDriver;
import org.openqa.selenium.remote.BrowserType;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.Objects;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
/**
* Created by Александр on 06.11.2016.
*/
public class ApplicationManager {
WebDriver wd;
private final Properties properties;
private String browser;
public ApplicationManager(String browser){
this.browser = browser;
properties = new Properties();
}
public void init() throws IOException {
//String browser = BrowserType.CHROME;
String target = System.getProperty("target", "local");
properties.load(new FileReader(new File(String.format("L:/Devel/java_pft/addressbook-web-tests/src/test/resources/%s.properties", target))));
if (Objects.equals(browser, BrowserType.FIREFOX)) {
wd = new FirefoxDriver();
} else if (Objects.equals(browser, BrowserType.CHROME)){
wd = new ChromeDriver();
} else if (Objects.equals(browser, BrowserType.IE)){
wd = new InternetExplorerDriver();
}
wd.manage().timeouts().implicitlyWait(0, TimeUnit.SECONDS);
wd.get(properties.getProperty("web.baseUrl"));
}
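// Example local.properties contents (illustrative; the key name is taken from
// the getProperty call above, the value is an assumption):
//   web.baseUrl=http://localhost/addressbook/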
public void stop() {
wd.quit();
}
}
|
apache-2.0
|
fluent/fluentd-ui
|
app/models/fluentd/setting/buffer_memory.rb
|
302
|
class Fluentd
module Setting
class BufferMemory
include Fluentd::Setting::Plugin
register_plugin("buffer", "memory")
def self.initial_params
{}
end
def common_options
[]
end
def advanced_options
[]
end
end
end
end
|
apache-2.0
|
phdfbk/phdfbk.github.io
|
students/_posts/2016-08-23-lakew.md
|
432
|
---
layout: default
id: 2016-08-23-Lakew-Surafel_Melaku
surname: Lakew
name: Surafel Melaku
university: University of Trento
date: 23/08/2016
aboutme:
from: Ethiopia
research_topic: Deep learning for human-in-the-loop advanced machine translation
abstract:
advisor: Federico Marcello
keywords:
website:
img: lakew.jpg
email: lakew<i class="fa fa-at" aria-hidden="true"></i>fbk.eu
alt: Surafel Melaku Lakew
modal-id: stud81
---
|
apache-2.0
|
jentfoo/aws-sdk-java
|
aws-java-sdk-elasticache/src/main/java/com/amazonaws/services/elasticache/model/transform/DescribeUpdateActionsResultStaxUnmarshaller.java
|
3071
|
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.elasticache.model.transform;
import java.util.ArrayList;
import javax.xml.stream.events.XMLEvent;
import javax.annotation.Generated;
import com.amazonaws.services.elasticache.model.*;
import com.amazonaws.transform.Unmarshaller;
import com.amazonaws.transform.StaxUnmarshallerContext;
import com.amazonaws.transform.SimpleTypeStaxUnmarshallers.*;
/**
* DescribeUpdateActionsResult StAX Unmarshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class DescribeUpdateActionsResultStaxUnmarshaller implements Unmarshaller<DescribeUpdateActionsResult, StaxUnmarshallerContext> {
public DescribeUpdateActionsResult unmarshall(StaxUnmarshallerContext context) throws Exception {
DescribeUpdateActionsResult describeUpdateActionsResult = new DescribeUpdateActionsResult();
int originalDepth = context.getCurrentDepth();
int targetDepth = originalDepth + 1;
if (context.isStartOfDocument())
targetDepth += 2;
while (true) {
XMLEvent xmlEvent = context.nextEvent();
if (xmlEvent.isEndDocument())
return describeUpdateActionsResult;
if (xmlEvent.isAttribute() || xmlEvent.isStartElement()) {
if (context.testExpression("Marker", targetDepth)) {
describeUpdateActionsResult.setMarker(StringStaxUnmarshaller.getInstance().unmarshall(context));
continue;
}
if (context.testExpression("UpdateActions", targetDepth)) {
describeUpdateActionsResult.withUpdateActions(new ArrayList<UpdateAction>());
continue;
}
if (context.testExpression("UpdateActions/UpdateAction", targetDepth)) {
describeUpdateActionsResult.withUpdateActions(UpdateActionStaxUnmarshaller.getInstance().unmarshall(context));
continue;
}
} else if (xmlEvent.isEndElement()) {
if (context.getCurrentDepth() < originalDepth) {
return describeUpdateActionsResult;
}
}
}
}
private static DescribeUpdateActionsResultStaxUnmarshaller instance;
public static DescribeUpdateActionsResultStaxUnmarshaller getInstance() {
if (instance == null)
instance = new DescribeUpdateActionsResultStaxUnmarshaller();
return instance;
}
}
|
apache-2.0
|
deyantodorov/Zeus-WebServicesCould-TeamWork
|
Source/ChatSystem/Server/ChatSystem.Server/Models/Account/AddExternalLoginBindingModel.cs
|
278
|
using System.ComponentModel.DataAnnotations;
namespace ChatSystem.Server.Models.Account
{
public class AddExternalLoginBindingModel
{
[Required]
[Display(Name = "External access token")]
public string ExternalAccessToken { get; set; }
}
}
|
apache-2.0
|
mabetle/mcore
|
mtag/tag_label.go
|
1652
|
package mtag
import (
"github.com/mabetle/mcore"
"strings"
)
// label tag format:
// label="zh='' en=''"
// GetLabelTag returns field "label" tag value.
func GetLabelTag(v interface{}, fieldName string) (string, bool) {
return GetTag(v, fieldName, "label")
}
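// Example (illustrative; struct and values are assumptions): a field carrying
// a label tag in the documented format.
//
//	type Item struct {
//		Name string `label:"zh='名称' en='Name'"`
//	}
//
// GetLocaleLabel(Item{}, "Name", "zh_CN") would return "名称" via the
// language fallback below.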
// parse string to KeyValue map.
func ParseKeyValueMap(value string) map[string]string {
result := make(map[string]string)
rows := strings.Split(value, " ")
for _, row := range rows {
// skip blank
if strings.TrimSpace(row) == "" {
continue
}
kv := strings.Split(row, "=")
if len(kv) == 2 {
k := strings.TrimSpace(kv[0])
v := strings.TrimSpace(kv[1])
v = strings.Trim(v, "'")
v = strings.Trim(v, "\"")
v = strings.TrimSpace(v)
result[k] = v
}
}
return result
}
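// Example: ParseKeyValueMap("zh='名称' en='Name'") returns
// map[string]string{"zh": "名称", "en": "Name"}. Note that values containing
// spaces are not supported, since rows are split on single spaces.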
// GetLocaleLabel returns field label by locale.
// locale format: en en_US / zh zh_CN zh_HK etc.
func GetLocaleLabel(v interface{}, fieldName string, locale string) string {
labelValue, e := GetLabelTag(v, fieldName)
// not exist
if !e {
return mcore.ToLabel(fieldName)
}
locale = strings.Replace(locale, "-", "_", -1)
lang := strings.Split(locale, "_")[0]
m := ParseKeyValueMap(labelValue)
// include lang_country locale
if v, ok := m[locale]; ok {
return v
}
// include lang
if v, ok := m[lang]; ok {
return v
}
// default to en
if v, ok := m["en"]; ok {
return v
}
// default return
return mcore.ToLabel(fieldName)
}
// GetLabelZH
func GetLabelZH(v interface{}, fieldName string) string {
return GetLocaleLabel(v, fieldName, "zh")
}
// GetLabelEN
func GetLabelEN(v interface{}, fieldName string) string {
return GetLocaleLabel(v, fieldName, "en")
}
|
apache-2.0
|
liamw9534/mopidy-rtp
|
mopidy_rtp/sink.py
|
1944
|
from __future__ import unicode_literals
import pygst
pygst.require('0.10')
import gst # noqa
from mopidy.audio import output
import logging
logger = logging.getLogger(__name__)
# This variable is a global that is set by the Backend
# during initialization from the extension properties
encoder = 'identity'
class RtpSink(gst.Bin):
def __init__(self):
super(RtpSink, self).__init__()
# These elements are 'always on' even if nobody is
# subscribed to listen. It streamlines the process
# of adding/removing listeners.
queue = gst.element_factory_make('queue')
rate = gst.element_factory_make('audiorate')
enc = gst.element_factory_make(encoder)
pay = gst.element_factory_make('rtpgstpay')
# Re-use of the audio output bin which handles
# dynamic element addition/removal nicely
self.tee = output.AudioOutput()
self.add_many(queue, rate, enc, pay, self.tee)
gst.element_link_many(queue, rate, enc, pay, self.tee)
pad = queue.get_pad('sink')
ghost_pad = gst.GhostPad('sink', pad)
self.add_pad(ghost_pad)
def add(self, host, port):
b = gst.Bin()
queue = gst.element_factory_make('queue')
udpsink = gst.element_factory_make('udpsink')
udpsink.set_property('host', host)
udpsink.set_property('port', port)
# Both async and sync must be true to avoid seek
# timestamp sync problems
udpsink.set_property('sync', True)
udpsink.set_property('async', True)
b.add_many(queue, udpsink)
gst.element_link_many(queue, udpsink)
pad = queue.get_pad('sink')
ghost_pad = gst.GhostPad('sink', pad)
b.add_pad(ghost_pad)
ident = str(port) + '@' + host
self.tee.add_sink(ident, b)
def remove(self, host, port):
ident = str(port) + '@' + host
self.tee.remove_sink(ident)
|
apache-2.0
|
jentfoo/aws-sdk-java
|
aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/transform/DescribePatchPropertiesRequestProtocolMarshaller.java
|
2847
|
/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.simplesystemsmanagement.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.Request;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.simplesystemsmanagement.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.protocol.*;
import com.amazonaws.protocol.Protocol;
import com.amazonaws.annotation.SdkInternalApi;
/**
* DescribePatchPropertiesRequest Marshaller
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class DescribePatchPropertiesRequestProtocolMarshaller implements Marshaller<Request<DescribePatchPropertiesRequest>, DescribePatchPropertiesRequest> {
private static final OperationInfo SDK_OPERATION_BINDING = OperationInfo.builder().protocol(Protocol.AWS_JSON).requestUri("/")
.httpMethodName(HttpMethodName.POST).hasExplicitPayloadMember(false).hasPayloadMembers(true)
.operationIdentifier("AmazonSSM.DescribePatchProperties").serviceName("AWSSimpleSystemsManagement").build();
private final com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory;
public DescribePatchPropertiesRequestProtocolMarshaller(com.amazonaws.protocol.json.SdkJsonProtocolFactory protocolFactory) {
this.protocolFactory = protocolFactory;
}
public Request<DescribePatchPropertiesRequest> marshall(DescribePatchPropertiesRequest describePatchPropertiesRequest) {
if (describePatchPropertiesRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
final ProtocolRequestMarshaller<DescribePatchPropertiesRequest> protocolMarshaller = protocolFactory.createProtocolMarshaller(
SDK_OPERATION_BINDING, describePatchPropertiesRequest);
protocolMarshaller.startMarshalling();
DescribePatchPropertiesRequestMarshaller.getInstance().marshall(describePatchPropertiesRequest, protocolMarshaller);
return protocolMarshaller.finishMarshalling();
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
}
}
|
apache-2.0
|
mdoering/backbone
|
life/Plantae/Magnoliophyta/Magnoliopsida/Lamiales/Lamiaceae/Scutellaria/Scutellaria purpurascens/Scutellaria purpurascens obtusifolia/README.md
|
198
|
# Scutellaria purpurascens var. obtusifolia Kuntze VARIETY
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null
|
apache-2.0
|
Data-to-Insight-Center/komadu
|
service-core/src/main/java/edu/indiana/d2i/komadu/ingest/db/TableAttributeData.java
|
1714
|
/*
#
# Copyright 2014 The Trustees of Indiana University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
*/
package edu.indiana.d2i.komadu.ingest.db;
public class TableAttributeData {
/**
     * Enum of the data types supported for inserted values.
     * Add to this list if a new type is needed.
*/
public static enum DataType {
STRING,
INT,
LONG,
FLOAT,
DOUBLE,
SHORT,
DATE,
TIME,
TIMESTAMP
}
private String attributeName;
private Object value;
private DataType type;
public TableAttributeData(String attributeName, Object value, DataType type) {
this.attributeName = attributeName;
this.value = value;
this.type = type;
}
public String getAttributeName() {
return attributeName;
}
public void setAttributeName(String attributeName) {
this.attributeName = attributeName;
}
public Object getValue() {
return value;
}
public void setValue(Object value) {
this.value = value;
}
public DataType getType() {
return type;
}
public void setType(DataType type) {
this.type = type;
}
}
|
apache-2.0
|
libyal/libyal
|
data/dtfabric/runtime_structure.h/functions-read_file_io_handle.h
|
220
|
int ${library_name}_${structure_name}_read_file_io_handle(
${library_name}_${structure_name}_t *${structure_name},
libbfio_handle_t *file_io_handle,
off64_t file_offset,
libcerror_error_t **error );
|
apache-2.0
|
sagiegurari/node-go-require
|
README.md
|
5134
|
# node-go-require
[](https://www.npmjs.org/package/node-go-require) [](https://github.com/sagiegurari/node-go-require/actions) [](https://coveralls.io/r/sagiegurari/node-go-require) [](https://snyk.io/test/github/sagiegurari/node-go-require) [](http://inch-ci.org/github/sagiegurari/node-go-require) [](https://github.com/sagiegurari/node-go-require/blob/master/LICENSE) [](https://www.npmjs.org/package/node-go-require)
> Load Google Go scripts as JavaScript modules under the Node.js runtime.
* [Overview](#overview)
* [Usage](#usage)
* [Installation](#installation)
* [Limitations](#limitations)
* [API Documentation](docs/api.md)
* [Contributing](.github/CONTRIBUTING.md)
* [Release History](#history)
* [License](#license)
<a name="overview"></a>
## Overview
Go is an open source programming language that makes it easy to build simple, reliable, and efficient software.
See [golang.org](https://golang.org/) for more information.
<a name="usage"></a>
## Usage
In order to use Google Go scripts under Node.js, you first need to require this library as follows:
```js
require('node-go-require');
```
Now you can require your Google Go files like any other JavaScript file, for example:
```js
var petGo = require('./pet.go');
var pet = petGo.pet.New('my pet');
console.log(pet.Name());
pet.SetName('new name...');
console.log(pet.Name());
```
In your Go file, instead of assigning to module.exports as in a JS file, use the gopherjs mechanism for exporting objects/functions.
Do not export to the global namespace; export to the module namespace instead.
For example:
```go
js.Module.Get("exports").Set("pet", map[string]interface{}{
"New": New,
})
```
Full example (GO):
```go
package main
import "github.com/gopherjs/gopherjs/js"
type Pet struct {
name string
}
func New(name string) *js.Object {
return js.MakeWrapper(&Pet{name})
}
func (p *Pet) Name() string {
return p.name
}
func (p *Pet) SetName(name string) {
p.name = name
}
func main() {
js.Module.Get("exports").Set("pet", map[string]interface{}{
"New": New,
})
}
```
Full example (JavaScript):
```js
require('node-go-require');
var petGo = require('./pet.go');
var pet = petGo.pet.New('my pet');
console.log(pet.Name());
pet.SetName('new name...');
console.log(pet.Name());
```
In order to generate minified JavaScript code, first set the following environment variable:
```sh
NODE_GO_REQUIRE_MINIFY=TRUE
```
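A minimal sketch, assuming the flag is read from `process.env` when a `.go` file is required (`./pet.go` is the hypothetical script from the usage example above):
```js
// Assumption: node-go-require checks NODE_GO_REQUIRE_MINIFY at require time.
process.env.NODE_GO_REQUIRE_MINIFY = 'TRUE';
require('node-go-require');

// The JavaScript generated for pet.go should now be minified.
var petGo = require('./pet.go');
console.log(petGo.pet.New('my pet').Name());
```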
<a name="installation"></a>
## Installation
In order to use this library, just run the following npm install command:
```sh
npm install --save node-go-require
```
Apart from installing the NPM module, you will need to set up the following:
* Install Google Go - [installation guide](https://golang.org/doc/install) (make sure the GOPATH env variable is defined)
* Install gopherjs - [gopherjs](https://github.com/gopherjs/gopherjs) by running
```sh
go get -u github.com/gopherjs/gopherjs
```
<a name="limitations"></a>
## Limitations
The Google Go to JavaScript conversion is done by gopherjs, and there are some limitations when running the gopherjs-generated code under the Node.js runtime.
For the exact limitations, please see the gopherjs project: [gopherjs](https://github.com/gopherjs/gopherjs)
## API Documentation
See full docs at: [API Docs](docs/api.md)
## Contributing
See [contributing guide](.github/CONTRIBUTING.md)
<a name="history"></a>
## Release History
| Date | Version | Description |
| ----------- | ------- | ----------- |
| 2020-05-13 | v2.0.0 | Migrate to github actions and upgrade minimal node version |
| 2019-02-08 | v1.1.5 | Maintenance |
| 2018-01-22 | v1.1.0 | Removed shelljs dependency and raised minimum node.js version to 0.12 |
| 2017-02-07 | v1.0.25 | Ability to generate minified js code |
| 2016-07-26 | v0.1.2 | Add integration test via docker |
| 2015-02-14 | v0.0.16 | Modified tests and examples due to changes in gopherjs API |
| 2015-02-09 | v0.0.15 | Grunt cleanups. |
| 2015-02-06 | v0.0.14 | Doc changes |
| 2015-02-05  | v0.0.13 | Fix continuous integration |
| 2015-02-05 | v0.0.12 | Minor internal quality changes |
| 2014-12-30 | v0.0.11 | Doc changes |
| 2014-12-07 | v0.0.10 | Minor internal changes |
| 2014-12-03 | v0.0.9 | No need to modify generated code |
| 2014-12-03 | v0.0.8 | Simplified code generation modification |
| 2014-12-02  | v0.0.7  | Mock gopherjs calls for continuous integration tests. |
| 2014-12-02 | v0.0.3 | Initial release. |
<a name="license"></a>
## License
Developed by Sagie Gur-Ari and licensed under the Apache 2 open source license.
|
apache-2.0
|
flitzi/AC_SERVER_APPS
|
AC_ServiceClient/ACServiceSessionReportHandler.cs
|
494
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using AC_ServiceClient.ACServiceReference;
using AC_SessionReport;
namespace AC_ServiceClient
{
public class ACServiceSessionReportHandler : ISessionReportHandler
{
public void HandleReport(SessionReport report)
{
ACServiceClient client = new ACServiceClient();
client.PostResult(report);
}
}
}
|
apache-2.0
|
sedna/sedna
|
kernel/tr/xqp/XQCommon.h
|
2596
|
#ifndef __XQ_COMMON_H__
#define __XQ_COMMON_H__
#include "tr/xqp/ast/ASTNode.h"
#include "tr/xqp/ast/ASTVarDecl.h"
#include "tr/xqp/ast/ASTVar.h"
#include "tr/executor/base/dynamic_context.h"
#include <string.h>
#include <map>
#define CREATE_INTNAME(u, l) ((u == "") ? (l) : (std::string("{") + (u) + std::string("}") + (l)))
#define CREATE_INTNAME_FUN(u, l, a) (CREATE_INTNAME(u, l) + std::string("/") + int2string(a))
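// e.g. CREATE_INTNAME("http://example.org/ns", "foo") yields "{http://example.org/ns}foo".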
namespace sedna
{
struct xqExprInfo
{
bool isOrdered; // expr is ordered
bool isDistincted; // expr contains distincted values
bool isMax1; // expr emits singleton or empty sequence
bool isSingleLevel; // all nodes are on the same level in node-sequence
bool useConstructors; // true, if subexpression uses constructor (direct or computed)
};
class XQueryModule;
/*
* Variable info: used throughout the parsing-qep process
*/
struct XQVariable
{
// internal info
std::string int_name;
XQueryModule *mod;
ASTNode *var;
// lreturn info
xqExprInfo exp_info;
        bool isNodes; // true if var represents a sequence of nodes (singletons also go here; applies only to typed vars)
// lr2por info
global_var_dsc id; // id for physical plan
// true if variable is actually being used
bool is_used;
XQVariable(const char *name, ASTNode *var_, XQueryModule *mod_ = NULL)
{
int_name = name;
mod = mod_;
var = var_;
isNodes = false;
exp_info.isDistincted = true;
exp_info.useConstructors = false;
exp_info.isSingleLevel = true;
exp_info.isOrdered = true;
exp_info.isMax1 = true;
id = global_var_dsc((dynamic_context *)NULL, INVALID_VAR_DSC);
is_used = false;
}
XQVariable()
{
int_name = "$%dummy";
mod = NULL;
var = NULL;
id = global_var_dsc((dynamic_context *)NULL, INVALID_VAR_DSC);
is_used = false;
}
};
struct XQFunction;
typedef std::pair<std::string, ASTLocation *> nsPair;
    typedef std::map<std::string, nsPair> nsBindType; // the location is used to diagnose illegal redefinition
typedef std::map<std::string, XQFunction *> XQFunctionInfo;
typedef std::map<std::string, XQFunction> XQStdFunctionInfo;
typedef std::map<std::string, XQVariable *> XQVariablesInfo;
typedef std::map<std::string, ASTNode *> XQStringHash;
}
#endif
|
apache-2.0
|
OSBI/meteorite-core-ui
|
src/main/resources/docs/class/js/collections/SidebarCollection.js~SidebarCollection.html
|
14651
|
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<base data-ice="baseUrl" href="../../../">
<title data-ice="title">SidebarCollection | API Document</title>
<link type="text/css" rel="stylesheet" href="css/style.css">
<link type="text/css" rel="stylesheet" href="css/prettify-tomorrow.css">
<script src="script/prettify/prettify.js"></script>
<script src="script/manual.js"></script>
</head>
<body class="layout-container" data-ice="rootContainer">
<header>
<a href="./">Home</a>
<a href="identifiers.html">Reference</a>
<a href="source.html">Source</a>
<a data-ice="repoURL" href="https://github.com/OSBI/meteorite-core-ui" class="repo-url-github">Repository</a>
<div class="search-box">
<span>
<img src="./image/search.png">
<span class="search-input-edge"></span><input class="search-input"><span class="search-input-edge"></span>
</span>
<ul class="search-result"></ul>
</div>
</header>
<nav class="navigation" data-ice="nav"><div>
<ul>
<li data-ice="doc"><div data-ice="dirPath" class="nav-dir-path">collections</div><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/collections/CubesCollection.js~CubesCollection.html">CubesCollection</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/collections/MenubarCollection.js~MenubarCollection.html">MenubarCollection</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/collections/SidebarCollection.js~SidebarCollection.html">SidebarCollection</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/collections/ToolbarCollection.js~ToolbarCollection.html">ToolbarCollection</a></span></span></li>
<li data-ice="doc"><div data-ice="dirPath" class="nav-dir-path">components/bootstrap</div><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/bootstrap/Clearfix.jsx~Clearfix.html">Clearfix</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/bootstrap/FormGroup.jsx~FormGroup.html">FormGroup</a></span></span></li>
<li data-ice="doc"><div data-ice="dirPath" class="nav-dir-path">components/saiku</div><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/Content.jsx~Content.html">Content</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/Icon.jsx~Icon.html">Icon</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/Logo.jsx~Logo.html">Logo</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/MenuBar.jsx~MenuBar.html">MenuBar</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/NotFound.jsx~NotFound.html">NotFound</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/Sidebar.jsx~Toolbar.html">Toolbar</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/Tab.jsx~Tab.html">Tab</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/Tabs.jsx~Tabs.html">Tabs</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/Toolbar.jsx~Toolbar.html">Toolbar</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/Wrapper.jsx~Wrapper.html">Wrapper</a></span></span></li>
<li data-ice="doc"><div data-ice="dirPath" class="nav-dir-path">components/saiku/QueryDesigner/components</div><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/QueryDesigner/components/CubeSelector.jsx~CubeSelector.html">CubeSelector</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/QueryDesigner/components/ReportPreview.jsx~ReportPreview.html">ReportPreview</a></span></span></li>
<li data-ice="doc"><div data-ice="dirPath" class="nav-dir-path">components/saiku/QueryDesigner/stores</div><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/QueryDesigner/stores/CubesStore.js~CubesStoreFactory.html">CubesStoreFactory</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/QueryDesigner/stores/SelectedDimensionsStore.js~SelectedDimensionsStoreFactory.html">SelectedDimensionsStoreFactory</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/components/saiku/QueryDesigner/stores/SelectedMeasuresStore.js~SelectedMeasuresStoreFactory.html">SelectedMeasuresStoreFactory</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-variable">V</span><span data-ice="name"><span><a href="variable/index.html#static-variable-CubesStore">CubesStore</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-variable">V</span><span data-ice="name"><span><a href="variable/index.html#static-variable-SelectedDimensionsStore">SelectedDimensionsStore</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-variable">V</span><span data-ice="name"><span><a href="variable/index.html#static-variable-SelectedMeasuresStore">SelectedMeasuresStore</a></span></span></li>
<li data-ice="doc"><div data-ice="dirPath" class="nav-dir-path">models</div><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/models/CubeModel.js~CubeModel.html">CubeModel</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/models/MenubarModel.js~MenubarModel.html">MenubarModel</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/models/Session.js~Session.html">Session</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/models/SidebarModel.js~SidebarModel.html">SidebarModel</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/models/ToolbarModel.js~ToolbarModel.html">ToolbarModel</a></span></span></li>
<li data-ice="doc"><div data-ice="dirPath" class="nav-dir-path">utils</div><span data-ice="kind" class="kind-variable">V</span><span data-ice="name"><span><a href="variable/index.html#static-variable-Base64">Base64</a></span></span></li>
<li data-ice="doc"><span data-ice="kind" class="kind-variable">V</span><span data-ice="name"><span><a href="variable/index.html#static-variable-Settings">Settings</a></span></span></li>
<li data-ice="doc"><div data-ice="dirPath" class="nav-dir-path">views</div><span data-ice="kind" class="kind-class">C</span><span data-ice="name"><span><a href="class/js/views/Workspace.jsx~Workspace.html">Workspace</a></span></span></li>
</ul>
</div>
</nav>
<div class="content" data-ice="content"><div class="header-notice">
<div data-ice="importPath" class="import-path"><pre class="prettyprint"><code data-ice="importPathCode">import SidebarCollection from '<span><a href="file/js/collections/SidebarCollection.js.html#lineNumber20">meteorite-core-ui/js/collections/SidebarCollection.js</a></span>'</code></pre></div>
<span data-ice="access">public</span>
<span data-ice="kind">class</span>
<span data-ice="source">| <span><a href="file/js/collections/SidebarCollection.js.html#lineNumber20">source</a></span></span>
</div>
<div class="self-detail detail">
<h1 data-ice="name">SidebarCollection</h1>
<div class="flat-list" data-ice="extendsChain"><h4>Extends:</h4><div><span>backbone~Backbone.Collection</span> → SidebarCollection</div></div>
</div>
<div data-ice="constructorSummary"><h2>Constructor Summary</h2><table class="summary" data-ice="summary">
<thead><tr><td data-ice="title" colspan="3">Public Constructor</td></tr></thead>
<tbody>
<tr data-ice="target">
<td>
<span class="access" data-ice="access">public</span>
<span class="override" data-ice="override"></span>
</td>
<td>
<div>
<p>
<span data-ice="name"><span><a href="class/js/collections/SidebarCollection.js~SidebarCollection.html#instance-constructor-constructor">constructor</a></span></span><span data-ice="signature">(options: <span>*</span>)</span>
</p>
</div>
<div>
</div>
</td>
<td>
</td>
</tr>
</tbody>
</table>
</div>
<div data-ice="memberSummary"><h2>Member Summary</h2><table class="summary" data-ice="summary">
<thead><tr><td data-ice="title" colspan="3">Public Members</td></tr></thead>
<tbody>
<tr data-ice="target">
<td>
<span class="access" data-ice="access">public</span>
<span class="override" data-ice="override"></span>
</td>
<td>
<div>
<p>
<span data-ice="name"><span><a href="class/js/collections/SidebarCollection.js~SidebarCollection.html#instance-member-model">model</a></span></span><span data-ice="signature">: <span>*</span></span>
</p>
</div>
<div>
</div>
</td>
<td>
</td>
</tr>
</tbody>
</table>
</div>
<div data-ice="methodSummary"><h2>Method Summary</h2><table class="summary" data-ice="summary">
<thead><tr><td data-ice="title" colspan="3">Public Methods</td></tr></thead>
<tbody>
<tr data-ice="target">
<td>
<span class="access" data-ice="access">public</span>
<span class="override" data-ice="override"></span>
</td>
<td>
<div>
<p>
<span data-ice="name"><span><a href="class/js/collections/SidebarCollection.js~SidebarCollection.html#instance-method-url">url</a></span></span><span data-ice="signature">(): <span><a href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String">string</a></span></span>
</p>
</div>
<div>
</div>
</td>
<td>
</td>
</tr>
</tbody>
</table>
</div>
<div data-ice="constructorDetails"><h2 data-ice="title">Public Constructors</h2>
<div class="detail" data-ice="detail">
<h3 data-ice="anchor" id="instance-constructor-constructor">
<span class="access" data-ice="access">public</span>
<span data-ice="name">constructor</span><span data-ice="signature">(options: <span>*</span>)</span>
<span class="right-info">
<span data-ice="source"><span><a href="file/js/collections/SidebarCollection.js.html#lineNumber21">source</a></span></span>
</span>
</h3>
<div data-ice="properties"><div data-ice="properties">
<h4 data-ice="title">Params:</h4>
<table class="params">
<thead>
<tr><td>Name</td><td>Type</td><td>Attribute</td><td>Description</td></tr>
</thead>
<tbody>
<tr data-ice="property" data-depth="0">
<td data-ice="name" data-depth="0">options</td>
<td data-ice="type"><span>*</span></td>
<td data-ice="appendix"></td>
<td data-ice="description"></td>
</tr>
</tbody>
</table>
</div>
</div>
</div>
</div>
<div data-ice="memberDetails"><h2 data-ice="title">Public Members</h2>
<div class="detail" data-ice="detail">
<h3 data-ice="anchor" id="instance-member-model">
<span class="access" data-ice="access">public</span>
<span data-ice="name">model</span><span data-ice="signature">: <span>*</span></span>
<span class="right-info">
<span data-ice="source"><span><a href="file/js/collections/SidebarCollection.js.html#lineNumber24">source</a></span></span>
</span>
</h3>
<div data-ice="properties">
</div>
</div>
</div>
<div data-ice="methodDetails"><h2 data-ice="title">Public Methods</h2>
<div class="detail" data-ice="detail">
<h3 data-ice="anchor" id="instance-method-url">
<span class="access" data-ice="access">public</span>
<span data-ice="name">url</span><span data-ice="signature">(): <span><a href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String">string</a></span></span>
<span class="right-info">
<span data-ice="source"><span><a href="file/js/collections/SidebarCollection.js.html#lineNumber27">source</a></span></span>
</span>
</h3>
<div data-ice="properties">
</div>
<div class="return-params" data-ice="returnParams">
<h4>Return:</h4>
<table>
<tr>
<td class="return-type" data-ice="returnType"><span><a href="https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String">string</a></span></td>
</tr>
</table>
<div data-ice="returnProperties">
</div>
</div>
</div>
</div>
</div>
<footer class="footer">
Generated by <a href="https://esdoc.org">ESDoc<span data-ice="esdocVersion">(0.4.6)</span></a>
</footer>
<script src="script/search_index.js"></script>
<script src="script/search.js"></script>
<script src="script/pretty-print.js"></script>
<script src="script/inherited-summary.js"></script>
<script src="script/test-summary.js"></script>
<script src="script/inner-link.js"></script>
<script src="script/patch-for-local.js"></script>
</body>
</html>
|
apache-2.0
|
oscarceballos/flink-1.3.2
|
flink-runtime-web/src/main/java/org/apache/flink/runtime/webmonitor/utils/WebFrontendBootstrap.java
|
4828
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.webmonitor.utils;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpServerCodec;
import io.netty.handler.codec.http.router.Handler;
import io.netty.handler.codec.http.router.Router;
import io.netty.handler.ssl.SslHandler;
import io.netty.handler.stream.ChunkedWriteHandler;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.net.SSLUtils;
import org.apache.flink.runtime.webmonitor.HttpRequestHandler;
import org.apache.flink.runtime.webmonitor.PipelineErrorHandler;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import java.io.File;
import java.net.InetSocketAddress;
/**
 * This class encapsulates the bootstrapping of Netty for the web frontend.
*/
public class WebFrontendBootstrap {
private final Router router;
private final Logger log;
private final File uploadDir;
private final SSLContext serverSSLContext;
private final ServerBootstrap bootstrap;
private final Channel serverChannel;
public WebFrontendBootstrap(
Router router,
Logger log,
File directory,
SSLContext sslContext,
String configuredAddress,
int configuredPort,
final Configuration config) throws InterruptedException {
this.router = Preconditions.checkNotNull(router);
this.log = Preconditions.checkNotNull(log);
this.uploadDir = directory;
this.serverSSLContext = sslContext;
ChannelInitializer<SocketChannel> initializer = new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) {
Handler handler = new Handler(WebFrontendBootstrap.this.router);
// SSL should be the first handler in the pipeline
if (serverSSLContext != null) {
SSLEngine sslEngine = serverSSLContext.createSSLEngine();
SSLUtils.setSSLVerAndCipherSuites(sslEngine, config);
sslEngine.setUseClientMode(false);
ch.pipeline().addLast("ssl", new SslHandler(sslEngine));
}
ch.pipeline()
.addLast(new HttpServerCodec())
.addLast(new ChunkedWriteHandler())
.addLast(new HttpRequestHandler(uploadDir))
.addLast(handler.name(), handler)
.addLast(new PipelineErrorHandler(WebFrontendBootstrap.this.log));
}
};
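        // A single boss thread accepts incoming connections; the worker group (default size) handles channel I/O.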
NioEventLoopGroup bossGroup = new NioEventLoopGroup(1);
NioEventLoopGroup workerGroup = new NioEventLoopGroup();
this.bootstrap = new ServerBootstrap();
this.bootstrap
.group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class)
.childHandler(initializer);
ChannelFuture ch;
if (configuredAddress == null) {
ch = this.bootstrap.bind(configuredPort);
} else {
ch = this.bootstrap.bind(configuredAddress, configuredPort);
}
this.serverChannel = ch.sync().channel();
InetSocketAddress bindAddress = (InetSocketAddress) serverChannel.localAddress();
String address = bindAddress.getAddress().getHostAddress();
int port = bindAddress.getPort();
        this.log.info("Web frontend listening at {}:{}", address, port);
}
public ServerBootstrap getBootstrap() {
return bootstrap;
}
public int getServerPort() {
Channel server = this.serverChannel;
if (server != null) {
try {
return ((InetSocketAddress) server.localAddress()).getPort();
}
catch (Exception e) {
log.error("Cannot access local server port", e);
}
}
return -1;
}
public void shutdown() {
if (this.serverChannel != null) {
this.serverChannel.close().awaitUninterruptibly();
}
if (bootstrap != null) {
if (bootstrap.group() != null) {
bootstrap.group().shutdownGracefully();
}
if (bootstrap.childGroup() != null) {
bootstrap.childGroup().shutdownGracefully();
}
}
}
}
|
apache-2.0
|
nghiant2710/base-images
|
balena-base-images/golang/vab820-quad/debian/sid/1.15.8/run/Dockerfile
|
2349
|
# AUTOGENERATED FILE
FROM balenalib/vab820-quad-debian:sid-run
ENV GO_VERSION 1.15.8
# gcc for cgo
RUN apt-get update && apt-get install -y --no-install-recommends \
g++ \
gcc \
libc6-dev \
make \
pkg-config \
git \
&& rm -rf /var/lib/apt/lists/*
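# Fetch the prebuilt armv7hf Go toolchain, verify its checksum, and unpack it into /usr/local/go.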
RUN set -x \
&& fetchDeps=' \
curl \
' \
&& apt-get update && apt-get install -y $fetchDeps --no-install-recommends && rm -rf /var/lib/apt/lists/* \
&& mkdir -p /usr/local/go \
&& curl -SLO "http://resin-packages.s3.amazonaws.com/golang/v$GO_VERSION/go$GO_VERSION.linux-armv7hf.tar.gz" \
&& echo "bde22202576c3920ff5646fb1d19877cedc19501939d6ccd7b16ff89071abd0a go$GO_VERSION.linux-armv7hf.tar.gz" | sha256sum -c - \
&& tar -xzf "go$GO_VERSION.linux-armv7hf.tar.gz" -C /usr/local/go --strip-components=1 \
&& rm -f go$GO_VERSION.linux-armv7hf.tar.gz
ENV GOROOT /usr/local/go
ENV GOPATH /go
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
WORKDIR $GOPATH
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/[email protected]" \
&& echo "Running test-stack@golang" \
&& chmod +x [email protected] \
&& bash [email protected] \
&& rm -rf [email protected]
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo 'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v7 \nOS: Debian Sid \nVariant: run variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nGo v1.15.8 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo '#!/bin/sh.real\nbalena-info\nrm -f /bin/sh\ncp /bin/sh.real /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& cp /bin/sh /bin/sh.real \
&& mv /bin/sh-shim /bin/sh
|
apache-2.0
|
YihuaWanglv/github-trending
|
oschina/2017/2017-07-22.md
|
20593
|
# oschina/2017/2017-07-22.md
## daily
- [zhoubang85/zb](http://git.oschina.net/zhoubang85/zb) : A distributed architecture system developed step by step as technical experience accumulates. Built entirely with today's mainstream technologies and frameworks, it offers a complete research and learning example for anyone who needs one. Created on May 12, 2017 and slowly growing.
- [darkidiot/DistributedSession](http://git.oschina.net/darkidiot/distributedsession) : Redis-based distributed sessions, simple and efficient.
- [贝密游戏/beimi](http://git.oschina.net/beimigame/beimi) : Beimi is the name of a series of board and card games, including mahjong, Dou Dizhu and Texas Hold'em. UI design and the Java backend are under development; version 0.1.0 (front-end UI only) lands in July, with the first full release expected in August. Stay tuned!
- [shuzheng/zheng](http://git.oschina.net/shuzheng/zheng) : A distributed agile development architecture based on Spring + SpringMVC + Mybatis, providing a full set of shared microservice modules: centralized permission management (single sign-on), content management, payment center, user management (with third-party login), WeChat platform, storage system, configuration center, log analysis, tasks and notifications, etc. It supports service governance, monitoring and tracing, aiming to offer small and medium-sized businesses an all-round J2EE enterprise development solution.
- [zhuan/shop-dubbox](http://git.oschina.net/catshen/shop-dubbox) : A shopping mall built with springmvc, dubbox, mybatis, bootstrap and ace
- [ruanyf/loppo](http://git.oschina.net/ruanyf/loppo) : Loppo is a simple, easy-to-use static documentation generator that builds a Markdown documentation site with one click.
- [stylefeng/guns](http://git.oschina.net/naan1993/guns) : Guns is based on SpringBoot and aims to be a leaner admin system, neatly integrating springmvc + shiro + mybatis-plus + beetl! The code is concise, richly commented and easy to pick up, and Guns ships with many basic modules (user, role, department and dictionary management, 10 modules in all), so it can serve directly as admin-system scaffolding.
- [人人开源/renren-fast](http://git.oschina.net/babaio/renren-fast) : renren-fast is a lightweight Java rapid-development platform whose core design goals are fast development, easy learning, light weight and easy extension. Built on Spring Boot, Shiro, MyBatis, Bootstrap, Vue2.x and other frameworks, it includes administrator lists, role management, menu management, scheduled tasks, parameter management, a code generator, log management, cloud storage, an API module (a handy tool for building app interfaces), front/back-end separation and more.
- [程序猿DD/SpringBoot-Learning](http://git.oschina.net/didispace/SpringBoot-Learning) : Spring Boot tutorials and Spring Cloud tutorials
- [iBase4J/iBase4J](http://git.oschina.net/iBase4J/iBase4J) : A Java distributed rapid-development base platform: SpringMVC, Mybatis, mybatis-plus, dubbo distributed services, Redis caching, shiro permission management, Spring-Session single sign-on, distributed cluster scheduling, RESTful services, QQ/WeChat login, app token login, WeChat/Alipay payment; plus utilities for date conversion, type conversion, serialization, Chinese-to-pinyin, ID-number validation, number-to-RMB, email, encryption/decryption, image processing, Excel import/export, FTP/SFTP/fastDFS upload/download, QR-code generation, XML read/write, high-precision arithmetic, system configuration and more.
- [layuiCMS/layuiCMS](http://git.oschina.net/layuicms/layuicms) : An admin template based on layui, implemented purely on the front end with no database; all operations are lost once the browser is closed or the cache is cleared.
- [QiZW/FileManager](http://git.oschina.net/qzwcom/FileManager) : Quick file manager: a complete Android file-manager app, already live on Baidu Mobile Assistant.
- [zhuan/zsCat distributed framework](http://git.oschina.net/catshen/cat) : An open-source distributed framework, zsCat, built on springmvc, spring, mybatis and dubbo. QQ group 2: 559182393 473023475. Demo: http://www.zscat.top/front
- [backflow/framework-admin](http://git.oschina.net/backflow/framework-admin) : Although oschina already hosts plenty of excellent Java admin systems, quite a few of them still run a front-end architecture left over from ancient times (iframe + jsp), the newer ones at best ajax + the History API. One moment you are awed by a project's dazzling feature list, the next you discover its crude front-end interaction design. In this era where everyone is on ES6 + Babel, that is hard to swallow! This project is here to show what a good-looking admin system is!
- [HAO/maple](http://git.oschina.net/blind/maple) : Maple is a base platform for enterprise IT development, a Java EE (J2EE) rapid-development framework for building admin systems, app back ends and the like. Core technologies: Spring, Spring MVC, Apache Shiro, MyBatis, MyBatis-Plus, EasyUI. Core modules: user permission management, authorization and authentication, log management, dictionary management, etc.
- [云智慧/SAPNetworkMonitor](http://git.oschina.net/CloudWise/SAPNetworkMonitor) : A niping-based distributed monitoring platform for SAP networks
- [JFinal/JFinal](http://git.oschina.net/jfinal/jfinal) : JFinal is an ultra-fast WEB + ORM framework for the Java language. Its core design goals are rapid development, little code, easy learning, powerful features, light weight, easy extension and RESTful style. It keeps all the advantages of Java while adding the productivity of dynamic languages such as ruby, python and php, saving you more time to spend with your partner, family and friends :)
- [班纳睿/weixin-java-tools](http://git.oschina.net/binary/weixin-java-tools) : A Java SDK for WeChat Pay, enterprise accounts, official accounts and mini-program development
- [Karson/fastadmin](http://git.oschina.net/karson/fastadmin) : An ultra-fast admin development framework based on ThinkPHP5 and Bootstrap
- [wangyi/vuescroll, a vue-based scrollbar plugin](http://git.oschina.net/wangyi_7099/JiYuvueDeGunDongTiaoChaJianvuescroll) : A vue-based scrollbar plugin; vuescroll 1.0 is out. If you hit any issues, raise them; discussion welcome.
- [红薯/J2Cache](http://git.oschina.net/ld/J2Cache) : A Java caching framework that gives applications a two-level cache: ehcache + redis (memcached), avoiding the network I/O overhead of relying entirely on a standalone cache system. [Currently used by the OSChina website]
- [Looly/hutool](http://git.oschina.net/loolly/hutool) : A set of Java base utilities wrapping JDK methods for files, streams, encryption/decryption, transcoding, regex, threads, XML and more into various Util classes, plus the following components: 1. Bloom filter 2. Cache 3. Database ORM (ActiveRecord-style) 4. HTTP client 5. IO 6. JSON 7. Logging 8. System (JVM and system info) 9. Setting (a configuration format extending Properties)
- [wuqingjian/cj_datatrans](http://git.oschina.net/cj_ss/cj_datatrans) : Nanjing real-estate data synchronization
- [tumobi/nideshop-mini-program](http://git.oschina.net/tumobi/nideshop-mini-program) : NideShop: an open-source B2C mall closely modeled on NetEase Yanxuan, built with Node.js + MySQL (WeChat mini-program client)
- [Mr.AG/AG-Admin](http://git.oschina.net/geek_qi/ace-security) : AG-Admin is a spring cloud based admin system with unified authorization and authentication, featuring user management, resource permission management, gateway API management and more. It supports parallel development of multiple business systems and can serve as admin-system scaffolding. The code is concise and the architecture clear, suitable for learning and for direct use in projects. Core technologies: Eureka, Feign, Ribbon, Zuul, Hystrix, Security, JWT tokens, Mybatis and other mainstream frameworks and middleware, with a Layui front end.
## monthly
## recommend
- [阿信sxq/SpringBootSwaggerStarter](http://git.oschina.net/songxinqiang/SpringBootSwaggerStarter) : Use swagger in spring-boot, with auto-configuration
- [darkidiot/DistributedSession](http://git.oschina.net/darkidiot/distributedsession) : Redis-based distributed sessions, simple and efficient.
- [wangyi/vuescroll, a vue-based scrollbar plugin](http://git.oschina.net/wangyi_7099/JiYuvueDeGunDongTiaoChaJianvuescroll) : A vue-based scrollbar plugin; vuescroll 1.0 is out. If you hit any issues, raise them; discussion welcome.
- [gaengine/titan-script](http://git.oschina.net/gaengine/titan-script) : Open-source processing scripts for the TITAN engine
- [ruanyf/loppo](http://git.oschina.net/ruanyf/loppo) : Loppo is a simple, easy-to-use static documentation generator that builds a Markdown documentation site with one click.
- [云智慧/SAPNetworkMonitor](http://git.oschina.net/CloudWise/SAPNetworkMonitor) : A niping-based distributed monitoring platform for SAP networks
- [轶哥/koa2-API-scaffold](http://git.oschina.net/yi-ge/koa2-API-scaffold) : A lightweight RESTful API server scaffold based on Koa2. (Node.js is not a language)
- [留天下/CreateCocos2dxProject](http://git.oschina.net/lsylovews/CreateCocos2dxProject) : A convenient, fast way to create Cocos2dx cpp (C++) projects using cocos.py from the Cocos2d-x-3.15.1 source package. Written mainly because vs2017 ships no Cocos2dx project template; hopefully it also helps other Cocos2dx cpp developers.
- [QiZW/ColorUtils](http://git.oschina.net/qzwcom/ColorUtils) : An Android color-manipulation tool supporting theme recoloring, darkening/lightening and transparency changes, with one-click import into Android Studio.
- [QiZW/FileManager](http://git.oschina.net/qzwcom/FileManager) : Quick file manager: a complete Android file-manager app, already live on Baidu Mobile Assistant.
- [zhoubang85/zb](http://git.oschina.net/zhoubang85/zb) : A distributed architecture system developed step by step as technical experience accumulates. Built entirely with today's mainstream technologies and frameworks, it offers a complete research and learning example for anyone who needs one. Created on May 12, 2017 and slowly growing.
- [zhuan/shop-dubbox](http://git.oschina.net/catshen/shop-dubbox) : A shopping mall built with springmvc, dubbox, mybatis, bootstrap and ace
- [tumobi/nideshop-mini-program](http://git.oschina.net/tumobi/nideshop-mini-program) : NideShop: an open-source B2C mall closely modeled on NetEase Yanxuan, built with Node.js + MySQL (WeChat mini-program client)
- [ev-ui/ev-ui](http://git.oschina.net/evui/evui) : A UI library with some awesome components.
- [Payne/aetherupload-laravel](http://git.oschina.net/peinhu/aetherupload-laravel) : Upload big files for Laravel, a Laravel extension for uploading large files
- [chengww5217/BiliBiliMerge](http://git.oschina.net/chengww5217/BiliBiliMerge) : Merges video files downloaded by the Bilibili mobile client
- [lzhou/spring-cloud-7simple](http://git.oschina.net/zhou666/spring-cloud-7simple) : Learn in 7 days: a spring cloud series
- [Wang丶Yan/YanRefreshListView](http://git.oschina.net/WangYan2017/YanRefreshListView) : Pull-to-refresh and pull-up load-more for ListView, with a switch for the pull-up behavior
- [學無止境/springboot-fastdfs](http://git.oschina.net/keeplearning996/springboot-fastdfs) : A simple spring boot integration of fastdfs
- [xiwa/spring-nc](http://git.oschina.net/xiwa/spring-nc) : spring-nc = spring no controller, a spring mvc extension that auto-generates controllers from interfaces.
- [重归混沌/silly](http://git.oschina.net/findstr/silly) : A lightweight server framework based on lua
- [闲.大赋/xlsunit](http://git.oschina.net/xiandafu/xlsunit) : A unit-testing framework for database applications that uses excel as input and for output comparison, well suited to heavy database operations
- [weiboad/fierysdk](http://git.oschina.net/weiboad/fierysdk) : Instrumentation library for the Fiery PHP distributed tracing system
- [weiboad/fiery](http://git.oschina.net/weiboad/fiery) : Fiery is a performance tracing and monitoring system for PHP that makes it easy to inspect online call relationships and performance, replay request flows and parameters, and view system exception statistics
- [24K_铂金会员/bee-scheduler](http://git.oschina.net/hellovivi/bee-scheduler) : A flexible, efficient distributed task-scheduling platform
## weekly
- [zhoubang85/zb](http://git.oschina.net/zhoubang85/zb) : A distributed architecture system developed step by step as technical experience accumulates. Built entirely with today's mainstream technologies and frameworks, it offers a complete research and learning example for anyone who needs one. Created on May 12, 2017 and slowly growing.
- [HAO/maple](http://git.oschina.net/blind/maple) : Maple is a base platform for enterprise IT development, a Java EE (J2EE) rapid-development framework for building admin systems, app back ends and the like. Core technologies: Spring, Spring MVC, Apache Shiro, MyBatis, MyBatis-Plus, EasyUI. Core modules: user permission management, authorization and authentication, log management, dictionary management, etc.
- [shuzheng/zheng](http://git.oschina.net/shuzheng/zheng) : A distributed agile development architecture based on Spring + SpringMVC + Mybatis, providing a full set of shared microservice modules: centralized permission management (single sign-on), content management, payment center, user management (with third-party login), WeChat platform, storage system, configuration center, log analysis, tasks and notifications, etc. It supports service governance, monitoring and tracing, aiming to offer small and medium-sized businesses an all-round J2EE enterprise development solution.
- [24K_铂金会员/bee-scheduler](http://git.oschina.net/hellovivi/bee-scheduler) : A flexible, efficient distributed task-scheduling platform
- [zhuan/shop-dubbox](http://git.oschina.net/catshen/shop-dubbox) : A shopping mall built with springmvc, dubbox, mybatis, bootstrap and ace
- [程序猿DD/SpringBoot-Learning](http://git.oschina.net/didispace/SpringBoot-Learning) : Spring Boot tutorials and Spring Cloud tutorials
- [stylefeng/guns](http://git.oschina.net/naan1993/guns) : Guns is based on SpringBoot and aims to be a leaner admin system, neatly integrating springmvc + shiro + mybatis-plus + beetl! The code is concise, richly commented and easy to pick up, and Guns ships with many basic modules (user, role, department and dictionary management, 10 modules in all), so it can serve directly as admin-system scaffolding.
- [贝密游戏/beimi](http://git.oschina.net/beimigame/beimi) : Beimi is the name of a series of board and card games, including mahjong, Dou Dizhu and Texas Hold'em. UI design and the Java backend are under development; version 0.1.0 (front-end UI only) lands in July, with the first full release expected in August. Stay tuned!
- [ecitlm/SpliderApi](http://git.oschina.net/ecitlm/splider) : A nodejs-based crawler API project, with endpoints for front-end developer daily digests, Zhihu Daily, top front-end framework rankings, girl-photo feeds, funny videos, and detail data for assorted video and news feeds
- [人人开源/renren-fast](http://git.oschina.net/babaio/renren-fast) : renren-fast is a lightweight Java rapid-development platform whose core design goals are fast development, easy learning, light weight and easy extension. Built on Spring Boot, Shiro, MyBatis, Bootstrap, Vue2.x and other frameworks, it includes administrator lists, role management, menu management, scheduled tasks, parameter management, a code generator, log management, cloud storage, an API module (a handy tool for building app interfaces), front/back-end separation and more.
- [lzhou/spring-cloud-7simple](http://git.oschina.net/zhou666/spring-cloud-7simple) : Learn in 7 days: a spring cloud series
- [iBase4J/iBase4J](http://git.oschina.net/iBase4J/iBase4J) : A Java distributed rapid-development base platform: SpringMVC, Mybatis, mybatis-plus, dubbo distributed services, Redis caching, shiro permission management, Spring-Session single sign-on, distributed cluster scheduling, RESTful services, QQ/WeChat login, app token login, WeChat/Alipay payment; plus utilities for date conversion, type conversion, serialization, Chinese-to-pinyin, ID-number validation, number-to-RMB, email, encryption/decryption, image processing, Excel import/export, FTP/SFTP/fastDFS upload/download, QR-code generation, XML read/write, high-precision arithmetic, system configuration and more.
- [acsoft/EHR2](http://git.oschina.net/dongfangx/EHR2) : Human resources management system
- [layuiCMS/layuiCMS](http://git.oschina.net/layuicms/layuicms) : An admin template based on layui, implemented purely on the front end with no database; all operations are lost once the browser is closed or the cache is cleared.
- [QiZW/FileManager](http://git.oschina.net/qzwcom/FileManager) : Quick file manager: a complete Android file-manager app, already live on Baidu Mobile Assistant.
- [LD/LucenePlus](http://git.oschina.net/Myzhang/luceneplus) : LucenePlus makes search development simpler, faster and easier to maintain
- [tumobi/nideshop-mini-program](http://git.oschina.net/tumobi/nideshop-mini-program) : NideShop: an open-source B2C mall closely modeled on NetEase Yanxuan, built with Node.js + MySQL (WeChat mini-program client)
- [JeeWeb agile development platform/jeeweb](http://git.oschina.net/dataact/jeeweb) : JEEWEB is a Java web agile development system based on SpringMVC + Spring + Hibernate, an intelligent rapid-development platform with code generation. Spring Framework serves as the core container, Spring MVC as the model-view-controller, Hibernate as the data-access layer, Apache Shiro for authorization, Ehcache for caching hot data, Disruptor as the concurrency framework and Bootstrap as the front-end framework.
- [ruanyf/loppo](http://git.oschina.net/ruanyf/loppo) : Loppo is a simple, easy-to-use static documentation generator that builds a Markdown documentation site with one click.
- [weiboad/fiery](http://git.oschina.net/weiboad/fiery) : Fiery is a performance tracing and monitoring system for PHP that makes it easy to inspect online call relationships and performance, replay request flows and parameters, and view system exception statistics
- [Looly/hutool](http://git.oschina.net/loolly/hutool) : A set of Java base utilities wrapping JDK methods for files, streams, encryption/decryption, transcoding, regex, threads, XML and more into various Util classes, plus the following components: 1. Bloom filter 2. Cache 3. Database ORM (ActiveRecord-style) 4. HTTP client 5. IO 6. JSON 7. Logging 8. System (JVM and system info) 9. Setting (a configuration format extending Properties)
- [大漠穷秋/NiceFish](http://git.oschina.net/mumu-osc/NiceFish) : NiceFish (Mermaid) is a series of projects comprehensively demonstrating the latest Angular, with demos from desktop to mobile. Come and copy!
- [darkidiot/DistributedSession](http://git.oschina.net/darkidiot/distributedsession) : Redis-based distributed sessions, simple and efficient.
- [人人开源/renren-fastplus](http://git.oschina.net/babaio/renren-fastplus) : renren-fastplus is the enterprise edition of renren-fast, a Java rapid-development platform for businesses, adding department management, data-level permissions and more
- [JFinal/JFinal](http://git.oschina.net/jfinal/jfinal) : JFinal is an ultra-fast WEB + ORM framework for the Java language. Its core design goals are rapid development, little code, easy learning, powerful features, light weight, easy extension and RESTful style. It keeps all the advantages of Java while adding the productivity of dynamic languages such as ruby, python and php, saving you more time to spend with your partner, family and friends :)
## latest
- [郑小檩/0609mj_client_with_bugly](http://git.oschina.net/xiaooman07/0609mj_client_with_bugly) :
- [柯楠/ZendollarJS](http://git.oschina.net/columsys/ZendollarJS) : Zendollarjs JavaScript Library
- [啊喂/Calculator_The_Game_Resolver](http://git.oschina.net/xmt328/Calculator_The_Game_Resolver) : A solver for Calculator: The Game
- [我要的惬意/Common iOS utility classes - OC](http://git.oschina.net/zp0210/Tools-OC) : Common iOS utility classes - OC
- [shizhiyuan/Node.js](http://git.oschina.net/svector/Node.js) : Node.js study notes
- [jakinYLiu/jakin_node](http://git.oschina.net/jakinYLiu/jakin_node) : Node scaffolding setup
- [daishixiong/my_object](http://git.oschina.net/daishixiong/my_object) : My project
- [CM/wechat-website](http://git.oschina.net/codingmates/wechat-website) : WeChat mall + corporate website + marketing campaigns
- [pengbinhua/yym2](http://git.oschina.net/pengbinhua/yym2) :
- [esther/front-end knowledge points](http://git.oschina.net/estherChen/qianduanzhishidian) : Review notes on key front-end topics
- [大波/SwiftDesignPattern](http://git.oschina.net/maimaile/swiftdesignpattern) : Introduces the pros and cons of 24 design patterns and their application in swift
- [donzell/ssh](http://git.oschina.net/donzell/ssh) : ssh
- [Cc小蛇/vueAndnode](http://git.oschina.net/1110qing/vueAndnode) :
- [mx/open-ocr-open-ocr-preprocessor](http://git.oschina.net/xjdata/open-ocr-open-ocr-preprocessor) : A mirror of https://github.com/tleyden/docker/blob/master/open-ocr-preprocessor/Dockerfile
- [Jolie/chetie](http://git.oschina.net/jolie105020/chetie) :
- [mx/open-ocr-stroke-width-transform](http://git.oschina.net/xjdata/open-ocr-stroke-width-transform) : A mirror of https://github.com/tleyden/docker/blob/master/stroke-width-transform/Dockerfile
- [rome/awb](http://git.oschina.net/fieldrome/awb) : The Anweibao back-office system, based on laravel 5.3
- [xxzhaa/PAT-A-Practice](http://git.oschina.net/xxzhaa/PAT-A-Practice) : Practice problems for the PAT Advanced Level exam
- [死撑/user table](http://git.oschina.net/si_cheng/yonghubiao) :
- [fffy2366/宝儿安](http://git.oschina.net/fffy/babyphoto) : 宝儿安 photo-selection system
- [喵灵/123](http://git.oschina.net/miaoling/123) : 123
- [xxzhaa/ggg](http://git.oschina.net/xxzhaa/ggg) : ggg
- [Zhouxianfei/com.zhouxianfei.common.algorithm](http://git.oschina.net/mengdiangame/com.zhouxianfei.common.algorithm) : General-purpose algorithms
- [EdgarX/GitHubHands](http://git.oschina.net/edgarx/GitHubHands) : A GitHub client.
- [dEdge/test](http://git.oschina.net/dedge/test) : open test
|
apache-2.0
|
googleapis/nodejs-kms
|
samples/generated/v1/key_management_service.generate_random_bytes.js
|
2228
|
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// ** This file is automatically generated by gapic-generator-typescript. **
// ** https://github.com/googleapis/gapic-generator-typescript **
// ** All changes to this file may be overwritten. **
'use strict';
function main() {
// [START cloudkms_v1_generated_KeyManagementService_GenerateRandomBytes_async]
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
/**
* The project-specific location in which to generate random bytes.
* For example, "projects/my-project/locations/us-central1".
*/
// const location = 'abc123'
/**
* The length in bytes of the amount of randomness to retrieve. Minimum 8
* bytes, maximum 1024 bytes.
*/
// const lengthBytes = 1234
/**
* The ProtectionLevel google.cloud.kms.v1.ProtectionLevel to use when
* generating the random data. Currently, only
* HSM google.cloud.kms.v1.ProtectionLevel.HSM protection level is
* supported.
*/
// const protectionLevel = {}
// Imports the Kms library
const {KeyManagementServiceClient} = require('@google-cloud/kms').v1;
// Instantiates a client
const kmsClient = new KeyManagementServiceClient();
async function callGenerateRandomBytes() {
// Construct request
const request = {
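      // location, lengthBytes and protectionLevel (declared above) would be set here once uncommented.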
};
// Run request
const response = await kmsClient.generateRandomBytes(request);
console.log(response);
}
callGenerateRandomBytes();
// [END cloudkms_v1_generated_KeyManagementService_GenerateRandomBytes_async]
}
process.on('unhandledRejection', err => {
console.error(err.message);
process.exitCode = 1;
});
main(...process.argv.slice(2));
|
apache-2.0
|
CollegeBoreal/Tutoriel
|
E.Education/G.Github/1.Github-Learning-Lab/0.Installation/README.md
|
3004
|
# Github Learning
https://lab.github.com/docs/install
### :o: Starting a new lesson
<img src="images/github-learning/-.Start-Learning.png" width="403" height="334" ></img>
### :a: Signing up for [Github Learning Lab](https://lab.github.com/)
:round_pushpin: Start the registration
If you are not yet registered, `Github Learning Lab` will ask you to install the application in your profile
<img src="images/github-learning/0.Start-lab-github.png" width="264" height="361" ></img>
`Github Learning Lab` will perform actions and use resources tied to your account.
:round_pushpin: After starting the registration, accept the terms of use
<img src="images/github-learning/1.Accept-Terms.png" width="214" height="95" ></img>
:round_pushpin: After accepting the terms of use, `Github Learning Lab` will ask whether it may access all repositories
<img src="images/github-learning/2.Install-on-all-repositories.png" width="424" height="334" ></img>
:round_pushpin: Confirm the installation
<img src="images/github-learning/3.Install-Github-Learning.png" width="282" height="371" ></img>
### :b: Starting your first course on [Github Learning Lab](https://lab.github.com)
:star: https://lab.github.com
:pushpin: Search for your course
<img src="images/github-learning/4.Launch-your-first-course.png" width="400" height="140" ></img>
:pushpin: Once the course is found, click its link
<img src="images/github-learning/5.Introduction-Python.png" width="432" height="242" ></img>
:pushpin: Start the new lesson
<img src="images/github-learning/6.Start-Free-Course.png" width="430" height="340" ></img>
:pushpin: Accept the installation of the new lesson in your account
<img src="images/github-learning/7.Register-Free-Course.png" width="429" height="151" ></img>
:bulb: For some lessons, make sure to select the `CLI` in order to use the commands
<img src="images/github-learning/7.Register-Free-Course-CLI.png" width="179" height="217" ></img>
### :ab: Verifying the installation of the new lesson in your github account
:bulb: https://github.com/ `(my ` :id: ` github)`
<img src="images/github-learning/8.Start-Coding.png" width="372" height="200" ></img>
## Verifying the `Github Learning Lab` Application
:warning: Check that the `Github Learning Lab` application is properly installed in your profile
:round_pushpin: Click the `settings` sub-menu in the user menu
<img src="images/settings/0.Settings.png" width="99" height="205" ></img>
:round_pushpin: Click the `Applications` sub-menu to verify that the application is properly installed
<img src="images/settings/1.Installed-Github-Apps.png" width="474" height="241" ></img>
:round_pushpin: Click the `Authorized ...` tab to verify that it has all the required rights
<img src="images/settings/2.Authorized-Github-Apps.png" width="474" height="241" ></img>
|
apache-2.0
|
cedral/aws-sdk-cpp
|
aws-cpp-sdk-directconnect/include/aws/directconnect/model/DirectConnectGateway.h
|
13921
|
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#pragma once
#include <aws/directconnect/DirectConnect_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/directconnect/model/DirectConnectGatewayState.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace DirectConnect
{
namespace Model
{
/**
* <p>Information about a Direct Connect gateway, which enables you to connect
 * virtual interfaces and virtual private gateways or transit
* gateways.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/DirectConnectGateway">AWS
* API Reference</a></p>
*/
class AWS_DIRECTCONNECT_API DirectConnectGateway
{
public:
DirectConnectGateway();
DirectConnectGateway(Aws::Utils::Json::JsonView jsonValue);
DirectConnectGateway& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>The ID of the Direct Connect gateway.</p>
*/
inline const Aws::String& GetDirectConnectGatewayId() const{ return m_directConnectGatewayId; }
/**
* <p>The ID of the Direct Connect gateway.</p>
*/
inline bool DirectConnectGatewayIdHasBeenSet() const { return m_directConnectGatewayIdHasBeenSet; }
/**
* <p>The ID of the Direct Connect gateway.</p>
*/
inline void SetDirectConnectGatewayId(const Aws::String& value) { m_directConnectGatewayIdHasBeenSet = true; m_directConnectGatewayId = value; }
/**
* <p>The ID of the Direct Connect gateway.</p>
*/
inline void SetDirectConnectGatewayId(Aws::String&& value) { m_directConnectGatewayIdHasBeenSet = true; m_directConnectGatewayId = std::move(value); }
/**
* <p>The ID of the Direct Connect gateway.</p>
*/
inline void SetDirectConnectGatewayId(const char* value) { m_directConnectGatewayIdHasBeenSet = true; m_directConnectGatewayId.assign(value); }
/**
* <p>The ID of the Direct Connect gateway.</p>
*/
inline DirectConnectGateway& WithDirectConnectGatewayId(const Aws::String& value) { SetDirectConnectGatewayId(value); return *this;}
/**
* <p>The ID of the Direct Connect gateway.</p>
*/
inline DirectConnectGateway& WithDirectConnectGatewayId(Aws::String&& value) { SetDirectConnectGatewayId(std::move(value)); return *this;}
/**
* <p>The ID of the Direct Connect gateway.</p>
*/
inline DirectConnectGateway& WithDirectConnectGatewayId(const char* value) { SetDirectConnectGatewayId(value); return *this;}
/**
* <p>The name of the Direct Connect gateway.</p>
*/
inline const Aws::String& GetDirectConnectGatewayName() const{ return m_directConnectGatewayName; }
/**
* <p>The name of the Direct Connect gateway.</p>
*/
inline bool DirectConnectGatewayNameHasBeenSet() const { return m_directConnectGatewayNameHasBeenSet; }
/**
* <p>The name of the Direct Connect gateway.</p>
*/
inline void SetDirectConnectGatewayName(const Aws::String& value) { m_directConnectGatewayNameHasBeenSet = true; m_directConnectGatewayName = value; }
/**
* <p>The name of the Direct Connect gateway.</p>
*/
inline void SetDirectConnectGatewayName(Aws::String&& value) { m_directConnectGatewayNameHasBeenSet = true; m_directConnectGatewayName = std::move(value); }
/**
* <p>The name of the Direct Connect gateway.</p>
*/
inline void SetDirectConnectGatewayName(const char* value) { m_directConnectGatewayNameHasBeenSet = true; m_directConnectGatewayName.assign(value); }
/**
* <p>The name of the Direct Connect gateway.</p>
*/
inline DirectConnectGateway& WithDirectConnectGatewayName(const Aws::String& value) { SetDirectConnectGatewayName(value); return *this;}
/**
* <p>The name of the Direct Connect gateway.</p>
*/
inline DirectConnectGateway& WithDirectConnectGatewayName(Aws::String&& value) { SetDirectConnectGatewayName(std::move(value)); return *this;}
/**
* <p>The name of the Direct Connect gateway.</p>
*/
inline DirectConnectGateway& WithDirectConnectGatewayName(const char* value) { SetDirectConnectGatewayName(value); return *this;}
/**
* <p>The autonomous system number (ASN) for the Amazon side of the connection.</p>
*/
inline long long GetAmazonSideAsn() const{ return m_amazonSideAsn; }
/**
* <p>The autonomous system number (ASN) for the Amazon side of the connection.</p>
*/
inline bool AmazonSideAsnHasBeenSet() const { return m_amazonSideAsnHasBeenSet; }
/**
* <p>The autonomous system number (ASN) for the Amazon side of the connection.</p>
*/
inline void SetAmazonSideAsn(long long value) { m_amazonSideAsnHasBeenSet = true; m_amazonSideAsn = value; }
/**
* <p>The autonomous system number (ASN) for the Amazon side of the connection.</p>
*/
inline DirectConnectGateway& WithAmazonSideAsn(long long value) { SetAmazonSideAsn(value); return *this;}
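    /*
     * Illustrative sketch only: each With* mutator returns *this, so a model
     * object can be populated fluently, e.g. when building fixtures in tests.
     * The ID and name below are made-up placeholder values.
     *
     *   DirectConnectGateway gateway = DirectConnectGateway()
     *       .WithDirectConnectGatewayId("dxgw-0123456789abcdef0")
     *       .WithDirectConnectGatewayName("example-gateway")
     *       .WithAmazonSideAsn(64512); // 64512-65534 is the private ASN range
     */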
/**
* <p>The ID of the AWS account that owns the Direct Connect gateway.</p>
*/
inline const Aws::String& GetOwnerAccount() const{ return m_ownerAccount; }
/**
* <p>The ID of the AWS account that owns the Direct Connect gateway.</p>
*/
inline bool OwnerAccountHasBeenSet() const { return m_ownerAccountHasBeenSet; }
/**
* <p>The ID of the AWS account that owns the Direct Connect gateway.</p>
*/
inline void SetOwnerAccount(const Aws::String& value) { m_ownerAccountHasBeenSet = true; m_ownerAccount = value; }
/**
* <p>The ID of the AWS account that owns the Direct Connect gateway.</p>
*/
inline void SetOwnerAccount(Aws::String&& value) { m_ownerAccountHasBeenSet = true; m_ownerAccount = std::move(value); }
/**
* <p>The ID of the AWS account that owns the Direct Connect gateway.</p>
*/
inline void SetOwnerAccount(const char* value) { m_ownerAccountHasBeenSet = true; m_ownerAccount.assign(value); }
/**
* <p>The ID of the AWS account that owns the Direct Connect gateway.</p>
*/
inline DirectConnectGateway& WithOwnerAccount(const Aws::String& value) { SetOwnerAccount(value); return *this;}
/**
* <p>The ID of the AWS account that owns the Direct Connect gateway.</p>
*/
inline DirectConnectGateway& WithOwnerAccount(Aws::String&& value) { SetOwnerAccount(std::move(value)); return *this;}
/**
* <p>The ID of the AWS account that owns the Direct Connect gateway.</p>
*/
inline DirectConnectGateway& WithOwnerAccount(const char* value) { SetOwnerAccount(value); return *this;}
/**
* <p>The state of the Direct Connect gateway. The following are the possible
* values:</p> <ul> <li> <p> <code>pending</code>: The initial state after calling
* <a>CreateDirectConnectGateway</a>.</p> </li> <li> <p> <code>available</code>:
* The Direct Connect gateway is ready for use.</p> </li> <li> <p>
* <code>deleting</code>: The initial state after calling
* <a>DeleteDirectConnectGateway</a>.</p> </li> <li> <p> <code>deleted</code>: The
* Direct Connect gateway is deleted and cannot pass traffic.</p> </li> </ul>
*/
inline const DirectConnectGatewayState& GetDirectConnectGatewayState() const{ return m_directConnectGatewayState; }
/**
* <p>The state of the Direct Connect gateway. The following are the possible
* values:</p> <ul> <li> <p> <code>pending</code>: The initial state after calling
* <a>CreateDirectConnectGateway</a>.</p> </li> <li> <p> <code>available</code>:
* The Direct Connect gateway is ready for use.</p> </li> <li> <p>
* <code>deleting</code>: The initial state after calling
* <a>DeleteDirectConnectGateway</a>.</p> </li> <li> <p> <code>deleted</code>: The
* Direct Connect gateway is deleted and cannot pass traffic.</p> </li> </ul>
*/
inline bool DirectConnectGatewayStateHasBeenSet() const { return m_directConnectGatewayStateHasBeenSet; }
/**
* <p>The state of the Direct Connect gateway. The following are the possible
* values:</p> <ul> <li> <p> <code>pending</code>: The initial state after calling
* <a>CreateDirectConnectGateway</a>.</p> </li> <li> <p> <code>available</code>:
* The Direct Connect gateway is ready for use.</p> </li> <li> <p>
* <code>deleting</code>: The initial state after calling
* <a>DeleteDirectConnectGateway</a>.</p> </li> <li> <p> <code>deleted</code>: The
* Direct Connect gateway is deleted and cannot pass traffic.</p> </li> </ul>
*/
inline void SetDirectConnectGatewayState(const DirectConnectGatewayState& value) { m_directConnectGatewayStateHasBeenSet = true; m_directConnectGatewayState = value; }
/**
* <p>The state of the Direct Connect gateway. The following are the possible
* values:</p> <ul> <li> <p> <code>pending</code>: The initial state after calling
* <a>CreateDirectConnectGateway</a>.</p> </li> <li> <p> <code>available</code>:
* The Direct Connect gateway is ready for use.</p> </li> <li> <p>
* <code>deleting</code>: The initial state after calling
* <a>DeleteDirectConnectGateway</a>.</p> </li> <li> <p> <code>deleted</code>: The
* Direct Connect gateway is deleted and cannot pass traffic.</p> </li> </ul>
*/
inline void SetDirectConnectGatewayState(DirectConnectGatewayState&& value) { m_directConnectGatewayStateHasBeenSet = true; m_directConnectGatewayState = std::move(value); }
/**
* <p>The state of the Direct Connect gateway. The following are the possible
* values:</p> <ul> <li> <p> <code>pending</code>: The initial state after calling
* <a>CreateDirectConnectGateway</a>.</p> </li> <li> <p> <code>available</code>:
* The Direct Connect gateway is ready for use.</p> </li> <li> <p>
* <code>deleting</code>: The initial state after calling
* <a>DeleteDirectConnectGateway</a>.</p> </li> <li> <p> <code>deleted</code>: The
* Direct Connect gateway is deleted and cannot pass traffic.</p> </li> </ul>
*/
inline DirectConnectGateway& WithDirectConnectGatewayState(const DirectConnectGatewayState& value) { SetDirectConnectGatewayState(value); return *this;}
/**
* <p>The state of the Direct Connect gateway. The following are the possible
* values:</p> <ul> <li> <p> <code>pending</code>: The initial state after calling
* <a>CreateDirectConnectGateway</a>.</p> </li> <li> <p> <code>available</code>:
* The Direct Connect gateway is ready for use.</p> </li> <li> <p>
* <code>deleting</code>: The initial state after calling
* <a>DeleteDirectConnectGateway</a>.</p> </li> <li> <p> <code>deleted</code>: The
* Direct Connect gateway is deleted and cannot pass traffic.</p> </li> </ul>
*/
inline DirectConnectGateway& WithDirectConnectGatewayState(DirectConnectGatewayState&& value) { SetDirectConnectGatewayState(std::move(value)); return *this;}
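    /*
     * Illustrative sketch only: gating follow-up work on the lifecycle state
     * documented above, using the DirectConnectGatewayState enum that this
     * model exposes.
     *
     *   if (gateway.GetDirectConnectGatewayState() == DirectConnectGatewayState::available)
     *   {
     *       // The gateway is ready; associations and attachments can proceed.
     *   }
     */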
/**
* <p>The error message if the state of an object failed to advance.</p>
*/
inline const Aws::String& GetStateChangeError() const{ return m_stateChangeError; }
/**
* <p>The error message if the state of an object failed to advance.</p>
*/
inline bool StateChangeErrorHasBeenSet() const { return m_stateChangeErrorHasBeenSet; }
/**
* <p>The error message if the state of an object failed to advance.</p>
*/
inline void SetStateChangeError(const Aws::String& value) { m_stateChangeErrorHasBeenSet = true; m_stateChangeError = value; }
/**
* <p>The error message if the state of an object failed to advance.</p>
*/
inline void SetStateChangeError(Aws::String&& value) { m_stateChangeErrorHasBeenSet = true; m_stateChangeError = std::move(value); }
/**
* <p>The error message if the state of an object failed to advance.</p>
*/
inline void SetStateChangeError(const char* value) { m_stateChangeErrorHasBeenSet = true; m_stateChangeError.assign(value); }
/**
* <p>The error message if the state of an object failed to advance.</p>
*/
inline DirectConnectGateway& WithStateChangeError(const Aws::String& value) { SetStateChangeError(value); return *this;}
/**
* <p>The error message if the state of an object failed to advance.</p>
*/
inline DirectConnectGateway& WithStateChangeError(Aws::String&& value) { SetStateChangeError(std::move(value)); return *this;}
/**
* <p>The error message if the state of an object failed to advance.</p>
*/
inline DirectConnectGateway& WithStateChangeError(const char* value) { SetStateChangeError(value); return *this;}
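    /*
     * Illustrative sketch only: StateChangeError is populated only when a state
     * transition failed, so check the HasBeenSet flag before reading it.
     *
     *   if (gateway.StateChangeErrorHasBeenSet())
     *   {
     *       // e.g. log gateway.GetStateChangeError() before retrying
     *   }
     */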
private:
Aws::String m_directConnectGatewayId;
bool m_directConnectGatewayIdHasBeenSet;
Aws::String m_directConnectGatewayName;
bool m_directConnectGatewayNameHasBeenSet;
long long m_amazonSideAsn;
bool m_amazonSideAsnHasBeenSet;
Aws::String m_ownerAccount;
bool m_ownerAccountHasBeenSet;
DirectConnectGatewayState m_directConnectGatewayState;
bool m_directConnectGatewayStateHasBeenSet;
Aws::String m_stateChangeError;
bool m_stateChangeErrorHasBeenSet;
};
} // namespace Model
} // namespace DirectConnect
} // namespace Aws
|
apache-2.0
|